hexsha
stringlengths 40
40
| size
int64 10
805k
| ext
stringclasses 6
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
176
| max_stars_repo_name
stringlengths 7
114
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
176
| max_issues_repo_name
stringlengths 7
114
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
48.5k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
176
| max_forks_repo_name
stringlengths 7
114
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 10
805k
| avg_line_length
float64 5.53
11k
| max_line_length
int64 10
129k
| alphanum_fraction
float64 0.13
0.93
| content_no_comment
stringlengths 0
449k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7142cca0f48800d8b25507f5d1b79a5a49af070
| 245
|
py
|
Python
|
simdata/hakata/script/dummy_db.py
|
RDC4Smart-Mobility/UniSim
|
872a22ccdac859b9a12f11a9f5d20467e9db18ee
|
[
"MIT"
] | null | null | null |
simdata/hakata/script/dummy_db.py
|
RDC4Smart-Mobility/UniSim
|
872a22ccdac859b9a12f11a9f5d20467e9db18ee
|
[
"MIT"
] | null | null | null |
simdata/hakata/script/dummy_db.py
|
RDC4Smart-Mobility/UniSim
|
872a22ccdac859b9a12f11a9f5d20467e9db18ee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from unisim import DB
class DummyDB(DB):
def connect(self):
pass
def disconnect(self):
pass
def init_table(self):
pass
def store(self, tick, objects):
pass
| 14.411765
| 35
| 0.526531
|
from unisim import DB
class DummyDB(DB):
def connect(self):
pass
def disconnect(self):
pass
def init_table(self):
pass
def store(self, tick, objects):
pass
| true
| true
|
f7142d1dd2c3894eb628d06b70747641aac633ec
| 7,231
|
py
|
Python
|
paramunittest.py
|
rik0/ParamUnittest
|
e064fb382c6da355ae7242e79ea1bf14fb2b43e9
|
[
"BSD-2-Clause"
] | 7
|
2016-03-17T07:34:39.000Z
|
2019-08-09T05:31:38.000Z
|
paramunittest.py
|
rik0/ParamUnittest
|
e064fb382c6da355ae7242e79ea1bf14fb2b43e9
|
[
"BSD-2-Clause"
] | 2
|
2015-01-18T03:35:14.000Z
|
2017-03-27T18:11:41.000Z
|
paramunittest.py
|
rik0/ParamUnittest
|
e064fb382c6da355ae7242e79ea1bf14fb2b43e9
|
[
"BSD-2-Clause"
] | 4
|
2015-10-23T07:42:31.000Z
|
2021-01-15T02:28:11.000Z
|
# Copyright 2012 Enrico Franchi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import unittest
import collections
import importlib
__all__ = [
'parametrized',
'ParametrizedTestCase',
]
def _process_parameters(parameters_seq):
processed_parameters_seq = []
for parameters in parameters_seq:
if isinstance(parameters, collections.Mapping):
processed_parameters_seq.append((tuple(),
dict(parameters)))
elif (len(parameters) == 2
and isinstance(parameters[0], collections.Sequence)
and isinstance(parameters[1], collections.Mapping)):
processed_parameters_seq.append((tuple(parameters[0]),
dict(parameters[1])))
else:
processed_parameters_seq.append((tuple(parameters),
dict()))
return processed_parameters_seq
def _build_name(name, index):
return '%s_%d' % (name, index)
def strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
class ParametrizedTestCase(unittest.TestCase):
def setParameters(self, *args, **kwargs):
raise NotImplementedError(
('setParameters must be implemented '
'because it receives the parameters.'))
def getParameters(self):
"""
Return the parameters with which this test case was instantiated.
"""
raise NotImplementedError(
'getParameters should have been patched by parametrized.')
def getFullParametersSequence(self):
raise NotImplementedError(
'getFullParametersSequence should have been patched by parametrized.')
def getTestCaseIndex(self):
"""
Return the index of the current test case according to the list of
parametes passed to parametrized.
"""
raise NotImplementedError(
'getTestCaseIndex should have been patched by parametrized.')
def getFullParametersSequence(self):
"""
Return the full normalized list of parameters passed to parametrized.
"""
raise NotImplementedError(
'getFullParametersSequence should have been patched by parametrized.')
def __str__(self):
try:
return "%s[%d](%s) (%s)" % (self._testMethodName,
self.getTestCaseIndex(),
self.getParameters(),
strclass(self.__class__))
except NotImplementedError:
return "%s[...](...) (%s)" % (self._testMethodName,
strclass(self.__class__))
def __repr__(self):
try:
return "<%s[%d](%s) testMethod=%s>" % (strclass(self.__class__),
self.getTestCaseIndex(),
self.getParameters(),
self._testMethodName)
except NotImplementedError:
return "<%s[...](...) testMethod=%s>" % (strclass(self.__class__),
self._testMethodName)
class PropagateSetAttr(type):
def __new__(mcs, name, bases, dct):
dct['setattr_observers'] = []
cls = super(PropagateSetAttr, mcs).__new__(mcs, name, bases, dct)
return cls
def __setattr__(cls, key, value):
for observer in cls.setattr_observers:
setattr(observer, key, value)
def make_propagator(cls, setattr_observers):
SkippableTest = PropagateSetAttr('SkippableTest', (unittest.TestCase,),
{})
SkippableTest.setattr_observers.extend(setattr_observers)
return SkippableTest
def parametrized(*parameters_seq):
parameters_seq = _process_parameters(parameters_seq)
def magic_module_set_test_case(cls):
if not hasattr(cls, 'setParameters'):
raise TypeError('%s does not have a setParameters method.' % (
cls.__name__, ))
module = importlib.import_module(cls.__module__)
generated_test_cases = []
for index, parameters in enumerate(parameters_seq):
name = _build_name(cls.__name__, index)
def closing_over(parameters=parameters, index=index):
def setUp(self):
self.setParameters(*parameters[0], **parameters[1])
cls.setUp(self)
def getParameters(self):
"""
Return the parameters with which this test case was instantiated.
"""
return parameters
def getTestCaseIndex(self):
"""
Return the index of the current test case according to the list of
parametes passed to parametrized.
"""
return index
def getFullParametersSequence(self):
"""
Return the full normalized list of parameters passed to parametrized.
"""
return copy.copy(parameters_seq)
return setUp, getParameters, getTestCaseIndex, getFullParametersSequence
(set_up, get_parameters,
get_test_case_index,
get_full_parameters_sequence) = closing_over()
new_class = type(name, (cls, ),
{'setUp': set_up,
'getParameters': get_parameters,
'getTestCaseIndex': get_test_case_index,
'getFullParametersSequence': get_full_parameters_sequence})
generated_test_cases.append(new_class)
setattr(module, name, new_class)
return make_propagator(cls, generated_test_cases)
return magic_module_set_test_case
| 40.396648
| 89
| 0.603098
|
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
import copy
import unittest
import collections
import importlib
__all__ = [
'parametrized',
'ParametrizedTestCase',
]
def _process_parameters(parameters_seq):
processed_parameters_seq = []
for parameters in parameters_seq:
if isinstance(parameters, collections.Mapping):
processed_parameters_seq.append((tuple(),
dict(parameters)))
elif (len(parameters) == 2
and isinstance(parameters[0], collections.Sequence)
and isinstance(parameters[1], collections.Mapping)):
processed_parameters_seq.append((tuple(parameters[0]),
dict(parameters[1])))
else:
processed_parameters_seq.append((tuple(parameters),
dict()))
return processed_parameters_seq
def _build_name(name, index):
return '%s_%d' % (name, index)
def strclass(cls):
return "%s.%s" % (cls.__module__, cls.__name__)
class ParametrizedTestCase(unittest.TestCase):
def setParameters(self, *args, **kwargs):
raise NotImplementedError(
('setParameters must be implemented '
'because it receives the parameters.'))
def getParameters(self):
raise NotImplementedError(
'getParameters should have been patched by parametrized.')
def getFullParametersSequence(self):
raise NotImplementedError(
'getFullParametersSequence should have been patched by parametrized.')
def getTestCaseIndex(self):
raise NotImplementedError(
'getTestCaseIndex should have been patched by parametrized.')
def getFullParametersSequence(self):
raise NotImplementedError(
'getFullParametersSequence should have been patched by parametrized.')
def __str__(self):
try:
return "%s[%d](%s) (%s)" % (self._testMethodName,
self.getTestCaseIndex(),
self.getParameters(),
strclass(self.__class__))
except NotImplementedError:
return "%s[...](...) (%s)" % (self._testMethodName,
strclass(self.__class__))
def __repr__(self):
try:
return "<%s[%d](%s) testMethod=%s>" % (strclass(self.__class__),
self.getTestCaseIndex(),
self.getParameters(),
self._testMethodName)
except NotImplementedError:
return "<%s[...](...) testMethod=%s>" % (strclass(self.__class__),
self._testMethodName)
class PropagateSetAttr(type):
def __new__(mcs, name, bases, dct):
dct['setattr_observers'] = []
cls = super(PropagateSetAttr, mcs).__new__(mcs, name, bases, dct)
return cls
def __setattr__(cls, key, value):
for observer in cls.setattr_observers:
setattr(observer, key, value)
def make_propagator(cls, setattr_observers):
SkippableTest = PropagateSetAttr('SkippableTest', (unittest.TestCase,),
{})
SkippableTest.setattr_observers.extend(setattr_observers)
return SkippableTest
def parametrized(*parameters_seq):
parameters_seq = _process_parameters(parameters_seq)
def magic_module_set_test_case(cls):
if not hasattr(cls, 'setParameters'):
raise TypeError('%s does not have a setParameters method.' % (
cls.__name__, ))
module = importlib.import_module(cls.__module__)
generated_test_cases = []
for index, parameters in enumerate(parameters_seq):
name = _build_name(cls.__name__, index)
def closing_over(parameters=parameters, index=index):
def setUp(self):
self.setParameters(*parameters[0], **parameters[1])
cls.setUp(self)
def getParameters(self):
return parameters
def getTestCaseIndex(self):
return index
def getFullParametersSequence(self):
return copy.copy(parameters_seq)
return setUp, getParameters, getTestCaseIndex, getFullParametersSequence
(set_up, get_parameters,
get_test_case_index,
get_full_parameters_sequence) = closing_over()
new_class = type(name, (cls, ),
{'setUp': set_up,
'getParameters': get_parameters,
'getTestCaseIndex': get_test_case_index,
'getFullParametersSequence': get_full_parameters_sequence})
generated_test_cases.append(new_class)
setattr(module, name, new_class)
return make_propagator(cls, generated_test_cases)
return magic_module_set_test_case
| true
| true
|
f7142e78dcfc85a5990b30355dbe0eeb484752fd
| 1,454
|
py
|
Python
|
download_data.py
|
EugenHotaj/ray-automl
|
f516c06f8c24559edac120941cd36e8720ecd228
|
[
"MIT"
] | null | null | null |
download_data.py
|
EugenHotaj/ray-automl
|
f516c06f8c24559edac120941cd36e8720ecd228
|
[
"MIT"
] | null | null | null |
download_data.py
|
EugenHotaj/ray-automl
|
f516c06f8c24559edac120941cd36e8720ecd228
|
[
"MIT"
] | null | null | null |
"""Script to download and cache all data."""
import os
from typing import List
import openml
from automl import openml_utils
BENCHMARK_TASKS = {"adult": 7592, "nomao": 9977, "phoneme": 9952}
FOLD_COL = "fold"
def download_openml_tasks(task_ids: List[int]):
"""Downloads the given task_ids from OpenML and dumps them as OpenMLTasks."""
tasks = openml.tasks.get_tasks(
task_ids, download_data=True, download_qualities=False
)
for task in tasks:
dataset = task.get_dataset()
df, _, categorical, columns = dataset.get_data()
label_col = dataset.default_target_attribute
feature_cols = [col for col in columns if col != label_col]
numerical_cols = [col for ind, col in zip(categorical, feature_cols) if not ind]
categorical_cols = [col for ind, col in zip(categorical, feature_cols) if ind]
df[FOLD_COL] = -1
splits = task.download_split().split[0] # We assume one repetition.
for split, idxs in splits.items():
idxs = idxs[0].test
df.loc[idxs, FOLD_COL] = split
out_path = openml_utils.task_path(task.task_id)
os.makedirs(os.path.dirname(out_path), exist_ok=True)
task = openml_utils.OpenMLTask(
df, feature_cols, numerical_cols, categorical_cols, label_col, FOLD_COL
)
task.dump(out_path)
if __name__ == "__main__":
download_openml_tasks(list(BENCHMARK_TASKS.values()))
| 33.813953
| 88
| 0.672627
|
import os
from typing import List
import openml
from automl import openml_utils
BENCHMARK_TASKS = {"adult": 7592, "nomao": 9977, "phoneme": 9952}
FOLD_COL = "fold"
def download_openml_tasks(task_ids: List[int]):
tasks = openml.tasks.get_tasks(
task_ids, download_data=True, download_qualities=False
)
for task in tasks:
dataset = task.get_dataset()
df, _, categorical, columns = dataset.get_data()
label_col = dataset.default_target_attribute
feature_cols = [col for col in columns if col != label_col]
numerical_cols = [col for ind, col in zip(categorical, feature_cols) if not ind]
categorical_cols = [col for ind, col in zip(categorical, feature_cols) if ind]
df[FOLD_COL] = -1
splits = task.download_split().split[0]
for split, idxs in splits.items():
idxs = idxs[0].test
df.loc[idxs, FOLD_COL] = split
out_path = openml_utils.task_path(task.task_id)
os.makedirs(os.path.dirname(out_path), exist_ok=True)
task = openml_utils.OpenMLTask(
df, feature_cols, numerical_cols, categorical_cols, label_col, FOLD_COL
)
task.dump(out_path)
if __name__ == "__main__":
download_openml_tasks(list(BENCHMARK_TASKS.values()))
| true
| true
|
f7142ea121c4efd6ef516ca222b10a3ea61550d2
| 3,746
|
py
|
Python
|
data_loader.py
|
SmirnovKol/recurrent-visual-attention
|
4cb8d9e768ae35f38439278bb8a7b4d6b253a537
|
[
"MIT"
] | 463
|
2017-12-25T12:36:08.000Z
|
2022-03-29T17:05:19.000Z
|
data_loader.py
|
Pandinosaurus/recurrent-visual-attention
|
a38ac8958ebf1c61a10c4d5320f1e31d3d0b73dd
|
[
"MIT"
] | 44
|
2018-01-16T08:41:36.000Z
|
2021-12-17T06:23:13.000Z
|
data_loader.py
|
Pandinosaurus/recurrent-visual-attention
|
a38ac8958ebf1c61a10c4d5320f1e31d3d0b73dd
|
[
"MIT"
] | 135
|
2017-12-26T05:09:03.000Z
|
2022-03-27T00:40:42.000Z
|
import numpy as np
from utils import plot_images
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(
data_dir,
batch_size,
random_seed,
valid_size=0.1,
shuffle=True,
show_sample=False,
num_workers=4,
pin_memory=False,
):
"""Train and validation data loaders.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Args:
data_dir: path directory to the dataset.
batch_size: how many samples per batch to load.
random_seed: fix seed for reproducibility.
valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
In the paper, this number is set to 0.1.
shuffle: whether to shuffle the train/validation indices.
show_sample: plot 9x9 sample grid of the dataset.
num_workers: number of subprocesses to use when loading the dataset.
pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert (valid_size >= 0) and (valid_size <= 1), error_msg
# define transforms
normalize = transforms.Normalize((0.1307,), (0.3081,))
trans = transforms.Compose([transforms.ToTensor(), normalize])
# load dataset
dataset = datasets.MNIST(data_dir, train=True, download=True, transform=trans)
num_train = len(dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=train_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=valid_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
)
# visualize some images
if show_sample:
sample_loader = torch.utils.data.DataLoader(
dataset,
batch_size=9,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=pin_memory,
)
data_iter = iter(sample_loader)
images, labels = data_iter.next()
X = images.numpy()
X = np.transpose(X, [0, 2, 3, 1])
plot_images(X, labels)
return (train_loader, valid_loader)
def get_test_loader(data_dir, batch_size, num_workers=4, pin_memory=False):
"""Test datalaoder.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Args:
data_dir: path directory to the dataset.
batch_size: how many samples per batch to load.
num_workers: number of subprocesses to use when loading the dataset.
pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
"""
# define transforms
normalize = transforms.Normalize((0.1307,), (0.3081,))
trans = transforms.Compose([transforms.ToTensor(), normalize])
# load dataset
dataset = datasets.MNIST(data_dir, train=False, download=True, transform=trans)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
)
return data_loader
| 30.704918
| 83
| 0.664976
|
import numpy as np
from utils import plot_images
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(
data_dir,
batch_size,
random_seed,
valid_size=0.1,
shuffle=True,
show_sample=False,
num_workers=4,
pin_memory=False,
):
error_msg = "[!] valid_size should be in the range [0, 1]."
assert (valid_size >= 0) and (valid_size <= 1), error_msg
normalize = transforms.Normalize((0.1307,), (0.3081,))
trans = transforms.Compose([transforms.ToTensor(), normalize])
dataset = datasets.MNIST(data_dir, train=True, download=True, transform=trans)
num_train = len(dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=train_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=valid_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
)
if show_sample:
sample_loader = torch.utils.data.DataLoader(
dataset,
batch_size=9,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=pin_memory,
)
data_iter = iter(sample_loader)
images, labels = data_iter.next()
X = images.numpy()
X = np.transpose(X, [0, 2, 3, 1])
plot_images(X, labels)
return (train_loader, valid_loader)
def get_test_loader(data_dir, batch_size, num_workers=4, pin_memory=False):
normalize = transforms.Normalize((0.1307,), (0.3081,))
trans = transforms.Compose([transforms.ToTensor(), normalize])
dataset = datasets.MNIST(data_dir, train=False, download=True, transform=trans)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
)
return data_loader
| true
| true
|
f7142ff349e7ada53e51a9c796f37baacff04ec9
| 1,290
|
py
|
Python
|
cl_progress/cl_progress.py
|
CORDEA/myPythonModules
|
790674a8f155a94804242b9b220eb6ac6efc8328
|
[
"Apache-2.0"
] | null | null | null |
cl_progress/cl_progress.py
|
CORDEA/myPythonModules
|
790674a8f155a94804242b9b220eb6ac6efc8328
|
[
"Apache-2.0"
] | null | null | null |
cl_progress/cl_progress.py
|
CORDEA/myPythonModules
|
790674a8f155a94804242b9b220eb6ac6efc8328
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding:utf-8
#
# Copyright 2015-2017 Yoshihiro Tanaka
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__Author__ = "Yoshihiro Tanaka"
__date__ = "2015-02-02"
def progress(sent, flag):
import sys, commands
_SUC = '[SUCCEED]'
_FAL = '[FAILED]'
# ref. http://d.hatena.ne.jp/heavenshell/20090909/1252509749
colors = {'clear': '\033[0m', 'red': '\033[31m', 'green': '\033[32m'}
width = int(commands.getoutput('stty size').split()[1])
if flag:
result = _SUC
color = 'green'
else:
result = _FAL
color = 'red'
spaces = width - (len(sent) + len(result))
sys.stdout.write('%s%s' % (colors['clear'], sent + (' ' * spaces)))
sys.stdout.write('%s%s%s\n' % (colors[color], result, colors['clear']))
| 30
| 75
| 0.66124
|
__Author__ = "Yoshihiro Tanaka"
__date__ = "2015-02-02"
def progress(sent, flag):
import sys, commands
_SUC = '[SUCCEED]'
_FAL = '[FAILED]'
colors = {'clear': '\033[0m', 'red': '\033[31m', 'green': '\033[32m'}
width = int(commands.getoutput('stty size').split()[1])
if flag:
result = _SUC
color = 'green'
else:
result = _FAL
color = 'red'
spaces = width - (len(sent) + len(result))
sys.stdout.write('%s%s' % (colors['clear'], sent + (' ' * spaces)))
sys.stdout.write('%s%s%s\n' % (colors[color], result, colors['clear']))
| true
| true
|
f71430b176a3802c19f4d2638a14ba0259909022
| 863
|
py
|
Python
|
src/utils/osrm.py
|
sashakh/vroom-scripts
|
46b8abce2d8680f5f854965cccf57ac7856fe092
|
[
"BSD-2-Clause"
] | null | null | null |
src/utils/osrm.py
|
sashakh/vroom-scripts
|
46b8abce2d8680f5f854965cccf57ac7856fe092
|
[
"BSD-2-Clause"
] | null | null | null |
src/utils/osrm.py
|
sashakh/vroom-scripts
|
46b8abce2d8680f5f854965cccf57ac7856fe092
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
DEFAULT_IP = '0.0.0.0'
DEFAULT_PORT = '5000'
def format_request(service,
locs,
ip = DEFAULT_IP,
port = DEFAULT_PORT):
req = 'http://' + ip + ':' + port + '/'
req += service + '/v1/car/'
for loc in locs:
req += str(loc[0]) + ',' + str(loc[1]) + ';'
return req[:-1]
def route(locs,
extra_args = '',
ip = DEFAULT_IP,
port = DEFAULT_PORT):
# Building request.
req = format_request('route', locs, ip, port)
req += '?alternatives=false&steps=false&overview=full&continue_straight=false'
req += extra_args
return requests.get(req).json()
def table(locs,
ip = DEFAULT_IP,
port = DEFAULT_PORT):
req = format_request('table', locs, ip, port)
return requests.get(req).json()
| 23.324324
| 80
| 0.559676
|
import requests
DEFAULT_IP = '0.0.0.0'
DEFAULT_PORT = '5000'
def format_request(service,
locs,
ip = DEFAULT_IP,
port = DEFAULT_PORT):
req = 'http://' + ip + ':' + port + '/'
req += service + '/v1/car/'
for loc in locs:
req += str(loc[0]) + ',' + str(loc[1]) + ';'
return req[:-1]
def route(locs,
extra_args = '',
ip = DEFAULT_IP,
port = DEFAULT_PORT):
req = format_request('route', locs, ip, port)
req += '?alternatives=false&steps=false&overview=full&continue_straight=false'
req += extra_args
return requests.get(req).json()
def table(locs,
ip = DEFAULT_IP,
port = DEFAULT_PORT):
req = format_request('table', locs, ip, port)
return requests.get(req).json()
| true
| true
|
f7143197fc3c82b21a8db9b00f7324492cb578fa
| 1,210
|
py
|
Python
|
src/prometheus_async/__init__.py
|
hynek/prometheus_async
|
4abb25ac4f893c951131123989013df1286338d0
|
[
"Apache-2.0"
] | 49
|
2015-10-03T00:04:12.000Z
|
2019-05-13T10:32:02.000Z
|
src/prometheus_async/__init__.py
|
hynek/prometheus_async
|
4abb25ac4f893c951131123989013df1286338d0
|
[
"Apache-2.0"
] | 13
|
2015-10-07T21:15:23.000Z
|
2019-02-09T17:12:46.000Z
|
src/prometheus_async/__init__.py
|
hynek/prometheus_async
|
4abb25ac4f893c951131123989013df1286338d0
|
[
"Apache-2.0"
] | 12
|
2015-10-15T23:05:03.000Z
|
2019-02-09T15:49:07.000Z
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2016 Hynek Schlawack
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Async helpers for prometheus_client.
"""
__version__ = "22.3.0.dev0"
__title__ = "prometheus_async"
# __doc__ is None in when running with -OO / PYTHONOPTIMIZE=2.
__description__ = (__doc__ or "").strip()
__uri__ = "https://prometheus-async.readthedocs.io/"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "Apache License, Version 2.0"
__copyright__ = f"Copyright (c) 2016 {__author__}"
from . import aio
__all__ = ["aio"]
try:
from . import tx # noqa -- flake8 doesn't understand __all__.append
__all__.append("tx")
except ImportError:
pass
| 25.744681
| 74
| 0.733058
|
__version__ = "22.3.0.dev0"
__title__ = "prometheus_async"
__description__ = (__doc__ or "").strip()
__uri__ = "https://prometheus-async.readthedocs.io/"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "Apache License, Version 2.0"
__copyright__ = f"Copyright (c) 2016 {__author__}"
from . import aio
__all__ = ["aio"]
try:
from . import tx
__all__.append("tx")
except ImportError:
pass
| true
| true
|
f71431a16aaaf2c0f14e8c3eceaefa14bf68a0e5
| 5,134
|
py
|
Python
|
scrolls/errors.py
|
a-bison/scrolls-py
|
cd531bd0755a107e79afc5bd8a23f0905e1fc120
|
[
"BSD-3-Clause"
] | null | null | null |
scrolls/errors.py
|
a-bison/scrolls-py
|
cd531bd0755a107e79afc5bd8a23f0905e1fc120
|
[
"BSD-3-Clause"
] | null | null | null |
scrolls/errors.py
|
a-bison/scrolls-py
|
cd531bd0755a107e79afc5bd8a23f0905e1fc120
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Errors not dependent on any specific Scrolls types.
Typically, you won't need to instantiate any of these yourself. The base exception
for _all_ Scrolls errors is `ScrollError`. Any error that occurs while validating
script syntax or interpreting scripts will inherit from `PositionalError`.
"""
import functools
import math
import typing as t
__all__ = (
"format_positional_error",
"ScrollError",
"PositionalError",
"ParseError",
"ParseEofError",
"ParseExpectError",
"TokenizeError",
"TokenizeEofError"
)
@functools.lru_cache(128)
def format_positional_error(
line: int,
pos: int,
string: str,
message: str,
prior_lines: int = 3
) -> str:
"""Format a positional error generated by Scrolls.
Args:
line: The line the error was generated on.
pos: The character the error was generated on.
string: The script that generated the error.
message: The message associated with the error.
prior_lines: The number of lines that should be printed before the line
the error occurred on. The line containing the error will always
be printed.
Returns:
The formatted error message.
For example:
```text
...
1 print "World"
2 print "Foo"
3 print "Bar"
4 print "bad string
^
line 4 - Unexpected EOF while parsing string literal.
```
If there are more than `prior_lines` lines before the error, `...` will be
prepended to the output.
"""
zfill = max(1, int(math.log10(len(string))))
lines = [f"{n:0{zfill}} {l}" for n, l in enumerate(string.splitlines())]
printed_lines = lines[max(0, line - prior_lines): line + 1]
output_lines = [
*(["..."] if line - prior_lines >= 1 else []),
*printed_lines,
" "*(pos + 1 + zfill) + "^",
f"line {line}: {message}"
]
return "\n".join(output_lines)
class ScrollError(Exception):
"""Base class for all Scrolls-related errors."""
pass
class PositionalError(ScrollError):
"""Generic error that happened somewhere in a script.
Any error in tokenizing, parsing, or interpreting should inherit from this.
Typically you'll never need to instantiate one of these yourself, just catch it
and call `str` on it. This will return a formatted error message pointing to
where the error happened. See `format_positional_error` for more details.
Example usage:
```
try:
some_scrolls_function(...)
except PositionalError as e:
print("error:")
print(str(e))
```
Note that this will apply to any error that inherits from `PositionalError` as well.
If you want to do your own formatting, you can use the instance variables below to
generate your own messages.
"""
def __init__(
self,
line: int,
pos: int,
string: str,
message: str
):
self.line = line
"""The line the error occurred on."""
self.pos = pos
"""The character along `line` the error occurred at."""
self.string = string
"""The string that triggered the error. In all normal cases, this is a script."""
self.message = message
"""The message associated with this error."""
def __str__(self) -> str:
"""
Return a formatted error string pointing out in the script where this error
happened.
"""
return format_positional_error(
self.line,
self.pos,
self.string,
self.message
)
class TokenizeError(PositionalError):
"""Generic error raised while lexing/tokenizing a script."""
pass
class TokenizeEofError(TokenizeError):
"""Raised when the lexer/tokenizer hits an unexpected EOF (end of script)."""
pass
class ParseError(PositionalError):
"""Generic error raised during the parsing stage."""
def __init__(
self,
line: int,
pos: int,
string: str,
message: str
):
super().__init__(
line,
pos,
string,
message
)
# IMPLEMENTATION DETAIL
# Sets whether this parse error is fatal or not. Defaults to `False`.
# If `True`, a `ParseError` will cause all parsing to stop immediately and
# raise the error. If `fatal` is `False`, a parse function may try alternative
# parsing. Internally, `fatal = False` is used by `parse_choice` to determine
# which parsing function to choose. See `scrolls.ast` for more details.
self.fatal = False
class ParseEofError(ParseError):
"""Raised when an EOF is encountered too early while parsing a script."""
pass
class ParseExpectError(ParseError):
"""Raised when an unexpected token is encountered during parsing."""
pass
| 28.681564
| 90
| 0.599533
|
import functools
import math
import typing as t
__all__ = (
"format_positional_error",
"ScrollError",
"PositionalError",
"ParseError",
"ParseEofError",
"ParseExpectError",
"TokenizeError",
"TokenizeEofError"
)
@functools.lru_cache(128)
def format_positional_error(
    line: int,
    pos: int,
    string: str,
    message: str,
    prior_lines: int = 3
) -> str:
    """
    Format an error message pointing at (`line`, `pos`) within `string`.

    Shows up to `prior_lines` numbered lines of context ending at `line`,
    a caret under the offending column, and `message`. Memoized, since the
    same error may be formatted repeatedly.
    """
    # Width of the line-number gutter. BUG FIX: guard the empty-script case —
    # math.log10(0) raises ValueError. (Width is derived from the character
    # count of the whole string, preserving the original formatting.)
    zfill = max(1, int(math.log10(len(string)))) if string else 1
    lines = [f"{n:0{zfill}} {l}" for n, l in enumerate(string.splitlines())]
    printed_lines = lines[max(0, line - prior_lines): line + 1]
    output_lines = [
        # An ellipsis marks context cut off above the window.
        *(["..."] if line - prior_lines >= 1 else []),
        *printed_lines,
        # Caret offset accounts for the gutter: zfill digits plus one space.
        " "*(pos + 1 + zfill) + "^",
        f"line {line}: {message}"
    ]
    return "\n".join(output_lines)
class ScrollError(Exception):
    """Common base class for the errors defined in this module."""
class PositionalError(ScrollError):
    """An error tied to a specific (line, pos) location inside a script."""

    def __init__(self, line: int, pos: int, string: str, message: str):
        # Keep the full location so __str__ can render a pointer into the script.
        self.message = message
        self.string = string
        self.pos = pos
        self.line = line

    def __str__(self) -> str:
        """Render this error as a message pointing into the offending script."""
        location = (self.line, self.pos, self.string, self.message)
        return format_positional_error(*location)
class TokenizeError(PositionalError):
    """Raised for generic problems encountered while lexing/tokenizing a script."""
class TokenizeEofError(TokenizeError):
    """Raised when the lexer/tokenizer runs out of input (end of script) unexpectedly."""
class ParseError(PositionalError):
    """Generic error raised while parsing a script."""

    def __init__(self, line: int, pos: int, string: str, message: str):
        super().__init__(line, pos, string, message)
        # Non-fatal by default: a non-fatal error lets parse functions try
        # alternative parses (e.g. parse_choice); a fatal one aborts parsing
        # immediately and propagates.
        self.fatal = False
class ParseEofError(ParseError):
    """Raised when the parser hits the end of the script before it expected to."""
class ParseExpectError(ParseError):
    """Raised when the parser sees a token other than the one it expected."""
| true
| true
|
f71431e0bae919d25b50e4bc0811e7098763a471
| 173
|
py
|
Python
|
virtual/lib/python3.6/site-packages/pylint/test/functional/broad_except.py
|
drewheathens/The-Moringa-Tribune
|
98ee4d63c9df6f1f7497fc6876960a822d914500
|
[
"MIT"
] | 69
|
2019-02-18T12:07:35.000Z
|
2022-03-12T10:38:32.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/functional/broad_except.py
|
drewheathens/The-Moringa-Tribune
|
98ee4d63c9df6f1f7497fc6876960a822d914500
|
[
"MIT"
] | 32
|
2018-05-01T05:24:43.000Z
|
2022-03-11T23:20:39.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/functional/broad_except.py
|
drewheathens/The-Moringa-Tribune
|
98ee4d63c9df6f1f7497fc6876960a822d914500
|
[
"MIT"
] | 28
|
2019-03-22T01:07:13.000Z
|
2022-02-21T16:38:27.000Z
|
# pylint: disable=missing-docstring
from __future__ import print_function
__revision__ = 0
try:
    __revision__ += 1
except Exception: # [broad-except]
    print('error')
# NOTE(review): this is a pylint functional-test fixture; the `# [broad-except]`
# marker above is the expected lint message, and the expected-output file
# references line numbers. Comments are appended below the code so those line
# numbers stay unchanged — do not "fix" the broad except or insert lines above.
| 19.222222
| 37
| 0.739884
|
# NOTE(review): comment-stripped duplicate of the pylint `broad-except`
# fixture above. The over-broad `except Exception` is deliberate (it is the
# behavior under test); do not narrow it.
from __future__ import print_function
__revision__ = 0
try:
    __revision__ += 1
except Exception:
    print('error')
| true
| true
|
f71431e15f97613abc12e56b17caf9d892de3bd9
| 1,359
|
py
|
Python
|
setup.py
|
butla/bravado-falcon
|
2c377db486150a6e0b93a4fb5970be9cf3e769d0
|
[
"MIT"
] | 2
|
2017-01-16T07:51:35.000Z
|
2020-02-17T21:44:13.000Z
|
setup.py
|
butla/bravado-falcon
|
2c377db486150a6e0b93a4fb5970be9cf3e769d0
|
[
"MIT"
] | null | null | null |
setup.py
|
butla/bravado-falcon
|
2c377db486150a6e0b93a4fb5970be9cf3e769d0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os.path
from setuptools import setup
project_name = 'bravado-falcon'
version = '0.1.0'
setup_dir = os.path.dirname(os.path.abspath(__file__))
# Build install_requires from requirements.txt, dropping exact `==` pins so
# installs are not locked to development versions.
# BUG FIX: lines were previously kept verbatim from readlines(), so unpinned
# requirements retained their trailing newline and blank lines produced empty
# entries. Strip each line and skip blanks.
with open(os.path.join(setup_dir, 'requirements.txt')) as req_file:
    requirements = [line.strip().split('==')[0] for line in req_file if line.strip()]
with open(os.path.join(setup_dir, 'README.rst')) as readme_file:
    readme = readme_file.read()
setup(
    name=project_name,
    version=version,
    description='Integration of Falcon API unit tests with Bravado.',
    long_description=readme,
    author='Michał Bultrowicz',
    author_email='michal.bultrowicz@gmail.com',
    url='https://github.com/butla/bravado-falcon',
    packages=[
        project_name.replace('-', '_'),
    ],
    package_dir={project_name: project_name},
    include_package_data=True,
    install_requires=requirements,
    license="MIT",
    keywords='falcon bravado test',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
    ],
)
| 32.357143
| 71
| 0.65195
|
import os.path
from setuptools import setup
project_name = 'bravado-falcon'
version = '0.1.0'
setup_dir = os.path.dirname(os.path.abspath(__file__))
# Derive install_requires from requirements.txt, dropping `==` version pins.
# BUG FIX: raw readlines() entries were kept verbatim, leaving trailing
# newlines on unpinned requirements and empty entries for blank lines.
with open(os.path.join(setup_dir, 'requirements.txt')) as req_file:
    requirements = [line.strip().split('==')[0] for line in req_file if line.strip()]
with open(os.path.join(setup_dir, 'README.rst')) as readme_file:
    readme = readme_file.read()
setup(
    name=project_name,
    version=version,
    description='Integration of Falcon API unit tests with Bravado.',
    long_description=readme,
    author='Michał Bultrowicz',
    author_email='michal.bultrowicz@gmail.com',
    url='https://github.com/butla/bravado-falcon',
    packages=[
        project_name.replace('-', '_'),
    ],
    package_dir={project_name: project_name},
    include_package_data=True,
    install_requires=requirements,
    license="MIT",
    keywords='falcon bravado test',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
    ],
)
| true
| true
|
f714330e3b625e83239ab4676720e506ac5de5a0
| 3,079
|
py
|
Python
|
Lib/distutils/command/bdist_dumb.py
|
SaadBazaz/ChinesePython
|
800955539dda912d4a1621bcf5a700aaaddc012f
|
[
"CNRI-Python-GPL-Compatible"
] | 3
|
2022-01-30T20:08:24.000Z
|
2022-02-12T08:51:12.000Z
|
Lib/distutils/command/bdist_dumb.py
|
SaadBazaz/ChinesePython
|
800955539dda912d4a1621bcf5a700aaaddc012f
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/distutils/command/bdist_dumb.py
|
SaadBazaz/ChinesePython
|
800955539dda912d4a1621bcf5a700aaaddc012f
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
# created 2000/03/29, Greg Ward
__revision__ = "$Id: bdist_dumb.py,v 1.2 2002/04/12 09:44:05 sof34 Exp $"
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
class bdist_dumb (Command):
    """Create a "dumb" built distribution: install into a temporary tree and
    archive it for unpacking under $prefix / $exec_prefix.

    NOTE(review): Python 2 source (uses the ``raise E, v`` syntax below);
    keep any edits Python 2 compatible.
    """
    description = "create a \"dumb\" built distribution"
    # NOTE(review): short option 'd' is declared twice, for both 'bdist-dir='
    # and 'dist-dir=' — only one can win; probably a copy/paste slip.
    user_options = [('bdist-dir=', 'd',
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('format=', 'f',
                     "archive format to create (tar, ztar, gztar, zip)"),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                   ]
    boolean_options = ['keep-temp']
    # Archive format used when --format is not given, keyed by os.name.
    default_format = { 'posix': 'gztar',
                       'nt': 'zip', }
    def initialize_options (self):
        # All options start undefined; finalize_options fills in defaults.
        self.bdist_dir = None
        self.plat_name = None
        self.format = None
        self.keep_temp = 0
        self.dist_dir = None
    # initialize_options()
    def finalize_options (self):
        # Resolve defaults: build dir under the 'bdist' base, platform
        # default archive format, and values inherited from 'bdist'.
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'dumb')
        if self.format is None:
            try:
                self.format = self.default_format[os.name]
            except KeyError:
                raise DistutilsPlatformError, \
                      ("don't know how to create dumb built distributions " +
                       "on platform %s") % os.name
        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'))
    # finalize_options()
    def run (self):
        # Build, install into the pseudo-installation tree, archive it, and
        # optionally clean up the tree afterwards.
        self.run_command('build')
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        self.announce("installing to %s" % self.bdist_dir)
        self.run_command('install')
        # And make an archive relative to the root of the
        # pseudo-installation tree.
        archive_basename = "%s.%s" % (self.distribution.get_fullname(),
                                      self.plat_name)
        self.make_archive(os.path.join(self.dist_dir, archive_basename),
                          self.format,
                          root_dir=self.bdist_dir)
        if not self.keep_temp:
            remove_tree(self.bdist_dir, self.verbose, self.dry_run)
    # run()
# class bdist_dumb
| 32.072917
| 77
| 0.559597
|
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
__revision__ = "$Id: bdist_dumb.py,v 1.2 2002/04/12 09:44:05 sof34 Exp $"
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
class bdist_dumb (Command):
    """Create a "dumb" built distribution (archive of an installed tree).

    NOTE(review): comment-stripped Python 2 duplicate of the class above;
    uses ``raise E, v`` syntax — keep edits Python 2 compatible.
    """
    description = "create a \"dumb\" built distribution"
    # NOTE(review): short option 'd' is declared for both 'bdist-dir=' and
    # 'dist-dir=' — only one can win.
    user_options = [('bdist-dir=', 'd',
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('format=', 'f',
                     "archive format to create (tar, ztar, gztar, zip)"),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                   ]
    boolean_options = ['keep-temp']
    # Archive format used when --format is not given, keyed by os.name.
    default_format = { 'posix': 'gztar',
                       'nt': 'zip', }
    def initialize_options (self):
        # All options start undefined; finalize_options fills in defaults.
        self.bdist_dir = None
        self.plat_name = None
        self.format = None
        self.keep_temp = 0
        self.dist_dir = None
    def finalize_options (self):
        # Resolve defaults and inherit dist_dir/plat_name from 'bdist'.
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'dumb')
        if self.format is None:
            try:
                self.format = self.default_format[os.name]
            except KeyError:
                raise DistutilsPlatformError, \
                      ("don't know how to create dumb built distributions " +
                       "on platform %s") % os.name
        self.set_undefined_options('bdist',
                                   ('dist_dir', 'dist_dir'),
                                   ('plat_name', 'plat_name'))
    # finalize_options()
    def run (self):
        # Build, install into the pseudo-installation tree, archive, clean up.
        self.run_command('build')
        install = self.reinitialize_command('install', reinit_subcommands=1)
        install.root = self.bdist_dir
        self.announce("installing to %s" % self.bdist_dir)
        self.run_command('install')
        # And make an archive relative to the root of the
        # pseudo-installation tree.
        archive_basename = "%s.%s" % (self.distribution.get_fullname(),
                                      self.plat_name)
        self.make_archive(os.path.join(self.dist_dir, archive_basename),
                          self.format,
                          root_dir=self.bdist_dir)
        if not self.keep_temp:
            remove_tree(self.bdist_dir, self.verbose, self.dry_run)
    # run()
# class bdist_dumb
| false
| true
|
f714331b5f57e69f93e8004c75487a73e41833cf
| 1,224
|
py
|
Python
|
config/urls.py
|
kdagley/publicrelations
|
dbf424c247028ed93881a5375b22d196cfeed175
|
[
"BSD-3-Clause"
] | null | null | null |
config/urls.py
|
kdagley/publicrelations
|
dbf424c247028ed93881a5375b22d196cfeed175
|
[
"BSD-3-Clause"
] | null | null | null |
config/urls.py
|
kdagley/publicrelations
|
dbf424c247028ed93881a5375b22d196cfeed175
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("pr.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    # static() serves MEDIA_URL files in development (no-op when DEBUG is off).
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string dotted-path view arguments to url() were removed in
    # Django 1.10; presumably this project targets Django < 1.10 — confirm
    # before upgrading.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| 34.971429
| 91
| 0.693627
|
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# Comment-stripped duplicate of the URLconf above; same review notes apply.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^users/', include("pr.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # Debug-only error-page routes. NOTE(review): string dotted-path views
    # were removed in Django 1.10 — confirm the targeted Django version.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| true
| true
|
f7143399b8cc53aab5eb6e9b11ef706b2984f99f
| 14,667
|
py
|
Python
|
deprecated/python/urllib/kalign_urllib2.py
|
SamFent/webservice-clients
|
b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2
|
[
"Apache-2.0"
] | null | null | null |
deprecated/python/urllib/kalign_urllib2.py
|
SamFent/webservice-clients
|
b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2
|
[
"Apache-2.0"
] | null | null | null |
deprecated/python/urllib/kalign_urllib2.py
|
SamFent/webservice-clients
|
b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# $Id: kalign_urllib2.py 2809 2015-03-13 16:10:25Z uludag $
# ======================================================================
#
# Copyright 2009-2018 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
# Kalign (REST) Python client using urllib2 and
# xmltramp (http://www.aaronsw.com/2002/xmltramp/).
#
# Tested with:
# Python 2.6.5 (Ubuntu 10.04 LTS)
# Python 2.7.3 (Ubuntu 12.04 LTS)
#
# See:
# http://www.ebi.ac.uk/Tools/webservices/services/msa/kalign_rest
# http://www.ebi.ac.uk/Tools/webservices/tutorials/python
# ======================================================================
# Load libraries
import platform, os, re, sys, time, urllib, urllib2
from xmltramp2 import xmltramp
from optparse import OptionParser
# Base URL for service
baseUrl = 'http://www.ebi.ac.uk/Tools/services/rest/kalign'
# Set interval for checking status
checkInterval = 10
# Output level
outputLevel = 1
# Debug level
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)
# Usage message
usage = "Usage: %prog [options...] [seqFile]"
description = """Kalign is a fast and accurate multiple sequence alignment algorithm."""
epilog = """For further information about the Kalign (REST) web service, see http://www.ebi.ac.uk/Tools/webservices/services/msa/kalign_rest."""
version = "$Id: kalign_urllib2.py 2809 2017-02-10 16:10:25Z afoix $"
# Process command-line options
parser = OptionParser(usage=usage, description=description, epilog=epilog, version=version)
# Tool specific options
parser.add_option('--stype', help='Sequence type: DNA or protein')
parser.add_option('--format', help='output format')
parser.add_option('--gapopen', help='Gap creation penalty')
parser.add_option('--gapext', help='Gap extension penalty')
parser.add_option('--termgap', help='Terminal gap penalty')
parser.add_option('--bonus', help='Bonus score')
parser.add_option('--sequence', help='Input sequences/alignment')
# General options
parser.add_option('--email', help='e-mail address')
parser.add_option('--title', help='job title')
parser.add_option('--outfile', help='file name for results')
parser.add_option('--outformat', help='output format for results')
parser.add_option('--jobid', help='job identifier')
# NOTE(review): `async` became a reserved word in Python 3.7; this option and
# the later `options.async` access only work because this script is Python 2.
parser.add_option('--async', action='store_true', help='asynchronous mode')
parser.add_option('--polljob', action="store_true", help='get job result')
parser.add_option('--resultTypes', action='store_true', help='get result types')
parser.add_option('--status', action="store_true", help='get job status')
parser.add_option('--params', action='store_true', help='list input parameters')
parser.add_option('--paramDetail', help='get details for parameter')
parser.add_option('--quiet', action='store_true', help='decrease output level')
parser.add_option('--verbose', action='store_true', help='increase output level')
# NOTE(review): --baseURL is parsed but `baseUrl` is never updated from it
# anywhere in this file's visible code, so requests always hit the hard-coded
# URL — looks like a bug; confirm against the full file.
parser.add_option('--baseURL', default=baseUrl, help='Base URL for service')
parser.add_option('--debugLevel', type='int', default=debugLevel, help='debug output level')
(options, args) = parser.parse_args()
# Increase output level
if options.verbose:
    outputLevel += 1
# Decrease output level
if options.quiet:
    outputLevel -= 1
# Debug level
if options.debugLevel:
    debugLevel = options.debugLevel
# Debug print
def printDebugMessage(functionName, message, level):
    """Print `[functionName] message` to stderr when `level` <= global debugLevel."""
    if (level <= debugLevel):
        print >> sys.stderr, '[' + functionName + '] ' + message
# User-agent for request (see RFC2616).
def getUserAgent():
    """Build the EBI sample-client User-Agent string (client rev, script, Python, OS)."""
    printDebugMessage('getUserAgent', 'Begin', 11)
    # Agent string for urllib2 library.
    urllib_agent = 'Python-urllib/%s' % urllib2.__version__
    clientRevision = '$Revision: 2809 $'
    clientVersion = '0'
    # Extract the number from the SVN keyword (between '$Revision: ' and ' $').
    if len(clientRevision) > 11:
        clientVersion = clientRevision[11:-2]
    # Prepend client specific agent string.
    user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
        clientVersion, os.path.basename(__file__),
        platform.python_version(), platform.system(),
        urllib_agent
    )
    printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
    printDebugMessage('getUserAgent', 'End', 11)
    return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
    """Perform an HTTP GET on `url` and return the response body.

    On HTTPError the error document is echoed to stderr and the exception
    re-raised.
    """
    printDebugMessage('restRequest', 'Begin', 11)
    printDebugMessage('restRequest', 'url: ' + url, 11)
    # Errors are indicated by HTTP status codes.
    try:
        # Set the User-agent.
        user_agent = getUserAgent()
        http_headers = {'User-Agent': user_agent}
        req = urllib2.Request(url, None, http_headers)
        # Make the request (HTTP GET).
        reqH = urllib2.urlopen(req)
        result = reqH.read()
        reqH.close()
    # Errors are indicated by HTTP status codes.
    except urllib2.HTTPError, ex:
        # Trap exception and output the document to get error message.
        print >> sys.stderr, ex.read()
        raise
    printDebugMessage('restRequest', 'End', 11)
    return result
# Get input parameters list
def serviceGetParameters():
    """Fetch the tool's input-parameter list and return the parsed 'id' nodes."""
    printDebugMessage('serviceGetParameters', 'Begin', 1)
    requestUrl = baseUrl + '/parameters'
    printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetParameters', 'End', 1)
    return doc['id':]
# Print list of parameters
def printGetParameters():
    """Print one input-parameter name per line."""
    printDebugMessage('printGetParameters', 'Begin', 1)
    idList = serviceGetParameters()
    for id in idList:
        print id
    printDebugMessage('printGetParameters', 'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
    """Fetch and parse the detail document for the input parameter `paramName`."""
    printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
    printDebugMessage('serviceGetParameterDetails', 'paramName: ' + paramName, 2)
    requestUrl = baseUrl + '/parameterdetails/' + paramName
    printDebugMessage('serviceGetParameterDetails', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetParameterDetails', 'End', 1)
    return doc
# Print description of a parameter
def printGetParameterDetails(paramName):
    """Pretty-print a parameter's name, type, description, values and properties."""
    printDebugMessage('printGetParameterDetails', 'Begin', 1)
    doc = serviceGetParameterDetails(paramName)
    print str(doc.name) + "\t" + str(doc.type)
    print doc.description
    for value in doc.values:
        print value.value,
        # The service marks the default value with defaultValue == 'true'.
        if str(value.defaultValue) == 'true':
            print 'default',
        print
        print "\t" + str(value.label)
        if (hasattr(value, 'properties')):
            for wsProperty in value.properties:
                print "\t" + str(wsProperty.key) + "\t" + str(wsProperty.value)
    # print doc
    printDebugMessage('printGetParameterDetails', 'End', 1)
# Submit job
def serviceRun(email, title, params):
    """Submit a job (HTTP POST) and return the new job identifier.

    NOTE(review): mutates the caller's `params` dict — adds 'email'/'title'
    and removes 'appl'.
    """
    printDebugMessage('serviceRun', 'Begin', 1)
    # Insert e-mail and title into params
    params['email'] = email
    if title:
        params['title'] = title
    requestUrl = baseUrl + '/run/'
    printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
    # Signature methods requires special handling (list)
    applData = ''
    if 'appl' in params:
        # So extract from params
        applList = params['appl']
        del params['appl']
        # Build the method data options
        for appl in applList:
            applData += '&appl=' + appl
    # Get the data for the other options
    requestData = urllib.urlencode(params)
    # Concatenate the two parts.
    requestData += applData
    printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
    # Errors are indicated by HTTP status codes.
    try:
        # Set the HTTP User-agent.
        user_agent = getUserAgent()
        http_headers = {'User-Agent': user_agent}
        req = urllib2.Request(requestUrl, None, http_headers)
        # Make the submission (HTTP POST).
        reqH = urllib2.urlopen(req, requestData)
        jobId = reqH.read()
        reqH.close()
    except urllib2.HTTPError, ex:
        # Trap exception and output the document to get error message.
        print >> sys.stderr, ex.read()
        raise
    printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
    printDebugMessage('serviceRun', 'End', 1)
    return jobId
# Get job status
def serviceGetStatus(jobId):
    """Return the service-reported status string for `jobId`."""
    printDebugMessage('serviceGetStatus', 'Begin', 1)
    printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
    requestUrl = baseUrl + '/status/' + jobId
    printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
    status = restRequest(requestUrl)
    printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
    printDebugMessage('serviceGetStatus', 'End', 1)
    return status
# Print the status of a job
def printGetStatus(jobId):
    """Print the current status of `jobId` to stdout."""
    printDebugMessage('printGetStatus', 'Begin', 1)
    status = serviceGetStatus(jobId)
    print status
    printDebugMessage('printGetStatus', 'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
    """Return the parsed 'type' nodes describing available results for `jobId`."""
    printDebugMessage('serviceGetResultTypes', 'Begin', 1)
    printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
    requestUrl = baseUrl + '/resulttypes/' + jobId
    printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetResultTypes', 'End', 1)
    return doc['type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
    """Print each result type's identifier plus any optional metadata fields."""
    printDebugMessage('printGetResultTypes', 'Begin', 1)
    resultTypeList = serviceGetResultTypes(jobId)
    for resultType in resultTypeList:
        print resultType['identifier']
        if (hasattr(resultType, 'label')):
            print "\t", resultType['label']
        if (hasattr(resultType, 'description')):
            print "\t", resultType['description']
        if (hasattr(resultType, 'mediaType')):
            print "\t", resultType['mediaType']
        if (hasattr(resultType, 'fileSuffix')):
            print "\t", resultType['fileSuffix']
    printDebugMessage('printGetResultTypes', 'End', 1)
# Get result
def serviceGetResult(jobId, type_):
    """Fetch and return the raw result of kind `type_` for `jobId`."""
    printDebugMessage('serviceGetResult', 'Begin', 1)
    printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
    printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
    requestUrl = baseUrl + '/result/' + jobId + '/' + type_
    result = restRequest(requestUrl)
    printDebugMessage('serviceGetResult', 'End', 1)
    return result
# Client-side poll
def clientPoll(jobId):
    """Block until the job leaves RUNNING/PENDING, polling every checkInterval seconds."""
    printDebugMessage('clientPoll', 'Begin', 1)
    result = 'PENDING'
    while result == 'RUNNING' or result == 'PENDING':
        result = serviceGetStatus(jobId)
        print >> sys.stderr, result
        if result == 'RUNNING' or result == 'PENDING':
            time.sleep(checkInterval)
    printDebugMessage('clientPoll', 'End', 1)
# Get result for a jobid
def getResult(jobId):
    """Wait for the job to finish, then download each (selected) result type to a file."""
    printDebugMessage('getResult', 'Begin', 1)
    printDebugMessage('getResult', 'jobId: ' + jobId, 1)
    # Check status and wait if necessary
    clientPoll(jobId)
    # Get available result types
    resultTypes = serviceGetResultTypes(jobId)
    for resultType in resultTypes:
        # Derive the filename for the result
        if options.outfile:
            filename = options.outfile + '.' + str(resultType['identifier']) + '.' + str(resultType['fileSuffix'])
        else:
            filename = jobId + '.' + str(resultType['identifier']) + '.' + str(resultType['fileSuffix'])
        # Write a result file
        if not options.outformat or options.outformat == str(resultType['identifier']):
            # Get the result
            result = serviceGetResult(jobId, str(resultType['identifier']))
            fh = open(filename, 'w');
            fh.write(result)
            fh.close()
            print filename
    printDebugMessage('getResult', 'End', 1)
# Read a file
def readFile(filename):
    """Return the full contents of `filename`."""
    printDebugMessage('readFile', 'Begin', 1)
    fh = open(filename, 'r')
    data = fh.read()
    fh.close()
    printDebugMessage('readFile', 'End', 1)
    return data
# Top-level dispatch: pick an action from the parsed command-line options.
# No options... print help.
if numOpts < 2:
    parser.print_help()
# List parameters
elif options.params:
    printGetParameters()
# Get parameter details
elif options.paramDetail:
    printGetParameterDetails(options.paramDetail)
# Submit job
elif options.email and not options.jobid:
    params = {}
    if len(args) > 0:
        if os.access(args[0], os.R_OK): # Read file into content
            params['sequence'] = readFile(args[0])
        else: # Argument is a sequence id
            params['sequence'] = args[0]
    elif options.sequence: # Specified via option
        if os.access(options.sequence, os.R_OK): # Read file into content
            params['sequence'] = readFile(options.sequence)
        else: # Argument is a sequence id
            params['sequence'] = options.sequence
    # Add the other options (if defined)
    # NOTE(review): BUG? this elif chain forwards at most ONE of the tool
    # options (--stype/--format/--gapopen/--gapext/--termgap/--bonus) even if
    # several were given; they look independent and should probably be
    # separate `if` statements.
    if options.stype:
        params['stype'] = options.stype
    elif options.format:
        params['format'] = options.format
    elif options.gapopen:
        params['gapopen'] = options.gapopen
    elif options.gapext:
        params['gapext'] = options.gapext
    elif options.termgap:
        params['termgap'] = options.termgap
    elif options.bonus:
        params['bonus'] = options.bonus
    # Submit the job
    jobid = serviceRun(options.email, options.title, params)
    if options.async: # Async mode
        print jobid
    else: # Sync mode
        print >> sys.stderr, jobid
        time.sleep(5)
        getResult(jobid)
# Get job status
elif options.status and options.jobid:
    printGetStatus(options.jobid)
# List result types for job
elif options.resultTypes and options.jobid:
    printGetResultTypes(options.jobid)
# Get results for job
elif options.polljob and options.jobid:
    getResult(options.jobid)
else:
    print >> sys.stderr, 'Error: unrecognised argument combination'
    parser.print_help()
| 36.57606
| 144
| 0.670348
|
import platform, os, re, sys, time, urllib, urllib2
from xmltramp2 import xmltramp
from optparse import OptionParser
# NOTE(review): comment-stripped duplicate of the Python 2 Kalign REST client
# above; see the commented copy for full notes (--baseURL never applied,
# `--async` is not Python 3 compatible).
baseUrl = 'http://www.ebi.ac.uk/Tools/services/rest/kalign'
checkInterval = 10
outputLevel = 1
debugLevel = 0
numOpts = len(sys.argv)
usage = "Usage: %prog [options...] [seqFile]"
description = """Kalign is a fast and accurate multiple sequence alignment algorithm."""
epilog = """For further information about the Kalign (REST) web service, see http://www.ebi.ac.uk/Tools/webservices/services/msa/kalign_rest."""
version = "$Id: kalign_urllib2.py 2809 2017-02-10 16:10:25Z afoix $"
parser = OptionParser(usage=usage, description=description, epilog=epilog, version=version)
# Tool-specific options.
parser.add_option('--stype', help='Sequence type: DNA or protein')
parser.add_option('--format', help='output format')
parser.add_option('--gapopen', help='Gap creation penalty')
parser.add_option('--gapext', help='Gap extension penalty')
parser.add_option('--termgap', help='Terminal gap penalty')
parser.add_option('--bonus', help='Bonus score')
parser.add_option('--sequence', help='Input sequences/alignment')
# General client options.
parser.add_option('--email', help='e-mail address')
parser.add_option('--title', help='job title')
parser.add_option('--outfile', help='file name for results')
parser.add_option('--outformat', help='output format for results')
parser.add_option('--jobid', help='job identifier')
parser.add_option('--async', action='store_true', help='asynchronous mode')
parser.add_option('--polljob', action="store_true", help='get job result')
parser.add_option('--resultTypes', action='store_true', help='get result types')
parser.add_option('--status', action="store_true", help='get job status')
parser.add_option('--params', action='store_true', help='list input parameters')
parser.add_option('--paramDetail', help='get details for parameter')
parser.add_option('--quiet', action='store_true', help='decrease output level')
parser.add_option('--verbose', action='store_true', help='increase output level')
parser.add_option('--baseURL', default=baseUrl, help='Base URL for service')
parser.add_option('--debugLevel', type='int', default=debugLevel, help='debug output level')
(options, args) = parser.parse_args()
# Adjust verbosity from the flags.
if options.verbose:
    outputLevel += 1
if options.quiet:
    outputLevel -= 1
if options.debugLevel:
    debugLevel = options.debugLevel
# Print a debug message to stderr when level <= global debugLevel.
def printDebugMessage(functionName, message, level):
    if (level <= debugLevel):
        print >> sys.stderr, '[' + functionName + '] ' + message
# Build the EBI sample-client User-Agent string.
def getUserAgent():
    printDebugMessage('getUserAgent', 'Begin', 11)
    urllib_agent = 'Python-urllib/%s' % urllib2.__version__
    clientRevision = '$Revision: 2809 $'
    clientVersion = '0'
    if len(clientRevision) > 11:
        clientVersion = clientRevision[11:-2]
    user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
        clientVersion, os.path.basename(__file__),
        platform.python_version(), platform.system(),
        urllib_agent
    )
    printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
    printDebugMessage('getUserAgent', 'End', 11)
    return user_agent
# HTTP GET wrapper; echoes the error document to stderr and re-raises on HTTPError.
def restRequest(url):
    printDebugMessage('restRequest', 'Begin', 11)
    printDebugMessage('restRequest', 'url: ' + url, 11)
    try:
        user_agent = getUserAgent()
        http_headers = {'User-Agent': user_agent}
        req = urllib2.Request(url, None, http_headers)
        reqH = urllib2.urlopen(req)
        result = reqH.read()
        reqH.close()
    except urllib2.HTTPError, ex:
        print >> sys.stderr, ex.read()
        raise
    printDebugMessage('restRequest', 'End', 11)
    return result
# Fetch the tool's input-parameter list (parsed 'id' nodes).
def serviceGetParameters():
    printDebugMessage('serviceGetParameters', 'Begin', 1)
    requestUrl = baseUrl + '/parameters'
    printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetParameters', 'End', 1)
    return doc['id':]
# Print one input-parameter name per line.
def printGetParameters():
    printDebugMessage('printGetParameters', 'Begin', 1)
    idList = serviceGetParameters()
    for id in idList:
        print id
    printDebugMessage('printGetParameters', 'End', 1)
# Fetch and parse the detail document for one input parameter.
def serviceGetParameterDetails(paramName):
    printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
    printDebugMessage('serviceGetParameterDetails', 'paramName: ' + paramName, 2)
    requestUrl = baseUrl + '/parameterdetails/' + paramName
    printDebugMessage('serviceGetParameterDetails', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetParameterDetails', 'End', 1)
    return doc
# Pretty-print a parameter's name, type, description and values.
def printGetParameterDetails(paramName):
    printDebugMessage('printGetParameterDetails', 'Begin', 1)
    doc = serviceGetParameterDetails(paramName)
    print str(doc.name) + "\t" + str(doc.type)
    print doc.description
    for value in doc.values:
        print value.value,
        if str(value.defaultValue) == 'true':
            print 'default',
        print
        print "\t" + str(value.label)
        if (hasattr(value, 'properties')):
            for wsProperty in value.properties:
                print "\t" + str(wsProperty.key) + "\t" + str(wsProperty.value)
    printDebugMessage('printGetParameterDetails', 'End', 1)
def serviceRun(email, title, params):
printDebugMessage('serviceRun', 'Begin', 1)
params['email'] = email
if title:
params['title'] = title
requestUrl = baseUrl + '/run/'
printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
applData = ''
if 'appl' in params:
applList = params['appl']
del params['appl']
for appl in applList:
applData += '&appl=' + appl
requestData = urllib.urlencode(params)
requestData += applData
printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
try:
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib2.Request(requestUrl, None, http_headers)
reqH = urllib2.urlopen(req, requestData)
jobId = reqH.read()
reqH.close()
except urllib2.HTTPError, ex:
print >> sys.stderr, ex.read()
raise
printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
printDebugMessage('serviceRun', 'End', 1)
return jobId
def serviceGetStatus(jobId):
    """Return the status string of a job (e.g. RUNNING, PENDING, FINISHED)."""
    printDebugMessage('serviceGetStatus', 'Begin', 1)
    printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
    requestUrl = baseUrl + '/status/' + jobId
    printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
    status = restRequest(requestUrl)
    printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
    printDebugMessage('serviceGetStatus', 'End', 1)
    return status
def printGetStatus(jobId):
    """Print the current status of the given job to stdout."""
    printDebugMessage('printGetStatus', 'Begin', 1)
    status = serviceGetStatus(jobId)
    print status
    printDebugMessage('printGetStatus', 'End', 1)
def serviceGetResultTypes(jobId):
    """Fetch the available result types for a (finished) job.

    Returns an iterable of xmltramp 'type' elements.
    """
    printDebugMessage('serviceGetResultTypes', 'Begin', 1)
    printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
    requestUrl = baseUrl + '/resulttypes/' + jobId
    printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
    xmlDoc = restRequest(requestUrl)
    doc = xmltramp.parse(xmlDoc)
    printDebugMessage('serviceGetResultTypes', 'End', 1)
    # xmltramp slice syntax: all child elements named 'type'.
    return doc['type':]
def printGetResultTypes(jobId):
    """Print the identifier and optional metadata of each result type."""
    printDebugMessage('printGetResultTypes', 'Begin', 1)
    resultTypeList = serviceGetResultTypes(jobId)
    for resultType in resultTypeList:
        print resultType['identifier']
        # label/description/mediaType/fileSuffix are optional elements.
        if (hasattr(resultType, 'label')):
            print "\t", resultType['label']
        if (hasattr(resultType, 'description')):
            print "\t", resultType['description']
        if (hasattr(resultType, 'mediaType')):
            print "\t", resultType['mediaType']
        if (hasattr(resultType, 'fileSuffix')):
            print "\t", resultType['fileSuffix']
    printDebugMessage('printGetResultTypes', 'End', 1)
def serviceGetResult(jobId, type_):
    """Fetch one result of a finished job.

    jobId -- job identifier.
    type_ -- result type identifier (see serviceGetResultTypes).
    Returns the raw result document as returned by the service.
    """
    printDebugMessage('serviceGetResult', 'Begin', 1)
    printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
    printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
    requestUrl = baseUrl + '/result/' + jobId + '/' + type_
    result = restRequest(requestUrl)
    printDebugMessage('serviceGetResult', 'End', 1)
    return result
def clientPoll(jobId):
    """Block until the job leaves the RUNNING/PENDING states.

    Polls the status endpoint every checkInterval seconds (module-level
    setting) and echoes each observed status to stderr.
    """
    printDebugMessage('clientPoll', 'Begin', 1)
    result = 'PENDING'
    while result == 'RUNNING' or result == 'PENDING':
        result = serviceGetStatus(jobId)
        print >> sys.stderr, result
        # Only sleep when another poll is actually needed.
        if result == 'RUNNING' or result == 'PENDING':
            time.sleep(checkInterval)
    printDebugMessage('clientPoll', 'End', 1)
def getResult(jobId):
    """Wait for the job to finish, then download its results to files.

    One file is written per result type, named either from options.outfile
    or from the job id, suffixed with the type identifier and file suffix.
    If options.outformat is set, only that result type is downloaded.
    """
    printDebugMessage('getResult', 'Begin', 1)
    printDebugMessage('getResult', 'jobId: ' + jobId, 1)
    # Block until the job has finished.
    clientPoll(jobId)
    resultTypes = serviceGetResultTypes(jobId)
    for resultType in resultTypes:
        # Derive the output file name for this result type.
        if options.outfile:
            filename = options.outfile + '.' + str(resultType['identifier']) + '.' + str(resultType['fileSuffix'])
        else:
            filename = jobId + '.' + str(resultType['identifier']) + '.' + str(resultType['fileSuffix'])
        # Download unless a specific output format was requested and differs.
        if not options.outformat or options.outformat == str(resultType['identifier']):
            result = serviceGetResult(jobId, str(resultType['identifier']))
            fh = open(filename, 'w');
            fh.write(result)
            fh.close()
            print filename
    printDebugMessage('getResult', 'End', 1)
def readFile(filename):
    """Read and return the entire contents of a text file.

    filename -- path of the file to read.
    Returns the file contents as a single string.
    """
    printDebugMessage('readFile', 'Begin', 1)
    # 'with' guarantees the handle is closed even if read() raises,
    # unlike the previous explicit open()/close() pair.
    with open(filename, 'r') as fh:
        data = fh.read()
    printDebugMessage('readFile', 'End', 1)
    return data
# Command-line dispatch: choose the client action from the parsed options
# (parser/options/args/numOpts are defined earlier in this file).
if numOpts < 2:
    # No arguments given: show usage.
    parser.print_help()
elif options.params:
    # List the tool's parameter names.
    printGetParameters()
elif options.paramDetail:
    # Describe a single tool parameter.
    printGetParameterDetails(options.paramDetail)
elif options.email and not options.jobid:
    # Submit a new job. The input may come from a positional argument or
    # from --sequence; either may be a readable file or a literal value.
    params = {}
    if len(args) > 0:
        if os.access(args[0], os.R_OK):  # readable file -> use its contents
            params['sequence'] = readFile(args[0])
        else:  # otherwise treat the argument as the sequence itself
            params['sequence'] = args[0]
    elif options.sequence:
        if os.access(options.sequence, os.R_OK):  # readable file -> contents
            params['sequence'] = readFile(options.sequence)
        else:
            params['sequence'] = options.sequence
    # NOTE(review): this elif chain forwards at most ONE of stype/format/
    # gapopen/gapext/termgap/bonus per invocation; if these are meant to be
    # independent options it should be a series of plain ifs — confirm.
    if options.stype:
        params['stype'] = options.stype
    elif options.format:
        params['format'] = options.format
    elif options.gapopen:
        params['gapopen'] = options.gapopen
    elif options.gapext:
        params['gapext'] = options.gapext
    elif options.termgap:
        params['termgap'] = options.termgap
    elif options.bonus:
        params['bonus'] = options.bonus
    jobid = serviceRun(options.email, options.title, params)
    # NOTE(review): 'options.async' is valid only on Python 2 / <3.7, where
    # 'async' is not a reserved keyword.
    if options.async:
        # Asynchronous mode: print the job id and return immediately.
        print jobid
    else:
        # Synchronous mode: report the id, poll until done, fetch results.
        print >> sys.stderr, jobid
        time.sleep(5)
        getResult(jobid)
elif options.status and options.jobid:
    # Query the status of an existing job.
    printGetStatus(options.jobid)
elif options.resultTypes and options.jobid:
    # List the result types available for a finished job.
    printGetResultTypes(options.jobid)
elif options.polljob and options.jobid:
    # Wait for an existing job to finish and fetch its results.
    getResult(options.jobid)
else:
    print >> sys.stderr, 'Error: unrecognised argument combination'
    parser.print_help()
| false
| true
|
f714342388aea63bff603443250cc030b85ccfb7
| 7,152
|
py
|
Python
|
specklepy/api/resources/branch.py
|
jsdbroughton/specklepy
|
81a98ea938106001abae308e3cfe04a2c588f06a
|
[
"Apache-2.0"
] | null | null | null |
specklepy/api/resources/branch.py
|
jsdbroughton/specklepy
|
81a98ea938106001abae308e3cfe04a2c588f06a
|
[
"Apache-2.0"
] | null | null | null |
specklepy/api/resources/branch.py
|
jsdbroughton/specklepy
|
81a98ea938106001abae308e3cfe04a2c588f06a
|
[
"Apache-2.0"
] | null | null | null |
from gql import gql
from specklepy.api.resource import ResourceBase
from specklepy.api.models import Branch
from specklepy.logging import metrics
NAME = "branch"
METHODS = ["create"]
class Resource(ResourceBase):
    """API Access class for branches"""

    def __init__(self, account, basepath, client) -> None:
        super().__init__(
            account=account,
            basepath=basepath,
            client=client,
            name=NAME,
            methods=METHODS,
        )
        # Responses are deserialised into Branch model instances.
        self.schema = Branch

    def create(
        self, stream_id: str, name: str, description: str = "No description provided"
    ) -> str:
        """Create a new branch on this stream

        Arguments:
            stream_id {str} -- the id of the stream to create the branch on
            name {str} -- the name of the new branch
            description {str} -- a short description of the branch

        Returns:
            id {str} -- the newly created branch's id
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "create"})
        query = gql(
            """
            mutation BranchCreate($branch: BranchCreateInput!) {
              branchCreate(branch: $branch)
            }
            """
        )
        params = {
            "branch": {
                "streamId": stream_id,
                "name": name,
                "description": description,
            }
        }
        # branchCreate returns the new branch id as a plain string, so the
        # response is not parsed into a Branch model.
        return self.make_request(
            query=query, params=params, return_type="branchCreate", parse_response=False
        )

    def get(self, stream_id: str, name: str, commits_limit: int = 10):
        """Get a branch by name from a stream

        Arguments:
            stream_id {str} -- the id of the stream to get the branch from
            name {str} -- the name of the branch to get
            commits_limit {int} -- maximum number of commits to get

        Returns:
            Branch -- the fetched branch with its latest commits
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "get"})
        query = gql(
            """
            query BranchGet($stream_id: String!, $name: String!, $commits_limit: Int!) {
              stream(id: $stream_id) {
                branch(name: $name) {
                  id,
                  name,
                  description,
                  commits (limit: $commits_limit) {
                    totalCount,
                    cursor,
                    items {
                      id,
                      referencedObject,
                      sourceApplication,
                      totalChildrenCount,
                      message,
                      authorName,
                      authorId,
                      branchName,
                      parents,
                      createdAt
                    }
                  }
                }
              }
            }
            """
        )
        params = {"stream_id": stream_id, "name": name, "commits_limit": commits_limit}
        # Unwrap the nested response down to the branch object.
        return self.make_request(
            query=query, params=params, return_type=["stream", "branch"]
        )

    def list(self, stream_id: str, branches_limit: int = 10, commits_limit: int = 10):
        """Get a list of branches from a given stream

        Arguments:
            stream_id {str} -- the id of the stream to get the branches from
            branches_limit {int} -- maximum number of branches to get
            commits_limit {int} -- maximum number of commits to get

        Returns:
            List[Branch] -- the branches on the stream
        """
        # NOTE(review): metrics are tracked under "get" here rather than
        # "list" — looks like a copy-paste slip; confirm before changing.
        metrics.track(metrics.BRANCH, self.account, {"name": "get"})
        query = gql(
            """
            query BranchesGet($stream_id: String!, $branches_limit: Int!, $commits_limit: Int!) {
              stream(id: $stream_id) {
                branches(limit: $branches_limit) {
                  items {
                    id
                    name
                    description
                    commits(limit: $commits_limit) {
                      totalCount
                      items{
                        id
                        message
                        referencedObject
                        sourceApplication
                        parents
                        authorId
                        authorName
                        branchName
                        createdAt
                      }
                    }
                  }
                }
              }
            }
            """
        )
        params = {
            "stream_id": stream_id,
            "branches_limit": branches_limit,
            "commits_limit": commits_limit,
        }
        return self.make_request(
            query=query, params=params, return_type=["stream", "branches", "items"]
        )

    def update(
        self, stream_id: str, branch_id: str, name: str = None, description: str = None
    ):
        """Update a branch

        Arguments:
            stream_id {str} -- the id of the stream containing the branch to update
            branch_id {str} -- the id of the branch to update
            name {str} -- optional: the updated branch name
            description {str} -- optional: the updated branch description

        Returns:
            bool -- True if update is successfull
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "update"})
        query = gql(
            """
            mutation BranchUpdate($branch: BranchUpdateInput!) {
              branchUpdate(branch: $branch)
            }
            """
        )
        params = {
            "branch": {
                "streamId": stream_id,
                "id": branch_id,
            }
        }
        # Only send the fields the caller actually wants to change.
        if name:
            params["branch"]["name"] = name
        if description:
            params["branch"]["description"] = description
        return self.make_request(
            query=query, params=params, return_type="branchUpdate", parse_response=False
        )

    def delete(self, stream_id: str, branch_id: str):
        """Delete a branch

        Arguments:
            stream_id {str} -- the id of the stream containing the branch to delete
            branch_id {str} -- the branch to delete

        Returns:
            bool -- True if deletion is successful
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "delete"})
        query = gql(
            """
            mutation BranchDelete($branch: BranchDeleteInput!) {
              branchDelete(branch: $branch)
            }
            """
        )
        params = {"branch": {"streamId": stream_id, "id": branch_id}}
        return self.make_request(
            query=query, params=params, return_type="branchDelete", parse_response=False
        )
| 32.958525
| 97
| 0.457634
|
from gql import gql
from specklepy.api.resource import ResourceBase
from specklepy.api.models import Branch
from specklepy.logging import metrics
NAME = "branch"
METHODS = ["create"]
class Resource(ResourceBase):
def __init__(self, account, basepath, client) -> None:
super().__init__(
account=account,
basepath=basepath,
client=client,
name=NAME,
methods=METHODS,
)
self.schema = Branch
def create(
self, stream_id: str, name: str, description: str = "No description provided"
) -> str:
metrics.track(metrics.BRANCH, self.account, {"name": "create"})
query = gql(
"""
mutation BranchCreate($branch: BranchCreateInput!) {
branchCreate(branch: $branch)
}
"""
)
params = {
"branch": {
"streamId": stream_id,
"name": name,
"description": description,
}
}
return self.make_request(
query=query, params=params, return_type="branchCreate", parse_response=False
)
def get(self, stream_id: str, name: str, commits_limit: int = 10):
metrics.track(metrics.BRANCH, self.account, {"name": "get"})
query = gql(
"""
query BranchGet($stream_id: String!, $name: String!, $commits_limit: Int!) {
stream(id: $stream_id) {
branch(name: $name) {
id,
name,
description,
commits (limit: $commits_limit) {
totalCount,
cursor,
items {
id,
referencedObject,
sourceApplication,
totalChildrenCount,
message,
authorName,
authorId,
branchName,
parents,
createdAt
}
}
}
}
}
"""
)
params = {"stream_id": stream_id, "name": name, "commits_limit": commits_limit}
return self.make_request(
query=query, params=params, return_type=["stream", "branch"]
)
def list(self, stream_id: str, branches_limit: int = 10, commits_limit: int = 10):
metrics.track(metrics.BRANCH, self.account, {"name": "get"})
query = gql(
"""
query BranchesGet($stream_id: String!, $branches_limit: Int!, $commits_limit: Int!) {
stream(id: $stream_id) {
branches(limit: $branches_limit) {
items {
id
name
description
commits(limit: $commits_limit) {
totalCount
items{
id
message
referencedObject
sourceApplication
parents
authorId
authorName
branchName
createdAt
}
}
}
}
}
}
"""
)
params = {
"stream_id": stream_id,
"branches_limit": branches_limit,
"commits_limit": commits_limit,
}
return self.make_request(
query=query, params=params, return_type=["stream", "branches", "items"]
)
def update(
self, stream_id: str, branch_id: str, name: str = None, description: str = None
):
metrics.track(metrics.BRANCH, self.account, {"name": "update"})
query = gql(
"""
mutation BranchUpdate($branch: BranchUpdateInput!) {
branchUpdate(branch: $branch)
}
"""
)
params = {
"branch": {
"streamId": stream_id,
"id": branch_id,
}
}
if name:
params["branch"]["name"] = name
if description:
params["branch"]["description"] = description
return self.make_request(
query=query, params=params, return_type="branchUpdate", parse_response=False
)
def delete(self, stream_id: str, branch_id: str):
metrics.track(metrics.BRANCH, self.account, {"name": "delete"})
query = gql(
"""
mutation BranchDelete($branch: BranchDeleteInput!) {
branchDelete(branch: $branch)
}
"""
)
params = {"branch": {"streamId": stream_id, "id": branch_id}}
return self.make_request(
query=query, params=params, return_type="branchDelete", parse_response=False
)
| true
| true
|
f71434b3c8211cc2ab644b5205326ec0c652e164
| 5,009
|
py
|
Python
|
cnn/model_search.py
|
badrutdinovrr/darts
|
434708e63cbda8f710d3c1810d06ad31c11db923
|
[
"Apache-2.0"
] | null | null | null |
cnn/model_search.py
|
badrutdinovrr/darts
|
434708e63cbda8f710d3c1810d06ad31c11db923
|
[
"Apache-2.0"
] | null | null | null |
cnn/model_search.py
|
badrutdinovrr/darts
|
434708e63cbda8f710d3c1810d06ad31c11db923
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
  """Weighted mixture of every candidate operation of the search space on one edge."""

  def __init__(self, C, stride):
    super(MixedOp, self).__init__()

    def make_candidate(name):
      # Build one candidate op; pooling candidates get a non-affine BatchNorm
      # appended, mirroring the treatment of the learned candidates.
      candidate = OPS[name](C, stride, False)
      if 'pool' in name:
        candidate = nn.Sequential(candidate, nn.BatchNorm2d(C, affine=False))
      return candidate

    self._ops = nn.ModuleList([make_candidate(name) for name in PRIMITIVES])

  def forward(self, x, weights):
    """Return sum_k weights[k] * op_k(x) over all candidate operations."""
    weighted = [coeff * branch(x) for coeff, branch in zip(weights, self._ops)]
    return sum(weighted)
class Cell(nn.Module):
  """One DARTS search cell: a small DAG whose edges are MixedOps."""

  def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
    super(Cell, self).__init__()
    self.reduction = reduction
    # If the previous cell reduced the spatial size, s0 must be downsampled
    # to match s1; otherwise a 1x1 ReLU-Conv-BN just aligns the channels.
    if reduction_prev:
      self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
    else:
      self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
    self._steps = steps  # number of intermediate nodes in the DAG
    self._multiplier = multiplier  # how many node outputs get concatenated
    self._ops = nn.ModuleList()
    self._bns = nn.ModuleList()
    # Node i has edges from the two cell inputs plus all i previous nodes
    # (2 + i edges); the flat self._ops list stores them in exactly that
    # order, which forward() relies on via its running offset.
    for i in range(self._steps):
      for j in range(2+i):
        # In a reduction cell only the edges coming from the two cell
        # inputs use stride 2.
        stride = 2 if reduction and j < 2 else 1
        op = MixedOp(C, stride)
        self._ops.append(op)

  def forward(self, s0, s1, weights):
    """Run the cell; weights[k] holds the architecture weights of edge k."""
    s0 = self.preprocess0(s0)
    s1 = self.preprocess1(s1)
    states = [s0, s1]
    offset = 0
    for i in range(self._steps):
      # Each new node is the sum over all incoming edges of the mixed op
      # applied to the corresponding earlier state.
      s = sum(self._ops[offset+j](h, weights[offset+j]) for j, h in enumerate(states))
      offset += len(states)
      states.append(s)
    # Concatenate the last `multiplier` intermediate states on channels.
    return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
  """DARTS search network: a stack of Cells sharing two sets of architecture
  parameters (alphas) — one for normal cells, one for reduction cells."""

  def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3):
    super(Network, self).__init__()
    self._C = C
    self._num_classes = num_classes
    self._layers = layers
    self._criterion = criterion
    self._steps = steps
    self._multiplier = multiplier
    C_curr = stem_multiplier*C
    # Stem takes a single-channel (grayscale) input — note upstream DARTS
    # uses 3 input channels here.
    self.stem = nn.Sequential(
      nn.Conv2d(1, C_curr, 3, padding=1, bias=False),
      nn.BatchNorm2d(C_curr)
    )
    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    reduction_prev = False
    for i in range(layers):
      # Reduction cells (doubled width, stride-2 input edges) are placed
      # at 1/3 and 2/3 of the network depth.
      if i in [layers//3, 2*layers//3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      # Each cell outputs multiplier*C_curr channels (concatenated nodes).
      C_prev_prev, C_prev = C_prev, multiplier*C_curr
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C_prev, num_classes)
    self._initialize_alphas()

  def new(self):
    """Return a fresh Network sharing this model's architecture-parameter
    values (copied by value, not by reference). Requires CUDA."""
    model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
    for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
      x.data.copy_(y.data)
    return model_new

  def forward(self, input):
    s0 = s1 = self.stem(input)
    for i, cell in enumerate(self.cells):
      # Normal and reduction cells use separate alpha sets; softmax turns
      # each edge's alphas into mixture weights over the candidate ops.
      if cell.reduction:
        weights = F.softmax(self.alphas_reduce, dim=-1)
      else:
        weights = F.softmax(self.alphas_normal, dim=-1)
      s0, s1 = s1, cell(s0, s1, weights)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0),-1))
    return logits

  def _loss(self, input, target):
    # Convenience wrapper: forward pass + training criterion.
    logits = self(input)
    return self._criterion(logits, target)

  def _initialize_alphas(self):
    # One alpha row per edge: node i has 2+i incoming edges.
    k = sum(1 for i in range(self._steps) for n in range(2+i))
    num_ops = len(PRIMITIVES)
    # Small random init keeps the initial op mixture near uniform.
    # NOTE: .cuda() makes this CUDA-only; Variable is a legacy (pre-0.4) API.
    self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self._arch_parameters = [
      self.alphas_normal,
      self.alphas_reduce,
    ]

  def arch_parameters(self):
    """Return the list of architecture parameters (normal and reduce alphas)."""
    return self._arch_parameters

  def genotype(self):
    """Decode the continuous alphas into a discrete Genotype."""

    def _parse(weights):
      # For each node, keep the two incoming edges whose strongest
      # non-'none' op has the highest softmax weight, then record that
      # op for each kept edge.
      gene = []
      n = 2
      start = 0
      for i in range(self._steps):
        end = start + n
        W = weights[start:end].copy()
        edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
        for j in edges:
          k_best = None
          for k in range(len(W[j])):
            if k != PRIMITIVES.index('none'):
              if k_best is None or W[j][k] > W[j][k_best]:
                k_best = k
          gene.append((PRIMITIVES[k_best], j))
        start = end
        n += 1
      return gene

    gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
    gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())
    # The concatenated nodes are the last `multiplier` intermediate nodes.
    concat = range(2+self._steps-self._multiplier, self._steps+2)
    genotype = Genotype(
      normal=gene_normal, normal_concat=concat,
      reduce=gene_reduce, reduce_concat=concat
    )
    return genotype
| 30.542683
| 128
| 0.643242
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
def __init__(self, C, stride):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in PRIMITIVES:
op = OPS[primitive](C, stride, False)
if 'pool' in primitive:
op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
self._ops.append(op)
def forward(self, x, weights):
return sum(w * op(x) for w, op in zip(weights, self._ops))
class Cell(nn.Module):
def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
self.reduction = reduction
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
self._steps = steps
self._multiplier = multiplier
self._ops = nn.ModuleList()
self._bns = nn.ModuleList()
for i in range(self._steps):
for j in range(2+i):
stride = 2 if reduction and j < 2 else 1
op = MixedOp(C, stride)
self._ops.append(op)
def forward(self, s0, s1, weights):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
offset = 0
for i in range(self._steps):
s = sum(self._ops[offset+j](h, weights[offset+j]) for j, h in enumerate(states))
offset += len(states)
states.append(s)
return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3):
super(Network, self).__init__()
self._C = C
self._num_classes = num_classes
self._layers = layers
self._criterion = criterion
self._steps = steps
self._multiplier = multiplier
C_curr = stem_multiplier*C
self.stem = nn.Sequential(
nn.Conv2d(1, C_curr, 3, padding=1, bias=False),
nn.BatchNorm2d(C_curr)
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if i in [layers//3, 2*layers//3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, multiplier*C_curr
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self._initialize_alphas()
def new(self):
model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
def forward(self, input):
s0 = s1 = self.stem(input)
for i, cell in enumerate(self.cells):
if cell.reduction:
weights = F.softmax(self.alphas_reduce, dim=-1)
else:
weights = F.softmax(self.alphas_normal, dim=-1)
s0, s1 = s1, cell(s0, s1, weights)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0),-1))
return logits
def _loss(self, input, target):
logits = self(input)
return self._criterion(logits, target)
def _initialize_alphas(self):
k = sum(1 for i in range(self._steps) for n in range(2+i))
num_ops = len(PRIMITIVES)
self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
self._arch_parameters = [
self.alphas_normal,
self.alphas_reduce,
]
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
def _parse(weights):
gene = []
n = 2
start = 0
for i in range(self._steps):
end = start + n
W = weights[start:end].copy()
edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
for j in edges:
k_best = None
for k in range(len(W[j])):
if k != PRIMITIVES.index('none'):
if k_best is None or W[j][k] > W[j][k_best]:
k_best = k
gene.append((PRIMITIVES[k_best], j))
start = end
n += 1
return gene
gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())
concat = range(2+self._steps-self._multiplier, self._steps+2)
genotype = Genotype(
normal=gene_normal, normal_concat=concat,
reduce=gene_reduce, reduce_concat=concat
)
return genotype
| true
| true
|
f71434c24f3b7959298b19af49f4893c651e600c
| 2,465
|
py
|
Python
|
credoscript/adaptors/variationadaptor.py
|
tlb-lab/credoscript
|
32bdf08d84703dc2062dae4df1a95587d36c3cf7
|
[
"MIT"
] | null | null | null |
credoscript/adaptors/variationadaptor.py
|
tlb-lab/credoscript
|
32bdf08d84703dc2062dae4df1a95587d36c3cf7
|
[
"MIT"
] | null | null | null |
credoscript/adaptors/variationadaptor.py
|
tlb-lab/credoscript
|
32bdf08d84703dc2062dae4df1a95587d36c3cf7
|
[
"MIT"
] | null | null | null |
from sqlalchemy.sql.expression import and_
from credoscript.mixins.base import paginate
class VariationAdaptor(object):
    """Adaptor used to fetch Variation entities from the CREDO database."""

    def __init__(self, dynamic=False, paginate=False, per_page=100):
        # Query and pagination settings consumed by the @paginate decorator.
        self.query = Variation.query
        self.dynamic = dynamic
        self.paginate = paginate
        self.per_page = per_page

    def fetch_by_variation_id(self, variation_id):
        """Return the Variation with the given primary key, or None."""
        return self.query.get(variation_id)

    def fetch_by_variation_name(self, variation_name):
        """Return the first Variation with the given name, or None."""
        return self.query.filter_by(variation_name=variation_name).first()

    @paginate
    def fetch_all_by_res_map_id(self, res_map_id, *expr, **kwargs):
        """Return all variations mapped to the given residue mapping id."""
        query = self.query.join('Variation2PDB')
        query = query.filter(Variation2PDB.res_map_id==res_map_id)
        return query

    @paginate
    def fetch_all_by_chain_id(self, chain_id, *expr, **kwargs):
        """Return all variations mapping onto peptides of the given chain."""
        query = self.query.join('Variation2PDB')
        query = query.join(Peptide, Peptide.res_map_id==Variation2PDB.res_map_id)
        query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
        return query

    @paginate
    def fetch_all_ext_by_chain_id(self, chain_id, *expr, **kwargs):
        """As fetch_all_by_chain_id, but each row also carries the
        Variation2UniProt mapping and the Peptide entity."""
        query = self.query.join('Variation2UniProt','Variation2PDB','Peptide')
        query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
        query = query.add_entity(Variation2UniProt)
        query = query.add_entity(Peptide)
        return query

    @paginate
    def fetch_all_by_phenotype_id(self, phenotype_id, *expr, **kwargs):
        """Return distinct variations annotated with the given phenotype."""
        query = self.query.join('Annotations')
        query = query.filter(and_(Annotation.phenotype_id==phenotype_id, *expr))
        query = query.distinct()
        return query

    @paginate
    def fetch_all_in_contact_with_ligand_id(self, ligand_id, *expr, **kwargs):
        """
        Returns all variations that can be mapped onto binding sites defined by
        the ligand having the input ligand identifier.
        """
        query = self.query.join('Variation2BindingSites')
        query = query.filter(and_(Variation2BindingSite.ligand_id==ligand_id,
                                  *expr))
        return query.distinct()
from ..models.variation import Variation, Annotation, Variation2UniProt, Variation2PDB, Variation2BindingSite
from ..models.peptide import Peptide
| 31.602564
| 109
| 0.643813
|
from sqlalchemy.sql.expression import and_
from credoscript.mixins.base import paginate
class VariationAdaptor(object):
def __init__(self, dynamic=False, paginate=False, per_page=100):
self.query = Variation.query
self.dynamic = dynamic
self.paginate = paginate
self.per_page = per_page
def fetch_by_variation_id(self, variation_id):
return self.query.get(variation_id)
def fetch_by_variation_name(self, variation_name):
return self.query.filter_by(variation_name=variation_name).first()
@paginate
def fetch_all_by_res_map_id(self, res_map_id, *expr, **kwargs):
query = self.query.join('Variation2PDB')
query = query.filter(Variation2PDB.res_map_id==res_map_id)
return query
@paginate
def fetch_all_by_chain_id(self, chain_id, *expr, **kwargs):
query = self.query.join('Variation2PDB')
query = query.join(Peptide, Peptide.res_map_id==Variation2PDB.res_map_id)
query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
return query
@paginate
def fetch_all_ext_by_chain_id(self, chain_id, *expr, **kwargs):
query = self.query.join('Variation2UniProt','Variation2PDB','Peptide')
query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
query = query.add_entity(Variation2UniProt)
query = query.add_entity(Peptide)
return query
@paginate
def fetch_all_by_phenotype_id(self, phenotype_id, *expr, **kwargs):
query = self.query.join('Annotations')
query = query.filter(and_(Annotation.phenotype_id==phenotype_id, *expr))
query = query.distinct()
return query
@paginate
def fetch_all_in_contact_with_ligand_id(self, ligand_id, *expr, **kwargs):
query = self.query.join('Variation2BindingSites')
query = query.filter(and_(Variation2BindingSite.ligand_id==ligand_id,
*expr))
return query.distinct()
from ..models.variation import Variation, Annotation, Variation2UniProt, Variation2PDB, Variation2BindingSite
from ..models.peptide import Peptide
| true
| true
|
f71435aefbab60525e1f6180d047b1c4a343f58a
| 957
|
py
|
Python
|
test/test_basic_software_asset_all_of.py
|
cons3rt/cons3rt-python-sdk
|
f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0
|
[
"RSA-MD"
] | null | null | null |
test/test_basic_software_asset_all_of.py
|
cons3rt/cons3rt-python-sdk
|
f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0
|
[
"RSA-MD"
] | null | null | null |
test/test_basic_software_asset_all_of.py
|
cons3rt/cons3rt-python-sdk
|
f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: apiteam@swagger.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.basic_software_asset_all_of import BasicSoftwareAssetAllOf # noqa: E501
from openapi_client.rest import ApiException
class TestBasicSoftwareAssetAllOf(unittest.TestCase):
    """Unit-test stubs for the BasicSoftwareAssetAllOf model."""

    def setUp(self):
        """No fixtures are required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testBasicSoftwareAssetAllOf(self):
        """Test BasicSoftwareAssetAllOf"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.basic_software_asset_all_of.BasicSoftwareAssetAllOf() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 23.341463
| 107
| 0.726228
|
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.basic_software_asset_all_of import BasicSoftwareAssetAllOf
from openapi_client.rest import ApiException
class TestBasicSoftwareAssetAllOf(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBasicSoftwareAssetAllOf(self):
s
if __name__ == '__main__':
unittest.main()
| true
| true
|
f7143656ce4da10df1aaa3d84302fc6d8f3085ff
| 4,728
|
py
|
Python
|
tests/integration_tests/build/test_coverage.py
|
Mehigh17/firecracker
|
78c6b29f14f9e810c7426d935b5c4fbdfdfc4119
|
[
"Apache-2.0"
] | null | null | null |
tests/integration_tests/build/test_coverage.py
|
Mehigh17/firecracker
|
78c6b29f14f9e810c7426d935b5c4fbdfdfc4119
|
[
"Apache-2.0"
] | null | null | null |
tests/integration_tests/build/test_coverage.py
|
Mehigh17/firecracker
|
78c6b29f14f9e810c7426d935b5c4fbdfdfc4119
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests pertaining to line/branch test coverage for the Firecracker code base.
# TODO
- Put the coverage in `s3://spec.firecracker` and update it automatically.
target should be put in `s3://spec.firecracker` and automatically updated.
"""
import os
import platform
import re
import pytest
import framework.utils as utils
import host_tools.cargo_build as host # pylint: disable=import-error
COVERAGE_TARGET_PCT = 84.53
COVERAGE_MAX_DELTA = 0.05
CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
KCOV_COVERAGE_FILE = 'index.js'
"""kcov will aggregate coverage data in this file."""
KCOV_COVERED_LINES_REGEX = r'"covered_lines":"(\d+)"'
"""Regex for extracting number of total covered lines found by kcov."""
KCOV_TOTAL_LINES_REGEX = r'"total_lines" : "(\d+)"'
"""Regex for extracting number of total executable lines found by kcov."""
@pytest.mark.timeout(120)
@pytest.mark.skipif(
    platform.machine() != "x86_64",
    reason="no need to test it on multiple platforms"
)
def test_ensure_mod_tests():
    """Check that files containing unit tests have a 'tests' module defined."""
    # List all source files containing rust #[test] attribute,
    # (excluding generated files and integration test directories).
    # Take the list and check each file contains 'mod tests {', output file
    # name if it doesn't.
    # The inner grep collects files with #[test]; the outer grep then prints
    # any of those that lack a 'mod tests {' block.
    cmd = (
        '/bin/bash '
        '-c '
        '"grep '
        '--files-without-match '
        '\'mod tests {\' '
        '\\$(grep '
        '--files-with-matches '
        '--recursive '
        '--exclude-dir=src/*_gen/* '
        '\'\\#\\[test\\]\' ../src/*/src)" '
    )
    # The outer grep returns 0 even if it finds files without the match, so we
    # ignore the return code.
    result = utils.run_cmd(cmd, no_shell=False, ignore_return_code=True)
    error_msg = (
        'Tests found in files without a "tests" module:\n {}'
        'To ensure code coverage is reported correctly, please check that '
        'your tests are in a module named "tests".'.format(result.stdout)
    )
    # Any stdout from the pipeline means at least one offending file exists.
    assert not result.stdout, error_msg
@pytest.mark.timeout(400)
@pytest.mark.skipif(
    platform.machine() != "x86_64",
    reason="kcov hangs on aarch64"
)
def test_coverage(test_session_root_path, test_session_tmp_path):
    """Test line coverage with kcov.

    The result is extracted from the $KCOV_COVERAGE_FILE file created by kcov
    after a coverage run. Fails if coverage drifts outside
    COVERAGE_TARGET_PCT +/- COVERAGE_MAX_DELTA.
    """
    # Paths excluded from coverage: toolchains, build output, test code and
    # auto-generated bindings.
    exclude_pattern = (
        '${CARGO_HOME:-$HOME/.cargo/},'
        'build/,'
        'tests/,'
        'usr/lib/gcc,'
        'lib/x86_64-linux-gnu/,'
        # The following files/directories are auto-generated
        'bootparam.rs,'
        'elf.rs,'
        'mpspec.rs,'
        'msr_index.rs,'
        '_gen'
    )
    # Unit-test modules themselves are excluded from the measured lines.
    exclude_region = '\'mod tests {\''
    cmd = (
        'CARGO_TARGET_DIR={} cargo kcov --all '
        '--output {} -- '
        '--exclude-pattern={} '
        '--exclude-region={} --verify'
    ).format(
        os.path.join(test_session_root_path, CARGO_KCOV_REL_PATH),
        test_session_tmp_path,
        exclude_pattern,
        exclude_region
    )
    # By default, `cargo kcov` passes `--exclude-pattern=$CARGO_HOME --verify`
    # to kcov. To pass others arguments, we need to include the defaults.
    utils.run_cmd(cmd)
    coverage_file = os.path.join(test_session_tmp_path, KCOV_COVERAGE_FILE)
    with open(coverage_file) as cov_output:
        contents = cov_output.read()
        covered_lines = int(re.findall(KCOV_COVERED_LINES_REGEX, contents)[0])
        total_lines = int(re.findall(KCOV_TOTAL_LINES_REGEX, contents)[0])
        coverage = covered_lines / total_lines * 100
    print("Number of executable lines: {}".format(total_lines))
    print("Number of covered lines: {}".format(covered_lines))
    print("Thus, coverage is: {:.2f}%".format(coverage))
    coverage_low_msg = (
        'Current code coverage ({:.2f}%) is below the target ({}%).'
        .format(coverage, COVERAGE_TARGET_PCT)
    )
    min_coverage = COVERAGE_TARGET_PCT - COVERAGE_MAX_DELTA
    assert coverage >= min_coverage, coverage_low_msg
    # Get the name of the variable that needs updating.
    # NOTE(review): this looks the constant up by object identity in
    # globals(); it works for a module-level float constant but is fragile
    # if the value is ever duplicated or inlined — confirm before reuse.
    namespace = globals()
    cov_target_name = [name for name in namespace if namespace[name]
                       is COVERAGE_TARGET_PCT][0]
    coverage_high_msg = (
        'Current code coverage ({:.2f}%) is above the target ({}%).\n'
        'Please update the value of {}.'
        .format(coverage, COVERAGE_TARGET_PCT, cov_target_name)
    )
    assert coverage - COVERAGE_TARGET_PCT <= COVERAGE_MAX_DELTA,\
        coverage_high_msg
| 32.833333
| 79
| 0.655245
|
import os
import platform
import re
import pytest
import framework.utils as utils
import host_tools.cargo_build as host
COVERAGE_TARGET_PCT = 84.53
COVERAGE_MAX_DELTA = 0.05
CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
KCOV_COVERAGE_FILE = 'index.js'
KCOV_COVERED_LINES_REGEX = r'"covered_lines":"(\d+)"'
KCOV_TOTAL_LINES_REGEX = r'"total_lines" : "(\d+)"'
@pytest.mark.timeout(120)
@pytest.mark.skipif(
platform.machine() != "x86_64",
reason="no need to test it on multiple platforms"
)
def test_ensure_mod_tests():
cmd = (
'/bin/bash '
'-c '
'"grep '
'--files-without-match '
'\'mod tests {\' '
'\\$(grep '
'--files-with-matches '
'--recursive '
'--exclude-dir=src/*_gen/* '
'\'\\#\\[test\\]\' ../src/*/src)" '
)
# The outer grep returns 0 even if it finds files without the match, so we
# ignore the return code.
result = utils.run_cmd(cmd, no_shell=False, ignore_return_code=True)
error_msg = (
'Tests found in files without a "tests" module:\n {}'
'To ensure code coverage is reported correctly, please check that '
'your tests are in a module named "tests".'.format(result.stdout)
)
assert not result.stdout, error_msg
@pytest.mark.timeout(400)
@pytest.mark.skipif(
platform.machine() != "x86_64",
reason="kcov hangs on aarch64"
)
def test_coverage(test_session_root_path, test_session_tmp_path):
exclude_pattern = (
'${CARGO_HOME:-$HOME/.cargo/},'
'build/,'
'tests/,'
'usr/lib/gcc,'
'lib/x86_64-linux-gnu/,'
# The following files/directories are auto-generated
'bootparam.rs,'
'elf.rs,'
'mpspec.rs,'
'msr_index.rs,'
'_gen'
)
exclude_region = '\'mod tests {\''
cmd = (
'CARGO_TARGET_DIR={} cargo kcov --all '
'--output {} -- '
'--exclude-pattern={} '
'--exclude-region={} --verify'
).format(
os.path.join(test_session_root_path, CARGO_KCOV_REL_PATH),
test_session_tmp_path,
exclude_pattern,
exclude_region
)
# By default, `cargo kcov` passes `--exclude-pattern=$CARGO_HOME --verify`
# to kcov. To pass others arguments, we need to include the defaults.
utils.run_cmd(cmd)
coverage_file = os.path.join(test_session_tmp_path, KCOV_COVERAGE_FILE)
with open(coverage_file) as cov_output:
contents = cov_output.read()
covered_lines = int(re.findall(KCOV_COVERED_LINES_REGEX, contents)[0])
total_lines = int(re.findall(KCOV_TOTAL_LINES_REGEX, contents)[0])
coverage = covered_lines / total_lines * 100
print("Number of executable lines: {}".format(total_lines))
print("Number of covered lines: {}".format(covered_lines))
print("Thus, coverage is: {:.2f}%".format(coverage))
coverage_low_msg = (
'Current code coverage ({:.2f}%) is below the target ({}%).'
.format(coverage, COVERAGE_TARGET_PCT)
)
min_coverage = COVERAGE_TARGET_PCT - COVERAGE_MAX_DELTA
assert coverage >= min_coverage, coverage_low_msg
# Get the name of the variable that needs updating.
namespace = globals()
cov_target_name = [name for name in namespace if namespace[name]
is COVERAGE_TARGET_PCT][0]
coverage_high_msg = (
'Current code coverage ({:.2f}%) is above the target ({}%).\n'
'Please update the value of {}.'
.format(coverage, COVERAGE_TARGET_PCT, cov_target_name)
)
assert coverage - COVERAGE_TARGET_PCT <= COVERAGE_MAX_DELTA,\
coverage_high_msg
| true
| true
|
f71436d10cc2c701fbdd2731e650a7b4d07afd22
| 6,393
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/networkrepository/cfat5005.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/networkrepository/cfat5005.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/networkrepository/cfat5005.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph c-fat500-5.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:54:22.066913
The undirected graph c-fat500-5 has 500 nodes and 23191 unweighted edges,
of which none are self-loops. The graph is quite dense as it has a density
of 0.18590 and is connected, as it has a single component. The graph median
node degree is 92, the mean node degree is 92.76 and the node degree mode
is 92. The top 5 most central nodes are 499 (degree 95), 498 (degree 95),
483 (degree 95), 482 (degree 95) and 467 (degree 95).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
@misc{dimacs,
author={{DIMACS}},
title={DIMACS Challenge},
note={http://dimacs.rutgers.edu/Challenges/}}
@article{rossi2014coloring,
title={Coloring Large Complex Networks},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle={Social Network Analysis and Mining},
pages={1--51},
year={2014}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import CFat5005
# Then load the graph
graph = CFat5005()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def CFat5005(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the c-fat500-5 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
Wether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instace of c-fat500-5 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:54:22.066913
The undirected graph c-fat500-5 has 500 nodes and 23191 unweighted edges,
of which none are self-loops. The graph is quite dense as it has a density
of 0.18590 and is connected, as it has a single component. The graph median
node degree is 92, the mean node degree is 92.76 and the node degree mode
is 92. The top 5 most central nodes are 499 (degree 95), 498 (degree 95),
483 (degree 95), 482 (degree 95) and 467 (degree 95).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
@misc{dimacs,
author={{DIMACS}},
title={DIMACS Challenge},
note={http://dimacs.rutgers.edu/Challenges/}}
@article{rossi2014coloring,
title={Coloring Large Complex Networks},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle={Social Network Analysis and Mining},
pages={1--51},
year={2014}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import CFat5005
# Then load the graph
graph = CFat5005()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="CFat5005",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 31.185366
| 94
| 0.672141
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def CFat5005(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="CFat5005",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
f714374a1632476acaefbc832c81cdaf88352611
| 337
|
py
|
Python
|
app.py
|
munrojm/api
|
478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
app.py
|
munrojm/api
|
478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
app.py
|
munrojm/api
|
478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import os
from monty.serialization import loadfn
from fastapi import FastAPI
import mp_api.xas.api
xas_store = os.environ.get("XAS_STORE", "xas_store.json")
xas_store = loadfn(xas_store)
xas_router = mp_api.xas.api.get_router(xas_store)
app = FastAPI(title="Materials Project API", version="3.0.0-dev")
app.include_router(xas_router)
| 25.923077
| 65
| 0.789318
|
import os
from monty.serialization import loadfn
from fastapi import FastAPI
import mp_api.xas.api
xas_store = os.environ.get("XAS_STORE", "xas_store.json")
xas_store = loadfn(xas_store)
xas_router = mp_api.xas.api.get_router(xas_store)
app = FastAPI(title="Materials Project API", version="3.0.0-dev")
app.include_router(xas_router)
| true
| true
|
f7143796d07cbc04a71f65fa0722e8c7ea8bdc9a
| 1,080
|
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
andrewmegaris/sdc_capstoner
|
e37393c93a9b01d1682a5e214acb8ad12417e6e2
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/tl_classifier.py
|
andrewmegaris/sdc_capstoner
|
e37393c93a9b01d1682a5e214acb8ad12417e6e2
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/tl_classifier.py
|
andrewmegaris/sdc_capstoner
|
e37393c93a9b01d1682a5e214acb8ad12417e6e2
|
[
"MIT"
] | null | null | null |
from styx_msgs.msg import TrafficLight
import cv2
from keras.models import load_model
from numpy import newaxis
import numpy as np
import tensorflow as tf
import os
class TLClassifier(object):
def __init__(self):
path = os.getcwd()
self.model = load_model(path + '/light_classification/model.h5')
self.model._make_predict_function()
self.graph = tf.get_default_graph()
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
image = cv2.resize(image, (400, 400))
image = image.astype(float)
image = image / 255.0
image = image[newaxis,:,:,:]
with self.graph.as_default():
predictions = self.model.predict(image)
classification = np.argmax(predictions, axis=1)
if(classification[0] == 1):
return TrafficLight.RED
return TrafficLight.UNKNOWN
| 30
| 80
| 0.661111
|
from styx_msgs.msg import TrafficLight
import cv2
from keras.models import load_model
from numpy import newaxis
import numpy as np
import tensorflow as tf
import os
class TLClassifier(object):
def __init__(self):
path = os.getcwd()
self.model = load_model(path + '/light_classification/model.h5')
self.model._make_predict_function()
self.graph = tf.get_default_graph()
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
image = cv2.resize(image, (400, 400))
image = image.astype(float)
image = image / 255.0
image = image[newaxis,:,:,:]
with self.graph.as_default():
predictions = self.model.predict(image)
classification = np.argmax(predictions, axis=1)
if(classification[0] == 1):
return TrafficLight.RED
return TrafficLight.UNKNOWN
| false
| true
|
f714379ee3973b8021d36894d60ed8cb48ed5454
| 246
|
py
|
Python
|
exercicios/Lista4/Q14.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista4/Q14.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista4/Q14.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
#Faça um programa que leia um vetor de 10 posições e verifique
#se existem valores iguais e os escreva na tela.
vetor=[]
for c in range(0,10):
n=int(input("Informe um numero: "))
if n in vetor:
print(f"{n}")
vetor.append(n)
| 24.6
| 62
| 0.650407
|
vetor=[]
for c in range(0,10):
n=int(input("Informe um numero: "))
if n in vetor:
print(f"{n}")
vetor.append(n)
| true
| true
|
f71438eae2367cd2d781df2131122da34442181b
| 27,609
|
py
|
Python
|
nova/tests/unit/virt/test_block_device.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/test_block_device.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/virt/test_block_device.py
|
gabriel-samfira/nova
|
5ef07cc04dbf0216452ae358e57d9ddac51f1803
|
[
"Apache-2.0"
] | null | null | null |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.serialization import jsonutils
from nova import block_device
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice,
'blank': driver_block_device.DriverBlankBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
blank_bdm = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
blank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
blank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
self.assertRaises(driver_block_device._NotTransformable,
cls, {'no_device': True})
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
for k, v in db_bdm.iteritems():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.iteritems():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
test_bdm.save(self.context)
for fld, alias in test_bdm._update_on_save.iteritems():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with(self.context)
# Test the save method with no context passed
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
test_bdm.save()
save_mock.assert_called_once_with()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def test_driver_blank_block_device(self):
self._test_driver_device('blank')
test_bdm = self.driver_classes['blank'](
self.blank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, check_attach=True,
fail_check_attach=False, driver_attach=False,
fail_driver_attach=False, volume_attach=True,
access_mode='rw'):
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
enc_data = {'fake': 'enc_data'}
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if check_attach:
if not fail_check_attach:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndRaise(
test.TestingException)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
if driver_attach:
encryptors.get_encryption_metadata(
elevated_context, self.volume_api, fake_volume['id'],
connection_info).AndReturn(enc_data)
if not fail_driver_attach:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndReturn(None)
else:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndRaise(test.TestingException)
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
expected_conn_info).AndReturn(None)
return instance, expected_conn_info
if volume_attach:
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name'],
mode=access_mode).AndReturn(None)
driver_bdm._bdm_obj.save(self.context).AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def check_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=False)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=False)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def check_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
test_bdm._bdm_obj.save(self.context).AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3,
'', '', snapshot).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
# Make sure theses are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_image_attach_no_volume(self):
        """An image BDM with no volume_id creates a volume of the BDM's
        size (1) from the image, waits for it, then attaches it.
        """
        no_volume_image = self.image_bdm.copy()
        no_volume_image['volume_id'] = None
        test_bdm = self.driver_classes['image'](no_volume_image)
        image = {'id': 'fake-image-id-1'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}
        wait_func = self.mox.CreateMockAnything()
        self.volume_api.create(self.context, 1,
                               '', '', image_id=image['id']).AndReturn(volume)
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
                test_bdm, no_volume_image, volume)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        # The driver BDM records the id of the newly created volume.
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_image_attach_volume(self):
        """An image BDM that already has a volume_id delegates straight to
        the volume driver class; no snapshot/volume creation must run.
        """
        test_bdm = self.driver_classes['image'](
            self.image_bdm)
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        volume_class = self.driver_classes['volume']
        self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
        self.mox.StubOutWithMock(self.volume_api, 'create')
        volume_class.attach(self.context, instance, self.volume_api,
                            self.virt_driver, do_check_attach=True
                            ).AndReturn(None)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_volume(self):
no_blank_volume = self.blank_bdm.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['blank'](no_blank_volume)
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-uuid-blank-vol'}
with contextlib.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(volume_class, 'attach')
) as (vol_create, vol_attach):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(self.context,
test_bdm.volume_size,
'fake-uuid-blank-vol',
'')
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver,
do_check_attach=True)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
    def test_legacy_block_devices(self):
        """legacy_block_devices converts driver BDMs to the legacy dict
        form and numbers ephemerals sequentially (ephemeral0, 1, ...).
        """
        test_snapshot = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        block_device_mapping = [test_snapshot, test_snapshot]
        legacy_bdm = driver_block_device.legacy_block_devices(
            block_device_mapping)
        self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
                                      self.snapshot_legacy_driver_bdm])
        # Test that the ephemerals work as expected
        test_ephemerals = [self.driver_classes['ephemeral'](
            self.ephemeral_bdm) for _ in xrange(2)]
        expected = [self.ephemeral_legacy_driver_bdm.copy()
                    for _ in xrange(2)]
        # Each ephemeral gets a distinct virtual_name / index.
        expected[0]['virtual_name'] = 'ephemeral0'
        expected[0]['num'] = 0
        expected[1]['virtual_name'] = 'ephemeral1'
        expected[1]['num'] = 1
        legacy_ephemerals = driver_block_device.legacy_block_devices(
            test_ephemerals)
        self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
    def test_is_implemented(self):
        """All supported BDM types are implemented; an image BDM with a
        'local' destination has no driver implementation.
        """
        for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
                    self.ephemeral_bdm, self.snapshot_bdm):
            self.assertTrue(driver_block_device.is_implemented(bdm))
        local_image = self.image_bdm.copy()
        local_image['destination_type'] = 'local'
        self.assertFalse(driver_block_device.is_implemented(local_image))
    def test_is_block_device_mapping(self):
        """Only volume-backed types (image/snapshot/volume/blank) count as
        block device mappings; swap and ephemeral do not.
        """
        test_swap = self.driver_classes['swap'](self.swap_bdm)
        test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
        test_image = self.driver_classes['image'](self.image_bdm)
        test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
        test_volume = self.driver_classes['volume'](self.volume_bdm)
        test_blank = self.driver_classes['blank'](self.blank_bdm)
        for bdm in (test_image, test_snapshot, test_volume, test_blank):
            self.assertTrue(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))
        for bdm in (test_swap, test_ephemeral):
            self.assertFalse(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))
| 40.305109
| 78
| 0.604477
|
import contextlib
import mock
from oslo.serialization import jsonutils
from nova import block_device
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice,
'blank': driver_block_device.DriverBlankBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
blank_bdm = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
blank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
blank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
    def setUp(self):
        """Create mox mocks for the cinder API and the compute driver,
        plus a request context shared by all tests."""
        super(TestDriverBlockDevice, self).setUp()
        self.volume_api = self.mox.CreateMock(cinder.API)
        self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
        self.context = context.RequestContext('fake_user',
                                              'fake_project')
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
self.assertRaises(driver_block_device._NotTransformable,
cls, {'no_device': True})
    def _test_driver_device(self, name):
        """Exercise the full driver-BDM contract for one device type:
        dict contents, field passthrough, legacy form, type checking of
        the other driver classes, and save() field propagation.
        """
        db_bdm = getattr(self, "%s_bdm" % name)
        test_bdm = self.driver_classes[name](db_bdm)
        self.assertThat(test_bdm, matchers.DictMatches(
            getattr(self, "%s_driver_bdm" % name)))
        # Every DB field must be mirrored on the wrapped bdm object
        # (booleans may come back coerced, hence the bool() cast).
        for k, v in db_bdm.iteritems():
            field_val = getattr(test_bdm._bdm_obj, k)
            if isinstance(field_val, bool):
                v = bool(v)
            self.assertEqual(field_val, v)
        self.assertThat(test_bdm.legacy(),
                        matchers.DictMatches(
                            getattr(self, "%s_legacy_driver_bdm" % name)))
        # Attributes listed in _proxy_as_attr are forwarded verbatim.
        for passthru in test_bdm._proxy_as_attr:
            self.assertEqual(getattr(test_bdm, passthru),
                             getattr(test_bdm._bdm_obj, passthru))
        # Every other driver class must reject this BDM type.
        for other_name, cls in self.driver_classes.iteritems():
            if other_name == name:
                continue
            self.assertRaises(driver_block_device._InvalidType,
                              cls,
                              getattr(self, '%s_bdm' % name))
        # save(context) pushes _update_on_save fields onto the bdm object
        # and saves it with the same context.
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save(self.context)
            for fld, alias in test_bdm._update_on_save.iteritems():
                self.assertEqual(test_bdm[alias or fld],
                                 getattr(test_bdm._bdm_obj, fld))
            save_mock.assert_called_once_with(self.context)
        # save() without a context must also be supported.
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save()
            save_mock.assert_called_once_with()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def test_driver_blank_block_device(self):
self._test_driver_device('blank')
test_bdm = self.driver_classes['blank'](
self.blank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
    def _test_volume_attach(self, driver_bdm, bdm_dict,
                            fake_volume, check_attach=True,
                            fail_check_attach=False, driver_attach=False,
                            fail_driver_attach=False, volume_attach=True,
                            access_mode='rw'):
        """Record the mox expectations for one volume attach flow.

        Returns (instance, expected_conn_info). When a fail_* flag is
        set, recording stops at the failing call and the caller is
        expected to assert that attach() raises TestingException.
        """
        elevated_context = self.context.elevated()
        # Pin elevated() to a single object so the recorded expectations
        # compare equal to the context used inside attach().
        self.stubs.Set(self.context, 'elevated',
                       lambda: elevated_context)
        self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
        self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'access_mode': access_mode}}
        # attach() is expected to add the volume id as 'serial'.
        expected_conn_info = {'data': {'access_mode': access_mode},
                              'serial': fake_volume['id']}
        enc_data = {'fake': 'enc_data'}
        self.volume_api.get(self.context,
                            fake_volume['id']).AndReturn(fake_volume)
        if check_attach:
            if not fail_check_attach:
                self.volume_api.check_attach(self.context, fake_volume,
                                    instance=instance).AndReturn(None)
            else:
                self.volume_api.check_attach(self.context, fake_volume,
                                    instance=instance).AndRaise(
                                        test.TestingException)
                return instance, expected_conn_info
        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            elevated_context, fake_volume['id'],
            connector).AndReturn(connection_info)
        if driver_attach:
            encryptors.get_encryption_metadata(
                    elevated_context, self.volume_api, fake_volume['id'],
                    connection_info).AndReturn(enc_data)
            if not fail_driver_attach:
                self.virt_driver.attach_volume(
                        elevated_context, expected_conn_info, instance,
                        bdm_dict['device_name'],
                        disk_bus=bdm_dict['disk_bus'],
                        device_type=bdm_dict['device_type'],
                        encryption=enc_data).AndReturn(None)
            else:
                self.virt_driver.attach_volume(
                        elevated_context, expected_conn_info, instance,
                        bdm_dict['device_name'],
                        disk_bus=bdm_dict['disk_bus'],
                        device_type=bdm_dict['device_type'],
                        encryption=enc_data).AndRaise(test.TestingException)
                # A failed driver attach must clean up the connection.
                self.volume_api.terminate_connection(
                        elevated_context, fake_volume['id'],
                        expected_conn_info).AndReturn(None)
                return instance, expected_conn_info
        if volume_attach:
            self.volume_api.attach(elevated_context, fake_volume['id'],
                                   'fake_uuid', bdm_dict['device_name'],
                                    mode=access_mode).AndReturn(None)
        driver_bdm._bdm_obj.save(self.context).AndReturn(None)
        return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def check_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=False)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=False)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def check_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
test_bdm._bdm_obj.save(self.context).AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3,
'', '', snapshot).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1,
'', '', image_id=image['id']).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_volume(self):
no_blank_volume = self.blank_bdm.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['blank'](no_blank_volume)
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-uuid-blank-vol'}
with contextlib.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(volume_class, 'attach')
) as (vol_create, vol_attach):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(self.context,
test_bdm.volume_size,
'fake-uuid-blank-vol',
'')
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver,
do_check_attach=True)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in xrange(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in xrange(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
test_image = self.driver_classes['image'](self.image_bdm)
test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
test_volume = self.driver_classes['volume'](self.volume_bdm)
test_blank = self.driver_classes['blank'](self.blank_bdm)
for bdm in (test_image, test_snapshot, test_volume, test_blank):
self.assertTrue(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
for bdm in (test_swap, test_ephemeral):
self.assertFalse(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
| true
| true
|
f71439ce9d32a0f70a4540340143a4985060ff8f
| 8,950
|
py
|
Python
|
algorithm/RL/DDPG.py
|
915288938lx/Personae-master-01
|
0885c37956bd3f9157c66109e09755a51ad5d3a1
|
[
"MIT"
] | null | null | null |
algorithm/RL/DDPG.py
|
915288938lx/Personae-master-01
|
0885c37956bd3f9157c66109e09755a51ad5d3a1
|
[
"MIT"
] | null | null | null |
algorithm/RL/DDPG.py
|
915288938lx/Personae-master-01
|
0885c37956bd3f9157c66109e09755a51ad5d3a1
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import tensorflow as tf
import numpy as np
import os
from algorithm import config
from base.env.market import Market
from checkpoints import CHECKPOINTS_DIR
from base.algorithm.model import BaseRLTFModel
from helper.args_parser import model_launcher_parser
from helper.data_logger import generate_algorithm_logger, generate_market_logger
class Algorithm(BaseRLTFModel):
    """Deep Deterministic Policy Gradient (DDPG) trading agent.

    Builds separate "predict" (online) and "target" actor/critic
    networks, soft-updates the targets toward the online nets with rate
    tau, and trains from a fixed-size experience replay buffer.
    Hyper-parameters (buffer_size, batch_size, tau, gamma,
    learning_rate, mode, ...) come from BaseRLTFModel via **options —
    see the base class for their defaults.
    """
    def __init__(self, session, env, a_space, s_space, **options):
        # a_space / s_space are the action and (flattened) state sizes.
        super(Algorithm, self).__init__(session, env, a_space, s_space, **options)
        self.actor_loss, self.critic_loss = .0, .0
        # Replay buffer: each row is [s, a, r, s_next] flattened, hence
        # width = 2 * s_space + 1 (action) + 1 (reward).
        self.buffer = np.zeros((self.buffer_size, self.s_space * 2 + 1 + 1))
        self.buffer_length = 0
        self._init_input()
        self._init_nn()
        self._init_op()
        self._init_saver()
        self._init_summary_writer()
    def _init_input(self):
        # Placeholders for current state, reward and successor state.
        self.s = tf.placeholder(tf.float32, [None, self.s_space], 'state')
        self.r = tf.placeholder(tf.float32, [None, 1], 'reward')
        self.s_next = tf.placeholder(tf.float32, [None, self.s_space], 'state_next')
    def _init_nn(self):
        # Online ("predict") actor and critic — trainable.
        self.a_predict = self.__build_actor_nn(self.s, "predict/actor", trainable=True)
        self.q_predict = self.__build_critic(self.s, self.a_predict, "predict/critic", trainable=True)
        # Target actor and critic — frozen, updated only via soft updates.
        self.a_next = self.__build_actor_nn(self.s_next, "target/actor", trainable=False)
        self.q_next = self.__build_critic(self.s_next, self.a_next, "target/critic", trainable=False)
        # Variable-scope names, used below to collect each net's params.
        self.scopes = ["predict/actor", "target/actor", "predict/critic", "target/critic"]
    def _init_op(self):
        # Collect parameters per scope (order matches self.scopes).
        params = [tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) for scope in self.scopes]
        zipped_a_params, zipped_c_params = zip(params[0], params[1]), zip(params[2], params[3])
        # Soft target updates: target <- (1 - tau) * target + tau * online.
        self.update_a = [tf.assign(t_a, (1 - self.tau) * t_a + self.tau * p_a) for p_a, t_a in zipped_a_params]
        self.update_c = [tf.assign(t_c, (1 - self.tau) * t_c + self.tau * p_c) for p_c, t_c in zipped_c_params]
        # Actor maximizes the critic's Q estimate (so minimize -Q).
        with tf.variable_scope('actor_loss'):
            self.a_loss = -tf.reduce_mean(self.q_predict)
        with tf.variable_scope('actor_train'):
            self.a_train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.a_loss, var_list=params[0])
        # Critic regresses toward the one-step TD target r + gamma * Q'.
        self.q_target = self.r + self.gamma * self.q_next
        with tf.variable_scope('critic_loss'):
            self.c_loss = tf.losses.mean_squared_error(self.q_target, self.q_predict)
        with tf.variable_scope('critic_train'):
            # The critic uses twice the actor's learning rate.
            self.c_train_op = tf.train.RMSPropOptimizer(self.learning_rate * 2).minimize(self.c_loss, var_list=params[2])
        # Initialize variables.
        self.session.run(tf.global_variables_initializer())
    def run(self):
        """Train for self.episodes episodes, or restore a checkpoint when
        not in train mode. Saves a checkpoint every 10 episodes."""
        if self.mode != 'train':
            self.restore()
        else:
            for episode in range(self.episodes):
                self.log_loss(episode)
                s = self.env.reset(self.mode)
                while True:
                    c, a, a_index = self.predict(s)
                    s_next, r, status, info = self.env.forward(c, a)
                    # Store the raw action index, not the decoded action.
                    self.save_transition(s, a_index, r, s_next)
                    self.train()
                    s = s_next
                    if status == self.env.Done:
                        self.env.trader.log_asset(episode)
                        break
                if self.enable_saver and episode % 10 == 0:
                    self.save(episode)
    def train(self):
        # No updates until the replay buffer has been filled once.
        if self.buffer_length < self.buffer_size:
            return
        # Soft-update targets, then fit critic and actor on one batch.
        self.session.run([self.update_a, self.update_c])
        s, a, r, s_next = self.get_transition_batch()
        self.critic_loss, _ = self.session.run([self.c_loss, self.c_train_op], {self.s: s, self.a_predict: a, self.r: r, self.s_next: s_next})
        self.actor_loss, _ = self.session.run([self.a_loss, self.a_train_op], {self.s: s})
    def predict(self, s):
        # Deterministic policy output; exploration (use_prob) only while
        # training. Decoding to (code, action, index) happens in the base
        # class helper.
        a = self.session.run(self.a_predict, {self.s: s})[0][0]
        return self.get_stock_code_and_action(a, use_greedy=True, use_prob=True if self.mode == 'train' else False)
    def save_transition(self, s, a, r, s_next):
        # Ring buffer: overwrite the oldest row once full.
        transition = np.hstack((s, [[a]], [[r]], s_next))
        self.buffer[self.buffer_length % self.buffer_size, :] = transition
        self.buffer_length += 1
    def get_transition_batch(self):
        # Uniform random minibatch (with replacement); columns follow the
        # [s | a | r | s_next] layout written in save_transition.
        indices = np.random.choice(self.buffer_size, size=self.batch_size)
        batch = self.buffer[indices, :]
        s = batch[:, :self.s_space]
        a = batch[:, self.s_space: self.s_space + 1]
        r = batch[:, -self.s_space - 1: -self.s_space]
        s_next = batch[:, -self.s_space:]
        return s, a, r, s_next
    def log_loss(self, episode):
        # WARNING level so losses show up under the default log config.
        self.logger.warning("Episode: {0} | Actor Loss: {1:.2f} | Critic Loss: {2:.2f}".format(episode,
                                                                                               self.actor_loss,
                                                                                               self.critic_loss))
    def __build_actor_nn(self, state, scope, trainable=True):
        # Two dense layers; sigmoid output scaled to [0, a_space - 1].
        w_init, b_init = tf.random_normal_initializer(.0, .001), tf.constant_initializer(.1)
        with tf.variable_scope(scope):
            # state is ? * code_count * data_dim.
            first_dense = tf.layers.dense(state,
                                          64,
                                          tf.nn.relu,
                                          kernel_initializer=w_init,
                                          bias_initializer=b_init,
                                          trainable=trainable)
            action = tf.layers.dense(first_dense,
                                     1,
                                     tf.nn.sigmoid,
                                     kernel_initializer=w_init,
                                     bias_initializer=b_init,
                                     trainable=trainable)
            return tf.multiply(action, self.a_space - 1)
    @staticmethod
    def __build_critic(state, action, scope, trainable=True):
        # Q(s, a): state and action branches merged additively, then a
        # single linear output unit.
        w_init, b_init = tf.random_normal_initializer(.0, .3), tf.constant_initializer(.1)
        with tf.variable_scope(scope):
            s_first_dense = tf.layers.dense(state,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)
            a_first_dense = tf.layers.dense(action,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)
            q_value = tf.layers.dense(tf.nn.relu(s_first_dense + a_first_dense),
                                      1,
                                      kernel_initializer=w_init,
                                      bias_initializer=b_init,
                                      trainable=trainable)
            return q_value
def main(args):
    """Build the market environment and run the DDPG model.

    CLI arguments (mode, codes, market, episode, training_data_ratio)
    come from model_launcher_parser; checkpoints and summaries are
    written under CHECKPOINTS_DIR/RL/<model_name>/<market>/.
    """
    # The model name is this file's basename without extension ("DDPG").
    model_name = os.path.basename(__file__).split('.')[0]
    env_options = {
        "market": args.market,
        "logger": generate_market_logger(model_name),
        "training_data_ratio": args.training_data_ratio,
    }
    env = Market(args.codes, start_date="2012-01-01",
                 end_date="2019-07-19", **env_options)
    model_options = {
        "mode": args.mode,
        "episodes": args.episode,
        "enable_saver": True,
        "learning_rate": 0.003,
        "enable_summary_writer": True,
        "logger": generate_algorithm_logger(model_name),
        "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name,
                                  args.market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name,
                                     args.market, "summary"),
    }
    algorithm = Algorithm(tf.Session(config=config), env,
                          env.trader.action_space, env.data_dim,
                          **model_options)
    algorithm.run()
    algorithm.eval()
    algorithm.plot()
if __name__ == '__main__':
    main(model_launcher_parser.parse_args())
| 42.018779
| 142
| 0.557765
|
import tensorflow as tf
import numpy as np
import os
from algorithm import config
from base.env.market import Market
from checkpoints import CHECKPOINTS_DIR
from base.algorithm.model import BaseRLTFModel
from helper.args_parser import model_launcher_parser
from helper.data_logger import generate_algorithm_logger, generate_market_logger
class Algorithm(BaseRLTFModel):
    """DDPG-style actor-critic trading agent built on TF1 graph APIs.

    Maintains trainable "predict" actor/critic networks plus frozen
    "target" copies, a fixed-size replay buffer, and soft target updates
    weighted by ``self.tau`` (supplied via BaseRLTFModel options).
    """

    def __init__(self, session, env, a_space, s_space, **options):
        super(Algorithm, self).__init__(session, env, a_space, s_space, **options)
        # Most recent training losses, exposed for log_loss().
        self.actor_loss, self.critic_loss = .0, .0
        # Replay buffer rows are [s | a | r | s_next]:
        # s_space + 1 + 1 + s_space columns.
        self.buffer = np.zeros((self.buffer_size, self.s_space * 2 + 1 + 1))
        self.buffer_length = 0
        self._init_input()
        self._init_nn()
        self._init_op()
        self._init_saver()
        self._init_summary_writer()

    def _init_input(self):
        """Create placeholders for state, reward and successor state."""
        self.s = tf.placeholder(tf.float32, [None, self.s_space], 'state')
        self.r = tf.placeholder(tf.float32, [None, 1], 'reward')
        self.s_next = tf.placeholder(tf.float32, [None, self.s_space], 'state_next')

    def _init_nn(self):
        """Build trainable predict networks and frozen target networks."""
        self.a_predict = self.__build_actor_nn(self.s, "predict/actor", trainable=True)
        self.q_predict = self.__build_critic(self.s, self.a_predict, "predict/critic", trainable=True)
        self.a_next = self.__build_actor_nn(self.s_next, "target/actor", trainable=False)
        self.q_next = self.__build_critic(self.s_next, self.a_next, "target/critic", trainable=False)
        # Variable-scope names used to collect parameters in _init_op().
        self.scopes = ["predict/actor", "target/actor", "predict/critic", "target/critic"]

    def _init_op(self):
        """Define soft target updates, losses and optimizers, then initialize variables."""
        params = [tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) for scope in self.scopes]
        # Pair (predict, target) variables for actor and critic respectively.
        zipped_a_params, zipped_c_params = zip(params[0], params[1]), zip(params[2], params[3])
        # Soft update: target <- (1 - tau) * target + tau * predict.
        self.update_a = [tf.assign(t_a, (1 - self.tau) * t_a + self.tau * p_a) for p_a, t_a in zipped_a_params]
        self.update_c = [tf.assign(t_c, (1 - self.tau) * t_c + self.tau * p_c) for p_c, t_c in zipped_c_params]
        with tf.variable_scope('actor_loss'):
            # The actor maximizes Q(s, actor(s)); minimizing the negative mean.
            self.a_loss = -tf.reduce_mean(self.q_predict)
        with tf.variable_scope('actor_train'):
            self.a_train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.a_loss, var_list=params[0])
        # One-step TD target computed from the frozen target networks.
        self.q_target = self.r + self.gamma * self.q_next
        with tf.variable_scope('critic_loss'):
            self.c_loss = tf.losses.mean_squared_error(self.q_target, self.q_predict)
        with tf.variable_scope('critic_train'):
            # The critic trains at twice the actor's learning rate.
            self.c_train_op = tf.train.RMSPropOptimizer(self.learning_rate * 2).minimize(self.c_loss, var_list=params[2])
        self.session.run(tf.global_variables_initializer())

    def run(self):
        """Restore a saved model (non-train modes) or train for ``self.episodes`` episodes."""
        if self.mode != 'train':
            self.restore()
        else:
            for episode in range(self.episodes):
                self.log_loss(episode)
                s = self.env.reset(self.mode)
                while True:
                    c, a, a_index = self.predict(s)
                    s_next, r, status, info = self.env.forward(c, a)
                    self.save_transition(s, a_index, r, s_next)
                    self.train()
                    s = s_next
                    if status == self.env.Done:
                        self.env.trader.log_asset(episode)
                        break
                # Periodic checkpoint every 10 episodes.
                if self.enable_saver and episode % 10 == 0:
                    self.save(episode)

    def train(self):
        """Run one soft target update plus one critic and one actor step."""
        # Wait until the replay buffer has been filled once.
        if self.buffer_length < self.buffer_size:
            return
        self.session.run([self.update_a, self.update_c])
        s, a, r, s_next = self.get_transition_batch()
        self.critic_loss, _ = self.session.run([self.c_loss, self.c_train_op], {self.s: s, self.a_predict: a, self.r: r, self.s_next: s_next})
        self.actor_loss, _ = self.session.run([self.a_loss, self.a_train_op], {self.s: s})

    def predict(self, s):
        """Map state ``s`` to a (code, action, action_index) trading decision."""
        a = self.session.run(self.a_predict, {self.s: s})[0][0]
        # Exploration (use_prob) is enabled only while training.
        return self.get_stock_code_and_action(a, use_greedy=True, use_prob=True if self.mode == 'train' else False)

    def save_transition(self, s, a, r, s_next):
        """Write one (s, a, r, s_next) row into the circular replay buffer."""
        transition = np.hstack((s, [[a]], [[r]], s_next))
        # Modulo turns the flat buffer into a ring; oldest rows are overwritten.
        self.buffer[self.buffer_length % self.buffer_size, :] = transition
        self.buffer_length += 1

    def get_transition_batch(self):
        """Sample a random mini-batch and split it back into s, a, r, s_next."""
        indices = np.random.choice(self.buffer_size, size=self.batch_size)
        batch = self.buffer[indices, :]
        s = batch[:, :self.s_space]
        a = batch[:, self.s_space: self.s_space + 1]
        # Reward is the single column immediately before s_next.
        r = batch[:, -self.s_space - 1: -self.s_space]
        s_next = batch[:, -self.s_space:]
        return s, a, r, s_next

    def log_loss(self, episode):
        """Log the latest actor/critic losses for this episode."""
        self.logger.warning("Episode: {0} | Actor Loss: {1:.2f} | Critic Loss: {2:.2f}".format(episode,
                                                                                               self.actor_loss,
                                                                                               self.critic_loss))

    def __build_actor_nn(self, state, scope, trainable=True):
        """Actor network: 64-unit ReLU layer, then a sigmoid scalar scaled to [0, a_space - 1]."""
        w_init, b_init = tf.random_normal_initializer(.0, .001), tf.constant_initializer(.1)
        with tf.variable_scope(scope):
            first_dense = tf.layers.dense(state,
                                          64,
                                          tf.nn.relu,
                                          kernel_initializer=w_init,
                                          bias_initializer=b_init,
                                          trainable=trainable)
            action = tf.layers.dense(first_dense,
                                     1,
                                     tf.nn.sigmoid,
                                     kernel_initializer=w_init,
                                     bias_initializer=b_init,
                                     trainable=trainable)
            # Sigmoid output in (0, 1) scaled onto the discrete action range.
            return tf.multiply(action, self.a_space - 1)

    @staticmethod
    def __build_critic(state, action, scope, trainable=True):
        """Critic network Q(s, a): 32-unit state and action branches summed into a scalar head."""
        w_init, b_init = tf.random_normal_initializer(.0, .3), tf.constant_initializer(.1)
        with tf.variable_scope(scope):
            s_first_dense = tf.layers.dense(state,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)
            a_first_dense = tf.layers.dense(action,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)
            q_value = tf.layers.dense(tf.nn.relu(s_first_dense + a_first_dense),
                                      1,
                                      kernel_initializer=w_init,
                                      bias_initializer=b_init,
                                      trainable=trainable)
            return q_value
def main(args):
    """Wire up the market environment and the DDPG algorithm from CLI
    arguments, then train/evaluate the agent and plot the results."""
    model_name = os.path.basename(__file__).split('.')[0]

    market_options = {
        "market": args.market,
        "logger": generate_market_logger(model_name),
        "training_data_ratio": args.training_data_ratio,
    }
    env = Market(args.codes, start_date="2012-01-01", end_date="2019-07-19",
                 **market_options)

    algorithm_options = {
        "mode": args.mode,
        "episodes": args.episode,
        "enable_saver": True,
        "learning_rate": 0.003,
        "enable_summary_writer": True,
        "logger": generate_algorithm_logger(model_name),
        "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, args.market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, args.market, "summary"),
    }
    algorithm = Algorithm(tf.Session(config=config), env,
                          env.trader.action_space, env.data_dim,
                          **algorithm_options)

    algorithm.run()
    algorithm.eval()
    algorithm.plot()
if __name__ == '__main__':
main(model_launcher_parser.parse_args())
| true
| true
|
f7143a6df31b6e88eabff6f5aaf40943f677d15c
| 6,824
|
py
|
Python
|
pynotify/__init__.py
|
dhgrs/pynotify
|
5bdfb0108466b7779f5bb7643b272c96f05c6f7c
|
[
"MIT"
] | null | null | null |
pynotify/__init__.py
|
dhgrs/pynotify
|
5bdfb0108466b7779f5bb7643b272c96f05c6f7c
|
[
"MIT"
] | null | null | null |
pynotify/__init__.py
|
dhgrs/pynotify
|
5bdfb0108466b7779f5bb7643b272c96f05c6f7c
|
[
"MIT"
] | null | null | null |
import subprocess
class NotificationError(Exception):
    """Raised when a notification is misconfigured or the platform is unsupported."""
class BaseNotification:
    """Common behaviour shared by every notification type."""

    def set_typed_variable(self, value, specified_type):
        """Return *value* unchanged if it is an instance of *specified_type*;
        otherwise raise NotificationError."""
        if not isinstance(value, specified_type):
            raise NotificationError(
                'can only set '
                f'{specified_type.__name__} '
                f'(not "{value.__class__.__name__}")'
            )
        return value

    # Subclasses implement the actual delivery mechanism.
    def notify(self):
        raise NotImplementedError()
class OSSpecificNotification(BaseNotification):
    '''
    OSSpecificNotification:
        Dispatches a notification to the handler for the current OS.
    '''

    def __init__(self):
        import platform
        # e.g. 'Darwin', 'Linux' or 'Windows'.
        self.system = platform.system()

    # macOS notification; implemented by subclasses.
    def darwin_notify(self):
        raise NotImplementedError()

    # Linux notification; implemented by subclasses.
    def linux_notify(self):
        raise NotImplementedError()

    # Windows notification; implemented by subclasses.
    def windows_notify(self):
        raise NotImplementedError()

    # Run the notification appropriate for the detected platform.
    def notify(self):
        if self.system == 'Darwin':
            self.darwin_notify()
        elif self.system == 'Linux':
            self.linux_notify()
        elif self.system == 'Windows':
            self.windows_notify()
        else:
            # BUG FIX: the error was constructed but never raised, so
            # unsupported platforms silently did nothing; the message also
            # wrongly claimed the system *was* supported.
            raise NotificationError(f'{self.system} is not a supported system')
class MessageNotification(BaseNotification):
    '''
    MessageNotification:
        A notification that carries a free-form text message.
    Arguments:
        message(str): message body
    '''

    def __init__(self, message):
        self._message = None
        self.set_message(message)

    # property accessors for `message`
    def get_message(self):
        return self._message

    def set_message(self, message):
        # Type-checked: raises NotificationError unless `message` is a str.
        self._message = self.set_typed_variable(message, str)

    message = property(get_message, set_message)
class WebhookNotification(MessageNotification):
    '''
    WebhookNotification:
        A notification delivered through a webhook.
    Arguments:
        message(str): message body
        url(str): webhook URL
    '''

    def __init__(self, message, url):
        super().__init__(message)
        self._url = None
        self.set_url(url)

    # property accessors for `url`
    def get_url(self):
        return self._url

    def set_url(self, url):
        # Type-checked: raises NotificationError unless `url` is a str.
        self._url = self.set_typed_variable(url, str)

    url = property(get_url, set_url)
class TokenNotification(MessageNotification):
    '''
    TokenNotification:
        A notification authenticated by an access token.
    Arguments:
        message(str): message body
        token(str): access token
    '''

    def __init__(self, message, token):
        super().__init__(message)
        self._token = None
        self.set_token(token)

    # property accessors for `token`
    def get_token(self):
        return self._token

    def set_token(self, token):
        # Type-checked: raises NotificationError unless `token` is a str.
        self._token = self.set_typed_variable(token, str)

    token = property(get_token, set_token)
class BeepNotification(OSSpecificNotification):
    '''
    BeepNotification:
        Notify by sounding the system beep / terminal bell.
    Arguments:
        times(int): number of beeps
    '''

    def __init__(self, times):
        super().__init__()
        self._times = None
        self.set_times(times)

    # property accessors for `times`
    def get_times(self):
        return self._times

    def set_times(self, times):
        # Type-checked: raises NotificationError unless `times` is an int.
        self._times = self.set_typed_variable(times, int)

    times = property(get_times, set_times)

    # macOS: osascript can sound N beeps with a single invocation.
    def darwin_notify(self):
        cmd = ['osascript', '-e', f'beep {self._times}']
        subprocess.run(cmd)

    # Linux: ring the X keyboard bell once per beep.
    def linux_notify(self):
        import time
        cmd = ['xkbbell']  # hoisted: the command never changes per iteration
        for i in range(self._times):
            if i:
                # FIX: pause *between* beeps only; the original also slept
                # before the first beep, delaying the notification by 0.5 s.
                time.sleep(0.5)
            subprocess.run(cmd)
class CenterNotification(MessageNotification):
    '''
    CenterNotification:
        A notification shown in the macOS Notification Center.
    Arguments:
        message(str): message body
        title(str): title
        subtitle(str): subtitle
        sound(bool): whether to play a sound
    '''

    def __init__(self, message, title=None, subtitle=None, sound=True):
        super().__init__(message)
        self._title = None
        self._subtitle = None
        self._sound = None
        if title:
            self.set_title(title)
        if subtitle:
            self.set_subtitle(subtitle)
        if sound:
            self.set_sound(sound)

    # property accessors for `title`
    def get_title(self):
        return self._title

    def set_title(self, title):
        self._title = self.set_typed_variable(title, str)
        # Title and subtitle must be supplied together in the AppleScript
        # call, so when only one is set the other is filled with a space.
        if not self._subtitle:
            self._subtitle = ' '

    title = property(get_title, set_title)

    # property accessors for `subtitle`
    def get_subtitle(self):
        return self._subtitle

    def set_subtitle(self, subtitle):
        self._subtitle = self.set_typed_variable(subtitle, str)
        # Title and subtitle must be supplied together (see set_title).
        if not self._title:
            self._title = ' '

    subtitle = property(get_subtitle, set_subtitle)

    # property accessors for `sound`
    def get_sound(self):
        return self._sound

    def set_sound(self, sound):
        self._sound = self.set_typed_variable(sound, bool)

    sound = property(get_sound, set_sound)

    # Deliver the notification via AppleScript (osascript).
    def notify(self):
        _message = f'display notification \"{self._message}\"'
        _title = \
            f'with title \"{self._title}\" subtitle \"{self._subtitle}\"' \
            if self._title and self._subtitle else ''
        _sound = 'sound name \"\"' if self._sound else ''
        cmd = ['osascript', '-e', f'{_message} {_title} {_sound}']
        subprocess.run(cmd)
class SlackNotification(WebhookNotification):
    '''
    SlackNotification:
        A notification delivered to Slack.
    Arguments (WebhookNotification):
        message(str): message body
        url(str): Incoming Webhook URL
    '''

    # Deliver the notification by POSTing a JSON payload to the webhook.
    def notify(self):
        import json
        import requests
        data = {'text': self._message}
        # CONSISTENCY FIX: declare the JSON content type explicitly, matching
        # DiscordNotification in this module; it is also the documented form
        # for Slack Incoming Webhook JSON payloads.
        requests.post(
            self._url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(data)
        )
class DiscordNotification(WebhookNotification):
    '''
    DiscordNotification:
        A notification delivered to Discord.
    Arguments (WebhookNotification):
        message(str): message body
        url(str): Discord webhook URL
    '''

    # Deliver the notification by POSTing a JSON payload to the webhook.
    def notify(self):
        import json
        import requests
        data = {'content': self._message}
        requests.post(
            self._url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(data)
        )
class LineNotification(TokenNotification):
    '''
    LineNotification:
        A notification delivered via LINE.
    Arguments:
        message(str): message body
        token(str): LINE Notify access token
    '''

    def __init__(self, message, token):
        super().__init__(message, token)
        # Fixed LINE Notify API endpoint.
        self.URL = 'https://notify-api.line.me/api/notify'

    # Deliver the notification through the LINE Notify REST API.
    def notify(self):
        import requests
        headers = {'Authorization': f'Bearer {self._token}'}
        params = {'message': self._message}
        requests.post(
            self.URL,
            headers=headers,
            params=params
        )
| 24.028169
| 75
| 0.592175
|
import subprocess
class NotificationError(Exception):
pass
class BaseNotification:
def set_typed_variable(self, value, specified_type):
if isinstance(value, specified_type):
return value
else:
raise NotificationError(
'can only set '
f'{specified_type.__name__} '
f'(not "{value.__class__.__name__}")'
)
def notify(self):
raise NotImplementedError()
class OSSpecificNotification(BaseNotification):
def __init__(self):
import platform
self.system = platform.system()
def darwin_notify(self):
raise NotImplementedError()
def linux_notify(self):
raise NotImplementedError()
def windows_notify(self):
raise NotImplementedError()
def notify(self):
if self.system == 'Darwin':
self.darwin_notify()
elif self.system == 'Linux':
self.linux_notify()
elif self.system == 'Windows':
self.windows_notify()
else:
NotificationError(f'{self.system} is supported system')
class MessageNotification(BaseNotification):
def __init__(self, message):
self._message = None
self.set_message(message)
def get_message(self):
return self._message
def set_message(self, message):
self._message = self.set_typed_variable(message, str)
message = property(get_message, set_message)
class WebhookNotification(MessageNotification):
def __init__(self, message, url):
super().__init__(message)
self._url = None
self.set_url(url)
def get_url(self):
return self._url
def set_url(self, url):
self._url = self.set_typed_variable(url, str)
url = property(get_url, set_url)
class TokenNotification(MessageNotification):
def __init__(self, message, token):
super().__init__(message)
self._token = None
self.set_token(token)
def get_token(self):
return self._token
def set_token(self, token):
self._token = self.set_typed_variable(token, str)
token = property(get_token, set_token)
class BeepNotification(OSSpecificNotification):
def __init__(self, times):
super().__init__()
self._times = None
self.set_times(times)
def get_times(self):
return self._times
def set_times(self, times):
self._times = self.set_typed_variable(times, int)
times = property(get_times, set_times)
def darwin_notify(self):
cmd = ['osascript', '-e', f'beep {self._times}']
subprocess.run(cmd)
def linux_notify(self):
import time
for _ in range(self._times):
cmd = ['xkbbell']
time.sleep(0.5)
subprocess.run(cmd)
class CenterNotification(MessageNotification):
def __init__(self, message, title=None, subtitle=None, sound=True):
super().__init__(message)
self._title = None
self._subtitle = None
self._sound = None
if title:
self.set_title(title)
if subtitle:
self.set_subtitle(subtitle)
if sound:
self.set_sound(sound)
def get_title(self):
return self._title
def set_title(self, title):
self._title = self.set_typed_variable(title, str)
if not self._subtitle:
self._subtitle = ' '
title = property(get_title, set_title)
def get_subtitle(self):
return self._subtitle
def set_subtitle(self, subtitle):
self._subtitle = self.set_typed_variable(subtitle, str)
if not self._title:
self._title = ' '
subtitle = property(get_subtitle, set_subtitle)
def get_sound(self):
return self._sound
def set_sound(self, sound):
self._sound = self.set_typed_variable(sound, bool)
sound = property(get_sound, set_sound)
def notify(self):
_message = f'display notification \"{self._message}\"'
_title = \
f'with title \"{self._title}\" subtitle \"{self._subtitle}\"' \
if self._title and self._subtitle else ''
_sound = 'sound name \"\"' if self._sound else ''
cmd = ['osascript', '-e', f'{_message} {_title} {_sound}']
subprocess.run(cmd)
class SlackNotification(WebhookNotification):
def notify(self):
import json
import requests
data = {'text': self._message}
requests.post(self._url, data=json.dumps(data))
class DiscordNotification(WebhookNotification):
def notify(self):
import json
import requests
data = {'content': self._message}
requests.post(
self._url,
headers={'Content-Type': 'application/json'},
data=json.dumps(data)
)
class LineNotification(TokenNotification):
def __init__(self, message, token):
super().__init__(message, token)
self.URL = 'https://notify-api.line.me/api/notify'
def notify(self):
import requests
headers = {'Authorization': f'Bearer {self._token}'}
params = {'message': self._message}
requests.post(
self.URL,
headers=headers,
params=params
)
| true
| true
|
f7143a7938cf66264f124bc702bc410c903aa5bf
| 147
|
py
|
Python
|
FastAPISQLAlchamyGraphQL/app/mutations/__init__.py
|
scionoftech/FastAPI-Full-Stack-Samples
|
e7d42661ed59324ff20f419d05c6cd1e7dab7e97
|
[
"MIT"
] | 29
|
2021-03-31T02:42:59.000Z
|
2022-03-12T16:20:05.000Z
|
FastAPIMongoEngineGraphQL/app/mutations/__init__.py
|
scionoftech/FastAPI-Full-Stack-Samples
|
e7d42661ed59324ff20f419d05c6cd1e7dab7e97
|
[
"MIT"
] | null | null | null |
FastAPIMongoEngineGraphQL/app/mutations/__init__.py
|
scionoftech/FastAPI-Full-Stack-Samples
|
e7d42661ed59324ff20f419d05c6cd1e7dab7e97
|
[
"MIT"
] | 4
|
2021-08-21T01:02:00.000Z
|
2022-01-09T15:33:51.000Z
|
from .user import CreateUser, AuthUser, UpdateUser, DeleteUser, UpdatePassword
from .articles import CreateArticle, UpdateArticle, DeleteArticle
| 49
| 79
| 0.836735
|
from .user import CreateUser, AuthUser, UpdateUser, DeleteUser, UpdatePassword
from .articles import CreateArticle, UpdateArticle, DeleteArticle
| true
| true
|
f7143afde7eec54cc467e2279b80d92472d6fb74
| 319
|
py
|
Python
|
bitly_api/__init__.py
|
galeone/bitly-api-python
|
162add496ba2b42675b36581178902cce516cdf7
|
[
"Apache-2.0"
] | 3
|
2018-08-29T08:53:57.000Z
|
2019-02-22T19:56:11.000Z
|
bitly_api/__init__.py
|
galeone/bitly-api-python
|
162add496ba2b42675b36581178902cce516cdf7
|
[
"Apache-2.0"
] | null | null | null |
bitly_api/__init__.py
|
galeone/bitly-api-python
|
162add496ba2b42675b36581178902cce516cdf7
|
[
"Apache-2.0"
] | 1
|
2019-06-28T20:30:47.000Z
|
2019-06-28T20:30:47.000Z
|
from .bitly_api import Connection, BitlyError, Error
__version__ = '0.3'
__author__ = "Jehiah Czebotar <jehiah@gmail.com>"
__all__ = ["Connection", "BitlyError", "Error"]
__doc__ = """
This is a python library for the bitly api
all methods raise BitlyError on an unexpected response, or a problem with input
format
"""
| 31.9
| 79
| 0.752351
|
from .bitly_api import Connection, BitlyError, Error
__version__ = '0.3'
__author__ = "Jehiah Czebotar <jehiah@gmail.com>"
__all__ = ["Connection", "BitlyError", "Error"]
__doc__ = """
This is a python library for the bitly api
all methods raise BitlyError on an unexpected response, or a problem with input
format
"""
| true
| true
|
f7143b7edbae191e6924ad8021d9f11a2e53d982
| 2,262
|
py
|
Python
|
src/oscar/apps/dashboard/app.py
|
abirafdirp/revania
|
70272b842316e8df57b0bc8a0dc669c3af4ec8f9
|
[
"BSD-3-Clause"
] | 2
|
2015-12-11T00:19:15.000Z
|
2021-11-14T19:44:42.000Z
|
src/oscar/apps/dashboard/app.py
|
abirafdirp/revania
|
70272b842316e8df57b0bc8a0dc669c3af4ec8f9
|
[
"BSD-3-Clause"
] | null | null | null |
src/oscar/apps/dashboard/app.py
|
abirafdirp/revania
|
70272b842316e8df57b0bc8a0dc669c3af4ec8f9
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url, include
from oscar.core.application import Application
from oscar.core.loading import get_class
class DashboardApplication(Application):
    """Root dashboard application: aggregates every dashboard sub-app and
    exposes their URL configurations under the 'dashboard' namespace."""

    name = 'dashboard'
    permissions_map = {
        'index': (['is_staff'], ['partner.dashboard_access']),
    }

    index_view = get_class('dashboard.views', 'IndexView')
    reports_app = get_class('dashboard.reports.app', 'application')
    orders_app = get_class('dashboard.orders.app', 'application')
    users_app = get_class('dashboard.users.app', 'application')
    catalogue_app = get_class('dashboard.catalogue.app', 'application')
    promotions_app = get_class('dashboard.promotions.app', 'application')
    pages_app = get_class('dashboard.pages.app', 'application')
    partners_app = get_class('dashboard.partners.app', 'application')
    offers_app = get_class('dashboard.offers.app', 'application')
    ranges_app = get_class('dashboard.ranges.app', 'application')
    reviews_app = get_class('dashboard.reviews.app', 'application')
    vouchers_app = get_class('dashboard.vouchers.app', 'application')
    comms_app = get_class('dashboard.communications.app', 'application')
    shipping_app = get_class('dashboard.shipping.app', 'application')

    def get_urls(self):
        """Return the index URL plus one included URL set per sub-app,
        post-processed by the base Application class."""
        sub_apps = (
            (r'^catalogue/', self.catalogue_app),
            (r'^reports/', self.reports_app),
            (r'^orders/', self.orders_app),
            (r'^users/', self.users_app),
            (r'^content-blocks/', self.promotions_app),
            (r'^pages/', self.pages_app),
            (r'^partners/', self.partners_app),
            (r'^offers/', self.offers_app),
            (r'^ranges/', self.ranges_app),
            (r'^reviews/', self.reviews_app),
            (r'^vouchers/', self.vouchers_app),
            (r'^comms/', self.comms_app),
            (r'^shipping/', self.shipping_app),
        )
        urls = [url(r'^$', self.index_view.as_view(), name='index')]
        urls.extend(url(pattern, include(app.urls))
                    for pattern, app in sub_apps)
        return self.post_process_urls(urls)
application = DashboardApplication()
| 46.163265
| 73
| 0.665782
|
from django.conf.urls import url, include
from oscar.core.application import Application
from oscar.core.loading import get_class
class DashboardApplication(Application):
name = 'dashboard'
permissions_map = {
'index': (['is_staff'], ['partner.dashboard_access']),
}
index_view = get_class('dashboard.views', 'IndexView')
reports_app = get_class('dashboard.reports.app', 'application')
orders_app = get_class('dashboard.orders.app', 'application')
users_app = get_class('dashboard.users.app', 'application')
catalogue_app = get_class('dashboard.catalogue.app', 'application')
promotions_app = get_class('dashboard.promotions.app', 'application')
pages_app = get_class('dashboard.pages.app', 'application')
partners_app = get_class('dashboard.partners.app', 'application')
offers_app = get_class('dashboard.offers.app', 'application')
ranges_app = get_class('dashboard.ranges.app', 'application')
reviews_app = get_class('dashboard.reviews.app', 'application')
vouchers_app = get_class('dashboard.vouchers.app', 'application')
comms_app = get_class('dashboard.communications.app', 'application')
shipping_app = get_class('dashboard.shipping.app', 'application')
def get_urls(self):
urls = [
url(r'^$', self.index_view.as_view(), name='index'),
url(r'^catalogue/', include(self.catalogue_app.urls)),
url(r'^reports/', include(self.reports_app.urls)),
url(r'^orders/', include(self.orders_app.urls)),
url(r'^users/', include(self.users_app.urls)),
url(r'^content-blocks/', include(self.promotions_app.urls)),
url(r'^pages/', include(self.pages_app.urls)),
url(r'^partners/', include(self.partners_app.urls)),
url(r'^offers/', include(self.offers_app.urls)),
url(r'^ranges/', include(self.ranges_app.urls)),
url(r'^reviews/', include(self.reviews_app.urls)),
url(r'^vouchers/', include(self.vouchers_app.urls)),
url(r'^comms/', include(self.comms_app.urls)),
url(r'^shipping/', include(self.shipping_app.urls)),
]
return self.post_process_urls(urls)
application = DashboardApplication()
| true
| true
|
f7143dd95c98480c7753abe771970d9fae229904
| 20,780
|
py
|
Python
|
pilosa/orm.py
|
philoprove/python-pilosa
|
c0edc8d0fe1687b9afd61c8bc4dd236b3c73fb78
|
[
"BSD-3-Clause"
] | null | null | null |
pilosa/orm.py
|
philoprove/python-pilosa
|
c0edc8d0fe1687b9afd61c8bc4dd236b3c73fb78
|
[
"BSD-3-Clause"
] | null | null | null |
pilosa/orm.py
|
philoprove/python-pilosa
|
c0edc8d0fe1687b9afd61c8bc4dd236b3c73fb78
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 Pilosa Corp.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
import json
from .exceptions import PilosaError
from .validator import validate_index_name, validate_frame_name, validate_label
__all__ = ("TimeQuantum", "CacheType", "Schema", "Index", "PQLQuery", "PQLBatchQuery")
_TIME_FORMAT = "%Y-%m-%dT%H:%M"
class TimeQuantum:
    """Valid time quantum values for frames having support for that.

    * See: `Data Model <https://www.pilosa.com/docs/data-model/>`_
    """

    # Populated with singleton instances right after the class definition.
    NONE = None
    YEAR = None
    MONTH = None
    DAY = None
    HOUR = None
    YEAR_MONTH = None
    MONTH_DAY = None
    DAY_HOUR = None
    YEAR_MONTH_DAY = None
    MONTH_DAY_HOUR = None
    YEAR_MONTH_DAY_HOUR = None

    def __init__(self, value):
        # Wire-format string, e.g. "YMD".
        self.value = value

    def __str__(self):
        return self.value

    def __eq__(self, other):
        if isinstance(other, TimeQuantum):
            return self.value == other.value
        return False

    # FIX: defining __eq__ disables the default __hash__ in Python 3, which
    # made instances unusable as dict keys / set members; restore
    # value-based hashing consistent with __eq__.
    def __hash__(self):
        return hash(self.value)

    # FIX: explicit __ne__ keeps != consistent with == on Python 2 as well,
    # matching the Schema and Index classes in this module.
    def __ne__(self, other):
        return not self.__eq__(other)
TimeQuantum.NONE = TimeQuantum("")
TimeQuantum.YEAR = TimeQuantum("Y")
TimeQuantum.MONTH = TimeQuantum("M")
TimeQuantum.DAY = TimeQuantum("D")
TimeQuantum.HOUR = TimeQuantum("H")
TimeQuantum.YEAR_MONTH = TimeQuantum("YM")
TimeQuantum.MONTH_DAY = TimeQuantum("MD")
TimeQuantum.DAY_HOUR = TimeQuantum("DH")
TimeQuantum.YEAR_MONTH_DAY = TimeQuantum("YMD")
TimeQuantum.MONTH_DAY_HOUR = TimeQuantum("MDH")
TimeQuantum.YEAR_MONTH_DAY_HOUR = TimeQuantum("YMDH")
class CacheType:
    """Cache strategy for a frame: DEFAULT, LRU or RANKED."""

    # Populated with singleton instances right after the class definition.
    DEFAULT = None
    LRU = None
    RANKED = None

    def __init__(self, value):
        # Wire-format string, e.g. "ranked".
        self.value = value

    def __str__(self):
        return self.value

    def __eq__(self, other):
        if isinstance(other, CacheType):
            return self.value == other.value
        return False

    # FIX: defining __eq__ disables the default __hash__ in Python 3, which
    # made instances unhashable; restore value-based hashing.
    def __hash__(self):
        return hash(self.value)

    # FIX: explicit __ne__ keeps != consistent with == on Python 2 as well,
    # matching the Schema and Index classes in this module.
    def __ne__(self, other):
        return not self.__eq__(other)
CacheType.DEFAULT = CacheType("")
CacheType.LRU = CacheType("lru")
CacheType.RANKED = CacheType("ranked")
class Schema:
"""Schema is a container for index objects"""
def __init__(self):
self._indexes = {}
def __eq__(self, other):
if id(self) == id(other):
return True
if not isinstance(other, self.__class__):
return False
return self._indexes == other._indexes
def __ne__(self, other):
return not self.__eq__(other)
def index(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
"""Returns an index object with the given name and options.
If the index didn't exist in the schema, it is added to the schema.
:param str name: index name
:param str column_label: a valid column label
:param pilosa.TimeQuantum time_quantum: Sets the time quantum
:return: Index object
* See `Data Model <https://www.pilosa.com/docs/data-model/>`_
* See `Query Language <https://www.pilosa.com/docs/query-language/>`_
"""
index = self._indexes.get(name)
if index is None:
index = Index(name, column_label, time_quantum)
self._indexes[name] = index
return index
def _diff(self, other):
result = Schema()
for index_name, index in self._indexes.items():
if index_name not in other._indexes:
# if the index doesn't exist in the other schema, simply copy it
result._indexes[index_name] = index.copy()
else:
# the index exists in the other schema; check the frames
result_index = index.copy(frames=False)
for frame_name, frame in index._frames.items():
# the frame doesn't exist in the other scheme, copy it
if frame_name not in result_index._frames:
result_index._frames[frame_name] = frame.copy()
# check whether we modified result index
if len(result_index._frames) > 0:
result._indexes[index_name] = result_index
return result
class Index:
    """The purpose of the Index is to represent a data namespace.

    You cannot perform cross-index queries. Column-level attributes are global to the Index.

    :param str name: index name
    :param str column_label: a valid column label
    :param pilosa.TimeQuantum time_quantum: Sets the time quantum

    * See `Data Model <https://www.pilosa.com/docs/data-model/>`_
    * See `Query Language <https://www.pilosa.com/docs/query-language/>`_
    """

    def __init__(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
        validate_index_name(name)
        validate_label(column_label)
        self.name = name
        self.column_label = column_label
        self.time_quantum = time_quantum
        # Maps frame name -> Frame instance.
        self._frames = {}

    def __eq__(self, other):
        # Identity short-circuit avoids comparing frame dictionaries.
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        return self._meta_eq(other) and \
            self._frames == other._frames

    def __ne__(self, other):
        return not self.__eq__(other)

    def _meta_eq(self, other):
        # Equality of index metadata only, deliberately ignoring frames;
        # used by Frame.__eq__ to avoid a comparison cycle.
        return self.name == other.name and \
               self.column_label == other.column_label and \
               self.time_quantum == other.time_quantum

    def copy(self, frames=True):
        """Return a copy of this index; include copies of its frames unless ``frames`` is False."""
        index = Index(self.name, column_label=self.column_label, time_quantum=self.time_quantum)
        if frames:
            index._frames = dict((name, frame.copy()) for name, frame in self._frames.items())
        return index

    def frame(self, name, row_label="rowID", time_quantum=TimeQuantum.NONE,
              inverse_enabled=False, cache_type=CacheType.DEFAULT, cache_size=0):
        """Creates a frame object with the specified name and defaults.

        If a frame with this name already exists on the index, it is
        returned unchanged and the remaining arguments are ignored.

        :param str name: frame name
        :param str row_label: a valid row label
        :param pilosa.TimeQuantum time_quantum: Sets the time quantum for the frame. If a Frame has a time quantum, then Views are generated for each of the defined time segments.
        :param bool inverse_enabled:
        :param pilosa.CacheType cache_type: ``CacheType.DEFAULT``, ``CacheType.LRU`` or ``CacheType.RANKED``
        :param int cache_size: Values greater than 0 sets the cache size. Otherwise uses the default cache size
        :return: Pilosa frame
        :rtype: pilosa.Frame
        """
        frame = self._frames.get(name)
        if frame is None:
            frame = Frame(self, name, row_label, time_quantum,
                          inverse_enabled, cache_type, cache_size)
            self._frames[name] = frame
        return frame

    def raw_query(self, query):
        """Creates a raw query.

        Note that the query is not validated before sending to the server.

        :param str query:
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        return PQLQuery(query, self)

    def batch_query(self, *queries):
        """Creates a batch query.

        :param pilosa.PQLQuery queries: the queries in the batch
        :return: Pilosa batch query
        :rtype: pilosa.PQLBatchQuery
        """
        q = PQLBatchQuery(self)
        q.add(*queries)
        return q

    def union(self, *bitmaps):
        """Creates a ``Union`` query.

        ``Union`` performs a logical OR on the results of each BITMAP_CALL query passed to it.

        :param pilosa.PQLBitmapQuery bitmaps: 0 or more bitmap queries to union
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        """
        return self._bitmap_op("Union", bitmaps)

    def intersect(self, *bitmaps):
        """Creates an ``Intersect`` query.

        ``Intersect`` performs a logical AND on the results of each BITMAP_CALL query passed to it.

        :param pilosa.PQLBitmapQuery bitmaps: 1 or more bitmap queries to intersect
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        :raise PilosaError: if the number of bitmaps is less than 1
        """
        if len(bitmaps) < 1:
            raise PilosaError("Number of bitmap queries should be greater or equal to 1")
        return self._bitmap_op("Intersect", bitmaps)

    def difference(self, *bitmaps):
        """Creates a ``Difference`` query.

        ``Difference`` returns all of the bits from the first BITMAP_CALL argument passed to it,
        without the bits from each subsequent BITMAP_CALL.

        :param pilosa.PQLBitmapQuery bitmaps: 1 or more bitmap queries to differentiate
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        :raise PilosaError: if the number of bitmaps is less than 1
        """
        if len(bitmaps) < 1:
            raise PilosaError("Number of bitmap queries should be greater or equal to 1")
        return self._bitmap_op("Difference", bitmaps)

    def count(self, bitmap):
        """Creates a Count query.

        ``Count`` returns the number of set bits in the BITMAP_CALL passed in.

        :param pilosa.PQLQuery bitmap: the bitmap query
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        return PQLQuery(u"Count(%s)" % bitmap.serialize(), self)

    def set_column_attrs(self, column_id, attrs):
        """Creates a SetColumnAttrs query.

        ``SetColumnAttrs`` associates arbitrary key/value pairs with a column in an index.

        Following object types are accepted:

        * int
        * str
        * bool
        * float

        :param int column_id:
        :param dict attrs: column attributes
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        attrs_str = _create_attributes_str(attrs)
        return PQLQuery(u"SetColumnAttrs(%s=%d, %s)" %
                        (self.column_label, column_id, attrs_str), self)

    def _bitmap_op(self, name, bitmaps):
        # Render "Name(q1, q2, ...)" from the serialized sub-queries.
        return PQLQuery(u"%s(%s)" % (name, u", ".join(b.serialize() for b in bitmaps)), self)
class Frame:
    """Frames are used to segment and define different functional characteristics within your entire index.
    You can think of a Frame as a table-like data partition within your Index.
    Row-level attributes are namespaced at the Frame level.
    Do not create a Frame object directly. Instead, use ``pilosa.Index.frame`` method.
    * See `Data Model <https://www.pilosa.com/docs/data-model/>`_
    * See `Query Language <https://www.pilosa.com/docs/query-language/>`_
    """
    def __init__(self, index, name, row_label, time_quantum, inverse_enabled,
                 cache_type, cache_size):
        # Validate user-supplied identifiers before storing anything.
        validate_frame_name(name)
        validate_label(row_label)
        self.index = index
        self.name = name
        self.time_quantum = time_quantum
        self.inverse_enabled = inverse_enabled
        self.cache_type = cache_type
        self.cache_size = cache_size
        self.row_label = row_label
        # The column label is always inherited from the owning index.
        self.column_label = index.column_label
    def __eq__(self, other):
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        # Note that we skip comparing the frames of the indexes by using index._meta_eq
        # in order to avoid a call cycle
        return self.name == other.name and \
            self.index._meta_eq(other.index) and \
            self.row_label == other.row_label and \
            self.time_quantum == other.time_quantum and \
            self.inverse_enabled == other.inverse_enabled and \
            self.cache_type == other.cache_type and \
            self.cache_size == other.cache_size
    def __ne__(self, other):
        return not self.__eq__(other)
    def copy(self):
        # Shallow copy: the new Frame still points at the same Index object.
        return Frame(self.index, self.name, self.row_label, self.time_quantum,
                     self.inverse_enabled, self.cache_type, self.cache_size)
    def bitmap(self, row_id):
        """Creates a Bitmap query.
        Bitmap retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query. It also retrieves any attributes set on that row or column.
        This variant of Bitmap query uses the row label.
        :param int row_id:
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        """
        return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.row_label, row_id, self.name),
                        self.index)
    def inverse_bitmap(self, column_id):
        """Creates a Bitmap query.
        ``Bitmap`` retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query. It also retrieves any attributes set on that row or column.
        This variant of Bitmap query uses the column label.
        :param int column_id:
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        """
        return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.column_label, column_id, self.name),
                        self.index)
    def setbit(self, row_id, column_id, timestamp=None):
        """Creates a SetBit query.
        ``SetBit`` assigns a value of 1 to a bit in the binary matrix, thus associating the given row in the given frame with the given column.
        :param int row_id:
        :param int column_id:
        :param pilosa.TimeStamp timestamp:
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        # Timestamp is optional; when absent the clause is omitted entirely.
        ts = ", timestamp='%s'" % timestamp.strftime(_TIME_FORMAT) if timestamp else ''
        return PQLQuery(u"SetBit(%s=%d, frame='%s', %s=%d%s)" % \
                        (self.row_label, row_id, self.name, self.column_label, column_id, ts),
                        self.index)
    def clearbit(self, row_id, column_id):
        """Creates a ClearBit query.
        ``ClearBit`` assigns a value of 0 to a bit in the binary matrix, thus disassociating the given row in the given frame from the given column.
        :param int row_id:
        :param int column_id:
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        return PQLQuery(u"ClearBit(%s=%d, frame='%s', %s=%d)" % \
                        (self.row_label, row_id, self.name, self.column_label, column_id),
                        self.index)
    def topn(self, n, bitmap=None, field="", *values):
        """Creates a TopN query.
        ``TopN`` returns the id and count of the top n bitmaps (by count of bits) in the frame.
        * see: `TopN Query <https://www.pilosa.com/docs/query-language/#topn>`_
        :param int n: number of items to return
        :param pilosa.PQLBitmapQuery bitmap: a PQL Bitmap query
        :param str field: field name
        :param object values: filter values to be matched against the field
        """
        return self._topn(n, bitmap, field, False, *values)
    def inverse_topn(self, n, bitmap=None, field="", *values):
        """Creates a TopN query.
        ``TopN`` returns the id and count of the top n bitmaps (by count of bits) in the frame.
        This version sets `inverse=true`.
        * see: `TopN Query <https://www.pilosa.com/docs/query-language/#topn>`_
        :param int n: number of items to return
        :param pilosa.PQLBitmapQuery bitmap: a PQL Bitmap query
        :param str field: field name
        :param object values: filter values to be matched against the field
        """
        return self._topn(n, bitmap, field, True, *values)
    def _topn(self, n, bitmap=None, field="", inverse=False, *values):
        # Build the argument list in server-expected order: optional bitmap
        # first, then frame/n/inverse, then optional field filter clauses.
        parts = ["frame='%s'" % self.name, "n=%d" % n, "inverse=%s" % ('true' if inverse else 'false')]
        if bitmap:
            parts.insert(0, bitmap.serialize())
        if field:
            validate_label(field)
            # separators chosen to match the server's expected JSON spacing
            values_str = json.dumps(values, separators=(',', ': '))
            parts.extend(["field='%s'" % field, "filters=%s" % values_str])
        qry = u"TopN(%s)" % ", ".join(parts)
        return PQLQuery(qry, self.index)
    def range(self, row_id, start, end):
        """Creates a Range query.
        Similar to ``Bitmap``, but only returns bits which were set with timestamps between the given start and end timestamps.
        * see: `Range Query <https://www.pilosa.com/docs/query-language/#range>`_
        :param int row_id:
        :param datetime.datetime start: start timestamp
        :param datetime.datetime end: end timestamp
        """
        return self._range(self.row_label, row_id, start, end)
    def inverse_range(self, column_id, start, end):
        """Creates a Range query.
        Similar to ``Bitmap``, but only returns bits which were set with timestamps between the given start and end timestamps.
        :param int column_id:
        :param datetime.datetime start: start timestamp
        :param datetime.datetime end: end timestamp
        """
        return self._range(self.column_label, column_id, start, end)
    def _range(self, label, rowcol_id, start, end):
        # Timestamps are serialized at minute resolution (_TIME_FORMAT).
        start_str = start.strftime(_TIME_FORMAT)
        end_str = end.strftime(_TIME_FORMAT)
        return PQLQuery(u"Range(%s=%d, frame='%s', start='%s', end='%s')" %
                        (label, rowcol_id, self.name, start_str, end_str),
                        self.index)
    def set_row_attrs(self, row_id, attrs):
        """Creates a SetRowAttrs query.
        ``SetRowAttrs`` associates arbitrary key/value pairs with a row in a frame.
        Following object types are accepted:
        * int
        * str
        * bool
        * float
        :param int row_id:
        :param dict attrs: row attributes
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        attrs_str = _create_attributes_str(attrs)
        return PQLQuery(u"SetRowAttrs(%s=%d, frame='%s', %s)" %
                        (self.row_label, row_id, self.name, attrs_str),
                        self.index)
    def _get_options_string(self):
        # Serialize only the non-default options; the server applies its own
        # defaults for anything omitted. sort_keys keeps the payload stable.
        data = {"rowLabel": self.row_label}
        if self.inverse_enabled:
            data["inverseEnabled"] = True
        if self.time_quantum != TimeQuantum.NONE:
            data["timeQuantum"] = str(self.time_quantum)
        if self.cache_type != CacheType.DEFAULT:
            data["cacheType"] = str(self.cache_type)
        if self.cache_size > 0:
            data["cacheSize"] = self.cache_size
        return json.dumps({"options": data}, sort_keys=True)
class PQLQuery:
    """A single PQL query bound to the index it targets."""

    def __init__(self, pql, index):
        # pql: serialized PQL text; index: the pilosa.Index this query runs against
        self.index = index
        self.pql = pql

    def serialize(self):
        """Return the raw PQL string for this query."""
        return self.pql
def _create_attributes_str(attrs):
    """Serialize an attribute dict to a sorted ``key=json_value`` list.

    Keys are validated as labels; values are JSON-encoded. A TypeError from a
    non-serializable value is converted to a PilosaError.
    """
    # TODO: make key use its own validator
    pairs = []
    try:
        for key, value in attrs.items():
            validate_label(key)
            pairs.append("%s=%s" % (key, json.dumps(value)))
        return ", ".join(sorted(pairs))
    except TypeError:
        raise PilosaError("Error while converting values")
class PQLBatchQuery:
    """Accumulates PQL queries to be sent to the server in one round trip."""

    def __init__(self, index):
        self.index = index
        self.queries = []

    def add(self, *queries):
        """Append the given queries to this batch."""
        for query in queries:
            self.queries.append(query)

    def serialize(self):
        """Concatenate the serialized form of every query in order."""
        return u''.join(query.serialize() for query in self.queries)
| 36.392294
| 209
| 0.626853
|
import json
from .exceptions import PilosaError
from .validator import validate_index_name, validate_frame_name, validate_label
# Public API of this module.
__all__ = ("TimeQuantum", "CacheType", "Schema", "Index", "PQLQuery", "PQLBatchQuery")
# Timestamp wire format used by Range/SetBit PQL calls (minute resolution).
_TIME_FORMAT = "%Y-%m-%dT%H:%M"
class TimeQuantum:
    """Valid time-quantum granularities for indexes and frames.

    The singleton instances are created right after the class body
    (``TimeQuantum.YEAR`` etc.); ``value`` is the wire-format string
    sent to the server, e.g. ``"YMD"``.
    """
    NONE = None
    YEAR = None
    MONTH = None
    DAY = None
    HOUR = None
    YEAR_MONTH = None
    MONTH_DAY = None
    DAY_HOUR = None
    YEAR_MONTH_DAY = None
    MONTH_DAY_HOUR = None
    YEAR_MONTH_DAY_HOUR = None
    def __init__(self, value):
        # value: wire-format string, e.g. "YM"
        self.value = value
    def __str__(self):
        return self.value
    def __eq__(self, other):
        # Equal only to another TimeQuantum carrying the same wire value.
        if isinstance(other, TimeQuantum):
            return self.value == other.value
        return False
# Predefined time-quantum singletons (populated after the class body so the
# class attributes reference fully-constructed instances).
TimeQuantum.NONE = TimeQuantum("")
TimeQuantum.YEAR = TimeQuantum("Y")
TimeQuantum.MONTH = TimeQuantum("M")
TimeQuantum.DAY = TimeQuantum("D")
TimeQuantum.HOUR = TimeQuantum("H")
TimeQuantum.YEAR_MONTH = TimeQuantum("YM")
TimeQuantum.MONTH_DAY = TimeQuantum("MD")
TimeQuantum.DAY_HOUR = TimeQuantum("DH")
TimeQuantum.YEAR_MONTH_DAY = TimeQuantum("YMD")
TimeQuantum.MONTH_DAY_HOUR = TimeQuantum("MDH")
TimeQuantum.YEAR_MONTH_DAY_HOUR = TimeQuantum("YMDH")
class CacheType:
    """Valid frame cache types; singletons are created after the class body.

    ``value`` is the wire-format string sent to the server ("" means the
    server-side default).
    """
    DEFAULT = None
    LRU = None
    RANKED = None
    def __init__(self, value):
        # value: wire-format string, e.g. "lru"
        self.value = value
    def __str__(self):
        return self.value
    def __eq__(self, other):
        # Equal only to another CacheType carrying the same wire value.
        if isinstance(other, CacheType):
            return self.value == other.value
        return False
# Predefined cache-type singletons (populated after the class body).
CacheType.DEFAULT = CacheType("")
CacheType.LRU = CacheType("lru")
CacheType.RANKED = CacheType("ranked")
class Schema:
    """In-memory registry of indexes, used to compute schema differences."""
    def __init__(self):
        # name -> Index
        self._indexes = {}
    def __eq__(self, other):
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        return self._indexes == other._indexes
    def __ne__(self, other):
        return not self.__eq__(other)
    def index(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
        """Return the index with *name*, creating and caching it on first use.

        Note: column_label/time_quantum are only applied when the index is
        first created; later calls with the same name return the cached object.
        """
        index = self._indexes.get(name)
        if index is None:
            index = Index(name, column_label, time_quantum)
            self._indexes[name] = index
        return index
    def _diff(self, other):
        """Return a new Schema holding indexes/frames present here but absent in *other*."""
        result = Schema()
        for index_name, index in self._indexes.items():
            if index_name not in other._indexes:
                result._indexes[index_name] = index.copy()
            else:
                # the index exists in the other schema; check the frames
                result_index = index.copy(frames=False)
                for frame_name, frame in index._frames.items():
                    # the frame doesn't exist in the other schema, copy it
                    if frame_name not in result_index._frames:
                        result_index._frames[frame_name] = frame.copy()
                if len(result_index._frames) > 0:
                    result._indexes[index_name] = result_index
        return result
class Index:
    """A Pilosa index: namespace for frames and builder for PQL queries."""
    def __init__(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
        # Validate user-supplied identifiers before storing anything.
        validate_index_name(name)
        validate_label(column_label)
        self.name = name
        self.column_label = column_label
        self.time_quantum = time_quantum
        # name -> Frame
        self._frames = {}
    def __eq__(self, other):
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        return self._meta_eq(other) and \
            self._frames == other._frames
    def __ne__(self, other):
        return not self.__eq__(other)
    def _meta_eq(self, other):
        # Metadata-only comparison (skips frames) to avoid a call cycle with Frame.__eq__.
        return self.name == other.name and \
            self.column_label == other.column_label and \
            self.time_quantum == other.time_quantum
    def copy(self, frames=True):
        """Return a copy of this index; with frames=False the frame map is left empty."""
        index = Index(self.name, column_label=self.column_label, time_quantum=self.time_quantum)
        if frames:
            index._frames = dict((name, frame.copy()) for name, frame in self._frames.items())
        return index
    def frame(self, name, row_label="rowID", time_quantum=TimeQuantum.NONE,
              inverse_enabled=False, cache_type=CacheType.DEFAULT, cache_size=0):
        """Return the frame with *name*, creating and caching it on first use.

        The keyword options apply only on first creation.
        """
        frame = self._frames.get(name)
        if frame is None:
            frame = Frame(self, name, row_label, time_quantum,
                          inverse_enabled, cache_type, cache_size)
            self._frames[name] = frame
        return frame
    def raw_query(self, query):
        """Wrap an unvalidated PQL string in a query object."""
        return PQLQuery(query, self)
    def batch_query(self, *queries):
        """Bundle the given queries into a PQLBatchQuery."""
        q = PQLBatchQuery(self)
        q.add(*queries)
        return q
    def union(self, *bitmaps):
        """Create a ``Union`` (logical OR) query over the given bitmap queries."""
        return self._bitmap_op("Union", bitmaps)
    def intersect(self, *bitmaps):
        """Create an ``Intersect`` (logical AND) query; requires at least one operand."""
        if len(bitmaps) < 1:
            raise PilosaError("Number of bitmap queries should be greater or equal to 1")
        return self._bitmap_op("Intersect", bitmaps)
    def difference(self, *bitmaps):
        """Create a ``Difference`` query; requires at least one operand."""
        if len(bitmaps) < 1:
            raise PilosaError("Number of bitmap queries should be greater or equal to 1")
        return self._bitmap_op("Difference", bitmaps)
    def count(self, bitmap):
        """Create a ``Count`` query over the given bitmap query."""
        return PQLQuery(u"Count(%s)" % bitmap.serialize(), self)
    def set_column_attrs(self, column_id, attrs):
        """Create a ``SetColumnAttrs`` query attaching *attrs* to a column."""
        attrs_str = _create_attributes_str(attrs)
        return PQLQuery(u"SetColumnAttrs(%s=%d, %s)" %
                        (self.column_label, column_id, attrs_str), self)
    def _bitmap_op(self, name, bitmaps):
        # Comma-join the serialized operands inside the named PQL call.
        return PQLQuery(u"%s(%s)" % (name, u", ".join(b.serialize() for b in bitmaps)), self)
class Frame:
    """Table-like data partition within an Index; builds frame-scoped PQL queries.

    Do not construct directly — use ``Index.frame``.
    """
    def __init__(self, index, name, row_label, time_quantum, inverse_enabled,
                 cache_type, cache_size):
        # Validate user-supplied identifiers before storing anything.
        validate_frame_name(name)
        validate_label(row_label)
        self.index = index
        self.name = name
        self.time_quantum = time_quantum
        self.inverse_enabled = inverse_enabled
        self.cache_type = cache_type
        self.cache_size = cache_size
        self.row_label = row_label
        # The column label is always inherited from the owning index.
        self.column_label = index.column_label
    def __eq__(self, other):
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        # Uses index._meta_eq (not ==) to avoid an Index.__eq__/Frame.__eq__ call cycle.
        return self.name == other.name and \
            self.index._meta_eq(other.index) and \
            self.row_label == other.row_label and \
            self.time_quantum == other.time_quantum and \
            self.inverse_enabled == other.inverse_enabled and \
            self.cache_type == other.cache_type and \
            self.cache_size == other.cache_size
    def __ne__(self, other):
        return not self.__eq__(other)
    def copy(self):
        # Shallow copy: the new Frame still references the same Index object.
        return Frame(self.index, self.name, self.row_label, self.time_quantum,
                     self.inverse_enabled, self.cache_type, self.cache_size)
    def bitmap(self, row_id):
        """Create a ``Bitmap`` query keyed by the row label."""
        return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.row_label, row_id, self.name),
                        self.index)
    def inverse_bitmap(self, column_id):
        """Create a ``Bitmap`` query keyed by the column label."""
        return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.column_label, column_id, self.name),
                        self.index)
    def setbit(self, row_id, column_id, timestamp=None):
        """Create a ``SetBit`` query; *timestamp* (datetime-like) is optional."""
        # When no timestamp is given the clause is omitted entirely.
        ts = ", timestamp='%s'" % timestamp.strftime(_TIME_FORMAT) if timestamp else ''
        return PQLQuery(u"SetBit(%s=%d, frame='%s', %s=%d%s)" % \
                        (self.row_label, row_id, self.name, self.column_label, column_id, ts),
                        self.index)
    def clearbit(self, row_id, column_id):
        """Create a ``ClearBit`` query for the given row/column pair."""
        return PQLQuery(u"ClearBit(%s=%d, frame='%s', %s=%d)" % \
                        (self.row_label, row_id, self.name, self.column_label, column_id),
                        self.index)
    def topn(self, n, bitmap=None, field="", *values):
        """Create a ``TopN`` query (inverse=false)."""
        return self._topn(n, bitmap, field, False, *values)
    def inverse_topn(self, n, bitmap=None, field="", *values):
        """Create a ``TopN`` query with inverse=true."""
        return self._topn(n, bitmap, field, True, *values)
    def _topn(self, n, bitmap=None, field="", inverse=False, *values):
        # Argument order matters: optional bitmap first, then frame/n/inverse,
        # then optional field filter clauses.
        parts = ["frame='%s'" % self.name, "n=%d" % n, "inverse=%s" % ('true' if inverse else 'false')]
        if bitmap:
            parts.insert(0, bitmap.serialize())
        if field:
            validate_label(field)
            # separators chosen to match the server's expected JSON spacing
            values_str = json.dumps(values, separators=(',', ': '))
            parts.extend(["field='%s'" % field, "filters=%s" % values_str])
        qry = u"TopN(%s)" % ", ".join(parts)
        return PQLQuery(qry, self.index)
    def range(self, row_id, start, end):
        """Create a ``Range`` query keyed by the row label."""
        return self._range(self.row_label, row_id, start, end)
    def inverse_range(self, column_id, start, end):
        """Create a ``Range`` query keyed by the column label."""
        return self._range(self.column_label, column_id, start, end)
    def _range(self, label, rowcol_id, start, end):
        # Timestamps serialized at minute resolution (_TIME_FORMAT).
        start_str = start.strftime(_TIME_FORMAT)
        end_str = end.strftime(_TIME_FORMAT)
        return PQLQuery(u"Range(%s=%d, frame='%s', start='%s', end='%s')" %
                        (label, rowcol_id, self.name, start_str, end_str),
                        self.index)
    def set_row_attrs(self, row_id, attrs):
        """Create a ``SetRowAttrs`` query attaching *attrs* to a row."""
        attrs_str = _create_attributes_str(attrs)
        return PQLQuery(u"SetRowAttrs(%s=%d, frame='%s', %s)" %
                        (self.row_label, row_id, self.name, attrs_str),
                        self.index)
    def _get_options_string(self):
        # Only non-default options are serialized; sort_keys keeps output stable.
        data = {"rowLabel": self.row_label}
        if self.inverse_enabled:
            data["inverseEnabled"] = True
        if self.time_quantum != TimeQuantum.NONE:
            data["timeQuantum"] = str(self.time_quantum)
        if self.cache_type != CacheType.DEFAULT:
            data["cacheType"] = str(self.cache_type)
        if self.cache_size > 0:
            data["cacheSize"] = self.cache_size
        return json.dumps({"options": data}, sort_keys=True)
class PQLQuery:
    """A single PQL query string bound to the index it targets."""
    def __init__(self, pql, index):
        # pql: serialized PQL text; index: pilosa.Index the query runs against
        self.pql = pql
        self.index = index
    def serialize(self):
        """Return the raw PQL string."""
        return self.pql
def _create_attributes_str(attrs):
    """Serialize an attribute dict to a sorted ``key=json_value`` string.

    Keys are validated as labels; a TypeError from a non-serializable value
    is converted to a PilosaError.
    """
    kvs = []
    try:
        for k, v in attrs.items():
            validate_label(k)
            kvs.append("%s=%s" % (k, json.dumps(v)))
        return ", ".join(sorted(kvs))
    except TypeError:
        raise PilosaError("Error while converting values")
class PQLBatchQuery:
    """Accumulates PQL queries to be sent to the server in one round trip."""
    def __init__(self, index):
        self.index = index
        self.queries = []
    def add(self, *queries):
        # Append all given queries to the batch in order.
        self.queries.extend(queries)
    def serialize(self):
        # Concatenate the serialized form of every query, in insertion order.
        return u''.join(q.serialize() for q in self.queries)
| true
| true
|
f7143e436422c57c3d9fcd782f05537d8d957896
| 6,211
|
py
|
Python
|
MaxiNet/WorkerServer/p4_mininet.py
|
bramamurthy/P4SwitchesInMaxiNet
|
b7c941690d46b110b12469a9fb9c23de8e6b965f
|
[
"MIT"
] | 1
|
2018-05-09T16:57:03.000Z
|
2018-05-09T16:57:03.000Z
|
MaxiNet/WorkerServer/p4_mininet.py
|
bramamurthy/P4SwitchesInMaxiNet
|
b7c941690d46b110b12469a9fb9c23de8e6b965f
|
[
"MIT"
] | null | null | null |
MaxiNet/WorkerServer/p4_mininet.py
|
bramamurthy/P4SwitchesInMaxiNet
|
b7c941690d46b110b12469a9fb9c23de8e6b965f
|
[
"MIT"
] | null | null | null |
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mininet.net import Mininet
from mininet.node import Switch, Host
from mininet.log import setLogLevel, info, error, debug
from mininet.moduledeps import pathCheck
from sys import exit
import os
import tempfile
import socket
from time import sleep
from netstat import check_listening_on_port
# Added by RB
from parse_exp_cfg import *
import pdb # Added by RB
SWITCH_START_TIMEOUT = 10 # seconds; NOTE(review): currently unused by check_switch_started — confirm intent
class P4Host(Host):
    # Mininet host tuned for P4 experiments: single "eth0" interface with
    # NIC offloads (rx/tx/scatter-gather) and IPv6 disabled so packets reach
    # the switch unmodified. (Python 2 syntax — `print` statements below.)
    def config(self, **params):
        # NOTE(review): super(Host, self) skips Host.config and calls the
        # grandparent's config — presumably intentional; confirm.
        r = super(Host, self).config(**params)
        self.defaultIntf().rename("eth0")
        for off in ["rx", "tx", "sg"]:
            cmd = "/sbin/ethtool --offload eth0 %s off" % off
            self.cmd(cmd)
        # disable IPv6
        self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
        self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
        self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
        return r
    def describe(self):
        # Print a short human-readable summary of the host's default interface.
        print "**********"
        print self.name
        print "default interface: %s\t%s\t%s" %(
            self.defaultIntf().name,
            self.defaultIntf().IP(),
            self.defaultIntf().MAC()
        )
        print "**********"
class P4Switch(Switch):
    """P4 virtual switch backed by a behavioral-model (bmv2) executable.

    Launches ``sw_path`` with the compiled P4 JSON, then waits until the
    switch's Thrift control-plane server is listening before reporting
    success.
    """
    device_id = 0
    def __init__(self, name, sw_path = None, json_path = None,
                 thrift_port = None,
                 pcap_dump = False,
                 log_console = False,
                 verbose = False,
                 device_id = None,
                 enable_debugger = False,
                 **kwargs):
        Switch.__init__(self, name, **kwargs)
        assert(sw_path)
        assert(json_path)
        # make sure that the provided sw_path is valid
        pathCheck(sw_path)
        # make sure that the provided JSON file exists
        if not os.path.isfile(json_path):
            error("Invalid JSON file.\n")
            exit(1)
        self.sw_path = sw_path
        self.json_path = json_path
        self.verbose = verbose
        logfile = "/tmp/p4s.{}.log".format(self.name)
        self.output = open(logfile, 'w')
        self.thrift_port = thrift_port
        if check_listening_on_port(self.thrift_port):
            # BUG FIX: the original formatted self.grpc_port, an attribute this
            # class never defines, so this error path raised AttributeError.
            error('%s cannot bind port %d because it is bound by another process\n' % (self.name, self.thrift_port))
            exit(1)
        self.pcap_dump = pcap_dump
        self.enable_debugger = enable_debugger
        self.log_console = log_console
        # Device ids must be unique; track the highest one handed out so far.
        if device_id is not None:
            self.device_id = device_id
            P4Switch.device_id = max(P4Switch.device_id, device_id)
        else:
            self.device_id = P4Switch.device_id
            P4Switch.device_id += 1
        self.nanomsg = "ipc:///tmp/bm-{}-log.ipc".format(self.device_id)
    @classmethod
    def setup(cls):
        pass
    def check_switch_started(self, pid):
        """While the process is running (pid exists), we check if the Thrift
        server has been started. If the Thrift server is ready, we assume that
        the switch was started successfully. This is only reliable if the Thrift
        server is started at the end of the init process"""
        while True:
            if not os.path.exists(os.path.join("/proc", str(pid))):
                return False
            if check_listening_on_port(self.thrift_port):
                return True
            sleep(0.5)
    def start(self, controllers):
        "Start up a new P4 switch"
        info("Starting P4 switch {}.\n".format(self.name))
        args = [self.sw_path]
        # Attach every data-plane (non-IP) interface as "<port>@<ifname>".
        for port, intf in self.intfs.items():
            if not intf.IP():
                args.extend(['-i', str(port) + "@" + intf.name])
        if self.pcap_dump:
            # Write pcaps into the experiment-configured directory.
            my_pcap_dir = get_exp_pcap_dir()
            pcap_arg = "--pcap=" + my_pcap_dir
            args.append(pcap_arg)
        if self.thrift_port:
            args.extend(['--thrift-port', str(self.thrift_port)])
        if self.nanomsg:
            args.extend(['--nanolog', self.nanomsg])
        args.extend(['--device-id', str(self.device_id)])
        P4Switch.device_id += 1
        args.append(self.json_path)
        if self.enable_debugger:
            args.append("--debugger")
        if self.log_console:
            args.append("--log-console")
        logfile = "/tmp/p4s.{}.log".format(self.name)
        info(' '.join(args) + "\n")
        pid = None
        # Launch in the switch's namespace; capture the child PID via a temp file.
        with tempfile.NamedTemporaryFile() as f:
            self.cmd(' '.join(args) + ' >' + logfile + ' 2>&1 & echo $! >> ' + f.name)
            pid = int(f.read())
        debug("P4 switch {} PID is {}.\n".format(self.name, pid))
        if not self.check_switch_started(pid):
            error("P4 switch {} did not start correctly.\n".format(self.name))
            exit(1)
        info("P4 switch {} has been started.\n".format(self.name))
    def stop(self):
        "Terminate P4 switch."
        self.output.flush()
        self.cmd('kill %' + self.sw_path)
        self.cmd('wait')
        self.deleteIntfs()
    def attach(self, intf):
        "Connect a data port"
        assert(0)
    def detach(self, intf):
        "Disconnect a data port"
        assert(0)
| 35.289773
| 114
| 0.580583
|
from mininet.net import Mininet
from mininet.node import Switch, Host
from mininet.log import setLogLevel, info, error, debug
from mininet.moduledeps import pathCheck
from sys import exit
import os
import tempfile
import socket
from time import sleep
from netstat import check_listening_on_port
from parse_exp_cfg import *
import pdb
SWITCH_START_TIMEOUT = 10  # seconds; NOTE(review): unused by check_switch_started — confirm intent
class P4Host(Host):
    # Mininet host for P4 experiments: single "eth0" interface with NIC
    # offloads and IPv6 disabled. (Python 2 syntax — `print` statements.)
    def config(self, **params):
        # NOTE(review): super(Host, self) skips Host.config — confirm intent.
        r = super(Host, self).config(**params)
        self.defaultIntf().rename("eth0")
        for off in ["rx", "tx", "sg"]:
            cmd = "/sbin/ethtool --offload eth0 %s off" % off
            self.cmd(cmd)
        # Disable IPv6 on all interfaces so only IPv4 traffic hits the switch.
        self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
        self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
        self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
        return r
    def describe(self):
        # Print a short summary of the host's default interface.
        print "**********"
        print self.name
        print "default interface: %s\t%s\t%s" %(
            self.defaultIntf().name,
            self.defaultIntf().IP(),
            self.defaultIntf().MAC()
        )
        print "**********"
class P4Switch(Switch):
    """P4 virtual switch backed by a behavioral-model (bmv2) executable.

    Launches ``sw_path`` with the compiled P4 JSON and waits until the
    switch's Thrift control-plane server is listening.
    """
    device_id = 0
    def __init__(self, name, sw_path = None, json_path = None,
                 thrift_port = None,
                 pcap_dump = False,
                 log_console = False,
                 verbose = False,
                 device_id = None,
                 enable_debugger = False,
                 **kwargs):
        Switch.__init__(self, name, **kwargs)
        assert(sw_path)
        assert(json_path)
        # Validate the executable path and the compiled JSON before launching.
        pathCheck(sw_path)
        if not os.path.isfile(json_path):
            error("Invalid JSON file.\n")
            exit(1)
        self.sw_path = sw_path
        self.json_path = json_path
        self.verbose = verbose
        logfile = "/tmp/p4s.{}.log".format(self.name)
        self.output = open(logfile, 'w')
        self.thrift_port = thrift_port
        if check_listening_on_port(self.thrift_port):
            # BUG FIX: the original formatted self.grpc_port, an attribute this
            # class never defines, so this error path raised AttributeError.
            error('%s cannot bind port %d because it is bound by another process\n' % (self.name, self.thrift_port))
            exit(1)
        self.pcap_dump = pcap_dump
        self.enable_debugger = enable_debugger
        self.log_console = log_console
        # Device ids must be unique; track the highest one handed out so far.
        if device_id is not None:
            self.device_id = device_id
            P4Switch.device_id = max(P4Switch.device_id, device_id)
        else:
            self.device_id = P4Switch.device_id
            P4Switch.device_id += 1
        self.nanomsg = "ipc:///tmp/bm-{}-log.ipc".format(self.device_id)
    @classmethod
    def setup(cls):
        pass
    def check_switch_started(self, pid):
        """While the process is running (pid exists), we check if the Thrift
        server has been started. If the Thrift server is ready, we assume that
        the switch was started successfully. This is only reliable if the Thrift
        server is started at the end of the init process"""
        while True:
            if not os.path.exists(os.path.join("/proc", str(pid))):
                return False
            if check_listening_on_port(self.thrift_port):
                return True
            sleep(0.5)
    def start(self, controllers):
        "Start up a new P4 switch"
        info("Starting P4 switch {}.\n".format(self.name))
        args = [self.sw_path]
        # Attach every data-plane (non-IP) interface as "<port>@<ifname>".
        for port, intf in self.intfs.items():
            if not intf.IP():
                args.extend(['-i', str(port) + "@" + intf.name])
        if self.pcap_dump:
            # Write pcaps into the experiment-configured directory.
            my_pcap_dir = get_exp_pcap_dir()
            pcap_arg = "--pcap=" + my_pcap_dir
            args.append(pcap_arg)
        if self.thrift_port:
            args.extend(['--thrift-port', str(self.thrift_port)])
        if self.nanomsg:
            args.extend(['--nanolog', self.nanomsg])
        args.extend(['--device-id', str(self.device_id)])
        P4Switch.device_id += 1
        args.append(self.json_path)
        if self.enable_debugger:
            args.append("--debugger")
        if self.log_console:
            args.append("--log-console")
        logfile = "/tmp/p4s.{}.log".format(self.name)
        info(' '.join(args) + "\n")
        pid = None
        # Launch in the switch's namespace; capture the child PID via a temp file.
        with tempfile.NamedTemporaryFile() as f:
            self.cmd(' '.join(args) + ' >' + logfile + ' 2>&1 & echo $! >> ' + f.name)
            pid = int(f.read())
        debug("P4 switch {} PID is {}.\n".format(self.name, pid))
        if not self.check_switch_started(pid):
            error("P4 switch {} did not start correctly.\n".format(self.name))
            exit(1)
        info("P4 switch {} has been started.\n".format(self.name))
    def stop(self):
        "Terminate P4 switch."
        self.output.flush()
        self.cmd('kill %' + self.sw_path)
        self.cmd('wait')
        self.deleteIntfs()
    def attach(self, intf):
        "Connect a data port"
        assert(0)
    def detach(self, intf):
        "Disconnect a data port"
        assert(0)
| false
| true
|
f7143e71d4927605031e54ebefb2763f34929e39
| 9,923
|
py
|
Python
|
old_code/YoutubeVideo.py
|
lukewest/Movie-Extra-Downloader
|
f5ba12a2f1a34fd4aa892eb0379342b131076a70
|
[
"MIT"
] | 23
|
2018-08-08T14:28:59.000Z
|
2022-03-22T15:45:10.000Z
|
old_code/YoutubeVideo.py
|
lukewest/Movie-Extra-Downloader
|
f5ba12a2f1a34fd4aa892eb0379342b131076a70
|
[
"MIT"
] | 13
|
2018-08-08T14:50:29.000Z
|
2022-01-27T09:05:18.000Z
|
old_code/YoutubeVideo.py
|
lukewest/Movie-Extra-Downloader
|
f5ba12a2f1a34fd4aa892eb0379342b131076a70
|
[
"MIT"
] | 9
|
2018-08-12T14:08:15.000Z
|
2021-09-18T01:08:04.000Z
|
from _socket import timeout
from urllib.error import URLError
from pytube import YouTube
from pytube.exceptions import RegexMatchError
from old_code.Stream import Stream
import time
import tools as tools
class YoutubeVideo(object):
# todo (2): subtitles
conn_errors = 0
def __init__(self, url, score=0, preferred_container='mp4', min_resolution=360,
max_resolution=1080, force_preferred_container=False):
########################################
self.url = None
self.source = None
self.delete = None
self.complete = None
self.is_play_trailer = None
self.title = None
self.thumbnail_url = None
self.channel = None
self.tags = list()
self.view_count = None
self.rating = None
self.adjusted_rating = None
self.resolution = None
self.quality_score = None
self.length = None
self.resolution_ratio = None
self.streams = list()
self.best_video_stream = None
self.best_audio_stream = None
self.best_combined_stream = None
########################################
self.url = url
self.delete = False
self.is_play_trailer = False
self.complete = True
tries = 0
while True:
try:
self.source = YouTube(url)
except KeyError as e:
if e.args[0] == 'url':
self.delete = True
self.is_play_trailer = True
# todo (1): add youtube-dl info grabber/downloader
# stuff I need: title, length, keywords?
return
elif e.args[0] == 'url_encoded_fmt_stream_map':
if tries > 4:
print('Failed to load youtube data, retrying. Reason: ' + str(e))
self.delete = True
return
print('Failed to load youtube data, retrying. Reason: ' + str(e))
time.sleep(2)
tries += 1
else:
raise
except RegexMatchError as e:
print('Pytube failed to load video info. Reason: ' + url + ': ' + str(e))
self.delete = True
return
except timeout as e:
if tries > 4:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if Stream.conn_errors > 2:
raise
else:
Stream.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
tries += 1
time.sleep(1)
except URLError as e:
if tries > 2:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if YoutubeVideo.conn_errors > 2:
raise
else:
YoutubeVideo.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
time.sleep(1)
tries += 1
else:
YoutubeVideo.conn_errors = 0
break
self.score = score
self.title = self.source.title
self.title = tools.get_clean_string(self.title)
self.rating = float(self.source.player_config_args['avg_rating'])
self.view_count = int(self.source.player_config_args['view_count'])
self.channel = self.source.player_config_args['author']
self.length = self.source.player_config_args['length_seconds']
self.thumbnail_url = self.source.thumbnail_url
try:
self.thumbnail_url = self.source.thumbnail_url
except KeyError:
self.thumbnail_url = None
try:
self.tags = self.source.player_config_args['keywords'].split(',')
except KeyError:
self.tags = ''
if self.view_count < 100:
self.view_count = 100
self.adjusted_rating = self.rating * (1 - 1 / ((self.view_count / 60) ** 0.5))
self.load_streams(min_resolution, max_resolution)
self.update_quality_score(preferred_container)
self.update_best_audio_stream(preferred_container, force_preferred_container)
self.update_best_video_stream(preferred_container, force_preferred_container)
self.update_best_combined_stream(preferred_container, force_preferred_container)
if self.is_play_trailer:
self.update_youtube_dl_info()
    def update_youtube_dl_info(self):
        # Placeholder: intended to fetch title/length/keywords via youtube-dl
        # for "play trailer" URLs that pytube cannot parse (see the todo in
        # __init__). Currently a no-op.
        pass
def update_quality_score(self, preferred_container='mp4'):
self.quality_score = 0
max_res = 0
for stream in self.streams:
if stream.type != 'video':
continue
quality_score = 0
pixel_bitrate = stream.bitrate_per_pixel
if stream.resolution == 1080:
pixel_bitrate /= 1
quality_score = 120
elif stream.resolution == 720:
pixel_bitrate /= 1.22
quality_score = 108
elif stream.resolution == 480:
pixel_bitrate /= 1.52
quality_score = 65
elif stream.resolution == 360:
pixel_bitrate /= 1.39
quality_score = 40
elif stream.resolution == 240:
pixel_bitrate /= 2.15
quality_score = 20
elif stream.resolution == 144:
pixel_bitrate /= 2.65
quality_score = 10
if preferred_container.lower() == stream.container:
quality_score *= 1.2
quality_score *= pixel_bitrate
if stream.resolution > max_res:
self.quality_score = quality_score
max_res = stream.resolution
self.resolution_ratio = stream.size[0] / stream.size[1]
elif stream.resolution == max_res:
if quality_score > self.quality_score:
self.quality_score = quality_score
def load_streams(self, min_resolution=360, max_resolution=1080):
self.streams = list()
self.complete = True
for source_stream in self.source.streams.fmt_streams:
stream = Stream(source_stream, int(self.length))
if stream.complete:
if stream.resolution is not None:
if stream.resolution > max_resolution or stream.resolution < min_resolution:
continue
self.streams.append(stream)
elif stream.retry:
self.complete = False
if Stream.conn_errors != 0:
self.complete = False
def update_best_video_stream(self, preferred_container='mp4', force_preferred_container=False):
    """Select the best video-only stream and store it in ``self.best_video_stream``.

    Tracks the highest-resolution video stream overall and the
    highest-resolution one in the preferred container; the preferred one is
    chosen when it matches the overall best resolution (or when forced),
    otherwise the overall best wins. May store None when no candidate exists.
    """
    wanted = preferred_container.lower()
    top_res, top_stream = 0, None
    pref_res, pref_stream = 0, None
    for candidate in self.streams:
        if candidate.type != 'video':
            continue
        if candidate.resolution > top_res:
            top_res, top_stream = candidate.resolution, candidate
        if candidate.container.lower() == wanted and candidate.resolution > pref_res:
            pref_res, pref_stream = candidate.resolution, candidate
    if force_preferred_container or top_res == pref_res:
        self.best_video_stream = pref_stream
    else:
        self.best_video_stream = top_stream
def update_best_audio_stream(self, preferred_container='mp4', force_preferred_container=False):
    """Select the best audio-only stream and store it in ``self.best_audio_stream``.

    Tracks the highest-bitrate audio stream overall and the highest-bitrate
    one in the preferred container. The preferred stream is accepted as long
    as the overall best is at most 35% louder in bitrate (or when forced);
    otherwise the overall best wins. May store None when no candidate exists.
    """
    wanted = preferred_container.lower()
    top_rate, top_stream = 0, None
    pref_rate, pref_stream = 0, None
    for candidate in self.streams:
        if candidate.type != 'audio':
            continue
        rate = candidate.bitrate
        if rate > top_rate:
            top_rate, top_stream = rate, candidate
        if candidate.container.lower() == wanted and rate > pref_rate:
            pref_rate, pref_stream = rate, candidate
    if force_preferred_container or top_rate <= pref_rate * 1.35:
        self.best_audio_stream = pref_stream
    else:
        self.best_audio_stream = top_stream
def update_best_combined_stream(self, preferred_container='mp4', force_preferred_container=False):
    """Select the best combined (audio+video) stream.

    Scores each combined stream: +10 for matching the highest combined
    resolution, +1 for matching the preferred container; the highest score
    wins. With ``force_preferred_container`` set, streams in other
    containers are excluded. Stores the winner (or None) in
    ``self.best_combined_stream``.
    """
    wanted = preferred_container.lower()
    highest_resolution = 0
    for stream in self.streams:
        if stream.type != 'combined':
            continue
        if stream.resolution > highest_resolution:
            highest_resolution = stream.resolution
    max_score = 0
    selected_stream = None
    for stream in self.streams:
        if stream.type != 'combined':
            continue
        # Fix: compare containers case-insensitively, consistently with
        # update_best_video_stream / update_best_audio_stream (the original
        # used raw, case-sensitive comparison here only).
        container_ok = stream.container.lower() == wanted
        if force_preferred_container and not container_ok:
            continue
        score = 0
        if stream.resolution == highest_resolution:
            score += 10
        if container_ok:
            score += 1
        if score > max_score:
            max_score = score
            selected_stream = stream
    self.best_combined_stream = selected_stream
| 33.866894
| 102
| 0.550338
|
from _socket import timeout
from urllib.error import URLError
from pytube import YouTube
from pytube.exceptions import RegexMatchError
from old_code.Stream import Stream
import time
import tools as tools
class YoutubeVideo(object):
conn_errors = 0
def __init__(self, url, score=0, preferred_container='mp4', min_resolution=360,
max_resolution=1080, force_preferred_container=False):
except RegexMatchError as e:
print('Pytube failed to load video info. Reason: ' + url + ': ' + str(e))
self.delete = True
return
except timeout as e:
if tries > 4:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if Stream.conn_errors > 2:
raise
else:
Stream.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
tries += 1
time.sleep(1)
except URLError as e:
if tries > 2:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if YoutubeVideo.conn_errors > 2:
raise
else:
YoutubeVideo.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
time.sleep(1)
tries += 1
else:
YoutubeVideo.conn_errors = 0
break
self.score = score
self.title = self.source.title
self.title = tools.get_clean_string(self.title)
self.rating = float(self.source.player_config_args['avg_rating'])
self.view_count = int(self.source.player_config_args['view_count'])
self.channel = self.source.player_config_args['author']
self.length = self.source.player_config_args['length_seconds']
self.thumbnail_url = self.source.thumbnail_url
try:
self.thumbnail_url = self.source.thumbnail_url
except KeyError:
self.thumbnail_url = None
try:
self.tags = self.source.player_config_args['keywords'].split(',')
except KeyError:
self.tags = ''
if self.view_count < 100:
self.view_count = 100
self.adjusted_rating = self.rating * (1 - 1 / ((self.view_count / 60) ** 0.5))
self.load_streams(min_resolution, max_resolution)
self.update_quality_score(preferred_container)
self.update_best_audio_stream(preferred_container, force_preferred_container)
self.update_best_video_stream(preferred_container, force_preferred_container)
self.update_best_combined_stream(preferred_container, force_preferred_container)
if self.is_play_trailer:
self.update_youtube_dl_info()
def update_youtube_dl_info(self):
    """Placeholder for refreshing youtube-dl derived info.

    Called from __init__ when self.is_play_trailer is set; currently a
    deliberate no-op (hook kept for a future youtube-dl integration).
    """
    pass
def update_quality_score(self, preferred_container='mp4'):
    """Compute ``self.quality_score`` from the video streams in ``self.streams``.

    Each video stream gets a base score from its resolution tier, scaled by
    its bitrate-per-pixel (normalised with a per-tier divisor) and boosted
    by 20% when its container matches ``preferred_container``. The score of
    the highest-resolution stream wins; on resolution ties the larger score
    is kept. Also sets ``self.resolution_ratio`` (width/height) from the
    highest-resolution stream.

    :param preferred_container: container name given a 1.2x score bonus.
    """
    # (bitrate-per-pixel divisor, base score) for each supported tier;
    # unknown resolutions fall through to a zero base score.
    tiers = {1080: (1.0, 120), 720: (1.22, 108), 480: (1.52, 65),
             360: (1.39, 40), 240: (2.15, 20), 144: (2.65, 10)}
    self.quality_score = 0
    max_res = 0
    for stream in self.streams:
        if stream.type != 'video':
            continue
        divisor, quality_score = tiers.get(stream.resolution, (1.0, 0))
        pixel_bitrate = stream.bitrate_per_pixel / divisor
        # Fix: compare containers case-insensitively on BOTH sides, as the
        # update_best_* methods do (previously the raw stream.container was
        # compared against the lowered preference, missing e.g. 'MP4').
        if preferred_container.lower() == stream.container.lower():
            quality_score *= 1.2
        quality_score *= pixel_bitrate
        if stream.resolution > max_res:
            self.quality_score = quality_score
            max_res = stream.resolution
            self.resolution_ratio = stream.size[0] / stream.size[1]
        elif stream.resolution == max_res and quality_score > self.quality_score:
            self.quality_score = quality_score
def load_streams(self, min_resolution=360, max_resolution=1080):
    """Populate ``self.streams`` from the pytube source.

    Wraps every raw fmt_stream in a ``Stream`` and keeps only complete
    streams whose resolution (when defined) lies inside
    [min_resolution, max_resolution]. ``self.complete`` is cleared when a
    stream requests a retry or when ``Stream.conn_errors`` is non-zero.
    """
    self.streams = []
    self.complete = True
    duration = int(self.length)
    for raw_stream in self.source.streams.fmt_streams:
        wrapped = Stream(raw_stream, duration)
        if not wrapped.complete:
            # Incomplete stream: a retry request marks the whole video
            # as incomplete; otherwise the stream is silently dropped.
            if wrapped.retry:
                self.complete = False
            continue
        res = wrapped.resolution
        if res is not None and not (min_resolution <= res <= max_resolution):
            continue
        self.streams.append(wrapped)
    if Stream.conn_errors != 0:
        self.complete = False
def update_best_video_stream(self, preferred_container='mp4', force_preferred_container=False):
    """Select the best video-only stream and store it in ``self.best_video_stream``.

    Tracks the highest-resolution video stream overall and the
    highest-resolution one in the preferred container; the preferred one is
    chosen when it matches the overall best resolution (or when forced),
    otherwise the overall best wins. May store None when no candidate exists.
    """
    wanted = preferred_container.lower()
    top_res, top_stream = 0, None
    pref_res, pref_stream = 0, None
    for candidate in self.streams:
        if candidate.type != 'video':
            continue
        if candidate.resolution > top_res:
            top_res, top_stream = candidate.resolution, candidate
        if candidate.container.lower() == wanted and candidate.resolution > pref_res:
            pref_res, pref_stream = candidate.resolution, candidate
    if force_preferred_container or top_res == pref_res:
        self.best_video_stream = pref_stream
    else:
        self.best_video_stream = top_stream
def update_best_audio_stream(self, preferred_container='mp4', force_preferred_container=False):
    """Select the best audio-only stream and store it in ``self.best_audio_stream``.

    Tracks the highest-bitrate audio stream overall and the highest-bitrate
    one in the preferred container. The preferred stream is accepted as long
    as the overall best is at most 35% louder in bitrate (or when forced);
    otherwise the overall best wins. May store None when no candidate exists.
    """
    wanted = preferred_container.lower()
    top_rate, top_stream = 0, None
    pref_rate, pref_stream = 0, None
    for candidate in self.streams:
        if candidate.type != 'audio':
            continue
        rate = candidate.bitrate
        if rate > top_rate:
            top_rate, top_stream = rate, candidate
        if candidate.container.lower() == wanted and rate > pref_rate:
            pref_rate, pref_stream = rate, candidate
    if force_preferred_container or top_rate <= pref_rate * 1.35:
        self.best_audio_stream = pref_stream
    else:
        self.best_audio_stream = top_stream
def update_best_combined_stream(self, preferred_container='mp4', force_preferred_container=False):
    """Select the best combined (audio+video) stream.

    Scores each combined stream: +10 for matching the highest combined
    resolution, +1 for matching the preferred container; the highest score
    wins. With ``force_preferred_container`` set, streams in other
    containers are excluded. Stores the winner (or None) in
    ``self.best_combined_stream``.
    """
    wanted = preferred_container.lower()
    highest_resolution = 0
    for stream in self.streams:
        if stream.type != 'combined':
            continue
        if stream.resolution > highest_resolution:
            highest_resolution = stream.resolution
    max_score = 0
    selected_stream = None
    for stream in self.streams:
        if stream.type != 'combined':
            continue
        # Fix: compare containers case-insensitively, consistently with
        # update_best_video_stream / update_best_audio_stream (the original
        # used raw, case-sensitive comparison here only).
        container_ok = stream.container.lower() == wanted
        if force_preferred_container and not container_ok:
            continue
        score = 0
        if stream.resolution == highest_resolution:
            score += 10
        if container_ok:
            score += 1
        if score > max_score:
            max_score = score
            selected_stream = stream
    self.best_combined_stream = selected_stream
| true
| true
|
f7143ea3ef7f254f2d3187ba1ded0afb09ea30ff
| 23,487
|
py
|
Python
|
tools/trainpar_deepqmri.py
|
fragrussu/qMRINet
|
418cbe22cefa2974d8a97b359324ff4c35865d22
|
[
"BSD-2-Clause"
] | 3
|
2020-10-22T23:37:36.000Z
|
2022-02-18T09:39:42.000Z
|
tools/trainpar_deepqmri.py
|
fragrussu/qMRINet
|
418cbe22cefa2974d8a97b359324ff4c35865d22
|
[
"BSD-2-Clause"
] | null | null | null |
tools/trainpar_deepqmri.py
|
fragrussu/qMRINet
|
418cbe22cefa2974d8a97b359324ff4c35865d22
|
[
"BSD-2-Clause"
] | null | null | null |
# Author: Francesco Grussu, University College London
# <f.grussu@ucl.ac.uk> <francegrussu@gmail.com>
#
# Code released under BSD Two-Clause license
#
# Copyright (c) 2020 University College London.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
### Load libraries
import argparse, os, sys
from numpy import matlib
import numpy as np
import torch
from torch import nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch import autograd
import pickle as pk
from pathlib import Path as pt
sys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )
import deepqmri
if __name__ == "__main__":

    ### Print help and parse arguments
    # NOTE(review): CLI definition for the qMRI-net training tool; all inputs
    # arrive as strings and are cast further below. Help strings are kept verbatim.
    parser = argparse.ArgumentParser(description='This program trains a qMRI-net for quantitative MRI parameter estimation. A qMRI-Nnet enables voxel-by-voxel estimation of microstructural properties from sets of MRI images aacquired by varying the MRI sequence parameters. Author: Francesco Grussu, University College London (<f.grussu@ucl.ac.uk><francegrussu@gmail.com>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
    parser.add_argument('sig_train', help='path to a pickle binary file storing the input training MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
    parser.add_argument('param_train', help='path to a pickle binary file storing the training tissue parameter data as a numpy matrix (rows: voxels; columns: parameters)')
    parser.add_argument('sig_val', help='path to a pickle binary file storing the input validation MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
    parser.add_argument('param_val', help='path to a pickle binary file storing the validation tissue parameters as a numpy matrix (rows: voxels; columns: parameters)')
    parser.add_argument('mri_model', help='string indicating the MRI model to fit (choose among: "pr_hybriddwi" for prostate hybrid diffusion-relaxometry imaging; "br_sirsmdt" for brain saturation recovery diffusion tensor on spherical mean signals; "twocompdwite" for a two-compartment diffusion-t2 relaxation model without anisotropy). Tissue parameters will be: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0')
    parser.add_argument('mri_prot', help='path to text file storing the MRI protocol. For model "pr_hybriddwi" and "twocompdwite" it must contain a matrix where the 1st row stores b-values in s/mm^2, while 2nd row echo times in ms; for model "br_sirsmdt" it must contain a matrix where the 1st row stores preparation times (saturation-inversion delay) in ms, the 2nd row inversion times (inversion-excitation delay) in ms, the 3rd row b-values in s/mm^2. For a pure inversion recovery (i.e. no saturation pulse), use a very large number for the saturation-inversion delay (at least 5 times the maximum expected T1). Different entries should be separated by spaces')
    parser.add_argument('out_base', help='base name of output directory (a string built with the network parameters will be added to the base). The output directory will contain the following output files: ** losstrain.bin, pickle binary storing the training loss as a numpy matrix (shape: epoch x batch); ** lossval.bin, pickle binary storing the validation loss as a numpy matrix (shape: epoch x 1); ** nnet_epoch0.bin, pickle binary storing the qMRI-net at initialisation; ** nnet_epoch0.pth, Pytorch binary storing the qMRI-net at initialisation; ** nnet_epoch<FINAL_EPOCH>.bin, pickle binary storing the qMRI-net at the final epoch; ** nnet_lossvalmin.bin, pickle binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); * nnet_lossvalmin.pth, Pytorch binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_sigval.bin, prediction of the validation signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_tissueval.bin, prediction of tissue parameters from validation signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin.info, text file reporting information regarding the epoch with the lowest validation loss; ** lossval_min.txt, miniimum validation loss; ** nnet_lossvalmin_sigtest.bin, prediction of the test signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information), if those signals are provided; ** nnet_lossvalmin_tissuetest.bin, prediction of tissue parameters from test signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information) if test signals are provided')
    parser.add_argument('--nn', metavar='<list>', help='array storing the number of hidden neurons, separated by hyphens (example: 30-15-8). The first number (input neurons) must equal the number of measurements in the protocol (Nmeas); the last number (output neurons) must equal the number of parameters in the model (Npar, 9 for model "pr_hybriddwi", 4 for model "br_sirsmdt", 7 for model "twocompdwite"). Default: Nmeas-(Npar + (Nmeas minus Npar))/2-Npar, where Nmeas is the number of MRI measurements and Npar is the number of tissue parameters for the signal model to fit')
    parser.add_argument('--pdrop', metavar='<value>', default='0.0', help='dropout probability in each layer of the neural network. Default: 0.0')
    parser.add_argument('--noepoch', metavar='<value>', default='500', help='number of epochs used for training. Default: 500')
    parser.add_argument('--lrate', metavar='<value>', default='0.001', help='learning rate. Default: 0.001')
    parser.add_argument('--mbatch', metavar='<value>', help='number of voxels in each training mini-batch. Default: 1/80 of the total number of training voxels (minimum: 2 voxels)')
    parser.add_argument('--seed', metavar='<value>', default='19102018', help='integer used as a seed for Numpy and PyTorch random number generators. Default: 19102018')
    parser.add_argument('--nwork', metavar='<value>', default='0', help='number of workers for data loader. Default: 0')
    parser.add_argument('--dtest', metavar='<file>', help='path to an option input pickle binary file storing test MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
    parser.add_argument('--parmin', metavar='<value>', help='list of lower bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 0.5,0.2,250,0.5 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
    parser.add_argument('--parmax', metavar='<value>', help='list of upper bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 2.4,0.9,3000,5.0 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
    args = parser.parse_args()
    ### Get some of the inputs
    # Cast CLI strings to the numeric hyper-parameters used below.
    pdrop = float(args.pdrop)
    noepoch = int(args.noepoch)
    lrate = float(args.lrate)
    seed = int(args.seed)
    nwork = int(args.nwork)
    mrimodel = args.mri_model

    ### Print some information
    print('')
    print('')
    print('********************************************************************')
    print('                       TRAIN A qMRI-NET (qmripar CLASS)                    ')
    print('********************************************************************')
    print('')
    print('** Input training MRI signals: {}'.format(args.sig_train))
    print('** Input training tissue parameters: {}'.format(args.param_train))
    print('** Input validation MRI signals: {}'.format(args.sig_val))
    print('** Input validation tissue parameters: {}'.format(args.param_val))
    if args.dtest is not None:
        print('** Input test MRI signals: {}'.format(args.dtest))

    ### Load training MRI signals (voxels x measurements, pickled numpy matrix)
    fh = open(args.sig_train,'rb')
    datatrain = pk.load(fh)
    fh.close()
    nvox_train = datatrain.shape[0]
    nmeas_train = datatrain.shape[1]

    ### Load validation MRI signals
    fh = open(args.sig_val,'rb')
    dataval = pk.load(fh)
    fh.close()
    nvox_val = dataval.shape[0]
    # Validation must have the same number of measurements as training.
    if dataval.shape[1]!=datatrain.shape[1]:
        raise RuntimeError('the number of MRI measurements in the validation set differs from the training set!')

    ### Load test MRI signals (optional)
    if args.dtest is not None:
        fh = open(args.dtest,'rb')
        datatest = np.float32(pk.load(fh))
        fh.close()
        if datatest.shape[1]!=datatrain.shape[1]:
            raise RuntimeError('the number of MRI measurements in the test set differs from the training set!')

    ### Load training tissue parameters (voxels x parameters)
    fh = open(args.param_train,'rb')
    prmtrain = pk.load(fh)
    npar_train = prmtrain.shape[1]
    fh.close()
    if prmtrain.shape[0]!=datatrain.shape[0]:
        raise RuntimeError('the number of voxels in the training parameters differs from the training MRI signals!')

    ### Load validation tissue parameters
    fh = open(args.param_val,'rb')
    prmval = pk.load(fh)
    fh.close()
    if prmval.shape[0]!=dataval.shape[0]:
        raise RuntimeError('the number of voxels in the validation parameters differs from the validation MRI signals!')
    if prmval.shape[1]!=prmtrain.shape[1]:
        raise RuntimeError('the number of validation parameters differs from the number of training parameters!')
    ### Get number of mini-batches
    if args.mbatch is None:
        mbatch = int(float(datatrain.shape[0]) / 80.0) # Default: 1/80 of the total number of training voxels
    else:
        mbatch = int(args.mbatch)
    # Clamp the mini-batch size to [2, n_training_voxels].
    # NOTE(review): original indentation was lost in this copy — the clamps are
    # assumed to apply to both the default and the user-supplied value; confirm
    # against the upstream repository.
    if (mbatch>datatrain.shape[0]):
        mbatch = datatrain.shape[0]
    if(mbatch<2):
        mbatch = int(2)

    ### Load MRI protocol (text matrix; layout depends on the chosen model)
    try:
        mriprot = np.loadtxt(args.mri_prot)
    except:
        raise RuntimeError('the format of the MRI protocol is not understood!')

    ### Check that MRI model exists
    if ( (mrimodel!='pr_hybriddwi') and (mrimodel!='br_sirsmdt') and (mrimodel!='twocompdwite') ):
        raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
    # Index of the s0 (proton density) column in the tissue parameter matrix.
    if (mrimodel=='pr_hybriddwi'):
        s0idx = 8
    elif (mrimodel=='br_sirsmdt'):
        s0idx = 3
    elif (mrimodel=='twocompdwite'):
        s0idx = 6

    ### Get specifics for hidden layers
    if args.nn is None:
        # Default architecture: Nmeas -> (Npar + (Nmeas - Npar)/2) -> Npar.
        if (mrimodel=='pr_hybriddwi'):
            npars = 9
        elif (mrimodel=='br_sirsmdt'):
            npars = 4
        elif (mrimodel=='twocompdwite'):
            npars = 7
        else:
            raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
        nhidden = np.array([int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars)])
        nhidden_str = '{}-{}-{}'.format( int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars) )
    else:
        # User-specified architecture, e.g. "30-15-8".
        nhidden = (args.nn).split('-')
        nhidden = np.array( list(map( int,nhidden )) )
        nhidden_str = args.nn

    ### Get optional user-defined bounds for tissue parameters
    if (args.parmin is not None) or (args.parmax is not None):
        if (args.parmin is not None) and (args.parmax is None):
            raise RuntimeError('you need to set both parmin and parmax options simultaneously')
        if (args.parmax is not None) and (args.parmin is None):
            raise RuntimeError('you need to set both parmin and parmax options simultaneously')
        # Lower bound
        pminbound = (args.parmin).split(',')
        pminbound = np.array( list(map( float, pminbound )) )
        # Upper bound
        pmaxbound = (args.parmax).split(',')
        pmaxbound = np.array( list(map( float, pmaxbound )) )
    ### Create output base name (encodes all hyper-parameters for traceability)
    out_base_dir = '{}_nhidden{}_pdrop{}_noepoch{}_lr{}_mbatch{}_seed{}'.format(args.out_base,nhidden_str,pdrop,noepoch,lrate,mbatch,seed)
    if(os.path.isdir(out_base_dir)==False):
        os.mkdir(out_base_dir)

    ### Print some more information
    print('** Output directory: {}'.format(out_base_dir))
    print('')
    print('')
    print('PARAMETERS')
    print('')
    print('** Hidden neurons: {}'.format(nhidden))
    print('** Dropout probability: {}'.format(pdrop))
    print('** Number of epochs: {}'.format(noepoch))
    print('** Learning rate: {}'.format(lrate))
    print('** Number of voxels in a mini-batch: {}'.format(mbatch))
    print('** Seed: {}'.format(seed))
    print('** Number of workers for data loader: {}'.format(nwork))

    ### Set random seeds
    np.random.seed(seed) # Random seed for reproducibility: NumPy
    torch.manual_seed(seed) # Random seed for reproducibility: PyTorch

    ### Normalise MRI signals and convert to single precision
    # Each voxel is divided by its maximum signal across measurements.
    max_val_train = np.transpose( matlib.repmat(np.max(datatrain,axis=1),nmeas_train,1) )
    datatrain = np.float32( datatrain / max_val_train )
    max_val_val = np.transpose( matlib.repmat(np.max(dataval,axis=1),nmeas_train,1) )
    dataval = np.float32( dataval / max_val_val )
    if args.dtest is not None:
        max_val_test = np.transpose( matlib.repmat(np.max(datatest,axis=1),nmeas_train,1) )
        datatest = np.float32( datatest / max_val_test )
    prmtrain = np.float32(prmtrain)
    prmval = np.float32(prmval)

    ### Create mini-batches on training data with data loader
    # Signals and target parameters are concatenated column-wise so that one
    # loader shuffles them together; they are split again inside the loop.
    loadertrain = DataLoader(np.concatenate((datatrain,prmtrain),axis=1), batch_size=mbatch, shuffle=True, num_workers=nwork)

    ### Allocate memory for losses
    nobatch=0 # Count how many mini-batches of size mbatch we created
    for signals in loadertrain:
        nobatch = nobatch+1
    losstrain = np.zeros((noepoch,nobatch)) + np.nan
    lossval = np.zeros((noepoch,1)) + np.nan

    ### Instantiate the network and training objects, and save the instantiated network
    nnet = deepqmri.qmripar(nhidden,pdrop,mrimodel,mriprot).cpu() # Instantiate neural network
    if (args.parmin is not None) or (args.parmax is not None):
        nnet.changelim(pminbound,pmaxbound) # Change tissue parameter ranges
    print('** Tissue parameter names: {}'.format(nnet.param_name))
    print('** Tissue parameter lower bounds: {}'.format(nnet.param_min))
    print('** Tissue parameter upper bounds: {}'.format(nnet.param_max))
    print('')
    print('')
    nnetloss = nn.MSELoss() # Loss: L2 norm (mean squared error, Gaussian noise)
    nnetopt = torch.optim.Adam(nnet.parameters(), lr=lrate) # Network trained with ADAM optimiser
    torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch0_net.pth') ) # Save network at epoch 0 (i.e. at initialisation)
    nnet_file = open(os.path.join(out_base_dir,'epoch0_net.bin'),'wb')
    pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
    nnet_file.close()

    ### Create normalisation tensors for model parameters
    # Min-max scaling so that each parameter contributes comparably to the loss:
    # p_norm = p * slope + offset maps [param_min, param_max] -> [0, 1].
    slope_norm_tr = np.ones((mbatch , npar_train))
    offset_norm_tr = np.ones((mbatch , npar_train))
    for pp in range(0,npar_train):
        slope_norm_tr[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
        offset_norm_tr[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
    slope_norm_tr = Tensor(np.float32(slope_norm_tr))
    offset_norm_tr = Tensor(np.float32(offset_norm_tr))
    slope_norm_val = np.ones((nvox_val , npar_train))
    offset_norm_val = np.ones((nvox_val , npar_train))
    for pp in range(0,npar_train):
        slope_norm_val[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
        offset_norm_val[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
    slope_norm_val = Tensor(np.float32(slope_norm_val))
    offset_norm_val = Tensor(np.float32(offset_norm_val))
    ### Run training
    # Loop over epochs
    loss_val_prev = np.inf
    for epoch in range(noepoch):

        print('        EPOCH   {}/{}'.format(epoch+1,noepoch))
        print('')

        # Loop over mini-batches at a fixed epoch
        minibatch_id = 0
        for signals in loadertrain:

            # Pass the mini-batch through the network and store the training loss
            output = nnet( Tensor(signals[:,0:nmeas_train]) ) # Pass MRI measurements and estimate tissue parameters
            try:
                # Loss is computed on min-max normalised parameters (see slope/offset tensors above)
                lossmeas_train = nnetloss(Tensor(output)*slope_norm_tr + offset_norm_tr, Tensor(signals[:,nmeas_train:nmeas_train+npar_train])*slope_norm_tr + offset_norm_tr) # Training loss
            except:
                # slope_norm_tr is sized (mbatch x npar): a partial last batch breaks the broadcast
                raise RuntimeError('The number of training voxels must be a multiple of the size of the mini-batch!')

            # Back propagation
            nnetopt.zero_grad() # Evaluate loss gradient with respect to network parameters at the output layer
            lossmeas_train.backward() # Backpropagate the loss gradient through previous layers
            nnetopt.step() # Update network parameters

            # Store loss for the current mini-batch of training
            losstrain[epoch,minibatch_id] = Tensor.numpy(lossmeas_train.data)

            # Update mini-batch counter
            minibatch_id = minibatch_id + 1

        # Run validation
        nnet.eval() # Set network to evaluation mode (deactivates dropout)
        tissueval_nnet = nnet( Tensor(dataval) ) # Output of full network (predicted tissue parameters)
        dataval_nnet = nnet.getsignals( Tensor(tissueval_nnet) ) # Estimate MRI signals
        dataval_nnet = dataval_nnet.detach().numpy()
        max_val_val_out = np.transpose( matlib.repmat(np.max(dataval_nnet,axis=1),nmeas_train,1) )
        lossmeas_val = nnetloss( Tensor(tissueval_nnet)*slope_norm_val + offset_norm_val , Tensor(prmval)*slope_norm_val + offset_norm_val ) # Validation loss

        # Store validation loss
        lossval[epoch,0] = Tensor.numpy(lossmeas_val.data)

        # Save trained network at current epoch if validation loss has decreased
        if(Tensor.numpy(lossmeas_val.data)<=loss_val_prev):
            print('             ... validation loss has decreased. Saving net...')
            # Save network
            torch.save( nnet.state_dict(), os.path.join(out_base_dir,'lossvalmin_net.pth') )
            nnet_file = open(os.path.join(out_base_dir,'lossvalmin_net.bin'),'wb')
            pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
            nnet_file.close()
            # Save information on the epoch
            nnet_text = open(os.path.join(out_base_dir,'lossvalmin.info'),'w')
            nnet_text.write('Epoch {} (indices starting from 0)'.format(epoch));
            nnet_text.close();
            # Update value of best validation loss so far
            loss_val_prev = Tensor.numpy(lossmeas_val.data)
            # Save predicted validation tissue parameters
            tissueval_nnet = tissueval_nnet.detach().numpy()
            tissueval_nnet[:,s0idx] = (max_val_val[:,0]/max_val_val_out[:,0])*tissueval_nnet[:,s0idx] # Rescale s0 (any column of max_val_val would work)
            tissueval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissueval.bin'),'wb')
            pk.dump(tissueval_nnet,tissueval_nnet_file,pk.HIGHEST_PROTOCOL)
            tissueval_nnet_file.close()
            # Save predicted validation signals
            dataval_nnet = (max_val_val/max_val_val_out)*dataval_nnet
            dataval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigval.bin'),'wb')
            pk.dump(dataval_nnet,dataval_nnet_file,pk.HIGHEST_PROTOCOL)
            dataval_nnet_file.close()
            # Analyse test data if provided
            if args.dtest is not None:
                # Get neuronal activations as well as predicted test tissue parameters and test MRI signals
                tissuetest_nnet = nnet( Tensor(datatest) ) # Output of network (estimated tissue parameters)
                datatest_nnet = nnet.getsignals( Tensor(tissuetest_nnet) ) # Predicted MRI signals
                datatest_nnet = datatest_nnet.detach().numpy()
                max_val_test_out = np.transpose( matlib.repmat(np.max(datatest_nnet,axis=1),nmeas_train,1) )
                # Save predicted test tissue parameters
                tissuetest_nnet = tissuetest_nnet.detach().numpy()
                tissuetest_nnet[:,s0idx] = (max_val_test[:,0]/max_val_test_out[:,0])*tissuetest_nnet[:,s0idx] # Rescale s0 (any column of max_val_test works)
                tissuetest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissuetest.bin'),'wb')
                pk.dump(tissuetest_nnet,tissuetest_nnet_file,pk.HIGHEST_PROTOCOL)
                tissuetest_nnet_file.close()
                # Save predicted test signals
                datatest_nnet = (max_val_test/max_val_test_out)*datatest_nnet # Rescale signal
                datatest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigtest.bin'),'wb')
                pk.dump(datatest_nnet,datatest_nnet_file,pk.HIGHEST_PROTOCOL)
                datatest_nnet_file.close()

        # Set network back to training mode
        nnet.train()

        # Print some information
        print('')
        print('        TRAINING INFO:')
        print('        Trainig loss: {:.12f}; validation loss: {:.12f}'.format(Tensor.numpy(lossmeas_train.data), Tensor.numpy(lossmeas_val.data)) )
        print('')

    # Save the final network
    nnet.eval()
    torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch{}_net.pth'.format(noepoch)) )
    nnet_file = open(os.path.join(out_base_dir,'epoch{}_net.bin'.format(noepoch)),'wb')
    pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
    nnet_file.close()

    # Save the training and validation loss
    losstrain_file = open(os.path.join(out_base_dir,'losstrain.bin'),'wb')
    pk.dump(losstrain,losstrain_file,pk.HIGHEST_PROTOCOL)
    losstrain_file.close()
    lossval_file = open(os.path.join(out_base_dir,'lossval.bin'),'wb')
    pk.dump(lossval,lossval_file,pk.HIGHEST_PROTOCOL)
    lossval_file.close()
    np.savetxt(os.path.join(out_base_dir,'lossval_min.txt'), [np.nanmin(lossval)], fmt='%.12f', delimiter=' ')
| 60.689922
| 2,100
| 0.732788
|
mpy import matlib
import numpy as np
import torch
from torch import nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch import autograd
import pickle as pk
from pathlib import Path as pt
sys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )
import deepqmri
if __name__ == "__main__":
ns a qMRI-net for quantitative MRI parameter estimation. A qMRI-Nnet enables voxel-by-voxel estimation of microstructural properties from sets of MRI images aacquired by varying the MRI sequence parameters. Author: Francesco Grussu, University College London (<f.grussu@ucl.ac.uk><francegrussu@gmail.com>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
parser.add_argument('sig_train', help='path to a pickle binary file storing the input training MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('param_train', help='path to a pickle binary file storing the training tissue parameter data as a numpy matrix (rows: voxels; columns: parameters)')
parser.add_argument('sig_val', help='path to a pickle binary file storing the input validation MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('param_val', help='path to a pickle binary file storing the validation tissue parameters as a numpy matrix (rows: voxels; columns: parameters)')
parser.add_argument('mri_model', help='string indicating the MRI model to fit (choose among: "pr_hybriddwi" for prostate hybrid diffusion-relaxometry imaging; "br_sirsmdt" for brain saturation recovery diffusion tensor on spherical mean signals; "twocompdwite" for a two-compartment diffusion-t2 relaxation model without anisotropy). Tissue parameters will be: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0')
parser.add_argument('mri_prot', help='path to text file storing the MRI protocol. For model "pr_hybriddwi" and "twocompdwite" it must contain a matrix where the 1st row stores b-values in s/mm^2, while 2nd row echo times in ms; for model "br_sirsmdt" it must contain a matrix where the 1st row stores preparation times (saturation-inversion delay) in ms, the 2nd row inversion times (inversion-excitation delay) in ms, the 3rd row b-values in s/mm^2. For a pure inversion recovery (i.e. no saturation pulse), use a very large number for the saturation-inversion delay (at least 5 times the maximum expected T1). Different entries should be separated by spaces')
parser.add_argument('out_base', help='base name of output directory (a string built with the network parameters will be added to the base). The output directory will contain the following output files: ** losstrain.bin, pickle binary storing the training loss as a numpy matrix (shape: epoch x batch); ** lossval.bin, pickle binary storing the validation loss as a numpy matrix (shape: epoch x 1); ** nnet_epoch0.bin, pickle binary storing the qMRI-net at initialisation; ** nnet_epoch0.pth, Pytorch binary storing the qMRI-net at initialisation; ** nnet_epoch<FINAL_EPOCH>.bin, pickle binary storing the qMRI-net at the final epoch; ** nnet_lossvalmin.bin, pickle binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); * nnet_lossvalmin.pth, Pytorch binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_sigval.bin, prediction of the validation signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_tissueval.bin, prediction of tissue parameters from validation signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin.info, text file reporting information regarding the epoch with the lowest validation loss; ** lossval_min.txt, miniimum validation loss; ** nnet_lossvalmin_sigtest.bin, prediction of the test signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information), if those signals are provided; ** nnet_lossvalmin_tissuetest.bin, prediction of tissue parameters from test signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest 
validation loss, check nnet_lossvalmin.info file for more information) if test signals are provided')
parser.add_argument('--nn', metavar='<list>', help='array storing the number of hidden neurons, separated by hyphens (example: 30-15-8). The first number (input neurons) must equal the number of measurements in the protocol (Nmeas); the last number (output neurons) must equal the number of parameters in the model (Npar, 9 for model "pr_hybriddwi", 4 for model "br_sirsmdt", 7 for model "twocompdwite"). Default: Nmeas-(Npar + (Nmeas minus Npar))/2-Npar, where Nmeas is the number of MRI measurements and Npar is the number of tissue parameters for the signal model to fit')
parser.add_argument('--pdrop', metavar='<value>', default='0.0', help='dropout probability in each layer of the neural network. Default: 0.0')
parser.add_argument('--noepoch', metavar='<value>', default='500', help='number of epochs used for training. Default: 500')
parser.add_argument('--lrate', metavar='<value>', default='0.001', help='learning rate. Default: 0.001')
parser.add_argument('--mbatch', metavar='<value>', help='number of voxels in each training mini-batch. Default: 1/80 of the total number of training voxels (minimum: 2 voxels)')
parser.add_argument('--seed', metavar='<value>', default='19102018', help='integer used as a seed for Numpy and PyTorch random number generators. Default: 19102018')
parser.add_argument('--nwork', metavar='<value>', default='0', help='number of workers for data loader. Default: 0')
parser.add_argument('--dtest', metavar='<file>', help='path to an option input pickle binary file storing test MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('--parmin', metavar='<value>', help='list of lower bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 0.5,0.2,250,0.5 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
parser.add_argument('--parmax', metavar='<value>', help='list of upper bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 2.4,0.9,3000,5.0 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
args = parser.parse_args()
oepoch)
lrate = float(args.lrate)
seed = int(args.seed)
nwork = int(args.nwork)
mrimodel = args.mri_model
**************************************************')
print(' TRAIN A qMRI-NET (qmripar CLASS) ')
print('********************************************************************')
print('')
print('** Input training MRI signals: {}'.format(args.sig_train))
print('** Input training tissue parameters: {}'.format(args.param_train))
print('** Input validation MRI signals: {}'.format(args.sig_val))
print('** Input validation tissue parameters: {}'.format(args.param_val))
if args.dtest is not None:
print('** Input test MRI signals: {}'.format(args.dtest))
h)
fh.close()
nvox_train = datatrain.shape[0]
nmeas_train = datatrain.shape[1]
lose()
nvox_val = dataval.shape[0]
if dataval.shape[1]!=datatrain.shape[1]:
raise RuntimeError('the number of MRI measurements in the validation set differs from the training set!')
test,'rb')
datatest = np.float32(pk.load(fh))
fh.close()
if datatest.shape[1]!=datatrain.shape[1]:
raise RuntimeError('the number of MRI measurements in the test set differs from the training set!')
ain = prmtrain.shape[1]
fh.close()
if prmtrain.shape[0]!=datatrain.shape[0]:
raise RuntimeError('the number of voxels in the training parameters differs from the training MRI signals!')
prmval.shape[0]!=dataval.shape[0]:
raise RuntimeError('the number of voxels in the validation parameters differs from the validation MRI signals!')
if prmval.shape[1]!=prmtrain.shape[1]:
raise RuntimeError('the number of validation parameters differs from the number of training parameters!')
shape[0]) / 80.0)
else:
mbatch = int(args.mbatch)
if (mbatch>datatrain.shape[0]):
mbatch = datatrain.shape[0]
if(mbatch<2):
mbatch = int(2)
prot)
except:
raise RuntimeError('the format of the MRI protocol is not understood!')
t') and (mrimodel!='twocompdwite') ):
raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
if (mrimodel=='pr_hybriddwi'):
s0idx = 8
elif (mrimodel=='br_sirsmdt'):
s0idx = 3
elif (mrimodel=='twocompdwite'):
s0idx = 6
9
elif (mrimodel=='br_sirsmdt'):
npars = 4
elif (mrimodel=='twocompdwite'):
npars = 7
else:
raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
nhidden = np.array([int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars)])
nhidden_str = '{}-{}-{}'.format( int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars) )
else:
nhidden = (args.nn).split('-')
nhidden = np.array( list(map( int,nhidden )) )
nhidden_str = args.nn
s None):
raise RuntimeError('you need to set both parmin and parmax options simultaneously')
if (args.parmax is not None) and (args.parmin is None):
raise RuntimeError('you need to set both parmin and parmax options simultaneously')
pminbound = (args.parmin).split(',')
pminbound = np.array( list(map( float, pminbound )) )
pmaxbound = (args.parmax).split(',')
pmaxbound = np.array( list(map( float, pmaxbound )) )
{}_mbatch{}_seed{}'.format(args.out_base,nhidden_str,pdrop,noepoch,lrate,mbatch,seed)
if(os.path.isdir(out_base_dir)==False):
os.mkdir(out_base_dir)
int('')
print('')
print('PARAMETERS')
print('')
print('** Hidden neurons: {}'.format(nhidden))
print('** Dropout probability: {}'.format(pdrop))
print('** Number of epochs: {}'.format(noepoch))
print('** Learning rate: {}'.format(lrate))
print('** Number of voxels in a mini-batch: {}'.format(mbatch))
print('** Seed: {}'.format(seed))
print('** Number of workers for data loader: {}'.format(nwork))
manual_seed(seed)
( datatrain / max_val_train )
max_val_val = np.transpose( matlib.repmat(np.max(dataval,axis=1),nmeas_train,1) )
dataval = np.float32( dataval / max_val_val )
if args.dtest is not None:
max_val_test = np.transpose( matlib.repmat(np.max(datatest,axis=1),nmeas_train,1) )
datatest = np.float32( datatest / max_val_test )
prmtrain = np.float32(prmtrain)
prmval = np.float32(prmval)
rkers=nwork)
obatch+1
losstrain = np.zeros((noepoch,nobatch)) + np.nan
lossval = np.zeros((noepoch,1)) + np.nan
pmaxbound)
print('** Tissue parameter names: {}'.format(nnet.param_name))
print('** Tissue parameter lower bounds: {}'.format(nnet.param_min))
print('** Tissue parameter upper bounds: {}'.format(nnet.param_max))
print('')
print('')
nnetloss = nn.MSELoss()
nnetopt = torch.optim.Adam(nnet.parameters(), lr=lrate)
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch0_net.pth') )
nnet_file = open(os.path.join(out_base_dir,'epoch0_net.bin'),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
pp in range(0,npar_train):
slope_norm_tr[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
offset_norm_tr[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
slope_norm_tr = Tensor(np.float32(slope_norm_tr))
offset_norm_tr = Tensor(np.float32(offset_norm_tr))
slope_norm_val = np.ones((nvox_val , npar_train))
offset_norm_val = np.ones((nvox_val , npar_train))
for pp in range(0,npar_train):
slope_norm_val[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
offset_norm_val[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
slope_norm_val = Tensor(np.float32(slope_norm_val))
offset_norm_val = Tensor(np.float32(offset_norm_val))
or epoch in range(noepoch):
print(' EPOCH {}/{}'.format(epoch+1,noepoch))
print('')
minibatch_id = 0
for signals in loadertrain:
output = nnet( Tensor(signals[:,0:nmeas_train]) )
try:
lossmeas_train = nnetloss(Tensor(output)*slope_norm_tr + offset_norm_tr, Tensor(signals[:,nmeas_train:nmeas_train+npar_train])*slope_norm_tr + offset_norm_tr)
except:
raise RuntimeError('The number of training voxels must be a multiple of the size of the mini-batch!')
nnetopt.zero_grad()
lossmeas_train.backward()
nnetopt.step()
losstrain[epoch,minibatch_id] = Tensor.numpy(lossmeas_train.data)
minibatch_id = minibatch_id + 1
nnet.eval()
tissueval_nnet = nnet( Tensor(dataval) )
dataval_nnet = nnet.getsignals( Tensor(tissueval_nnet) )
dataval_nnet = dataval_nnet.detach().numpy()
max_val_val_out = np.transpose( matlib.repmat(np.max(dataval_nnet,axis=1),nmeas_train,1) )
lossmeas_val = nnetloss( Tensor(tissueval_nnet)*slope_norm_val + offset_norm_val , Tensor(prmval)*slope_norm_val + offset_norm_val )
lossval[epoch,0] = Tensor.numpy(lossmeas_val.data)
if(Tensor.numpy(lossmeas_val.data)<=loss_val_prev):
print(' ... validation loss has decreased. Saving net...')
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'lossvalmin_net.pth') )
nnet_file = open(os.path.join(out_base_dir,'lossvalmin_net.bin'),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
nnet_text = open(os.path.join(out_base_dir,'lossvalmin.info'),'w')
nnet_text.write('Epoch {} (indices starting from 0)'.format(epoch));
nnet_text.close();
loss_val_prev = Tensor.numpy(lossmeas_val.data)
tissueval_nnet = tissueval_nnet.detach().numpy()
tissueval_nnet[:,s0idx] = (max_val_val[:,0]/max_val_val_out[:,0])*tissueval_nnet[:,s0idx]
tissueval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissueval.bin'),'wb')
pk.dump(tissueval_nnet,tissueval_nnet_file,pk.HIGHEST_PROTOCOL)
tissueval_nnet_file.close()
dataval_nnet = (max_val_val/max_val_val_out)*dataval_nnet
dataval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigval.bin'),'wb')
pk.dump(dataval_nnet,dataval_nnet_file,pk.HIGHEST_PROTOCOL)
dataval_nnet_file.close()
if args.dtest is not None:
tissuetest_nnet = nnet( Tensor(datatest) )
datatest_nnet = nnet.getsignals( Tensor(tissuetest_nnet) )
datatest_nnet = datatest_nnet.detach().numpy()
max_val_test_out = np.transpose( matlib.repmat(np.max(datatest_nnet,axis=1),nmeas_train,1) )
tissuetest_nnet = tissuetest_nnet.detach().numpy()
tissuetest_nnet[:,s0idx] = (max_val_test[:,0]/max_val_test_out[:,0])*tissuetest_nnet[:,s0idx]
tissuetest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissuetest.bin'),'wb')
pk.dump(tissuetest_nnet,tissuetest_nnet_file,pk.HIGHEST_PROTOCOL)
tissuetest_nnet_file.close()
datatest_nnet = (max_val_test/max_val_test_out)*datatest_nnet
datatest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigtest.bin'),'wb')
pk.dump(datatest_nnet,datatest_nnet_file,pk.HIGHEST_PROTOCOL)
datatest_nnet_file.close()
nnet.train()
print('')
print(' TRAINING INFO:')
print(' Trainig loss: {:.12f}; validation loss: {:.12f}'.format(Tensor.numpy(lossmeas_train.data), Tensor.numpy(lossmeas_val.data)) )
print('')
nnet.eval()
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch{}_net.pth'.format(noepoch)) )
nnet_file = open(os.path.join(out_base_dir,'epoch{}_net.bin'.format(noepoch)),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
losstrain_file = open(os.path.join(out_base_dir,'losstrain.bin'),'wb')
pk.dump(losstrain,losstrain_file,pk.HIGHEST_PROTOCOL)
losstrain_file.close()
lossval_file = open(os.path.join(out_base_dir,'lossval.bin'),'wb')
pk.dump(lossval,lossval_file,pk.HIGHEST_PROTOCOL)
lossval_file.close()
np.savetxt(os.path.join(out_base_dir,'lossval_min.txt'), [np.nanmin(lossval)], fmt='%.12f', delimiter=' ')
| true
| true
|
f71442b16a12f46a840756d2038ff554248234be
| 9,869
|
py
|
Python
|
ansible/lib/ansible/modules/core/cloud/google/gce_pd.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/core/cloud/google/gce_pd.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/modules/core/cloud/google/gce_pd.py
|
kiv-box/redis
|
966a0c3f0a51282cd173b42a6e249d23f4e89dec
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
# Import the libcloud GCE driver defensively so this module can still load
# (and report a clear error via HAS_LIBCLOUD in main()) on hosts where
# apache-libcloud is not installed.
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
         ResourceExistsError, ResourceNotFoundError, ResourceInUseError
    _ = Provider.GCE  # touch the GCE provider to verify this libcloud build supports it
    HAS_LIBCLOUD = True
except ImportError:
    # libcloud (or a libcloud with GCE support) is missing; main() will
    # fail_json with an actionable message.
    HAS_LIBCLOUD = False
def main():
    """Entry point for the gce_pd Ansible module.

    Creates or deletes an unformatted GCE persistent disk and optionally
    attaches it to / detaches it from a running instance, then exits the
    module with a JSON result describing what changed.  All GCE calls go
    through the libcloud driver returned by gce_connect().
    """
    module = AnsibleModule(
        argument_spec = dict(
            detach_only = dict(type='bool'),
            instance_name = dict(),
            mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
            name = dict(required=True),
            size_gb = dict(default=10),
            disk_type = dict(default='pd-standard'),
            image = dict(),
            snapshot = dict(),
            state = dict(default='present'),
            zone = dict(default='us-central1-b'),
            service_account_email = dict(),
            pem_file = dict(),
            credentials_file = dict(),
            project_id = dict(),
        )
    )
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')

    gce = gce_connect(module)

    detach_only = module.params.get('detach_only')
    instance_name = module.params.get('instance_name')
    mode = module.params.get('mode')
    name = module.params.get('name')
    size_gb = module.params.get('size_gb')
    disk_type = module.params.get('disk_type')
    image = module.params.get('image')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    zone = module.params.get('zone')

    # Detaching only makes sense relative to a specific instance.
    if detach_only and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when detaching a disk',
            changed=False)

    disk = inst = None
    changed = is_attached = False

    json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
    if detach_only:
        json_output['detach_only'] = True
        json_output['detached_from_instance'] = instance_name

    if instance_name:
        # user wants to attach/detach from an existing instance
        try:
            inst = gce.ex_get_node(instance_name, zone)
            # is the disk already attached to this instance?
            for d in inst.extra['disks']:
                if d['deviceName'] == name:
                    is_attached = True
                    json_output['attached_mode'] = d['mode']
                    json_output['attached_to_instance'] = inst.name
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not swallowed.  A missing instance is still non-fatal here;
            # it is reported below once we know the disk should exist.
            pass

    # find the disk if it already exists
    try:
        disk = gce.ex_get_volume(name)
        json_output['size_gb'] = int(disk.size)
    except ResourceNotFoundError:
        pass  # not found is fine: we may be about to create it
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    # user wants a disk to exist. If "instance_name" is supplied the user
    # also wants it attached
    if state in ['active', 'present']:
        if not size_gb:
            module.fail_json(msg="Must supply a size_gb", changed=False)
        try:
            size_gb = int(round(float(size_gb)))
            if size_gb < 1:
                raise ValueError('size_gb must be at least 1')
        except (ValueError, TypeError):
            # Narrowed from a bare "except:" around "raise Exception":
            # float()/int() raise ValueError or TypeError on bad input.
            module.fail_json(msg="Must supply a size_gb larger than 1 GB",
                             changed=False)

        if instance_name and inst is None:
            module.fail_json(msg='Instance %s does not exist in zone %s' % (
                instance_name, zone), changed=False)

        if not disk:
            # image and snapshot are mutually exclusive disk sources
            if image is not None and snapshot is not None:
                module.fail_json(
                    msg='Cannot give both image (%s) and snapshot (%s)' % (
                        image, snapshot), changed=False)
            lc_image = None
            lc_snapshot = None
            if image is not None:
                lc_image = gce.ex_get_image(image)
            elif snapshot is not None:
                lc_snapshot = gce.ex_get_snapshot(snapshot)
            try:
                disk = gce.create_volume(
                    size_gb, name, location=zone, image=lc_image,
                    snapshot=lc_snapshot, ex_disk_type=disk_type)
            except ResourceExistsError:
                pass  # created concurrently by someone else; treat as present
            except QuotaExceededError:
                module.fail_json(msg='Requested disk size exceeds quota',
                                 changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['size_gb'] = size_gb
            if image is not None:
                json_output['image'] = image
            if snapshot is not None:
                json_output['snapshot'] = snapshot
            changed = True

        if inst and not is_attached:
            try:
                gce.attach_volume(inst, disk, device=name, ex_mode=mode)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['attached_to_instance'] = inst.name
            json_output['attached_mode'] = mode
            changed = True

    # user wants to delete a disk (or perhaps just detach it).
    if state in ['absent', 'deleted'] and disk:
        if inst and is_attached:
            try:
                gce.detach_volume(disk, ex_node=inst)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
        if not detach_only:
            try:
                gce.destroy_volume(disk)
            except ResourceInUseError as e:
                module.fail_json(msg=str(e.value), changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True

    json_output['changed'] = changed
    module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| 32.251634
| 95
| 0.616375
|
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
    """Entry point for the gce_pd Ansible module.

    Creates or deletes a GCE persistent disk and optionally attaches it
    to / detaches it from a running instance, then exits the module with
    a JSON result describing what changed.
    """
    module = AnsibleModule(
        argument_spec = dict(
            detach_only = dict(type='bool'),
            instance_name = dict(),
            mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
            name = dict(required=True),
            size_gb = dict(default=10),
            disk_type = dict(default='pd-standard'),
            image = dict(),
            snapshot = dict(),
            state = dict(default='present'),
            zone = dict(default='us-central1-b'),
            service_account_email = dict(),
            pem_file = dict(),
            credentials_file = dict(),
            project_id = dict(),
        )
    )
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
    gce = gce_connect(module)
    detach_only = module.params.get('detach_only')
    instance_name = module.params.get('instance_name')
    mode = module.params.get('mode')
    name = module.params.get('name')
    size_gb = module.params.get('size_gb')
    disk_type = module.params.get('disk_type')
    image = module.params.get('image')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    zone = module.params.get('zone')
    # Detaching only makes sense relative to a specific instance.
    if detach_only and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when detaching a disk',
            changed=False)
    disk = inst = None
    changed = is_attached = False
    json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
    if detach_only:
        json_output['detach_only'] = True
        json_output['detached_from_instance'] = instance_name
    if instance_name:
        # The user wants to attach to / detach from an existing instance;
        # look it up and record whether the disk is already attached.
        try:
            inst = gce.ex_get_node(instance_name, zone)
            for d in inst.extra['disks']:
                if d['deviceName'] == name:
                    is_attached = True
                    json_output['attached_mode'] = d['mode']
                    json_output['attached_to_instance'] = inst.name
        # NOTE(review): bare "except:" swallows everything, including
        # SystemExit/KeyboardInterrupt; a missing instance is reported below.
        except:
            pass
    # Look the disk up in case it already exists.
    try:
        disk = gce.ex_get_volume(name)
        json_output['size_gb'] = int(disk.size)
    except ResourceNotFoundError:
        # Not found is fine: we may be about to create it.
        pass
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
    # The user wants the disk to exist; if "instance_name" was supplied the
    # user also wants it attached.
    if state in ['active', 'present']:
        if not size_gb:
            module.fail_json(msg="Must supply a size_gb", changed=False)
        # Validate the size: must coerce to an integer >= 1 GB.
        try:
            size_gb = int(round(float(size_gb)))
            if size_gb < 1:
                raise Exception
        except:
            module.fail_json(msg="Must supply a size_gb larger than 1 GB",
                changed=False)
        if instance_name and inst is None:
            module.fail_json(msg='Instance %s does not exist in zone %s' % (
                instance_name, zone), changed=False)
        if not disk:
            # image and snapshot are mutually exclusive disk sources.
            if image is not None and snapshot is not None:
                module.fail_json(
                    msg='Cannot give both image (%s) and snapshot (%s)' % (
                        image, snapshot), changed=False)
            lc_image = None
            lc_snapshot = None
            if image is not None:
                lc_image = gce.ex_get_image(image)
            elif snapshot is not None:
                lc_snapshot = gce.ex_get_snapshot(snapshot)
            try:
                disk = gce.create_volume(
                    size_gb, name, location=zone, image=lc_image,
                    snapshot=lc_snapshot, ex_disk_type=disk_type)
            except ResourceExistsError:
                # Created concurrently elsewhere; treat as already present.
                pass
            except QuotaExceededError:
                module.fail_json(msg='Requested disk size exceeds quota',
                    changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['size_gb'] = size_gb
            if image is not None:
                json_output['image'] = image
            if snapshot is not None:
                json_output['snapshot'] = snapshot
            changed = True
        if inst and not is_attached:
            try:
                gce.attach_volume(inst, disk, device=name, ex_mode=mode)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['attached_to_instance'] = inst.name
            json_output['attached_mode'] = mode
            changed = True
    # The user wants the disk deleted (or merely detached when detach_only).
    if state in ['absent', 'deleted'] and disk:
        if inst and is_attached:
            try:
                gce.detach_volume(disk, ex_node=inst)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
        if not detach_only:
            try:
                gce.destroy_volume(disk)
            except ResourceInUseError as e:
                module.fail_json(msg=str(e.value), changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
    json_output['changed'] = changed
    module.exit_json(**json_output)
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| true
| true
|
f71442ba66bcddc2b3b52f67bbd9823def89ad03
| 476
|
py
|
Python
|
Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/Basics/Exercise/13_read_write_files/exercise_2_stocks.py
|
SDGraph/Hacktoberfest2k21
|
8f8aead15afa10ea12e1b23ece515a10a882de28
|
[
"MIT"
] | null | null | null |
Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/Basics/Exercise/13_read_write_files/exercise_2_stocks.py
|
SDGraph/Hacktoberfest2k21
|
8f8aead15afa10ea12e1b23ece515a10a882de28
|
[
"MIT"
] | null | null | null |
Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/Basics/Exercise/13_read_write_files/exercise_2_stocks.py
|
SDGraph/Hacktoberfest2k21
|
8f8aead15afa10ea12e1b23ece515a10a882de28
|
[
"MIT"
] | null | null | null |
# Compute PE (price/earnings) and PB (price/book) ratios for each stock
# listed in stocks.csv and write them to output.csv.
with open("stocks.csv", "r") as src, open("output.csv", "w") as dst:
    dst.write("Company Name,PE Ratio, PB Ratio\n")
    next(src)  # discard the header row of the input file
    for record in src:
        fields = record.split(",")
        company = fields[0]
        price = float(fields[1])
        eps = float(fields[2])
        book_value = float(fields[3])
        dst.write(f"{company},{round(price / eps, 2)},{round(price / book_value, 2)}\n")
| 36.615385
| 70
| 0.546218
|
with open("stocks.csv", "r") as f, open("output.csv", "w") as out:
out.write("Company Name,PE Ratio, PB Ratio\n")
next(f)
for line in f:
tokens = line.split(",")
stock = tokens[0]
price = float(tokens[1])
eps = float(tokens[2])
book = float(tokens[3])
pe = round(price / eps, 2)
pb = round(price / book, 2)
out.write(f"{stock},{pe},{pb}\n")
| true
| true
|
f71442ee9da45672024ed542f4f081204ce1ee75
| 4,356
|
py
|
Python
|
redash/utils/parameterized_query.py
|
quanpower/redash
|
2a37cb31d95703c239e1edf3d3d9e0f9c2eaf857
|
[
"BSD-2-Clause"
] | 1
|
2021-01-20T18:57:12.000Z
|
2021-01-20T18:57:12.000Z
|
redash/utils/parameterized_query.py
|
quanpower/redash
|
2a37cb31d95703c239e1edf3d3d9e0f9c2eaf857
|
[
"BSD-2-Clause"
] | null | null | null |
redash/utils/parameterized_query.py
|
quanpower/redash
|
2a37cb31d95703c239e1edf3d3d9e0f9c2eaf857
|
[
"BSD-2-Clause"
] | null | null | null |
import pystache
from functools import partial
from flask_login import current_user
from redash.authentication.org_resolving import current_org
from numbers import Number
from redash import models
from redash.utils import mustache_render, json_loads
from redash.permissions import require_access, view_only
from funcy import distinct
from dateutil.parser import parse
def _pluck_name_and_value(default_column, row):
row = {k.lower(): v for k, v in row.items()}
name_column = "name" if "name" in row.keys() else default_column.lower()
value_column = "value" if "value" in row.keys() else default_column.lower()
return {"name": row[name_column], "value": row[value_column]}
def _load_result(query_id):
    """Fetch the latest cached result payload for a query, enforcing ACLs.

    require_access() raises when current_user may not view the query's
    data source; otherwise the most recent QueryResult's data is
    deserialized and returned (a dict — callers index "columns"/"rows").
    """
    query = models.Query.get_by_id_and_org(query_id, current_org)
    require_access(query.data_source.groups, current_user, view_only)
    query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)
    return json_loads(query_result.data)
def dropdown_values(query_id):
    """Build name/value option pairs for a dropdown parameter from a query's rows."""
    result = _load_result(query_id)
    default_column = result["columns"][0]["name"]
    # map() keeps the original return type (list on Python 2, lazy
    # iterator on Python 3); callers iterate it either way.
    return map(partial(_pluck_name_and_value, default_column), result["rows"])
def _collect_key_names(nodes):
    """Recursively gather every mustache key referenced in a parse tree.

    Walks pystache's private ``_parse_tree``, collecting keys from plain
    {{key}} tags and descending into {{#key}}...{{/key}} sections so
    nested keys are found too. NOTE(review): relies on pystache
    internals (_EscapeNode/_SectionNode) — fragile across versions.
    """
    keys = []
    for node in nodes._parse_tree:
        if isinstance(node, pystache.parser._EscapeNode):
            keys.append(node.key)
        elif isinstance(node, pystache.parser._SectionNode):
            keys.append(node.key)
            keys.extend(_collect_key_names(node.parsed))
    # funcy.distinct: de-duplicates while preserving first-seen order.
    return distinct(keys)
def _collect_query_parameters(query):
    """Return the distinct mustache parameter names referenced by *query*."""
    return _collect_key_names(pystache.parse(query))
def _parameter_names(parameter_values):
names = []
for key, value in parameter_values.iteritems():
if isinstance(value, dict):
for inner_key in value.keys():
names.append(u'{}.{}'.format(key, inner_key))
else:
names.append(key)
return names
def _is_date(string):
    """Return True when *string* parses as a date/datetime.

    dateutil's parse() raises ValueError for unparseable strings, but
    also TypeError for non-string input and OverflowError for numeric
    strings out of range; all of those mean "not a date" here, and
    letting them escape would crash parameter validation on bad input.
    """
    try:
        parse(string)
        return True
    except (ValueError, TypeError, OverflowError):
        return False
def _is_date_range(obj):
    """Return True when *obj* maps "start" and "end" to parseable dates.

    KeyError (missing key) and TypeError (obj is not a mapping) are
    treated as "not a date range". Evaluation short-circuits: "end" is
    only looked up when "start" parsed successfully.
    """
    try:
        return _is_date(obj["start"]) and _is_date(obj["end"])
    except (KeyError, TypeError):
        return False
class ParameterizedQuery(object):
    """A mustache query template plus the parameter values applied to it.

    apply() validates incoming values against the optional schema and
    re-renders the query; ``text`` exposes the current rendered string.
    """

    def __init__(self, template, schema=None):
        self.schema = schema or []
        self.template = template
        self.query = template  # starts as the raw, unrendered template
        self.parameters = {}

    def apply(self, parameters):
        """Validate and merge *parameters*, then re-render the template.

        Raises InvalidParameterError naming every rejected parameter.
        Returns self so calls can be chained.
        """
        # .items() instead of the Python-2-only .iteritems(): identical
        # semantics, and validation keeps working on Python 3.
        invalid_parameter_names = [key for (key, value) in parameters.items() if not self._valid(key, value)]
        if invalid_parameter_names:
            raise InvalidParameterError(invalid_parameter_names)
        else:
            self.parameters.update(parameters)
            self.query = mustache_render(self.template, self.parameters)

        return self

    def _valid(self, name, value):
        """Check *value* against the schema entry named *name*.

        Without a schema everything is accepted; with one, unknown
        names fail, and so do unknown types (the fallback validator
        always returns False).
        """
        if not self.schema:
            return True

        definition = next((definition for definition in self.schema if definition["name"] == name), None)

        if not definition:
            return False

        validators = {
            # NOTE(review): basestring is Python-2-only; a "text"
            # parameter validated under Python 3 would need str here.
            "text": lambda value: isinstance(value, basestring),
            "number": lambda value: isinstance(value, Number),
            "enum": lambda value: value in definition["enumOptions"],
            "query": lambda value: value in [v["value"] for v in dropdown_values(definition["queryId"])],
            "date": _is_date,
            "datetime-local": _is_date,
            "datetime-with-seconds": _is_date,
            "date-range": _is_date_range,
            "datetime-range": _is_date_range,
            "datetime-range-with-seconds": _is_date_range,
        }

        validate = validators.get(definition["type"], lambda x: False)

        return validate(value)

    @property
    def missing_params(self):
        """Template parameter names that have not been supplied yet."""
        query_parameters = set(_collect_query_parameters(self.template))
        return set(query_parameters) - set(_parameter_names(self.parameters))

    @property
    def text(self):
        """The rendered query string (raw template until apply() runs)."""
        return self.query
class InvalidParameterError(Exception):
    """Raised when applied parameter values fail schema validation."""

    def __init__(self, parameters):
        joined = ", ".join(parameters)
        message = u"The following parameter values are incompatible with their definitions: {}".format(joined)
        super(InvalidParameterError, self).__init__(message)
| 31.565217
| 125
| 0.672635
|
import pystache
from functools import partial
from flask_login import current_user
from redash.authentication.org_resolving import current_org
from numbers import Number
from redash import models
from redash.utils import mustache_render, json_loads
from redash.permissions import require_access, view_only
from funcy import distinct
from dateutil.parser import parse
def _pluck_name_and_value(default_column, row):
row = {k.lower(): v for k, v in row.items()}
name_column = "name" if "name" in row.keys() else default_column.lower()
value_column = "value" if "value" in row.keys() else default_column.lower()
return {"name": row[name_column], "value": row[value_column]}
def _load_result(query_id):
query = models.Query.get_by_id_and_org(query_id, current_org)
require_access(query.data_source.groups, current_user, view_only)
query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)
return json_loads(query_result.data)
def dropdown_values(query_id):
data = _load_result(query_id)
first_column = data["columns"][0]["name"]
pluck = partial(_pluck_name_and_value, first_column)
return map(pluck, data["rows"])
def _collect_key_names(nodes):
keys = []
for node in nodes._parse_tree:
if isinstance(node, pystache.parser._EscapeNode):
keys.append(node.key)
elif isinstance(node, pystache.parser._SectionNode):
keys.append(node.key)
keys.extend(_collect_key_names(node.parsed))
return distinct(keys)
def _collect_query_parameters(query):
nodes = pystache.parse(query)
keys = _collect_key_names(nodes)
return keys
def _parameter_names(parameter_values):
names = []
for key, value in parameter_values.iteritems():
if isinstance(value, dict):
for inner_key in value.keys():
names.append(u'{}.{}'.format(key, inner_key))
else:
names.append(key)
return names
def _is_date(string):
try:
parse(string)
return True
except ValueError:
return False
def _is_date_range(obj):
try:
return _is_date(obj["start"]) and _is_date(obj["end"])
except (KeyError, TypeError):
return False
class ParameterizedQuery(object):
def __init__(self, template, schema=None):
self.schema = schema or []
self.template = template
self.query = template
self.parameters = {}
def apply(self, parameters):
invalid_parameter_names = [key for (key, value) in parameters.iteritems() if not self._valid(key, value)]
if invalid_parameter_names:
raise InvalidParameterError(invalid_parameter_names)
else:
self.parameters.update(parameters)
self.query = mustache_render(self.template, self.parameters)
return self
def _valid(self, name, value):
if not self.schema:
return True
definition = next((definition for definition in self.schema if definition["name"] == name), None)
if not definition:
return False
validators = {
"text": lambda value: isinstance(value, basestring),
"number": lambda value: isinstance(value, Number),
"enum": lambda value: value in definition["enumOptions"],
"query": lambda value: value in [v["value"] for v in dropdown_values(definition["queryId"])],
"date": _is_date,
"datetime-local": _is_date,
"datetime-with-seconds": _is_date,
"date-range": _is_date_range,
"datetime-range": _is_date_range,
"datetime-range-with-seconds": _is_date_range,
}
validate = validators.get(definition["type"], lambda x: False)
return validate(value)
@property
def missing_params(self):
query_parameters = set(_collect_query_parameters(self.template))
return set(query_parameters) - set(_parameter_names(self.parameters))
@property
def text(self):
return self.query
class InvalidParameterError(Exception):
def __init__(self, parameters):
message = u"The following parameter values are incompatible with their definitions: {}".format(", ".join(parameters))
super(InvalidParameterError, self).__init__(message)
| true
| true
|
f7144329ccafee0fa6d6b0aae0ee85c8503eceb0
| 13,247
|
py
|
Python
|
codigo/process_datos_abiertos.py
|
Morisset/Mexico-datos
|
29d5ed1079732d5d809bc14eb5d3438662508728
|
[
"MIT"
] | null | null | null |
codigo/process_datos_abiertos.py
|
Morisset/Mexico-datos
|
29d5ed1079732d5d809bc14eb5d3438662508728
|
[
"MIT"
] | null | null | null |
codigo/process_datos_abiertos.py
|
Morisset/Mexico-datos
|
29d5ed1079732d5d809bc14eb5d3438662508728
|
[
"MIT"
] | null | null | null |
import os
import csv
import pandas as pd
import geopandas as gpd
from datetime import datetime, timedelta
## PROCESSING FUNCTIONS ##
def confirmados_diarios_por_estado(datos, entidades):
    """Daily count of confirmed COVID-19 cases per state.

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of new confirmed cases per day for each Mexican
          state (see get_formato_series for the layout).

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    confirmados = datos[datos['RESULTADO'] == 1]
    series = confirmados.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def negativos_diarios_por_estado(datos, entidades):
    """Daily count of negative test results per state.

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of new negative tests per day for each Mexican
          state.

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    negativos = datos[datos['RESULTADO'] == 2]
    series = negativos.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def pruebas_pendientes_diarias_por_estado(datos, entidades):
    """Daily count of pending (unresolved) tests per state.

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of new pending tests per day for each Mexican
          state.

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    pendientes = datos[datos['RESULTADO'] == 3]
    series = pendientes.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def pruebas_totales_diarias_por_estado(datos, entidades):
    """Daily count of all tests performed per state (any RESULTADO).

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of new tests per day for each Mexican state.

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    series = datos.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def defunciones_diarias_por_estado(datos, entidades):
    """Daily count of COVID-19 deaths per state.

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of new deaths per day (by date of death) for each
          Mexican state.

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    # '9999-99-99' is the sentinel FECHA_DEF for patients still alive.
    mask = (datos['RESULTADO'] == 1) & (datos['FECHA_DEF'] != '9999-99-99')
    series = datos[mask].groupby(['ENTIDAD_UM', 'FECHA_DEF'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def hospitalizados_diarios_por_estado(datos, entidades):
    """Daily count of hospitalized confirmed patients per state.

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of newly hospitalized patients per day for each
          Mexican state. Includes both ICU and non-ICU admissions.

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    mask = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 2)
    series = datos[mask].groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def ambulatorios_diarios_por_estado(datos, entidades):
    """Daily count of ambulatory (outpatient) confirmed cases per state.

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of new ambulatory confirmed patients per day for
          each Mexican state.

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    mask = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 1)
    series = datos[mask].groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def uci_diarios_por_estado(datos, entidades):
    """Daily count of confirmed patients admitted to an ICU per state.

    Input:
        - datos: Mexican COVID-19 open data, available at [1].
    Output:
        - time series of new ICU patients per day for each Mexican
          state.

    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    mask = (datos['RESULTADO'] == 1) & (datos['UCI'] == 1)
    series = datos[mask].groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
## HELPER FUNCTIONS ##
def get_formato_series(series, entidades):
    """Pivot a (state, date) groupby count into tidy time-series form.

    Input:
        - series: groupby result indexed by (state key, date string).
        - entidades: dict mapping state keys to state names.
    Output:
        - DataFrame with a DatetimeIndex named 'Fecha', state names as
          columns (national total 'Nacional' first), and zero-filled
          rows for calendar days with no records.
    """
    # This repository shortens a few official DGE state names.
    renames = {'Ciudad De México': 'Ciudad de México',
               'Coahuila De Zaragoza': 'Coahuila',
               'Michoacán De Ocampo': 'Michoacán',
               'Veracruz De Ignacio De La Llave': 'Veracruz'}

    tidy = series.unstack(level=0).fillna(0).astype('int')

    # Dates as a DatetimeIndex named 'Fecha' (mexicovid19/Mexico-datos format).
    tidy.index.name = 'Fecha'
    tidy.index = pd.to_datetime(tidy.index)

    # Official DGE names first, then this repository's names, sorted.
    tidy = tidy.rename(columns=entidades).rename(columns=renames)
    tidy = tidy.reindex(sorted(tidy.columns), axis=1)

    # National aggregate goes in front of the per-state columns.
    tidy.loc[:, 'Nacional'] = tidy.sum(axis=1)
    ordered = list(tidy.columns)
    tidy = tidy[ordered[-1:] + ordered[:-1]]

    # Zero-fill any calendar gaps between the first and last dates.
    full_range = pd.date_range(tidy.index.min(), tidy.index.max())
    tidy = tidy.reindex(full_range, fill_value=0)
    tidy.index.name = 'Fecha'

    return tidy
if __name__ == '__main__':
    # Timestamps offset by 6 hours (Mexico City is UTC-6); yesterday's
    # dated zip is the latest published file.
    # NOTE(review): naive datetime arithmetic — assumes host clock in UTC.
    update_time = datetime.now() - timedelta(hours=6)
    date = datetime.now() - timedelta(days=1)
    date_filename = date.strftime('%Y%m%d')
    date_iso = date.strftime('%Y-%m-%d')
    # Repository layout: this script runs from one level below the root.
    repo = '..'
    dir_datos_abiertos = os.path.join(repo, 'datos_abiertos', '')
    dir_datos = os.path.join(repo, 'datos', '')
    dir_geo = os.path.join(dir_datos, 'geograficos', '')
    dir_demograficos = os.path.join(dir_datos, 'demograficos_variables', '')
    dir_series_dge = os.path.join(dir_datos_abiertos, 'series_de_tiempo', '')
    dir_series = os.path.join(dir_datos, 'series_de_tiempo', '')
    dir_input = os.path.join(dir_datos_abiertos, 'raw', '')
    input_filename = dir_input + f'datos_abiertos_{date_filename}.zip'
    ## READING ##
    # Read the raw open data (zipped csv).
    datos_abiertos_df = pd.read_csv(input_filename, compression='zip')
    # Read the state catalog (sheet 'Catálogo de ENTIDADES' in
    # 'diccionario_datos/Catalogos_0412.xlsx'; converted to csv).
    cat = (pd.read_csv(dir_input + 'diccionario_datos/catalogo_entidades.csv')
           .set_index('CLAVE_ENTIDAD')['ENTIDAD_FEDERATIVA']
           .to_dict())
    # State names come in upper case; convert to title case.
    entidades = {key: val.title() for (key, val) in cat.items()}
    # Open-data time series: files[i] is produced by funciones[i].
    files = ['covid19_mex_confirmados.csv',
             'covid19_mex_negativos.csv',
             'covid19_mex_pendientes.csv',
             'covid19_mex_pruebas-totales.csv',
             'covid19_mex_muertes.csv',
             'covid19_mex_hospitalizados.csv',
             'covid19_mex_uci.csv',
             'covid19_mex_ambulatorios.csv']
    funciones = [confirmados_diarios_por_estado,
                 negativos_diarios_por_estado,
                 pruebas_pendientes_diarias_por_estado,
                 pruebas_totales_diarias_por_estado,
                 defunciones_diarias_por_estado,
                 hospitalizados_diarios_por_estado,
                 uci_diarios_por_estado,
                 ambulatorios_diarios_por_estado]
    dfs = [func(datos_abiertos_df, entidades) for func in funciones]
    # Write daily (nuevos) and cumulative (acumulados) variants of each.
    for f, df in zip(files, dfs):
        df.to_csv(f'{dir_series_dge}/nuevos/{f}')
        df.cumsum().to_csv(f'{dir_series_dge}/acumulados/{f}')
    ## Static time series (only the last row gets appended) ##
    # Unix-style csv dialect without quoting.
    csv.register_dialect('unixnq', delimiter=',', lineterminator='\n',
                         quoting=csv.QUOTE_NONE)
    # Cumulative case totals per state.
    totales_file = dir_series + 'covid19_mex_casos_totales.csv'
    fila_totales = dfs[0].cumsum().tail(1)  # confirmados_diarios_por_estado
    with open(totales_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_totales.values[0].tolist())
    # New cases in the last 24h: difference of the last two total rows.
    # NOTE: depends on the append above having already happened.
    nuevos_file = dir_series + 'covid19_mex_casos_nuevos.csv'
    totales_df = pd.read_csv(totales_file)
    fila_nuevos = (totales_df.iloc[-1, 1:] - totales_df.iloc[-2, 1:]).astype(int)
    with open(nuevos_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_nuevos.values.tolist())  # a Series
    # Cumulative deaths per state.
    muertes_file = dir_series + 'covid19_mex_muertes.csv'
    fila_muertes = dfs[4].cumsum().tail(1)  # defunciones_diarias_por_estado
    with open(muertes_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_muertes.values[0].tolist())
    # New deaths per state (same last-two-rows difference technique).
    muertes_nuevas_file = dir_series + 'covid19_mex_muertes_nuevas.csv'
    muertes_df = pd.read_csv(muertes_file)
    fila_nuevas = (muertes_df.iloc[-1, 1:] - muertes_df.iloc[-2, 1:]).astype(int)
    with open(muertes_nuevas_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_nuevas.values.tolist())  # a Series
    # Cumulative suspected (pending) cases per state.
    sospechosos_file = dir_series + 'covid19_mex_sospechosos.csv'
    # pruebas_pendientes_diarias_por_estado
    fila_sospechosos = dfs[2].cumsum().tail(1)
    with open(sospechosos_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_sospechosos.values[0].tolist())
    # Cumulative negative tests per state.
    negativos_file = dir_series + 'covid19_mex_negativos.csv'
    fila_negativos = dfs[1].cumsum().tail(1)  # negativos_diarios_por_estado
    with open(negativos_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_negativos.values[0].tolist())
    ## Per-state totals in the geojson file ##
    geojson_file = dir_geo + 'mexico.geojson'
    edos_hoy_file = dir_datos + 'estados_hoy.csv'
    updated_file = dir_datos + 'last_updated.csv'
    gdf = gpd.read_file(geojson_file).set_index('name')
    # NOTE(review): attribute-style assignment only updates columns that
    # already exist; it will not create new ones — assumes mexico.geojson
    # already carries all of these columns. Verify against the data file.
    gdf.totales = fila_totales.drop('Nacional', axis=1).squeeze()
    gdf.nuevos = fila_nuevos.drop('Nacional').squeeze()  # series
    gdf.muertes = fila_muertes.drop('Nacional', axis=1).squeeze()
    gdf.muertes_nuevas = fila_nuevas.drop('Nacional').squeeze()  # series
    gdf.sospechosos = fila_sospechosos.drop('Nacional', axis=1).squeeze()
    gdf.negativos = fila_negativos.drop('Nacional', axis=1).squeeze()
    # Per-100k-inhabitants rates; 'population' must be a geojson column.
    gdf.totales_100k = gdf.totales * 100000 / gdf.population
    gdf.muertes_100k = gdf.muertes * 100000 / gdf.population
    gdf.updated_at = str(update_time).replace(' ', 'T')
    gdf = gdf.reset_index()
    # Sanity check: the geojson schema is expected to have 14 columns.
    assert gdf.shape[1] == 14
    gdf.to_file(geojson_file, driver='GeoJSON')
    gdf.loc[0:0, ['updated_at']].to_csv(updated_file, index=False)
    ### Today's per-state snapshot (estados_hoy.csv) ###
    cols_edos_hoy = ['name', 'totales', 'nuevos',
                     'muertes', 'muertes_nuevas', 'sospechosos', 'negativos']
    map_cols = {'name': 'Estado',
                'totales': 'Confirmados totales',
                'nuevos': 'Confirmados nuevos',
                'muertes': 'Defunciones',
                'muertes_nuevas': 'Defunciones nuevas',
                'sospechosos': 'Sospechosos totales',
                'negativos': 'Negativos totales'}
    edos_hoy_df = gdf[cols_edos_hoy].rename(columns=map_cols)
    edos_hoy_df.to_csv(edos_hoy_file, index=False)
    print(f'Se procesaron exitosamente los datos abiertos de {input_filename}')
| 35.802703
| 84
| 0.658111
|
import os
import csv
import pandas as pd
import geopandas as gpd
from datetime import datetime, timedelta
r_estado(datos, entidades):
series = (datos[datos['RESULTADO'] == 1]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def negativos_diarios_por_estado(datos, entidades):
series = (datos[datos['RESULTADO'] == 2]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def pruebas_pendientes_diarias_por_estado(datos, entidades):
series = (datos[datos['RESULTADO'] == 3]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def pruebas_totales_diarias_por_estado(datos, entidades):
series = (datos
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def defunciones_diarias_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['FECHA_DEF'] != '9999-99-99')
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_DEF'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def hospitalizados_diarios_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 2)
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def ambulatorios_diarios_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 1)
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def uci_diarios_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['UCI'] == 1)
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
(series, entidades):
diccionario_cambio_edos = {'Ciudad De México': 'Ciudad de México',
'Coahuila De Zaragoza': 'Coahuila',
'Michoacán De Ocampo': 'Michoacán',
'Veracruz De Ignacio De La Llave': 'Veracruz'}
series = series.unstack(level=0).fillna(0).astype('int')
series.index.name = 'Fecha'
series.index = pd.to_datetime(series.index)
series = series.rename(columns=entidades)
series = series.rename(columns=diccionario_cambio_edos)
series = series.reindex(sorted(series.columns), axis=1)
series.loc[:, 'Nacional'] = series.sum(axis=1)
cols = list(series.columns)
cols = cols[-1:] + cols[:-1]
series = series[cols]
idx = pd.date_range(series.index.min(), series.index.max())
series = series.reindex(idx, fill_value=0)
series.index.name = 'Fecha'
return series
if __name__ == '__main__':
update_time = datetime.now() - timedelta(hours=6)
date = datetime.now() - timedelta(days=1)
date_filename = date.strftime('%Y%m%d')
date_iso = date.strftime('%Y-%m-%d')
repo = '..'
dir_datos_abiertos = os.path.join(repo, 'datos_abiertos', '')
dir_datos = os.path.join(repo, 'datos', '')
dir_geo = os.path.join(dir_datos, 'geograficos', '')
dir_demograficos = os.path.join(dir_datos, 'demograficos_variables', '')
dir_series_dge = os.path.join(dir_datos_abiertos, 'series_de_tiempo', '')
dir_series = os.path.join(dir_datos, 'series_de_tiempo', '')
dir_input = os.path.join(dir_datos_abiertos, 'raw', '')
input_filename = dir_input + f'datos_abiertos_{date_filename}.zip'
s_abiertos_df = pd.read_csv(input_filename, compression='zip')
cat = (pd.read_csv(dir_input + 'diccionario_datos/catalogo_entidades.csv')
.set_index('CLAVE_ENTIDAD')['ENTIDAD_FEDERATIVA']
.to_dict())
# cambia mayúsculas de estados por formato título
entidades = {key: val.title() for (key, val) in cat.items()}
# Datos abiertos
files = ['covid19_mex_confirmados.csv',
'covid19_mex_negativos.csv',
'covid19_mex_pendientes.csv',
'covid19_mex_pruebas-totales.csv',
'covid19_mex_muertes.csv',
'covid19_mex_hospitalizados.csv',
'covid19_mex_uci.csv',
'covid19_mex_ambulatorios.csv']
funciones = [confirmados_diarios_por_estado,
negativos_diarios_por_estado,
pruebas_pendientes_diarias_por_estado,
pruebas_totales_diarias_por_estado,
defunciones_diarias_por_estado,
hospitalizados_diarios_por_estado,
uci_diarios_por_estado,
ambulatorios_diarios_por_estado]
dfs = [func(datos_abiertos_df, entidades) for func in funciones]
for f, df in zip(files, dfs):
df.to_csv(f'{dir_series_dge}/nuevos/{f}')
df.cumsum().to_csv(f'{dir_series_dge}/acumulados/{f}')
## Series de tiempo estaticas (solo actualiza ultima fila) ##
# Formato unix sin quotes
csv.register_dialect('unixnq', delimiter=',', lineterminator='\n',
quoting=csv.QUOTE_NONE)
# Totales por estado
totales_file = dir_series + 'covid19_mex_casos_totales.csv'
fila_totales = dfs[0].cumsum().tail(1) # confirmados_diarios_por_estado
with open(totales_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_totales.values[0].tolist())
# Casos ultimas 24h
nuevos_file = dir_series + 'covid19_mex_casos_nuevos.csv'
totales_df = pd.read_csv(totales_file)
fila_nuevos = (totales_df.iloc[-1, 1:] - totales_df.iloc[-2, 1:]).astype(int)
with open(nuevos_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_nuevos.values.tolist()) # a series
# Muertes por estado
muertes_file = dir_series + 'covid19_mex_muertes.csv'
fila_muertes = dfs[4].cumsum().tail(1) # defunciones_diarias_por_estado
with open(muertes_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_muertes.values[0].tolist())
# Muertes nuevas por estado
muertes_nuevas_file = dir_series + 'covid19_mex_muertes_nuevas.csv'
muertes_df = pd.read_csv(muertes_file)
fila_nuevas = (muertes_df.iloc[-1, 1:] - muertes_df.iloc[-2, 1:]).astype(int)
with open(muertes_nuevas_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_nuevas.values.tolist()) # a series
# Sospechosos por estado
sospechosos_file = dir_series + 'covid19_mex_sospechosos.csv'
# pruebas_pendientes_diarias_por_estado
fila_sospechosos = dfs[2].cumsum().tail(1)
with open(sospechosos_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_sospechosos.values[0].tolist())
# Sospechosos por estado
negativos_file = dir_series + 'covid19_mex_negativos.csv'
fila_negativos = dfs[1].cumsum().tail(1) # negativos_diarios_por_estado
with open(negativos_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_negativos.values[0].tolist())
## Totales por estado en el archivo geojson ##
geojson_file = dir_geo + 'mexico.geojson'
edos_hoy_file = dir_datos + 'estados_hoy.csv'
updated_file = dir_datos + 'last_updated.csv'
gdf = gpd.read_file(geojson_file).set_index('name')
gdf.totales = fila_totales.drop('Nacional', axis=1).squeeze()
gdf.nuevos = fila_nuevos.drop('Nacional').squeeze() # series
gdf.muertes = fila_muertes.drop('Nacional', axis=1).squeeze()
gdf.muertes_nuevas = fila_nuevas.drop('Nacional').squeeze() # series
gdf.sospechosos = fila_sospechosos.drop('Nacional', axis=1).squeeze()
gdf.negativos = fila_negativos.drop('Nacional', axis=1).squeeze()
gdf.totales_100k = gdf.totales * 100000 / gdf.population
gdf.muertes_100k = gdf.muertes * 100000 / gdf.population
gdf.updated_at = str(update_time).replace(' ', 'T')
gdf = gdf.reset_index()
assert gdf.shape[1] == 14
gdf.to_file(geojson_file, driver='GeoJSON')
gdf.loc[0:0, ['updated_at']].to_csv(updated_file, index=False)
### Estados hoy ###
cols_edos_hoy = ['name', 'totales', 'nuevos',
'muertes', 'muertes_nuevas', 'sospechosos', 'negativos']
map_cols = {'name': 'Estado',
'totales': 'Confirmados totales',
'nuevos': 'Confirmados nuevos',
'muertes': 'Defunciones',
'muertes_nuevas': 'Defunciones nuevas',
'sospechosos': 'Sospechosos totales',
'negativos': 'Negativos totales'}
edos_hoy_df = gdf[cols_edos_hoy].rename(columns=map_cols)
edos_hoy_df.to_csv(edos_hoy_file, index=False)
print(f'Se procesaron exitosamente los datos abiertos de {input_filename}')
| true
| true
|
f71443471e33b1d928697eb1bc2dc49d6db4519d
| 14,277
|
py
|
Python
|
lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/pn_trunk.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/pn_trunk.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/pn_trunk.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python
""" PN CLI trunk-create/trunk-delete/trunk-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: pn_trunk
author: "Pluribus Networks (@amitsi)"
short_description: CLI command to create/delete/modify a trunk.
deprecated:
removed_in: 2.0.0 # was Ansible 2.12
why: Doesn't support latest Pluribus Networks netvisor
alternative: Latest modules will be pushed in Ansible future versions.
description:
- Execute trunk-create or trunk-delete command.
- Trunks can be used to aggregate network links at Layer 2 on the local
switch. Use this command to create a new trunk.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
default: 'local'
state:
description:
- State the action to perform. Use 'present' to create trunk,
'absent' to delete trunk and 'update' to modify trunk.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name for the trunk configuration.
required: true
pn_ports:
description:
- Specify the port number(s) for the link(s) to aggregate into the trunk.
- Required for trunk-create.
pn_speed:
description:
- Specify the port speed or disable the port.
choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
pn_egress_rate_limit:
description:
- Specify an egress port data rate limit for the configuration.
pn_jumbo:
description:
- Specify if the port can receive jumbo frames.
type: bool
pn_lacp_mode:
description:
- Specify the LACP mode for the configuration.
choices: ['off', 'passive', 'active']
pn_lacp_priority:
description:
- Specify the LACP priority. This is a number between 1 and 65535 with a
default value of 32768.
pn_lacp_timeout:
description:
- Specify the LACP time out as slow (30 seconds) or fast (4seconds).
The default value is slow.
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
- Specify the LACP fallback mode as bundles or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
pn_edge_switch:
description:
- Specify if the switch is an edge switch.
type: bool
pn_pause:
description:
- Specify if pause frames are sent.
type: bool
pn_description:
description:
- Specify a description for the trunk configuration.
pn_loopback:
description:
- Specify loopback if you want to use loopback.
type: bool
pn_mirror_receive:
description:
- Specify if the configuration receives mirrored traffic.
type: bool
pn_unknown_ucast_level:
description:
- Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
pn_port_macaddr:
description:
- Specify the MAC address of the port.
pn_loopvlans:
description:
- Specify a list of looping vlans.
pn_routing:
description:
- Specify if the port participates in routing on the network.
type: bool
pn_host:
description:
- Host facing port control setting.
type: bool
'''
EXAMPLES = """
- name: Create trunk
community.network.pn_trunk:
state: 'present'
pn_name: 'spine-to-leaf'
pn_ports: '11,12,13,14'
- name: Delete trunk
community.network.pn_trunk:
state: 'absent'
pn_name: 'spine-to-leaf'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the trunk command.
returned: always
type: list
stderr:
description: The set of error responses from the trunk command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
TRUNK_EXISTS = None
def pn_cli(module):
    """
    Build the base Netvisor CLI invocation string.

    Credentials are embedded when both pn_cliusername and pn_clipassword are
    supplied; otherwise the CLI runs as the invoking (root) user. A switch
    clause targeting either the local switch or a named one is appended.

    :param module: AnsibleModule instance providing the pn_* parameters
    :return: the CLI prefix string to which command options are appended
    """
    params = module.params
    user = params['pn_cliusername']
    pwd = params['pn_clipassword']
    target = params['pn_cliswitch']

    if user and pwd:
        base = '/usr/bin/cli --quiet --user %s:%s ' % (user, pwd)
    else:
        base = '/usr/bin/cli --quiet '

    if target == 'local':
        return base + ' switch-local '
    return base + ' switch ' + target
def check_cli(module, cli):
    """
    Check for idempotency using the trunk-show command.

    Runs ``trunk-show`` on the target and records the result in the
    module-level TRUNK_EXISTS flag: True when a trunk named pn_name is
    present, False otherwise.

    :param module: AnsibleModule instance providing input parameters
    :param cli: base CLI string produced by pn_cli()
    """
    global TRUNK_EXISTS

    trunk_name = module.params['pn_name']
    show_cmd = shlex.split(cli + ' trunk-show format switch,name no-show-headers')

    # run_command returns (rc, stdout, stderr); only stdout is needed.
    stdout = module.run_command(show_cmd)[1]

    # Whitespace-split output yields one token per switch/name field.
    TRUNK_EXISTS = trunk_name in stdout.split()
def run_cli(module, cli):
    """
    Execute the assembled CLI command on the target node(s).

    Always exits the module via exit_json(): with ``changed=False`` plus
    stderr on a non-zero return code, otherwise with ``changed=True`` and
    any stdout the command produced.

    :param module: AnsibleModule instance providing input parameters
    :param cli: the complete CLI string to execute
    """
    switch = module.params['pn_cliswitch']
    command = get_command_from_state(module.params['state'])

    # rc/stdout/stderr from running the tokenized command line.
    rc, out, err = module.run_command(shlex.split(cli))

    # Report only the portion of the command after the switch clause.
    shown = cli.split(switch)[1]

    if rc != 0:
        module.exit_json(
            command=shown,
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )

    if out:
        module.exit_json(
            command=shown,
            stdout=out.strip(),
            msg="%s operation completed" % command,
            changed=True
        )
    else:
        module.exit_json(
            command=shown,
            msg="%s operation completed" % command,
            changed=True
        )
def get_command_from_state(state):
    """
    Map an Ansible state to the corresponding Netvisor trunk command.

    :param state: one of 'present', 'absent' or 'update'
    :return: the matching CLI command name, or None for an unknown state
    """
    return {
        'present': 'trunk-create',
        'absent': 'trunk-delete',
        'update': 'trunk-modify',
    }.get(state)
def main():
    """
    Entry point: parse module arguments, build the trunk-create /
    trunk-delete / trunk-modify CLI string and execute it on the target.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_name=dict(required=True, type='str'),
            pn_ports=dict(type='str'),
            pn_speed=dict(type='str',
                          choices=['disable', '10m', '100m', '1g', '2.5g',
                                   '10g', '40g']),
            pn_egress_rate_limit=dict(type='str'),
            pn_jumbo=dict(type='bool'),
            pn_lacp_mode=dict(type='str', choices=[
                'off', 'passive', 'active']),
            pn_lacp_priority=dict(type='int'),
            pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']),
            pn_lacp_fallback=dict(type='str', choices=[
                'bundle', 'individual']),
            pn_lacp_fallback_timeout=dict(type='str'),
            pn_edge_switch=dict(type='bool'),
            pn_pause=dict(type='bool'),
            pn_description=dict(type='str'),
            pn_loopback=dict(type='bool'),
            pn_mirror_receive=dict(type='bool'),
            pn_unknown_ucast_level=dict(type='str'),
            pn_unknown_mcast_level=dict(type='str'),
            pn_broadcast_level=dict(type='str'),
            pn_port_macaddr=dict(type='str'),
            pn_loopvlans=dict(type='str'),
            pn_routing=dict(type='bool'),
            pn_host=dict(type='bool')
        ),
        required_if=(
            ["state", "present", ["pn_name", "pn_ports"]],
            ["state", "absent", ["pn_name"]],
            ["state", "update", ["pn_name"]]
        )
    )
    # Accessing the arguments
    state = module.params['state']
    name = module.params['pn_name']
    ports = module.params['pn_ports']
    speed = module.params['pn_speed']
    egress_rate_limit = module.params['pn_egress_rate_limit']
    jumbo = module.params['pn_jumbo']
    lacp_mode = module.params['pn_lacp_mode']
    lacp_priority = module.params['pn_lacp_priority']
    lacp_timeout = module.params['pn_lacp_timeout']
    lacp_fallback = module.params['pn_lacp_fallback']
    lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
    edge_switch = module.params['pn_edge_switch']
    pause = module.params['pn_pause']
    description = module.params['pn_description']
    loopback = module.params['pn_loopback']
    mirror_receive = module.params['pn_mirror_receive']
    unknown_ucast_level = module.params['pn_unknown_ucast_level']
    unknown_mcast_level = module.params['pn_unknown_mcast_level']
    broadcast_level = module.params['pn_broadcast_level']
    port_macaddr = module.params['pn_port_macaddr']
    loopvlans = module.params['pn_loopvlans']
    routing = module.params['pn_routing']
    host = module.params['pn_host']
    command = get_command_from_state(state)
    # Building the CLI command string
    cli = pn_cli(module)
    if command == 'trunk-delete':
        check_cli(module, cli)
        if TRUNK_EXISTS is False:
            # Nothing to delete: report a skip instead of failing.
            module.exit_json(
                skipped=True,
                msg='Trunk with name %s does not exist' % name
            )
        cli += ' %s name %s ' % (command, name)
    else:
        if command == 'trunk-create':
            check_cli(module, cli)
            if TRUNK_EXISTS is True:
                # Idempotency: the trunk is already present.
                module.exit_json(
                    skipped=True,
                    msg='Trunk with name %s already exists' % name
                )
        cli += ' %s name %s ' % (command, name)
        # Appending options. Booleans are tri-state (None means "not
        # specified"), hence the identity tests against True/False.
        if ports:
            cli += ' ports ' + ports
        if speed:
            cli += ' speed ' + speed
        if egress_rate_limit:
            cli += ' egress-rate-limit ' + egress_rate_limit
        if jumbo is True:
            cli += ' jumbo '
        if jumbo is False:
            cli += ' no-jumbo '
        if lacp_mode:
            cli += ' lacp-mode ' + lacp_mode
        if lacp_priority:
            # BUGFIX: pn_lacp_priority is declared type='int' in the
            # argument_spec; convert to str before concatenation to avoid
            # a TypeError whenever the option is supplied.
            cli += ' lacp-priority ' + str(lacp_priority)
        if lacp_timeout:
            cli += ' lacp-timeout ' + lacp_timeout
        if lacp_fallback:
            cli += ' lacp-fallback ' + lacp_fallback
        if lacp_fallback_timeout:
            cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
        if edge_switch is True:
            cli += ' edge-switch '
        if edge_switch is False:
            cli += ' no-edge-switch '
        if pause is True:
            cli += ' pause '
        if pause is False:
            cli += ' no-pause '
        if description:
            cli += ' description ' + description
        if loopback is True:
            cli += ' loopback '
        if loopback is False:
            cli += ' no-loopback '
        if mirror_receive is True:
            cli += ' mirror-receive-only '
        if mirror_receive is False:
            cli += ' no-mirror-receive-only '
        if unknown_ucast_level:
            cli += ' unknown-ucast-level ' + unknown_ucast_level
        if unknown_mcast_level:
            cli += ' unknown-mcast-level ' + unknown_mcast_level
        if broadcast_level:
            cli += ' broadcast-level ' + broadcast_level
        if port_macaddr:
            cli += ' port-mac-address ' + port_macaddr
        if loopvlans:
            cli += ' loopvlans ' + loopvlans
        if routing is True:
            cli += ' routing '
        if routing is False:
            cli += ' no-routing '
        if host is True:
            cli += ' host-enable '
        if host is False:
            cli += ' host-disable '
    run_cli(module, cli)
if __name__ == '__main__':
main()
| 30.835853
| 81
| 0.62254
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: pn_trunk
author: "Pluribus Networks (@amitsi)"
short_description: CLI command to create/delete/modify a trunk.
deprecated:
removed_in: 2.0.0 # was Ansible 2.12
why: Doesn't support latest Pluribus Networks netvisor
alternative: Latest modules will be pushed in Ansible future versions.
description:
- Execute trunk-create or trunk-delete command.
- Trunks can be used to aggregate network links at Layer 2 on the local
switch. Use this command to create a new trunk.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
default: 'local'
state:
description:
- State the action to perform. Use 'present' to create trunk,
'absent' to delete trunk and 'update' to modify trunk.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name for the trunk configuration.
required: true
pn_ports:
description:
- Specify the port number(s) for the link(s) to aggregate into the trunk.
- Required for trunk-create.
pn_speed:
description:
- Specify the port speed or disable the port.
choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
pn_egress_rate_limit:
description:
- Specify an egress port data rate limit for the configuration.
pn_jumbo:
description:
- Specify if the port can receive jumbo frames.
type: bool
pn_lacp_mode:
description:
- Specify the LACP mode for the configuration.
choices: ['off', 'passive', 'active']
pn_lacp_priority:
description:
- Specify the LACP priority. This is a number between 1 and 65535 with a
default value of 32768.
pn_lacp_timeout:
description:
- Specify the LACP time out as slow (30 seconds) or fast (4seconds).
The default value is slow.
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
- Specify the LACP fallback mode as bundles or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
pn_edge_switch:
description:
- Specify if the switch is an edge switch.
type: bool
pn_pause:
description:
- Specify if pause frames are sent.
type: bool
pn_description:
description:
- Specify a description for the trunk configuration.
pn_loopback:
description:
- Specify loopback if you want to use loopback.
type: bool
pn_mirror_receive:
description:
- Specify if the configuration receives mirrored traffic.
type: bool
pn_unknown_ucast_level:
description:
- Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
pn_port_macaddr:
description:
- Specify the MAC address of the port.
pn_loopvlans:
description:
- Specify a list of looping vlans.
pn_routing:
description:
- Specify if the port participates in routing on the network.
type: bool
pn_host:
description:
- Host facing port control setting.
type: bool
'''
EXAMPLES = """
- name: Create trunk
community.network.pn_trunk:
state: 'present'
pn_name: 'spine-to-leaf'
pn_ports: '11,12,13,14'
- name: Delete trunk
community.network.pn_trunk:
state: 'absent'
pn_name: 'spine-to-leaf'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the trunk command.
returned: always
type: list
stderr:
description: The set of error responses from the trunk command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
TRUNK_EXISTS = None
def pn_cli(module):
    """
    Generate the cli portion to launch the Netvisor cli.

    Parses the username, password and switch parameters from the module.

    :param module: The Ansible module to fetch username, password and switch
    :return: the cli string for further processing
    """
    username = module.params['pn_cliusername']
    password = module.params['pn_clipassword']
    cliswitch = module.params['pn_cliswitch']
    if username and password:
        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
    else:
        # No credentials supplied: run the CLI as the invoking (root) user.
        cli = '/usr/bin/cli --quiet '
    if cliswitch == 'local':
        cli += ' switch-local '
    else:
        cli += ' switch ' + cliswitch
    return cli
def check_cli(module, cli):
    """
    Check for idempotency using the trunk-show command.

    Sets the module-level TRUNK_EXISTS flag to True when a trunk with the
    given pn_name already exists on the switch, False otherwise.

    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    """
    name = module.params['pn_name']
    show = cli + ' trunk-show format switch,name no-show-headers'
    show = shlex.split(show)
    # run_command returns (rc, stdout, stderr); keep stdout only.
    out = module.run_command(show)[1]
    out = out.split()
    # Global flags
    global TRUNK_EXISTS
    if name in out:
        TRUNK_EXISTS = True
    else:
        TRUNK_EXISTS = False
def run_cli(module, cli):
    """
    Execute the cli command on the target node(s) and exit the module.

    Exits via exit_json(): ``changed=False`` plus stderr on a non-zero
    return code, otherwise ``changed=True`` with any stdout produced.

    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    command = get_command_from_state(state)
    cmd = shlex.split(cli)
    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(cmd)
    # Show only the part of the command after the switch clause.
    print_cli = cli.split(cliswitch)[1]
    # Response in JSON format
    if result != 0:
        module.exit_json(
            command=print_cli,
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )
    if out:
        module.exit_json(
            command=print_cli,
            stdout=out.strip(),
            msg="%s operation completed" % command,
            changed=True
        )
    else:
        module.exit_json(
            command=print_cli,
            msg="%s operation completed" % command,
            changed=True
        )
def get_command_from_state(state):
    """
    Get the appropriate command name for the state specified.

    :param state: The state for which the respective command name is required.
    :return: 'trunk-create', 'trunk-delete' or 'trunk-modify'; None for an
        unrecognized state.
    """
    command = None
    if state == 'present':
        command = 'trunk-create'
    if state == 'absent':
        command = 'trunk-delete'
    if state == 'update':
        command = 'trunk-modify'
    return command
def main():
    """
    Entry point: parse module arguments, build the trunk-create /
    trunk-delete / trunk-modify CLI string and execute it on the target.
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_name=dict(required=True, type='str'),
            pn_ports=dict(type='str'),
            pn_speed=dict(type='str',
                          choices=['disable', '10m', '100m', '1g', '2.5g',
                                   '10g', '40g']),
            pn_egress_rate_limit=dict(type='str'),
            pn_jumbo=dict(type='bool'),
            pn_lacp_mode=dict(type='str', choices=[
                'off', 'passive', 'active']),
            pn_lacp_priority=dict(type='int'),
            pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']),
            pn_lacp_fallback=dict(type='str', choices=[
                'bundle', 'individual']),
            pn_lacp_fallback_timeout=dict(type='str'),
            pn_edge_switch=dict(type='bool'),
            pn_pause=dict(type='bool'),
            pn_description=dict(type='str'),
            pn_loopback=dict(type='bool'),
            pn_mirror_receive=dict(type='bool'),
            pn_unknown_ucast_level=dict(type='str'),
            pn_unknown_mcast_level=dict(type='str'),
            pn_broadcast_level=dict(type='str'),
            pn_port_macaddr=dict(type='str'),
            pn_loopvlans=dict(type='str'),
            pn_routing=dict(type='bool'),
            pn_host=dict(type='bool')
        ),
        required_if=(
            ["state", "present", ["pn_name", "pn_ports"]],
            ["state", "absent", ["pn_name"]],
            ["state", "update", ["pn_name"]]
        )
    )
    # Accessing the arguments
    state = module.params['state']
    name = module.params['pn_name']
    ports = module.params['pn_ports']
    speed = module.params['pn_speed']
    egress_rate_limit = module.params['pn_egress_rate_limit']
    jumbo = module.params['pn_jumbo']
    lacp_mode = module.params['pn_lacp_mode']
    lacp_priority = module.params['pn_lacp_priority']
    lacp_timeout = module.params['pn_lacp_timeout']
    lacp_fallback = module.params['pn_lacp_fallback']
    lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
    edge_switch = module.params['pn_edge_switch']
    pause = module.params['pn_pause']
    description = module.params['pn_description']
    loopback = module.params['pn_loopback']
    mirror_receive = module.params['pn_mirror_receive']
    unknown_ucast_level = module.params['pn_unknown_ucast_level']
    unknown_mcast_level = module.params['pn_unknown_mcast_level']
    broadcast_level = module.params['pn_broadcast_level']
    port_macaddr = module.params['pn_port_macaddr']
    loopvlans = module.params['pn_loopvlans']
    routing = module.params['pn_routing']
    host = module.params['pn_host']
    command = get_command_from_state(state)
    # Building the CLI command string
    cli = pn_cli(module)
    if command == 'trunk-delete':
        check_cli(module, cli)
        if TRUNK_EXISTS is False:
            # Nothing to delete: report a skip instead of failing.
            module.exit_json(
                skipped=True,
                msg='Trunk with name %s does not exist' % name
            )
        cli += ' %s name %s ' % (command, name)
    else:
        if command == 'trunk-create':
            check_cli(module, cli)
            if TRUNK_EXISTS is True:
                # Idempotency: the trunk is already present.
                module.exit_json(
                    skipped=True,
                    msg='Trunk with name %s already exists' % name
                )
        cli += ' %s name %s ' % (command, name)
        # Appending options. Booleans are tri-state (None means "not
        # specified"), hence the identity tests against True/False.
        if ports:
            cli += ' ports ' + ports
        if speed:
            cli += ' speed ' + speed
        if egress_rate_limit:
            cli += ' egress-rate-limit ' + egress_rate_limit
        if jumbo is True:
            cli += ' jumbo '
        if jumbo is False:
            cli += ' no-jumbo '
        if lacp_mode:
            cli += ' lacp-mode ' + lacp_mode
        if lacp_priority:
            # BUGFIX: pn_lacp_priority is declared type='int' in the
            # argument_spec; convert to str before concatenation to avoid
            # a TypeError whenever the option is supplied.
            cli += ' lacp-priority ' + str(lacp_priority)
        if lacp_timeout:
            cli += ' lacp-timeout ' + lacp_timeout
        if lacp_fallback:
            cli += ' lacp-fallback ' + lacp_fallback
        if lacp_fallback_timeout:
            cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
        if edge_switch is True:
            cli += ' edge-switch '
        if edge_switch is False:
            cli += ' no-edge-switch '
        if pause is True:
            cli += ' pause '
        if pause is False:
            cli += ' no-pause '
        if description:
            cli += ' description ' + description
        if loopback is True:
            cli += ' loopback '
        if loopback is False:
            cli += ' no-loopback '
        if mirror_receive is True:
            cli += ' mirror-receive-only '
        if mirror_receive is False:
            cli += ' no-mirror-receive-only '
        if unknown_ucast_level:
            cli += ' unknown-ucast-level ' + unknown_ucast_level
        if unknown_mcast_level:
            cli += ' unknown-mcast-level ' + unknown_mcast_level
        if broadcast_level:
            cli += ' broadcast-level ' + broadcast_level
        if port_macaddr:
            cli += ' port-mac-address ' + port_macaddr
        if loopvlans:
            cli += ' loopvlans ' + loopvlans
        if routing is True:
            cli += ' routing '
        if routing is False:
            cli += ' no-routing '
        if host is True:
            cli += ' host-enable '
        if host is False:
            cli += ' host-disable '
    run_cli(module, cli)
if __name__ == '__main__':
main()
| true
| true
|
f7144496800e55420ec75dde8d365a87524ea74a
| 37
|
py
|
Python
|
rssdldmng/__init__.py
|
alexpayne482/rssdldmng
|
4428f10171902861702fc0f528d3d9576923541a
|
[
"MIT"
] | null | null | null |
rssdldmng/__init__.py
|
alexpayne482/rssdldmng
|
4428f10171902861702fc0f528d3d9576923541a
|
[
"MIT"
] | 1
|
2019-11-25T15:54:02.000Z
|
2019-11-25T15:54:02.000Z
|
rssdldmng/__init__.py
|
alexpayne482/rssdldmng
|
4428f10171902861702fc0f528d3d9576923541a
|
[
"MIT"
] | null | null | null |
"""Init file for RSS downloader."""
| 18.5
| 36
| 0.648649
| true
| true
|
|
f71444d8f4c578982eaf1f4ddd50ab20ff8817b7
| 4,618
|
py
|
Python
|
test/python/testconsole.py
|
malywonsz/txtai
|
ace1b04161062430887eb2153961abcd819a5afb
|
[
"Apache-2.0"
] | null | null | null |
test/python/testconsole.py
|
malywonsz/txtai
|
ace1b04161062430887eb2153961abcd819a5afb
|
[
"Apache-2.0"
] | 47
|
2021-10-02T22:48:03.000Z
|
2021-12-29T02:36:20.000Z
|
test/python/testconsole.py
|
malywonsz/txtai
|
ace1b04161062430887eb2153961abcd819a5afb
|
[
"Apache-2.0"
] | null | null | null |
"""
Console module tests
"""
import contextlib
import io
import os
import tempfile
import unittest
from txtai.console import Console
from txtai.embeddings import Embeddings
APPLICATION = """
path: %s
workflow:
test:
tasks:
- task: console
"""
class TestConsole(unittest.TestCase):
    """
    Console tests.
    """

    @classmethod
    def setUpClass(cls):
        """
        Initialize test data.
        """
        cls.data = [
            "US tops 5 million confirmed virus cases",
            "Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg",
            "Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
            "The National Park Service warns against sacrificing slower friends in a bear attack",
            "Maine man wins $1M from $25 lottery ticket",
            "Make huge profits without work, earn up to $100,000 a day",
        ]

        # Content-enabled embeddings index backed by sentence-transformers.
        cls.embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2", "content": True})
        cls.embeddings.index([(uid, text, None) for uid, text in enumerate(cls.data)])

        # Temp-file locations for the application YAML and the saved index.
        tmpdir = tempfile.gettempdir()
        cls.apppath = os.path.join(tmpdir, "console.yml")
        cls.embedpath = os.path.join(tmpdir, "embeddings.console")

        # Write app.yml pointing at the index location, then save the index.
        with open(cls.apppath, "w", encoding="utf-8") as out:
            out.write(APPLICATION % cls.embedpath)
        cls.embeddings.save(cls.embedpath)

        # Console under test, loaded directly from the saved index.
        cls.console = Console(cls.embedpath)

    def testApplication(self):
        """
        Test application
        """
        self.assertIn("console.yml", self.command(f".load {self.apppath}"))
        self.assertIn("1", self.command(".limit 1"))
        self.assertIn("Maine man wins", self.command("feel good story"))

    def testConfig(self):
        """
        Test .config command
        """
        self.assertIn("tasks", self.command(".config"))

    def testEmbeddings(self):
        """
        Test embeddings index
        """
        self.assertIn("embeddings", self.command(f".load {self.embedpath}"))
        self.assertIn("1", self.command(".limit 1"))
        self.assertIn("Maine man wins", self.command("feel good story"))

    def testEmbeddingsNoDatabase(self):
        """
        Test embeddings with no database/content
        """
        console = Console()

        # Index without content storage: queries resolve to ids, not text.
        embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2"})
        embeddings.index([(uid, text, None) for uid, text in enumerate(self.data)])

        # Attach the embeddings instance directly to the console.
        console.app = embeddings
        self.assertIn("4", self.command("feel good story", console))

    def testEmpty(self):
        """
        Test empty console instance
        """
        console = Console()
        self.assertIn("AttributeError", self.command("search", console))

    def testHighlight(self):
        """
        Test .highlight command
        """
        self.assertIn("highlight", self.command(".highlight"))
        self.assertIn("wins", self.command("feel good story"))
        self.assertIn("Taiwan", self.command("asia"))

    def testPreloop(self):
        """
        Test preloop
        """
        self.assertIn("txtai console", self.preloop())

    def testWorkflow(self):
        """
        Test .workflow command
        """
        self.command(f".load {self.apppath}")
        self.assertIn("echo", self.command(".workflow test echo"))

    def command(self, command, console=None):
        """
        Runs a console command.

        Args:
            command: command to run
            console: console instance, defaults to self.console

        Returns:
            command output
        """
        target = console if console else self.console

        # Capture everything the console prints while running the command.
        buffer = io.StringIO()
        with contextlib.redirect_stdout(buffer):
            target.onecmd(command)

        return buffer.getvalue()

    def preloop(self):
        """
        Runs console.preloop and redirects stdout.

        Returns:
            preloop output
        """
        # Capture the startup banner printed by preloop.
        buffer = io.StringIO()
        with contextlib.redirect_stdout(buffer):
            self.console.preloop()

        return buffer.getvalue()
| 26.54023
| 109
| 0.591815
|
import contextlib
import io
import os
import tempfile
import unittest
from txtai.console import Console
from txtai.embeddings import Embeddings
APPLICATION = """
path: %s
workflow:
test:
tasks:
- task: console
"""
class TestConsole(unittest.TestCase):
    """
    Console tests.
    """
    @classmethod
    def setUpClass(cls):
        """
        Initialize test data: build and save an embeddings index, write the
        application YAML and create the console under test.
        """
        cls.data = [
            "US tops 5 million confirmed virus cases",
            "Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg",
            "Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
            "The National Park Service warns against sacrificing slower friends in a bear attack",
            "Maine man wins $1M from $25 lottery ticket",
            "Make huge profits without work, earn up to $100,000 a day",
        ]
        # Create embeddings model, backed by sentence-transformers & transformers
        cls.embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2", "content": True})
        # Create an index for the list of text
        cls.embeddings.index([(uid, text, None) for uid, text in enumerate(cls.data)])
        # Create app paths
        cls.apppath = os.path.join(tempfile.gettempdir(), "console.yml")
        cls.embedpath = os.path.join(tempfile.gettempdir(), "embeddings.console")
        # Create app.yml
        with open(cls.apppath, "w", encoding="utf-8") as out:
            out.write(APPLICATION % cls.embedpath)
        # Save index
        cls.embeddings.save(cls.embedpath)
        # Create console
        cls.console = Console(cls.embedpath)
    def testApplication(self):
        """
        Test .load of an application YAML file followed by a search.
        """
        self.assertIn("console.yml", self.command(f".load {self.apppath}"))
        self.assertIn("1", self.command(".limit 1"))
        self.assertIn("Maine man wins", self.command("feel good story"))
    def testConfig(self):
        """
        Test .config command
        """
        self.assertIn("tasks", self.command(".config"))
    def testEmbeddings(self):
        """
        Test .load of a saved embeddings index followed by a search.
        """
        self.assertIn("embeddings", self.command(f".load {self.embedpath}"))
        self.assertIn("1", self.command(".limit 1"))
        self.assertIn("Maine man wins", self.command("feel good story"))
    def testEmbeddingsNoDatabase(self):
        """
        Test embeddings with no database/content: results are ids, not text.
        """
        console = Console()
        # Create embeddings model, backed by sentence-transformers & transformers
        embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2"})
        # Create an index for the list of text
        embeddings.index([(uid, text, None) for uid, text in enumerate(self.data)])
        # Set embeddings on console
        console.app = embeddings
        self.assertIn("4", self.command("feel good story", console))
    def testEmpty(self):
        """
        Test empty console instance (no application loaded).
        """
        console = Console()
        self.assertIn("AttributeError", self.command("search", console))
    def testHighlight(self):
        """
        Test .highlight command
        """
        self.assertIn("highlight", self.command(".highlight"))
        self.assertIn("wins", self.command("feel good story"))
        self.assertIn("Taiwan", self.command("asia"))
    def testPreloop(self):
        """
        Test the startup banner printed by preloop.
        """
        self.assertIn("txtai console", self.preloop())
    def testWorkflow(self):
        """
        Test .workflow command
        """
        self.command(f".load {self.apppath}")
        self.assertIn("echo", self.command(".workflow test echo"))
    def command(self, command, console=None):
        """
        Runs a console command and returns captured stdout.

        Args:
            command: command to run
            console: console instance, defaults to self.console

        Returns:
            command output
        """
        # Run info
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            if not console:
                console = self.console
            console.onecmd(command)
        return output.getvalue()
    def preloop(self):
        """
        Runs console.preloop and returns captured stdout.

        Returns:
            preloop output
        """
        # Run info
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            self.console.preloop()
        return output.getvalue()
| true
| true
|
f7144505df291fdcf3ff246068f77eaa76ee3d0a
| 7,859
|
py
|
Python
|
django/apps/config.py
|
DrMeers/django
|
83a3add4bed8d8d49f93b30c817c66908b0a26ba
|
[
"BSD-3-Clause"
] | 1
|
2019-02-10T19:33:27.000Z
|
2019-02-10T19:33:27.000Z
|
django/apps/config.py
|
avkryukov/django
|
f90be002d9d3c10b87c74741986e2cbf9f2b858e
|
[
"BSD-3-Clause"
] | null | null | null |
django/apps/config.py
|
avkryukov/django
|
f90be002d9d3c10b87c74741986e2cbf9f2b858e
|
[
"BSD-3-Clause"
] | null | null | null |
from importlib import import_module
import os
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
    def __init__(self, app_name, app_module):
        """
        Record the app's identity and fill in defaults for any attribute
        (label, verbose_name, path) not already defined on a subclass.
        """
        # Full Python path to the application eg. 'django.contrib.admin'.
        self.name = app_name
        # Root module for the application eg. <module 'django.contrib.admin'
        # from 'django/contrib/admin/__init__.pyc'>.
        self.module = app_module
        # The following attributes could be defined at the class level in a
        # subclass, hence the test-and-set pattern.
        # Last component of the Python path to the application eg. 'admin'.
        # This value must be unique across a Django project.
        if not hasattr(self, 'label'):
            self.label = app_name.rpartition(".")[2]
        # Human-readable name for the application eg. "Admin".
        if not hasattr(self, 'verbose_name'):
            self.verbose_name = self.label.title()
        # Filesystem path to the application directory eg.
        # u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
        # Python 2 and a str on Python 3.
        if not hasattr(self, 'path'):
            self.path = self._path_from_module(app_module)
        # Module containing models eg. <module 'django.contrib.admin.models'
        # from 'django/contrib/admin/models.pyc'>. Set by import_models().
        # None if the application doesn't have a models module.
        self.models_module = None
        # Mapping of lower case model names to model classes. Initially set to
        # None to prevent accidental access before import_models() runs.
        self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3.3 _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
    @classmethod
    def create(cls, entry):
        """
        Factory that creates an app config from an entry in INSTALLED_APPS.

        ``entry`` is either the dotted path of an app module (which may name
        an app config class via ``default_app_config``) or the dotted path
        of an AppConfig subclass itself.
        """
        try:
            # If import_module succeeds, entry is a path to an app module,
            # which may specify an app config class with default_app_config.
            # Otherwise, entry is a path to an app config class or an error.
            module = import_module(entry)
        except ImportError:
            mod_path, _, cls_name = entry.rpartition('.')
            # Raise the original exception when entry cannot be a path to an
            # app config class.
            if not mod_path:
                raise
        else:
            try:
                # If this works, the app module specifies an app config class.
                entry = module.default_app_config
            except AttributeError:
                # Otherwise, it simply uses the default app config class.
                return cls(entry, module)
            else:
                mod_path, _, cls_name = entry.rpartition('.')
        # If we're reaching this point, we must load the app config class
        # located at <mod_path>.<cls_name>.
        # Avoid django.utils.module_loading.import_by_path because it
        # masks errors -- it reraises ImportError as ImproperlyConfigured.
        mod = import_module(mod_path)
        try:
            cls = getattr(mod, cls_name)
        except AttributeError:
            # Emulate the error that "from <mod_path> import <cls_name>"
            # would raise when <mod_path> exists but not <cls_name>, with
            # more context (Python just says "cannot import name ...").
            raise ImportError(
                "cannot import name '%s' from '%s'" % (cls_name, mod_path))
        # Check for obvious errors. (This check prevents duck typing, but
        # it could be removed if it became a problem in practice.)
        if not issubclass(cls, AppConfig):
            raise ImproperlyConfigured(
                "'%s' isn't a subclass of AppConfig." % entry)
        # Obtain app name here rather than in AppClass.__init__ to keep
        # all error checking for entries in INSTALLED_APPS in one place.
        try:
            app_name = cls.name
        except AttributeError:
            raise ImproperlyConfigured(
                "'%s' must supply a name attribute." % entry)
        # Ensure app_name points to a valid module.
        app_module = import_module(app_name)
        # Entry is a path to an app config class.
        return cls(app_name, app_module)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
if self.models is None:
raise LookupError(
"App '%s' doesn't have any models." % self.label)
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| 40.096939
| 81
| 0.618908
|
from importlib import import_module
import os
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
    """Class representing a Django application and its configuration."""
    def __init__(self, app_name, app_module):
        # Full Python path to the application eg. 'django.contrib.admin'.
        self.name = app_name
        # Root module of the application.
        self.module = app_module
        # Last component of the Python path, eg. 'admin'. May already be set
        # at class level in a subclass, hence the test-and-set pattern.
        if not hasattr(self, 'label'):
            self.label = app_name.rpartition(".")[2]
        # Human-readable name, eg. "Admin".
        if not hasattr(self, 'verbose_name'):
            self.verbose_name = self.label.title()
        # Filesystem path to the application directory.
        if not hasattr(self, 'path'):
            self.path = self._path_from_module(app_module)
        # Module containing models; None if the app has no models module.
        # Set by import_models().
        self.models_module = None
        # Mapping of lower case model names to model classes. Initally set to
        # None to prevent accidental access before import_models() runs.
        self.models = None
    def __repr__(self):
        # Debug representation, eg. <AppConfig: admin>.
        return '<%s: %s>' % (self.__class__.__name__, self.label)
    def _path_from_module(self, module):
        """Attempt to determine app's filesystem path from its module."""
        # See #21874 for extended discussion of the behavior of this method in
        # various cases.
        # Convert paths to list because Python 3.3 _NamespacePath does not
        # support indexing.
        paths = list(getattr(module, '__path__', []))
        if len(paths) != 1:
            # Fall back to the directory containing the module's source file.
            filename = getattr(module, '__file__', None)
            if filename is not None:
                paths = [os.path.dirname(filename)]
        if len(paths) > 1:
            raise ImproperlyConfigured(
                "The app module %r has multiple filesystem locations (%r); "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module, paths))
        elif not paths:
            raise ImproperlyConfigured(
                "The app module %r has no filesystem location, "
                "you must configure this app with an AppConfig subclass "
                "with a 'path' class attribute." % (module,))
        return upath(paths[0])
    @classmethod
    def create(cls, entry):
        """Factory creating an app config from an INSTALLED_APPS entry."""
        try:
            # If import_module succeeds, entry is a path to an app module,
            # which may specify an app config class with default_app_config.
            # Otherwise, entry is a path to an app config class or an error.
            module = import_module(entry)
        except ImportError:
            mod_path, _, cls_name = entry.rpartition('.')
            # Raise the original exception when entry cannot be a path to an
            # app config class.
            if not mod_path:
                raise
        else:
            try:
                # If this works, the app module specifies an app config class.
                entry = module.default_app_config
            except AttributeError:
                # Otherwise, it simply uses the default app config class.
                return cls(entry, module)
            else:
                mod_path, _, cls_name = entry.rpartition('.')
        # If we're reaching this point, we must load the app config class
        # located at <mod_path>.<cls_name>. Done by hand rather than via
        # import_by_path so ImportError is not masked.
        mod = import_module(mod_path)
        try:
            cls = getattr(mod, cls_name)
        except AttributeError:
            # Emulate "from <mod_path> import <cls_name>" failing, with
            # more context than Python's bare "cannot import name ...".
            raise ImportError(
                "cannot import name '%s' from '%s'" % (cls_name, mod_path))
        # Check for obvious errors before instantiating.
        if not issubclass(cls, AppConfig):
            raise ImproperlyConfigured(
                "'%s' isn't a subclass of AppConfig." % entry)
        # Obtain app name here rather than in AppClass.__init__ to keep
        # all error checking for entries in INSTALLED_APPS in one place.
        try:
            app_name = cls.name
        except AttributeError:
            raise ImproperlyConfigured(
                "'%s' must supply a name attribute." % entry)
        # Ensure app_name points to a valid module.
        app_module = import_module(app_name)
        # Entry is a path to an app config class.
        return cls(app_name, app_module)
    def get_model(self, model_name):
        """Return the model with the given case-insensitive name.

        Raises LookupError if no such model exists (or if models have not
        been imported yet).
        """
        if self.models is None:
            raise LookupError(
                "App '%s' doesn't have any models." % self.label)
        try:
            return self.models[model_name.lower()]
        except KeyError:
            raise LookupError(
                "App '%s' doesn't have a '%s' model." % (self.label, model_name))
    def get_models(self, include_auto_created=False,
                   include_deferred=False, include_swapped=False):
        """Yield this app's models, filtering auto-created, deferred and
        swapped models unless the matching keyword argument is True."""
        for model in self.models.values():
            if model._deferred and not include_deferred:
                continue
            if model._meta.auto_created and not include_auto_created:
                continue
            if model._meta.swapped and not include_swapped:
                continue
            yield model
    def import_models(self, all_models):
        # Dictionary of models for this app, primarily maintained in the
        # 'all_models' attribute of the Apps this AppConfig is attached to.
        # Injected as a parameter because it gets populated when models are
        # imported, which might happen before populate() imports models.
        self.models = all_models
        if module_has_submodule(self.module, MODELS_MODULE_NAME):
            models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
            self.models_module = import_module(models_module_name)
    def ready(self):
        """Override in subclasses to run startup code when Django starts."""
| true
| true
|
f7144537f8fc87d001f4ac40dde7224820902c65
| 632
|
py
|
Python
|
django-server/climate_commander/jobs/migrations/0003_job_run_time.py
|
jrising/climate-commander
|
123cf5a07b87eb1a3bdb44378ee27712b6563ec3
|
[
"MIT"
] | null | null | null |
django-server/climate_commander/jobs/migrations/0003_job_run_time.py
|
jrising/climate-commander
|
123cf5a07b87eb1a3bdb44378ee27712b6563ec3
|
[
"MIT"
] | 1
|
2016-08-03T21:49:58.000Z
|
2016-08-03T21:49:58.000Z
|
django-server/climate_commander/jobs/migrations/0003_job_run_time.py
|
jrising/climate-commander
|
123cf5a07b87eb1a3bdb44378ee27712b6563ec3
|
[
"MIT"
] | 1
|
2016-07-13T18:19:56.000Z
|
2016-07-13T18:19:56.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-19 05:37
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Re-add the ``run_time`` field to ``Job`` (reverses migration 0002)."""
    dependencies = [
        ('jobs', '0002_remove_job_run_time'),
    ]
    operations = [
        migrations.AddField(
            model_name='job',
            name='run_time',
            # Default is the timestamp captured when the migration was
            # generated; preserve_default=False means it only back-fills
            # existing rows and is not kept on the model.
            field=models.DateTimeField(default=datetime.datetime(2016, 8, 19, 5, 37, 14, 816610, tzinfo=utc), verbose_name='Time of the Last Run'),
            preserve_default=False,
        ),
    ]
| 26.333333
| 147
| 0.647152
|
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Re-add the ``run_time`` field to ``Job`` (reverses migration 0002)."""
    dependencies = [
        ('jobs', '0002_remove_job_run_time'),
    ]
    operations = [
        migrations.AddField(
            model_name='job',
            name='run_time',
            # One-off default captured at generation time; not kept on the
            # model (preserve_default=False).
            field=models.DateTimeField(default=datetime.datetime(2016, 8, 19, 5, 37, 14, 816610, tzinfo=utc), verbose_name='Time of the Last Run'),
            preserve_default=False,
        ),
    ]
| true
| true
|
f714453f736bf7e39cc67b173d03bf9106ffd006
| 4,152
|
py
|
Python
|
benchmark/startQiskit2375.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit2375.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit2375.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Return the bitwise XOR of bit strings s and t, in reversed order."""
    return ''.join(
        str(int(s[i]) ^ int(t[i])) for i in range(len(s) - 1, -1, -1)
    )
def bitwise_dot(s: str, t: str) -> str:
    """Return the dot product of bit strings s and t modulo 2 ('0' or '1')."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f over n control qubits plus one target.

    For every n-bit string x with f(x) == "1", the target qubit is flipped
    via a multi-controlled Toffoli; X gates conjugate the controls so the
    Toffoli fires exactly on that bit pattern.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Basis change: make all controls |1> exactly for pattern `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the basis change.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit: a fixed (auto-generated)
    gate pattern around the oracle for f, followed by measuring every qubit.

    The trailing "# number=NN" comments are generator bookkeeping ids.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=31
    prog.cz(input_qubit[0],input_qubit[3]) # number=32
    prog.h(input_qubit[3]) # number=33
    prog.x(input_qubit[3]) # number=27
    prog.h(input_qubit[3]) # number=34
    prog.cz(input_qubit[0],input_qubit[3]) # number=35
    prog.h(input_qubit[3]) # number=36
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2]) # number=3
    prog.cx(input_qubit[3],input_qubit[0]) # number=37
    prog.z(input_qubit[3]) # number=38
    prog.cx(input_qubit[3],input_qubit[0]) # number=39
    prog.h(input_qubit[3])  # number=4
    prog.h(input_qubit[0])  # number=5
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.cx(input_qubit[2],input_qubit[0]) # number=10
    prog.h(input_qubit[0]) # number=14
    prog.h(input_qubit[1]) # number=30
    prog.cz(input_qubit[2],input_qubit[0]) # number=15
    prog.h(input_qubit[0]) # number=16
    prog.cx(input_qubit[0],input_qubit[2]) # number=20
    prog.x(input_qubit[2]) # number=21
    prog.cx(input_qubit[0],input_qubit[2]) # number=22
    prog.cx(input_qubit[0],input_qubit[2]) # number=17
    prog.cx(input_qubit[0],input_qubit[2]) # number=23
    prog.x(input_qubit[2]) # number=24
    prog.cx(input_qubit[0],input_qubit[2]) # number=25
    prog.cx(input_qubit[0],input_qubit[2]) # number=19
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bit string and bias defining the oracle function
    # f(x) = (a . x) XOR b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the ideal simulator to collect measurement counts.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a fake device to record the compiled circuit.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Persist counts and the transpiled circuit for offline analysis.
    writefile = open("../data/startQiskit2375.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 34.890756
| 140
| 0.651734
|
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Return the bitwise XOR of equal-length bit strings, reversed."""
    width = len(s)
    out = []
    for k in range(width - 1, -1, -1):
        out.append(str(int(s[k]) ^ int(t[k])))
    return ''.join(out)
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of bit strings s and t modulo 2, as '0' or '1'."""
    acc = 0
    for k in range(len(s)):
        acc += int(s[k]) * int(t[k])
    return str(acc % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle O_f: for every n-bit x with f(x) == "1", flip the
    target qubit with a multi-controlled Toffoli; X gates conjugate the
    controls so the Toffoli fires on exactly that bit pattern."""
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Basis change so all controls read |1> for pattern `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the basis change.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the n-qubit benchmark circuit: a fixed auto-generated gate
    pattern around the oracle for f, then measure every qubit."""
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Pre-oracle gate pattern (auto-generated; order is significant).
    prog.h(input_qubit[3])
    prog.cz(input_qubit[0],input_qubit[3])
    prog.h(input_qubit[3])
    prog.x(input_qubit[3])
    prog.h(input_qubit[3])
    prog.cz(input_qubit[0],input_qubit[3])
    prog.h(input_qubit[3])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.cx(input_qubit[3],input_qubit[0])
    prog.z(input_qubit[3])
    prog.cx(input_qubit[3],input_qubit[0])
    prog.h(input_qubit[3])
    prog.h(input_qubit[0])
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    # Post-oracle gate pattern.
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    prog.h(input_qubit[0])
    prog.cx(input_qubit[2],input_qubit[0])
    prog.h(input_qubit[0])
    prog.h(input_qubit[1])
    prog.cz(input_qubit[2],input_qubit[0])
    prog.h(input_qubit[0])
    prog.cx(input_qubit[0],input_qubit[2])
    prog.x(input_qubit[2])
    prog.cx(input_qubit[0],input_qubit[2])
    prog.cx(input_qubit[0],input_qubit[2])
    prog.cx(input_qubit[0],input_qubit[2])
    prog.x(input_qubit[2])
    prog.cx(input_qubit[0],input_qubit[2])
    prog.cx(input_qubit[0],input_qubit[2])
    # Read out every qubit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bit string and bias defining f(x) = (a . x) XOR b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Simulate to collect counts, then transpile against a fake device.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Persist counts and the transpiled circuit for offline analysis.
    writefile = open("../data/startQiskit2375.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| true
| true
|
f7144556651053589116ff8ee6290dc79a7bff13
| 1,851
|
py
|
Python
|
rdmo/questions/migrations/0038_rename_de_to_lang2.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 77
|
2016-08-09T11:40:20.000Z
|
2022-03-06T11:03:26.000Z
|
rdmo/questions/migrations/0038_rename_de_to_lang2.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 377
|
2016-07-01T13:59:36.000Z
|
2022-03-30T13:53:19.000Z
|
rdmo/questions/migrations/0038_rename_de_to_lang2.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 47
|
2016-06-23T11:32:19.000Z
|
2022-03-01T11:34:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-29 16:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename all ``*_de`` translation fields to ``*_lang2`` across the
    questions app (part of moving from hard-coded languages to slots)."""
    dependencies = [
        ('questions', '0037_rename_en_to_lang1'),
    ]
    operations = [
        migrations.RenameField(
            model_name='catalog',
            old_name='title_de',
            new_name='title_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='help_de',
            new_name='help_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='text_de',
            new_name='text_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='verbose_name_de',
            new_name='verbose_name_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='verbose_name_plural_de',
            new_name='verbose_name_plural_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='help_de',
            new_name='help_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='title_de',
            new_name='title_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='verbose_name_de',
            new_name='verbose_name_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='verbose_name_plural_de',
            new_name='verbose_name_plural_lang2',
        ),
        migrations.RenameField(
            model_name='section',
            old_name='title_de',
            new_name='title_lang2',
        ),
    ]
| 28.045455
| 49
| 0.553755
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename all ``*_de`` translation fields to ``*_lang2`` across the
    questions app (companion to 0037 which renamed ``*_en`` to ``*_lang1``)."""
    dependencies = [
        ('questions', '0037_rename_en_to_lang1'),
    ]
    operations = [
        migrations.RenameField(
            model_name='catalog',
            old_name='title_de',
            new_name='title_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='help_de',
            new_name='help_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='text_de',
            new_name='text_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='verbose_name_de',
            new_name='verbose_name_lang2',
        ),
        migrations.RenameField(
            model_name='question',
            old_name='verbose_name_plural_de',
            new_name='verbose_name_plural_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='help_de',
            new_name='help_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='title_de',
            new_name='title_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='verbose_name_de',
            new_name='verbose_name_lang2',
        ),
        migrations.RenameField(
            model_name='questionset',
            old_name='verbose_name_plural_de',
            new_name='verbose_name_plural_lang2',
        ),
        migrations.RenameField(
            model_name='section',
            old_name='title_de',
            new_name='title_lang2',
        ),
    ]
| true
| true
|
f71445882aac3e35cd2d41b9696c200ce10affe8
| 3,635
|
py
|
Python
|
examples/paper_generation_code/2020-07-31-local_fmri_training_mouse.py
|
CoMind-Technologies/deepinterpolation
|
2f583c4fdde4ed92139e40eb8076dd5b129d29d9
|
[
"Unlicense"
] | 178
|
2020-10-16T19:51:21.000Z
|
2022-03-11T01:25:22.000Z
|
examples/paper_generation_code/2020-07-31-local_fmri_training_mouse.py
|
CoMind-Technologies/deepinterpolation
|
2f583c4fdde4ed92139e40eb8076dd5b129d29d9
|
[
"Unlicense"
] | 46
|
2020-10-17T14:28:23.000Z
|
2022-02-18T18:09:12.000Z
|
examples/paper_generation_code/2020-07-31-local_fmri_training_mouse.py
|
CoMind-Technologies/deepinterpolation
|
2f583c4fdde4ed92139e40eb8076dd5b129d29d9
|
[
"Unlicense"
] | 40
|
2020-10-18T19:01:27.000Z
|
2022-03-17T15:49:54.000Z
|
import deepinterpolation as de
import sys
from shutil import copyfile
import os
from deepinterpolation.generic import JsonSaver, ClassLoader
import datetime
from typing import Any, Dict
# Unique run identifier derived from the wall clock (one id per launch).
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")

# Parameter dictionaries, serialised to JSON and consumed by
# deepinterpolation's ClassLoader machinery.
training_param = {}
generator_param = {}
network_param = {}
generator_test_param = {}
steps_per_epoch = 10

# Validation generator: small frame range, fixed block budget.
generator_test_param["type"] = "generator"
generator_test_param["name"] = "FmriGenerator"
generator_test_param["pre_post_x"] = 3
generator_test_param["pre_post_y"] = 2
generator_test_param["pre_post_z"] = 3
generator_test_param[
    "train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_test_param["batch_size"] = 1000
generator_test_param["start_frame"] = 0
generator_test_param["end_frame"] = 100
generator_test_param["total_nb_block"] = 10000
generator_test_param["steps_per_epoch"] = steps_per_epoch

# Training generator: same volume, larger frame range and block budget.
generator_param["type"] = "generator"
generator_param["name"] = "FmriGenerator"
generator_param["pre_post_x"] = 3
generator_param["pre_post_y"] = 2
generator_param["pre_post_z"] = 3
generator_param[
    "train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_param["batch_size"] = 1000
generator_param["start_frame"] = 0
generator_param["end_frame"] = 400
generator_param["total_nb_block"] = 5000000
generator_param["steps_per_epoch"] = steps_per_epoch

# Network architecture and trainer settings.
network_param["type"] = "network"
network_param["name"] = "fmri_volume_dense_denoiser"
training_param["type"] = "trainer"
training_param["name"] = "core_trainer"
training_param["run_uid"] = run_uid
training_param["batch_size"] = generator_test_param["batch_size"]
training_param["steps_per_epoch"] = steps_per_epoch
training_param["period_save"] = 10
training_param["nb_gpus"] = 0
training_param["apply_learning_decay"] = 0
training_param["nb_times_through_data"] = 1
training_param["learning_rate"] = 0.0001
training_param["loss"] = "mean_absolute_error"
training_param["model_string"] = (
    network_param["name"]
    + "_"
    + training_param["loss"]
    + "_"
    + training_param["run_uid"]
)

# Output directory for this run's JSON configs and checkpoints.
jobdir = (
    "//Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/trained_fmri_models/"
    + training_param["model_string"]
    + "_"
    + run_uid
)
training_param["output_dir"] = jobdir
try:
    os.mkdir(jobdir)
except FileExistsError:
    # Bug fix: was a bare `except:` that silenced every error (permission
    # denied, missing parent, ...); only a pre-existing folder is expected.
    print("folder already exists")

# Serialise each parameter dict to JSON in the job directory.
path_training = os.path.join(jobdir, "training.json")
json_obj = JsonSaver(training_param)
json_obj.save_json(path_training)
path_generator = os.path.join(jobdir, "generator.json")
json_obj = JsonSaver(generator_param)
json_obj.save_json(path_generator)
path_test_generator = os.path.join(jobdir, "test_generator.json")
json_obj = JsonSaver(generator_test_param)
json_obj.save_json(path_test_generator)
path_network = os.path.join(jobdir, "network.json")
json_obj = JsonSaver(network_param)
json_obj.save_json(path_network)

# Rehydrate the components from the JSON files and launch training.
generator_obj = ClassLoader(path_generator)
generator_test_obj = ClassLoader(path_test_generator)
network_obj = ClassLoader(path_network)
trainer_obj = ClassLoader(path_training)
train_generator = generator_obj.find_and_build()(path_generator)
test_generator = generator_test_obj.find_and_build()(path_test_generator)
network_callback = network_obj.find_and_build()(path_network)
training_class = trainer_obj.find_and_build()(
    train_generator, test_generator, network_callback, path_training
)
training_class.run()
training_class.finalize()
| 30.041322
| 148
| 0.784869
|
import deepinterpolation as de
import sys
from shutil import copyfile
import os
from deepinterpolation.generic import JsonSaver, ClassLoader
import datetime
from typing import Any, Dict
# Unique run identifier derived from the wall clock (one id per launch).
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")

# Parameter dictionaries, serialised to JSON and consumed by
# deepinterpolation's ClassLoader machinery.
training_param = {}
generator_param = {}
network_param = {}
generator_test_param = {}
steps_per_epoch = 10

# Validation generator: small frame range, fixed block budget.
generator_test_param["type"] = "generator"
generator_test_param["name"] = "FmriGenerator"
generator_test_param["pre_post_x"] = 3
generator_test_param["pre_post_y"] = 2
generator_test_param["pre_post_z"] = 3
generator_test_param[
    "train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_test_param["batch_size"] = 1000
generator_test_param["start_frame"] = 0
generator_test_param["end_frame"] = 100
generator_test_param["total_nb_block"] = 10000
generator_test_param["steps_per_epoch"] = steps_per_epoch

# Training generator: same volume, larger frame range and block budget.
generator_param["type"] = "generator"
generator_param["name"] = "FmriGenerator"
generator_param["pre_post_x"] = 3
generator_param["pre_post_y"] = 2
generator_param["pre_post_z"] = 3
generator_param[
    "train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_param["batch_size"] = 1000
generator_param["start_frame"] = 0
generator_param["end_frame"] = 400
generator_param["total_nb_block"] = 5000000
generator_param["steps_per_epoch"] = steps_per_epoch

# Network architecture and trainer settings.
network_param["type"] = "network"
network_param["name"] = "fmri_volume_dense_denoiser"
training_param["type"] = "trainer"
training_param["name"] = "core_trainer"
training_param["run_uid"] = run_uid
training_param["batch_size"] = generator_test_param["batch_size"]
training_param["steps_per_epoch"] = steps_per_epoch
training_param["period_save"] = 10
training_param["nb_gpus"] = 0
training_param["apply_learning_decay"] = 0
training_param["nb_times_through_data"] = 1
training_param["learning_rate"] = 0.0001
training_param["loss"] = "mean_absolute_error"
training_param["model_string"] = (
    network_param["name"]
    + "_"
    + training_param["loss"]
    + "_"
    + training_param["run_uid"]
)

# Output directory for this run's JSON configs and checkpoints.
jobdir = (
    "//Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/trained_fmri_models/"
    + training_param["model_string"]
    + "_"
    + run_uid
)
training_param["output_dir"] = jobdir
try:
    os.mkdir(jobdir)
except FileExistsError:
    # Bug fix: was a bare `except:` that silenced every error (permission
    # denied, missing parent, ...); only a pre-existing folder is expected.
    print("folder already exists")

# Serialise each parameter dict to JSON in the job directory.
path_training = os.path.join(jobdir, "training.json")
json_obj = JsonSaver(training_param)
json_obj.save_json(path_training)
path_generator = os.path.join(jobdir, "generator.json")
json_obj = JsonSaver(generator_param)
json_obj.save_json(path_generator)
path_test_generator = os.path.join(jobdir, "test_generator.json")
json_obj = JsonSaver(generator_test_param)
json_obj.save_json(path_test_generator)
path_network = os.path.join(jobdir, "network.json")
json_obj = JsonSaver(network_param)
json_obj.save_json(path_network)

# Rehydrate the components from the JSON files and launch training.
generator_obj = ClassLoader(path_generator)
generator_test_obj = ClassLoader(path_test_generator)
network_obj = ClassLoader(path_network)
trainer_obj = ClassLoader(path_training)
train_generator = generator_obj.find_and_build()(path_generator)
test_generator = generator_test_obj.find_and_build()(path_test_generator)
network_callback = network_obj.find_and_build()(path_network)
training_class = trainer_obj.find_and_build()(
    train_generator, test_generator, network_callback, path_training
)
training_class.run()
training_class.finalize()
| true
| true
|
f71445884d094696a2b319a9793ec87601132945
| 1,030
|
py
|
Python
|
code/Ex02.py
|
mariolpantunes/ml-deti
|
a47fdb5df70e3f6fda5768be14f97462dfe057fb
|
[
"MIT"
] | 8
|
2016-04-25T22:36:35.000Z
|
2016-10-29T16:47:34.000Z
|
code/Ex02.py
|
mariolpantunes/ml-deti
|
a47fdb5df70e3f6fda5768be14f97462dfe057fb
|
[
"MIT"
] | null | null | null |
code/Ex02.py
|
mariolpantunes/ml-deti
|
a47fdb5df70e3f6fda5768be14f97462dfe057fb
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import arff
import numpy as np
from sklearn import linear_model
# Load dataset (ARFF; column 0 is x, column 1 is y)
dataset = arff.load(open('dataset/dataset01.arff', 'r'))
data = np.array(dataset['data'])

# Build the design matrix [x, x^2] so a linear model fits a parabola.
X1 = data[:, 0].reshape(-1, 1)
X2 = np.multiply(X1, X1)
X = np.concatenate((X1, X2), axis=1)
Y = data[:, 1].reshape(-1, 1)

# Plot the raw points
plt.scatter(X1, Y, color='black')
plt.xticks(())
plt.yticks(())
plt.show()

# Create linear regression object
model = linear_model.LinearRegression()

# Train the model using X and Y
model.fit(X, Y)

# The coefficients. Bug fix: coef_ follows the column order of X, which is
# [x, x^2], so coef_[0][1] is the quadratic term and coef_[0][0] the linear
# one -- the original print had them swapped.
print("Y = %.2fX^2 + %.2fX + %.2f" % (model.coef_[0][1], model.coef_[0][0], model.intercept_))

# The mean square error
print("Residual sum of squares: %.2f" % np.mean((model.predict(X) - Y) ** 2))

# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % model.score(X, Y))

# Plot the fitted curve over the data
plt.scatter(X1, Y, color='black')
plt.plot(X1, model.predict(X), color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| 23.409091
| 94
| 0.67767
|
import matplotlib.pyplot as plt
import arff
import numpy as np
from sklearn import linear_model
# Load dataset (ARFF; column 0 is x, column 1 is y)
dataset = arff.load(open('dataset/dataset01.arff', 'r'))
data = np.array(dataset['data'])

# Build the design matrix [x, x^2] so a linear model fits a parabola.
X1 = data[:, 0].reshape(-1, 1)
X2 = np.multiply(X1, X1)
X = np.concatenate((X1, X2), axis=1)
Y = data[:, 1].reshape(-1, 1)

# Plot the raw points.
plt.scatter(X1, Y, color='black')
plt.xticks(())
plt.yticks(())
plt.show()

# Fit y ~ [x, x^2] with ordinary least squares.
model = linear_model.LinearRegression()
model.fit(X, Y)

# Bug fix: coef_ follows the column order of X ([x, x^2]), so coef_[0][1]
# is the quadratic coefficient -- the original print had them swapped.
print("Y = %.2fX^2 + %.2fX + %.2f" % (model.coef_[0][1], model.coef_[0][0], model.intercept_))
print("Residual sum of squares: %.2f" % np.mean((model.predict(X) - Y) ** 2))
print('Variance score: %.2f' % model.score(X, Y))

# Plot the fitted curve over the data.
plt.scatter(X1, Y, color='black')
plt.plot(X1, model.predict(X), color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| true
| true
|
f714468f5d55c957348c0992aa36ec674a65a747
| 291
|
py
|
Python
|
test/test_fit.py
|
malyvsen/unifit
|
4cd6eceb9fa0dda31a742bd34b22f70a80464bef
|
[
"MIT"
] | null | null | null |
test/test_fit.py
|
malyvsen/unifit
|
4cd6eceb9fa0dda31a742bd34b22f70a80464bef
|
[
"MIT"
] | null | null | null |
test/test_fit.py
|
malyvsen/unifit
|
4cd6eceb9fa0dda31a742bd34b22f70a80464bef
|
[
"MIT"
] | null | null | null |
import scipy.stats
import unifit
class TestFit:
    """Smoke tests for unifit.fit."""
    # Shared sample drawn once at class-definition time. NOTE(review): no
    # seed is set, so the data differs between runs -- presumably fine for
    # a smoke test; confirm.
    data = scipy.stats.cauchy.rvs(size=256)
    def test_basic(self):
        # fit() runs with its default distribution set.
        unifit.fit(self.data)
    def test_unnamed(self):
        # fit() accepts an explicit iterable of distributions.
        unifit.fit(
            self.data,
            distributions=unifit.distributions.values()
        )
| 16.166667
| 55
| 0.611684
|
import scipy.stats
import unifit
class TestFit:
    """Smoke tests for unifit.fit on a Cauchy-distributed sample."""
    # Drawn once per test session; unseeded, so values vary between runs.
    data = scipy.stats.cauchy.rvs(size=256)
    def test_basic(self):
        # Default distribution set.
        unifit.fit(self.data)
    def test_unnamed(self):
        # Explicit iterable of candidate distributions.
        unifit.fit(
            self.data,
            distributions=unifit.distributions.values()
        )
| true
| true
|
f71446a08f9ffe05ef9b5e466dd97b0725d3771b
| 60,170
|
py
|
Python
|
nim.py
|
FauveNoir/allumette
|
e5b90aa795c1d4001e3bfcf88056a215337fd70e
|
[
"OML"
] | 1
|
2017-02-09T16:42:09.000Z
|
2017-02-09T16:42:09.000Z
|
nim.py
|
FauveNoir/allumette
|
e5b90aa795c1d4001e3bfcf88056a215337fd70e
|
[
"OML"
] | null | null | null |
nim.py
|
FauveNoir/allumette
|
e5b90aa795c1d4001e3bfcf88056a215337fd70e
|
[
"OML"
] | 3
|
2017-02-04T02:17:46.000Z
|
2017-12-20T11:02:36.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nim (game of matches) — pygame front-end with a vi-like ":" command mode.
import os
import random
import sys
import time
import re
import copy
from optparse import OptionParser
import pygame
from pygame.locals import *
version = "0.1"
usage = "usage: %prog [ --lvl [0-5] | ]"
parser = OptionParser(usage=usage, version="%prog 0.1")
parser.add_option("-m", help="Number of match",
                  default=0, action="store", dest="numberOfMatch")
parser.add_option("-v", help="The variant of Nim",
                  default=0, action="store", dest="varient")
# NOTE(review): "-w" stores into the same dest ("varient") as "-v", so the
# two options clobber each other — confirm whether dest="wtw" was intended.
parser.add_option("-w", help="Mode, there is two values possibles “ttl” and “ltl”",
                  default=0, action="store", dest="varient")
(options, args) = parser.parse_args()
if not options.numberOfMatch:
    # If no match count was explicitly chosen by the user, fall back to 15.
    options.numberOfMatch = 15
innitialNumberOfMatch = int(options.numberOfMatch)
currentNumberOfMatch = int(innitialNumberOfMatch)
class borderSize:
    """Record of the four border widths (in pixels) around a drawing area.

    All four sides default to 0; callers assign the fields they need
    (see matchAreaBorder below).
    """

    def __init__(self):
        self.top = 0
        # Bug fix: the attribute was misspelt `bototm`; the rest of the file
        # reads/writes `.bottom`.  The misspelt name is kept as an alias so
        # any external reader of `bototm` keeps working.
        self.bottom = 0
        self.bototm = 0
        self.right = 0
        self.left = 0
class surfaceInformations:
    """Geometry record for a blitted surface: size, position and margins.

    All values are in pixels and default to 0; callers fill in whichever
    fields they need after construction.
    """

    def __init__(self):
        self.width = 0
        self.height = 0
        self.y = 0
        self.x = 0
        self.top = 0
        # Bug fix: `bototm` was a misspelling of `bottom`; the misspelt
        # attribute is kept as an alias for backward compatibility.
        self.bottom = 0
        self.bototm = 0
        self.right = 0
        self.left = 0
        # NOTE(review): the original ended with
        #     if self.y != 0: self.ratio = self.x / self.y
        # but both x and y are always 0 here, so `ratio` was never set.
        # The dead branch has been removed; `ratio` is still never defined.
class whatToDo:
    """Mutable game-state holder: run flag, variant name, match count and
    win rule ("ttl" = take-the-last, "ltl" = let-the-last)."""

    def __init__(self):
        # Defaults describe a fresh "trivial" game using the module-wide
        # starting match count.
        self.wtw = "ttl"
        self.variant = "trivial"
        self.programHaveToContinue = True
        self.number = numberOfInitialMatch
print("This is Nim " + version + "\n")
# Directory containing this script; image assets are loaded relative to it.
mainDir = os.path.dirname(os.path.realpath(__file__))
# Colour definitions (RGB tuples).
background_colour = (144, 124, 106)
text_zone_colour = (81, 69, 58)
history_area_colour = (69, 59, 49)
indicator_colour = (70, 60, 50)
prompt_colour = (25, 21, 18)
creme_colour = (236, 228, 217)
yellow_colour = (205, 153, 29)
winingMainText_colour = (236, 232, 228)
purple_colour = (133, 0, 58)
red = (225, 0, 0)
class variants:
    """Descriptor of one Nim variant: display name, starting match count
    and win rule ("ttl" = take-the-last, "ltl" = let-the-last)."""

    def __init__(self):
        # Defaults describe an unnamed 15-match take-the-last game.
        self.wtw = "ttl"
        self.name = ""
        self.number = 15
# Built-in variant presets.
trivial = variants()
trivial.name = "Trivial"
trivial.number = 15
trivial.wtw = "ttl"
marienbad = variants()
marienbad.name = "Marienbad"
marienbad.number = 5
marienbad.wtw = "ttl"
knowenVarients = [trivial, marienbad]
# Display names of the known variants, in the same order.
viarentNames = []
for varientRow in knowenVarients:
    viarentNames.append(varientRow.name)
# Size definitions (pixels unless stated otherwise).
xSize = 640
ySize = 480
textZoneHeigh = 16
maxPaddingBetwenMatch = 3
# Height/width ratio of the match sprite image.
matchPicRatio = 6.925
numberOfInitialMatch = innitialNumberOfMatch
historyAreaWidth = 67
circleRadius = 10
gameAreaDim = [0, 0]
matchAreaDim = [0, 0]
matchAreaPos = [0, 0]
indicatorDim = [127, 55]
# Margins around the match-drawing area.
matchAreaBorder = borderSize()
matchAreaBorder.top = 40
matchAreaBorder.bottom = 80
matchAreaBorder.left = 40
matchAreaBorder.right = 40
trianglePromptWidth = 7
# Keyboard-input state shared with analyseTyping(): the ex-mode (":")
# buffer and the normal-mode buffer.
# NOTE(review): `normaUserInput` looks like a misspelt, unused duplicate of
# `normalUserInput`, and `textUserInput` is initialised twice — confirm.
textUserInput = []
normaUserInput = []
textUserInput = []
normalUserInput = []
exMode = False
normalMode = True
textToAnalyse = ""
normalTextToAnalyse = ""
allowedMatchDel = ["1", "2", "3"]
pygame.init()
screen = pygame.display.set_mode((xSize, ySize), RESIZABLE)
# Keys accepted as printable input while typing an ex-mode command.
charInputed = [K_TAB, K_SPACE, K_EXCLAIM, K_QUOTEDBL, K_HASH, K_DOLLAR, K_AMPERSAND, K_QUOTE, K_LEFTPAREN, K_RIGHTPAREN, K_ASTERISK, K_PLUS, K_COMMA, K_MINUS, K_PERIOD, K_SLASH, K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9, K_COLON, K_SEMICOLON, K_LESS, K_EQUALS, K_GREATER, K_QUESTION,
               K_AT, K_LEFTBRACKET, K_BACKSLASH, K_RIGHTBRACKET, K_CARET, K_UNDERSCORE, K_BACKQUOTE, K_a, K_b, K_c, K_d, K_e, K_f, K_g, K_h, K_i, K_j, K_k, K_l, K_m, K_n, K_o, K_p, K_q, K_r, K_s, K_t, K_u, K_v, K_w, K_x, K_y, K_z, K_KP_PERIOD, K_KP_DIVIDE, K_KP_MULTIPLY, K_KP_MINUS, K_KP_PLUS, K_KP_EQUALS]
def makeTextZone(nameToDisplay, secondName):
    """Draw the bottom status bar: mode label(s), arrow separators and the
    current ex-mode input buffer.

    nameToDisplay -- main label (current variant / screen name), drawn on a
                     dark background with an arrow-shaped right edge.
    secondName    -- optional secondary label drawn first, in yellow (e.g.
                     "Pause"); pass None to omit it.

    Reads module globals: screen, textZoneHeigh, textUserInput,
    trianglePromptWidth and the colour constants.
    """
    # Re-read the window size every call (the window is resizable).
    xSize, ySize = screen.get_size()
    # Background strip across the full window width.
    textZone = pygame.Surface((xSize, textZoneHeigh))
    textZone.fill(text_zone_colour)
    heighTextZonePosition = ySize - textZoneHeigh
    promptFont = pygame.font.SysFont("monospace", 14, bold=True)
    # Secondary (yellow) label: sized to its text, plus the arrow triangle.
    secondPromptZone = pygame.Surface((1, 1))
    secondPromptZoneInfo = surfaceInformations()
    secondEcart = 0
    secondLittleEcart = 0
    secondPromptZoneInfo.width = 0
    if secondName != None:
        textSecondSizeWidth, textSecondSizeHeight = promptFont.size(secondName)
        secondPromptZoneInfo.width = textSecondSizeWidth + 8
        secondPromptZoneInfo.heigh = textZoneHeigh
        secondPromptZone = pygame.Surface((secondPromptZoneInfo.width, secondPromptZoneInfo.heigh))
        secondPromptZone.fill(yellow_colour)
        secondPromptText = promptFont.render(secondName, 1, prompt_colour)
        secondTextSizeWidth, secondTextSizeHeight = promptFont.size(secondName)
        secondPromptTriangle = pygame.draw.polygon(screen, prompt_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
            secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
        # Horizontal offsets the main label must shift by.
        secondEcart = secondPromptZoneInfo.width + trianglePromptWidth
        secondLittleEcart = trianglePromptWidth
    # Main label zone, sized to its text.
    textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
    promptZoneInfo = surfaceInformations()
    promptZoneInfo.width = textSizeWidth + 8
    promptZoneInfo.heigh = textZoneHeigh
    promptZone = pygame.Surface((promptZoneInfo.width + secondLittleEcart, promptZoneInfo.heigh))
    promptZone.fill(prompt_colour)
    promptText = promptFont.render(nameToDisplay, 1, (205, 153, 29))
    textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
    # Font creation must happen after pygame.init() to avoid a
    # "Font not Initialized" error.
    myfont = pygame.font.SysFont("monospace", 14)
    # Render the ex-mode buffer the user is currently typing.
    label = myfont.render("".join(textUserInput), 1, (255, 255, 255))
    # Blitting cascade: strip, labels, arrow triangles, then the buffer.
    screen.blit(textZone, (0, heighTextZonePosition))
    screen.blit(promptZone, (0 + secondPromptZoneInfo.width, heighTextZonePosition))
    promptTriangle = pygame.draw.polygon(screen, prompt_colour, [[promptZoneInfo.width + secondEcart, ySize - textZoneHeigh], [
        promptZoneInfo.width + secondEcart, ySize], [promptZoneInfo.width + secondEcart + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
    screen.blit(promptText, (4 + secondEcart, heighTextZonePosition + 1))
    if secondName != None:
        screen.blit(secondPromptZone, (0, heighTextZonePosition))
        screen.blit(secondPromptText, (4, heighTextZonePosition + 1))
        secondPromptTriangle = pygame.draw.polygon(screen, yellow_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
            secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
    screen.blit(label, (promptZoneInfo.width +
                        trianglePromptWidth + 4, heighTextZonePosition))
# Last committed normal-mode input line (set by analyseTyping on Enter);
# consumed — then reset to a falsy value — by the game loop.
finalNormalUserInput = ""
def analyseTyping(variant, numberOfInitialMatch, wtw):
    """Drain the pygame event queue and update the global input state.

    Implements a vi-like two-mode keyboard.  Typing ":" (with an empty
    normal buffer) enters ex/command mode, whose keystrokes accumulate in
    textUserInput; otherwise keystrokes go to normalUserInput.  Completed
    ex commands ("about", "quit"/"q", "new ...") are executed here; the
    "new" command fills the global generalState with the requested
    variant/number/wtw, falling back to this function's arguments for any
    omitted option.

    Also handles window resize and the QUIT event (clears the global
    programHaveToContinue).

    Returns (functionHaveToContinue, keyboardInput) where keyboardInput is
    a dict {"mode": "normal"|"ex"|"escape"|"pause", "content": ...}.
    functionHaveToContinue goes False when a "new" command asks the caller
    to restart its loop.
    """
    global programHaveToContinue
    global textUserInput
    global normalUserInput
    global exMode
    global normalMode
    global textToAnalyse
    global normalTextToAnalyse
    global screen
    global finalNormalUserInput
    global generalState
    keyboardInput = dict()
    keyboardInput["mode"] = "normal"
    keyboardInput["content"] = ""
    functionHaveToContinue = True
    for event in pygame.event.get():
        if event.type == VIDEORESIZE:
            # Recreate the display surface at the new size.
            screen = pygame.display.set_mode(event.size, RESIZABLE)
        if event.type == QUIT:
            programHaveToContinue = False
        if event.type == KEYDOWN:
            # ":" on an empty normal buffer switches to ex (command) mode.
            if (event.unicode == ":") and ("".join(normalUserInput) == ""):
                exMode = True
                normalMode = False
            if exMode == True:
                if event.key is K_ESCAPE:
                    # Abort the command being typed.
                    exMode = False
                    normalMode = True
                    textUserInput = []
                elif event.key in charInputed:
                    textUserInput.append(event.unicode)
                elif event.key == K_BACKSPACE and textUserInput != []:
                    del textUserInput[-1]
                    if len(textUserInput) == 1:
                        # Only the leading ":" is left: leave ex mode.
                        exMode = False
                        normalMode = True
                        del textUserInput[-1]
                elif event.key in [K_RETURN, K_KP_ENTER]:
                    # Commit the command (without its leading ":").
                    textToAnalyse = "".join(textUserInput[1:])
                    textUserInput = []
                    exMode = False
                if textUserInput == []:
                    exMode = False
                    normalMode = True
            elif normalMode == True:
                if (event.key is K_ESCAPE) and (normalUserInput != []):
                    # Escape with pending input just clears the buffer.
                    normalUserInput = []
                elif event.key == K_p:
                    normalUserInput = []
                    keyboardInput["mode"] = "pause"
                elif (event.key is K_ESCAPE) and (normalUserInput == []):
                    normalUserInput = []
                    keyboardInput["mode"] = "escape"
                elif (event.key not in [K_RETURN, K_KP_ENTER, K_ESCAPE]):
                    normalUserInput.append(event.unicode)
                elif (event.key in [K_RETURN, K_KP_ENTER]):
                    # Commit the normal-mode line for the game loop.
                    finalNormalUserInput = "".join(normalUserInput)
                    normalUserInput = []
    # Dispatch a completed ex command, if any.
    if textToAnalyse == "about":
        textToAnalyse = ""
        aboutScreen(screen)
    elif textToAnalyse in ["quit", "q"]:
        textToAnalyse = ""
        programHaveToContinue = False
    # elif textToAnalyse in ["new", "n"]:
    #elif re.match("n(ew| *)?$", textToAnalyse) is not None:
    elif re.match("n(ew)?( +((trivial)|(marienbad)))?( +[0-9]+)?( +(((ttl)|(take-the-last))|((ltl)|(let-the-last))))? *$", textToAnalyse) is not None:
        # "new [variant] [count] [rule]" — every part optional.
        programHaveToContinue = True
        functionHaveToContinue = False
        syntaxToExtractOptions = "n(ew)?( +(?P<variente>(trivial|marienbad)))?( +(?P<number>[0-9]+))?( +(?P<wtw>((ttl)|(ltl))))?"
        newGameOptions = re.match(syntaxToExtractOptions,textToAnalyse)
        textToAnalyse = ""
        if (newGameOptions.group("variente") == None) :
            generalState.variant = variant
        else:
            generalState.variant = newGameOptions.group("variente")
        if ( newGameOptions.group("number") == None) :
            generalState.number = numberOfInitialMatch
        else:
            generalState.number = int(newGameOptions.group("number"))
        if ( newGameOptions.group("wtw") == None) :
            generalState.wtw = wtw
        else:
            generalState.wtw = newGameOptions.group("wtw")
        print("New " + str(generalState.variant) + ";" + str(generalState.number) + ";" + str(generalState.wtw) + " game.")
    elif keyboardInput["mode"] == "escape":
        keyboardInput["mode"] = "escape"
    elif keyboardInput["mode"] == "pause":
        keyboardInput["mode"] = "pause"
    else:
        # Unrecognised command: hand it back to the caller as-is.
        keyboardInput["mode"] = "ex"
        keyboardInput["content"] = textToAnalyse
    if normalUserInput != []:
        keyboardInput["mode"] = "normal"
        keyboardInput["content"] = normalUserInput
    return functionHaveToContinue, keyboardInput
def makeAPause(variant, numberOfInitialMatch, wtw, beginingOfGame):
    """Blocking pause screen.

    Displays "PAUSE" until the player presses Escape (or quits), then
    returns an adjusted start timestamp (now - elapsed_before_pause) so the
    time spent paused does not count towards the game clock.
    """
    global winingMainText_colour
    global indicator_colour
    global programHaveToContinue
    resumeMainText_colour = (163, 143, 125)
    pauseMainText_colour = winingMainText_colour
    pauseTextInfo = surfaceInformations()
    resumeTextInfo = surfaceInformations()
    timeBeforePause = int(time.time()) - beginingOfGame
    # NOTE(review): timeOfEndOfGame is computed but never used.
    timeOfEndOfGame = int(time.time()) - beginingOfGame
    functionHaveToContinue = True
    while functionHaveToContinue and programHaveToContinue:
        xSize, ySize = screen.get_size()
        functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
        screen.fill(indicator_colour)
        if textToanalyse["mode"] == "escape":
            functionHaveToContinue = False
        # Bliting the text "PAUSE"
        pauseTextContent = "Pause".upper()
        pauseFont = pygame.font.SysFont("CMU Typewriter Text", 112, bold=True)
        pauseText = pauseFont.render(pauseTextContent, 1, pauseMainText_colour)
        pauseTextInfo.width, pauseTextInfo.height = pauseFont.size(pauseTextContent)
        pauseTextInfo.x = (xSize - pauseTextInfo.width) / 2
        pauseTextInfo.y = (ySize/2) - pauseTextInfo.height
        screen.blit(pauseText, (pauseTextInfo.x, pauseTextInfo.y))
        # Bliting the resume hint text
        resumeTextContent = "Type Escape key to continue."
        resumeFont = pygame.font.SysFont("CMU Typewriter Text", 14, bold=True)
        resumeText = resumeFont.render(resumeTextContent, 1, resumeMainText_colour)
        resumeTextInfo.width, resumeTextInfo.height = resumeFont.size(resumeTextContent)
        resumeTextInfo.x = (xSize - resumeTextInfo.width) / 2
        resumeTextInfo.y = (ySize- 14) - resumeTextInfo.height - 30
        screen.blit(resumeText, (resumeTextInfo.x, resumeTextInfo.y))
        makeTextZone(variant,"Pause")
        #####################
        pygame.display.flip()
        #####################
    timeToReturn = int(time.time()) - timeBeforePause
    return timeToReturn
def makeTimetZone(beginingOfGame):
    """Draw the elapsed-time clock ("MM:SS") at the bottom-right corner.

    beginingOfGame -- epoch seconds when the current game started.

    Returns the width (px) of the clock's background box so the caller can
    lay out neighbouring status-bar widgets.
    """
    timeZoneInformation = surfaceInformations()
    timeZoneBackground = surfaceInformations()
    timeZoneInformation.left = 2
    timeZoneInformation.right = 2
    xSize, ySize = screen.get_size()
    myfont = pygame.font.SysFont("monospace", 14)
    secondSinceBegining = int(time.time()) - beginingOfGame
    m, s = divmod(secondSinceBegining, 60)
    h, m = divmod(m, 60)
    # NOTE(review): hours are computed but never displayed.
    timePassed = "%02d:%02d" % (m, s)
    heighTextZonePosition = ySize - textZoneHeigh
    timeZoneText = myfont.render(timePassed, 1, (0, 0, 0))
    timeZoneInformation.width, timeZoneInformation.height = myfont.size(
        timePassed)
    # Right-align the clock inside the bottom status bar.
    timeZoneInformation.x = xSize - timeZoneInformation.width - timeZoneInformation.left
    timeZoneInformation.y = ySize - textZoneHeigh
    timeZoneBackground.width = timeZoneInformation.width + \
        (timeZoneInformation.left + timeZoneInformation.right)
    timeZoneBackground.height = textZoneHeigh
    timeZoneBackground.y = heighTextZonePosition
    timeZoneBackground.x = timeZoneInformation.x - 2
    timeZoneBackgroundSurface = pygame.Surface(
        (timeZoneBackground.width, timeZoneBackground.height))
    timeZoneBackgroundSurface.fill(creme_colour)
    screen.blit(timeZoneBackgroundSurface,
                (timeZoneBackground.x, timeZoneBackground.y))
    screen.blit(timeZoneText, (timeZoneInformation.x, timeZoneInformation.y))
    # Yellow rectangular border around the clock box.
    timeZoneBorder = pygame.draw.polygon(screen, yellow_colour, [[timeZoneBackground.x, timeZoneBackground.y], [timeZoneBackground.x, timeZoneBackground.y + timeZoneBackground.height - 2], [
        timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y + timeZoneBackground.height - 2], [timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y]], 2)
    return timeZoneBackground.width
# Re-initialisation of the normal-mode input buffer (already set above).
normalUserInput = []
def aboutScreen(screen):
    """Blocking "about" screen: shows the illustration image scaled to fit.

    Runs its own event loop (via analyseTyping) until the player presses
    Escape or quits.
    """
    global programHaveToContinue
    global textUserInput
    global normalUserInput
    global exMode
    global normalMode
    global textToAnalyse
    global normalTextToAnalyse
    functionHaveToContinue = True
    keyboardInput = dict()
    keyboardInput["mode"] = "normal"
    keyboardInput["content"] = ""
    while functionHaveToContinue and programHaveToContinue:
        functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
        if textToanalyse["mode"] == "escape":
            functionHaveToContinue = False
        # Appling variables
        screen.fill(background_colour)
        xSize, ySize = screen.get_size()
        # Illustration: load, then shrink to at most 3/4 of the window in
        # either dimension while keeping the original aspect ratio.
        illustrationInformation = surfaceInformations()
        illustration = pygame.image.load(
            mainDir + "/" + "about-illustration.png").convert_alpha()
        illustrationInformation.width, illustrationInformation.height = illustration.get_size()
        illustrationInformationRatio = illustrationInformation.width / \
            illustrationInformation.height
        if illustrationInformation.width > xSize:
            illustrationInformation.width = xSize * (3 / 4)
            illustrationInformation.height = illustrationInformation.width / \
                illustrationInformationRatio
        if illustrationInformation.height > ySize:
            illustrationInformation.height = ySize * (3 / 4)
            illustrationInformation.width = illustrationInformation.height * \
                illustrationInformationRatio
        # Centre the (possibly resized) illustration.
        illustrationInformation.y = (
            ySize - illustrationInformation.height) / 2
        illustrationInformation.x = (xSize - illustrationInformation.width) / 2
        illustration = pygame.transform.scale(illustration, (int(
            illustrationInformation.width), int(illustrationInformation.height)))
        screen.blit(illustration, (illustrationInformation.x,
                                   illustrationInformation.y))
        makeTextZone("About", None)
        #####################
        pygame.display.flip()
        #####################
def representsInt(s):
    """Return True when `s` can be parsed as a base-10 integer."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def playTrivial(currentMatchNumber, wtw):
    """Choose the computer's move for the trivial Nim variant.

    The winning strategy is to leave the opponent a multiple of 4 matches
    ("ttl": whoever takes the last match wins) or a multiple of 4 plus one
    ("ltl": whoever takes the last match loses).

    currentMatchNumber -- matches still on the table (after the human's move).
    wtw                -- win rule, "ttl" or "ltl".

    Returns the number of matches (0-3) the computer removes; 0 when the
    table is already empty.  From a losing position the move is random.

    Raises ValueError for an unknown `wtw` (the original code raised a
    confusing NameError on an unbound local in that case).
    """
    if wtw == "ttl":
        modulator = 0
    elif wtw == "ltl":
        modulator = 1
    else:
        raise ValueError("wtw must be 'ttl' or 'ltl', got %r" % (wtw,))
    if currentMatchNumber == 0:
        return 0
    # Equivalent to the original chain of three modulo tests:
    # (n - k) % 4 == modulator  <=>  (n - modulator) % 4 == k  for k in 1..3.
    remainder = (currentMatchNumber - modulator) % 4
    if remainder in (1, 2, 3):
        return remainder
    # Losing position: no winning move exists, play randomly.
    return random.randint(1, 3)
def trivialAnalysis(currentMatchNumber, initialMatchNumber, wtw, userInput):
    """Parse and validate the player's move for the trivial variant.

    `userInput` may be "N" or "-N" (remove N matches) or "=N" (leave
    exactly N matches on the table).

    Returns `[True, matchesLeft, matchesRemoved]` on success,
    `[False, errorMessage]` on failure, and `[True, 0, 0]` when the table
    is already empty.  `wtw` is accepted for signature compatibility but
    is not used here.

    Bug fixes vs. the original:
    - a syntax error ("abc") was silently overwritten by the
      "0 is not a valid answer" message before being returned;
    - empty input raised an IndexError instead of an error message.

    NOTE(review): `matchesLeft` is computed from `initialMatchNumber`, not
    `currentMatchNumber`, exactly as in the original — the caller only uses
    `matchesRemoved`, but confirm intent before relying on index 1.
    """
    if currentMatchNumber == 0:
        # Nothing left to play.
        return [True, 0, 0]
    # Moves the rules allow given what is still on the table (at most 3).
    if currentMatchNumber >= 3:
        authorisedNumbers = [3, 2, 1]
    elif currentMatchNumber == 2:
        authorisedNumbers = [2, 1]
    elif currentMatchNumber == 1:
        authorisedNumbers = [1]
    if not userInput:
        return [False, "“" + str(userInput) + "” is not a valid syntax."]
    if userInput[0] == "=":
        # "=N": leave exactly N matches on the table.
        action = "application"
        stringToEvaluate = userInput[1:]
    elif userInput[0] == "-":
        # "-N": explicit subtraction, same as a bare number.
        action = "soustraction"
        stringToEvaluate = userInput[1:]
    else:
        action = "soustraction"
        stringToEvaluate = userInput
    try:
        requestedNumber = int(stringToEvaluate)
    except ValueError:
        return [False, "“" + userInput + "” is not a valid syntax."]
    if action == "soustraction":
        numberOfMatchToDel = requestedNumber
    else:
        numberOfMatchToDel = currentMatchNumber - requestedNumber
    if numberOfMatchToDel == 0:
        return [False, "“0” is not a valid answer."]
    if numberOfMatchToDel not in authorisedNumbers:
        return [False, "“" + str(numberOfMatchToDel) + "” is too big."]
    # Kept from the original: left-count is based on the *initial* number.
    numberLetByUser = initialMatchNumber - numberOfMatchToDel
    return [True, numberLetByUser, numberOfMatchToDel]
def winingFallingScreenMatchExplosion(winer, variant, numberOfInitialMatch, time):
    """Win-screen confetti: blit 1000 match sprites at random positions and
    rotations.  On a loss only a placeholder is printed.

    NOTE(review): the `time` parameter shadows the imported time module and
    is unused here; the sprite image is re-loaded from disk on every
    iteration, which is slow — consider loading once before the loop.
    """
    xSize, ySize = screen.get_size()
    if winer == True:
        matchInformation = surfaceInformations()
        matchS = []
        match = 0
        while match < 1000:
            matchS.append(pygame.image.load(
                mainDir + "/" + "match-animation.png").convert_alpha())
            # Random position (`heigh`/`weight` are ad-hoc attributes).
            matchInformation.heigh = random.randint(0, ySize)
            matchInformation.weight = random.randint(0, xSize)
            rotation = random.randint(0, 360)
            matchS[match] = pygame.transform.rotate(matchS[match], rotation)
            screen.blit(
                matchS[match], (matchInformation.weight, matchInformation.heigh))
            match = match + 1
    elif winer == False:
        # Placeholder: the losing animation was never implemented.
        print("machin")
def formateSecondToDotedTime(seconds):
    """Format a duration in seconds as "MM:SS", or "HH:MM:SS" once it
    reaches one hour."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours:
        return "%02d:%02d:%02d" % (hours, minutes, secs)
    return "%02d:%02d" % (minutes, secs)
def winingFallingScreen(winer, variant, numberOfInitialMatch, time):
    """End-of-game screen: "You win!" plus elapsed time, or "You loose!".

    winer -- True for a win, False for a loss.
    time  -- game duration in seconds (formatted via formateSecondToDotedTime).

    NOTE(review): the parameter `time` shadows the imported time module
    within this function body.
    """
    global indicator_colour
    global winingMainText_colour
    global purple_colour
    lineSeparationColor = (205, 153, 29)
    helpText_color = (163, 143, 125)
    fallingMainText_colour = winingMainText_colour
    xSize, ySize = screen.get_size()
    time = formateSecondToDotedTime(time)
    if winer == True:
        winingTextInfo = surfaceInformations()
        winingTimeTextInfo = surfaceInformations()
        winingHelpTextInfo = surfaceInformations()
        screen.fill(indicator_colour)
        # Bliting the text "You win"
        winingFont = pygame.font.SysFont("CMU Typewriter Text", 44, bold=True)
        winingText = winingFont.render("You win!", 1, winingMainText_colour)
        winingTextInfo.width, winingTextInfo.height = winingFont.size("You win!")
        winingTextInfo.x = (xSize - winingTextInfo.width) / 2
        winingTextInfo.y = 40
        screen.blit(winingText, (winingTextInfo.x, winingTextInfo.y))
        # Bliting the time passed
        winingTimeFont = pygame.font.SysFont("CMU Typewriter Text", 137, bold=True)
        winingTimeText = winingTimeFont.render(time, 1, lineSeparationColor)
        winingTimeTextInfo.width, winingTimeTextInfo.height = winingTimeFont.size(time)
        winingTimeTextInfo.x = (xSize - winingTimeTextInfo.width) / 2
        winingTimeTextInfo.y = 90
        screen.blit(winingTimeText, (winingTimeTextInfo.x, winingTimeTextInfo.y))
        # Bliting help text
        helpText = "Type :new to begin new game or :help for more options."
        winingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
        winingHelpText = winingHelpFont.render(helpText, 1, helpText_color)
        winingHelpTextInfo.width, winingHelpTextInfo.height = winingHelpFont.size(helpText)
        winingHelpTextInfo.x = (xSize - winingHelpTextInfo.width) / 2
        winingHelpTextInfo.y = ySize-90
        screen.blit(winingHelpText, (winingHelpTextInfo.x, winingHelpTextInfo.y))
    elif winer == False:
        fallingTextInfo = surfaceInformations()
        fallingTimeTextInfo = surfaceInformations()
        fallingHelpTextInfo = surfaceInformations()
        screen.fill(purple_colour)
        # Bliting the text "You loose"
        fallingTextContent = "You loose!"
        fallingFont = pygame.font.SysFont("CMU Typewriter Text", 52, bold=True)
        fallingText = fallingFont.render(fallingTextContent, 1, fallingMainText_colour)
        fallingTextInfo.width, fallingTextInfo.height = fallingFont.size(fallingTextContent)
        fallingTextInfo.x = (xSize - fallingTextInfo.width) / 2
        fallingTextInfo.y = (ySize/2) - fallingTextInfo.height
        screen.blit(fallingText, (fallingTextInfo.x, fallingTextInfo.y))
        # Bliting help text
        helpText = "Type :new to begin new game or :help for more options."
        fallingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
        fallingHelpText = fallingHelpFont.render(helpText, 1, helpText_color)
        fallingHelpTextInfo.width, fallingHelpTextInfo.height = fallingHelpFont.size(helpText)
        fallingHelpTextInfo.x = (xSize - fallingHelpTextInfo.width) / 2
        fallingHelpTextInfo.y = ySize-90
        screen.blit(fallingHelpText, (fallingHelpTextInfo.x, fallingHelpTextInfo.y))
def printMarienbadListOfTry(screen, listOfTry):
    """Draw the move-history column for the Marienbad variant.

    Widens the column to fit the widest entry by mutating the global
    historyAreaWidth, then renders one alternating-colour row per move
    ("←"/"→" arrows mark alternate turns) and a scroll hint when the
    history no longer fits the window.
    """
    global historyAreaWidth
    historyFont = pygame.font.SysFont("monospace", 14, bold=True)
    pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
    pageUpDownColor = (220, 36, 4)
    lineSeparationColor = (205, 153, 29)
    realLineSeparationPlayed = (54,46,38)
    xSize, ySize = screen.get_size()
    arrowBackground = []
    row = 0
    arrowPosX = 40
    delledNumberPosX = 53
    scroowlingHistory = 0
    rightHistoryAreaWidth = 0
    # Find the widest rendered history entry; the column is sized to it.
    for aTryGame in listOfTry:
        tempSizeWidth, tempSizeHeigh = historyFont.size(aTryGame)
        if tempSizeWidth > rightHistoryAreaWidth:
            rightHistoryAreaWidth=tempSizeWidth
    rightHistoryAreaWidth=rightHistoryAreaWidth+2
    historyAreaWidth = rightHistoryAreaWidth + 35 + 20
    historyZone = pygame.Surface((historyAreaWidth, ySize))
    historyZone.fill(history_area_colour)
    screen.blit(historyZone, (0, 0))
    # One striped row per recorded move, stacked up from the status bar.
    while row < len(listOfTry):
        if (row % 2 == 0):  # even
            row_coulour = (234, 226, 215)
            arrowSign = "←"
        else:  # odd
            row_coulour = (207, 194, 184)
            arrowSign = "→"
        arrowBackground.append(pygame.Surface(
            (historyAreaWidth, textZoneHeigh)))
        arrowBackground[row].fill(row_coulour)
        rowPosY = ySize - textZoneHeigh - \
            (len(listOfTry) - row) * textZoneHeigh
        historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
        historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
        numberDelledText = historyFont.render(
            str(listOfTry[row]), 1, (0, 0, 0))
        screen.blit(arrowBackground[row], (0, rowPosY))
        screen.blit(historyNumberText, (2, rowPosY + 2))
        screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
        screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
        row = row + 1
    realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
    # Vertical separators: full-height yellow line, darker line over the
    # portion actually covered by history rows.
    lineHistorySeparation = pygame.Surface((1, ySize))
    lineHistorySeparation.fill(lineSeparationColor)
    screen.blit(lineHistorySeparation, (35, 0))
    realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
    realLineHistorySeparation.fill(realLineSeparationPlayed)
    screen.blit(realLineHistorySeparation, (35, ySize-realHistoryHeigh))
    if realHistoryHeigh > ySize:
        # History outgrew the window: show a scroll hint.
        pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
        screen.blit(pageUpText, (historyAreaWidth + 8, 4))
    # Soft drop-shadow along the top edge of the column.
    shadowTop = pygame.image.load(mainDir + "/" + "history-top-shadow.png").convert_alpha()
    shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
    screen.blit(shadowTop, (0, 0))
def printListOfTry(screen, listOfTry):
    """Draw the per-move history column (trivial variant) on the left edge.

    Each row shows the move index, an arrow (even rows "←", odd rows "→" —
    presumably player vs. computer, following the alternating append order
    in trivial(); confirm) and the number of matches removed, colour-coded
    for 1/2/3.

    Fixes vs. the original: `numberToDelColor` is now reset every row (it
    was unbound on the first row — a NameError — if the value was not
    1/2/3, and stale afterwards), and the leftover debug print() calls are
    removed.
    """
    historyFont = pygame.font.SysFont("monospace", 14, bold=True)
    pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
    pageUpDownColor = (220, 36, 4)
    lineSeparationColor = (205, 153, 29)
    realLineSeparationPlayed = (54,46,38)
    xSize, ySize = screen.get_size()
    arrowBackground = []
    row = 0
    arrowPosX = 40
    delledNumberPosX = 53
    historyZone = pygame.Surface((historyAreaWidth, ySize))
    historyZone.fill(history_area_colour)
    screen.blit(historyZone, (0, 0))
    while row < len(listOfTry):
        if (row % 2 == 0):  # even
            row_coulour = (234, 226, 215)
            arrowSign = "←"
        else:  # odd
            row_coulour = (207, 194, 184)
            arrowSign = "→"
        # Colour-code the removed count; default to black so an unexpected
        # value no longer crashes or reuses the previous row's colour.
        numberToDelColor = (0, 0, 0)
        if listOfTry[row] == 1:
            numberToDelColor = (0, 126, 223)
        if listOfTry[row] == 2:
            numberToDelColor = (40, 149, 0)
        if listOfTry[row] == 3:
            numberToDelColor = (215, 0, 95)
        arrowBackground.append(pygame.Surface(
            (historyAreaWidth, textZoneHeigh)))
        arrowBackground[row].fill(row_coulour)
        rowPosY = ySize - textZoneHeigh - \
            (len(listOfTry) - row) * textZoneHeigh
        historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
        historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
        numberDelledText = historyFont.render(
            str(listOfTry[row]), 1, numberToDelColor)
        screen.blit(arrowBackground[row], (0, rowPosY))
        screen.blit(historyNumberText, (2, rowPosY + 2))
        screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
        screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
        row = row + 1
    realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
    # Vertical separators: full-height yellow line, darker line over the
    # portion actually covered by history rows.
    lineHistorySeparation = pygame.Surface((1, ySize))
    lineHistorySeparation.fill(lineSeparationColor)
    screen.blit(lineHistorySeparation, (35, 0))
    realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
    realLineHistorySeparation.fill(realLineSeparationPlayed)
    screen.blit(realLineHistorySeparation, (35, ySize-realHistoryHeigh))
    if realHistoryHeigh > ySize:
        # History outgrew the window: show a scroll hint.
        pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
        screen.blit(pageUpText, (historyAreaWidth + 8, 4))
    # Soft drop-shadow along the top edge of the column.
    shadowTop = pygame.image.load(mainDir + "/" + "history-top-shadow.png").convert_alpha()
    shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
    screen.blit(shadowTop, (0, 0))
def showVariant(screen, wtw, posX):
    """Draw a small yellow badge with the win-rule label (`wtw`) in the
    status bar, right-aligned `posX` pixels from the window's right edge.

    Returns the total horizontal space consumed (badge width plus margins)
    so callers can stack several badges side by side.
    """
    yellow_colour = (205, 153, 29)
    xSize, ySize = screen.get_size()
    variantFont = pygame.font.SysFont("monospace", 14, bold=True)
    wtwText = variantFont.render(wtw, 1, (225, 225, 225))
    # Size deffinition
    variantBackgroundInformation = surfaceInformations()
    variantBackgroundInformation.left = 2
    variantBackgroundInformation.right = 2
    variantBackgroundInformation.height = textZoneHeigh
    variantBackgroundInformation.y = ySize - textZoneHeigh
    variantTextInformation = surfaceInformations()
    variantTextInformation.width, variantTextInformation.height = variantFont.size(wtw)
    variantBackgroundInformation.width = variantTextInformation.width
    variantBackgroundInformation.width = variantBackgroundInformation.width + variantBackgroundInformation.left + variantBackgroundInformation.right
    variantBackgroundInformation.x = xSize - variantBackgroundInformation.width - posX
    variantTextInformation.x = variantBackgroundInformation.x + 1 + variantBackgroundInformation.left
    variantTextInformation.y = variantBackgroundInformation.y + 1
    # creation
    variantBackground = pygame.Surface(
        (variantBackgroundInformation.width, variantBackgroundInformation.height))
    variantBackground.fill(yellow_colour)
    # Blitting
    screen.blit(variantBackground, (variantBackgroundInformation.x, variantBackgroundInformation.y))
    screen.blit(wtwText, (variantTextInformation.x, variantTextInformation.y))
    # Ending
    return variantBackgroundInformation.width + variantBackgroundInformation.left + variantBackgroundInformation.right
def trivial(numberOfInitialMatch, wtw, screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
global finalNormalUserInput
allowedEntry = ["1", "2", "3"]
beginingOfGame = int(time.time())
currentNumberOfMatch = numberOfInitialMatch
normalTextInformation = surfaceInformations()
indicatorTextInformation = surfaceInformations()
listOfTry = []
functionHaveToContinue = True
myfont = pygame.font.SysFont("monospace", 14)
errorToDisplay = False
weHaveAWiner = False
winer = None
while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
userPlayed = 0
computerPlayed = 0
functionHaveToContinue, textToanalyse = analyseTyping(
"trivial", numberOfInitialMatch, wtw)
if textToanalyse["mode"] == "pause":
print("In pause")
beginingOfGame = makeAPause("Trivial", numberOfInitialMatch, wtw, beginingOfGame)
# Redifining variables
xSize, ySize = screen.get_size()
gameAreaDim[0] = xSize - historyAreaWidth
# indicator area variables
indicatorPosition = ((historyAreaWidth + ((xSize - historyAreaWidth) -
indicatorDim[0]) / 2), ySize - textZoneHeigh - indicatorDim[1])
indicatorArea = pygame.Surface((indicatorDim[0], indicatorDim[1]))
# Appling variables
screen.fill(background_colour)
if weHaveAWiner == False:
printListOfTry(screen, listOfTry)
# Indicator area deffinition
indicatorArea.fill(indicator_colour)
screen.blit(indicatorArea, (indicatorPosition[
0], indicatorPosition[1]))
indicatorBorderPositionLeft = (
int(indicatorPosition[0] + circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionLeft[
0], indicatorBorderPositionLeft[1]), circleRadius)
indicatorBorderPositionRight = (int(
indicatorPosition[0] + indicatorDim[0] - circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionRight[
0], indicatorBorderPositionRight[1]), circleRadius)
indicatorRadiusCompleterPosition = (
indicatorPosition[0] + circleRadius, indicatorPosition[1] - circleRadius)
indicatorRadiusCompleterDim = (
indicatorDim[0] - 2 * circleRadius, circleRadius)
indicatorRadiusCompleterArea = pygame.Surface(
(indicatorRadiusCompleterDim[0], indicatorRadiusCompleterDim[1]))
indicatorRadiusCompleterArea.fill(indicator_colour)
screen.blit(indicatorRadiusCompleterArea, (indicatorRadiusCompleterPosition[
0], indicatorRadiusCompleterPosition[1]))
# Matchs deffinition
maxMatchAreaDim = [xSize - historyAreaWidth - (2 * matchAreaBorder.right), ySize - textZoneHeigh - indicatorDim[
1] - matchAreaBorder.top - matchAreaBorder.bottom]
maxMatchDim = [0, 0]
maxMatchDim[0] = maxMatchAreaDim[0] / (numberOfInitialMatch * 1.5)
maxMatchDim[1] = maxMatchDim[0] * matchPicRatio
if maxMatchDim[1] > maxMatchAreaDim[1]:
matchDim = [int(maxMatchAreaDim[1] / matchPicRatio),
int(maxMatchAreaDim[1])]
else:
matchDim = [int(maxMatchDim[0]), int(
maxMatchDim[0] * matchPicRatio)]
tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
matchMaxWidth, matchMaxHeight = tempImageMatch.get_rect().size
if matchDim[0] > matchMaxWidth:
matchDim[0] = matchMaxWidth
matchDim[1] = matchMaxHeight
matchAreaDim = [matchDim[0] * numberOfInitialMatch, matchDim[1]]
matchAreaPos = [historyAreaWidth + matchAreaBorder.left + (
(maxMatchAreaDim[0] - matchAreaDim[0]) / 2), (ySize - indicatorDim[1] - matchDim[1]) / 2]
secondMatchAreaPos = [matchAreaPos[
0] + (matchAreaDim[0] - (numberOfInitialMatch * 1.5) * matchDim[0]) / 2, matchAreaPos[1]]
matchRessizing = matchMaxWidth/matchDim[0]
if wtw == "ttl":
lastBurnedMatch = [1, 2, 3]
elif wtw == "ltl":
lastBurnedMatch = [2, 3, 4]
i = 0
matchS = []
while i < numberOfInitialMatch:
if i < currentNumberOfMatch:
if currentNumberOfMatch in lastBurnedMatch:
initialSignDistanceToMatch = matchDim[1]/7
if i+1 in lastBurnedMatch:
matchS.append(pygame.image.load(
mainDir + "/" + "match-burned.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
initialSignDistanceToMatch = matchDim[1]/24
if i >= (currentNumberOfMatch - 3):
matchS.append(pygame.image.load(
mainDir + "/" + "match-allowed.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match-void.png").convert_alpha())
matchLeftVoid = 0
if i != 0:
matchLeftVoid = matchDim[0] / 2
currentMatchPos = [secondMatchAreaPos[
0] + i * (matchLeftVoid + matchDim[0]), secondMatchAreaPos[1]]
matchS[i] = pygame.transform.scale(
matchS[i], (matchDim[0], matchDim[1]))
screen.blit(
matchS[i], (currentMatchPos[0], currentMatchPos[1]))
if i == 0:
#adding crown or warning sign
initialSignPos = [0,0]
initialSignPos[1] = currentMatchPos[1] - initialSignDistanceToMatch
if wtw == "ttl":
initialSign = pygame.image.load(mainDir + "/" + "crown.png").convert_alpha()
if wtw == "ltl":
initialSign = pygame.image.load(mainDir + "/" + "skull.png").convert_alpha()
initialSignSize = initialSign.get_rect().size
initialSignSize = [int(initialSignSize[0]/matchRessizing),int(initialSignSize[1]/matchRessizing)]
initialSign = pygame.transform.scale(initialSign, (initialSignSize[0], initialSignSize[1]))
initialSignPos[0] = (currentMatchPos[0]+(matchDim[0]/2)) - (initialSignSize[0]/2)
screen.blit(initialSign, (initialSignPos[0], initialSignPos[1]))
i = i + 1
indicatorFont = pygame.font.SysFont("monospace", 34)
indicatorTextContent = str(
currentNumberOfMatch) + "/" + str(numberOfInitialMatch)
indicatorText = indicatorFont.render(
indicatorTextContent, 1, (255, 255, 255))
indicatorTextInformation.width, indicatorTextInformation.height = indicatorFont.size(
indicatorTextContent)
indicatorTextInformation.x = indicatorPosition[
0] + (indicatorDim[0] - indicatorTextInformation.width) / 2
indicatorTextInformation.y = indicatorPosition[1] + 5
screen.blit(indicatorText, (indicatorTextInformation.x,
indicatorTextInformation.y))
if finalNormalUserInput:
getFromAnalysis = trivialAnalysis(
currentNumberOfMatch, numberOfInitialMatch, wtw, finalNormalUserInput)
finalNormalUserInput = False
if getFromAnalysis[0] == True:
userPlayed = getFromAnalysis[2]
listOfTry.append(userPlayed)
else:
errorToDisplay = getFromAnalysis[1]
if getFromAnalysis[0] == True:
computerPlayed = playTrivial(
currentNumberOfMatch - userPlayed,wtw)
listOfTry.append(computerPlayed)
currentNumberOfMatch = currentNumberOfMatch - userPlayed
if ((currentNumberOfMatch == 0) and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = True
else:
currentNumberOfMatch = currentNumberOfMatch - computerPlayed
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = False
numberOfMatchDelled = numberOfInitialMatch - currentNumberOfMatch
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
weHaveAWiner = True
timeOfEndOfGame = int(time.time()) - beginingOfGame
else:
print("we have a winer")
timeOfEndOfGame = int(time.time()) - beginingOfGame
if textToanalyse in allowedEntry:
normalTextZone = myfont.render(
"".join(textToanalyse), 1, (255, 255, 255))
screen.blit(normalTextZone, (100, 100))
makeTextZone("Trivial", None)
timeZoneWidth = makeTimetZone(beginingOfGame)
wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
if textToanalyse["mode"] == "normal":
errorToDisplay = False
normalText = myfont.render(
"".join(textToanalyse["content"]), 1, (255, 255, 255))
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
if errorToDisplay != False:
normalText = myfont.render(errorToDisplay, 1, red)
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
# testSurface = pygame.Surface((indicatorTextInformation.width, indicatorTextInformation.height))
# testSurface.fill(red)
# screen.blit(testSurface, (indicatorTextInformation.x,indicatorTextInformation.y))
#####################
pygame.display.flip()
#####################
while functionHaveToContinue and programHaveToContinue:
winingFallingScreen(
winer, wtw, numberOfInitialMatch, timeOfEndOfGame)
functionHaveToContinue, textToanalyse = analyseTyping(
"trivial", numberOfInitialMatch, wtw)
makeTextZone("Trivial", None)
#####################
pygame.display.flip()
#####################
return False
def marienbadInitialColumns(numberOfLines):
    """Build the initial Marienbad board as a list of column heights.

    For ``numberOfLines`` lines the board has ``2*numberOfLines - 1``
    columns whose heights rise from 1 to numberOfLines and fall back to 1,
    e.g. numberOfLines=3 -> [1, 2, 3, 2, 1].
    """
    ascending = list(range(1, numberOfLines + 1))
    # Mirror the rising half (minus its peak) to get the falling half.
    return ascending + ascending[-2::-1]
def marienbadIsItAWinerSituation(matchMatrix, wtw):
    """Report whether the board is in a terminal "winning" configuration.

    Returns the list of indices of non-empty columns when the position is a
    winning one for the given rule (``"ttl"`` take-the-last or ``"ltl"``
    let-the-last), and False otherwise (including unknown rules).
    """
    occupied = [index for index, count in enumerate(matchMatrix) if count != 0]
    if wtw == "ttl":
        # Take-the-last: winning as soon as a single column remains.
        return occupied if len(occupied) == 1 else False
    if wtw == "ltl":
        # Let-the-last: one column with more than one match...
        if len(occupied) == 1 and matchMatrix[occupied[0]] > 1:
            return occupied
        # ...or exactly two columns of one match each.
        if (len(occupied) == 2
                and matchMatrix[occupied[0]] == 1
                and matchMatrix[occupied[1]] == 1):
            return occupied
        return False
    return False
def getNimSum(matchMatrix):
columns = len(matchMatrix)
numberOfLines = int((columns+1)/2)
lineSums = [0] * numberOfLines
i=0
for column in matchMatrix:
j=0
while j < column:
lineSums[j]=lineSums[j]+1
j=j+1
i=i+1
return lineSums
def playMarienbad(matchMatrix,wtw):
    """Choose the computer's move for the Marienbad variant.

    Looks at the decimal digits of the summed "binary" line totals; if any
    digit is odd the heuristic plays on the column returned by
    marienbadWitchMatchLineContainOdd(), otherwise it picks a random legal
    column and count.

    NOTE(review): the two branches return different shapes — the random
    branch returns a [column, count] list while the heuristic branch
    returns a bare column index.  The caller in marienbad() indexes
    currentMatchMatrix[computerPlayed], which only works for the int case —
    confirm the list-returning branch is ever reachable/intended.
    """
    # columns / numberOfLines / lineSums are computed but (apart from
    # lineSums feeding lineSumsBinari) mostly informational here.
    columns = len(matchMatrix)
    numberOfLines = int((columns+1)/2)
    lineSums = getNimSum(matchMatrix)
    # Columns that still hold at least one match are legal to play on.
    allowdedColumnToPlay = []
    i=0
    for column in matchMatrix:
        if column > 0:
            allowdedColumnToPlay.append(i)
        i=i+1
    lineSumsBinari = calculateLineSumsBinari(lineSums)
    print(lineSumsBinari)
    # Sum the integers whose digits are binary representations, then scan
    # the decimal digits of that sum for an odd one.
    finalSum = sum(lineSumsBinari)
    listOfDigits=list(str(finalSum))
    print(listOfDigits)
    itIsPossibleToWin = False
    for aDigit in listOfDigits:
        if (int(aDigit)%2 == 1):
            itIsPossibleToWin = True
    matchLineContainingOdd = None
    if itIsPossibleToWin == False:
        # No odd digit: fall back to a uniformly random legal move.
        columnToPlay = random.sample(allowdedColumnToPlay, 1)[0]
        maxNumberInTheColumn=matchMatrix[columnToPlay]
        numberOfMatchToPlay = random.randint(1,maxNumberInTheColumn)
        whatComputerWillPlay = [columnToPlay,numberOfMatchToPlay]
        columnToPlay = whatComputerWillPlay
    else:
        # theSumColumnContainingTheOddDigit is computed but unused here;
        # the real lookup happens inside marienbadWitchMatchLineContainOdd.
        theSumColumnContainingTheOddDigit = marienbadWitchColumnIsOdd(listOfDigits)
        matchLineContainingOdd = marienbadWitchMatchLineContainOdd(matchMatrix)
        columnToPlay = matchLineContainingOdd
    return columnToPlay
def marienbadWitchColumnIsOdd(listOfDigits):
    """Return the index of the first odd digit in ``listOfDigits``.

    Each element is a one-character digit string; returns None when every
    digit is even (matching the original implicit fall-through).
    """
    for position, digit in enumerate(listOfDigits):
        if int(digit) % 2 == 1:
            return position
    return None
def calculateLineSumsBinari(lineSums):
    """Convert each decimal count to the integer spelled by its binary digits.

    E.g. 5 -> "101" -> 101. Returns a new list, one entry per input.
    (Cleanup: the original kept an unused loop counter ``i`` and built the
    list with a manual append loop; replaced with a comprehension.)
    """
    return [int("{0:b}".format(decimalNum)) for decimalNum in lineSums]
def marienbadWitchMatchLineContainOdd(matchMatrix):
    """Pick the column the computer should play on, via the odd-digit heuristic.

    Pipeline: line totals -> "binary digit" integers -> find which digit
    position of their sum is odd -> zero-pad the binary strings -> keep
    only that digit position per line -> take the highest line showing a
    "1" -> return the last column whose height equals that line index.

    NOTE(review): ``theColumn`` is only assigned inside the final loop; if
    no column height equals ``higherMatchLine`` this raises NameError, and
    if no octet is "1" the [-1] indexing raises IndexError — presumably the
    callers guarantee these cases cannot happen; confirm.  Debug print()
    calls are left in place (they write to stdout at runtime).
    """
    lineSums = getNimSum(matchMatrix)
    lineSumsBinari = calculateLineSumsBinari(lineSums)
    finalSum = sum(lineSumsBinari)
    listOfDigits=list(str(finalSum))
    theSumColumnContainingTheOddDigit = marienbadWitchColumnIsOdd(listOfDigits)
    # Convert lineSums to their binary representation (recomputed here,
    # duplicating calculateLineSumsBinari above).
    lineSumsBinari = []
    i = 0
    for decimalNum in lineSums:
        lineSumsBinari.append(int("{0:b}".format(decimalNum)))
    # Zero-pad every binary string to the same width so digit positions align.
    i = 0
    maxLen = 0
    for binaryNum in lineSumsBinari:
        tempLen = len(str(binaryNum))
        if tempLen > maxLen:
            maxLen = tempLen
        i=i+1
    i = 0
    for binaryNum in lineSumsBinari:
        tempLen = len(str(binaryNum))
        howZeroToAdd = maxLen - tempLen
        if howZeroToAdd > 0:
            for j in range(1,howZeroToAdd+1):
                lineSumsBinari[i] = "0" + str(lineSumsBinari[i])
        else:
            lineSumsBinari[i] = str(lineSumsBinari[i])
        i=i+1
    # Keep only the digit at the odd-sum position from each binary string.
    octetsOfDesiredColumn = []
    i = 0
    for binaryNum in lineSumsBinari:
        extractedOctet = list(str(binaryNum))[theSumColumnContainingTheOddDigit]
        octetsOfDesiredColumn.append(extractedOctet)
        i=i+1
    # Collect the line indices whose extracted digit is "1".
    i = 0
    linesImpliyingOdd = []
    for i in range(0,len(octetsOfDesiredColumn)):
        if octetsOfDesiredColumn[i] == "1":
            linesImpliyingOdd.append(i)
        i=i+1
    higherMatchLine = linesImpliyingOdd[-1]
    # Find the (last) column whose height equals that line index.
    i = 0
    for match in matchMatrix:
        if match == higherMatchLine:
            theColumn=i
        i=i+1
    print("matchMatrix: " + str(matchMatrix))
    print("lineSums: " + str(lineSums))
    print("higherMatchLine: " + str(higherMatchLine))
    print("Là ↓")
    print(theColumn)
    return(theColumn)
def marienbadAnalysis(matchMatrix, userInput):
    """Validate and apply one human move on the Marienbad board.

    matchMatrix -- per-column match counts; mutated in place on success.
    userInput   -- text of the form "<column> - <n>" (remove n matches)
                   or "<column> = <n>" (leave exactly n matches), with
                   optional spaces.

    Returns:
      [True, matchMatrix, "<column>-<removed>"]  on a legal move,
      [False, <error message string>]            on an illegal move,
      [False, 0]                                 when the board is empty.

    Cleanup vs. original: removed the unused locals (columns/numberOfLines/
    maximumMatchMatrix — the latter called marienbadInitialColumns with a
    value derived from a wrong inverse formula and was never read), removed
    a leftover debug print("True"), and flattened the nesting into guard
    clauses.  Every returned value and message is unchanged.
    """
    # The move is only analysable while at least one column holds matches.
    if not any(column != 0 for column in matchMatrix):
        return [False, 0]
    allowedColumns = range(len(matchMatrix))
    # Overall shape check first; then named groups to pull the pieces out.
    syntaxToTestImputValidity = r"^ *([0-9]+) *(=|-) *([0-9]+) *$"
    if re.match(syntaxToTestImputValidity, userInput) is None:
        return [False, "“" + userInput + "” is not a valid syntax."]
    syntaxToExtractOptions = r"^ *(?P<column>[0-9]+) *(?P<operator>(=|-)) *(?P<numberOfMatchUsed>[0-9]+) *$"
    deletingMatchOparation = re.match(syntaxToExtractOptions, userInput)
    columnToDelOnIt = int(deletingMatchOparation.group("column"))
    numberOfMatchUsed = int(deletingMatchOparation.group("numberOfMatchUsed"))
    delletingOperator = deletingMatchOparation.group("operator")
    if columnToDelOnIt not in allowedColumns:
        # Echo the raw (possibly zero-padded) column text exactly as typed.
        return [False, "“" + str(deletingMatchOparation.group("column")) + "” is not in valid range."]
    if numberOfMatchUsed == 0 and delletingOperator == "-":
        return [False, "You can not del no match!"]
    if delletingOperator == "=":
        # "=n" leaves exactly n matches in the column.
        if numberOfMatchUsed > matchMatrix[columnToDelOnIt]:
            return [False, "You can not set a number higher than content."]
        numberOfMatchsToDel = matchMatrix[columnToDelOnIt] - numberOfMatchUsed
    else:
        # "-n" removes n matches from the column.
        if numberOfMatchUsed > matchMatrix[columnToDelOnIt]:
            return [False, "You can not use a number higher than content."]
        numberOfMatchsToDel = numberOfMatchUsed
    matchMatrix[columnToDelOnIt] = matchMatrix[columnToDelOnIt] - numberOfMatchsToDel
    return [True, matchMatrix, str(columnToDelOnIt) + "-" + str(numberOfMatchsToDel)]
def marienbad(numberOfLines, wtw, screen):
    """Run one Marienbad game: event loop, move handling and rendering.

    numberOfLines -- number of board lines (columns = 2*numberOfLines - 1).
    wtw           -- win rule, "ttl" or "ltl".
    screen        -- the pygame display surface to draw on.

    Returns False when the game (or program) should stop.

    NOTE(review): ``winer`` is initialised to None but never assigned inside
    the game loop even though winingColumn is computed — the end-of-game
    branch therefore passes None to winingFallingScreen(); confirm intended.
    Also the pause/endgame paths reference the module global
    ``numberOfInitialMatch`` rather than the ``numberOfLines`` parameter —
    presumably deliberate, verify.
    """
    global programHaveToContinue
    global textUserInput
    global normalUserInput
    global exMode
    global normalMode
    global textToAnalyse
    global normalTextToAnalyse
    global finalNormalUserInput
    global historyAreaWidth
    # Full board (for spotting burned matches) and the live board state.
    maximumMatchMatrix = marienbadInitialColumns(numberOfLines)
    currentMatchMatrix = copy.deepcopy(maximumMatchMatrix)
    numberOfColumns = numberOfLines*2 - 1
    # Initialisation
    beginingOfGame = int(time.time())
    listOfTry = []
    functionHaveToContinue = True
    errorToDisplay = False
    weHaveAWiner = False
    winer = None
    while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
        userPlayed = 0
        computerPlayed = 0
        if weHaveAWiner == False:
            functionHaveToContinue, textToanalyse = analyseTyping("marienbad", numberOfLines, wtw)
            if textToanalyse["mode"] == "pause":
                print("In pause")
                beginingOfGame = makeAPause("Marienbad", numberOfInitialMatch, wtw, beginingOfGame)
            # Re-read the (resizable) window size every frame.
            xSize, ySize = screen.get_size()
            gameAreaDim[0] = xSize - historyAreaWidth
            # Load the reference match sprite (used only for its native size).
            tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
            # Create the per-surface geometry records.
            gameAreaInfo = surfaceInformations()
            realGameAreaInfo = surfaceInformations()
            matchInfo = surfaceInformations()
            maxMatchInfo = surfaceInformations()
            matchAreaInfo = surfaceInformations()
            normalTextInformation = surfaceInformations()
            wtwZoneInfo = surfaceInformations()
            columnNumberInfo = surfaceInformations()
            matchHorizontalSeparation = 0
            # Fixed margins (pixels).
            matchInfo.top = 10
            realGameAreaInfo.top = 20
            realGameAreaInfo.bottom = 30
            realGameAreaInfo.left = 30
            realGameAreaInfo.right = 30
            # Compute element sizes, capping the match sprite at native size.
            realGameAreaInfo.height = ySize - textZoneHeigh - realGameAreaInfo.top - realGameAreaInfo.bottom
            realGameAreaInfo.width = xSize - historyAreaWidth - realGameAreaInfo.left - realGameAreaInfo.right
            maxMatchInfo.width, maxMatchInfo.height = tempImageMatch.get_rect().size
            matchInfo.height = realGameAreaInfo.height / (numberOfLines*1.2)
            matchInfo.top = matchInfo.height*0.2
            if matchInfo.height >= maxMatchInfo.height:
                matchInfo.height = maxMatchInfo.height
                matchInfo.width = maxMatchInfo.width
            else:
                matchInfo.width = matchInfo.height / matchPicRatio
            matchHorizontalSeparation = (realGameAreaInfo.width - (matchInfo.width*numberOfColumns)) / (numberOfColumns-1)
            if matchHorizontalSeparation > matchInfo.height*0.66:
                matchHorizontalSeparation = matchInfo.height*0.66
            # Compute positions so the board is centred in the game area.
            matchAreaInfo.width = matchInfo.width*numberOfColumns + (numberOfColumns-1)*matchHorizontalSeparation
            realGameAreaInfo.x = historyAreaWidth + realGameAreaInfo.left + (realGameAreaInfo.width-matchAreaInfo.width)/2
            matchAreaInfo.height = matchInfo.height*numberOfLines + (numberOfLines-1)*matchInfo.top
            realGameAreaInfo.y = realGameAreaInfo.top + (realGameAreaInfo.height-matchAreaInfo.height)/2
            # Precompute one screen position per match, column by column
            # (j counts matches bottom-up within a column).
            matchPositions = []
            i = 0
            for numberOfMatchInAColumn in maximumMatchMatrix:
                j = 0
                matchPositions.append([])
                cumuledX = matchInfo.width + matchHorizontalSeparation
                while j < numberOfMatchInAColumn:
                    matchPositions[i].append(surfaceInformations())
                    cumuledY = matchInfo.height + matchInfo.top
                    matchPositions[i][j].x = realGameAreaInfo.x + i*cumuledX
                    matchPositions[i][j].y = ySize-textZoneHeigh - realGameAreaInfo.y - (j+1)*cumuledY
                    j=j+1
                i = i+1
            # Blit the background layer and the move-history sidebar.
            screen.fill(background_colour)
            printMarienbadListOfTry(screen, listOfTry)
            # Handle a completed normal-mode input (a human move).
            if finalNormalUserInput:
                getFromAnalysis = marienbadAnalysis(currentMatchMatrix, finalNormalUserInput)
                finalNormalUserInput = False
                if getFromAnalysis[0] == True:
                    currentMatchMatrix = getFromAnalysis[1]
                    listOfTry.append(getFromAnalysis[2])
                else:
                    errorToDisplay = getFromAnalysis[1]
                if getFromAnalysis[0] == True:
                    # The computer answers with one match on its chosen column.
                    computerPlayed = playMarienbad(currentMatchMatrix,wtw)
                    listOfTry.append(str(computerPlayed) + "-" + "1")
                    currentMatchMatrix[computerPlayed] = currentMatchMatrix[computerPlayed]-1
            # Decide whether the board is in a winning configuration.
            winingColumn = marienbadIsItAWinerSituation(currentMatchMatrix, wtw)
            # Blit the board: void, burned or normal match sprites.
            columnNumberFont = pygame.font.SysFont("monospace", 18, bold=True)
            i = 0
            for column in matchPositions:
                j = 0
                for match in column:
                    if (currentMatchMatrix[i] < maximumMatchMatrix[i]) and (j+1 > currentMatchMatrix[i]):
                        visualMatch = pygame.image.load(mainDir + "/" + "match-void.png").convert_alpha()
                    else:
                        if winingColumn:
                            visualMatch = pygame.image.load(mainDir + "/" + "match-burned.png").convert_alpha()
                        else:
                            visualMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
                    visualMatch = pygame.transform.scale(visualMatch, (int(matchInfo.width), int(matchInfo.height)))
                    screen.blit(visualMatch, (match.x, match.y))
                    j=j+1
                # Label each column with its index below the bottom match.
                columnNumberImage = columnNumberFont.render(str(i), 1, (0, 0,0))
                columnNumberInfo.width, columnNumberInfo.height = columnNumberImage.get_size()
                columnNumberInfo.x = column[0].x + (column[0].width/2) - (columnNumberInfo.width/2)
                screen.blit(columnNumberImage, (columnNumberInfo.x, column[0].y+matchInfo.height+12))
                i = i+1
            # Blit the bottom chrome: prompt zone, timer, variant badge.
            makeTextZone("Marienbad", None)
            timeZoneWidth = makeTimetZone(beginingOfGame)
            wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
            # Display the in-progress normal-mode text, or the last error.
            normalFont = pygame.font.SysFont("monospace", 14)
            if textToanalyse["mode"] == "normal":
                errorToDisplay = False
                normalText = normalFont.render(
                    "".join(textToanalyse["content"]), 1, (255, 255, 255))
                normalTextInformation.width, normalTextInformation.height = normalText.get_size()
                normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
                normalTextInformation.y = ySize - textZoneHeigh
                screen.blit(normalText, (normalTextInformation.x,
                                         normalTextInformation.y))
            if errorToDisplay != False:
                normalText = normalFont.render(errorToDisplay, 1, red)
                normalTextInformation.width, normalTextInformation.height = normalText.get_size()
                normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
                normalTextInformation.y = ySize - textZoneHeigh
                screen.blit(normalText, (normalTextInformation.x,
                                         normalTextInformation.y))
            #####################
            pygame.display.flip()
            #####################
        else:
            # End-of-game: show the result screen until the player moves on.
            print("we have a winer")
            timeOfEndOfGame = int(time.time()) - beginingOfGame
            while functionHaveToContinue and programHaveToContinue:
                winingFallingScreen(
                    winer, wtw, numberOfInitialMatch, timeOfEndOfGame)
                functionHaveToContinue, textToanalyse = analyseTyping(
                    "marienbad", numberOfInitialMatch, wtw)
                makeTextZone("Marienbad", None)
                #####################
                pygame.display.flip()
                #####################
    return False
# Module-level session state: the master run flag, the currently selected
# variant name, and the shared whatToDo() object that analyseTyping()
# updates when the player issues a "new" command.
programHaveToContinue = True
variant = None
generalState = whatToDo()
def main(variant="trivial", number=numberOfInitialMatch, wtw="ttl"):
    """Top-level loop: repeatedly launch the selected game variant.

    Each iteration re-reads variant/number/wtw from the shared generalState
    object (updated by analyseTyping() on a "new" command) so the player
    can chain games until programHaveToContinue goes False.

    NOTE(review): the guards read "if <param> not in [0, None, '']" before
    overwriting the param from generalState — i.e. a *valid* argument is
    replaced and an invalid one kept. Confirm this inversion is intended;
    it works today because generalState always holds valid values.
    """
    global generalState
    global programHaveToContinue
    while programHaveToContinue:
        if variant not in [0, None, ""]:
            variant = generalState.variant
        if number not in [0, None, ""]:
            number = generalState.number
        if wtw not in [0, None, ""]:
            wtw = generalState.wtw
        if variant == "trivial":
            trivial(number, wtw, screen)
        elif variant == "marienbad":
            marienbad(number, wtw, screen)
# Script entry point: start with the trivial variant under take-the-last.
main("trivial", numberOfInitialMatch, "ttl")
| 38.76933
| 307
| 0.627372
|
import os
import random
import sys
import time
import re
import copy
from optparse import OptionParser
import pygame
from pygame.locals import *
# Command-line interface.
# NOTE(review): the usage string mentions a "--lvl" option that is never
# declared below — presumably stale help text; confirm before changing it.
version = "0.1"
usage = "usage: %prog [ --lvl [0-5] | ]"
parser = OptionParser(usage=usage, version="%prog 0.1")
parser.add_option("-m", help="Number of match",
                  default=0, action="store", dest="numberOfMatch")
parser.add_option("-v", help="The variant of Nim",
                  default=0, action="store", dest="varient")
# BUG FIX: "-w" previously reused dest="varient", so parsing "-w" silently
# clobbered the value parsed for "-v". Store the win rule under its own
# destination instead.
parser.add_option("-w", help="Mode, there is two values possibles “ttl” and “ltl”",
                  default=0, action="store", dest="wtw")
(options, args) = parser.parse_args()
# Default to a 15-match game when -m is absent (or explicitly 0).
if not options.numberOfMatch:
    options.numberOfMatch = 15
innitialNumberOfMatch = int(options.numberOfMatch)
currentNumberOfMatch = int(innitialNumberOfMatch)
class borderSize:
    """Mutable record of the four border widths (pixels) around an area."""

    def __init__(self):
        self.top = 0
        # Historical misspelling kept so any legacy ".bototm" reader still
        # finds the attribute...
        self.bototm = 0
        # ...and the correctly spelled attribute the rest of the file
        # actually uses (e.g. matchAreaBorder.bottom) is now initialised
        # too, instead of only existing after external assignment.
        self.bottom = 0
        self.right = 0
        self.left = 0
class surfaceInformations:
    """Geometry bag for a drawable surface: size, position and margins.

    All fields start at 0 and are filled in by the drawing code.
    """

    def __init__(self):
        self.width = 0
        self.height = 0
        self.y = 0
        self.x = 0
        self.top = 0
        # Historical misspelling kept for any legacy ".bototm" reader,
        # plus the correctly spelled attribute used throughout the file
        # (e.g. realGameAreaInfo.bottom).
        self.bototm = 0
        self.bottom = 0
        self.right = 0
        self.left = 0
        # Dead code removed: the original computed self.ratio = x / y when
        # y != 0, but y is always 0 at this point so the branch never ran.
class whatToDo:
    """Shared session state: which variant to run next and under what rule."""

    def __init__(self):
        # Mirrors the module-level run flag; cleared when the player quits.
        self.programHaveToContinue = True
        # Variant to launch on the next main-loop iteration.
        self.variant = "trivial"
        # NOTE(review): numberOfInitialMatch is a module-level global
        # defined elsewhere in the file — confirm it exists before the
        # first instantiation.
        self.number = numberOfInitialMatch
        # Win rule: "ttl" (take the last) or "ltl" (let the last).
        self.wtw = "ttl"
# Startup banner.
print("This is Nim " + version + "\n")
# Absolute directory of this script; image assets are loaded relative to it.
mainDir = os.path.dirname(os.path.realpath(__file__))
# Colour palette (RGB tuples).
background_colour = (144, 124, 106)
text_zone_colour = (81, 69, 58)
history_area_colour = (69, 59, 49)
indicator_colour = (70, 60, 50)
prompt_colour = (25, 21, 18)
creme_colour = (236, 228, 217)
yellow_colour = (205, 153, 29)
winingMainText_colour = (236, 232, 228)
purple_colour = (133, 0, 58)
red = (225, 0, 0)
class variants:
    """Describes one playable Nim variant: name, size and win rule."""

    def __init__(self):
        # Win rule: "ttl" (take the last) or "ltl" (let the last).
        self.wtw = "ttl"
        # Default size of the game (matches or lines, variant-dependent).
        self.number = 15
        # Human-readable variant name, filled in by the caller.
        self.name = ""
# Registry of the known variants.
# NOTE(review): these instance names shadow the same-named game functions
# (trivial()/marienbad()) defined elsewhere in the file — left as-is since
# renaming would change the module's public names; confirm load order.
trivial = variants()
trivial.name = "Trivial"
trivial.number = 15
trivial.wtw = "ttl"
marienbad = variants()
marienbad.name = "Marienbad"
marienbad.number = 5
marienbad.wtw = "ttl"
knowenVarients = [trivial, marienbad]
# Idiom fix: build the name list with a comprehension instead of the
# original manual append loop.
viarentNames = [varientRow.name for varientRow in knowenVarients]
# Window geometry and board-layout constants.
xSize = 640
ySize = 480
textZoneHeigh = 16
maxPaddingBetwenMatch = 3
# Height/width ratio of the match sprite (match.png).
matchPicRatio = 6.925
numberOfInitialMatch = innitialNumberOfMatch
historyAreaWidth = 67
circleRadius = 10
gameAreaDim = [0, 0]
matchAreaDim = [0, 0]
matchAreaPos = [0, 0]
indicatorDim = [127, 55]
matchAreaBorder = borderSize()
matchAreaBorder.top = 40
matchAreaBorder.bottom = 80
matchAreaBorder.left = 40
matchAreaBorder.right = 40
trianglePromptWidth = 7
# Input buffers for the vi-like ":" command mode and the normal mode.
textUserInput = []
# NOTE(review): "normaUserInput" looks like a typo of "normalUserInput"
# (declared correctly two lines below) — presumably dead; confirm.
normaUserInput = []
textUserInput = []
normalUserInput = []
exMode = False
normalMode = True
textToAnalyse = ""
normalTextToAnalyse = ""
allowedMatchDel = ["1", "2", "3"]
# Bring up the resizable pygame window.
pygame.init()
screen = pygame.display.set_mode((xSize, ySize), RESIZABLE)
# Keys whose event.unicode is accepted as text in ":" command mode.
charInputed = [K_TAB, K_SPACE, K_EXCLAIM, K_QUOTEDBL, K_HASH, K_DOLLAR, K_AMPERSAND, K_QUOTE, K_LEFTPAREN, K_RIGHTPAREN, K_ASTERISK, K_PLUS, K_COMMA, K_MINUS, K_PERIOD, K_SLASH, K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9, K_COLON, K_SEMICOLON, K_LESS, K_EQUALS, K_GREATER, K_QUESTION,
               K_AT, K_LEFTBRACKET, K_BACKSLASH, K_RIGHTBRACKET, K_CARET, K_UNDERSCORE, K_BACKQUOTE, K_a, K_b, K_c, K_d, K_e, K_f, K_g, K_h, K_i, K_j, K_k, K_l, K_m, K_n, K_o, K_p, K_q, K_r, K_s, K_t, K_u, K_v, K_w, K_x, K_y, K_z, K_KP_PERIOD, K_KP_DIVIDE, K_KP_MULTIPLY, K_KP_MINUS, K_KP_PLUS, K_KP_EQUALS]
def makeTextZone(nameToDisplay, secondName):
    """Draw the bottom status bar: prompt badge(s), triangles and typed text.

    nameToDisplay -- primary badge label (current game name).
    secondName    -- optional secondary badge (e.g. "Pause"); None to omit.

    Reads the module globals screen/textZoneHeigh/textUserInput; blits
    directly onto the screen surface (caller is expected to flip).
    """
    xSize, ySize = screen.get_size()
    # Full-width background strip at the bottom of the window.
    textZone = pygame.Surface((xSize, textZoneHeigh))
    textZone.fill(text_zone_colour)
    heighTextZonePosition = ySize - textZoneHeigh
    promptFont = pygame.font.SysFont("monospace", 14, bold=True)
    # Secondary badge defaults to an empty 1x1 placeholder.
    secondPromptZone = pygame.Surface((1, 1))
    secondPromptZoneInfo = surfaceInformations()
    secondEcart = 0
    secondLittleEcart = 0
    secondPromptZoneInfo.width = 0
    if secondName != None:
        # Size the secondary badge to its label plus padding.
        textSecondSizeWidth, textSecondSizeHeight = promptFont.size(secondName)
        secondPromptZoneInfo.width = textSecondSizeWidth + 8
        secondPromptZoneInfo.heigh = textZoneHeigh
        secondPromptZone = pygame.Surface((secondPromptZoneInfo.width, secondPromptZoneInfo.heigh))
        secondPromptZone.fill(yellow_colour)
        secondPromptText = promptFont.render(secondName, 1, prompt_colour)
        secondTextSizeWidth, secondTextSizeHeight = promptFont.size(secondName)
        secondPromptTriangle = pygame.draw.polygon(screen, prompt_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
            secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
        # Shift the primary badge right of the secondary one.
        secondEcart = secondPromptZoneInfo.width + trianglePromptWidth
        secondLittleEcart = trianglePromptWidth
    textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
    promptZoneInfo = surfaceInformations()
    promptZoneInfo.width = textSizeWidth + 8
    promptZoneInfo.heigh = textZoneHeigh
    promptZone = pygame.Surface((promptZoneInfo.width + secondLittleEcart, promptZoneInfo.heigh))
    promptZone.fill(prompt_colour)
    promptText = promptFont.render(nameToDisplay, 1, (205, 153, 29))
    textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
    # Render the pending ":" command-mode input next to the badges.
    myfont = pygame.font.SysFont("monospace", 14)
    label = myfont.render("".join(textUserInput), 1, (255, 255, 255))
    screen.blit(textZone, (0, heighTextZonePosition))
    screen.blit(promptZone, (0 + secondPromptZoneInfo.width, heighTextZonePosition))
    promptTriangle = pygame.draw.polygon(screen, prompt_colour, [[promptZoneInfo.width + secondEcart, ySize - textZoneHeigh], [
        promptZoneInfo.width + secondEcart, ySize], [promptZoneInfo.width + secondEcart + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
    screen.blit(promptText, (4 + secondEcart, heighTextZonePosition + 1))
    if secondName != None:
        # Draw the secondary badge on top, leftmost.
        screen.blit(secondPromptZone, (0, heighTextZonePosition))
        screen.blit(secondPromptText, (4, heighTextZonePosition + 1))
        secondPromptTriangle = pygame.draw.polygon(screen, yellow_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
            secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
    screen.blit(label, (promptZoneInfo.width +
                        trianglePromptWidth + 4, heighTextZonePosition))
# Completed normal-mode input: set by analyseTyping() on Enter, consumed
# (and reset) by the game loops.
finalNormalUserInput = ""
def analyseTyping(variant, numberOfInitialMatch, wtw):
    """Drain the pygame event queue and interpret keyboard input.

    Implements two vi-like input modes: ":" enters ex/command mode (quit,
    about, new-game commands); anything else accumulates in normal mode
    until Enter publishes it via the finalNormalUserInput global.

    Returns (functionHaveToContinue, keyboardInput) where keyboardInput is
    a dict with "mode" in {"normal", "ex", "pause", "escape"} and
    "content" holding the current buffer/command text.

    BUG FIX: the three `event.key is K_ESCAPE` comparisons used identity
    on integers, which only works by accident of CPython small-int
    caching; changed to `==`.
    """
    global programHaveToContinue
    global textUserInput
    global normalUserInput
    global exMode
    global normalMode
    global textToAnalyse
    global normalTextToAnalyse
    global screen
    global finalNormalUserInput
    global generalState
    keyboardInput = dict()
    keyboardInput["mode"] = "normal"
    keyboardInput["content"] = ""
    functionHaveToContinue = True
    for event in pygame.event.get():
        if event.type == VIDEORESIZE:
            # Recreate the display surface at the new size.
            screen = pygame.display.set_mode(event.size, RESIZABLE)
        if event.type == QUIT:
            programHaveToContinue = False
        if event.type == KEYDOWN:
            # ":" on an empty normal buffer switches to ex/command mode.
            if (event.unicode == ":") and ("".join(normalUserInput) == ""):
                exMode = True
                normalMode = False
            if exMode == True:
                if event.key == K_ESCAPE:
                    exMode = False
                    normalMode = True
                    textUserInput = []
                elif event.key in charInputed:
                    textUserInput.append(event.unicode)
                elif event.key == K_BACKSPACE and textUserInput != []:
                    del textUserInput[-1]
                    # Deleting back to the lone ":" leaves ex mode entirely.
                    if len(textUserInput) == 1:
                        exMode = False
                        normalMode = True
                        del textUserInput[-1]
                elif event.key in [K_RETURN, K_KP_ENTER]:
                    # Publish the command (without the leading ":").
                    textToAnalyse = "".join(textUserInput[1:])
                    textUserInput = []
                    exMode = False
                if textUserInput == []:
                    exMode = False
                    normalMode = True
            elif normalMode == True:
                if (event.key == K_ESCAPE) and (normalUserInput != []):
                    normalUserInput = []
                elif event.key == K_p:
                    # NOTE: "p" is the pause key, so it can never be typed
                    # as part of a normal-mode move.
                    normalUserInput = []
                    keyboardInput["mode"] = "pause"
                elif (event.key == K_ESCAPE) and (normalUserInput == []):
                    normalUserInput = []
                    keyboardInput["mode"] = "escape"
                elif (event.key not in [K_RETURN, K_KP_ENTER, K_ESCAPE]):
                    normalUserInput.append(event.unicode)
                elif (event.key in [K_RETURN, K_KP_ENTER]):
                    # Enter publishes the buffered move for the game loop.
                    finalNormalUserInput = "".join(normalUserInput)
                    normalUserInput = []
    # Interpret any completed ex-mode command.
    if textToAnalyse == "about":
        textToAnalyse = ""
        aboutScreen(screen)
    elif textToAnalyse in ["quit", "q"]:
        textToAnalyse = ""
        programHaveToContinue = False
    elif re.match("n(ew)?( +((trivial)|(marienbad)))?( +[0-9]+)?( +(((ttl)|(take-the-last))|((ltl)|(let-the-last))))? *$", textToAnalyse) is not None:
        # "new [variant] [number] [rule]" — missing parts keep the current
        # settings; the result lands in the shared generalState object.
        programHaveToContinue = True
        functionHaveToContinue = False
        syntaxToExtractOptions = "n(ew)?( +(?P<variente>(trivial|marienbad)))?( +(?P<number>[0-9]+))?( +(?P<wtw>((ttl)|(ltl))))?"
        newGameOptions = re.match(syntaxToExtractOptions,textToAnalyse)
        textToAnalyse = ""
        if (newGameOptions.group("variente") == None) :
            generalState.variant = variant
        else:
            generalState.variant = newGameOptions.group("variente")
        if ( newGameOptions.group("number") == None) :
            generalState.number = numberOfInitialMatch
        else:
            generalState.number = int(newGameOptions.group("number"))
        if ( newGameOptions.group("wtw") == None) :
            generalState.wtw = wtw
        else:
            generalState.wtw = newGameOptions.group("wtw")
        print("New " + str(generalState.variant) + ";" + str(generalState.number) + ";" + str(generalState.wtw) + " game.")
    elif keyboardInput["mode"] == "escape":
        keyboardInput["mode"] = "escape"
    elif keyboardInput["mode"] == "pause":
        keyboardInput["mode"] = "pause"
    else:
        keyboardInput["mode"] = "ex"
        keyboardInput["content"] = textToAnalyse
    # A non-empty normal buffer always wins for display purposes.
    if normalUserInput != []:
        keyboardInput["mode"] = "normal"
        keyboardInput["content"] = normalUserInput
    return functionHaveToContinue, keyboardInput
def makeAPause(variant, numberOfInitialMatch, wtw, beginingOfGame):
    """Show the full-screen PAUSE banner until Escape is pressed.

    NOTE(review): this definition appears corrupted — everything from the
    ``nceBegining`` line down matches the body of the makeTimetZone()
    helper called elsewhere (it returns timeZoneBackground.width and uses
    myfont/timeZoneInformation/timeZoneBackground, none of which are
    defined in this scope), and ``nceBegining`` itself looks like a
    truncated ``secondSinceBegining`` assignment.  The caller expects
    makeAPause to return an adjusted beginingOfGame timestamp.  Code left
    byte-identical; restore from upstream before relying on it.
    """
    global winingMainText_colour
    global indicator_colour
    global programHaveToContinue
    resumeMainText_colour = (163, 143, 125)
    pauseMainText_colour = winingMainText_colour
    pauseTextInfo = surfaceInformations()
    resumeTextInfo = surfaceInformations()
    timeBeforePause = int(time.time()) - beginingOfGame
    timeOfEndOfGame = int(time.time()) - beginingOfGame
    functionHaveToContinue = True
    while functionHaveToContinue and programHaveToContinue:
        xSize, ySize = screen.get_size()
        functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
        screen.fill(indicator_colour)
        if textToanalyse["mode"] == "escape":
            functionHaveToContinue = False
        # Big "PAUSE" headline, centred.
        pauseTextContent = "Pause".upper()
        pauseFont = pygame.font.SysFont("CMU Typewriter Text", 112, bold=True)
        pauseText = pauseFont.render(pauseTextContent, 1, pauseMainText_colour)
        pauseTextInfo.width, pauseTextInfo.height = pauseFont.size(pauseTextContent)
        pauseTextInfo.x = (xSize - pauseTextInfo.width) / 2
        pauseTextInfo.y = (ySize/2) - pauseTextInfo.height
        screen.blit(pauseText, (pauseTextInfo.x, pauseTextInfo.y))
        # Small "how to resume" hint near the bottom.
        resumeTextContent = "Type Escape key to continue."
        resumeFont = pygame.font.SysFont("CMU Typewriter Text", 14, bold=True)
        resumeText = resumeFont.render(resumeTextContent, 1, resumeMainText_colour)
        resumeTextInfo.width, resumeTextInfo.height = resumeFont.size(resumeTextContent)
        resumeTextInfo.x = (xSize - resumeTextInfo.width) / 2
        resumeTextInfo.y = (ySize- 14) - resumeTextInfo.height - 30
        screen.blit(resumeText, (resumeTextInfo.x, resumeTextInfo.y))
        makeTextZone(variant,"Pause")
    # --- Corrupted / fused region starts here (see docstring) ---
    nceBegining = int(time.time()) - beginingOfGame
    m, s = divmod(secondSinceBegining, 60)
    h, m = divmod(m, 60)
    timePassed = "%02d:%02d" % (m, s)
    heighTextZonePosition = ySize - textZoneHeigh
    timeZoneText = myfont.render(timePassed, 1, (0, 0, 0))
    timeZoneInformation.width, timeZoneInformation.height = myfont.size(
        timePassed)
    timeZoneInformation.x = xSize - timeZoneInformation.width - timeZoneInformation.left
    timeZoneInformation.y = ySize - textZoneHeigh
    timeZoneBackground.width = timeZoneInformation.width + \
        (timeZoneInformation.left + timeZoneInformation.right)
    timeZoneBackground.height = textZoneHeigh
    timeZoneBackground.y = heighTextZonePosition
    timeZoneBackground.x = timeZoneInformation.x - 2
    timeZoneBackgroundSurface = pygame.Surface(
        (timeZoneBackground.width, timeZoneBackground.height))
    timeZoneBackgroundSurface.fill(creme_colour)
    screen.blit(timeZoneBackgroundSurface,
                (timeZoneBackground.x, timeZoneBackground.y))
    screen.blit(timeZoneText, (timeZoneInformation.x, timeZoneInformation.y))
    timeZoneBorder = pygame.draw.polygon(screen, yellow_colour, [[timeZoneBackground.x, timeZoneBackground.y], [timeZoneBackground.x, timeZoneBackground.y + timeZoneBackground.height - 2], [
        timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y + timeZoneBackground.height - 2], [timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y]], 2)
    return timeZoneBackground.width
# Normal-mode input buffer, re-initialised here at module level.
normalUserInput = []
def aboutScreen(screen):
    """Show the About illustration, scaled to fit, until Escape is pressed.

    NOTE(review): this definition appears truncated — no display flip or
    return is visible and the lines that follow belong to a different
    function; restore from upstream before relying on the ending.
    """
    global programHaveToContinue
    global textUserInput
    global normalUserInput
    global exMode
    global normalMode
    global textToAnalyse
    global normalTextToAnalyse
    functionHaveToContinue = True
    keyboardInput = dict()
    keyboardInput["mode"] = "normal"
    keyboardInput["content"] = ""
    while functionHaveToContinue and programHaveToContinue:
        functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
        if textToanalyse["mode"] == "escape":
            functionHaveToContinue = False
        screen.fill(background_colour)
        xSize, ySize = screen.get_size()
        illustrationInformation = surfaceInformations()
        illustration = pygame.image.load(
            mainDir + "/" + "about-illustration.png").convert_alpha()
        illustrationInformation.width, illustrationInformation.height = illustration.get_size()
        illustrationInformationRatio = illustrationInformation.width / \
            illustrationInformation.height
        # Shrink to 3/4 of the window in whichever dimension overflows,
        # preserving the aspect ratio.
        if illustrationInformation.width > xSize:
            illustrationInformation.width = xSize * (3 / 4)
            illustrationInformation.height = illustrationInformation.width / \
                illustrationInformationRatio
        if illustrationInformation.height > ySize:
            illustrationInformation.height = ySize * (3 / 4)
            illustrationInformation.width = illustrationInformation.height * \
                illustrationInformationRatio
        # Centre the (possibly resized) illustration.
        illustrationInformation.y = (
            ySize - illustrationInformation.height) / 2
        illustrationInformation.x = (xSize - illustrationInformation.width) / 2
        illustration = pygame.transform.scale(illustration, (int(
            illustrationInformation.width), int(illustrationInformation.height)))
        screen.blit(illustration, (illustrationInformation.x,
                                   illustrationInformation.y))
        makeTextZone("About", None)
# NOTE(review): orphaned fragment — these lines are the tail of the
# playTrivial() helper (called by the trivial game loop) whose `def` line
# and opening condition were lost; the first line is a truncated
# `if ((currentMatchNumber - 2) % 4) == modulator:`. Code left
# byte-identical; restore the full function from upstream.
chNumber - 2) % 4) == modulator:
            answer = 2
        elif ((currentMatchNumber - 3) % 4) == modulator:
            answer = 3
        else:
            answer = random.randint(1, 3)
    else:
        answer = 0
    return answer
def trivialAnalysis(currentMatchNumber, initialMatchNumber, wtw, userInput):
    """Validate one human move for the trivial Nim variant.

    userInput forms: "n" or "-n" (remove n matches, n in 1..3 and
    n <= heap) or "=n" (leave exactly n matches).  ``wtw`` is accepted for
    signature compatibility but does not affect validation.

    Returns:
      [True, matchesLeft, removed]  on a legal move (the caller only uses
      the ``removed`` element),
      [False, <error message>]      on an illegal move,
      [True, 0, 0]                  when the heap is already empty.

    BUG FIXES vs. original: an unparsable number used to set the syntax
    error answer but then fall through to the "0 is not a valid answer"
    branch, masking the message — it is now returned immediately (the
    former representsInt() helper is inlined as a plain int() attempt);
    empty input no longer raises IndexError.
    """
    if currentMatchNumber == 0:
        return [True, 0, 0]
    # At most 3 matches per move, never more than remain.
    if currentMatchNumber >= 3:
        authorisedNumbers = [3, 2, 1]
    elif currentMatchNumber == 2:
        authorisedNumbers = [2, 1]
    else:
        authorisedNumbers = [1]
    # A leading "=" means "leave exactly n"; a leading "-" (or nothing)
    # means "remove n".
    firstChar = userInput[0] if userInput else ""
    if firstChar == "=":
        action = "application"
        stringToEvaluate = userInput[1:]
    elif firstChar == "-":
        action = "soustraction"
        stringToEvaluate = userInput[1:]
    else:
        action = "soustraction"
        stringToEvaluate = userInput
    try:
        requestedNumber = int(stringToEvaluate)
    except ValueError:
        return [False, "“" + userInput + "” is not a valid syntax."]
    if action == "application":
        numberOfMatchToDel = currentMatchNumber - requestedNumber
    else:
        numberOfMatchToDel = requestedNumber
    if numberOfMatchToDel == 0:
        return [False, "“0” is not a valid answer."]
    if numberOfMatchToDel not in authorisedNumbers:
        return [False, "“" + str(numberOfMatchToDel) + "” is too big."]
    return [True, initialMatchNumber - numberOfMatchToDel, numberOfMatchToDel]
def winingFallingScreenMatchExplosion(winer, variant, numberOfInitialMatch, time):
    """Scatter 1000 randomly placed and rotated match sprites on a win.

    Only the victory branch draws anything; the defeat branch is a stub.
    `variant`, `numberOfInitialMatch` and `time` are accepted for a
    uniform end-screen signature but are not used here.
    """
    screenWidth, screenHeight = screen.get_size()
    if winer == True:
        spriteInfo = surfaceInformations()
        sprites = []
        for index in range(1000):
            sprite = pygame.image.load(
                mainDir + "/" + "match-animation.png").convert_alpha()
            sprites.append(sprite)
            # Random position then random angle, drawn straight to screen.
            spriteInfo.heigh = random.randint(0, screenHeight)
            spriteInfo.weight = random.randint(0, screenWidth)
            angle = random.randint(0, 360)
            sprites[index] = pygame.transform.rotate(sprites[index], angle)
            screen.blit(
                sprites[index], (spriteInfo.weight, spriteInfo.heigh))
    elif winer == False:
        print("machin")
def formateSecondToDotedTime(seconds):
    """Format a duration in seconds as "MM:SS", or "HH:MM:SS" from one hour up."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours == 0:
        return "%02d:%02d" % (minutes, secs)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
def winingFallingScreen(winer, variant, numberOfInitialMatch, time):
    """Draw the end-of-game screen.

    winer True  -> victory screen (elapsed time in big digits),
    winer False -> defeat screen.
    `variant` and `numberOfInitialMatch` are accepted for a uniform
    end-screen signature but are not used here; `time` is the game
    duration in seconds.
    """
    global indicator_colour
    global winingMainText_colour
    global purple_colour
    lineSeparationColor = (205, 153, 29)
    helpText_color = (163, 143, 125)
    fallingMainText_colour = winingMainText_colour
    xSize, ySize = screen.get_size()
    # From here on `time` is the formatted "MM:SS"/"HH:MM:SS" string.
    time = formateSecondToDotedTime(time)
    if winer == True:
        winingTextInfo = surfaceInformations()
        winingTimeTextInfo = surfaceInformations()
        winingHelpTextInfo = surfaceInformations()
        screen.fill(indicator_colour)
        winingFont = pygame.font.SysFont("CMU Typewriter Text", 44, bold=True)
        winingText = winingFont.render("You win!", 1, winingMainText_colour)
        winingTextInfo.width, winingTextInfo.height = winingFont.size("You win!")
        winingTextInfo.x = (xSize - winingTextInfo.width) / 2
        winingTextInfo.y = 40
        screen.blit(winingText, (winingTextInfo.x, winingTextInfo.y))
        winingTimeFont = pygame.font.SysFont("CMU Typewriter Text", 137, bold=True)
        winingTimeText = winingTimeFont.render(time, 1, lineSeparationColor)
        winingTimeTextInfo.width, winingTimeTextInfo.height = winingTimeFont.size(time)
        winingTimeTextInfo.x = (xSize - winingTimeTextInfo.width) / 2
        winingTimeTextInfo.y = 90
        screen.blit(winingTimeText, (winingTimeTextInfo.x, winingTimeTextInfo.y))
        helpText = "Type :new to begin new game or :help for more options."
        winingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
        winingHelpText = winingHelpFont.render(helpText, 1, helpText_color)
        winingHelpTextInfo.width, winingHelpTextInfo.height = winingHelpFont.size(helpText)
        winingHelpTextInfo.x = (xSize - winingHelpTextInfo.width) / 2
        winingHelpTextInfo.y = ySize-90
        screen.blit(winingHelpText, (winingHelpTextInfo.x, winingHelpTextInfo.y))
    elif winer == False:
        fallingTextInfo = surfaceInformations()
        fallingTimeTextInfo = surfaceInformations()
        fallingHelpTextInfo = surfaceInformations()
        screen.fill(purple_colour)
        # Bug fix: the defeat message previously read "You loose!".
        fallingTextContent = "You lose!"
        fallingFont = pygame.font.SysFont("CMU Typewriter Text", 52, bold=True)
        fallingText = fallingFont.render(fallingTextContent, 1, fallingMainText_colour)
        fallingTextInfo.width, fallingTextInfo.height = fallingFont.size(fallingTextContent)
        fallingTextInfo.x = (xSize - fallingTextInfo.width) / 2
        fallingTextInfo.y = (ySize/2) - fallingTextInfo.height
        screen.blit(fallingText, (fallingTextInfo.x, fallingTextInfo.y))
        helpText = "Type :new to begin new game or :help for more options."
        fallingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
        fallingHelpText = fallingHelpFont.render(helpText, 1, helpText_color)
        fallingHelpTextInfo.width, fallingHelpTextInfo.height = fallingHelpFont.size(helpText)
        fallingHelpTextInfo.x = (xSize - fallingHelpTextInfo.width) / 2
        fallingHelpTextInfo.y = ySize-90
        screen.blit(fallingHelpText, (fallingHelpTextInfo.x, fallingHelpTextInfo.y))
def printMarienbadListOfTry(screen, listOfTry):
    """Draw the move-history side bar for a Marienbad game.

    Each entry of listOfTry is a "<column>-<count>" move string; even rows
    get a "←" arrow, odd rows a "→" arrow.  Side effect: recomputes the
    module-level historyAreaWidth to fit the widest entry.
    """
    global historyAreaWidth
    historyFont = pygame.font.SysFont("monospace", 14, bold=True)
    pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
    pageUpDownColor = (220, 36, 4)
    lineSeparationColor = (205, 153, 29)
    realLineSeparationPlayed = (54,46,38)
    xSize, ySize = screen.get_size()
    arrowBackground = []
    row = 0
    # X offsets of the arrow glyph and of the move text inside a row.
    arrowPosX = 40
    delledNumberPosX = 53
    scroowlingHistory = 0  # NOTE(review): assigned but never used here
    rightHistoryAreaWidth = 0
    # Widen the history area to the widest rendered entry (+ margins).
    for aTryGame in listOfTry:
        tempSizeWidth, tempSizeHeigh = historyFont.size(aTryGame)
        if tempSizeWidth > rightHistoryAreaWidth:
            rightHistoryAreaWidth=tempSizeWidth
    rightHistoryAreaWidth=rightHistoryAreaWidth+2
    historyAreaWidth = rightHistoryAreaWidth + 35 + 20
    historyZone = pygame.Surface((historyAreaWidth, ySize))
    historyZone.fill(history_area_colour)
    screen.blit(historyZone, (0, 0))
    while row < len(listOfTry):
        # Alternate row colour and arrow direction (player vs computer).
        if (row % 2 == 0):
            row_coulour = (234, 226, 215)
            arrowSign = "←"
        else:
            row_coulour = (207, 194, 184)
            arrowSign = "→"
        arrowBackground.append(pygame.Surface(
            (historyAreaWidth, textZoneHeigh)))
        arrowBackground[row].fill(row_coulour)
        # Rows are stacked upward from just above the text-input zone.
        rowPosY = ySize - textZoneHeigh - \
            (len(listOfTry) - row) * textZoneHeigh
        historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
        historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
        numberDelledText = historyFont.render(
            str(listOfTry[row]), 1, (0, 0, 0))
        screen.blit(arrowBackground[row], (0, rowPosY))
        screen.blit(historyNumberText, (2, rowPosY + 2))
        screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
        screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
        row = row + 1
    realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
    # Full-height separator line, plus a darker overlay on the part of the
    # column actually covered by played rows.
    lineHistorySeparation = pygame.Surface((1, ySize))
    lineHistorySeparation.fill(lineSeparationColor)
    screen.blit(lineHistorySeparation, (35, 0))
    realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
    realLineHistorySeparation.fill(realLineSeparationPlayed)
    screen.blit(realLineHistorySeparation, (35, ySize-realHistoryHeigh))
    # "Scroll up" hint once the history no longer fits on screen.
    # NOTE(review): blitted at x = historyAreaWidth + 8, i.e. just to the
    # right of the history area — confirm this is the intended spot.
    if realHistoryHeigh > ySize:
        pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
        screen.blit(pageUpText, (historyAreaWidth + 8, 4))
    shadowTop = pygame.image.load(mainDir + "/" + "history-top-shadow.png").convert_alpha()
    shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
    screen.blit(shadowTop, (0, 0))
def printListOfTry(screen, listOfTry):
    """Draw the move-history side bar for a Trivial game.

    Each entry of listOfTry is the number of matches removed on one turn;
    even rows get a "←" arrow, odd rows a "→" arrow, and the removed count
    is colour-coded per value (legal moves are 1-3).
    """
    historyFont = pygame.font.SysFont("monospace", 14, bold=True)
    pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
    pageUpDownColor = (220, 36, 4)
    lineSeparationColor = (205, 153, 29)
    realLineSeparationPlayed = (54, 46, 38)
    xSize, ySize = screen.get_size()
    arrowBackground = []
    # X offsets of the arrow glyph and of the count text inside a row.
    arrowPosX = 40
    delledNumberPosX = 53
    historyZone = pygame.Surface((historyAreaWidth, ySize))
    historyZone.fill(history_area_colour)
    screen.blit(historyZone, (0, 0))
    for row in range(len(listOfTry)):
        # Alternate row colour and arrow direction (player vs computer).
        if row % 2 == 0:
            row_coulour = (234, 226, 215)
            arrowSign = "←"
        else:
            row_coulour = (207, 194, 184)
            arrowSign = "→"
        # Colour-code the removed-match count.
        if listOfTry[row] == 1:
            numberToDelColor = (0, 126, 223)
        elif listOfTry[row] == 2:
            numberToDelColor = (40, 149, 0)
        elif listOfTry[row] == 3:
            numberToDelColor = (215, 0, 95)
        else:
            # Defensive default: the original left numberToDelColor
            # unbound (NameError) for any value outside 1-3.
            numberToDelColor = (0, 0, 0)
        arrowBackground.append(pygame.Surface(
            (historyAreaWidth, textZoneHeigh)))
        arrowBackground[row].fill(row_coulour)
        # Rows are stacked upward from just above the text-input zone.
        rowPosY = ySize - textZoneHeigh - \
            (len(listOfTry) - row) * textZoneHeigh
        historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
        historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
        numberDelledText = historyFont.render(
            str(listOfTry[row]), 1, numberToDelColor)
        screen.blit(arrowBackground[row], (0, rowPosY))
        screen.blit(historyNumberText, (2, rowPosY + 2))
        screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
        screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
    realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
    # Full-height separator line, plus a darker overlay on the part of the
    # column actually covered by played rows.
    lineHistorySeparation = pygame.Surface((1, ySize))
    lineHistorySeparation.fill(lineSeparationColor)
    screen.blit(lineHistorySeparation, (35, 0))
    realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
    realLineHistorySeparation.fill(realLineSeparationPlayed)
    screen.blit(realLineHistorySeparation, (35, ySize-realHistoryHeigh))
    # "Scroll up" hint once the history no longer fits on screen.
    if realHistoryHeigh > ySize:
        pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
        screen.blit(pageUpText, (historyAreaWidth + 8, 4))
    shadowTop = pygame.image.load(mainDir + "/" + "history-top-shadow.png").convert_alpha()
    shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
    screen.blit(shadowTop, (0, 0))
def showVariant(screen, wtw, posX):
    """Draw the game-variant badge (the wtw string on a yellow tag) in the
    bottom text zone, posX pixels from the right edge.

    Returns the horizontal space the badge occupies (width + side margins)
    so the caller can position the next element to its left.
    """
    badgeColour = (205, 153, 29)
    screenWidth, screenHeight = screen.get_size()
    badgeFont = pygame.font.SysFont("monospace", 14, bold=True)
    renderedWtw = badgeFont.render(wtw, 1, (225, 225, 225))
    badgeInfo = surfaceInformations()
    labelInfo = surfaceInformations()
    badgeInfo.left = 2
    badgeInfo.right = 2
    badgeInfo.height = textZoneHeigh
    badgeInfo.y = screenHeight - textZoneHeigh
    labelInfo.width, labelInfo.height = badgeFont.size(wtw)
    # Badge is the label plus its side margins, anchored from the right.
    badgeInfo.width = labelInfo.width + badgeInfo.left + badgeInfo.right
    badgeInfo.x = screenWidth - badgeInfo.width - posX
    labelInfo.x = badgeInfo.x + 1 + badgeInfo.left
    labelInfo.y = badgeInfo.y + 1
    badge = pygame.Surface((badgeInfo.width, badgeInfo.height))
    badge.fill(badgeColour)
    screen.blit(badge, (badgeInfo.x, badgeInfo.y))
    screen.blit(renderedWtw, (labelInfo.x, labelInfo.y))
    return badgeInfo.width + badgeInfo.left + badgeInfo.right
def trivial(numberOfInitialMatch, wtw, screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
global finalNormalUserInput
allowedEntry = ["1", "2", "3"]
beginingOfGame = int(time.time())
currentNumberOfMatch = numberOfInitialMatch
normalTextInformation = surfaceInformations()
indicatorTextInformation = surfaceInformations()
listOfTry = []
functionHaveToContinue = True
myfont = pygame.font.SysFont("monospace", 14)
errorToDisplay = False
weHaveAWiner = False
winer = None
while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
userPlayed = 0
computerPlayed = 0
functionHaveToContinue, textToanalyse = analyseTyping(
"trivial", numberOfInitialMatch, wtw)
if textToanalyse["mode"] == "pause":
print("In pause")
beginingOfGame = makeAPause("Trivial", numberOfInitialMatch, wtw, beginingOfGame)
xSize, ySize = screen.get_size()
gameAreaDim[0] = xSize - historyAreaWidth
indicatorPosition = ((historyAreaWidth + ((xSize - historyAreaWidth) -
indicatorDim[0]) / 2), ySize - textZoneHeigh - indicatorDim[1])
indicatorArea = pygame.Surface((indicatorDim[0], indicatorDim[1]))
screen.fill(background_colour)
if weHaveAWiner == False:
printListOfTry(screen, listOfTry)
indicatorArea.fill(indicator_colour)
screen.blit(indicatorArea, (indicatorPosition[
0], indicatorPosition[1]))
indicatorBorderPositionLeft = (
int(indicatorPosition[0] + circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionLeft[
0], indicatorBorderPositionLeft[1]), circleRadius)
indicatorBorderPositionRight = (int(
indicatorPosition[0] + indicatorDim[0] - circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionRight[
0], indicatorBorderPositionRight[1]), circleRadius)
indicatorRadiusCompleterPosition = (
indicatorPosition[0] + circleRadius, indicatorPosition[1] - circleRadius)
indicatorRadiusCompleterDim = (
indicatorDim[0] - 2 * circleRadius, circleRadius)
indicatorRadiusCompleterArea = pygame.Surface(
(indicatorRadiusCompleterDim[0], indicatorRadiusCompleterDim[1]))
indicatorRadiusCompleterArea.fill(indicator_colour)
screen.blit(indicatorRadiusCompleterArea, (indicatorRadiusCompleterPosition[
0], indicatorRadiusCompleterPosition[1]))
maxMatchAreaDim = [xSize - historyAreaWidth - (2 * matchAreaBorder.right), ySize - textZoneHeigh - indicatorDim[
1] - matchAreaBorder.top - matchAreaBorder.bottom]
maxMatchDim = [0, 0]
maxMatchDim[0] = maxMatchAreaDim[0] / (numberOfInitialMatch * 1.5)
maxMatchDim[1] = maxMatchDim[0] * matchPicRatio
if maxMatchDim[1] > maxMatchAreaDim[1]:
matchDim = [int(maxMatchAreaDim[1] / matchPicRatio),
int(maxMatchAreaDim[1])]
else:
matchDim = [int(maxMatchDim[0]), int(
maxMatchDim[0] * matchPicRatio)]
tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
matchMaxWidth, matchMaxHeight = tempImageMatch.get_rect().size
if matchDim[0] > matchMaxWidth:
matchDim[0] = matchMaxWidth
matchDim[1] = matchMaxHeight
matchAreaDim = [matchDim[0] * numberOfInitialMatch, matchDim[1]]
matchAreaPos = [historyAreaWidth + matchAreaBorder.left + (
(maxMatchAreaDim[0] - matchAreaDim[0]) / 2), (ySize - indicatorDim[1] - matchDim[1]) / 2]
secondMatchAreaPos = [matchAreaPos[
0] + (matchAreaDim[0] - (numberOfInitialMatch * 1.5) * matchDim[0]) / 2, matchAreaPos[1]]
matchRessizing = matchMaxWidth/matchDim[0]
if wtw == "ttl":
lastBurnedMatch = [1, 2, 3]
elif wtw == "ltl":
lastBurnedMatch = [2, 3, 4]
i = 0
matchS = []
while i < numberOfInitialMatch:
if i < currentNumberOfMatch:
if currentNumberOfMatch in lastBurnedMatch:
initialSignDistanceToMatch = matchDim[1]/7
if i+1 in lastBurnedMatch:
matchS.append(pygame.image.load(
mainDir + "/" + "match-burned.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
initialSignDistanceToMatch = matchDim[1]/24
if i >= (currentNumberOfMatch - 3):
matchS.append(pygame.image.load(
mainDir + "/" + "match-allowed.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match-void.png").convert_alpha())
matchLeftVoid = 0
if i != 0:
matchLeftVoid = matchDim[0] / 2
currentMatchPos = [secondMatchAreaPos[
0] + i * (matchLeftVoid + matchDim[0]), secondMatchAreaPos[1]]
matchS[i] = pygame.transform.scale(
matchS[i], (matchDim[0], matchDim[1]))
screen.blit(
matchS[i], (currentMatchPos[0], currentMatchPos[1]))
if i == 0:
initialSignPos = [0,0]
initialSignPos[1] = currentMatchPos[1] - initialSignDistanceToMatch
if wtw == "ttl":
initialSign = pygame.image.load(mainDir + "/" + "crown.png").convert_alpha()
if wtw == "ltl":
initialSign = pygame.image.load(mainDir + "/" + "skull.png").convert_alpha()
initialSignSize = initialSign.get_rect().size
initialSignSize = [int(initialSignSize[0]/matchRessizing),int(initialSignSize[1]/matchRessizing)]
initialSign = pygame.transform.scale(initialSign, (initialSignSize[0], initialSignSize[1]))
initialSignPos[0] = (currentMatchPos[0]+(matchDim[0]/2)) - (initialSignSize[0]/2)
screen.blit(initialSign, (initialSignPos[0], initialSignPos[1]))
i = i + 1
indicatorFont = pygame.font.SysFont("monospace", 34)
indicatorTextContent = str(
currentNumberOfMatch) + "/" + str(numberOfInitialMatch)
indicatorText = indicatorFont.render(
indicatorTextContent, 1, (255, 255, 255))
indicatorTextInformation.width, indicatorTextInformation.height = indicatorFont.size(
indicatorTextContent)
indicatorTextInformation.x = indicatorPosition[
0] + (indicatorDim[0] - indicatorTextInformation.width) / 2
indicatorTextInformation.y = indicatorPosition[1] + 5
screen.blit(indicatorText, (indicatorTextInformation.x,
indicatorTextInformation.y))
if finalNormalUserInput:
getFromAnalysis = trivialAnalysis(
currentNumberOfMatch, numberOfInitialMatch, wtw, finalNormalUserInput)
finalNormalUserInput = False
if getFromAnalysis[0] == True:
userPlayed = getFromAnalysis[2]
listOfTry.append(userPlayed)
else:
errorToDisplay = getFromAnalysis[1]
if getFromAnalysis[0] == True:
computerPlayed = playTrivial(
currentNumberOfMatch - userPlayed,wtw)
listOfTry.append(computerPlayed)
currentNumberOfMatch = currentNumberOfMatch - userPlayed
if ((currentNumberOfMatch == 0) and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = True
else:
currentNumberOfMatch = currentNumberOfMatch - computerPlayed
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = False
numberOfMatchDelled = numberOfInitialMatch - currentNumberOfMatch
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
weHaveAWiner = True
timeOfEndOfGame = int(time.time()) - beginingOfGame
else:
print("we have a winer")
timeOfEndOfGame = int(time.time()) - beginingOfGame
if textToanalyse in allowedEntry:
normalTextZone = myfont.render(
"".join(textToanalyse), 1, (255, 255, 255))
screen.blit(normalTextZone, (100, 100))
makeTextZone("Trivial", None)
timeZoneWidth = makeTimetZone(beginingOfGame)
wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
if textToanalyse["mode"] == "normal":
errorToDisplay = False
normalText = myfont.render(
"".join(textToanalyse["content"]), 1, (255, 255, 255))
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
if errorToDisplay != False:
normalText = myfont.render(errorToDisplay, 1, red)
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
chMatrix:
if row != 0:
columnWithMatch.append(i)
i=i+1
if wtw == "ttl":
if len(columnWithMatch)==1:
winingColumn=columnWithMatch
else:
winingColumn=False
elif wtw == "ltl":
if (len(columnWithMatch)==1) and (matchMatrix[columnWithMatch[0]] > 1):
winingColumn=columnWithMatch
elif (len(columnWithMatch) == 2 ) and (matchMatrix[columnWithMatch[0]] == 1) and (matchMatrix[columnWithMatch[1]] == 1):
winingColumn=columnWithMatch
else:
winingColumn=False
else:
winingColumn=False
return winingColumn
def getNimSum(matchMatrix):
    """Return per-line match counts for the Marienbad pyramid.

    matchMatrix holds the match count of each column.  The result has
    int((len(matchMatrix) + 1) / 2) entries (the pyramid's line count):
    entry j counts how many columns hold more than j matches.

    Despite the name this returns the line sums, not a nim-sum; callers
    feed it to calculateLineSumsBinari for the parity analysis.
    """
    numberOfLines = int((len(matchMatrix) + 1) / 2)
    lineSums = [0] * numberOfLines
    for column in matchMatrix:
        # A column of height h contributes one match to lines 0..h-1.
        for j in range(column):
            lineSums[j] = lineSums[j] + 1
    return lineSums
def playMarienbad(matchMatrix, wtw):
    """Choose the board column the computer plays on this Marienbad turn.

    Uses the line-sum parity heuristic: if any decimal digit of the summed
    binary line sums is odd, a "winning" column is picked via
    marienbadWitchMatchLineContainOdd; otherwise a random non-empty column
    is chosen.  `wtw` is kept for signature parity but is unused.

    Returns a bare column index; the caller always removes one match.
    """
    allowdedColumnToPlay = [i for i, height in enumerate(matchMatrix) if height > 0]
    lineSumsBinari = calculateLineSumsBinari(getNimSum(matchMatrix))
    listOfDigits = list(str(sum(lineSumsBinari)))
    itIsPossibleToWin = any(int(aDigit) % 2 == 1 for aDigit in listOfDigits)
    if itIsPossibleToWin:
        return marienbadWitchMatchLineContainOdd(matchMatrix)
    # No winning move: play a random non-empty column.  Bug fix: the
    # original returned a [column, count] pair here while the winning
    # branch (and the caller, which indexes currentMatchMatrix with the
    # result) expects a bare int — the pair crashed the caller.
    return random.sample(allowdedColumnToPlay, 1)[0]
def marienbadWitchColumnIsOdd(listOfDigits):
    """Return the index of the first odd digit in listOfDigits (strings of
    single decimal digits), or None when every digit is even."""
    for index, digit in enumerate(listOfDigits):
        if int(digit) % 2 == 1:
            return index
    return None
def calculateLineSumsBinari(lineSums):
    """Map each decimal count to the int whose decimal digits spell its
    binary representation (e.g. 5 -> int("101") == 101).

    This lets later code sum the values and inspect decimal-digit parity
    per binary position.
    """
    return [int("{0:b}".format(decimalNum)) for decimalNum in lineSums]
def marienbadWitchMatchLineContainOdd(matchMatrix):
    """Return the board column the computer should play on.

    Procedure (from the visible code): compute the line sums, write each
    in binary, find the digit position whose summed digit is odd, collect
    the lines whose binary form has a 1 at that position, take the highest
    such line index, and return the last board column holding exactly that
    many matches.

    NOTE(review): assumes at least one odd digit exists (playMarienbad
    checks this before calling); if no board column matches the target
    count, None is returned where the original raised UnboundLocalError.
    """
    lineSums = getNimSum(matchMatrix)
    lineSumsBinari = calculateLineSumsBinari(lineSums)
    finalSum = sum(lineSumsBinari)
    listOfDigits = list(str(finalSum))
    theSumColumnContainingTheOddDigit = marienbadWitchColumnIsOdd(listOfDigits)
    # Left-pad every binary string to a common width so digit positions
    # line up across lines (the original rebuilt the list and padded by
    # hand; zfill does the same in one pass).
    maxLen = 0
    for binaryNum in lineSumsBinari:
        if len(str(binaryNum)) > maxLen:
            maxLen = len(str(binaryNum))
    paddedBinaries = [str(binaryNum).zfill(maxLen) for binaryNum in lineSumsBinari]
    # Lines whose digit at the odd position is 1.
    linesImpliyingOdd = []
    for lineIndex, paddedBinary in enumerate(paddedBinaries):
        if paddedBinary[theSumColumnContainingTheOddDigit] == "1":
            linesImpliyingOdd.append(lineIndex)
    higherMatchLine = linesImpliyingOdd[-1]
    # Last board column whose match count equals the chosen line index.
    theColumn = None
    for columnIndex, matchCount in enumerate(matchMatrix):
        if matchCount == higherMatchLine:
            theColumn = columnIndex
    return theColumn
def marienbadAnalysis(matchMatrix, userInput):
    """Validate and apply the player's Marienbad move.

    userInput syntax: "<column> - <n>" removes n matches from the column,
    "<column> = <n>" leaves exactly n matches in the column (spaces
    optional).

    Returns one of:
      [True, matchMatrix, "<column>-<removed>"]  on success (matchMatrix
                                                 is mutated in place),
      [False, <error message>]                   on an invalid move,
      [False, 0]                                 when the board is empty.
    """
    allowedColumns = range(len(matchMatrix))
    # An empty board means the game is over; nothing to analyse.
    if not any(column != 0 for column in matchMatrix):
        return [False, 0]
    syntaxToTestImputValidity = "^ *([0-9]+) *(=|-) *([0-9]+) *$"
    if re.match(syntaxToTestImputValidity, userInput) is None:
        return [False, "“" + userInput + "” is not a valid syntax."]
    syntaxToExtractOptions = "^ *(?P<column>[0-9]+) *(?P<operator>(=|-)) *(?P<numberOfMatchUsed>[0-9]+) *$"
    deletingMatchOparation = re.match(syntaxToExtractOptions, userInput)
    columnToDelOnIt = int(deletingMatchOparation.group("column"))
    numberOfMatchUsed = int(deletingMatchOparation.group("numberOfMatchUsed"))
    delletingOperator = deletingMatchOparation.group("operator")
    if columnToDelOnIt not in allowedColumns:
        return [False, "“" + str(deletingMatchOparation.group("column")) + "” is not in valid range."]
    if numberOfMatchUsed == 0 and delletingOperator == "-":
        return [False, "You can not del no match!"]
    if numberOfMatchUsed > matchMatrix[columnToDelOnIt]:
        if delletingOperator == "=":
            return [False, "You can not set a number higher than content."]
        return [False, "You can not use a number higher than content."]
    if delletingOperator == "=":
        # "leave n" -> remove the difference.
        numberOfMatchsToDel = matchMatrix[columnToDelOnIt] - numberOfMatchUsed
    else:
        numberOfMatchsToDel = numberOfMatchUsed
    matchMatrix[columnToDelOnIt] = matchMatrix[columnToDelOnIt] - numberOfMatchsToDel
    return [True, matchMatrix, str(columnToDelOnIt) + "-" + str(numberOfMatchsToDel)]
def marienbad(numberOfLines, wtw, screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
global finalNormalUserInput
global historyAreaWidth
maximumMatchMatrix = marienbadInitialColumns(numberOfLines)
currentMatchMatrix = copy.deepcopy(maximumMatchMatrix)
numberOfColumns = numberOfLines*2 - 1
beginingOfGame = int(time.time())
listOfTry = []
functionHaveToContinue = True
errorToDisplay = False
weHaveAWiner = False
winer = None
while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
userPlayed = 0
computerPlayed = 0
if weHaveAWiner == False:
functionHaveToContinue, textToanalyse = analyseTyping("marienbad", numberOfLines, wtw)
if textToanalyse["mode"] == "pause":
print("In pause")
beginingOfGame = makeAPause("Marienbad", numberOfInitialMatch, wtw, beginingOfGame)
xSize, ySize = screen.get_size()
gameAreaDim[0] = xSize - historyAreaWidth
tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
gameAreaInfo = surfaceInformations()
realGameAreaInfo = surfaceInformations()
matchInfo = surfaceInformations()
maxMatchInfo = surfaceInformations()
matchAreaInfo = surfaceInformations()
normalTextInformation = surfaceInformations()
wtwZoneInfo = surfaceInformations()
columnNumberInfo = surfaceInformations()
matchHorizontalSeparation = 0
matchInfo.top = 10
realGameAreaInfo.top = 20
realGameAreaInfo.bottom = 30
realGameAreaInfo.left = 30
realGameAreaInfo.right = 30
realGameAreaInfo.height = ySize - textZoneHeigh - realGameAreaInfo.top - realGameAreaInfo.bottom
realGameAreaInfo.width = xSize - historyAreaWidth - realGameAreaInfo.left - realGameAreaInfo.right
maxMatchInfo.width, maxMatchInfo.height = tempImageMatch.get_rect().size
matchInfo.height = realGameAreaInfo.height / (numberOfLines*1.2)
matchInfo.top = matchInfo.height*0.2
if matchInfo.height >= maxMatchInfo.height:
matchInfo.height = maxMatchInfo.height
matchInfo.width = maxMatchInfo.width
else:
matchInfo.width = matchInfo.height / matchPicRatio
matchHorizontalSeparation = (realGameAreaInfo.width - (matchInfo.width*numberOfColumns)) / (numberOfColumns-1)
if matchHorizontalSeparation > matchInfo.height*0.66:
matchHorizontalSeparation = matchInfo.height*0.66
matchAreaInfo.width = matchInfo.width*numberOfColumns + (numberOfColumns-1)*matchHorizontalSeparation
realGameAreaInfo.x = historyAreaWidth + realGameAreaInfo.left + (realGameAreaInfo.width-matchAreaInfo.width)/2
matchAreaInfo.height = matchInfo.height*numberOfLines + (numberOfLines-1)*matchInfo.top
realGameAreaInfo.y = realGameAreaInfo.top + (realGameAreaInfo.height-matchAreaInfo.height)/2
matchPositions = []
i = 0
for numberOfMatchInAColumn in maximumMatchMatrix:
j = 0
matchPositions.append([])
cumuledX = matchInfo.width + matchHorizontalSeparation
while j < numberOfMatchInAColumn:
matchPositions[i].append(surfaceInformations())
cumuledY = matchInfo.height + matchInfo.top
matchPositions[i][j].x = realGameAreaInfo.x + i*cumuledX
matchPositions[i][j].y = ySize-textZoneHeigh - realGameAreaInfo.y - (j+1)*cumuledY
j=j+1
i = i+1
screen.fill(background_colour)
printMarienbadListOfTry(screen, listOfTry)
if finalNormalUserInput:
getFromAnalysis = marienbadAnalysis(currentMatchMatrix, finalNormalUserInput)
finalNormalUserInput = False
if getFromAnalysis[0] == True:
currentMatchMatrix = getFromAnalysis[1]
listOfTry.append(getFromAnalysis[2])
else:
errorToDisplay = getFromAnalysis[1]
if getFromAnalysis[0] == True:
computerPlayed = playMarienbad(currentMatchMatrix,wtw)
listOfTry.append(str(computerPlayed) + "-" + "1")
currentMatchMatrix[computerPlayed] = currentMatchMatrix[computerPlayed]-1
winingColumn = marienbadIsItAWinerSituation(currentMatchMatrix, wtw)
columnNumberFont = pygame.font.SysFont("monospace", 18, bold=True)
i = 0
for column in matchPositions:
j = 0
for match in column:
if (currentMatchMatrix[i] < maximumMatchMatrix[i]) and (j+1 > currentMatchMatrix[i]):
visualMatch = pygame.image.load(mainDir + "/" + "match-void.png").convert_alpha()
else:
if winingColumn:
visualMatch = pygame.image.load(mainDir + "/" + "match-burned.png").convert_alpha()
else:
visualMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
visualMatch = pygame.transform.scale(visualMatch, (int(matchInfo.width), int(matchInfo.height)))
screen.blit(visualMatch, (match.x, match.y))
j=j+1
columnNumberImage = columnNumberFont.render(str(i), 1, (0, 0,0))
columnNumberInfo.width, columnNumberInfo.height = columnNumberImage.get_size()
columnNumberInfo.x = column[0].x + (column[0].width/2) - (columnNumberInfo.width/2)
screen.blit(columnNumberImage, (columnNumberInfo.x, column[0].y+matchInfo.height+12))
i = i+1
makeTextZone("Marienbad", None)
timeZoneWidth = makeTimetZone(beginingOfGame)
wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
normalFont = pygame.font.SysFont("monospace", 14)
if textToanalyse["mode"] == "normal":
errorToDisplay = False
normalText = normalFont.render(
"".join(textToanalyse["content"]), 1, (255, 255, 255))
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
if errorToDisplay != False:
normalText = normalFont.render(errorToDisplay, 1, red)
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
makeTextZone("Marienbad", None)
number = generalState.number
if wtw not in [0, None, ""]:
wtw = generalState.wtw
if variant == "trivial":
trivial(number, wtw, screen)
elif variant == "marienbad":
marienbad(number, wtw, screen)
main("trivial", numberOfInitialMatch, "ttl")
| true
| true
|
f7144816b85989a438082526f2dc145b5a22fa38
| 470
|
py
|
Python
|
animalid/random_id.py
|
Alphadelta14/animalid
|
0b97a84ead34be2de623de1258aae16c4e8d83d2
|
[
"MIT"
] | 5
|
2016-12-15T14:56:15.000Z
|
2022-02-15T13:32:33.000Z
|
animalid/random_id.py
|
Alphadelta14/animalid
|
0b97a84ead34be2de623de1258aae16c4e8d83d2
|
[
"MIT"
] | 1
|
2016-10-06T17:37:39.000Z
|
2016-10-06T17:37:39.000Z
|
animalid/random_id.py
|
Alphadelta14/animalid
|
0b97a84ead34be2de623de1258aae16c4e8d83d2
|
[
"MIT"
] | 1
|
2020-12-10T16:05:29.000Z
|
2020-12-10T16:05:29.000Z
|
"""Where the magic happens."""
import random
from animalid import alloys, animals, colors, fabrics, opinions, origins, shapes, sizes
FIRST_ADJECTIVES = opinions + shapes + sizes
SECOND_ADJECTIVES = alloys + colors + fabrics + origins
def generate_animal_id():
    """Build a random "adjective_adjective_animal" identifier."""
    first = random.choice(FIRST_ADJECTIVES)
    second = random.choice(SECOND_ADJECTIVES)
    creature = random.choice(animals)
    return "_".join([first, second, creature])
| 23.5
| 87
| 0.644681
|
import random
from animalid import alloys, animals, colors, fabrics, opinions, origins, shapes, sizes
FIRST_ADJECTIVES = opinions + shapes + sizes
SECOND_ADJECTIVES = alloys + colors + fabrics + origins
def generate_animal_id():
return "_".join(
[
random.choice(FIRST_ADJECTIVES),
random.choice(SECOND_ADJECTIVES),
random.choice(animals),
]
)
| true
| true
|
f71448ef2c575cc60ec1ec5c6f6dc91a3603fb16
| 6,237
|
py
|
Python
|
rcnn/lib/python3.6/site-packages/sphinx/make_mode.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | 1
|
2019-01-12T13:17:32.000Z
|
2019-01-12T13:17:32.000Z
|
rcnn/lib/python3.6/site-packages/sphinx/make_mode.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | null | null | null |
rcnn/lib/python3.6/site-packages/sphinx/make_mode.py
|
dreamingweaver/making_passportImage
|
68f23411780ff82abe934dfae5fc04acb80f2c49
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx.make_mode
~~~~~~~~~~~~~~~~
sphinx-build -M command-line handling.
This replaces the old, platform-dependent and once-generated content
of Makefile / make.bat.
This is in its own module so that importing it is fast. It should not
import the main Sphinx modules (like sphinx.applications, sphinx.builders).
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import subprocess
import sys
from os import path
import sphinx
from sphinx import cmdline
from sphinx.util.console import color_terminal, nocolor, bold, blue # type: ignore
from sphinx.util.osutil import cd, rmtree
if False:
# For type annotation
from typing import List # NOQA
proj_name = os.getenv('SPHINXPROJ', '<project>')
BUILDERS = [
("", "html", "to make standalone HTML files"),
("", "dirhtml", "to make HTML files named index.html in directories"),
("", "singlehtml", "to make a single large HTML file"),
("", "pickle", "to make pickle files"),
("", "json", "to make JSON files"),
("", "htmlhelp", "to make HTML files and an HTML help project"),
("", "qthelp", "to make HTML files and a qthelp project"),
("", "devhelp", "to make HTML files and a Devhelp project"),
("", "epub", "to make an epub"),
("", "latex", "to make LaTeX files, you can set PAPER=a4 or PAPER=letter"),
("posix", "latexpdf", "to make LaTeX and PDF files (default pdflatex)"),
("posix", "latexpdfja", "to make LaTeX files and run them through platex/dvipdfmx"),
("", "text", "to make text files"),
("", "man", "to make manual pages"),
("", "texinfo", "to make Texinfo files"),
("posix", "info", "to make Texinfo files and run them through makeinfo"),
("", "gettext", "to make PO message catalogs"),
("", "changes", "to make an overview of all changed/added/deprecated items"),
("", "xml", "to make Docutils-native XML files"),
("", "pseudoxml", "to make pseudoxml-XML files for display purposes"),
("", "linkcheck", "to check all external links for integrity"),
("", "doctest", "to run all doctests embedded in the documentation "
"(if enabled)"),
("", "coverage", "to run coverage check of the documentation (if enabled)"),
]
class Make(object):
    """Driver behind ``sphinx-build -M``: replaces the targets of the old
    generated Makefile / make.bat in a platform-independent way.

    Each ``build_<target>`` method implements one make target; anything
    without a dedicated method falls through to :meth:`run_generic_build`.
    """

    def __init__(self, srcdir, builddir, opts):
        # type: (unicode, unicode, List[unicode]) -> None
        # srcdir/builddir: documentation source and output directories;
        # opts: extra command-line options forwarded to sphinx-build.
        self.srcdir = srcdir
        self.builddir = builddir
        self.opts = opts
        self.makecmd = os.environ.get('MAKE', 'make')  # refer $MAKE to determine make command

    def builddir_join(self, *comps):
        # type: (unicode) -> unicode
        """Join *comps* onto the build directory and return the path."""
        return path.join(self.builddir, *comps)

    def build_clean(self):
        # type: () -> int
        """Remove everything inside the build directory (``clean`` target).

        Returns 0 on success (including when the directory does not exist),
        1 when the configured build path is not a directory.
        """
        if not path.exists(self.builddir):
            return 0
        elif not path.isdir(self.builddir):
            print("Error: %r is not a directory!" % self.builddir)
            return 1
        print("Removing everything under %r..." % self.builddir)
        # Delete the children, not builddir itself, so the path stays usable.
        for item in os.listdir(self.builddir):
            rmtree(self.builddir_join(item))
        return 0

    def build_help(self):
        # type: () -> None
        """Print the list of available builders (``help`` target)."""
        if not color_terminal():
            nocolor()
        print(bold("Sphinx v%s" % sphinx.__display_version__))
        print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2))  # type: ignore # NOQA
        for osname, bname, description in BUILDERS:
            # Hide builders restricted to another OS (e.g. posix-only targets).
            if not osname or os.name == osname:
                print('  %s  %s' % (blue(bname.ljust(10)), description))

    def build_latexpdf(self):
        # type: () -> int
        """Run the latex builder, then ``make all-pdf`` in its output dir."""
        if self.run_generic_build('latex') > 0:
            return 1
        try:
            with cd(self.builddir_join('latex')):
                return subprocess.call([self.makecmd, 'all-pdf'])
        except OSError:
            # $MAKE (or 'make') is missing or not executable.
            print('Error: Failed to run: %s' % self.makecmd)
            return 1

    def build_latexpdfja(self):
        # type: () -> int
        """Run the latex builder, then ``make all-pdf-ja`` (platex/dvipdfmx)."""
        if self.run_generic_build('latex') > 0:
            return 1
        try:
            with cd(self.builddir_join('latex')):
                return subprocess.call([self.makecmd, 'all-pdf-ja'])
        except OSError:
            print('Error: Failed to run: %s' % self.makecmd)
            return 1

    def build_info(self):
        # type: () -> int
        """Run the texinfo builder, then ``make info`` in its output dir."""
        if self.run_generic_build('texinfo') > 0:
            return 1
        try:
            with cd(self.builddir_join('texinfo')):
                return subprocess.call([self.makecmd, 'info'])
        except OSError:
            print('Error: Failed to run: %s' % self.makecmd)
            return 1

    def build_gettext(self):
        # type: () -> int
        """Run the gettext builder with its doctrees kept inside the
        gettext output directory."""
        dtdir = self.builddir_join('gettext', '.doctrees')
        if self.run_generic_build('gettext', doctreedir=dtdir) > 0:
            return 1
        return 0

    def run_generic_build(self, builder, doctreedir=None):
        # type: (unicode, unicode) -> int
        """Invoke sphinx-build for *builder* and return its exit status."""
        # compatibility with old Makefile
        papersize = os.getenv('PAPER', '')
        opts = self.opts
        if papersize in ('a4', 'letter'):
            opts.extend(['-D', 'latex_elements.papersize=' + papersize + 'paper'])
        if doctreedir is None:
            doctreedir = self.builddir_join('doctrees')
        args = ['-b', builder,
                '-d', doctreedir,
                self.srcdir,
                self.builddir_join(builder)]
        return cmdline.main(args + opts)
def run_make_mode(args):
    # type: (List[unicode]) -> int
    """Dispatch a ``sphinx-build -M`` invocation.

    ``args`` is ``[builder, srcdir, builddir, *extra_opts]``.  A matching
    ``Make.build_<builder>`` method wins; otherwise the builder name is
    handed to the generic build.  Returns a process exit status.
    """
    if len(args) < 3:
        print('Error: at least 3 arguments (builder, source '
              'dir, build dir) are required.', file=sys.stderr)
        return 1
    builder, srcdir, builddir = args[0], args[1], args[2]
    make = Make(srcdir, builddir, args[3:])
    handler = getattr(make, 'build_' + builder, None)
    if handler is not None:
        return handler()
    return make.run_generic_build(builder)
| 37.125
| 106
| 0.568062
|
from __future__ import print_function
import os
import subprocess
import sys
from os import path
import sphinx
from sphinx import cmdline
from sphinx.util.console import color_terminal, nocolor, bold, blue
from sphinx.util.osutil import cd, rmtree
if False:
from typing import List
proj_name = os.getenv('SPHINXPROJ', '<project>')
BUILDERS = [
("", "html", "to make standalone HTML files"),
("", "dirhtml", "to make HTML files named index.html in directories"),
("", "singlehtml", "to make a single large HTML file"),
("", "pickle", "to make pickle files"),
("", "json", "to make JSON files"),
("", "htmlhelp", "to make HTML files and an HTML help project"),
("", "qthelp", "to make HTML files and a qthelp project"),
("", "devhelp", "to make HTML files and a Devhelp project"),
("", "epub", "to make an epub"),
("", "latex", "to make LaTeX files, you can set PAPER=a4 or PAPER=letter"),
("posix", "latexpdf", "to make LaTeX and PDF files (default pdflatex)"),
("posix", "latexpdfja", "to make LaTeX files and run them through platex/dvipdfmx"),
("", "text", "to make text files"),
("", "man", "to make manual pages"),
("", "texinfo", "to make Texinfo files"),
("posix", "info", "to make Texinfo files and run them through makeinfo"),
("", "gettext", "to make PO message catalogs"),
("", "changes", "to make an overview of all changed/added/deprecated items"),
("", "xml", "to make Docutils-native XML files"),
("", "pseudoxml", "to make pseudoxml-XML files for display purposes"),
("", "linkcheck", "to check all external links for integrity"),
("", "doctest", "to run all doctests embedded in the documentation "
"(if enabled)"),
("", "coverage", "to run coverage check of the documentation (if enabled)"),
]
class Make(object):
def __init__(self, srcdir, builddir, opts):
self.srcdir = srcdir
self.builddir = builddir
self.opts = opts
self.makecmd = os.environ.get('MAKE', 'make')
def builddir_join(self, *comps):
return path.join(self.builddir, *comps)
def build_clean(self):
if not path.exists(self.builddir):
return 0
elif not path.isdir(self.builddir):
print("Error: %r is not a directory!" % self.builddir)
return 1
print("Removing everything under %r..." % self.builddir)
for item in os.listdir(self.builddir):
rmtree(self.builddir_join(item))
return 0
def build_help(self):
if not color_terminal():
nocolor()
print(bold("Sphinx v%s" % sphinx.__display_version__))
print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2)) # type: ignore # NOQA
for osname, bname, description in BUILDERS:
if not osname or os.name == osname:
print(' %s %s' % (blue(bname.ljust(10)), description))
def build_latexpdf(self):
# type: () -> int
if self.run_generic_build('latex') > 0:
return 1
try:
with cd(self.builddir_join('latex')):
return subprocess.call([self.makecmd, 'all-pdf'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_latexpdfja(self):
# type: () -> int
if self.run_generic_build('latex') > 0:
return 1
try:
with cd(self.builddir_join('latex')):
return subprocess.call([self.makecmd, 'all-pdf-ja'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_info(self):
# type: () -> int
if self.run_generic_build('texinfo') > 0:
return 1
try:
with cd(self.builddir_join('texinfo')):
return subprocess.call([self.makecmd, 'info'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_gettext(self):
# type: () -> int
dtdir = self.builddir_join('gettext', '.doctrees')
if self.run_generic_build('gettext', doctreedir=dtdir) > 0:
return 1
return 0
def run_generic_build(self, builder, doctreedir=None):
# type: (unicode, unicode) -> int
# compatibility with old Makefile
papersize = os.getenv('PAPER', '')
opts = self.opts
if papersize in ('a4', 'letter'):
opts.extend(['-D', 'latex_elements.papersize=' + papersize + 'paper'])
if doctreedir is None:
doctreedir = self.builddir_join('doctrees')
args = ['-b', builder,
'-d', doctreedir,
self.srcdir,
self.builddir_join(builder)]
return cmdline.main(args + opts)
def run_make_mode(args):
# type: (List[unicode]) -> int
if len(args) < 3:
print('Error: at least 3 arguments (builder, source '
'dir, build dir) are required.', file=sys.stderr)
return 1
make = Make(args[1], args[2], args[3:])
run_method = 'build_' + args[0]
if hasattr(make, run_method):
return getattr(make, run_method)()
return make.run_generic_build(args[0])
| true
| true
|
f7144910af197a90e026161df30c5200d7a0dd17
| 1,623
|
py
|
Python
|
greengrass-v2/poll-api/artifacts/com.greengrass.FakeApi/1.0.0/app.py
|
dhwalters423/iot-reference-architectures
|
cb966fec51b73c4403744b0e8a6060f05fe92013
|
[
"MIT-0"
] | 1
|
2022-01-20T12:26:42.000Z
|
2022-01-20T12:26:42.000Z
|
greengrass-v2/poll-api/artifacts/com.greengrass.FakeApi/1.0.0/app.py
|
dhwalters423/iot-reference-architectures
|
cb966fec51b73c4403744b0e8a6060f05fe92013
|
[
"MIT-0"
] | null | null | null |
greengrass-v2/poll-api/artifacts/com.greengrass.FakeApi/1.0.0/app.py
|
dhwalters423/iot-reference-architectures
|
cb966fec51b73c4403744b0e8a6060f05fe92013
|
[
"MIT-0"
] | null | null | null |
#!/usr/bin/env python3
import json
import time
from random import gauss
from flask import Flask
number_of_devices = 10
number_of_values_per_second = 2
last_request = None
app = Flask(__name__)
@app.route('/')
def index():
    """Health-check endpoint: plain-text confirmation the server is up."""
    message = 'Server is running'
    return message
def get_time_ms():
    """Return the current UNIX time in whole milliseconds."""
    seconds_now = time.time()
    return int(seconds_now * 1000)
def generate_one_device(device_number, number_of_values, time_between_values):
    """Fabricate one device's reading series.

    Returns ``{"device_<n>": [[timestamp_ms, "datum", text, value, 0], ...]}``
    with ``number_of_values`` gaussian readings whose timestamps are jittered
    around ``time_between_values`` seconds before now.
    """
    now_ms = get_time_ms()
    readings = []
    for _ in range(number_of_values):
        # Draw the value first, then the timestamp jitter — same RNG call
        # order as a seeded caller would expect.
        value = gauss(5, 2)
        jitter_ms = gauss(1000 * time_between_values, 500)
        readings.append([int(now_ms - jitter_ms), "datum", str(value), value, 0])
    return {f"device_{device_number}": readings}
@app.route('/data')
def data():
    """Return fake readings for all devices accumulated since the last poll.

    Responds with JSON of shape ``{"device_data": {"descriptions": [...],
    "points": {...}}}``; an empty ``{}`` is returned when less than one
    value's worth of time has elapsed.  Mutates the module-global
    ``last_request`` poll timestamp, so concurrent requests race on it —
    NOTE(review): presumably acceptable for this fake/demo API.
    """
    global last_request
    now = get_time_ms()
    # First poll ever: pretend the previous request was 10 seconds ago.
    if last_request is None:
        last_request = get_time_ms() - 10000
    # Generate the desired number of values per second
    number_of_values = int((now - last_request) / 1000 * number_of_values_per_second)
    if number_of_values == 0:
        # Too soon — keep last_request unchanged so elapsed time accrues.
        return json.dumps({})
    last_request = now
    temp_data = {}
    for i in range(number_of_devices):
        temp_data.update(generate_one_device(i, number_of_values, 1))
    return json.dumps({"device_data": {
        "descriptions": [
            "timestamp",
            "name",
            "text_value",
            "numeric_value",
            "source"
        ],
        "points": temp_data
    }
    })
| 23.521739
| 95
| 0.557609
|
import json
import time
from random import gauss
from flask import Flask
number_of_devices = 10
number_of_values_per_second = 2
last_request = None
app = Flask(__name__)
@app.route('/')
def index():
return 'Server is running'
def get_time_ms():
return int(time.time() * 1000)
def generate_one_device(device_number, number_of_values, time_between_values):
temp_data = []
now = get_time_ms()
for i in range(number_of_values):
value = gauss(5, 2)
temp_data.append(
[int(now - gauss(1000 * time_between_values, 500)), "datum", str(value), value, 0])
return {f"device_{device_number}": temp_data}
@app.route('/data')
def data():
global last_request
now = get_time_ms()
if last_request is None:
last_request = get_time_ms() - 10000
number_of_values = int((now - last_request) / 1000 * number_of_values_per_second)
if number_of_values == 0:
return json.dumps({})
last_request = now
temp_data = {}
for i in range(number_of_devices):
temp_data.update(generate_one_device(i, number_of_values, 1))
return json.dumps({"device_data": {
"descriptions": [
"timestamp",
"name",
"text_value",
"numeric_value",
"source"
],
"points": temp_data
}
})
| true
| true
|
f714494bfe0ecac22c74155fa6c7a76f477af690
| 2,430
|
py
|
Python
|
src/stats/intro_stats.py
|
JacobEkedahl/detect-intros-from-video
|
9b2bac1c7209558711072f967a3359d2ca698cd4
|
[
"MIT"
] | 5
|
2020-06-05T05:10:25.000Z
|
2022-03-10T05:12:14.000Z
|
src/stats/intro_stats.py
|
JacobEkedahl/detect-intros-from-video
|
9b2bac1c7209558711072f967a3359d2ca698cd4
|
[
"MIT"
] | null | null | null |
src/stats/intro_stats.py
|
JacobEkedahl/detect-intros-from-video
|
9b2bac1c7209558711072f967a3359d2ca698cd4
|
[
"MIT"
] | 3
|
2020-06-06T13:21:23.000Z
|
2021-03-08T22:24:18.000Z
|
import matplotlib.pyplot as plt
import utils.extractor as extractor
import utils.file_handler as file_handler
import utils.time_handler as time_handler
def plot_intros():
    """Scatter-plot intro length (y) against intro start time (x), in seconds.

    Intros whose end time is "00:00:00" are treated as missing and skipped.
    """
    valid = [intro for intro in extractor.get_intros_from_data()
             if intro["end"] != "00:00:00"]
    starts = [get_start_time_seconds(intro) for intro in valid]
    lengths = [get_size_from_intro(intro) for intro in valid]
    plt.xlabel('Start time of intro (Seconds)')
    plt.ylabel('Length of intro (Seconds)')
    plt.grid(True)
    plt.scatter(starts, lengths)
    plt.show()
def plot_hist_sizes():
    """Histogram (40 bins) of intro lengths in seconds, skipping intros
    whose end time is the "00:00:00" placeholder."""
    valid = [intro for intro in extractor.get_intros_from_data()
             if intro["end"] != "00:00:00"]
    lengths = [get_size_from_intro(intro) for intro in valid]
    plt.xlabel('Length of intro (Seconds)')
    plt.ylabel('Frequency')
    plt.grid(True)
    plt.hist(lengths, bins=40)
    plt.show()
def plot_hist_frequency():
    """Histogram (60 bins) of intro start times in seconds, skipping intros
    whose end time is the "00:00:00" placeholder."""
    valid = [intro for intro in extractor.get_intros_from_data()
             if intro["end"] != "00:00:00"]
    starts = [get_start_time_seconds(intro) for intro in valid]
    plt.xlabel('Start time of intro (Seconds)')
    plt.ylabel('Frequency')
    plt.grid(True)
    plt.hist(starts, bins=60)
    plt.show()
def plot_all_intros():
    """Show the start-time and length histograms side by side in one figure.

    Same filtering as the single-plot helpers: intros ending at "00:00:00"
    are treated as missing and excluded.
    """
    x_titles = ['Start time of intro (Seconds)', 'Length of intro (Seconds)']
    y_title = 'Frequency'
    # NOTE(review): `titles` is currently unused — the set_title call below
    # is commented out.
    titles = ['Start times of intros','Lengths of intros']
    colors = ['blue', 'blue']
    # Bin counts mirror plot_hist_frequency (60) and plot_hist_sizes (40).
    bins = [60, 40]
    intros = extractor.get_intros_from_data()
    only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
    x_size = list(map(get_size_from_intro, only_valid_intros))
    x_start = list(map(get_start_time_seconds, only_valid_intros))
    x_data = [x_start, x_size]
    fig, axs = plt.subplots(1, 2)
    axs = axs.ravel()
    for idx, ax in enumerate(axs):
        ax.hist(x_data[idx], bins=bins[idx], fc=colors[idx])
        # ax.set_title(titles[idx])
        ax.set_xlabel(x_titles[idx])
        ax.set_ylabel(y_title)
        ax.grid()
    plt.tight_layout()
    plt.show()
def get_size_from_intro(intro):
    """Length of the intro in seconds, from its "start"/"end" timestamps."""
    start_seconds = time_handler.timestamp(intro["start"]) / 1000
    end_seconds = time_handler.timestamp(intro["end"]) / 1000
    return abs(end_seconds - start_seconds)
def get_start_time_seconds(intro):
    """Start time of the intro in seconds (timestamp() yields milliseconds)."""
    millis = time_handler.timestamp(intro["start"])
    return millis / 1000
| 33.75
| 77
| 0.676955
|
import matplotlib.pyplot as plt
import utils.extractor as extractor
import utils.file_handler as file_handler
import utils.time_handler as time_handler
def plot_intros():
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_data = map(get_start_time_seconds, only_valid_intros)
y_data = map(get_size_from_intro, only_valid_intros)
plt.xlabel('Start time of intro (Seconds)')
plt.ylabel('Length of intro (Seconds)')
plt.grid(True)
plt.scatter(list(x_data), list(y_data))
plt.show()
def plot_hist_sizes():
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_data = list(map(get_size_from_intro, only_valid_intros))
plt.xlabel('Length of intro (Seconds)')
plt.ylabel('Frequency')
plt.grid(True)
plt.hist(x_data, bins=40)
plt.show()
def plot_hist_frequency():
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_data = list(map(get_start_time_seconds, only_valid_intros))
plt.xlabel('Start time of intro (Seconds)')
plt.ylabel('Frequency')
plt.grid(True)
plt.hist(x_data, bins=60)
plt.show()
def plot_all_intros():
x_titles = ['Start time of intro (Seconds)', 'Length of intro (Seconds)']
y_title = 'Frequency'
titles = ['Start times of intros','Lengths of intros']
colors = ['blue', 'blue']
bins = [60, 40]
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_size = list(map(get_size_from_intro, only_valid_intros))
x_start = list(map(get_start_time_seconds, only_valid_intros))
x_data = [x_start, x_size]
fig, axs = plt.subplots(1, 2)
axs = axs.ravel()
for idx, ax in enumerate(axs):
ax.hist(x_data[idx], bins=bins[idx], fc=colors[idx])
ax.set_xlabel(x_titles[idx])
ax.set_ylabel(y_title)
ax.grid()
plt.tight_layout()
plt.show()
def get_size_from_intro(intro):
start = time_handler.timestamp(intro["start"]) / 1000
end = time_handler.timestamp(intro["end"]) / 1000
return abs(start - end)
def get_start_time_seconds(intro):
return time_handler.timestamp(intro["start"]) / 1000
| true
| true
|
f7144a1dcf2877b5b9556bdaf5f3fa28830fe1f3
| 930
|
py
|
Python
|
perfkitbenchmarker/linux_packages/libpng.py
|
xiaolihope/PerfKitBenchmarker-1.7.0
|
7699b1073a80d7a92fd3db93da742b93a2ecf900
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/linux_packages/libpng.py
|
xiaolihope/PerfKitBenchmarker-1.7.0
|
7699b1073a80d7a92fd3db93da742b93a2ecf900
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/linux_packages/libpng.py
|
xiaolihope/PerfKitBenchmarker-1.7.0
|
7699b1073a80d7a92fd3db93da742b93a2ecf900
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing libpng installation and cleanup functions."""
def YumInstall(vm):
    """Installs the libpng package on the VM."""
    # Runtime library first, then the development headers.
    for package in ('libpng', 'libpng-devel'):
        vm.InstallPackages(package)
def AptInstall(vm):
    """Installs the libpng package on the VM."""
    # Both packages are passed in a single space-separated spec.
    package_spec = 'libpng3 libpng12-dev'
    vm.InstallPackages(package_spec)
| 33.214286
| 74
| 0.754839
|
def YumInstall(vm):
vm.InstallPackages('libpng')
vm.InstallPackages('libpng-devel')
def AptInstall(vm):
vm.InstallPackages('libpng3 libpng12-dev')
| true
| true
|
f7144a6156a81fa3ee7667295196b9e059922910
| 1,860
|
py
|
Python
|
tests/test_strings.py
|
idanmoradarthas/DataScienceUtils
|
be4806ebcb9ab0e2cdd189842227bd242f0c8910
|
[
"MIT"
] | 19
|
2019-12-26T15:44:58.000Z
|
2021-06-14T00:36:24.000Z
|
tests/test_strings.py
|
federicodecillia/DataScienceUtils
|
be4806ebcb9ab0e2cdd189842227bd242f0c8910
|
[
"MIT"
] | 2
|
2019-12-06T12:32:41.000Z
|
2020-11-27T11:54:15.000Z
|
tests/test_strings.py
|
federicodecillia/DataScienceUtils
|
be4806ebcb9ab0e2cdd189842227bd242f0c8910
|
[
"MIT"
] | 3
|
2021-01-16T09:08:15.000Z
|
2021-01-29T10:57:11.000Z
|
import pandas
from ds_utils.strings import append_tags_to_frame, extract_significant_terms_from_subset
def test_append_tags_to_frame():
    """append_tags_to_frame one-hot encodes the comma-separated tag column.

    Expected behavior shown by the fixtures: each train-set tag becomes a
    prefixed 0/1 column, and test-set tags unseen in training ("py") get no
    column at all.
    """
    x_train = pandas.DataFrame([{"article_name": "1", "article_tags": "ds,ml,dl"},
                                {"article_name": "2", "article_tags": "ds,ml"}])
    x_test = pandas.DataFrame([{"article_name": "3", "article_tags": "ds,ml,py"}])
    x_train_expected = pandas.DataFrame([{"article_name": "1", "tag_ds": 1, "tag_ml": 1, "tag_dl": 1},
                                         {"article_name": "2", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
                                        columns=["article_name", "tag_dl", "tag_ds", "tag_ml"])
    x_test_expected = pandas.DataFrame([{"article_name": "3", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
                                       columns=["article_name", "tag_dl", "tag_ds", "tag_ml"])
    x_train_with_tags, x_test_with_tags = append_tags_to_frame(x_train, x_test, "article_tags", "tag_")
    # check_like=True: compare ignoring column/row order.
    pandas.testing.assert_frame_equal(x_train_expected, x_train_with_tags, check_like=True)
    pandas.testing.assert_frame_equal(x_test_expected, x_test_with_tags, check_like=True)
def test_significant_terms():
    """extract_significant_terms_from_subset scores terms of a document subset.

    The subset is the last two documents; expected scores range from 1.0
    (terms exclusive to the subset: 'third', 'one', 'and') down to 0.0
    ('second', absent from the subset), in descending order.
    """
    corpus = ['This is the first document.', 'This document is the second document.', 'And this is the third one.',
              'Is this the first document?']
    data_frame = pandas.DataFrame(corpus, columns=["content"])
    subset_data_frame = data_frame[data_frame.index > 1]
    terms = extract_significant_terms_from_subset(data_frame, subset_data_frame, "content")
    expected = pandas.Series(
        [1.0, 1.0, 1.0, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.5, 0.25, 0.0],
        index=['third', 'one', 'and', 'this', 'the', 'is', 'first', 'document', 'second'])
    pandas.testing.assert_series_equal(expected, terms)
| 54.705882
| 115
| 0.64086
|
import pandas
from ds_utils.strings import append_tags_to_frame, extract_significant_terms_from_subset
def test_append_tags_to_frame():
x_train = pandas.DataFrame([{"article_name": "1", "article_tags": "ds,ml,dl"},
{"article_name": "2", "article_tags": "ds,ml"}])
x_test = pandas.DataFrame([{"article_name": "3", "article_tags": "ds,ml,py"}])
x_train_expected = pandas.DataFrame([{"article_name": "1", "tag_ds": 1, "tag_ml": 1, "tag_dl": 1},
{"article_name": "2", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
columns=["article_name", "tag_dl", "tag_ds", "tag_ml"])
x_test_expected = pandas.DataFrame([{"article_name": "3", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
columns=["article_name", "tag_dl", "tag_ds", "tag_ml"])
x_train_with_tags, x_test_with_tags = append_tags_to_frame(x_train, x_test, "article_tags", "tag_")
pandas.testing.assert_frame_equal(x_train_expected, x_train_with_tags, check_like=True)
pandas.testing.assert_frame_equal(x_test_expected, x_test_with_tags, check_like=True)
def test_significant_terms():
corpus = ['This is the first document.', 'This document is the second document.', 'And this is the third one.',
'Is this the first document?']
data_frame = pandas.DataFrame(corpus, columns=["content"])
subset_data_frame = data_frame[data_frame.index > 1]
terms = extract_significant_terms_from_subset(data_frame, subset_data_frame, "content")
expected = pandas.Series(
[1.0, 1.0, 1.0, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.5, 0.25, 0.0],
index=['third', 'one', 'and', 'this', 'the', 'is', 'first', 'document', 'second'])
pandas.testing.assert_series_equal(expected, terms)
| true
| true
|
f7144b8fa809f715ad4be47d6e7cdd7ba4be43fd
| 63
|
py
|
Python
|
caravaggio_rest_api/haystack/__init__.py
|
xalperte/django-caravaggio-rest-api
|
36fcdc6b77982fc7fd2462f2c8997911f14047c4
|
[
"MIT"
] | null | null | null |
caravaggio_rest_api/haystack/__init__.py
|
xalperte/django-caravaggio-rest-api
|
36fcdc6b77982fc7fd2462f2c8997911f14047c4
|
[
"MIT"
] | null | null | null |
caravaggio_rest_api/haystack/__init__.py
|
xalperte/django-caravaggio-rest-api
|
36fcdc6b77982fc7fd2462f2c8997911f14047c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*
# Copyright (c) 2018 PreSeries Tech, SL
| 21
| 39
| 0.619048
| true
| true
|
|
f7144c3823d0412a188fb793a469ea4fa0b57caf
| 139
|
py
|
Python
|
examples/container.py
|
hugovk/Cyberbrain
|
3b3789a7c23edf95c7f7bac94c2d165e9aaf86ed
|
[
"MIT"
] | 2,440
|
2019-09-21T04:21:55.000Z
|
2022-03-30T09:47:47.000Z
|
examples/container.py
|
hugovk/Cyberbrain
|
3b3789a7c23edf95c7f7bac94c2d165e9aaf86ed
|
[
"MIT"
] | 103
|
2019-09-21T15:19:59.000Z
|
2022-03-28T06:27:40.000Z
|
examples/container.py
|
hugovk/Cyberbrain
|
3b3789a7c23edf95c7f7bac94c2d165e9aaf86ed
|
[
"MIT"
] | 162
|
2019-07-16T08:03:18.000Z
|
2022-03-30T02:51:21.000Z
|
from cyberbrain import trace
@trace
def container():
    """Build and return the list [0, 1, ..., 999] under cyberbrain tracing."""
    numbers = list(range(1000))
    return numbers
if __name__ == "__main__":
    # Run the traced example only when executed as a script.
    container()
| 11.583333
| 28
| 0.647482
|
from cyberbrain import trace
@trace
def container():
x = list(range(1000))
return x
if __name__ == "__main__":
container()
| true
| true
|
f7144d1cbcf7cf787868c444942d133284af243b
| 7,461
|
py
|
Python
|
lib/kubernetes/client/models/v1_lease_spec.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 7
|
2019-12-21T00:14:14.000Z
|
2021-03-11T14:51:37.000Z
|
lib/kubernetes/client/models/v1_lease_spec.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 29
|
2019-10-09T11:16:21.000Z
|
2020-06-23T09:32:09.000Z
|
lib/kubernetes/client/models/v1_lease_spec.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 1
|
2021-05-07T10:13:31.000Z
|
2021-05-07T10:13:31.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LeaseSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'acquire_time': 'datetime',
        'holder_identity': 'str',
        'lease_duration_seconds': 'int',
        'lease_transitions': 'int',
        'renew_time': 'datetime'
    }

    # Maps snake_case Python attribute names to the camelCase JSON keys
    # used by the Kubernetes API.
    attribute_map = {
        'acquire_time': 'acquireTime',
        'holder_identity': 'holderIdentity',
        'lease_duration_seconds': 'leaseDurationSeconds',
        'lease_transitions': 'leaseTransitions',
        'renew_time': 'renewTime'
    }

    def __init__(self, acquire_time=None, holder_identity=None, lease_duration_seconds=None, lease_transitions=None, renew_time=None):
        """
        V1LeaseSpec - a model defined in Swagger
        """
        # All fields are optional; only non-None arguments are assigned
        # (through the property setters below).
        self._acquire_time = None
        self._holder_identity = None
        self._lease_duration_seconds = None
        self._lease_transitions = None
        self._renew_time = None
        self.discriminator = None

        if acquire_time is not None:
          self.acquire_time = acquire_time
        if holder_identity is not None:
          self.holder_identity = holder_identity
        if lease_duration_seconds is not None:
          self.lease_duration_seconds = lease_duration_seconds
        if lease_transitions is not None:
          self.lease_transitions = lease_transitions
        if renew_time is not None:
          self.renew_time = renew_time

    @property
    def acquire_time(self):
        """
        Gets the acquire_time of this V1LeaseSpec.
        acquireTime is a time when the current lease was acquired.

        :return: The acquire_time of this V1LeaseSpec.
        :rtype: datetime
        """
        return self._acquire_time

    @acquire_time.setter
    def acquire_time(self, acquire_time):
        """
        Sets the acquire_time of this V1LeaseSpec.
        acquireTime is a time when the current lease was acquired.

        :param acquire_time: The acquire_time of this V1LeaseSpec.
        :type: datetime
        """
        self._acquire_time = acquire_time

    @property
    def holder_identity(self):
        """
        Gets the holder_identity of this V1LeaseSpec.
        holderIdentity contains the identity of the holder of a current lease.

        :return: The holder_identity of this V1LeaseSpec.
        :rtype: str
        """
        return self._holder_identity

    @holder_identity.setter
    def holder_identity(self, holder_identity):
        """
        Sets the holder_identity of this V1LeaseSpec.
        holderIdentity contains the identity of the holder of a current lease.

        :param holder_identity: The holder_identity of this V1LeaseSpec.
        :type: str
        """
        self._holder_identity = holder_identity

    @property
    def lease_duration_seconds(self):
        """
        Gets the lease_duration_seconds of this V1LeaseSpec.
        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.

        :return: The lease_duration_seconds of this V1LeaseSpec.
        :rtype: int
        """
        return self._lease_duration_seconds

    @lease_duration_seconds.setter
    def lease_duration_seconds(self, lease_duration_seconds):
        """
        Sets the lease_duration_seconds of this V1LeaseSpec.
        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.

        :param lease_duration_seconds: The lease_duration_seconds of this V1LeaseSpec.
        :type: int
        """
        self._lease_duration_seconds = lease_duration_seconds

    @property
    def lease_transitions(self):
        """
        Gets the lease_transitions of this V1LeaseSpec.
        leaseTransitions is the number of transitions of a lease between holders.

        :return: The lease_transitions of this V1LeaseSpec.
        :rtype: int
        """
        return self._lease_transitions

    @lease_transitions.setter
    def lease_transitions(self, lease_transitions):
        """
        Sets the lease_transitions of this V1LeaseSpec.
        leaseTransitions is the number of transitions of a lease between holders.

        :param lease_transitions: The lease_transitions of this V1LeaseSpec.
        :type: int
        """
        self._lease_transitions = lease_transitions

    @property
    def renew_time(self):
        """
        Gets the renew_time of this V1LeaseSpec.
        renewTime is a time when the current holder of a lease has last updated the lease.

        :return: The renew_time of this V1LeaseSpec.
        :rtype: datetime
        """
        return self._renew_time

    @renew_time.setter
    def renew_time(self, renew_time):
        """
        Sets the renew_time of this V1LeaseSpec.
        renewTime is a time when the current holder of a lease has last updated the lease.

        :param renew_time: The renew_time of this V1LeaseSpec.
        :type: datetime
        """
        self._renew_time = renew_time

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively serialize nested swagger models, lists and dicts.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Equality is attribute-wise; other types never compare equal.
        if not isinstance(other, V1LeaseSpec):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 31.217573
| 162
| 0.600456
|
from pprint import pformat
from six import iteritems
import re
class V1LeaseSpec(object):
    """Model holding the spec of a Lease object.

    Stores lease timing and holder-identity fields and provides dict/string
    serialization plus value-based equality (generated-client model style).
    """

    # Attribute name -> declared swagger type (drives to_dict()).
    swagger_types = {
        'acquire_time': 'datetime',
        'holder_identity': 'str',
        'lease_duration_seconds': 'int',
        'lease_transitions': 'int',
        'renew_time': 'datetime'
    }

    # Python attribute name -> wire (JSON) field name.
    attribute_map = {
        'acquire_time': 'acquireTime',
        'holder_identity': 'holderIdentity',
        'lease_duration_seconds': 'leaseDurationSeconds',
        'lease_transitions': 'leaseTransitions',
        'renew_time': 'renewTime'
    }

    def __init__(self, acquire_time=None, holder_identity=None, lease_duration_seconds=None, lease_transitions=None, renew_time=None):
        """Create a spec; only arguments that are not None are assigned."""
        self._acquire_time = None
        self._holder_identity = None
        self._lease_duration_seconds = None
        self._lease_transitions = None
        self._renew_time = None
        self.discriminator = None
        # Route every provided value through its property setter.
        for attr_name, attr_value in (('acquire_time', acquire_time),
                                      ('holder_identity', holder_identity),
                                      ('lease_duration_seconds', lease_duration_seconds),
                                      ('lease_transitions', lease_transitions),
                                      ('renew_time', renew_time)):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def acquire_time(self):
        """Get the acquire_time of this V1LeaseSpec."""
        return self._acquire_time

    @acquire_time.setter
    def acquire_time(self, acquire_time):
        """Set the acquire_time of this V1LeaseSpec."""
        self._acquire_time = acquire_time

    @property
    def holder_identity(self):
        """Get the holder_identity of this V1LeaseSpec."""
        return self._holder_identity

    @holder_identity.setter
    def holder_identity(self, holder_identity):
        """Set the holder_identity of this V1LeaseSpec."""
        self._holder_identity = holder_identity

    @property
    def lease_duration_seconds(self):
        """Get the lease_duration_seconds of this V1LeaseSpec."""
        return self._lease_duration_seconds

    @lease_duration_seconds.setter
    def lease_duration_seconds(self, lease_duration_seconds):
        """Set the lease_duration_seconds of this V1LeaseSpec."""
        self._lease_duration_seconds = lease_duration_seconds

    @property
    def lease_transitions(self):
        """Get the lease_transitions of this V1LeaseSpec."""
        return self._lease_transitions

    @lease_transitions.setter
    def lease_transitions(self, lease_transitions):
        """Set the lease_transitions of this V1LeaseSpec."""
        self._lease_transitions = lease_transitions

    @property
    def renew_time(self):
        """Get the renew_time of this V1LeaseSpec."""
        return self._renew_time

    @renew_time.setter
    def renew_time(self, renew_time):
        """Set the renew_time of this V1LeaseSpec."""
        self._renew_time = renew_time

    def to_dict(self):
        """Return the model's properties as a dict, serializing nested models."""
        def serialize(value):
            # Anything exposing to_dict() is itself a model: recurse into it.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {key: serialize(val) for key, val in value.items()}
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two specs are equal when they share the type and all attributes."""
        if not isinstance(other, V1LeaseSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true
| true
|
f7144fc6d2b714a04c1490bde3b0de182a0d41aa
| 874
|
py
|
Python
|
setup.py
|
estanislaoledesma/genper
|
5996b8bc199d8cecc74b7f6d03b67a4c356b4beb
|
[
"MIT"
] | 2
|
2021-09-24T20:10:40.000Z
|
2021-12-23T21:03:16.000Z
|
setup.py
|
estanislaoledesma/genper
|
5996b8bc199d8cecc74b7f6d03b67a4c356b4beb
|
[
"MIT"
] | 4
|
2021-09-24T19:25:38.000Z
|
2021-12-22T00:49:07.000Z
|
setup.py
|
estanislaoledesma/genper
|
5996b8bc199d8cecc74b7f6d03b67a4c356b4beb
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import os
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Fix: the original left the file handle open (open(...).read() with no
    close); a ``with`` block guarantees it is closed.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as handle:
        return handle.read()
from setuptools import setup, find_packages
# Package metadata for the 'genper' distribution; the long description is
# pulled from README.md at build time via read().
setup(
    name = "genper",
    version = "1.0.0",
    author = "Estanislao Ledesma",
    author_email = "estanislaomledesma@gmail.com",
    description = ("Software de tomografía por microondas"),
    license = "MIT",
    keywords = "genper tomografía microondas",
    packages = find_packages(),
    long_description = read('README.md'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
)
| 31.214286
| 79
| 0.663616
|
import os
# README file and 2) it's easier to type in the README file than to put a raw
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
from setuptools import setup, find_packages
setup(
name = "genper",
version = "1.0.0",
author = "Estanislao Ledesma",
author_email = "estanislaomledesma@gmail.com",
description = ("Software de tomografía por microondas"),
license = "MIT",
keywords = "genper tomografía microondas",
packages = find_packages(),
long_description = read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
| true
| true
|
f714504d98c7f4400644588df8c63bfbda6d348d
| 6,903
|
py
|
Python
|
spiketoolkit/validation/quality_metric_classes/parameter_dictionaries.py
|
seankmartin/spiketoolkit
|
38261d95045b1cd689363579c10ab3aa0a1ab7c0
|
[
"MIT"
] | null | null | null |
spiketoolkit/validation/quality_metric_classes/parameter_dictionaries.py
|
seankmartin/spiketoolkit
|
38261d95045b1cd689363579c10ab3aa0a1ab7c0
|
[
"MIT"
] | null | null | null |
spiketoolkit/validation/quality_metric_classes/parameter_dictionaries.py
|
seankmartin/spiketoolkit
|
38261d95045b1cd689363579c10ab3aa0a1ab7c0
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
# Default parameters for loading/filtering the recording.
recording_params_dict = OrderedDict([('apply_filter', True), ('freq_min',300.0), ('freq_max',6000.0)])
#Defining GUI Params
# NOTE: keys/types/values are scratch variables that are re-bound before each
# *_gui_params block below; after this section they reflect the last dict only.
keys = list(recording_params_dict.keys())
types = [type(recording_params_dict[key]) for key in keys]
values = [recording_params_dict[key] for key in keys]
recording_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If True, apply filter"},
                        {'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "High-pass frequency"},
                        {'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Low-pass frequency"}]
# Default parameters for waveform/feature extraction.
feature_params_dict = OrderedDict([('max_spikes_per_unit',300), ('recompute_info',False), ('save_features_props',True)])
#Defining GUI Params
keys = list(feature_params_dict.keys())
types = [type(feature_params_dict[key]) for key in keys]
values = [feature_params_dict[key] for key in keys]
feature_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "The maximum number of spikes to extract per unit to compute features."},
                      {'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If True, will always re-extract waveforms."},
                      {'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "If true, it will save the features in the sorting extractor."}]
# Default parameters for spike amplitude computation.
amplitude_params_dict = OrderedDict([('amp_method',"absolute"), ('amp_peak',"both"), ('amp_frames_before',3), ('amp_frames_after',3)])
#Defining GUI Params
keys = list(amplitude_params_dict.keys())
types = [type(amplitude_params_dict[key]) for key in keys]
values = [amplitude_params_dict[key] for key in keys]
amplitude_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes."},
                        {'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
                        {'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Frames before peak to compute amplitude"},
                        {'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Frames after peak to compute amplitude"}]
# Default parameters for PCA projection scores.
pca_scores_params_dict = OrderedDict([('n_comp',3), ('ms_before',1.0), ('ms_after',2.0), ('dtype',None), ('max_spikes_for_pca',100000)])
#Defining GUI Params
keys = list(pca_scores_params_dict.keys())
types = [type(pca_scores_params_dict[key]) for key in keys]
values = [pca_scores_params_dict[key] for key in keys]
pca_scores_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "n_compFeatures in template-gui format"},
                         {'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Time period in ms to cut waveforms before the spike events"},
                         {'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Time period in ms to cut waveforms after the spike events"},
                         {'name': keys[3], 'type': 'dtype', 'value': values[3], 'default': values[3], 'title': "The numpy dtype of the waveforms"},
                         {'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "The maximum number of spikes to use to compute PCA."}]
# Epoch definitions (tuples and matching names); this category has no GUI params.
epoch_params_dict =OrderedDict([('epoch_tuples',None), ('epoch_names',None)])
def get_recording_params():
    """Return a fresh copy of the default recording parameter dict."""
    return OrderedDict(recording_params_dict)

def get_amplitude_params():
    """Return a fresh copy of the default amplitude parameter dict."""
    return OrderedDict(amplitude_params_dict)

def get_pca_scores_params():
    """Return a fresh copy of the default PCA-scores parameter dict."""
    return OrderedDict(pca_scores_params_dict)

def get_epoch_params():
    """Return a fresh copy of the default epoch parameter dict."""
    return OrderedDict(epoch_params_dict)

def get_feature_params():
    """Return a fresh copy of the default feature parameter dict."""
    return OrderedDict(feature_params_dict)

def get_recording_gui_params():
    """Return a fresh copy of the recording GUI parameter list."""
    return list(recording_gui_params)

def get_amplitude_gui_params():
    """Return a fresh copy of the amplitude GUI parameter list."""
    return list(amplitude_gui_params)

def get_pca_scores_gui_params():
    """Return a fresh copy of the PCA-scores GUI parameter list."""
    return list(pca_scores_gui_params)

def get_feature_gui_params():
    """Return a fresh copy of the feature GUI parameter list."""
    return list(feature_gui_params)
def update_param_dicts(recording_params=None, amplitude_params=None,
                       pca_scores_params=None, epoch_params=None,
                       feature_params=None):
    """Validate user parameter dicts and merge them with the module defaults.

    For each category whose argument is not None, any key that is not one of
    that category's default keys raises ValueError; otherwise the user values
    are overlaid on a copy of the defaults.

    Args:
        recording_params (dict or None): overrides for recording parameters.
        amplitude_params (dict or None): overrides for amplitude parameters.
        pca_scores_params (dict or None): overrides for PCA-score parameters.
        epoch_params (dict or None): overrides for epoch parameters.
        feature_params (dict or None): overrides for feature parameters.

    Returns:
        list: merged OrderedDicts in the fixed order recording, amplitude,
        pca_scores, epoch, feature; categories whose argument was None are
        omitted.

    Raises:
        ValueError: if any supplied dict contains an unknown key.
    """
    # (user dict, defaults getter, error-message label), in output order.
    # Fix: the pca_scores branch previously raised a message naming the
    # "amplitude" dict (copy-paste bug); it now names the pca scores dict.
    categories = [
        (recording_params, get_recording_params, "recording param dict."),
        (amplitude_params, get_amplitude_params, "amplitude param dict."),
        (pca_scores_params, get_pca_scores_params, "pca scores param dict."),
        (epoch_params, get_epoch_params, "epoch params dict"),
        (feature_params, get_feature_params, "feature param dict."),
    ]
    param_dicts = []
    for user_params, get_defaults, label in categories:
        if user_params is None:
            continue
        defaults = get_defaults()
        if not set(user_params.keys()).issubset(set(defaults.keys())):
            raise ValueError("Improper parameter entered into the " + label)
        param_dicts.append(OrderedDict(defaults, **user_params))
    return param_dicts
| 56.121951
| 310
| 0.653774
|
from collections import OrderedDict
recording_params_dict = OrderedDict([('apply_filter', True), ('freq_min',300.0), ('freq_max',6000.0)])
keys = list(recording_params_dict.keys())
types = [type(recording_params_dict[key]) for key in keys]
values = [recording_params_dict[key] for key in keys]
recording_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If True, apply filter"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "High-pass frequency"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Low-pass frequency"}]
feature_params_dict = OrderedDict([('max_spikes_per_unit',300), ('recompute_info',False), ('save_features_props',True)])
keys = list(feature_params_dict.keys())
types = [type(feature_params_dict[key]) for key in keys]
values = [feature_params_dict[key] for key in keys]
feature_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "The maximum number of spikes to extract per unit to compute features."},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If True, will always re-extract waveforms."},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "If true, it will save the features in the sorting extractor."}]
amplitude_params_dict = OrderedDict([('amp_method',"absolute"), ('amp_peak',"both"), ('amp_frames_before',3), ('amp_frames_after',3)])
keys = list(amplitude_params_dict.keys())
types = [type(amplitude_params_dict[key]) for key in keys]
values = [amplitude_params_dict[key] for key in keys]
amplitude_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes."},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Frames before peak to compute amplitude"},
{'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Frames after peak to compute amplitude"}]
pca_scores_params_dict = OrderedDict([('n_comp',3), ('ms_before',1.0), ('ms_after',2.0), ('dtype',None), ('max_spikes_for_pca',100000)])
keys = list(pca_scores_params_dict.keys())
types = [type(pca_scores_params_dict[key]) for key in keys]
values = [pca_scores_params_dict[key] for key in keys]
pca_scores_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "n_compFeatures in template-gui format"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Time period in ms to cut waveforms before the spike events"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Time period in ms to cut waveforms after the spike events"},
{'name': keys[3], 'type': 'dtype', 'value': values[3], 'default': values[3], 'title': "The numpy dtype of the waveforms"},
{'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "The maximum number of spikes to use to compute PCA."}]
epoch_params_dict =OrderedDict([('epoch_tuples',None), ('epoch_names',None)])
def get_recording_params():
return recording_params_dict.copy()
def get_amplitude_params():
return amplitude_params_dict.copy()
def get_pca_scores_params():
return pca_scores_params_dict.copy()
def get_epoch_params():
return epoch_params_dict.copy()
def get_feature_params():
return feature_params_dict.copy()
def get_recording_gui_params():
return recording_gui_params.copy()
def get_amplitude_gui_params():
return amplitude_gui_params.copy()
def get_pca_scores_gui_params():
return pca_scores_gui_params.copy()
def get_feature_gui_params():
return feature_gui_params.copy()
def update_param_dicts(recording_params=None, amplitude_params=None,
pca_scores_params=None, epoch_params=None,
feature_params=None):
param_dicts = []
if recording_params is not None:
if not set(recording_params.keys()).issubset(
set(get_recording_params().keys())
):
raise ValueError("Improper parameter entered into the recording param dict.")
else:
recording_params = OrderedDict(get_recording_params(), **recording_params)
param_dicts.append(recording_params)
if amplitude_params is not None:
if not set(amplitude_params.keys()).issubset(
set(get_amplitude_params().keys())
):
raise ValueError("Improper parameter entered into the amplitude param dict.")
else:
amplitude_params = OrderedDict(get_amplitude_params(), **amplitude_params)
param_dicts.append(amplitude_params)
if pca_scores_params is not None:
if not set(pca_scores_params.keys()).issubset(
set(get_pca_scores_params().keys())
):
raise ValueError("Improper parameter entered into the amplitude param dict.")
else:
pca_scores_params = OrderedDict(get_pca_scores_params(), **pca_scores_params)
param_dicts.append(pca_scores_params)
if epoch_params is not None:
if not set(epoch_params.keys()).issubset(
set(get_epoch_params().keys())
):
raise ValueError("Improper parameter entered into the epoch params dict")
else:
epoch_params = OrderedDict(get_epoch_params(), **epoch_params)
param_dicts.append(epoch_params)
if feature_params is not None:
if not set(feature_params.keys()).issubset(
set(get_feature_params().keys())
):
raise ValueError("Improper parameter entered into the feature param dict.")
else:
feature_params = OrderedDict(get_feature_params(), **feature_params)
param_dicts.append(feature_params)
return param_dicts
| true
| true
|
f71450750ea8e5b04888fcc1b9d0708bbc947036
| 1,243
|
py
|
Python
|
test/proj4/proj-regression-EPSG-3857-20.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 7
|
2019-03-19T09:32:41.000Z
|
2022-02-07T13:20:33.000Z
|
test/proj4/proj-regression-EPSG-3857-20.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 2
|
2021-03-30T05:37:20.000Z
|
2021-08-17T13:58:04.000Z
|
test/proj4/proj-regression-EPSG-3857-20.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 5
|
2019-03-19T10:43:46.000Z
|
2021-09-09T14:28:39.000Z
|
from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
    """Render a coastline map of the corner-defined area and save it as PNG.

    The output file is named after this script; the page title records the
    projection name and the corner coordinates.
    """
    image_name = os.path.basename(__file__).split('.')[0]
    page_title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(
        epsg, llx, lly, urx, ury)

    # PNG output driver.
    png = output(
        output_formats=['png'],
        output_name=image_name,
        output_name_first_page_number='off')

    # Map area given by explicit lower-left / upper-right corners.
    area = mmap(
        subpage_map_projection=epsg,
        subpage_map_area_definition="corners",
        subpage_lower_left_longitude=llx,
        subpage_lower_left_latitude=lly,
        subpage_upper_right_longitude=urx,
        subpage_upper_right_latitude=ury)

    # Land-shaded coastlines as background.
    background = mcoast(
        map_coastline_land_shade='on',
        map_coastline_resolution="medium",
        map_coastline_land_shade_colour='cream')

    # Title text block.
    title = mtext(
        text_lines=[page_title],
        text_justification='left',
        text_font_size=0.6,
        text_colour='charcoal')

    # Compose and write the plot.
    plot(png, area, background, title)

plot_area("EPSG:3857", -19.537526614209707, 21.73608176192727, 45.466740592414304, 81.98066721424705)
| 28.906977
| 103
| 0.631537
|
from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(epsg, llx, lly, urx, ury)
png = output(
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
area = mmap(
subpage_lower_left_latitude = lly,
subpage_lower_left_longitude = llx,
subpage_map_projection = epsg,
subpage_upper_right_latitude = ury,
subpage_upper_right_longitude = urx,
subpage_map_area_definition = "corners"
)
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
plot(png,area,background,title,)
plot_area("EPSG:3857", -19.537526614209707, 21.73608176192727, 45.466740592414304, 81.98066721424705 )
| true
| true
|
f71451d02fba81b192946c47e4158536448a5bed
| 40
|
py
|
Python
|
test-django-project/testapp/urls.py
|
rhenter/django-utils
|
7e2901ac1efc3db47977b98e45754e40bfef6891
|
[
"MIT"
] | 20
|
2021-01-21T13:04:44.000Z
|
2022-03-26T22:03:19.000Z
|
test-django-project/testapp/urls.py
|
rhenter/django-utils
|
7e2901ac1efc3db47977b98e45754e40bfef6891
|
[
"MIT"
] | 4
|
2019-03-15T18:13:49.000Z
|
2019-03-20T00:06:46.000Z
|
test-django-project/testapp/urls.py
|
rhenter/django-utils
|
7e2901ac1efc3db47977b98e45754e40bfef6891
|
[
"MIT"
] | 6
|
2021-01-21T13:27:45.000Z
|
2022-03-26T21:28:22.000Z
|
# Namespace used when reversing this app's URLs (e.g. 'testapp:view-name').
app_name = 'testapp'
# No routes are registered for this test app yet.
urlpatterns = [
]
| 8
| 20
| 0.65
|
app_name = 'testapp'
urlpatterns = [
]
| true
| true
|
f714526286c921b2969c494f081547696e8bff4f
| 8,039
|
py
|
Python
|
python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Distribute CTR model for test fleet api
"""
from __future__ import print_function
import shutil
import tempfile
import time
import paddle
import paddle.fluid as fluid
import os
import numpy as np
import ctr_dataset_reader
from test_dist_fleet_heter_base import runtime_main, FleetDistHeterRunnerBase
from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader
# Static-graph mode: the fluid-style program construction below runs in it.
paddle.enable_static()
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
    """
    For test CTR model, using Fleet api.

    Builds a wide&deep CTR network split across heterogeneous devices
    (CPU for sparse embeddings, GPU for the dense fc stack) and drives
    dataset-based training through the fleet runtime.
    """

    def net(self, args, batch_size=4, lr=0.01):
        """
        network definition

        Args:
            args: runner arguments (not used inside this method).
            batch_size(int): the size of mini-batch for training
            lr(float): learning rate of training
        Returns:
            avg_cost: LoDTensor of cost.
        """
        # Sparse id spaces for the deep (dnn) and wide (lr) embeddings.
        dnn_input_dim, lr_input_dim = int(1e5), int(1e5)

        # Input layers, embeddings and pooling are placed on CPU.
        with fluid.device_guard("cpu"):
            dnn_data = fluid.layers.data(name="dnn_data",
                                         shape=[-1, 1],
                                         dtype="int64",
                                         lod_level=1,
                                         append_batch_size=False)
            lr_data = fluid.layers.data(name="lr_data",
                                        shape=[-1, 1],
                                        dtype="int64",
                                        lod_level=1,
                                        append_batch_size=False)
            label = fluid.layers.data(name="click",
                                      shape=[-1, 1],
                                      dtype="float32",
                                      lod_level=0,
                                      append_batch_size=False)

            datas = [dnn_data, lr_data, label]

            # build dnn model
            dnn_layer_dims = [128, 64, 32, 1]
            dnn_embedding = fluid.layers.embedding(
                is_distributed=False,
                input=dnn_data,
                size=[dnn_input_dim, dnn_layer_dims[0]],
                param_attr=fluid.ParamAttr(
                    name="deep_embedding",
                    initializer=fluid.initializer.Constant(value=0.01)),
                is_sparse=True)
            dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding,
                                                  pool_type="sum")
            dnn_out = dnn_pool

            # build lr model
            lr_embbding = fluid.layers.embedding(
                is_distributed=False,
                input=lr_data,
                size=[lr_input_dim, 1],
                param_attr=fluid.ParamAttr(
                    name="wide_embedding",
                    initializer=fluid.initializer.Constant(value=0.01)),
                is_sparse=True)
            lr_pool = fluid.layers.sequence_pool(input=lr_embbding,
                                                 pool_type="sum")

        # The dense fc stack runs on the heter (GPU) device.
        with fluid.device_guard("gpu"):
            for i, dim in enumerate(dnn_layer_dims[1:]):
                fc = fluid.layers.fc(
                    input=dnn_out,
                    size=dim,
                    act="relu",
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(value=0.01)),
                    name='dnn-fc-%d' % i)
                dnn_out = fc

        # Concatenate wide & deep parts and compute the loss back on CPU.
        with fluid.device_guard("cpu"):
            merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
            label = fluid.layers.cast(label, dtype="int64")
            predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
            cost = fluid.layers.cross_entropy(input=predict, label=label)
            avg_cost = fluid.layers.mean(x=cost)
            fluid.layers.Print(avg_cost, message="avg_cost")

        # Stashed for the dataset-training helpers below.
        self.feeds = datas
        self.train_file_path = ["fake1", "fake2"]
        self.avg_cost = avg_cost
        self.predict = predict

        return avg_cost

    def check_model_right(self, dirname):
        """Round-trip the saved __model__ program and dump it as readable text.

        Reads the serialized program description from *dirname*/__model__ and
        writes its string form to *dirname*/__model__.proto.
        """
        model_filename = os.path.join(dirname, "__model__")
        with open(model_filename, "rb") as f:
            program_desc_str = f.read()
        program = fluid.Program.parse_from_string(program_desc_str)
        with open(os.path.join(dirname, "__model__.proto"), "w") as wn:
            wn.write(str(program))

    def do_dataset_training(self, fleet):
        """Run one epoch of CPU-side dataset training through the fleet runtime."""
        train_file_list = ctr_dataset_reader.prepare_fake_data()

        exe = fluid.Executor(fluid.CPUPlace())
        # Section program holds this worker's slice of the heter pipeline.
        real_program = fluid.default_main_program(
        )._heter_pipeline_opt["section_program"]
        print(real_program)

        exe.run(fluid.default_startup_program())
        fleet.init_worker()

        thread_num = int(os.getenv("CPU_NUM", 2))
        batch_size = 128
        # Each worker trains only on its shard of the file list.
        filelist = fleet.util.get_file_shard(train_file_list)
        print("filelist: {}".format(filelist))

        # config dataset
        dataset = fluid.DatasetFactory().create_dataset()
        dataset.set_batch_size(batch_size)
        dataset.set_use_var(self.feeds)
        pipe_command = 'python3 ctr_dataset_reader.py'
        dataset.set_pipe_command(pipe_command)
        dataset.set_filelist(filelist)
        dataset.set_thread(thread_num)

        for epoch_id in range(1):
            pass_start = time.time()
            dataset.set_filelist(filelist)
            exe.train_from_dataset(program=fluid.default_main_program(),
                                   dataset=dataset,
                                   fetch_list=[self.avg_cost],
                                   fetch_info=["cost"],
                                   print_period=2,
                                   debug=int(os.getenv("Debug", "0")))
            pass_time = time.time() - pass_start
            print("do_dataset_training done. using time {}".format(pass_time))
        exe.close()

    def do_dataset_heter_training(self, fleet):
        """Run the heter (accelerator-side) section of dataset training."""
        exe = fluid.Executor()
        exe.run(fluid.default_startup_program())
        fleet.init_worker()

        real_program = fluid.default_main_program(
        )._heter_pipeline_opt["section_program"]
        print(real_program)

        # NOTE(review): thread_num and batch_size are computed but unused here;
        # no dataset is configured on the heter side (see commented code below).
        thread_num = int(os.getenv("CPU_NUM", 2))
        batch_size = 128

        pass_start = time.time()
        exe.train_from_dataset(program=fluid.default_main_program(),
                               fetch_list=[self.avg_cost],
                               fetch_info=["cost"],
                               print_period=2,
                               debug=int(os.getenv("Debug", "0")))
        exe.close()
        pass_time = time.time() - pass_start
        print("do_dataset_heter_training done. using time {}".format(pass_time))

        #for epoch_id in range(1):
        #    pass_start = time.time()
        #    dataset.set_filelist(filelist)
        #    exe.train_from_dataset(
        #        program=fluid.default_main_program(),
        #        dataset=dataset,
        #        fetch_list=[self.avg_cost],
        #        fetch_info=["cost"],
        #        print_period=2,
        #        debug=int(os.getenv("Debug", "0")))
        #    pass_time = time.time() - pass_start
        #    print("do_dataset_heter_training done. using time {}".format(pass_time))
if __name__ == "__main__":
    # Delegate to the shared dist-fleet heter test runner with this trainer.
    runtime_main(TestHeterPipelinePsCTR2x2)
| 36.876147
| 85
| 0.559771
|
from __future__ import print_function
import shutil
import tempfile
import time
import paddle
import paddle.fluid as fluid
import os
import numpy as np
import ctr_dataset_reader
from test_dist_fleet_heter_base import runtime_main, FleetDistHeterRunnerBase
from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader
paddle.enable_static()
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
def net(self, args, batch_size=4, lr=0.01):
dnn_input_dim, lr_input_dim = int(1e5), int(1e5)
with fluid.device_guard("cpu"):
dnn_data = fluid.layers.data(name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
lr_data = fluid.layers.data(name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
label = fluid.layers.data(name="click",
shape=[-1, 1],
dtype="float32",
lod_level=0,
append_batch_size=False)
datas = [dnn_data, lr_data, label]
dnn_layer_dims = [128, 64, 32, 1]
dnn_embedding = fluid.layers.embedding(
is_distributed=False,
input=dnn_data,
size=[dnn_input_dim, dnn_layer_dims[0]],
param_attr=fluid.ParamAttr(
name="deep_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=True)
dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding,
pool_type="sum")
dnn_out = dnn_pool
lr_embbding = fluid.layers.embedding(
is_distributed=False,
input=lr_data,
size=[lr_input_dim, 1],
param_attr=fluid.ParamAttr(
name="wide_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=True)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding,
pool_type="sum")
with fluid.device_guard("gpu"):
for i, dim in enumerate(dnn_layer_dims[1:]):
fc = fluid.layers.fc(
input=dnn_out,
size=dim,
act="relu",
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01)),
name='dnn-fc-%d' % i)
dnn_out = fc
with fluid.device_guard("cpu"):
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
label = fluid.layers.cast(label, dtype="int64")
predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
fluid.layers.Print(avg_cost, message="avg_cost")
self.feeds = datas
self.train_file_path = ["fake1", "fake2"]
self.avg_cost = avg_cost
self.predict = predict
return avg_cost
def check_model_right(self, dirname):
model_filename = os.path.join(dirname, "__model__")
with open(model_filename, "rb") as f:
program_desc_str = f.read()
program = fluid.Program.parse_from_string(program_desc_str)
with open(os.path.join(dirname, "__model__.proto"), "w") as wn:
wn.write(str(program))
def do_dataset_training(self, fleet):
train_file_list = ctr_dataset_reader.prepare_fake_data()
exe = fluid.Executor(fluid.CPUPlace())
real_program = fluid.default_main_program(
)._heter_pipeline_opt["section_program"]
print(real_program)
exe.run(fluid.default_startup_program())
fleet.init_worker()
thread_num = int(os.getenv("CPU_NUM", 2))
batch_size = 128
filelist = fleet.util.get_file_shard(train_file_list)
print("filelist: {}".format(filelist))
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_batch_size(batch_size)
dataset.set_use_var(self.feeds)
pipe_command = 'python3 ctr_dataset_reader.py'
dataset.set_pipe_command(pipe_command)
dataset.set_filelist(filelist)
dataset.set_thread(thread_num)
for epoch_id in range(1):
pass_start = time.time()
dataset.set_filelist(filelist)
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset,
fetch_list=[self.avg_cost],
fetch_info=["cost"],
print_period=2,
debug=int(os.getenv("Debug", "0")))
pass_time = time.time() - pass_start
print("do_dataset_training done. using time {}".format(pass_time))
exe.close()
def do_dataset_heter_training(self, fleet):
exe = fluid.Executor()
exe.run(fluid.default_startup_program())
fleet.init_worker()
real_program = fluid.default_main_program(
)._heter_pipeline_opt["section_program"]
print(real_program)
thread_num = int(os.getenv("CPU_NUM", 2))
batch_size = 128
pass_start = time.time()
exe.train_from_dataset(program=fluid.default_main_program(),
fetch_list=[self.avg_cost],
fetch_info=["cost"],
print_period=2,
debug=int(os.getenv("Debug", "0")))
exe.close()
pass_time = time.time() - pass_start
print("do_dataset_heter_training done. using time {}".format(pass_time))
if __name__ == "__main__":
runtime_main(TestHeterPipelinePsCTR2x2)
| true
| true
|
f714526e5dcd58db9ee80da6053783ce7e2a9594
| 1,916
|
py
|
Python
|
src/restLayer/app/api_data_sci.py
|
ucsd-ccbb/Oncolist
|
a3c7ecde6f665a665873e5aa7be5bc3778f5b17e
|
[
"MIT"
] | null | null | null |
src/restLayer/app/api_data_sci.py
|
ucsd-ccbb/Oncolist
|
a3c7ecde6f665a665873e5aa7be5bc3778f5b17e
|
[
"MIT"
] | null | null | null |
src/restLayer/app/api_data_sci.py
|
ucsd-ccbb/Oncolist
|
a3c7ecde6f665a665873e5aa7be5bc3778f5b17e
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
import sys
import pymongo
import argparse
from bson import ObjectId
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
import bottle
from bottle import Bottle, redirect, request, response, static_file, request
from bson.json_util import dumps
import author_gene_clustering_module
# Raise bottle's in-memory request body limit to 1 MiB.
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
import app
# Bottle application hosting the data-science endpoints defined below.
api = Bottle()
log = app.get_logger('api_alt')
# Simple liveness endpoint: always reports success.
@api.get('/ds/getmessage')
def ds_getmessage():
    """Return a constant success payload (health check)."""
    return dict(message='success')
# Endpoint returning the author-gene bipartite network for a gene list.
@api.get('/ds/getbpnet/:genes')
def ds_get_bp_net(genes):
    """Return the author-gene bipartite network graph as JSON.

    *genes* is a comma-separated gene list taken from the URL. Supports
    JSONP: when a ``callback`` query parameter is present, the JSON is
    wrapped in a call to that function.

    Fix: removed the unreachable ``return {'message': graph_json}`` that
    followed the unconditional ``return graph_json`` in the original.
    """
    genes_list = genes.split(',')
    graph_json = author_gene_clustering_module.analyze_AG_bipartite_network(genes_list)
    if request.query.callback:
        # JSONP response for cross-origin callers.
        response.content_type = "application/javascript"
        return "%s(%s);" % (request.query.callback, graph_json)
    return graph_json
# run the web server (NOTE: this file is Python 2 -- print statements).
def main():
    """Parse the optional port argument and serve the API until interrupted.

    Returns a process exit status: 0 on clean shutdown, 1 when the server
    could not be started.
    """
    status = 0
    parser = argparse.ArgumentParser()
    parser.add_argument('port', nargs='?', type=int, help='HTTP port', default=80)
    args = parser.parse_args()
    print 'starting web server on port %s' % args.port
    print 'press control-c to quit'
    try:
        # WebSocketHandler lets the same server accept websocket upgrades too.
        server = WSGIServer(('0.0.0.0', args.port), api, handler_class=WebSocketHandler)
        log.info('entering main loop')
        server.serve_forever()
    except KeyboardInterrupt:
        log.info('exiting main loop')
    except Exception as e:
        # NOTE(review): local name 'str' shadows the builtin for the rest of
        # this function -- consider renaming it (e.g. 'msg').
        str = 'could not start web server: %s' % e
        log.error(str)
        print str
        status = 1
    log.info('exiting with status %d', status)
    return status
if __name__ == '__main__':
    # Exit with main()'s status code when run as a script.
    sys.exit(main())
| 26.246575
| 88
| 0.694676
|
import sys
import pymongo
import argparse
from bson import ObjectId
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
import bottle
from bottle import Bottle, redirect, request, response, static_file, request
from bson.json_util import dumps
import author_gene_clustering_module
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
import app
api = Bottle()
log = app.get_logger('api_alt')
@api.get('/ds/getmessage')
def ds_getmessage():
    """Liveness probe: always responds with a success message."""
    return {
        'message' : 'success'
    }
@api.get('/ds/getbpnet/:genes')
def ds_get_bp_net(genes):
    """Analyze the author-gene bipartite network for a comma-separated gene list.

    Supports JSONP via the `callback` query parameter; otherwise returns the
    graph JSON directly.
    """
    genes_list = genes.split(',')
    graph_json = author_gene_clustering_module.analyze_AG_bipartite_network(genes_list)
    if (request.query.callback):
        response.content_type = "application/javascript"
        return "%s(%s);" % (request.query.callback, graph_json)
    # FIX: removed unreachable `return {'message': graph_json}` that followed
    # this return in the original
    return graph_json
def main():
status = 0
parser = argparse.ArgumentParser()
parser.add_argument('port', nargs='?', type=int, help='HTTP port', default=80)
args = parser.parse_args()
print 'starting web server on port %s' % args.port
print 'press control-c to quit'
try:
server = WSGIServer(('0.0.0.0', args.port), api, handler_class=WebSocketHandler)
log.info('entering main loop')
server.serve_forever()
except KeyboardInterrupt:
log.info('exiting main loop')
except Exception as e:
str = 'could not start web server: %s' % e
log.error(str)
print str
status = 1
log.info('exiting with status %d', status)
return status
if __name__ == '__main__':
sys.exit(main())
| false
| true
|
f71452cf2e938c16778cf2d6bdada38cde5b86ec
| 716
|
py
|
Python
|
tests/builder/model/test_model_builder.py
|
shfshf/deliverable_model
|
d1f34c4a719bd392033f3f9c9ccb2dbbcf6ec264
|
[
"Apache-2.0"
] | null | null | null |
tests/builder/model/test_model_builder.py
|
shfshf/deliverable_model
|
d1f34c4a719bd392033f3f9c9ccb2dbbcf6ec264
|
[
"Apache-2.0"
] | null | null | null |
tests/builder/model/test_model_builder.py
|
shfshf/deliverable_model
|
d1f34c4a719bd392033f3f9c9ccb2dbbcf6ec264
|
[
"Apache-2.0"
] | null | null | null |
import filecmp
from deliverable_model.builder.model.model_builder import ModelBuilder
def test_build(datadir, tmpdir):
    """Building from a keras h5 fixture yields the expected config, files and deps."""
    builder = ModelBuilder()
    builder.add_keras_h5_model(datadir / "fixture" / "keras_h5_model")
    builder.save()
    serialized = builder.serialize(tmpdir)
    expected_config = {
        "converter_for_request": "converter_for_request",
        "converter_for_response": "converter_for_response",
        "custom_object_dependency": [],
        "type": "keras_h5_model",
        "version": "1.0",
    }
    assert serialized == expected_config
    # the serialized output directory must match the expected fixture tree
    comparison = filecmp.dircmp(datadir / "expected", tmpdir)
    assert not comparison.diff_files
    assert builder.get_dependency() == ["tensorflow"]
| 26.518519
| 76
| 0.702514
|
import filecmp
from deliverable_model.builder.model.model_builder import ModelBuilder
def test_build(datadir, tmpdir):
    """Build a deliverable model from a keras h5 fixture and verify the
    serialized config, the files written to tmpdir, and the declared deps."""
    model_builder = ModelBuilder()
    model_builder.add_keras_h5_model(datadir / "fixture" / "keras_h5_model")
    model_builder.save()
    config = model_builder.serialize(tmpdir)
    assert config == {
        "converter_for_request": "converter_for_request",
        "converter_for_response": "converter_for_response",
        "custom_object_dependency": [],
        "type": "keras_h5_model",
        "version": "1.0",
    }
    # no files may differ between the expected fixture tree and the output
    dircmp_obj = filecmp.dircmp(datadir / "expected", tmpdir)
    assert not dircmp_obj.diff_files
    assert model_builder.get_dependency() == ["tensorflow"]
| true
| true
|
f71452f12de9bbaccc936c3a7641f37ccf59fe6e
| 92,701
|
py
|
Python
|
python/ccxt/ndax.py
|
allunderone/ccxt
|
b9e62462ad27a83ba6b0ec0ebd567357fdb7f2da
|
[
"MIT"
] | 1
|
2018-08-20T09:38:13.000Z
|
2018-08-20T09:38:13.000Z
|
python/ccxt/ndax.py
|
allunderone/ccxt
|
b9e62462ad27a83ba6b0ec0ebd567357fdb7f2da
|
[
"MIT"
] | null | null | null |
python/ccxt/ndax.py
|
allunderone/ccxt
|
b9e62462ad27a83ba6b0ec0ebd567357fdb7f2da
|
[
"MIT"
] | 1
|
2019-01-02T01:32:45.000Z
|
2019-01-02T01:32:45.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ndax(Exchange):
    def describe(self):
        """Return the static exchange description (capabilities, URLs, API
        endpoint map, fees, credentials, error mapping and default options),
        merged over the base Exchange defaults."""
        return self.deep_extend(super(ndax, self).describe(), {
            'id': 'ndax',
            'name': 'NDAX',
            'countries': ['US'],  # United States
            'rateLimit': 1000,
            'pro': True,
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createOrder': True,
                'editOrder': True,
                'fetchAccounts': True,
                'fetchBalance': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrderTrades': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTrades': True,
                'fetchWithdrawals': True,
                'signIn': True,
            },
            # timeframe label -> candle duration in seconds (as a string)
            'timeframes': {
                '1m': '60',
                '5m': '300',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '2h': '7200',
                '4h': '14400',
                '6h': '21600',
                '12h': '43200',
                '1d': '86400',
                '1w': '604800',
                '1M': '2419200',
                '4M': '9676800',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/108623144-67a3ef00-744e-11eb-8140-75c6b851e945.jpg',
                'test': {
                    'public': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                    'private': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                },
                'api': {
                    'public': 'https://api.ndax.io:8443/AP',
                    'private': 'https://api.ndax.io:8443/AP',
                },
                'www': 'https://ndax.io',
                'doc': [
                    'https://apidoc.ndax.io/',
                ],
                'fees': 'https://ndax.io/fees',
                'referral': 'https://one.ndax.io/bfQiSL',
            },
            # endpoint names used to auto-generate the implicit API methods
            'api': {
                'public': {
                    'get': [
                        'Activate2FA',
                        'Authenticate2FA',
                        'AuthenticateUser',
                        'GetL2Snapshot',
                        'GetLevel1',
                        'GetValidate2FARequiredEndpoints',
                        'LogOut',
                        'GetTickerHistory',
                        'GetProduct',
                        'GetProducts',
                        'GetInstrument',
                        'GetInstruments',
                        'Ping',
                        'trades',  # undocumented
                        'GetLastTrades',  # undocumented
                        'SubscribeLevel1',
                        'SubscribeLevel2',
                        'SubscribeTicker',
                        'SubscribeTrades',
                        'SubscribeBlockTrades',
                        'UnsubscribeBlockTrades',
                        'UnsubscribeLevel1',
                        'UnsubscribeLevel2',
                        'UnsubscribeTicker',
                        'UnsubscribeTrades',
                        'Authenticate',  # undocumented
                    ],
                },
                'private': {
                    'get': [
                        'GetUserAccountInfos',
                        'GetUserAccounts',
                        'GetUserAffiliateCount',
                        'GetUserAffiliateTag',
                        'GetUserConfig',
                        'GetAllUnredactedUserConfigsForUser',
                        'GetUnredactedUserConfigByKey',
                        'GetUserDevices',
                        'GetUserReportTickets',
                        'GetUserReportWriterResultRecords',
                        'GetAccountInfo',
                        'GetAccountPositions',
                        'GetAllAccountConfigs',
                        'GetTreasuryProductsForAccount',
                        'GetAccountTrades',
                        'GetAccountTransactions',
                        'GetOpenTradeReports',
                        'GetAllOpenTradeReports',
                        'GetTradesHistory',
                        'GetOpenOrders',
                        'GetOpenQuotes',
                        'GetOrderFee',
                        'GetOrderHistory',
                        'GetOrdersHistory',
                        'GetOrderStatus',
                        'GetOmsFeeTiers',
                        'GetAccountDepositTransactions',
                        'GetAccountWithdrawTransactions',
                        'GetAllDepositRequestInfoTemplates',
                        'GetDepositInfo',
                        'GetDepositRequestInfoTemplate',
                        'GetDeposits',
                        'GetDepositTicket',
                        'GetDepositTickets',
                        'GetOMSWithdrawFees',
                        'GetWithdrawFee',
                        'GetWithdraws',
                        'GetWithdrawTemplate',
                        'GetWithdrawTemplateTypes',
                        'GetWithdrawTicket',
                        'GetWithdrawTickets',
                    ],
                    'post': [
                        'AddUserAffiliateTag',
                        'CancelUserReport',
                        'RegisterNewDevice',
                        'SubscribeAccountEvents',
                        'UpdateUserAffiliateTag',
                        'GenerateTradeActivityReport',
                        'GenerateTransactionActivityReport',
                        'GenerateTreasuryActivityReport',
                        'ScheduleTradeActivityReport',
                        'ScheduleTransactionActivityReport',
                        'ScheduleTreasuryActivityReport',
                        'CancelAllOrders',
                        'CancelOrder',
                        'CancelQuote',
                        'CancelReplaceOrder',
                        'CreateQuote',
                        'ModifyOrder',
                        'SendOrder',
                        'SubmitBlockTrade',
                        'UpdateQuote',
                        'CancelWithdraw',
                        'CreateDepositTicket',
                        'CreateWithdrawTicket',
                        'SubmitDepositTicketComment',
                        'SubmitWithdrawTicketComment',
                        'GetOrderHistoryByOrderId',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.2 / 100,
                    'taker': 0.25 / 100,
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
                # these credentials are required for signIn() and withdraw()
                # 'login': True,
                # 'password': True,
                # 'twofa': True,
            },
            'precisionMode': TICK_SIZE,
            # maps exchange error messages to unified exception classes
            'exceptions': {
                'exact': {
                    'Not_Enough_Funds': InsufficientFunds,  # {"status":"Rejected","errormsg":"Not_Enough_Funds","errorcode":101}
                    'Server Error': ExchangeError,  # {"result":false,"errormsg":"Server Error","errorcode":102,"detail":null}
                    'Resource Not Found': OrderNotFound,  # {"result":false,"errormsg":"Resource Not Found","errorcode":104,"detail":null}
                },
                'broad': {
                    'Invalid InstrumentId': BadSymbol,  # {"result":false,"errormsg":"Invalid InstrumentId: 10000","errorcode":100,"detail":null}
                    'This endpoint requires 2FACode along with the payload': AuthenticationError,
                },
            },
            'options': {
                'omsId': 1,
                # unified order type -> numeric OrderType code used by SendOrder
                'orderTypes': {
                    'Market': 1,
                    'Limit': 2,
                    'StopMarket': 3,
                    'StopLimit': 4,
                    'TrailingStopMarket': 5,
                    'TrailingStopLimit': 6,
                    'BlockTrade': 7,
                },
            },
        })
    def sign_in(self, params={}):
        """Authenticate the API session.

        Requires `exchange.login`, `exchange.password` and `exchange.twofa` in
        addition to the standard credentials.  First calls Authenticate; if a
        session token is returned it is cached in self.options.  Otherwise, if
        a pending 2FA token is returned, completes the flow via Authenticate2FA
        with a TOTP code.  Returns the last raw response in every case.
        """
        self.check_required_credentials()
        if self.login is None or self.password is None or self.twofa is None:
            raise AuthenticationError(self.id + ' signIn() requires exchange.login, exchange.password and exchange.twofa credentials')
        request = {
            'grant_type': 'client_credentials',  # the only supported value
        }
        response = self.publicGetAuthenticate(self.extend(request, params))
        #
        #     {
        #         "Authenticated":true,
        #         "Requires2FA":true,
        #         "AuthType":"Google",
        #         "AddtlInfo":"",
        #         "Pending2FaToken": "6f5c4e66-f3ee-493e-9227-31cc0583b55f"
        #     }
        #
        sessionToken = self.safe_string(response, 'SessionToken')
        if sessionToken is not None:
            # no 2FA step required — cache the session token and return
            self.options['sessionToken'] = sessionToken
            return response
        pending2faToken = self.safe_string(response, 'Pending2FaToken')
        if pending2faToken is not None:
            # second step: confirm with a one-time TOTP code
            self.options['pending2faToken'] = pending2faToken
            request = {
                'Code': self.oath(),
            }
            response = self.publicGetAuthenticate2FA(self.extend(request, params))
            #
            #     {
            #         "Authenticated": True,
            #         "UserId":57765,
            #         "SessionToken":"4a2a5857-c4e5-4fac-b09e-2c4c30b591a0"
            #     }
            #
            sessionToken = self.safe_string(response, 'SessionToken')
            self.options['sessionToken'] = sessionToken
            return response
        return response
def fetch_currencies(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetProducts(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "ProductId":1,
# "Product":"BTC",
# "ProductFullName":"Bitcoin",
# "ProductType":"CryptoCurrency",
# "DecimalPlaces":8,
# "TickSize":0.0000000100000000000000000000,
# "NoFees":false,
# "IsDisabled":false,
# "MarginEnabled":false
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'ProductId')
name = self.safe_string(currency, 'ProductFullName')
type = self.safe_string(currency, 'ProductType')
code = self.safe_currency_code(self.safe_string(currency, 'Product'))
precision = self.safe_number(currency, 'TickSize')
isDisabled = self.safe_value(currency, 'IsDisabled')
active = not isDisabled
result[code] = {
'id': id,
'name': name,
'code': code,
'type': type,
'precision': precision,
'info': currency,
'active': active,
'fee': None,
'limits': self.limits,
}
return result
def fetch_markets(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetInstruments(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "InstrumentId":3,
# "Symbol":"LTCBTC",
# "Product1":3,
# "Product1Symbol":"LTC",
# "Product2":1,
# "Product2Symbol":"BTC",
# "InstrumentType":"Standard",
# "VenueInstrumentId":3,
# "VenueId":1,
# "SortIndex":0,
# "SessionStatus":"Running",
# "PreviousSessionStatus":"Stopped",
# "SessionStatusDateTime":"2020-11-25T19:42:15.245Z",
# "SelfTradePrevention":true,
# "QuantityIncrement":0.0000000100000000000000000000,
# "PriceIncrement":0.0000000100000000000000000000,
# "MinimumQuantity":0.0100000000000000000000000000,
# "MinimumPrice":0.0000010000000000000000000000,
# "VenueSymbol":"LTCBTC",
# "IsDisable":false,
# "MasterDataId":0,
# "PriceCollarThreshold":0.0000000000000000000000000000,
# "PriceCollarPercent":0.0000000000000000000000000000,
# "PriceCollarEnabled":false,
# "PriceFloorLimit":0.0000000000000000000000000000,
# "PriceFloorLimitEnabled":false,
# "PriceCeilingLimit":0.0000000000000000000000000000,
# "PriceCeilingLimitEnabled":false,
# "CreateWithMarketRunning":true,
# "AllowOnlyMarketMakerCounterParty":false,
# "PriceCollarIndexDifference":0.0000000000000000000000000000,
# "PriceCollarConvertToOtcEnabled":false,
# "PriceCollarConvertToOtcClientUserId":0,
# "PriceCollarConvertToOtcAccountId":0,
# "PriceCollarConvertToOtcThreshold":0.0000000000000000000000000000,
# "OtcConvertSizeThreshold":0.0000000000000000000000000000,
# "OtcConvertSizeEnabled":false,
# "OtcTradesPublic":true,
# "PriceTier":0
# },
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'InstrumentId')
# lowercaseId = self.safe_string_lower(market, 'symbol')
baseId = self.safe_string(market, 'Product1')
quoteId = self.safe_string(market, 'Product2')
base = self.safe_currency_code(self.safe_string(market, 'Product1Symbol'))
quote = self.safe_currency_code(self.safe_string(market, 'Product2Symbol'))
symbol = base + '/' + quote
precision = {
'amount': self.safe_number(market, 'QuantityIncrement'),
'price': self.safe_number(market, 'PriceIncrement'),
}
sessionStatus = self.safe_string(market, 'SessionStatus')
isDisable = self.safe_value(market, 'IsDisable')
sessionRunning = (sessionStatus == 'Running')
active = True if (sessionRunning and not isDisable) else False
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(market, 'MinimumQuantity'),
'max': None,
},
'price': {
'min': self.safe_number(market, 'MinimumPrice'),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=6, amountKey=8):
nonce = None
result = {
'symbol': symbol,
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(orderbook)):
level = orderbook[i]
if timestamp is None:
timestamp = self.safe_integer(level, 2)
else:
newTimestamp = self.safe_integer(level, 2)
timestamp = max(timestamp, newTimestamp)
if nonce is None:
nonce = self.safe_integer(level, 0)
else:
newNonce = self.safe_integer(level, 0)
nonce = max(nonce, newNonce)
bidask = self.parse_bid_ask(level, priceKey, amountKey)
levelSide = self.safe_integer(level, 9)
side = asksKey if levelSide else bidsKey
result[side].append(bidask)
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
result['nonce'] = nonce
return result
def fetch_order_book(self, symbol, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
limit = 100 if (limit is None) else limit # default 100
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Depth': limit, # default 100
}
response = self.publicGetGetL2Snapshot(self.extend(request, params))
#
# [
# [
# 0, # 0 MDUpdateId
# 1, # 1 Number of Unique Accounts
# 123, # 2 ActionDateTime in Posix format X 1000
# 0, # 3 ActionType 0(New), 1(Update), 2(Delete)
# 0.0, # 4 LastTradePrice
# 0, # 5 Number of Orders
# 0.0, # 6 Price
# 0, # 7 ProductPairCode
# 0.0, # 8 Quantity
# 0, # 9 Side
# ],
# [97244115,1,1607456142963,0,19069.32,1,19069.31,8,0.140095,0],
# [97244115,0,1607456142963,0,19069.32,1,19068.64,8,0.0055,0],
# [97244115,0,1607456142963,0,19069.32,1,19068.26,8,0.021291,0],
# [97244115,1,1607456142964,0,19069.32,1,19069.32,8,0.099636,1],
# [97244115,0,1607456142964,0,19069.32,1,19069.98,8,0.1,1],
# [97244115,0,1607456142964,0,19069.32,1,19069.99,8,0.141604,1],
# ]
#
return self.parse_order_book(response, symbol)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "OMSId":1,
# "InstrumentId":8,
# "BestBid":19069.31,
# "BestOffer":19069.32,
# "LastTradedPx":19069.32,
# "LastTradedQty":0.0001,
# "LastTradeTime":1607040406424,
# "SessionOpen":19069.32,
# "SessionHigh":19069.32,
# "SessionLow":19069.32,
# "SessionClose":19069.32,
# "Volume":0.0001,
# "CurrentDayVolume":0.0001,
# "CurrentDayNotional":1.906932,
# "CurrentDayNumTrades":1,
# "CurrentDayPxChange":0.00,
# "Rolling24HrVolume":0.000000000000000000000000000,
# "Rolling24HrNotional":0.00000000000000000000000,
# "Rolling24NumTrades":0,
# "Rolling24HrPxChange":0,
# "TimeStamp":"1607040406425",
# "BidQty":0,
# "AskQty":0,
# "BidOrderCt":0,
# "AskOrderCt":0,
# "Rolling24HrPxChangePercent":0,
# }
#
timestamp = self.safe_integer(ticker, 'TimeStamp')
marketId = self.safe_string(ticker, 'InstrumentId')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'LastTradedPx')
percentage = self.safe_number(ticker, 'Rolling24HrPxChangePercent')
change = self.safe_number(ticker, 'Rolling24HrPxChange')
open = self.safe_number(ticker, 'SessionOpen')
average = None
if (last is not None) and (change is not None):
average = self.sum(last, open) / 2
baseVolume = self.safe_number(ticker, 'Rolling24HrVolume')
quoteVolume = self.safe_number(ticker, 'Rolling24HrNotional')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'SessionHigh'),
'low': self.safe_number(ticker, 'SessionLow'),
'bid': self.safe_number(ticker, 'BestBid'),
'bidVolume': None, # self.safe_number(ticker, 'BidQty'), always shows 0
'ask': self.safe_number(ticker, 'BestOffer'),
'askVolume': None, # self.safe_number(ticker, 'AskQty'), always shows 0
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
response = self.publicGetGetLevel1(self.extend(request, params))
#
# {
# "OMSId":1,
# "InstrumentId":8,
# "BestBid":19069.31,
# "BestOffer":19069.32,
# "LastTradedPx":19069.32,
# "LastTradedQty":0.0001,
# "LastTradeTime":1607040406424,
# "SessionOpen":19069.32,
# "SessionHigh":19069.32,
# "SessionLow":19069.32,
# "SessionClose":19069.32,
# "Volume":0.0001,
# "CurrentDayVolume":0.0001,
# "CurrentDayNotional":1.906932,
# "CurrentDayNumTrades":1,
# "CurrentDayPxChange":0.00,
# "Rolling24HrVolume":0.000000000000000000000000000,
# "Rolling24HrNotional":0.00000000000000000000000,
# "Rolling24NumTrades":0,
# "Rolling24HrPxChange":0,
# "TimeStamp":"1607040406425",
# "BidQty":0,
# "AskQty":0,
# "BidOrderCt":0,
# "AskOrderCt":0,
# "Rolling24HrPxChangePercent":0,
# }
#
return self.parse_ticker(response, market)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1501603632000, # 0 DateTime
# 2700.33, # 1 High
# 2687.01, # 2 Low
# 2687.01, # 3 Open
# 2687.01, # 4 Close
# 24.86100992, # 5 Volume
# 0, # 6 Inside Bid Price
# 2870.95, # 7 Inside Ask Price
# 1 # 8 InstrumentId
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.milliseconds()
if since is None:
if limit is not None:
request['FromDate'] = self.ymdhms(now - duration * limit * 1000)
request['ToDate'] = self.ymdhms(now)
else:
request['FromDate'] = self.ymdhms(since)
if limit is None:
request['ToDate'] = self.ymdhms(now)
else:
request['ToDate'] = self.ymdhms(self.sum(since, duration * limit * 1000))
response = self.publicGetGetTickerHistory(self.extend(request, params))
#
# [
# [1607299260000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299200000],
# [1607299320000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299260000],
# [1607299380000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299320000],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# [
# 6913253, # 0 TradeId
# 8, # 1 ProductPairCode
# 0.03340802, # 2 Quantity
# 19116.08, # 3 Price
# 2543425077, # 4 Order1
# 2543425482, # 5 Order2
# 1606935922416, # 6 Tradetime
# 0, # 7 Direction
# 1, # 8 TakerSide
# 0, # 9 BlockTrade
# 0, # 10 Either Order1ClientId or Order2ClientId
# ]
#
# fetchMyTrades(private)
#
# {
# "OMSId":1,
# "ExecutionId":16916567,
# "TradeId":14476351,
# "OrderId":2543565231,
# "AccountId":449,
# "AccountName":"igor@ccxt.trade",
# "SubAccountId":0,
# "ClientOrderId":0,
# "InstrumentId":8,
# "Side":"Sell",
# "OrderType":"Market",
# "Quantity":0.1230000000000000000000000000,
# "RemainingQuantity":0.0000000000000000000000000000,
# "Price":19069.310000000000000000000000,
# "Value":2345.5251300000000000000000000,
# "CounterParty":"7",
# "OrderTradeRevision":1,
# "Direction":"NoChange",
# "IsBlockTrade":false,
# "Fee":1.1727625650000000000000000000,
# "FeeProductId":8,
# "OrderOriginator":446,
# "UserName":"igor@ccxt.trade",
# "TradeTimeMS":1607565031569,
# "MakerTaker":"Taker",
# "AdapterTradeId":0,
# "InsideBid":19069.310000000000000000000000,
# "InsideBidSize":0.2400950000000000000000000000,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "IsQuote":false,
# "CounterPartyClientUserId":1,
# "NotionalProductId":2,
# "NotionalRate":1.0000000000000000000000000000,
# "NotionalValue":2345.5251300000000000000000000,
# "NotionalHoldAmount":0,
# "TradeTime":637431618315686826
# }
#
# fetchOrderTrades
#
# {
# "Side":"Sell",
# "OrderId":2543565235,
# "Price":18600.000000000000000000000000,
# "Quantity":0.0000000000000000000000000000,
# "DisplayQuantity":0.0000000000000000000000000000,
# "Instrument":8,
# "Account":449,
# "AccountName":"igor@ccxt.trade",
# "OrderType":"Limit",
# "ClientOrderId":0,
# "OrderState":"FullyExecuted",
# "ReceiveTime":1607585844956,
# "ReceiveTimeTicks":637431826449564182,
# "LastUpdatedTime":1607585844959,
# "LastUpdatedTimeTicks":637431826449593893,
# "OrigQuantity":0.1230000000000000000000000000,
# "QuantityExecuted":0.1230000000000000000000000000,
# "GrossValueExecuted":2345.3947500000000000000000000,
# "ExecutableValue":0.0000000000000000000000000000,
# "AvgPrice":19068.250000000000000000000000,
# "CounterPartyId":0,
# "ChangeReason":"Trade",
# "OrigOrderId":2543565235,
# "OrigClOrdId":0,
# "EnteredBy":446,
# "UserName":"igor@ccxt.trade",
# "IsQuote":false,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "InsideBid":19068.250000000000000000000000,
# "InsideBidSize":1.3300010000000000000000000000,
# "LastTradePrice":19068.250000000000000000000000,
# "RejectReason":"",
# "IsLockedIn":false,
# "CancelReason":"",
# "OrderFlag":"0",
# "UseMargin":false,
# "StopPrice":0.0000000000000000000000000000,
# "PegPriceType":"Unknown",
# "PegOffset":0.0000000000000000000000000000,
# "PegLimitOffset":0.0000000000000000000000000000,
# "IpAddress":"x.x.x.x",
# "ClientOrderIdUuid":null,
# "OMSId":1
# }
#
priceString = None
amountString = None
cost = None
timestamp = None
id = None
marketId = None
side = None
orderId = None
takerOrMaker = None
fee = None
type = None
if isinstance(trade, list):
priceString = self.safe_string(trade, 3)
amountString = self.safe_string(trade, 2)
timestamp = self.safe_integer(trade, 6)
id = self.safe_string(trade, 0)
marketId = self.safe_string(trade, 1)
takerSide = self.safe_value(trade, 8)
side = 'sell' if takerSide else 'buy'
orderId = self.safe_string(trade, 4)
else:
timestamp = self.safe_integer_2(trade, 'TradeTimeMS', 'ReceiveTime')
id = self.safe_string(trade, 'TradeId')
orderId = self.safe_string_2(trade, 'OrderId', 'OrigOrderId')
marketId = self.safe_string_2(trade, 'InstrumentId', 'Instrument')
priceString = self.safe_string(trade, 'Price')
amountString = self.safe_string(trade, 'Quantity')
cost = self.safe_number_2(trade, 'Value', 'GrossValueExecuted')
takerOrMaker = self.safe_string_lower(trade, 'MakerTaker')
side = self.safe_string_lower(trade, 'Side')
type = self.safe_string_lower(trade, 'OrderType')
feeCost = self.safe_number(trade, 'Fee')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'FeeProductId')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
if cost is None:
cost = self.parse_number(Precise.string_mul(priceString, amountString))
symbol = self.safe_symbol(marketId, market)
return {
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
if limit is not None:
request['Count'] = limit
response = self.publicGetGetLastTrades(self.extend(request, params))
#
# [
# [6913253,8,0.03340802,19116.08,2543425077,2543425482,1606935922416,0,1,0,0],
# [6913254,8,0.01391671,19117.42,2543427510,2543427811,1606935927998,1,1,0,0],
# [6913255,8,0.000006,19107.81,2543430495,2543430793,1606935933881,2,0,0,0],
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_accounts(self, params={}):
if not self.login:
raise AuthenticationError(self.id + ' fetchAccounts() requires exchange.login email credential')
omsId = self.safe_integer(self.options, 'omsId', 1)
self.check_required_credentials()
request = {
'omsId': omsId,
'UserId': self.uid,
'UserName': self.login,
}
response = self.privateGetGetUserAccounts(self.extend(request, params))
#
# [449] # comma-separated list of account ids
#
result = []
for i in range(0, len(response)):
accountId = self.safe_string(response, i)
result.append({
'id': accountId,
'type': None,
'currency': None,
'info': accountId,
})
return result
def fetch_balance(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetAccountPositions(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "AccountId":449,
# "ProductSymbol":"BTC",
# "ProductId":1,
# "Amount":10.000000000000000000000000000,
# "Hold":0,
# "PendingDeposits":0.0000000000000000000000000000,
# "PendingWithdraws":0.0000000000000000000000000000,
# "TotalDayDeposits":10.000000000000000000000000000,
# "TotalMonthDeposits":10.000000000000000000000000000,
# "TotalYearDeposits":10.000000000000000000000000000,
# "TotalDayDepositNotional":10.000000000000000000000000000,
# "TotalMonthDepositNotional":10.000000000000000000000000000,
# "TotalYearDepositNotional":10.000000000000000000000000000,
# "TotalDayWithdraws":0,
# "TotalMonthWithdraws":0,
# "TotalYearWithdraws":0,
# "TotalDayWithdrawNotional":0,
# "TotalMonthWithdrawNotional":0,
# "TotalYearWithdrawNotional":0,
# "NotionalProductId":8,
# "NotionalProductSymbol":"USDT",
# "NotionalValue":10.000000000000000000000000000,
# "NotionalHoldAmount":0,
# "NotionalRate":1
# },
# ]
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'ProductId')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'Amount')
account['used'] = self.safe_string(balance, 'Hold')
result[code] = account
return self.parse_balance(result)
def parse_ledger_entry_type(self, type):
types = {
'Trade': 'trade',
'Deposit': 'transaction',
'Withdraw': 'transaction',
'Transfer': 'transfer',
'OrderHold': 'trade',
'WithdrawHold': 'transaction',
'DepositHold': 'transaction',
'MarginHold': 'trade',
'ManualHold': 'trade',
'ManualEntry': 'trade',
'MarginAcquisition': 'trade',
'MarginRelinquish': 'trade',
'MarginQuoteHold': 'trade',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "TransactionId":2663709493,
# "ReferenceId":68,
# "OMSId":1,
# "AccountId":449,
# "CR":10.000000000000000000000000000,
# "DR":0.0000000000000000000000000000,
# "Counterparty":3,
# "TransactionType":"Other",
# "ReferenceType":"Deposit",
# "ProductId":1,
# "Balance":10.000000000000000000000000000,
# "TimeStamp":1607532331591
# }
#
id = self.safe_string(item, 'TransactionId')
account = self.safe_string(item, 'AccountId')
referenceId = self.safe_string(item, 'ReferenceId')
referenceAccount = self.safe_string(item, 'Counterparty')
type = self.parse_ledger_entry_type(self.safe_string(item, 'ReferenceType'))
currencyId = self.safe_string(item, 'ProductId')
code = self.safe_currency_code(currencyId, currency)
credit = self.safe_number(item, 'CR')
debit = self.safe_number(item, 'DR')
amount = None
direction = None
if credit > 0:
amount = credit
direction = 'in'
elif debit > 0:
amount = debit
direction = 'out'
timestamp = self.safe_integer(item, 'TimeStamp')
before = None
after = self.safe_number(item, 'Balance')
if direction == 'out':
before = self.sum(after, amount)
elif direction == 'in':
before = max(0, after - amount)
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetAccountTransactions(self.extend(request, params))
#
# [
# {
# "TransactionId":2663709493,
# "ReferenceId":68,
# "OMSId":1,
# "AccountId":449,
# "CR":10.000000000000000000000000000,
# "DR":0.0000000000000000000000000000,
# "Counterparty":3,
# "TransactionType":"Other",
# "ReferenceType":"Deposit",
# "ProductId":1,
# "Balance":10.000000000000000000000000000,
# "TimeStamp":1607532331591
# },
# ]
#
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_ledger(response, currency, since, limit)
def parse_order_status(self, status):
statuses = {
'Accepted': 'open',
'Rejected': 'rejected',
'Working': 'open',
'Canceled': 'canceled',
'Expired': 'expired',
'FullyExecuted': 'closed',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """
        Parse an exchange order payload (from createOrder, editOrder,
        fetchOpenOrders, fetchClosedOrders or fetchOrder) into a unified
        ccxt order structure.

        :param dict order: the raw order from the exchange
        :param dict market: the market the order belongs to, if known
        :returns dict: a unified order structure
        """
        #
        # createOrder
        #
        #     {
        #         "status":"Accepted",
        #         "errormsg":"",
        #         "OrderId": 2543565231
        #     }
        #
        # editOrder
        #
        #     {
        #         "ReplacementOrderId": 1234,
        #         "ReplacementClOrdId": 1561,
        #         "OrigOrderId": 5678,
        #         "OrigClOrdId": 91011,
        #     }
        #
        # fetchOpenOrders, fetchClosedOrders
        #
        #     {
        #         "Side":"Buy",
        #         "OrderId":2543565233,
        #         "Price":19010,
        #         "Quantity":0.345,
        #         "DisplayQuantity":0.345,
        #         "Instrument":8,
        #         "Account":449,
        #         "AccountName":"igor@ccxt.trade",
        #         "OrderType":"Limit",
        #         "ClientOrderId":0,
        #         "OrderState":"Working",
        #         "ReceiveTime":1607579326003,
        #         "ReceiveTimeTicks":637431761260028981,
        #         "LastUpdatedTime":1607579326005,
        #         "LastUpdatedTimeTicks":637431761260054714,
        #         "OrigQuantity":0.345,
        #         "QuantityExecuted":0,
        #         "GrossValueExecuted":0,
        #         "ExecutableValue":0,
        #         "AvgPrice":0,
        #         "CounterPartyId":0,
        #         "ChangeReason":"NewInputAccepted",
        #         "OrigOrderId":2543565233,
        #         "OrigClOrdId":0,
        #         "EnteredBy":446,
        #         "UserName":"igor@ccxt.trade",
        #         "IsQuote":false,
        #         "InsideAsk":19069.32,
        #         "InsideAskSize":0.099736,
        #         "InsideBid":19068.25,
        #         "InsideBidSize":1.330001,
        #         "LastTradePrice":19068.25,
        #         "RejectReason":"",
        #         "IsLockedIn":false,
        #         "CancelReason":"",
        #         "OrderFlag":"AddedToBook",
        #         "UseMargin":false,
        #         "StopPrice":0,
        #         "PegPriceType":"Unknown",
        #         "PegOffset":0,
        #         "PegLimitOffset":0,
        #         "IpAddress":null,
        #         "ClientOrderIdUuid":null,
        #         "OMSId":1
        #     }
        #
        # editOrder responses carry 'ReplacementOrderId', all others 'OrderId'
        id = self.safe_string_2(order, 'ReplacementOrderId', 'OrderId')
        timestamp = self.safe_integer(order, 'ReceiveTime')
        lastTradeTimestamp = self.safe_integer(order, 'LastUpdatedTime')
        marketId = self.safe_string(order, 'Instrument')
        symbol = self.safe_symbol(marketId, market)
        side = self.safe_string_lower(order, 'Side')
        type = self.safe_string_lower(order, 'OrderType')
        clientOrderId = self.safe_string_2(order, 'ReplacementClOrdId', 'ClientOrderId')
        # the exchange reports 0 for "not set" on Price/AvgPrice/StopPrice,
        # so zero values are normalized to None below
        price = self.safe_number(order, 'Price', 0.0)
        price = price if (price > 0.0) else None
        amount = self.safe_number(order, 'OrigQuantity')
        filled = self.safe_number(order, 'QuantityExecuted')
        cost = self.safe_number(order, 'GrossValueExecuted')
        average = self.safe_number(order, 'AvgPrice', 0.0)
        average = average if (average > 0) else None
        stopPrice = self.safe_number(order, 'StopPrice', 0.0)
        stopPrice = stopPrice if (stopPrice > 0.0) else None
        timeInForce = None
        status = self.parse_order_status(self.safe_string(order, 'OrderState'))
        fee = None
        trades = None
        return self.safe_order({
            'id': id,
            'clientOrderId': clientOrderId,
            'info': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'status': status,
            'symbol': symbol,
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': stopPrice,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'average': average,
            'remaining': None,
            'fee': fee,
            'trades': trades,
        })
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        Create an order via SendOrder.

        :param str symbol: unified market symbol
        :param str type: order type, looked up in self.options['orderTypes'] (e.g. 'limit', 'market')
        :param str side: 'buy' or 'sell'
        :param float amount: order quantity in base currency
        :param float price: limit price; when supplied with a market buy it caps the executable value
        :param dict params: extra parameters; 'accountId'/'AccountId' and 'clientOrderId'/'ClientOrderId' are extracted
        :returns dict: a unified order structure
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
        params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
        market = self.market(symbol)
        orderSide = 0 if (side == 'buy') else 1
        request = {
            'InstrumentId': int(market['id']),
            'omsId': omsId,
            'AccountId': accountId,
            'TimeInForce': 1,  # 0 Unknown, 1 GTC by default, 2 OPG execute as close to opening price as possible, 3 IOC immediate or canceled,  4 FOK fill-or-kill, 5 GTX good 'til executed, 6 GTD good 'til date
            # 'ClientOrderId': clientOrderId,  # defaults to 0
            # If self order is order A, OrderIdOCO refers to the order ID of an order B(which is not the order being created by self call).
            # If order B executes, then order A created by self call is canceled.
            # You can also set up order B to watch order A in the same way, but that may require an update to order B to make it watch self one, which could have implications for priority in the order book.
            # See CancelReplaceOrder and ModifyOrder.
            # 'OrderIdOCO': 0,  # The order ID if One Cancels the Other.
            # 'UseDisplayQuantity': False,  # If you enter a Limit order with a reserve, you must set UseDisplayQuantity to True
            'Side': orderSide,  # 0 Buy, 1 Sell, 2 Short, 3 unknown an error condition
            'Quantity': float(self.amount_to_precision(symbol, amount)),
            'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),  # 0 Unknown, 1 Market, 2 Limit, 3 StopMarket, 4 StopLimit, 5 TrailingStopMarket, 6 TrailingStopLimit, 7 BlockTrade
            # 'PegPriceType': 3,  # 1 Last, 2 Bid, 3 Ask, 4 Midpoint
            # 'LimitPrice': float(self.price_to_precision(symbol, price)),
        }
        # If OrderType=1(Market), Side=0(Buy), and LimitPrice is supplied, the Market order will execute up to the value specified
        if price is not None:
            request['LimitPrice'] = float(self.price_to_precision(symbol, price))
        if clientOrderId is not None:
            request['ClientOrderId'] = clientOrderId
        response = self.privatePostSendOrder(self.extend(request, params))
        #
        #     {
        #         "status":"Accepted",
        #         "errormsg":"",
        #         "OrderId": 2543565231
        #     }
        #
        return self.parse_order(response, market)
    def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
        """
        Replace an existing order via CancelReplaceOrder.

        Mirrors create_order but sends 'OrderIdToReplace' so the exchange
        atomically cancels the old order and enters the new one.

        :param str id: the id of the order to replace
        :param str symbol: unified market symbol
        :param str type: order type, looked up in self.options['orderTypes']
        :param str side: 'buy' or 'sell'
        :param float amount: new order quantity in base currency
        :param float price: new limit price, optional
        :param dict params: extra parameters; 'accountId'/'AccountId' and 'clientOrderId'/'ClientOrderId' are extracted
        :returns dict: a unified order structure
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
        params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
        market = self.market(symbol)
        orderSide = 0 if (side == 'buy') else 1
        request = {
            'OrderIdToReplace': int(id),
            'InstrumentId': int(market['id']),
            'omsId': omsId,
            'AccountId': accountId,
            'TimeInForce': 1,  # 0 Unknown, 1 GTC by default, 2 OPG execute as close to opening price as possible, 3 IOC immediate or canceled,  4 FOK fill-or-kill, 5 GTX good 'til executed, 6 GTD good 'til date
            # 'ClientOrderId': clientOrderId,  # defaults to 0
            # If self order is order A, OrderIdOCO refers to the order ID of an order B(which is not the order being created by self call).
            # If order B executes, then order A created by self call is canceled.
            # You can also set up order B to watch order A in the same way, but that may require an update to order B to make it watch self one, which could have implications for priority in the order book.
            # See CancelReplaceOrder and ModifyOrder.
            # 'OrderIdOCO': 0,  # The order ID if One Cancels the Other.
            # 'UseDisplayQuantity': False,  # If you enter a Limit order with a reserve, you must set UseDisplayQuantity to True
            'Side': orderSide,  # 0 Buy, 1 Sell, 2 Short, 3 unknown an error condition
            'Quantity': float(self.amount_to_precision(symbol, amount)),
            'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),  # 0 Unknown, 1 Market, 2 Limit, 3 StopMarket, 4 StopLimit, 5 TrailingStopMarket, 6 TrailingStopLimit, 7 BlockTrade
            # 'PegPriceType': 3,  # 1 Last, 2 Bid, 3 Ask, 4 Midpoint
            # 'LimitPrice': float(self.price_to_precision(symbol, price)),
        }
        # If OrderType=1(Market), Side=0(Buy), and LimitPrice is supplied, the Market order will execute up to the value specified
        if price is not None:
            request['LimitPrice'] = float(self.price_to_precision(symbol, price))
        if clientOrderId is not None:
            request['ClientOrderId'] = clientOrderId
        response = self.privatePostCancelReplaceOrder(self.extend(request, params))
        #
        #     {
        #         "replacementOrderId": 1234,
        #         "replacementClOrdId": 1561,
        #         "origOrderId": 5678,
        #         "origClOrdId": 91011,
        #     }
        #
        return self.parse_order(response, market)
    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """
        Fetch the user's trade history via GetTradesHistory.

        :param str symbol: unified market symbol to filter by, optional
        :param int since: earliest time to fetch, in milliseconds; sent as seconds in 'StartTimeStamp'
        :param int limit: maximum number of trades, sent as 'Depth'
        :param dict params: extra parameters; 'accountId'/'AccountId' overrides the default account
        :returns list: a list of unified trade structures
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        request = {
            'omsId': omsId,
            'AccountId': accountId,
            # 'InstrumentId': market['id'],
            # 'TradeId': 123,  # If you specify TradeId, GetTradesHistory can return all states for a single trade
            # 'OrderId': 456,  # If specified, the call returns all trades associated with the order
            # 'UserId': integer. The ID of the logged-in user. If not specified, the call returns trades associated with the users belonging to the default account for the logged-in user of self OMS.
            # 'StartTimeStamp': long integer. The historical date and time at which to begin the trade report, in POSIX format. If not specified, reverts to the start date of self account on the trading venue.
            # 'EndTimeStamp': long integer. Date at which to end the trade report, in POSIX format.
            # 'Depth': integer. In self case, the count of trades to return, counting from the StartIndex. If Depth is not specified, returns all trades between BeginTimeStamp and EndTimeStamp, beginning at StartIndex.
            # 'StartIndex': 0  # from the most recent trade 0 and moving backwards in time
            # 'ExecutionId': 123,  # The ID of the individual buy or sell execution. If not specified, returns all.
        }
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['InstrumentId'] = market['id']
        if since is not None:
            # the API expects POSIX seconds, ccxt uses milliseconds
            request['StartTimeStamp'] = int(since / 1000)
        if limit is not None:
            request['Depth'] = limit
        response = self.privateGetGetTradesHistory(self.extend(request, params))
        #
        #     [
        #         {
        #             "OMSId":1,
        #             "ExecutionId":16916567,
        #             "TradeId":14476351,
        #             "OrderId":2543565231,
        #             "AccountId":449,
        #             "AccountName":"igor@ccxt.trade",
        #             "SubAccountId":0,
        #             "ClientOrderId":0,
        #             "InstrumentId":8,
        #             "Side":"Sell",
        #             "OrderType":"Market",
        #             "Quantity":0.1230000000000000000000000000,
        #             "RemainingQuantity":0.0000000000000000000000000000,
        #             "Price":19069.310000000000000000000000,
        #             "Value":2345.5251300000000000000000000,
        #             "CounterParty":"7",
        #             "OrderTradeRevision":1,
        #             "Direction":"NoChange",
        #             "IsBlockTrade":false,
        #             "Fee":1.1727625650000000000000000000,
        #             "FeeProductId":8,
        #             "OrderOriginator":446,
        #             "UserName":"igor@ccxt.trade",
        #             "TradeTimeMS":1607565031569,
        #             "MakerTaker":"Taker",
        #             "AdapterTradeId":0,
        #             "InsideBid":19069.310000000000000000000000,
        #             "InsideBidSize":0.2400950000000000000000000000,
        #             "InsideAsk":19069.320000000000000000000000,
        #             "InsideAskSize":0.0997360000000000000000000000,
        #             "IsQuote":false,
        #             "CounterPartyClientUserId":1,
        #             "NotionalProductId":2,
        #             "NotionalRate":1.0000000000000000000000000000,
        #             "NotionalValue":2345.5251300000000000000000000,
        #             "NotionalHoldAmount":0,
        #             "TradeTime":637431618315686826
        #         }
        #     ]
        #
        return self.parse_trades(response, market, since, limit)
def cancel_all_orders(self, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
if symbol is not None:
market = self.market(symbol)
request['IntrumentId'] = market['id']
response = self.privatePostCancelAllOrders(self.extend(request, params))
#
# {
# "result":true,
# "errormsg":null,
# "errorcode":0,
# "detail":null
# }
#
return response
def cancel_order(self, id, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
# defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
# accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
# params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
# 'AccountId': accountId,
}
clientOrderId = self.safe_integer_2(params, 'clientOrderId', 'ClOrderId')
if clientOrderId is not None:
request['ClOrderId'] = clientOrderId
else:
request['OrderId'] = int(id)
params = self.omit(params, ['clientOrderId', 'ClOrderId'])
response = self.privatePostCancelOrder(self.extend(request, params))
order = self.parse_order(response, market)
return self.extend(order, {
'id': id,
'clientOrderId': clientOrderId,
})
    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        Fetch all currently open orders for the account via GetOpenOrders.

        :param str symbol: unified market symbol; not sent to the API, applied while parsing
        :param int since: earliest time to fetch, applied while parsing
        :param int limit: maximum number of orders, applied while parsing
        :param dict params: extra parameters; 'accountId'/'AccountId' overrides the default account
        :returns list: a list of unified order structures
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        market = None
        if symbol is not None:
            market = self.market(symbol)
        request = {
            'omsId': omsId,
            'AccountId': accountId,
        }
        response = self.privateGetGetOpenOrders(self.extend(request, params))
        #
        #     [
        #         {
        #             "Side":"Buy",
        #             "OrderId":2543565233,
        #             "Price":19010,
        #             "Quantity":0.345,
        #             "DisplayQuantity":0.345,
        #             "Instrument":8,
        #             "Account":449,
        #             "AccountName":"igor@ccxt.trade",
        #             "OrderType":"Limit",
        #             "ClientOrderId":0,
        #             "OrderState":"Working",
        #             "ReceiveTime":1607579326003,
        #             "ReceiveTimeTicks":637431761260028981,
        #             "LastUpdatedTime":1607579326005,
        #             "LastUpdatedTimeTicks":637431761260054714,
        #             "OrigQuantity":0.345,
        #             "QuantityExecuted":0,
        #             "GrossValueExecuted":0,
        #             "ExecutableValue":0,
        #             "AvgPrice":0,
        #             "CounterPartyId":0,
        #             "ChangeReason":"NewInputAccepted",
        #             "OrigOrderId":2543565233,
        #             "OrigClOrdId":0,
        #             "EnteredBy":446,
        #             "UserName":"igor@ccxt.trade",
        #             "IsQuote":false,
        #             "InsideAsk":19069.32,
        #             "InsideAskSize":0.099736,
        #             "InsideBid":19068.25,
        #             "InsideBidSize":1.330001,
        #             "LastTradePrice":19068.25,
        #             "RejectReason":"",
        #             "IsLockedIn":false,
        #             "CancelReason":"",
        #             "OrderFlag":"AddedToBook",
        #             "UseMargin":false,
        #             "StopPrice":0,
        #             "PegPriceType":"Unknown",
        #             "PegOffset":0,
        #             "PegLimitOffset":0,
        #             "IpAddress":null,
        #             "ClientOrderIdUuid":null,
        #             "OMSId":1
        #         }
        #     ]
        #
        return self.parse_orders(response, market, since, limit)
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        Fetch the full order history for the account via GetOrdersHistory.

        :param str symbol: unified market symbol to filter by, optional
        :param int since: earliest time to fetch, in milliseconds; sent as seconds in 'StartTimeStamp'
        :param int limit: maximum number of orders, sent as 'Depth'
        :param dict params: extra parameters; 'accountId'/'AccountId' overrides the default account
        :returns list: a list of unified order structures
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        request = {
            'omsId': omsId,
            'AccountId': accountId,
            # 'ClientOrderId': clientOrderId,
            # 'OriginalOrderId': id,
            # 'OriginalClientOrderId': long integer,
            # 'UserId': integer,
            # 'InstrumentId': market['id'],
            # 'StartTimestamp': since,
            # 'EndTimestamp': self.milliseconds(),
            # 'Depth': limit,
            # 'StartIndex': 0,
        }
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['InstrumentId'] = market['id']
        if since is not None:
            # the API expects POSIX seconds, ccxt uses milliseconds
            request['StartTimeStamp'] = int(since / 1000)
        if limit is not None:
            request['Depth'] = limit
        response = self.privateGetGetOrdersHistory(self.extend(request, params))
        #
        #     [
        #         {
        #             "Side":"Buy",
        #             "OrderId":2543565233,
        #             "Price":19010.000000000000000000000000,
        #             "Quantity":0.0000000000000000000000000000,
        #             "DisplayQuantity":0.3450000000000000000000000000,
        #             "Instrument":8,
        #             "Account":449,
        #             "AccountName":"igor@ccxt.trade",
        #             "OrderType":"Limit",
        #             "ClientOrderId":0,
        #             "OrderState":"Canceled",
        #             "ReceiveTime":1607579326003,
        #             "ReceiveTimeTicks":637431761260028981,
        #             "LastUpdatedTime":1607580965346,
        #             "LastUpdatedTimeTicks":637431777653463754,
        #             "OrigQuantity":0.3450000000000000000000000000,
        #             "QuantityExecuted":0.0000000000000000000000000000,
        #             "GrossValueExecuted":0.0000000000000000000000000000,
        #             "ExecutableValue":0.0000000000000000000000000000,
        #             "AvgPrice":0.0000000000000000000000000000,
        #             "CounterPartyId":0,
        #             "ChangeReason":"UserModified",
        #             "OrigOrderId":2543565233,
        #             "OrigClOrdId":0,
        #             "EnteredBy":446,
        #             "UserName":"igor@ccxt.trade",
        #             "IsQuote":false,
        #             "InsideAsk":19069.320000000000000000000000,
        #             "InsideAskSize":0.0997360000000000000000000000,
        #             "InsideBid":19068.250000000000000000000000,
        #             "InsideBidSize":1.3300010000000000000000000000,
        #             "LastTradePrice":19068.250000000000000000000000,
        #             "RejectReason":"",
        #             "IsLockedIn":false,
        #             "CancelReason":"UserModified",
        #             "OrderFlag":"AddedToBook, RemovedFromBook",
        #             "UseMargin":false,
        #             "StopPrice":0.0000000000000000000000000000,
        #             "PegPriceType":"Unknown",
        #             "PegOffset":0.0000000000000000000000000000,
        #             "PegLimitOffset":0.0000000000000000000000000000,
        #             "IpAddress":"x.x.x.x",
        #             "ClientOrderIdUuid":null,
        #             "OMSId":1
        #         },
        #     ]
        #
        return self.parse_orders(response, market, since, limit)
    def fetch_order(self, id, symbol=None, params={}):
        """
        Fetch a single order by id via GetOrderStatus.

        :param str id: the exchange order id, sent as an integer
        :param str symbol: unified market symbol, optional, only used while parsing
        :param dict params: extra parameters; 'accountId'/'AccountId' overrides the default account
        :returns dict: a unified order structure
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        market = None
        if symbol is not None:
            market = self.market(symbol)
        request = {
            'omsId': omsId,
            'AccountId': accountId,
            'OrderId': int(id),
        }
        response = self.privateGetGetOrderStatus(self.extend(request, params))
        #
        #     {
        #         "Side":"Sell",
        #         "OrderId":2543565232,
        #         "Price":0.0000000000000000000000000000,
        #         "Quantity":0.0000000000000000000000000000,
        #         "DisplayQuantity":0.0000000000000000000000000000,
        #         "Instrument":8,
        #         "Account":449,
        #         "AccountName":"igor@ccxt.trade",
        #         "OrderType":"Market",
        #         "ClientOrderId":0,
        #         "OrderState":"FullyExecuted",
        #         "ReceiveTime":1607569475591,
        #         "ReceiveTimeTicks":637431662755912377,
        #         "LastUpdatedTime":1607569475596,
        #         "LastUpdatedTimeTicks":637431662755960902,
        #         "OrigQuantity":1.0000000000000000000000000000,
        #         "QuantityExecuted":1.0000000000000000000000000000,
        #         "GrossValueExecuted":19068.270478610000000000000000,
        #         "ExecutableValue":0.0000000000000000000000000000,
        #         "AvgPrice":19068.270478610000000000000000,
        #         "CounterPartyId":0,
        #         "ChangeReason":"Trade",
        #         "OrigOrderId":2543565232,
        #         "OrigClOrdId":0,
        #         "EnteredBy":446,
        #         "UserName":"igor@ccxt.trade",
        #         "IsQuote":false,
        #         "InsideAsk":19069.320000000000000000000000,
        #         "InsideAskSize":0.0997360000000000000000000000,
        #         "InsideBid":19069.310000000000000000000000,
        #         "InsideBidSize":0.2400950000000000000000000000,
        #         "LastTradePrice":19069.310000000000000000000000,
        #         "RejectReason":"",
        #         "IsLockedIn":false,
        #         "CancelReason":"",
        #         "OrderFlag":"0",
        #         "UseMargin":false,
        #         "StopPrice":0.0000000000000000000000000000,
        #         "PegPriceType":"Unknown",
        #         "PegOffset":0.0000000000000000000000000000,
        #         "PegLimitOffset":0.0000000000000000000000000000,
        #         "IpAddress":"x.x.x.x",
        #         "ClientOrderIdUuid":null,
        #         "OMSId":1
        #     }
        #
        return self.parse_order(response, market)
    def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
        """
        Fetch the trades generated by a single order.

        GetOrderHistoryByOrderId returns every state change of the order;
        only the entries whose 'ChangeReason' is 'Trade' are parsed as trades.

        :param str id: the exchange order id, sent as an integer
        :param str symbol: unified market symbol, optional, only used while parsing
        :param int since: earliest time to fetch, applied while parsing
        :param int limit: maximum number of trades, applied while parsing
        :param dict params: extra parameters passed through to the API
        :returns list: a list of unified trade structures
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        # defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        # accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        # params = self.omit(params, ['accountId', 'AccountId'])
        market = None
        if symbol is not None:
            market = self.market(symbol)
        request = {
            'OMSId': int(omsId),
            # 'AccountId': accountId,
            'OrderId': int(id),
        }
        response = self.privatePostGetOrderHistoryByOrderId(self.extend(request, params))
        #
        #     [
        #         {
        #             "Side":"Sell",
        #             "OrderId":2543565235,
        #             "Price":18600.000000000000000000000000,
        #             "Quantity":0.0000000000000000000000000000,
        #             "DisplayQuantity":0.0000000000000000000000000000,
        #             "Instrument":8,
        #             "Account":449,
        #             "AccountName":"igor@ccxt.trade",
        #             "OrderType":"Limit",
        #             "ClientOrderId":0,
        #             "OrderState":"FullyExecuted",
        #             "ReceiveTime":1607585844956,
        #             "ReceiveTimeTicks":637431826449564182,
        #             "LastUpdatedTime":1607585844959,
        #             "LastUpdatedTimeTicks":637431826449593893,
        #             "OrigQuantity":0.1230000000000000000000000000,
        #             "QuantityExecuted":0.1230000000000000000000000000,
        #             "GrossValueExecuted":2345.3947500000000000000000000,
        #             "ExecutableValue":0.0000000000000000000000000000,
        #             "AvgPrice":19068.250000000000000000000000,
        #             "CounterPartyId":0,
        #             "ChangeReason":"Trade",
        #             "OrigOrderId":2543565235,
        #             "OrigClOrdId":0,
        #             "EnteredBy":446,
        #             "UserName":"igor@ccxt.trade",
        #             "IsQuote":false,
        #             "InsideAsk":19069.320000000000000000000000,
        #             "InsideAskSize":0.0997360000000000000000000000,
        #             "InsideBid":19068.250000000000000000000000,
        #             "InsideBidSize":1.3300010000000000000000000000,
        #             "LastTradePrice":19068.250000000000000000000000,
        #             "RejectReason":"",
        #             "IsLockedIn":false,
        #             "CancelReason":"",
        #             "OrderFlag":"0",
        #             "UseMargin":false,
        #             "StopPrice":0.0000000000000000000000000000,
        #             "PegPriceType":"Unknown",
        #             "PegOffset":0.0000000000000000000000000000,
        #             "PegLimitOffset":0.0000000000000000000000000000,
        #             "IpAddress":"x.x.x.x",
        #             "ClientOrderIdUuid":null,
        #             "OMSId":1
        #         },
        #     ]
        #
        # keep only the state changes that represent fills
        grouped = self.group_by(response, 'ChangeReason')
        trades = self.safe_value(grouped, 'Trade', [])
        return self.parse_trades(trades, market, since, limit)
def fetch_deposit_address(self, code, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'GenerateNewKey': False,
}
response = self.privateGetGetDepositInfo(self.extend(request, params))
#
# {
# "result":true,
# "errormsg":null,
# "statuscode":0,
# "AssetManagerId":1,
# "AccountId":57922,
# "AssetId":16,
# "ProviderId":23,
# "DepositInfo":"[\"0x8A27564b5c30b91C93B1591821642420F323a210\"]"
# }
#
return self.parse_deposit_address(response, currency)
def parse_deposit_address(self, depositAddress, currency=None):
#
# fetchDepositAddress, createDepositAddress
#
# {
# "result":true,
# "errormsg":null,
# "statuscode":0,
# "AssetManagerId":1,
# "AccountId":449,
# "AssetId":1,
# "ProviderId":1,
# "DepositInfo":"[\"r3e95RwVsLH7yCbnMfyh7SA8FdwUJCB4S2?memo=241452010\"]"
# }
#
depositInfoString = self.safe_string(depositAddress, 'DepositInfo')
depositInfo = json.loads(depositInfoString)
depositInfoLength = len(depositInfo)
lastString = self.safe_string(depositInfo, depositInfoLength - 1)
parts = lastString.split('?memo=')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
code = None
if currency is not None:
code = currency['code']
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def create_deposit_address(self, code, params={}):
request = {
'GenerateNewKey': True,
}
return self.fetch_deposit_address(code, self.extend(request, params))
    def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        """
        Fetch all deposit tickets for the account via GetDeposits.

        :param str code: unified currency code; used only while parsing, not sent to the API
        :param int since: earliest time to fetch, applied while parsing
        :param int limit: maximum number of deposits, applied while parsing
        :param dict params: extra parameters; 'accountId'/'AccountId' overrides the default account
        :returns list: a list of unified transaction structures
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        currency = None
        if code is not None:
            currency = self.currency(code)
        request = {
            'omsId': omsId,
            'AccountId': accountId,
        }
        response = self.privateGetGetDeposits(self.extend(request, params))
        #
        #     [
        #         {
        #             "OMSId":1,
        #             "DepositId":44,
        #             "AccountId":449,
        #             "SubAccountId":0,
        #             "ProductId":4,
        #             "Amount":200.00000000000000000000000000,
        #             "LastUpdateTimeStamp":637431291261187806,
        #             "ProductType":"CryptoCurrency",
        #             "TicketStatus":"FullyProcessed",
        #             "DepositInfo":"{}",
        #             "DepositCode":"ab0e23d5-a9ce-4d94-865f-9ab464fb1de3",
        #             "TicketNumber":71,
        #             "NotionalProductId":13,
        #             "NotionalValue":200.00000000000000000000000000,
        #             "FeeAmount":0.0000000000000000000000000000,
        #         },
        #     ]
        #
        return self.parse_transactions(response, currency, since, limit)
    def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        """
        Fetch all withdrawal tickets for the account via GetWithdraws.

        :param str code: unified currency code; used only while parsing, not sent to the API
        :param int since: earliest time to fetch, applied while parsing
        :param int limit: maximum number of withdrawals, applied while parsing
        :param dict params: extra parameters; 'accountId'/'AccountId' overrides the default account
        :returns list: a list of unified transaction structures
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        currency = None
        if code is not None:
            currency = self.currency(code)
        request = {
            'omsId': omsId,
            'AccountId': accountId,
        }
        response = self.privateGetGetWithdraws(self.extend(request, params))
        #
        #     [
        #         {
        #             "Amount": 0.0,
        #             "FeeAmount": 0.0,
        #             "NotionalValue": 0.0,
        #             "WithdrawId": 0,
        #             "AssetManagerId": 0,
        #             "AccountId": 0,
        #             "AssetId": 0,
        #             "TemplateForm": "{\"TemplateType\": \"TetherRPCWithdraw\",\"Comment\": \"TestWithdraw\",\"ExternalAddress\": \"ms6C3pKAAr8gRCcnVebs8VRkVrjcvqNYv3\"}",
        #             "TemplateFormType": "TetherRPCWithdraw",
        #             "omsId": 0,
        #             "TicketStatus": 0,
        #             "TicketNumber": 0,
        #             "WithdrawTransactionDetails": "",
        #             "WithdrawType": "",
        #             "WithdrawCode": "490b4fa3-53fc-44f4-bd29-7e16be86fba3",
        #             "AssetType": 0,
        #             "Reaccepted": True,
        #             "NotionalProductId": 0
        #         },
        #     ]
        #
        return self.parse_transactions(response, currency, since, limit)
    def parse_transaction_status_by_type(self, status, type=None):
        """
        Map an exchange ticket status to a unified transaction status.

        Deposits and withdrawals use overlapping but distinct status
        vocabularies, so the mapping is selected by transaction type.

        :param str status: the raw 'TicketStatus' value
        :param str type: 'deposit' or 'withdrawal'; unknown types leave the status unchanged
        :returns str: the unified status, or the raw status if unmapped
        """
        statusesByType = {
            'deposit': {
                'New': 'pending',  # new ticket awaiting operator review
                'AdminProcessing': 'pending',  # an admin is looking at the ticket
                'Accepted': 'pending',  # an admin accepts the ticket
                'Rejected': 'rejected',  # admin rejects the ticket
                'SystemProcessing': 'pending',  # automatic processing; an unlikely status for a deposit
                'FullyProcessed': 'ok',  # the deposit has concluded
                'Failed': 'failed',  # the deposit has failed for some reason
                'Pending': 'pending',  # Account Provider has set status to pending
                'Confirmed': 'pending',  # Account Provider confirms the deposit
                'AmlProcessing': 'pending',  # anti-money-laundering process underway
                'AmlAccepted': 'pending',  # anti-money-laundering process successful
                'AmlRejected': 'rejected',  # deposit did not stand up to anti-money-laundering process
                'AmlFailed': 'failed',  # anti-money-laundering process failed/did not complete
                'LimitsAccepted': 'pending',  # deposit meets limits for fiat or crypto asset
                'LimitsRejected': 'rejected',  # deposit does not meet limits for fiat or crypto asset
            },
            'withdrawal': {
                'New': 'pending',  # awaiting operator review
                'AdminProcessing': 'pending',  # An admin is looking at the ticket
                'Accepted': 'pending',  # withdrawal will proceed
                'Rejected': 'rejected',  # admin or automatic rejection
                'SystemProcessing': 'pending',  # automatic processing underway
                'FullyProcessed': 'ok',  # the withdrawal has concluded
                'Failed': 'failed',  # the withdrawal failed for some reason
                'Pending': 'pending',  # the admin has placed the withdrawal in pending status
                'Pending2Fa': 'pending',  # user must click 2-factor authentication confirmation link
                'AutoAccepted': 'pending',  # withdrawal will be automatically processed
                'Delayed': 'pending',  # waiting for funds to be allocated for the withdrawal
                'UserCanceled': 'canceled',  # withdraw canceled by user or Superuser
                'AdminCanceled': 'canceled',  # withdraw canceled by Superuser
                'AmlProcessing': 'pending',  # anti-money-laundering process underway
                'AmlAccepted': 'pending',  # anti-money-laundering process complete
                'AmlRejected': 'rejected',  # withdrawal did not stand up to anti-money-laundering process
                'AmlFailed': 'failed',  # withdrawal did not complete anti-money-laundering process
                'LimitsAccepted': 'pending',  # withdrawal meets limits for fiat or crypto asset
                'LimitsRejected': 'rejected',  # withdrawal does not meet limits for fiat or crypto asset
                'Submitted': 'pending',  # withdrawal sent to Account Provider; awaiting blockchain confirmation
                'Confirmed': 'pending',  # Account Provider confirms that withdrawal is on the blockchain
                'ManuallyConfirmed': 'pending',  # admin has sent withdrawal via wallet or admin function directly; marks ticket as FullyProcessed; debits account
                'Confirmed2Fa': 'pending',  # user has confirmed withdraw via 2-factor authentication.
            },
        }
        statuses = self.safe_value(statusesByType, type, {})
        return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# "OMSId":1,
# "DepositId":44,
# "AccountId":449,
# "SubAccountId":0,
# "ProductId":4,
# "Amount":200.00000000000000000000000000,
# "LastUpdateTimeStamp":637431291261187806,
# "ProductType":"CryptoCurrency",
# "TicketStatus":"FullyProcessed",
# "DepositInfo":"{}",
# "DepositCode":"ab0e23d5-a9ce-4d94-865f-9ab464fb1de3",
# "TicketNumber":71,
# "NotionalProductId":13,
# "NotionalValue":200.00000000000000000000000000,
# "FeeAmount":0.0000000000000000000000000000,
# }
#
# fetchWithdrawals
#
# {
# "Amount": 0.0,
# "FeeAmount": 0.0,
# "NotionalValue": 0.0,
# "WithdrawId": 0,
# "AssetManagerId": 0,
# "AccountId": 0,
# "AssetId": 0,
# "TemplateForm": "{\"TemplateType\": \"TetherRPCWithdraw\",\"Comment\": \"TestWithdraw\",\"ExternalAddress\": \"ms6C3pKAAr8gRCcnVebs8VRkVrjcvqNYv3\"}",
# "TemplateFormType": "TetherRPCWithdraw",
# "omsId": 0,
# "TicketStatus": 0,
# "TicketNumber": 0,
# "WithdrawTransactionDetails": "",
# "WithdrawType": "",
# "WithdrawCode": "490b4fa3-53fc-44f4-bd29-7e16be86fba3",
# "AssetType": 0,
# "Reaccepted": True,
# "NotionalProductId": 0
# }
#
id = self.safe_string(transaction, 'DepositId')
txid = None
currencyId = self.safe_string(transaction, 'ProductId')
code = self.safe_currency_code(currencyId, currency)
timestamp = None
type = None
if 'DepositId' in transaction:
type = 'deposit'
elif 'WithdrawId' in transaction:
type = 'withdrawal'
templateFormString = self.safe_string(transaction, 'TemplateForm')
address = None
updated = self.safe_integer(transaction, 'LastUpdateTimeStamp')
if templateFormString is not None:
templateForm = json.loads(templateFormString)
address = self.safe_string(templateForm, 'ExternalAddress')
txid = self.safe_string(templateForm, 'TxId')
timestamp = self.safe_integer(templateForm, 'TimeSubmitted')
updated = self.safe_integer(templateForm, 'LastUpdated', updated)
addressTo = address
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'TicketStatus'), type)
amount = self.safe_number(transaction, 'Amount')
feeCost = self.safe_number(transaction, 'FeeAmount')
fee = None
if feeCost is not None:
fee = {'currency': code, 'cost': feeCost}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': addressTo,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
    def withdraw(self, code, amount, address, tag=None, params={}):
        """
        Create a withdrawal ticket.

        Flow: look up the first available withdraw template type for the
        currency, fetch its template form, fill in the destination address
        (and memo, when the template supports one), then submit the ticket
        with a Google 2FA code.

        :param str code: unified currency code
        :param float amount: amount to withdraw(not placed into the template here; see the template form)
        :param str address: the destination address
        :param str tag: an optional memo/tag, applied only if the template has a 'Memo' field
        :param dict params: extra parameters; 'accountId'/'AccountId' overrides the default account
        :raises AuthenticationError: if no session token was obtained via signIn()
        :returns dict: {'info': raw response, 'id': withdrawal ticket id}
        """
        tag, params = self.handle_withdraw_tag_and_params(tag, params)
        # this method requires login, password and a twofa key(for the 2FA code below)
        sessionToken = self.safe_string(self.options, 'sessionToken')
        if sessionToken is None:
            raise AuthenticationError(self.id + ' call signIn() method to obtain a session token')
        self.check_address(address)
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        currency = self.currency(code)
        withdrawTemplateTypesRequest = {
            'omsId': omsId,
            'AccountId': accountId,
            'ProductId': currency['id'],
        }
        withdrawTemplateTypesResponse = self.privateGetGetWithdrawTemplateTypes(withdrawTemplateTypesRequest)
        #
        #     {
        #         result: True,
        #         errormsg: null,
        #         statuscode: "0",
        #         TemplateTypes: [
        #             {AccountProviderId: "14", TemplateName: "ToExternalBitcoinAddress", AccountProviderName: "BitgoRPC-BTC"},
        #             {AccountProviderId: "20", TemplateName: "ToExternalBitcoinAddress", AccountProviderName: "TrezorBTC"},
        #             {AccountProviderId: "31", TemplateName: "BTC", AccountProviderName: "BTC Fireblocks 1"}
        #         ]
        #     }
        #
        # the first template type is used; callers cannot currently choose the provider
        templateTypes = self.safe_value(withdrawTemplateTypesResponse, 'TemplateTypes', [])
        firstTemplateType = self.safe_value(templateTypes, 0)
        if firstTemplateType is None:
            raise ExchangeError(self.id + ' withdraw() could not find a withdraw template type for ' + currency['code'])
        templateName = self.safe_string(firstTemplateType, 'TemplateName')
        withdrawTemplateRequest = {
            'omsId': omsId,
            'AccountId': accountId,
            'ProductId': currency['id'],
            'TemplateType': templateName,
            'AccountProviderId': firstTemplateType['AccountProviderId'],
        }
        withdrawTemplateResponse = self.privateGetGetWithdrawTemplate(withdrawTemplateRequest)
        #
        #     {
        #         result: True,
        #         errormsg: null,
        #         statuscode: "0",
        #         Template: "{\"TemplateType\":\"ToExternalBitcoinAddress\",\"Comment\":\"\",\"ExternalAddress\":\"\"}"
        #     }
        #
        template = self.safe_string(withdrawTemplateResponse, 'Template')
        if template is None:
            raise ExchangeError(self.id + ' withdraw() could not find a withdraw template for ' + currency['code'])
        withdrawTemplate = json.loads(template)
        withdrawTemplate['ExternalAddress'] = address
        if tag is not None:
            # the tag is applied only when the template actually exposes a Memo field
            if 'Memo' in withdrawTemplate:
                withdrawTemplate['Memo'] = tag
        withdrawPayload = {
            'omsId': omsId,
            'AccountId': accountId,
            'ProductId': currency['id'],
            'TemplateForm': self.json(withdrawTemplate),
            'TemplateType': templateName,
        }
        withdrawRequest = {
            'TfaType': 'Google',
            'TFaCode': self.oath(),  # current TOTP code from the configured twofa secret
            'Payload': self.json(withdrawPayload),
        }
        response = self.privatePostCreateWithdrawTicket(self.deep_extend(withdrawRequest, params))
        return {
            'info': response,
            'id': self.safe_string(response, 'Id'),
        }
    def nonce(self):
        # millisecond timestamp used as the request-signing nonce
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final URL, headers and body for a request.

        Public endpoints are unsigned, except the two authentication paths:
        'Authenticate' uses HTTP basic auth and 'Authenticate2FA' sends the
        pending 2FA token obtained from the first step.  Private endpoints use
        the session token when available, otherwise an HMAC signature over
        nonce + uid + apiKey.
        """
        url = self.urls['api'][api] + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if path == 'Authenticate':
                # HTTP basic auth with login:password
                auth = self.login + ':' + self.password
                auth64 = self.string_to_base64(auth)
                headers = {
                    'Authorization': 'Basic ' + self.decode(auth64),
                    # 'Content-Type': 'application/json',
                }
            elif path == 'Authenticate2FA':
                # second authentication step: present the token from step one
                pending2faToken = self.safe_string(self.options, 'pending2faToken')
                if pending2faToken is not None:
                    headers = {
                        'Pending2FaToken': pending2faToken,
                        # 'Content-Type': 'application/json',
                    }
                    query = self.omit(query, 'pending2faToken')
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'private':
            self.check_required_credentials()
            sessionToken = self.safe_string(self.options, 'sessionToken')
            if sessionToken is None:
                # no session: sign the request with HMAC(nonce + uid + apiKey)
                nonce = str(self.nonce())
                auth = nonce + self.uid + self.apiKey
                signature = self.hmac(self.encode(auth), self.encode(self.secret))
                headers = {
                    'Nonce': nonce,
                    'APIKey': self.apiKey,
                    'Signature': signature,
                    'UserId': self.uid,
                }
            else:
                # a session token obtained via signIn() replaces the signature
                headers = {
                    'APToken': sessionToken,
                }
            if method == 'POST':
                headers['Content-Type'] = 'application/json'
                body = self.json(query)
            else:
                if query:
                    url += '?' + self.urlencode(query)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 404:
raise AuthenticationError(self.id + ' ' + body)
if response is None:
return
#
# {"status":"Rejected","errormsg":"Not_Enough_Funds","errorcode":101}
# {"result":false,"errormsg":"Server Error","errorcode":102,"detail":null}
#
message = self.safe_string(response, 'errormsg')
if (message is not None) and (message != ''):
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback)
| 44.503601
| 218
| 0.51199
|
ge import Exchange
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ndax(Exchange):
    def describe(self):
        """Return the static exchange description (ids, URLs, endpoint lists,
        fees, error mapping and exchange-specific options), merged over the
        base Exchange description."""
        return self.deep_extend(super(ndax, self).describe(), {
            'id': 'ndax',
            'name': 'NDAX',
            'countries': ['US'],
            'rateLimit': 1000,
            'pro': True,
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createOrder': True,
                'editOrder': True,
                'fetchAccounts': True,
                'fetchBalance': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrderTrades': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTrades': True,
                'fetchWithdrawals': True,
                'signIn': True,
            },
            # timeframe name -> interval length in seconds (as a string)
            'timeframes': {
                '1m': '60',
                '5m': '300',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '2h': '7200',
                '4h': '14400',
                '6h': '21600',
                '12h': '43200',
                '1d': '86400',
                '1w': '604800',
                '1M': '2419200',
                '4M': '9676800',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/108623144-67a3ef00-744e-11eb-8140-75c6b851e945.jpg',
                'test': {
                    'public': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                    'private': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                },
                'api': {
                    'public': 'https://api.ndax.io:8443/AP',
                    'private': 'https://api.ndax.io:8443/AP',
                },
                'www': 'https://ndax.io',
                'doc': [
                    'https://apidoc.ndax.io/',
                ],
                'fees': 'https://ndax.io/fees',
                'referral': 'https://one.ndax.io/bfQiSL',
            },
            'api': {
                'public': {
                    'get': [
                        'Activate2FA',
                        'Authenticate2FA',
                        'AuthenticateUser',
                        'GetL2Snapshot',
                        'GetLevel1',
                        'GetValidate2FARequiredEndpoints',
                        'LogOut',
                        'GetTickerHistory',
                        'GetProduct',
                        'GetProducts',
                        'GetInstrument',
                        'GetInstruments',
                        'Ping',
                        'trades',
                        'GetLastTrades',
                        'SubscribeLevel1',
                        'SubscribeLevel2',
                        'SubscribeTicker',
                        'SubscribeTrades',
                        'SubscribeBlockTrades',
                        'UnsubscribeBlockTrades',
                        'UnsubscribeLevel1',
                        'UnsubscribeLevel2',
                        'UnsubscribeTicker',
                        'UnsubscribeTrades',
                        'Authenticate',
                    ],
                },
                'private': {
                    'get': [
                        'GetUserAccountInfos',
                        'GetUserAccounts',
                        'GetUserAffiliateCount',
                        'GetUserAffiliateTag',
                        'GetUserConfig',
                        'GetAllUnredactedUserConfigsForUser',
                        'GetUnredactedUserConfigByKey',
                        'GetUserDevices',
                        'GetUserReportTickets',
                        'GetUserReportWriterResultRecords',
                        'GetAccountInfo',
                        'GetAccountPositions',
                        'GetAllAccountConfigs',
                        'GetTreasuryProductsForAccount',
                        'GetAccountTrades',
                        'GetAccountTransactions',
                        'GetOpenTradeReports',
                        'GetAllOpenTradeReports',
                        'GetTradesHistory',
                        'GetOpenOrders',
                        'GetOpenQuotes',
                        'GetOrderFee',
                        'GetOrderHistory',
                        'GetOrdersHistory',
                        'GetOrderStatus',
                        'GetOmsFeeTiers',
                        'GetAccountDepositTransactions',
                        'GetAccountWithdrawTransactions',
                        'GetAllDepositRequestInfoTemplates',
                        'GetDepositInfo',
                        'GetDepositRequestInfoTemplate',
                        'GetDeposits',
                        'GetDepositTicket',
                        'GetDepositTickets',
                        'GetOMSWithdrawFees',
                        'GetWithdrawFee',
                        'GetWithdraws',
                        'GetWithdrawTemplate',
                        'GetWithdrawTemplateTypes',
                        'GetWithdrawTicket',
                        'GetWithdrawTickets',
                    ],
                    'post': [
                        'AddUserAffiliateTag',
                        'CancelUserReport',
                        'RegisterNewDevice',
                        'SubscribeAccountEvents',
                        'UpdateUserAffiliateTag',
                        'GenerateTradeActivityReport',
                        'GenerateTransactionActivityReport',
                        'GenerateTreasuryActivityReport',
                        'ScheduleTradeActivityReport',
                        'ScheduleTransactionActivityReport',
                        'ScheduleTreasuryActivityReport',
                        'CancelAllOrders',
                        'CancelOrder',
                        'CancelQuote',
                        'CancelReplaceOrder',
                        'CreateQuote',
                        'ModifyOrder',
                        'SendOrder',
                        'SubmitBlockTrade',
                        'UpdateQuote',
                        'CancelWithdraw',
                        'CreateDepositTicket',
                        'CreateWithdrawTicket',
                        'SubmitDepositTicketComment',
                        'SubmitWithdrawTicketComment',
                        'GetOrderHistoryByOrderId',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.2 / 100,
                    'taker': 0.25 / 100,
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'precisionMode': TICK_SIZE,
            'exceptions': {
                'exact': {
                    'Not_Enough_Funds': InsufficientFunds,
                    'Server Error': ExchangeError,
                    'Resource Not Found': OrderNotFound,
                },
                'broad': {
                    'Invalid InstrumentId': BadSymbol,
                    'This endpoint requires 2FACode along with the payload': AuthenticationError,
                },
            },
            'options': {
                'omsId': 1,
                # mapping of unified order type names to numeric API codes
                'orderTypes': {
                    'Market': 1,
                    'Limit': 2,
                    'StopMarket': 3,
                    'StopLimit': 4,
                    'TrailingStopMarket': 5,
                    'TrailingStopLimit': 6,
                    'BlockTrade': 7,
                },
            },
        })
    def sign_in(self, params={}):
        """Authenticate and cache a session token in self.options.

        Two-step flow: the first call uses basic auth; if the exchange
        responds with a Pending2FaToken instead of a SessionToken, a second
        call with a TOTP code completes the login.

        :param dict params: extra parameters merged into the request(s)
        :returns dict: the raw authentication response
        :raises AuthenticationError: when login, password or twofa are missing
        """
        self.check_required_credentials()
        if self.login is None or self.password is None or self.twofa is None:
            raise AuthenticationError(self.id + ' signIn() requires exchange.login, exchange.password and exchange.twofa credentials')
        request = {
            'grant_type': 'client_credentials',
        }
        response = self.publicGetAuthenticate(self.extend(request, params))
        sessionToken = self.safe_string(response, 'SessionToken')
        if sessionToken is not None:
            # authenticated in one step, no 2FA challenge issued
            self.options['sessionToken'] = sessionToken
            return response
        pending2faToken = self.safe_string(response, 'Pending2FaToken')
        if pending2faToken is not None:
            # 2FA challenge: store the pending token(picked up by sign())
            # and answer with the current TOTP code
            self.options['pending2faToken'] = pending2faToken
            request = {
                'Code': self.oath(),
            }
            response = self.publicGetAuthenticate2FA(self.extend(request, params))
            sessionToken = self.safe_string(response, 'SessionToken')
            self.options['sessionToken'] = sessionToken
            return response
        return response
def fetch_currencies(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetProducts(self.extend(request, params))
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'ProductId')
name = self.safe_string(currency, 'ProductFullName')
type = self.safe_string(currency, 'ProductType')
code = self.safe_currency_code(self.safe_string(currency, 'Product'))
precision = self.safe_number(currency, 'TickSize')
isDisabled = self.safe_value(currency, 'IsDisabled')
active = not isDisabled
result[code] = {
'id': id,
'name': name,
'code': code,
'type': type,
'precision': precision,
'info': currency,
'active': active,
'fee': None,
'limits': self.limits,
}
return result
def fetch_markets(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetInstruments(self.extend(request, params))
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'InstrumentId')
baseId = self.safe_string(market, 'Product1')
quoteId = self.safe_string(market, 'Product2')
base = self.safe_currency_code(self.safe_string(market, 'Product1Symbol'))
quote = self.safe_currency_code(self.safe_string(market, 'Product2Symbol'))
symbol = base + '/' + quote
precision = {
'amount': self.safe_number(market, 'QuantityIncrement'),
'price': self.safe_number(market, 'PriceIncrement'),
}
sessionStatus = self.safe_string(market, 'SessionStatus')
isDisable = self.safe_value(market, 'IsDisable')
sessionRunning = (sessionStatus == 'Running')
active = True if (sessionRunning and not isDisable) else False
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(market, 'MinimumQuantity'),
'max': None,
},
'price': {
'min': self.safe_number(market, 'MinimumPrice'),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=6, amountKey=8):
nonce = None
result = {
'symbol': symbol,
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(orderbook)):
level = orderbook[i]
if timestamp is None:
timestamp = self.safe_integer(level, 2)
else:
newTimestamp = self.safe_integer(level, 2)
timestamp = max(timestamp, newTimestamp)
if nonce is None:
nonce = self.safe_integer(level, 0)
else:
newNonce = self.safe_integer(level, 0)
nonce = max(nonce, newNonce)
bidask = self.parse_bid_ask(level, priceKey, amountKey)
levelSide = self.safe_integer(level, 9)
side = asksKey if levelSide else bidsKey
result[side].append(bidask)
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
result['nonce'] = nonce
return result
def fetch_order_book(self, symbol, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
limit = 100 if (limit is None) else limit
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Depth': limit,
}
response = self.publicGetGetL2Snapshot(self.extend(request, params))
bol)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer(ticker, 'TimeStamp')
marketId = self.safe_string(ticker, 'InstrumentId')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'LastTradedPx')
percentage = self.safe_number(ticker, 'Rolling24HrPxChangePercent')
change = self.safe_number(ticker, 'Rolling24HrPxChange')
open = self.safe_number(ticker, 'SessionOpen')
average = None
if (last is not None) and (change is not None):
average = self.sum(last, open) / 2
baseVolume = self.safe_number(ticker, 'Rolling24HrVolume')
quoteVolume = self.safe_number(ticker, 'Rolling24HrNotional')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'SessionHigh'),
'low': self.safe_number(ticker, 'SessionLow'),
'bid': self.safe_number(ticker, 'BestBid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'BestOffer'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
response = self.publicGetGetLevel1(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_ohlcv(self, ohlcv, market=None):
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.milliseconds()
if since is None:
if limit is not None:
request['FromDate'] = self.ymdhms(now - duration * limit * 1000)
request['ToDate'] = self.ymdhms(now)
else:
request['FromDate'] = self.ymdhms(since)
if limit is None:
request['ToDate'] = self.ymdhms(now)
else:
request['ToDate'] = self.ymdhms(self.sum(since, duration * limit * 1000))
response = self.publicGetGetTickerHistory(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
    def parse_trade(self, trade, market=None):
        """Parse a trade in either of the two formats the exchange uses.

        Public trades arrive as flat arrays(handled by the isinstance
        branch); private trades arrive as dicts with named fields, including
        fee information.  Cost is taken from the payload when present,
        otherwise computed as price * amount.
        """
        priceString = None
        amountString = None
        cost = None
        timestamp = None
        id = None
        marketId = None
        side = None
        orderId = None
        takerOrMaker = None
        fee = None
        type = None
        if isinstance(trade, list):
            # public trade: [id, instrumentId, quantity, price, order1, order2, timestamp, tick, takerSide, ...]
            priceString = self.safe_string(trade, 3)
            amountString = self.safe_string(trade, 2)
            timestamp = self.safe_integer(trade, 6)
            id = self.safe_string(trade, 0)
            marketId = self.safe_string(trade, 1)
            takerSide = self.safe_value(trade, 8)
            # truthy taker-side flag means the taker sold
            side = 'sell' if takerSide else 'buy'
            orderId = self.safe_string(trade, 4)
        else:
            # private trade: dict with named fields
            timestamp = self.safe_integer_2(trade, 'TradeTimeMS', 'ReceiveTime')
            id = self.safe_string(trade, 'TradeId')
            orderId = self.safe_string_2(trade, 'OrderId', 'OrigOrderId')
            marketId = self.safe_string_2(trade, 'InstrumentId', 'Instrument')
            priceString = self.safe_string(trade, 'Price')
            amountString = self.safe_string(trade, 'Quantity')
            cost = self.safe_number_2(trade, 'Value', 'GrossValueExecuted')
            takerOrMaker = self.safe_string_lower(trade, 'MakerTaker')
            side = self.safe_string_lower(trade, 'Side')
            type = self.safe_string_lower(trade, 'OrderType')
            feeCost = self.safe_number(trade, 'Fee')
            if feeCost is not None:
                feeCurrencyId = self.safe_string(trade, 'FeeProductId')
                feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
                fee = {
                    'cost': feeCost,
                    'currency': feeCurrencyCode,
                }
        price = self.parse_number(priceString)
        amount = self.parse_number(amountString)
        if cost is None:
            # fall back to price * amount using string math to avoid float error
            cost = self.parse_number(Precise.string_mul(priceString, amountString))
        symbol = self.safe_symbol(marketId, market)
        return {
            'info': trade,
            'id': id,
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'order': orderId,
            'type': type,
            'side': side,
            'takerOrMaker': takerOrMaker,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
if limit is not None:
request['Count'] = limit
response = self.publicGetGetLastTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_accounts(self, params={}):
if not self.login:
raise AuthenticationError(self.id + ' fetchAccounts() requires exchange.login email credential')
omsId = self.safe_integer(self.options, 'omsId', 1)
self.check_required_credentials()
request = {
'omsId': omsId,
'UserId': self.uid,
'UserName': self.login,
}
response = self.privateGetGetUserAccounts(self.extend(request, params))
for i in range(0, len(response)):
accountId = self.safe_string(response, i)
result.append({
'id': accountId,
'type': None,
'currency': None,
'info': accountId,
})
return result
def fetch_balance(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetAccountPositions(self.extend(request, params))
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'ProductId')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'Amount')
account['used'] = self.safe_string(balance, 'Hold')
result[code] = account
return self.parse_balance(result)
def parse_ledger_entry_type(self, type):
types = {
'Trade': 'trade',
'Deposit': 'transaction',
'Withdraw': 'transaction',
'Transfer': 'transfer',
'OrderHold': 'trade',
'WithdrawHold': 'transaction',
'DepositHold': 'transaction',
'MarginHold': 'trade',
'ManualHold': 'trade',
'ManualEntry': 'trade',
'MarginAcquisition': 'trade',
'MarginRelinquish': 'trade',
'MarginQuoteHold': 'trade',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
id = self.safe_string(item, 'TransactionId')
account = self.safe_string(item, 'AccountId')
referenceId = self.safe_string(item, 'ReferenceId')
referenceAccount = self.safe_string(item, 'Counterparty')
type = self.parse_ledger_entry_type(self.safe_string(item, 'ReferenceType'))
currencyId = self.safe_string(item, 'ProductId')
code = self.safe_currency_code(currencyId, currency)
credit = self.safe_number(item, 'CR')
debit = self.safe_number(item, 'DR')
amount = None
direction = None
if credit > 0:
amount = credit
direction = 'in'
elif debit > 0:
amount = debit
direction = 'out'
timestamp = self.safe_integer(item, 'TimeStamp')
before = None
after = self.safe_number(item, 'Balance')
if direction == 'out':
before = self.sum(after, amount)
elif direction == 'in':
before = max(0, after - amount)
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetAccountTransactions(self.extend(request, params))
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_ledger(response, currency, since, limit)
def parse_order_status(self, status):
statuses = {
'Accepted': 'open',
'Rejected': 'rejected',
'Working': 'open',
'Canceled': 'canceled',
'Expired': 'expired',
'FullyExecuted': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string_2(order, 'ReplacementOrderId', 'OrderId')
timestamp = self.safe_integer(order, 'ReceiveTime')
lastTradeTimestamp = self.safe_integer(order, 'LastUpdatedTime')
marketId = self.safe_string(order, 'Instrument')
symbol = self.safe_symbol(marketId, market)
side = self.safe_string_lower(order, 'Side')
type = self.safe_string_lower(order, 'OrderType')
clientOrderId = self.safe_string_2(order, 'ReplacementClOrdId', 'ClientOrderId')
price = self.safe_number(order, 'Price', 0.0)
price = price if (price > 0.0) else None
amount = self.safe_number(order, 'OrigQuantity')
filled = self.safe_number(order, 'QuantityExecuted')
cost = self.safe_number(order, 'GrossValueExecuted')
average = self.safe_number(order, 'AvgPrice', 0.0)
average = average if (average > 0) else None
stopPrice = self.safe_number(order, 'StopPrice', 0.0)
stopPrice = stopPrice if (stopPrice > 0.0) else None
timeInForce = None
status = self.parse_order_status(self.safe_string(order, 'OrderState'))
fee = None
trades = None
return self.safe_order({
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'cost': cost,
'amount': amount,
'filled': filled,
'average': average,
'remaining': None,
'fee': fee,
'trades': trades,
})
def create_order(self, symbol, type, side, amount, price=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
market = self.market(symbol)
orderSide = 0 if (side == 'buy') else 1
request = {
'InstrumentId': int(market['id']),
'omsId': omsId,
'AccountId': accountId,
'TimeInForce': 1,
'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),
if price is not None:
request['LimitPrice'] = float(self.price_to_precision(symbol, price))
if clientOrderId is not None:
request['ClientOrderId'] = clientOrderId
response = self.privatePostSendOrder(self.extend(request, params))
return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
market = self.market(symbol)
orderSide = 0 if (side == 'buy') else 1
request = {
'OrderIdToReplace': int(id),
'InstrumentId': int(market['id']),
'omsId': omsId,
'AccountId': accountId,
'TimeInForce': 1,
'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),
if price is not None:
request['LimitPrice'] = float(self.price_to_precision(symbol, price))
if clientOrderId is not None:
request['ClientOrderId'] = clientOrderId
response = self.privatePostCancelReplaceOrder(self.extend(request, params))
return self.parse_order(response, market)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
quest['StartTimeStamp'] = int(since / 1000)
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetTradesHistory(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
    def cancel_all_orders(self, symbol=None, params={}):
        """Cancel every open order on the account, optionally restricted to
        one instrument.

        :param str symbol: optional unified market symbol
        :param dict params: may include accountId/AccountId overrides
        :returns dict: the raw exchange response
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        request = {
            'omsId': omsId,
            'AccountId': accountId,
        }
        if symbol is not None:
            market = self.market(symbol)
            # NOTE(review): 'IntrumentId' looks like a typo of 'InstrumentId';
            # if the API ignores unknown keys the symbol filter has no effect —
            # confirm against the CancelAllOrders API docs before changing it
            request['IntrumentId'] = market['id']
        response = self.privatePostCancelAllOrders(self.extend(request, params))
        return response
def cancel_order(self, id, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
}
clientOrderId = self.safe_integer_2(params, 'clientOrderId', 'ClOrderId')
if clientOrderId is not None:
request['ClOrderId'] = clientOrderId
else:
request['OrderId'] = int(id)
params = self.omit(params, ['clientOrderId', 'ClOrderId'])
response = self.privatePostCancelOrder(self.extend(request, params))
order = self.parse_order(response, market)
return self.extend(order, {
'id': id,
'clientOrderId': clientOrderId,
})
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetOpenOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['InstrumentId'] = market['id']
if since is not None:
request['StartTimeStamp'] = int(since / 1000)
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetOrdersHistory(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
'AccountId': accountId,
'OrderId': int(id),
}
response = self.privateGetGetOrderStatus(self.extend(request, params))
return self.parse_order(response, market)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'OMSId': int(omsId),
'OrderId': int(id),
}
response = self.privatePostGetOrderHistoryByOrderId(self.extend(request, params))
grouped = self.group_by(response, 'ChangeReason')
trades = self.safe_value(grouped, 'Trade', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposit_address(self, code, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'GenerateNewKey': False,
}
response = self.privateGetGetDepositInfo(self.extend(request, params))
return self.parse_deposit_address(response, currency)
def parse_deposit_address(self, depositAddress, currency=None):
depositInfoString = self.safe_string(depositAddress, 'DepositInfo')
depositInfo = json.loads(depositInfoString)
depositInfoLength = len(depositInfo)
lastString = self.safe_string(depositInfo, depositInfoLength - 1)
parts = lastString.split('?memo=')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
code = None
if currency is not None:
code = currency['code']
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def create_deposit_address(self, code, params={}):
request = {
'GenerateNewKey': True,
}
return self.fetch_deposit_address(code, self.extend(request, params))
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = None
if code is not None:
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = None
if code is not None:
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetWithdraws(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
def parse_transaction_status_by_type(self, status, type=None):
statusesByType = {
'deposit': {
'New': 'pending',
'AdminProcessing': 'pending',
'Accepted': 'pending',
'Rejected': 'rejected',
'SystemProcessing': 'pending',
'FullyProcessed': 'ok',
'Failed': 'failed',
'Pending': 'pending',
'Confirmed': 'pending',
'AmlProcessing': 'pending',
'AmlAccepted': 'pending',
'AmlRejected': 'rejected',
'AmlFailed': 'failed',
'LimitsAccepted': 'pending',
'LimitsRejected': 'rejected',
},
'withdrawal': {
'New': 'pending',
'AdminProcessing': 'pending',
'Accepted': 'pending',
'Rejected': 'rejected',
'SystemProcessing': 'pending',
'FullyProcessed': 'ok',
'Failed': 'failed',
'Pending': 'pending',
'Pending2Fa': 'pending',
'AutoAccepted': 'pending',
'Delayed': 'pending',
'UserCanceled': 'canceled',
'AdminCanceled': 'canceled',
'AmlProcessing': 'pending',
'AmlAccepted': 'pending',
'AmlRejected': 'rejected',
'AmlFailed': 'failed',
'LimitsAccepted': 'pending',
'LimitsRejected': 'rejected',
'Submitted': 'pending',
'Confirmed': 'pending',
'ManuallyConfirmed': 'pending',
'Confirmed2Fa': 'pending',
},
}
statuses = self.safe_value(statusesByType, type, {})
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'DepositId')
txid = None
currencyId = self.safe_string(transaction, 'ProductId')
code = self.safe_currency_code(currencyId, currency)
timestamp = None
type = None
if 'DepositId' in transaction:
type = 'deposit'
elif 'WithdrawId' in transaction:
type = 'withdrawal'
templateFormString = self.safe_string(transaction, 'TemplateForm')
address = None
updated = self.safe_integer(transaction, 'LastUpdateTimeStamp')
if templateFormString is not None:
templateForm = json.loads(templateFormString)
address = self.safe_string(templateForm, 'ExternalAddress')
txid = self.safe_string(templateForm, 'TxId')
timestamp = self.safe_integer(templateForm, 'TimeSubmitted')
updated = self.safe_integer(templateForm, 'LastUpdated', updated)
addressTo = address
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'TicketStatus'), type)
amount = self.safe_number(transaction, 'Amount')
feeCost = self.safe_number(transaction, 'FeeAmount')
fee = None
if feeCost is not None:
fee = {'currency': code, 'cost': feeCost}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': addressTo,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Create a withdrawal ticket for currency `code` to `address`.

        Requires a prior signIn() (session token in self.options) and a
        configured TOTP secret, since the payload is confirmed with a
        Google-authenticator code via self.oath().

        NOTE(review): `amount` is never written into the withdraw template
        below — confirm whether the exchange template carries it or whether
        callers must supply it through `params`.
        """
        tag, params = self.handle_withdraw_tag_and_params(tag, params)
        # A session token from signIn() is mandatory for withdrawal endpoints.
        sessionToken = self.safe_string(self.options, 'sessionToken')
        if sessionToken is None:
            raise AuthenticationError(self.id + ' call signIn() method to obtain a session token')
        self.check_address(address)
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        # Default to the first account when no explicit account id is given.
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        params = self.omit(params, ['accountId', 'AccountId'])
        currency = self.currency(code)
        # Step 1: discover which withdraw template types exist for the product.
        withdrawTemplateTypesRequest = {
            'omsId': omsId,
            'AccountId': accountId,
            'ProductId': currency['id'],
        }
        withdrawTemplateTypesResponse = self.privateGetGetWithdrawTemplateTypes(withdrawTemplateTypesRequest)
        templateTypes = self.safe_value(withdrawTemplateTypesResponse, 'TemplateTypes', [])
        firstTemplateType = self.safe_value(templateTypes, 0)
        if firstTemplateType is None:
            raise ExchangeError(self.id + ' withdraw() could not find a withdraw template type for ' + currency['code'])
        templateName = self.safe_string(firstTemplateType, 'TemplateName')
        # Step 2: fetch the JSON template for that type and fill in the address.
        withdrawTemplateRequest = {
            'omsId': omsId,
            'AccountId': accountId,
            'ProductId': currency['id'],
            'TemplateType': templateName,
            'AccountProviderId': firstTemplateType['AccountProviderId'],
        }
        withdrawTemplateResponse = self.privateGetGetWithdrawTemplate(withdrawTemplateRequest)
        template = self.safe_string(withdrawTemplateResponse, 'Template')
        if template is None:
            raise ExchangeError(self.id + ' withdraw() could not find a withdraw template for ' + currency['code'])
        withdrawTemplate = json.loads(template)
        withdrawTemplate['ExternalAddress'] = address
        if tag is not None:
            # Only attach the memo/tag when this template supports one.
            if 'Memo' in withdrawTemplate:
                withdrawTemplate['Memo'] = tag
        # Step 3: submit the ticket, confirmed with a fresh TOTP code.
        withdrawPayload = {
            'omsId': omsId,
            'AccountId': accountId,
            'ProductId': currency['id'],
            'TemplateForm': self.json(withdrawTemplate),
            'TemplateType': templateName,
        }
        withdrawRequest = {
            'TfaType': 'Google',
            'TFaCode': self.oath(),
            'Payload': self.json(withdrawPayload),
        }
        response = self.privatePostCreateWithdrawTicket(self.deep_extend(withdrawRequest, params))
        return {
            'info': response,
            'id': self.safe_string(response, 'Id'),
        }
    def nonce(self):
        """Return an increasing nonce for request signing (current time in ms)."""
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL, headers and body for an API request.

        Public 'Authenticate' uses HTTP Basic auth; 'Authenticate2FA' sends
        the pending-2FA token obtained from the first auth step.  Private
        endpoints are signed with an HMAC over nonce+uid+apiKey until a
        session token exists, after which the 'APToken' header is used.
        """
        url = self.urls['api'][api] + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if path == 'Authenticate':
                # Basic auth with login:password for the initial sign-in.
                auth = self.login + ':' + self.password
                auth64 = self.string_to_base64(auth)
                headers = {
                    'Authorization': 'Basic ' + self.decode(auth64),
                }
            elif path == 'Authenticate2FA':
                # Second factor step: send the token issued by 'Authenticate'.
                pending2faToken = self.safe_string(self.options, 'pending2faToken')
                if pending2faToken is not None:
                    headers = {
                        'Pending2FaToken': pending2faToken,
                    }
                    query = self.omit(query, 'pending2faToken')
            if query:
                url += '?' + self.urlencode(query)
        elif api == 'private':
            self.check_required_credentials()
            sessionToken = self.safe_string(self.options, 'sessionToken')
            if sessionToken is None:
                # Not signed in yet: HMAC-sign nonce+uid+apiKey with the secret.
                nonce = str(self.nonce())
                auth = nonce + self.uid + self.apiKey
                signature = self.hmac(self.encode(auth), self.encode(self.secret))
                headers = {
                    'Nonce': nonce,
                    'APIKey': self.apiKey,
                    'Signature': signature,
                    'UserId': self.uid,
                }
            else:
                # Session-token auth after a successful signIn().
                headers = {
                    'APToken': sessionToken,
                }
            if method == 'POST':
                headers['Content-Type'] = 'application/json'
                body = self.json(query)
            else:
                if query:
                    url += '?' + self.urlencode(query)
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 404:
raise AuthenticationError(self.id + ' ' + body)
if response is None:
return
message = self.safe_string(response, 'errormsg')
if (message is not None) and (message != ''):
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback)
| true
| true
|
f714530361b2ac4050c9c8318d7b66818f4b64c0
| 546
|
py
|
Python
|
symposion/conference/admin.py
|
priyanshuraj7829/symposion
|
6b522f1f798d53cf0a481ecbac002dc4d0b5ab2f
|
[
"BSD-3-Clause"
] | 147
|
2015-01-13T11:24:12.000Z
|
2022-03-20T20:31:52.000Z
|
symposion/conference/admin.py
|
priyanshuraj7829/symposion
|
6b522f1f798d53cf0a481ecbac002dc4d0b5ab2f
|
[
"BSD-3-Clause"
] | 758
|
2015-03-18T13:39:25.000Z
|
2022-03-31T13:14:09.000Z
|
symposion/conference/admin.py
|
priyanshuraj7829/symposion
|
6b522f1f798d53cf0a481ecbac002dc4d0b5ab2f
|
[
"BSD-3-Clause"
] | 83
|
2015-01-16T04:46:54.000Z
|
2020-10-02T07:45:48.000Z
|
from django.contrib import admin
from symposion.conference.models import Conference, Section
class SectionInline(admin.TabularInline):
    """Inline editor for a conference's sections on the Conference page."""
    model = Section
    # Auto-fill the slug from the section name as the user types.
    prepopulated_fields = {"slug": ("name",)}
    extra = 1  # show one blank section form by default
class ConferenceAdmin(admin.ModelAdmin):
    """Admin for Conference objects, with sections editable inline."""
    list_display = ("title", "start_date", "end_date")
    inlines = [SectionInline, ]
admin.site.register(Conference, ConferenceAdmin)
# Register Section standalone as well; the keyword options build an ad-hoc
# ModelAdmin subclass with these attributes.
admin.site.register(
    Section,
    prepopulated_fields={"slug": ("name",)},
    list_display=("name", "conference", "start_date", "end_date")
)
| 23.73913
| 65
| 0.710623
|
from django.contrib import admin
from symposion.conference.models import Conference, Section
class SectionInline(admin.TabularInline):
model = Section
prepopulated_fields = {"slug": ("name",)}
extra = 1
class ConferenceAdmin(admin.ModelAdmin):
list_display = ("title", "start_date", "end_date")
inlines = [SectionInline, ]
admin.site.register(Conference, ConferenceAdmin)
admin.site.register(
Section,
prepopulated_fields={"slug": ("name",)},
list_display=("name", "conference", "start_date", "end_date")
)
| true
| true
|
f714542afbe3ce6340bb9e918a90bcd27446491c
| 667
|
py
|
Python
|
manage.py
|
mukhametdinovigor/where_to_go
|
7374807a6b9bde3b0d2ec03f99f4c73718f7e63c
|
[
"MIT"
] | null | null | null |
manage.py
|
mukhametdinovigor/where_to_go
|
7374807a6b9bde3b0d2ec03f99f4c73718f7e63c
|
[
"MIT"
] | 2
|
2022-01-13T03:53:40.000Z
|
2022-03-12T01:00:24.000Z
|
manage.py
|
mukhametdinovigor/where_to_go
|
7374807a6b9bde3b0d2ec03f99f4c73718f7e63c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'where_to_go.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
# Standard script entry point: dispatch to main() when run directly.
if __name__ == '__main__':
    main()
| 29
| 75
| 0.68066
|
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'where_to_go.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true
| true
|
f714546188fa61ae283a9f70bf3b94a702bd550e
| 238
|
py
|
Python
|
2nd_minimum_no_list.py
|
Akshara2820/Python_Folder
|
06782f88b45f907a4836e073c51f603bb19f9aa9
|
[
"MIT"
] | null | null | null |
2nd_minimum_no_list.py
|
Akshara2820/Python_Folder
|
06782f88b45f907a4836e073c51f603bb19f9aa9
|
[
"MIT"
] | null | null | null |
2nd_minimum_no_list.py
|
Akshara2820/Python_Folder
|
06782f88b45f907a4836e073c51f603bb19f9aa9
|
[
"MIT"
] | null | null | null |
# Find the second-smallest value in `num` without sort()/min().
num = [50, 40, 23, 70, 56, 100, 18, ]
# First pass: find the minimum.
mini1 = num[0]
for value in num:
    if value < mini1:
        mini1 = value
# Second pass: smallest value strictly greater than the minimum.
# Fix: the original seeded mini2 with num[0], which silently returns the
# wrong answer whenever num[0] is itself the minimum; start from None
# instead so any first qualifying value is accepted.
mini2 = None
for value in num:
    if value > mini1 and (mini2 is None or value < mini2):
        mini2 = value
print(mini2)
| 11.333333
| 28
| 0.546218
|
num=[50,40,23,70,56,100,18,]
l=len(num)
a=0
mini1=num[a]
i=0
x=num
while i<l:
if x[i]<=mini1:
mini1=x[i]
i+=1
y=0
mini2=num[y]
a=0
c=num
m=0
while m<l:
if mini2>num[m]>mini1:
mini2=num[m]
m+=1
print(mini2)
| true
| true
|
f71454f89dd5ee3d75234b5625c7636f0d8d8344
| 99
|
py
|
Python
|
vecino/__init__.py
|
sniperkit/snk.fork.vecino
|
a140171795e68fb7c9e26a72a585bd6aeb4e35a9
|
[
"Apache-2.0"
] | null | null | null |
vecino/__init__.py
|
sniperkit/snk.fork.vecino
|
a140171795e68fb7c9e26a72a585bd6aeb4e35a9
|
[
"Apache-2.0"
] | null | null | null |
vecino/__init__.py
|
sniperkit/snk.fork.vecino
|
a140171795e68fb7c9e26a72a585bd6aeb4e35a9
|
[
"Apache-2.0"
] | null | null | null |
from vecino.similar_repositories import SimilarRepositories
from vecino.__main__ import initialize
| 33
| 59
| 0.89899
|
from vecino.similar_repositories import SimilarRepositories
from vecino.__main__ import initialize
| true
| true
|
f7145511c4c4a602dc7d916f5a9d093870f5b3f0
| 40,188
|
py
|
Python
|
research/object_detection/eval_util.py
|
slomrafgrav/models
|
daa6c0415e47bdc52ad6434dc2bdb5d8aeb4f7ce
|
[
"Apache-2.0"
] | 79
|
2019-03-02T17:40:25.000Z
|
2021-08-17T13:22:03.000Z
|
research/object_detection/eval_util.py
|
ywy0318/models
|
91a59c78e8c48e8a1b2fec37143e52dae3f066c1
|
[
"Apache-2.0"
] | 8
|
2019-05-14T10:10:50.000Z
|
2020-12-20T14:05:29.000Z
|
research/object_detection/eval_util.py
|
ywy0318/models
|
91a59c78e8c48e8a1b2fec37143e52dae3f066c1
|
[
"Apache-2.0"
] | 27
|
2019-02-04T01:45:48.000Z
|
2021-03-18T02:39:28.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for evaluation."""
import collections
import os
import time
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
slim = tf.contrib.slim
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
    'coco_detection_metrics':
        coco_evaluation.CocoDetectionEvaluator,
    'coco_mask_metrics':
        coco_evaluation.CocoMaskEvaluator,
    'oid_challenge_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    'pascal_voc_detection_metrics':
        object_detection_evaluation.PascalDetectionEvaluator,
    'weighted_pascal_voc_detection_metrics':
        object_detection_evaluation.WeightedPascalDetectionEvaluator,
    'pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.PascalInstanceSegmentationEvaluator,
    'weighted_pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
    'oid_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
}
# Metric set used when an eval config does not name one explicitly.
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
def write_metrics(metrics, global_step, summary_dir):
  """Write evaluation metrics as tensorflow summaries.

  Args:
    metrics: A dictionary containing metric names and values.
    global_step: Global step at which the metrics are computed.
    summary_dir: Directory to write tensorflow summaries to.
  """
  tf.logging.info('Writing metrics to tf summary.')
  writer = tf.summary.FileWriterCache.get(summary_dir)
  for name in sorted(metrics):
    value = metrics[name]
    summary = tf.Summary(
        value=[tf.Summary.Value(tag=name, simple_value=value)])
    writer.add_summary(summary, global_step)
    tf.logging.info('%s: %f', name, value)
  tf.logging.info('Metrics written to tf summary.')
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Visualizes detection results and writes visualizations to image summaries.

  This function visualizes an image with its detected bounding boxes and writes
  to image summaries which can be viewed on tensorboard. It optionally also
  writes images to a directory. In the case of missing entry in the label map,
  unknown class name in the visualization is shown as "N/A".

  Args:
    result_dict: a dictionary holding groundtruth and detection
      data corresponding to each image being evaluated. The following keys
      are required:
        'original_image': a numpy array representing the image with shape
          [1, height, width, 3] or [1, height, width, 1]
        'detection_boxes': a numpy array of shape [N, 4]
        'detection_scores': a numpy array of shape [N]
        'detection_classes': a numpy array of shape [N]
      The following keys are optional:
        'groundtruth_boxes': a numpy array of shape [N, 4]
        'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score and for
      display, and we assume that scores are probabilities between 0 and 1.
    tag: tensorboard tag (string) to associate with image.
    global_step: global step at which the visualization are generated.
    categories: a list of dictionaries representing all possible categories.
      Each dict in this list has the following keys:
          'id': (required) an integer id uniquely identifying this category
          'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'
          'supercategory': (optional) string representing the supercategory
            e.g., 'animal', 'vehicle', 'food', etc
    summary_dir: the output directory to which the image summaries are written.
    export_dir: the output directory to which images are written. If this is
      empty (default), then images are not exported.
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not.
    show_groundtruth: boolean (default: False) controlling whether to show
      groundtruth boxes in addition to detected boxes
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    min_score_thresh: minimum score threshold for a box to be visualized
    max_num_predictions: maximum number of detections to visualize
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection
    keep_image_id_for_visualization_export: whether to keep image identifier in
      filename when exported to export_dir

  Raises:
    ValueError: if result_dict does not contain the expected keys (i.e.,
      'original_image', 'detection_boxes', 'detection_scores',
      'detection_classes')
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  # Validate that all required keys are present before doing any work.
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  tf.logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)
  # Drop the leading batch dimension; the batch size is assumed to be 1.
  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:  # If one channel image, repeat in RGB.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
  # Plot groundtruth underneath detections
  if show_groundtruth:
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)
  # Optionally export the rendered image to disk as a PNG.
  if export_dir:
    if keep_image_id_for_visualization_export and result_dict[fields.
                                                              InputDataFields()
                                                              .key]:
      export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
          tag, result_dict[fields.InputDataFields().key]))
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
    vis_utils.save_image_array_as_png(image, export_path)
  # Emit the rendered image as a tensorboard image summary.
  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)
  tf.logging.info('Detection visualizations written to summary with tag %s.',
                  tag)
def _run_checkpoint_once(tensor_dict,
                         evaluators=None,
                         batch_processor=None,
                         checkpoint_dirs=None,
                         variables_to_restore=None,
                         restore_fn=None,
                         num_batches=1,
                         master='',
                         save_graph=False,
                         save_graph_dir='',
                         losses_dict=None,
                         eval_export_path=None):
  """Evaluates metrics defined in evaluators and returns summaries.

  This function loads the latest checkpoint in checkpoint_dirs and evaluates
  all metrics defined in evaluators. The metrics are processed in batch by the
  batch_processor.

  Args:
    tensor_dict: a dictionary holding tensors representing a batch of detections
      and corresponding groundtruth annotations.
    evaluators: a list of object of type DetectionEvaluator to be used for
      evaluation. Note that the metric names produced by different evaluators
      must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      By default, batch_processor is None, which defaults to running:
        return sess.run(tensor_dict)
      To skip an image, it suffices to return an empty dictionary in place of
      result_dict.
    checkpoint_dirs: list of directories to load into an EnsembleModel. If it
      has only one directory, EnsembleModel will not be used --
      a DetectionModel
      will be instantiated directly. Not used if restore_fn is set.
    variables_to_restore: None, or a dictionary mapping variable names found in
      a checkpoint to model variables. The dictionary would normally be
      generated by creating a tf.train.ExponentialMovingAverage object and
      calling its variables_to_restore() method. Not used if restore_fn is set.
    restore_fn: None, or a function that takes a tf.Session object and correctly
      restores all necessary variables from the correct checkpoint file. If
      None, attempts to restore from the first directory in checkpoint_dirs.
    num_batches: the number of batches to use for evaluation.
    master: the location of the Tensorflow session.
    save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
    save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
      is True this must be non-empty.
    losses_dict: optional dictionary of scalar detection losses.
    eval_export_path: Path for saving a json file that contains the detection
      results in json format.

  Returns:
    global_step: the count of global steps.
    all_evaluator_metrics: A dictionary containing metric names and values.

  Raises:
    ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
      one element.
    ValueError: if save_graph is True and save_graph_dir is not defined.
  """
  if save_graph and not save_graph_dir:
    raise ValueError('`save_graph_dir` must be defined.')
  sess = tf.Session(master, graph=tf.get_default_graph())
  sess.run(tf.global_variables_initializer())
  sess.run(tf.local_variables_initializer())
  sess.run(tf.tables_initializer())
  # Restore model weights: either via the caller-supplied restore_fn, or by
  # loading the latest checkpoint from the first checkpoint directory.
  if restore_fn:
    restore_fn(sess)
  else:
    if not checkpoint_dirs:
      raise ValueError('`checkpoint_dirs` must have at least one entry.')
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint_file)
  if save_graph:
    tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
  counters = {'skipped': 0, 'success': 0}
  aggregate_result_losses_dict = collections.defaultdict(list)
  with tf.contrib.slim.queues.QueueRunners(sess):
    try:
      for batch in range(int(num_batches)):
        if (batch + 1) % 100 == 0:
          tf.logging.info('Running eval ops batch %d/%d', batch + 1,
                          num_batches)
        if not batch_processor:
          try:
            if not losses_dict:
              losses_dict = {}
            result_dict, result_losses_dict = sess.run([tensor_dict,
                                                        losses_dict])
            counters['success'] += 1
          except tf.errors.InvalidArgumentError:
            # Malformed example: count it as skipped and continue evaluating.
            tf.logging.info('Skipping image')
            counters['skipped'] += 1
            result_dict = {}
        else:
          result_dict, result_losses_dict = batch_processor(
              tensor_dict, sess, batch, counters, losses_dict=losses_dict)
        if not result_dict:
          continue
        for key, value in iter(result_losses_dict.items()):
          aggregate_result_losses_dict[key].append(value)
        for evaluator in evaluators:
          # TODO(b/65130867): Use image_id tensor once we fix the input data
          # decoders to return correct image_id.
          # TODO(akuznetsa): result_dict contains batches of images, while
          # add_single_ground_truth_image_info expects a single image. Fix
          if (isinstance(result_dict, dict) and
              fields.InputDataFields.key in result_dict and
              result_dict[fields.InputDataFields.key]):
            image_id = result_dict[fields.InputDataFields.key]
          else:
            # Fall back to the batch index when no image key is available.
            image_id = batch
          evaluator.add_single_ground_truth_image_info(
              image_id=image_id, groundtruth_dict=result_dict)
          evaluator.add_single_detected_image_info(
              image_id=image_id, detections_dict=result_dict)
      tf.logging.info('Running eval batches done.')
    except tf.errors.OutOfRangeError:
      tf.logging.info('Done evaluating -- epoch limit reached')
    finally:
      # When done, ask the threads to stop.
      tf.logging.info('# success: %d', counters['success'])
      tf.logging.info('# skipped: %d', counters['skipped'])
      all_evaluator_metrics = {}
      # Optionally dump raw COCO-format detections before computing metrics.
      if eval_export_path and eval_export_path is not None:
        for evaluator in evaluators:
          if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
              isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
            tf.logging.info('Started dumping to json file.')
            evaluator.dump_detections_to_json_file(
                json_output_path=eval_export_path)
            tf.logging.info('Finished dumping to json file.')
      for evaluator in evaluators:
        metrics = evaluator.evaluate()
        evaluator.clear()
        if any(key in all_evaluator_metrics for key in metrics):
          raise ValueError('Metric names between evaluators must not collide.')
        all_evaluator_metrics.update(metrics)
      global_step = tf.train.global_step(sess, tf.train.get_global_step())
      for key, value in iter(aggregate_result_losses_dict.items()):
        all_evaluator_metrics['Losses/' + key] = np.mean(value)
  sess.close()
  return (global_step, all_evaluator_metrics)
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
                            summary_dir,
                            evaluators,
                            batch_processor=None,
                            checkpoint_dirs=None,
                            variables_to_restore=None,
                            restore_fn=None,
                            num_batches=1,
                            eval_interval_secs=120,
                            max_number_of_evaluations=None,
                            master='',
                            save_graph=False,
                            save_graph_dir='',
                            losses_dict=None,
                            eval_export_path=None):
  """Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.

  This function repeatedly loads a checkpoint and evaluates a desired
  set of tensors (provided by tensor_dict) and hands the resulting numpy
  arrays to a function result_processor which can be used to further
  process/save/visualize the results.

  Args:
    tensor_dict: a dictionary holding tensors representing a batch of detections
      and corresponding groundtruth annotations.
    summary_dir: a directory to write metrics summaries.
    evaluators: a list of object of type DetectionEvaluator to be used for
      evaluation. Note that the metric names produced by different evaluators
      must be unique.
    batch_processor: a function taking three arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      By default, batch_processor is None, which defaults to running:
        return sess.run(tensor_dict)
    checkpoint_dirs: list of directories to load into a DetectionModel or an
      EnsembleModel if restore_fn isn't set. Also used to determine when to run
      next evaluation. Must have at least one element.
    variables_to_restore: None, or a dictionary mapping variable names found in
      a checkpoint to model variables. The dictionary would normally be
      generated by creating a tf.train.ExponentialMovingAverage object and
      calling its variables_to_restore() method. Not used if restore_fn is set.
    restore_fn: a function that takes a tf.Session object and correctly restores
      all necessary variables from the correct checkpoint file.
    num_batches: the number of batches to use for evaluation.
    eval_interval_secs: the number of seconds between each evaluation run.
    max_number_of_evaluations: the max number of iterations of the evaluation.
      If the value is left as None the evaluation continues indefinitely.
    master: the location of the Tensorflow session.
    save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
    save_graph_dir: where to save on disk the Tensorflow graph. If store_graph
      is True this must be non-empty.
    losses_dict: optional dictionary of scalar detection losses.
    eval_export_path: Path for saving a json file that contains the detection
      results in json format.

  Returns:
    metrics: A dictionary containing metric names and values in the latest
      evaluation.

  Raises:
    ValueError: if max_num_of_evaluations is not None or a positive number.
    ValueError: if checkpoint_dirs doesn't have at least one element.
  """
  if max_number_of_evaluations and max_number_of_evaluations <= 0:
    raise ValueError(
        '`number_of_steps` must be either None or a positive number.')
  if not checkpoint_dirs:
    raise ValueError('`checkpoint_dirs` must have at least one entry.')
  last_evaluated_model_path = None
  number_of_evaluations = 0
  while True:
    start = time.time()
    tf.logging.info('Starting evaluation at ' + time.strftime(
        '%Y-%m-%d-%H:%M:%S', time.gmtime()))
    model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
    # Only evaluate when a new, not-yet-seen checkpoint is available.
    if not model_path:
      tf.logging.info('No model found in %s. Will try again in %d seconds',
                      checkpoint_dirs[0], eval_interval_secs)
    elif model_path == last_evaluated_model_path:
      tf.logging.info('Found already evaluated checkpoint. Will try again in '
                      '%d seconds', eval_interval_secs)
    else:
      last_evaluated_model_path = model_path
      global_step, metrics = _run_checkpoint_once(
          tensor_dict,
          evaluators,
          batch_processor,
          checkpoint_dirs,
          variables_to_restore,
          restore_fn,
          num_batches,
          master,
          save_graph,
          save_graph_dir,
          losses_dict=losses_dict,
          eval_export_path=eval_export_path)
      write_metrics(metrics, global_step, summary_dir)
    # NOTE(review): the iteration counter also advances when no (new)
    # checkpoint was found; if no checkpoint is ever evaluated, `metrics`
    # is unbound at the return below -- confirm intended behavior.
    number_of_evaluations += 1
    if (max_number_of_evaluations and
        number_of_evaluations >= max_number_of_evaluations):
      tf.logging.info('Finished evaluation!')
      break
    # Sleep only for the remainder of the interval not spent evaluating.
    time_to_next_eval = start + eval_interval_secs - time.time()
    if time_to_next_eval > 0:
      time.sleep(time_to_next_eval)
  return metrics
def _scale_box_to_absolute(args):
  """Converts normalized boxes to absolute pixel coordinates.
  Args:
    args: A tuple of (boxes, image_shape) where boxes are in normalized
      coordinates.
  Returns:
    A tensor of the same boxes in absolute image coordinates.
  """
  normalized_boxes, shape = args
  absolute_boxlist = box_list_ops.to_absolute_coordinates(
      box_list.BoxList(normalized_boxes), shape[0], shape[1])
  return absolute_boxlist.get()
def _resize_detection_masks(args):
  """Reframes per-box masks to full-image masks, binarized at 0.5.
  Args:
    args: A tuple of (detection_boxes, detection_masks, image_shape).
  Returns:
    A uint8 tensor of binarized full-image masks.
  """
  boxes, masks, shape = args
  reframed_masks = ops.reframe_box_masks_to_image_masks(
      masks, boxes, shape[0], shape[1])
  binarized = tf.greater(reframed_masks, 0.5)
  return tf.cast(binarized, tf.uint8)
def _resize_groundtruth_masks(args):
  """Resizes groundtruth instance masks to the given image shape.
  Args:
    args: A tuple of (mask, image_shape).
  Returns:
    A uint8 tensor of masks resized with nearest-neighbor interpolation.
  """
  masks, target_shape = args
  # tf.image.resize_images expects a channels dimension; add and then
  # remove it around the resize.
  expanded = tf.expand_dims(masks, 3)
  resized = tf.image.resize_images(
      expanded,
      target_shape,
      method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      align_corners=True)
  return tf.cast(tf.squeeze(resized, 3), tf.uint8)
def _scale_keypoint_to_absolute(args):
  """Scales normalized keypoints to absolute image coordinates."""
  keypoints, image_shape = args
  return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
def result_dict_for_single_example(image,
                                   key,
                                   detections,
                                   groundtruth=None,
                                   class_agnostic=False,
                                   scale_to_absolute=False):
  """Merges all detection and groundtruth information for a single example.
  Note that evaluation tools require classes that are 1-indexed, and so this
  function performs the offset. If `class_agnostic` is True, all output classes
  have label 1.
  Args:
    image: A single 4D uint8 image tensor of shape [1, H, W, C].
    key: A single string tensor identifying the image.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of groundtruth items, with fields:
      'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
        normalized coordinates.
      'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
      'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
      'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
      'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
      'groundtruth_instance_masks': 3D int64 tensor of instance masks
        (Optional).
    class_agnostic: Boolean indicating whether the detections are class-agnostic
      (i.e. binary). Default False.
    scale_to_absolute: Boolean indicating whether boxes and keypoints should be
      scaled to absolute coordinates. Note that for IoU based evaluations, it
      does not matter whether boxes are expressed in absolute or relative
      coordinates. Default False.
  Returns:
    A dictionary with:
    'original_image': A [1, H, W, C] uint8 image tensor.
    'key': A string tensor with image identifier.
    'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`.
    'detection_scores': [max_detections] float32 tensor of scores.
    'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
    'detection_masks': [max_detections, H, W] float32 tensor of binarized
      masks, reframed to full image masks.
    'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`. (Optional)
    'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      (Optional)
    'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
    'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
    'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
    'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
    'groundtruth_instance_masks': 3D int64 tensor of instance masks
      (Optional).
  """
  # Bug fix: max_gt_boxes was previously assigned only inside the
  # `if groundtruth:` branch, so calling this function with groundtruth=None
  # raised UnboundLocalError at the result_dict_for_batched_example call.
  max_gt_boxes = None
  if groundtruth:
    max_gt_boxes = tf.shape(
        groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
    for gt_key in groundtruth:
      # expand groundtruth dict along the batch dimension.
      groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
  for detection_key in detections:
    detections[detection_key] = tf.expand_dims(
        detections[detection_key][0], axis=0)
  # Delegate to the batched implementation with a batch of size one, then
  # strip the batch dimension again below.
  batched_output_dict = result_dict_for_batched_example(
      image,
      tf.expand_dims(key, 0),
      detections,
      groundtruth,
      class_agnostic,
      scale_to_absolute,
      max_gt_boxes=max_gt_boxes)
  exclude_keys = [
      fields.InputDataFields.original_image,
      fields.DetectionResultFields.num_detections,
      fields.InputDataFields.num_groundtruth_boxes
  ]
  output_dict = {
      fields.InputDataFields.original_image:
          batched_output_dict[fields.InputDataFields.original_image]
  }
  for key in batched_output_dict:
    # remove the batch dimension.
    if key not in exclude_keys:
      output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
  return output_dict
def result_dict_for_batched_example(images,
                                    keys,
                                    detections,
                                    groundtruth=None,
                                    class_agnostic=False,
                                    scale_to_absolute=False,
                                    original_image_spatial_shapes=None,
                                    true_image_shapes=None,
                                    max_gt_boxes=None):
  """Merges all detection and groundtruth information for a batched example.
  Note that evaluation tools require classes that are 1-indexed, and so this
  function performs the offset. If `class_agnostic` is True, all output classes
  have label 1.
  Args:
    images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
    keys: A [batch_size] string tensor with image identifier.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of groundtruth items, with fields:
      'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
        of boxes, in normalized coordinates.
      'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of
        1-indexed classes.
      'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
        bbox area. (Optional)
      'groundtruth_is_crowd': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_instance_masks': 4D int64 tensor of instance
        masks (Optional).
    class_agnostic: Boolean indicating whether the detections are class-agnostic
      (i.e. binary). Default False.
    scale_to_absolute: Boolean indicating whether boxes and keypoints should be
      scaled to absolute coordinates. Note that for IoU based evaluations, it
      does not matter whether boxes are expressed in absolute or relative
      coordinates. Default False.
    original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
      used to resize the image. When set to None, the image size is retained.
    true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
      containing the size of the unpadded original_image.
    max_gt_boxes: [batch_size] tensor representing the maximum number of
      groundtruth boxes to pad.
  Returns:
    A dictionary with:
    'original_image': A [batch_size, H, W, C] uint8 image tensor.
    'original_image_spatial_shape': A [batch_size, 2] tensor containing the
      original image sizes.
    'true_image_shape': A [batch_size, 3] tensor containing the size of
      the unpadded original_image.
    'key': A [batch_size] string tensor with image identifier.
    'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,
      in normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`.
    'detection_scores': [batch_size, max_detections] float32 tensor of scores.
    'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed
      classes.
    'detection_masks': [batch_size, max_detections, H, W] float32 tensor of
      binarized masks, reframed to full image masks.
    'num_detections': [batch_size] int64 tensor containing number of valid
      detections.
    'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`. (Optional)
    'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
      classes. (Optional)
    'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox
      area. (Optional)
    'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)
    'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)
    'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)
    'groundtruth_instance_masks': 4D int64 tensor of instance masks
      (Optional).
    'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
      of groundtruth boxes per image.
  Raises:
    ValueError: if original_image_spatial_shapes is not a 2D int32 tensor of
      shape [batch_size, 2].
    ValueError: if true_image_shapes is not a 2D int32 tensor of shape
      [batch_size, 3].
  """
  label_id_offset = 1  # Applying label id offset (b/63711816)
  input_data_fields = fields.InputDataFields
  if original_image_spatial_shapes is None:
    original_image_spatial_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:3], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Bug fix: the original condition used `and`, which only raised when BOTH
    # the rank and the second dimension were wrong, so e.g. a [batch_size, 3]
    # tensor silently passed validation. `or` enforces shape [batch_size, 2]
    # (short-circuiting avoids indexing shape[1] on a wrong-rank tensor).
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
      raise ValueError(
          '`original_image_spatial_shape` should be a 2D tensor of shape '
          '[batch_size, 2].')
  if true_image_shapes is None:
    true_image_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:4], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Same validation fix as above: require shape [batch_size, 3].
    if (len(true_image_shapes.shape) != 2
        or true_image_shapes.shape[1] != 3):
      raise ValueError('`true_image_shapes` should be a 2D tensor of '
                       'shape [batch_size, 3].')
  output_dict = {
      input_data_fields.original_image:
          images,
      input_data_fields.key:
          keys,
      input_data_fields.original_image_spatial_shape: (
          original_image_spatial_shapes),
      input_data_fields.true_image_shape:
          true_image_shapes
  }
  detection_fields = fields.DetectionResultFields
  detection_boxes = detections[detection_fields.detection_boxes]
  detection_scores = detections[detection_fields.detection_scores]
  num_detections = tf.to_int32(detections[detection_fields.num_detections])
  if class_agnostic:
    detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
  else:
    detection_classes = (
        tf.to_int64(detections[detection_fields.detection_classes]) +
        label_id_offset)
  if scale_to_absolute:
    output_dict[detection_fields.detection_boxes] = (
        shape_utils.static_or_dynamic_map_fn(
            _scale_box_to_absolute,
            elems=[detection_boxes, original_image_spatial_shapes],
            dtype=tf.float32))
  else:
    output_dict[detection_fields.detection_boxes] = detection_boxes
  output_dict[detection_fields.detection_classes] = detection_classes
  output_dict[detection_fields.detection_scores] = detection_scores
  output_dict[detection_fields.num_detections] = num_detections
  if detection_fields.detection_masks in detections:
    detection_masks = detections[detection_fields.detection_masks]
    # TODO(rathodv): This should be done in model's postprocess
    # function ideally.
    output_dict[detection_fields.detection_masks] = (
        shape_utils.static_or_dynamic_map_fn(
            _resize_detection_masks,
            elems=[detection_boxes, detection_masks,
                   original_image_spatial_shapes],
            dtype=tf.uint8))
  if detection_fields.detection_keypoints in detections:
    detection_keypoints = detections[detection_fields.detection_keypoints]
    output_dict[detection_fields.detection_keypoints] = detection_keypoints
    if scale_to_absolute:
      output_dict[detection_fields.detection_keypoints] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_keypoint_to_absolute,
              elems=[detection_keypoints, original_image_spatial_shapes],
              dtype=tf.float32))
  if groundtruth:
    if max_gt_boxes is None:
      if input_data_fields.num_groundtruth_boxes in groundtruth:
        max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
      else:
        raise ValueError(
            'max_gt_boxes must be provided when processing batched examples.')
    if input_data_fields.groundtruth_instance_masks in groundtruth:
      masks = groundtruth[input_data_fields.groundtruth_instance_masks]
      groundtruth[input_data_fields.groundtruth_instance_masks] = (
          shape_utils.static_or_dynamic_map_fn(
              _resize_groundtruth_masks,
              elems=[masks, original_image_spatial_shapes],
              dtype=tf.uint8))
    output_dict.update(groundtruth)
    if scale_to_absolute:
      groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
      output_dict[input_data_fields.groundtruth_boxes] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_box_to_absolute,
              elems=[groundtruth_boxes, original_image_spatial_shapes],
              dtype=tf.float32))
    # For class-agnostic models, groundtruth classes all become 1.
    if class_agnostic:
      groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
      groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
      output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
    output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
  return output_dict
def get_evaluators(eval_config, categories, evaluator_options=None):
  """Creates the DetectionEvaluator instances requested by eval_config.
  Falls back to EVAL_DEFAULT_METRIC when eval_config.metrics_set is empty.
  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each of which has the following keys -
      'id': (required) an integer id uniquely identifying this category.
      'name': (required) string representing category name e.g., 'cat', 'dog'.
    evaluator_options: Optional dictionary mapping metric names (see
      EVAL_METRICS_CLASS_DICT) to keyword arguments for the corresponding
      `DetectionEvaluator` constructor, for example:
        {'coco_detection_metrics': {'include_metrics_per_category': True}}
  Returns:
    A list of DetectionEvaluator instances.
  Raises:
    ValueError: if a requested metric is not in the metric class dictionary.
  """
  options = evaluator_options or {}
  metric_keys = list(eval_config.metrics_set)
  if not metric_keys:
    metric_keys = [EVAL_DEFAULT_METRIC]
  evaluators = []
  for metric_key in metric_keys:
    if metric_key not in EVAL_METRICS_CLASS_DICT:
      raise ValueError('Metric not found: {}'.format(metric_key))
    evaluator_cls = EVAL_METRICS_CLASS_DICT[metric_key]
    evaluators.append(evaluator_cls(categories, **options.get(metric_key, {})))
  return evaluators
def get_eval_metric_ops_for_evaluators(eval_config,
                                       categories,
                                       eval_dict):
  """Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.
  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each of which has the following keys -
      'id': (required) an integer id uniquely identifying this category.
      'name': (required) string representing category name e.g., 'cat', 'dog'.
    eval_dict: An evaluation dictionary, returned from
      result_dict_for_single_example().
  Returns:
    A dictionary of metric names to tuple of value_op and update_op that can be
    used as eval metric ops in tf.EstimatorSpec.
  """
  options = evaluator_options_from_eval_config(eval_config)
  metric_ops = {}
  for evaluator in get_evaluators(eval_config, categories, options):
    metric_ops.update(evaluator.get_estimator_eval_metric_ops(eval_dict))
  return metric_ops
def evaluator_options_from_eval_config(eval_config):
  """Produces a dictionary of evaluation options for each eval metric.
  Only the COCO metrics take extra constructor options; for those, the
  `include_metrics_per_category` flag is forwarded from the config.
  Args:
    eval_config: An `eval_pb2.EvalConfig`.
  Returns:
    A dictionary of metric names (see EVAL_METRICS_CLASS_DICT) to
    `DetectionEvaluator` initialization keyword arguments, for example:
      {'coco_detection_metrics': {'include_metrics_per_category': True}}
  """
  per_category_metrics = ('coco_detection_metrics', 'coco_mask_metrics')
  options = {}
  for metric_key in eval_config.metrics_set:
    if metric_key in per_category_metrics:
      options[metric_key] = {
          'include_metrics_per_category':
              eval_config.include_metrics_per_category
      }
  return options
| 44.259912
| 80
| 0.700956
|
import collections
import os
import time
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
slim = tf.contrib.slim
# Maps metric names accepted in eval_config.metrics_set to the
# DetectionEvaluator subclass implementing that metric.
EVAL_METRICS_CLASS_DICT = {
    'coco_detection_metrics':
        coco_evaluation.CocoDetectionEvaluator,
    'coco_mask_metrics':
        coco_evaluation.CocoMaskEvaluator,
    'oid_challenge_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    'pascal_voc_detection_metrics':
        object_detection_evaluation.PascalDetectionEvaluator,
    'weighted_pascal_voc_detection_metrics':
        object_detection_evaluation.WeightedPascalDetectionEvaluator,
    'pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.PascalInstanceSegmentationEvaluator,
    'weighted_pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
    'oid_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
}
# Metric used when eval_config.metrics_set is empty (see get_evaluators).
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
def write_metrics(metrics, global_step, summary_dir):
  """Writes scalar metric values as tf summaries under summary_dir.
  Args:
    metrics: A dictionary mapping metric names to scalar values.
    global_step: Global step at which the metrics are written.
    summary_dir: Directory receiving the tf summaries.
  """
  tf.logging.info('Writing metrics to tf summary.')
  writer = tf.summary.FileWriterCache.get(summary_dir)
  for name in sorted(metrics):
    value = metrics[name]
    writer.add_summary(
        tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=value)]),
        global_step)
    tf.logging.info('%s: %f', name, value)
  tf.logging.info('Metrics written to tf summary.')
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Draws detections (and optionally groundtruth) on an image and logs it.
  Renders the boxes/masks/keypoints from `result_dict` onto the image,
  writes the rendered image to a tf summary under `summary_dir`, and
  optionally also saves it as a PNG under `export_dir`.
  Args:
    result_dict: Dictionary holding the image and detection fields; must
      contain 'original_image', 'detection_boxes', 'detection_scores' and
      'detection_classes'.
    tag: String tag used for the image summary and the export file name.
    global_step: Global step at which the visualization is written.
    categories: A list of dicts with keys 'id' and 'name', used to map
      class ids to display names.
    summary_dir: Directory receiving the image summaries.
    export_dir: If non-empty, directory where the rendered image is also
      saved as a PNG file.
    agnostic_mode: Whether to visualize in class-agnostic mode.
    show_groundtruth: Whether to also draw groundtruth boxes; requires
      'groundtruth_boxes' in result_dict.
    groundtruth_box_visualization_color: Color used for groundtruth boxes.
    min_score_thresh: Minimum score for a detection to be drawn.
    max_num_predictions: Maximum number of detections to draw.
    skip_scores: Whether to omit scores from the box labels.
    skip_labels: Whether to omit class names from the box labels.
    keep_image_id_for_visualization_export: Whether to include the image id
      in the exported PNG file name.
  Raises:
    ValueError: if result_dict is missing a required key.
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  tf.logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)
  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:
    # Single-channel image: replicate the channel so drawing works on RGB.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
  if show_groundtruth:
    # Draw groundtruth first so detections are rendered on top of it.
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)
  if export_dir:
    if keep_image_id_for_visualization_export and result_dict[fields.
                                                              InputDataFields()
                                                              .key]:
      export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
          tag, result_dict[fields.InputDataFields().key]))
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
    vis_utils.save_image_array_as_png(image, export_path)
  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)
  tf.logging.info('Detection visualizations written to summary with tag %s.',
                  tag)
def _run_checkpoint_once(tensor_dict,
                         evaluators=None,
                         batch_processor=None,
                         checkpoint_dirs=None,
                         variables_to_restore=None,
                         restore_fn=None,
                         num_batches=1,
                         master='',
                         save_graph=False,
                         save_graph_dir='',
                         losses_dict=None,
                         eval_export_path=None):
  """Evaluates one checkpoint, feeding every batch to the evaluators.
  Restores variables (via restore_fn, or from the latest checkpoint in
  checkpoint_dirs[0]), runs `tensor_dict` for `num_batches` batches, passes
  each result to every evaluator, and aggregates per-batch losses.
  Args:
    tensor_dict: Dictionary of tensors to evaluate per batch.
    evaluators: List of DetectionEvaluator objects consuming the results.
    batch_processor: Optional callable with signature
      (tensor_dict, sess, batch_index, counters, losses_dict) returning
      (result_dict, result_losses_dict). When None, tensor_dict and
      losses_dict are run directly with sess.run.
    checkpoint_dirs: Directories to load a checkpoint from when restore_fn
      is not given; only the first entry is used.
    variables_to_restore: Optional variable mapping passed to tf.train.Saver.
    restore_fn: Optional callable taking a session and restoring variables.
    num_batches: Number of batches to evaluate.
    master: The location of the Tensorflow session.
    save_graph: Whether the Tensorflow graph is saved as a pbtxt file.
    save_graph_dir: Where to save the graph; must be non-empty when
      save_graph is True.
    losses_dict: Optional dictionary of scalar detection losses.
    eval_export_path: Optional path for a json file with the detection
      results (COCO evaluators only).
  Returns:
    A (global_step, metrics) tuple, where metrics maps metric names to the
    evaluated values (including 'Losses/<key>' means of aggregated losses).
  Raises:
    ValueError: if save_graph is True but save_graph_dir is empty, if
      neither restore_fn nor checkpoint_dirs is provided, or if metric
      names collide between evaluators.
  """
  if save_graph and not save_graph_dir:
    raise ValueError('`save_graph_dir` must be defined.')
  sess = tf.Session(master, graph=tf.get_default_graph())
  sess.run(tf.global_variables_initializer())
  sess.run(tf.local_variables_initializer())
  sess.run(tf.tables_initializer())
  if restore_fn:
    restore_fn(sess)
  else:
    if not checkpoint_dirs:
      raise ValueError('`checkpoint_dirs` must have at least one entry.')
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint_file)
  if save_graph:
    tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
  counters = {'skipped': 0, 'success': 0}
  aggregate_result_losses_dict = collections.defaultdict(list)
  with tf.contrib.slim.queues.QueueRunners(sess):
    try:
      for batch in range(int(num_batches)):
        if (batch + 1) % 100 == 0:
          tf.logging.info('Running eval ops batch %d/%d', batch + 1,
                          num_batches)
        if not batch_processor:
          try:
            if not losses_dict:
              losses_dict = {}
            result_dict, result_losses_dict = sess.run([tensor_dict,
                                                        losses_dict])
            counters['success'] += 1
          except tf.errors.InvalidArgumentError:
            tf.logging.info('Skipping image')
            counters['skipped'] += 1
            result_dict = {}
        else:
          result_dict, result_losses_dict = batch_processor(
              tensor_dict, sess, batch, counters, losses_dict=losses_dict)
        if not result_dict:
          continue
        for key, value in iter(result_losses_dict.items()):
          aggregate_result_losses_dict[key].append(value)
        for evaluator in evaluators:
          # Use the image key as id when present and non-empty; fall back
          # to the batch index otherwise.
          if (isinstance(result_dict, dict) and
              fields.InputDataFields.key in result_dict and
              result_dict[fields.InputDataFields.key]):
            image_id = result_dict[fields.InputDataFields.key]
          else:
            image_id = batch
          evaluator.add_single_ground_truth_image_info(
              image_id=image_id, groundtruth_dict=result_dict)
          evaluator.add_single_detected_image_info(
              image_id=image_id, detections_dict=result_dict)
      tf.logging.info('Running eval batches done.')
    except tf.errors.OutOfRangeError:
      tf.logging.info('Done evaluating -- epoch limit reached')
    finally:
      tf.logging.info('# success: %d', counters['success'])
      tf.logging.info('# skipped: %d', counters['skipped'])
      all_evaluator_metrics = {}
      # Simplified from the redundant
      # `eval_export_path and eval_export_path is not None`.
      if eval_export_path:
        for evaluator in evaluators:
          if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
              isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
            tf.logging.info('Started dumping to json file.')
            evaluator.dump_detections_to_json_file(
                json_output_path=eval_export_path)
            tf.logging.info('Finished dumping to json file.')
      for evaluator in evaluators:
        metrics = evaluator.evaluate()
        evaluator.clear()
        if any(key in all_evaluator_metrics for key in metrics):
          raise ValueError('Metric names between evaluators must not collide.')
        all_evaluator_metrics.update(metrics)
      global_step = tf.train.global_step(sess, tf.train.get_global_step())
      for key, value in iter(aggregate_result_losses_dict.items()):
        all_evaluator_metrics['Losses/' + key] = np.mean(value)
  sess.close()
  return (global_step, all_evaluator_metrics)
def repeated_checkpoint_run(tensor_dict,
                            summary_dir,
                            evaluators,
                            batch_processor=None,
                            checkpoint_dirs=None,
                            variables_to_restore=None,
                            restore_fn=None,
                            num_batches=1,
                            eval_interval_secs=120,
                            max_number_of_evaluations=None,
                            master='',
                            save_graph=False,
                            save_graph_dir='',
                            losses_dict=None,
                            eval_export_path=None):
  """Periodically evaluates new checkpoints found in checkpoint_dirs[0].
  Polls for the latest checkpoint every eval_interval_secs; each new
  checkpoint is evaluated with _run_checkpoint_once and the resulting
  metrics are written to summary_dir. Loops until
  max_number_of_evaluations iterations have run (forever when None).
  Args:
    tensor_dict: Dictionary of tensors to evaluate per batch.
    summary_dir: Directory receiving the metric summaries.
    evaluators: List of DetectionEvaluator objects consuming the results.
    batch_processor: Optional per-batch callable, forwarded to
      _run_checkpoint_once.
    checkpoint_dirs: Directories to poll for checkpoints; only the first
      entry is used.
    variables_to_restore: Optional variable mapping for the Saver.
    restore_fn: Optional callable taking a session and restoring variables.
    num_batches: Number of batches per evaluation.
    eval_interval_secs: Minimum seconds between evaluation iterations.
    max_number_of_evaluations: Maximum number of loop iterations; must be
      None or positive.
    master: The location of the Tensorflow session.
    save_graph: Whether the Tensorflow graph is saved as a pbtxt file.
    save_graph_dir: Where to save the graph when save_graph is True.
    losses_dict: Optional dictionary of scalar detection losses.
    eval_export_path: Optional path for a detections json dump.
  Returns:
    The metrics dictionary from the latest evaluation.
  Raises:
    ValueError: if max_number_of_evaluations is not None or positive, or if
      checkpoint_dirs is empty.
  """
  if max_number_of_evaluations and max_number_of_evaluations <= 0:
    raise ValueError(
        '`number_of_steps` must be either None or a positive number.')
  if not checkpoint_dirs:
    raise ValueError('`checkpoint_dirs` must have at least one entry.')
  last_evaluated_model_path = None
  number_of_evaluations = 0
  while True:
    start = time.time()
    tf.logging.info('Starting evaluation at ' + time.strftime(
        '%Y-%m-%d-%H:%M:%S', time.gmtime()))
    model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
    if not model_path:
      tf.logging.info('No model found in %s. Will try again in %d seconds',
                      checkpoint_dirs[0], eval_interval_secs)
    elif model_path == last_evaluated_model_path:
      tf.logging.info('Found already evaluated checkpoint. Will try again in '
                      '%d seconds', eval_interval_secs)
    else:
      last_evaluated_model_path = model_path
      global_step, metrics = _run_checkpoint_once(
          tensor_dict,
          evaluators,
          batch_processor,
          checkpoint_dirs,
          variables_to_restore,
          restore_fn,
          num_batches,
          master,
          save_graph,
          save_graph_dir,
          losses_dict=losses_dict,
          eval_export_path=eval_export_path)
      write_metrics(metrics, global_step, summary_dir)
    number_of_evaluations += 1
    if (max_number_of_evaluations and
        number_of_evaluations >= max_number_of_evaluations):
      tf.logging.info('Finished evaluation!')
      break
    time_to_next_eval = start + eval_interval_secs - time.time()
    if time_to_next_eval > 0:
      time.sleep(time_to_next_eval)
  # NOTE(review): if no checkpoint was ever evaluated before the loop ends,
  # `metrics` is unbound here and this raises UnboundLocalError — confirm
  # whether callers can hit that path.
  return metrics
def _scale_box_to_absolute(args):
  """Converts normalized boxes to absolute image coordinates.
  Args:
    args: A tuple of (boxes, image_shape).
  Returns:
    The boxes scaled to absolute coordinates.
  """
  boxes, image_shape = args
  return box_list_ops.to_absolute_coordinates(
      box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()
def _resize_detection_masks(args):
  """Reframes box-level masks to full-image masks, binarized at 0.5.
  Args:
    args: A tuple of (detection_boxes, detection_masks, image_shape).
  Returns:
    A uint8 tensor of binarized full-image masks.
  """
  detection_boxes, detection_masks, image_shape = args
  detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
      detection_masks, detection_boxes, image_shape[0], image_shape[1])
  return tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
def _resize_groundtruth_masks(args):
  """Resizes groundtruth masks to image_shape with nearest-neighbor."""
  mask, image_shape = args
  # Add a channel dimension, required by tf.image.resize_images, and
  # strip it again after the resize.
  mask = tf.expand_dims(mask, 3)
  mask = tf.image.resize_images(
      mask,
      image_shape,
      method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      align_corners=True)
  return tf.cast(tf.squeeze(mask, 3), tf.uint8)
def _scale_keypoint_to_absolute(args):
  """Scales normalized keypoints to absolute image coordinates."""
  keypoints, image_shape = args
  return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
def result_dict_for_single_example(image,
                                   key,
                                   detections,
                                   groundtruth=None,
                                   class_agnostic=False,
                                   scale_to_absolute=False):
  """Merges all detection and groundtruth information for a single example.
  Evaluation tools require 1-indexed classes; the offset is applied inside
  result_dict_for_batched_example. If `class_agnostic` is True, all output
  classes have label 1.
  Args:
    image: A single 4D uint8 image tensor of shape [1, H, W, C].
    key: A single string tensor identifying the image.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of unbatched groundtruth items
      ('groundtruth_boxes', 'groundtruth_classes', and optional area,
      is_crowd, difficult, group_of and instance-mask fields).
    class_agnostic: Whether the detections are class-agnostic (binary).
    scale_to_absolute: Whether boxes and keypoints should be scaled to
      absolute coordinates.
  Returns:
    A dictionary of unbatched image, key, detection and (optional)
    groundtruth tensors: the output of result_dict_for_batched_example for
    a batch of one, with the batch dimension squeezed away.
  """
  # Bug fix: max_gt_boxes was previously assigned only inside the
  # `if groundtruth:` branch, so calling with groundtruth=None raised
  # UnboundLocalError at the call below.
  max_gt_boxes = None
  if groundtruth:
    max_gt_boxes = tf.shape(
        groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
    for gt_key in groundtruth:
      # Expand the groundtruth dict along the batch dimension.
      groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
  for detection_key in detections:
    detections[detection_key] = tf.expand_dims(
        detections[detection_key][0], axis=0)
  # Delegate to the batched implementation with a batch of size one.
  batched_output_dict = result_dict_for_batched_example(
      image,
      tf.expand_dims(key, 0),
      detections,
      groundtruth,
      class_agnostic,
      scale_to_absolute,
      max_gt_boxes=max_gt_boxes)
  exclude_keys = [
      fields.InputDataFields.original_image,
      fields.DetectionResultFields.num_detections,
      fields.InputDataFields.num_groundtruth_boxes
  ]
  output_dict = {
      fields.InputDataFields.original_image:
          batched_output_dict[fields.InputDataFields.original_image]
  }
  for key in batched_output_dict:
    # Remove the batch dimension from everything except the image itself.
    if key not in exclude_keys:
      output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
  return output_dict
def result_dict_for_batched_example(images,
                                    keys,
                                    detections,
                                    groundtruth=None,
                                    class_agnostic=False,
                                    scale_to_absolute=False,
                                    original_image_spatial_shapes=None,
                                    true_image_shapes=None,
                                    max_gt_boxes=None):
  """Merges all detection and groundtruth information for a batched example.
  Evaluation tools require 1-indexed classes, so this function applies a
  label offset of 1 to detection (and leaves groundtruth) classes. If
  `class_agnostic` is True, all output classes have label 1.
  Args:
    images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
    keys: A [batch_size] string tensor with image identifier.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of batched groundtruth items
      ('groundtruth_boxes', 'groundtruth_classes', and optional area,
      is_crowd, difficult, group_of and instance-mask fields).
    class_agnostic: Whether the detections are class-agnostic (binary).
    scale_to_absolute: Whether boxes and keypoints should be scaled to
      absolute coordinates.
    original_image_spatial_shapes: A 2D int32 tensor of shape
      [batch_size, 2] used to resize the image; when None, the image size
      is retained.
    true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
      containing the size of the unpadded original_image.
    max_gt_boxes: [batch_size] tensor with the maximum number of
      groundtruth boxes to pad; required (directly or via
      'num_groundtruth_boxes' in `groundtruth`) when groundtruth is given.
  Returns:
    A dictionary with image, key, shape, detection and (optional)
    groundtruth tensors keyed by fields.InputDataFields /
    fields.DetectionResultFields names.
  Raises:
    ValueError: if original_image_spatial_shapes is not of shape
      [batch_size, 2], if true_image_shapes is not of shape
      [batch_size, 3], or if max_gt_boxes cannot be determined while
      groundtruth is provided.
  """
  label_id_offset = 1  # Applying label id offset (b/63711816)
  input_data_fields = fields.InputDataFields
  if original_image_spatial_shapes is None:
    original_image_spatial_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:3], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Bug fix: the original used `and`, which only raised when BOTH the rank
    # and the second dimension were wrong; `or` enforces [batch_size, 2]
    # (short-circuiting avoids indexing shape[1] on a wrong-rank tensor).
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
      raise ValueError(
          '`original_image_spatial_shape` should be a 2D tensor of shape '
          '[batch_size, 2].')
  if true_image_shapes is None:
    true_image_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:4], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Same validation fix as above: require shape [batch_size, 3].
    if (len(true_image_shapes.shape) != 2
        or true_image_shapes.shape[1] != 3):
      raise ValueError('`true_image_shapes` should be a 2D tensor of '
                       'shape [batch_size, 3].')
  output_dict = {
      input_data_fields.original_image:
          images,
      input_data_fields.key:
          keys,
      input_data_fields.original_image_spatial_shape: (
          original_image_spatial_shapes),
      input_data_fields.true_image_shape:
          true_image_shapes
  }
  detection_fields = fields.DetectionResultFields
  detection_boxes = detections[detection_fields.detection_boxes]
  detection_scores = detections[detection_fields.detection_scores]
  num_detections = tf.to_int32(detections[detection_fields.num_detections])
  if class_agnostic:
    detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
  else:
    detection_classes = (
        tf.to_int64(detections[detection_fields.detection_classes]) +
        label_id_offset)
  if scale_to_absolute:
    output_dict[detection_fields.detection_boxes] = (
        shape_utils.static_or_dynamic_map_fn(
            _scale_box_to_absolute,
            elems=[detection_boxes, original_image_spatial_shapes],
            dtype=tf.float32))
  else:
    output_dict[detection_fields.detection_boxes] = detection_boxes
  output_dict[detection_fields.detection_classes] = detection_classes
  output_dict[detection_fields.detection_scores] = detection_scores
  output_dict[detection_fields.num_detections] = num_detections
  if detection_fields.detection_masks in detections:
    detection_masks = detections[detection_fields.detection_masks]
    # Reframe box-level masks to full-image masks for the evaluators.
    output_dict[detection_fields.detection_masks] = (
        shape_utils.static_or_dynamic_map_fn(
            _resize_detection_masks,
            elems=[detection_boxes, detection_masks,
                   original_image_spatial_shapes],
            dtype=tf.uint8))
  if detection_fields.detection_keypoints in detections:
    detection_keypoints = detections[detection_fields.detection_keypoints]
    output_dict[detection_fields.detection_keypoints] = detection_keypoints
    if scale_to_absolute:
      output_dict[detection_fields.detection_keypoints] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_keypoint_to_absolute,
              elems=[detection_keypoints, original_image_spatial_shapes],
              dtype=tf.float32))
  if groundtruth:
    if max_gt_boxes is None:
      if input_data_fields.num_groundtruth_boxes in groundtruth:
        max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
      else:
        raise ValueError(
            'max_gt_boxes must be provided when processing batched examples.')
    if input_data_fields.groundtruth_instance_masks in groundtruth:
      masks = groundtruth[input_data_fields.groundtruth_instance_masks]
      groundtruth[input_data_fields.groundtruth_instance_masks] = (
          shape_utils.static_or_dynamic_map_fn(
              _resize_groundtruth_masks,
              elems=[masks, original_image_spatial_shapes],
              dtype=tf.uint8))
    output_dict.update(groundtruth)
    if scale_to_absolute:
      groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
      output_dict[input_data_fields.groundtruth_boxes] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_box_to_absolute,
              elems=[groundtruth_boxes, original_image_spatial_shapes],
              dtype=tf.float32))
    # For class-agnostic models, groundtruth classes all become 1.
    if class_agnostic:
      groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
      groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
      output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
    output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
  return output_dict
def get_evaluators(eval_config, categories, evaluator_options=None):
  """Returns DetectionEvaluator instances for eval_config.metrics_set.
  Falls back to EVAL_DEFAULT_METRIC when metrics_set is empty.
  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts with required keys 'id' and 'name'.
    evaluator_options: Optional dict mapping metric names to keyword
      arguments for the corresponding evaluator constructor.
  Returns:
    A list of DetectionEvaluator instances.
  Raises:
    ValueError: if a metric name is not in EVAL_METRICS_CLASS_DICT.
  """
  evaluator_options = evaluator_options or {}
  eval_metric_fn_keys = eval_config.metrics_set
  if not eval_metric_fn_keys:
    eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
  evaluators_list = []
  for eval_metric_fn_key in eval_metric_fn_keys:
    if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
      raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
    kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key
                   in evaluator_options else {})
    evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
        categories,
        **kwargs_dict))
  return evaluators_list
def get_eval_metric_ops_for_evaluators(eval_config,
                                       categories,
                                       eval_dict):
  """Collect estimator eval-metric ops from every configured evaluator.

  Args:
    eval_config: eval config object naming the metrics to compute.
    categories: list of category dicts, forwarded to evaluator construction.
    eval_dict: evaluation dictionary handed to each evaluator.

  Returns:
    A dict of metric names to tuples of (value_op, update_op), merged across
    all evaluators.
  """
  options = evaluator_options_from_eval_config(eval_config)
  metric_ops = {}
  for evaluator in get_evaluators(eval_config, categories, options):
    metric_ops.update(evaluator.get_estimator_eval_metric_ops(eval_dict))
  return metric_ops
def evaluator_options_from_eval_config(eval_config):
  """Derive per-evaluator constructor kwargs from an eval config.

  Only the COCO evaluators accept extra options; each one configured in
  `metrics_set` is given the config's `include_metrics_per_category` flag.

  Args:
    eval_config: eval config object with `metrics_set` and
      `include_metrics_per_category` attributes.

  Returns:
    A dict mapping each configured COCO metric name to its kwargs dict.
  """
  return {
      metric_key: {
          'include_metrics_per_category': (
              eval_config.include_metrics_per_category)
      }
      for metric_key in eval_config.metrics_set
      if metric_key in ('coco_detection_metrics', 'coco_mask_metrics')
  }
| true
| true
|
f714557129b17004b2cb69261547461b88d0c20a
| 227
|
py
|
Python
|
rank_reset.py
|
AmanMulani/python_web_crawling
|
a36115db6548b98c2c66868a14ce752449f4f7d1
|
[
"MIT"
] | null | null | null |
rank_reset.py
|
AmanMulani/python_web_crawling
|
a36115db6548b98c2c66868a14ce752449f4f7d1
|
[
"MIT"
] | null | null | null |
rank_reset.py
|
AmanMulani/python_web_crawling
|
a36115db6548b98c2c66868a14ce752449f4f7d1
|
[
"MIT"
] | null | null | null |
import sqlite3

# Reset the PageRank computation state: every page starts the next run with
# new_rank = 1.0 and old_rank = 0.0.
conn = sqlite3.connect('spider.sqlite')
cur = conn.cursor()

cur.execute('''
    UPDATE Pages SET new_rank = 1.0, old_rank = 0.0
''')

conn.commit()
cur.close()
# BUG FIX: the connection itself was never closed (only the cursor was),
# leaking the database handle until interpreter exit.
conn.close()

print('The rank of all pages has been set to 1.0')
| 18.916667
| 51
| 0.678414
|
import sqlite3
conn = sqlite3.connect('spider.sqlite')
cur = conn.cursor()
cur.execute('''
UPDATE Pages SET new_rank = 1.0, old_rank = 0.0
''')
conn.commit()
cur.close()
print('The rank of all pages has been set to 1.0')
| true
| true
|
f71455c4fdf2d638a601f379ab38dd4ba96daa46
| 1,474
|
py
|
Python
|
PythonClient/cv_mode.py
|
jelaredulla/thesis
|
dc348652cc0bd0a35e5d7506144d641510c2483b
|
[
"MIT"
] | null | null | null |
PythonClient/cv_mode.py
|
jelaredulla/thesis
|
dc348652cc0bd0a35e5d7506144d641510c2483b
|
[
"MIT"
] | null | null | null |
PythonClient/cv_mode.py
|
jelaredulla/thesis
|
dc348652cc0bd0a35e5d7506144d641510c2483b
|
[
"MIT"
] | null | null | null |
# In settings.json first activate computer vision mode:
# https://github.com/Microsoft/AirSim/blob/master/docs/image_apis.md#computer-vision-mode
from AirSimClient import *
import pprint

# Pretty-printer for the pose structures dumped at the end of each iteration.
pp = pprint.PrettyPrinter(indent=4)

# Connect to the running AirSim simulator.
client = CarClient()
client.confirmConnection()

# NOTE(review): `time`, `os`, `Pose`, `Vector3r`, etc. are presumably brought
# in by AirSimClient's star-import; there is no direct import here — confirm.
for x in range(3):  # capture from a few different vantage points
    z = x * -20 - 5  # arbitrary offset reused for position and orientation
    # Teleport the camera; the trailing True ignores collisions when setting the pose.
    client.simSetPose(Pose(Vector3r(z, z, z), AirSimClientBase.toQuaternion(x / 3.0, 0, x / 3.0)), True)
    # Request one frame per image modality in a single call.
    responses = client.simGetImages([
        ImageRequest(0, AirSimImageType.DepthVis),
        ImageRequest(1, AirSimImageType.DepthPerspective, True),
        ImageRequest(0, AirSimImageType.Segmentation),
        ImageRequest(0, AirSimImageType.Scene),
        ImageRequest(0, AirSimImageType.DisparityNormalized),
        ImageRequest(0, AirSimImageType.SurfaceNormals)])
    for i, response in enumerate(responses):
        if response.pixels_as_float:
            # Float images (e.g. perspective depth) are written as PFM files.
            print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
            AirSimClientBase.write_pfm(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.pfm'), AirSimClientBase.getPfmArray(response))
        else:
            # All other modalities arrive as already-encoded PNG bytes.
            print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
            AirSimClientBase.write_file(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.png'), response.image_data_uint8)
    # Log where the simulator ended up before moving to the next viewpoint.
    pose = client.simGetPose()
    pp.pprint(pose)
    time.sleep(3)
| 40.944444
| 147
| 0.687246
|
mport *
import pprint
pp = pprint.PrettyPrinter(indent=4)
client = CarClient()
client.confirmConnection()
for x in range(3):
z = x * -20 - 5
client.simSetPose(Pose(Vector3r(z, z, z), AirSimClientBase.toQuaternion(x / 3.0, 0, x / 3.0)), True)
responses = client.simGetImages([
ImageRequest(0, AirSimImageType.DepthVis),
ImageRequest(1, AirSimImageType.DepthPerspective, True),
ImageRequest(0, AirSimImageType.Segmentation),
ImageRequest(0, AirSimImageType.Scene),
ImageRequest(0, AirSimImageType.DisparityNormalized),
ImageRequest(0, AirSimImageType.SurfaceNormals)])
for i, response in enumerate(responses):
if response.pixels_as_float:
print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
AirSimClientBase.write_pfm(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.pfm'), AirSimClientBase.getPfmArray(response))
else:
print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
AirSimClientBase.write_file(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.png'), response.image_data_uint8)
pose = client.simGetPose()
pp.pprint(pose)
time.sleep(3)
| true
| true
|
f71455e0d19a2d1ec2ea85826d0070d6ac81fa73
| 5,361
|
py
|
Python
|
corehq/messaging/tasks.py
|
dannyroberts/commcare-hq
|
4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/messaging/tasks.py
|
dannyroberts/commcare-hq
|
4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/messaging/tasks.py
|
dannyroberts/commcare-hq
|
4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.data_interfaces.models import AutomaticUpdateRule
from corehq.apps.sms import tasks as sms_tasks
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.form_processor.utils import should_use_sql_backend
from corehq.messaging.scheduling.tasks import delete_schedule_instances_for_cases
from corehq.messaging.scheduling.util import utcnow
from corehq.messaging.util import MessagingRuleProgressHelper, use_phone_entries
from corehq.sql_db.util import run_query_across_partitioned_databases
from corehq.toggles import REMINDERS_MIGRATION_IN_PROGRESS
from corehq.util.celery_utils import no_result_task
from corehq.util.datadog.utils import case_load_counter
from dimagi.utils.couch import CriticalSection
from django.conf import settings
from django.db.models import Q
from django.db import transaction
def get_sync_key(case_id):
    """Return the lock key that serializes messaging syncs for one case."""
    return 'sync-case-for-messaging-{}'.format(case_id)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
                default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging(self, domain, case_id):
    # While a reminders data migration is running for this domain, don't touch
    # messaging state; re-queue the sync to try again in 60 seconds.
    if REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
        sync_case_for_messaging.apply_async([domain, case_id], countdown=60)
        return
    try:
        # Serialize per-case so two concurrent tasks can't race on the same
        # case's phone entries / schedule instances.
        with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
            _sync_case_for_messaging(domain, case_id)
    except Exception as e:
        # Let celery retry (up to max_retries, spaced by default_retry_delay).
        self.retry(exc=e)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
                default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging_rule(self, domain, case_id, rule_id):
    try:
        # Same per-case lock as sync_case_for_messaging, so a rule run and a
        # regular case sync can't operate on the same case concurrently.
        with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
            _sync_case_for_messaging_rule(domain, case_id, rule_id)
    except Exception as e:
        # Let celery retry (up to max_retries, spaced by default_retry_delay).
        self.retry(exc=e)
def _sync_case_for_messaging(domain, case_id):
    """Refresh messaging state (phone entries, schedules, rules) for a case."""
    try:
        the_case = CaseAccessors(domain).get_case(case_id)
        sms_tasks.clear_case_caches(the_case)
    except CaseNotFound:
        the_case = None

    case_load_counter("messaging_sync", domain)()

    # A missing or deleted case gets its messaging artifacts torn down.
    if the_case is None or the_case.is_deleted:
        sms_tasks.delete_phone_numbers_for_owners([case_id])
        delete_schedule_instances_for_cases(domain, [case_id])
        return

    if use_phone_entries():
        sms_tasks._sync_case_phone_number(the_case)

    # Re-run every scheduling rule that applies to this case's type.
    all_rules = AutomaticUpdateRule.by_domain_cached(
        the_case.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(all_rules)
    for rule in by_case_type.get(the_case.type, []):
        rule.run_rule(the_case, utcnow())
def _get_cached_rule(domain, rule_id):
    """Return the cached scheduling rule with pk == rule_id, or None."""
    matches = [
        candidate
        for candidate in AutomaticUpdateRule.by_domain_cached(
            domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
        if candidate.pk == rule_id
    ]
    return matches[0] if len(matches) == 1 else None
def _sync_case_for_messaging_rule(domain, case_id, rule_id):
    """Run one scheduling rule against one case and bump the progress count."""
    case_load_counter("messaging_rule_sync", domain)()
    target_case = CaseAccessors(domain).get_case(case_id)
    rule = _get_cached_rule(domain, rule_id)
    if rule is None:
        # Rule was deleted or isn't cached; nothing to do.
        return
    rule.run_rule(target_case, utcnow())
    MessagingRuleProgressHelper(rule_id).increment_current_case_count()
def initiate_messaging_rule_run(domain, rule_id):
    """Lock the rule for editing and queue a full run once the txn commits."""
    MessagingRuleProgressHelper(rule_id).set_initial_progress()
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=True)

    def _queue_run():
        run_messaging_rule.delay(domain, rule_id)

    # Defer queuing until after commit so the task sees the locked rule.
    transaction.on_commit(_queue_run)
def get_case_ids_for_messaging_rule(domain, case_type):
    """Return all (non-deleted) case ids of the given type in the domain."""
    if should_use_sql_backend(domain):
        # SQL domains are sharded; fan the query out across partitions.
        return run_query_across_partitioned_databases(
            CommCareCaseSQL,
            Q(domain=domain, type=case_type, deleted=False),
            values=['case_id'],
        )
    return CaseAccessors(domain).get_case_ids_in_domain(case_type)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE)
def set_rule_complete(rule_id):
    # Unlock the rule for editing, then mark progress complete so the run
    # shows as finished.
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=False)
    MessagingRuleProgressHelper(rule_id).set_rule_complete()
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_RULE_QUEUE, acks_late=True)
def run_messaging_rule(domain, rule_id):
    """Fan out one sync task per case for the given messaging rule."""
    rule = _get_cached_rule(domain, rule_id)
    if rule is None:
        return

    progress_helper = MessagingRuleProgressHelper(rule_id)
    case_count = 0
    for case_id in get_case_ids_for_messaging_rule(domain, rule.case_type):
        sync_case_for_messaging_rule.delay(domain, case_id, rule_id)
        case_count += 1
        # Refresh the running total every 1000 cases so progress reporting
        # stays roughly accurate while we're still fanning out.
        if case_count % 1000 == 0:
            progress_helper.set_total_case_count(case_count)
    progress_helper.set_total_case_count(case_count)

    # Queue the completion task last: it will run at about the time the final
    # per-case tasks finish, which marks the rule complete without needing
    # stored task results or a celery chord.
    set_rule_complete.delay(rule_id)
| 39.419118
| 102
| 0.76814
|
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.data_interfaces.models import AutomaticUpdateRule
from corehq.apps.sms import tasks as sms_tasks
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.form_processor.utils import should_use_sql_backend
from corehq.messaging.scheduling.tasks import delete_schedule_instances_for_cases
from corehq.messaging.scheduling.util import utcnow
from corehq.messaging.util import MessagingRuleProgressHelper, use_phone_entries
from corehq.sql_db.util import run_query_across_partitioned_databases
from corehq.toggles import REMINDERS_MIGRATION_IN_PROGRESS
from corehq.util.celery_utils import no_result_task
from corehq.util.datadog.utils import case_load_counter
from dimagi.utils.couch import CriticalSection
from django.conf import settings
from django.db.models import Q
from django.db import transaction
def get_sync_key(case_id):
return 'sync-case-for-messaging-%s' % case_id
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging(self, domain, case_id):
if REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
sync_case_for_messaging.apply_async([domain, case_id], countdown=60)
return
try:
with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
_sync_case_for_messaging(domain, case_id)
except Exception as e:
self.retry(exc=e)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging_rule(self, domain, case_id, rule_id):
try:
with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
_sync_case_for_messaging_rule(domain, case_id, rule_id)
except Exception as e:
self.retry(exc=e)
def _sync_case_for_messaging(domain, case_id):
try:
case = CaseAccessors(domain).get_case(case_id)
sms_tasks.clear_case_caches(case)
except CaseNotFound:
case = None
case_load_counter("messaging_sync", domain)()
if case is None or case.is_deleted:
sms_tasks.delete_phone_numbers_for_owners([case_id])
delete_schedule_instances_for_cases(domain, [case_id])
return
if use_phone_entries():
sms_tasks._sync_case_phone_number(case)
rules = AutomaticUpdateRule.by_domain_cached(case.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)
for rule in rules_by_case_type.get(case.type, []):
rule.run_rule(case, utcnow())
def _get_cached_rule(domain, rule_id):
rules = AutomaticUpdateRule.by_domain_cached(domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
rules = [rule for rule in rules if rule.pk == rule_id]
if len(rules) != 1:
return None
return rules[0]
def _sync_case_for_messaging_rule(domain, case_id, rule_id):
case_load_counter("messaging_rule_sync", domain)()
case = CaseAccessors(domain).get_case(case_id)
rule = _get_cached_rule(domain, rule_id)
if rule:
rule.run_rule(case, utcnow())
MessagingRuleProgressHelper(rule_id).increment_current_case_count()
def initiate_messaging_rule_run(domain, rule_id):
MessagingRuleProgressHelper(rule_id).set_initial_progress()
AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=True)
transaction.on_commit(lambda: run_messaging_rule.delay(domain, rule_id))
def get_case_ids_for_messaging_rule(domain, case_type):
if not should_use_sql_backend(domain):
return CaseAccessors(domain).get_case_ids_in_domain(case_type)
else:
return run_query_across_partitioned_databases(
CommCareCaseSQL,
Q(domain=domain, type=case_type, deleted=False),
values=['case_id']
)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE)
def set_rule_complete(rule_id):
AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=False)
MessagingRuleProgressHelper(rule_id).set_rule_complete()
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_RULE_QUEUE, acks_late=True)
def run_messaging_rule(domain, rule_id):
rule = _get_cached_rule(domain, rule_id)
if not rule:
return
total_count = 0
progress_helper = MessagingRuleProgressHelper(rule_id)
for case_id in get_case_ids_for_messaging_rule(domain, rule.case_type):
sync_case_for_messaging_rule.delay(domain, case_id, rule_id)
total_count += 1
if total_count % 1000 == 0:
progress_helper.set_total_case_count(total_count)
progress_helper.set_total_case_count(total_count)
set_rule_complete.delay(rule_id)
| true
| true
|
f71456a563d2f1c851ebfeff59b72638c5277020
| 17,730
|
py
|
Python
|
imageio_ffmpeg/_io.py
|
One-sixth/imageio-ffmpeg
|
888dace44a2160395cd88c577d542fe820086aa0
|
[
"BSD-2-Clause"
] | null | null | null |
imageio_ffmpeg/_io.py
|
One-sixth/imageio-ffmpeg
|
888dace44a2160395cd88c577d542fe820086aa0
|
[
"BSD-2-Clause"
] | null | null | null |
imageio_ffmpeg/_io.py
|
One-sixth/imageio-ffmpeg
|
888dace44a2160395cd88c577d542fe820086aa0
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import time
import signal
import subprocess
from ._utils import get_ffmpeg_exe, logger
from ._parsing import LogCatcher, parse_ffmpeg_header, cvsecs
ISWIN = sys.platform.startswith("win")
exe = None
def _get_exe():
    """Return the path to the ffmpeg executable, resolving it only once.

    The result is memoized in the module-level `exe` global so repeated
    calls (one per read/write invocation) don't re-run the lookup.
    """
    global exe
    if exe is None:
        exe = get_ffmpeg_exe()
    return exe
def count_frames_and_secs(path):
    """
    Get the number of frames and number of seconds for the given video
    file. Note that this operation can be quite slow for large files.

    Disclaimer: I've seen this produce different results from actually reading
    the frames with older versions of ffmpeg (2.x). Therefore I cannot say
    with 100% certainty that the returned values are always exact.

    Parameters:
        path (str): path of the video file to inspect.

    Returns:
        (nframes, nsecs) tuple parsed from ffmpeg's final progress line.

    Raises:
        RuntimeError: if ffmpeg fails or no frame count can be parsed.
    """
    # https://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg
    assert isinstance(path, str), "Video path must be a string"

    # Decode-and-discard: copy the video stream to the null muxer so ffmpeg
    # reports the total frame/time counts without writing anything.
    cmd = [_get_exe(), "-i", path, "-map", "0:v:0", "-c", "copy", "-f", "null", "-"]
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=ISWIN)
    except subprocess.CalledProcessError as err:
        out = err.output.decode(errors="ignore")
        # BUG FIX: the error message previously misspelled "FFMPEG" as "FFMEG".
        raise RuntimeError("FFMPEG call failed with {}:\n{}".format(err.returncode, out))

    # Note that other than with the subprocess calls below, ffmpeg wont hang here.
    # Worst case Python will stop/crash and ffmpeg will continue running until done.

    # Scan the output backwards: the last "frame=... time=..." progress line
    # holds the final totals.
    nframes = nsecs = None
    for line in reversed(out.splitlines()):
        if line.startswith(b"frame="):
            line = line.decode(errors="ignore")
            i = line.find("frame=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nframes = int(s)
            i = line.find("time=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nsecs = cvsecs(*s.split(":"))
            return nframes, nsecs

    raise RuntimeError("Could not get number of frames")  # pragma: no cover
def read_frames(path, pix_fmt="rgb24", bpp=3, input_params=None, output_params=None):
    """
    Create a generator to iterate over the frames in a video file.

    It first yields a small metadata dictionary that contains:

    * ffmpeg_version: the ffmpeg version is use (as a string).
    * codec: a hint about the codec used to encode the video, e.g. "h264"
    * source_size: the width and height of the encoded video frames
    * size: the width and height of the frames that will be produced
    * fps: the frames per second. Can be zero if it could not be detected.
    * duration: duration in seconds. Can be zero if it could not be detected.

    After that, it yields frames until the end of the video is reached. Each
    frame is a bytes object.

    This function makes no assumptions about the number of frames in
    the data. For one because this is hard to predict exactly, but also
    because it may depend on the provided output_params. If you want
    to know the number of frames in a video file, use count_frames_and_secs().
    It is also possible to estimate the number of frames from the fps and
    duration, but note that even if both numbers are present, the resulting
    value is not always correct.

    Example:

        gen = read_frames(path)
        meta = gen.__next__()
        for frame in gen:
            print(len(frame))

    Parameters:
        path (str): the file to write to.
        pix_fmt (str): the pixel format of the frames to be read.
            The default is "rgb24" (frames are uint8 RGB images).
        bpp (int): The number of bytes per pixel in the output frames.
            This depends on the given pix_fmt. Default is 3 (RGB).
        input_params (list): Additional ffmpeg input command line parameters.
        output_params (list): Additional ffmpeg output command line parameters.
    """

    # ----- Input args
    assert isinstance(path, str), "Video path must be a string"
    # Note: Dont check whether it exists. The source could be e.g. a camera.

    pix_fmt = pix_fmt or "rgb24"
    bpp = bpp or 3
    input_params = input_params or []
    output_params = output_params or []

    assert isinstance(pix_fmt, str), "pix_fmt must be a string"
    assert isinstance(bpp, int), "bpp must be an int"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"

    # ----- Prepare

    pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"]

    cmd = [_get_exe()]
    cmd += input_params + ["-i", path]
    cmd += pre_output_params + output_params + ["-"]

    # BUG FIX: the command was previously collapsed into a single string with
    # `cmd = ' '.join(cmd)` before being passed to Popen. With shell=False
    # (every non-Windows platform, since shell=ISWIN) Popen treats that whole
    # string as the executable name, which always fails; on Windows a joined
    # shell string also breaks for paths containing spaces. Pass the argument
    # list directly instead (this also matches write_frames below, which only
    # joins the command for logging).
    p = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=ISWIN,
    )

    log_catcher = LogCatcher(p.stderr)

    try:

        # ----- Load meta data

        # Wait for the log catcher to get the meta information
        etime = time.time() + 10.0
        while (not log_catcher.header) and time.time() < etime:
            time.sleep(0.01)

        # Check whether we have the information
        if not log_catcher.header:
            err2 = log_catcher.get_text(0.2)
            fmt = "Could not load meta information\n=== stderr ===\n{}"
            raise IOError(fmt.format(err2))
        elif "No such file or directory" in log_catcher.header:
            raise IOError("{} not found! Wrong path?".format(path))

        meta = parse_ffmpeg_header(log_catcher.header)
        yield meta

        # ----- Read frames

        w, h = meta["size"]
        framesize = w * h * bpp
        framenr = 0

        while True:
            framenr += 1
            try:
                # Accumulate exactly one frame's worth of raw bytes; stdout
                # reads may return short, so loop until full or EOF.
                bb = bytes()
                while len(bb) < framesize:
                    extra_bytes = p.stdout.read(framesize - len(bb))
                    if not extra_bytes:
                        if len(bb) == 0:
                            # Clean EOF on a frame boundary: we're done.
                            return
                        else:
                            raise RuntimeError(
                                "End of file reached before full frame could be read."
                            )
                    bb += extra_bytes
                yield bb
            except Exception as err:
                err1 = str(err)
                err2 = log_catcher.get_text(0.4)
                fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}"
                raise RuntimeError(fmt.format(framenr, err1, err2))

    finally:
        # Generators are automatically closed when they get deleted,
        # so this code is almost guaranteed to run.

        if p.poll() is None:

            # Ask ffmpeg to quit
            try:
                if True:
                    p.communicate(b"q")
                else:  # pragma: no cover
                    # I read somewhere that modern ffmpeg on Linux prefers a
                    # "ctrl-c", but tests so far suggests sending q is better.
                    p.send_signal(signal.SIGINT)
            except Exception as err:  # pragma: no cover
                logger.warning("Error while attempting stop ffmpeg: " + str(err))

            # Wait for it to stop
            etime = time.time() + 1.5
            while time.time() < etime and p.poll() is None:
                time.sleep(0.01)

            # Grr, we have to kill it
            if p.poll() is None:  # pragma: no cover
                logger.warning("We had to kill ffmpeg to stop it.")
                p.kill()
def write_frames(
    path,
    size,
    pix_fmt_in="rgb24",
    pix_fmt_out="yuv420p",
    fps=16,
    quality=5,
    bitrate=None,
    codec=None,
    macro_block_size=16,
    ffmpeg_log_level="warning",
    ffmpeg_timeout=20.0,
    input_params=None,
    output_params=None,
):
    """
    Create a generator to write frames (bytes objects) into a video file.

    The frames are written by using the generator's `send()` method. Frames
    can be anything that can be written to a file. Typically these are
    bytes objects, but c-contiguous Numpy arrays also work.

    Example:

        gen = write_frames(path, size)
        gen.send(None)  # seed the generator
        for frame in frames:
            gen.send(frame)
        gen.close()  # don't forget this

    Parameters:
        path (str): the file to write to.
        size (tuple): the width and height of the frames.
        pix_fmt_in (str): the pixel format of incoming frames.
            E.g. "gray", "gray8a", "rgb24", or "rgba". Default "rgb24".
        pix_fmt_out (str): the pixel format to store frames. Default yuv420p".
        fps (float): The frames per second. Default 16.
        quality (float): A measure for quality between 1 and 10 (inclusive;
            enforced by an assert below). Default 5. Ignored if bitrate
            is given.
        bitrate (str): The bitrate, e.g. "192k". The defaults are pretty good.
        codec (str): The codec. Default "libx264" (or "msmpeg4" for .wmv).
        macro_block_size (int): You probably want to align the size of frames
            to this value to avoid image resizing. Default 16. Can be set
            to 1 to avoid block alignment, though this is not recommended.
        ffmpeg_log_level (str): The ffmpeg logging level. Default "warning".
        ffmpeg_timeout (float): Timeout in seconds to wait for ffmpeg process
            to finish. Value of 0 will wait forever. The time that ffmpeg needs
            depends on CPU speed, compression, and frame size. Default 20.0.
        input_params (list): Additional ffmpeg input command line parameters.
        output_params (list): Additional ffmpeg output command line parameters.
    """

    # ----- Input args

    assert isinstance(path, str), "Video path must be a string"
    # The pix_fmt_out yuv420p is the best for the outpur to work in
    # QuickTime and most other players. These players only support
    # the YUV planar color space with 4:2:0 chroma subsampling for
    # H.264 video. Otherwise, depending on the source, ffmpeg may
    # output to a pixel format that may be incompatible with these
    # players. See https://trac.ffmpeg.org/wiki/Encode/H.264#Encodingfordumbplayers

    # Falsy values (None, 0, "") are normalized to the documented defaults.
    pix_fmt_in = pix_fmt_in or "rgb24"
    pix_fmt_out = pix_fmt_out or "yuv420p"
    fps = fps or 16
    quality = quality or 5
    # bitrate, codec, macro_block_size can all be None or ...
    macro_block_size = macro_block_size or 16
    ffmpeg_log_level = ffmpeg_log_level or "warning"
    input_params = input_params or []
    output_params = output_params or []

    floatish = float, int
    if isinstance(size, (tuple, list)):
        assert len(size) == 2, "size must be a 2-tuple"
        assert isinstance(size[0], int) and isinstance(
            size[1], int
        ), "size must be ints"
        sizestr = "{:d}x{:d}".format(*size)
    # elif isinstance(size, str):
    #     assert "x" in size, "size as string must have format NxM"
    #     sizestr = size
    else:
        assert False, "size must be str or tuple"
    assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str"
    assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str"
    assert isinstance(fps, floatish), "fps must be float"
    assert isinstance(quality, floatish), "quality must be float"
    assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive"
    assert isinstance(macro_block_size, int), "macro_block_size must be int"
    assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str"
    assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"

    # ----- Prepare

    # Get parameters
    default_codec = "libx264"
    if path.lower().endswith(".wmv"):
        # This is a safer default codec on windows to get videos that
        # will play in powerpoint and other apps. H264 is not always
        # available on windows.
        default_codec = "msmpeg4"
    codec = codec or default_codec

    # Get command
    cmd = [_get_exe(), "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", sizestr]
    cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params
    cmd += ["-i", "-"]
    cmd += ["-an", "-vcodec", codec, "-pix_fmt", pix_fmt_out]

    # Add fixed bitrate or variable bitrate compression flags
    if bitrate is not None:
        cmd += ["-b:v", str(bitrate)]
    elif quality is not None:  # NOTE(review): after `quality or 5` above,
        # quality can no longer be None here, so this branch always runs
        # when bitrate is None — the guard looks vestigial; confirm.
        quality = 1 - quality / 10.0
        if codec == "libx264":
            # crf ranges 0 to 51, 51 being worst.
            quality = int(quality * 51)
            cmd += ["-crf", str(quality)]  # for h264
        else:  # Many codecs accept q:v
            # q:v range can vary, 1-31, 31 being worst
            # But q:v does not always have the same range.
            # May need a way to find range for any codec.
            quality = int(quality * 30) + 1
            cmd += ["-qscale:v", str(quality)]  # for others

    # Note, for most codecs, the image dimensions must be divisible by
    # 16 the default for the macro_block_size is 16. Check if image is
    # divisible, if not have ffmpeg upsize to nearest size and warn
    # user they should correct input image if this is not desired.
    if macro_block_size > 1:
        if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0:
            out_w = size[0]
            out_h = size[1]
            if size[0] % macro_block_size > 0:
                out_w += macro_block_size - (size[0] % macro_block_size)
            if size[1] % macro_block_size > 0:
                out_h += macro_block_size - (size[1] % macro_block_size)
            cmd += ["-vf", "scale={}:{}".format(out_w, out_h)]
            logger.warning(
                "IMAGEIO FFMPEG_WRITER WARNING: input image is not"
                " divisible by macro_block_size={}, resizing from {} "
                "to {} to ensure video compatibility with most codecs "
                "and players. To prevent resizing, make your input "
                "image divisible by the macro_block_size or set the "
                "macro_block_size to 1 (risking incompatibility).".format(
                    macro_block_size, size[:2], (out_w, out_h)
                )
            )

    # Rather than redirect stderr to a pipe, just set minimal
    # output from ffmpeg by default. That way if there are warnings
    # the user will see them.
    cmd += ["-v", ffmpeg_log_level]
    cmd += output_params
    cmd.append(path)
    # Joined form is only used for logging and error messages below.
    cmd_str = " ".join(cmd)
    if any(
        [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")]
    ):
        logger.info("RUNNING FFMPEG COMMAND: " + cmd_str)

    # Launch process
    p = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, shell=ISWIN
    )

    # For Windows, set `shell=True` in sp.Popen to prevent popup
    # of a command line window in frozen applications.
    # Note that directing stderr to a pipe on windows will cause ffmpeg
    # to hang if the buffer is not periodically cleared using
    # StreamCatcher or other means.
    # Setting bufsize to 0 or a small value does not seem to have much effect
    # (at least on Windows). I suspect that ffmpeg buffers  multiple frames
    # (before encoding in a batch).

    # ----- Write frames

    try:
        # Just keep going until the generator.close() is called (raises GeneratorExit).
        # This could also happen when the generator is deleted somehow.
        nframes = 0
        while True:

            # Get frame
            bb = (yield)

            # framesize = size[0] * size[1] * depth * bpp
            # assert isinstance(bb, bytes), "Frame must be send as bytes"
            # assert len(bb) == framesize, "Frame must have width*height*depth*bpp bytes"
            # Actually, we accept anything that can be written to file.
            # This e.g. allows writing numpy arrays without having to make a copy ...

            # Write
            try:
                p.stdin.write(bb)
            except Exception as err:
                # Show the command and stderr from pipe
                msg = (
                    "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR "
                    "OUTPUT:\n".format(err, cmd_str)
                )
                raise IOError(msg)

            nframes += 1

    except GeneratorExit:
        if nframes == 0:
            logger.warning("No frames have been written; the written video is invalid.")

    finally:

        if p.poll() is None:

            # Ask ffmpeg to quit - and wait for it to finish writing the file.
            # Depending on the frame size and encoding this can take a few
            # seconds (sometimes 10-20). Since a user may get bored and hit
            # Ctrl-C, we wrap this in a try-except.
            waited = False
            try:
                try:
                    # Closing stdin signals EOF; ffmpeg then finalizes the file.
                    p.stdin.close()
                except Exception:  # pragma: no cover
                    pass
                etime = time.time() + ffmpeg_timeout
                while (not ffmpeg_timeout or time.time() < etime) and p.poll() is None:
                    time.sleep(0.01)
                    waited = True
            finally:
                # Grr, we have to kill it
                if p.poll() is None:  # pragma: no cover
                    more = " Consider increasing ffmpeg_timeout." if waited else ""
                    logger.warning("We had to kill ffmpeg to stop it." + more)
                    p.kill()
| 39.4
| 89
| 0.598759
|
import sys
import time
import signal
import subprocess
from ._utils import get_ffmpeg_exe, logger
from ._parsing import LogCatcher, parse_ffmpeg_header, cvsecs
ISWIN = sys.platform.startswith("win")
exe = None
def _get_exe():
global exe
if exe is None:
exe = get_ffmpeg_exe()
return exe
def count_frames_and_secs(path):
assert isinstance(path, str), "Video path must be a string"
cmd = [_get_exe(), "-i", path, "-map", "0:v:0", "-c", "copy", "-f", "null", "-"]
try:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=ISWIN)
except subprocess.CalledProcessError as err:
out = err.output.decode(errors="ignore")
raise RuntimeError("FFMEG call failed with {}:\n{}".format(err.returncode, out))
nframes = nsecs = None
for line in reversed(out.splitlines()):
if line.startswith(b"frame="):
line = line.decode(errors="ignore")
i = line.find("frame=")
if i >= 0:
s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
nframes = int(s)
i = line.find("time=")
if i >= 0:
s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
nsecs = cvsecs(*s.split(":"))
return nframes, nsecs
raise RuntimeError("Could not get number of frames")
def read_frames(path, pix_fmt="rgb24", bpp=3, input_params=None, output_params=None):
    """Generator that yields a video's meta-data dict, then its raw frames.

    The first item yielded is the metadata parsed from ffmpeg's stderr
    header (including ``"size"``); every subsequent item is one frame as a
    ``bytes`` object of exactly ``width * height * bpp`` bytes.

    Parameters
    ----------
    path : str
        Path of the input video file.
    pix_fmt : str
        Pixel format of the produced raw frames (default "rgb24").
    bpp : int
        Bytes per pixel; must match ``pix_fmt``.
    input_params, output_params : list
        Extra ffmpeg arguments placed before / after the ``-i`` input.
    """
    assert isinstance(path, str), "Video path must be a string"
    pix_fmt = pix_fmt or "rgb24"
    bpp = bpp or 3
    input_params = input_params or []
    output_params = output_params or []
    assert isinstance(pix_fmt, str), "pix_fmt must be a string"
    assert isinstance(bpp, int), "bpp must be an int"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"
    pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"]
    cmd = [_get_exe()]
    cmd += input_params + ["-i", path]
    cmd += pre_output_params + output_params + ["-"]
    # BUG FIX: the command was previously collapsed with ' '.join(cmd).
    # With shell=False (every non-Windows platform, since ISWIN is False
    # there) Popen then treats the whole string as the executable name and
    # fails. Passing the argument list works on all platforms.
    p = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=ISWIN,
    )
    # LogCatcher drains stderr on a thread (prevents the pipe from filling
    # up, which would block ffmpeg) and collects the header text.
    log_catcher = LogCatcher(p.stderr)
    try:
        # Wait up to 10 s for ffmpeg to emit its header on stderr.
        etime = time.time() + 10.0
        while (not log_catcher.header) and time.time() < etime:
            time.sleep(0.01)
        if not log_catcher.header:
            err2 = log_catcher.get_text(0.2)
            fmt = "Could not load meta information\n=== stderr ===\n{}"
            raise IOError(fmt.format(err2))
        elif "No such file or directory" in log_catcher.header:
            raise IOError("{} not found! Wrong path?".format(path))
        meta = parse_ffmpeg_header(log_catcher.header)
        yield meta
        w, h = meta["size"]
        framesize = w * h * bpp
        framenr = 0
        while True:
            framenr += 1
            try:
                bb = bytes()
                # stdout.read() may return short reads; loop until a full
                # frame has been assembled.
                while len(bb) < framesize:
                    extra_bytes = p.stdout.read(framesize - len(bb))
                    if not extra_bytes:
                        if len(bb) == 0:
                            # Clean end-of-stream on a frame boundary.
                            return
                        else:
                            raise RuntimeError(
                                "End of file reached before full frame could be read."
                            )
                    bb += extra_bytes
                yield bb
            except Exception as err:
                err1 = str(err)
                err2 = log_catcher.get_text(0.4)
                fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}"
                raise RuntimeError(fmt.format(framenr, err1, err2))
    finally:
        if p.poll() is None:
            # Ask ffmpeg to quit gracefully before resorting to kill().
            # (The original also carried a dead `else: send_signal(SIGINT)`
            # branch behind `if True:`; removed as unreachable.)
            try:
                p.communicate(b"q")
            except Exception as err:
                logger.warning("Error while attempting stop ffmpeg: " + str(err))
            etime = time.time() + 1.5
            while time.time() < etime and p.poll() is None:
                time.sleep(0.01)
            if p.poll() is None:
                logger.warning("We had to kill ffmpeg to stop it.")
                p.kill()
def write_frames(
    path,
    size,
    pix_fmt_in="rgb24",
    pix_fmt_out="yuv420p",
    fps=16,
    quality=5,
    bitrate=None,
    codec=None,
    macro_block_size=16,
    ffmpeg_log_level="warning",
    ffmpeg_timeout=20.0,
    input_params=None,
    output_params=None,
):
    """Generator coroutine that writes raw frames to a video file via ffmpeg.

    Prime the generator, then ``send()`` each frame as a bytes-like object of
    ``width * height * bpp`` bytes; ``close()`` the generator to finalize the
    file.

    Parameters
    ----------
    path : str
        Output video filename.
    size : tuple (width, height)
        Frame size in pixels (both ints).
    pix_fmt_in, pix_fmt_out : str
        Input (raw) and output pixel formats.
    fps : float
        Frames per second.
    quality : float
        1-10 scale (10 is best); ignored when ``bitrate`` is given.
    bitrate : str or None
        Explicit bitrate; overrides ``quality`` when set.
    codec : str or None
        Output codec; defaults to libx264 (msmpeg4 for .wmv files).
    macro_block_size : int
        Frame dimensions are upsized to a multiple of this (1 disables).
    ffmpeg_log_level : str
        Value for ffmpeg's ``-v`` flag.
    ffmpeg_timeout : float
        Seconds to wait for ffmpeg to finish on close (0 = wait forever).
    input_params, output_params : list
        Extra ffmpeg arguments.
    """
    assert isinstance(path, str), "Video path must be a string"
    # BUG FIX: this line previously read `fmt_in or "rgb24"`, which referenced
    # an undefined name (NameError at call time) and discarded the result;
    # the intent is to default pix_fmt_in like the parameters below.
    pix_fmt_in = pix_fmt_in or "rgb24"
    pix_fmt_out = pix_fmt_out or "yuv420p"
    fps = fps or 16
    quality = quality or 5
    macro_block_size = macro_block_size or 16
    ffmpeg_log_level = ffmpeg_log_level or "warning"
    input_params = input_params or []
    output_params = output_params or []
    floatish = float, int
    if isinstance(size, (tuple, list)):
        assert len(size) == 2, "size must be a 2-tuple"
        assert isinstance(size[0], int) and isinstance(
            size[1], int
        ), "size must be ints"
        sizestr = "{:d}x{:d}".format(*size)
    else:
        # NOTE(review): the message mentions str, but a str size is not
        # actually handled above -- only tuples/lists are accepted.
        assert False, "size must be str or tuple"
    assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str"
    assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str"
    assert isinstance(fps, floatish), "fps must be float"
    assert isinstance(quality, floatish), "quality must be float"
    assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive"
    assert isinstance(macro_block_size, int), "macro_block_size must be int"
    assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str"
    assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"
    # Pick a default codec based on the container.
    default_codec = "libx264"
    if path.lower().endswith(".wmv"):
        default_codec = "msmpeg4"
    codec = codec or default_codec
    # Build the ffmpeg command: raw frames on stdin, encoded video to `path`.
    cmd = [_get_exe(), "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", sizestr]
    cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params
    cmd += ["-i", "-"]
    cmd += ["-an", "-vcodec", codec, "-pix_fmt", pix_fmt_out]
    if bitrate is not None:
        cmd += ["-b:v", str(bitrate)]
    elif quality is not None:
        # Map the 1..10 user scale onto the codec's quality scale.
        quality = 1 - quality / 10.0
        if codec == "libx264":
            # crf ranges 0 to 51, 51 being worst.
            quality = int(quality * 51)
            cmd += ["-crf", str(quality)]  # for h264
        else:  # Many codecs accept q:v
            # q:v range can vary, 1-31, 31 being worst
            # But q:v does not always have the same range.
            # May need a way to find range for any codec.
            quality = int(quality * 30) + 1
            cmd += ["-qscale:v", str(quality)]  # for others
    # Note, for most codecs, the image dimensions must be divisible by
    # 16 the default for the macro_block_size is 16. Check if image is
    # divisible, if not have ffmpeg upsize to nearest size and warn
    # user they should correct input image if this is not desired.
    if macro_block_size > 1:
        if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0:
            out_w = size[0]
            out_h = size[1]
            if size[0] % macro_block_size > 0:
                out_w += macro_block_size - (size[0] % macro_block_size)
            if size[1] % macro_block_size > 0:
                out_h += macro_block_size - (size[1] % macro_block_size)
            cmd += ["-vf", "scale={}:{}".format(out_w, out_h)]
            logger.warning(
                "IMAGEIO FFMPEG_WRITER WARNING: input image is not"
                " divisible by macro_block_size={}, resizing from {} "
                "to {} to ensure video compatibility with most codecs "
                "and players. To prevent resizing, make your input "
                "image divisible by the macro_block_size or set the "
                "macro_block_size to 1 (risking incompatibility).".format(
                    macro_block_size, size[:2], (out_w, out_h)
                )
            )
    # Rather than redirect stderr to a pipe, just set minimal
    # output from ffmpeg by default. That way if there are warnings
    # the user will see them.
    cmd += ["-v", ffmpeg_log_level]
    cmd += output_params
    cmd.append(path)
    cmd_str = " ".join(cmd)
    if any(
        [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")]
    ):
        logger.info("RUNNING FFMPEG COMMAND: " + cmd_str)
    # Launch process
    p = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, shell=ISWIN
    )
    # For Windows, set `shell=True` in sp.Popen to prevent popup
    # of a command line window in frozen applications.
    # Note that directing stderr to a pipe on windows will cause ffmpeg
    # to hang if the buffer is not periodically cleared using
    # StreamCatcher or other means.
    # Setting bufsize to 0 or a small value does not seem to have much effect
    # (at least on Windows). I suspect that ffmpeg buffers # multiple frames
    # (before encoding in a batch).
    # ----- Write frames
    try:
        # Just keep going until the generator.close() is called (raises GeneratorExit).
        # This could also happen when the generator is deleted somehow.
        nframes = 0
        while True:
            # Get frame
            bb = (yield)
            # framesize = size[0] * size[1] * depth * bpp
            # assert isinstance(bb, bytes), "Frame must be send as bytes"
            # assert len(bb) == framesize, "Frame must have width*height*depth*bpp bytes"
            # Actually, we accept anything that can be written to file.
            # This e.g. allows writing numpy arrays without having to make a copy ...
            # Write
            try:
                p.stdin.write(bb)
            except Exception as err:
                # Show the command and stderr from pipe
                msg = (
                    "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR "
                    "OUTPUT:\n".format(err, cmd_str)
                )
                raise IOError(msg)
            nframes += 1
    except GeneratorExit:
        if nframes == 0:
            logger.warning("No frames have been written; the written video is invalid.")
    finally:
        if p.poll() is None:
            # Ask ffmpeg to quit - and wait for it to finish writing the file.
            # Depending on the frame size and encoding this can take a few
            # seconds (sometimes 10-20). Since a user may get bored and hit
            # Ctrl-C, we wrap this in a try-except.
            waited = False
            try:
                try:
                    p.stdin.close()
                except Exception:  # pragma: no cover
                    pass
                etime = time.time() + ffmpeg_timeout
                while (not ffmpeg_timeout or time.time() < etime) and p.poll() is None:
                    time.sleep(0.01)
                    waited = True
            finally:
                # Grr, we have to kill it
                if p.poll() is None:  # pragma: no cover
                    more = " Consider increasing ffmpeg_timeout." if waited else ""
                    logger.warning("We had to kill ffmpeg to stop it." + more)
                    p.kill()
| true
| true
|
f71457963f04786f6d22ea632036dbc649cd5930
| 6,561
|
py
|
Python
|
bbvSimMatrixGen.py
|
spencerhance/bbv-similarity-matrix
|
0818c6b7e15c408ef261ac6b55a4cb2e6a2f3bfc
|
[
"MIT"
] | 1
|
2021-09-03T11:31:10.000Z
|
2021-09-03T11:31:10.000Z
|
bbvSimMatrixGen.py
|
spencerhance/bbv-similarity-matrix
|
0818c6b7e15c408ef261ac6b55a4cb2e6a2f3bfc
|
[
"MIT"
] | null | null | null |
bbvSimMatrixGen.py
|
spencerhance/bbv-similarity-matrix
|
0818c6b7e15c408ef261ac6b55a4cb2e6a2f3bfc
|
[
"MIT"
] | 1
|
2017-09-16T17:19:28.000Z
|
2017-09-16T17:19:28.000Z
|
#!/usr/bin/env python
#Created by Spencer Hance and Trevor Gale on January 18th 2015
#Northeastern University Computer Architecture Research Group
#Licensed under MIT License
import sys
import matplotlib.pyplot as plt
import numpy as np
from pylab import cm
import re
import random
from scipy.misc import comb
import argparse
import warnings
def parseBBV(input_filename):
    """Parses a Basic Block Vector file and converts the data
    into a Numpy array of shape (num_intervals, num_basic_blocks).

    Each non-comment line holds whitespace-separated ":<block_id>:<count>"
    tokens; block ids are 1-based in the file and mapped to 0-based columns.
    """
    with open(input_filename, 'r') as f:
        input_list = []
        # Opens file into a list
        for line in f.readlines():
            # Ignores BBV comments, which are any line that starts with a "#"
            if not line.strip().startswith('#'):
                input_list.append(line.split())
        # Removes empty list elements.
        # FIX: wrap in list() so len() and indexing work on Python 3,
        # where filter() returns a lazy iterator.
        input_list = list(filter(None, input_list))
        num_intervals = len(input_list)
        # Determines the total number of basic blocks (highest id seen)
        max_list = []
        for line in input_list:
            for j in range(0, len(line)):
                # FIX: raw strings for regex patterns (avoids invalid
                # escape-sequence warnings on modern Python).
                m = re.search(r":(\d+):(\d+)", line[j])
                max_list.append(int(m.groups()[0]))
        num_bb = max(max_list)
        # Initializes array and adds basic block data
        bbv_array = np.zeros((num_intervals, num_bb))
        for i in range(0, num_intervals):
            for j in range(0, len(input_list[i])):
                m = re.search(r":(\d+):(\d+)", input_list[i][j])
                bbv_array[i, int(m.groups()[0])-1] = int(m.groups()[1])
        # Update user on current progress.
        # FIX: print as a function for Python 2/3 compatibility.
        print('Parsing Completed\n')
        return bbv_array
def reduceArray(bbv_array):
    """Project the BBV array onto 15 random dimensions.

    Builds a (num_basic_blocks x 15) projection matrix whose entries are
    drawn uniformly from [-1, 1], then applies it with a dot product to
    reduce each interval's vector to 15 dimensions.
    """
    n_rows = bbv_array.shape[1]
    projection = np.zeros((n_rows, 15))
    # Fill row-major with uniform random values in [-1, 1].
    for row in range(projection.shape[0]):
        for col in range(projection.shape[1]):
            projection[row, col] = random.uniform(-1, 1)
    # The dot product collapses the basic-block axis to 15 dimensions.
    return np.dot(bbv_array, projection)
def mDistCompute(a, b):
    """Return the Manhattan (L1) distance between two equal-length 1D
    sequences. This function is an inner function of mDist().
    """
    # Both inputs must have the same length; sum of element-wise
    # absolute differences.
    return sum(abs(x - y) for x, y in zip(a, b))
def mDist(bbv_array):
    """Compute pairwise Manhattan distances between the rows of bbv_array.

    Returns a (num_rows x num_rows) array whose upper triangle [i, j]
    (j > i) holds the L1 distance between rows i and j; the diagonal and
    lower triangle are left at zero.
    """
    # Determines the size of the array
    mDist_length = bbv_array.shape[0]
    # Initializes a new array to store distance values
    mDist_array = np.zeros((mDist_length, mDist_length))
    # Determines total number of steps for the progress bar.
    # FIX: scipy.misc.comb was removed from SciPy; n-choose-2 is just
    # n*(n-1)/2.  Clamp to >= 1 so a single-row input does not divide by 0.
    total_steps = float(max(mDist_length * (mDist_length - 1) // 2, 1))
    # Initializes step counter for progress bar
    step = 0
    # FIX: print as a function for Python 2/3 compatibility.
    print('Computing Manhattan Distances')
    for i in range(0, mDist_length):
        for j in range(1 + i, mDist_length):
            # Vectorized L1 distance between rows -- equivalent to
            # mDistCompute() but avoids a Python-level loop per pair.
            mDist_array[i, j] = np.sum(np.abs(bbv_array[i] - bbv_array[j]))
        # Calculations for progress counter (one update per outer row)
        step += len(range(1 + i, mDist_length))
        sys.stdout.write('\r')
        sys.stdout.write('Completion: ' +
                         str(int(round((step / total_steps) * 100))) + '%')
        sys.stdout.flush()
    print('\n')
    return mDist_array
def normMatrix(mDist_values):
    """Normalize a distance matrix to [0, 1] in place.

    Divides every entry by the maximum entry and returns the (mutated)
    input array, matching the original in-place semantics.
    """
    # Alias the input; the normalization mutates it in place.
    norm_array = mDist_values
    # Determines the largest distance to normalize to.
    # FIX: vectorized max instead of nested Python max() calls.
    max_val = norm_array.max()
    # Update user on current progress.
    # FIX: print as a function for Python 2/3 compatibility.
    print('Normalizing Matrix\n')
    # FIX: single vectorized in-place divide replaces the O(n^2) loops.
    norm_array /= max_val
    return norm_array
def plotNormData(norm_values, show=True):
    """Plot the normalized similarity matrix as a grayscale scatter plot.

    Parameters
    ----------
    norm_values: square numpy array with entries in [0, 1]
        Output of normMatrix(); only the upper triangle is populated.
    show: bool
        When True (default) display the figure with plt.show().
    """
    # Initialize lists for plt.scatter
    x, y, colors = [], [], []
    # Determines the height of the array for the graph's Y-Value
    yval = norm_values.shape[0]
    # The size of each point
    # Dividing by 4.5 usually provides enough granularity, however this should
    # be adjusted if a different resolution requirement is needed
    SIZE = yval/4.5
    # Update user on current progress.
    # FIX: print as a function for Python 2/3 compatibility.
    print('Plotting Norm Data\n')
    # Adds data to x, y, and colors lists (upper triangle only, j >= i)
    for i in range(0, yval):
        for j in range(i, yval):
            x.append(j)
            y.append(i)
            colors.append(norm_values[i,j])
    # Plots data with gray colormap and aligns both axes to 0
    plt.scatter(x, y, c = colors, cmap=cm.gray, s = SIZE)
    plt.xlim(0)
    plt.ylim(0)
    # Inverts y axis to show similarity accurately
    plt.gca().invert_yaxis()
    if show == True:
        plt.show()
def commandParser():
    """Uses the argparse module to parse command line options and run the
    similarity-matrix pipeline on the given BBV file.

    Options:
      -i file              input BBV file (required)
      -s/--simmatrix       create and display a similarity matrix
      -dr/--do-not-reduce  skip the random-projection dimensionality reduction
    """
    parser = argparse.ArgumentParser(description='Similarity Matrix Generator \
        for Basic Block Vectors')
    parser.add_argument('-i',dest='filename', required=True, help='input BBV file',
                        metavar='file')
    parser.add_argument('-s','--simmatrix', help='Create and display a similarity matrix' ,
                        action='store_true')
    parser.add_argument('-dr','--do-not-reduce',
                        help='Do not reduce input matrix for similarity matrix', action='store_true')
    args = parser.parse_args()
    # NOTE(review): unreachable in practice -- argparse exits before this
    # point when a required argument is missing.
    if not args.filename:
        # FIX: print as a function for Python 2/3 compatibility.
        print('Error: Not enough input arguments')
    if args.do_not_reduce:
        print('Starting Similarity Matrix Process (with unreduced array)\n')
        plotNormData(normMatrix(mDist(parseBBV(args.filename))))
    else:
        print('Starting Similarity Matrix Process\n')
        plotNormData(normMatrix(mDist(reduceArray(parseBBV(args.filename)))))
def main():
    """Main Function"""
    # Delegates straight to the CLI parser, which drives the whole pipeline.
    commandParser()
# Standard script entry guard: only run when executed directly.
if __name__ == '__main__':
    main()
| 31.242857
| 91
| 0.641518
|
import sys
import matplotlib.pyplot as plt
import numpy as np
from pylab import cm
import re
import random
from scipy.misc import comb
import argparse
import warnings
def parseBBV(input_filename):
"""Parses a Basic Block Vector and converts data
into a Numpy array
"""
with open(input_filename, 'r') as f:
input_list = []
for line in f.readlines():
if not line.strip().startswith('#'):
input_list.append(line.split())
input_list = filter(None, input_list)
num_intervals = len(input_list)
max_list = []
for line in input_list:
for j in range(0, len(line)):
m = re.search(":(\d+):(\d+)", line[j])
max_list.append(int(m.groups()[0]))
num_bb = max(max_list)
bbv_array = np.zeros((num_intervals, num_bb))
for i in range(0, num_intervals):
for j in range(0, len(input_list[i])):
m = re.search(":(\d+):(\d+)", input_list[i][j])
bbv_array[i, int(m.groups()[0])-1] = int(m.groups()[1])
print 'Parsing Completed\n'
return bbv_array
def reduceArray(bbv_array):
"""Takes in numpy array of bbv vectors and reduces dimensions to 15.
Returns the reduced array
"""
random_array = np.zeros((bbv_array.shape[1], 15))
for i in range(0, random_array.shape[0]):
for j in range(0, random_array.shape[1]):
random_array[i, j] = random.uniform(-1,1)
reduced_array = np.dot(bbv_array, random_array)
return reduced_array
def mDistCompute(a, b):
"""Takes in two 1D arrays and computes sum of
manhattan distances. This function is an inner function of mDist()
"""
sum_dist = 0
length = len(a)
for i in range(0, length):
sum_dist += abs(a[i]- b[i])
return sum_dist
def mDist(bbv_array):
"""Takes in bbv array and calls mDistCompute to compute
manhattan distance between the vectors. Returns an
array with differences.
"""
mDist_length = bbv_array.shape[0]
mDist_array = np.zeros((mDist_length, mDist_length))
total_steps = float(comb(mDist_length, 2, exact=True))
step = 0
print 'Computing Manhattan Distances'
for i in range(0, mDist_length):
for j in range(1+i, mDist_length):
sum_dist = mDistCompute(bbv_array[i], bbv_array[j])
mDist_array[i, j] = sum_dist
step += len(range(1+i, mDist_length))
sys.stdout.write('\r')
sys.stdout.write('Completion: ' + \
str(int(round((step/total_steps)*100))) + '%')
sys.stdout.flush()
print '\n'
return mDist_array
def normMatrix(mDist_values):
"""Takes in array of manhattan distance values and
returns the array normalized to the maximum value
"""
norm_array = mDist_values
max_val = max(max(l) for l in norm_array)
print 'Normalizing Matrix\n'
for i in range(0, norm_array.shape[0]):
for j in range(0, norm_array.shape[1]):
norm_array[i, j] /= max_val
return norm_array
def plotNormData(norm_values, show=True):
"""Takes in normalized values and plots
the data
"""
x, y, colors = [], [], []
yval = norm_values.shape[0]
# The size of each point
# Dividing by 4.5 usually provides enough granularity, however this should
# be adjusted if a different resolution requirement is needed
SIZE = yval/4.5
# Update user on current progress
print 'Plotting Norm Data\n'
#Adds data to x, y, and colors lists
for i in range(0, yval):
for j in range(i, yval):
x.append(j)
y.append(i)
colors.append(norm_values[i,j])
#Plots data with gray colormap and aligns both axes to 0
plt.scatter(x, y, c = colors, cmap=cm.gray, s = SIZE)
plt.xlim(0)
plt.ylim(0)
#Inverts y axis to show similarity accurately
plt.gca().invert_yaxis()
if show == True:
plt.show()
def commandParser():
"""Uses argparse module to parse command line options
"""
parser = argparse.ArgumentParser(description='Similarity Matrix Generator \
for Basic Block Vectors')
parser.add_argument('-i',dest='filename', required=True, help='input BBV file',
metavar='file')
parser.add_argument('-s','--simmatrix', help='Create and display a similarity matrix' ,
action='store_true')
parser.add_argument('-dr','--do-not-reduce',
help='Do not reduce input matrix for similarity matrix', action='store_true')
args = parser.parse_args()
if not args.filename:
print 'Error: Not enough input arguments'
if args.do_not_reduce:
print 'Starting Similarity Matrix Process (with unreduced array)\n'
plotNormData(normMatrix(mDist(parseBBV(args.filename))))
else:
print 'Starting Similarity Matrix Process\n'
plotNormData(normMatrix(mDist(reduceArray(parseBBV(args.filename)))))
def main():
"""Main Function"""
commandParser()
if __name__ == '__main__':
main()
| false
| true
|
f714592c49e276e7f9b6598977e5a6108553973c
| 1,014
|
py
|
Python
|
django_admin_demo/urls.py
|
noahzaozao/django_admin_demo
|
631010bb8cd14c8ccf48b46f154d78c2e7b5887a
|
[
"Apache-2.0"
] | null | null | null |
django_admin_demo/urls.py
|
noahzaozao/django_admin_demo
|
631010bb8cd14c8ccf48b46f154d78c2e7b5887a
|
[
"Apache-2.0"
] | null | null | null |
django_admin_demo/urls.py
|
noahzaozao/django_admin_demo
|
631010bb8cd14c8ccf48b46f154d78c2e7b5887a
|
[
"Apache-2.0"
] | null | null | null |
"""django_admin_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from web.views import APIUserSearchView
# URL routing table; media files are served from MEDIA_ROOT in addition.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # FIX: anchor the pattern at the start of the path; without '^' the
    # regex matched anywhere in the URL (e.g. '/foo/api/user/search').
    url(r'^api/user/search', APIUserSearchView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.214286
| 79
| 0.731755
|
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from web.views import APIUserSearchView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'api/user/search', APIUserSearchView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true
| true
|
f7145968a1bf58bef88ec9f77fbf6c48708480e2
| 1,067
|
py
|
Python
|
wtools/plotting.py
|
DrAuxin/WestpaTools
|
4e236e0a3d65504d1937260316a4a5c6f39aa610
|
[
"BSD-3-Clause"
] | 1
|
2020-05-18T15:58:17.000Z
|
2020-05-18T15:58:17.000Z
|
wtools/plotting.py
|
DrAuxin/WestpaTools
|
4e236e0a3d65504d1937260316a4a5c6f39aa610
|
[
"BSD-3-Clause"
] | null | null | null |
wtools/plotting.py
|
DrAuxin/WestpaTools
|
4e236e0a3d65504d1937260316a4a5c6f39aa610
|
[
"BSD-3-Clause"
] | null | null | null |
import h5py
import numpy
import matplotlib.pyplot as plt
def plotflux(h5file, state=1):
    """
    A function that plots the dataset target_flux_evolution from a direct.h5 file.

    Parameters
    ----------
    h5file: dictionary-like (opened HDF5 file)
        The user's HDF5 file loaded with loadh5.
    state: integer
        The target state (1-based); the state for which you want to know the
        entering flux.

    Returns
    -------
    Nothing
        The plot of the flux evolution will be shown in a separate window
        (plt.show() blocks until it is closed).

    Examples
    --------
    >>> h5file = loadh5("west.h5")
    >>> plotflux(h5file, 1)
    """
    # 'expected' column for the chosen target state; `state` is 1-based
    # while the HDF5 axis is 0-based, hence state-1.
    fluxes = h5file['target_flux_evolution']['expected',:,state-1]
    # X axis: WE iteration numbers 1..len(fluxes).
    iterations = numpy.arange(1,len(fluxes)+1,1)
    fig, ax = plt.subplots()
    ax.plot(iterations,fluxes, linewidth=3)
    ax.set_xlabel('WE Iteration', fontsize=24)
    ax.set_ylabel('Mean Flux', fontsize=24)
    # Scientific notation on the flux axis (values are typically tiny).
    ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    ax.tick_params(labelsize=22)
    fig.tight_layout()
    plt.show()
| 26.675
| 85
| 0.615745
|
import h5py
import numpy
import matplotlib.pyplot as plt
def plotflux(h5file, state=1):
fluxes = h5file['target_flux_evolution']['expected',:,state-1]
iterations = numpy.arange(1,len(fluxes)+1,1)
fig, ax = plt.subplots()
ax.plot(iterations,fluxes, linewidth=3)
ax.set_xlabel('WE Iteration', fontsize=24)
ax.set_ylabel('Mean Flux', fontsize=24)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.tick_params(labelsize=22)
fig.tight_layout()
plt.show()
| true
| true
|
f7145aa2447443687be6df3402e6c85c14e2707b
| 15,668
|
py
|
Python
|
experiments/custom_agents_opt.py
|
anonips/-MDP-Playground
|
74431f98c210830a93a1bc83fcdcb95bf1644696
|
[
"Apache-2.0"
] | 2
|
2019-09-18T14:43:40.000Z
|
2021-02-23T18:46:50.000Z
|
experiments/custom_agents_opt.py
|
anonips/-MDP-Playground
|
74431f98c210830a93a1bc83fcdcb95bf1644696
|
[
"Apache-2.0"
] | null | null | null |
experiments/custom_agents_opt.py
|
anonips/-MDP-Playground
|
74431f98c210830a93a1bc83fcdcb95bf1644696
|
[
"Apache-2.0"
] | 1
|
2020-02-14T13:59:15.000Z
|
2020-02-14T13:59:15.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.utils.annotations import override
# yapf: disable
# __sphinx_doc_begin__
class RandomAgent(Trainer):
    """Trainer that samples uniformly random actions and never learns."""

    _name = "RandomAgent"
    _default_config = with_common_config({
        "rollouts_per_iteration": 10,
    })

    @override(Trainer)
    def _init(self, config, env_creator):
        # Build the environment from the user-supplied env_config.
        self.env = env_creator(config["env_config"])

    @override(Trainer)
    def _train(self):
        episode_returns = []
        total_steps = 0
        # Run the configured number of full episodes with random actions.
        for _ in range(self.config["rollouts_per_iteration"]):
            self.env.reset()
            done = False
            episode_return = 0.0
            while not done:
                sampled = self.env.action_space.sample()
                _, r, done, _ = self.env.step(sampled)
                episode_return += r
                total_steps += 1
            episode_returns.append(episode_return)
        return {
            "episode_reward_mean": np.mean(episode_returns),
            "timesteps_this_iter": total_steps,
        }
class VIAgent(Trainer):
    """Tabular Value Iteration agent.

    Sweeps the state space until the value function changes by less than
    `tolerance`, then evaluates the greedy policy with fixed-length rollouts.
    #TODO Make it Generalized PI.
    """

    _name = "VIAgent"
    _default_config = with_common_config({
        "tolerance": 0.01,
        "discount_factor": 0.5,
        "rollouts_per_iteration": 10,
        "episode_length": 200,
        # "lr": 0.5
    })

    @override(Trainer)
    def _init(self, config, env_creator):
        self.env = env_creator(config["env_config"])
        # Value estimate per state, and the greedy policy derived from it.
        self.V = np.zeros(self.env.observation_space.n)
        self.policy = np.zeros(self.env.observation_space.n, dtype=int)
        self.policy[:] = -1  # IMP # To avoid initing it to a value within action_space range

    @override(Trainer)
    def _train(self):
        max_diff = np.inf
        state_space_size = self.env.observation_space.n
        gamma = self.config["discount_factor"]
        total_iterations = 0
        while max_diff > self.config["tolerance"]:
            total_iterations += 1
            # BUG FIX: snapshot V once per sweep.  The original copied V
            # inside the per-state loop, so max_diff only measured the change
            # of the *last* state and the sweep could terminate prematurely.
            # In-place updates of V within the sweep (Gauss-Seidel style) are
            # retained.
            self.V_old = self.V.copy()
            for s in range(state_space_size):
                # One-step lookahead Q-values; env.R/env.P are assumed to be
                # deterministic reward/transition functions -- TODO confirm.
                q_values = [
                    self.env.R(s, a) + gamma * self.V[self.env.P(s, a)]
                    for a in range(self.env.action_space.n)
                ]
                self.policy[s] = np.argmax(q_values)
                self.V[s] = np.max(q_values)
            max_diff = np.max(np.absolute(self.V_old - self.V))
        print("Total iterations:", total_iterations)
        # Evaluate the greedy policy with fixed-horizon rollouts.
        rewards = []
        steps = 0
        for _ in range(self.config["rollouts_per_iteration"]):
            obs = self.env.reset()
            done = False
            reward = 0.0
            # NOTE(review): `done` is intentionally ignored here -- each
            # rollout runs exactly episode_length steps, as in the original.
            for _ in range(self.config["episode_length"]):
                action = self.policy[obs]
                obs, r, done, info = self.env.step(action)
                reward += r
                steps += 1
            rewards.append(reward)
        return {
            "episode_reward_mean": np.mean(rewards),
            "timesteps_this_iter": steps,
        }
import ray
from ray import tune
from ray.rllib.utils.seed import seed as rllib_seed
import rl_toy
from rl_toy.envs import RLToyEnv
from ray.tune.registry import register_env
register_env("RLToy-v0", lambda config: RLToyEnv(config))
from ray.rllib.models.preprocessors import OneHotPreprocessor
from ray.rllib.models import ModelCatalog
ModelCatalog.register_custom_preprocessor("ohe", OneHotPreprocessor)
#rllib_seed(0, 0, 0) ####IMP Doesn't work due to multi-process I think; so use config["seed"]
ray.init()
# Old config space
# algorithms = ["DQN"]
# state_space_sizes = [2**i for i in range(4,6)]
# action_space_sizes = [2**i for i in range(1,6)]
# delays = [0] + [2**i for i in range(5)]
# sequence_lengths = [i for i in range(1,6)]
# reward_densities = [0.25] # np.linspace(0.0, 1.0, num=5)
# # make_reward_dense = [True, False]
# terminal_state_densities = [0.25] # np.linspace(0.1, 1.0, num=5)
#test basic case
# algorithms = ["DQN"]
# state_space_sizes = [10]
# action_space_sizes = [10]
# delays = [4]
# sequence_lengths = [2]
# reward_densities = [0.25] # np.linspace(0.0, 1.0, num=5)
# # make_reward_dense = [True, False]
# terminal_state_densities = [0.25] # np.linspace(0.1, 1.0, num=5)
state_space_sizes = [8]#, 10, 12, 14] # [2**i for i in range(1,6)]
action_space_sizes = [8]#2, 4, 8, 16] # [2**i for i in range(1,6)]
delays = [0] # + [2**i for i in range(4)]
sequence_lengths = [1]#, 2]#i for i in range(1,4)]
reward_densities = [0.25] # np.linspace(0.0, 1.0, num=5)
# make_reward_dense = [True, False]
terminal_state_densities = [0.25] # np.linspace(0.1, 1.0, num=5)
algorithms = ["DQN"]
#seeds = []
# Others, keep the rest fixed for these: learning_starts, target_network_update_freq, double_dqn, fcnet_hiddens, fcnet_activation, use_lstm, lstm_seq_len, sample_batch_size/train_batch_size
# More others: adam_epsilon, exploration_final_eps/exploration_fraction, buffer_size
num_layerss = [1, 2, 3, 4]
layer_widths = [128, 256, 512]
fcnet_activations = ["tanh", "relu", "sigmoid"]
learning_startss = [500, 1000, 2000, 4000, 8000]
target_network_update_freqs = [8, 80, 800]
double_dqn = [False, True]
learning_rates = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
adam_epsilons = [1e-3, 1e-4, 1e-5, 1e-6] # [1e-1, 1e-4, 1e-7, 1e-10]
# lstm with sequence lengths
print('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density,'
'terminal_state_density ')
print(algorithms, state_space_sizes, action_space_sizes, delays, sequence_lengths, reward_densities, terminal_state_densities)
# stats = {}
# aaaa = 3
#TODO Write addnl. line at beginning of file for column names
# fout = open('rl_stats_temp.csv', 'a') #hardcoded
# fout.write('# basename, n_points, n_features, n_trees ')
import time
start = time.time()
print(algorithms, state_space_sizes, action_space_sizes, delays,
sequence_lengths, reward_densities, terminal_state_densities)
def on_train_result(info):
    """Tune callback run after every training iteration.

    Pulls the environment/model hyperparameters and the current metrics out
    of the result dict and appends one record to ./rl_stats_temp_opt.csv
    (path hardcoded). Sets info["result"]["callback_ok"] = True in place.

    :param info: dict with keys "trainer" (the Trainer instance) and
        "result" (the per-iteration result dict).
    """
    algorithm = info["trainer"]._name
    env_config = info["result"]["config"]["env_config"]
    state_space_size = env_config["state_space_size"]
    action_space_size = env_config["action_space_size"]
    delay = env_config["delay"]
    sequence_length = env_config["sequence_length"]
    reward_density = env_config["reward_density"]
    terminal_state_density = env_config["terminal_state_density"]
    fcnet_hiddens = info["result"]["config"]["model"]["fcnet_hiddens"]
    num_layers = len(fcnet_hiddens)
    layer_width = fcnet_hiddens[0]  # assumes all hidden layers share one width — TODO confirm
    lr = info["result"]["config"]["lr"]
    adam_epsilon = info["result"]["config"]["adam_epsilon"]
    timesteps_total = info["result"]["timesteps_total"]  # result also carries episodes_total etc.
    episode_reward_mean = info["result"]["episode_reward_mean"]
    episode_len_mean = info["result"]["episode_len_mean"]
    # "with" guarantees the handle is closed even if a write raises; the
    # original opened/closed manually. Note this writes (header included)
    # every iteration, which is slow but preserved behaviour.
    with open('./rl_stats_temp_opt.csv', 'a') as fout:
        fout.write('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density, '
                'terminal_state_density, num_layers, layer_width, lr, adam_epsilon,\n' + str(algorithm) + ' ' + str(state_space_size) +
                ' ' + str(action_space_size) + ' ' + str(delay) + ' ' + str(sequence_length)
                + ' ' + str(reward_density) + ' ' + str(terminal_state_density) + ' ')
        fout.write(str(num_layers) + ' ' + str(layer_width) + ' ' + str(lr) + ' ' + str(adam_epsilon) + ' ' + str(timesteps_total) + ' ' + str(episode_reward_mean) +
                ' ' + str(episode_len_mean) + '\n')
    info["result"]["callback_ok"] = True
# tune.run(
# RandomAgent,
# stop={
# "timesteps_total": 20000,
# },
# config={
# "rollouts_per_iteration": 10,
# "env": "RLToy-v0",
# "env_config": {
# 'state_space_type': 'discrete',
# 'action_space_type': 'discrete',
# 'state_space_size': 16,
# 'action_space_size': 16,
# 'generate_random_mdp': True,
# 'delay': 6,
# 'sequence_length': 1,
# 'reward_density': 0.25,
# 'terminal_state_density': 0.25
# },
# },
# )
# tune.run(
# VIAgent,
# stop={
# "timesteps_total": 20000,
# },
# config={
# "tolerance": 0.01,
# "discount_factor": 0.99,
# "rollouts_per_iteration": 10,
# "env": "RLToy-v0",
# "env_config": {
# 'state_space_type': 'discrete',
# 'action_space_type': 'discrete',
# 'state_space_size': 10,
# 'action_space_size': 10,
# 'generate_random_mdp': True,
# 'delay': 0,
# 'sequence_length': 1,
# 'reward_density': 0.25,
# 'terminal_state_density': 0.25
# },
# },
# )
# Launch one Tune run per cell of the hyperparameter grid. The original
# code nested nine for-loops; itertools.product yields exactly the same
# row-major iteration order with a single level of indentation.
import itertools

for (algorithm, state_space_size, action_space_size, delay, sequence_length,
     reward_density, terminal_state_density, lr, adam_epsilon) in itertools.product(
        algorithms, state_space_sizes, action_space_sizes, delays,
        sequence_lengths, reward_densities, terminal_state_densities,
        learning_rates, adam_epsilons):
    tune.run(
        algorithm,
        stop={
            "timesteps_total": 20000,
        },
        config={
            "adam_epsilon": adam_epsilon,
            "lr": lr,
            "beta_annealing_fraction": 1.0,
            "buffer_size": 1000000,
            "double_q": False,
            "dueling": False,
            "env": "RLToy-v0",
            "env_config": {
                'seed': 0,  # fixed env seed for reproducibility across runs
                'state_space_type': 'discrete',
                'action_space_type': 'discrete',
                'state_space_size': state_space_size,
                'action_space_size': action_space_size,
                'generate_random_mdp': True,
                'delay': delay,
                'sequence_length': sequence_length,
                'reward_density': reward_density,
                'terminal_state_density': terminal_state_density,
                'repeats_in_sequences': False,
                'reward_unit': 1.0,
                'make_denser': False,
                'completely_connected': True
            },
            "model": {
                "fcnet_hiddens": [256, 256],
                "custom_preprocessor": "ohe",  # one-hot preprocessor registered elsewhere in this script
                "custom_options": {},
                "fcnet_activation": "tanh",
                "use_lstm": False,
                "max_seq_len": 20,
                "lstm_cell_size": 256,
                "lstm_use_prev_action_reward": False,
            },
            "exploration_final_eps": 0.01,
            "exploration_fraction": 0.1,
            "final_prioritized_replay_beta": 1.0,
            "hiddens": None,
            "learning_starts": 1000,
            "n_step": 1,
            "noisy": False,
            "num_atoms": 1,
            "prioritized_replay": False,
            "prioritized_replay_alpha": 0.5,
            "sample_batch_size": 4,
            "schedule_max_timesteps": 20000,
            "target_network_update_freq": 800,
            "timesteps_per_iteration": 100,
            "train_batch_size": 32,
            "callbacks": {
                # Per-iteration CSV logging defined above.
                "on_train_result": tune.function(on_train_result),
            },
        },
    )

end = time.time()
print("No. of seconds to run:", end - start)
| 44.511364
| 254
| 0.524509
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.utils.annotations import override
class RandomAgent(Trainer):
    """Baseline trainer that learns nothing: every action is sampled
    uniformly from the environment's action space."""

    _name = "RandomAgent"
    _default_config = with_common_config({
        "rollouts_per_iteration": 10,
    })

    @override(Trainer)
    def _init(self, config, env_creator):
        self.env = env_creator(config["env_config"])

    @override(Trainer)
    def _train(self):
        """Roll out the configured number of episodes and report the mean return."""
        episode_returns = []
        total_steps = 0
        for _ in range(self.config["rollouts_per_iteration"]):
            obs = self.env.reset()
            episode_return = 0.0
            done = False
            while not done:
                random_action = self.env.action_space.sample()
                obs, r, done, info = self.env.step(random_action)
                episode_return += r
                total_steps += 1
            episode_returns.append(episode_return)
        return {
            "episode_reward_mean": np.mean(episode_returns),
            "timesteps_this_iter": total_steps,
        }
class VIAgent(Trainer):
    """Tabular value-iteration agent for the discrete toy MDPs.

    ``_init`` runs value iteration until the max-norm change in V drops
    below ``tolerance``; ``_train`` then evaluates the resulting greedy
    policy for a fixed number of fixed-length rollouts.
    """

    _name = "VIAgent"
    _default_config = with_common_config({
        "tolerance": 0.01,
        "discount_factor": 0.5,
        "rollouts_per_iteration": 10,
        "episode_length": 200,
    })

    @override(Trainer)
    def _init(self, config, env_creator):
        self.env = env_creator(config["env_config"])
        self.V = np.zeros(self.env.observation_space.n)
        self.policy = np.zeros(self.env.observation_space.n, dtype=int)
        self.policy[:] = -1  # -1 marks "no action chosen yet"
        # NOTE(review): the source line here arrived corrupted
        # ("self.policy[:] = -1 _diff = np.inf"); max_diff must start at
        # infinity so the convergence loop below runs at least once.
        max_diff = np.inf
        state_space_size = self.env.observation_space.n
        gamma = self.config["discount_factor"]
        total_iterations = 0
        while max_diff > self.config["tolerance"]:
            total_iterations += 1
            for s in range(state_space_size):
                # Asynchronous (in-place) backups: V_old is only kept to
                # measure the per-update change for the stopping test.
                self.V_old = self.V.copy()
                # env.R(s, a) is the reward and env.P(s, a) the deterministic
                # successor state — both are callables on the toy env.
                self.policy[s] = np.argmax([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)])
                self.V[s] = np.max([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)])
                max_diff = np.max(np.absolute(self.V_old - self.V))
        print("Total iterations:", total_iterations)

    @override(Trainer)
    def _train(self):
        # NOTE(review): the "def _train" header was missing from the corrupted
        # source between the planning code and the rollout code; restored so
        # the rollout/return block is a method, matching RandomAgent's shape.
        rewards = []
        steps = 0
        for _ in range(self.config["rollouts_per_iteration"]):
            obs = self.env.reset()
            done = False
            reward = 0.0
            for _ in range(self.config["episode_length"]):
                action = self.policy[obs]
                obs, r, done, info = self.env.step(action)
                reward += r
                steps += 1
            rewards.append(reward)
        return {
            "episode_reward_mean": np.mean(rewards),
            "timesteps_this_iter": steps,
        }
import ray
from ray import tune
from ray.rllib.utils.seed import seed as rllib_seed
import rl_toy
from rl_toy.envs import RLToyEnv
from ray.tune.registry import register_env
# Make the toy MDP environment available to Tune under the id "RLToy-v0".
register_env("RLToy-v0", lambda config: RLToyEnv(config))
from ray.rllib.models.preprocessors import OneHotPreprocessor
from ray.rllib.models import ModelCatalog
# Register a one-hot preprocessor under the name "ohe", referenced by the
# "custom_preprocessor" entry of the model configs below.
ModelCatalog.register_custom_preprocessor("ohe", OneHotPreprocessor)
#rllib_seed(0, 0, 0) ####IMP Doesn't work due to multi-process I think; so use config["seed"]
ray.init()
# NOTE(review): the first line of this duplicated config block arrived
# truncated ('anh", "relu", "sigmoid"]'); restored to the activation list
# exactly as it appears in the first copy of this script.
fcnet_activations = ["tanh", "relu", "sigmoid"]
learning_startss = [500, 1000, 2000, 4000, 8000]
target_network_update_freqs = [8, 80, 800]
double_dqn = [False, True]
learning_rates = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
adam_epsilons = [1e-3, 1e-4, 1e-5, 1e-6]
print('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density,'
'terminal_state_density ')
print(algorithms, state_space_sizes, action_space_sizes, delays, sequence_lengths, reward_densities, terminal_state_densities)
# NOTE(review): restored from the bare token "time" — the import lost its keyword.
import time
start = time.time()
print(algorithms, state_space_sizes, action_space_sizes, delays,
sequence_lengths, reward_densities, terminal_state_densities)
def on_train_result(info):
    """Tune callback run after every training iteration.

    Extracts the environment/model hyperparameters and current metrics from
    the result dict and appends one record to ./rl_stats_temp_opt.csv
    (path hardcoded). Sets info["result"]["callback_ok"] = True in place.

    :param info: dict with keys "trainer" (the Trainer instance) and
        "result" (the per-iteration result dict).
    """
    algorithm = info["trainer"]._name
    env_config = info["result"]["config"]["env_config"]
    state_space_size = env_config["state_space_size"]
    action_space_size = env_config["action_space_size"]
    delay = env_config["delay"]
    sequence_length = env_config["sequence_length"]
    reward_density = env_config["reward_density"]
    terminal_state_density = env_config["terminal_state_density"]
    fcnet_hiddens = info["result"]["config"]["model"]["fcnet_hiddens"]
    num_layers = len(fcnet_hiddens)
    layer_width = fcnet_hiddens[0]  # assumes all hidden layers share one width — TODO confirm
    lr = info["result"]["config"]["lr"]
    adam_epsilon = info["result"]["config"]["adam_epsilon"]
    timesteps_total = info["result"]["timesteps_total"]
    episode_reward_mean = info["result"]["episode_reward_mean"]
    episode_len_mean = info["result"]["episode_len_mean"]
    # "with" guarantees the handle is closed even if a write raises; the
    # original opened/closed the file manually on every call.
    with open('./rl_stats_temp_opt.csv', 'a') as fout:
        fout.write('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density, '
                'terminal_state_density, num_layers, layer_width, lr, adam_epsilon,\n' + str(algorithm) + ' ' + str(state_space_size) +
                ' ' + str(action_space_size) + ' ' + str(delay) + ' ' + str(sequence_length)
                + ' ' + str(reward_density) + ' ' + str(terminal_state_density) + ' ')
        fout.write(str(num_layers) + ' ' + str(layer_width) + ' ' + str(lr) + ' ' + str(adam_epsilon) + ' ' + str(timesteps_total) + ' ' + str(episode_reward_mean) +
                ' ' + str(episode_len_mean) + '\n')
    info["result"]["callback_ok"] = True
# Launch one Tune run per cell of the hyperparameter grid. The original
# code nested nine for-loops; itertools.product yields exactly the same
# row-major iteration order with a single level of indentation.
import itertools

for (algorithm, state_space_size, action_space_size, delay, sequence_length,
     reward_density, terminal_state_density, lr, adam_epsilon) in itertools.product(
        algorithms, state_space_sizes, action_space_sizes, delays,
        sequence_lengths, reward_densities, terminal_state_densities,
        learning_rates, adam_epsilons):
    tune.run(
        algorithm,
        stop={
            "timesteps_total": 20000,
        },
        config={
            "adam_epsilon": adam_epsilon,
            "lr": lr,
            "beta_annealing_fraction": 1.0,
            "buffer_size": 1000000,
            "double_q": False,
            "dueling": False,
            "env": "RLToy-v0",
            "env_config": {
                'seed': 0,  # fixed env seed for reproducibility across runs
                'state_space_type': 'discrete',
                'action_space_type': 'discrete',
                'state_space_size': state_space_size,
                'action_space_size': action_space_size,
                'generate_random_mdp': True,
                'delay': delay,
                'sequence_length': sequence_length,
                'reward_density': reward_density,
                'terminal_state_density': terminal_state_density,
                'repeats_in_sequences': False,
                'reward_unit': 1.0,
                'make_denser': False,
                'completely_connected': True
            },
            "model": {
                "fcnet_hiddens": [256, 256],
                "custom_preprocessor": "ohe",  # one-hot preprocessor registered elsewhere in this script
                "custom_options": {},
                "fcnet_activation": "tanh",
                "use_lstm": False,
                "max_seq_len": 20,
                "lstm_cell_size": 256,
                "lstm_use_prev_action_reward": False,
            },
            "exploration_final_eps": 0.01,
            "exploration_fraction": 0.1,
            "final_prioritized_replay_beta": 1.0,
            "hiddens": None,
            "learning_starts": 1000,
            "n_step": 1,
            "noisy": False,
            "num_atoms": 1,
            "prioritized_replay": False,
            "prioritized_replay_alpha": 0.5,
            "sample_batch_size": 4,
            "schedule_max_timesteps": 20000,
            "target_network_update_freq": 800,
            "timesteps_per_iteration": 100,
            "train_batch_size": 32,
            "callbacks": {
                # Per-iteration CSV logging defined above.
                "on_train_result": tune.function(on_train_result),
            },
        },
    )

end = time.time()
print("No. of seconds to run:", end - start)
| true
| true
|
f7145c199e0e4cfca77fa9ac99b9dea5fb703b95
| 1,756
|
py
|
Python
|
alipay/aop/api/domain/AnswerModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AnswerModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AnswerModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AnswerModel(object):
    """Answer to one questionnaire item: the item id, the chosen option id,
    and an optional extra payload."""

    def __init__(self):
        self._extra = None
        self._item_id = None
        self._option_id = None

    @property
    def extra(self):
        return self._extra

    @extra.setter
    def extra(self, value):
        self._extra = value

    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value

    @property
    def option_id(self):
        return self._option_id

    @option_id.setter
    def option_id(self, value):
        self._option_id = value

    def to_alipay_dict(self):
        """Serialise to a plain dict; nested objects are converted via their
        own to_alipay_dict. Falsy attribute values are skipped (SDK convention)."""
        params = dict()
        for key in ('extra', 'item_id', 'option_id'):
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AnswerModel from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AnswerModel()
        if 'extra' in d:
            o.extra = d['extra']
        if 'item_id' in d:
            o.item_id = d['item_id']
        if 'option_id' in d:
            o.option_id = d['option_id']
        return o
| 24.732394
| 69
| 0.555809
|
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AnswerModel(object):
    """Answer to one questionnaire item: item id, chosen option id, and an
    optional extra payload."""
    def __init__(self):
        # All attributes default to None and are set via the properties below.
        self._extra = None
        self._item_id = None
        self._option_id = None

    @property
    def extra(self):
        return self._extra

    @extra.setter
    def extra(self, value):
        self._extra = value
    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value
    @property
    def option_id(self):
        return self._option_id

    @option_id.setter
    def option_id(self, value):
        self._option_id = value

    def to_alipay_dict(self):
        """Serialise to a plain dict; nested objects are converted via their
        own to_alipay_dict. Falsy attribute values are skipped."""
        params = dict()
        if self.extra:
            if hasattr(self.extra, 'to_alipay_dict'):
                params['extra'] = self.extra.to_alipay_dict()
            else:
                params['extra'] = self.extra
        if self.item_id:
            if hasattr(self.item_id, 'to_alipay_dict'):
                params['item_id'] = self.item_id.to_alipay_dict()
            else:
                params['item_id'] = self.item_id
        if self.option_id:
            if hasattr(self.option_id, 'to_alipay_dict'):
                params['option_id'] = self.option_id.to_alipay_dict()
            else:
                params['option_id'] = self.option_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AnswerModel from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AnswerModel()
        if 'extra' in d:
            o.extra = d['extra']
        if 'item_id' in d:
            o.item_id = d['item_id']
        if 'option_id' in d:
            o.option_id = d['option_id']
        return o
| true
| true
|
f7145c94fe95283ff3e26d0aa9e1a5bdf965d2fc
| 52,866
|
py
|
Python
|
sdk/python/pulumi_azure_native/hybridnetwork/v20210501/outputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/hybridnetwork/v20210501/outputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/hybridnetwork/v20210501/outputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
# Public API of this generated module: the output/response types produced by
# the SDK generator for this API version.
__all__ = [
    'CustomProfileResponse',
    'DataDiskResponse',
    'ImageReferenceResponse',
    'LinuxConfigurationResponse',
    'NetworkFunctionRoleConfigurationResponse',
    'NetworkFunctionTemplateResponse',
    'NetworkFunctionUserConfigurationResponse',
    'NetworkFunctionUserConfigurationResponseOsProfile',
    'NetworkInterfaceIPConfigurationResponse',
    'NetworkInterfaceResponse',
    'OsDiskResponse',
    'OsProfileResponse',
    'SshConfigurationResponse',
    'SshPublicKeyResponse',
    'StorageProfileResponse',
    'SubResourceResponse',
    'SystemDataResponse',
]
@pulumi.output_type
class CustomProfileResponse(dict):
    """
    Specifies the custom settings for the virtual machine.
    """
    # Generated code: __key_warning maps each camelCase wire key to its
    # snake_case property name; dict-style access still works but logs a
    # warning suggesting the property getter.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "metadataConfigurationPath":
            suggest = "metadata_configuration_path"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CustomProfileResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        CustomProfileResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        CustomProfileResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 metadata_configuration_path: Optional[str] = None):
        """
        Specifies the custom settings for the virtual machine.
        :param str metadata_configuration_path: Path for metadata configuration.
        """
        # pulumi.set stores the value in the Pulumi-managed backing store
        # read back by pulumi.get in the property getter.
        if metadata_configuration_path is not None:
            pulumi.set(__self__, "metadata_configuration_path", metadata_configuration_path)

    @property
    @pulumi.getter(name="metadataConfigurationPath")
    def metadata_configuration_path(self) -> Optional[str]:
        """
        Path for metadata configuration.
        """
        return pulumi.get(self, "metadata_configuration_path")
@pulumi.output_type
class DataDiskResponse(dict):
    """
    Specifies information about the operating system disk used by the virtual machine. <br><br> For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    """
    # Generated code: __key_warning maps camelCase wire keys to snake_case
    # property names and warns when dict-style access uses the wire key.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "createOption":
            suggest = "create_option"
        elif key == "diskSizeGB":
            suggest = "disk_size_gb"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DataDiskResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DataDiskResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DataDiskResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 create_option: Optional[str] = None,
                 disk_size_gb: Optional[int] = None,
                 name: Optional[str] = None):
        """
        Specifies information about the operating system disk used by the virtual machine. <br><br> For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
        :param str create_option: Specifies how the virtual machine should be created.
        :param int disk_size_gb: Specifies the size of an empty disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.
        :param str name: The name of data disk.
        """
        if create_option is not None:
            pulumi.set(__self__, "create_option", create_option)
        if disk_size_gb is not None:
            pulumi.set(__self__, "disk_size_gb", disk_size_gb)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> Optional[str]:
        """
        Specifies how the virtual machine should be created.
        """
        return pulumi.get(self, "create_option")

    @property
    @pulumi.getter(name="diskSizeGB")
    def disk_size_gb(self) -> Optional[int]:
        """
        Specifies the size of an empty disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.
        """
        return pulumi.get(self, "disk_size_gb")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of data disk.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class ImageReferenceResponse(dict):
    """
    The image reference properties.
    """
    # Generated code: __key_warning maps camelCase wire keys to snake_case
    # property names and warns when dict-style access uses the wire key.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "exactVersion":
            suggest = "exact_version"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ImageReferenceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 exact_version: Optional[str] = None,
                 offer: Optional[str] = None,
                 publisher: Optional[str] = None,
                 sku: Optional[str] = None,
                 version: Optional[str] = None):
        """
        The image reference properties.
        :param str exact_version: Specifies in decimal numbers, the exact version of image used to create the virtual machine.
        :param str offer: Specifies the offer of the image used to create the virtual machine.
        :param str publisher: The image publisher.
        :param str sku: The image SKU.
        :param str version: Specifies the version of the image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.
        """
        if exact_version is not None:
            pulumi.set(__self__, "exact_version", exact_version)
        if offer is not None:
            pulumi.set(__self__, "offer", offer)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="exactVersion")
    def exact_version(self) -> Optional[str]:
        """
        Specifies in decimal numbers, the exact version of image used to create the virtual machine.
        """
        return pulumi.get(self, "exact_version")

    @property
    @pulumi.getter
    def offer(self) -> Optional[str]:
        """
        Specifies the offer of the image used to create the virtual machine.
        """
        return pulumi.get(self, "offer")

    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        """
        The image publisher.
        """
        return pulumi.get(self, "publisher")

    @property
    @pulumi.getter
    def sku(self) -> Optional[str]:
        """
        The image SKU.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        Specifies the version of the image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class LinuxConfigurationResponse(dict):
    """
    Specifies the Linux operating system settings on the virtual machine.
    """
    # Generated code: no camelCase-only keys here, so there is no
    # __key_warning shim for this type.
    def __init__(__self__, *,
                 ssh: Optional['outputs.SshConfigurationResponse'] = None):
        """
        Specifies the Linux operating system settings on the virtual machine.
        :param 'SshConfigurationResponse' ssh: Specifies the ssh key configuration for a Linux OS.
        """
        if ssh is not None:
            pulumi.set(__self__, "ssh", ssh)

    @property
    @pulumi.getter
    def ssh(self) -> Optional['outputs.SshConfigurationResponse']:
        """
        Specifies the ssh key configuration for a Linux OS.
        """
        return pulumi.get(self, "ssh")
@pulumi.output_type
class NetworkFunctionRoleConfigurationResponse(dict):
    """
    Network function role configuration.
    """
    # Generated code: __key_warning maps each camelCase wire key to its
    # snake_case property name and warns on dict-style access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "customProfile":
            suggest = "custom_profile"
        elif key == "networkInterfaces":
            suggest = "network_interfaces"
        elif key == "osProfile":
            suggest = "os_profile"
        elif key == "roleName":
            suggest = "role_name"
        elif key == "roleType":
            suggest = "role_type"
        elif key == "storageProfile":
            suggest = "storage_profile"
        elif key == "userDataParameters":
            suggest = "user_data_parameters"
        elif key == "userDataTemplate":
            suggest = "user_data_template"
        elif key == "virtualMachineSize":
            suggest = "virtual_machine_size"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionRoleConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 custom_profile: Optional['outputs.CustomProfileResponse'] = None,
                 network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
                 os_profile: Optional['outputs.OsProfileResponse'] = None,
                 role_name: Optional[str] = None,
                 role_type: Optional[str] = None,
                 storage_profile: Optional['outputs.StorageProfileResponse'] = None,
                 user_data_parameters: Optional[Any] = None,
                 user_data_template: Optional[Any] = None,
                 virtual_machine_size: Optional[str] = None):
        """
        Network function role configuration.
        :param 'CustomProfileResponse' custom_profile: Specifies the custom settings for the virtual machine.
        :param Sequence['NetworkInterfaceResponse'] network_interfaces: The network interface configurations.
        :param 'OsProfileResponse' os_profile: Specifies the operating system settings for the role instance. This value can be updated during the deployment of network function.
        :param str role_name: The name of the network function role.
        :param str role_type: Role type.
        :param 'StorageProfileResponse' storage_profile: Specifies the storage settings for the virtual machine disks.
        :param Any user_data_parameters: The user parameters for customers. The format of user data parameters has to be matched with the provided user data template.
        :param Any user_data_template: The user data template for customers. This is a json schema template describing the format and data type of user data parameters.
        :param str virtual_machine_size: The size of the virtual machine.
        """
        # Only non-None values are stored; absent fields stay unset.
        if custom_profile is not None:
            pulumi.set(__self__, "custom_profile", custom_profile)
        if network_interfaces is not None:
            pulumi.set(__self__, "network_interfaces", network_interfaces)
        if os_profile is not None:
            pulumi.set(__self__, "os_profile", os_profile)
        if role_name is not None:
            pulumi.set(__self__, "role_name", role_name)
        if role_type is not None:
            pulumi.set(__self__, "role_type", role_type)
        if storage_profile is not None:
            pulumi.set(__self__, "storage_profile", storage_profile)
        if user_data_parameters is not None:
            pulumi.set(__self__, "user_data_parameters", user_data_parameters)
        if user_data_template is not None:
            pulumi.set(__self__, "user_data_template", user_data_template)
        if virtual_machine_size is not None:
            pulumi.set(__self__, "virtual_machine_size", virtual_machine_size)

    @property
    @pulumi.getter(name="customProfile")
    def custom_profile(self) -> Optional['outputs.CustomProfileResponse']:
        """
        Specifies the custom settings for the virtual machine.
        """
        return pulumi.get(self, "custom_profile")

    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
        """
        The network interface configurations.
        """
        return pulumi.get(self, "network_interfaces")

    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.OsProfileResponse']:
        """
        Specifies the operating system settings for the role instance. This value can be updated during the deployment of network function.
        """
        return pulumi.get(self, "os_profile")

    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[str]:
        """
        The name of the network function role.
        """
        return pulumi.get(self, "role_name")

    @property
    @pulumi.getter(name="roleType")
    def role_type(self) -> Optional[str]:
        """
        Role type.
        """
        return pulumi.get(self, "role_type")

    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
        """
        Specifies the storage settings for the virtual machine disks.
        """
        return pulumi.get(self, "storage_profile")

    @property
    @pulumi.getter(name="userDataParameters")
    def user_data_parameters(self) -> Optional[Any]:
        """
        The user parameters for customers. The format of user data parameters has to be matched with the provided user data template.
        """
        return pulumi.get(self, "user_data_parameters")

    @property
    @pulumi.getter(name="userDataTemplate")
    def user_data_template(self) -> Optional[Any]:
        """
        The user data template for customers. This is a json schema template describing the format and data type of user data parameters.
        """
        return pulumi.get(self, "user_data_template")

    @property
    @pulumi.getter(name="virtualMachineSize")
    def virtual_machine_size(self) -> Optional[str]:
        """
        The size of the virtual machine.
        """
        return pulumi.get(self, "virtual_machine_size")
@pulumi.output_type
class NetworkFunctionTemplateResponse(dict):
    """
    The network function template.
    """
    # Generated code: __key_warning maps the camelCase wire key to the
    # snake_case property name and warns on dict-style access.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "networkFunctionRoleConfigurations":
            suggest = "network_function_role_configurations"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionTemplateResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 network_function_role_configurations: Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']] = None):
        """
        The network function template.
        :param Sequence['NetworkFunctionRoleConfigurationResponse'] network_function_role_configurations: An array of network function role definitions.
        """
        if network_function_role_configurations is not None:
            pulumi.set(__self__, "network_function_role_configurations", network_function_role_configurations)

    @property
    @pulumi.getter(name="networkFunctionRoleConfigurations")
    def network_function_role_configurations(self) -> Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']]:
        """
        An array of network function role definitions.
        """
        return pulumi.get(self, "network_function_role_configurations")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponse(dict):
"""
The network function user configuration.
"""
# Maps a camelCase wire key to its snake_case property name and warns when
# the camelCase form is used for dict-style access.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "networkInterfaces":
suggest = "network_interfaces"
elif key == "osProfile":
suggest = "os_profile"
elif key == "roleName":
suggest = "role_name"
elif key == "userDataParameters":
suggest = "user_data_parameters"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkFunctionUserConfigurationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkFunctionUserConfigurationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
os_profile: Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile'] = None,
role_name: Optional[str] = None,
user_data_parameters: Optional[Any] = None):
"""
The network function user configuration.
:param Sequence['NetworkInterfaceResponse'] network_interfaces: The network interface configuration.
:param 'NetworkFunctionUserConfigurationResponseOsProfile' os_profile: Specifies the operating system settings for the role instance.
:param str role_name: The name of the network function role.
:param Any user_data_parameters: The user data parameters from the customer.
"""
# Only the fields that were actually provided are stored; omitted fields stay unset.
if network_interfaces is not None:
pulumi.set(__self__, "network_interfaces", network_interfaces)
if os_profile is not None:
pulumi.set(__self__, "os_profile", os_profile)
if role_name is not None:
pulumi.set(__self__, "role_name", role_name)
if user_data_parameters is not None:
pulumi.set(__self__, "user_data_parameters", user_data_parameters)
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
"""
The network interface configuration.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile']:
"""
Specifies the operating system settings for the role instance.
"""
return pulumi.get(self, "os_profile")
@property
@pulumi.getter(name="roleName")
def role_name(self) -> Optional[str]:
"""
The name of the network function role.
"""
return pulumi.get(self, "role_name")
@property
@pulumi.getter(name="userDataParameters")
def user_data_parameters(self) -> Optional[Any]:
"""
The user data parameters from the customer.
"""
return pulumi.get(self, "user_data_parameters")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponseOsProfile(dict):
"""
Specifies the operating system settings for the role instance.
"""
# Maps the camelCase wire key to the snake_case property and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "customData":
suggest = "custom_data"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponseOsProfile. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
custom_data: Optional[str] = None):
"""
Specifies the operating system settings for the role instance.
:param str custom_data: Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. <br><br> **Note: Do not pass any secrets or passwords in customData property** <br><br> This property cannot be updated after the VM is created. <br><br> customData is passed to the VM to be saved as a file. For more information see [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) <br><br> For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
"""
# Store only when provided; omitted fields leave no key behind.
if custom_data is not None:
pulumi.set(__self__, "custom_data", custom_data)
@property
@pulumi.getter(name="customData")
def custom_data(self) -> Optional[str]:
"""
Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. <br><br> **Note: Do not pass any secrets or passwords in customData property** <br><br> This property cannot be updated after the VM is created. <br><br> customData is passed to the VM to be saved as a file. For more information see [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) <br><br> For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
"""
return pulumi.get(self, "custom_data")
@pulumi.output_type
class NetworkInterfaceIPConfigurationResponse(dict):
"""
Network interface IP configuration properties.
"""
# Maps camelCase wire keys to their snake_case property names and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dnsServers":
suggest = "dns_servers"
elif key == "ipAddress":
suggest = "ip_address"
elif key == "ipAllocationMethod":
suggest = "ip_allocation_method"
elif key == "ipVersion":
suggest = "ip_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceIPConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkInterfaceIPConfigurationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkInterfaceIPConfigurationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
dns_servers: Optional[Sequence[str]] = None,
gateway: Optional[str] = None,
ip_address: Optional[str] = None,
ip_allocation_method: Optional[str] = None,
ip_version: Optional[str] = None,
subnet: Optional[str] = None):
"""
Network interface IP configuration properties.
:param Sequence[str] dns_servers: The list of DNS servers IP addresses.
:param str gateway: The value of the gateway.
:param str ip_address: The value of the IP address.
:param str ip_allocation_method: IP address allocation method.
:param str ip_version: IP address version.
:param str subnet: The value of the subnet.
"""
# Only the fields that were actually provided are stored; omitted fields stay unset.
if dns_servers is not None:
pulumi.set(__self__, "dns_servers", dns_servers)
if gateway is not None:
pulumi.set(__self__, "gateway", gateway)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if ip_allocation_method is not None:
pulumi.set(__self__, "ip_allocation_method", ip_allocation_method)
if ip_version is not None:
pulumi.set(__self__, "ip_version", ip_version)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
@property
@pulumi.getter(name="dnsServers")
def dns_servers(self) -> Optional[Sequence[str]]:
"""
The list of DNS servers IP addresses.
"""
return pulumi.get(self, "dns_servers")
@property
@pulumi.getter
def gateway(self) -> Optional[str]:
"""
The value of the gateway.
"""
return pulumi.get(self, "gateway")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
"""
The value of the IP address.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="ipAllocationMethod")
def ip_allocation_method(self) -> Optional[str]:
"""
IP address allocation method.
"""
return pulumi.get(self, "ip_allocation_method")
@property
@pulumi.getter(name="ipVersion")
def ip_version(self) -> Optional[str]:
"""
IP address version.
"""
return pulumi.get(self, "ip_version")
@property
@pulumi.getter
def subnet(self) -> Optional[str]:
"""
The value of the subnet.
"""
return pulumi.get(self, "subnet")
@pulumi.output_type
class NetworkInterfaceResponse(dict):
"""
Network interface properties.
"""
# Maps camelCase wire keys to their snake_case property names and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ipConfigurations":
suggest = "ip_configurations"
elif key == "macAddress":
suggest = "mac_address"
elif key == "networkInterfaceName":
suggest = "network_interface_name"
elif key == "vmSwitchType":
suggest = "vm_switch_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NetworkInterfaceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NetworkInterfaceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
mac_address: Optional[str] = None,
network_interface_name: Optional[str] = None,
vm_switch_type: Optional[str] = None):
"""
Network interface properties.
:param Sequence['NetworkInterfaceIPConfigurationResponse'] ip_configurations: A list of IP configurations of the network interface.
:param str mac_address: The MAC address of the network interface.
:param str network_interface_name: The name of the network interface.
:param str vm_switch_type: The type of the VM switch.
"""
# Only the fields that were actually provided are stored; omitted fields stay unset.
if ip_configurations is not None:
pulumi.set(__self__, "ip_configurations", ip_configurations)
if mac_address is not None:
pulumi.set(__self__, "mac_address", mac_address)
if network_interface_name is not None:
pulumi.set(__self__, "network_interface_name", network_interface_name)
if vm_switch_type is not None:
pulumi.set(__self__, "vm_switch_type", vm_switch_type)
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
A list of IP configurations of the network interface.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> Optional[str]:
"""
The MAC address of the network interface.
"""
return pulumi.get(self, "mac_address")
@property
@pulumi.getter(name="networkInterfaceName")
def network_interface_name(self) -> Optional[str]:
"""
The name of the network interface.
"""
return pulumi.get(self, "network_interface_name")
@property
@pulumi.getter(name="vmSwitchType")
def vm_switch_type(self) -> Optional[str]:
"""
The type of the VM switch.
"""
return pulumi.get(self, "vm_switch_type")
@pulumi.output_type
class OsDiskResponse(dict):
"""
Specifies information about the operating system disk used by the virtual machine. <br><br> For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
"""
# Maps camelCase wire keys to their snake_case property names and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "diskSizeGB":
suggest = "disk_size_gb"
elif key == "osType":
suggest = "os_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OsDiskResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OsDiskResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OsDiskResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
disk_size_gb: Optional[int] = None,
name: Optional[str] = None,
os_type: Optional[str] = None):
"""
Specifies information about the operating system disk used by the virtual machine. <br><br> For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
:param int disk_size_gb: Specifies the size of os disk in gigabytes. This is the fully expanded disk size needed of the VHD image on the ASE. This disk size should be greater than the size of the VHD provided in vhdUri.
:param str name: The VHD name.
:param str os_type: The OS type.
"""
# Only the fields that were actually provided are stored; omitted fields stay unset.
if disk_size_gb is not None:
pulumi.set(__self__, "disk_size_gb", disk_size_gb)
if name is not None:
pulumi.set(__self__, "name", name)
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
@property
@pulumi.getter(name="diskSizeGB")
def disk_size_gb(self) -> Optional[int]:
"""
Specifies the size of os disk in gigabytes. This is the fully expanded disk size needed of the VHD image on the ASE. This disk size should be greater than the size of the VHD provided in vhdUri.
"""
return pulumi.get(self, "disk_size_gb")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The VHD name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[str]:
"""
The OS type.
"""
return pulumi.get(self, "os_type")
@pulumi.output_type
class OsProfileResponse(dict):
    """
    Specifies the operating system settings for the role instance.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name and warn
        # the caller to use the property getter instead of dict access.
        suggest = None
        if key == "adminUsername":
            suggest = "admin_username"
        elif key == "customData":
            suggest = "custom_data"
        elif key == "customDataRequired":
            suggest = "custom_data_required"
        elif key == "linuxConfiguration":
            suggest = "linux_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OsProfileResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 admin_username: Optional[str] = None,
                 custom_data: Optional[str] = None,
                 custom_data_required: Optional[bool] = None,
                 linux_configuration: Optional['outputs.LinuxConfigurationResponse'] = None):
        """
        Specifies the operating system settings for the role instance.
        :param str admin_username: Specifies the name of the administrator account. <br><br> **Windows-only restriction:** Cannot end in "." <br><br> **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". <br><br> **Minimum-length (Linux):** 1 character <br><br> **Max-length (Linux):** 64 characters <br><br> **Max-length (Windows):** 20 characters <br><br><li> For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)<br><li> For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        :param str custom_data: Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. <br><br> **Note: Do not pass any secrets or passwords in customData property** <br><br> This property cannot be updated after the VM is created. <br><br> customData is passed to the VM to be saved as a file. For more information see [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) <br><br> For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
        :param bool custom_data_required: Indicates if custom data is required to deploy this role.
        :param 'LinuxConfigurationResponse' linux_configuration: Specifies the Linux operating system settings on the virtual machine. <br><br>For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) <br><br> For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        if admin_username is not None:
            pulumi.set(__self__, "admin_username", admin_username)
        if custom_data is not None:
            pulumi.set(__self__, "custom_data", custom_data)
        # custom_data_required defaults to True when omitted. After defaulting it
        # can never be None, so it is always stored (the generated code's second
        # "is not None" guard was dead and has been removed).
        if custom_data_required is None:
            custom_data_required = True
        pulumi.set(__self__, "custom_data_required", custom_data_required)
        if linux_configuration is not None:
            pulumi.set(__self__, "linux_configuration", linux_configuration)

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> Optional[str]:
        """
        Specifies the name of the administrator account. <br><br> **Windows-only restriction:** Cannot end in "." <br><br> **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". <br><br> **Minimum-length (Linux):** 1 character <br><br> **Max-length (Linux):** 64 characters <br><br> **Max-length (Windows):** 20 characters <br><br><li> For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)<br><li> For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        return pulumi.get(self, "admin_username")

    @property
    @pulumi.getter(name="customData")
    def custom_data(self) -> Optional[str]:
        """
        Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. <br><br> **Note: Do not pass any secrets or passwords in customData property** <br><br> This property cannot be updated after the VM is created. <br><br> customData is passed to the VM to be saved as a file. For more information see [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) <br><br> For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
        """
        return pulumi.get(self, "custom_data")

    @property
    @pulumi.getter(name="customDataRequired")
    def custom_data_required(self) -> Optional[bool]:
        """
        Indicates if custom data is required to deploy this role.
        """
        return pulumi.get(self, "custom_data_required")

    @property
    @pulumi.getter(name="linuxConfiguration")
    def linux_configuration(self) -> Optional['outputs.LinuxConfigurationResponse']:
        """
        Specifies the Linux operating system settings on the virtual machine. <br><br>For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) <br><br> For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        return pulumi.get(self, "linux_configuration")
@pulumi.output_type
class SshConfigurationResponse(dict):
    """
    SSH configuration for Linux based VMs running on Azure
    """
    # camelCase wire key -> snake_case property name, used to steer callers
    # away from raw dict access toward the property getters.
    @staticmethod
    def __key_warning(key: str):
        suggest = {"publicKeys": "public_keys"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SshConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 public_keys: Optional[Sequence['outputs.SshPublicKeyResponse']] = None):
        """
        SSH configuration for Linux based VMs running on Azure
        :param Sequence['SshPublicKeyResponse'] public_keys: The list of SSH public keys used to authenticate with linux based VMs.
        """
        # Guard clause: an omitted value leaves the key unset entirely.
        if public_keys is None:
            return
        pulumi.set(__self__, "public_keys", public_keys)

    @property
    @pulumi.getter(name="publicKeys")
    def public_keys(self) -> Optional[Sequence['outputs.SshPublicKeyResponse']]:
        """
        The list of SSH public keys used to authenticate with linux based VMs.
        """
        return pulumi.get(self, "public_keys")
@pulumi.output_type
class SshPublicKeyResponse(dict):
"""
Contains information about SSH certificate public key and the path on the Linux VM where the public key is placed.
"""
# Maps the camelCase wire key to the snake_case property and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "keyData":
suggest = "key_data"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SshPublicKeyResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SshPublicKeyResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SshPublicKeyResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
key_data: Optional[str] = None,
path: Optional[str] = None):
"""
Contains information about SSH certificate public key and the path on the Linux VM where the public key is placed.
:param str key_data: SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format. <br><br> For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
:param str path: Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys
"""
# Only the fields that were actually provided are stored; omitted fields stay unset.
if key_data is not None:
pulumi.set(__self__, "key_data", key_data)
if path is not None:
pulumi.set(__self__, "path", path)
@property
@pulumi.getter(name="keyData")
def key_data(self) -> Optional[str]:
"""
SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format. <br><br> For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
"""
return pulumi.get(self, "key_data")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys
"""
return pulumi.get(self, "path")
@pulumi.output_type
class StorageProfileResponse(dict):
"""
Specifies the storage settings for the virtual machine disks.
"""
# Maps camelCase wire keys to their snake_case property names and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dataDisks":
suggest = "data_disks"
elif key == "imageReference":
suggest = "image_reference"
elif key == "osDisk":
suggest = "os_disk"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StorageProfileResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StorageProfileResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StorageProfileResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
data_disks: Optional[Sequence['outputs.DataDiskResponse']] = None,
image_reference: Optional['outputs.ImageReferenceResponse'] = None,
os_disk: Optional['outputs.OsDiskResponse'] = None):
"""
Specifies the storage settings for the virtual machine disks.
:param Sequence['DataDiskResponse'] data_disks: Specifies the parameters that are used to add a data disk to a virtual machine.
:param 'ImageReferenceResponse' image_reference: The image reference properties.
:param 'OsDiskResponse' os_disk: Specifies information about the operating system disk used by the virtual machine.
"""
# Only the fields that were actually provided are stored; omitted fields stay unset.
if data_disks is not None:
pulumi.set(__self__, "data_disks", data_disks)
if image_reference is not None:
pulumi.set(__self__, "image_reference", image_reference)
if os_disk is not None:
pulumi.set(__self__, "os_disk", os_disk)
@property
@pulumi.getter(name="dataDisks")
def data_disks(self) -> Optional[Sequence['outputs.DataDiskResponse']]:
"""
Specifies the parameters that are used to add a data disk to a virtual machine.
"""
return pulumi.get(self, "data_disks")
@property
@pulumi.getter(name="imageReference")
def image_reference(self) -> Optional['outputs.ImageReferenceResponse']:
"""
The image reference properties.
"""
return pulumi.get(self, "image_reference")
@property
@pulumi.getter(name="osDisk")
def os_disk(self) -> Optional['outputs.OsDiskResponse']:
"""
Specifies information about the operating system disk used by the virtual machine.
"""
return pulumi.get(self, "os_disk")
@pulumi.output_type
class SubResourceResponse(dict):
    """
    Reference to another sub resource.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        Reference to another sub resource.
        :param str id: Resource ID.
        """
        # Guard clause: when no ID was supplied, leave the key unset.
        if id is None:
            return
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
# Maps camelCase wire keys to their snake_case property names and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
last_modified_at: Optional[str] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
:param str last_modified_at: The timestamp of resource last modification (UTC)
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
# Only the fields that were actually provided are stored; omitted fields stay unset.
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if last_modified_at is not None:
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type is not None:
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
| 44.239331
| 1,145
| 0.659668
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
# Public response/output types exported by this module.
__all__ = [
'CustomProfileResponse',
'DataDiskResponse',
'ImageReferenceResponse',
'LinuxConfigurationResponse',
'NetworkFunctionRoleConfigurationResponse',
'NetworkFunctionTemplateResponse',
'NetworkFunctionUserConfigurationResponse',
'NetworkFunctionUserConfigurationResponseOsProfile',
'NetworkInterfaceIPConfigurationResponse',
'NetworkInterfaceResponse',
'OsDiskResponse',
'OsProfileResponse',
'SshConfigurationResponse',
'SshPublicKeyResponse',
'StorageProfileResponse',
'SubResourceResponse',
'SystemDataResponse',
]
@pulumi.output_type
class CustomProfileResponse(dict):
"""
Output type holding an optional metadata configuration path.
"""
# Maps the camelCase wire key to the snake_case property and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "metadataConfigurationPath":
suggest = "metadata_configuration_path"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CustomProfileResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CustomProfileResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CustomProfileResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
metadata_configuration_path: Optional[str] = None):
"""
:param str metadata_configuration_path: Path to the metadata configuration; stored only when provided.
"""
if metadata_configuration_path is not None:
pulumi.set(__self__, "metadata_configuration_path", metadata_configuration_path)
@property
@pulumi.getter(name="metadataConfigurationPath")
def metadata_configuration_path(self) -> Optional[str]:
"""
The metadata configuration path, or None when it was never set.
"""
return pulumi.get(self, "metadata_configuration_path")
@pulumi.output_type
class DataDiskResponse(dict):
"""
Output type describing a data disk (create option, size in GB, and name).
"""
# Maps camelCase wire keys to their snake_case property names and warns on use.
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createOption":
suggest = "create_option"
elif key == "diskSizeGB":
suggest = "disk_size_gb"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DataDiskResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DataDiskResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DataDiskResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
create_option: Optional[str] = None,
disk_size_gb: Optional[int] = None,
name: Optional[str] = None):
"""
:param str create_option: Disk create option; stored only when provided.
:param int disk_size_gb: Disk size in gigabytes; stored only when provided.
:param str name: Disk name; stored only when provided.
"""
if create_option is not None:
pulumi.set(__self__, "create_option", create_option)
if disk_size_gb is not None:
pulumi.set(__self__, "disk_size_gb", disk_size_gb)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="createOption")
def create_option(self) -> Optional[str]:
"""
The disk create option, or None when it was never set.
"""
return pulumi.get(self, "create_option")
@property
@pulumi.getter(name="diskSizeGB")
def disk_size_gb(self) -> Optional[int]:
"""
The disk size in gigabytes, or None when it was never set.
"""
return pulumi.get(self, "disk_size_gb")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The disk name, or None when it was never set.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class ImageReferenceResponse(dict):
    """Output type for an image reference (`exact_version`, `offer`,
    `publisher`, `sku`, `version`); camelCase keys map to snake_case
    properties."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "exactVersion":
            suggest = "exact_version"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ImageReferenceResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 exact_version: Optional[str] = None,
                 offer: Optional[str] = None,
                 publisher: Optional[str] = None,
                 sku: Optional[str] = None,
                 version: Optional[str] = None):
        if exact_version is not None:
            pulumi.set(__self__, "exact_version", exact_version)
        if offer is not None:
            pulumi.set(__self__, "offer", offer)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter(name="exactVersion")
    def exact_version(self) -> Optional[str]:
        return pulumi.get(self, "exact_version")
    @property
    @pulumi.getter
    def offer(self) -> Optional[str]:
        return pulumi.get(self, "offer")
    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        return pulumi.get(self, "publisher")
    @property
    @pulumi.getter
    def sku(self) -> Optional[str]:
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        return pulumi.get(self, "version")
@pulumi.output_type
class LinuxConfigurationResponse(dict):
    """Output type holding an optional `ssh` sub-configuration
    (:class:`SshConfigurationResponse`)."""
    def __init__(__self__, *,
                 ssh: Optional['outputs.SshConfigurationResponse'] = None):
        if ssh is not None:
            pulumi.set(__self__, "ssh", ssh)
    @property
    @pulumi.getter
    def ssh(self) -> Optional['outputs.SshConfigurationResponse']:
        return pulumi.get(self, "ssh")
@pulumi.output_type
class NetworkFunctionRoleConfigurationResponse(dict):
    """Output type for a network-function role configuration: custom/os/storage
    profiles, network interfaces, role name/type, user-data parameters and
    template, and virtual machine size.  CamelCase wire keys map to the
    snake_case properties below."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "customProfile":
            suggest = "custom_profile"
        elif key == "networkInterfaces":
            suggest = "network_interfaces"
        elif key == "osProfile":
            suggest = "os_profile"
        elif key == "roleName":
            suggest = "role_name"
        elif key == "roleType":
            suggest = "role_type"
        elif key == "storageProfile":
            suggest = "storage_profile"
        elif key == "userDataParameters":
            suggest = "user_data_parameters"
        elif key == "userDataTemplate":
            suggest = "user_data_template"
        elif key == "virtualMachineSize":
            suggest = "virtual_machine_size"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionRoleConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 custom_profile: Optional['outputs.CustomProfileResponse'] = None,
                 network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
                 os_profile: Optional['outputs.OsProfileResponse'] = None,
                 role_name: Optional[str] = None,
                 role_type: Optional[str] = None,
                 storage_profile: Optional['outputs.StorageProfileResponse'] = None,
                 user_data_parameters: Optional[Any] = None,
                 user_data_template: Optional[Any] = None,
                 virtual_machine_size: Optional[str] = None):
        if custom_profile is not None:
            pulumi.set(__self__, "custom_profile", custom_profile)
        if network_interfaces is not None:
            pulumi.set(__self__, "network_interfaces", network_interfaces)
        if os_profile is not None:
            pulumi.set(__self__, "os_profile", os_profile)
        if role_name is not None:
            pulumi.set(__self__, "role_name", role_name)
        if role_type is not None:
            pulumi.set(__self__, "role_type", role_type)
        if storage_profile is not None:
            pulumi.set(__self__, "storage_profile", storage_profile)
        if user_data_parameters is not None:
            pulumi.set(__self__, "user_data_parameters", user_data_parameters)
        if user_data_template is not None:
            pulumi.set(__self__, "user_data_template", user_data_template)
        if virtual_machine_size is not None:
            pulumi.set(__self__, "virtual_machine_size", virtual_machine_size)
    @property
    @pulumi.getter(name="customProfile")
    def custom_profile(self) -> Optional['outputs.CustomProfileResponse']:
        return pulumi.get(self, "custom_profile")
    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
        return pulumi.get(self, "network_interfaces")
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.OsProfileResponse']:
        return pulumi.get(self, "os_profile")
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[str]:
        return pulumi.get(self, "role_name")
    @property
    @pulumi.getter(name="roleType")
    def role_type(self) -> Optional[str]:
        return pulumi.get(self, "role_type")
    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
        return pulumi.get(self, "storage_profile")
    @property
    @pulumi.getter(name="userDataParameters")
    def user_data_parameters(self) -> Optional[Any]:
        return pulumi.get(self, "user_data_parameters")
    @property
    @pulumi.getter(name="userDataTemplate")
    def user_data_template(self) -> Optional[Any]:
        return pulumi.get(self, "user_data_template")
    @property
    @pulumi.getter(name="virtualMachineSize")
    def virtual_machine_size(self) -> Optional[str]:
        return pulumi.get(self, "virtual_machine_size")
@pulumi.output_type
class NetworkFunctionTemplateResponse(dict):
    """Output type holding a sequence of role configurations
    (`network_function_role_configurations`)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "networkFunctionRoleConfigurations":
            suggest = "network_function_role_configurations"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionTemplateResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 network_function_role_configurations: Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']] = None):
        if network_function_role_configurations is not None:
            pulumi.set(__self__, "network_function_role_configurations", network_function_role_configurations)
    @property
    @pulumi.getter(name="networkFunctionRoleConfigurations")
    def network_function_role_configurations(self) -> Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']]:
        return pulumi.get(self, "network_function_role_configurations")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponse(dict):
    """Output type for a per-role user configuration: network interfaces,
    an OS profile, the role name, and opaque user-data parameters."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "networkInterfaces":
            suggest = "network_interfaces"
        elif key == "osProfile":
            suggest = "os_profile"
        elif key == "roleName":
            suggest = "role_name"
        elif key == "userDataParameters":
            suggest = "user_data_parameters"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionUserConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionUserConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
                 os_profile: Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile'] = None,
                 role_name: Optional[str] = None,
                 user_data_parameters: Optional[Any] = None):
        if network_interfaces is not None:
            pulumi.set(__self__, "network_interfaces", network_interfaces)
        if os_profile is not None:
            pulumi.set(__self__, "os_profile", os_profile)
        if role_name is not None:
            pulumi.set(__self__, "role_name", role_name)
        if user_data_parameters is not None:
            pulumi.set(__self__, "user_data_parameters", user_data_parameters)
    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
        return pulumi.get(self, "network_interfaces")
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile']:
        return pulumi.get(self, "os_profile")
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[str]:
        return pulumi.get(self, "role_name")
    @property
    @pulumi.getter(name="userDataParameters")
    def user_data_parameters(self) -> Optional[Any]:
        return pulumi.get(self, "user_data_parameters")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponseOsProfile(dict):
    """Nested OS-profile output type holding only `custom_data`."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "customData":
            suggest = "custom_data"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponseOsProfile. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 custom_data: Optional[str] = None):
        if custom_data is not None:
            pulumi.set(__self__, "custom_data", custom_data)
    @property
    @pulumi.getter(name="customData")
    def custom_data(self) -> Optional[str]:
        return pulumi.get(self, "custom_data")
@pulumi.output_type
class NetworkInterfaceIPConfigurationResponse(dict):
    """Output type for a NIC IP configuration: DNS servers, gateway,
    IP address, allocation method, IP version and subnet."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "dnsServers":
            suggest = "dns_servers"
        elif key == "ipAddress":
            suggest = "ip_address"
        elif key == "ipAllocationMethod":
            suggest = "ip_allocation_method"
        elif key == "ipVersion":
            suggest = "ip_version"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceIPConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkInterfaceIPConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkInterfaceIPConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 dns_servers: Optional[Sequence[str]] = None,
                 gateway: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 ip_allocation_method: Optional[str] = None,
                 ip_version: Optional[str] = None,
                 subnet: Optional[str] = None):
        if dns_servers is not None:
            pulumi.set(__self__, "dns_servers", dns_servers)
        if gateway is not None:
            pulumi.set(__self__, "gateway", gateway)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if ip_allocation_method is not None:
            pulumi.set(__self__, "ip_allocation_method", ip_allocation_method)
        if ip_version is not None:
            pulumi.set(__self__, "ip_version", ip_version)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
    @property
    @pulumi.getter(name="dnsServers")
    def dns_servers(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "dns_servers")
    @property
    @pulumi.getter
    def gateway(self) -> Optional[str]:
        return pulumi.get(self, "gateway")
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        return pulumi.get(self, "ip_address")
    @property
    @pulumi.getter(name="ipAllocationMethod")
    def ip_allocation_method(self) -> Optional[str]:
        return pulumi.get(self, "ip_allocation_method")
    @property
    @pulumi.getter(name="ipVersion")
    def ip_version(self) -> Optional[str]:
        return pulumi.get(self, "ip_version")
    @property
    @pulumi.getter
    def subnet(self) -> Optional[str]:
        return pulumi.get(self, "subnet")
@pulumi.output_type
class NetworkInterfaceResponse(dict):
    """Output type for a network interface: IP configurations, MAC address,
    interface name and VM switch type."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "ipConfigurations":
            suggest = "ip_configurations"
        elif key == "macAddress":
            suggest = "mac_address"
        elif key == "networkInterfaceName":
            suggest = "network_interface_name"
        elif key == "vmSwitchType":
            suggest = "vm_switch_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
                 mac_address: Optional[str] = None,
                 network_interface_name: Optional[str] = None,
                 vm_switch_type: Optional[str] = None):
        if ip_configurations is not None:
            pulumi.set(__self__, "ip_configurations", ip_configurations)
        if mac_address is not None:
            pulumi.set(__self__, "mac_address", mac_address)
        if network_interface_name is not None:
            pulumi.set(__self__, "network_interface_name", network_interface_name)
        if vm_switch_type is not None:
            pulumi.set(__self__, "vm_switch_type", vm_switch_type)
    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
        return pulumi.get(self, "ip_configurations")
    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> Optional[str]:
        return pulumi.get(self, "mac_address")
    @property
    @pulumi.getter(name="networkInterfaceName")
    def network_interface_name(self) -> Optional[str]:
        return pulumi.get(self, "network_interface_name")
    @property
    @pulumi.getter(name="vmSwitchType")
    def vm_switch_type(self) -> Optional[str]:
        return pulumi.get(self, "vm_switch_type")
@pulumi.output_type
class OsDiskResponse(dict):
    """Output type for an OS disk (`disk_size_gb`, `name`, `os_type`)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "diskSizeGB":
            suggest = "disk_size_gb"
        elif key == "osType":
            suggest = "os_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OsDiskResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        OsDiskResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        OsDiskResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 disk_size_gb: Optional[int] = None,
                 name: Optional[str] = None,
                 os_type: Optional[str] = None):
        if disk_size_gb is not None:
            pulumi.set(__self__, "disk_size_gb", disk_size_gb)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
    @property
    @pulumi.getter(name="diskSizeGB")
    def disk_size_gb(self) -> Optional[int]:
        return pulumi.get(self, "disk_size_gb")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        return pulumi.get(self, "os_type")
@pulumi.output_type
class OsProfileResponse(dict):
    """Output type for an OS profile: admin username, custom data (and
    whether it is required — defaults to True when omitted), and an
    optional Linux configuration."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "adminUsername":
            suggest = "admin_username"
        elif key == "customData":
            suggest = "custom_data"
        elif key == "customDataRequired":
            suggest = "custom_data_required"
        elif key == "linuxConfiguration":
            suggest = "linux_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OsProfileResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 admin_username: Optional[str] = None,
                 custom_data: Optional[str] = None,
                 custom_data_required: Optional[bool] = None,
                 linux_configuration: Optional['outputs.LinuxConfigurationResponse'] = None):
        if admin_username is not None:
            pulumi.set(__self__, "admin_username", admin_username)
        if custom_data is not None:
            pulumi.set(__self__, "custom_data", custom_data)
        # Generated default: an omitted custom_data_required is coerced to
        # True before being stored (so the second check is always taken).
        if custom_data_required is None:
            custom_data_required = True
        if custom_data_required is not None:
            pulumi.set(__self__, "custom_data_required", custom_data_required)
        if linux_configuration is not None:
            pulumi.set(__self__, "linux_configuration", linux_configuration)
    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> Optional[str]:
        return pulumi.get(self, "admin_username")
    @property
    @pulumi.getter(name="customData")
    def custom_data(self) -> Optional[str]:
        return pulumi.get(self, "custom_data")
    @property
    @pulumi.getter(name="customDataRequired")
    def custom_data_required(self) -> Optional[bool]:
        return pulumi.get(self, "custom_data_required")
    @property
    @pulumi.getter(name="linuxConfiguration")
    def linux_configuration(self) -> Optional['outputs.LinuxConfigurationResponse']:
        return pulumi.get(self, "linux_configuration")
@pulumi.output_type
class SshConfigurationResponse(dict):
    """Output type holding a sequence of SSH public keys (`public_keys`)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "publicKeys":
            suggest = "public_keys"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SshConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 public_keys: Optional[Sequence['outputs.SshPublicKeyResponse']] = None):
        if public_keys is not None:
            pulumi.set(__self__, "public_keys", public_keys)
    @property
    @pulumi.getter(name="publicKeys")
    def public_keys(self) -> Optional[Sequence['outputs.SshPublicKeyResponse']]:
        return pulumi.get(self, "public_keys")
@pulumi.output_type
class SshPublicKeyResponse(dict):
    """Output type for a single SSH public key (`key_data`, `path`)."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "keyData":
            suggest = "key_data"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SshPublicKeyResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SshPublicKeyResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SshPublicKeyResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 key_data: Optional[str] = None,
                 path: Optional[str] = None):
        if key_data is not None:
            pulumi.set(__self__, "key_data", key_data)
        if path is not None:
            pulumi.set(__self__, "path", path)
    @property
    @pulumi.getter(name="keyData")
    def key_data(self) -> Optional[str]:
        return pulumi.get(self, "key_data")
    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        return pulumi.get(self, "path")
@pulumi.output_type
class StorageProfileResponse(dict):
    """Output type for a storage profile: data disks, image reference and
    OS disk."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "dataDisks":
            suggest = "data_disks"
        elif key == "imageReference":
            suggest = "image_reference"
        elif key == "osDisk":
            suggest = "os_disk"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StorageProfileResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StorageProfileResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StorageProfileResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 data_disks: Optional[Sequence['outputs.DataDiskResponse']] = None,
                 image_reference: Optional['outputs.ImageReferenceResponse'] = None,
                 os_disk: Optional['outputs.OsDiskResponse'] = None):
        if data_disks is not None:
            pulumi.set(__self__, "data_disks", data_disks)
        if image_reference is not None:
            pulumi.set(__self__, "image_reference", image_reference)
        if os_disk is not None:
            pulumi.set(__self__, "os_disk", os_disk)
    @property
    @pulumi.getter(name="dataDisks")
    def data_disks(self) -> Optional[Sequence['outputs.DataDiskResponse']]:
        return pulumi.get(self, "data_disks")
    @property
    @pulumi.getter(name="imageReference")
    def image_reference(self) -> Optional['outputs.ImageReferenceResponse']:
        return pulumi.get(self, "image_reference")
    @property
    @pulumi.getter(name="osDisk")
    def os_disk(self) -> Optional['outputs.OsDiskResponse']:
        return pulumi.get(self, "os_disk")
@pulumi.output_type
class SubResourceResponse(dict):
    """Output type for a sub-resource reference holding only `id`."""
    def __init__(__self__, *,
                 id: Optional[str] = None):
        if id is not None:
            pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        return pulumi.get(self, "id")
@pulumi.output_type
class SystemDataResponse(dict):
    """Output type wrapping system metadata fields (createdAt/By/ByType,
    lastModifiedAt/By/ByType) as snake_case properties."""
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase wire keys to their snake_case property names.
        suggest = None
        if key == "createdAt":
            suggest = "created_at"
        elif key == "createdBy":
            suggest = "created_by"
        elif key == "createdByType":
            suggest = "created_by_type"
        elif key == "lastModifiedAt":
            suggest = "last_modified_at"
        elif key == "lastModifiedBy":
            suggest = "last_modified_by"
        elif key == "lastModifiedByType":
            suggest = "last_modified_by_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 created_at: Optional[str] = None,
                 created_by: Optional[str] = None,
                 created_by_type: Optional[str] = None,
                 last_modified_at: Optional[str] = None,
                 last_modified_by: Optional[str] = None,
                 last_modified_by_type: Optional[str] = None):
        if created_at is not None:
            pulumi.set(__self__, "created_at", created_at)
        if created_by is not None:
            pulumi.set(__self__, "created_by", created_by)
        if created_by_type is not None:
            pulumi.set(__self__, "created_by_type", created_by_type)
        if last_modified_at is not None:
            pulumi.set(__self__, "last_modified_at", last_modified_at)
        if last_modified_by is not None:
            pulumi.set(__self__, "last_modified_by", last_modified_by)
        if last_modified_by_type is not None:
            pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        return pulumi.get(self, "created_by")
    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        return pulumi.get(self, "created_by_type")
    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        return pulumi.get(self, "last_modified_at")
    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[str]:
        return pulumi.get(self, "last_modified_by")
    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        return pulumi.get(self, "last_modified_by_type")
| true
| true
|
f7145cc12aed42e52b811d6e792bdbbe823aba63
| 9,103
|
py
|
Python
|
rodnet/models/backbones/hgwi.py
|
zhengzangw/RODNet
|
eca5f2bd1f3051c2b823d279532ddafa71b009c1
|
[
"MIT"
] | null | null | null |
rodnet/models/backbones/hgwi.py
|
zhengzangw/RODNet
|
eca5f2bd1f3051c2b823d279532ddafa71b009c1
|
[
"MIT"
] | null | null | null |
rodnet/models/backbones/hgwi.py
|
zhengzangw/RODNet
|
eca5f2bd1f3051c2b823d279532ddafa71b009c1
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class RadarStackedHourglass(nn.Module):
    """Stacked-hourglass radar backbone.

    A three-layer Conv3d+BN+ReLU stem (2 -> 32 -> 64 -> 160 channels) is
    followed by ``stacked_num`` hourglass stages.  Each stage is
    [encoder, decoder, 160->n_class head, n_class->160 remap]; the head's
    sigmoid confidence map is collected per stage, and (except for the
    last stage) remapped back to 160 channels and added to the features.

    Returns a list with one confidence-map tensor per stage.
    """

    def __init__(self, n_class, stacked_num=1):
        super(RadarStackedHourglass, self).__init__()
        self.stacked_num = stacked_num
        # Stem: temporal kernel 9, spatial 5x5, "same" padding throughout.
        stem_channels = (2, 32, 64, 160)
        stem = [
            nn.Conv3d(
                in_channels=cin,
                out_channels=cout,
                kernel_size=(9, 5, 5),
                stride=(1, 1, 1),
                padding=(4, 2, 2),
            )
            for cin, cout in zip(stem_channels[:-1], stem_channels[1:])
        ]
        self.conv1a, self.conv1b, self.conv1c = stem
        stages = []
        for _ in range(stacked_num):
            stages.append(
                nn.ModuleList(
                    [
                        RODEncode(),
                        RODDecode(),
                        nn.Conv3d(
                            in_channels=160,
                            out_channels=n_class,
                            kernel_size=(9, 5, 5),
                            stride=(1, 1, 1),
                            padding=(4, 2, 2),
                        ),
                        nn.Conv3d(
                            in_channels=n_class,
                            out_channels=160,
                            kernel_size=(9, 5, 5),
                            stride=(1, 1, 1),
                            padding=(4, 2, 2),
                        ),
                    ]
                )
            )
        self.hourglass = nn.ModuleList(stages)
        self.relu = nn.ReLU()
        self.bn1a = nn.BatchNorm3d(num_features=32)
        self.bn1b = nn.BatchNorm3d(num_features=64)
        self.bn1c = nn.BatchNorm3d(num_features=160)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Stem feature extraction.
        for conv, bn in (
            (self.conv1a, self.bn1a),
            (self.conv1b, self.bn1b),
            (self.conv1c, self.bn1c),
        ):
            x = self.relu(bn(conv(x)))
        out = []
        for i, (encode, decode, head, remap) in enumerate(self.hourglass):
            x, x1, x2, x3 = encode(x)
            x = decode(x, x1, x2, x3)
            confmap = head(x)
            out.append(self.sigmoid(confmap))
            if i < self.stacked_num - 1:
                # Feed this stage's prediction back into the feature stream.
                x = x + remap(confmap)
        return out
class InceptionLayerConcat(nn.Module):
    """Inception-style 3D convolution block.

    Three parallel branches share the same 2D spatial kernel
    (``kernal_size``) but use temporal kernels of 5, 9 and 13; their
    outputs (32 + 64 + 64 channels) are concatenated on the channel axis.

    Kernal size: for 2d kernal size, since the kernal size in temporal
    domain will be fixed.
    """

    def __init__(self, kernal_size, in_channel, stride):
        super(InceptionLayerConcat, self).__init__()
        pad_x = kernal_size[0] // 2
        pad_y = kernal_size[1] // 2

        def make_conv(cin, cout, t_kernel, t_pad, conv_stride):
            # All branches share the spatial kernel/padding; only the
            # temporal extent and stride vary.
            return nn.Conv3d(
                in_channels=cin,
                out_channels=cout,
                kernel_size=(t_kernel, kernal_size[0], kernal_size[1]),
                stride=conv_stride,
                padding=(t_pad, pad_x, pad_y),
            )

        self.branch1 = make_conv(in_channel, 32, 5, 2, stride)
        self.branch2a = make_conv(in_channel, 64, 5, 2, (1, 1, 1))
        self.branch2b = make_conv(64, 64, 9, 4, stride)
        self.branch3a = make_conv(in_channel, 64, 5, 2, (1, 1, 1))
        self.branch3b = make_conv(64, 64, 13, 6, stride)

    def forward(self, x):
        outputs = (
            self.branch1(x),
            self.branch2b(self.branch2a(x)),
            self.branch3b(self.branch3a(x)),
        )
        return torch.cat(outputs, 1)
class RODEncode(nn.Module):
    """Hourglass encoder.

    Three Inception stages each halve the spatial size (stride (1, 2, 2))
    while keeping 160 channels; a parallel ``skip_inception*`` stage taken
    from the same input produces a matching skip tensor at that scale.
    Returns the bottleneck ``x`` (1/8 spatial size) plus skips ``x1``
    (1/2), ``x2`` (1/4) and ``x3`` (1/8), which RODDecode adds back in
    reverse order.
    """
    def __init__(self):
        super(RODEncode, self).__init__()
        self.inception1 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.inception2 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.inception3 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception1 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception2 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception3 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        # self.conv4a = nn.Conv3d(in_channels=64, out_channels=64,
        #                         kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
        # self.conv4b = nn.Conv3d(in_channels=64, out_channels=64,
        #                         kernel_size=(9, 5, 5), stride=(1, 2, 2), padding=(4, 2, 2))
        # self.conv5a = nn.Conv3d(in_channels=64, out_channels=64,
        #                         kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
        # self.conv5b = nn.Conv3d(in_channels=64, out_channels=64,
        #                         kernel_size=(9, 5, 5), stride=(1, 2, 2), padding=(4, 2, 2))
        self.bn1 = nn.BatchNorm3d(num_features=160)
        self.bn2 = nn.BatchNorm3d(num_features=160)
        self.bn3 = nn.BatchNorm3d(num_features=160)
        self.skip_bn1 = nn.BatchNorm3d(num_features=160)
        self.skip_bn2 = nn.BatchNorm3d(num_features=160)
        self.skip_bn3 = nn.BatchNorm3d(num_features=160)
        # self.bn4a = nn.BatchNorm3d(num_features=64)
        # self.bn4b = nn.BatchNorm3d(num_features=64)
        # self.bn5a = nn.BatchNorm3d(num_features=64)
        # self.bn5b = nn.BatchNorm3d(num_features=64)
        self.relu = nn.ReLU()
    def forward(self, x):
        # NOTE(review): each skip tensor is computed from x BEFORE the
        # matching downsampling stage, so x1/x2/x3 sit at 1/2, 1/4 and 1/8
        # spatial size respectively (all skip stages also stride (1, 2, 2)).
        x1 = self.relu(self.skip_bn1(self.skip_inception1(x)))
        x = self.relu(
            self.bn1(self.inception1(x))
        )  # spatial size halved; channels stay 160 (stale "(B, 2, ...)" note removed)
        x2 = self.relu(self.skip_bn2(self.skip_inception2(x)))
        x = self.relu(
            self.bn2(self.inception2(x))
        )  # spatial size halved again (now 1/4)
        x3 = self.relu(self.skip_bn3(self.skip_inception3(x)))
        x = self.relu(
            self.bn3(self.inception3(x))
        )  # spatial size halved again (now 1/8)
        return x, x1, x2, x3
class RODDecode(nn.Module):
    """Hourglass decoder.

    Three (transposed-conv upsample, Conv3d refine) pairs at 160 channels.
    Each stage adds the matching encoder skip tensor (x3 deepest first,
    then x2, then x1), upsamples the spatial size by 2 (temporal size is
    preserved), and refines with PReLU activations throughout.
    """

    def __init__(self):
        super(RODDecode, self).__init__()

        def upsample():
            # Spatial x2; (kernel 6, stride 2, pad 2) doubles H and W.
            return nn.ConvTranspose3d(
                in_channels=160,
                out_channels=160,
                kernel_size=(3, 6, 6),
                stride=(1, 2, 2),
                padding=(1, 2, 2),
            )

        def refine():
            # Same-size Conv3d, temporal kernel 9, spatial 5x5.
            return nn.Conv3d(
                in_channels=160,
                out_channels=160,
                kernel_size=(9, 5, 5),
                stride=(1, 1, 1),
                padding=(4, 2, 2),
            )

        self.convt1 = upsample()
        self.convt2 = upsample()
        self.convt3 = upsample()
        self.conv1 = refine()
        self.conv2 = refine()
        self.conv3 = refine()
        self.prelu = nn.PReLU()
        self.sigmoid = nn.Sigmoid()  # registered for parity; not used in forward

    def forward(self, x, x1, x2, x3):
        for skip, convt, conv in (
            (x3, self.convt1, self.conv1),
            (x2, self.convt2, self.conv2),
            (x1, self.convt3, self.conv3),
        ):
            x = self.prelu(convt(x + skip))
            x = self.prelu(conv(x))
        return x
| 34.612167
| 101
| 0.494782
|
import torch
import torch.nn as nn
class RadarStackedHourglass(nn.Module):
def __init__(self, n_class, stacked_num=1):
super(RadarStackedHourglass, self).__init__()
self.stacked_num = stacked_num
self.conv1a = nn.Conv3d(
in_channels=2,
out_channels=32,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1b = nn.Conv3d(
in_channels=32,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1c = nn.Conv3d(
in_channels=64,
out_channels=160,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.hourglass = []
for i in range(stacked_num):
self.hourglass.append(
nn.ModuleList(
[
RODEncode(),
RODDecode(),
nn.Conv3d(
in_channels=160,
out_channels=n_class,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
),
nn.Conv3d(
in_channels=n_class,
out_channels=160,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
),
]
)
)
self.hourglass = nn.ModuleList(self.hourglass)
self.relu = nn.ReLU()
self.bn1a = nn.BatchNorm3d(num_features=32)
self.bn1b = nn.BatchNorm3d(num_features=64)
self.bn1c = nn.BatchNorm3d(num_features=160)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.bn1a(self.conv1a(x)))
x = self.relu(self.bn1b(self.conv1b(x)))
x = self.relu(self.bn1c(self.conv1c(x)))
out = []
for i in range(self.stacked_num):
x, x1, x2, x3 = self.hourglass[i][0](x)
x = self.hourglass[i][1](x, x1, x2, x3)
confmap = self.hourglass[i][2](x)
out.append(self.sigmoid(confmap))
if i < self.stacked_num - 1:
confmap_ = self.hourglass[i][3](confmap)
x = x + confmap_
return out
class InceptionLayerConcat(nn.Module):
    """Inception-style 3D block with three parallel branches.

    The branches use temporal kernel depths 5, 9 and 13; their outputs
    (32 + 64 + 64 = 160 channels) are concatenated on the channel axis.
    Only the final conv of each branch applies the requested ``stride``.
    """
    def __init__(self, kernal_size, in_channel, stride):
        # ``kernal_size`` (sic, original spelling) is the (H, W) spatial
        # kernel extent; padding is half the kernel so spatial size is
        # preserved when stride is 1.
        super(InceptionLayerConcat, self).__init__()
        paddingX = kernal_size[0] // 2
        paddingY = kernal_size[1] // 2
        self.branch1 = nn.Conv3d(
            in_channels=in_channel,
            out_channels=32,
            kernel_size=(5, kernal_size[0], kernal_size[1]),
            stride=stride,
            padding=(2, paddingX, paddingY),
        )
        self.branch2a = nn.Conv3d(
            in_channels=in_channel,
            out_channels=64,
            kernel_size=(5, kernal_size[0], kernal_size[1]),
            stride=(1, 1, 1),
            padding=(2, paddingX, paddingY),
        )
        self.branch2b = nn.Conv3d(
            in_channels=64,
            out_channels=64,
            kernel_size=(9, kernal_size[0], kernal_size[1]),
            stride=stride,
            padding=(4, paddingX, paddingY),
        )
        self.branch3a = nn.Conv3d(
            in_channels=in_channel,
            out_channels=64,
            kernel_size=(5, kernal_size[0], kernal_size[1]),
            stride=(1, 1, 1),
            padding=(2, paddingX, paddingY),
        )
        self.branch3b = nn.Conv3d(
            in_channels=64,
            out_channels=64,
            kernel_size=(13, kernal_size[0], kernal_size[1]),
            stride=stride,
            padding=(6, paddingX, paddingY),
        )
    def forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2a(x)
        branch2 = self.branch2b(branch2)
        branch3 = self.branch3a(x)
        branch3 = self.branch3b(branch3)
        # Concatenate along the channel dimension (dim=1).
        return torch.cat((branch1, branch2, branch3), 1)
class RODEncode(nn.Module):
    """Hourglass encoder: three inception stages, each halving H and W.

    Every stage also has a parallel "skip" inception branch (same stride)
    whose output is handed to the decoder; forward returns the bottleneck
    plus the three skip tensors.
    """
    def __init__(self):
        super(RODEncode, self).__init__()
        self.inception1 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.inception2 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.inception3 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception1 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception2 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception3 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.bn1 = nn.BatchNorm3d(num_features=160)
        self.bn2 = nn.BatchNorm3d(num_features=160)
        self.bn3 = nn.BatchNorm3d(num_features=160)
        self.skip_bn1 = nn.BatchNorm3d(num_features=160)
        self.skip_bn2 = nn.BatchNorm3d(num_features=160)
        self.skip_bn3 = nn.BatchNorm3d(num_features=160)
        self.relu = nn.ReLU()
    def forward(self, x):
        # Each skip tensor is computed from the *input* of its stage, so it
        # matches the spatial size of that stage's (downsampled) output.
        x1 = self.relu(self.skip_bn1(self.skip_inception1(x)))
        x = self.relu(
            self.bn1(self.inception1(x))
        )
        x2 = self.relu(self.skip_bn2(self.skip_inception2(x)))
        x = self.relu(
            self.bn2(self.inception2(x))
        )
        x3 = self.relu(self.skip_bn3(self.skip_inception3(x)))
        x = self.relu(
            self.bn3(self.inception3(x))
        )
        return x, x1, x2, x3
class RODDecode(nn.Module):
    """Hourglass decoder: three (skip-add, transposed conv, conv) stages.

    Each ConvTranspose3d has stride (1, 2, 2) with kernel (3, 6, 6) and
    padding (1, 2, 2), which exactly doubles H and W while preserving the
    temporal dimension; channels stay at 160 throughout.
    """
    def __init__(self):
        super(RODDecode, self).__init__()
        self.convt1 = nn.ConvTranspose3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(3, 6, 6),
            stride=(1, 2, 2),
            padding=(1, 2, 2),
        )
        self.convt2 = nn.ConvTranspose3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(3, 6, 6),
            stride=(1, 2, 2),
            padding=(1, 2, 2),
        )
        self.convt3 = nn.ConvTranspose3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(3, 6, 6),
            stride=(1, 2, 2),
            padding=(1, 2, 2),
        )
        self.conv1 = nn.Conv3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.conv2 = nn.Conv3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.conv3 = nn.Conv3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.prelu = nn.PReLU()
        # NOTE(review): self.sigmoid is registered but never used in
        # forward() - possibly leftover from an earlier head design.
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, x1, x2, x3):
        # Fuse skips in reverse order of the encoder (deepest skip first).
        x = self.prelu(
            self.convt1(x + x3)
        )
        x = self.prelu(self.conv1(x))
        x = self.prelu(
            self.convt2(x + x2)
        )
        x = self.prelu(self.conv2(x))
        x = self.prelu(self.convt3(x + x1))
        x = self.prelu(self.conv3(x))
        return x
| true
| true
|
f7145d06775df411d8b6bbed45d9cb10c999cfeb
| 203,536
|
py
|
Python
|
salt/modules/file.py
|
sacren/salt
|
887336c6deaaad6f9ad4948b69472bd043962d56
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/file.py
|
sacren/salt
|
887336c6deaaad6f9ad4948b69472bd043962d56
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/file.py
|
sacren/salt
|
887336c6deaaad6f9ad4948b69472bd043962d56
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Manage information about regular files, directories,
and special files on the minion, set/read user,
group, mode, and data
'''
# TODO: We should add the capability to do u+r type operations here
# some time in the future
from __future__ import absolute_import, print_function
# Import python libs
import datetime
import difflib
import errno
import fileinput
import fnmatch
import itertools
import logging
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import glob
import hashlib
import mmap
from collections import Iterable, Mapping
from functools import reduce # pylint: disable=redefined-builtin
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
import grp
import pwd
except ImportError:
pass
# Import salt libs
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.locales
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.user
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
log = logging.getLogger(__name__)
# Tell the salt loader to expose ``makedirs_`` as ``file.makedirs``
# (the trailing underscore avoids shadowing os.makedirs-style names here).
__func_alias__ = {
    'makedirs_': 'makedirs'
}
def __virtual__():
    '''
    Load this module only on POSIX-like platforms.

    Windows minions are handled by the dedicated win_file module instead.
    '''
    if not salt.utils.platform.is_windows():
        return True
    return (
        False,
        'The file execution module cannot be loaded: only available on '
        'non-Windows systems - use win_file instead.'
    )
def __clean_tmp(sfn):
    '''
    Remove a rendered-template temp file, but never touch anything that
    lives under a configured file_roots directory (any saltenv).
    '''
    tmp_prefix = os.path.join(tempfile.gettempdir(),
                              salt.utils.files.TEMPFILE_PREFIX)
    if not sfn.startswith(tmp_prefix):
        return
    roots = itertools.chain.from_iterable(
        six.itervalues(__opts__['file_roots']))
    if any(sfn.startswith(root) for root in roots):
        # The path doubles as fileserver content; leave it alone.
        return
    if os.path.exists(sfn):
        os.remove(sfn)
def _error(ret, err_msg):
'''
Common function for setting error information for return dicts
'''
ret['result'] = False
ret['comment'] = err_msg
return ret
def _binary_replace(old, new):
    '''
    Describe a binary<->text transition between two files.

    Performs no diffing; it only classifies each file as text or binary and
    returns a human-readable note when at least one of them is binary, or an
    empty string when both are text. Call this only AFTER the files are
    known to differ.
    '''
    old_is_text = __utils__['files.is_text'](old)
    new_is_text = __utils__['files.is_text'](new)
    if old_is_text and new_is_text:
        return u''
    if not old_is_text and not new_is_text:
        return u'Replace binary file'
    if not old_is_text:
        return u'Replace binary file with text file'
    return u'Replace text file with binary file'
def _get_bkroot():
    '''
    Return the minion-cache backup directory (``<cachedir>/file_backup``).
    '''
    cachedir = __salt__['config.get']('cachedir')
    return os.path.join(cachedir, 'file_backup')
def _splitlines_preserving_trailing_newline(str):
'''
Returns a list of the lines in the string, breaking at line boundaries and
preserving a trailing newline (if present).
Essentially, this works like ``str.striplines(False)`` but preserves an
empty line at the end. This is equivalent to the following code:
.. code-block:: python
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
'''
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
return lines
def gid_to_group(gid):
    '''
    Convert the group id to the group name on this system

    gid
        gid to convert to a group name

    CLI Example:

    .. code-block:: bash

        salt '*' file.gid_to_group 0
    '''
    try:
        gid = int(gid)
    except ValueError:
        # Not numeric - maybe it is already a group name; resolve it first.
        gid = group_to_gid(gid)
    if gid == '':
        # Unresolvable; no point asking grp.
        return ''
    try:
        entry = grp.getgrgid(gid)
    except (KeyError, NameError):
        # Unknown gid (or grp unavailable): hand the gid back unchanged.
        return gid
    return entry.gr_name
def group_to_gid(group):
    '''
    Convert the group to the gid on this system

    group
        group to convert to its gid

    CLI Example:

    .. code-block:: bash

        salt '*' file.group_to_gid root
    '''
    if group is None:
        return ''
    if isinstance(group, int):
        # Already a gid; pass it straight through.
        return group
    try:
        return grp.getgrnam(group).gr_gid
    except KeyError:
        return ''
def get_gid(path, follow_symlinks=True):
    '''
    Return the id of the group that owns a given file

    path
        file or directory of which to get the gid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_gid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    st = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return st.get('gid', -1)
def get_group(path, follow_symlinks=True):
    '''
    Return the group that owns a given file

    path
        file or directory of which to get the group

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_group /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    st = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return st.get('group', False)
def uid_to_user(uid):
    '''
    Convert a uid to a user name

    uid
        uid to convert to a username

    CLI Example:

    .. code-block:: bash

        salt '*' file.uid_to_user 0
    '''
    try:
        entry = pwd.getpwuid(uid)
    except (KeyError, NameError):
        # Unknown uid (or pwd unavailable): hand the uid back unchanged.
        return uid
    return entry.pw_name
def user_to_uid(user):
    '''
    Convert user name to a uid

    user
        user name to convert to its uid

    CLI Example:

    .. code-block:: bash

        salt '*' file.user_to_uid root
    '''
    if user is None:
        # Default to the user the minion process runs as.
        user = salt.utils.user.get_user()
    if isinstance(user, int):
        # Already a uid; pass it straight through.
        return user
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        return ''
def get_uid(path, follow_symlinks=True):
    '''
    Return the id of the user that owns a given file

    path
        file or directory of which to get the uid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_uid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    st = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return st.get('uid', -1)
def get_user(path, follow_symlinks=True):
    '''
    Return the user that owns a given file

    path
        file or directory of which to get the user

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_user /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    st = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return st.get('user', False)
def get_mode(path, follow_symlinks=True):
    '''
    Return the mode of a file

    path
        file or directory of which to get the mode

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_mode /etc/passwd

    .. versionchanged:: 2014.1.0
        ``follow_symlinks`` option added
    '''
    st = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return st.get('mode', '')
def set_mode(path, mode):
    '''
    Set the mode of a file

    path
        file or directory of which to set the mode

    mode
        mode to set the path to

    CLI Example:

    .. code-block:: bash

        salt '*' file.set_mode /etc/passwd 0644
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise CommandExecutionError('{0}: File not found'.format(path))
    # Accept ints or strings, with or without a 0/0o/0O octal prefix.
    mode = str(mode).lstrip('0Oo') or '0'
    try:
        os.chmod(path, int(mode, 8))
    except Exception:
        return 'Invalid Mode ' + mode
    return get_mode(path)
def lchown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group without following
    symlinks.

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chown /etc/passwd root root
    '''
    path = os.path.expanduser(path)
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    # -1 tells os.lchown "leave this id unchanged".
    err = ''
    if uid == '':
        if user:
            err += 'User does not exist\n'
        else:
            uid = -1
    if gid == '':
        if group:
            err += 'Group does not exist\n'
        else:
            gid = -1
    # NOTE(review): ``err`` is accumulated but never returned or raised,
    # unlike chown() below - a nonexistent user/group silently falls through
    # to os.lchown with the '' id. Looks unintentional; confirm before
    # changing, as callers may rely on the current behavior.
    return os.lchown(path, uid, gid)
def chown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chown /etc/passwd root root
    '''
    path = os.path.expanduser(path)
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    problems = []
    if uid == '':
        if user:
            problems.append('User does not exist\n')
        else:
            # No user requested: -1 leaves the owner unchanged.
            uid = -1
    if gid == '':
        if group:
            problems.append('Group does not exist\n')
        else:
            gid = -1
    if not os.path.exists(path):
        try:
            # Broken symlinks fail os.path.exists() but can still be chowned
            # via the link itself.
            return os.lchown(path, uid, gid)
        except OSError:
            pass
        problems.append('File not found')
    if problems:
        return ''.join(problems)
    return os.chown(path, uid, gid)
def chgrp(path, group):
    '''
    Change the group of a file

    path
        path to the file or directory

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chgrp /etc/passwd root
    '''
    path = os.path.expanduser(path)
    # Keep the current owner, change only the group.
    current_owner = get_user(path)
    return chown(path, current_owner, group)
def _cmp_attrs(path, attrs):
    '''
    .. versionadded: Oxygen

    Compare attributes of a given file to given attributes.
    Returns a pair (list) where first item are attributes to
    add and second item are to be removed.

    path
        path to file to compare attributes with.

    attrs
        string of attributes to compare against a given file
    '''
    existing = lsattr(path).get(path, '')
    to_remove = ''.join(c for c in existing if c not in attrs)
    to_add = ''.join(c for c in attrs if c not in existing)
    # Empty strings collapse to None, matching the historical return shape.
    return [to_add or None, to_remove or None]
def lsattr(path):
    '''
    .. versionadded: Oxygen

    Obtain the modifiable attributes of the given file. If path
    is to a directory, an empty list is returned.

    path
        path to file to obtain attributes of. File/directory must exist.

    CLI Example:

    .. code-block:: bash

        salt '*' file.lsattr foo1.txt
    '''
    if not os.path.exists(path):
        raise SaltInvocationError("File or directory does not exist.")
    output = __salt__['cmd.run'](['lsattr', path], python_shell=False)
    results = {}
    for line in output.splitlines():
        # lsattr prefixes its own error output with 'lsattr'; skip those.
        if line.startswith('lsattr'):
            continue
        flags, fname = line.split(None, 1)
        results[fname] = re.findall(r"[acdijstuADST]", flags)
    return results
def chattr(*args, **kwargs):
    '''
    .. versionadded: Oxygen

    Change the attributes of files

    *args
        list of files to modify attributes of

    **kwargs - the following are valid <key,value> pairs:

    operator
        add|remove
        determines whether attributes should be added or removed from files

    attributes
        acdijstuADST
        string of characters representing attributes to add/remove from files

    version
        a version number to assign to the files

    flags
        [RVf]
        flags to assign to chattr (recurse, verbose, suppress most errors)

    CLI Example:

    .. code-block:: bash

        salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai
        salt '*' file.chattr foo3.txt operator=remove attributes=i version=2
    '''
    # Quote each filename unless the caller already quoted it.
    args = [arg if salt.utils.stringutils.is_quoted(arg) else '"{0}"'.format(arg)
            for arg in args]
    operator = kwargs.pop('operator', None)
    attributes = kwargs.pop('attributes', None)
    flags = kwargs.pop('flags', None)
    version = kwargs.pop('version', None)
    if (operator is None) or (operator not in ['add', 'remove']):
        raise SaltInvocationError(
            "Need an operator: 'add' or 'remove' to modify attributes.")
    if attributes is None:
        raise SaltInvocationError("Need attributes: [AacDdijsTtSu]")
    # chattr uses +X to add and -X to remove attribute X.
    if operator == "add":
        attrs = '+{0}'.format(attributes)
    elif operator == "remove":
        attrs = '-{0}'.format(attributes)
    flgs = ''
    if flags is not None:
        flgs = '-{0}'.format(flags)
    vrsn = ''
    if version is not None:
        vrsn = '-v {0}'.format(version)
    # NOTE(review): the command is assembled as a single string but executed
    # with python_shell=False; the quoting above suggests shell-style
    # splitting is expected. Confirm how cmd.run tokenizes string commands
    # before touching this.
    cmd = 'chattr {0} {1} {2} {3}'.format(attrs, flgs, vrsn, ' '.join(args))
    result = __salt__['cmd.run'](cmd, python_shell=False)
    # chattr is silent on success; any output means it failed.
    if bool(result):
        raise CommandExecutionError(
            "chattr failed to run, possibly due to bad parameters.")
    return True
def get_sum(path, form='sha256'):
    '''
    Return the checksum for the given file. The following checksum algorithms
    are supported:

    * md5
    * sha1
    * sha224
    * sha256 **(default)**
    * sha384
    * sha512

    path
        path to the file or directory

    form
        desired sum format

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_sum /etc/passwd sha512
    '''
    path = os.path.expanduser(path)
    if os.path.isfile(path):
        # 4096-byte chunks keep memory bounded for large files.
        return salt.utils.hashutils.get_hash(path, form, 4096)
    return 'File not found'
def get_hash(path, form='sha256', chunk_size=65536):
    '''
    Get the hash sum of a file

    This is better than ``get_sum`` for the following reasons:
        - It does not read the entire file into memory.
        - It does not return a string on error. The returned value of
            ``get_sum`` cannot really be trusted since it is vulnerable to
            collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``

    path
        path to the file or directory

    form
        desired sum format

    chunk_size
        amount to sum at once

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_hash /etc/shadow
    '''
    target = os.path.expanduser(path)
    return salt.utils.hashutils.get_hash(target, form, chunk_size)
def get_source_sum(file_name='',
                   source='',
                   source_hash=None,
                   source_hash_name=None,
                   saltenv='base'):
    '''
    .. versionadded:: 2016.11.0

    Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to
    obtain the hash and hash type from the parameters specified below.

    file_name
        Optional file name being managed, for matching with
        :py:func:`file.extract_hash <salt.modules.file.extract_hash>`.

    source
        Source file, as used in :py:mod:`file <salt.states.file>` and other
        states. If ``source_hash`` refers to a file containing hashes, then
        this filename will be used to match a filename in that file. If the
        ``source_hash`` is a hash expression, then this argument will be
        ignored.

    source_hash
        Hash file/expression, as used in :py:mod:`file <salt.states.file>` and
        other states. If this value refers to a remote URL or absolute path to
        a local file, it will be cached and :py:func:`file.extract_hash
        <salt.modules.file.extract_hash>` will be used to obtain a hash from
        it.

    source_hash_name
        Specific file name to look for when ``source_hash`` refers to a remote
        file, used to disambiguate ambiguous matches.

    saltenv : base
        Salt fileserver environment from which to retrieve the source_hash. This
        value will only be used when ``source_hash`` refers to a file on the
        Salt fileserver (i.e. one beginning with ``salt://``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz
    '''
    def _invalid_source_hash_format():
        '''
        DRY helper for reporting invalid source_hash input
        '''
        raise CommandExecutionError(
            'Source hash {0} format is invalid. The supported formats are: '
            '1) a hash, 2) an expression in the format <hash_type>=<hash>, or '
            '3) either a path to a local file containing hashes, or a URI of '
            'a remote hash file. Supported protocols for remote hash files '
            'are: {1}. The hash may also not be of a valid length, the '
            'following are supported hash types and lengths: {2}.'.format(
                source_hash,
                ', '.join(salt.utils.files.VALID_PROTOS),
                ', '.join(
                    ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                     for x in sorted(HASHES_REVMAP)]
                ),
            )
        )
    # Step 1: decide whether source_hash names a hash FILE (local path or
    # supported remote URL) or is an inline hash expression.
    hash_fn = None
    if os.path.isabs(source_hash):
        hash_fn = source_hash
    else:
        try:
            proto = _urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
                if not hash_fn:
                    raise CommandExecutionError(
                        'Source hash file {0} not found'.format(source_hash)
                    )
            else:
                if proto != '':
                    # Some unsupported protocol (e.g. foo://) is being used.
                    # We'll get into this else block if a hash expression
                    # (like md5=<md5 checksum here>), but in those cases, the
                    # protocol will be an empty string, in which case we avoid
                    # this error condition.
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            # source_hash was not a string (e.g. None) - not a valid input.
            _invalid_source_hash_format()
    if hash_fn is not None:
        # Step 2a: hash file - delegate parsing/matching to extract_hash().
        ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
        if ret is None:
            _invalid_source_hash_format()
        return ret
    else:
        # The source_hash is a hash expression
        ret = {}
        try:
            ret['hash_type'], ret['hsum'] = \
                [x.strip() for x in source_hash.split('=', 1)]
        except AttributeError:
            _invalid_source_hash_format()
        except ValueError:
            # No hash type, try to figure out by hash length
            if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret['hsum'] = source_hash
            source_hash_len = len(source_hash)
            if source_hash_len in HASHES_REVMAP:
                ret['hash_type'] = HASHES_REVMAP[source_hash_len]
            else:
                _invalid_source_hash_format()
        if ret['hash_type'] not in HASHES:
            raise CommandExecutionError(
                'Invalid hash type \'{0}\'. Supported hash types are: {1}. '
                'Either remove the hash type and simply use \'{2}\' as the '
                'source_hash, or change the hash type to a supported type.'
                .format(ret['hash_type'], ', '.join(HASHES), ret['hsum'])
            )
        else:
            # Step 2b: sanity-check the digest length against the hash type.
            hsum_len = len(ret['hsum'])
            if hsum_len not in HASHES_REVMAP:
                _invalid_source_hash_format()
            elif hsum_len != HASHES[ret['hash_type']]:
                raise CommandExecutionError(
                    'Invalid length ({0}) for hash type \'{1}\'. Either '
                    'remove the hash type and simply use \'{2}\' as the '
                    'source_hash, or change the hash type to \'{3}\''.format(
                        hsum_len,
                        ret['hash_type'],
                        ret['hsum'],
                        HASHES_REVMAP[hsum_len],
                    )
                )
    return ret
def check_hash(path, file_hash):
    '''
    Check if a file matches the given hash string

    Returns ``True`` if the hash matches, otherwise ``False``.

    path
        Path to a file local to the minion.

    hash
        The hash to check against the file specified in the ``path`` argument.

        .. versionchanged:: 2016.11.4

            For this and newer versions the hash can be specified without an
            accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
            but for earlier releases it is necessary to also specify the hash type
            in the format ``<hash_type>=<hash_value>`` (e.g.
            ``md5=e138491e9d5b97023cea823fe17bac22``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
        salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
    '''
    path = os.path.expanduser(path)
    if not isinstance(file_hash, six.string_types):
        raise SaltInvocationError('hash must be a string')
    # Accept either '<type>:<hash>' or '<type>=<hash>'; the for/else runs the
    # else block only when neither separator is present (bare hash).
    for sep in (':', '='):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    else:
        # Bare hash: infer the type from the digest length.
        hash_value = file_hash
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            raise SaltInvocationError(
                'Hash {0} (length: {1}) could not be matched to a supported '
                'hash type. The supported hash types and lengths are: '
                '{2}'.format(
                    file_hash,
                    hash_len,
                    ', '.join(
                        ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                         for x in sorted(HASHES_REVMAP)]
                    ),
                )
            )
    return get_hash(path, hash_type) == hash_value
def find(path, *args, **kwargs):
    '''
    Approximate the Unix ``find(1)`` command and return a list of paths that
    meet the specified criteria.

    The options include match criteria:

    .. code-block:: text

        name    = path-glob                 # case sensitive
        iname   = path-glob                 # case insensitive
        regex   = path-regex                # case sensitive
        iregex  = path-regex                # case insensitive
        type    = file-types                # match any listed type
        user    = users                     # match any listed user
        group   = groups                    # match any listed group
        size    = [+-]number[size-unit]     # default unit = byte
        mtime   = interval                  # modified since date
        grep    = regex                     # search file contents

    and/or actions:

    .. code-block:: text

        delete [= file-types]               # default type = 'f'
        exec    = command [arg ...]         # where {} is replaced by pathname
        print  [= print-opts]

    and/or depth criteria:

    .. code-block:: text

        maxdepth = maximum depth to transverse in path
        mindepth = minimum depth to transverse before checking files or directories

    The default action is ``print=path``

    ``path-glob``:

    .. code-block:: text

        *                = match zero or more chars
        ?                = match any char
        [abc]            = match a, b, or c
        [!abc] or [^abc] = match anything except a, b, and c
        [x-y]            = match chars x through y
        [!x-y] or [^x-y] = match anything except chars x through y
        {a,b,c}          = match a or b or c

    ``path-regex``: a Python Regex (regular expression) pattern to match pathnames

    ``file-types``: a string of one or more of the following:

    .. code-block:: text

        a: all file types
        b: block device
        c: character device
        d: directory
        p: FIFO (named pipe)
        f: plain file
        l: symlink
        s: socket

    ``users``: a space and/or comma separated list of user names and/or uids

    ``groups``: a space and/or comma separated list of group names and/or gids

    ``size-unit``:

    .. code-block:: text

        b: bytes
        k: kilobytes
        m: megabytes
        g: gigabytes
        t: terabytes

    interval:

    .. code-block:: text

        [<num>w] [<num>d] [<num>h] [<num>m] [<num>s]

        where:
            w: week
            d: day
            h: hour
            m: minute
            s: second

    print-opts: a comma and/or space separated list of one or more of the
    following:

    .. code-block:: text

        group: group name
        md5:   MD5 digest of file contents
        mode:  file permissions (as integer)
        mtime: last modification time (as time_t)
        name:  file basename
        path:  file absolute path
        size:  file size in bytes
        type:  file type
        user:  user name

    CLI Examples:

    .. code-block:: bash

        salt '*' file.find / type=f name=\\*.bak size=+10m
        salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
        salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
    '''
    # Bare 'delete'/'print' positional args become their default kwarg forms.
    if 'delete' in args:
        kwargs['delete'] = 'f'
    elif 'print' in args:
        kwargs['print'] = 'path'
    try:
        finder = salt.utils.find.Finder(kwargs)
    except ValueError as ex:
        return 'error: {0}'.format(ex)
    # path may itself be a glob; flatten the per-match result lists.
    ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i]
    ret.sort()
    return ret
def _sed_esc(string, escape_all=False):
'''
Escape single quotes and forward slashes
'''
special_chars = "^.[$()|*+?{"
string = string.replace("'", "'\"'\"'").replace("/", "\\/")
if escape_all is True:
for char in special_chars:
string = string.replace(char, "\\" + char)
return string
def sed(path,
        before,
        after,
        limit='',
        backup='.bak',
        options='-r -e',
        flags='g',
        escape_all=False,
        negate_match=False):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Make a simple edit to a file

    Equivalent to:

    .. code-block:: bash

        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"

    path
        The full path to the file to be edited
    before
        A pattern to find in order to replace with ``after``
    after
        Text that will replace ``before``
    limit : ``''``
        An initial pattern to search for before searching for ``before``
    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup
    options : ``-r -e``
        Options to pass to sed
    flags : ``g``
        Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
        matching
    negate_match : False
        Negate the search command (``!``)

        .. versionadded:: 0.17.0

    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.

    CLI Example:

    .. code-block:: bash

        salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
    '''
    # Largely inspired by Fabric's contrib.files.sed()
    # XXX:dc: Do we really want to always force escaping?
    #
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    # Mandate that before and after are strings
    before = str(before)
    after = str(after)
    before = _sed_esc(before, escape_all)
    after = _sed_esc(after, escape_all)
    limit = _sed_esc(limit, escape_all)
    # BSD sed (macOS) uses -E for extended regexes instead of GNU's -r.
    if sys.platform == 'darwin':
        options = options.replace('-r', '-E')
    cmd = ['sed']
    # -i<suffix> edits in place, keeping a backup with that suffix.
    cmd.append('-i{0}'.format(backup) if backup else '-i')
    cmd.extend(salt.utils.args.shlex_split(options))
    cmd.append(
        r'{limit}{negate_match}s/{before}/{after}/{flags}'.format(
            limit='/{0}/ '.format(limit) if limit else '',
            negate_match='!' if negate_match else '',
            before=before,
            after=after,
            flags=flags
        )
    )
    cmd.append(path)
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def sed_contains(path,
                 text,
                 limit='',
                 flags='g'):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return True if the file at ``path`` contains ``text``. Utilizes sed to
    perform the search (line-wise search).

    Note: the ``p`` flag will be added to any flags you pass in.

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    '''
    # Largely inspired by Fabric's contrib.files.contains()
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    before = _sed_esc(str(text), False)
    limit = _sed_esc(str(limit), False)
    options = '-n -r -e'
    # BSD sed (macOS) uses -E instead of GNU's -r.
    if sys.platform == 'darwin':
        options = options.replace('-r', '-E')
    cmd = ['sed']
    cmd.extend(salt.utils.args.shlex_split(options))
    # -n suppresses output; the added 'p' flag prints only matching lines,
    # so any output at all means the text was found.
    cmd.append(
        r'{limit}s/{before}/$/{flags}'.format(
            limit='/{0}/ '.format(limit) if limit else '',
            before=before,
            flags='p{0}'.format(flags)
        )
    )
    cmd.append(path)
    result = __salt__['cmd.run'](cmd, python_shell=False)
    return bool(result)
def psed(path,
         before,
         after,
         limit='',
         backup='.bak',
         flags='gMS',
         escape_all=False,
         multi=False):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Make a simple edit to a file (pure Python version)

    Equivalent to:

    .. code-block:: bash

        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"

    path
        The full path to the file to be edited
    before
        A pattern to find in order to replace with ``after``
    after
        Text that will replace ``before``
    limit : ``''``
        An initial pattern to search for before searching for ``before``
    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup
    flags : ``gMS``
        Flags to modify the search. Valid values are:
          - ``g``: Replace all occurrences of the pattern, not just the first.
          - ``I``: Ignore case.
          - ``L``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\s`` and ``\\S``
            dependent on the locale.
          - ``M``: Treat multiple lines as a single line.
          - ``S``: Make `.` match all characters, including newlines.
          - ``U``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\d``, ``\\D``,
            ``\\s`` and ``\\S`` dependent on Unicode.
          - ``X``: Verbose (whitespace is ignored).
    multi: ``False``
        If True, treat the entire file as a single line

    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.

    CLI Example:

    .. code-block:: bash

        salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
    '''
    # Largely inspired by Fabric's contrib.files.sed()
    # XXX:dc: Do we really want to always force escaping?
    #
    # Mandate that before and after are strings
    path = os.path.expanduser(path)
    multi = bool(multi)
    before = str(before)
    after = str(after)
    before = _sed_esc(before, escape_all)
    # The pattern to replace with does not need to be escaped!!!
    #after = _sed_esc(after, escape_all)
    limit = _sed_esc(limit, escape_all)
    # Keep a backup copy, then rewrite the original from it.
    shutil.copy2(path, '{0}{1}'.format(path, backup))
    with salt.utils.files.fopen(path, 'w') as ofile:
        with salt.utils.files.fopen('{0}{1}'.format(path, backup), 'r') as ifile:
            if multi is True:
                # BUG FIX: this previously iterated ``ifile.readline()``,
                # which walks the *characters* of the first line only.
                # Iterate the file object to process each line.
                # NOTE(review): the docstring says multi=True treats the whole
                # file as one line, which reads like the opposite of this
                # branch - confirm the intended semantics with callers.
                for line in ifile:
                    ofile.write(_psed(line, before, after, limit, flags))
            else:
                ofile.write(_psed(ifile.read(), before, after, limit, flags))
# Map single-character psed flag letters to the corresponding re module flags
# (used by _psed; 'g' is handled separately as "replace all").
RE_FLAG_TABLE = {'I': re.I,
                 'L': re.L,
                 'M': re.M,
                 'S': re.S,
                 'U': re.U,
                 'X': re.X}
def _psed(text,
before,
after,
limit,
flags):
'''
Does the actual work for file.psed, so that single lines can be passed in
'''
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = ''.join(comps[1:])
count = 1
if 'g' in flags:
count = 0
flags = flags.replace('g', '')
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
def uncomment(path,
              regex,
              char='#',
              backup='.bak'):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Uncomment specified commented lines in a file

    path
        The full path to the file to be edited
    regex
        A regular expression used to find the lines that are to be uncommented.
        This regex should not include the comment character. A leading ``^``
        character will be stripped for convenience (for easily switching
        between comment() and uncomment()).
    char : ``#``
        The character to remove in order to uncomment a line
    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup

    CLI Example:

    .. code-block:: bash

        salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
    '''
    # Thin wrapper: comment_line() implements both directions; cmnt=False
    # selects the "uncomment" behavior.
    return comment_line(
        path=path, regex=regex, char=char, cmnt=False, backup=backup)
def comment(path,
            regex,
            char='#',
            backup='.bak'):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Comment out specified lines in a file

    path
        The full path to the file to be edited
    regex
        A regular expression used to find the lines that are to be commented;
        this pattern will be wrapped in parenthesis and will move any
        preceding/trailing ``^`` or ``$`` characters outside the parenthesis
        (e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
    char : ``#``
        The character to be inserted at the beginning of a line in order to
        comment it out
    backup : ``.bak``
        The file will be backed up before edit with this file extension

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment`` /
            ``uncomment`` is called. Meaning the backup will only be useful
            after the first invocation.

    CLI Example:

    .. code-block:: bash

        salt '*' file.comment /etc/modules pcspkr
    '''
    # Thin wrapper: comment_line() implements both directions; cmnt=True
    # selects the "comment out" behavior.
    return comment_line(
        path=path, regex=regex, char=char, cmnt=True, backup=backup)
def comment_line(path,
                 regex,
                 char='#',
                 cmnt=True,
                 backup='.bak'):
    r'''
    Comment or Uncomment a line in a text file.
    :param path: string
        The full path to the text file.
    :param regex: string
        A regex expression that begins with ``^`` that will find the line you wish
        to comment. Can be as simple as ``^color =``
    :param char: string
        The character used to comment a line in the type of file you're referencing.
        Default is ``#``
    :param cmnt: boolean
        True to comment the line. False to uncomment the line. Default is True.
    :param backup: string
        The file extension to give the backup file. Default is ``.bak``
        Set to False/None to not keep a backup.
    :return: boolean
        Returns True if successful, False if not
    CLI Example:
    The following example will comment out the ``pcspkr`` line in the
    ``/etc/modules`` file using the default ``#`` character and create a backup
    file named ``modules.bak``
    .. code-block:: bash
        salt '*' file.comment_line '/etc/modules' '^pcspkr'
    CLI Example:
    The following example will uncomment the ``log_level`` setting in ``minion``
    config file if it is set to either ``warning``, ``info``, or ``debug`` using
    the ``#`` character and create a backup file named ``minion.bk``
    .. code-block:: bash
        salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk'
    '''
    # Get the regex for comment or uncomment
    if cmnt:
        # Commenting: wrap the user's pattern in a capture group, keeping any
        # leading ^ / trailing $ anchors outside of the parentheses.
        regex = '{0}({1}){2}'.format(
            '^' if regex.startswith('^') else '',
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    else:
        # Uncommenting: match the comment character plus optional whitespace
        # in front of the user's pattern.
        regex = r'^{0}\s*({1}){2}'.format(
            char,
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    # Load the real path to the file (resolves symlinks and ~)
    path = os.path.realpath(os.path.expanduser(path))
    # Make sure the file exists
    if not os.path.isfile(path):
        raise SaltInvocationError('File not found: {0}'.format(path))
    # Make sure it is a text file
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'.format(path))
    # First check the whole file, determine whether to make the replacement
    # Searching first avoids modifying the time stamp if there are no changes
    found = False
    # Lists of matched original/modified lines, used to build the return diff
    orig_file = []
    new_file = []
    # Buffer size for fopen: the whole file is buffered at once
    bufsize = os.path.getsize(path)
    try:
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            # Loop through each line of the file and look for a match
            for line in r_file:
                # Is it in this line
                if six.PY3:
                    line = line.decode(__salt_system_encoding__)
                if re.match(regex, line):
                    # Load matched lines into the lists, set found to True
                    orig_file.append(line)
                    if cmnt:
                        new_file.append('{0}{1}'.format(char, line))
                    else:
                        # NOTE(review): lstrip(char) strips *every* leading
                        # occurrence of char, not just one — confirm that
                        # doubly-commented lines are meant to lose all markers
                        new_file.append(line.lstrip(char))
                    found = True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    # We've searched the whole file. If we didn't find anything, return False
    if not found:
        return False
    if not salt.utils.platform.is_windows():
        # Remember ownership/mode so they can be restored after the rewrite
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Create a copy to read from and to use as a backup later
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    try:
        # Open the file in write mode
        with salt.utils.files.fopen(path,
                                    mode='wb',
                                    buffering=bufsize) as w_file:
            try:
                # Open the temp file in read mode
                with salt.utils.files.fopen(temp_file,
                                            mode='rb',
                                            buffering=bufsize) as r_file:
                    # Loop through each line of the file and look for a match
                    for line in r_file:
                        if six.PY3:
                            line = line.decode(__salt_system_encoding__)
                        try:
                            # Is it in this line
                            if re.match(regex, line):
                                # Write the new line
                                if cmnt:
                                    wline = '{0}{1}'.format(char, line)
                                else:
                                    wline = line.lstrip(char)
                            else:
                                # Write the existing line (no change)
                                wline = line
                            if six.PY3:
                                wline = wline.encode(__salt_system_encoding__)
                            w_file.write(wline)
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    if backup:
        # Move the backup file to the original directory
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)
    if not salt.utils.platform.is_windows():
        # Restore the original ownership/mode captured before the rewrite
        check_perms(path, None, pre_user, pre_group, pre_mode)
    # Return a diff using the two lists of matched lines
    return ''.join(difflib.unified_diff(orig_file, new_file))
def _get_flags(flags):
    '''
    Return an integer appropriate for use as a flag for the re module from a
    list of human-readable strings
    .. code-block:: python
        >>> _get_flags(['MULTILINE', 'IGNORECASE'])
        10
        >>> _get_flags('MULTILINE')
        8
        >>> _get_flags(2)
        2
    '''
    # A single flag name behaves like a one-element list of names
    if isinstance(flags, six.string_types):
        flags = [flags]
    if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
        # Resolve every human-readable name to its re.<NAME> constant
        resolved = []
        for name in flags:
            value = getattr(re, str(name).upper())
            if not isinstance(value, six.integer_types):
                raise SaltInvocationError(
                    'Invalid re flag given: {0}'.format(name)
                )
            resolved.append(value)
        return reduce(operator.__or__, resolved)
    if isinstance(flags, six.integer_types):
        # Already a numeric flag value; pass it through untouched
        return flags
    raise SaltInvocationError(
        'Invalid re flags: "{0}", must be given either as a single flag '
        'string, a list of strings, or as an integer'.format(flags)
    )
def _add_flags(flags, new_flags):
    '''
    Combine ``flags`` and ``new_flags``
    '''
    # Normalize both operands to integers, then OR them together
    return _get_flags(flags) | _get_flags(new_flags)
def _mkstemp_copy(path,
                  preserve_inode=True):
    '''
    Create a temp file and move/copy the contents of ``path`` to the temp file.
    Return the path to the temp file.
    path
        The full path to the file whose contents will be moved/copied to a temp file.
        Whether it's moved or copied depends on the value of ``preserve_inode``.
    preserve_inode
        Preserve the inode of the file, so that any hard links continue to share the
        inode with the original filename. This works by *copying* the file, reading
        from the copy, and writing to the file at the original inode. If ``False``, the
        file will be *moved* rather than copied, and a new file will be written to a
        new inode, but using the original filename. Hard links will then share an inode
        with the backup, instead (if using ``backup`` to create a backup copy).
        Default is ``True``.
    '''
    # Allocate the temp file first so any failure happens before we touch path
    try:
        temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to create temp file. "
            "Exception: {0}".format(exc)
        )
    if preserve_inode:
        # Copy: the original inode (and any hard links to it) stays in place
        try:
            shutil.copy2(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to copy file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        # Move: the original filename will later be re-created on a new inode
        try:
            shutil.move(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    return temp_file
def _starts_till(src, probe, strip_comments=True):
'''
Returns True if src and probe at least matches at the beginning till some point.
'''
def _strip_comments(txt):
'''
Strip possible comments.
Usually comments are one or two symbols at the beginning of the line, separated with space
'''
buff = txt.split(" ", 1)
return len(buff) == 2 and len(buff[0]) < 2 and buff[1] or txt
def _to_words(txt):
'''
Split by words
'''
return txt and [w for w in txt.strip().split(" ") if w.strip()] or txt
no_match = -1
equal = 0
if not src or not probe:
return no_match
if src == probe:
return equal
src = _to_words(strip_comments and _strip_comments(src) or src)
probe = _to_words(strip_comments and _strip_comments(probe) or probe)
a_buff, b_buff = len(src) < len(probe) and (src, probe) or (probe, src)
b_buff = ' '.join(b_buff)
for idx in range(len(a_buff)):
prb = ' '.join(a_buff[:-(idx + 1)])
if prb and b_buff.startswith(prb):
return idx
return no_match
def _regex_to_static(src, regex):
'''
Expand regular expression to static match.
'''
if not src or not regex:
return None
try:
src = re.search(regex, src, re.M)
except Exception as ex:
raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))
return src and src.group() or regex
def _assert_occurrence(src, probe, target, amount=1):
'''
Raise an exception, if there are different amount of specified occurrences in src.
'''
occ = src.count(probe)
if occ > amount:
msg = 'more than'
elif occ < amount:
msg = 'less than'
elif not occ:
msg = 'no'
else:
msg = None
if msg:
raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target))
return occ
def _get_line_indent(src, line, indent):
'''
Indent the line with the source line.
'''
if not indent:
return line
idt = []
for c in src:
if c not in ['\t', ' ']:
break
idt.append(c)
return ''.join(idt) + line.strip()
def line(path, content=None, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True):
    '''
    .. versionadded:: 2015.8.0
    Edit a line in the configuration file. The ``path`` and ``content``
    arguments are required, as well as passing in one of the ``mode``
    options.
    path
        Filesystem path to the file to be edited.
    content
        Content of the line. Allowed to be empty if mode=delete.
    match
        Match the target line for an action by
        a fragment of a string or regular expression.
        If neither ``before`` nor ``after`` are provided, and ``match``
        is also ``None``, match becomes the ``content`` value.
    mode
        Defines how to edit a line. One of the following options is
        required:
        - ensure
            If line does not exist, it will be added. This is based on the
            ``content`` argument.
        - replace
            If line already exists, it will be replaced.
        - delete
            Delete the line, once found.
        - insert
            Insert a line.
        .. note::
            If ``mode=insert`` is used, at least one of the following
            options must also be defined: ``location``, ``before``, or
            ``after``. If ``location`` is used, it takes precedence
            over the other two options.
    location
        Defines where to place content in the line. Note this option is only
        used when ``mode=insert`` is specified. If a location is passed in, it
        takes precedence over both the ``before`` and ``after`` kwargs. Valid
        locations are:
        - start
            Place the content at the beginning of the file.
        - end
            Place the content at the end of the file.
    before
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert`` mode
        is defined.
    after
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert`` mode
        is defined.
    show_changes
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Default is ``True``
        .. note::
            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to generate the diff.
    backup
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".
    quiet
        Do not raise any exceptions. E.g. ignore the fact that the file that is
        tried to be edited does not exist and nothing really happened.
    indent
        Keep indentation with the previous line. This option is not considered when
        the ``delete`` mode is specified.
    CLI Example:
    .. code-block:: bash
        salt '*' file.line /etc/nsswitch.conf "networks:\tfiles dns" after="hosts:.*?" mode='ensure'
    .. note::
        If an equal sign (``=``) appears in an argument to a Salt command, it is
        interpreted as a keyword argument in the format of ``key=val``. That
        processing can be bypassed in order to pass an equal sign through to the
        remote shell command by manually specifying the kwarg:
        .. code-block:: bash
            salt '*' file.line /path/to/file content="CREATEMAIL_SPOOL=no" match="CREATE_MAIL_SPOOL=yes" mode="replace"
    '''
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError('File "{0}" does not exists or is not a file.'.format(path))
        return False # No changes had happened
    # Normalize mode to lower case (None stays None and is rejected below)
    mode = mode and mode.lower() or mode
    if mode not in ['insert', 'ensure', 'delete', 'replace']:
        if mode is None:
            raise CommandExecutionError('Mode was not defined. How to process the file?')
        else:
            raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))
    # We've set the content to be empty in the function params but we want to make sure
    # it gets passed when needed. Feature #37092
    empty_content_modes = ['delete']
    if mode not in empty_content_modes and content is None:
        raise CommandExecutionError('Content can only be empty if mode is "{0}"'.format(', '.join(empty_content_modes)))
    del empty_content_modes
    # Before/after has privilege. If nothing defined, match is used by content.
    if before is None and after is None and not match:
        match = content
    with salt.utils.files.fopen(path, mode='r') as fp_:
        body = fp_.read()
    # Hash of the original content, used later to detect whether anything changed
    body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    # Reduce the after/before/match regexes to the literal text they match in
    # the body (or keep the raw pattern if nothing matched); all subsequent
    # comparisons below are plain substring tests via str.find()
    after = _regex_to_static(body, after)
    before = _regex_to_static(body, before)
    match = _regex_to_static(body, match)
    if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
        log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
        body = ''
    elif mode == 'delete':
        # Keep only the lines that do NOT contain the match fragment
        body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
    elif mode == 'replace':
        # Swap matching lines (that are not already exactly ``content``) for
        # the new content, preserving each line's original indentation
        body = os.linesep.join([(_get_line_indent(file_line, content, indent)
                                if (file_line.find(match) > -1 and not file_line == content) else file_line)
                               for file_line in body.split(os.linesep)])
    elif mode == 'insert':
        if not location and not before and not after:
            raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')
        if not location:
            if before and after:
                # Insert between the two markers: new content goes right
                # before the "before" line, once the "after" line was seen
                _assert_occurrence(body, before, 'before')
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                in_range = False
                for line in lines:
                    if line.find(after) > -1:
                        in_range = True
                    elif line.find(before) > -1 and in_range:
                        out.append(_get_line_indent(line, content, indent))
                    out.append(line)
                body = os.linesep.join(out)
            if before and not after:
                # Insert directly above every "before" line, unless the
                # previous line already starts with the content
                _assert_occurrence(body, before, 'before')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1:
                        cnd = _get_line_indent(_line, content, indent)
                        if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0): # Job for replace instead
                            out.append(cnd)
                    out.append(_line)
                body = os.linesep.join(out)
            elif after and not before:
                # Insert directly below every "after" line
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx, _line in enumerate(lines):
                    out.append(_line)
                    cnd = _get_line_indent(_line, content, indent)
                    # No duplicates or append, if "after" is the last line
                    if (_line.find(after) > -1 and
                            (lines[((idx + 1) < len(lines)) and idx + 1 or idx].strip() != cnd or
                             idx + 1 == len(lines))):
                        out.append(cnd)
                body = os.linesep.join(out)
        else:
            # location takes precedence over before/after
            if location == 'start':
                body = os.linesep.join((content, body))
            elif location == 'end':
                body = os.linesep.join((body, _get_line_indent(body[-1], content, indent) if body else content))
    elif mode == 'ensure':
        after = after and after.strip()
        before = before and before.strip()
        if before and after:
            # Ensure exactly one content line between the two boundary lines
            _assert_occurrence(body, before, 'before')
            _assert_occurrence(body, after, 'after')
            is_there = bool(body.count(content))
            if not is_there:
                out = []
                body = body.split(os.linesep)
                for idx, line in enumerate(body):
                    out.append(line)
                    if line.find(content) > -1:
                        is_there = True
                    if not is_there:
                        if idx < (len(body) - 1) and line.find(after) > -1 and body[idx + 1].find(before) > -1:
                            out.append(content)
                        elif line.find(after) > -1:
                            raise CommandExecutionError('Found more than one line between '
                                                        'boundaries "before" and "after".')
                body = os.linesep.join(out)
        elif before and not after:
            # Ensure the content sits immediately above the "before" line,
            # replacing a stale variant of it when one is already there
            _assert_occurrence(body, before, 'before')
            body = body.split(os.linesep)
            out = []
            for idx in range(len(body)):
                if body[idx].find(before) > -1:
                    prev = (idx > 0 and idx or 1) - 1
                    out.append(_get_line_indent(body[idx], content, indent))
                    if _starts_till(out[prev], content) > -1:
                        del out[prev]
                out.append(body[idx])
            body = os.linesep.join(out)
        elif not before and after:
            # Ensure the content sits immediately below the "after" line,
            # skipping a stale variant of it when one is already there
            _assert_occurrence(body, after, 'after')
            body = body.split(os.linesep)
            skip = None
            out = []
            for idx in range(len(body)):
                if skip != body[idx]:
                    out.append(body[idx])
                if body[idx].find(after) > -1:
                    next_line = idx + 1 < len(body) and body[idx + 1] or None
                    if next_line is not None and _starts_till(next_line, content) > -1:
                        skip = next_line
                    out.append(_get_line_indent(body[idx], content, indent))
            body = os.linesep.join(out)
        else:
            raise CommandExecutionError("Wrong conditions? "
                                        "Unable to ensure line without knowing "
                                        "where to put it before and/or after.")
    # Compare hashes rather than full bodies to decide whether to write
    changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    if backup and changed and __opts__['test'] is False:
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())))
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc))
    changes_diff = None
    if changed:
        if show_changes:
            # Re-read the file to diff against, preserving trailing newlines
            with salt.utils.files.fopen(path, 'r') as fp_:
                path_content = _splitlines_preserving_trailing_newline(
                    fp_.read())
            changes_diff = ''.join(difflib.unified_diff(
                path_content, _splitlines_preserving_trailing_newline(body)))
        if __opts__['test'] is False:
            fh_ = None
            try:
                # Atomic write: the new body replaces the file in one step
                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
                fh_.write(body)
            finally:
                if fh_:
                    fh_.close()
    return show_changes and changes_diff or changed
def replace(path,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            dry_run=False,
            search_only=False,
            show_changes=True,
            ignore_if_missing=False,
            preserve_inode=True,
            backslash_literal=False,
        ):
    '''
    .. versionadded:: 0.17.0
    Replace occurrences of a pattern in a file. If ``show_changes`` is
    ``True``, then a diff of what changed will be returned, otherwise a
    ``True`` will be returned when changes are made, and ``False`` when
    no changes are made.
    This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
    path
        Filesystem path to the file to be edited. If a symlink is specified, it
        will be resolved to its target.
    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.
    repl
        The replacement text
    count : 0
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.
    flags (list or int)
        A list of flags defined in the :ref:`re module documentation
        <contents-of-module-re>`. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults to
        8 (which supports 'MULTILINE').
    bufsize (int or str)
        How much of the file to buffer into memory at once. The
        default value ``1`` processes one line at a time. The special value
        ``file`` may be specified which will read the entire file into memory
        before processing.
    append_if_not_found : False
        .. versionadded:: 2014.7.0
        If set to ``True``, and pattern is not found, then the content will be
        appended to the file.
    prepend_if_not_found : False
        .. versionadded:: 2014.7.0
        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.
    not_found_content
        .. versionadded:: 2014.7.0
        Content to use for append/prepend if not found. If None (default), uses
        ``repl``. Useful when ``repl`` uses references to group in pattern.
    backup : .bak
        The file extension to use for a backup of the file before editing. Set
        to ``False`` to skip making a backup.
    dry_run : False
        If set to ``True``, no changes will be made to the file, the function
        will just return the changes that would have been made (or a
        ``True``/``False`` value if ``show_changes`` is set to ``False``).
    search_only : False
        If set to true, this no changes will be performed on the file, and this
        function will simply return ``True`` if the pattern was matched, and
        ``False`` if not.
    show_changes : True
        If ``True``, return a diff of changes made. Otherwise, return ``True``
        if changes were made, and ``False`` if not.
        .. note::
            Using this option will store two copies of the file in memory (the
            original version and the edited version) in order to generate the
            diff. This may not normally be a concern, but could impact
            performance if used with large files.
    ignore_if_missing : False
        .. versionadded:: 2015.8.0
        If set to ``True``, this function will simply return ``False``
        if the file doesn't exist. Otherwise, an error will be thrown.
    preserve_inode : True
        .. versionadded:: 2015.8.0
        Preserve the inode of the file, so that any hard links continue to
        share the inode with the original filename. This works by *copying* the
        file, reading from the copy, and writing to the file at the original
        inode. If ``False``, the file will be *moved* rather than copied, and a
        new file will be written to a new inode, but using the original
        filename. Hard links will then share an inode with the backup, instead
        (if using ``backup`` to create a backup copy).
    backslash_literal : False
        .. versionadded:: 2016.11.7
        Interpret backslashes as literal backslashes for the repl and not
        escape characters. This will help when using append/prepend so that
        the backslashes are not interpreted for the repl on the second run of
        the state.
    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:
    .. code-block:: bash
        salt '*' file.replace /path/to/file pattern='=' repl=':'
        salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:'
    CLI Examples:
    .. code-block:: bash
        salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
        salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
    '''
    symlink = False
    if is_link(path):
        # Remember the symlink and its target so the backup can mirror it
        symlink = True
        target_path = os.readlink(path)
        given_path = os.path.expanduser(path)
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(path):
        if ignore_if_missing:
            return False
        else:
            raise SaltInvocationError('File not found: {0}'.format(path))
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )
    if search_only and (append_if_not_found or prepend_if_not_found):
        raise SaltInvocationError(
            'search_only cannot be used with append/prepend_if_not_found'
        )
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )
    flags_num = _get_flags(flags)
    cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
    filesize = os.path.getsize(path)
    if bufsize == 'file':
        bufsize = filesize
    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []  # used for show_changes and change detection
    new_file = []  # used for show_changes and change detection
    if not salt.utils.platform.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Avoid TypeErrors by forcing repl to be bytearray related to mmap
    # Replacement text may contains integer: 123 for example
    repl = salt.utils.stringutils.to_bytes(str(repl))
    if not_found_content:
        not_found_content = salt.utils.stringutils.to_bytes(not_found_content)
    found = False
    temp_file = None
    # Text to look for when deciding whether a previous run already
    # pre/appended the content
    if not_found_content and (prepend_if_not_found or append_if_not_found):
        content = salt.utils.stringutils.to_str(not_found_content)
    else:
        content = salt.utils.stringutils.to_str(repl)
    try:
        # First check the whole file, determine whether to make the replacement
        # Searching first avoids modifying the time stamp if there are no changes
        r_data = None
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            try:
                # mmap throws a ValueError if the file is empty.
                r_data = mmap.mmap(r_file.fileno(),
                                   0,
                                   access=mmap.ACCESS_READ)
            except (ValueError, mmap.error):
                # size of file in /proc is 0, but contains data
                r_data = salt.utils.stringutils.to_bytes("".join(r_file))
            if search_only:
                # Just search; bail as early as a match is found
                if re.search(cpattern, r_data):
                    return True  # `with` block handles file closure
            else:
                # BUGFIX: use bytes literals here — ``repl`` is bytes, so
                # str arguments to bytes.replace() raised TypeError on Py3
                result, nrepl = re.subn(cpattern,
                                        repl.replace(b'\\', b'\\\\') if backslash_literal else repl,
                                        r_data,
                                        count)
                # found anything? (even if no change)
                if nrepl > 0:
                    found = True
                    # Identity check the potential change
                    # NOTE(review): pattern is str and repl is bytes here, so
                    # this comparison is effectively always unequal on Py3;
                    # real change detection happens via get_changes() below
                    has_changes = True if pattern != repl else has_changes
                if prepend_if_not_found or append_if_not_found:
                    # Search for content, to avoid pre/appending the
                    # content if it was pre/appended in a previous run.
                    if re.search(salt.utils.stringutils.to_bytes('^{0}$'.format(re.escape(content))),
                                 r_data,
                                 flags=flags_num):
                        # Content was found, so set found.
                        found = True
                # Keep both versions in memory for diffing/change detection
                orig_file = r_data.read(filesize).splitlines(True) \
                    if isinstance(r_data, mmap.mmap) \
                    else r_data.splitlines(True)
                new_file = result.splitlines(True)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    finally:
        if r_data and isinstance(r_data, mmap.mmap):
            r_data.close()
    if has_changes and not dry_run:
        # Write the replacement text in this block.
        try:
            # Create a copy to read from and to use as a backup later
            temp_file = _mkstemp_copy(path=path,
                                      preserve_inode=preserve_inode)
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))
        r_data = None
        try:
            # Open the file in write mode
            with salt.utils.files.fopen(path,
                                        mode='w',
                                        buffering=bufsize) as w_file:
                try:
                    # Open the temp file in read mode
                    with salt.utils.files.fopen(temp_file,
                                                mode='r',
                                                buffering=bufsize) as r_file:
                        r_data = mmap.mmap(r_file.fileno(),
                                           0,
                                           access=mmap.ACCESS_READ)
                        # BUGFIX: bytes literals, as above
                        result, nrepl = re.subn(cpattern,
                                                repl.replace(b'\\', b'\\\\') if backslash_literal else repl,
                                                r_data,
                                                count)
                        try:
                            w_file.write(salt.utils.stringutils.to_str(result))
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
                except (OSError, IOError) as exc:
                    raise CommandExecutionError("Exception: {0}".format(exc))
                finally:
                    if r_data and isinstance(r_data, mmap.mmap):
                        r_data.close()
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))
    if not found and (append_if_not_found or prepend_if_not_found):
        if not_found_content is None:
            not_found_content = repl
        if prepend_if_not_found:
            new_file.insert(0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        else:
            # append_if_not_found
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(salt.utils.stringutils.to_bytes(os.linesep)):
                    new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep)
            new_file.append(not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        has_changes = True
        if not dry_run:
            try:
                # Create a copy to read from and for later use as a backup
                temp_file = _mkstemp_copy(path=path,
                                          preserve_inode=preserve_inode)
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
            # write new content in the file while avoiding partial reads
            # BUGFIX: initialize fh_ so a failure in atomic_open() does not
            # raise NameError in the finally clause and mask the real error
            fh_ = None
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                if fh_ is not None:
                    fh_.close()
    if backup and has_changes and not dry_run:
        # keep the backup only if it was requested
        # and only if there were any changes
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
        if symlink:
            symlink_backup = '{0}{1}'.format(given_path, backup)
            target_backup = '{0}{1}'.format(target_path, backup)
            # Always clobber any existing symlink backup
            # to match the behaviour of the 'backup' option
            try:
                os.symlink(target_backup, symlink_backup)
            except OSError:
                os.remove(symlink_backup)
                os.symlink(target_backup, symlink_backup)
            # BUGFIX: the original bare ``except:`` referenced an undefined
            # name ``exc``, guaranteeing a NameError that masked the real
            # failure; bind the exception explicitly instead
            except Exception as exc:
                raise CommandExecutionError(
                    "Unable create backup symlink '{0}'. "
                    "Target was '{1}'. "
                    "Exception: {2}".format(symlink_backup, target_backup,
                                            exc)
                )
    elif temp_file:
        try:
            os.remove(temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to delete temp file '{0}'. "
                "Exception: {1}".format(temp_file, exc)
            )
    if not dry_run and not salt.utils.platform.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)

    def get_changes():
        # Build a unified diff from the in-memory before/after snapshots
        orig_file_as_str = [salt.utils.stringutils.to_str(x) for x in orig_file]
        new_file_as_str = [salt.utils.stringutils.to_str(x) for x in new_file]
        return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str))

    if show_changes:
        return get_changes()
    # We may have found a regex line match but don't need to change the line
    # (for situations where the pattern also matches the repl). Revert the
    # has_changes flag to False if the final result is unchanged.
    if not get_changes():
        has_changes = False
    return has_changes
def blockreplace(path,
        marker_start='#-- start managed zone --',
        marker_end='#-- end managed zone --',
        content='',
        append_if_not_found=False,
        prepend_if_not_found=False,
        backup='.bak',
        dry_run=False,
        show_changes=True,
        append_newline=False,
        ):
    '''
    .. versionadded:: 2014.1.0
    Replace content of a text block in a file, delimited by line markers
    A block of content delimited by comments can help you manage several lines
    entries without worrying about old entries removal.
    .. note::
        This function will store two copies of the file in-memory (the original
        version and the edited version) in order to detect changes and only
        edit the targeted file if necessary.
    path
        Filesystem path to the file to be edited
    marker_start
        The line content identifying a line as the start of the content block.
        Note that the whole line containing this marker will be considered, so
        whitespace or extra content before or after the marker is included in
        final output
    marker_end
        The line content identifying a line as the end of the content block.
        Note that the whole line containing this marker will be considered, so
        whitespace or extra content before or after the marker is included in
        final output
    content
        The content to be used between the two lines identified by marker_start
        and marker_stop.
    append_if_not_found : False
        If markers are not found and set to ``True`` then, the markers and
        content will be appended to the file.
    prepend_if_not_found : False
        If markers are not found and set to ``True`` then, the markers and
        content will be prepended to the file.
    backup
        The file extension to use for a backup of the file if any edit is made.
        Set to ``False`` to skip making a backup.
    dry_run
        Don't make any edits to the file.
    show_changes
        Output a unified diff of the old file and the new file. If ``False``,
        return a boolean if any changes were made.
    append_newline:
        Append a newline to the content block. For more information see:
        https://github.com/saltstack/salt/issues/33686
        .. versionadded:: 2016.3.4
    CLI Example:
    .. code-block:: bash
        salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
        '#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise SaltInvocationError('File not found: {0}'.format(path))
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )
    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []
    new_file = []
    in_block = False
    old_content = ''
    done = False
    # we do not use in_place editing to avoid file attrs modifications when
    # no changes are required and to avoid any file access on a partially
    # written file.
    # we could also use salt.utils.filebuffer.BufferedReader
    try:
        fi_file = fileinput.input(path,
                                  inplace=False, backup=False,
                                  bufsize=1, mode='rb')
        for line in fi_file:
            line = salt.utils.stringutils.to_str(line)
            result = line
            if marker_start in line:
                # managed block start found, start recording
                in_block = True
            else:
                if in_block:
                    if marker_end in line:
                        # end of block detected
                        in_block = False
                        # Handle situations where there may be multiple types
                        # of line endings in the same file. Separate the content
                        # into lines. Account for Windows-style line endings
                        # using os.linesep, then by linux-style line endings
                        # using '\n'
                        split_content = []
                        for linesep_line in content.split(os.linesep):
                            for content_line in linesep_line.split('\n'):
                                split_content.append(content_line)
                        # Trim any trailing new lines to avoid unwanted
                        # additional new lines. Guard on a non-empty list so
                        # an empty ``content`` cannot pop from an empty list.
                        while split_content and not split_content[-1]:
                            split_content.pop()
                        # push new block content in file
                        for content_line in split_content:
                            new_file.append(content_line + os.linesep)
                        done = True
                    else:
                        # remove old content, but keep a trace
                        old_content += line
                        result = None
            # else: we are not in the marked block, keep saving things
            orig_file.append(line)
            if result is not None:
                new_file.append(result)
        # end for. If we are here without block management we maybe have some problems,
        # or we need to initialise the marked block
    finally:
        fi_file.close()
    if in_block:
        # unterminated block => bad, always fail
        raise CommandExecutionError(
            'Unterminated marked block. End of file reached before marker_end.'
        )
    if not done:
        if prepend_if_not_found:
            # add the markers and content at the beginning of file
            new_file.insert(0, marker_end + os.linesep)
            if append_newline is True:
                new_file.insert(0, content + os.linesep)
            else:
                new_file.insert(0, content)
            new_file.insert(0, marker_start + os.linesep)
            done = True
        elif append_if_not_found:
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(os.linesep):
                    new_file[-1] += os.linesep
            # add the markers and content at the end of file
            new_file.append(marker_start + os.linesep)
            if append_newline is True:
                new_file.append(content + os.linesep)
            else:
                new_file.append(content)
            new_file.append(marker_end + os.linesep)
            done = True
        else:
            raise CommandExecutionError(
                'Cannot edit marked block. Markers were not found in file.'
            )
    if done:
        diff = ''.join(difflib.unified_diff(orig_file, new_file))
        # Use equality, not identity: ``diff is not ''`` depended on CPython
        # string interning and is not a reliable emptiness test.
        has_changes = diff != ''
        if has_changes and not dry_run:
            # changes detected
            # backup file attrs
            perms = {}
            perms['user'] = get_user(path)
            perms['group'] = get_group(path)
            perms['mode'] = salt.utils.files.normalize_mode(get_mode(path))
            # backup old content
            if backup is not False:
                backup_path = '{0}{1}'.format(path, backup)
                shutil.copy2(path, backup_path)
                # copy2 does not preserve ownership
                check_perms(backup_path,
                            None,
                            perms['user'],
                            perms['group'],
                            perms['mode'])
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()
            # this may have overwritten file attrs
            check_perms(path,
                        None,
                        perms['user'],
                        perms['group'],
                        perms['mode'])
    if show_changes:
        return diff
    return has_changes
def search(path,
           pattern,
           flags=8,
           bufsize=1,
           ignore_if_missing=False,
           multiline=False
           ):
    '''
    .. versionadded:: 0.17.0
    Search for occurrences of a pattern in a file
    Except for multiline, params are identical to
    :py:func:`~salt.modules.file.replace`.
    multiline
        If true, inserts 'MULTILINE' into ``flags`` and sets ``bufsize`` to
        'file'.
        .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt '*' file.search /etc/crontab 'mymaintenance.sh'
    '''
    if multiline:
        bufsize = 'file'
        flags = _add_flags(flags, 'MULTILINE')
    # This function wraps file.replace on purpose in order to enforce
    # consistent usage, compatible regex's, expected behavior, *and* bugs. :)
    # Any enhancements or fixes to one should affect the other.
    return replace(path,
                   pattern,
                   '',
                   flags=flags,
                   bufsize=bufsize,
                   dry_run=True,
                   search_only=True,
                   show_changes=False,
                   ignore_if_missing=ignore_if_missing)
def patch(originalfile, patchfile, options='', dry_run=False):
    '''
    .. versionadded:: 0.10.4
    Apply a patch to a file or directory.
    Equivalent to:
    .. code-block:: bash
        patch <options> -i <patchfile> <originalfile>
    Or, when a directory is patched:
    .. code-block:: bash
        patch <options> -i <patchfile> -d <originalfile> -p0
    originalfile
        The full path to the file or directory to be patched
    patchfile
        A patch file to apply to ``originalfile``
    options
        Options to pass to patch.
    CLI Example:
    .. code-block:: bash
        salt '*' file.patch /opt/file.txt /tmp/file.txt.patch
    '''
    patch_bin = salt.utils.path.which('patch')
    if not patch_bin:
        raise CommandExecutionError(
            'patch executable not found. Is the distribution\'s patch '
            'package installed?'
        )
    cmd = [patch_bin] + salt.utils.args.shlex_split(options)
    if dry_run:
        # BSD patch spells the dry-run flag differently
        cmd.append('-C' if __grains__['kernel'] in ('FreeBSD', 'OpenBSD')
                   else '--dry-run')
    # this argument prevents interactive prompts when the patch fails to apply.
    # the exit code will still be greater than 0 if that is the case.
    if '-N' not in cmd and '--forward' not in cmd:
        cmd.append('--forward')
    # by default, patch will write rejected patch files to <filename>.rej.
    # this option prevents that.
    if not any(opt == '-r' or opt.startswith('-r ')
               or opt.startswith('--reject-file') for opt in cmd):
        cmd.append('--reject-file=-')
    cmd.extend(['-i', patchfile])
    if os.path.isdir(originalfile):
        cmd.extend(['-d', originalfile])
        # default the strip level if the caller did not supply one
        if not any(opt.startswith(('-p', '--strip=')) for opt in cmd):
            cmd.append('--strip=0')
    else:
        cmd.append(originalfile)
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def contains(path, text):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.
    Return ``True`` if the file at ``path`` contains ``text``
    CLI Example:
    .. code-block:: bash
        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    needle = str(text).strip()
    try:
        with salt.utils.filebuffer.BufferedReader(path) as reader:
            # scan chunk-by-chunk; stop at the first hit
            return any(needle in chunk for chunk in reader)
    except (IOError, OSError):
        return False
def contains_regex(path, regex, lchar=''):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.
    Return True if the given regular expression matches on any line in the text
    of a given file.
    If the lchar argument (leading char) is specified, it
    will strip `lchar` from the left side of each line before trying to match
    CLI Example:
    .. code-block:: bash
        salt '*' file.contains_regex /etc/crontab
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    try:
        with salt.utils.files.fopen(path, 'r') as target:
            for current_line in target:
                candidate = current_line.lstrip(lchar) if lchar \
                    else current_line
                if re.search(regex, candidate):
                    return True
            return False
    except (IOError, OSError):
        return False
def contains_glob(path, glob_expr):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.
    Return ``True`` if the given glob matches a string in the named file
    CLI Example:
    .. code-block:: bash
        salt '*' file.contains_glob /etc/foobar '*cheese*'
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    try:
        with salt.utils.filebuffer.BufferedReader(path) as reader:
            # fnmatch each buffered chunk against the glob expression
            return any(fnmatch.fnmatch(chunk, glob_expr) for chunk in reader)
    except (IOError, OSError):
        return False
def append(path, *args, **kwargs):
    '''
    .. versionadded:: 0.9.5
    Append text to the end of a file
    path
        path to file
    `*args`
        strings to append to file
    CLI Example:
    .. code-block:: bash
        salt '*' file.append /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."
    .. admonition:: Attention
        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:
        .. code-block:: bash
            salt '*' file.append /etc/motd args='cheese=spam'
            salt '*' file.append /etc/motd args="['cheese=spam','spam=cheese']"
    '''
    path = os.path.expanduser(path)
    # Largely inspired by Fabric's contrib.files.append()
    if 'args' in kwargs:
        # 'args' keyword overrides positional args; normalize to a list
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]
    # Make sure we have a newline at the end of the file. Do this in binary
    # mode so SEEK_END with nonzero offset will work.
    with salt.utils.files.fopen(path, 'rb+') as ofile:
        linesep = salt.utils.stringutils.to_bytes(os.linesep)
        try:
            # Seek to just before the trailing line separator (if any).
            # On a file shorter than the separator this raises EINVAL/ESPIPE.
            ofile.seek(-len(linesep), os.SEEK_END)
        except IOError as exc:
            if exc.errno in (errno.EINVAL, errno.ESPIPE):
                # Empty file, simply append lines at the beginning of the file
                pass
            else:
                raise
        else:
            # File has content but no trailing newline: add one so the
            # appended lines start on a fresh line.
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)
    # Append lines in text mode
    with salt.utils.files.fopen(path, 'a') as ofile:
        for new_line in args:
            ofile.write('{0}{1}'.format(new_line, os.linesep))
    return 'Wrote {0} lines to "{1}"'.format(len(args), path)
def prepend(path, *args, **kwargs):
    '''
    .. versionadded:: 2014.7.0
    Prepend text to the beginning of a file
    path
        path to file
    `*args`
        strings to prepend to the file
    CLI Example:
    .. code-block:: bash
        salt '*' file.prepend /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."
    .. admonition:: Attention
        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:
        .. code-block:: bash
            salt '*' file.prepend /etc/motd args='cheese=spam'
            salt '*' file.prepend /etc/motd args="['cheese=spam','spam=cheese']"
    '''
    path = os.path.expanduser(path)
    if 'args' in kwargs:
        # explicit 'args' keyword wins over positional arguments
        args = kwargs['args'] if isinstance(kwargs['args'], list) \
            else [kwargs['args']]
    try:
        with salt.utils.files.fopen(path) as fhr:
            existing = fhr.readlines()
    except IOError:
        # missing file: treat as empty and create it below
        existing = []
    fresh = ['{0}\n'.format(item) for item in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(''.join(fresh + existing))
    return 'Prepended {0} lines to "{1}"'.format(len(args), path)
def write(path, *args, **kwargs):
    '''
    .. versionadded:: 2014.7.0
    Write text to a file, overwriting any existing contents.
    path
        path to file
    `*args`
        strings to write to the file
    CLI Example:
    .. code-block:: bash
        salt '*' file.write /etc/motd \\
                "With all thine offerings thou shalt offer salt."
    .. admonition:: Attention
        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:
        .. code-block:: bash
            salt '*' file.write /etc/motd args='cheese=spam'
            salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']"
    '''
    path = os.path.expanduser(path)
    if 'args' in kwargs:
        # explicit 'args' keyword wins over positional arguments
        args = kwargs['args'] if isinstance(kwargs['args'], list) \
            else [kwargs['args']]
    lines = ['{0}\n'.format(item) for item in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(''.join(lines))
    return 'Wrote {0} lines to "{1}"'.format(len(lines), path)
def touch(name, atime=None, mtime=None):
    '''
    .. versionadded:: 0.9.5
    Just like the ``touch`` command, create a file if it doesn't exist or
    simply update the atime and mtime if it already does.
    atime:
        Access time in Unix epoch time (int, or a string of digits)
    mtime:
        Last modification in Unix epoch time (int, or a string of digits)
    CLI Example:
    .. code-block:: bash
        salt '*' file.touch /var/log/emptyfile
    '''
    name = os.path.expanduser(name)
    # The CLI always delivers strings, but API callers may pass ints.
    # ``atime.isdigit()`` would raise AttributeError on an int, so coerce
    # through str() before testing.
    if atime and str(atime).isdigit():
        atime = int(atime)
    if mtime and str(mtime).isdigit():
        mtime = int(mtime)
    try:
        # create the file if it is missing
        if not os.path.exists(name):
            with salt.utils.files.fopen(name, 'a') as fhw:
                fhw.write('')
        # os.utime needs both times; fill in "now" for whichever is absent
        if not atime and not mtime:
            times = None
        elif not mtime and atime:
            times = (atime, time.time())
        elif not atime and mtime:
            times = (time.time(), mtime)
        else:
            times = (atime, mtime)
        os.utime(name, times)
    except TypeError:
        raise SaltInvocationError('atime and mtime must be integers')
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
    return os.path.exists(name)
def seek_read(path, size, offset):
    '''
    .. versionadded:: 2014.1.0
    Seek to a position on a file and read it
    path
        path to file
    seek
        amount to read at once
    offset
        offset to start into the file
    CLI Example:
    .. code-block:: bash
        salt '*' file.seek_read /path/to/file 4096 0
    '''
    fd_ = os.open(os.path.expanduser(path), os.O_RDONLY)
    try:
        # position the descriptor, then read the requested byte count
        os.lseek(fd_, int(offset), 0)
        return os.read(fd_, int(size))
    finally:
        os.close(fd_)
def seek_write(path, data, offset):
    '''
    .. versionadded:: 2014.1.0
    Seek to a position on a file and write to it
    path
        path to file
    data
        data to write to file
    offset
        position in file to start writing
    CLI Example:
    .. code-block:: bash
        salt '*' file.seek_write /path/to/file 'some data' 4096
    '''
    fd_ = os.open(os.path.expanduser(path), os.O_WRONLY)
    try:
        os.lseek(fd_, int(offset), 0)
        written = os.write(fd_, data)
        # flush to disk before reporting the byte count
        os.fsync(fd_)
    finally:
        os.close(fd_)
    return written
def truncate(path, length):
    '''
    .. versionadded:: 2014.1.0
    Seek to a position on a file and delete everything after that point
    path
        path to file
    length
        offset into file to truncate
    CLI Example:
    .. code-block:: bash
        salt '*' file.truncate /path/to/file 512
    '''
    target = os.path.expanduser(path)
    # open read/write in binary so truncate() works on any content
    with salt.utils.files.fopen(target, 'rb+') as fh_:
        fh_.truncate(int(length))
def link(src, path):
    '''
    .. versionadded:: 2014.1.0
    Create a hard link to a file
    src
        Absolute path of the existing file
    path
        Path of the new hard link
    Returns ``True`` on success; raises ``CommandExecutionError`` on failure.
    CLI Example:
    .. code-block:: bash
        salt '*' file.link /path/to/file /path/to/link
    '''
    src = os.path.expanduser(src)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.link(src, path)
        return True
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    # NOTE: the original ended with an unreachable ``return False`` after a
    # try block that always returns or raises; it has been removed.
def is_link(path):
    '''
    Check if the path is a symbolic link
    CLI Example:
    .. code-block:: bash
        salt '*' file.is_link /path/to/link
    '''
    # This function exists because os.path.islink does not support Windows,
    # therefore a custom function will need to be called. This function
    # therefore helps API consistency by providing a single function to call for
    # both operating systems.
    expanded = os.path.expanduser(path)
    return os.path.islink(expanded)
def symlink(src, path):
    '''
    Create a symbolic link (symlink, soft link) to a file
    src
        Target the link will point at
    path
        Absolute path of the symlink to create
    Returns ``True`` if the link exists pointing at ``src`` (idempotent) or
    was created; raises ``CommandExecutionError`` on failure.
    CLI Example:
    .. code-block:: bash
        salt '*' file.symlink /path/to/file /path/to/link
    '''
    path = os.path.expanduser(path)
    try:
        # already-correct link: nothing to do
        if os.path.normpath(os.readlink(path)) == os.path.normpath(src):
            log.debug('link already in correct state: %s -> %s', path, src)
            return True
    except OSError:
        # path does not exist or is not a symlink; fall through and create it
        pass
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.symlink(src, path)
        return True
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    # NOTE: the original ended with an unreachable ``return False`` after a
    # try block that always returns or raises; it has been removed.
def rename(src, dst):
    '''
    Rename a file or directory
    src
        Absolute path of the existing file or directory
    dst
        New path
    Returns ``True`` on success; raises ``CommandExecutionError`` on failure.
    CLI Example:
    .. code-block:: bash
        salt '*' file.rename /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.rename(src, dst)
        return True
    except OSError:
        raise CommandExecutionError(
            'Could not rename \'{0}\' to \'{1}\''.format(src, dst)
        )
    # NOTE: the original ended with an unreachable ``return False`` after a
    # try block that always returns or raises; it has been removed.
def copy(src, dst, recurse=False, remove_existing=False):
    '''
    Copy a file or directory from source to dst
    In order to copy a directory, the recurse flag is required, and
    will by default overwrite files in the destination with the same path,
    and retain all other existing files. (similar to cp -r on unix)
    remove_existing will remove all files in the target directory,
    and then copy files from the source.
    .. note::
        The copy function accepts paths that are local to the Salt minion.
        This function does not support salt://, http://, or the other
        additional file paths that are supported by :mod:`states.file.managed
        <salt.states.file.managed>` and :mod:`states.file.recurse
        <salt.states.file.recurse>`.
    CLI Example:
    .. code-block:: bash
        salt '*' file.copy /path/to/src /path/to/dst
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.exists(src):
        raise CommandExecutionError('No such file or directory \'{0}\''.format(src))
    if not salt.utils.platform.is_windows():
        # Capture the source's ownership/mode so they can be re-applied to
        # the destination after the copy (shutil does not preserve owner).
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = salt.utils.files.normalize_mode(get_mode(src))
    try:
        # Directory involved on either side requires the recurse flag
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
            if not recurse:
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to true!")
            if remove_existing:
                # Wipe the destination first, then mirror the source tree
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                # Merge: overwrite same-path files, keep other existing files
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not copy \'{0}\' to \'{1}\''.format(src, dst)
        )
    if not salt.utils.platform.is_windows():
        # restore ownership/permissions captured above
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
def lstat(path):
    '''
    .. versionadded:: 2014.1.0
    Returns the lstat attributes for the given file or dir. Does not follow
    symbolic links (the link itself is stat'ed).
    Returns an empty dict if the path does not exist.
    CLI Example:
    .. code-block:: bash
        salt '*' file.lstat /path/to/file
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to file must be absolute.')
    try:
        lst = os.lstat(path)
    except OSError:
        # Missing or inaccessible path. Only OS-level failures are swallowed;
        # the original bare ``except Exception`` also hid programming errors.
        return {}
    return dict((key, getattr(lst, key)) for key in ('st_atime', 'st_ctime',
        'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
def access(path, mode):
    '''
    .. versionadded:: 2014.1.0
    Test whether the Salt process has the specified access to the file. One of
    the following modes must be specified:
    .. code-block::text
        f: Test the existence of the path
        r: Test the readability of the path
        w: Test the writability of the path
        x: Test whether the path can be executed
    CLI Example:
    .. code-block:: bash
        salt '*' file.access /path/to/file f
        salt '*' file.access /path/to/file x
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    mode_map = {'f': os.F_OK,
                'r': os.R_OK,
                'w': os.W_OK,
                'x': os.X_OK}
    if mode in mode_map:
        # symbolic mode letter
        return os.access(path, mode_map[mode])
    if mode in six.itervalues(mode_map):
        # raw os.*_OK constant passed directly
        return os.access(path, mode)
    raise SaltInvocationError('Invalid mode specified.')
def read(path, binary=False):
    '''
    .. versionadded:: 2017.7.0
    Return the content of the file.
    CLI Example:
    .. code-block:: bash
        salt '*' file.read /path/to/file
    '''
    # 'rb' for raw bytes, 'r' for decoded text
    open_mode = 'rb' if binary is True else 'r'
    with salt.utils.files.fopen(path, open_mode) as file_obj:
        return file_obj.read()
def readlink(path, canonicalize=False):
    '''
    .. versionadded:: 2014.1.0
    Return the path that a symlink points to
    If canonicalize is set to True, then it return the final target
    CLI Example:
    .. code-block:: bash
        salt '*' file.readlink /path/to/link
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    if not os.path.islink(path):
        raise SaltInvocationError('A valid link was not specified.')
    # canonicalize resolves the entire chain; otherwise one hop only
    return os.path.realpath(path) if canonicalize else os.readlink(path)
def readdir(path):
    '''
    .. versionadded:: 2014.1.0
    Return a list containing the contents of a directory
    CLI Example:
    .. code-block:: bash
        salt '*' file.readdir /path/to/dir/
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Dir path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    # mimic a raw directory listing, which includes '.' and '..'
    return ['.', '..'] + os.listdir(path)
def statvfs(path):
    '''
    .. versionadded:: 2014.1.0
    Perform a statvfs call against the filesystem that the file resides on
    Returns a dict of the ``f_*`` filesystem statistics fields; raises
    ``CommandExecutionError`` if the call fails.
    CLI Example:
    .. code-block:: bash
        salt '*' file.statvfs /path/to/file
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        stv = os.statvfs(path)
        return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
            'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
            'f_frsize', 'f_namemax'))
    except (OSError, IOError):
        raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
    # NOTE: the original ended with an unreachable ``return False`` after a
    # try block that always returns or raises; it has been removed.
def stats(path, hash_type=None, follow_symlinks=True):
    '''
    Return a dict containing the stats for a given file
    CLI Example:
    .. code-block:: bash
        salt '*' file.stats /etc/passwd
    '''
    path = os.path.expanduser(path)
    ret = {}
    if not os.path.exists(path):
        try:
            # Broken symlinks will return False for os.path.exists(), but still
            # have a uid and gid
            pstat = os.lstat(path)
        except OSError:
            # Not a broken symlink, just a nonexistent path
            return ret
    elif follow_symlinks:
        pstat = os.stat(path)
    else:
        pstat = os.lstat(path)
    ret['inode'] = pstat.st_ino
    ret['uid'] = pstat.st_uid
    ret['gid'] = pstat.st_gid
    ret['group'] = gid_to_group(pstat.st_gid)
    ret['user'] = uid_to_user(pstat.st_uid)
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_hash(path, hash_type)
    # classify the node type; later entries override earlier ones, in the
    # same order as the original if-chain
    ret['type'] = 'file'
    for checker, label in ((stat.S_ISDIR, 'dir'),
                           (stat.S_ISCHR, 'char'),
                           (stat.S_ISBLK, 'block'),
                           (stat.S_ISREG, 'file'),
                           (stat.S_ISLNK, 'link'),
                           (stat.S_ISFIFO, 'pipe'),
                           (stat.S_ISSOCK, 'socket')):
        if checker(pstat.st_mode):
            ret['type'] = label
    ret['target'] = os.path.realpath(path)
    return ret
def rmdir(path):
    '''
    .. versionadded:: 2014.1.0
    Remove the specified directory. Fails if a directory is not empty.
    CLI Example:
    .. code-block:: bash
        salt '*' file.rmdir /tmp/foo/
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    try:
        os.rmdir(path)
    except OSError as exc:
        # e.g. 'Directory not empty'; report the OS error text
        return exc.strerror
    return True
def remove(path):
    '''
    Remove the named file. If a directory is supplied, it will be recursively
    deleted.
    CLI Example:
    .. code-block:: bash
        salt '*' file.remove /tmp/foo
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute: {0}'.format(path))
    try:
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
            return True
        if os.path.isdir(path):
            shutil.rmtree(path)
            return True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            'Could not remove \'{0}\': {1}'.format(path, exc)
        )
    # nonexistent path: nothing was removed
    return False
def directory_exists(path):
    '''
    Tests to see if path is a valid directory. Returns True/False.
    CLI Example:
    .. code-block:: bash
        salt '*' file.directory_exists /etc
    '''
    expanded = os.path.expanduser(path)
    return os.path.isdir(expanded)
def file_exists(path):
    '''
    Tests to see if path is a valid file. Returns True/False.
    CLI Example:
    .. code-block:: bash
        salt '*' file.file_exists /etc/passwd
    '''
    expanded = os.path.expanduser(path)
    return os.path.isfile(expanded)
def path_exists_glob(path):
    '''
    Tests to see if path after expansion is a valid path (file or directory).
    Expansion allows usage of ? * and character ranges []. Tilde expansion
    is not supported. Returns True/False.
    .. versionadded:: Hellium
    CLI Example:
    .. code-block:: bash
        salt '*' file.path_exists_glob /etc/pam*/pass*
    '''
    # any glob match at all means the path "exists"
    return bool(glob.glob(os.path.expanduser(path)))
def restorecon(path, recursive=False):
    '''
    Reset the SELinux context on a given path
    CLI Example:
    .. code-block:: bash
         salt '*' file.restorecon /home/user/.ssh/authorized_keys
    '''
    # -R recurses into directories; -F forces the reset in both cases
    flag = '-FR' if recursive else '-F'
    cmd = ['restorecon', flag, path]
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def get_selinux_context(path):
    '''
    Get an SELinux context from a given path
    CLI Example:
    .. code-block:: bash
        salt '*' file.get_selinux_context /etc/hosts
    '''
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
    # context is the user:role:type:range quadruple in the listing
    match = re.search(r'\w+:\w+:\w+:\w+', out)
    if match:
        return match.group(0)
    return (
        'No selinux context information is available for {0}'.format(path)
    )
def set_selinux_context(path,
                        user=None,
                        role=None,
                        type=None,    # pylint: disable=W0622
                        range=None):  # pylint: disable=W0622
    '''
    Set a specific SELinux label on a given path
    CLI Example:
    .. code-block:: bash
        salt '*' file.set_selinux_context path <user> <role> <type> <range>
        salt '*' file.set_selinux_context /etc/yum.repos.d/epel.repo system_u object_r system_conf_t s0
    '''
    if not any((user, role, type, range)):
        return False
    cmd = ['chcon']
    # map each supplied component to its chcon flag, in fixed order
    for flag, value in (('-u', user), ('-r', role),
                        ('-t', type), ('-l', range)):
        if value:
            cmd.extend([flag, value])
    cmd.append(path)
    if __salt__['cmd.retcode'](cmd, python_shell=False):
        # non-zero exit code: the change failed
        return False
    return get_selinux_context(path)
def source_list(source, source_hash, saltenv):
    '''
    Check the source list and return the source to use
    source
        A single source (path/URL), a list of sources, or a list of
        single-key dicts mapping a source to its hash.
    source_hash
        Fallback hash used for sources that do not carry their own.
    saltenv
        Fileserver environment used when a source does not specify one.
    CLI Example:
    .. code-block:: bash
        salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
    '''
    contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
    if contextkey in __context__:
        return __context__[contextkey]
    # get the master file list
    if isinstance(source, list):
        mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))
            path, senv = salt.utils.url.parse(single)
            if senv:
                mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
                mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]
        ret = None
        for single in source:
            if isinstance(single, dict):
                # check the proto, if it is http or ftp then download the file
                # to check, if it is salt then check the master list
                # if it is a local file, check if the file exists
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = _urlparse(single_src)
                # Fix this for Windows
                if salt.utils.platform.is_windows():
                    # urlparse doesn't handle a local Windows path without the
                    # protocol indicator (file://). The scheme will be the
                    # drive letter instead of the protocol. So, we'll add the
                    # protocol and re-parse
                    if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_single_src = _urlparse('file://' + single_src)
                proto = urlparsed_single_src.scheme
                if proto == 'salt':
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    # Compare against the environment parsed from the source
                    # (senv), matching the string branch below. The previous
                    # code compared against the default saltenv, so a source
                    # with an explicit ?saltenv=... could never match.
                    if (path, senv) in mfiles or (path, senv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith('http') or proto == 'ftp':
                    ret = (single_src, single_hash)
                    break
                elif proto == 'file' and (
                         os.path.exists(urlparsed_single_src.netloc) or
                         os.path.exists(urlparsed_single_src.path) or
                         os.path.exists(os.path.join(
                             urlparsed_single_src.netloc,
                             urlparsed_single_src.path))):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith(os.sep) and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, six.string_types):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
                urlparsed_src = _urlparse(single)
                if salt.utils.platform.is_windows():
                    # urlparse doesn't handle a local Windows path without the
                    # protocol indicator (file://). The scheme will be the
                    # drive letter instead of the protocol. So, we'll add the
                    # protocol and re-parse
                    if urlparsed_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_src = _urlparse('file://' + single)
                proto = urlparsed_src.scheme
                if proto == 'file' and (
                        os.path.exists(urlparsed_src.netloc) or
                        os.path.exists(urlparsed_src.path) or
                        os.path.exists(os.path.join(
                            urlparsed_src.netloc,
                            urlparsed_src.path))):
                    ret = (single, source_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
                    ret = (single, source_hash)
                    break
                elif single.startswith(os.sep) and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the list items matched
            raise CommandExecutionError(
                'none of the specified sources were found'
            )
    else:
        ret = (source, source_hash)
    # cache the resolution for the remainder of this run
    __context__[contextkey] = ret
    return ret
def apply_template_on_contents(
        contents,
        template,
        context,
        defaults,
        saltenv):
    '''
    Return the contents after applying the templating engine
    contents
        template string
    template
        template format
    context
        Overrides default context variables passed to the template.
    defaults
        Default context passed to the template.
    CLI Example:
    .. code-block:: bash
        salt '*' file.apply_template_on_contents \\
            contents='This is a {{ template }} string.' \\
            template=jinja \\
            "context={}" "defaults={'template': 'cool'}" \\
            saltenv=base
    '''
    if template in salt.utils.templates.TEMPLATE_REGISTRY:
        # ``context`` entries override ``defaults`` with the same key
        context_dict = defaults if defaults else {}
        if context:
            context_dict.update(context)
        # Apply templating
        contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
            contents,
            from_str=True,
            to_str=True,
            context=context_dict,
            saltenv=saltenv,
            grains=__opts__['grains'],
            pillar=__pillar__,
            salt=__salt__,
            opts=__opts__)['data']
        # normalize the rendered result to the native str type
        if six.PY2:
            contents = contents.encode('utf-8')
        elif six.PY3 and isinstance(contents, bytes):
            # bytes -> str
            contents = contents.decode('utf-8')
    else:
        # unknown engine: return a state-style error dict instead of raising
        ret = {}
        ret['result'] = False
        ret['comment'] = ('Specified template format {0} is not supported'
                          ).format(template)
        return ret
    return contents
def get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify=False,
        **kwargs):
    '''
    Return the managed file data for file.managed

    Returns a 3-tuple ``(sfn, source_sum, comment)``: ``sfn`` is the local
    path of the cached (and possibly rendered) source ('' if nothing was
    cached), ``source_sum`` is a ``{'hash_type': ..., 'hsum': ...}`` dict
    ({} if no hash was determined), and ``comment`` is a non-empty error
    string on failure.

    name
        location where the file lives on the server

    template
        template format

    source
        managed source file

    source_hash
        hash of the source file

    source_hash_name
        When ``source_hash`` refers to a remote file, this specifies the
        filename to look for in that file.

        .. versionadded:: 2016.3.5

    user
        Owner of file

    group
        Group owner of file

    mode
        Permissions of file

    attrs
        Attributes of file

        .. versionadded:: Oxygen

    context
        Variables to add to the template context

    defaults
        Default values of for context_dict

    skip_verify
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
    '''
    # Copy the file to the minion and templatize it
    sfn = ''
    source_sum = {}
    def _get_local_file_source_sum(path):
        '''
        DRY helper for getting the source_sum value from a locally cached
        path.
        '''
        return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}
    # If we have a source defined, let's figure out what the hash is
    if source:
        urlparsed_source = _urlparse(source)
        parsed_scheme = urlparsed_source.scheme
        parsed_path = os.path.join(
                urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
        # A one-letter scheme is really a Windows drive letter (urlparse
        # treats 'c' in 'c:\\path' as the scheme), so reassemble the path
        # and handle the source as a local file.
        if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
            parsed_path = ':'.join([parsed_scheme, parsed_path])
            parsed_scheme = 'file'
        if parsed_scheme == 'salt':
            # Fileserver source: the master already knows the hash
            source_sum = __salt__['cp.hash_file'](source, saltenv)
            if not source_sum:
                return '', {}, 'Source file {0} not found'.format(source)
        elif not source_hash and parsed_scheme == 'file':
            # Local file:// source with no explicit hash: hash it ourselves
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            # Bare absolute path (no scheme): also hashed locally
            source_sum = _get_local_file_source_sum(source)
        else:
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(name,
                                                    source,
                                                    source_hash,
                                                    source_hash_name,
                                                    saltenv)
                    except CommandExecutionError as exc:
                        return '', {}, exc.strerror
                else:
                    # Remote source, no hash, and verification requested:
                    # refuse rather than fetch unverifiable data.
                    msg = (
                        'Unable to verify upstream hash of source file {0}, '
                        'please set source_hash or set skip_verify to True'
                        .format(source)
                    )
                    return '', {}, msg
    # parsed_scheme is only referenced when source is truthy (it is bound
    # in the branch above), so the short-circuit below is safe.
    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__['cp.is_cached'](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get('hash_type', 'sha256')
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # Verification skipped: trust the cached copy and record
                # its actual hash as the source hash.
                sfn = cached_dest
                source_sum = {'hsum': cached_sum, 'hash_type': htype}
            elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
                # NOTE(review): the fallback value here is the configured
                # hash *type* (e.g. 'sha256'), which can never equal a
                # digest — so a missing 'hsum' always forces a re-fetch.
                cache_refetch = True
            else:
                sfn = cached_dest
        # If we didn't have the template or remote file, or the file has been
        # updated and the cache has to be refreshed, download the file.
        if not sfn or cache_refetch:
            try:
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum.get('hsum'))
            except Exception as exc:
                # A 404 or other error code may raise an exception, catch it
                # and return a comment that will fail the calling state.
                return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)
        # If cache failed, sfn will be False, so do a truth check on sfn first
        # as invoking os.path.exists() on a bool raises a TypeError.
        if not sfn or not os.path.exists(sfn):
            return sfn, {}, 'Source file \'{0}\' not found'.format(source)
        if sfn == name:
            raise SaltInvocationError(
                'Source file cannot be the same as destination'
            )
        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    attrs=attrs,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__['grains'],
                    opts=__opts__,
                    **kwargs)
            else:
                return sfn, {}, ('Specified template format {0} is not supported'
                                 ).format(template)
            if data['result']:
                # Rendering succeeded: the rendered tempfile becomes the
                # effective source, so recompute the hash from it.
                sfn = data['data']
                hsum = get_hash(sfn, form='sha256')
                source_sum = {'hash_type': 'sha256',
                              'hsum': hsum}
            else:
                # Rendering failed; data['data'] carries the error message
                __clean_tmp(sfn)
                return sfn, {}, data['data']
    return sfn, source_sum, ''
def extract_hash(hash_fn,
                 hash_type='sha256',
                 file_name='',
                 source='',
                 source_hash_name=None):
    '''
    .. versionchanged:: 2016.3.5
        Prior to this version, only the ``file_name`` argument was considered
        for filename matches in the hash file. This would be problematic for
        cases in which the user was relying on a remote checksum file that they
        do not control, and they wished to use a different name for that file
        on the minion from the filename on the remote server (and in the
        checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the
        remote file was at ``https://mydomain.tld/different_name.tar.gz``. The
        :py:func:`file.managed <salt.states.file.managed>` state now also
        passes this function the source URI as well as the ``source_hash_name``
        (if specified). In cases where ``source_hash_name`` is specified, it
        takes precedence over both the ``file_name`` and ``source``. When it is
        not specified, ``file_name`` takes precedence over ``source``. This
        allows for better capability for matching hashes.

    .. versionchanged:: 2016.11.0
        File name and source URI matches are no longer disregarded when
        ``source_hash_name`` is specified. They will be used as fallback
        matches if there is no match to the ``source_hash_name`` value.

    This routine is called from the :mod:`file.managed
    <salt.states.file.managed>` state to pull a hash from a remote file.
    Regular expressions are used line by line on the ``source_hash`` file, to
    find a potential candidate of the indicated hash type. This avoids many
    problems of arbitrary file layout rules. It specifically permits pulling
    hash codes from debian ``*.dsc`` files.

    If no exact match of a hash and filename are found, then the first hash
    found (if any) will be returned. If no hashes at all are found, then
    ``None`` will be returned.

    For example:

    .. code-block:: yaml

        openerp_7.0-latest-1.tar.gz:
          file.managed:
            - name: /tmp/openerp_7.0-20121227-075624-1_all.deb
            - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz
            - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc

    CLI Example:

    .. code-block:: bash

        salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
    '''
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                'file.extract_hash: Unsupported hash_type \'%s\', falling '
                'back to matching any supported hash_type', hash_type
            )
            hash_type = ''
        # No (or unsupported) hash type: accept any supported digest length
        hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = str(hash_len)

    # Characters that may legally precede a basename in a checksum line
    filename_separators = string.whitespace + r'\/'

    if source_hash_name:
        if not isinstance(source_hash_name, six.string_types):
            source_hash_name = str(source_hash_name)
        # Negative index of the char just before the basename in a line
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            'file.extract_hash: Extracting %s hash for file matching '
            'source_hash_name \'%s\'',
            'any supported' if not hash_type else hash_type,
            source_hash_name
        )
    if file_name:
        if not isinstance(file_name, six.string_types):
            file_name = str(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, six.string_types):
            source = str(source)
        urlparsed_source = _urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1

    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            'file.extract_hash: %s %s hash for file matching%s: %s',
            'If no source_hash_name match found, will extract'
            if source_hash_name
            else 'Extracting',
            'any supported' if not hash_type else hash_type,
            '' if len(basename_searches) == 1 else ' either of the following',
            ', '.join(basename_searches)
        )

    def _add_to_matches(found, line, match_type, value, matched):
        # Record a qualifying match. Hoisted out of the read loop below:
        # the original re-created this closure for every line of the file.
        log.debug(
            'file.extract_hash: Line \'%s\' matches %s \'%s\'',
            line, match_type, value
        )
        found.setdefault(match_type, []).append(matched)

    # The pattern is loop-invariant; compile it once rather than rebuilding
    # the pattern string and re-searching with it on every line.
    hash_re = re.compile(
        r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
    )

    partial = None
    found = {}

    with salt.utils.files.fopen(hash_fn, 'r') as fp_:
        for line in fp_:
            line = line.strip()
            hash_match = hash_re.search(line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # There was a match, but it's not of the correct length
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {'hsum': matched_hsum,
                                   'hash_type': matched_type}

            if matched is None:
                log.debug(
                    'file.extract_hash: In line \'%s\', no %shash found',
                    line,
                    '' if not hash_type else hash_type + ' '
                )
                continue

            # Remember the first hash seen as a last-resort fallback
            if partial is None:
                partial = matched

            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(found, line, 'source_hash_name',
                                            source_hash_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source_hash_name) + r'\s+',
                              line):
                    _add_to_matches(found, line, 'source_hash_name',
                                    source_hash_name, matched)
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(found, line, 'file_name',
                                            file_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(file_name) + r'\s+', line):
                    _add_to_matches(found, line, 'file_name',
                                    file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, 'source',
                                            source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source) + r'\s+', line):
                    _add_to_matches(found, line, 'source', source, matched)
                    hash_matched = True

            if not hash_matched:
                log.debug(
                    'file.extract_hash: Line \'%s\' contains %s hash '
                    '\'%s\', but line did not meet the search criteria',
                    line, matched['hash_type'], matched['hsum']
                )

    # Preference order: source_hash_name, then file_name, then source
    for found_type, found_str in (('source_hash_name', source_hash_name),
                                  ('file_name', file_name),
                                  ('source', source)):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    'file.extract_hash: Multiple %s matches for %s: %s',
                    found_type,
                    found_str,
                    ', '.join(
                        ['{0} ({1})'.format(x['hsum'], x['hash_type'])
                         for x in found[found_type]]
                    )
                )
            ret = found[found_type][0]
            log.debug(
                'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
                ret['hash_type'], ret['hsum'], found_str
            )
            return ret

    if partial:
        log.debug(
            'file.extract_hash: Returning the partially identified %s hash '
            '\'%s\'', partial['hash_type'], partial['hsum']
        )
        return partial

    log.debug('file.extract_hash: No matches, returning None')
    return None
def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
    '''
    Check the permissions on files, modify attributes and chown if needed. File
    attributes are only verified if lsattr(1) is installed.

    Returns a 2-tuple of the updated ``ret`` dict and a ``perms`` dict that
    records the detected (``l``-prefixed) and changed (``c``-prefixed)
    ownership/mode values.

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_perms /etc/sudoers '{}' root root 400 ai

    .. versionchanged:: 2014.1.3
        ``follow_symlinks`` option added
    '''
    name = os.path.expanduser(name)
    lsattr_cmd = salt.utils.path.which('lsattr')
    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': [],
               'result': True}
        orig_comment = ''
    else:
        # Comments are collected in a list here and joined back into the
        # original string form at the end of this function.
        orig_comment = ret['comment']
        ret['comment'] = []
    # Check permissions
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    if not cur:
        # NOTE: The file.directory state checks the content of the error
        # message in this exception. Any changes made to the message for this
        # exception will reflect the file.directory state as well, and will
        # likely require changes there.
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = salt.utils.files.normalize_mode(cur['mode'])
    is_dir = os.path.isdir(name)
    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        # List attributes on file
        perms['lattrs'] = ''.join(lsattr(name).get('name', ''))
        # Remove attributes on file so changes can be enforced.
        if perms['lattrs']:
            chattr(name, operator='remove', attributes=perms['lattrs'])
    # Mode changes if needed
    if mode is not None:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.files.normalize_mode(mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    if mode != salt.utils.files.normalize_mode(get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
                        )
                    else:
                        ret['changes']['mode'] = mode
    # user/group changes if needed, then check if it worked
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(perms['luser'])
            ) or (
            not salt.utils.platform.is_windows() and user != perms['luser']
        ):
            perms['cuser'] = user
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(perms['lgroup'])
            ) or (
            not salt.utils.platform.is_windows() and group != perms['lgroup']
        ):
            perms['cgroup'] = group
    if 'cuser' in perms or 'cgroup' in perms:
        if not __opts__['test']:
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms['luser']
            if group is None:
                group = perms['lgroup']
            try:
                chown_func(name, user, group)
            except OSError:
                ret['result'] = False
    # Verify the ownership actually changed (or record pending changes
    # in test mode)
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(
                    get_user(name, follow_symlinks=follow_symlinks)) and
                user != ''
            ) or (
            not salt.utils.platform.is_windows() and
                user != get_user(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['user'] = user
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change user to {0}'
                                      .format(user))
        elif 'cuser' in perms and user != '':
            ret['changes']['user'] = user
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        # BUGFIX: this verification branch previously tested ``user != ''``
        # (a copy-paste of the user branch above), so group changes were
        # suppressed or mis-reported whenever ``user`` was empty. It must
        # test ``group != ''``.
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(
                    get_group(name, follow_symlinks=follow_symlinks)) and
                group != '') or (
            not salt.utils.platform.is_windows() and
                group != get_group(name, follow_symlinks=follow_symlinks) and
                group != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['group'] = group
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change group to {0}'
                                      .format(group))
        elif 'cgroup' in perms and group != '':
            ret['changes']['group'] = group
    if isinstance(orig_comment, six.string_types):
        # Restore the comment to its original string form
        if orig_comment:
            ret['comment'].insert(0, orig_comment)
        ret['comment'] = '; '.join(ret['comment'])
    if __opts__['test'] is True and ret['changes']:
        ret['result'] = None
    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        # Replace attributes on file if it had been removed
        if perms['lattrs']:
            chattr(name, operator='add', attributes=perms['lattrs'])
    # Modify attributes of file if needed
    if attrs is not None and not is_dir:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs[0] is not None or diff_attrs[1] is not None:
                if __opts__['test'] is True:
                    ret['changes']['attrs'] = attrs
                else:
                    if diff_attrs[0] is not None:
                        chattr(name, operator="add", attributes=diff_attrs[0])
                    if diff_attrs[1] is not None:
                        chattr(name, operator="remove", attributes=diff_attrs[1])
                    cmp_attrs = _cmp_attrs(name, attrs)
                    if cmp_attrs[0] is not None or cmp_attrs[1] is not None:
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change attributes to {0}'.format(attrs)
                        )
                    else:
                        ret['changes']['attrs'] = attrs
    return ret, perms
def check_managed(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        **kwargs):
    '''
    Check to see what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # A list source means "first entry that exists wins"
    source, source_hash = source_list(source,  # pylint: disable=W0633
                                      source_hash,
                                      saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # No inline contents; fetch (and possibly render) the source file
        sfn, source_sum, comments = get_managed(
            name, template, source, source_hash, source_hash_name,
            user, group, mode, attrs, saltenv, context, defaults,
            skip_verify, **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)
    # Ownership/mode of files living under the tempdir is not reported
    # here; get_managed() still applies them correctly on placement.
    if name.startswith(tempfile.gettempdir()):
        for meta_key in ('user', 'group', 'mode'):
            changes.pop(meta_key, None)
    __clean_tmp(sfn)
    if not changes:
        return True, 'The file {0} is in the correct state'.format(name)
    log.info(changes)
    msg_parts = ['The following values are set to be changed:\n']
    msg_parts.extend('{0}: {1}\n'.format(key, val)
                     for key, val in six.iteritems(changes))
    return None, ''.join(msg_parts)
def check_managed_changes(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        **kwargs):
    '''
    Return a dictionary of what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # A list source means "first entry that exists wins"
    source, source_hash = source_list(source,  # pylint: disable=W0633
                                      source_hash,
                                      saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # No inline contents; fetch (and possibly render) the source file
        sfn, source_sum, comments = get_managed(
            name, template, source, source_hash, source_hash_name,
            user, group, mode, attrs, saltenv, context, defaults,
            skip_verify, **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
        if sfn and source and keep_mode:
            # keep_mode is only honored for fileserver/local sources,
            # where the original mode can actually be stat'ed.
            scheme = _urlparse(source).scheme
            if scheme in ('salt', 'file') or source.startswith('/'):
                try:
                    mode = __salt__['cp.stat_file'](source,
                                                    saltenv=saltenv,
                                                    octal=True)
                except Exception as exc:
                    log.warning('Unable to stat %s: %s', sfn, exc)
    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)
    __clean_tmp(sfn)
    return changes
def check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents=None):
    '''
    Check for the changes in the file metadata.

    Returns a dict of pending changes (empty when the file is already in the
    desired state); the special key ``newfile`` is set when the destination
    does not exist at all.

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_file_meta /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' base

    .. note::

        Supported hash types include sha512, sha384, sha256, sha224, sha1, and
        md5.

    name
        Path to file destination

    sfn
        Template-processed source file contents

    source
        URL to file source

    source_sum
        File checksum information as a dictionary

        .. code-block:: yaml

            {hash_type: md5, hsum: <md5sum>}

    user
        Destination file user owner

    group
        Destination file group owner

    mode
        Destination file permissions mode

    attrs
        Destination file attributes

        .. versionadded:: Oxygen

    saltenv
        Salt environment used to resolve source files

    contents
        File contents
    '''
    lsattr_cmd = salt.utils.path.which('lsattr')
    changes = {}
    if not source_sum:
        source_sum = {}
    # A missing destination short-circuits all other checks
    lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
    if not lstats:
        changes['newfile'] = name
        return changes
    if 'hsum' in source_sum:
        if source_sum['hsum'] != lstats['sum']:
            if not sfn and source:
                # Pull the source so a proper diff can be generated
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum['hsum'])
            if sfn:
                try:
                    changes['diff'] = get_diff(
                        sfn, name, template=True, show_filenames=False)
                except CommandExecutionError as exc:
                    changes['diff'] = exc.strerror
            else:
                changes['sum'] = 'Checksum differs'
    if contents is not None:
        # Write a tempfile with the static contents
        tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                       text=True)
        if salt.utils.platform.is_windows():
            contents = os.linesep.join(
                _splitlines_preserving_trailing_newline(contents))
        with salt.utils.files.fopen(tmp, 'w') as tmp_:
            tmp_.write(salt.utils.stringutils.to_str(contents))
        # Compare the static contents with the named file
        try:
            differences = get_diff(name, tmp, show_filenames=False)
        except CommandExecutionError as exc:
            log.error('Failed to diff files: {0}'.format(exc))
            differences = exc.strerror
        __clean_tmp(tmp)
        if differences:
            if __salt__['config.option']('obfuscate_templates'):
                changes['diff'] = '<Obfuscated Template>'
            else:
                changes['diff'] = differences
    if not salt.utils.platform.is_windows():
        # Check owner
        if (user is not None
                and user != lstats['user']
                and user != lstats['uid']):
            changes['user'] = user
        # Check group
        if (group is not None
                and group != lstats['group']
                and group != lstats['gid']):
            changes['group'] = group
        # Normalize the file mode
        smode = salt.utils.files.normalize_mode(lstats['mode'])
        mode = salt.utils.files.normalize_mode(mode)
        if mode is not None and mode != smode:
            changes['mode'] = mode
        if lsattr_cmd:
            diff_attrs = _cmp_attrs(name, attrs)
            # BUGFIX: ``and`` binds tighter than ``or``, so the previous
            # condition grouped as
            # ``(attrs is not None and diff_attrs[0] is not None) or
            #   diff_attrs[1] is not None``
            # and recorded ``changes['attrs'] = None`` whenever attrs was
            # None but the file carried extra attributes. Only report a
            # change when attrs was actually requested.
            if attrs is not None and (
                    diff_attrs[0] is not None or
                    diff_attrs[1] is not None):
                changes['attrs'] = attrs
    return changes
def get_diff(file1,
             file2,
             saltenv='base',
             show_filenames=True,
             show_changes=True,
             template=False,
             source_hash_file1=None,
             source_hash_file2=None):
    '''
    Return unified diff of two files

    file1
        The first file to feed into the diff utility

        .. versionchanged:: Oxygen
            Can now be either a local or remote file. In earlier releases,
            this had to be a file local to the minion.

    file2
        The second file to feed into the diff utility

        .. versionchanged:: Oxygen
            Can now be either a local or remote file. In earlier releases, this
            had to be a file on the salt fileserver (i.e.
            ``salt://somefile.txt``)

    show_filenames : True
        Set to ``False`` to hide the filenames in the top two lines of the
        diff.

    show_changes : True
        If set to ``False``, and there are differences, then instead of a diff
        a simple message stating that show_changes is set to ``False`` will be
        returned.

    template : False
        Set to ``True`` if two templates are being compared. This is not useful
        except for within states, with the ``obfuscate_templates`` option set
        to ``True``.

        .. versionadded:: Oxygen

    source_hash_file1
        If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
        file cache, this option can be passed to keep the minion from
        re-downloading the archive if the cached copy matches the specified
        hash.

        .. versionadded:: Oxygen

    source_hash_file2
        If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
        file cache, this option can be passed to keep the minion from
        re-downloading the archive if the cached copy matches the specified
        hash.

        .. versionadded:: Oxygen

    CLI Examples:

    .. code-block:: bash

        salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
        salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt
    '''
    files = (file1, file2)
    source_hashes = (source_hash_file1, source_hash_file2)
    paths = []
    errors = []
    for filename, source_hash in zip(files, source_hashes):
        try:
            # Local file paths will just return the same path back when passed
            # to cp.cache_file.
            cached_path = __salt__['cp.cache_file'](filename,
                                                    saltenv,
                                                    source_hash=source_hash)
            if cached_path is False:
                errors.append(
                    u'File {0} not found'.format(
                        salt.utils.stringutils.to_unicode(filename)
                    )
                )
                continue
            paths.append(cached_path)
        except MinionError as exc:
            errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
            continue
    if errors:
        raise CommandExecutionError(
            'Failed to cache one or more files',
            info=errors
        )
    args = []
    # BUGFIX: read the *cached* copies collected above, not the original
    # arguments. ``paths`` was previously built and then never used, and
    # opening a salt:// or http:// URL as a local path always failed.
    for filename in paths:
        try:
            with salt.utils.files.fopen(filename, 'r') as fp_:
                args.append(fp_.readlines())
        except (IOError, OSError) as exc:
            raise CommandExecutionError(
                'Failed to read {0}: {1}'.format(
                    salt.utils.stringutils.to_str(filename),
                    exc.strerror
                )
            )
    if args[0] != args[1]:
        if template and __salt__['config.option']('obfuscate_templates'):
            ret = u'<Obfuscated Template>'
        elif not show_changes:
            ret = u'<show_changes=False>'
        else:
            # Binary detection must also look at the local cached copies
            bdiff = _binary_replace(*paths)
            if bdiff:
                ret = bdiff
            else:
                if show_filenames:
                    # Labels keep the caller-supplied names, not cache paths
                    args.extend(
                        [salt.utils.stringutils.to_str(x) for x in files]
                    )
                ret = salt.utils.locales.sdecode(
                    ''.join(difflib.unified_diff(*args))  # pylint: disable=no-value-for-parameter
                )
        return ret
    return u''
def manage_file(name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
saltenv,
backup,
makedirs=False,
template=None, # pylint: disable=W0613
show_changes=True,
contents=None,
dir_mode=None,
follow_symlinks=True,
skip_verify=False,
keep_mode=False,
encoding=None,
encoding_errors='strict',
**kwargs):
'''
Checks the destination against what was retrieved with get_managed and
makes the appropriate modifications (if necessary).
name
location to place the file
sfn
location of cached file on the minion
This is the path to the file stored on the minion. This file is placed
on the minion using cp.cache_file. If the hash sum of that file
matches the source_sum, we do not transfer the file to the minion
again.
This file is then grabbed and if it has template set, it renders the
file to be placed into the correct place on the system using
salt.files.utils.copyfile()
ret
The initial state return data structure. Pass in ``None`` to use the
default structure.
source
file reference on the master
source_hash
sum hash for source
user
user owner
group
group owner
backup
backup_mode
attrs
attributes to be set on file: '' means remove all of them
.. versionadded: Oxygen
makedirs
make directories if they do not exist
template
format of templating
show_changes
Include diff in state return
contents:
contents to be placed in the file
dir_mode
mode for directories created with makedirs
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
keep_mode : False
If ``True``, and the ``source`` is a file from the Salt fileserver (or
a local file on the minion), the mode of the destination file will be
set to the mode of the source file.
.. note:: keep_mode does not work with salt-ssh.
As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion
encoding : None
If None, str() will be applied to contents.
If not None, specified encoding will be used.
See https://docs.python.org/3/library/codecs.html#standard-encodings
for the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
Default is ```'strict'```.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the error handling schemes.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base ''
.. versionchanged:: 2014.7.0
``follow_symlinks`` option added
'''
name = os.path.expanduser(name)
if not ret:
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
# Ensure that user-provided hash string is lowercase
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source and not sfn:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \
or source.startswith('/'):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
log.warning('Unable to stat %s: %s', sfn, exc)
# Check changes if the target file exists
if os.path.isfile(name) or os.path.islink(name):
if os.path.islink(name) and follow_symlinks:
real_name = os.path.realpath(name)
else:
real_name = name
# Only test the checksums on files with managed contents
if source and not (not follow_symlinks and os.path.islink(real_name)):
name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type']))
else:
name_sum = None
# Check if file needs to be replaced
if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum):
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server or local
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify \
and _urlparse(source).scheme not in ('salt', ''):
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3}). If the \'source_hash\' value '
'refers to a remote file with multiple possible '
'matches, then it may be necessary to set '
'\'source_hash_name\'.'.format(
source_sum['hash_type'],
source,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
# Print a diff equivalent to diff -u old new
if __salt__['config.option']('obfuscate_templates'):
ret['changes']['diff'] = '<Obfuscated Template>'
elif not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
try:
ret['changes']['diff'] = get_diff(
real_name, sfn, show_filenames=False)
except CommandExecutionError as exc:
ret['changes']['diff'] = exc.strerror
# Pre requisites are met, and the file needs to be replaced, do it
try:
salt.utils.files.copyfile(sfn,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'w') as tmp_:
if encoding:
log.debug('File will be encoded with {0}'.format(encoding))
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_str(contents))
try:
differences = get_diff(
real_name, tmp, show_filenames=False,
show_changes=show_changes, template=True)
except CommandExecutionError as exc:
ret.setdefault('warnings', []).append(
'Failed to detect changes to file: {0}'.format(exc.strerror)
)
differences = ''
if differences:
ret['changes']['diff'] = differences
# Pre requisites are met, the file needs to be replaced, do it
try:
salt.utils.files.copyfile(tmp,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(tmp)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
__clean_tmp(tmp)
# Check for changing symlink to regular file here
if os.path.islink(name) and not follow_symlinks:
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
try:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
ret['changes']['diff'] = \
'Replace symbolic link with regular file'
if salt.utils.platform.is_windows():
ret = check_perms(name,
ret,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
None,
kwargs.get('win_inheritance'))
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks)
if ret['changes']:
ret['comment'] = u'File {0} updated'.format(
salt.utils.locales.sdecode(name)
)
elif not ret['changes'] and ret['result']:
ret['comment'] = u'File {0} is in the correct state'.format(
salt.utils.locales.sdecode(name)
)
if sfn:
__clean_tmp(sfn)
return ret
else: # target file does not exist
contain_dir = os.path.dirname(name)
def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
# check for existence of windows drive letter
if salt.utils.platform.is_windows():
drive, _ = os.path.splitdrive(name)
if drive and not os.path.exists(drive):
__clean_tmp(sfn)
return _error(ret,
'{0} drive not present'.format(drive))
if dir_mode is None and mode is not None:
# Add execute bit to each nonzero digit in the mode, if
# dir_mode was not specified. Otherwise, any
# directories created with makedirs_() below can't be
# listed via a shell.
mode_list = [x for x in str(mode)][-3:]
for idx in range(len(mode_list)):
if mode_list[idx] != '0':
mode_list[idx] = str(int(mode_list[idx]) | 1)
dir_mode = ''.join(mode_list)
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1121
makedirs_(name,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
kwargs.get('win_inheritance'))
# pylint: enable=E1121
else:
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify \
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
else: # source != True
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
if mode:
current_umask = os.umask(0o77)
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
if touch(name):
ret['changes']['new'] = 'file {0} created'.format(name)
ret['comment'] = 'Empty file'
else:
return _error(
ret, 'Empty file {0} not created'.format(name)
)
else:
if not __opts__['test']:
if touch(name):
ret['changes']['diff'] = 'New file'
else:
return _error(
ret, 'File {0} not created'.format(name)
)
if mode:
os.umask(current_umask)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'w') as tmp_:
if encoding:
log.debug('File will be encoded with {0}'.format(encoding))
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_str(contents))
# Copy into place
salt.utils.files.copyfile(tmp,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(tmp)
# Now copy the file contents if there is a source file
elif sfn:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(sfn)
# This is a new file, if no mode specified, use the umask to figure
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = os.umask(0)
os.umask(mask)
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
if salt.utils.platform.is_windows():
ret = check_perms(name,
ret,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
None,
kwargs.get('win_inheritance'))
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs)
if not ret['comment']:
ret['comment'] = 'File ' + name + ' updated'
if __opts__['test']:
ret['comment'] = 'File ' + name + ' not updated'
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File ' + name + ' is in the correct state'
if sfn:
__clean_tmp(sfn)
return ret
def mkdir(dir_path,
          user=None,
          group=None,
          mode=None):
    '''
    Ensure that a directory is available.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mkdir /opt/jetty/context
    '''
    directory = os.path.normpath(os.path.expanduser(dir_path))
    if not os.path.isdir(directory):
        # Create any missing ancestors with the same user/group/mode so the
        # result follows the principle of least surprise.
        makedirs_perms(directory, user, group, mode)
    return True
def makedirs_(path,
              user=None,
              group=None,
              mode=None):
    '''
    Ensure that the directory containing this path is available.

    .. note::

        The path must end with a trailing slash otherwise the directory/directories
        will be created up to the parent directory. For example if path is
        ``/opt/code``, then it would be treated as ``/opt/`` but if the path
        ends with a trailing slash like ``/opt/code/``, then it would be
        treated as ``/opt/code/``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs /opt/code/
    '''
    path = os.path.expanduser(path)
    if mode:
        mode = salt.utils.files.normalize_mode(mode)

    # Everything is created relative to the parent of ``path``; see the note
    # in the docstring about the trailing slash.
    dirname = os.path.normpath(os.path.dirname(path))
    if os.path.isdir(dirname):
        # Nothing to create
        msg = 'Directory \'{0}\' already exists'.format(dirname)
        log.debug(msg)
        return msg
    if os.path.exists(dirname):
        msg = 'The path \'{0}\' already exists and is not a directory'.format(
            dirname
        )
        log.debug(msg)
        return msg

    # Walk upwards and collect every missing ancestor directory
    missing = []
    while not os.path.isdir(dirname):
        missing.append(dirname)
        parent = os.path.dirname(dirname)
        if parent == dirname:
            # Reached the top without ever finding an existing directory;
            # this happens for relative paths that never resolve.
            raise SaltInvocationError(
                'Recursive creation for path \'{0}\' would result in an '
                'infinite loop. Please use an absolute path.'.format(dirname)
            )
        dirname = parent

    # Create from the topmost missing ancestor down to the deepest one,
    # applying the requested user, group and mode to each new directory.
    for directory_to_create in reversed(missing):
        log.debug('Creating directory: %s', directory_to_create)
        mkdir(directory_to_create, user=user, group=group, mode=mode)
def makedirs_perms(name,
                   user=None,
                   group=None,
                   mode='0755'):
    '''
    Taken and modified from os.makedirs to set user, group and mode for each
    directory created.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs_perms /opt/code
    '''
    name = os.path.expanduser(name)
    head, tail = os.path.split(name)
    if not tail:
        # Path ended with a separator; split again to get a real leaf
        head, tail = os.path.split(head)
    if head and tail and not os.path.exists(head):
        try:
            # Recurse to create the missing ancestors first
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # be happy if someone already created the path
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:  # xxx/newdir/. exists if xxx/newdir exists
            return
    os.mkdir(name)
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
def get_devmm(name):
    '''
    Get major/minor info from a device

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_devmm /dev/chr
    '''
    name = os.path.expanduser(name)
    if not (is_chrdev(name) or is_blkdev(name)):
        # Not a device node: report zeros rather than raising
        return (0, 0)
    rdev = os.stat(name).st_rdev
    return (os.major(rdev), os.minor(rdev))
def is_chrdev(name):
    '''
    Check if a file exists and is a character device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_chrdev /dev/chr
    '''
    name = os.path.expanduser(name)
    try:
        mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # Missing path: simply not a character device
            return False
        raise
    return stat.S_ISCHR(mode)
def mknod_chrdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a character device.

    name
        Path of the device node to create
    major / minor
        Device numbers passed to ``os.makedev``
    user / group / mode
        Ownership and permissions applied via ``check_perms`` afterwards

    Returns a state-style dict with ``name``/``changes``/``comment``/``result``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_chrdev /dev/chr 180 31
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating character device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        # In test mode only report what would be done (result None = "unknown")
        if __opts__['test']:
            ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # lstrip('0Oo') removes any octal prefix before parsing base 8
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created character device
    # NOTE(review): mode is parsed here with int() in base 10, unlike the
    # base-8 parse above — confirm check_perms expects this form.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_blkdev(name):
    '''
    Check if a file exists and is a block device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_blkdev /dev/blk
    '''
    name = os.path.expanduser(name)
    try:
        mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # Missing path: simply not a block device
            return False
        raise
    return stat.S_ISBLK(mode)
def mknod_blkdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a block device.

    name
        Path of the device node to create
    major / minor
        Device numbers passed to ``os.makedev``
    user / group / mode
        Ownership and permissions applied via ``check_perms`` afterwards

    Returns a state-style dict with ``name``/``changes``/``comment``/``result``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_blkdev /dev/blk 8 999
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating block device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        # In test mode only report what would be done (result None = "unknown")
        if __opts__['test']:
            ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # lstrip('0Oo') removes any octal prefix before parsing base 8
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created block device
    # NOTE(review): mode is parsed here with int() in base 10, unlike the
    # base-8 parse above — confirm check_perms expects this form.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_fifo(name):
    '''
    Check if a file exists and is a FIFO.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)
    try:
        mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # Missing path: simply not a FIFO
            return False
        raise
    return stat.S_ISFIFO(mode)
def mknod_fifo(name,
               user=None,
               group=None,
               mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a FIFO pipe.

    name
        Path of the FIFO to create
    user / group / mode
        Ownership and permissions applied via ``check_perms`` afterwards

    Returns a state-style dict with ``name``/``changes``/``comment``/``result``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating FIFO name: {0}'.format(name))
    try:
        # In test mode only report what would be done (result None = "unknown")
        if __opts__['test']:
            ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
            ret['result'] = None
        else:
            # lstrip('0Oo') removes any octal prefix before parsing base 8
            if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
                ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created fifo
    # NOTE(review): mode is parsed here with int() in base 10, unlike the
    # base-8 parse above — confirm check_perms expects this form.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def mknod(name,
          ntype,
          major=0,
          minor=0,
          user=None,
          group=None,
          mode='0600'):
    '''
    .. versionadded:: 0.17.0

    Create a block device, character device, or fifo pipe.
    Identical to the gnu mknod.

    name
        Path of the node to create
    ntype
        Node type: ``c`` (character), ``b`` (block), or ``p`` (pipe)
    major / minor
        Device numbers (ignored for pipes)
    user / group / mode
        Ownership and permissions for the new node

    CLI Examples:

    .. code-block:: bash

        salt '*' file.mknod /dev/chr c 180 31
        salt '*' file.mknod /dev/blk b 8 999
        salt '*' file.mknod /dev/fifo p
    '''
    ret = False
    # Ensure the parent directory exists before creating the node
    makedirs_(name, user, group)
    if ntype == 'c':
        ret = mknod_chrdev(name, major, minor, user, group, mode)
    elif ntype == 'b':
        ret = mknod_blkdev(name, major, minor, user, group, mode)
    elif ntype == 'p':
        ret = mknod_fifo(name, user, group, mode)
    else:
        raise SaltInvocationError(
            'Node type unavailable: \'{0}\'. Available node types are '
            'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype)
        )
    return ret
def list_backups(path, limit=None):
    '''
    .. versionadded:: 0.17.0

    Lists the previous versions of a file backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups

    Returns a dict keyed 0..N-1 (0 = most recent) of
    ``{'Backup Time', 'Size', 'Location'}`` dicts.

    CLI Example:

    .. code-block:: bash

        salt '*' file.list_backups /foo/bar/baz.txt
    '''
    path = os.path.expanduser(path)
    # Normalize limit: ints pass through, None stays None (TypeError),
    # anything else is logged and ignored (ValueError).
    try:
        limit = int(limit)
    except TypeError:
        pass
    except ValueError:
        log.error('file.list_backups: \'limit\' value must be numeric')
        limit = None
    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    if salt.utils.platform.is_windows():
        # ':' is an illegal filesystem path character on Windows
        src_dir = parent_dir.replace(':', '_')
    else:
        # Drop the leading '/' so the path can be joined under bkroot
        src_dir = parent_dir[1:]
    # Figure out full path of location of backup file in minion cache
    bkdir = os.path.join(bkroot, src_dir)
    if not os.path.isdir(bkdir):
        return {}
    files = {}
    for fname in [x for x in os.listdir(bkdir)
                  if os.path.isfile(os.path.join(bkdir, x))]:
        # Backup filenames embed a timestamp; the separator differs by OS
        if salt.utils.platform.is_windows():
            # ':' is an illegal filesystem path character on Windows
            strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
        else:
            strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # File didn't match the strp format string, so it's not a backup
            # for this file. Move on to the next one.
            continue
        if salt.utils.platform.is_windows():
            str_format = '%a %b %d %Y %H-%M-%S.%f'
        else:
            str_format = '%a %b %d %Y %H:%M:%S.%f'
        files.setdefault(timestamp, {})['Backup Time'] = \
            timestamp.strftime(str_format)
        location = os.path.join(bkdir, fname)
        files[timestamp]['Size'] = os.stat(location).st_size
        files[timestamp]['Location'] = location
    # Re-key newest-first as 0..N-1, honoring the optional limit
    return dict(list(zip(
        list(range(len(files))),
        [files[x] for x in sorted(files, reverse=True)[:limit]]
    )))
list_backup = salt.utils.functools.alias_function(list_backups, 'list_backup')
def list_backups_dir(path, limit=None):
    '''
    Lists the previous versions of a directory backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.

    path
        The directory on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups

    Returns a dict keyed by original file basename; each value maps
    0..N-1 (0 = most recent) to ``{'Backup Time', 'Size', 'Location'}``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.list_backups_dir /foo/bar/baz/
    '''
    path = os.path.expanduser(path)
    # Normalize limit: ints pass through, None stays None (TypeError),
    # anything else is logged and ignored (ValueError).
    try:
        limit = int(limit)
    except TypeError:
        pass
    except ValueError:
        log.error('file.list_backups_dir: \'limit\' value must be numeric')
        limit = None
    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    # Figure out full path of location of backup folder in minion cache
    bkdir = os.path.join(bkroot, parent_dir[1:])
    if not os.path.isdir(bkdir):
        return {}
    files = {}
    # Map each backup-name prefix (original basename) to its backup count.
    # groupby relies on the sorted() ordering to group equal prefixes.
    f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])])
    ff = os.listdir(bkdir)
    for i, n in six.iteritems(f):
        ssfile = {}
        for x in sorted(ff):
            # NOTE: rebinds the outer ``basename`` from os.path.split above
            basename = x.split('_')[0]
            if i == basename:
                # NOTE(review): unlike list_backups, this always uses the ':'
                # separator, which is not Windows-safe — confirm intended.
                strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
                try:
                    timestamp = datetime.datetime.strptime(x, strpfmt)
                except ValueError:
                    # Folder didn't match the strp format string, so it's not a backup
                    # for this folder. Move on to the next one.
                    continue
                ssfile.setdefault(timestamp, {})['Backup Time'] = \
                    timestamp.strftime('%a %b %d %Y %H:%M:%S.%f')
                location = os.path.join(bkdir, x)
                ssfile[timestamp]['Size'] = os.stat(location).st_size
                ssfile[timestamp]['Location'] = location
        # Re-key this file's backups newest-first as 0..n-1, honoring limit
        sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]])))
        sefiles = {i: sfiles}
        files.update(sefiles)
    return files
def restore_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0

    Restore a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to restore, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.restore_backup /foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)
    # Note: This only supports minion backups, so this function will need to be
    # modified if/when master backups are implemented.
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}

    # Accept only ids that round-trip cleanly through int() (rejects '01', '1.0', ...)
    try:
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        backup = list_backups(path)[int(backup_id)]
    except ValueError:
        return ret
    except KeyError:
        ret['comment'] = ('backup_id \'{0}\' does not exist for '
                          '{1}'.format(backup_id, path))
        return ret

    # Preserve the current file as a fresh backup before overwriting it
    salt.utils.files.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(backup['Location'], path)
    except IOError as exc:
        ret['comment'] = ('Unable to restore {0} to {1}: '
                          '{2}'.format(backup['Location'], path, exc))
        return ret

    ret['result'] = True
    ret['comment'] = ('Successfully restored {0} to '
                      '{1}'.format(backup['Location'], path))

    # Try to set proper ownership on the restored file
    if not salt.utils.platform.is_windows():
        try:
            fstat = os.stat(path)
        except (OSError, IOError):
            ret['comment'] += ', but was unable to set ownership'
        else:
            os.chown(path, fstat.st_uid, fstat.st_gid)
    return ret
def delete_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0

    Delete a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to delete, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.delete_backup /var/cache/salt/minion/file_backup/home/foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}

    # Accept only ids that round-trip cleanly through int() (rejects '01', '1.0', ...)
    try:
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        backup = list_backups(path)[int(backup_id)]
    except ValueError:
        return ret
    except KeyError:
        ret['comment'] = ('backup_id \'{0}\' does not exist for '
                          '{1}'.format(backup_id, path))
        return ret

    try:
        os.remove(backup['Location'])
    except IOError as exc:
        ret['comment'] = 'Unable to remove {0}: {1}'.format(
            backup['Location'], exc)
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully removed {0}'.format(backup['Location'])
    return ret
remove_backup = salt.utils.functools.alias_function(delete_backup, 'remove_backup')
def grep(path,
         pattern,
         *opts):
    '''
    Grep for a string in the specified file

    .. note::
        This function's return value is slated for refinement in future
        versions of Salt

    path
        Path to the file to be searched

        .. note::
            Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
            is being used then the path should be quoted to keep the shell from
            attempting to expand the glob expression.

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    opts
        Additional command-line flags to pass to the grep command. For example:
        ``-v``, or ``-i -B2``

        .. note::
            The options should come after a double-dash (as shown in the
            examples below) to keep Salt's own argument parser from
            interpreting them.

    CLI Example:

    .. code-block:: bash

        salt '*' file.grep /etc/passwd nobody
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
        salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
    '''
    path = os.path.expanduser(path)

    # Each element of ``opts`` must be a single command-line flag
    split_opts = []
    for opt in opts:
        try:
            pieces = salt.utils.args.shlex_split(opt)
        except AttributeError:
            # Non-string option (e.g. an int); stringify and retry
            pieces = salt.utils.args.shlex_split(str(opt))
        if len(pieces) > 1:
            raise SaltInvocationError(
                'Passing multiple command line arguments in a single string '
                'is not supported, please pass the following arguments '
                'separately: {0}'.format(opt)
            )
        split_opts.extend(pieces)

    cmd = ['grep'] + split_opts + [pattern, path]
    try:
        return __salt__['cmd.run_all'](cmd, python_shell=False)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
def open_files(by_pid=False):
    '''
    Return a list of all physical open files on the system.

    Relies on a Linux-style ``/proc`` filesystem; sockets and deleted files
    are filtered out (their targets fail ``os.stat``).

    by_pid
        If True, return a mapping of pid -> open files instead of
        file -> pids.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.open_files
        salt '*' file.open_files by_pid=True
    '''
    # First we collect valid PIDs (numeric entries under /proc)
    pids = {}
    procfs = os.listdir('/proc/')
    for pfile in procfs:
        try:
            pids[int(pfile)] = []
        except ValueError:
            # Not a valid PID, move on
            pass
    # Then we look at the open files for each PID
    files = {}
    for pid in pids:
        ppath = '/proc/{0}'.format(pid)
        try:
            tids = os.listdir('{0}/task'.format(ppath))
        except OSError:
            # Process exited between the two scans; skip it
            continue
        # Collect the names of all of the file descriptors
        fd_ = []
        #try:
        #    fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid)))
        #except:
        #    pass
        for fpath in os.listdir('{0}/fd'.format(ppath)):
            fd_.append('{0}/fd/{1}'.format(ppath, fpath))
        for tid in tids:
            try:
                # The executable backing each thread counts as an open file
                fd_.append(
                    os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
                )
            except OSError:
                continue
            for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
                fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))
        fd_ = sorted(set(fd_))
        # Loop through file descriptors and return useful data for each file
        for fdpath in fd_:
            # Sometimes PIDs and TIDs disappear before we can query them
            try:
                name = os.path.realpath(fdpath)
                # Running stat on the file cuts out all of the sockets and
                # deleted files from the list
                os.stat(name)
            except OSError:
                continue
            if name not in files:
                files[name] = [pid]
            else:
                # We still want to know which PIDs are using each file
                files[name].append(pid)
                files[name] = sorted(set(files[name]))
            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))
    if by_pid:
        return pids
    return files
def pardir():
    '''
    Return the relative parent directory path symbol for underlying OS

    .. versionadded:: 2014.7.0

    This can be useful when constructing Salt Formulas.

    .. code-block:: jinja

        {% set pardir = salt['file.pardir']() %}
        {% set final_path = salt['file.join']('subdir', pardir, 'confdir') %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.pardir
    '''
    # Delegate to the stdlib so the platform decides ('..' everywhere today)
    symbol = os.path.pardir
    return symbol
def normpath(path):
    '''
    Returns Normalize path, eliminating double slashes, etc.

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.normpath'](tpldir + '/../vars.jinja') import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.normpath 'a/b/c/..'
    '''
    # Pure delegation to the stdlib normalizer
    normalized = os.path.normpath(path)
    return normalized
def basename(path):
    '''
    Returns the final component of a pathname

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- set filename = salt['file.basename'](source_file) %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.basename 'test/test.config'
    '''
    # Pure delegation to the stdlib helper
    leaf = os.path.basename(path)
    return leaf
def dirname(path):
    '''
    Returns the directory component of a pathname

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.dirname'](tpldir) + '/vars.jinja' import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.dirname 'test/path/filename.config'
    '''
    # Pure delegation to the stdlib helper
    parent = os.path.dirname(path)
    return parent
def join(*args):
    '''
    Return a normalized file system path for the underlying OS

    .. versionadded:: 2014.7.0

    This can be useful at the CLI but is frequently useful when scripting
    combining path variables:

    .. code-block:: jinja

        {% set www_root = '/var' %}
        {% set app_dir = 'myapp' %}

        myapp_config:
          file:
            - managed
            - name: {{ salt['file.join'](www_root, app_dir, 'config.yaml') }}

    CLI Example:

    .. code-block:: bash

        salt '*' file.join '/' 'usr' 'local' 'bin'
    '''
    # Pure delegation to the stdlib joiner
    joined = os.path.join(*args)
    return joined
def move(src, dst):
    '''
    Move a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.move /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    # Both endpoints must be absolute paths
    for label, candidate in (('Source', src), ('Destination', dst)):
        if not os.path.isabs(candidate):
            raise SaltInvocationError('{0} path must be absolute.'.format(label))

    try:
        shutil.move(src, dst)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to move '{0}' to '{1}': {2}".format(src, dst, exc)
        )

    return {
        'result': True,
        'comment': "'{0}' moved to '{1}'".format(src, dst),
    }
def diskusage(path):
    '''
    Recursively calculate disk usage of path and return it
    in bytes

    Hard links are counted once (tracked by inode); files that vanish or
    cannot be stat'd during the walk are skipped.

    CLI Example:

    .. code-block:: bash

        salt '*' file.diskusage /path/to/check
    '''
    total_size = 0
    seen = set()
    if os.path.isfile(path):
        # A plain file: its own size is the answer
        return os.stat(path).st_size

    for dirpath, dirnames, filenames in os.walk(path):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            try:
                stat_structure = os.stat(fp)
            except OSError:
                # File disappeared or is unreadable; skip it
                continue
            if stat_structure.st_ino in seen:
                # Hard link to an inode we already counted
                continue
            seen.add(stat_structure.st_ino)
            total_size += stat_structure.st_size
    # Bug fix: previously the return value was only assigned inside the walk
    # loop, so a path that yielded no iterations (e.g. nonexistent) raised
    # UnboundLocalError. Returning the accumulator gives 0 in that case.
    return total_size
| 31.932225
| 178
| 0.555337
|
from __future__ import absolute_import, print_function
import datetime
import difflib
import errno
import fileinput
import fnmatch
import itertools
import logging
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import glob
import hashlib
import mmap
from collections import Iterable, Mapping
from functools import reduce
from salt.ext import six
from salt.ext.six.moves import range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
try:
import grp
import pwd
except ImportError:
pass
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.locales
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.user
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
log = logging.getLogger(__name__)
__func_alias__ = {
'makedirs_': 'makedirs'
}
def __virtual__():
    '''
    Only load on POSIX-like systems; Windows minions use win_file instead.
    '''
    if not salt.utils.platform.is_windows():
        return True
    return (
        False,
        'The file execution module cannot be loaded: only available on '
        'non-Windows systems - use win_file instead.'
    )
def __clean_tmp(sfn):
    '''
    Remove a temporary file, but never one that lives under a configured
    file_roots directory.
    '''
    tmp_prefix = os.path.join(tempfile.gettempdir(),
                              salt.utils.files.TEMPFILE_PREFIX)
    if not sfn.startswith(tmp_prefix):
        # Not one of our temp files; leave it alone
        return
    all_roots = itertools.chain.from_iterable(
        six.itervalues(__opts__['file_roots']))
    in_roots = any(sfn.startswith(root) for root in all_roots)
    # Only clean up files that exist
    if os.path.exists(sfn) and not in_roots:
        os.remove(sfn)
def _error(ret, err_msg):
ret['result'] = False
ret['comment'] = err_msg
return ret
def _binary_replace(old, new):
    '''
    Describe a replacement involving binary files; returns an empty string
    when both ``old`` and ``new`` are text files.
    '''
    old_isbin = not __utils__['files.is_text'](old)
    new_isbin = not __utils__['files.is_text'](new)
    if old_isbin and new_isbin:
        return u'Replace binary file'
    if old_isbin:
        return u'Replace binary file with text file'
    if new_isbin:
        return u'Replace text file with binary file'
    return u''
def _get_bkroot():
    '''
    Return the minion's backup root: <cachedir>/file_backup.
    '''
    # Get the cachedir from the minion config
    cachedir = __salt__['config.get']('cachedir')
    return os.path.join(cachedir, 'file_backup')
def _splitlines_preserving_trailing_newline(str):
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
return lines
def gid_to_group(gid):
    '''
    Resolve a numeric gid to its group name; unknown gids are returned
    unchanged, and an empty result maps to ''.
    '''
    try:
        gid = int(gid)
    except ValueError:
        # Not an integer — maybe a group name was passed; map it to a gid
        gid = group_to_gid(gid)
    if gid == '':
        return ''
    try:
        return grp.getgrgid(gid).gr_name
    except (KeyError, NameError):
        # Unknown gid (or grp unavailable): fall back to the gid itself
        return gid
def group_to_gid(group):
    '''
    Resolve a group name to its gid; ints pass through, unknown names and
    None map to ''.
    '''
    if group is None:
        return ''
    if isinstance(group, int):
        # Already numeric; nothing to look up
        return group
    try:
        return grp.getgrnam(group).gr_gid
    except KeyError:
        return ''
def get_gid(path, follow_symlinks=True):
    '''
    Return the numeric gid of ``path`` via stats(); -1 when unavailable.
    '''
    info = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return info.get('gid', -1)
def get_group(path, follow_symlinks=True):
    '''
    Return the group name of ``path`` via stats(); False when unavailable.
    '''
    info = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return info.get('group', False)
def uid_to_user(uid):
    '''
    Resolve a numeric uid to its user name; unknown uids are returned
    unchanged.
    '''
    try:
        return pwd.getpwuid(uid).pw_name
    except (KeyError, NameError):
        # Unknown uid (or pwd unavailable): fall back to the uid itself
        return uid
def user_to_uid(user):
    '''
    Resolve a user name to its uid; ints pass through, unknown names map
    to '', and None means "the user the minion runs as".
    '''
    if user is None:
        user = salt.utils.user.get_user()
    if isinstance(user, int):
        # Already numeric; nothing to look up
        return user
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        return ''
def get_uid(path, follow_symlinks=True):
    '''
    Return the numeric uid of ``path`` via stats(); -1 when unavailable.
    '''
    info = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return info.get('uid', -1)
def get_user(path, follow_symlinks=True):
    '''
    Return the user name owning ``path`` via stats(); False when unavailable.
    '''
    info = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return info.get('user', False)
def get_mode(path, follow_symlinks=True):
    '''
    Return the mode string of ``path`` via stats(); '' when unavailable.
    '''
    info = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return info.get('mode', '')
def set_mode(path, mode):
    '''
    chmod ``path`` to the given octal ``mode`` and return the resulting mode.
    '''
    path = os.path.expanduser(path)
    # Strip any octal prefix (0, 0o, 0O); an empty result means mode 0
    mode = str(mode).lstrip('0Oo') or '0'
    if not os.path.exists(path):
        raise CommandExecutionError('{0}: File not found'.format(path))
    try:
        os.chmod(path, int(mode, 8))
    except Exception:
        return 'Invalid Mode ' + mode
    return get_mode(path)
def lchown(path, user, group):
    '''
    Chown ``path`` without following symlinks.

    Historical behavior preserved: an explicitly requested but unresolvable
    user/group is passed through unchanged (``err`` is collected but unused).
    '''
    path = os.path.expanduser(path)
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ''
    if uid == '':
        if not user:
            uid = -1
        else:
            err += 'User does not exist\n'
    if gid == '':
        if not group:
            gid = -1
        else:
            err += 'Group does not exist\n'
    return os.lchown(path, uid, gid)
def chown(path, user, group):
    '''
    Chown ``path`` to ``user``/``group``; returns an error string on
    resolution failure, otherwise the result of os.chown.
    '''
    path = os.path.expanduser(path)
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ''
    if uid == '':
        if not user:
            uid = -1
        else:
            err += 'User does not exist\n'
    if gid == '':
        if not group:
            gid = -1
        else:
            err += 'Group does not exist\n'
    if not os.path.exists(path):
        try:
            # Broken symlinks will return false, but still need to be chowned
            return os.lchown(path, uid, gid)
        except OSError:
            pass
        err += 'File not found'
    if err:
        return err
    return os.chown(path, uid, gid)
def chgrp(path, group):
    '''
    Change the group of ``path``, keeping its current owner, via chown().
    '''
    path = os.path.expanduser(path)
    return chown(path, get_user(path), group)
def _cmp_attrs(path, attrs):
    '''
    Compare the requested chattr attributes ``attrs`` against those currently
    set on ``path`` (as reported by lsattr).

    Returns a two-item list ``[to_add, to_remove]``: ``to_add`` is a string of
    requested attributes not yet set, ``to_remove`` a string of set attributes
    not requested; either slot is None when empty.
    '''
    diff = [None, None]
    # Attributes currently set on the file ('' when lsattr has no entry)
    lattrs = lsattr(path).get(path, '')
    # Fix: the loop variables previously shadowed the chr() builtin
    old = [c for c in lattrs if c not in attrs]
    if old:
        diff[1] = ''.join(old)
    new = [c for c in attrs if c not in lattrs]
    if new:
        diff[0] = ''.join(new)
    return diff
def lsattr(path):
    '''
    List chattr-style attributes of ``path`` via the lsattr(1) binary;
    returns {path: [attribute chars]}.
    '''
    if not os.path.exists(path):
        raise SaltInvocationError("File or directory does not exist.")
    output = __salt__['cmd.run'](['lsattr', path], python_shell=False)
    results = {}
    for line in output.splitlines():
        if line.startswith('lsattr'):
            # Diagnostic line emitted by the binary itself; skip it
            continue
        vals = line.split(None, 1)
        results[vals[1]] = re.findall(r"[acdijstuADST]", vals[0])
    return results
def chattr(*args, **kwargs):
    '''
    Change file attributes on the given paths via the chattr(1) utility.

    args
        One or more paths to operate on.

    Keyword arguments:
        operator   -- 'add' or 'remove' (required)
        attributes -- attribute letters to apply, e.g. 'ai' (required)
        flags      -- extra chattr flags, passed as ``-<flags>``
        version    -- set the file version via ``-v <version>``

    Raises SaltInvocationError on bad arguments and CommandExecutionError
    when chattr produces output (it is silent on success).
    '''
    # Quote each path unless the caller already quoted it.
    args = [arg if salt.utils.stringutils.is_quoted(arg) else '"{0}"'.format(arg)
            for arg in args]
    operator = kwargs.pop('operator', None)
    attributes = kwargs.pop('attributes', None)
    flags = kwargs.pop('flags', None)
    version = kwargs.pop('version', None)
    if (operator is None) or (operator not in ['add', 'remove']):
        raise SaltInvocationError(
            "Need an operator: 'add' or 'remove' to modify attributes.")
    if attributes is None:
        raise SaltInvocationError("Need attributes: [AacDdijsTtSu]")
    # chattr syntax: '+' adds attributes, '-' removes them.
    if operator == "add":
        attrs = '+{0}'.format(attributes)
    elif operator == "remove":
        attrs = '-{0}'.format(attributes)
    flgs = ''
    if flags is not None:
        flgs = '-{0}'.format(flags)
    vrsn = ''
    if version is not None:
        vrsn = '-v {0}'.format(version)
    cmd = 'chattr {0} {1} {2} {3}'.format(attrs, flgs, vrsn, ' '.join(args))
    result = __salt__['cmd.run'](cmd, python_shell=False)
    # chattr is silent on success; any output indicates failure.
    if bool(result):
        raise CommandExecutionError(
            "chattr failed to run, possibly due to bad parameters.")
    return True
def get_sum(path, form='sha256'):
    '''
    Return the checksum of ``path`` using hash type ``form``, or the
    string ``'File not found'`` when the path is not a regular file.
    '''
    full_path = os.path.expanduser(path)
    if not os.path.isfile(full_path):
        return 'File not found'
    return salt.utils.hashutils.get_hash(full_path, form, 4096)
def get_hash(path, form='sha256', chunk_size=65536):
    '''
    Hash ``path`` with hash type ``form``, reading in ``chunk_size``
    blocks, and return the digest.
    '''
    expanded = os.path.expanduser(path)
    return salt.utils.hashutils.get_hash(expanded, form, chunk_size)
def get_source_sum(file_name='',
                   source='',
                   source_hash=None,
                   source_hash_name=None,
                   saltenv='base'):
    '''
    Resolve ``source_hash`` into a ``{'hash_type': ..., 'hsum': ...}`` dict.

    ``source_hash`` may be: a bare hex digest (type inferred from its
    length via HASHES_REVMAP), an expression ``<hash_type>=<hash>``, or a
    path/URI of a local or remote file containing hashes.  When a hash
    file is used, ``file_name``, ``source`` and ``source_hash_name`` help
    select the matching entry; ``saltenv`` selects the fileserver
    environment for caching a remote hash file.

    Raises CommandExecutionError for any unparseable or unsupported input.
    '''
    def _invalid_source_hash_format():
        # Single place to raise the (long) usage error for any parse failure.
        raise CommandExecutionError(
            'Source hash {0} format is invalid. The supported formats are: '
            '1) a hash, 2) an expression in the format <hash_type>=<hash>, or '
            '3) either a path to a local file containing hashes, or a URI of '
            'a remote hash file. Supported protocols for remote hash files '
            'are: {1}. The hash may also not be of a valid length, the '
            'following are supported hash types and lengths: {2}.'.format(
                source_hash,
                ', '.join(salt.utils.files.VALID_PROTOS),
                ', '.join(
                    ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                     for x in sorted(HASHES_REVMAP)]
                ),
            )
        )
    hash_fn = None
    # An absolute path is taken to be a local hash file.
    if os.path.isabs(source_hash):
        hash_fn = source_hash
    else:
        try:
            proto = _urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                # Remote hash file: cache it locally first.
                hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
                if not hash_fn:
                    raise CommandExecutionError(
                        'Source hash file {0} not found'.format(source_hash)
                    )
            else:
                if proto != '':
                    # Some unsupported protocol (e.g. foo://) is being used.
                    # We'll get into this else block if a hash expression
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            _invalid_source_hash_format()
    if hash_fn is not None:
        ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
        if ret is None:
            _invalid_source_hash_format()
        return ret
    else:
        ret = {}
        try:
            ret['hash_type'], ret['hsum'] = \
                [x.strip() for x in source_hash.split('=', 1)]
        except AttributeError:
            _invalid_source_hash_format()
        except ValueError:
            # No '=' separator: treat the value as a bare hex digest and
            # infer the hash type from its length.
            if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret['hsum'] = source_hash
            source_hash_len = len(source_hash)
            if source_hash_len in HASHES_REVMAP:
                ret['hash_type'] = HASHES_REVMAP[source_hash_len]
            else:
                _invalid_source_hash_format()
        if ret['hash_type'] not in HASHES:
            raise CommandExecutionError(
                'Invalid hash type \'{0}\'. Supported hash types are: {1}. '
                'Either remove the hash type and simply use \'{2}\' as the '
                'source_hash, or change the hash type to a supported type.'
                .format(ret['hash_type'], ', '.join(HASHES), ret['hsum'])
            )
        else:
            # Cross-check the digest length against the declared hash type.
            hsum_len = len(ret['hsum'])
            if hsum_len not in HASHES_REVMAP:
                _invalid_source_hash_format()
            elif hsum_len != HASHES[ret['hash_type']]:
                raise CommandExecutionError(
                    'Invalid length ({0}) for hash type \'{1}\'. Either '
                    'remove the hash type and simply use \'{2}\' as the '
                    'source_hash, or change the hash type to \'{3}\''.format(
                        hsum_len,
                        ret['hash_type'],
                        ret['hsum'],
                        HASHES_REVMAP[hsum_len],
                    )
                )
        return ret
def check_hash(path, file_hash):
    '''
    Verify that ``path`` hashes to ``file_hash``.

    The hash may be given as ``<type>:<digest>`` / ``<type>=<digest>`` or
    as a bare digest whose type is inferred from its length.
    '''
    path = os.path.expanduser(path)
    if not isinstance(file_hash, six.string_types):
        raise SaltInvocationError('hash must be a string')
    hash_type = None
    hash_value = file_hash
    for sep in (':', '='):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    if hash_type is None:
        # No explicit type: infer it from the digest length.
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            raise SaltInvocationError(
                'Hash {0} (length: {1}) could not be matched to a supported '
                'hash type. The supported hash types and lengths are: '
                '{2}'.format(
                    file_hash,
                    hash_len,
                    ', '.join(
                        ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                         for x in sorted(HASHES_REVMAP)]
                    ),
                )
            )
    return get_hash(path, hash_type) == hash_value
def find(path, *args, **kwargs):
    '''
    Approximate the Unix find(1) command via salt.utils.find; returns a
    sorted list of matches, or an error string for invalid options.
    '''
    if 'delete' in args:
        kwargs['delete'] = 'f'
    elif 'print' in args:
        kwargs['print'] = 'path'
    try:
        finder = salt.utils.find.Finder(kwargs)
    except ValueError as ex:
        return 'error: {0}'.format(ex)
    matches = []
    for globbed in glob.glob(os.path.expanduser(path)):
        matches.extend(finder.find(globbed))
    matches.sort()
    return matches
def _sed_esc(string, escape_all=False):
special_chars = "^.[$()|*+?{"
string = string.replace("'", "'\"'\"'").replace("/", "\\/")
if escape_all is True:
for char in special_chars:
string = string.replace(char, "\\" + char)
return string
def sed(path,
        before,
        after,
        limit='',
        backup='.bak',
        options='-r -e',
        flags='g',
        escape_all=False,
        negate_match=False):
    '''
    Run ``sed -i`` on ``path``, substituting ``before`` with ``after``.

    ``limit`` restricts the substitution to matching lines and
    ``negate_match`` inverts that restriction.  Returns False when the
    path does not exist, otherwise the cmd.run_all result dict.
    '''
    # XXX:dc: Do we really want to always force escaping?
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    # sed works on strings, so coerce before escaping.
    before = _sed_esc(str(before), escape_all)
    after = _sed_esc(str(after), escape_all)
    limit = _sed_esc(limit, escape_all)
    if sys.platform == 'darwin':
        # BSD sed spells extended-regex mode -E instead of -r.
        options = options.replace('-r', '-E')
    cmd = ['sed', '-i{0}'.format(backup) if backup else '-i']
    cmd.extend(salt.utils.args.shlex_split(options))
    expression = r'{limit}{negate_match}s/{before}/{after}/{flags}'.format(
        limit='/{0}/ '.format(limit) if limit else '',
        negate_match='!' if negate_match else '',
        before=before,
        after=after,
        flags=flags)
    cmd.extend([expression, path])
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def sed_contains(path,
                 text,
                 limit='',
                 flags='g'):
    '''
    Return True if sed finds ``text`` in ``path``, optionally only within
    lines matching ``limit``.

    Largely inspired by Fabric's contrib.files.contains().
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    pattern = _sed_esc(str(text), False)
    scope = _sed_esc(str(limit), False)
    options = '-n -r -e'
    if sys.platform == 'darwin':
        # BSD sed spells extended-regex mode -E instead of -r.
        options = options.replace('-r', '-E')
    cmd = ['sed']
    cmd.extend(salt.utils.args.shlex_split(options))
    cmd.append(
        r'{limit}s/{before}/$/{flags}'.format(
            limit='/{0}/ '.format(scope) if scope else '',
            before=pattern,
            flags='p{0}'.format(flags)))
    cmd.append(path)
    # 'p' makes sed print matching lines; any output means a match.
    return bool(__salt__['cmd.run'](cmd, python_shell=False))
def psed(path,
         before,
         after,
         limit='',
         backup='.bak',
         flags='gMS',
         escape_all=False,
         multi=False):
    '''
    Pure-Python sed alternative: rewrite ``path`` in place (after copying
    a backup to ``path + backup``), replacing ``before`` with ``after``.

    With ``multi`` True the file is processed line by line; otherwise the
    whole file content is transformed in one pass.  The actual rewrite is
    delegated to _psed().
    '''
    # XXX:dc: Do we really want to always force escaping?
    #
    # Mandate that before and after are strings
    path = os.path.expanduser(path)
    multi = bool(multi)
    before = str(before)
    after = str(after)
    before = _sed_esc(before, escape_all)
    # The pattern to replace with does not need to be escaped!!!
    #after = _sed_esc(after, escape_all)
    limit = _sed_esc(limit, escape_all)
    shutil.copy2(path, '{0}{1}'.format(path, backup))
    with salt.utils.files.fopen(path, 'w') as ofile:
        with salt.utils.files.fopen('{0}{1}'.format(path, backup), 'r') as ifile:
            if multi is True:
                # BUG FIX: the original iterated ``ifile.readline()``,
                # which walks the *characters* of the first line only.
                # Iterate the file object to process every line.
                for line in ifile:
                    ofile.write(_psed(line, before, after, limit, flags))
            else:
                ofile.write(_psed(ifile.read(), before, after, limit, flags))
# Map single-character flag names (as used in the ``flags`` strings taken
# by psed()/_psed()) to their ``re`` module constants.
RE_FLAG_TABLE = {'I': re.I,
                 'L': re.L,
                 'M': re.M,
                 'S': re.S,
                 'U': re.U,
                 'X': re.X}
def _psed(text,
before,
after,
limit,
flags):
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = ''.join(comps[1:])
count = 1
if 'g' in flags:
count = 0
flags = flags.replace('g', '')
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
def uncomment(path,
              regex,
              char='#',
              backup='.bak'):
    '''
    Uncomment lines in ``path`` matching ``regex`` by stripping the
    leading comment character ``char``; delegates to comment_line().

    NOTE: the default for ``char`` was truncated in this source (an
    unterminated string literal); restored to ``'#'`` per upstream Salt.
    '''
    return comment_line(path=path,
                        regex=regex,
                        char=char,
                        cmnt=False,
                        backup=backup)
def comment(path,
            regex,
            char='#',
            backup='.bak'):
    '''
    Comment out lines in ``path`` matching ``regex`` by prefixing them
    with ``char``; delegates to comment_line().

    NOTE: the default for ``char`` was truncated in this source (an
    unterminated string literal); restored to ``'#'`` per upstream Salt.
    '''
    return comment_line(path=path,
                        regex=regex,
                        char=char,
                        cmnt=True,
                        backup=backup)
def comment_line(path,
                 regex,
                 char='#',
                 cmnt=True,
                 backup='.bak'):
    '''
    Comment (``cmnt=True``) or uncomment (``cmnt=False``) every line of
    ``path`` matching ``regex``, using ``char`` as the comment marker.

    Returns False when nothing matched; otherwise a unified diff of the
    change.  A backup copy is written to ``path + backup`` unless
    ``backup`` is falsy.

    NOTE: the default for ``char`` was truncated in this source (an
    unterminated string literal); restored to ``'#'`` per upstream Salt.
    '''
    # Get the regex for comment or uncomment
    if cmnt:
        regex = '{0}({1}){2}'.format(
            '^' if regex.startswith('^') else '',
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    else:
        regex = r'^{0}\s*({1}){2}'.format(
            char,
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    # Load the real path to the file
    path = os.path.realpath(os.path.expanduser(path))
    # Make sure the file exists
    if not os.path.isfile(path):
        raise SaltInvocationError('File not found: {0}'.format(path))
    # Make sure it is a text file
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'.format(path))
    # First check the whole file, determine whether to make the replacement
    # Searching first avoids modifying the time stamp if there are no changes
    found = False
    # Dictionaries for comparing changes
    orig_file = []
    new_file = []
    # Buffer size for fopen
    bufsize = os.path.getsize(path)
    try:
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            # Loop through each line of the file and look for a match
            for line in r_file:
                # Is it in this line
                if six.PY3:
                    line = line.decode(__salt_system_encoding__)
                if re.match(regex, line):
                    # Load lines into dictionaries, set found to True
                    orig_file.append(line)
                    if cmnt:
                        new_file.append('{0}{1}'.format(char, line))
                    else:
                        new_file.append(line.lstrip(char))
                    found = True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    # We've searched the whole file. If we didn't find anything, return False
    if not found:
        return False
    if not salt.utils.platform.is_windows():
        # Remember ownership/mode so they can be restored after the edit.
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Create a copy to read from and to use as a backup later
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    try:
        # Open the file in write mode
        with salt.utils.files.fopen(path,
                                    mode='wb',
                                    buffering=bufsize) as w_file:
            try:
                # Open the temp file in read mode
                with salt.utils.files.fopen(temp_file,
                                            mode='rb',
                                            buffering=bufsize) as r_file:
                    # Loop through each line of the file and look for a match
                    for line in r_file:
                        if six.PY3:
                            line = line.decode(__salt_system_encoding__)
                        try:
                            # Is it in this line
                            if re.match(regex, line):
                                # Write the new line
                                if cmnt:
                                    wline = '{0}{1}'.format(char, line)
                                else:
                                    wline = line.lstrip(char)
                            else:
                                # Write the existing line (no change)
                                wline = line
                            if six.PY3:
                                wline = wline.encode(__salt_system_encoding__)
                            w_file.write(wline)
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    if backup:
        # Move the backup file to the original directory
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)
    if not salt.utils.platform.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)
    # Return a diff using the two dictionaries
    return ''.join(difflib.unified_diff(orig_file, new_file))
def _get_flags(flags):
    '''
    Normalize ``flags`` -- a flag name, an iterable of names, or an int --
    into the integer form used by the ``re`` module.
    '''
    if isinstance(flags, six.string_types):
        flags = [flags]
    if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
        resolved = []
        for flag in flags:
            # Flag names are looked up case-insensitively on the re module.
            value = getattr(re, str(flag).upper())
            if not isinstance(value, six.integer_types):
                raise SaltInvocationError(
                    'Invalid re flag given: {0}'.format(flag)
                )
            resolved.append(value)
        return reduce(operator.__or__, resolved)
    elif isinstance(flags, six.integer_types):
        return flags
    else:
        raise SaltInvocationError(
            'Invalid re flags: "{0}", must be given either as a single flag '
            'string, a list of strings, or as an integer'.format(flags)
        )
def _add_flags(flags, new_flags):
    '''
    Combine two flag specifications into a single integer re flag value.
    '''
    return _get_flags(flags) | _get_flags(new_flags)
def _mkstemp_copy(path,
                  preserve_inode=True):
    '''
    Create a temp file holding a copy of ``path`` and return its name.

    With ``preserve_inode`` True the original is *copied*, keeping its
    inode (and therefore any hardlinks) intact; otherwise it is *moved*,
    which leaves the original path pointing at a new inode once rewritten.
    '''
    try:
        temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to create temp file. "
            "Exception: {0}".format(exc)
        )
    transfer = shutil.copy2 if preserve_inode else shutil.move
    verb = 'copy' if preserve_inode else 'move'
    try:
        transfer(path, temp_file)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to {0} file '{1}' to the "
            "temp file '{2}'. "
            "Exception: {3}".format(verb, path, temp_file, exc)
        )
    return temp_file
def _starts_till(src, probe, strip_comments=True):
def _strip_comments(txt):
buff = txt.split(" ", 1)
return len(buff) == 2 and len(buff[0]) < 2 and buff[1] or txt
def _to_words(txt):
return txt and [w for w in txt.strip().split(" ") if w.strip()] or txt
no_match = -1
equal = 0
if not src or not probe:
return no_match
if src == probe:
return equal
src = _to_words(strip_comments and _strip_comments(src) or src)
probe = _to_words(strip_comments and _strip_comments(probe) or probe)
a_buff, b_buff = len(src) < len(probe) and (src, probe) or (probe, src)
b_buff = ' '.join(b_buff)
for idx in range(len(a_buff)):
prb = ' '.join(a_buff[:-(idx + 1)])
if prb and b_buff.startswith(prb):
return idx
return no_match
def _regex_to_static(src, regex):
if not src or not regex:
return None
try:
src = re.search(regex, src, re.M)
except Exception as ex:
raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))
return src and src.group() or regex
def _assert_occurrence(src, probe, target, amount=1):
occ = src.count(probe)
if occ > amount:
msg = 'more than'
elif occ < amount:
msg = 'less than'
elif not occ:
msg = 'no'
else:
msg = None
if msg:
raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target))
return occ
def _get_line_indent(src, line, indent):
if not indent:
return line
idt = []
for c in src:
if c not in ['\t', ' ']:
break
idt.append(c)
return ''.join(idt) + line.strip()
def line(path, content=None, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True):
    '''
    Edit a single line in ``path`` according to ``mode``: 'insert',
    'ensure', 'delete' or 'replace'.  ``match``/``before``/``after`` may
    be regexes (resolved to their matched literal text), ``location``
    ('start'/'end') positions inserts, and ``indent`` re-indents the new
    content to match its neighbor.  Returns a diff (when ``show_changes``)
    or a changed flag.
    '''
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError('File "{0}" does not exists or is not a file.'.format(path))
        return False  # No changes had happened
    mode = mode and mode.lower() or mode
    if mode not in ['insert', 'ensure', 'delete', 'replace']:
        if mode is None:
            raise CommandExecutionError('Mode was not defined. How to process the file?')
        else:
            raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))
    # We've set the content to be empty in the function params but we want to make sure
    # BUG FIX: the original assigned ``mpty_content_modes`` (typo) and then
    # read ``empty_content_modes``, raising NameError on every call.
    empty_content_modes = ['delete']
    if mode not in empty_content_modes and content is None:
        raise CommandExecutionError('Content can only be empty if mode is "{0}"'.format(', '.join(empty_content_modes)))
    del empty_content_modes
    if before is None and after is None and not match:
        match = content
    with salt.utils.files.fopen(path, mode='r') as fp_:
        body = fp_.read()
    body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    # Resolve regexes to the literal text they match in the current body.
    after = _regex_to_static(body, after)
    before = _regex_to_static(body, before)
    match = _regex_to_static(body, match)
    if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
        log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
        body = ''
    elif mode == 'delete':
        body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
    elif mode == 'replace':
        body = os.linesep.join([(_get_line_indent(file_line, content, indent)
                                if (file_line.find(match) > -1 and not file_line == content) else file_line)
                                for file_line in body.split(os.linesep)])
    elif mode == 'insert':
        if not location and not before and not after:
            raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')
        if not location:
            if before and after:
                _assert_occurrence(body, before, 'before')
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                in_range = False
                for line in lines:
                    if line.find(after) > -1:
                        in_range = True
                    elif line.find(before) > -1 and in_range:
                        out.append(_get_line_indent(line, content, indent))
                    out.append(line)
                body = os.linesep.join(out)
            if before and not after:
                _assert_occurrence(body, before, 'before')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1:
                        cnd = _get_line_indent(_line, content, indent)
                        # Insert only if the previous line is not already it.
                        if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0):
                            out.append(cnd)
                    out.append(_line)
                body = os.linesep.join(out)
            elif after and not before:
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx, _line in enumerate(lines):
                    out.append(_line)
                    cnd = _get_line_indent(_line, content, indent)
                    # Insert after the anchor unless the next line is it.
                    if (_line.find(after) > -1 and
                            (lines[((idx + 1) < len(lines)) and idx + 1 or idx].strip() != cnd or
                             idx + 1 == len(lines))):
                        out.append(cnd)
                body = os.linesep.join(out)
        else:
            if location == 'start':
                body = os.linesep.join((content, body))
            elif location == 'end':
                body = os.linesep.join((body, _get_line_indent(body[-1], content, indent) if body else content))
    elif mode == 'ensure':
        after = after and after.strip()
        before = before and before.strip()
        if before and after:
            _assert_occurrence(body, before, 'before')
            _assert_occurrence(body, after, 'after')
            is_there = bool(body.count(content))
            if not is_there:
                out = []
                body = body.split(os.linesep)
                for idx, line in enumerate(body):
                    out.append(line)
                    if line.find(content) > -1:
                        is_there = True
                    if not is_there:
                        if idx < (len(body) - 1) and line.find(after) > -1 and body[idx + 1].find(before) > -1:
                            out.append(content)
                        elif line.find(after) > -1:
                            raise CommandExecutionError('Found more than one line between '
                                                        'boundaries "before" and "after".')
                body = os.linesep.join(out)
        elif before and not after:
            _assert_occurrence(body, before, 'before')
            body = body.split(os.linesep)
            out = []
            for idx in range(len(body)):
                if body[idx].find(before) > -1:
                    prev = (idx > 0 and idx or 1) - 1
                    out.append(_get_line_indent(body[idx], content, indent))
                    # Replace a stale variant of the content if it is there.
                    if _starts_till(out[prev], content) > -1:
                        del out[prev]
                out.append(body[idx])
            body = os.linesep.join(out)
        elif not before and after:
            _assert_occurrence(body, after, 'after')
            body = body.split(os.linesep)
            skip = None
            out = []
            for idx in range(len(body)):
                if skip != body[idx]:
                    out.append(body[idx])
                if body[idx].find(after) > -1:
                    next_line = idx + 1 < len(body) and body[idx + 1] or None
                    # Replace a stale variant of the content if it follows.
                    if next_line is not None and _starts_till(next_line, content) > -1:
                        skip = next_line
                    out.append(_get_line_indent(body[idx], content, indent))
            body = os.linesep.join(out)
        else:
            raise CommandExecutionError("Wrong conditions? "
                                        "Unable to ensure line without knowing "
                                        "where to put it before and/or after.")
    changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    if backup and changed and __opts__['test'] is False:
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())))
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc))
    changes_diff = None
    if changed:
        if show_changes:
            with salt.utils.files.fopen(path, 'r') as fp_:
                path_content = _splitlines_preserving_trailing_newline(
                    fp_.read())
            changes_diff = ''.join(difflib.unified_diff(
                path_content, _splitlines_preserving_trailing_newline(body)))
        if __opts__['test'] is False:
            fh_ = None
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
                fh_.write(body)
            finally:
                if fh_:
                    fh_.close()
    return show_changes and changes_diff or changed
def replace(path,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            dry_run=False,
            search_only=False,
            show_changes=True,
            ignore_if_missing=False,
            preserve_inode=True,
            backslash_literal=False,
            ):
    '''
    Replace occurrences of regex ``pattern`` with ``repl`` in ``path``.

    Supports search-only mode, append/prepend when no match is found,
    backup creation (including symlink-aware backups), dry runs, and
    returning either a unified diff (``show_changes``) or a changed flag.
    '''
    symlink = False
    if is_link(path):
        symlink = True
        target_path = os.readlink(path)
        given_path = os.path.expanduser(path)
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(path):
        if ignore_if_missing:
            return False
        else:
            raise SaltInvocationError('File not found: {0}'.format(path))
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )
    if search_only and (append_if_not_found or prepend_if_not_found):
        raise SaltInvocationError(
            'search_only cannot be used with append/prepend_if_not_found'
        )
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )
    flags_num = _get_flags(flags)
    cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
    filesize = os.path.getsize(path)
    if bufsize == 'file':
        bufsize = filesize
    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []  # used for show_changes and change detection
    new_file = []  # used for show_changes and change detection
    if not salt.utils.platform.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Avoid TypeErrors by forcing repl to be bytearray related to mmap
    # Replacement text may contains integer: 123 for example
    repl = salt.utils.stringutils.to_bytes(str(repl))
    if not_found_content:
        not_found_content = salt.utils.stringutils.to_bytes(not_found_content)
    found = False
    temp_file = None
    content = salt.utils.stringutils.to_str(not_found_content) if not_found_content and \
                                                                  (prepend_if_not_found or
                                                                   append_if_not_found) \
        else salt.utils.stringutils.to_str(repl)
    try:
        # First check the whole file, determine whether to make the replacement
        # Searching first avoids modifying the time stamp if there are no changes
        r_data = None
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            try:
                # mmap throws a ValueError if the file is empty.
                r_data = mmap.mmap(r_file.fileno(),
                                   0,
                                   access=mmap.ACCESS_READ)
            except (ValueError, mmap.error):
                # size of file in /proc is 0, but contains data
                r_data = salt.utils.stringutils.to_bytes("".join(r_file))
            if search_only:
                # Just search; bail as early as a match is found
                if re.search(cpattern, r_data):
                    return True  # `with` block handles file closure
            else:
                result, nrepl = re.subn(cpattern,
                                        repl.replace('\\', '\\\\') if backslash_literal else repl,
                                        r_data,
                                        count)
                # found anything? (even if no change)
                if nrepl > 0:
                    found = True
                    # Identity check the potential change
                    has_changes = True if pattern != repl else has_changes
                if prepend_if_not_found or append_if_not_found:
                    # Search for content, to avoid pre/appending the
                    # content if it was pre/appended in a previous run.
                    if re.search(salt.utils.stringutils.to_bytes('^{0}$'.format(re.escape(content))),
                                 r_data,
                                 flags=flags_num):
                        # Content was found, so set found.
                        found = True
                # Keep track of show_changes here, in case the file isn't
                # modified
                orig_file = r_data.read(filesize).splitlines(True) \
                    if isinstance(r_data, mmap.mmap) \
                    else r_data.splitlines(True)
                new_file = result.splitlines(True)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    finally:
        if r_data and isinstance(r_data, mmap.mmap):
            r_data.close()
    if has_changes and not dry_run:
        # Write the replacement text in this block.
        try:
            # Create a copy to read from and to use as a backup later
            temp_file = _mkstemp_copy(path=path,
                                      preserve_inode=preserve_inode)
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))
        r_data = None
        try:
            # Open the file in write mode
            with salt.utils.files.fopen(path,
                                        mode='w',
                                        buffering=bufsize) as w_file:
                try:
                    # Open the temp file in read mode
                    with salt.utils.files.fopen(temp_file,
                                                mode='r',
                                                buffering=bufsize) as r_file:
                        r_data = mmap.mmap(r_file.fileno(),
                                           0,
                                           access=mmap.ACCESS_READ)
                        result, nrepl = re.subn(cpattern,
                                                repl.replace('\\', '\\\\') if backslash_literal else repl,
                                                r_data,
                                                count)
                        try:
                            w_file.write(salt.utils.stringutils.to_str(result))
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
                except (OSError, IOError) as exc:
                    raise CommandExecutionError("Exception: {0}".format(exc))
                finally:
                    if r_data and isinstance(r_data, mmap.mmap):
                        r_data.close()
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))
    if not found and (append_if_not_found or prepend_if_not_found):
        if not_found_content is None:
            not_found_content = repl
        if prepend_if_not_found:
            new_file.insert(0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        else:
            # append_if_not_found
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(salt.utils.stringutils.to_bytes(os.linesep)):
                    new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep)
            new_file.append(not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        has_changes = True
        if not dry_run:
            try:
                # Create a copy to read from and for later use as a backup
                temp_file = _mkstemp_copy(path=path,
                                          preserve_inode=preserve_inode)
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()
    if backup and has_changes and not dry_run:
        # keep the backup only if it was requested
        # and only if there were any changes
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
        if symlink:
            symlink_backup = '{0}{1}'.format(given_path, backup)
            target_backup = '{0}{1}'.format(target_path, backup)
            # Always clobber any existing symlink backup
            # to match the behavior of copying the backup.
            try:
                os.symlink(target_backup, symlink_backup)
            except OSError:
                os.remove(symlink_backup)
                os.symlink(target_backup, symlink_backup)
            except Exception as exc:
                # BUG FIX: this was a bare ``except:`` referencing an
                # unbound ``exc`` name, which masked the real failure with
                # a NameError.  Bind the exception so the message formats.
                raise CommandExecutionError(
                    "Unable create backup symlink '{0}'. "
                    "Target was '{1}'. "
                    "Exception: {2}".format(symlink_backup, target_backup,
                                            exc)
                )
    elif temp_file:
        try:
            os.remove(temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to delete temp file '{0}'. "
                "Exception: {1}".format(temp_file, exc)
            )
    if not dry_run and not salt.utils.platform.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)
    def get_changes():
        orig_file_as_str = [salt.utils.stringutils.to_str(x) for x in orig_file]
        new_file_as_str = [salt.utils.stringutils.to_str(x) for x in new_file]
        return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str))
    if show_changes:
        return get_changes()
    # The file was not changed, examine the pattern-vs-repl false positive
    # (for situations where the pattern also matches the repl). Revert the
    # has_changes flag to False if the final result is unchanged.
    if not get_changes():
        has_changes = False
    return has_changes
def blockreplace(path,
                 marker_start='#-- start managed zone --',
                 marker_end='#-- end managed zone --',
                 content='',
                 append_if_not_found=False,
                 prepend_if_not_found=False,
                 backup='.bak',
                 dry_run=False,
                 show_changes=True,
                 append_newline=False,
                 ):
    '''
    Replace (or create) the content between ``marker_start`` and
    ``marker_end`` lines in ``path`` with ``content``.

    NOTE: the marker defaults were truncated in this source (unterminated
    string literals); restored to the upstream Salt defaults -- confirm
    against the project history.

    Returns the unified diff when ``show_changes`` is True, otherwise a
    changed flag.
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise SaltInvocationError('File not found: {0}'.format(path))
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )
    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []
    new_file = []
    in_block = False
    old_content = ''
    done = False
    # we do not use in_place editing to avoid file attrs modifications when
    # no changes are required and to avoid any file access on a partially
    # written file.
    # we could also use salt.utils.filebuffer.BufferedReader
    try:
        fi_file = fileinput.input(path,
                                  inplace=False, backup=False,
                                  bufsize=1, mode='rb')
        for line in fi_file:
            line = salt.utils.stringutils.to_str(line)
            result = line
            if marker_start in line:
                # managed block start found, start recording
                in_block = True
            else:
                if in_block:
                    if marker_end in line:
                        # end of block detected
                        in_block = False
                        # Handle situations where there may be multiple types
                        # of line endings in the same file. Separate the content
                        # into lines. Account for Windows-style line endings
                        # using os.linesep, then by linux-style line endings
                        # using '\n'
                        split_content = []
                        for linesep_line in content.split(os.linesep):
                            for content_line in linesep_line.split('\n'):
                                split_content.append(content_line)
                        # Trim any trailing new lines to avoid unwanted
                        # additional new lines
                        while not split_content[-1]:
                            split_content.pop()
                        # push new block content in file
                        for content_line in split_content:
                            new_file.append(content_line + os.linesep)
                        done = True
                    else:
                        # remove old content, but keep a trace
                        old_content += line
                        result = None
            # else: we are not in the marked block, keep saving things
            orig_file.append(line)
            if result is not None:
                new_file.append(result)
        # end for. If we are here without block management we maybe have some problems,
        # or we need to initialise the marked block
    finally:
        fi_file.close()
    if in_block:
        # unterminated block => bad, always fail
        raise CommandExecutionError(
            'Unterminated marked block. End of file reached before marker_end.'
        )
    if not done:
        if prepend_if_not_found:
            # add the markers and content at the beginning of file
            new_file.insert(0, marker_end + os.linesep)
            if append_newline is True:
                new_file.insert(0, content + os.linesep)
            else:
                new_file.insert(0, content)
            new_file.insert(0, marker_start + os.linesep)
            done = True
        elif append_if_not_found:
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(os.linesep):
                    new_file[-1] += os.linesep
            # add the markers and content at the end of file
            new_file.append(marker_start + os.linesep)
            if append_newline is True:
                new_file.append(content + os.linesep)
            else:
                new_file.append(content)
            new_file.append(marker_end + os.linesep)
            done = True
        else:
            raise CommandExecutionError(
                'Cannot edit marked block. Markers were not found in file.'
            )
    if done:
        diff = ''.join(difflib.unified_diff(orig_file, new_file))
        # BUG FIX: the original used ``diff is not ''`` -- an identity
        # comparison with a literal, which is unreliable (and a
        # SyntaxWarning on modern Pythons).  Compare by value.
        has_changes = diff != ''
        if has_changes and not dry_run:
            # changes detected
            # backup file attrs
            perms = {}
            perms['user'] = get_user(path)
            perms['group'] = get_group(path)
            perms['mode'] = salt.utils.files.normalize_mode(get_mode(path))
            # backup old content
            if backup is not False:
                backup_path = '{0}{1}'.format(path, backup)
                shutil.copy2(path, backup_path)
                # copy2 does not preserve ownership
                check_perms(backup_path,
                            None,
                            perms['user'],
                            perms['group'],
                            perms['mode'])
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()
            # this may have overwritten file attrs
            check_perms(path,
                        None,
                        perms['user'],
                        perms['group'],
                        perms['mode'])
    if show_changes:
        return diff
    return has_changes
def search(path,
           pattern,
           flags=8,
           bufsize=1,
           ignore_if_missing=False,
           multiline=False
           ):
    '''
    Test whether ``pattern`` matches anywhere in ``path``.
    '''
    if multiline:
        flags = _add_flags(flags, 'MULTILINE')
        # Multiline matching needs the whole file in one buffer.
        bufsize = 'file'
    # Deliberately delegate to replace() in search-only mode so both
    # functions share regex semantics, expected behavior, *and* bugs. :)
    search_kwargs = dict(flags=flags,
                         bufsize=bufsize,
                         dry_run=True,
                         search_only=True,
                         show_changes=False,
                         ignore_if_missing=ignore_if_missing)
    return replace(path, pattern, '', **search_kwargs)
def patch(originalfile, patchfile, options='', dry_run=False):
    '''
    Apply ``patchfile`` on top of ``originalfile`` using the system
    ``patch`` executable, returning the ``cmd.run_all`` result dict.
    '''
    patch_bin = salt.utils.path.which('patch')
    if not patch_bin:
        raise CommandExecutionError(
            'patch executable not found. Is the distribution\'s patch '
            'package installed?'
        )
    cmd = [patch_bin] + salt.utils.args.shlex_split(options)
    if dry_run:
        # BSD patch spells dry-run as -C
        cmd.append('-C' if __grains__['kernel'] in ('FreeBSD', 'OpenBSD')
                   else '--dry-run')
    # --forward prevents interactive prompts when the patch fails to apply;
    # the exit code will still be greater than 0 in that case.
    if '-N' not in cmd and '--forward' not in cmd:
        cmd.append('--forward')
    # By default patch writes rejected hunks to <filename>.rej; suppress
    # that unless the caller explicitly configured a reject file.
    reject_configured = any(
        opt == '-r' or opt.startswith('-r ') or opt.startswith('--reject-file')
        for opt in cmd
    )
    if not reject_configured:
        cmd.append('--reject-file=-')
    cmd.extend(['-i', patchfile])
    if os.path.isdir(originalfile):
        cmd.extend(['-d', originalfile])
        strip_configured = any(
            opt.startswith('-p') or opt.startswith('--strip=')
            for opt in cmd
        )
        if not strip_configured:
            cmd.append('--strip=0')
    else:
        cmd.append(originalfile)
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def contains(path, text):
    '''
    Return True when the (whitespace-stripped) ``text`` occurs anywhere in
    the file at ``path``; False for missing or unreadable files.
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    needle = str(text).strip()
    try:
        with salt.utils.filebuffer.BufferedReader(path) as reader:
            # any() short-circuits on the first chunk containing the text
            return any(needle in chunk for chunk in reader)
    except (IOError, OSError):
        return False
def contains_regex(path, regex, lchar=''):
    '''
    Scan the file line by line, returning True on the first ``regex`` match.
    When ``lchar`` is given, those characters are stripped from the left of
    each line before matching.
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    try:
        with salt.utils.files.fopen(path, 'r') as target:
            for line in target:
                candidate = line.lstrip(lchar) if lchar else line
                if re.search(regex, candidate):
                    return True
        return False
    except (IOError, OSError):
        return False
def contains_glob(path, glob_expr):
    '''
    Return True if any chunk of the file matches the shell glob
    ``glob_expr``; False for missing or unreadable files.
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    try:
        with salt.utils.filebuffer.BufferedReader(path) as reader:
            return any(fnmatch.fnmatch(chunk, glob_expr) for chunk in reader)
    except (IOError, OSError):
        return False
def append(path, *args, **kwargs):
    '''
    Append one line per argument to the file at ``path``.
    Largely inspired by Fabric's contrib.files.append().

    Lines may also be passed as ``args=[...]`` (or a single value) to avoid
    YAML/CLI quoting issues.
    '''
    path = os.path.expanduser(path)
    if 'args' in kwargs:
        args = kwargs['args'] if isinstance(kwargs['args'], list) \
            else [kwargs['args']]
    # Make sure the existing content ends with a line separator so the
    # first appended line does not run into the current last line.
    with salt.utils.files.fopen(path, 'rb+') as ofile:
        linesep = salt.utils.stringutils.to_bytes(os.linesep)
        try:
            ofile.seek(-len(linesep), os.SEEK_END)
        except IOError as exc:
            if exc.errno not in (errno.EINVAL, errno.ESPIPE):
                raise
            # File shorter than the separator (or unseekable): nothing to fix
        else:
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)
    with salt.utils.files.fopen(path, 'a') as ofile:
        for new_line in args:
            ofile.write('{0}{1}'.format(new_line, os.linesep))
    return 'Wrote {0} lines to "{1}"'.format(len(args), path)
def prepend(path, *args, **kwargs):
    '''
    Insert one line per argument at the top of the file at ``path``,
    creating the file if it does not exist.

    Lines may also be passed as ``args=[...]`` (or a single value).
    '''
    path = os.path.expanduser(path)
    if 'args' in kwargs:
        args = kwargs['args'] if isinstance(kwargs['args'], list) \
            else [kwargs['args']]
    try:
        with salt.utils.files.fopen(path) as fhr:
            body = fhr.readlines()
    except IOError:
        # Missing file: treat as empty; it is created by the write below.
        body = []
    preface = ['{0}\n'.format(line) for line in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(''.join(preface + body))
    return 'Prepended {0} lines to "{1}"'.format(len(args), path)
def write(path, *args, **kwargs):
    '''
    Overwrite the file at ``path`` with one line per argument.

    Lines may also be passed as ``args=[...]`` (or a single value).
    '''
    path = os.path.expanduser(path)
    if 'args' in kwargs:
        args = kwargs['args'] if isinstance(kwargs['args'], list) \
            else [kwargs['args']]
    lines = ['{0}\n'.format(line) for line in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(''.join(lines))
    return 'Wrote {0} lines to "{1}"'.format(len(lines), path)
def touch(name, atime=None, mtime=None):
    '''
    Create the file if it does not exist, then update its access and/or
    modification times.  Returns True when the file exists afterwards.

    atime / mtime arrive from the CLI as strings; numeric strings are
    coerced to int, anything non-numeric triggers SaltInvocationError via
    the TypeError from os.utime.
    '''
    name = os.path.expanduser(name)
    if atime and atime.isdigit():
        atime = int(atime)
    if mtime and mtime.isdigit():
        mtime = int(mtime)
    try:
        if not os.path.exists(name):
            with salt.utils.files.fopen(name, 'a') as fhw:
                fhw.write('')
        # Missing times default to "now"; both missing means keep "now, now"
        if atime and mtime:
            times = (atime, mtime)
        elif atime:
            times = (atime, time.time())
        elif mtime:
            times = (time.time(), mtime)
        else:
            times = None
        os.utime(name, times)
    except TypeError:
        raise SaltInvocationError('atime and mtime must be integers')
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
    return os.path.exists(name)
def seek_read(path, size, offset):
    '''
    Read ``size`` bytes from the file at ``path`` starting at byte
    ``offset`` and return them as bytes.
    '''
    path = os.path.expanduser(path)
    fd_ = os.open(path, os.O_RDONLY)
    try:
        os.lseek(fd_, int(offset), 0)
        return os.read(fd_, int(size))
    finally:
        # Always release the descriptor, even if lseek/read raise
        os.close(fd_)
def seek_write(path, data, offset):
    '''
    Write ``data`` (bytes) into the existing file at ``path`` starting at
    byte ``offset``; fsync, and return the number of bytes written.
    '''
    path = os.path.expanduser(path)
    fd_ = os.open(path, os.O_WRONLY)
    try:
        os.lseek(fd_, int(offset), 0)
        written = os.write(fd_, data)
        # Flush to disk before the descriptor is closed
        os.fsync(fd_)
    finally:
        os.close(fd_)
    return written
def truncate(path, length):
    '''
    Truncate (or extend with zeros) the file at ``path`` to exactly
    ``length`` bytes.
    '''
    expanded = os.path.expanduser(path)
    with salt.utils.files.fopen(expanded, 'rb+') as fh_:
        fh_.truncate(int(length))
def link(src, path):
    '''
    Create a hard link at ``path`` pointing at ``src``.
    Returns True on success; raises CommandExecutionError on failure.
    '''
    src = os.path.expanduser(src)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.link(src, path)
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    return True
def is_link(path):
    '''
    Return True when ``path`` (with ~ expanded) is a symbolic link.
    '''
    expanded = os.path.expanduser(path)
    return os.path.islink(expanded)
def symlink(src, path):
    '''
    Create a symbolic link at ``path`` pointing at ``src``.
    A link that already points at ``src`` is treated as success.
    Returns True; raises CommandExecutionError on failure.
    '''
    path = os.path.expanduser(path)
    try:
        if os.path.normpath(os.readlink(path)) == os.path.normpath(src):
            log.debug('link already in correct state: %s -> %s', path, src)
            return True
    except OSError:
        # path does not exist or is not a symlink; create it below
        pass
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.symlink(src, path)
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    return True
def rename(src, dst):
    '''
    Rename/move ``src`` to ``dst`` via os.rename.
    Returns True; raises CommandExecutionError when the rename fails.
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.rename(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not rename \'{0}\' to \'{1}\''.format(src, dst)
        )
    return True
def copy(src, dst, recurse=False, remove_existing=False):
    '''
    Copy ``src`` to ``dst``.  Directories require ``recurse=True``;
    ``remove_existing=True`` wipes the destination tree first.  On POSIX
    the destination is given the source's owner, group and mode.
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.exists(src):
        raise CommandExecutionError('No such file or directory \'{0}\''.format(src))
    on_windows = salt.utils.platform.is_windows()
    if not on_windows:
        # Capture ownership/permissions up front so they can be re-applied
        # to the destination after the copy.
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = salt.utils.files.normalize_mode(get_mode(src))
    directory_copy = (os.path.exists(dst) and os.path.isdir(dst)) \
        or os.path.isdir(src)
    try:
        if directory_copy:
            if not recurse:
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to true!")
            if remove_existing:
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not copy \'{0}\' to \'{1}\''.format(src, dst)
        )
    if not on_windows:
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
def lstat(path):
    '''
    os.lstat the path (not following symlinks) and return the selected
    st_* fields as a dict, or an empty dict when the stat fails.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to file must be absolute.')
    wanted = ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime',
              'st_nlink', 'st_size', 'st_uid')
    try:
        lst = os.lstat(path)
    except Exception:
        # Nonexistent path (or any other stat failure) yields an empty dict
        return {}
    return {key: getattr(lst, key) for key in wanted}
def access(path, mode):
    '''
    Wrapper around os.access.  ``mode`` is either one of the letters
    f/r/w/x or the corresponding os.*_OK constant.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    letter_modes = {'f': os.F_OK,
                    'r': os.R_OK,
                    'w': os.W_OK,
                    'x': os.X_OK}
    if mode in letter_modes:
        return os.access(path, letter_modes[mode])
    if mode in letter_modes.values():
        return os.access(path, mode)
    raise SaltInvocationError('Invalid mode specified.')
def read(path, binary=False):
    '''
    Return the entire contents of the file; ``binary=True`` reads bytes.
    '''
    access_mode = 'rb' if binary is True else 'r'
    with salt.utils.files.fopen(path, access_mode) as file_obj:
        return file_obj.read()
def readlink(path, canonicalize=False):
    '''
    Return the target of the symlink at ``path``; with
    ``canonicalize=True``, fully resolve the link chain instead.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    if not os.path.islink(path):
        raise SaltInvocationError('A valid link was not specified.')
    return os.path.realpath(path) if canonicalize else os.readlink(path)
def readdir(path):
    '''
    List the entries of a directory, including the conventional '.' and
    '..' entries (which os.listdir omits).
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Dir path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    return ['.', '..'] + os.listdir(path)
def statvfs(path):
    '''
    Run os.statvfs on the path and return the filesystem fields as a dict.
    Raises CommandExecutionError when the call fails.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    fields = ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',
              'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')
    try:
        stv = os.statvfs(path)
    except (OSError, IOError):
        raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
    return {key: getattr(stv, key) for key in fields}
def stats(path, hash_type=None, follow_symlinks=True):
    '''
    Return a dict of stat information for ``path``: inode, ownership
    (numeric and resolved names), timestamps, size, octal mode, an optional
    checksum (when ``hash_type`` is given), a ``type`` classification and
    the fully resolved ``target`` path.  Returns an empty dict when the
    path cannot be stat'ed at all.
    '''
    path = os.path.expanduser(path)
    ret = {}
    if not os.path.exists(path):
        # The path "does not exist" per os.path.exists, but it may still be
        # a broken symlink, which lstat can describe; give up only if even
        # lstat fails.
        try:
            pstat = os.lstat(path)
        except OSError:
            # Not a broken symlink either; nothing to report
            return ret
    else:
        if follow_symlinks:
            pstat = os.stat(path)
        else:
            pstat = os.lstat(path)
    ret['inode'] = pstat.st_ino
    ret['uid'] = pstat.st_uid
    ret['gid'] = pstat.st_gid
    ret['group'] = gid_to_group(pstat.st_gid)
    ret['user'] = uid_to_user(pstat.st_uid)
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    # Permission bits only, as an octal string (e.g. '0o644' / '0644')
    ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_hash(path, hash_type)
    ret['type'] = 'file'
    # Deliberately a chain of independent ifs (not elif): a later match
    # overrides an earlier one, so e.g. a symlink to a regular file ends up
    # classified as 'link', not 'file'.
    if stat.S_ISDIR(pstat.st_mode):
        ret['type'] = 'dir'
    if stat.S_ISCHR(pstat.st_mode):
        ret['type'] = 'char'
    if stat.S_ISBLK(pstat.st_mode):
        ret['type'] = 'block'
    if stat.S_ISREG(pstat.st_mode):
        ret['type'] = 'file'
    if stat.S_ISLNK(pstat.st_mode):
        ret['type'] = 'link'
    if stat.S_ISFIFO(pstat.st_mode):
        ret['type'] = 'pipe'
    if stat.S_ISSOCK(pstat.st_mode):
        ret['type'] = 'socket'
    ret['target'] = os.path.realpath(path)
    return ret
def rmdir(path):
    '''
    Remove an (empty) directory.  Returns True on success, or the OS error
    string (e.g. for a non-empty directory) on failure.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    try:
        os.rmdir(path)
    except OSError as exc:
        # e.g. "Directory not empty" — reported as a string, not raised
        return exc.strerror
    return True
def remove(path):
    '''
    Delete a file, symlink, or directory tree.  Returns True on success,
    False when the path matched nothing removable; raises
    CommandExecutionError on OS-level failure.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute: {0}'.format(path))
    try:
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
            return True
        if os.path.isdir(path):
            shutil.rmtree(path)
            return True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            'Could not remove \'{0}\': {1}'.format(path, exc)
        )
    return False
def directory_exists(path):
    '''
    Return True when ``path`` (with ~ expanded) is an existing directory.
    '''
    expanded = os.path.expanduser(path)
    return os.path.isdir(expanded)
def file_exists(path):
    '''
    Return True when ``path`` (with ~ expanded) is an existing regular file.
    '''
    expanded = os.path.expanduser(path)
    return os.path.isfile(expanded)
def path_exists_glob(path):
    '''
    Return True when at least one filesystem path matches the shell glob
    ``path`` (with ~ expanded), else False.
    '''
    # bool() of the match list replaces the redundant
    # ``True if ... else False`` conditional.
    return bool(glob.glob(os.path.expanduser(path)))
def restorecon(path, recursive=False):
    '''
    Reset the SELinux security context of ``path`` via restorecon(8).
    Returns True when the command exits 0.
    '''
    flag = '-FR' if recursive else '-F'
    cmd = ['restorecon', flag, path]
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def get_selinux_context(path):
    '''
    Return the SELinux context of ``path`` (user:role:type:level), or an
    explanatory message when none is available.
    '''
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
    match = re.search(r'\w+:\w+:\w+:\w+', out)
    if match:
        return match.group(0)
    return (
        'No selinux context information is available for {0}'.format(path)
    )
def set_selinux_context(path,
                        user=None,
                        role=None,
                        type=None,
                        range=None):
    '''
    Change one or more SELinux context components of ``path`` via chcon.
    Returns the new context on success, False when nothing was requested
    or chcon failed.  (Parameter names ``type``/``range`` shadow builtins
    but are part of the public CLI interface.)
    '''
    if not any((user, role, type, range)):
        return False
    cmd = ['chcon']
    # Map each requested component onto its chcon flag
    for flag, value in (('-u', user), ('-r', role),
                        ('-t', type), ('-l', range)):
        if value:
            cmd.extend([flag, value])
    cmd.append(path)
    if __salt__['cmd.retcode'](cmd, python_shell=False):
        return False
    return get_selinux_context(path)
def source_list(source, source_hash, saltenv):
    '''
    Check the source list and return the first source that can be used.

    Given one source or an ordered list of candidates (strings or
    single-key ``{source: hash}`` dicts), return the first usable
    ``(source, source_hash)`` pair: ``salt://`` paths are validated against
    the master's file/dir listings, ``http(s)://``/``ftp://`` URLs are
    accepted as-is, and ``file://``/absolute paths are checked for local
    existence.  Results are memoized in ``__context__`` for the run.

    Raises CommandExecutionError when a list is given and none of its
    entries can be found.
    '''
    contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
    if contextkey in __context__:
        # Already resolved during this run; reuse the cached answer
        return __context__[contextkey]
    if isinstance(source, list):
        mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
        # Pre-fetch listings for any alternate saltenvs referenced via
        # ?saltenv= in the candidate URLs, so the lookups below can succeed.
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))
            path, senv = salt.utils.url.parse(single)
            if senv:
                mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
                mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]
        ret = None
        for single in source:
            if isinstance(single, dict):
                # Single-key {source: hash} entry; skip malformed dicts
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = _urlparse(single_src)
                if salt.utils.platform.is_windows():
                    # urlparse mis-handles a local Windows path with no
                    # protocol indicator (file://). The scheme will be the
                    # drive letter instead of the protocol. So, we'll add the
                    # protocol ourselves.
                    if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_single_src = _urlparse('file://' + single_src)
                proto = urlparsed_single_src.scheme
                if proto == 'salt':
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    # BUGFIX: look the path up under its parsed environment
                    # (senv), not the default saltenv — previously a
                    # candidate with an explicit ?saltenv=... was checked
                    # against the wrong environment's listings.  This now
                    # matches the string branch below.
                    if (path, senv) in mfiles or (path, senv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith('http') or proto == 'ftp':
                    ret = (single_src, single_hash)
                    break
                elif proto == 'file' and (
                        os.path.exists(urlparsed_single_src.netloc) or
                        os.path.exists(urlparsed_single_src.path) or
                        os.path.exists(os.path.join(
                            urlparsed_single_src.netloc,
                            urlparsed_single_src.path))):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith(os.sep) and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, six.string_types):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
                urlparsed_src = _urlparse(single)
                if salt.utils.platform.is_windows():
                    # Same drive-letter workaround as in the dict branch:
                    # prepend the missing file:// protocol indicator.
                    if urlparsed_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_src = _urlparse('file://' + single)
                proto = urlparsed_src.scheme
                if proto == 'file' and (
                        os.path.exists(urlparsed_src.netloc) or
                        os.path.exists(urlparsed_src.path) or
                        os.path.exists(os.path.join(
                            urlparsed_src.netloc,
                            urlparsed_src.path))):
                    ret = (single, source_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
                    ret = (single, source_hash)
                    break
                elif single.startswith(os.sep) and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the list items matched anywhere — fatal
            raise CommandExecutionError(
                'none of the specified sources were found'
            )
    else:
        ret = (source, source_hash)
    __context__[contextkey] = ret
    return ret
def apply_template_on_contents(
        contents,
        template,
        context,
        defaults,
        saltenv):
    '''
    Render ``contents`` in memory with the requested template engine and
    return the rendered string.  Unknown engines return an error dict with
    ``result``/``comment`` keys instead.
    '''
    # Guard clause: bail out early for unsupported template engines
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        return {
            'result': False,
            'comment': ('Specified template format {0} is not supported'
                        ).format(template),
        }
    context_dict = defaults if defaults else {}
    if context:
        context_dict.update(context)
    contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
        contents,
        from_str=True,
        to_str=True,
        context=context_dict,
        saltenv=saltenv,
        grains=__opts__['grains'],
        pillar=__pillar__,
        salt=__salt__,
        opts=__opts__)['data']
    if six.PY2:
        contents = contents.encode('utf-8')
    elif six.PY3 and isinstance(contents, bytes):
        # Template engines may hand back bytes on PY3; normalize to str
        contents = contents.decode('utf-8')
    return contents
def get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify=False,
        **kwargs):
    '''
    Resolve the source file and its checksum for a managed file, caching
    and (optionally) template-rendering it.

    Returns a 3-tuple ``(sfn, source_sum, comment)`` where ``sfn`` is the
    local cached path ('' when nothing was cached), ``source_sum`` is a
    dict with ``hsum``/``hash_type`` (empty on error), and ``comment`` is
    an error message ('' on success).
    '''
    # Copy the file to the minion and templatize it
    sfn = ''
    source_sum = {}
    def _get_local_file_source_sum(path):
        '''
        Compute the sha256 of a local file as a source_sum dict.
        '''
        return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}
    # If we have a source defined, let's figure out what the hash is
    if source:
        urlparsed_source = _urlparse(source)
        parsed_scheme = urlparsed_source.scheme
        parsed_path = os.path.join(
            urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
        # A single-letter scheme is really a Windows drive letter with no
        # file:// protocol indicator; reassemble the path and treat as file.
        if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
            parsed_path = ':'.join([parsed_scheme, parsed_path])
            parsed_scheme = 'file'
        if parsed_scheme == 'salt':
            source_sum = __salt__['cp.hash_file'](source, saltenv)
            if not source_sum:
                return '', {}, 'Source file {0} not found'.format(source)
        elif not source_hash and parsed_scheme == 'file':
            # Local file:// source with no explicit hash: hash it ourselves
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            # Local absolute path source with no explicit hash
            source_sum = _get_local_file_source_sum(source)
        else:
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(name,
                                                    source,
                                                    source_hash,
                                                    source_hash_name,
                                                    saltenv)
                    except CommandExecutionError as exc:
                        return '', {}, exc.strerror
                else:
                    # Remote source without a hash and verification enabled:
                    # refuse rather than fetch unverifiable content.
                    msg = (
                        'Unable to verify upstream hash of source file {0}, '
                        'please set source_hash or set skip_verify to True'
                        .format(source)
                    )
                    return '', {}, msg
    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__['cp.is_cached'](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get('hash_type', 'sha256')
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                sfn = cached_dest
                source_sum = {'hsum': cached_sum, 'hash_type': htype}
            elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
                # NOTE(review): the fallback here is __opts__['hash_type'] (a
                # hash *name*), which can never equal a digest — so a missing
                # 'hsum' always forces a refetch. Presumably intentional;
                # confirm before changing.
                cache_refetch = True
            else:
                sfn = cached_dest
        # If we didn't have the template or remote file, or the file has been
        # updated and the cache has to be refreshed, download the file.
        if not sfn or cache_refetch:
            try:
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum.get('hsum'))
            except Exception as exc:
                # A file not found exception will be raised by cp.cache_file
                return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)
        # If cache failed, sfn will be False
        if not sfn or not os.path.exists(sfn):
            return sfn, {}, 'Source file \'{0}\' not found'.format(source)
        if sfn == name:
            raise SaltInvocationError(
                'Source file cannot be the same as destination'
            )
        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    attrs=attrs,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__['grains'],
                    opts=__opts__,
                    **kwargs)
            else:
                return sfn, {}, ('Specified template format {0} is not supported'
                                 ).format(template)
            if data['result']:
                # Rendering succeeded: hash the rendered temp file so the
                # caller compares against the post-template content.
                sfn = data['data']
                hsum = get_hash(sfn, form='sha256')
                source_sum = {'hash_type': 'sha256',
                              'hsum': hsum}
            else:
                __clean_tmp(sfn)
                return sfn, {}, data['data']
    return sfn, source_sum, ''
def extract_hash(hash_fn,
                 hash_type='sha256',
                 file_name='',
                 source='',
                 source_hash_name=None):
    '''
    Parse a hash file (e.g. a SHA256SUMS file) and return the hash matching
    the desired file, as a ``{'hsum': ..., 'hash_type': ...}`` dict.

    Matching precedence: lines matching ``source_hash_name``, then
    ``file_name``, then ``source``; when none of those match, fall back to
    the first hash found in the file ("partial"); return None when the file
    contains no recognizable hash at all.
    '''
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                'file.extract_hash: Unsupported hash_type \'%s\', falling '
                'back to matching any supported hash_type', hash_type
            )
            hash_type = ''
        # No (valid) hash_type: accept any supported digest length
        hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = str(hash_len)
    filename_separators = string.whitespace + r'\/'
    if source_hash_name:
        if not isinstance(source_hash_name, six.string_types):
            source_hash_name = str(source_hash_name)
        # Negative index of the char just before the name, used to verify
        # the match is preceded by whitespace
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            'file.extract_hash: Extracting %s hash for file matching '
            'source_hash_name \'%s\'',
            'any supported' if not hash_type else hash_type,
            source_hash_name
        )
    if file_name:
        if not isinstance(file_name, six.string_types):
            file_name = str(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, six.string_types):
            source = str(source)
        urlparsed_source = _urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1
    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            'file.extract_hash: %s %s hash for file matching%s: %s',
            'If no source_hash_name match found, will extract'
            if source_hash_name
            else 'Extracting',
            'any supported' if not hash_type else hash_type,
            '' if len(basename_searches) == 1 else ' either of the following',
            ', '.join(basename_searches)
        )
    partial = None
    found = {}
    with salt.utils.files.fopen(hash_fn, 'r') as fp_:
        for line in fp_:
            line = line.strip()
            # Hex run of the right length, not embedded in a longer
            # alphanumeric token (lookbehind/lookahead guards)
            hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
            hash_match = re.search(hash_re, line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # There was a match, but its length doesn't correspond
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {'hsum': matched_hsum,
                                   'hash_type': matched_type}
            if matched is None:
                log.debug(
                    'file.extract_hash: In line \'%s\', no %shash found',
                    line,
                    '' if not hash_type else hash_type + ' '
                )
                continue
            if partial is None:
                # Remember the first hash seen as a last-resort fallback
                partial = matched
            def _add_to_matches(found, line, match_type, value, matched):
                # Record a categorized match (source_hash_name/file_name/source)
                log.debug(
                    'file.extract_hash: Line \'%s\' matches %s \'%s\'',
                    line, match_type, value
                )
                found.setdefault(match_type, []).append(matched)
            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(found, line, 'source_hash_name',
                                            source_hash_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source_hash_name) + r'\s+',
                              line):
                    # Name at the start of the line, followed by whitespace
                    _add_to_matches(found, line, 'source_hash_name',
                                    source_hash_name, matched)
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # Can't rsplit on whitespace here either,
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(found, line, 'file_name',
                                            file_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(file_name) + r'\s+', line):
                    _add_to_matches(found, line, 'file_name',
                                    file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, 'source',
                                            source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source) + r'\s+', line):
                    _add_to_matches(found, line, 'source', source, matched)
                    hash_matched = True
            if not hash_matched:
                log.debug(
                    'file.extract_hash: Line \'%s\' contains %s hash '
                    '\'%s\', but line did not meet the search criteria',
                    line, matched['hash_type'], matched['hsum']
                )
    # Return the best-categorized match, in precedence order
    for found_type, found_str in (('source_hash_name', source_hash_name),
                                  ('file_name', file_name),
                                  ('source', source)):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    'file.extract_hash: Multiple %s matches for %s: %s',
                    found_type,
                    found_str,
                    ', '.join(
                        ['{0} ({1})'.format(x['hsum'], x['hash_type'])
                         for x in found[found_type]]
                    )
                )
            ret = found[found_type][0]
            log.debug(
                'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
                ret['hash_type'], ret['hsum'], found_str
            )
            return ret
    if partial:
        log.debug(
            'file.extract_hash: Returning the partially identified %s hash '
            '\'%s\'', partial['hash_type'], partial['hsum']
        )
        return partial
    log.debug('file.extract_hash: No matches, returning None')
    return None
def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
    '''
    Check and (unless ``__opts__['test']``) enforce ownership, mode and
    (Linux, non-directory) chattr attributes on ``name``.

    Returns a 2-tuple ``(ret, perms)``: ``ret`` is a state-style dict
    (``changes``/``comment``/``result``) and ``perms`` records the
    pre-existing ``luser``/``lgroup``/``lmode`` (plus ``cuser``/``cgroup``
    when a change is needed and ``lattrs`` when attributes were present).
    Raises CommandExecutionError when ``name`` does not exist.
    '''
    name = os.path.expanduser(name)
    lsattr_cmd = salt.utils.path.which('lsattr')
    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': [],
               'result': True}
        orig_comment = ''
    else:
        # Preserve the caller's comment; it is re-inserted at the end
        orig_comment = ret['comment']
        ret['comment'] = []
    # Check current permissions/ownership
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    if not cur:
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = salt.utils.files.normalize_mode(cur['mode'])
    is_dir = os.path.isdir(name)
    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        # Temporarily strip chattr attributes (e.g. immutable) so the
        # chown/chmod below can succeed; they are restored near the end.
        perms['lattrs'] = ''.join(lsattr(name).get('name', ''))
        if perms['lattrs']:
            chattr(name, operator='remove', attributes=perms['lattrs'])
    # Mode changes if needed
    if mode is not None:
        # Symlink with follow_symlinks=False: leave the mode alone
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.files.normalize_mode(mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    # Re-read to confirm the chmod actually took effect
                    if mode != salt.utils.files.normalize_mode(get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
                        )
                    else:
                        ret['changes']['mode'] = mode
    # Determine whether user/group changes are needed
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(perms['luser'])
            ) or (
            not salt.utils.platform.is_windows() and user != perms['luser']
        ):
            perms['cuser'] = user
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(perms['lgroup'])
            ) or (
            not salt.utils.platform.is_windows() and group != perms['lgroup']
        ):
            perms['cgroup'] = group
    if 'cuser' in perms or 'cgroup' in perms:
        if not __opts__['test']:
            # lchown for unfollowed symlinks so the link itself is changed
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms['luser']
            if group is None:
                group = perms['lgroup']
            try:
                chown_func(name, user, group)
            except OSError:
                ret['result'] = False
    # Verify the user change (or record it for test mode)
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(
                    get_user(name, follow_symlinks=follow_symlinks)) and
                user != ''
            ) or (
            not salt.utils.platform.is_windows() and
                user != get_user(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['user'] = user
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change user to {0}'
                                      .format(user))
        elif 'cuser' in perms and user != '':
            ret['changes']['user'] = user
    # Verify the group change (or record it for test mode)
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(
                    get_group(name, follow_symlinks=follow_symlinks)) and
                user != '') or (
            not salt.utils.platform.is_windows() and
                group != get_group(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            # NOTE(review): this branch tests `user != ''` (not `group`) —
            # looks like a copy/paste quirk; preserved as-is, confirm intent
            # before changing.
            if __opts__['test'] is True:
                ret['changes']['group'] = group
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change group to {0}'
                                      .format(group))
        elif 'cgroup' in perms and user != '':
            ret['changes']['group'] = group
    if isinstance(orig_comment, six.string_types):
        if orig_comment:
            ret['comment'].insert(0, orig_comment)
        ret['comment'] = '; '.join(ret['comment'])
    if __opts__['test'] is True and ret['changes']:
        # Pending changes in test mode: result is "unknown" (None)
        ret['result'] = None
    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        # Restore the chattr attributes stripped at the top
        if perms['lattrs']:
            chattr(name, operator='add', attributes=perms['lattrs'])
    # Enforce requested chattr attributes (non-directories only)
    if attrs is not None and not is_dir:
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs[0] is not None or diff_attrs[1] is not None:
                if __opts__['test'] is True:
                    ret['changes']['attrs'] = attrs
                else:
                    if diff_attrs[0] is not None:
                        chattr(name, operator="add", attributes=diff_attrs[0])
                    if diff_attrs[1] is not None:
                        chattr(name, operator="remove", attributes=diff_attrs[1])
                    # Re-compare to confirm the chattr calls took effect
                    cmp_attrs = _cmp_attrs(name, attrs)
                    if cmp_attrs[0] is not None or cmp_attrs[1] is not None:
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change attributes to {0}'.format(attrs)
                        )
                    else:
                        ret['changes']['attrs'] = attrs
    return ret, perms
def check_managed(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        **kwargs):
    '''
    Dry-run check of a managed file.

    Returns ``(True, msg)`` when the file is already in the correct state,
    ``(None, comments)`` when changes are pending, and ``(False, comments)``
    when the source could not be obtained.
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,
                                      source_hash,
                                      saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # Gather the source file from the master (skipped when literal
        # contents were supplied instead of a source)
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)
    # Ignore ownership/mode for files under the temp directory; they are
    # transient and get set properly by the real run.
    if name.startswith(tempfile.gettempdir()):
        for key in ['user', 'group', 'mode']:
            changes.pop(key, None)
    __clean_tmp(sfn)
    if changes:
        log.info(changes)
        comments = ['The following values are set to be changed:\n']
        comments.extend('{0}: {1}\n'.format(key, val)
                        for key, val in six.iteritems(changes))
        return None, ''.join(comments)
    return True, 'The file {0} is in the correct state'.format(name)
def check_managed_changes(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        **kwargs):
    '''
    Like check_managed, but return the raw dict of pending changes from
    check_file_meta (empty when nothing would change), or ``(False,
    comments)`` when the source cannot be obtained.
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,
                                      source_hash,
                                      saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # Gather the source file from the master
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
        # keep_mode: mirror the mode of a salt:// or local source file
        if sfn and source and keep_mode:
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
                except Exception as exc:
                    # Best effort: fall back to the requested mode
                    log.warning('Unable to stat %s: %s', sfn, exc)
    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)
    __clean_tmp(sfn)
    return changes
def check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents=None):
    '''
    Compare the current state of the file at ``name`` against the desired
    content and metadata, returning a dict of pending changes (empty when
    the file already matches).

    Possible keys in the returned dict: ``newfile`` (file absent),
    ``diff``/``sum`` (content differs), ``user``, ``group``, ``mode`` and
    ``attrs``.  ``contents`` (literal content) takes the place of
    ``source``/``source_sum`` based comparison when given.
    '''
    lsattr_cmd = salt.utils.path.which('lsattr')
    changes = {}
    if not source_sum:
        source_sum = {}
    # lstat (follow_symlinks=False) so a symlink is inspected, not its target
    lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
    if not lstats:
        changes['newfile'] = name
        return changes
    if 'hsum' in source_sum:
        if source_sum['hsum'] != lstats['sum']:
            # Content differs: cache the source (if needed) to produce a diff
            if not sfn and source:
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum['hsum'])
            if sfn:
                try:
                    changes['diff'] = get_diff(
                        sfn, name, template=True, show_filenames=False)
                except CommandExecutionError as exc:
                    changes['diff'] = exc.strerror
            else:
                changes['sum'] = 'Checksum differs'
    if contents is not None:
        # Write the literal contents to a temp file and diff against it
        tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                       text=True)
        if salt.utils.platform.is_windows():
            contents = os.linesep.join(
                _splitlines_preserving_trailing_newline(contents))
        with salt.utils.files.fopen(tmp, 'w') as tmp_:
            tmp_.write(salt.utils.stringutils.to_str(contents))
        try:
            differences = get_diff(name, tmp, show_filenames=False)
        except CommandExecutionError as exc:
            log.error('Failed to diff files: {0}'.format(exc))
            differences = exc.strerror
        __clean_tmp(tmp)
        if differences:
            if __salt__['config.option']('obfuscate_templates'):
                changes['diff'] = '<Obfuscated Template>'
            else:
                changes['diff'] = differences
    if not salt.utils.platform.is_windows():
        # Ownership: accept a match on either the resolved name or the id
        if (user is not None
                and user != lstats['user']
                and user != lstats['uid']):
            changes['user'] = user
        if (group is not None
                and group != lstats['group']
                and group != lstats['gid']):
            changes['group'] = group
        # Normalize the file mode before comparing
        smode = salt.utils.files.normalize_mode(lstats['mode'])
        mode = salt.utils.files.normalize_mode(mode)
        if mode is not None and mode != smode:
            changes['mode'] = mode
        if lsattr_cmd:
            diff_attrs = _cmp_attrs(name, attrs)
            # BUGFIX: the `or` must be parenthesized. The previous
            # expression (`attrs is not None and diff_attrs[0] is not None
            # or diff_attrs[1] is not None`) parsed as
            # `(attrs is not None and ...) or diff_attrs[1] is not None`,
            # so `changes['attrs'] = attrs` (i.e. None) was recorded even
            # when no attrs were requested, whenever the file carried
            # attributes to remove.
            if attrs is not None and (
                    diff_attrs[0] is not None or
                    diff_attrs[1] is not None):
                changes['attrs'] = attrs
    return changes
def get_diff(file1,
             file2,
             saltenv='base',
             show_filenames=True,
             show_changes=True,
             template=False,
             source_hash_file1=None,
             source_hash_file2=None):
    '''
    Return a unified diff of two files (local paths or salt:// sources) as
    a unicode string; an empty string when the files are identical.

    ``show_changes=False`` returns the placeholder '<show_changes=False>'
    and ``template=True`` honors the master's obfuscate_templates option.
    Raises CommandExecutionError when either file cannot be cached or read.
    '''
    files = (file1, file2)
    source_hashes = (source_hash_file1, source_hash_file2)
    paths = []
    errors = []
    # Cache both inputs (salt:// sources are fetched; local paths pass
    # through cp.cache_file unchanged)
    for filename, source_hash in zip(files, source_hashes):
        try:
            cached_path = __salt__['cp.cache_file'](filename,
                                                    saltenv,
                                                    source_hash=source_hash)
            if cached_path is False:
                errors.append(
                    u'File {0} not found'.format(
                        salt.utils.stringutils.to_unicode(filename)
                    )
                )
                continue
            paths.append(cached_path)
        except MinionError as exc:
            errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
            continue
    if errors:
        raise CommandExecutionError(
            'Failed to cache one or more files',
            info=errors
        )
    args = []
    # NOTE(review): this loop reads from the original `files`, not the
    # cached `paths` collected above — which appears to rely on local paths
    # passing through the cache unchanged; confirm for salt:// sources.
    for idx, filename in enumerate(files):
        try:
            with salt.utils.files.fopen(filename, 'r') as fp_:
                args.append(fp_.readlines())
        except (IOError, OSError) as exc:
            raise CommandExecutionError(
                'Failed to read {0}: {1}'.format(
                    salt.utils.stringutils.to_str(filename),
                    exc.strerror
                )
            )
    if args[0] != args[1]:
        if template and __salt__['config.option']('obfuscate_templates'):
            ret = u'<Obfuscated Template>'
        elif not show_changes:
            ret = u'<show_changes=False>'
        else:
            # Binary content can't be diffed line-wise; report it instead
            bdiff = _binary_replace(*files)
            if bdiff:
                ret = bdiff
            else:
                if show_filenames:
                    # difflib uses the two extra args as from/to labels
                    args.extend(
                        [salt.utils.stringutils.to_str(x) for x in files]
                    )
                ret = salt.utils.locales.sdecode(
                    ''.join(difflib.unified_diff(*args))
                )
        return ret
    return u''
def manage_file(name,
                sfn,
                ret,
                source,
                source_sum,
                user,
                group,
                mode,
                attrs,
                saltenv,
                backup,
                makedirs=False,
                template=None,
                show_changes=True,
                contents=None,
                dir_mode=None,
                follow_symlinks=True,
                skip_verify=False,
                keep_mode=False,
                encoding=None,
                encoding_errors='strict',
                **kwargs):
    '''
    Check and apply the desired state of the file at ``name``: download and
    verify the cached ``source`` (or write inline ``contents``), copy it into
    place with backup support, create missing parent directories when
    ``makedirs`` is set, and enforce ownership/mode/attrs via ``check_perms``.

    name
        Destination path on the minion (``~`` is expanded).
    sfn
        Path to an already-cached copy of the source, if any; cached on
        demand otherwise and cleaned up via ``__clean_tmp`` before return.
    ret
        State-style result dict to update; a fresh one is built if falsy.
    source / source_sum
        Source location and its ``{'hash_type': ..., 'hsum': ...}`` checksum
        info (``hsum`` is lower-cased here).
    contents
        Inline file contents; when not None they take precedence for the
        body of the file (written through a temp file, diffed, then copied).
    skip_verify
        Skip checksum verification of non-``salt://`` downloads.
    keep_mode
        For ``salt://``/``file://``/absolute-path sources, stat the source
        and reuse its mode.
    encoding / encoding_errors
        When ``encoding`` is set, inline contents are encoded with it before
        being written.

    Returns the ``ret`` dict with ``changes``, ``comment`` and ``result``
    filled in; errors are reported through ``_error``.
    '''
    name = os.path.expanduser(name)

    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': '',
               'result': True}
    # Normalize the expected checksum for case-insensitive comparison below.
    if source_sum and ('hsum' in source_sum):
        source_sum['hsum'] = source_sum['hsum'].lower()

    if source and not sfn:
        # Cache the source and, lacking a provided checksum, hash the cached
        # copy so the comparison against the destination can proceed.
        sfn = __salt__['cp.cache_file'](source, saltenv)
        if not sfn:
            return _error(
                ret, 'Source file \'{0}\' not found'.format(source))
        htype = source_sum.get('hash_type', __opts__['hash_type'])
        source_sum = {
            'hash_type': htype,
            'hsum': get_hash(sfn, form=htype)
        }

    if keep_mode:
        if _urlparse(source).scheme in ('salt', 'file') \
                or source.startswith('/'):
            try:
                mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
            except Exception as exc:
                # Best effort only: fall back to the caller-provided mode.
                log.warning('Unable to stat %s: %s', sfn, exc)

    # --- Destination already exists (file or symlink) -----------------------
    if os.path.isfile(name) or os.path.islink(name):
        if os.path.islink(name) and follow_symlinks:
            real_name = os.path.realpath(name)
        else:
            real_name = name

        # Only hash the destination when its contents are actually managed.
        if source and not (not follow_symlinks and os.path.islink(real_name)):
            name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type']))
        else:
            name_sum = None

        # NOTE(review): the default for the 'hsum' lookup below is
        # __opts__['hash_type'] (a hash *name*, not a digest) — preserved
        # as-is, but verify upstream whether None was intended.
        if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum):
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # Verify downloads from non-salt sources against the given sum.
            if not skip_verify \
                    and _urlparse(source).scheme not in ('salt', ''):
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3}). If the \'source_hash\' value '
                        'refers to a remote file with multiple possible '
                        'matches, then it may be necessary to set '
                        '\'source_hash_name\'.'.format(
                            source_sum['hash_type'],
                            source,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret

            # Record the diff (possibly obfuscated/suppressed), then commit.
            if __salt__['config.option']('obfuscate_templates'):
                ret['changes']['diff'] = '<Obfuscated Template>'
            elif not show_changes:
                ret['changes']['diff'] = '<show_changes=False>'
            else:
                try:
                    ret['changes']['diff'] = get_diff(
                        real_name, sfn, show_filenames=False)
                except CommandExecutionError as exc:
                    ret['changes']['diff'] = exc.strerror

            try:
                salt.utils.files.copyfile(sfn,
                                          real_name,
                                          __salt__['config.backup_mode'](backup),
                                          __opts__['cachedir'])
            except IOError as io_error:
                __clean_tmp(sfn)
                return _error(
                    ret, 'Failed to commit change: {0}'.format(io_error))

        # Inline contents: write to a temp file, diff, and copy only if the
        # destination actually differs.
        if contents is not None:
            tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                           text=True)
            if salt.utils.platform.is_windows():
                contents = os.linesep.join(
                    _splitlines_preserving_trailing_newline(contents))
            with salt.utils.files.fopen(tmp, 'w') as tmp_:
                if encoding:
                    log.debug('File will be encoded with {0}'.format(encoding))
                    tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
                else:
                    tmp_.write(salt.utils.stringutils.to_str(contents))
            try:
                differences = get_diff(
                    real_name, tmp, show_filenames=False,
                    show_changes=show_changes, template=True)
            except CommandExecutionError as exc:
                ret.setdefault('warnings', []).append(
                    'Failed to detect changes to file: {0}'.format(exc.strerror)
                )
                differences = ''
            if differences:
                ret['changes']['diff'] = differences
                try:
                    salt.utils.files.copyfile(tmp,
                                              real_name,
                                              __salt__['config.backup_mode'](backup),
                                              __opts__['cachedir'])
                except IOError as io_error:
                    __clean_tmp(tmp)
                    return _error(
                        ret, 'Failed to commit change: {0}'.format(io_error))
            __clean_tmp(tmp)

        # When not following symlinks, a managed symlink destination gets
        # replaced with a regular file.
        if os.path.islink(name) and not follow_symlinks:
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            if not skip_verify and _urlparse(source).scheme != 'salt':
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3})'.format(
                            source_sum['hash_type'],
                            name,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret

            try:
                salt.utils.files.copyfile(sfn,
                                          name,
                                          __salt__['config.backup_mode'](backup),
                                          __opts__['cachedir'])
            except IOError as io_error:
                __clean_tmp(sfn)
                return _error(
                    ret, 'Failed to commit change: {0}'.format(io_error))
            ret['changes']['diff'] = \
                'Replace symbolic link with regular file'

        # Enforce permissions; Windows uses the owner/perms kwargs instead
        # of user/group/mode.
        if salt.utils.platform.is_windows():
            ret = check_perms(name,
                              ret,
                              kwargs.get('win_owner'),
                              kwargs.get('win_perms'),
                              kwargs.get('win_deny_perms'),
                              None,
                              kwargs.get('win_inheritance'))
        else:
            ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks)

        if ret['changes']:
            ret['comment'] = u'File {0} updated'.format(
                salt.utils.locales.sdecode(name)
            )
        elif not ret['changes'] and ret['result']:
            ret['comment'] = u'File {0} is in the correct state'.format(
                salt.utils.locales.sdecode(name)
            )
        if sfn:
            __clean_tmp(sfn)
        return ret
    # --- Destination does not exist yet -------------------------------------
    else:
        contain_dir = os.path.dirname(name)

        def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
            # Helper: derive a dir_mode from the file mode when needed and
            # create all missing parent directories.
            if salt.utils.platform.is_windows():
                drive, _ = os.path.splitdrive(name)
                if drive and not os.path.exists(drive):
                    __clean_tmp(sfn)
                    return _error(ret,
                                  '{0} drive not present'.format(drive))
            if dir_mode is None and mode is not None:
                # Add the execute bit to each nonzero digit of the file mode
                # so the created directories can be entered/listed via a
                # shell.
                mode_list = [x for x in str(mode)][-3:]
                for idx in range(len(mode_list)):
                    if mode_list[idx] != '0':
                        mode_list[idx] = str(int(mode_list[idx]) | 1)
                dir_mode = ''.join(mode_list)

            if salt.utils.platform.is_windows():
                # This function resides in win_file.py and will be available
                # on Windows. The local function will be overridden
                # pylint: disable=E1121
                makedirs_(name,
                          kwargs.get('win_owner'),
                          kwargs.get('win_perms'),
                          kwargs.get('win_deny_perms'),
                          kwargs.get('win_inheritance'))
                # pylint: enable=E1121
            else:
                makedirs_(name, user=user, group=group, mode=dir_mode)

        if source:
            # It is a new file, set the diff accordingly
            ret['changes']['diff'] = 'New file'
            # Apply the new file
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # If the downloaded file came from a non salt server source verify
            # that it matches the intended sum value
            if not skip_verify \
                    and _urlparse(source).scheme != 'salt':
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3})'.format(
                            source_sum['hash_type'],
                            name,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret['changes'].pop('diff', None)
                    return _error(ret, 'Parent directory not present')
        else:  # source != True
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret['changes'].pop('diff', None)
                    return _error(ret, 'Parent directory not present')

        # Create the file, user rw-only if mode will be set to prevent
        # a small security race problem before the permissions are set
        if mode:
            current_umask = os.umask(0o77)

        # Create a new file when test is False and source is None
        if contents is None:
            if not __opts__['test']:
                if touch(name):
                    ret['changes']['new'] = 'file {0} created'.format(name)
                    ret['comment'] = 'Empty file'
                else:
                    return _error(
                        ret, 'Empty file {0} not created'.format(name)
                    )
        else:
            if not __opts__['test']:
                if touch(name):
                    ret['changes']['diff'] = 'New file'
                else:
                    return _error(
                        ret, 'File {0} not created'.format(name)
                    )
        if mode:
            os.umask(current_umask)

        if contents is not None:
            # Write the static contents to a temporary file
            tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                           text=True)
            if salt.utils.platform.is_windows():
                contents = os.linesep.join(
                    _splitlines_preserving_trailing_newline(contents))
            with salt.utils.files.fopen(tmp, 'w') as tmp_:
                if encoding:
                    log.debug('File will be encoded with {0}'.format(encoding))
                    tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
                else:
                    tmp_.write(salt.utils.stringutils.to_str(contents))
            # Copy into place
            salt.utils.files.copyfile(tmp,
                                      name,
                                      __salt__['config.backup_mode'](backup),
                                      __opts__['cachedir'])
            __clean_tmp(tmp)
        # Now copy the file contents if there is a source file
        elif sfn:
            salt.utils.files.copyfile(sfn,
                                      name,
                                      __salt__['config.backup_mode'](backup),
                                      __opts__['cachedir'])
            __clean_tmp(sfn)

        # This is a new file, if no mode specified, use the umask to figure
        # out what mode to use for the new file.
        if mode is None and not salt.utils.platform.is_windows():
            # Get current umask
            mask = os.umask(0)
            os.umask(mask)
            # Calculate the mode value that results from the umask
            mode = oct((0o777 ^ mask) & 0o666)

        if salt.utils.platform.is_windows():
            ret = check_perms(name,
                              ret,
                              kwargs.get('win_owner'),
                              kwargs.get('win_perms'),
                              kwargs.get('win_deny_perms'),
                              None,
                              kwargs.get('win_inheritance'))
        else:
            ret, _ = check_perms(name, ret, user, group, mode, attrs)

        if not ret['comment']:
            ret['comment'] = 'File ' + name + ' updated'

        if __opts__['test']:
            ret['comment'] = 'File ' + name + ' not updated'
        elif not ret['changes'] and ret['result']:
            ret['comment'] = 'File ' + name + ' is in the correct state'
        if sfn:
            __clean_tmp(sfn)
        return ret
def mkdir(dir_path,
          user=None,
          group=None,
          mode=None):
    '''
    Ensure that the directory ``dir_path`` exists, creating it (and setting
    ownership/mode via ``makedirs_perms``) only when it is missing.

    Always returns True.
    '''
    target = os.path.normpath(os.path.expanduser(dir_path))
    if not os.path.isdir(target):
        # Delegate creation to makedirs_perms so any directories created on
        # behalf of a caller (e.g. managed() with makedirs=True) carry the
        # requested user/group/mode — principle of least surprise.
        makedirs_perms(target, user, group, mode)
    return True
def makedirs_(path,
              user=None,
              group=None,
              mode=None):
    '''
    Ensure that the *parent* directory of ``path`` exists, creating every
    missing ancestor (top-down) via ``mkdir`` with the given user, group and
    normalized mode.

    Returns a short status message when nothing needed to be done; raises
    ``SaltInvocationError`` if walking up the path can never terminate
    (i.e. a relative path that never reaches an existing directory).
    '''
    path = os.path.expanduser(path)

    if mode:
        mode = salt.utils.files.normalize_mode(mode)

    parent = os.path.normpath(os.path.dirname(path))
    if os.path.isdir(parent):
        # Nothing to create.
        msg = 'Directory \'{0}\' already exists'.format(parent)
        log.debug(msg)
        return msg

    if os.path.exists(parent):
        msg = 'The path \'{0}\' already exists and is not a directory'.format(
            parent
        )
        log.debug(msg)
        return msg

    # Walk upward collecting every missing ancestor until an existing
    # directory is reached.
    pending = []
    while not os.path.isdir(parent):
        pending.append(parent)
        previous, parent = parent, os.path.dirname(parent)
        if previous == parent:
            raise SaltInvocationError(
                'Recursive creation for path \'{0}\' would result in an '
                'infinite loop. Please use an absolute path.'.format(parent)
            )

    # Create the missing directories from the top of the tree down.
    for missing in reversed(pending):
        log.debug('Creating directory: %s', missing)
        mkdir(missing, user=user, group=group, mode=mode)
def makedirs_perms(name,
                   user=None,
                   group=None,
                   mode='0755'):
    '''
    Recursively create the directory ``name`` (like ``os.makedirs``) and
    apply ownership and mode to each created level via ``check_perms``.
    '''
    name = os.path.expanduser(name)

    head, tail = os.path.split(name)
    if not tail:
        # Path ended with a separator; split again to find the real leaf.
        head, tail = os.path.split(head)
    if head and tail and not os.path.exists(head):
        try:
            # Create the missing ancestors first.
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # Tolerate a concurrent creation of the same ancestor.
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:
            # 'xxx/newdir/.' exists once 'xxx/newdir' does.
            return
    os.mkdir(name)
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
def get_devmm(name):
    '''
    Return the ``(major, minor)`` device numbers of the character or block
    device at ``name``; ``(0, 0)`` for anything else.
    '''
    name = os.path.expanduser(name)
    if not (is_chrdev(name) or is_blkdev(name)):
        return (0, 0)
    rdev = os.stat(name).st_rdev
    return (os.major(rdev), os.minor(rdev))
def is_chrdev(name):
    '''
    Return True if ``name`` is a character device, False otherwise —
    including when the path does not exist at all.
    '''
    name = os.path.expanduser(name)
    try:
        file_mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # A missing path is simply not a character device.
            return False
        raise
    return stat.S_ISCHR(file_mode)
def mknod_chrdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    Create a character device node at ``name`` with the given major/minor
    numbers, then apply ownership/mode via ``check_perms``.

    Honors ``test`` mode (result None, no node created). If the path
    already exists the EEXIST error is swallowed and reported in the
    comment; any other OSError propagates. Returns a state-style dict
    with ``name``/``changes``/``comment``/``result``.
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating character device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Mode strings like '0660' are parsed as octal after stripping
            # any leading 0/O/o prefix characters.
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # NOTE(review): the mode handed to check_perms is a *decimal* int of the
    # string (int('0660') == 660), not an octal parse — this matches the
    # other mknod_* helpers here, but verify check_perms' expectation.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_blkdev(name):
    '''
    Return True if ``name`` is a block device, False otherwise — including
    when the path does not exist at all.
    '''
    name = os.path.expanduser(name)
    try:
        file_mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # A missing path is simply not a block device.
            return False
        raise
    return stat.S_ISBLK(file_mode)
def mknod_blkdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    Create a block device node at ``name`` with the given major/minor
    numbers, then apply ownership/mode via ``check_perms``.

    Honors ``test`` mode (result None, no node created). If the path
    already exists the EEXIST error is swallowed and reported in the
    comment; any other OSError propagates. Returns a state-style dict
    with ``name``/``changes``/``comment``/``result``.
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating block device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Mode strings like '0660' are parsed as octal after stripping
            # any leading 0/O/o prefix characters.
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # an existing node is tolerated; os.mknod will not overwrite, so a
        # major/minor change requires unlinking the node first
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # NOTE(review): decimal int() of the mode string, as in mknod_chrdev —
    # verify check_perms' expectation.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_fifo(name):
    '''
    Return True if ``name`` is a FIFO (named pipe), False otherwise —
    including when the path does not exist at all.
    '''
    name = os.path.expanduser(name)
    try:
        file_mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # A missing path is simply not a FIFO.
            return False
        raise
    return stat.S_ISFIFO(file_mode)
def mknod_fifo(name,
               user=None,
               group=None,
               mode='0660'):
    '''
    Create a FIFO (named pipe) at ``name``, then apply ownership/mode via
    ``check_perms``.

    Honors ``test`` mode (result None, no pipe created). If the path
    already exists the EEXIST error is swallowed and reported in the
    comment; any other OSError propagates. Returns a state-style dict
    with ``name``/``changes``/``comment``/``result``.
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating FIFO name: {0}'.format(name))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Mode strings like '0660' are parsed as octal after stripping
            # any leading 0/O/o prefix characters.
            if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
                ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # NOTE(review): decimal int() of the mode string, as in mknod_chrdev —
    # verify check_perms' expectation.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def mknod(name,
          ntype,
          major=0,
          minor=0,
          user=None,
          group=None,
          mode='0600'):
    '''
    Create a special file node of the requested type after ensuring its
    parent directories exist.

    ntype
        ``'c'`` for a character device, ``'b'`` for a block device, or
        ``'p'`` for a FIFO pipe; anything else raises
        ``SaltInvocationError``.

    Returns the result dict of the delegated ``mknod_*`` helper.
    '''
    makedirs_(name, user, group)
    # Dispatch on the node-type character.
    if ntype == 'c':
        return mknod_chrdev(name, major, minor, user, group, mode)
    if ntype == 'b':
        return mknod_blkdev(name, major, minor, user, group, mode)
    if ntype == 'p':
        return mknod_fifo(name, user, group, mode)
    raise SaltInvocationError(
        'Node type unavailable: \'{0}\'. Available node types are '
        'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype)
    )
def list_backups(path, limit=None):
    '''
    List the previous versions of ``path`` stored under the minion backup
    root (``_get_bkroot()``), newest first.

    limit
        Optional maximum number of entries; a non-numeric value is logged
        and ignored (None keeps all entries).

    Returns ``{0: {'Backup Time': ..., 'Size': ..., 'Location': ...}, 1: ...}``
    keyed by position in newest-first order, or ``{}`` when no backup
    directory exists for the path.
    '''
    path = os.path.expanduser(path)

    try:
        limit = int(limit)
    except TypeError:
        # limit was None — keep all entries.
        pass
    except ValueError:
        log.error('file.list_backups: \'limit\' value must be numeric')
        limit = None

    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    if salt.utils.platform.is_windows():
        # ':' in a drive spec cannot appear in the backup dir name.
        src_dir = parent_dir.replace(':', '_')
    else:
        src_dir = parent_dir[1:]
    # Figure out full path of location of backup folder in minion cache
    bkdir = os.path.join(bkroot, src_dir)

    if not os.path.isdir(bkdir):
        return {}

    files = {}
    for fname in [x for x in os.listdir(bkdir)
                  if os.path.isfile(os.path.join(bkdir, x))]:
        # Backup filenames embed a timestamp; entries that don't parse with
        # the expected pattern are skipped.
        if salt.utils.platform.is_windows():
            # ':' is an illegal filesystem path character on Windows
            strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
        else:
            strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # File didn't match the strp format string, so it's not a backup
            # for this file. Skip it.
            continue
        if salt.utils.platform.is_windows():
            str_format = '%a %b %d %Y %H-%M-%S.%f'
        else:
            str_format = '%a %b %d %Y %H:%M:%S.%f'
        files.setdefault(timestamp, {})['Backup Time'] = \
            timestamp.strftime(str_format)
        location = os.path.join(bkdir, fname)
        files[timestamp]['Size'] = os.stat(location).st_size
        files[timestamp]['Location'] = location

    # Re-key newest-first by integer position, applying the limit.
    return dict(list(zip(
        list(range(len(files))),
        [files[x] for x in sorted(files, reverse=True)[:limit]]
    )))
# Backwards-compatible alias: expose list_backups under the name
# ``file.list_backup`` as well.
list_backup = salt.utils.functools.alias_function(list_backups, 'list_backup')
def list_backups_dir(path, limit=None):
    '''
    List the previous versions of every file backed up under the directory
    ``path``, grouped per original basename.

    limit
        Optional per-file maximum number of entries; a non-numeric value is
        logged and ignored.

    Returns ``{basename: {0: {...}, 1: {...}}, ...}`` with each inner dict
    keyed by position in newest-first order, or ``{}`` when no backup
    directory exists.

    NOTE(review): unlike ``list_backups`` this only uses the ':'-based
    timestamp format — verify Windows backups are expected here.
    '''
    path = os.path.expanduser(path)

    try:
        limit = int(limit)
    except TypeError:
        # limit was None — keep all entries.
        pass
    except ValueError:
        log.error('file.list_backups_dir: \'limit\' value must be numeric')
        limit = None

    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    # Figure out full path of location of backup folder in minion cache
    bkdir = os.path.join(bkroot, parent_dir[1:])

    if not os.path.isdir(bkdir):
        return {}

    files = {}
    # Map each distinct backup basename (the part before the first '_') to
    # the number of backups it has.
    f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])])
    ff = os.listdir(bkdir)
    for i, n in six.iteritems(f):
        ssfile = {}
        for x in sorted(ff):
            basename = x.split('_')[0]
            if i == basename:
                strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
                try:
                    timestamp = datetime.datetime.strptime(x, strpfmt)
                except ValueError:
                    # Filename didn't match the backup pattern; skip it.
                    continue
                ssfile.setdefault(timestamp, {})['Backup Time'] = \
                    timestamp.strftime('%a %b %d %Y %H:%M:%S.%f')
                location = os.path.join(bkdir, x)
                ssfile[timestamp]['Size'] = os.stat(location).st_size
                ssfile[timestamp]['Location'] = location

        # Re-key this file's backups newest-first, applying the limit.
        sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]])))
        sefiles = {i: sfiles}
        files.update(sefiles)
    return files
def restore_backup(path, backup_id):
    '''
    Restore backup number ``backup_id`` (as reported by ``list_backups``)
    over ``path``, first backing up the current file.

    Returns a dict with ``result`` and ``comment``; invalid or unknown
    backup IDs yield ``result: False``. On POSIX the restored file's
    ownership is re-applied from the pre-restore stat when possible.
    '''
    path = os.path.expanduser(path)

    # The backup_id must round-trip cleanly through int() (no padding, no
    # stray characters); anything else is reported as invalid.
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        entry = list_backups(path)[int(backup_id)]
    except ValueError:
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret

    # Preserve the current file as a fresh backup before overwriting it.
    salt.utils.files.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(entry['Location'], path)
    except IOError as exc:
        ret['comment'] = \
            'Unable to restore {0} to {1}: ' \
            '{2}'.format(entry['Location'], path, exc)
        return ret
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully restored {0} to ' \
                         '{1}'.format(entry['Location'], path)

    # Try to set proper ownership (POSIX only, best effort).
    if not salt.utils.platform.is_windows():
        try:
            stat_result = os.stat(path)
        except (OSError, IOError):
            ret['comment'] += ', but was unable to set ownership'
        else:
            os.chown(path, stat_result.st_uid, stat_result.st_gid)

    return ret
def delete_backup(path, backup_id):
    '''
    Delete backup number ``backup_id`` (as reported by ``list_backups``)
    of the file ``path`` from the minion backup cache.

    Returns a dict with ``result`` and ``comment``; invalid or unknown
    backup IDs yield ``result: False``.
    '''
    path = os.path.expanduser(path)

    # The backup_id must round-trip cleanly through int(); anything else is
    # reported as invalid.
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        entry = list_backups(path)[int(backup_id)]
    except ValueError:
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret

    try:
        os.remove(entry['Location'])
    except IOError as exc:
        ret['comment'] = 'Unable to remove {0}: {1}'.format(entry['Location'],
                                                            exc)
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully removed {0}'.format(entry['Location'])

    return ret
remove_backup = salt.utils.functools.alias_function(delete_backup, 'remove_backup')
def grep(path,
         pattern,
         *opts):
    '''
    Run the system ``grep`` binary over ``path`` with ``pattern`` and any
    extra single-token options, returning the ``cmd.run_all`` result dict.

    Each entry of ``opts`` must contain exactly one shell token; a string
    that splits into several tokens raises ``SaltInvocationError``.
    '''
    path = os.path.expanduser(path)

    split_opts = []
    for opt in opts:
        try:
            tokens = salt.utils.args.shlex_split(opt)
        except AttributeError:
            # Non-string option (e.g. an int such as -1) — stringify and retry.
            tokens = salt.utils.args.shlex_split(str(opt))
        if len(tokens) > 1:
            raise SaltInvocationError(
                'Passing multiple command line arguments in a single string '
                'is not supported, please pass the following arguments '
                'separately: {0}'.format(opt)
            )
        split_opts.extend(tokens)

    cmd = ['grep'] + split_opts + [pattern, path]
    try:
        # python_shell=False: argv list, no shell interpolation.
        return __salt__['cmd.run_all'](cmd, python_shell=False)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
def open_files(by_pid=False):
    '''
    Scan ``/proc`` and return the files currently held open on the system
    (Linux procfs only).

    by_pid
        When False (default), return ``{file_path: [pid, ...]}``; when True,
        return ``{pid: [file_path, ...]}``. All lists are sorted and
        de-duplicated.
    '''
    # Collect all numeric /proc entries — these are the live PIDs.
    pids = {}
    procfs = os.listdir('/proc/')
    for pfile in procfs:
        try:
            pids[int(pfile)] = []
        except ValueError:
            # Not a number, not a PID (e.g. /proc/meminfo).
            pass

    files = {}
    for pid in pids:
        ppath = '/proc/{0}'.format(pid)
        try:
            tids = os.listdir('{0}/task'.format(ppath))
        except OSError:
            # Process exited between the listing above and now.
            continue

        # Gather fd symlinks for the process and each of its threads, plus
        # each thread's executable link.
        fd_ = []
        for fpath in os.listdir('{0}/fd'.format(ppath)):
            fd_.append('{0}/fd/{1}'.format(ppath, fpath))

        for tid in tids:
            try:
                fd_.append(
                    os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
                )
            except OSError:
                continue

            for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
                fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))

        fd_ = sorted(set(fd_))

        # Resolve each fd link to a real path and keep only those that
        # still stat successfully (skips sockets/pipes/deleted files that
        # resolve to non-paths).
        for fdpath in fd_:
            try:
                name = os.path.realpath(fdpath)
                # Running stat on the file cleans out races
                os.stat(name)
            except OSError:
                continue

            if name not in files:
                files[name] = [pid]
            else:
                files[name].append(pid)
                files[name] = sorted(set(files[name]))

            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))

    if by_pid:
        return pids
    return files
def pardir():
    '''
    Return the OS string used to refer to the parent directory
    (``'..'`` on both POSIX and Windows).
    '''
    return os.path.pardir
def normpath(path):
    '''
    Return ``path`` normalized — redundant separators and up-level
    references collapsed (thin wrapper over ``os.path.normpath``).
    '''
    return os.path.normpath(path)
def basename(path):
    '''
    Return the final component of ``path``.
    '''
    # os.path.basename is, by definition, the tail of os.path.split().
    return os.path.split(path)[1]
def dirname(path):
    '''
    Return the directory component of ``path``.
    '''
    # os.path.dirname is, by definition, the head of os.path.split().
    return os.path.split(path)[0]
def join(*args):
    '''
    Join any number of path components using the platform separator
    (thin wrapper over ``os.path.join``).
    '''
    return os.path.join(*args)
def move(src, dst):
    '''
    Move (rename) ``src`` to ``dst`` via ``shutil.move``.

    Both paths must be absolute after ``~`` expansion, otherwise
    ``SaltInvocationError`` is raised. A failed move raises
    ``CommandExecutionError``; success returns a dict with ``result``
    True and a descriptive ``comment``.
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('Source path must be absolute.')
    if not os.path.isabs(dst):
        raise SaltInvocationError('Destination path must be absolute.')

    try:
        shutil.move(src, dst)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to move '{0}' to '{1}': {2}".format(src, dst, exc)
        )

    return {
        'result': True,
        'comment': "'{0}' moved to '{1}'".format(src, dst),
    }
def diskusage(path):
    '''
    Return the disk usage of ``path`` in bytes.

    For a regular file this is simply its size. For a directory the sizes
    of all files beneath it are summed, with each inode counted only once
    so hard links are not double-counted. Unreadable entries are skipped.
    '''
    if os.path.isfile(path):
        return os.stat(path).st_size

    counted_inodes = set()
    total = 0
    for dirpath, _, filenames in os.walk(path):
        for fname in filenames:
            full_path = os.path.join(dirpath, fname)
            try:
                info = os.stat(full_path)
            except OSError:
                # Broken symlink or file removed mid-walk — skip it.
                continue
            if info.st_ino in counted_inodes:
                continue
            counted_inodes.add(info.st_ino)
            total += info.st_size
    return total
| true
| true
|
f7145d3436320d6def2071605cdc3fc5a509c911
| 2,682
|
py
|
Python
|
catalog/bindings/gmd/cubic_spline_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/cubic_spline_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/cubic_spline_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.gmd.abstract_curve_segment_type import AbstractCurveSegmentType
from bindings.gmd.coordinates import Coordinates
from bindings.gmd.curve_interpolation_type import CurveInterpolationType
from bindings.gmd.point_property import PointProperty
from bindings.gmd.point_rep import PointRep
from bindings.gmd.pos import Pos
from bindings.gmd.pos_list import PosList
from bindings.gmd.vector_type import VectorType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CubicSplineType(AbstractCurveSegmentType):
    """GML ``CubicSplineType`` curve-segment binding.

    Field metadata (names, namespaces, occurrence constraints) drives the
    XML (de)serialization; the control points may be given as repeated
    ``pos``/``pointProperty``/``pointRep`` elements or as a single
    ``posList``/``coordinates`` element. Interpolation is fixed to
    ``cubicSpline`` with degree 3 (both are ``init=False``).
    """

    # Direct position control points (at least 2, order-sensitive).
    pos: List[Pos] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    # Control points given by reference to Point elements.
    point_property: List[PointProperty] = field(
        default_factory=list,
        metadata={
            "name": "pointProperty",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    # Deprecated GML alternative to pointProperty (kept for compatibility).
    point_rep: List[PointRep] = field(
        default_factory=list,
        metadata={
            "name": "pointRep",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    # All control points as one coordinate list.
    pos_list: Optional[PosList] = field(
        default=None,
        metadata={
            "name": "posList",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    coordinates: Optional[Coordinates] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    # Tangent vectors at the two ends of the spline (required by the schema).
    vector_at_start: Optional[VectorType] = field(
        default=None,
        metadata={
            "name": "vectorAtStart",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    vector_at_end: Optional[VectorType] = field(
        default=None,
        metadata={
            "name": "vectorAtEnd",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    # Fixed attributes: a cubic spline always interpolates as such.
    interpolation: CurveInterpolationType = field(
        init=False,
        default=CurveInterpolationType.CUBIC_SPLINE,
        metadata={
            "type": "Attribute",
        },
    )
    degree: int = field(
        init=False,
        default=3,
        metadata={
            "type": "Attribute",
        },
    )
| 28.83871
| 77
| 0.561894
|
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.gmd.abstract_curve_segment_type import AbstractCurveSegmentType
from bindings.gmd.coordinates import Coordinates
from bindings.gmd.curve_interpolation_type import CurveInterpolationType
from bindings.gmd.point_property import PointProperty
from bindings.gmd.point_rep import PointRep
from bindings.gmd.pos import Pos
from bindings.gmd.pos_list import PosList
from bindings.gmd.vector_type import VectorType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CubicSplineType(AbstractCurveSegmentType):
pos: List[Pos] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"min_occurs": 2,
"sequential": True,
},
)
point_property: List[PointProperty] = field(
default_factory=list,
metadata={
"name": "pointProperty",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"min_occurs": 2,
"sequential": True,
},
)
point_rep: List[PointRep] = field(
default_factory=list,
metadata={
"name": "pointRep",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"min_occurs": 2,
"sequential": True,
},
)
pos_list: Optional[PosList] = field(
default=None,
metadata={
"name": "posList",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinates: Optional[Coordinates] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vector_at_start: Optional[VectorType] = field(
default=None,
metadata={
"name": "vectorAtStart",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"required": True,
},
)
vector_at_end: Optional[VectorType] = field(
default=None,
metadata={
"name": "vectorAtEnd",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"required": True,
},
)
interpolation: CurveInterpolationType = field(
init=False,
default=CurveInterpolationType.CUBIC_SPLINE,
metadata={
"type": "Attribute",
},
)
degree: int = field(
init=False,
default=3,
metadata={
"type": "Attribute",
},
)
| true
| true
|
f7145dbe062462ea587231c7a6d56ded0ad5f8e1
| 323
|
py
|
Python
|
examples/02_Example_WaterwaySearch/TerminalColors.py
|
jaywilhelm/OpenUxAS
|
76b08d94c4c51ca51d9f79c9db03d7344e9d6552
|
[
"NASA-1.3"
] | 13
|
2019-09-19T01:07:23.000Z
|
2022-01-06T17:25:48.000Z
|
src/TerminalColors.py
|
JTEnglish/UAVHeading-CollisionAvoidance
|
97e732616b6243184d64455e143ffe798840273a
|
[
"MIT"
] | 3
|
2019-06-10T06:10:52.000Z
|
2020-07-21T16:10:41.000Z
|
src/TerminalColors.py
|
JTEnglish/UAVHeading-CollisionAvoidance
|
97e732616b6243184d64455e143ffe798840273a
|
[
"MIT"
] | 3
|
2020-02-12T06:13:36.000Z
|
2021-02-14T03:00:34.000Z
|
'''
Class: TerminalColors
Credit: https://stackoverflow.com/questions/287871/print-in-terminal-with-colors
'''
class TerminalColors:
    """ANSI escape sequences for colored/styled terminal output.

    Prefix a string with one of these constants and append ``ENDC`` to
    reset, e.g. ``TerminalColors.WARNING + 'msg' + TerminalColors.ENDC``.
    """
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
| 23.071429
| 81
| 0.609907
|
class TerminalColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
| true
| true
|
f7145e9a3b17f8a481672804c82a177d305100f7
| 3,194
|
py
|
Python
|
pictures/tests.py
|
David5627/My_Gallary
|
cfbdcb13586f3d132993f9ceb1aa84c2f0ca61b3
|
[
"MIT"
] | null | null | null |
pictures/tests.py
|
David5627/My_Gallary
|
cfbdcb13586f3d132993f9ceb1aa84c2f0ca61b3
|
[
"MIT"
] | null | null | null |
pictures/tests.py
|
David5627/My_Gallary
|
cfbdcb13586f3d132993f9ceb1aa84c2f0ca61b3
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
# Create your tests here.
from .models import Image, Category, Location
class TestImage(TestCase):
    """Unit tests for the Image model and its custom query helpers."""

    def setUp(self):
        # Fixture objects persisted through the models' custom save helpers.
        self.location = Location(locationName='Kiambu')
        self.location.saveLocation()
        self.category = Category(categoryName='job')
        self.category.saveCategory()
        self.testInstance = Image(id=1, imageName='IMG.jpg', imageDescription=' a test image', imageLocation=self.location,
                                  imageCategory=self.category)

    def test_instance(self):
        self.assertTrue(isinstance(self.testInstance, Image))

    def test_save_image(self):
        self.testInstance.saveImage()
        filterImage = Image.objects.all()
        self.assertTrue(len(filterImage) > 0)

    def test_delete_image(self):
        self.testInstance.deleteImage()
        images = Image.objects.all()
        self.assertTrue(len(images) == 0)

    def test_update_image(self):
        self.testInstance.saveImage()
        self.testInstance.updateImage(self.testInstance.id, 'images/img.jpg')
        # NOTE(review): the filter looks for 'images/test.jpg' while the
        # update above used 'images/img.jpg' — verify which path is intended.
        imgUpdt = Image.objects.filter(image='images/test.jpg')
        self.assertTrue(len(imgUpdt) > 0)

    def test_get_image_by_id(self):
        imageF = self.testInstance.getimageById(self.testInstance.id)
        image = Image.objects.filter(id=self.testInstance.id)
        # NOTE(review): assertTrue's second argument is a failure *message*,
        # not a comparand — this only checks imageF is truthy. Consider an
        # explicit equality assertion once getimageById's return type is known.
        self.assertTrue(imageF, image)

    def test_search_image_by_location(self):
        self.testInstance.saveImage()
        foundImages = self.testInstance.filterimageByLocation(imageLocation='Kiambu')
        # Bug fix: the assertion previously referenced the undefined name
        # ``found_images`` (NameError at runtime); use the variable bound above.
        self.assertTrue(len(foundImages) == 1)

    def test_search_image_by_category(self):
        category = 'food'
        foundImages = self.testInstance.searchImage(category)
        # Bug fix: previously referenced the undefined name ``found_img``.
        self.assertTrue(len(foundImages) > 1)

    def tearDown(self):
        Image.objects.all().delete()
        Location.objects.all().delete()
        Category.objects.all().delete()
class TestLocation(TestCase):
    """Unit tests for the Location model."""
    def setUp(self):
        # NOTE(review): TestImage constructs Location(locationName=...) but
        # this class uses Location(name=...) — verify which keyword the
        # model actually declares.
        self.location = Location(name='kiambu')
        self.location.saveLocation()
    def test_instance(self):
        self.assertTrue(isinstance(self.location, Location))
    def test_save_location(self):
        self.location.saveLocation()
        locations = Location.getLocations()
        self.assertTrue(len(locations) > 0)
    def test_get_locations(self):
        # NOTE(review): only this fixture's location is ever saved (twice);
        # the > 1 expectation depends on saveLocation creating a new row per
        # call — confirm against the model implementation.
        self.location.saveLocation()
        locations = Location.getLocations()
        self.assertTrue(len(locations) > 1)
    def test_delete_location(self):
        self.location.deleteLocation()
        location = Location.objects.all()
        self.assertTrue(len(location) == 0)
class CategoryTestClass(TestCase):
    """Unit tests for the Category model."""
    def setUp(self):
        # NOTE(review): TestImage constructs Category(categoryName=...) but
        # this class uses Category(name=...) — verify which keyword the
        # model actually declares.
        self.category = Category(name='job')
        self.category.saveCategory()
    def test_instance(self):
        self.assertTrue(isinstance(self.category, Category))
    def test_save_category(self):
        self.category.saveCategory()
        categories = Category.objects.all()
        self.assertTrue(len(categories) > 0)
    def test_delete_category(self):
        self.category.deleteCategory()
        category = Category.objects.all()
        self.assertTrue(len(category) == 0)
| 32.591837
| 123
| 0.66938
|
from django.test import TestCase
from .models import Image, Category, Location
class TestImage(TestCase):
    """Tests for the Image model and its query helpers."""

    def setUp(self):
        self.location = Location(locationName='Kiambu')
        self.location.saveLocation()
        self.category = Category(categoryName='job')
        self.category.saveCategory()
        self.testInstance = Image(id=1, imageName='IMG.jpg', imageDescription=' a test image', imageLocation=self.location,
                                  imageCategory=self.category)

    def test_instance(self):
        self.assertTrue(isinstance(self.testInstance, Image))

    def test_save_image(self):
        self.testInstance.saveImage()
        filterImage = Image.objects.all()
        self.assertTrue(len(filterImage) > 0)

    def test_delete_image(self):
        self.testInstance.deleteImage()
        images = Image.objects.all()
        self.assertTrue(len(images) == 0)

    def test_update_image(self):
        self.testInstance.saveImage()
        self.testInstance.updateImage(self.testInstance.id, 'images/img.jpg')
        # NOTE(review): the filter path 'images/test.jpg' differs from the
        # updated path 'images/img.jpg' -- confirm what updateImage stores.
        imgUpdt = Image.objects.filter(image='images/test.jpg')
        self.assertTrue(len(imgUpdt) > 0)

    def test_get_image_by_id(self):
        imageF = self.testInstance.getimageById(self.testInstance.id)
        image = Image.objects.filter(id=self.testInstance.id)
        # NOTE(review): assertTrue's second argument is a message, not a
        # comparison operand -- assertEqual may have been intended.
        self.assertTrue(imageF, image)

    def test_search_image_by_location(self):
        self.testInstance.saveImage()
        foundImages = self.testInstance.filterimageByLocation(imageLocation='Kiambu')
        # Bug fix: was `found_images`, an undefined name (NameError at
        # runtime); the query result is bound to `foundImages` above.
        self.assertTrue(len(foundImages) == 1)

    def test_search_image_by_category(self):
        category = 'food'
        foundImages = self.testInstance.searchImage(category)
        # Bug fix: was `found_img`, an undefined name (NameError at
        # runtime); the search result is bound to `foundImages` above.
        self.assertTrue(len(foundImages) > 1)

    def tearDown(self):
        Image.objects.all().delete()
        Location.objects.all().delete()
        Category.objects.all().delete()
class TestLocation(TestCase):
    """Tests for the Location model helpers."""

    def setUp(self):
        self.location = Location(name='kiambu')
        self.location.saveLocation()

    def test_instance(self):
        self.assertIsInstance(self.location, Location)

    def test_save_location(self):
        self.location.saveLocation()
        stored = Location.getLocations()
        self.assertTrue(len(stored) > 0)

    def test_get_locations(self):
        self.location.saveLocation()
        stored = Location.getLocations()
        self.assertTrue(len(stored) > 1)

    def test_delete_location(self):
        self.location.deleteLocation()
        remaining = Location.objects.all()
        self.assertTrue(len(remaining) == 0)
class CategoryTestClass(TestCase):
    """Tests for the Category model helpers."""

    def setUp(self):
        self.category = Category(name='job')
        self.category.saveCategory()

    def test_instance(self):
        self.assertIsInstance(self.category, Category)

    def test_save_category(self):
        self.category.saveCategory()
        stored = Category.objects.all()
        self.assertTrue(len(stored) > 0)

    def test_delete_category(self):
        self.category.deleteCategory()
        remaining = Category.objects.all()
        self.assertTrue(len(remaining) == 0)
| true
| true
|
f7145f89446bea1ed70f31be8e13fd069d3d268f
| 16,419
|
py
|
Python
|
venv/lib/python2.7/site-packages/sklearn/base.py
|
bopopescu/fbserver
|
e812dbc4dc0cbf2fda19473015a3d7e253718a19
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python2.7/site-packages/sklearn/base.py
|
bopopescu/fbserver
|
e812dbc4dc0cbf2fda19473015a3d7e253718a19
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python2.7/site-packages/sklearn/base.py
|
bopopescu/fbserver
|
e812dbc4dc0cbf2fda19473015a3d7e253718a19
|
[
"Apache-2.0"
] | 1
|
2020-07-23T19:26:19.000Z
|
2020-07-23T19:26:19.000Z
|
"""Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
###############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.
    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.
    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned
    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        # Clone each member and rebuild the same container type.
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "it does not implement a 'get_params' methods."
                            % (repr(estimator), type(estimator)))
    klass = estimator.__class__
    # Shallow parameter dict; each value is itself cloned (deepcopy fallback
    # for non-estimator values).
    new_object_params = estimator.get_params(deep=False)
    for name, param in six.iteritems(new_object_params):
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)
    # quick sanity check of the parameters of the clone
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                  and param1.shape[0] > 0
                  and isinstance(param2, np.ndarray)
                  and param2.ndim > 0
                  and param2.shape[0] > 0):
                # Cheap spot check: shape, dtype, and the two end elements
                # instead of a full element-wise comparison.
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Spot check: class, first/last stored values, nnz, shape.
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            equality_test = new_object_params[name] == params_set[name]
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'
    Parameters
    ----------
    params: dict
        The dictionary to pretty print
    offset: int
        The offset in characters to add at the begin of each line.
    printer:
        The function to convert entries to strings, typically
        the builtin str or repr
    """
    # Do a multi-line justified repr:
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset // 2) * ' '
    for i, (k, v) in enumerate(sorted(six.iteritems(params))):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        # Truncate very long entries to keep the repr readable.
        if len(this_repr) > 500:
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            # Wrap to a new line once ~75 columns are reached.
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)
    # Restore the caller's numpy print options.
    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
###############################################################################
class BaseEstimator(object):
    """Base class for all estimators in scikit-learn
    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their ``__init__`` as explicit keyword
    arguments (no ``*args`` or ``**kwargs``).
    """
    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent
        args, varargs, kw, default = inspect.getargspec(init)
        if varargs is not None:
            raise RuntimeError("scikit-learn estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s doesn't follow this convention."
                               % (cls, ))
        # Remove 'self'
        # XXX: This is going to fail if the init is a staticmethod, but
        # who would do this?
        args.pop(0)
        args.sort()
        return args
    def get_params(self, deep=True):
        """Get parameters for this estimator.
        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        for key in self._get_param_names():
            # We need deprecation warnings to always be on in order to
            # catch deprecated param values.
            # This is set in utils/__init__.py but it gets overwritten
            # when running under python3 somehow.
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    value = getattr(self, key, None)
                if len(w) and w[0].category == DeprecationWarning:
                    # if the parameter is deprecated, don't show it
                    continue
            finally:
                # undo the simplefilter installed above
                warnings.filters.pop(0)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                # flatten nested estimator params as "<name>__<subname>"
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out
    def set_params(self, **params):
        """Set the parameters of this estimator.
        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.
        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in six.iteritems(params):
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if not name in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if not key in valid_params:
                    raise ValueError('Invalid parameter %s ' 'for estimator %s'
                                     % (key, self.__class__.__name__))
                setattr(self, key, value)
        return self
    def __repr__(self):
        """Repr shows the class name and its (shallow) parameters."""
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn."""
    def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples,)
            True labels for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.
        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .metrics import accuracy_score
        predictions = self.predict(X)
        return accuracy_score(y, predictions, sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in scikit-learn."""
    def score(self, X, y, sample_weight=None):
        """Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the regression
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
        Best possible score is 1.0, lower values are worse.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples,)
            True values for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.
        Returns
        -------
        score : float
            R^2 of self.predict(X) wrt. y.
        """
        # Imported lazily to avoid a circular import at module load time.
        from .metrics import r2_score
        predictions = self.predict(X)
        return r2_score(y, predictions, sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn."""
    def fit_predict(self, X, y=None):
        """Performs clustering on X and returns cluster labels.
        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        # Default implementation: fit, then expose the labels the fit
        # computed; subclasses may override with something more efficient.
        self.fit(X)
        labels = self.labels_
        return labels
class BiclusterMixin(object):
    """Mixin class for all bicluster estimators in scikit-learn"""
    @property
    def biclusters_(self):
        """Convenient way to get row and column indicators together.
        Returns the ``rows_`` and ``columns_`` members.
        """
        return self.rows_, self.columns_
    def get_indices(self, i):
        """Row and column indices of the i'th bicluster.
        Only works if ``rows_`` and ``columns_`` attributes exist.
        Returns
        -------
        row_ind : np.array, dtype=np.intp
            Indices of rows in the dataset that belong to the bicluster.
        col_ind : np.array, dtype=np.intp
            Indices of columns in the dataset that belong to the bicluster.
        """
        from .cluster.bicluster.utils import get_indices
        rows, columns = self.rows_[i], self.columns_[i]
        return get_indices(rows, columns)
    def get_shape(self, i):
        """Shape of the i'th bicluster.
        Returns
        -------
        shape : (int, int)
            Number of rows and columns (resp.) in the bicluster.
        """
        from .cluster.bicluster.utils import get_shape
        rows, columns = self.rows_[i], self.columns_[i]
        return get_shape(rows, columns)
    def get_submatrix(self, i, data):
        """Returns the submatrix corresponding to bicluster `i`.
        Works with sparse matrices. Only works if ``rows_`` and
        ``columns_`` attributes exist.
        """
        from .cluster.bicluster.utils import get_submatrix
        rows, columns = self.rows_[i], self.columns_[i]
        return get_submatrix(rows, columns, data)
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn."""
    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it.
        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.
        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.
        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Default implementation: fit (with or without targets), then
        # transform the same data; subclasses may override with a fused,
        # more efficient version.
        if y is None:
            # fit method of arity 1 (unsupervised transformation)
            fitted = self.fit(X, **fit_params)
        else:
            # fit method of arity 2 (supervised transformation)
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn."""
    # this is just a tag for the moment: no behavior, only used to mark
    # estimators that wrap other estimators
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
"""Returns the final estimator if there is any."""
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    # Look through any meta-estimator wrapping, then check the mixin tag.
    final = _get_sub_estimator(estimator)
    return isinstance(final, ClassifierMixin)
| 36.006579
| 79
| 0.554053
|
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
items=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
@classmethod
def _get_param_names(cls):
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
args.pop(0)
args.sort()
return args
def get_params(self, deep=True):
out = dict()
for key in self._get_param_names():
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if not name in valid_params:
raise ValueError('Invalid parameter %s for estimator %s' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if not key in valid_params:
raise ValueError('Invalid parameter %s ' 'for estimator %s'
% (key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
def score(self, X, y, sample_weight=None):
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
def score(self, X, y, sample_weight=None):
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
def fit_predict(self, X, y=None):
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
@property
def biclusters_(self):
return self.rows_, self.columns_
def get_indices(self, i):
from .cluster.bicluster.utils import get_indices
return get_indices(self.rows_[i], self.columns_[i])
def get_shape(self, i):
from .cluster.bicluster.utils import get_shape
return get_shape(self.rows_[i], self.columns_[i])
def get_submatrix(self, i, data):
from .cluster.bicluster.utils import get_submatrix
return get_submatrix(self.rows_[i], self.columns_[i], data)
###############################################################################
class TransformerMixin(object):
def fit_transform(self, X, y=None, **fit_params):
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
###############################################################################
class MetaEstimatorMixin(object):
# this is just a tag for the moment
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
estimator = _get_sub_estimator(estimator)
return isinstance(estimator, ClassifierMixin)
| true
| true
|
f7145fcd7c7022a6a463fb955a8d7559d8d3a21d
| 64,450
|
py
|
Python
|
amd64-linux/lib/python/update_config.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 1
|
2020-06-15T10:41:18.000Z
|
2020-06-15T10:41:18.000Z
|
amd64-linux/lib/python/update_config.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | null | null | null |
amd64-linux/lib/python/update_config.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 3
|
2020-08-10T10:25:02.000Z
|
2021-09-12T01:12:09.000Z
|
from sim_core import *
import conf
import sim
# dictionary with list of update functions to call
# Maps build version (int) -> list of callbacks registered for it.
update_functions = {}
def install_configuration_update(version, f):
    """Register update callback f for the given build version."""
    registered = list(update_functions.get(version, []))
    registered.append(f)
    update_functions[version] = registered
def update_configuration(set):
    # Run all registered update callbacks on a checkpoint object set.
    # `set` maps object names to pre-conf objects; callbacks for versions
    # between the checkpoint's build and the current build run in ascending
    # order.  Callback failures are reported but do not abort the update.
    global first_queue
    try:
        if set['sim'].version > conf.sim.version:
            print ('Loading a configuration created in a newer Simics version '
                   '(build %d) is not supported, and may not work. Current '
                   'Simics build is %d.' % (set['sim'].version,
                                            conf.sim.version))
            return
    except:
        # no 'sim' object, or no version attribute, in the checkpoint
        if SIM_get_verbose():
            print 'No version information in checkpoint - not updating.'
        return
    # Remember some object's queue for update functions that need one.
    for x in set.values():
        try:
            first_queue = x.queue
            break
        except:
            pass
    vers = sorted(update_functions.keys())
    for ver in vers:
        if ver < set['sim'].version:
            continue
        # allow callback on same version
        if ver > conf.sim.version:
            print ("Warning: update_config callback for future version "
                   "found: %s" % ver)
            continue
        if SIM_get_verbose():
            print 'Updating from version %d' % ver
        for f in update_functions[ver]:
            try:
                f(set)
            except Exception, msg:
                print 'Update function for version %d failed: %s' % (ver, msg)
#######################
def all_objects(set, classname):
    """Return every object in the checkpoint set with the given class name."""
    matches = []
    for candidate in set.values():
        if candidate.classname == classname:
            matches.append(candidate)
    return matches
def for_all_objects(set, classname, function):
    """Invoke function(set, obj) on every object of class classname."""
    for match in all_objects(set, classname):
        function(set, match)
def all_objects_with_attr(set, attrname):
    """Return every object in the checkpoint set carrying attrname."""
    matches = []
    for candidate in set.values():
        if hasattr(candidate, attrname):
            matches.append(candidate)
    return matches
def remove_attr(obj, name):
    # Delete attribute `name` from obj; silently ignore objects that do
    # not have it (or where deletion raises AttributeError).
    try:
        delattr(obj, name)
    except AttributeError:
        pass
def rename_attr(obj, new_attr, old_attr):
    # Copy old_attr's value to new_attr, then drop old_attr.  If obj lacks
    # old_attr nothing is copied and the removal below is a no-op.
    try:
        setattr(obj, new_attr, getattr(obj, old_attr))
    except AttributeError:
        pass
    remove_attr(obj, old_attr)
def remove_class_attr(set, classname, name):
    """Drop attribute `name` from every object of class classname."""
    for target in all_objects(set, classname):
        remove_attr(target, name)
def remove_class(set, classname):
    # Delete every object of the given class from the checkpoint set.
    # NOTE(review): this definition is shadowed by a second `remove_class`
    # defined later in this module; only the later one takes effect.
    for l in [x.name for x in set.values() if x.classname == classname]:
        del set[l]
# Processor model class names grouped by architecture family; used by the
# update functions below to locate all CPU objects of a given kind.
x86_classes = ["x86-386", "x86-386-387", "x86-486dx2", "x86-486sx",
               "x86-pentium", "x86-pentium-mmx", "x86-ppro",
               "x86-p2", "x86-p3", "x86-p4", "x86-p4e",
               "x86-hammer", "x86-k7"]
mips_classes = ["mips-4kc", "mips-5kc", "mips-rm7000-be",
                "mips-e9000-be"]
ppc_classes = ['ppc403gcx', 'ppc405gp', 'ppc440gp', 'ppc440gx', 'ppc603e',
               'ppc7400', 'ppc7447', 'ppc7450', 'ppc7450-36', 'ppc7457',
               'ppc750', 'ppc750fx', 'ppc750gx', 'ppc755', 'ppc970fx',
               'ppce500', 'ppce600', 'ppc-power6']
def remove_class(set, classname):
    # NOTE(review): duplicate definition -- this redefines and replaces the
    # `remove_class` declared earlier in this module.
    # Delete every object of the given class from the checkpoint set.
    for obj in all_objects(set, classname):
        del set[obj.name]
#######################
def update_1396_to_1397(set):
    """Drop the misspelt LTRETCR register from 8641/8548 RapidIO objects."""
    rapidio_objs = (all_objects(set, 'mpc8641-rapidio') +
                    all_objects(set, 'mpc8548-rapidio'))
    for rio in rapidio_objs:
        # misspelt register name
        remove_attr(rio, "regs_LTRETCR")
def update_1390_to_1391(set):
    """Remove stale es_asic attributes (bar3_CT, page_buffer_crc)."""
    for asic in all_objects(set, 'es_asic'):
        remove_attr(asic, "bar3_CT")
        remove_attr(asic, "page_buffer_crc")
def update_1378_to_1379(set):
    """Grow the PIC per-core task priority register list to 32 entries."""
    for pic in all_objects(set, 'mpc8641-pic'):
        # create task priority registers for 30 more cores:
        # pad CTPR with zeros until a length of 32
        padding = [0] * (32 - len(pic.P_CTPR))
        pic.P_CTPR = pic.P_CTPR + padding
def update_1377_to_1378(set):
    """Drop attributes made obsolete in build 1378."""
    for pcie in all_objects(set, 'mpc8641-pcie'):
        remove_attr(pcie, "pci_config_device_id")
    for mv_class in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, mv_class):
            remove_attr(mv, "regs_port_GoodOctetsReceived")
            remove_attr(mv, "regs_port_GoodOctetsSent")
    for asic in all_objects(set, "es_asic"):
        remove_attr(asic, "bar3_REVTO")
def add_pex8111_irq_level(set, obj):
    """Give a pex8111 object the irq_level attribute introduced in 1368."""
    obj.irq_level = 4
def update_8x4x_rapidio_1371(set, obj):
    """Remove outbound and inbound message-unit registers (dropped in 1371)."""
    outbound = "OMR OSR ODQDPAR OSAR ODPR ODATR ODCR ODQEPAR"
    inbound = "IMR ISR IFQDPAR IDQEPAR IFQEPAR"
    for reg in (outbound + " " + inbound).split():
        remove_attr(obj, "regs_" + reg)
def update_1370_to_1371(set):
    """Apply the 1371 RapidIO register cleanup to 8641/8548 devices."""
    for classname in ('mpc8641-rapidio', 'mpc8548-rapidio'):
        for rio in all_objects(set, classname):
            update_8x4x_rapidio_1371(set, rio)
def update_1367_to_1368(set):
    """Add the new irq_level attribute to every pex8111 object."""
    for dev in all_objects(set, 'pex8111'):
        add_pex8111_irq_level(set, dev)
def update_1366_to_1367(set):
    """Move the IDMA interrupt cause register to its partial-register name."""
    # new partial registers
    for classname in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, classname):
            rename_attr(mv, 'partial_regs_IDMA_Interrupt_Cause', 'regs_IDMA_Interrupt_Cause')
def update_8x4x_rapidio_1366(set, obj):
    # Ensure the RapidIO controller has an inbound memory space; checkpoints
    # from a networked setup in another Simics may lack one, so create it
    # and register it in the checkpoint set.
    if not hasattr(obj, "inbound_space"):
        # create local link if network in other simics
        space = pre_conf_object(obj.name + "_inbound_space", 'memory-space')
        set[obj.name + '_inbound_space'] = space
        setattr(obj, "inbound_space", space)
    # On 8641/8548 the message-unit registers became two-element lists
    # (regs_M_*); migrate any old scalar register values.
    if obj.classname in ('mpc8641-rapidio', 'mpc8548-rapidio'):
        for reg in "EODQEPAR EOSAR EODQDPAR EIFQEPAR EIFQDPAR".split():
            if hasattr(obj, "regs_"+reg):
                setattr(obj, "regs_M_"+reg, [ getattr(obj, "regs_"+reg), 0 ])
                delattr(obj, "regs_"+reg)
def update_1365_to_1366(set):
    """Class renames and register/attribute cleanups for build 1366."""
    # The 8641 DUART is now modelled by the generic NS16550 class.
    for duart in all_objects(set, 'mpc8641-duart'):
        duart.__class_name__ = 'NS16550'
    for classname in ('mpc8641-rapidio', 'mpc8540-rapidio', 'mpc8548-rapidio'):
        for rio in all_objects(set, classname):
            update_8x4x_rapidio_1366(set, rio)
    for classname in ('mpc8641-i2c', 'mpc8540-i2c', 'mpc8548-i2c'):
        for i2c in all_objects(set, classname):
            delattr(i2c, 'i2c_device_state')
    # new partial registers
    for classname in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, classname):
            rename_attr(mv, 'partial_regs_IDMA_Interrupt_Mask', 'regs_IDMA_Interrupt_Mask')
def update_1364_to_1365(set):
    """Wipe all registers of mpc8641-gu objects; the register layout changed."""
    for gu in all_objects(set, 'mpc8641-gu'):
        # register attributes are recognised by their all-caps names
        for attr in [a for a in dir(gu) if a.isupper()]:
            delattr(gu, attr)
def update_1363_to_1364(set):
    """Make processor_number unique across all processors in the set.

    The first occurrence of each number is kept; later duplicates get
    fresh numbers above the previous maximum, in iteration order.
    """
    # Find the highest processor number currently in use.
    max_cpu_num = -1
    for c in all_objects_with_attr(set, 'processor_number'):
        if c.processor_number > max_cpu_num:
            max_cpu_num = c.processor_number
    next_cpu_num = max_cpu_num + 1
    taken_nums = {}
    for c in all_objects_with_attr(set, 'processor_number'):
        # Fixed: dict.has_key() was used here, which no longer exists in
        # Python 3; the `in` operator is equivalent in both Python 2 and 3.
        if c.processor_number in taken_nums:
            c.processor_number = next_cpu_num
            next_cpu_num += 1
        else:
            taken_nums[c.processor_number] = True
def rename_mv_pci_access_control_attr(obj):
    """Drop the six PCI access-control window register triples from obj."""
    templates = ('regs_pci_bus_PCI_Access_Control_Base_%d_L',
                 'regs_pci_bus_PCI_Access_Control_Base_%d_H',
                 'regs_pci_bus_PCI_Access_Control_Size_%d')
    for i in range(6):
        for template in templates:
            remove_attr(obj, template % i)
def update_1361_to_1362(set):
    """Remove PCI access-control registers from all MV64xxx controllers."""
    for classname in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, classname):
            rename_mv_pci_access_control_attr(mv)
def remap_pq2(set, mem_map):
    """Rewrite legacy PQ2 on-chip device mappings to offset 0.

    Entries in mem_map.map are lists [base, object, function, offset, ...].
    Every PowerQUICC II device (mpc8260/8270/8280 classes) still mapped at
    its pre-update offset has its offset rewritten to 0 in place.
    """
    # Key is device class suffix plus mapping function number; value is the
    # old offset identifying a mapping that still needs migration.
    remap = {'clocks0': 0x10c80,
             'brg0': 0x119f0,
             'cpm-mux0': 0x11b00,
             'cpm-timers0': 0x10d80,
             'cpm0': 0x119c0,
             'fcc0': 0x11300,
             'i2c_dev1': 0x08afc,
             'ic0': 0x10c00,
             'io-port0': 0x10d00,
             'mc0': 0x10100,
             'mcc0': 0x11b30,
             'pci0': 0x10430,
             'pci1': 0x101ac,
             'scc0': 0x11a00,
             'sdma0': 0x11018,
             'si0': 0x11b20,
             'sit0': 0x10220,
             'siu0': 0x10000,
             'smc0': 0x11a82,
             'spi0': 0x11aa0}
    # Remap all PQ2 objects at offset 0
    for e in mem_map.map:
        obj = e[1]
        fun = e[2]
        ofs = e[3]
        if not obj.classname[:8] in ['mpc8260-', 'mpc8270-', 'mpc8280-']:
            continue
        key = obj.classname[8:] + str(fun)
        # Fixed: dict.has_key() was used here, removed in Python 3; the
        # `in` operator is equivalent in both Python 2 and 3.
        if key in remap and ofs == remap[key]:
            e[3] = 0
def update_1358_to_1359(set):
    """Remap legacy PQ2 device offsets in every memory space."""
    for space in all_objects(set, 'memory-space'):
        remap_pq2(set, space)
def update_1357_to_1358(set):
    """Drop the port MAC MIB counter register from MV64xxx controllers."""
    for classname in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, classname):
            remove_attr(mv, "regs_port_MAC_MIB_Counters")
def update_1354_to_1355(set):
    """Split the packed scc_regs list into named coprocessor attributes.

    For ARM cores the old `scc_regs` attribute held system control
    coprocessor values by index; each value gets its own attribute and
    the list is dropped.
    """
    # Per-class mapping from scc_regs index to the new attribute name.
    scc_reg_subst = {
        "armv5te": {
            0: "main_id",
            1: "cache_type",
            2: "control",
            3: "translation_table_base",
            4: "domain_access_control",
            6: "fault_status",
            7: "fault_address",
            },
        "arm966e-s": {
            0: "main_id",
            1: "tcm_size",
            2: "control",
            10: "trace_process_identifier",
            16: "configuration_control",
            17: "bist_control",
            18: "instruction_bist_address",
            19: "instruction_bist_general",
            22: "data_bist_address",
            23: "data_bist_general",
            }
        }
    for cl in ["armv5te", "arm966e-s"]:
        for o in all_objects(set, cl):
            scc_regs = getattr(o, "scc_regs")
            # Fixed: .iteritems() is Python-2-only; .items() behaves the
            # same here and works in both Python 2 and 3.
            for (i, name) in scc_reg_subst[cl].items():
                setattr(o, name, scc_regs[i])
            remove_attr(o, "scc_regs")
def update_1350_to_1351(set):
    """Drop the obsolete per-board MAC address attributes."""
    for board_class in ("ep8260", "sbc8260", "cpp8260",
                        "gda8540", "mpc8540ads"):
        for board in all_objects(set, board_class):
            remove_attr(board, "mac_address0")
            remove_attr(board, "mac_address1")
def update_1348_to_1349(set):
    """Rename sx_asic startup GPIO* registers to the SRC* register names."""
    renames = (('startup_SRCRST', 'startup_GPIOOUT'),
               ('startup_SRCLSRI', 'startup_GPIOIN'),
               ('startup_SRCRSTSTAT', 'startup_GPIOCR'),
               ('startup_SRCRSTRSN', 'startup_GPIOINT'))
    for asic in all_objects(set, "sx_asic"):
        for new_name, old_name in renames:
            rename_attr(asic, new_name, old_name)
def update_1340_to_1341(set):
    # hypersim-patttern-matcher fixes:
    # 1. Add a CPUs attribute
    # 2. Remove sample event from step queue, it will be reposted in time q.
    for o in all_objects(set, "hypersim-pattern-matcher"):
        o.cpus = [o.queue]
    for obj in set.values():
        try:
            SIM_get_class(obj.classname)
        except SimExc_General, msg:
            # class not available in this Simics build -- skip the object
            continue
        if 'processor' not in sim.classes[obj.classname].interfaces:
            continue
        # Filter the "do pattern match" event out of the step queue.
        sq = obj.step_queue
        nsq = []
        for e in sq:
            if e[1] != "do pattern match":
                nsq.append(e)
        obj.step_queue = nsq
def update_1334_to_1335(set):
    """Checkpoint update 1334 -> 1335: x86 models renamed the
    "debugctlmsr" attribute to "ia32_debugctl"."""
    for classname in x86_classes:
        for cpu in all_objects(set, classname):
            if "debugctlmsr" in dir(cpu):
                rename_attr(cpu, "ia32_debugctl", "debugctlmsr")
def update_1332_to_1333(set):
    """Checkpoint update 1332 -> 1333: x86 MSR attribute renames,
    hyperthreading "threads" list construction, and apic_p4 -> apic
    class merge."""
    # Pairs of [old_name, new_name] for MSR attribute renames.
    msr_translate = [["msr_pat"            , "ia32_cr_pat"],
                     ["msr_syscfg"         , "syscfg"],
                     ["msr_top_mem"        , "top_mem"],
                     ["msr_top_mem2"       , "top_mem2"],
                     ["msr_iorr_base0"     , "iorrbase0"],
                     ["msr_iorr_base1"     , "iorrbase1"],
                     ["msr_iorr_mask0"     , "iorrmask0"],
                     ["msr_iorr_mask1"     , "iorrmask1"],
                     ["msr_hwcr"           , "hwcr"],
                     ["msr_manid"          , "manid"],
                     ["msr_nb_cfg"         , "nb_cfg"],
                     ["msr_fidvid_ctl"     , "fidvid_ctl"],
                     ["msr_fidvid_status"  , "fidvid_status"],
                     ["msr_iotrap_addr0"   , "iotrap_addr0"],
                     ["msr_iotrap_addr1"   , "iotrap_addr1"],
                     ["msr_iotrap_addr2"   , "iotrap_addr2"],
                     ["msr_iotrap_addr3"   , "iotrap_addr3"],
                     ["msr_iotrap_ctl"     , "iotrap_ctl"],
                     ["msr_smm_base"       , "smm_base"],
                     ["msr_smm_addr"       , "smm_addr"],
                     ["msr_smm_mask"       , "smm_mask"],
                     ["mcg_status"         , "ia32_mcg_status"],
                     ["mcg_ctl"            , "ia32_mcg_ctl"],
                     ["sysenter_cs"        , "ia32_sysenter_cs"],
                     ["sysenter_eip"       , "ia32_sysenter_eip"],
                     ["sysenter_esp"       , "ia32_sysenter_esp"],
                     ["p5_mc_addr"         , "ia32_p5_mc_addr"],
                     ["p5_mc_type"         , "ia32_p5_mc_type"]]
    # remove wrong MSR from x86 processors
    for cl in ["x86-hammer", "x86-k7"]:
        for o in all_objects(set, cl):
            if "p5_mc_addr" in dir(o):
                remove_attr(o, "p5_mc_addr")
            if "p5_mc_type" in dir(o):
                remove_attr(o, "p5_mc_type")
    # translate old MSRs into new
    for cl in x86_classes:
        for o in all_objects(set, cl):
            if "started" in dir(o):
                remove_attr(o, "started")
            for p in msr_translate:
                old,new = p
                if old in dir(o):
                    rename_attr(o, new, old)
    # build correct threads attribute for hyperthreaded cpus
    shared_state_sets = {}
    for cl in x86_classes:
        for o in all_objects(set, cl):
            if "shared_state" in dir(o):
                if o.shared_state:
                    try:
                        shared_state_sets[o.shared_state].append(o)
                    except:
                        # First sibling seen: seed the set with the
                        # shared-state owner followed by this thread.
                        shared_state_sets[o.shared_state] = [o.shared_state, o]
                remove_attr(o, "shared_state")
    for k in shared_state_sets.keys():
        for o in shared_state_sets[k]:
            o.threads = shared_state_sets[k]
    # update apic_p4 to apic with P4 state
    for o in all_objects(set, "apic"):
        if "lvt_thermal_sensor" in dir(o):
            # this is an apic_p4, convert it but let broadcast address untouched
            o.apic_type = "P4"
            o.version = 0x14
        else:
            o.apic_type = "P6"
            o.version = 0x18
def update_1329_to_1330(set):
    """Checkpoint update 1329 -> 1330: give each ppc403gcx-cfg object
    an "ic" attribute by locating the interrupt controller mapped into
    the CPU's DCR space."""
    for cfg in all_objects(set, "ppc403gcx-cfg"):
        if not "ic" in dir(cfg):
            for m in cfg.cpu.dcr_space.map:
                if m[1].classname == "ppc403gcx-ic":
                    cfg.ic = m[1]
                    break
def update_pq2_attrs_1329(set):
    """Rework PowerQUICC II device attributes for build 1329: give the
    CPM a queue, derive the tx-enable state from the controller mode
    registers, and drop the now-removed txbd-monitor objects."""
    # The cpm module now posts event, add queue attribute if none defined
    # Take the queue from associated mcc1 module
    for o in (all_objects(set, "mpc8260-cpm") +
              all_objects(set, "mpc8270-cpm") +
              all_objects(set, "mpc8280-cpm")):
        if not "queue" in dir(o):
            o.queue = o.mcc1.queue
    # Remove txbd_monitor references in tx_channels_active in fcc_atm
    for o in all_objects(set, "mpc8260-fcc-atm") + all_objects(set, "mpc8280-fcc-atm"):
        o.tx_channels_active = [x[0] for x in o.tx_channels_active]
        if "fcc" in dir(o):
            # GFMR bit 4 is the transmitter-enable bit; "not not"
            # normalizes it to 0/1.
            o.ram_tx_enabled = not not (o.fcc.reg_GFMR & (1 << 4))
        else:
            o.ram_tx_enabled = 0
    # Fix FCC fast ethernets
    for o in (all_objects(set, "mpc8260-fcc-fast-ethernet") +
              all_objects(set, "mpc8270-fcc-fast-ethernet") +
              all_objects(set, "mpc8280-fcc-fast-ethernet")):
        remove_attr(o, "txbd_monitor")
        if "fcc" in dir(o):
            o.tx_enabled = not not (o.fcc.reg_GFMR & (1 << 4))
        else:
            o.tx_enabled = 0
    # Fix SCC UARTs
    for o in (all_objects(set, "mpc8260-scc-uart") +
              all_objects(set, "mpc8270-scc-uart") +
              all_objects(set, "mpc8280-scc-uart")):
        remove_attr(o, "txbd_monitor")
        if "scc" in dir(o):
            o.ram_tx_enabled = not not (o.scc.reg_GSMR_L & (1 << 4))
        else:
            o.ram_tx_enabled = 0
    # Fix SMC UARTs
    for o in (all_objects(set, "mpc8260-smc-uart") +
              all_objects(set, "mpc8270-smc-uart") +
              all_objects(set, "mpc8280-smc-uart")):
        remove_attr(o, "txbd_monitor")
        if "smc" in dir(o):
            o.ram_tx_enabled = not not (o.smc.reg_SMCMR & (1 << 1))
        else:
            o.ram_tx_enabled = 0
    # Finally, remove the txbd-monitor objects
    remove_class(set, "mpc8260-txbd-monitor")
    remove_class(set, "mpc8270-txbd-monitor")
    remove_class(set, "mpc8280-txbd-monitor")
def update_1328_to_1329(set):
    """Checkpoint update 1328 -> 1329 (PQ2 attribute reshuffle)."""
    update_pq2_attrs_1329(set)
def update_mdio_attrs(set):
    """Purge all in-flight MDIO transfer state from every object; the
    attribute format changed and the state is rebuilt on demand."""
    stale_attrs = ('mii_nvram_read_bit', 'mii_nvram_last_clock',
                   'mii_nvram_addr', 'mii_nvram_data_in',
                   'mii_nvram_op', 'mii_nvram_word',
                   'mii_nvram_in_size', 'nvram_read_bit',
                   'nvram_last_clock', 'nvram_addr', 'nvram_data_in',
                   'nvram_in_size', 'nvram_op', 'nvram_word',
                   'serial_reg', 'serial_op', 'serial_addr',
                   'serial_word', 'serial_read_bit', 'serial_in_size')
    for obj in set.values():
        for attr in stale_attrs:
            remove_attr(obj, attr)
def update_1327_to_1328(set):
    """Checkpoint update 1327 -> 1328: MV64xxx PCI config data rename,
    MDIO state purge, and several x86 CPU attribute conversions."""
    for cpu in all_objects(set, "MV64360") + all_objects(set, "MV64470"):
        rename_attr(cpu, 'partial_regs_pci_bus_PCI_Configuration_Data', 'regs_pci_bus_PCI_Configuration_Data')
    update_mdio_attrs(set)
    for cls in x86_classes:
        for obj in all_objects(set, cls):
            # Convert in_halt_state to activity_state
            in_halt_state = getattr(obj, "in_halt_state")
            activity_state = 0
            if in_halt_state:
                activity_state = 1
            setattr(obj, "activity_state", activity_state)
            remove_attr(obj, "in_halt_state")
            # Remove useless pending_device attribute
            if hasattr(obj, "pending_device"):
                remove_attr(obj, "pending_device")
            # Rename pni_enabled to cpuid_sse3
            if hasattr(obj, "pni_enabled"):
                rename_attr(obj, "cpuid_sse3", "pni_enabled")
            # Temporary interrupt mask
            q = getattr(obj, "step_queue")
            has_interrupt_mask = 0
            for e in q:
                if e[1] == "release temporary interrupt mask":
                    has_interrupt_mask = 1
            # Drop the mask-release events; the mask is now a plain
            # attribute instead of a queued event.
            q = filter(lambda a: a[1] != "release temporary interrupt mask", q)
            setattr(obj, "step_queue", q)
            temp_mask = 0
            if has_interrupt_mask:
                temp_mask = 1 # Block_By_Sti
            setattr(obj, "temporary_interrupt_mask", temp_mask)
            if (hasattr(obj, "pending_debug_exceptions") and
                getattr(obj, "pending_debug_exceptions")):
                setattr(obj, "pending_debug_exception", 1)
            rename_attr(obj, "pending_debug_exception_dr6", "pending_debug_exceptions")
def mv_for_all_objects(set, classname, function, mv_obj):
    """Call function(set, obj, mv_obj) for every object of classname."""
    for obj in all_objects(set, classname):
        function(set, obj, mv_obj)
def replace_mv64xxx_gbe_ptr_1326(set, gbe_obj):
    """Retarget all references to a removed MV64xxx-gbe object so they
    point at the MV64xxx controller object that absorbed its function.

    The controller is looked up by removing the '_gbe' suffix from the
    gbe object's name.  Bug fix: the old code used
    str.strip('_gbe'), which strips any run of the characters
    '_', 'g', 'b', 'e' from *both* ends of the string (not the
    suffix), mangling names such as 'gbe0_gbe'.
    """
    def update_mv64xxx_gbe_maps(set, space, mv_obj):
        # Rewrite memory-space map entries whose target is a gbe object.
        try:
            maplist = space.map
        except:
            return
        if len([x for x in maplist if (x[1].classname == 'MV64360-gbe' or x[1].classname == 'MV64470-gbe')]) == 0:
            return
        for i in range(len(maplist)):
            if (maplist[i][1].classname == 'MV64360-gbe' or maplist[i][1].classname == 'MV64470-gbe'):
                maplist[i][1] = mv_obj
        space.map = maplist
    def update_mv64xxx_phys(set, phy, mv_obj):
        # Rewire PHY objects whose MAC pointer is a gbe object.
        if (phy.mac.classname == 'MV64360-gbe' or phy.mac.classname == 'MV64470-gbe'):
            phy.mac = mv_obj
    name = gbe_obj.name
    if name.endswith('_gbe'):
        name = name[:-len('_gbe')]
    try:
        mv_obj = set[name]
    except:
        return
    mv_for_all_objects(set, "memory-space", update_mv64xxx_gbe_maps, mv_obj)
    mv_for_all_objects(set, "BCM5421S", update_mv64xxx_phys, mv_obj)
def copy_mv64xxx_attrs_1326(set, gbe_obj):
    """Copy every attribute that also exists on the MV64360 class from
    a removed MV64xxx-gbe object onto the controller object with the
    matching base name.

    Fixes two defects in the old code: str.strip('_gbe') was used to
    derive the controller name (it strips a character *set* from both
    ends, not a suffix), and the attribute copy was performed by
    exec-ing generated source text instead of getattr/setattr.
    """
    name = gbe_obj.name
    if name.endswith('_gbe'):
        name = name[:-len('_gbe')]
    try:
        mv_obj = set[name]
    except:
        return
    # Ensure the class is registered so its attribute list is populated.
    SIM_get_class('MV64360')
    mv_attrs = sim.classes['MV64360'].attributes
    for gbe_attr in dir(gbe_obj):
        if gbe_attr[0:2] == "__":
            continue
        if gbe_attr in mv_attrs:
            setattr(mv_obj, gbe_attr, getattr(gbe_obj, gbe_attr))
def update_mv64xxx_pci_1326(set, obj):
    # Bit 7 of the PCI config header type flags a multi-function
    # device; the MV64xxx PCI functions must advertise it.
    setattr(obj, 'pci_config_header_type', 0x80)
def update_system_cmp_object_list_1326(set, system_classname, obj_classname):
    """Remove entries for removed *_gbe sub-objects from the
    object_list of every component of system_classname.
    (obj_classname is unused but kept for call-site compatibility.)"""
    for obj in all_objects(set, system_classname):
        # Iterate over a copied key list so deletion is safe; the
        # object_list appears to be dict-like keyed by name — see the
        # string-key usage in update_1320_to_1321.
        for l in [x for x in obj.object_list if x[-4:] == "_gbe"]:
            del obj.object_list[l]
def update_rtc_time_1326(set, obj):
    """Normalize an rtc_time attribute that lacks a parsable time
    zone: if strptime rejects it, truncate to the date+time part and
    append " UTC" so old checkpoints keep loading."""
    import time
    val = getattr(obj, "rtc_time")
    try:
        time.strptime(val, '%Y-%m-%d %H:%M:%S %Z')
    except Exception, msg:
        val = val[:len("yyyy-mm-dd HH:MM:SS")]+" UTC"
    setattr(obj, "rtc_time", val)
def update_1326_to_1327(set):
    """Checkpoint update 1326 -> 1327: fold the MV64xxx-gbe objects
    into their controllers, mark the MV64xxx PCI functions as
    multi-function, and tolerate invalid rtc_time values."""
    # remove mv64xxx_gbe pointers
    for_all_objects(set, 'MV64360-gbe', replace_mv64xxx_gbe_ptr_1326)
    for_all_objects(set, 'MV64470-gbe', replace_mv64xxx_gbe_ptr_1326)
    # copy attrs from mv64xxx-gbe to mv64xxx
    for_all_objects(set, 'MV64360-gbe', copy_mv64xxx_attrs_1326)
    for_all_objects(set, 'MV64470-gbe', copy_mv64xxx_attrs_1326)
    # remove mv64xxx-gbe
    remove_class(set, 'MV64360-gbe')
    remove_class(set, 'MV64470-gbe')
    # remove from mv64xxx-gbe system component list
    update_system_cmp_object_list_1326(set, 'sbc750gx-board', 'MV64360-gbe')
    update_system_cmp_object_list_1326(set, 'daredevil-board', 'MV64470-gbe')
    update_system_cmp_object_list_1326(set, 'atlantis-board', 'MV64360-gbe')
    # make sure mv64xxx_pci_fx header_type[7] = 1
    for_all_objects(set, 'MV64360-pci-f0', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64360-pci-f1', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64360-pci-f2', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64360-pci-f3', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64360-pci-f4', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64470-pci-f0', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64470-pci-f1', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64470-pci-f2', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64470-pci-f3', update_mv64xxx_pci_1326)
    for_all_objects(set, 'MV64470-pci-f4', update_mv64xxx_pci_1326)
    # Permit loading of checkpoints with invalid rtc_time, but which
    # could be loaded before
    for_all_objects(set, 'x86-apic-system', update_rtc_time_1326)
def update_etherlink_1321(set, link):
    """Every attached device must now subscribe to the Ethernet
    broadcast address; add it to each device's listen list if absent."""
    # network interfaces should now register for the broadcast address
    bcast = ["ff:ff:ff:ff:ff:ff", "ff:ff:ff:ff:ff:ff"]
    for dev_entry in link.devices:
        listen_macs = dev_entry[4][0]
        if bcast not in listen_macs:
            listen_macs.append(bcast)
def rename_piix4_usb_1322(set, obj):
    # Old checkpoints get a non-functional stand-in USB controller.
    obj.__class_name__ = 'piix4_usb_dummy'
def rename_ppc440gx_obp_1322(set, obj):
    # Fix the historical 'obp'/'opb' class-name typo.
    obj.__class_name__ = 'ppc440gx-opb'
def update_1321_to_1322(set):
    """Checkpoint update 1321 -> 1322: class renames."""
    # Incorrect name of class
    for_all_objects(set, 'ppc440gx-obp', rename_ppc440gx_obp_1322)
    # Use dummy PIIX4 USB for old checkpoints
    for_all_objects(set, 'piix4_usb', rename_piix4_usb_1322)
def update_1320_to_1321(set):
    """Checkpoint update 1320 -> 1321: split the MV64xxx PCI class per
    function, add phys_mem to x86 system components, and register
    broadcast addresses on ethernet links."""
    # fix MV64360/MV64470 checkpoints to use new PCI classes
    objs = all_objects(set, 'MV64360-pci') + all_objects(set, 'MV64470-pci')
    for obj in objs:
        # The PCI function number is now encoded in the class name.
        obj.__class_name__ = obj.classname + "-f%d" % obj.function
        remove_attr(obj, "function")
    # fix x86-components that are missing phys_mem
    objs = (  all_objects(set, 'x86-system')
            + all_objects(set, 'x86-apic-bus-system')
            + all_objects(set, 'x86-apic-system')
            + all_objects(set, 'x86-separate-mem-io-system'))
    for obj in objs:
        obj.object_list['phys_mem'] = obj.object_list['pci_mem']
    for_all_objects(set, "ethernet-link", update_etherlink_1321)
def update_1318_to_1319(set):
    """Checkpoint update 1318 -> 1319: give top-level components a
    cpu_list attribute when it is missing."""
    # some broken checkpoints do not have a cpu_list attribute in the top
    # level component, set a dummy (possibly incorrect) attribute as workaround
    cpu = None
    patch_list = []
    for obj in set.values():
        try:
            SIM_get_class(obj.classname)
        except:
            continue
        if 'processor' in sim.classes[obj.classname].interfaces:
            # Remember the last processor seen; used as the dummy entry.
            cpu = obj
        elif 'component' in sim.classes[obj.classname].interfaces:
            try:
                if obj.top_level and not hasattr(obj, 'cpu_list'):
                    patch_list += [obj]
            except:
                pass
    for obj in patch_list:
        obj.cpu_list = [cpu]
def update_1317_to_1318(set):
    """Checkpoint update 1317 -> 1318: the PPC models dropped their
    user-level SPR alias attributes (read-only u* views); scrub them
    from every PPC processor object."""
    stale_aliases = ('ubamr', 'uctrl', 'ummcr0', 'ummcr1', 'ummcr2',
                     'ummcra', 'ummcrh', 'upmc1', 'upmc2', 'upmc3',
                     'upmc4', 'upmc5', 'upmc6', 'upmc7', 'upmc8',
                     'usdar', 'usiar', 'usprg3', 'usprg4', 'usprg5',
                     'usprg6', 'usprg7', 'utbl', 'utbu', 'utrace')
    for obj in set.values():
        if obj.classname not in ppc_classes:
            continue
        for alias in stale_aliases:
            remove_attr(obj, alias)
def update_ppc440_pci_1316(set, space):
    """Clear the target offset of every ppc440gp-pci mapping on
    function 1 in this memory space.  Spaces without a map attribute
    are left untouched."""
    try:
        maplist = space.map
    except:
        return
    # Skip spaces that do not map a ppc440gp-pci device at all.
    if not [m for m in maplist if m[1].classname == 'ppc440gp-pci']:
        return
    for m in maplist:
        if m[1].classname == 'ppc440gp-pci' and m[2] == 1:
            m[3] = 0
    space.map = maplist
def update_1316_to_1317(set):
    """Checkpoint update 1316 -> 1317: fix ppc440gp-pci mappings and
    convert the memory modules' boolean "registered" attribute into a
    module_type string."""
    for_all_objects(set, "memory-space", update_ppc440_pci_1316)
    objs = (all_objects(set, 'ddr2-memory-module')
            + all_objects(set, 'ddr-memory-module')
            + all_objects(set, 'sdram-memory-module'))
    for obj in objs:
        if obj.registered:
            obj.module_type = "RDIMM"
        else:
            obj.module_type = "UDIMM"
        remove_attr(obj, 'registered')
def update_1304_to_1305(set):
    """Checkpoint update 1304 -> 1305: the le-permissions class was
    removed; drop any instances."""
    remove_class(set, 'le-permissions')
def update_tlb_1302_970(set, cpu):
    """Widen each ppc970fx TLB entry by two fields: the old values at
    indices 4 and 5 move to the end of the entry, and those slots
    become the new big-segment (4) and large-page (5) encodings, both
    zeroed."""
    tlb = cpu.tlb
    for way in tlb:
        for entry in way:
            entry.append(entry[4])
            entry.append(entry[5])
            entry[5] = 0
            entry[4] = 0
    cpu.tlb = tlb
def update_1302_to_1303(set):
    """Checkpoint update 1302 -> 1303: widen the ppc970fx TLB entries."""
    for_all_objects(set, "ppc970fx", update_tlb_1302_970)
def update_pending_exceptions_1301(set, cpu, table, excvec_bits):
    """Convert the old pending-exception bitmask (MSB-first over
    excvec_bits bits) into the new list-of-names representation, using
    table[i] as the name of bit i."""
    mask = cpu.pending_exceptions
    names = []
    for bit in range(excvec_bits):
        if (mask >> (excvec_bits - 1 - bit)) & 1:
            names.append(table[bit])
    cpu.pending_exceptions = names
def update_pending_exceptions_1301_4xx(set, cpu):
    """Convert a PPC 4xx CPU's pending-exception bitmask to names."""
    table = ["Critical_Input", "Machine_check", "DSI", "ISI",
             "External_interrupt", "Alignment", "Program", "System_call",
             "PIT", "FIT", "Watchdog", "Data_TLB_miss", "Instruction_TLB_miss",
             "Debug"]
    update_pending_exceptions_1301(set, cpu, table, 32)
def update_pending_exceptions_1301_booke(set, cpu):
    """Convert a Book E CPU's pending-exception bitmask to names."""
    table = ["Critical_interrupt", "Machine_check", "DSI", "ISI",
             "External_interrupt", "Alignment", "Program",
             "Floating-point_unavailable", "System_call",
             "Auxiliary_processor_unavailable", "Decrementer", "FIT",
             "Watchdog", "Data_TLB_miss", "Instruction_TLB_miss", "Debug",
             "reserved_16", "reserved_17", "reserved_18", "reserved_19",
             "reserved_20", "reserved_21", "reserved_22", "reserved_23",
             "reserved_24", "reserved_25", "reserved_26", "reserved_27",
             "reserved_28", "reserved_29", "reserved_30", "reserved_31",
             "SPE_APU_unavailable", "SPE_floating-point_data",
             "SPE_floating-point_round", "Performance_monitor"]
    update_pending_exceptions_1301(set, cpu, table, 64)
def update_pending_exceptions_1301_750(set, cpu):
    """Convert a classic/74xx/970 PPC CPU's pending-exception bitmask
    to names."""
    table = ["Reserved", "System_reset", "Machine_check", "Data_storage",
             "Data_segment", "Instruction_storage", "Instruction_segment",
             "External_interrupt", "Alignment", "Program",
             "Floating-point_unavailable", "Decrementer", "Reserved_a",
             "Reserved_b", "System_call", "Trace", "Reserved_e",
             "Performance_monitor", "Altivec_Unavailable",
             "Instruction_Tlb_miss", "Data_Tlb_Load_miss",
             "Data_Tlb_Store_miss", "Instruction_address_breakpoint",
             "System_management_interrupt", "Reserved_15", "Altivec_Assist",
             "Thermal_management_interrupt"]
    update_pending_exceptions_1301(set, cpu, table, 32)
def update_add_ftp_alg_in(set, forward_in_obj):
    """Create (or reuse) an ftp-alg object for a port-forward incoming
    server and wire it between the incoming and outgoing handlers."""
    sn = forward_in_obj.tcp
    forward_out_obj = forward_in_obj.forward_handler
    alg_name = sn.name + "_ftp_alg"
    if set.has_key(alg_name):
        alg_obj = set[alg_name]
    else:
        alg_obj = pre_conf_object(alg_name, "ftp-alg")
        set[alg_name] = alg_obj
    alg_obj.forward_handler = forward_out_obj
    alg_obj.incoming_handler = forward_in_obj
    forward_out_obj.algs = [alg_obj]
    forward_in_obj.algs = [alg_obj]
    # The forward handler is now reached through the ALG.
    remove_attr(forward_in_obj, "forward_handler")
# Module-level scratch state for the CL-PD6729 PCMCIA conversion
# (build 1301): update_pcmcia_1301()/update_pcmcia_mappings() fill
# these in and update_pcmcia_1301_map() reads them.
pcmcia_dev = None
slot0_att = None
slot1_att = None
slot0_cmn = None
slot1_cmn = None
def update_pcmcia_1301_map(set, space):
    """Rewrite memory-space mappings of the old monolithic PCMCIA
    device: functions 2-5 are redirected to the new per-slot
    attribute/common spaces (module globals set by
    update_pcmcia_mappings), and function numbers are translated to
    the new encoding."""
    try:
        maplist = space.map
    except:
        return
    if len([x for x in maplist if x[1] == pcmcia_dev]) == 0:
        return
    newlist = []
    # Old function number -> new function number.
    map_functions = [0, 0x100, 0x200, 0x210, 0x300, 0x310]
    for m in maplist:
        if m[1] == pcmcia_dev:
            if m[2] == 2:
                m[1] = slot0_att
            elif m[2] == 3:
                m[1] = slot1_att
            elif m[2] == 4:
                m[1] = slot0_cmn
            elif m[2] == 5:
                m[1] = slot1_cmn
            if m[2] != 255:   # PCI config-space
                m[2] = map_functions[m[2]]
        newlist.append(m)
    space.map = newlist
def update_pcmcia_mappings(set, obj, slot):
    """Build the per-slot attribute/common memory spaces and a fake
    CIS ROM for one PCMCIA slot, record them on obj.slotN_spaces, and
    publish them in the module globals used by
    update_pcmcia_1301_map()."""
    global slot0_att, slot1_att, slot0_cmn, slot1_cmn
    if slot == 0:
        ide = obj.slot0_ata
    else:
        ide = obj.slot1_ata
    slot_cmn = pre_conf_object(ide.name + '_cmn', "memory-space")
    slot_att = pre_conf_object(ide.name + '_att', "memory-space")
    set[ide.name + '_cmn'] = slot_cmn
    set[ide.name + '_att'] = slot_att
    # TODO: read data
    cis_image = pre_conf_object(ide.name + '_cis_image', "image")
    cis_image.size = 768
    cis = pre_conf_object(ide.name + '_cis', "rom")
    cis.image = cis_image
    set[ide.name + 'cis'] = cis
    set[ide.name + 'cis_image'] = cis_image
    # IDE task-file registers at 0/0xe, mirrored throughout 0x400-0x7ff.
    slot_cmn.map = [
        [0, ide, 0, 0, 8],
        [0xe, ide, 0, 8, 1]]
    for i in range(0x400, 0x800, 2):
        slot_cmn.map.append([i, ide, 0, 0x0, 0x2])
    slot_att.map = [[0x0, cis, 0, 0, 0x300]]
    if slot == 0:
        remove_attr(obj, 'slot0_ata')
        remove_attr(obj, 'slot0_cis')
        obj.slot0_spaces = [slot_att, slot_cmn, slot_cmn]
        slot0_att = slot_att
        slot0_cmn = slot_cmn
    else:
        remove_attr(obj, 'slot1_ata')
        remove_attr(obj, 'slot1_cis')
        obj.slot1_spaces = [slot_att, slot_cmn, slot_cmn]
        slot1_att = slot_att
        slot1_cmn = slot_cmn
# Fake Card Information Structure (CIS) bytes for the emulated
# "Viking ATA Flash Card"; written into each populated slot's
# attribute space by add_pcmcia_cis_1301().
ide_cis = (
    0x01, 0x03, 0xd9, 0x01, 0xff, 0x1c, 0x04, 0x03, 0xd9, 0x01, 0xff, 0x18,
    0x02, 0xdf, 0x01, 0x20, 0x04, 0x01, 0x4e, 0x00, 0x02, 0x15, 0x2b, 0x04,
    0x01, 0x56, 0x69, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x41, 0x54, 0x41, 0x20,
    0x46, 0x6c, 0x61, 0x73, 0x68, 0x20, 0x43, 0x61, 0x72, 0x64, 0x20, 0x20,
    0x20, 0x20, 0x00, 0x53, 0x54, 0x4f, 0x52, 0x4d, 0x20, 0x20, 0x00, 0x53,
    0x54, 0x42, 0x4d, 0x30, 0x00, 0xff, 0x21, 0x02, 0x04, 0x01, 0x22, 0x02,
    0x01, 0x01, 0x22, 0x03, 0x02, 0x04, 0x5f, 0x1a, 0x05, 0x01, 0x03, 0x00,
    0x02, 0x0f, 0x1b, 0x0b, 0xc0, 0x40, 0xa1, 0x27, 0x55, 0x4d, 0x5d, 0x75,
    0x08, 0x00, 0x21, 0x1b, 0x06, 0x00, 0x01, 0x21, 0xb5, 0x1e, 0x4d, 0x1b,
    0x0d, 0xc1, 0x41, 0x99, 0x27, 0x55, 0x4d, 0x5d, 0x75, 0x64, 0xf0, 0xff,
    0xff, 0x21, 0x1b, 0x06, 0x01, 0x01, 0x21, 0xb5, 0x1e, 0x4d, 0x1b, 0x12,
    0xc2, 0x41, 0x99, 0x27, 0x55, 0x4d, 0x5d, 0x75, 0xea, 0x61, 0xf0, 0x01,
    0x07, 0xf6, 0x03, 0x01, 0xee, 0x21, 0x1b, 0x06, 0x02, 0x01, 0x21, 0xb5,
    0x1e, 0x4d, 0x1b, 0x12, 0xc3, 0x41, 0x99, 0x27, 0x55, 0x4d, 0x5d, 0x75,
    0xea, 0x61, 0x70, 0x01, 0x07, 0x76, 0x03, 0x01, 0xee, 0x21, 0x1b, 0x06,
    0x03, 0x01, 0x21, 0xb5, 0x1e, 0x4d, 0x14)
def add_pcmcia_cis_1301(arg, ini_obj):
    """Core_Configuration_Loaded callback: write the fake IDE CIS
    bytes into the attribute space of each populated PCMCIA slot of
    the object named by arg, then unregister itself.

    Cleanup: the inner byte loop used to reuse the outer loop's
    variable `i`, shadowing it; the outer index is now named `slot`.
    """
    obj = SIM_get_object(arg)
    spaces = [obj.slot0_spaces, obj.slot1_spaces]
    for slot in (0, 1):
        # A single-entry space list means the slot is unpopulated.
        if len(spaces[slot]) == 1:
            continue
        attr = spaces[slot][0]
        # CIS bytes live at even addresses in attribute space.
        for i in range(len(ide_cis)):
            attr.iface.memory_space.write(attr, None,
                                          i * 2, (ide_cis[i], ), 1)
        # Fake some attribute space registers
        attr.iface.memory_space.write(attr, None,
                                      0x204, (0x2e, ), 1)
    SIM_hap_delete_callback("Core_Configuration_Loaded",
                            add_pcmcia_cis_1301, arg)
def update_pcmcia_1301(set, obj):
    """Convert an old monolithic CL-PD6729 PCMCIA controller object:
    build per-slot spaces, patch memory windows and slot registers,
    rewrite mappings, and schedule the CIS write for after load."""
    global pcmcia_dev
    pcmcia_dev = obj
    obj.config_registers[15] = 0x00000100 # interrupt pin A
    update_pcmcia_mappings(set, obj, 0)
    update_pcmcia_mappings(set, obj, 1)
    # Enabled memory windows get their new space-index values
    # (3 = attribute, 2 = common).
    if obj.slot0_memory_windows[0][0]:
        obj.slot0_memory_windows[0][1] = 3
    if obj.slot0_memory_windows[4][0]:
        obj.slot0_memory_windows[4][1] = 2
    if obj.slot1_memory_windows[0][0]:
        obj.slot1_memory_windows[0][1] = 3
    if obj.slot1_memory_windows[4][0]:
        obj.slot1_memory_windows[4][1] = 2
    obj.slot0_registers[1] = 0xef
    obj.slot1_registers[1] = 0xef
    for_all_objects(set, "memory-space", update_pcmcia_1301_map)
    # The CIS data can only be written once the objects exist.
    SIM_hap_add_callback("Core_Configuration_Loaded",
                         add_pcmcia_cis_1301, obj.name)
def update_uart_1301(set, obj):
    """Give old UART objects the new interrupt_mask_out2 attribute;
    default 1 matches the previous always-enabled behavior."""
    try:
        obj.interrupt_mask_out2
    except AttributeError:
        obj.interrupt_mask_out2 = 1
def update_x86_components_1301(set, obj):
    """The x87 FPU sub-object lost its index suffix; migrate the
    object-list entry and drop the obsolete num_threads attribute."""
    olist = obj.object_list
    if 'x87[0]' in olist:
        if 'x87' not in olist:
            olist['x87'] = olist['x87[0]']
        del olist['x87[0]']
    remove_attr(obj, 'num_threads')
def update_1301_to_1302(set):
    """Checkpoint update 1301 -> 1302: PPC pending-exception lists,
    PCMCIA rework, FTP ALG insertion and assorted renames."""
    for_all_objects(set, "ppc403gcx", update_pending_exceptions_1301_4xx)
    for_all_objects(set, "ppc405gp", update_pending_exceptions_1301_4xx)
    for_all_objects(set, "ppc440gp", update_pending_exceptions_1301_booke)
    for_all_objects(set, "ppc440gx", update_pending_exceptions_1301_booke)
    for_all_objects(set, "ppce500", update_pending_exceptions_1301_booke)
    for_all_objects(set, "ppc603e", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7400", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7447", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7450", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7457", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc750", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc750fx", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc750gx", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc755", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc970fx", update_pending_exceptions_1301_750)
    for_all_objects(set, "CL-PD6729", update_pcmcia_1301)
    for cls in x86_classes:
        remove_class_attr(set, cls, 'smbase')
    for_all_objects(set, "port-forward-incoming-server", update_add_ftp_alg_in)
    for cpu in (all_objects(set, "ultrasparc-ii")
                + all_objects(set, "ultrasparc-iii")
                + all_objects(set, "ultrasparc-iii-plus")
                + all_objects(set, "ultrasparc-iii-i")):
        rename_attr(cpu, 'cpu_group', 'irq_bus')
    for_all_objects(set, "NS16550", update_uart_1301)
    for_all_objects(set, "NS16450", update_uart_1301)
    # somewhere between 1301 and 1325 x86-cpu components become incompatible
    for_all_objects(set, "pentium-4-cpu", update_x86_components_1301)
def update_event_queue_1300(cpu):
    """Split a processor's old combined event_queue attribute into the
    new step_queue/time_queue attributes, translating slot indices to
    slot names and dropping internal bookkeeping events."""
    # Read_slot has been removed and should not be present, but we map it
    # to the default slot just in case
    slot_names = ["sync", "pre-update", "update", "update2", "default",
                  "default", "assert", "event-end"]
    # Internal events that are recreated automatically and must not be
    # carried over into the new queues.
    ignore_events = set(("User breakpoint",
                         "Internal: update time counter",
                         "Internal: update step counter",
                         "Internal: renew queue",
                         "Head of Time",
                         "Deleted Event",
                         "Check for Async Events"))
    q = [[], []]
    hot_step = 0
    for (evobj, val, slot, queue, time) in cpu.event_queue:
        if evobj == "$simple_event":
            if val == "Head of Time":
                hot_step = time
            if val in ignore_events:
                continue
            evobj = None
        q[queue].append([evobj, val, slot_names[slot], time])
    # compensate for a head of time at step > 0
    q[Sim_Queue_Time] = [[o, v, s, t + hot_step]
                         for [o, v, s, t] in q[Sim_Queue_Time]]
    del cpu.event_queue
    cpu.step_queue = q[Sim_Queue_Step]
    cpu.time_queue = q[Sim_Queue_Time]
def update_1300_to_1301(set):
    """Checkpoint update 1300 -> 1301: convert processor event queues."""
    # Translate to new event queue attributes:
    for obj in set.values():
        try:
            SIM_get_class(obj.classname)
        except:
            continue
        if 'processor' in sim.classes[obj.classname].interfaces:
            update_event_queue_1300(obj)
# Scratch state shared by the 1199 -> 1200 Simics Central conversion
# helpers below (sim_1199, eth_device_1199, central_1199, ...).
create_central_client = False
remote_central = False
remote_host = None
first_queue = None
def remove_default_target_endian_1299(set, obj):
    """Drop the obsolete fifth (endian) element from a space's
    default_target attribute, if present."""
    try:
        target = obj.default_target
        if len(target) == 5:
            obj.default_target = target[:4]
    except:
        # No default_target, or not list-like: nothing to do.
        pass
def replace_dcr_mapping_1299(set, ppc):
    """Convert a PPC CPU's old "dcr" attribute (list of [object-name,
    dcr-number] pairs) into a memory-space object mapping each DCR at
    address dcr*4, stored in a new <cpu>-dcr-space object."""
    if len(ppc.dcr):
        # Group DCR numbers per target object name.
        dcr_map = {}
        for d in ppc.dcr:
            if dcr_map.has_key(d[0]):
                dcr_map[d[0]].append(d[1])
            else:
                dcr_map[d[0]] = [d[1]]
        mem_map = []
        for obj in dcr_map.keys():
            dcr_list = dcr_map[obj]
            # Offsets within the device are relative to its first DCR.
            first = dcr_list[0]
            for dcr in dcr_list:
                mem_map += [[(dcr)*4, set[obj], 0, (dcr-first)*4, 4]]
        dcr_space = ppc.name + '-dcr-space'
        set[dcr_space] = pre_conf_object(dcr_space, 'memory-space')
        ppc.dcr_space = set[dcr_space]
        set[dcr_space].map = mem_map
    remove_attr(ppc, 'dcr')
def fix_405_uic_1299(set, uic):
    """Convert an old ppc405gp-uic object to the new interrupt model:
    irq levels now follow documentation (new_level = 31 - old_level),
    so patch the UIC's own attributes plus the devices this update
    knows feed into it; anything else must be fixed by hand."""
    print "WARNING: Converting an old 405 based configuration"
    print "The interrupt controller has changed so that irq levels are according"
    print "to documentation. Will try to patch devices but there might be more"
    print "devices connected to the UIC which needs to be patched manually."
    print "Typically obj.irq_level = 31 - old_irq_level"
    uic.target = uic.irq_dev
    uic.critical_target = uic.irq_dev
    uic.target_level = 0
    uic.critical_target_level = 1
    remove_attr(uic, 'irq_dev')
    rename_attr(uic, 'UICx_CR', 'uiccr')
    rename_attr(uic, 'UICx_ER', 'uicer')
    rename_attr(uic, 'UICx_PR', 'uicvpr')
    rename_attr(uic, 'UICx_SR', 'uicsr')
    rename_attr(uic, 'UICx_TR', 'uictr')
    rename_attr(uic, 'UICx_VCR', 'uicvcr')
    for obj in all_objects(set, 'ppc405gp-iic'):
        if obj.interrupt_device == uic:
            print "Patching %s (level %d -> %d)" % (obj.name, obj.interrupt_level,
                                                    31 - obj.interrupt_level)
            obj.interrupt_level = 31 - obj.interrupt_level
    for obj in all_objects(set, 'ppc405gp-pci'):
        irqs = obj.irq_routing
        new_irq = []
        for i in irqs:
            if i[1] == uic.name:
                print "Patching %s (level %d -> %d)" % (obj.name, i[2], 31 - i[2])
                new_irq.append([i[0], i[1], 31 - i[2]])
            else:
                new_irq.append(i)
        obj.irq_routing = new_irq
    for obj in all_objects(set, 'NS16550'):
        if obj.irq_dev == uic:
            print "Patching %s (level %d -> %d)" % (obj.name, obj.interrupt_pin,
                                                    31 - obj.interrupt_pin)
            obj.interrupt_pin = 31 - obj.interrupt_pin
def remove_uic_attributes_1299(set, uic):
    """Drop the obsolete UIC vector/masked-status register attributes."""
    remove_attr(uic, 'UICx_VR')
    remove_attr(uic, 'UICx_MSR')
def add_cpu_obj_1299(set, obj):
    """Set obj.cpu to the PPC 4xx CPU whose DCR space maps obj."""
    # Find which CPU this object is mapped into
    cpus = all_objects(set, 'ppc405gp') + all_objects(set, 'ppc440gp') + all_objects(set, 'ppc440gx')
    for cpu in cpus:
        space = cpu.dcr_space
        map = space.map
        for m in map:
            if m[1] == obj:
                obj.cpu = cpu
                # NOTE(review): only breaks the inner loop; if several
                # CPUs map the object the last match wins — presumably
                # never happens in practice.
                break
def change_memory_attr_1299(set, obj):
    """The "memory" attribute used to hold an object name; resolve the
    string into the corresponding pre-conf object."""
    if hasattr(obj, 'memory'):
        mem = obj.memory
        if type(mem) == str:
            obj.memory = set[mem]
def change_mal_attr_1299(set, obj):
    """The "mal" attribute used to hold an object name; resolve the
    string into the corresponding pre-conf object."""
    if hasattr(obj, 'mal'):
        mal = obj.mal
        if type(mal) == str:
            obj.mal = set[mal]
def change_irq_attr_1299(set, obj):
    """irq_routing entries used to name their interrupt controller by
    string; resolve each such name into the pre-conf object."""
    if not hasattr(obj, 'irq_routing'):
        return
    routing = obj.irq_routing
    if type(routing) != list:
        return
    for entry in routing:
        if type(entry[1]) == str:
            entry[1] = set[entry[1]]
def rename_ioapic_1299(set, obj):
    # Class renamed from 'I/O-APIC' to 'io-apic'.
    obj.__class_name__ = 'io-apic'
def rename_cheetah_plus_mmu_1299(set, obj):
    # Class renamed from 'cheetah+mmu' to 'cheetah-plus-mmu'.
    obj.__class_name__ = 'cheetah-plus-mmu'
def rename_ultrasparc_iii_plus_1299(set, obj):
    # Class renamed from 'ultrasparc-iii+' to 'ultrasparc-iii-plus'.
    obj.__class_name__ = 'ultrasparc-iii-plus'
def rename_ultrasparc_iv_plus_1299(set, obj):
    # Class renamed from 'ultrasparc-iv+' to 'ultrasparc-iv-plus'.
    obj.__class_name__ = 'ultrasparc-iv-plus'
def set_dec_srom_width_1299(set, obj):
    # New attribute on the DEC 21x4x models; old checkpoints used a
    # fixed 6-bit serial ROM address.
    obj.srom_address_width = 6
def fix_fb_mem_1299(set, obj):
    """Frame-buffer devices now keep their video memory in a separate
    image object; create one of the model-appropriate size and attach
    it to the device."""
    name = "%s-image" % obj.name
    image = pre_conf_object(name, 'image')
    cls = obj.classname
    if cls == 'ragexl':
        image.size = 0x800000
    elif cls.startswith('vga'):
        image.size = 0x40000
    elif cls.startswith('voodoo3'):
        image.size = 0x1000000
    set[name] = image
    obj.image = image
def update_1299_to_1300(set):
    """Checkpoint update 1299 -> 1300: frame-buffer images, PPC DCR
    spaces, 4xx device rewiring, class renames and assorted attribute
    cleanups."""
    for_all_objects(set, 'ragexl', fix_fb_mem_1299)
    for_all_objects(set, 'vga', fix_fb_mem_1299)
    for_all_objects(set, 'vga_pci', fix_fb_mem_1299)
    for_all_objects(set, 'voodoo3', fix_fb_mem_1299)
    for_all_objects(set, 'voodoo3-agp', fix_fb_mem_1299)
    for_all_objects(set, 'ppc403gcx', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc405gp', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc440gp', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc440gx', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc405gp-uic', fix_405_uic_1299)
    for_all_objects(set, 'ppc440gp-uic', remove_uic_attributes_1299)
    for_all_objects(set, 'ppc440gx-uic', remove_uic_attributes_1299)
    # add_cpu_obj_1299 must run after the DCR spaces exist.
    for_all_objects(set, 'ppc405gp-dma', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gp-dma', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gx-dma', add_cpu_obj_1299)
    for_all_objects(set, 'ppc405gp-ebc', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gp-ebc', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gx-ebc', add_cpu_obj_1299)
    for_all_objects(set, 'ppc405gp-mal', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gp-mal', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gx-mal', add_cpu_obj_1299)
    for_all_objects(set, 'misc-dcr', add_cpu_obj_1299)
    for_all_objects(set, 'ppc405gp-dma', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gp-dma', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gx-dma', change_memory_attr_1299)
    for_all_objects(set, 'ppc405gp-mal', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gp-mal', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gx-mal', change_memory_attr_1299)
    for_all_objects(set, 'ppc405gp-emac', change_mal_attr_1299)
    for_all_objects(set, 'ppc440gp-emac', change_mal_attr_1299)
    for_all_objects(set, 'ppc440gx-emac', change_mal_attr_1299)
    for_all_objects(set, 'ppc405gp-pci', change_irq_attr_1299)
    for_all_objects(set, 'ppc440gp-pci', change_irq_attr_1299)
    for_all_objects(set, 'ppc440gx-pci', change_irq_attr_1299)
    for_all_objects(set, 'memory-space', remove_default_target_endian_1299)
    for_all_objects(set, 'port-space', remove_default_target_endian_1299)
    for_all_objects(set, 'I/O-APIC', rename_ioapic_1299)
    for_all_objects(set, 'cheetah+mmu', rename_cheetah_plus_mmu_1299)
    for_all_objects(set, 'ultrasparc-iii+', rename_ultrasparc_iii_plus_1299)
    for_all_objects(set, 'ultrasparc-iv+', rename_ultrasparc_iv_plus_1299)
    try:
        SIM_get_object('dummy-component')
        set['system-component'] = pre_conf_object('system-component',
                                                  'dummy-component')
    except:
        pass
    for cls in ['DEC21041', 'DEC21140A', 'DEC21143']:
        for_all_objects(set, cls, set_dec_srom_width_1299)
    for obj in all_objects(set, 'i82077'):
        try:
            # Keep only the drive object from each [name, object] pair.
            obj.drives = [x[1] for x in obj.drives]
        except:
            pass
    for cls in mips_classes:
        remove_class_attr(set, cls, 'itlb')
        remove_class_attr(set, cls, 'dtlb')
    for obj in all_objects(set, 'i8042'):
        if hasattr(obj, 'reset_targets') and len(obj.reset_targets) > 0:
            # Reset fan-out now goes through an x86-reset-bus object.
            bus = pre_conf_object(obj.name + '_reset', 'x86-reset-bus')
            set[obj.name + '_reset'] = bus
            bus.reset_targets = obj.reset_targets
            obj.reset_target = bus
        remove_attr(obj, 'a20_target')
        remove_attr(obj, 'reset_targets')
    for cls in x86_classes:
        remove_class_attr(set, cls, 'stc_segreg_enabled')
def connections_1200(set, obj):
    """Convert <port-forward-outgoing-server>.connections from the old
    [[si]|[sisi]*] format to the new [[si]|[sissi]*] format by
    inserting the service-node device's IP address as the new third
    element of each 4-element entry.

    Bug fix: the old code built the new entry as
    [sublist[0], sublist[0], new_ip, sublist[2], sublist[3]],
    duplicating the first (string) element and dropping sublist[1] —
    which does not match the documented [sissi] layout.
    """
    try:
        connections = obj.connections
    except:
        return
    # Find the service-node-device and use that IP
    new_ip = "0.0.0.0"
    for snd in [x for x in set.values()
                if x.classname == 'service-node-device']:
        new_ip = snd.ip_address
        break
    newlist = []
    for sublist in connections:
        if len(sublist) == 2:
            newlist.append(sublist)
        elif len(sublist) == 4:
            newlist.append([sublist[0], sublist[1], new_ip,
                            sublist[2], sublist[3]])
    obj.connections = newlist
def update_1200_to_1201(set):
    """Checkpoint update 1200 -> 1201: port-forward connection format."""
    for_all_objects(set, "port-forward-outgoing-server", connections_1200)
def sim_1199(set, obj):
    """Record whether the old sim object used a remote Simics Central
    (in the module globals used by update_1199_to_1200) and strip the
    obsolete central attributes."""
    global create_central_client, remote_central, remote_host
    try:
        if obj.remote_simics_central == 1:
            create_central_client = True
            remote_central = True
            remote_host = obj.simics_central_host
    except:
        pass
    remove_attr(obj, 'remote_simics_central')
    remove_attr(obj, 'simics_central_host')
    remove_attr(obj, 'central_debug')
def connect_eth_1199(arg, ini_obj):
    """Core_Configuration_Loaded callback: connect the device named
    arg[0] to the link named arg[1], then unregister itself."""
    dev = SIM_get_object(arg[0])
    net = SIM_get_object(arg[1])
    dev.link = net
    SIM_hap_delete_callback("Core_Configuration_Loaded",
                            connect_eth_1199, arg)
def eth_device_1199(set, obj):
    """Convert an old Ethernet device's network attributes: create a
    local link when the network lived in a remote Simics, schedule the
    link connection for after load, and drop obsolete attributes."""
    global remote_central
    if remote_central:
        # create local link if network in other simics
        link = pre_conf_object('net0', 'ethernet_link')
        set['net0'] = link
        link.central = set['central_client']
        # Only the first device triggers the link creation.
        remote_central = False
        link = link.name
    else:
        try:
            link = obj.network
        except:
            # NOTE(review): if obj.network is missing here, `link` stays
            # unbound and the hap registration below would raise
            # NameError when obj.connected is true — presumably cannot
            # happen for checkpoints this code targets.
            pass
    if obj.connected:
        SIM_hap_add_callback("Core_Configuration_Loaded",
                             connect_eth_1199, (obj.name, link))
    remove_attr(obj, 'network')
    remove_attr(obj, 'connected')
    remove_attr(obj, 'min_latency')
    remove_attr(obj, 'backdoor_ok')
    remove_attr(obj, 'auto_connect')
    remove_attr(obj, 'individual_address')
def ethernet_net_1199(set, obj):
    """Convert an old ethernet-network object to the new ethernet-link
    class; its built-in "central device" becomes a separate
    service-node-device wired to the sn0 service node."""
    obj.__class_name__ = 'ethernet-link'
    remove_attr(obj, 'frame_loss')
    remove_attr(obj, 'network_id')
    remove_attr(obj, 'handle_dhcp')
    remove_attr(obj, 'shared_media')
    remove_attr(obj, 'netip')
    remove_attr(obj, 'ethernet_central')
    if obj.central_device:
        snd = pre_conf_object('sn0_dev', 'service-node-device')
        set['sn0_dev'] = snd
        snd.service_node = set['sn0']
        snd.arp_table = obj.arp
        snd.mac_address = obj.ownmac
        snd.ip_address = obj.ownip
        snd.netmask = obj.netmask
        snd.queue = first_queue
        set['sn0'].routing_table = [[snd.ip_address, snd.netmask,
                                     '0.0.0.0', snd]]
        snd.link = obj
    try:
        obj.central = set['central_client']
    except:
        pass
    remove_attr(obj, 'central_device')
    remove_attr(obj, 'arp')
    remove_attr(obj, 'ownmac')
    remove_attr(obj, 'ownip')
    remove_attr(obj, 'netmask')
def ethernet_central_1199(set, obj):
    """Move an old ethernet-central object's DNS table onto the sn0
    service node (with a None object column prepended), then delete
    the obsolete object."""
    new_dns = []
    for dns in obj.dns:
        new_dns.append([None, dns[0], dns[1], dns[2]])
    set['sn0'].hosts = new_dns
    del set[obj.name]
def central_1199(set, obj):
    """Replace the old 'central' object with a central-server when it was in use."""
    global create_central_client
    port = obj.ip_port
    file = obj.unix_socket
    del set['central']
    # neither a TCP port nor a unix socket configured: central was unused
    if port == -1 and len(file) == 0:
        return
    # central was used
    cs = pre_conf_object('central_server', 'central-server')
    set['central_server'] = cs
    if len(file):
        cs.unix_socket = file
        cs.unix_socket_mode = 438  # 438 decimal == 0666 octal
    if port != -1:
        cs.tcp_port = port
    create_central_client = True
def update_1199_to_1200(set):
    """Main 1199->1200 conversion: new central/link objects plus misc attribute cleanup."""
    global remote_host
    for_all_objects(set, 'sim', sim_1199)
    for_all_objects(set, 'central', central_1199)
    if create_central_client:
        cc = pre_conf_object('central_client', 'central-client')
        set['central_client'] = cc
        if remote_host and len(remote_host):
            if not ':' in remote_host and not '/' in remote_host:
                # if port not specified, add default one
                remote_host += ":4711"
            cc.server = remote_host
        elif not remote_central:
            cc.server = pre_conf_object('central_server', 'central-server')
            set['central_server'] = cc.server
    if len(all_objects(set, 'ethernet-central')):
        # a service node replaces the old ethernet-central functionality
        set['sn0'] = pre_conf_object('sn0', 'service-node')
    for_all_objects(set, 'ethernet-central', ethernet_central_1199)
    for_all_objects(set, 'ethernet-network', ethernet_net_1199)
    # every known ethernet device class gets the new link attribute scheme
    for_all_objects(set, 'sbus-hme', eth_device_1199)
    for_all_objects(set, 'cheerio-hme', eth_device_1199)
    for_all_objects(set, 'BCM5703C', eth_device_1199)
    for_all_objects(set, 'BCM57034', eth_device_1199)
    for_all_objects(set, 'AM79C960', eth_device_1199)
    for_all_objects(set, 'cassini', eth_device_1199)
    for_all_objects(set, 'DEC21041', eth_device_1199)
    for_all_objects(set, 'DEC21140A', eth_device_1199)
    for_all_objects(set, 'DEC21143', eth_device_1199)
    for_all_objects(set, 'ppc440gp-emac', eth_device_1199)
    for_all_objects(set, 'CS8900A', eth_device_1199)
    remove_class_attr(set, 'ppc440gp', 'ear')
    # central-links objects are obsolete; drop them all
    for l in [x.name for x in set.values() if x.classname == 'central-links']:
        del set[l]
    remove_class_attr(set, 'ICS951601', 'address_mask')
    remove_class_attr(set, 'NS16450', 'send_while_playing_back')
    remove_class_attr(set, 'NS16550', 'send_while_playing_back')
    remove_class_attr(set, 'M5823', 'irq_disable')
    remove_class_attr(set, 'DS12887', 'irq_disable')
    remove_class_attr(set, 'DS17485', 'irq_disable')
    remove_class_attr(set, 'i8254', 'rw_state')
    for obj in all_objects(set, 'ppc440gp-mal'):
        # both rx and tx route to the same irq-device in 440gp-mal
        # (original comment read "both tx and tx" -- typo)
        obj.interrupts[1] = obj.interrupts[0]
    cpus = [x for x in set.values() if x.classname in x86_classes]
    for kbd in all_objects(set, 'i8042'):
        if len(cpus):
            # this is an x86 config
            kbd.reset_targets = cpus
    for obj in all_objects(set, 'port-space'):
        # remove obsolete 6th element (reverse-endian)
        try:
            for m in range(len(obj.map)):
                if len(obj.map[m]) == 6:
                    obj.map[m].pop(-1)
        except:
            pass
def update_1051_to_1052(set):
    """Drop the obsolete server-console output attributes."""
    for attr in ('data_out', 'poll_interval'):
        remove_class_attr(set, 'server-console', attr)
def change_map_endian_1049(set, obj):
    """Normalize memory-space maps: align serengeti empty mappings, drop old endian info."""
    try:
        # align base for serengeti empty mappings
        for i in range(len(obj.map)):
            off = obj.map[i][0] & 0x1fff
            if off == 0x60 and obj.map[i][4] == 0x10:
                obj.map[i][0] &= 0xffffffffffffe000
                obj.map[i][4] &= 0x70
            # change endian. 5 or shorter? - no endian info included
            if len(obj.map[i]) > 5:
                for j in range(5, len(obj.map[i])):
                    if isinstance(obj.map[i][j], int):
                        obj.map[i][j] = 0
            # if length 6 and last is integer -> remove endian
            # since we don't support this format anymore.
            if len(obj.map[i]) == 6 and isinstance(obj.map[i][5], int):
                obj.map[i].pop(-1)
        # TODO: update vga mapping
    except Exception, msg:
        # best-effort: a malformed map aborts this space's conversion only
        print msg
        pass
def add_vga_memory_1049(set, obj):
    """Point every vga/voodoo device mapped in space obj back at that space."""
    for entry in obj.map:
        dev = entry[1]
        if isinstance(dev, str):
            dev = set[dev]  # map target given by name -- resolve it
        if 'vga' in dev.classname or 'voodoo' in dev.classname:
            dev.memory_space = obj
def update_1049_to_1050(set):
    """1049->1050: map cleanup, attribute removals, SYM53C interrupt_pin widening."""
    for_all_objects(set, 'memory-space', change_map_endian_1049)
    for_all_objects(set, 'memory-space', add_vga_memory_1049)
    remove_class_attr(set, 'ide-disk', 'tr_rdy_dma')
    remove_class_attr(set, 'ide-disk', 'tr_cmd_return_dma')
    remove_class_attr(set, 'ide-cdrom', 'tr_rdy_dma')
    remove_class_attr(set, 'ide-cdrom', 'tr_cmd_return_dma')
    remove_class_attr(set, 'i82077', 'seek_irq_drive')
    remove_class_attr(set, 'i8042', 'reset_target')
    remove_class_attr(set, 'NS16450', 'com')
    remove_class_attr(set, 'NS16550', 'com')
    remove_class_attr(set, 'i21152', 'first_bus_nonzero')
    remove_class_attr(set, 'i82443bx_agp', 'first_bus_nonzero')
    remove_class_attr(set, 'i82443bx_agp', 'memory')
    # p4 has these before 2.0 (only p2, p3 and ppro)
    remove_class_attr(set, 'x86-p4', 'mc4_ctl')
    remove_class_attr(set, 'x86-p4', 'mc4_addr')
    remove_class_attr(set, 'x86-p4', 'mc4_status')
    remove_class_attr(set, 'x86-p4', 'mc4_misc')
    remove_class_attr(set, 'x86-p4', 'perfevtsel0')
    remove_class_attr(set, 'x86-p4', 'perfevtsel1')
    for cls in x86_classes:
        remove_class_attr(set, cls, 'cr1')
    for cls in ['SYM53C810', 'SYM53C875']:
        for obj in all_objects(set, cls):
            # scalar interrupt_pin becomes a 4-entry list
            try:
                pin = obj.interrupt_pin
                obj.interrupt_pin = [pin, 0, 0, 0]
            except:
                pass
def update_1042_to_1043(set):
    """Split each Z8530 into the new per-port Z8530-port sub-objects."""
    for obj in all_objects(set, 'Z8530'):
        a = pre_conf_object(obj.name + '-port-a', 'Z8530-port')
        b = pre_conf_object(obj.name + '-port-b', 'Z8530-port')
        obj.a_port = set[a.name] = a
        obj.b_port = set[b.name] = b
        a.master = obj
        b.master = obj
        # only change console if set
        try:
            a.console = obj.a_console
            a.console.device = a
            remove_attr(obj, 'a_console')
        except:
            pass
        try:
            b.console = obj.b_console
            b.console.device = b
            remove_attr(obj, 'b_console')
        except:
            pass
def update_1040_to_1041(set):
    """1040->1041: us-iii-i class split, x86 segment limit rescale, flash storage move."""
    for obj in all_objects(set, 'ultrasparc-iii+'):
        try:
            if obj.report_ultra3i:
                obj.__class_name__ = 'ultrasparc-iii-i'
            remove_attr(obj, 'report_ultra3i')
        except:
            pass
    seg_regs = ["cs", "ds", "ss", "es", "fs", "gs", "tr", "ldtr"]
    for cls in x86_classes:
        for obj in all_objects(set, cls):
            for seg in seg_regs:
                try:
                    reg = getattr(obj, seg)
                    if reg[3]:
                        # reg[3] looks like the granularity flag: scale the
                        # limit (reg[8]) from pages to bytes -- confirm.
                        reg[8] = (reg[8] << 12) | 0xfff
                    setattr(obj, seg, reg)
                except:
                    pass
    for obj in all_objects(set, 'flash-memory'):
        # the flash now references its backing ram object directly
        try:
            obj.storage_ram = obj.storage_space.map[0][1]
        except:
            pass
        remove_attr(obj, 'storage_space')
def update_1039_to_1040(set):
    """Mark the first non-486 x86 processor as the bootstrap processor (BSP)."""
    # NOTE: "first" is global across the whole set, so this does not handle
    # multi-machine configurations (carried over from the original comment).
    bsp_flag = 1
    for classname in x86_classes:
        if '486' in classname:
            continue
        for cpu in all_objects(set, classname):
            cpu.bsp = bsp_flag
            bsp_flag = 0
def update_1031_to_1032(set):
    """Set udma_enabled where the old checkpoint had a non-zero udma_mode.

    Old checkpoints lack the udma_enabled attribute entirely; the new
    multiword DMA attributes keep their defaults (off and zero).
    """
    for dev in all_objects(set, 'ide-disk') + all_objects(set, 'ide-cdrom'):
        if getattr(dev, 'udma_mode', 0):
            dev.udma_enabled = 1
def update_1030_to_1031(set):
    """1030->1031: drop obsolete cpu/console attributes; mask ISP queue addresses."""
    for obj in (all_objects(set, 'ultrasparc-ii')
                + all_objects(set, 'ultrasparc-iii')
                + all_objects(set, 'ultrasparc-iii+')):
        remove_attr(obj, 'fp_follow_errata_69')
        remove_attr(obj, 'no_unpriv_nucleus_ifetch')
    for obj in all_objects(set, 'text-console'):
        remove_attr(obj, 'xterm_args')
    for cls in ['ISP1040', 'ISP1040_SUN', 'ISP2200', 'ISP2200_SUN']:
        for obj in all_objects(set, cls):
            # mask to 32 bits
            obj.req_queue_addr &= 0xffffffff
            obj.res_queue_addr &= 0xffffffff
    remove_class_attr(set, 'ram', 'mapped_size')
def update_1019_to_1020(set):
    """Introduce a shared sparc-irq-bus and retire the per-object irq/cpu lists."""
    objs = (all_objects(set, 'ultrasparc-ii')
            + all_objects(set, 'ultrasparc-iii')
            + all_objects(set, 'ultrasparc-iii+')
            + all_objects(set, 'ultrasparc-v')
            + all_objects(set, 'serengeti-schizo')
            + all_objects(set, 'fiesta-tomatillo')
            + all_objects(set, 'sun4u-fhc')
            + all_objects(set, 'sunfire-sysio')
            + all_objects(set, 'sunfire-psycho')
            + all_objects(set, 'serengeti-console')
            + all_objects(set, 'serengeti-console-old'))
    # only create the bus when at least one object needs it
    if len(objs):
        irq_bus = pre_conf_object('irq_bus0', 'sparc-irq-bus')
        set['irq_bus0'] = irq_bus
    for obj in objs:
        obj.irq_bus = irq_bus
        remove_attr(obj, 'irq_objs')
        remove_attr(obj, 'cpu_objs')
def update_1010_to_1011(set):
    """Drop the obsolete NVRAM attributes from the QLogic ISP SCSI controllers."""
    for isp_class in ('ISP1040', 'ISP1040_SUN', 'ISP2200', 'ISP2200_SUN'):
        remove_class_attr(set, isp_class, 'nvram')
        remove_class_attr(set, isp_class, 'nvram-extra-cycle')
def add_x86_tlb_1009(set, obj):
    """Give x86 cpu `obj` a separate x86-tlb object and migrate its TLB attributes.

    Creates '<cpu>_tlb', cross-links it with the cpu, and moves the four old
    per-cpu TLB attributes onto the new object when they are present.
    """
    name = obj.name + "_tlb"
    set[name] = tlb = pre_conf_object(name, 'x86-tlb')
    tlb.cpu = obj
    obj.tlb = tlb
    for t in ['itlb_large', 'dtlb_large', 'itlb_4k', 'dtlb_4k']:
        # FIX: use setattr/getattr instead of a Python-2-only `exec`
        # statement on a format string; same effect, no string evaluation.
        try:
            setattr(tlb, t, getattr(obj, t))
            remove_attr(obj, t)
        except AttributeError:
            pass  # old checkpoint lacks this TLB attribute
def update_1009_to_1010(set):
    """1009->1010: attribute cleanup, serengeti console rename, split x86 TLBs out."""
    remove_class_attr(set, 'ide-disk', 'debug_level')
    remove_class_attr(set, 'ide-cdrom', 'debug_level')
    remove_class_attr(set, 'spitfire-mmu', 'no_unpriv_nucleus_ifetch')
    remove_class_attr(set, 'cheetah-mmu', 'no_unpriv_nucleus_ifetch')
    remove_class_attr(set, 'cheetah+mmu', 'no_unpriv_nucleus_ifetch')
    remove_class_attr(set, 'text-console', 'add_title')
    for obj in all_objects(set, 'serengeti-console'):
        obj.__class_name__ = 'serengeti-console-old'
    for cls in x86_classes:
        for obj in all_objects(set, cls):
            add_x86_tlb_1009(set, obj)
#######################
# Registration table: map a checkpoint build-id to the callback that upgrades
# checkpoints of that build (run in ascending order by update_configuration).
install_configuration_update(1397, update_1396_to_1397)
install_configuration_update(1391, update_1390_to_1391)
install_configuration_update(1379, update_1378_to_1379)
install_configuration_update(1378, update_1377_to_1378)
install_configuration_update(1370, update_1370_to_1371)
install_configuration_update(1367, update_1367_to_1368)
install_configuration_update(1366, update_1366_to_1367)
install_configuration_update(1365, update_1365_to_1366)
install_configuration_update(1364, update_1364_to_1365)
install_configuration_update(1363, update_1363_to_1364)
install_configuration_update(1361, update_1361_to_1362)
install_configuration_update(1358, update_1358_to_1359)
install_configuration_update(1357, update_1357_to_1358)
install_configuration_update(1354, update_1354_to_1355)
install_configuration_update(1350, update_1350_to_1351)
install_configuration_update(1348, update_1348_to_1349)
# NOTE(review): registered under 1339 but the callback is named
# update_1340_to_1341 -- confirm the intended trigger version.
install_configuration_update(1339, update_1340_to_1341)
install_configuration_update(1334, update_1334_to_1335)
install_configuration_update(1332, update_1332_to_1333)
install_configuration_update(1329, update_1329_to_1330)
install_configuration_update(1328, update_1328_to_1329)
install_configuration_update(1327, update_1327_to_1328)
install_configuration_update(1326, update_1326_to_1327)
install_configuration_update(1321, update_1321_to_1322)
install_configuration_update(1320, update_1320_to_1321)
install_configuration_update(1318, update_1318_to_1319)
install_configuration_update(1317, update_1317_to_1318)
install_configuration_update(1316, update_1316_to_1317)
install_configuration_update(1305, update_1304_to_1305)
install_configuration_update(1302, update_1302_to_1303)
install_configuration_update(1301, update_1301_to_1302)
install_configuration_update(1300, update_1300_to_1301)
install_configuration_update(1299, update_1299_to_1300)
install_configuration_update(1200, update_1200_to_1201)
install_configuration_update(1199, update_1199_to_1200)
install_configuration_update(1051, update_1051_to_1052)
install_configuration_update(1049, update_1049_to_1050)
install_configuration_update(1042, update_1042_to_1043)
install_configuration_update(1040, update_1040_to_1041)
install_configuration_update(1039, update_1039_to_1040)
install_configuration_update(1031, update_1031_to_1032)
install_configuration_update(1030, update_1030_to_1031)
install_configuration_update(1019, update_1019_to_1020)
install_configuration_update(1010, update_1010_to_1011)
install_configuration_update(1009, update_1009_to_1010)
| 38.919082
| 114
| 0.617797
|
from sim_core import *
import conf
import sim
update_functions = {}  # build-id -> list of update callbacks for that version

def install_configuration_update(version, f):
    """Register callback f to run when updating a checkpoint of build `version`."""
    update_functions.setdefault(version, []).append(f)
def update_configuration(set):
    """Run every registered update callback needed to bring `set` up to this build."""
    global first_queue
    try:
        # refuse to downgrade a checkpoint from a newer Simics
        if set['sim'].version > conf.sim.version:
            print ('Loading a configuration created in a newer Simics version '
                   '(build %d) is not supported, and may not work. Current '
                   'Simics build is %d.' % (set['sim'].version,
                                            conf.sim.version))
            return
    except:
        if SIM_get_verbose():
            print 'No version information in checkpoint - not updating.'
        return
    # remember some queue (time source); used as a default by update functions
    for x in set.values():
        try:
            first_queue = x.queue
            break
        except:
            pass
    vers = sorted(update_functions.keys())
    for ver in vers:
        if ver < set['sim'].version:
            continue  # checkpoint is already newer than this update
        if ver > conf.sim.version:
            print ("Warning: update_config callback for future version "
                   "found: %s" % ver)
            continue
        if SIM_get_verbose():
            print 'Updating from version %d' % ver
        for f in update_functions[ver]:
            # best-effort: one failing update does not stop the others
            try:
                f(set)
            except Exception, msg:
                print 'Update function for version %d failed: %s' % (ver, msg)
def all_objects(set, classname):
    """Return all pre-conf objects in `set` whose class is `classname`."""
    # NOTE(review): this span was corrupted (the def headers were truncated
    # to "trname):").  Both helpers are reconstructed from their call sites
    # throughout this module (all_objects / all_objects_with_attr).
    return [x for x in set.values() if x.classname == classname]

def all_objects_with_attr(set, attrname):
    """Return all pre-conf objects in `set` that have attribute `attrname`."""
    return [x for x in set.values() if hasattr(x, attrname)]
def remove_attr(obj, name):
    """Delete attribute `name` from obj; a missing attribute is ignored."""
    if hasattr(obj, name):
        delattr(obj, name)
def rename_attr(obj, new_attr, old_attr):
    """Move the value of old_attr to new_attr; a missing old_attr is a no-op."""
    if hasattr(obj, old_attr):
        setattr(obj, new_attr, getattr(obj, old_attr))
    remove_attr(obj, old_attr)
def remove_class_attr(set, classname, name):
    """Remove attribute `name` from every object of class `classname` in set."""
    for obj in all_objects(set, classname):
        remove_attr(obj, name)
def remove_class(set, classname):
    """Delete every object of class `classname` from the configuration set."""
    # NOTE(review): remove_class is redefined later in this module with an
    # equivalent implementation; that later definition is the one in effect.
    for l in [x.name for x in set.values() if x.classname == classname]:
        del set[l]
# Processor class names that may occur in old checkpoints, grouped by family.
x86_classes = ["x86-386", "x86-386-387", "x86-486dx2", "x86-486sx",
               "x86-pentium", "x86-pentium-mmx", "x86-ppro",
               "x86-p2", "x86-p3", "x86-p4", "x86-p4e",
               "x86-hammer", "x86-k7"]
mips_classes = ["mips-4kc", "mips-5kc", "mips-rm7000-be",
                "mips-e9000-be"]
ppc_classes = ['ppc403gcx', 'ppc405gp', 'ppc440gp', 'ppc440gx', 'ppc603e',
               'ppc7400', 'ppc7447', 'ppc7450', 'ppc7450-36', 'ppc7457',
               'ppc750', 'ppc750fx', 'ppc750gx', 'ppc755', 'ppc970fx',
               'ppce500', 'ppce600', 'ppc-power6']
def remove_class(set, classname):
    """Delete every object of class `classname` (replaces the earlier definition)."""
    for obj in all_objects(set, classname):
        del set[obj.name]
def update_1390_to_1391(set):
    """Drop obsolete es_asic attributes.

    NOTE(review): reconstructed from a corrupted span -- the original def
    header and loop line were truncated to "et, 'es_asic'):".  The function
    name is inferred from the install_configuration_update(1391,
    update_1390_to_1391) registration; confirm against a pristine copy.
    """
    for obj in all_objects(set, 'es_asic'):
        remove_attr(obj, "bar3_CT")
        remove_attr(obj, "page_buffer_crc")
def update_1378_to_1379(set):
    """Grow the mpc8641-pic P_CTPR attribute to its new 32-entry size."""
    for pic in all_objects(set, 'mpc8641-pic'):
        padding = [0] * (32 - len(pic.P_CTPR))
        pic.P_CTPR = pic.P_CTPR + padding
def update_1377_to_1378(set):
    """Drop attributes that became obsolete in build 1378."""
    for pcie in all_objects(set, 'mpc8641-pcie'):
        remove_attr(pcie, "pci_config_device_id")
    mv_objs = []
    for cls in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        mv_objs += all_objects(set, cls)
    for mv in mv_objs:
        remove_attr(mv, "regs_port_GoodOctetsReceived")
        remove_attr(mv, "regs_port_GoodOctetsSent")
    for asic in all_objects(set, "es_asic"):
        remove_attr(asic, "bar3_REVTO")
def add_pex8111_irq_level(set, obj):
    """Give a pex8111 bridge the irq_level attribute introduced in build 1368."""
    setattr(obj, 'irq_level', 4)
def update_8x4x_rapidio_1371(set, obj):
    """Remove the obsolete outbound/inbound message-unit registers (build 1371)."""
    obsolete = ("OMR OSR ODQDPAR OSAR ODPR ODATR ODCR ODQEPAR"
                " IMR ISR IFQDPAR IDQEPAR IFQEPAR").split()
    for reg in obsolete:
        remove_attr(obj, "regs_" + reg)
def update_1370_to_1371(set):
    """Apply the 1371 rapidio register cleanup to 8641/8548 rapidio controllers."""
    rapidio_objs = (all_objects(set, 'mpc8641-rapidio') +
                    all_objects(set, 'mpc8548-rapidio'))
    for rio in rapidio_objs:
        update_8x4x_rapidio_1371(set, rio)
def update_1367_to_1368(set):
    """Add the new irq_level attribute to every pex8111 bridge."""
    for_all_objects(set, 'pex8111', add_pex8111_irq_level)
def update_1366_to_1367(set):
    """Promote the partial IDMA interrupt-cause attribute to its full name."""
    for cls in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, cls):
            rename_attr(mv, 'regs_IDMA_Interrupt_Cause',
                        'partial_regs_IDMA_Interrupt_Cause')
def update_8x4x_rapidio_1366(set, obj):
    """Give rapidio controllers an inbound memory-space; widen the E* registers."""
    if not hasattr(obj, "inbound_space"):
        space = pre_conf_object(obj.name + "_inbound_space", 'memory-space')
        set[obj.name + '_inbound_space'] = space
        setattr(obj, "inbound_space", space)
    if obj.classname in ('mpc8641-rapidio', 'mpc8548-rapidio'):
        # scalar extended registers become [value, 0] pairs under regs_M_*
        for reg in "EODQEPAR EOSAR EODQDPAR EIFQEPAR EIFQDPAR".split():
            if hasattr(obj, "regs_"+reg):
                setattr(obj, "regs_M_"+reg, [ getattr(obj, "regs_"+reg), 0 ])
                delattr(obj, "regs_"+reg)
def update_1365_to_1366(set):
    """1365->1366: duart becomes NS16550; rapidio/i2c/MV64xxx attribute fixes."""
    for obj in all_objects(set, 'mpc8641-duart'):
        obj.__class_name__ = 'NS16550'
    for obj in (all_objects(set, 'mpc8641-rapidio') +
                all_objects(set, 'mpc8540-rapidio') +
                all_objects(set, 'mpc8548-rapidio')):
        update_8x4x_rapidio_1366(set, obj)
    for obj in (all_objects(set, 'mpc8641-i2c') +
                all_objects(set, 'mpc8540-i2c') +
                all_objects(set, 'mpc8548-i2c')):
        # unconditional delattr: raises if missing (unlike remove_attr)
        delattr(obj, 'i2c_device_state')
    for o in (all_objects(set, "MV64360") +
              all_objects(set, "MV64460") +
              all_objects(set, "MV64470") +
              all_objects(set, "MV64470-EC")):
        rename_attr(o, 'partial_regs_IDMA_Interrupt_Mask', 'regs_IDMA_Interrupt_Mask')
def update_1364_to_1365(set):
    """Drop every all-uppercase (register) attribute from mpc8641-gu objects."""
    for gu in all_objects(set, 'mpc8641-gu'):
        upper_attrs = [a for a in dir(gu) if a.isupper()]
        for a in upper_attrs:
            delattr(gu, a)
def update_1363_to_1364(set):
    """Make processor_number unique across all processors.

    Finds the highest processor_number in use, then reassigns every duplicate
    to a fresh number above that maximum; the first occurrence of each number
    keeps it.
    """
    max_cpu_num = -1
    for c in all_objects_with_attr(set, 'processor_number'):
        if c.processor_number > max_cpu_num:
            max_cpu_num = c.processor_number
    next_cpu_num = max_cpu_num + 1
    taken_nums = {}
    for c in all_objects_with_attr(set, 'processor_number'):
        # FIX: dict.has_key() is obsolete (removed in Python 3); the `in`
        # operator behaves identically in Python 2 and 3.
        if c.processor_number in taken_nums:
            c.processor_number = next_cpu_num
            next_cpu_num += 1
        else:
            taken_nums[c.processor_number] = True
def rename_mv_pci_access_control_attr(obj):
    """Drop the six banks of obsolete PCI access-control attributes."""
    patterns = ('regs_pci_bus_PCI_Access_Control_Base_%d_L',
                'regs_pci_bus_PCI_Access_Control_Base_%d_H',
                'regs_pci_bus_PCI_Access_Control_Size_%d')
    for bank in range(6):
        for pattern in patterns:
            remove_attr(obj, pattern % bank)
def update_1361_to_1362(set):
    """Remove the obsolete PCI access-control registers from every MV64xxx."""
    for cls in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, cls):
            rename_mv_pci_access_control_attr(mv)
def remap_pq2(set, mem_map):
    """Zero the offset of PQ2 on-chip device mappings that used old fixed offsets.

    For every map entry of an mpc8260/8270/8280 sub-device whose
    (class-suffix, function) pair still carries its pre-1359 internal
    memory-map offset, reset the offset field to 0 (offsets are handled by
    the device models from 1359 on).
    """
    # "<class-suffix><function>" -> old offset inside the internal memory map
    remap = {'clocks0':     0x10c80,
             'brg0':        0x119f0,
             'cpm-mux0':    0x11b00,
             'cpm-timers0': 0x10d80,
             'cpm0':        0x119c0,
             'fcc0':        0x11300,
             'i2c_dev1':    0x08afc,
             'ic0':         0x10c00,
             'io-port0':    0x10d00,
             'mc0':         0x10100,
             'mcc0':        0x11b30,
             'pci0':        0x10430,
             'pci1':        0x101ac,
             'scc0':        0x11a00,
             'sdma0':       0x11018,
             'si0':         0x11b20,
             'sit0':        0x10220,
             'siu0':        0x10000,
             'smc0':        0x11a82,
             'spi0':        0x11aa0}
    for e in mem_map.map:
        obj = e[1]
        fun = e[2]
        ofs = e[3]
        if not obj.classname[:8] in ['mpc8260-', 'mpc8270-', 'mpc8280-']:
            continue
        key = obj.classname[8:] + str(fun)
        # FIX: has_key() is obsolete; `in` works in both Python 2 and 3.
        if key in remap and ofs == remap[key]:
            # Mutates the entry in place; mem_map.map is not reassigned,
            # which matches the original behavior.
            e[3] = 0
def update_1358_to_1359(set):
    """Clear the old fixed offsets in all PQ2 internal memory-map mappings."""
    for_all_objects(set, 'memory-space', remap_pq2)
def update_1357_to_1358(set):
    """Drop the obsolete MAC MIB counter attribute from every MV64xxx."""
    for cls in ("MV64360", "MV64460", "MV64470", "MV64470-EC"):
        for mv in all_objects(set, cls):
            remove_attr(mv, "regs_port_MAC_MIB_Counters")
def update_1354_to_1355(set):
    """Split the packed ARM scc_regs attribute into named coprocessor registers."""
    # index into the old scc_regs list -> new attribute name, per cpu class
    scc_reg_subst = {
        "armv5te": {
            0: "main_id",
            1: "cache_type",
            2: "control",
            3: "translation_table_base",
            4: "domain_access_control",
            6: "fault_status",
            7: "fault_address",
            },
        "arm966e-s": {
            0: "main_id",
            1: "tcm_size",
            2: "control",
            10: "trace_process_identifier",
            16: "configuration_control",
            17: "bist_control",
            18: "instruction_bist_address",
            19: "instruction_bist_general",
            22: "data_bist_address",
            23: "data_bist_general",
            }
        }
    for cl in ["armv5te", "arm966e-s"]:
        for o in all_objects(set, cl):
            scc_regs = getattr(o, "scc_regs")
            # FIX: .iteritems() is Python 2 only; .items() behaves the
            # same here and works in both Python 2 and 3.
            for (i, name) in scc_reg_subst[cl].items():
                setattr(o, name, scc_regs[i])
            remove_attr(o, "scc_regs")
def update_1350_to_1351(set):
    """Remove the obsolete mac_address0/1 attributes from PQ2/854x boards."""
    board_classes = ("ep8260", "sbc8260", "cpp8260",
                     "gda8540", "mpc8540ads")
    for cls in board_classes:
        for board in all_objects(set, cls):
            remove_attr(board, "mac_address0")
            remove_attr(board, "mac_address1")
def update_1348_to_1349(set):
    """Rename the sx_asic SRC* startup registers to their new GPIO* names."""
    renames = (('startup_GPIOOUT', 'startup_SRCRST'),
               ('startup_GPIOIN', 'startup_SRCLSRI'),
               ('startup_GPIOCR', 'startup_SRCRSTSTAT'),
               ('startup_GPIOINT', 'startup_SRCRSTRSN'))
    for asic in all_objects(set, "sx_asic"):
        for new_name, old_name in renames:
            rename_attr(asic, new_name, old_name)
def update_1340_to_1341(set):
    """Attach hypersim matchers to their cpu; purge their pending step events."""
    for o in all_objects(set, "hypersim-pattern-matcher"):
        o.cpus = [o.queue]
    for obj in set.values():
        try:
            SIM_get_class(obj.classname)
        except SimExc_General, msg:
            continue  # class not available in this session
        if 'processor' not in sim.classes[obj.classname].interfaces:
            continue
        # drop pending "do pattern match" events from each processor's queue
        sq = obj.step_queue
        nsq = []
        for e in sq:
            if e[1] != "do pattern match":
                nsq.append(e)
        obj.step_queue = nsq
def update_1334_to_1335(set):
    """Rename debugctlmsr to ia32_debugctl on every x86 processor that has it."""
    for cls in x86_classes:
        for cpu in all_objects(set, cls):
            if hasattr(cpu, "debugctlmsr"):
                rename_attr(cpu, "ia32_debugctl", "debugctlmsr")
def update_1332_to_1333(set):
    """1332->1333: rename x86 MSR attributes, convert shared_state to threads, type apics."""
    # [old attribute name, new canonical name]
    msr_translate = [["msr_pat" , "ia32_cr_pat"],
                     ["msr_syscfg" , "syscfg"],
                     ["msr_top_mem" , "top_mem"],
                     ["msr_top_mem2" , "top_mem2"],
                     ["msr_iorr_base0" , "iorrbase0"],
                     ["msr_iorr_base1" , "iorrbase1"],
                     ["msr_iorr_mask0" , "iorrmask0"],
                     ["msr_iorr_mask1" , "iorrmask1"],
                     ["msr_hwcr" , "hwcr"],
                     ["msr_manid" , "manid"],
                     ["msr_nb_cfg" , "nb_cfg"],
                     ["msr_fidvid_ctl" , "fidvid_ctl"],
                     ["msr_fidvid_status" , "fidvid_status"],
                     ["msr_iotrap_addr0" , "iotrap_addr0"],
                     ["msr_iotrap_addr1" , "iotrap_addr1"],
                     ["msr_iotrap_addr2" , "iotrap_addr2"],
                     ["msr_iotrap_addr3" , "iotrap_addr3"],
                     ["msr_iotrap_ctl" , "iotrap_ctl"],
                     ["msr_smm_base" , "smm_base"],
                     ["msr_smm_addr" , "smm_addr"],
                     ["msr_smm_mask" , "smm_mask"],
                     ["mcg_status" , "ia32_mcg_status"],
                     ["mcg_ctl" , "ia32_mcg_ctl"],
                     ["sysenter_cs" , "ia32_sysenter_cs"],
                     ["sysenter_eip" , "ia32_sysenter_eip"],
                     ["sysenter_esp" , "ia32_sysenter_esp"],
                     ["p5_mc_addr" , "ia32_p5_mc_addr"],
                     ["p5_mc_type" , "ia32_p5_mc_type"]]
    # hammer/k7 never had the p5 machine-check MSRs: drop instead of rename
    for cl in ["x86-hammer", "x86-k7"]:
        for o in all_objects(set, cl):
            if "p5_mc_addr" in dir(o):
                remove_attr(o, "p5_mc_addr")
            if "p5_mc_type" in dir(o):
                remove_attr(o, "p5_mc_type")
    for cl in x86_classes:
        for o in all_objects(set, cl):
            if "started" in dir(o):
                remove_attr(o, "started")
            for p in msr_translate:
                old,new = p
                if old in dir(o):
                    rename_attr(o, new, old)
    # group cpus that shared state; each group becomes a 'threads' list
    shared_state_sets = {}
    for cl in x86_classes:
        for o in all_objects(set, cl):
            if "shared_state" in dir(o):
                if o.shared_state:
                    try:
                        shared_state_sets[o.shared_state].append(o)
                    except:
                        # first time: the shared-state owner heads the list
                        shared_state_sets[o.shared_state] = [o.shared_state, o]
                remove_attr(o, "shared_state")
    for k in shared_state_sets.keys():
        for o in shared_state_sets[k]:
            o.threads = shared_state_sets[k]
    # the thermal-sensor LVT entry only exists on P4-style apics
    for o in all_objects(set, "apic"):
        if "lvt_thermal_sensor" in dir(o):
            o.apic_type = "P4"
            o.version = 0x14
        else:
            o.apic_type = "P6"
            o.version = 0x18
def update_1329_to_1330(set):
    """Recover the missing 'ic' reference on ppc403gcx-cfg from the dcr space map."""
    for cfg in all_objects(set, "ppc403gcx-cfg"):
        if not "ic" in dir(cfg):
            for m in cfg.cpu.dcr_space.map:
                if m[1].classname == "ppc403gcx-ic":
                    cfg.ic = m[1]
                    break
def update_pq2_attrs_1329(set):
    """Derive the new PQ2 tx-enable attributes and retire the txbd monitors."""
    for o in (all_objects(set, "mpc8260-cpm") +
              all_objects(set, "mpc8270-cpm") +
              all_objects(set, "mpc8280-cpm")):
        if not "queue" in dir(o):
            o.queue = o.mcc1.queue
    for o in all_objects(set, "mpc8260-fcc-atm") + all_objects(set, "mpc8280-fcc-atm"):
        o.tx_channels_active = [x[0] for x in o.tx_channels_active]
        if "fcc" in dir(o):
            # `not not` normalizes the tx-enable bit (GFMR bit 4) to 0/1
            o.ram_tx_enabled = not not (o.fcc.reg_GFMR & (1 << 4))
        else:
            o.ram_tx_enabled = 0
    for o in (all_objects(set, "mpc8260-fcc-fast-ethernet") +
              all_objects(set, "mpc8270-fcc-fast-ethernet") +
              all_objects(set, "mpc8280-fcc-fast-ethernet")):
        remove_attr(o, "txbd_monitor")
        if "fcc" in dir(o):
            o.tx_enabled = not not (o.fcc.reg_GFMR & (1 << 4))
        else:
            o.tx_enabled = 0
    for o in (all_objects(set, "mpc8260-scc-uart") +
              all_objects(set, "mpc8270-scc-uart") +
              all_objects(set, "mpc8280-scc-uart")):
        remove_attr(o, "txbd_monitor")
        if "scc" in dir(o):
            o.ram_tx_enabled = not not (o.scc.reg_GSMR_L & (1 << 4))
        else:
            o.ram_tx_enabled = 0
    for o in (all_objects(set, "mpc8260-smc-uart") +
              all_objects(set, "mpc8270-smc-uart") +
              all_objects(set, "mpc8280-smc-uart")):
        remove_attr(o, "txbd_monitor")
        if "smc" in dir(o):
            o.ram_tx_enabled = not not (o.smc.reg_SMCMR & (1 << 1))
        else:
            o.ram_tx_enabled = 0
    # the txbd-monitor helper classes are gone entirely
    remove_class(set, "mpc8260-txbd-monitor")
    remove_class(set, "mpc8270-txbd-monitor")
    remove_class(set, "mpc8280-txbd-monitor")
def update_1328_to_1329(set):
    """Build 1329 only needs the PQ2 attribute cleanup."""
    update_pq2_attrs_1329(set)
def update_mdio_attrs(set):
    """Strip the obsolete MII/NVRAM/serial bit-bang state from every object."""
    obsolete = ('mii_nvram_read_bit', 'mii_nvram_last_clock', 'mii_nvram_addr',
                'mii_nvram_data_in', 'mii_nvram_op', 'mii_nvram_word',
                'mii_nvram_in_size', 'nvram_read_bit', 'nvram_last_clock',
                'nvram_addr', 'nvram_data_in', 'nvram_in_size', 'nvram_op',
                'nvram_word', 'serial_reg', 'serial_op', 'serial_addr',
                'serial_word', 'serial_read_bit', 'serial_in_size')
    for x in set.values():
        for name in obsolete:
            remove_attr(x, name)
def update_1327_to_1328(set):
    """1327->1328: MV64xxx attr rename, mdio cleanup, x86 halt/interrupt state rework."""
    for cpu in all_objects(set, "MV64360") + all_objects(set, "MV64470"):
        rename_attr(cpu, 'partial_regs_pci_bus_PCI_Configuration_Data', 'regs_pci_bus_PCI_Configuration_Data')
    update_mdio_attrs(set)
    for cls in x86_classes:
        for obj in all_objects(set, cls):
            # in_halt_state becomes the new activity_state (1 == halted)
            in_halt_state = getattr(obj, "in_halt_state")
            activity_state = 0
            if in_halt_state:
                activity_state = 1
            setattr(obj, "activity_state", activity_state)
            remove_attr(obj, "in_halt_state")
            if hasattr(obj, "pending_device"):
                remove_attr(obj, "pending_device")
            if hasattr(obj, "pni_enabled"):
                rename_attr(obj, "cpuid_sse3", "pni_enabled")
            # a queued "release temporary interrupt mask" event becomes the
            # new temporary_interrupt_mask attribute
            q = getattr(obj, "step_queue")
            has_interrupt_mask = 0
            for e in q:
                if e[1] == "release temporary interrupt mask":
                    has_interrupt_mask = 1
            q = filter(lambda a: a[1] != "release temporary interrupt mask", q)
            setattr(obj, "step_queue", q)
            temp_mask = 0
            if has_interrupt_mask:
                temp_mask = 1
            setattr(obj, "temporary_interrupt_mask", temp_mask)
            if (hasattr(obj, "pending_debug_exceptions") and
                getattr(obj, "pending_debug_exceptions")):
                setattr(obj, "pending_debug_exception", 1)
            rename_attr(obj, "pending_debug_exception_dr6", "pending_debug_exceptions")
def mv_for_all_objects(set, classname, function, mv_obj):
    """Like for_all_objects, but passes the extra mv_obj argument to function."""
    for obj in all_objects(set, classname):
        function(set, obj, mv_obj)
def replace_mv64xxx_gbe_ptr_1326(set, gbe_obj):
    """Redirect references to a removed MV64xxx-gbe object to its parent MV64xxx.

    Rewrites memory-space map entries and BCM5421S phy 'mac' pointers that
    still target the obsolete gbe sub-object.
    """
    def update_mv64xxx_gbe_maps(set, space, mv_obj):
        # patch every map entry whose target is a gbe object
        try:
            maplist = space.map
        except:
            return
        if len([x for x in maplist if (x[1].classname == 'MV64360-gbe' or x[1].classname == 'MV64470-gbe')]) == 0:
            return
        for i in range(len(maplist)):
            if (maplist[i][1].classname == 'MV64360-gbe' or maplist[i][1].classname == 'MV64470-gbe'):
                maplist[i][1] = mv_obj
        space.map = maplist
    def update_mv64xxx_phys(set, phy, mv_obj):
        # point the phy's MAC reference at the parent controller
        if (phy.mac.classname == 'MV64360-gbe' or phy.mac.classname == 'MV64470-gbe'):
            phy.mac = mv_obj
    # BUG FIX: the original used name.strip('_gbe'), which strips any of the
    # characters '_', 'g', 'b', 'e' from BOTH ends and can mangle names such
    # as 'gbe0_gbe'.  Remove the literal '_gbe' suffix instead.
    parent_name = gbe_obj.name
    if parent_name.endswith('_gbe'):
        parent_name = parent_name[:-len('_gbe')]
    try:
        mv_obj = set[parent_name]
    except:
        return
    mv_for_all_objects(set, "memory-space", update_mv64xxx_gbe_maps, mv_obj)
    mv_for_all_objects(set, "BCM5421S", update_mv64xxx_phys, mv_obj)
def copy_mv64xxx_attrs_1326(set, gbe_obj):
    """Copy every MV64360-compatible attribute from the old gbe object to its parent."""
    # BUG FIX: the original used name.strip('_gbe'), a character strip rather
    # than suffix removal (see replace_mv64xxx_gbe_ptr_1326).
    parent_name = gbe_obj.name
    if parent_name.endswith('_gbe'):
        parent_name = parent_name[:-len('_gbe')]
    try:
        mv_obj = set[parent_name]
    except:
        return
    SIM_get_class('MV64360')  # make sure the class attribute list is loaded
    mv_attrs = sim.classes['MV64360'].attributes
    for gbe_attr in dir(gbe_obj):
        if gbe_attr[0:2] == "__":
            continue  # skip Python internals
        if gbe_attr in mv_attrs:
            # FIX: setattr/getattr instead of a Python-2-only `exec`
            # statement on a format string.
            setattr(mv_obj, gbe_attr, getattr(gbe_obj, gbe_attr))
def update_mv64xxx_pci_1326(set, obj):
    """Mark an MV64xxx PCI function as a multi-function device (header type 0x80)."""
    obj.pci_config_header_type = 0x80
def update_system_cmp_object_list_1326(set, system_classname, obj_classname):
    """Drop every '*_gbe' entry from matching system components' object_list."""
    # obj_classname is accepted but unused, preserving the callers' contract.
    for obj in all_objects(set, system_classname):
        doomed = [key for key in obj.object_list if key[-4:] == "_gbe"]
        for key in doomed:
            del obj.object_list[key]
def update_rtc_time_1326(set, obj):
    """Normalize rtc_time to 'YYYY-MM-DD HH:MM:SS TZ'; default the zone to UTC.

    If the stored value does not parse with an explicit time zone, truncate
    it to the date+time part and append ' UTC'.
    """
    import time
    val = getattr(obj, "rtc_time")
    try:
        time.strptime(val, '%Y-%m-%d %H:%M:%S %Z')
    except Exception:
        # FIX: was the Python-2-only form `except Exception, msg:`; the
        # message was never used, so no name binding is needed.
        val = val[:len("yyyy-mm-dd HH:MM:SS")] + " UTC"
    setattr(obj, "rtc_time", val)
def update_1326_to_1327(set):
    """Fold the obsolete MV64xxx-gbe sub-objects into their parents (build 1327)."""
    gbe_classes = ('MV64360-gbe', 'MV64470-gbe')
    for cls in gbe_classes:
        for_all_objects(set, cls, replace_mv64xxx_gbe_ptr_1326)
    for cls in gbe_classes:
        for_all_objects(set, cls, copy_mv64xxx_attrs_1326)
    for cls in gbe_classes:
        remove_class(set, cls)
    for board, cls in (('sbc750gx-board', 'MV64360-gbe'),
                       ('daredevil-board', 'MV64470-gbe'),
                       ('atlantis-board', 'MV64360-gbe')):
        update_system_cmp_object_list_1326(set, board, cls)
    # every PCI function of both controller families is now multi-function
    for mv in ('MV64360', 'MV64470'):
        for fn in range(5):
            for_all_objects(set, '%s-pci-f%d' % (mv, fn),
                            update_mv64xxx_pci_1326)
    for_all_objects(set, 'x86-apic-system', update_rtc_time_1326)
def update_etherlink_1321(set, link):
    """Ensure every device on an ethernet-link listens to the broadcast address."""
    bcast = ["ff:ff:ff:ff:ff:ff", "ff:ff:ff:ff:ff:ff"]
    for entry in link.devices:
        listen_macs = entry[4][0]
        if bcast not in listen_macs:
            listen_macs.append(bcast)
def rename_piix4_usb_1322(set, obj):
    """Replace piix4_usb with the dummy class introduced in build 1322."""
    setattr(obj, '__class_name__', 'piix4_usb_dummy')
def rename_ppc440gx_obp_1322(set, obj):
    """Fix the misspelled class name: ppc440gx-obp becomes ppc440gx-opb."""
    setattr(obj, '__class_name__', 'ppc440gx-opb')
def update_1321_to_1322(set):
    """Rename the misspelled ppc440gx-obp class and stub out piix4_usb."""
    for classname, fixup in (('ppc440gx-obp', rename_ppc440gx_obp_1322),
                             ('piix4_usb', rename_piix4_usb_1322)):
        for_all_objects(set, classname, fixup)
def update_1320_to_1321(set):
    """1320->1321: per-function MV64xxx pci classes, phys_mem alias, broadcast macs."""
    objs = all_objects(set, 'MV64360-pci') + all_objects(set, 'MV64470-pci')
    for obj in objs:
        # one class per PCI function from now on, e.g. MV64360-pci-f0
        obj.__class_name__ = obj.classname + "-f%d" % obj.function
        remove_attr(obj, "function")
    objs = ( all_objects(set, 'x86-system')
             + all_objects(set, 'x86-apic-bus-system')
             + all_objects(set, 'x86-apic-system')
             + all_objects(set, 'x86-separate-mem-io-system'))
    for obj in objs:
        # object_list is dict-like: alias phys_mem to the existing pci_mem
        obj.object_list['phys_mem'] = obj.object_list['pci_mem']
    for_all_objects(set, "ethernet-link", update_etherlink_1321)
def update_1318_to_1319(set):
    """Give every top-level component lacking a cpu_list some processor from the set."""
    cpu = None
    patch_list = []
    for obj in set.values():
        try:
            SIM_get_class(obj.classname)
        except:
            continue  # unknown class in this session
        if 'processor' in sim.classes[obj.classname].interfaces:
            cpu = obj
        elif 'component' in sim.classes[obj.classname].interfaces:
            try:
                if obj.top_level and not hasattr(obj, 'cpu_list'):
                    patch_list += [obj]
            except:
                pass
    for obj in patch_list:
        # NOTE(review): 'cpu' is whichever processor was iterated last, not
        # necessarily one belonging to this component.
        obj.cpu_list = [cpu]
def update_1317_to_1318(set):
    """Drop the user-level register aliases from every PPC processor."""
    reg_aliases = ['ubamr', 'uctrl', 'ummcr0', 'ummcr1', 'ummcr2', 'ummcra',
                   'ummcrh', 'upmc1', 'upmc2', 'upmc3', 'upmc4', 'upmc5',
                   'upmc6', 'upmc7', 'upmc8', 'usdar', 'usiar', 'usprg3',
                   'usprg4', 'usprg5', 'usprg6', 'usprg7', 'utbl', 'utbu',
                   'utrace']
    for obj in set.values():
        if obj.classname not in ppc_classes:
            continue
        for alias in reg_aliases:
            remove_attr(obj, alias)
def update_ppc440_pci_1316(set, space):
    """Zero the offset field of every ppc440gp-pci function-1 mapping in space."""
    try:
        maplist = space.map
    except:
        return  # not a mapped space
    if len([e for e in maplist if e[1].classname == 'ppc440gp-pci']) == 0:
        return
    for e in maplist:
        if e[1].classname == 'ppc440gp-pci' and e[2] == 1:
            e[3] = 0
    space.map = maplist
def update_1316_to_1317(set):
    """Zero ppc440gp-pci fn-1 map offsets; convert 'registered' to module_type."""
    for_all_objects(set, "memory-space", update_ppc440_pci_1316)
    objs = (all_objects(set, 'ddr2-memory-module')
            + all_objects(set, 'ddr-memory-module')
            + all_objects(set, 'sdram-memory-module'))
    for obj in objs:
        # registered DIMM -> RDIMM, unbuffered -> UDIMM
        if obj.registered:
            obj.module_type = "RDIMM"
        else:
            obj.module_type = "UDIMM"
        remove_attr(obj, 'registered')
def update_1304_to_1305(set):
    """The le-permissions class is obsolete; drop all of its objects."""
    remove_class(set, 'le-permissions')
def update_tlb_1302_970(set, cpu):
    """Convert 970fx TLB entries: fields 4 and 5 move to the end, originals zeroed."""
    converted = cpu.tlb
    for way in converted:
        for entry in way:
            entry.append(entry[4])
            entry.append(entry[5])
            entry[4] = 0
            entry[5] = 0
    cpu.tlb = converted
def update_1302_to_1303(set):
    """Rewrite the TLB layout of every ppc970fx processor."""
    for_all_objects(set, "ppc970fx", update_tlb_1302_970)
def update_pending_exceptions_1301(set, cpu, table, excvec_bits):
    """Convert the pending_exceptions bitmask into a list of exception names.

    Bit (excvec_bits - 1 - i) of the mask -- i.e. MSB first -- corresponds
    to table[i].
    """
    mask = cpu.pending_exceptions
    names = [table[i] for i in range(excvec_bits)
             if (mask >> (excvec_bits - 1 - i)) & 1]
    cpu.pending_exceptions = names
def update_pending_exceptions_1301_4xx(set, cpu):
    """PPC 4xx variant: 32-bit exception vector, names in architectural order."""
    table = ["Critical_Input", "Machine_check", "DSI", "ISI",
             "External_interrupt", "Alignment", "Program", "System_call",
             "PIT", "FIT", "Watchdog", "Data_TLB_miss", "Instruction_TLB_miss",
             "Debug"]
    update_pending_exceptions_1301(set, cpu, table, 32)
def update_pending_exceptions_1301_booke(set, cpu):
    """Book E variant: 64-bit exception vector (bits 16-31 reserved)."""
    table = ["Critical_interrupt", "Machine_check", "DSI", "ISI",
             "External_interrupt", "Alignment", "Program",
             "Floating-point_unavailable", "System_call",
             "Auxiliary_processor_unavailable", "Decrementer", "FIT",
             "Watchdog", "Data_TLB_miss", "Instruction_TLB_miss", "Debug",
             "reserved_16", "reserved_17", "reserved_18", "reserved_19",
             "reserved_20", "reserved_21", "reserved_22", "reserved_23",
             "reserved_24", "reserved_25", "reserved_26", "reserved_27",
             "reserved_28", "reserved_29", "reserved_30", "reserved_31",
             "SPE_APU_unavailable", "SPE_floating-point_data",
             "SPE_floating-point_round", "Performance_monitor"]
    update_pending_exceptions_1301(set, cpu, table, 64)
def update_pending_exceptions_1301_750(set, cpu):
table = ["Reserved", "System_reset", "Machine_check", "Data_storage",
"Data_segment", "Instruction_storage", "Instruction_segment",
"External_interrupt", "Alignment", "Program",
"Floating-point_unavailable", "Decrementer", "Reserved_a",
"Reserved_b", "System_call", "Trace", "Reserved_e",
"Performance_monitor", "Altivec_Unavailable",
"Instruction_Tlb_miss", "Data_Tlb_Load_miss",
"Data_Tlb_Store_miss", "Instruction_address_breakpoint",
"System_management_interrupt", "Reserved_15", "Altivec_Assist",
"Thermal_management_interrupt"]
update_pending_exceptions_1301(set, cpu, table, 32)
def update_add_ftp_alg_in(set, forward_in_obj):
    """Create (or reuse) an ftp-alg object for an incoming port-forward server.

    Links the ALG to both forwarding directions and removes the obsolete
    forward_handler attribute from the incoming server.
    NOTE: dict.has_key() means this file targets Python 2.
    """
    sn = forward_in_obj.tcp
    forward_out_obj = forward_in_obj.forward_handler
    alg_name = sn.name + "_ftp_alg"
    if set.has_key(alg_name):
        alg_obj = set[alg_name]
    else:
        alg_obj = pre_conf_object(alg_name, "ftp-alg")
        set[alg_name] = alg_obj
    alg_obj.forward_handler = forward_out_obj
    alg_obj.incoming_handler = forward_in_obj
    forward_out_obj.algs = [alg_obj]
    forward_in_obj.algs = [alg_obj]
    remove_attr(forward_in_obj, "forward_handler")
# Module-level scratch state shared by the PCMCIA 1301 migration helpers
# below; filled in by update_pcmcia_1301() / update_pcmcia_mappings().
pcmcia_dev = None
slot0_att = None
slot1_att = None
slot0_cmn = None
slot1_cmn = None
def update_pcmcia_1301_map(set, space):
    """Rewrite memory-space mappings that target the old PCMCIA device.

    Map-function codes 2-5 are redirected to the per-slot attribute/common
    spaces created by update_pcmcia_mappings(), and the function code is
    translated through map_functions (255 is left unchanged).
    Relies on the module globals set up by update_pcmcia_1301().
    """
    try:
        maplist = space.map
    except:
        # space has no map attribute -- nothing to rewrite
        return
    if len([x for x in maplist if x[1] == pcmcia_dev]) == 0:
        return
    newlist = []
    # old function code -> new function offset
    # NOTE(review): codes other than 2-5/255 would index past this table --
    # presumably they never occur in old checkpoints; verify.
    map_functions = [0, 0x100, 0x200, 0x210, 0x300, 0x310]
    for m in maplist:
        if m[1] == pcmcia_dev:
            if m[2] == 2:
                m[1] = slot0_att
            elif m[2] == 3:
                m[1] = slot1_att
            elif m[2] == 4:
                m[1] = slot0_cmn
            elif m[2] == 5:
                m[1] = slot1_cmn
            if m[2] != 255:
                m[2] = map_functions[m[2]]
        newlist.append(m)
    space.map = newlist
def update_pcmcia_mappings(set, obj, slot):
    """Build per-slot common/attribute memory spaces for an old PCMCIA device.

    Creates '<ide>_cmn' and '<ide>_att' memory-spaces plus a CIS ROM with a
    backing image, maps the IDE registers into the common space and the CIS
    into the attribute space, and records the spaces both on the device and
    in the module globals used by update_pcmcia_1301_map().

    Fix: the CIS rom/image objects were registered in the configuration set
    under keys without the '_' separator ('<ide>cis'), disagreeing with the
    pre_conf_object names ('<ide>_cis'); everywhere else in this file the
    set key equals the object name.
    """
    global slot0_att, slot1_att, slot0_cmn, slot1_cmn
    if slot == 0:
        ide = obj.slot0_ata
    else:
        ide = obj.slot1_ata
    slot_cmn = pre_conf_object(ide.name + '_cmn', "memory-space")
    slot_att = pre_conf_object(ide.name + '_att', "memory-space")
    set[ide.name + '_cmn'] = slot_cmn
    set[ide.name + '_att'] = slot_att
    cis_image = pre_conf_object(ide.name + '_cis_image', "image")
    cis_image.size = 768
    cis = pre_conf_object(ide.name + '_cis', "rom")
    cis.image = cis_image
    # register under keys matching the object names (was '<ide>cis' /
    # '<ide>cis_image', missing the underscore)
    set[ide.name + '_cis'] = cis
    set[ide.name + '_cis_image'] = cis_image
    # IDE task-file registers at 0/0xe, mirrored every 2 bytes at 0x400-0x7ff
    slot_cmn.map = [
        [0, ide, 0, 0, 8],
        [0xe, ide, 0, 8, 1]]
    for i in range(0x400, 0x800, 2):
        slot_cmn.map.append([i, ide, 0, 0x0, 0x2])
    slot_att.map = [[0x0, cis, 0, 0, 0x300]]
    if slot == 0:
        remove_attr(obj, 'slot0_ata')
        remove_attr(obj, 'slot0_cis')
        obj.slot0_spaces = [slot_att, slot_cmn, slot_cmn]
        slot0_att = slot_att
        slot0_cmn = slot_cmn
    else:
        remove_attr(obj, 'slot1_ata')
        remove_attr(obj, 'slot1_cis')
        obj.slot1_spaces = [slot_att, slot_cmn, slot_cmn]
        slot1_att = slot_att
        slot1_cmn = slot_cmn
# Default PCMCIA Card Information Structure (CIS) tuple data for an ATA
# flash card ("Viking ATA Flash Card" vendor strings are embedded in the
# bytes).  Written into each slot's attribute memory, one byte at every
# second address, by add_pcmcia_cis_1301().
ide_cis = (
    0x01, 0x03, 0xd9, 0x01, 0xff, 0x1c, 0x04, 0x03, 0xd9, 0x01, 0xff, 0x18,
    0x02, 0xdf, 0x01, 0x20, 0x04, 0x01, 0x4e, 0x00, 0x02, 0x15, 0x2b, 0x04,
    0x01, 0x56, 0x69, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x41, 0x54, 0x41, 0x20,
    0x46, 0x6c, 0x61, 0x73, 0x68, 0x20, 0x43, 0x61, 0x72, 0x64, 0x20, 0x20,
    0x20, 0x20, 0x00, 0x53, 0x54, 0x4f, 0x52, 0x4d, 0x20, 0x20, 0x00, 0x53,
    0x54, 0x42, 0x4d, 0x30, 0x00, 0xff, 0x21, 0x02, 0x04, 0x01, 0x22, 0x02,
    0x01, 0x01, 0x22, 0x03, 0x02, 0x04, 0x5f, 0x1a, 0x05, 0x01, 0x03, 0x00,
    0x02, 0x0f, 0x1b, 0x0b, 0xc0, 0x40, 0xa1, 0x27, 0x55, 0x4d, 0x5d, 0x75,
    0x08, 0x00, 0x21, 0x1b, 0x06, 0x00, 0x01, 0x21, 0xb5, 0x1e, 0x4d, 0x1b,
    0x0d, 0xc1, 0x41, 0x99, 0x27, 0x55, 0x4d, 0x5d, 0x75, 0x64, 0xf0, 0xff,
    0xff, 0x21, 0x1b, 0x06, 0x01, 0x01, 0x21, 0xb5, 0x1e, 0x4d, 0x1b, 0x12,
    0xc2, 0x41, 0x99, 0x27, 0x55, 0x4d, 0x5d, 0x75, 0xea, 0x61, 0xf0, 0x01,
    0x07, 0xf6, 0x03, 0x01, 0xee, 0x21, 0x1b, 0x06, 0x02, 0x01, 0x21, 0xb5,
    0x1e, 0x4d, 0x1b, 0x12, 0xc3, 0x41, 0x99, 0x27, 0x55, 0x4d, 0x5d, 0x75,
    0xea, 0x61, 0x70, 0x01, 0x07, 0x76, 0x03, 0x01, 0xee, 0x21, 0x1b, 0x06,
    0x03, 0x01, 0x21, 0xb5, 0x1e, 0x4d, 0x14)
def add_pcmcia_cis_1301(arg, ini_obj):
    """One-shot hap callback: write the default CIS into both PCMCIA slots.

    Installed by update_pcmcia_1301(); runs once after the configuration has
    been loaded and then deletes itself.  'arg' is the PCMCIA device name.
    Slots whose space list has a single entry carry no card and are skipped.

    Fix: the byte-index loop reused 'i', shadowing the slot index of the
    outer loop -- harmless in Python but fragile; renamed to 'j'.
    """
    obj = SIM_get_object(arg)
    spaces = [obj.slot0_spaces, obj.slot1_spaces]
    for i in (0, 1):
        if len(spaces[i]) == 1:
            continue
        attr = spaces[i][0]
        # CIS bytes occupy every second address of the attribute space
        for j in range(len(ide_cis)):
            attr.iface.memory_space.write(attr, None,
                                          j * 2, (ide_cis[j], ), 1)
        attr.iface.memory_space.write(attr, None,
                                      0x204, (0x2e, ), 1)
    SIM_hap_delete_callback("Core_Configuration_Loaded",
                            add_pcmcia_cis_1301, arg)
def update_pcmcia_1301(set, obj):
    """Migrate a CL-PD6729 PCMCIA controller to the 1301 object layout.

    Splits each slot into separate spaces, patches mapping windows and
    registers, rewrites all memory-space maps pointing at the device, and
    schedules the CIS data write for after configuration load.
    """
    global pcmcia_dev
    pcmcia_dev = obj
    obj.config_registers[15] = 0x00000100
    update_pcmcia_mappings(set, obj, 0)
    update_pcmcia_mappings(set, obj, 1)
    # enabled windows get their new window-type codes (3 = attribute,
    # 2 = common -- TODO confirm against the device model)
    if obj.slot0_memory_windows[0][0]:
        obj.slot0_memory_windows[0][1] = 3
    if obj.slot0_memory_windows[4][0]:
        obj.slot0_memory_windows[4][1] = 2
    if obj.slot1_memory_windows[0][0]:
        obj.slot1_memory_windows[0][1] = 3
    if obj.slot1_memory_windows[4][0]:
        obj.slot1_memory_windows[4][1] = 2
    obj.slot0_registers[1] = 0xef
    obj.slot1_registers[1] = 0xef
    for_all_objects(set, "memory-space", update_pcmcia_1301_map)
    SIM_hap_add_callback("Core_Configuration_Loaded",
                         add_pcmcia_cis_1301, obj.name)
def update_uart_1301(set, obj):
    """Give UARTs from pre-1301 checkpoints the interrupt_mask_out2 flag.

    The attribute is left untouched when the checkpoint already carries it.
    """
    if hasattr(obj, "interrupt_mask_out2"):
        return
    obj.interrupt_mask_out2 = 1
def update_x86_components_1301(set, obj):
    """Rename the 'x87[0]' component slot to 'x87' and drop num_threads."""
    if 'x87' not in obj.object_list and 'x87[0]' in obj.object_list:
        obj.object_list['x87'] = obj.object_list['x87[0]']
    if 'x87[0]' in obj.object_list:
        del obj.object_list['x87[0]']
    remove_attr(obj, 'num_threads')
def update_1301_to_1302(set):
    """Checkpoint migration 1301 -> 1302.

    Converts pending-exception bitmasks to name lists for all PPC models,
    migrates the CL-PD6729 PCMCIA layout, removes obsolete x86 'smbase',
    adds FTP ALGs to incoming port-forward servers, renames the sparc
    'cpu_group' attribute to 'irq_bus', adds the UART out2 mask, and
    fixes pentium-4 component object lists.
    """
    for_all_objects(set, "ppc403gcx", update_pending_exceptions_1301_4xx)
    for_all_objects(set, "ppc405gp", update_pending_exceptions_1301_4xx)
    for_all_objects(set, "ppc440gp", update_pending_exceptions_1301_booke)
    for_all_objects(set, "ppc440gx", update_pending_exceptions_1301_booke)
    for_all_objects(set, "ppce500", update_pending_exceptions_1301_booke)
    for_all_objects(set, "ppc603e", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7400", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7447", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7450", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc7457", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc750", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc750fx", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc750gx", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc755", update_pending_exceptions_1301_750)
    for_all_objects(set, "ppc970fx", update_pending_exceptions_1301_750)
    for_all_objects(set, "CL-PD6729", update_pcmcia_1301)
    for cls in x86_classes:
        remove_class_attr(set, cls, 'smbase')
    for_all_objects(set, "port-forward-incoming-server", update_add_ftp_alg_in)
    for cpu in (all_objects(set, "ultrasparc-ii")
                + all_objects(set, "ultrasparc-iii")
                + all_objects(set, "ultrasparc-iii-plus")
                + all_objects(set, "ultrasparc-iii-i")):
        rename_attr(cpu, 'cpu_group', 'irq_bus')
    for_all_objects(set, "NS16550", update_uart_1301)
    for_all_objects(set, "NS16450", update_uart_1301)
    for_all_objects(set, "pentium-4-cpu", update_x86_components_1301)
def update_event_queue_1300(cpu):
    """Split a pre-1300 unified event_queue into step_queue and time_queue.

    Internal bookkeeping events are dropped, slot numbers become slot
    names, '$simple_event' entries lose their object reference, and time
    events are rebased by the step of the 'Head of Time' marker.
    Relies on Sim_Queue_Step / Sim_Queue_Time being the indices 0/1 used
    by the old 'queue' field.
    """
    slot_names = ["sync", "pre-update", "update", "update2", "default",
                  "default", "assert", "event-end"]
    # events that existed only for simulator bookkeeping; not migrated
    ignore_events = set(("User breakpoint",
                         "Internal: update time counter",
                         "Internal: update step counter",
                         "Internal: renew queue",
                         "Head of Time",
                         "Deleted Event",
                         "Check for Async Events"))
    q = [[], []]
    hot_step = 0
    for (evobj, val, slot, queue, time) in cpu.event_queue:
        if evobj == "$simple_event":
            if val == "Head of Time":
                # remember the rebase offset before discarding the marker
                hot_step = time
            if val in ignore_events:
                continue
            evobj = None
        q[queue].append([evobj, val, slot_names[slot], time])
    q[Sim_Queue_Time] = [[o, v, s, t + hot_step]
                         for [o, v, s, t] in q[Sim_Queue_Time]]
    del cpu.event_queue
    cpu.step_queue = q[Sim_Queue_Step]
    cpu.time_queue = q[Sim_Queue_Time]
def update_1300_to_1301(set):
    """Checkpoint migration 1300 -> 1301: split event queues on all CPUs.

    Objects whose class is unknown to this Simics build are skipped.
    """
    for obj in set.values():
        try:
            SIM_get_class(obj.classname)
        except:
            # class not available in this build -- leave the object alone
            continue
        if 'processor' in sim.classes[obj.classname].interfaces:
            update_event_queue_1300(obj)
# State shared by the 1199 -> 1200 Simics Central migration helpers below.
create_central_client = False
remote_central = False
remote_host = None
first_queue = None
def remove_default_target_endian_1299(set, obj):
    """Drop the obsolete fifth (endian) element of a 5-element default_target.

    Objects without the attribute, or with a different-length value, are
    silently left untouched (best-effort, as in all 1299 helpers).
    """
    try:
        target = obj.default_target
        if len(target) == 5:
            obj.default_target = target[:4]
    except:
        pass
def replace_dcr_mapping_1299(set, ppc):
    """Convert a PPC 'dcr' list into a dedicated DCR memory-space.

    Groups DCR numbers by target object name, maps each group at
    dcr*4 with an offset relative to the group's first DCR, creates
    '<cpu>-dcr-space', and removes the old 'dcr' attribute.
    NOTE: dict.has_key() means this file targets Python 2.
    """
    if len(ppc.dcr):
        dcr_map = {}
        # object name -> list of DCR numbers, in checkpoint order
        for d in ppc.dcr:
            if dcr_map.has_key(d[0]):
                dcr_map[d[0]].append(d[1])
            else:
                dcr_map[d[0]] = [d[1]]
        mem_map = []
        for obj in dcr_map.keys():
            dcr_list = dcr_map[obj]
            first = dcr_list[0]
            for dcr in dcr_list:
                # 4 bytes per DCR; device offset relative to its first DCR
                mem_map += [[(dcr)*4, set[obj], 0, (dcr-first)*4, 4]]
        dcr_space = ppc.name + '-dcr-space'
        set[dcr_space] = pre_conf_object(dcr_space, 'memory-space')
        ppc.dcr_space = set[dcr_space]
        set[dcr_space].map = mem_map
    remove_attr(ppc, 'dcr')
def fix_405_uic_1299(set, uic):
    """Migrate a ppc405gp-uic to documented irq numbering (level -> 31-level).

    Rewires the UIC targets, renames its registers, and patches the irq
    levels of known connected devices (iic, pci, NS16550); other devices
    must be fixed by hand, hence the printed warnings.
    (Python 2 print statements -- this file targets Python 2.)
    """
    print "WARNING: Converting an old 405 based configuration"
    print "The interrupt controller has changed so that irq levels are according"
    print "to documentation. Will try to patch devices but there might be more"
    print "devices connected to the UIC which needs to be patched manually."
    print "Typically obj.irq_level = 31 - old_irq_level"
    uic.target = uic.irq_dev
    uic.critical_target = uic.irq_dev
    uic.target_level = 0
    uic.critical_target_level = 1
    remove_attr(uic, 'irq_dev')
    rename_attr(uic, 'UICx_CR', 'uiccr')
    rename_attr(uic, 'UICx_ER', 'uicer')
    rename_attr(uic, 'UICx_PR', 'uicvpr')
    rename_attr(uic, 'UICx_SR', 'uicsr')
    rename_attr(uic, 'UICx_TR', 'uictr')
    rename_attr(uic, 'UICx_VCR', 'uicvcr')
    # devices that reference this UIC get their levels mirrored
    for obj in all_objects(set, 'ppc405gp-iic'):
        if obj.interrupt_device == uic:
            print "Patching %s (level %d -> %d)" % (obj.name, obj.interrupt_level,
                                                    31 - obj.interrupt_level)
            obj.interrupt_level = 31 - obj.interrupt_level
    for obj in all_objects(set, 'ppc405gp-pci'):
        irqs = obj.irq_routing
        new_irq = []
        for i in irqs:
            if i[1] == uic.name:
                print "Patching %s (level %d -> %d)" % (obj.name, i[2], 31 - i[2])
                new_irq.append([i[0], i[1], 31 - i[2]])
            else:
                new_irq.append(i)
        obj.irq_routing = new_irq
    for obj in all_objects(set, 'NS16550'):
        if obj.irq_dev == uic:
            print "Patching %s (level %d -> %d)" % (obj.name, obj.interrupt_pin,
                                                    31 - obj.interrupt_pin)
            obj.interrupt_pin = 31 - obj.interrupt_pin
def remove_uic_attributes_1299(set, uic):
    """Drop the read-only UIC version/mask registers from old checkpoints."""
    remove_attr(uic, 'UICx_VR')
    remove_attr(uic, 'UICx_MSR')
def add_cpu_obj_1299(set, obj):
    """Point a DCR-mapped device at the CPU whose dcr_space maps it.

    Scans every 4xx CPU's DCR space; the first mapping that targets 'obj'
    wins for that CPU (later CPUs may overwrite obj.cpu -- presumably each
    device is mapped by exactly one CPU; verify).
    """
    cpus = all_objects(set, 'ppc405gp') + all_objects(set, 'ppc440gp') + all_objects(set, 'ppc440gx')
    for cpu in cpus:
        space = cpu.dcr_space
        map = space.map
        for m in map:
            if m[1] == obj:
                obj.cpu = cpu
                break
def change_memory_attr_1299(set, obj):
    """Resolve a string-valued 'memory' attribute into the object it names."""
    mem = getattr(obj, 'memory', None)
    if type(mem) == str:
        obj.memory = set[mem]
def change_mal_attr_1299(set, obj):
    """Resolve a string-valued 'mal' attribute into the object it names."""
    mal = getattr(obj, 'mal', None)
    if type(mal) == str:
        obj.mal = set[mal]
def change_irq_attr_1299(set, obj):
    """Resolve string object names in irq_routing entries into objects.

    Each routing entry is a list whose second element may be an object
    name; entries are patched in place.
    """
    routing = getattr(obj, 'irq_routing', None)
    if type(routing) != list:
        return
    for entry in routing:
        if type(entry[1]) == str:
            entry[1] = set[entry[1]]
def rename_ioapic_1299(set, obj):
    """Rename class I/O-APIC -> io-apic."""
    obj.__class_name__ = 'io-apic'
def rename_cheetah_plus_mmu_1299(set, obj):
    """Rename class cheetah+mmu -> cheetah-plus-mmu."""
    obj.__class_name__ = 'cheetah-plus-mmu'
def rename_ultrasparc_iii_plus_1299(set, obj):
    """Rename class ultrasparc-iii+ -> ultrasparc-iii-plus."""
    obj.__class_name__ = 'ultrasparc-iii-plus'
def rename_ultrasparc_iv_plus_1299(set, obj):
    """Rename class ultrasparc-iv+ -> ultrasparc-iv-plus."""
    obj.__class_name__ = 'ultrasparc-iv-plus'
def set_dec_srom_width_1299(set, obj):
    """Give DEC21x4x NICs the new srom_address_width attribute (6 bits)."""
    obj.srom_address_width = 6
def fix_fb_mem_1299(set, obj):
    """Give a framebuffer device a backing image object of the proper size."""
    name = "%s-image" % obj.name
    image = pre_conf_object(name, 'image')
    # per-device VRAM sizes: ragexl 8 MB, vga* 256 KB, voodoo3* 16 MB
    if obj.classname == 'ragexl':
        image.size = 0x800000
    elif obj.classname.startswith('vga'):
        image.size = 0x40000
    elif obj.classname.startswith('voodoo3'):
        image.size = 0x1000000
    set[name] = image
    obj.image = image
def update_1299_to_1300(set):
    """Checkpoint migration 1299 -> 1300.

    Adds framebuffer images, converts PPC DCR lists to DCR spaces, fixes
    405 UIC levels, links DCR devices to their CPUs, resolves string
    attributes into objects, renames several classes, strips obsolete
    attributes, and introduces the x86-reset-bus for i8042 keyboards.
    """
    for_all_objects(set, 'ragexl', fix_fb_mem_1299)
    for_all_objects(set, 'vga', fix_fb_mem_1299)
    for_all_objects(set, 'vga_pci', fix_fb_mem_1299)
    for_all_objects(set, 'voodoo3', fix_fb_mem_1299)
    for_all_objects(set, 'voodoo3-agp', fix_fb_mem_1299)
    for_all_objects(set, 'ppc403gcx', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc405gp', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc440gp', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc440gx', replace_dcr_mapping_1299)
    for_all_objects(set, 'ppc405gp-uic', fix_405_uic_1299)
    for_all_objects(set, 'ppc440gp-uic', remove_uic_attributes_1299)
    for_all_objects(set, 'ppc440gx-uic', remove_uic_attributes_1299)
    for_all_objects(set, 'ppc405gp-dma', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gp-dma', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gx-dma', add_cpu_obj_1299)
    for_all_objects(set, 'ppc405gp-ebc', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gp-ebc', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gx-ebc', add_cpu_obj_1299)
    for_all_objects(set, 'ppc405gp-mal', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gp-mal', add_cpu_obj_1299)
    for_all_objects(set, 'ppc440gx-mal', add_cpu_obj_1299)
    for_all_objects(set, 'misc-dcr', add_cpu_obj_1299)
    for_all_objects(set, 'ppc405gp-dma', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gp-dma', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gx-dma', change_memory_attr_1299)
    for_all_objects(set, 'ppc405gp-mal', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gp-mal', change_memory_attr_1299)
    for_all_objects(set, 'ppc440gx-mal', change_memory_attr_1299)
    for_all_objects(set, 'ppc405gp-emac', change_mal_attr_1299)
    for_all_objects(set, 'ppc440gp-emac', change_mal_attr_1299)
    for_all_objects(set, 'ppc440gx-emac', change_mal_attr_1299)
    for_all_objects(set, 'ppc405gp-pci', change_irq_attr_1299)
    for_all_objects(set, 'ppc440gp-pci', change_irq_attr_1299)
    for_all_objects(set, 'ppc440gx-pci', change_irq_attr_1299)
    for_all_objects(set, 'memory-space', remove_default_target_endian_1299)
    for_all_objects(set, 'port-space', remove_default_target_endian_1299)
    for_all_objects(set, 'I/O-APIC', rename_ioapic_1299)
    for_all_objects(set, 'cheetah+mmu', rename_cheetah_plus_mmu_1299)
    for_all_objects(set, 'ultrasparc-iii+', rename_ultrasparc_iii_plus_1299)
    for_all_objects(set, 'ultrasparc-iv+', rename_ultrasparc_iv_plus_1299)
    # add a placeholder system component when the build knows the class
    try:
        SIM_get_object('dummy-component')
        set['system-component'] = pre_conf_object('system-component',
                                                  'dummy-component')
    except:
        pass
    for cls in ['DEC21041', 'DEC21140A', 'DEC21143']:
        for_all_objects(set, cls, set_dec_srom_width_1299)
    for obj in all_objects(set, 'i82077'):
        try:
            # drives entries shrink from pairs to the drive object only
            obj.drives = [x[1] for x in obj.drives]
        except:
            pass
    for cls in mips_classes:
        remove_class_attr(set, cls, 'itlb')
        remove_class_attr(set, cls, 'dtlb')
    for obj in all_objects(set, 'i8042'):
        if hasattr(obj, 'reset_targets') and len(obj.reset_targets) > 0:
            # reset targets move to a dedicated x86-reset-bus object
            bus = pre_conf_object(obj.name + '_reset', 'x86-reset-bus')
            set[obj.name + '_reset'] = bus
            bus.reset_targets = obj.reset_targets
            obj.reset_target = bus
        remove_attr(obj, 'a20_target')
        remove_attr(obj, 'reset_targets')
    for cls in x86_classes:
        remove_class_attr(set, cls, 'stc_segreg_enabled')
def connections_1200(set, obj):
    """Upgrade port-forward connection entries from 4-element to 5-element form.

    The new third element is the IP of the first service-node-device found
    in the configuration ("0.0.0.0" when none exists).
    """
    try:
        connections = obj.connections
    except:
        # object has no connections attribute -- nothing to migrate
        return
    new_ip = "0.0.0.0"
    for snd in [x for x in set.values()
                if x.classname == 'service-node-device']:
        new_ip = snd.ip_address
        break
    newlist = []
    for sublist in connections:
        if len(sublist) == 2:
            newlist.append(sublist)
        elif len(sublist) == 4:
            # NOTE(review): sublist[0] is duplicated and sublist[1] dropped
            # here -- presumably intentional for the new format; verify.
            newlist.append([sublist[0], sublist[0], new_ip,
                            sublist[2], sublist[3]])
    obj.connections = newlist
def update_1200_to_1201(set):
    """Checkpoint migration 1200 -> 1201: port-forward connection format."""
    for_all_objects(set, "port-forward-outgoing-server", connections_1200)
def sim_1199(set, obj):
    """Record remote Simics Central settings from the old 'sim' object.

    Sets the module globals consumed by update_1199_to_1200() and strips
    the now-obsolete central attributes.
    """
    global create_central_client, remote_central, remote_host
    try:
        if obj.remote_simics_central == 1:
            create_central_client = True
            remote_central = True
            remote_host = obj.simics_central_host
    except:
        # attributes absent in this checkpoint -- no remote central
        pass
    remove_attr(obj, 'remote_simics_central')
    remove_attr(obj, 'simics_central_host')
    remove_attr(obj, 'central_debug')
def connect_eth_1199(arg, ini_obj):
    """One-shot hap callback: connect a NIC to its ethernet link by name."""
    dev = SIM_get_object(arg[0])
    net = SIM_get_object(arg[1])
    dev.link = net
    SIM_hap_delete_callback("Core_Configuration_Loaded",
                            connect_eth_1199, arg)
def eth_device_1199(set, obj):
    """Migrate an old NIC object to the ethernet-link world.

    For a remote central, a fresh 'net0' ethernet_link is created once;
    otherwise the NIC's old 'network' attribute names the link.  Connected
    NICs are re-linked after configuration load; obsolete attributes are
    removed.
    """
    global remote_central
    if remote_central:
        link = pre_conf_object('net0', 'ethernet_link')
        set['net0'] = link
        link.central = set['central_client']
        remote_central = False
        link = link.name
    else:
        try:
            link = obj.network
        except:
            # NOTE(review): if 'network' is absent, 'link' stays unbound and
            # the SIM_hap_add_callback below would raise NameError when
            # obj.connected is true -- presumably connected implies the
            # attribute exists; verify.
            pass
    if obj.connected:
        SIM_hap_add_callback("Core_Configuration_Loaded",
                             connect_eth_1199, (obj.name, link))
    remove_attr(obj, 'network')
    remove_attr(obj, 'connected')
    remove_attr(obj, 'min_latency')
    remove_attr(obj, 'backdoor_ok')
    remove_attr(obj, 'auto_connect')
    remove_attr(obj, 'individual_address')
def ethernet_net_1199(set, obj):
    """Convert an old ethernet-network object into an ethernet-link.

    If the network acted as a central device, an sn0_dev service-node-device
    takes over its ARP/MAC/IP state and is wired to the 'sn0' service node
    created by update_1199_to_1200().
    """
    obj.__class_name__ = 'ethernet-link'
    remove_attr(obj, 'frame_loss')
    remove_attr(obj, 'network_id')
    remove_attr(obj, 'handle_dhcp')
    remove_attr(obj, 'shared_media')
    remove_attr(obj, 'netip')
    remove_attr(obj, 'ethernet_central')
    if obj.central_device:
        snd = pre_conf_object('sn0_dev', 'service-node-device')
        set['sn0_dev'] = snd
        snd.service_node = set['sn0']
        snd.arp_table = obj.arp
        snd.mac_address = obj.ownmac
        snd.ip_address = obj.ownip
        snd.netmask = obj.netmask
        snd.queue = first_queue
        # default route for the service node's own subnet
        set['sn0'].routing_table = [[snd.ip_address, snd.netmask,
                                     '0.0.0.0', snd]]
        snd.link = obj
    try:
        obj.central = set['central_client']
    except:
        # no central client configured -- standalone link
        pass
    remove_attr(obj, 'central_device')
    remove_attr(obj, 'arp')
    remove_attr(obj, 'ownmac')
    remove_attr(obj, 'ownip')
    remove_attr(obj, 'netmask')
def ethernet_central_1199(set, obj):
    """Move the old ethernet-central DNS table onto sn0 and delete the object."""
    new_dns = []
    for dns in obj.dns:
        new_dns.append([None, dns[0], dns[1], dns[2]])
    set['sn0'].hosts = new_dns
    del set[obj.name]
def central_1199(set, obj):
    """Replace the old 'central' object with a central-server, if configured.

    A central-server is only created when the old object listened on a TCP
    port or a unix socket; otherwise the object is simply dropped.
    """
    global create_central_client
    port = obj.ip_port
    file = obj.unix_socket
    del set['central']
    if port == -1 and len(file) == 0:
        return
    cs = pre_conf_object('central_server', 'central-server')
    set['central_server'] = cs
    if len(file):
        cs.unix_socket = file
        # 438 == 0666 -- world read/write socket
        cs.unix_socket_mode = 438
    if port != -1:
        cs.tcp_port = port
    create_central_client = True
def update_1199_to_1200(set):
    """Checkpoint migration 1199 -> 1200: new Simics Central / link model.

    Creates central client/server objects as recorded by sim_1199() and
    central_1199(), replaces ethernet-central/-network with service nodes
    and ethernet-links, reconnects all NIC classes, and strips a number of
    obsolete attributes.
    """
    global remote_host
    for_all_objects(set, 'sim', sim_1199)
    for_all_objects(set, 'central', central_1199)
    if create_central_client:
        cc = pre_conf_object('central_client', 'central-client')
        set['central_client'] = cc
        if remote_host and len(remote_host):
            # bare host names get the default central port
            if not ':' in remote_host and not '/' in remote_host:
                remote_host += ":4711"
            cc.server = remote_host
        elif not remote_central:
            cc.server = pre_conf_object('central_server', 'central-server')
            set['central_server'] = cc.server
    if len(all_objects(set, 'ethernet-central')):
        set['sn0'] = pre_conf_object('sn0', 'service-node')
    for_all_objects(set, 'ethernet-central', ethernet_central_1199)
    for_all_objects(set, 'ethernet-network', ethernet_net_1199)
    for_all_objects(set, 'sbus-hme', eth_device_1199)
    for_all_objects(set, 'cheerio-hme', eth_device_1199)
    for_all_objects(set, 'BCM5703C', eth_device_1199)
    for_all_objects(set, 'BCM57034', eth_device_1199)
    for_all_objects(set, 'AM79C960', eth_device_1199)
    for_all_objects(set, 'cassini', eth_device_1199)
    for_all_objects(set, 'DEC21041', eth_device_1199)
    for_all_objects(set, 'DEC21140A', eth_device_1199)
    for_all_objects(set, 'DEC21143', eth_device_1199)
    for_all_objects(set, 'ppc440gp-emac', eth_device_1199)
    for_all_objects(set, 'CS8900A', eth_device_1199)
    remove_class_attr(set, 'ppc440gp', 'ear')
    for l in [x.name for x in set.values() if x.classname == 'central-links']:
        del set[l]
    remove_class_attr(set, 'ICS951601', 'address_mask')
    remove_class_attr(set, 'NS16450', 'send_while_playing_back')
    remove_class_attr(set, 'NS16550', 'send_while_playing_back')
    remove_class_attr(set, 'M5823', 'irq_disable')
    remove_class_attr(set, 'DS12887', 'irq_disable')
    remove_class_attr(set, 'DS17485', 'irq_disable')
    remove_class_attr(set, 'i8254', 'rw_state')
    for obj in all_objects(set, 'ppc440gp-mal'):
        obj.interrupts[1] = obj.interrupts[0]
    # x86 keyboards now reset all x86 CPUs in the configuration
    cpus = [x for x in set.values() if x.classname in x86_classes]
    for kbd in all_objects(set, 'i8042'):
        if len(cpus):
            kbd.reset_targets = cpus
    for obj in all_objects(set, 'port-space'):
        try:
            # drop the obsolete sixth element of port-space map entries
            for m in range(len(obj.map)):
                if len(obj.map[m]) == 6:
                    obj.map[m].pop(-1)
        except:
            pass
def update_1051_to_1052(set):
    """Checkpoint migration 1051 -> 1052: drop server-console transients."""
    remove_class_attr(set, 'server-console', 'data_out')
    remove_class_attr(set, 'server-console', 'poll_interval')
def change_map_endian_1049(set, obj):
    """Strip old endian/translator fields from a memory-space map.

    Entries at page offset 0x60 with function 0x10 get the old endian bits
    masked out of address and function; any trailing integer map fields are
    zeroed, and a redundant sixth integer element is dropped.
    (Python 2 'except Exception, msg' syntax -- this file targets Python 2.)
    """
    try:
        for i in range(len(obj.map)):
            off = obj.map[i][0] & 0x1fff
            if off == 0x60 and obj.map[i][4] == 0x10:
                obj.map[i][0] &= 0xffffffffffffe000
                obj.map[i][4] &= 0x70
            if len(obj.map[i]) > 5:
                for j in range(5, len(obj.map[i])):
                    if isinstance(obj.map[i][j], int):
                        obj.map[i][j] = 0
            if len(obj.map[i]) == 6 and isinstance(obj.map[i][5], int):
                obj.map[i].pop(-1)
            # TODO: update vga mapping
    except Exception, msg:
        print msg
        pass
def add_vga_memory_1049(set, obj):
    """Point every mapped VGA/voodoo device back at its memory-space."""
    for vga in [x[1] for x in obj.map]:
        if type(vga) == str:
            vga = set[vga]
        if 'vga' in vga.classname or 'voodoo' in vga.classname:
            vga.memory_space = obj
def update_1049_to_1050(set):
    """Checkpoint migration 1049 -> 1050.

    Fixes memory-space maps, links VGA devices to their spaces, removes a
    batch of obsolete device attributes, and converts SYM53C8xx interrupt
    pins from a scalar to a 4-element list.
    """
    for_all_objects(set, 'memory-space', change_map_endian_1049)
    for_all_objects(set, 'memory-space', add_vga_memory_1049)
    remove_class_attr(set, 'ide-disk', 'tr_rdy_dma')
    remove_class_attr(set, 'ide-disk', 'tr_cmd_return_dma')
    remove_class_attr(set, 'ide-cdrom', 'tr_rdy_dma')
    remove_class_attr(set, 'ide-cdrom', 'tr_cmd_return_dma')
    remove_class_attr(set, 'i82077', 'seek_irq_drive')
    remove_class_attr(set, 'i8042', 'reset_target')
    remove_class_attr(set, 'NS16450', 'com')
    remove_class_attr(set, 'NS16550', 'com')
    remove_class_attr(set, 'i21152', 'first_bus_nonzero')
    remove_class_attr(set, 'i82443bx_agp', 'first_bus_nonzero')
    remove_class_attr(set, 'i82443bx_agp', 'memory')
    # p4 has these before 2.0 (only p2, p3 and ppro)
    remove_class_attr(set, 'x86-p4', 'mc4_ctl')
    remove_class_attr(set, 'x86-p4', 'mc4_addr')
    remove_class_attr(set, 'x86-p4', 'mc4_status')
    remove_class_attr(set, 'x86-p4', 'mc4_misc')
    remove_class_attr(set, 'x86-p4', 'perfevtsel0')
    remove_class_attr(set, 'x86-p4', 'perfevtsel1')
    for cls in x86_classes:
        remove_class_attr(set, cls, 'cr1')
    for cls in ['SYM53C810', 'SYM53C875']:
        for obj in all_objects(set, cls):
            try:
                pin = obj.interrupt_pin
                obj.interrupt_pin = [pin, 0, 0, 0]
            except:
                pass
def update_1042_to_1043(set):
    """Checkpoint migration 1042 -> 1043: split Z8530 into per-port objects.

    Creates '<name>-port-a'/'-port-b' Z8530-port objects, cross-links them
    with the master, and moves the console attributes onto the ports.
    """
    for obj in all_objects(set, 'Z8530'):
        a = pre_conf_object(obj.name + '-port-a', 'Z8530-port')
        b = pre_conf_object(obj.name + '-port-b', 'Z8530-port')
        obj.a_port = set[a.name] = a
        obj.b_port = set[b.name] = b
        a.master = obj
        b.master = obj
        # only change console if set
        try:
            a.console = obj.a_console
            a.console.device = a
            remove_attr(obj, 'a_console')
        except:
            pass
        try:
            b.console = obj.b_console
            b.console.device = b
            remove_attr(obj, 'b_console')
        except:
            pass
def update_1040_to_1041(set):
    """Checkpoint migration 1040 -> 1041.

    Splits ultrasparc-iii-i off from ultrasparc-iii+, scales x86 segment
    limits by the granularity bit, and replaces flash storage_space with a
    direct storage_ram reference.
    """
    for obj in all_objects(set, 'ultrasparc-iii+'):
        try:
            if obj.report_ultra3i:
                obj.__class_name__ = 'ultrasparc-iii-i'
            remove_attr(obj, 'report_ultra3i')
        except:
            pass
    seg_regs = ["cs", "ds", "ss", "es", "fs", "gs", "tr", "ldtr"]
    for cls in x86_classes:
        for obj in all_objects(set, cls):
            for seg in seg_regs:
                try:
                    reg = getattr(obj, seg)
                    # reg[3] is the granularity flag; reg[8] the raw limit
                    if reg[3]:
                        reg[8] = (reg[8] << 12) | 0xfff
                    setattr(obj, seg, reg)
                except:
                    pass
    for obj in all_objects(set, 'flash-memory'):
        try:
            obj.storage_ram = obj.storage_space.map[0][1]
        except:
            pass
        remove_attr(obj, 'storage_space')
def update_1039_to_1040(set):
    """Checkpoint migration 1039 -> 1040: mark the boot-strap processor."""
    first = 1
    for cls in [x for x in x86_classes if not '486' in x]:
        for obj in all_objects(set, cls):
            # only on first processor, does not work on multi-machines
            obj.bsp = first
            first = 0
def update_1031_to_1032(set):
    """Checkpoint migration 1031 -> 1032: derive udma_enabled."""
    # Old versions do not have the udma_enabled attribute. Assume
    # that udma is enabled if udma_mode is non-zero. The new
    # multiword_dma_mode and multiword_dma_enabled attributes
    # will have the correct default values (off and zero).
    for obj in (all_objects(set, 'ide-disk') + all_objects(set, 'ide-cdrom')):
        try:
            if obj.udma_mode:
                obj.udma_enabled = 1
        except:
            pass
def update_1030_to_1031(set):
    """Checkpoint migration 1030 -> 1031.

    Removes obsolete sparc/console attributes, truncates ISP queue
    addresses to 32 bits, and drops ram.mapped_size.
    """
    for obj in (all_objects(set, 'ultrasparc-ii')
                + all_objects(set, 'ultrasparc-iii')
                + all_objects(set, 'ultrasparc-iii+')):
        remove_attr(obj, 'fp_follow_errata_69')
        remove_attr(obj, 'no_unpriv_nucleus_ifetch')
    for obj in all_objects(set, 'text-console'):
        remove_attr(obj, 'xterm_args')
    for cls in ['ISP1040', 'ISP1040_SUN', 'ISP2200', 'ISP2200_SUN']:
        for obj in all_objects(set, cls):
            # mask to 32 bits
            obj.req_queue_addr &= 0xffffffff
            obj.res_queue_addr &= 0xffffffff
    remove_class_attr(set, 'ram', 'mapped_size')
def update_1019_to_1020(set):
    """Checkpoint migration 1019 -> 1020: introduce the sparc-irq-bus.

    All sparc CPUs and interrupt-routing devices get a shared irq_bus0
    object; the old irq_objs/cpu_objs attributes are removed.
    """
    objs = (all_objects(set, 'ultrasparc-ii')
            + all_objects(set, 'ultrasparc-iii')
            + all_objects(set, 'ultrasparc-iii+')
            + all_objects(set, 'ultrasparc-v')
            + all_objects(set, 'serengeti-schizo')
            + all_objects(set, 'fiesta-tomatillo')
            + all_objects(set, 'sun4u-fhc')
            + all_objects(set, 'sunfire-sysio')
            + all_objects(set, 'sunfire-psycho')
            + all_objects(set, 'serengeti-console')
            + all_objects(set, 'serengeti-console-old'))
    if len(objs):
        irq_bus = pre_conf_object('irq_bus0', 'sparc-irq-bus')
        set['irq_bus0'] = irq_bus
        for obj in objs:
            obj.irq_bus = irq_bus
            remove_attr(obj, 'irq_objs')
            remove_attr(obj, 'cpu_objs')
def update_1010_to_1011(set):
    """Checkpoint migration 1010 -> 1011: drop ISP nvram attributes."""
    for cls in ['ISP1040', 'ISP1040_SUN', 'ISP2200', 'ISP2200_SUN']:
        remove_class_attr(set, cls, 'nvram')
        remove_class_attr(set, cls, 'nvram-extra-cycle')
def add_x86_tlb_1009(set, obj):
    """Move TLB state out of an x86 CPU into a separate x86-tlb object.

    Creates '<cpu>_tlb', links CPU and TLB both ways, and migrates the four
    TLB attributes when the checkpoint carries them (absent ones are simply
    skipped, hence the broad except).

    Fix: the attribute copy used a Python-2-only 'exec' statement on a
    built string; getattr/setattr does the same without dynamic code
    execution and works on both Python 2 and 3.
    """
    name = obj.name + "_tlb"
    set[name] = tlb = pre_conf_object(name, 'x86-tlb')
    tlb.cpu = obj
    obj.tlb = tlb
    for t in ['itlb_large', 'dtlb_large', 'itlb_4k', 'dtlb_4k']:
        try:
            setattr(tlb, t, getattr(obj, t))
            remove_attr(obj, t)
        except:
            pass
def update_1009_to_1010(set):
    """Checkpoint migration 1009 -> 1010.

    Removes debug attributes, renames serengeti-console to the -old class,
    and splits x86 TLB state into separate x86-tlb objects.
    """
    remove_class_attr(set, 'ide-disk', 'debug_level')
    remove_class_attr(set, 'ide-cdrom', 'debug_level')
    remove_class_attr(set, 'spitfire-mmu', 'no_unpriv_nucleus_ifetch')
    remove_class_attr(set, 'cheetah-mmu', 'no_unpriv_nucleus_ifetch')
    remove_class_attr(set, 'cheetah+mmu', 'no_unpriv_nucleus_ifetch')
    remove_class_attr(set, 'text-console', 'add_title')
    for obj in all_objects(set, 'serengeti-console'):
        obj.__class_name__ = 'serengeti-console-old'
    for cls in x86_classes:
        for obj in all_objects(set, cls):
            add_x86_tlb_1009(set, obj)
#######################
# Register every migration function with its checkpoint build-id.
# NOTE(review): most entries pair the registered id with a function named
# update_<id-1>_to_<id> or update_<id>_to_<id+1>, but a few (e.g. 1370,
# 1339, 1305) deviate from either pattern -- verify against
# install_configuration_update()'s contract before changing.
install_configuration_update(1397, update_1396_to_1397)
install_configuration_update(1391, update_1390_to_1391)
install_configuration_update(1379, update_1378_to_1379)
install_configuration_update(1378, update_1377_to_1378)
install_configuration_update(1370, update_1370_to_1371)
install_configuration_update(1367, update_1367_to_1368)
install_configuration_update(1366, update_1366_to_1367)
install_configuration_update(1365, update_1365_to_1366)
install_configuration_update(1364, update_1364_to_1365)
install_configuration_update(1363, update_1363_to_1364)
install_configuration_update(1361, update_1361_to_1362)
install_configuration_update(1358, update_1358_to_1359)
install_configuration_update(1357, update_1357_to_1358)
install_configuration_update(1354, update_1354_to_1355)
install_configuration_update(1350, update_1350_to_1351)
install_configuration_update(1348, update_1348_to_1349)
install_configuration_update(1339, update_1340_to_1341)
install_configuration_update(1334, update_1334_to_1335)
install_configuration_update(1332, update_1332_to_1333)
install_configuration_update(1329, update_1329_to_1330)
install_configuration_update(1328, update_1328_to_1329)
install_configuration_update(1327, update_1327_to_1328)
install_configuration_update(1326, update_1326_to_1327)
install_configuration_update(1321, update_1321_to_1322)
install_configuration_update(1320, update_1320_to_1321)
install_configuration_update(1318, update_1318_to_1319)
install_configuration_update(1317, update_1317_to_1318)
install_configuration_update(1316, update_1316_to_1317)
install_configuration_update(1305, update_1304_to_1305)
install_configuration_update(1302, update_1302_to_1303)
install_configuration_update(1301, update_1301_to_1302)
install_configuration_update(1300, update_1300_to_1301)
install_configuration_update(1299, update_1299_to_1300)
install_configuration_update(1200, update_1200_to_1201)
install_configuration_update(1199, update_1199_to_1200)
install_configuration_update(1051, update_1051_to_1052)
install_configuration_update(1049, update_1049_to_1050)
install_configuration_update(1042, update_1042_to_1043)
install_configuration_update(1040, update_1040_to_1041)
install_configuration_update(1039, update_1039_to_1040)
install_configuration_update(1031, update_1031_to_1032)
install_configuration_update(1030, update_1030_to_1031)
install_configuration_update(1019, update_1019_to_1020)
install_configuration_update(1010, update_1010_to_1011)
install_configuration_update(1009, update_1009_to_1010)
| false
| true
|
f71461118a36638bf9f86bc877bea372a4e45f9a
| 689
|
py
|
Python
|
app.py
|
JoeDReynolds/HW_13
|
8fc15c37554069ff51e1d29685384e6e521a4b2a
|
[
"ADSL"
] | null | null | null |
app.py
|
JoeDReynolds/HW_13
|
8fc15c37554069ff51e1d29685384e6e521a4b2a
|
[
"ADSL"
] | null | null | null |
app.py
|
JoeDReynolds/HW_13
|
8fc15c37554069ff51e1d29685384e6e521a4b2a
|
[
"ADSL"
] | null | null | null |
# import necessary libraries
from flask import Flask, render_template, jsonify, redirect
from flask_pymongo import PyMongo
import scrape_mars
# create instance of Flask app
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# create route that renders index.html template
@app.route("/")
def index():
    # NOTE(review): reads collection 'mars_db' while /scrape writes to
    # 'marsdata' -- confirm which collection name is intended.
    mars_data = mongo.db.mars_db.find_one()
    return render_template("index.html", mars_data=mars_data)
@app.route("/scrape")
def scraper():
    # drop stale data, re-scrape, store the fresh document, then go home
    mongo.db.marsdata.drop()
    results = scrape_mars.scrape()
    mongo.db.marsdata.insert_one(results)
    return redirect("/", code=302)
if __name__ == "__main__":
    app.run(debug=True)
| 23.758621
| 62
| 0.740203
|
from flask import Flask, render_template, jsonify, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
mars_data = mongo.db.mars_db.find_one()
return render_template("index.html", mars_data=mars_data)
@app.route("/scrape")
def scraper():
mongo.db.marsdata.drop()
results = scrape_mars.scrape()
mongo.db.marsdata.insert_one(results)
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True)
| true
| true
|
f71461fff2ddfcf30af051a048b0f75af416145e
| 3,596
|
py
|
Python
|
ma.py
|
nishishailesh/moving_average_clin_lab
|
c8ee448ca16b0d3845c42cafa070dafd307594dc
|
[
"MIT"
] | null | null | null |
ma.py
|
nishishailesh/moving_average_clin_lab
|
c8ee448ca16b0d3845c42cafa070dafd307594dc
|
[
"MIT"
] | null | null | null |
ma.py
|
nishishailesh/moving_average_clin_lab
|
c8ee448ca16b0d3845c42cafa070dafd307594dc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import fcntl
import logging
import time
import io
import datetime
import decimal
import statistics
from astm_bidirectional_common import my_sql , file_mgmt, print_to_log
#For mysql password
sys.path.append('/var/gmcs_config')
import astm_var
####Settings section start#####
logfile_name='/var/log/ma.log'
log=1
n_size=50
####Settings section end#####
'''
select sample_id,result,avg(result)
over (ROWS BETWEEN 10 PRECEDING AND CURRENT ROW)
from result where result>0 and examination_id=5031 order by sample_id desc limit 40
'''
last_sample_id_dict={}
logging.basicConfig(filename=logfile_name,level=logging.DEBUG,format='%(asctime)s %(message)s')
if(log==0):
logging.disable(logging.DEBUG)
print_to_log("Moving Average Logging Test","[OK]")
def check_if_new_result_arrived(ms,examination_id):
global last_sample_id_dict
prepared_sql='select max(sample_id) from result where examination_id=%s and result>0'
data_tpl=(examination_id,)
cur=ms.run_query_with_log(prepared_sql,data_tpl)
if(cur!=None):
r=ms.get_single_row(cur)
print_to_log("max sample_id for {}".format(examination_id),r[0])
ms.close_cursor(cur)
if(examination_id in last_sample_id_dict):
if(last_sample_id_dict[examination_id]==r[0]):
print_to_log("Last sample id is not changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
return False
else:
print_to_log("Last sample id is changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
prepared_sql_sample_data='select * from result where examination_id=%s and sample_id=%s'
data_tpl_sample_data=(examination_id,r[0])
cur_sample_data=ms.run_query_with_log(prepared_sql_sample_data,data_tpl_sample_data)
r_sample_data=ms.get_single_row(cur_sample_data)
return r_sample_data[0],r_sample_data[2] #sample id and result
else:
print_to_log("Examination not in dict:{}".format(last_sample_id_dict),examination_id)
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
return 0,0,0
def calculate_moving_average(ms,examination_id):
    """Compute and store the moving average of the latest results.

    When a new result has arrived for examination_id, averages the most
    recent n_size positive results and inserts a row into moving_average.

    Args:
        ms: project my_sql wrapper.
        examination_id: examination whose results are averaged.
    """
    chk=check_if_new_result_arrived(ms,examination_id)
    if(chk==False):
        print_to_log("Last sample id is not changed.. nothing to do for:",examination_id)
        return
    prepared_sql='select result from result where examination_id=%s and result>0 order by sample_id desc limit %s'
    data_tpl=(examination_id,n_size)
    cur=ms.run_query_with_log(prepared_sql,data_tpl)
    r_tuple=()
    if(cur!=None):
        # Collect up to n_size most recent results as exact Decimals.
        r=ms.get_single_row(cur)
        while(r!=None):
            r_tuple=r_tuple+(decimal.Decimal(r[0]),)
            r=ms.get_single_row(cur)
        ms.close_cursor(cur)
    if(len(r_tuple)==0):
        # Fixed: statistics.mean raises StatisticsError on empty input, which
        # would have crashed the polling loop when the query failed or
        # returned no rows.
        print_to_log("no results to average for:",examination_id)
        return
    r_avg=statistics.mean(r_tuple)
    dt=datetime.datetime.now()
    print_to_log("datetime",dt.strftime("%Y-%m-%d-%H-%M-%S"))
    prepared_sql_insert='insert into moving_average (examination_id,date_time,avg_value,sample_id,value) values(%s,%s,%s,%s,%s)'
    # chk[0]/chk[1] are the newest sample id and its raw value.
    data_tpl_insert=(examination_id,dt,r_avg,chk[0],chk[1])
    ms.run_query_with_log(prepared_sql_insert,data_tpl_insert)
# --- main: connect to the database and poll forever --------------------------
ms=my_sql()
ms.get_link(astm_var.my_host,astm_var.my_user,astm_var.my_pass,astm_var.my_db)
while True:
    # Recompute the moving average for examination 5031 every 10 seconds.
    calculate_moving_average(ms,5031)
    time.sleep(10)
# NOTE(review): unreachable — the loop above never exits, so the link is
# never closed cleanly.
ms.close_link()
| 35.60396
| 128
| 0.743326
|
import sys
import fcntl
import logging
import time
import io
import datetime
import decimal
import statistics
from astm_bidirectional_common import my_sql , file_mgmt, print_to_log
sys.path.append('/var/gmcs_config')
import astm_var
(logging.DEBUG)
print_to_log("Moving Average Logging Test","[OK]")
def check_if_new_result_arrived(ms,examination_id):
global last_sample_id_dict
prepared_sql='select max(sample_id) from result where examination_id=%s and result>0'
data_tpl=(examination_id,)
cur=ms.run_query_with_log(prepared_sql,data_tpl)
if(cur!=None):
r=ms.get_single_row(cur)
print_to_log("max sample_id for {}".format(examination_id),r[0])
ms.close_cursor(cur)
if(examination_id in last_sample_id_dict):
if(last_sample_id_dict[examination_id]==r[0]):
print_to_log("Last sample id is not changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
return False
else:
print_to_log("Last sample id is changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
prepared_sql_sample_data='select * from result where examination_id=%s and sample_id=%s'
data_tpl_sample_data=(examination_id,r[0])
cur_sample_data=ms.run_query_with_log(prepared_sql_sample_data,data_tpl_sample_data)
r_sample_data=ms.get_single_row(cur_sample_data)
return r_sample_data[0],r_sample_data[2]
else:
print_to_log("Examination not in dict:{}".format(last_sample_id_dict),examination_id)
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
return 0,0,0
def calculate_moving_average(ms,examination_id):
chk=check_if_new_result_arrived(ms,examination_id)
if(chk==False):
print_to_log("Last sample id is not changed.. nothing to do for:",examination_id)
return
prepared_sql='select result from result where examination_id=%s and result>0 order by sample_id desc limit %s'
data_tpl=(examination_id,n_size)
cur=ms.run_query_with_log(prepared_sql,data_tpl)
r_tuple=()
if(cur!=None):
r=ms.get_single_row(cur)
while(r!=None):
r_tuple=r_tuple+(decimal.Decimal(r[0]),)
r=ms.get_single_row(cur)
ms.close_cursor(cur)
r_avg=statistics.mean(r_tuple)
dt=datetime.datetime.now()
print_to_log("datetime",dt.strftime("%Y-%m-%d-%H-%M-%S"))
prepared_sql_insert='insert into moving_average (examination_id,date_time,avg_value,sample_id,value) values(%s,%s,%s,%s,%s)'
data_tpl_insert=(examination_id,dt,r_avg,chk[0],chk[1])
curi=ms.run_query_with_log(prepared_sql_insert,data_tpl_insert)
ms=my_sql()
ms.get_link(astm_var.my_host,astm_var.my_user,astm_var.my_pass,astm_var.my_db)
while True:
calculate_moving_average(ms,5031)
time.sleep(10)
ms.close_link()
| true
| true
|
f7146227c802ba11eb67d4ee45f43ada79d84b3d
| 3,553
|
py
|
Python
|
app/selenium_ui/jsm/pages/customer_selectors.py
|
mapit-plugin/dc-app-performance-toolkit
|
75d7562c7ffc925c8ba8dfbe81db08af85fadcfa
|
[
"Apache-2.0"
] | 1
|
2021-09-17T04:34:03.000Z
|
2021-09-17T04:34:03.000Z
|
app/selenium_ui/jsm/pages/customer_selectors.py
|
mapit-plugin/dc-app-performance-toolkit
|
75d7562c7ffc925c8ba8dfbe81db08af85fadcfa
|
[
"Apache-2.0"
] | null | null | null |
app/selenium_ui/jsm/pages/customer_selectors.py
|
mapit-plugin/dc-app-performance-toolkit
|
75d7562c7ffc925c8ba8dfbe81db08af85fadcfa
|
[
"Apache-2.0"
] | 1
|
2020-12-30T11:12:58.000Z
|
2020-12-30T11:12:58.000Z
|
from util.conf import JSM_SETTINGS
from selenium.webdriver.common.by import By
class UrlManager:
    """Assembles JSM customer-portal URLs from the configured server host.

    Relative paths are stored on the instance so locator classes can reuse
    them; the *_url() accessors prepend the configured server URL.
    """

    def __init__(self, portal_id=None, request_key=None):
        self.host = JSM_SETTINGS.server_url
        self.login_params = '/servicedesk/customer/user/login'
        self.portal_params = f'/servicedesk/customer/portal/{portal_id}'
        self.request_params = f'/servicedesk/customer/portal/{portal_id}/{request_key}'
        self.my_requests = '/servicedesk/customer/user/requests'
        self.all_requests = '/servicedesk/customer/user/requests?reporter=all'

    def _absolute(self, path):
        # Join the configured host with a stored relative path.
        return f'{self.host}{path}'

    def login_url(self):
        return self._absolute(self.login_params)

    def portal_url(self):
        return self._absolute(self.portal_params)

    def request_url(self):
        return self._absolute(self.request_params)

    def my_requests_url(self):
        return self._absolute(self.my_requests)

    def all_requests_url(self):
        return self._absolute(self.all_requests)
class LoginPageLocators:
    """Locators for the JSM customer-portal login page."""
    login_url = UrlManager().login_url()
    # Smart-search input on the help-center landing page.
    search_input_field = (By.ID, 'sd-customer-portal-smart-search-input')
    # Container rendered only after a successful login.
    welcome_logged_in_page = (By.CSS_SELECTOR, "div.cv-help-center-container")
    login_field = (By.ID, 'os_username')
    password_field = (By.ID, 'os_password')
    login_submit_button = (By.ID, 'js-login-submit')
class TopPanelSelectors:
    """Locators for the top navigation panel (profile menu / logout)."""
    profile_icon = (By.XPATH, '//a[@href="#dropdown2-header"]')
    profile_button = (By.CSS_SELECTOR, 'a.js-profile')
    logout_button = (By.CSS_SELECTOR, 'a.js-logout')
class CustomerPortalsSelectors:
    """Locators for the "browse all portals" widget."""
    browse_portals_button = (By.CSS_SELECTOR, 'button.cv-smart-portal-browse-portals')
    full_portals_list = (By.CSS_SELECTOR, 'ul.cv-smart-portal-all-portals-list')
    # Fixed: the selector string was wrapped in literal double quotes
    # ('"ul...span"'), which is not a valid CSS selector and would make
    # find_element raise InvalidSelectorException.
    portal_from_list = (By.CSS_SELECTOR, 'ul.cv-smart-portal-all-portals-list>li>a>span')
class CustomerPortalSelectors:
    """Locators for a single customer portal page and its request form."""
    portal_title = (By.CSS_SELECTOR, '.cv-page-title-text')
    # A request-type link inside the portal's request-type list.
    request_type = (By.CSS_SELECTOR, 'li>span.js-cv-request-type>a')
    create_request_button = (By.XPATH, "//button[contains(text(),'Create')]")
    summary_field = (By.ID, 'summary')
    description_field = (By.ID, 'description')
    # Select2-based required "components" dropdown and its popup list.
    required_dropdown_field = (By.CSS_SELECTOR, "#s2id_components>ul.select2-choices")
    required_dropdown_list = (By.ID, 'select2-drop')
    required_dropdown_element = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
    # Due-date calendar trigger button and its text input.
    required_calendar_button = (By.CSS_SELECTOR, 'button#trigger-duedate')
    required_calendar_input_field = (By.CSS_SELECTOR, 'input#duedate')
    comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
class RequestSelectors:
    """Locators for a single request's detail page (comment / share)."""
    request_url = UrlManager().request_url()
    request_option = (By.CLASS_NAME, 'cv-request-options')
    comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
    add_comment_button = (By.XPATH, "//button[contains(text(),'Add')]")
    # "Share" dialog: trigger link, Select2 participant search and results.
    share_request_button = (By.CSS_SELECTOR, 'a.js-share-request')
    share_request_search_field = (By.ID, 's2id_participants')
    share_request_dropdown = (By.ID, 'select2-drop')
    share_request_dropdown_results = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
    share_request_dropdown_one_elem = (By.CSS_SELECTOR,
                                       '#select2-drop>ul.select2-results>li>div>span.user-picker-display-name')
    share_request_modal_button = (By.XPATH, "//button[contains(text(),'Share')]")
class RequestsSelectors:
    """Locators for the customer's requests listing page."""
    my_requests_url = UrlManager().my_requests_url()
    requests_label = (By.XPATH, "//h2[contains(text(),'Requests')]")
| 39.921348
| 111
| 0.710104
|
from util.conf import JSM_SETTINGS
from selenium.webdriver.common.by import By
class UrlManager:
def __init__(self, portal_id=None, request_key=None):
self.host = JSM_SETTINGS.server_url
self.login_params = '/servicedesk/customer/user/login'
self.portal_params = f'/servicedesk/customer/portal/{portal_id}'
self.request_params = f'/servicedesk/customer/portal/{portal_id}/{request_key}'
self.my_requests = '/servicedesk/customer/user/requests'
self.all_requests = '/servicedesk/customer/user/requests?reporter=all'
def login_url(self):
return f'{self.host}{self.login_params}'
def portal_url(self):
return f'{self.host}{self.portal_params}'
def request_url(self):
return f'{self.host}{self.request_params}'
def my_requests_url(self):
return f'{self.host}{self.my_requests}'
def all_requests_url(self):
return f'{self.host}{self.all_requests}'
class LoginPageLocators:
login_url = UrlManager().login_url()
search_input_field = (By.ID, 'sd-customer-portal-smart-search-input')
welcome_logged_in_page = (By.CSS_SELECTOR, "div.cv-help-center-container")
login_field = (By.ID, 'os_username')
password_field = (By.ID, 'os_password')
login_submit_button = (By.ID, 'js-login-submit')
class TopPanelSelectors:
profile_icon = (By.XPATH, '//a[@href="#dropdown2-header"]')
profile_button = (By.CSS_SELECTOR, 'a.js-profile')
logout_button = (By.CSS_SELECTOR, 'a.js-logout')
class CustomerPortalsSelectors:
browse_portals_button = (By.CSS_SELECTOR, 'button.cv-smart-portal-browse-portals')
full_portals_list = (By.CSS_SELECTOR, 'ul.cv-smart-portal-all-portals-list')
portal_from_list = (By.CSS_SELECTOR, '"ul.cv-smart-portal-all-portals-list>li>a>span"')
class CustomerPortalSelectors:
portal_title = (By.CSS_SELECTOR, '.cv-page-title-text')
request_type = (By.CSS_SELECTOR, 'li>span.js-cv-request-type>a')
create_request_button = (By.XPATH, "//button[contains(text(),'Create')]")
summary_field = (By.ID, 'summary')
description_field = (By.ID, 'description')
required_dropdown_field = (By.CSS_SELECTOR, "#s2id_components>ul.select2-choices")
required_dropdown_list = (By.ID, 'select2-drop')
required_dropdown_element = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
required_calendar_button = (By.CSS_SELECTOR, 'button#trigger-duedate')
required_calendar_input_field = (By.CSS_SELECTOR, 'input#duedate')
comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
class RequestSelectors:
request_url = UrlManager().request_url()
request_option = (By.CLASS_NAME, 'cv-request-options')
comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
add_comment_button = (By.XPATH, "//button[contains(text(),'Add')]")
share_request_button = (By.CSS_SELECTOR, 'a.js-share-request')
share_request_search_field = (By.ID, 's2id_participants')
share_request_dropdown = (By.ID, 'select2-drop')
share_request_dropdown_results = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
share_request_dropdown_one_elem = (By.CSS_SELECTOR,
'#select2-drop>ul.select2-results>li>div>span.user-picker-display-name')
share_request_modal_button = (By.XPATH, "//button[contains(text(),'Share')]")
class RequestsSelectors:
my_requests_url = UrlManager().my_requests_url()
requests_label = (By.XPATH, "//h2[contains(text(),'Requests')]")
| true
| true
|
f71462edb7ac3e4f02fba779f9139da6a78624ba
| 6,058
|
py
|
Python
|
tests/evaluators_test.py
|
gaussalgo/adaptor
|
8d8ae1b7694108f4bde78c127fe9ff97fa6b9470
|
[
"MIT"
] | 11
|
2022-01-25T13:44:15.000Z
|
2022-03-16T12:46:58.000Z
|
tests/evaluators_test.py
|
gaussalgo/adaptor
|
8d8ae1b7694108f4bde78c127fe9ff97fa6b9470
|
[
"MIT"
] | 3
|
2022-01-29T18:19:01.000Z
|
2022-02-01T15:34:44.000Z
|
tests/evaluators_test.py
|
gaussalgo/adaptor
|
8d8ae1b7694108f4bde78c127fe9ff97fa6b9470
|
[
"MIT"
] | 1
|
2022-02-17T17:11:40.000Z
|
2022-02-17T17:11:40.000Z
|
from adaptor.evaluators.generative import GenerativeEvaluator
from adaptor.evaluators.sequence_classification import SeqClassificationEvaluator
from adaptor.evaluators.token_classification import TokenClassificationEvaluator
from adaptor.lang_module import LangModule
from adaptor.objectives.objective_base import Objective
from adaptor.objectives.seq2seq import Sequence2Sequence
from utils import paths, test_base_models
def assert_evaluator_logs(lang_module: LangModule, objective: Objective, split: str) -> None:
    """Run one batch through the objective and assert its evaluators logged.

    Pulls a single sample from the objective's dataset for `split`, computes
    the loss, then checks that every evaluator registered for that split
    appears in the objective's per-split log with a float value.
    """
    # dataset iteration test
    dataset_sample = next(iter(objective.get_dataset(split, objective_i=0, device="cpu")))
    # providing labels makes HF lang_module to compute its own loss, which is in DA redundantly done by Objective
    outputs = lang_module(**dataset_sample)
    # request objective for its loss
    loss = objective.compute_loss(outputs, dataset_sample["labels"], dataset_sample, split)
    assert loss.item()
    log = objective.per_objective_log(split)
    # assert that objective's id can be found in each key of the logs
    assert all(str(objective) in k for k in log.keys())
    for split_evaluator in objective.evaluators[split]:
        # assert that each evaluator of given split was logged and has a value of expected type
        assert any(str(split_evaluator) in k and isinstance(v, float) for k, v in log.items())
# Shared language modules for the generative (translation) tests, built once
# at import time so the tests below reuse the same loaded base models.
gen_lang_module = LangModule(test_base_models["translation_mono"])
gen_lang_module_multi = LangModule(test_base_models["translation_multi"]["model"])
def assert_gen_evaluator_logs(evaluator: GenerativeEvaluator, split: str) -> None:
    """Assert `evaluator` logs on a mono-lingual Sequence2Sequence objective."""
    gen_objective = Sequence2Sequence(gen_lang_module,
                                      texts_or_path=paths["texts"]["translation"],
                                      labels_or_path=paths["labels"]["translation"],
                                      batch_size=1,
                                      train_evaluators=[evaluator],
                                      val_evaluators=[evaluator])
    assert_evaluator_logs(gen_lang_module, gen_objective, split)
def assert_gen_evaluator_logs_mbart(evaluator: GenerativeEvaluator, split: str) -> None:
    """Assert `evaluator` logs on a multi-lingual (mBART-style) objective.

    Identical to assert_gen_evaluator_logs but passes explicit source and
    target language ids, as required by multilingual translation models.
    """
    gen_objective = Sequence2Sequence(gen_lang_module_multi,
                                      texts_or_path=paths["texts"]["translation"],
                                      labels_or_path=paths["labels"]["translation"],
                                      batch_size=1,
                                      train_evaluators=[evaluator],
                                      val_evaluators=[evaluator],
                                      source_lang_id=test_base_models["translation_multi"]["test_src_lang"],
                                      target_lang_id=test_base_models["translation_multi"]["test_tgt_lang"])
    assert_evaluator_logs(gen_lang_module_multi, gen_objective, split)
def assert_ner_evaluator_logs(evaluator: TokenClassificationEvaluator, split: str) -> None:
    """Assert `evaluator` logs on a TokenClassification (NER) objective."""
    from adaptor.objectives.classification import TokenClassification
    lang_module = LangModule(test_base_models["token_classification"])
    gen_objective = TokenClassification(lang_module,
                                        texts_or_path=paths["texts"]["ner"],
                                        labels_or_path=paths["labels"]["ner"],
                                        batch_size=1,
                                        train_evaluators=[evaluator],
                                        val_evaluators=[evaluator])
    assert_evaluator_logs(lang_module, gen_objective, split)
def assert_classification_evaluator_logs(evaluator: SeqClassificationEvaluator, split: str) -> None:
    """Assert `evaluator` logs on a SequenceClassification objective."""
    from adaptor.objectives.classification import SequenceClassification
    lang_module = LangModule(test_base_models["sequence_classification"])
    gen_objective = SequenceClassification(lang_module,
                                           texts_or_path=paths["texts"]["classification"],
                                           labels_or_path=paths["labels"]["classification"],
                                           batch_size=1,
                                           train_evaluators=[evaluator],
                                           val_evaluators=[evaluator])
    assert_evaluator_logs(lang_module, gen_objective, split)
def test_bleu():
    """BLEU (via generate()) logs on the train split."""
    from adaptor.evaluators.generative import BLEU
    assert_gen_evaluator_logs(BLEU(use_generate=True, decides_convergence=True), "train")
def test_bleu_mbart():
    """BLEU logs on the multilingual (mBART-style) objective."""
    from adaptor.evaluators.generative import BLEU
    assert_gen_evaluator_logs_mbart(BLEU(use_generate=True, decides_convergence=True), "train")
def test_rouge():
    """ROUGE (teacher-forced outputs, no generate()) logs on the train split."""
    from adaptor.evaluators.generative import ROUGE
    assert_gen_evaluator_logs(ROUGE(use_generate=False, decides_convergence=True), "train")
def test_bertscore():
    """BERTScore (teacher-forced outputs) logs on the train split."""
    from adaptor.evaluators.generative import BERTScore
    assert_gen_evaluator_logs(BERTScore(use_generate=False, decides_convergence=True), "train")
def test_meteor():
    """METEOR logs on the train split."""
    from adaptor.evaluators.generative import METEOR
    assert_gen_evaluator_logs(METEOR(decides_convergence=True), "train")
def test_prism():
    """
    Intentionally disabled: PRISM downloads a relatively big model, so this
    check is omitted by default. Uncomment the lines below to run it locally.
    """
    # from adaptor.evaluators.generative import PRISM
    # assert_gen_evaluator_logs(PRISM(use_cuda=False, language="en", decides_convergence=True), "train")
def test_divergence():
    """
    Intentionally disabled: the default JS_Divergence uses PRISM, so running
    this would download the PRISM model. Uncomment the lines below to run it.
    """
    # from adaptor.evaluators.generative import JS_Divergence
    # assert_gen_evaluator_logs(JS_Divergence(decides_convergence=True), "train")
def test_token_fscore():
    """Mean F-score logs on the token-classification (NER) objective."""
    from adaptor.evaluators.token_classification import MeanFScore
    assert_ner_evaluator_logs(MeanFScore(decides_convergence=True), "train")
def test_sequence_accuracy():
    """Accuracy logs on the sequence-classification objective."""
    from adaptor.evaluators.sequence_classification import SequenceAccuracy
    assert_classification_evaluator_logs(SequenceAccuracy(decides_convergence=False), "train")
| 44.544118
| 113
| 0.680753
|
from adaptor.evaluators.generative import GenerativeEvaluator
from adaptor.evaluators.sequence_classification import SeqClassificationEvaluator
from adaptor.evaluators.token_classification import TokenClassificationEvaluator
from adaptor.lang_module import LangModule
from adaptor.objectives.objective_base import Objective
from adaptor.objectives.seq2seq import Sequence2Sequence
from utils import paths, test_base_models
def assert_evaluator_logs(lang_module: LangModule, objective: Objective, split: str) -> None:
dataset_sample = next(iter(objective.get_dataset(split, objective_i=0, device="cpu")))
outputs = lang_module(**dataset_sample)
loss = objective.compute_loss(outputs, dataset_sample["labels"], dataset_sample, split)
assert loss.item()
log = objective.per_objective_log(split)
assert all(str(objective) in k for k in log.keys())
for split_evaluator in objective.evaluators[split]:
# assert that each evaluator of given split was logged and has a value of expected type
assert any(str(split_evaluator) in k and isinstance(v, float) for k, v in log.items())
gen_lang_module = LangModule(test_base_models["translation_mono"])
gen_lang_module_multi = LangModule(test_base_models["translation_multi"]["model"])
def assert_gen_evaluator_logs(evaluator: GenerativeEvaluator, split: str) -> None:
gen_objective = Sequence2Sequence(gen_lang_module,
texts_or_path=paths["texts"]["translation"],
labels_or_path=paths["labels"]["translation"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator])
assert_evaluator_logs(gen_lang_module, gen_objective, split)
def assert_gen_evaluator_logs_mbart(evaluator: GenerativeEvaluator, split: str) -> None:
gen_objective = Sequence2Sequence(gen_lang_module_multi,
texts_or_path=paths["texts"]["translation"],
labels_or_path=paths["labels"]["translation"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator],
source_lang_id=test_base_models["translation_multi"]["test_src_lang"],
target_lang_id=test_base_models["translation_multi"]["test_tgt_lang"])
assert_evaluator_logs(gen_lang_module_multi, gen_objective, split)
def assert_ner_evaluator_logs(evaluator: TokenClassificationEvaluator, split: str) -> None:
from adaptor.objectives.classification import TokenClassification
lang_module = LangModule(test_base_models["token_classification"])
gen_objective = TokenClassification(lang_module,
texts_or_path=paths["texts"]["ner"],
labels_or_path=paths["labels"]["ner"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator])
assert_evaluator_logs(lang_module, gen_objective, split)
def assert_classification_evaluator_logs(evaluator: SeqClassificationEvaluator, split: str) -> None:
from adaptor.objectives.classification import SequenceClassification
lang_module = LangModule(test_base_models["sequence_classification"])
gen_objective = SequenceClassification(lang_module,
texts_or_path=paths["texts"]["classification"],
labels_or_path=paths["labels"]["classification"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator])
assert_evaluator_logs(lang_module, gen_objective, split)
def test_bleu():
from adaptor.evaluators.generative import BLEU
assert_gen_evaluator_logs(BLEU(use_generate=True, decides_convergence=True), "train")
def test_bleu_mbart():
from adaptor.evaluators.generative import BLEU
assert_gen_evaluator_logs_mbart(BLEU(use_generate=True, decides_convergence=True), "train")
def test_rouge():
from adaptor.evaluators.generative import ROUGE
assert_gen_evaluator_logs(ROUGE(use_generate=False, decides_convergence=True), "train")
def test_bertscore():
from adaptor.evaluators.generative import BERTScore
assert_gen_evaluator_logs(BERTScore(use_generate=False, decides_convergence=True), "train")
def test_meteor():
from adaptor.evaluators.generative import METEOR
assert_gen_evaluator_logs(METEOR(decides_convergence=True), "train")
def test_prism():
# from adaptor.evaluators.generative import PRISM
# assert_gen_evaluator_logs(PRISM(use_cuda=False, language="en", decides_convergence=True), "train")
def test_divergence():
# from adaptor.evaluators.generative import JS_Divergence
# assert_gen_evaluator_logs(JS_Divergence(decides_convergence=True), "train")
def test_token_fscore():
from adaptor.evaluators.token_classification import MeanFScore
assert_ner_evaluator_logs(MeanFScore(decides_convergence=True), "train")
def test_sequence_accuracy():
from adaptor.evaluators.sequence_classification import SequenceAccuracy
assert_classification_evaluator_logs(SequenceAccuracy(decides_convergence=False), "train")
| true
| true
|
f71463ad03d8ae030b29ae38adf10fb001e335fc
| 6,524
|
py
|
Python
|
code/database/project.py
|
fegonda/icon_demo
|
d2d1b0148989187c1433597f9c3ae4357178c082
|
[
"MIT"
] | null | null | null |
code/database/project.py
|
fegonda/icon_demo
|
d2d1b0148989187c1433597f9c3ae4357178c082
|
[
"MIT"
] | null | null | null |
code/database/project.py
|
fegonda/icon_demo
|
d2d1b0148989187c1433597f9c3ae4357178c082
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------------------
# project.py
#
# Author : Felix Gonda
# Date : July 10, 2015
# School : Harvard University
#
# Project : Master Thesis
# An Interactive Deep Learning Toolkit for
# Automatic Segmentation of Images
#
# Summary : This file contains database access layer implementation footer
# sqlite3
#-------------------------------------------------------------------------------------------
import os
import sqlite3 as lite
import sys
import json
import glob
import time
import uuid
from datetime import datetime, date
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../common'))
from utility import Utility
from paths import Paths
from label import Label
from image import Image
class Project (object):
CNN = 'CNN'
MLP = 'MLP'
UNET = 'UNET'
INVALID = -1
ONLINE = 0
OFFLINE = 1
TrainTime = 15 # 15 seconds
SyncTime = 20 # 20 seconds
# create a new project object
#----------------------------
def __init__(self,
id,
type,
revision=0,
baseModel='',
patchSize=39,
batchSize=16,
learningRate=0.01,
momentum=0.9,
hiddenUnits=[500,500,500],
nKernels=[48,48],
kernelSizes=[5,5],
threshold=0.5,
mean=0.5,
data_mean=0.5,
data_std=1.0
):
self.activemode = Project.ONLINE
self.id = id
self.type = type
self.baseModel = baseModel
self.std = data_std
self.mean = data_mean
self.threshold = threshold
self.patchSize = patchSize
self.batchSize = batchSize
self.learningRate = learningRate
self.momentum = momentum
self.hiddenUnits = hiddenUnits
self.nKernels = nKernels
self.kernelSizes = kernelSizes
self.trainTime = Project.TrainTime
self.syncTime = Project.SyncTime
self.epochs = 100
self.labels = []
self.images = []
self.validation_images = []
def addLabel(self, index, name, r, g, b):
self.labels.append( Label(index, name, r, g, b) )
def addImage(self, imageId, annFile=None, segFile=None, score=0.0, purpose='train'):
image = Image( imageId )
image.purpose = purpose
image.annotationFile = annFile
image.segmentationFile = segFile
image.traningScore = score
self.images.append( image )
def toJson(self):
data = {}
data['id'] = self.id
data['std'] = self.std
data['mean'] = self.mean
data['threshold'] = self.threshold
data['training_mod_status'] = self.trainingStatus
data['training_mod_status_str'] = Project.statusToStr( self.trainingStatus )
data['segmentation_mod_status'] = self.predictionStatus;
data['segmentation_mod_status_str'] = Project.statusToStr( self.predictionStatus )
data['initial_model'] = self.baseModel
data['model_type'] = self.type
data['sample_size'] = self.patchSize
data['learning_rate'] = self.learningRate
data['momentum'] = self.momentum
data['batch_size'] = self.batchSize
data['epochs'] = self.epochs
data['train_time'] = self.trainTime
data['sync_time'] = self.syncTime
data['model_mod_time'] = self.modelTime
data['locked'] = self.locked
data['labels'] = [l.toJson() for l in self.labels ]
data['hidden_layers'] = json.dumps( self.hiddenUnits )
data['num_kernels'] = json.dumps( self.nKernels )
data['kernel_sizes'] = json.dumps( self.kernelSizes )
data['images'] = [i.toJson() for i in self.images ]
data['validation_images'] = [i.toJson() for i in self.validation_images ]
data['offline'] = self.offline
data['online'] = self.online
data['baseline'] = self.baseline
data['stats'] = [ s.toJson() for s in self.stats ]
return data
@staticmethod
def fromJson(data):
project = Project(id=data['id'], type=data['model_type'])
project.baseModel = data['initial_model']
project.std = data['std']
project.mean = data['mean']
project.threshold = data['threshold']
project.patchSize = data['sample_size']
project.batchSize = data['batch_size']
project.learningRate = data['learning_rate']
project.momentum = data['momentum']
project.trainTime = data['train_time']
project.syncTime = data['sync_time']
print 'hidden_layers:', data['hidden_layers']
print 'num_kernels:', data['num_kernels']
print 'kernel_sizes:', data['kernel_sizes'], type(data['kernel_sizes'])
project.hiddenUnits = data['hidden_layers'] #json.loads( data['hidden_layers'] )
project.nKernels = data['num_kernels'] #jjson.loads( data['num_kernels'] )
project.kernelSizes = data['kernel_sizes'] #jjson.loads( data['kernel_sizes'] )
return project
def isTrainable(self):
if len(self.labels) == 0:
print 'no labels found...'
return False
if self.type == 'MLP':
if len(self.hiddenUnits) == 0:
print 'no hidden layer units found...'
return False
if self.type == 'CNN':
if len(self.nKernels) == 0:
print 'number of kernels not found...'
return False
elif len(self.kernelSizes) == 0:
print 'kernel sizes not found...'
return False
return True
@staticmethod
def statusToStr( status ):
if status == 1:
return 'Active'
elif status == 2:
return 'Pending Annotations'
else:
return 'Inactive'
| 36.858757
| 92
| 0.51962
|
import os
import sqlite3 as lite
import sys
import json
import glob
import time
import uuid
from datetime import datetime, date
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../common'))
from utility import Utility
from paths import Paths
from label import Label
from image import Image
class Project (object):
CNN = 'CNN'
MLP = 'MLP'
UNET = 'UNET'
INVALID = -1
ONLINE = 0
OFFLINE = 1
TrainTime = 15
SyncTime = 20
def __init__(self,
id,
type,
revision=0,
baseModel='',
patchSize=39,
batchSize=16,
learningRate=0.01,
momentum=0.9,
hiddenUnits=[500,500,500],
nKernels=[48,48],
kernelSizes=[5,5],
threshold=0.5,
mean=0.5,
data_mean=0.5,
data_std=1.0
):
self.activemode = Project.ONLINE
self.id = id
self.type = type
self.baseModel = baseModel
self.std = data_std
self.mean = data_mean
self.threshold = threshold
self.patchSize = patchSize
self.batchSize = batchSize
self.learningRate = learningRate
self.momentum = momentum
self.hiddenUnits = hiddenUnits
self.nKernels = nKernels
self.kernelSizes = kernelSizes
self.trainTime = Project.TrainTime
self.syncTime = Project.SyncTime
self.epochs = 100
self.labels = []
self.images = []
self.validation_images = []
def addLabel(self, index, name, r, g, b):
self.labels.append( Label(index, name, r, g, b) )
def addImage(self, imageId, annFile=None, segFile=None, score=0.0, purpose='train'):
image = Image( imageId )
image.purpose = purpose
image.annotationFile = annFile
image.segmentationFile = segFile
image.traningScore = score
self.images.append( image )
def toJson(self):
data = {}
data['id'] = self.id
data['std'] = self.std
data['mean'] = self.mean
data['threshold'] = self.threshold
data['training_mod_status'] = self.trainingStatus
data['training_mod_status_str'] = Project.statusToStr( self.trainingStatus )
data['segmentation_mod_status'] = self.predictionStatus;
data['segmentation_mod_status_str'] = Project.statusToStr( self.predictionStatus )
data['initial_model'] = self.baseModel
data['model_type'] = self.type
data['sample_size'] = self.patchSize
data['learning_rate'] = self.learningRate
data['momentum'] = self.momentum
data['batch_size'] = self.batchSize
data['epochs'] = self.epochs
data['train_time'] = self.trainTime
data['sync_time'] = self.syncTime
data['model_mod_time'] = self.modelTime
data['locked'] = self.locked
data['labels'] = [l.toJson() for l in self.labels ]
data['hidden_layers'] = json.dumps( self.hiddenUnits )
data['num_kernels'] = json.dumps( self.nKernels )
data['kernel_sizes'] = json.dumps( self.kernelSizes )
data['images'] = [i.toJson() for i in self.images ]
data['validation_images'] = [i.toJson() for i in self.validation_images ]
data['offline'] = self.offline
data['online'] = self.online
data['baseline'] = self.baseline
data['stats'] = [ s.toJson() for s in self.stats ]
return data
@staticmethod
def fromJson(data):
project = Project(id=data['id'], type=data['model_type'])
project.baseModel = data['initial_model']
project.std = data['std']
project.mean = data['mean']
project.threshold = data['threshold']
project.patchSize = data['sample_size']
project.batchSize = data['batch_size']
project.learningRate = data['learning_rate']
project.momentum = data['momentum']
project.trainTime = data['train_time']
project.syncTime = data['sync_time']
print 'hidden_layers:', data['hidden_layers']
print 'num_kernels:', data['num_kernels']
print 'kernel_sizes:', data['kernel_sizes'], type(data['kernel_sizes'])
project.hiddenUnits = data['hidden_layers']
project.nKernels = data['num_kernels']
project.kernelSizes = data['kernel_sizes']
return project
def isTrainable(self):
if len(self.labels) == 0:
print 'no labels found...'
return False
if self.type == 'MLP':
if len(self.hiddenUnits) == 0:
print 'no hidden layer units found...'
return False
if self.type == 'CNN':
if len(self.nKernels) == 0:
print 'number of kernels not found...'
return False
elif len(self.kernelSizes) == 0:
print 'kernel sizes not found...'
return False
return True
@staticmethod
def statusToStr( status ):
if status == 1:
return 'Active'
elif status == 2:
return 'Pending Annotations'
else:
return 'Inactive'
| false
| true
|
f714644c0e16e7716dae2a067aae906a9e263d99
| 23,086
|
py
|
Python
|
script_helper/Script/Network.py
|
jupiterman/Data-Transfer-Neural-Way
|
a38140aab141e4749aedc30899714ad4028a6a8a
|
[
"Apache-2.0"
] | 1
|
2020-02-17T06:38:58.000Z
|
2020-02-17T06:38:58.000Z
|
script_helper/Script/Network.py
|
minihat/Neural-Style-Transfer
|
d900a5552c78f81450c3918640aa3e9210a57488
|
[
"Apache-2.0"
] | null | null | null |
script_helper/Script/Network.py
|
minihat/Neural-Style-Transfer
|
d900a5552c78f81450c3918640aa3e9210a57488
|
[
"Apache-2.0"
] | 1
|
2018-02-07T12:59:04.000Z
|
2018-02-07T12:59:04.000Z
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import time
import argparse
import warnings
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
"""
Neural Style Transfer with Keras 2.0.5
Based on:
https://github.com/fchollet/keras/blob/master/examples/neural_style_transfer.py
-----------------------------------------------------------------------------------------------------------------------
"""
THEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
TH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'
TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
# ---------------------------------------------------------------------------
# Command-line interface.
# Positional arguments: content image, one or more style images, output prefix.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
# NOTE(review): 'syle_image_paths' looks like a typo for 'style_image_paths',
# but renaming it would change the args attribute read below — left as is.
parser.add_argument('syle_image_paths', metavar='ref', nargs='+', type=str,
                    help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')

# Optional mask inputs.
parser.add_argument("--style_masks", type=str, default=None, nargs='+',
                    help='Masks for style images')
parser.add_argument("--content_mask", type=str, default=None,
                    help='Masks for the content image')
parser.add_argument("--color_mask", type=str, default=None,
                    help='Mask for color preservation')

# Working image size and loss weights.
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
                    help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
                    help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
                    help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
                    help="Scale the weighing of the style")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
                    help="Total Variation weight")

# Optimisation / model selection.
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
                    help="Number of iterations")
parser.add_argument("--model", default="vgg16", type=str,
                    help="Choices are 'vgg16' and 'vgg19'")
parser.add_argument("--content_loss_type", default=0, type=int,
                    help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')

# Output handling.  Boolean-ish options are strings parsed by str_to_bool below.
parser.add_argument("--rescale_image", dest="rescale_image", default="False", type=str,
                    help="Rescale image after execution to original dimentions")
parser.add_argument("--rescale_method", dest="rescale_method", default="bilinear", type=str,
                    help="Rescale image algorithm")
parser.add_argument("--maintain_aspect_ratio", dest="maintain_aspect_ratio", default="True", type=str,
                    help="Maintain aspect ratio of loaded images")
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
                    help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
                    help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
parser.add_argument("--pool_type", dest="pool", default="max", type=str,
                    help='Pooling type. Can be "ave" for average pooling or "max" for max pooling')
parser.add_argument('--preserve_color', dest='color', default="False", type=str,
                    help='Preserve original color in image')
parser.add_argument('--min_improvement', default=0.0, type=float,
                    help='Defines minimum improvement required to continue script')
def str_to_bool(v):
    """Interpret the common truthy strings "true", "yes", "t", "1"
    (case-insensitive) as True; everything else is False."""
    truthy = {"true", "yes", "t", "1"}
    return v.lower() in truthy
''' Arguments '''

args = parser.parse_args()

base_image_path = args.base_image_path
style_reference_image_paths = args.syle_image_paths
result_prefix = args.result_prefix

# Copy the style image paths into a plain list.
style_image_paths = []
for style_image_path in style_reference_image_paths:
    style_image_paths.append(style_image_path)

style_masks_present = args.style_masks is not None
mask_paths = []

if style_masks_present:
    for mask_path in args.style_masks:
        mask_paths.append(mask_path)

if style_masks_present:
    # Bug fix: the original formatted the error message with
    # len(style_masks_present) — len() on a bool raises TypeError, hiding the
    # intended diagnostic.  Report len(mask_paths) instead.
    assert len(style_image_paths) == len(mask_paths), "Wrong number of style masks provided.\n" \
                                                      "Number of style images = %d, \n" \
                                                      "Number of style mask paths = %d." % \
                                                      (len(style_image_paths), len(mask_paths))

content_mask_present = args.content_mask is not None
content_mask_path = args.content_mask

color_mask_present = args.color_mask is not None

rescale_image = str_to_bool(args.rescale_image)
maintain_aspect_ratio = str_to_bool(args.maintain_aspect_ratio)
preserve_color = str_to_bool(args.color)

# these are the weights of the different loss components
content_weight = args.content_weight
total_variation_weight = args.tv_weight

# One weight per style image; if the counts mismatch, distribute the total
# weight equally across all styles.
style_weights = []

if len(style_image_paths) != len(args.style_weight):
    print("Mismatch in number of style images provided and number of style weights provided. \n"
          "Found %d style images and %d style weights. \n"
          "Equally distributing weights to all other styles." % (len(style_image_paths), len(args.style_weight)))

    weight_sum = sum(args.style_weight) * args.style_scale
    count = len(style_image_paths)

    for i in range(len(style_image_paths)):
        style_weights.append(weight_sum / count)
else:
    for style_weight in args.style_weight:
        style_weights.append(style_weight * args.style_scale)

# Decide pooling function (1 = average pooling, 0 = max pooling).
pooltype = str(args.pool).lower()
assert pooltype in ["ave", "max"], 'Pooling argument is wrong. Needs to be either "ave" or "max".'

pooltype = 1 if pooltype == "ave" else 0

read_mode = "gray" if args.init_image == "gray" else "color"

# dimensions of the generated picture; filled in by preprocess_image.
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0

assert args.content_loss_type in [0, 1, 2], "Content Loss Type must be one of 0, 1 or 2"
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path, load_dims=False, read_mode="color"):
    """Load an image and convert it to a 4-D mean-centred BGR float32 tensor
    in the active backend's dim ordering.

    When load_dims is True, records the original dimensions, aspect ratio and
    chosen working size in the module-level globals as a side effect.
    """
    global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio

    mode = "RGB" if read_mode == "color" else "L"
    img = imread(image_path, mode=mode)  # Prevents crashes due to PNG images (ARGB)

    if mode == "L":
        # Expand the 1 channel grayscale to 3 channel grayscale image
        temp = np.zeros(img.shape + (3,), dtype=np.uint8)
        temp[:, :, 0] = img
        temp[:, :, 1] = img.copy()
        temp[:, :, 2] = img.copy()
        img = temp

    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = float(img_HEIGHT) / img_WIDTH

    img_width = args.img_size
    if maintain_aspect_ratio:
        img_height = int(img_width * aspect_ratio)
    else:
        img_height = args.img_size

    img = imresize(img, (img_width, img_height)).astype('float32')

    # RGB -> BGR, then subtract the per-channel means (VGG preprocessing).
    img = img[:, :, ::-1]
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68

    if K.image_dim_ordering() == "th":
        # Theano ordering wants channels first.
        img = img.transpose((2, 0, 1)).astype('float32')

    img = np.expand_dims(img, axis=0)
    return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Invert preprocess_image: reshape the flat tensor, add the channel
    means back, flip BGR -> RGB, and return a uint8 image array."""
    if K.image_dim_ordering() == "th":
        x = x.reshape((3, img_width, img_height)).transpose((1, 2, 0))
    else:
        x = x.reshape((img_width, img_height, 3))

    # Undo the mean-centring applied during preprocessing.
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        x[:, :, channel] += mean

    # BGR -> RGB
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')
# util function to preserve image color
def original_color_transform(content, generated, mask=None):
    """Copy the chrominance (CbCr) of the content image onto the generated
    image, optionally only at pixels where mask == 1, leaving luminance
    (style) untouched."""
    # Work in YCbCr so luminance and colour separate cleanly.
    generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr')

    if mask is None:
        generated[:, :, 1:] = content[:, :, 1:]  # Generated CbCr = Content CbCr
    else:
        rows, cols, _ = generated.shape
        for r in range(rows):
            for c in range(cols):
                if mask[r, c] == 1:
                    generated[r, c, 1:] = content[r, c, 1:]

    # Convert back to RGB colour space.
    generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB')
    return generated
def load_mask(mask_path, shape, return_mask_img=False):
    """Load a grayscale mask, binarize it and normalize it to [0, 1].

    shape is a 4-tuple batch shape in the active backend's dim ordering
    (the leading batch entry is ignored).  Returns the 2-D mask image when
    return_mask_img is True, otherwise a tensor with the mask replicated
    across every channel.
    """
    if K.image_dim_ordering() == "th":
        _, channels, width, height = shape
    else:
        _, width, height, channels = shape

    mask = imread(mask_path, mode="L")  # Grayscale mask load
    mask = imresize(mask, (width, height)).astype('float32')

    # Perform binarization of mask
    mask[mask <= 127] = 0
    mask[mask > 128] = 255

    # Fix: renamed the local from `max`, which shadowed the builtin.
    peak = np.amax(mask)
    mask /= peak

    if return_mask_img:
        return mask

    mask_shape = shape[1:]
    mask_tensor = np.empty(mask_shape)

    # Replicate the 2-D mask across every channel of the layer shape.
    for i in range(channels):
        if K.image_dim_ordering() == "th":
            mask_tensor[i, :, :] = mask
        else:
            mask_tensor[:, :, i] = mask

    return mask_tensor
def pooling_func(x):
    """Apply the globally selected 2x2/stride-2 pooling: average when the
    module-level pooltype == 1, max pooling otherwise."""
    pool_layer = AveragePooling2D if pooltype == 1 else MaxPooling2D
    return pool_layer((2, 2), strides=(2, 2))(x)
# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path, True, read_mode=read_mode))

style_reference_images = []
for style_path in style_image_paths:
    style_reference_images.append(K.variable(preprocess_image(style_path)))

# this will contain our generated image
if K.image_dim_ordering() == 'th':
    combination_image = K.placeholder((1, 3, img_width, img_height))
else:
    combination_image = K.placeholder((1, img_width, img_height, 3))

# Tensor order matters downstream: index 0 is the content image,
# 1..nb_style_images are the styles, and the last entry is the generated image.
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
    image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)

nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2  # Content and Output image not considered

# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)

if K.image_dim_ordering() == "th":
    shape = (nb_tensors, 3, img_width, img_height)
else:
    shape = (nb_tensors, img_width, img_height, 3)

ip = Input(tensor=input_tensor, batch_shape=shape)

# build the VGG16 network with our 3 images as input
# (the conv*_4 layers below are only added when --model vgg19 is chosen)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
if args.model == "vgg19":
    x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)
if args.model == "vgg19":
    x = Convolution2D(512, (3, 3), activation='relu', name='conv4_4', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
if args.model == "vgg19":
    x = Convolution2D(512, (3, 3), activation='relu', name='conv5_4', padding='same')(x)
x = pooling_func(x)

model = Model(ip, x)

# Download the pre-trained no-top VGG weights that match the chosen model
# and the backend's dim ordering.
if K.image_dim_ordering() == "th":
    if args.model == "vgg19":
        weights = get_file('vgg19_weights_th_dim_ordering_th_kernels_notop.h5', TH_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    else:
        weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
    if args.model == "vgg19":
        weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    else:
        weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')

model.load_weights(weights)

if K.backend() == 'tensorflow' and K.image_dim_ordering() == "th":
    warnings.warn('You are using the TensorFlow backend, yet you '
                  'are using the Theano '
                  'image dimension ordering convention '
                  '(`image_dim_ordering="th"`). '
                  'For best performance, set '
                  '`image_dim_ordering="tf"` in '
                  'your Keras config '
                  'at ~/.keras/keras.json.')
    convert_all_kernels_in_model(model)

print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
# compute the neural style loss
# first we need to define 4 util functions

# the gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
    """Return the Gram matrix (pairwise feature-map inner products) of a
    3-D feature tensor."""
    assert K.ndim(x) == 3
    if K.image_dim_ordering() == "th":
        flat = K.batch_flatten(x)
    else:
        # Move channels first so each row of the flattened matrix is one
        # feature map.
        flat = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(flat, K.transpose(flat))
# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination, mask_path=None, nb_channels=None):
assert K.ndim(style) == 3
assert K.ndim(combination) == 3
if content_mask_path is not None:
content_mask = K.variable(load_mask(content_mask_path, nb_channels))
combination = combination * K.stop_gradient(content_mask)
del content_mask
if mask_path is not None:
style_mask = K.variable(load_mask(mask_path, nb_channels))
style = style * K.stop_gradient(style_mask)
if content_mask_path is None:
combination = combination * K.stop_gradient(style_mask)
del style_mask
S = gram_matrix(style)
C = gram_matrix(combination)
channels = 3
size = img_width * img_height
return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    """Scaled sum of squared differences between base and combination
    features; the scaling mode is chosen by --content_loss_type."""
    channel_dim = 0 if K.image_dim_ordering() == "th" else -1

    try:
        channels = K.int_shape(base)[channel_dim]
    except TypeError:
        channels = K.shape(base)[channel_dim]
    size = img_width * img_height

    # Type 1 and 2 are normalised variants; type 0 is the raw squared error.
    if args.content_loss_type == 1:
        scale = 1. / (2. * (channels ** 0.5) * (size ** 0.5))
    elif args.content_loss_type == 2:
        scale = 1. / (channels * size)
    else:
        scale = 1.

    return scale * K.sum(K.square(combination - base))
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    """Total-variation regulariser: penalises differences between
    horizontally and vertically adjacent pixels of the 4-D image tensor."""
    assert K.ndim(x) == 4
    if K.image_dim_ordering() == 'th':
        a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
        b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
    else:
        a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
        b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
# combine these loss functions into a single scalar
loss = K.variable(0.)

# Content loss between the content image (tensor index 0) and the generated
# image (last tensor index) at the configured content layer.
layer_features = outputs_dict[args.content_layer]  # 'conv5_2' or 'conv4_2'
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss += content_weight * content_loss(base_image_features,
                                      combination_features)

style_masks = []
if style_masks_present:
    style_masks = mask_paths  # If mask present, pass dictionary of masks to style loss
else:
    style_masks = [None for _ in range(nb_style_images)]  # If masks not present, pass None to the style loss

channel_index = 1 if K.image_dim_ordering() == "th" else -1

# Style loss accumulates over the first conv layer of each VGG block,
# weighted per style image and averaged over the feature layers.
feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in feature_layers:
    layer_features = outputs_dict[layer_name]
    shape = shape_dict[layer_name]
    combination_features = layer_features[nb_tensors - 1, :, :, :]
    style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]

    sl = []
    for j in range(nb_style_images):
        sl.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))

    for j in range(nb_style_images):
        loss += (style_weights[j] / len(feature_layers)) * sl[j]

loss += total_variation_weight * total_variation_loss(combination_image)

# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)

outputs = [loss]
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)

f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Run one backend pass on the flat pixel vector x and return the
    (loss, flattened float64 gradients) pair."""
    if K.image_dim_ordering() == 'th':
        batch_shape = (1, 3, img_width, img_height)
    else:
        batch_shape = (1, img_width, img_height, 3)

    outs = f_outputs([x.reshape(batch_shape)])
    loss_value = outs[0]

    grads = outs[1:]
    if len(grads) == 1:
        grad_values = grads[0].flatten().astype('float64')
    else:
        grad_values = np.array(grads).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Caches the (loss, gradients) pair produced by one backend pass so
    scipy.optimize can fetch them through two separate callbacks."""

    def __init__(self):
        self.loss_value = None
        # Bug fix: this was `self.grads_values` (typo), leaving the
        # `grad_values` attribute undefined until the first loss() call.
        self.grad_values = None

    def loss(self, x):
        """Compute and cache loss and gradients for x; return the loss."""
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradients cached by the preceding loss() call and
        reset the cache."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()

# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
if "content" in args.init_image or "gray" in args.init_image:
    x = preprocess_image(base_image_path, True, read_mode=read_mode)
elif "noise" in args.init_image:
    x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.

    if K.image_dim_ordering() == "th":
        x = x.transpose((0, 3, 1, 2))
else:
    print("Using initial image : ", args.init_image)
    x = preprocess_image(args.init_image, read_mode=read_mode)

# We require original image if we are to preserve color in YCbCr mode
if preserve_color:
    content = imread(base_image_path, mode="YCbCr")
    content = imresize(content, (img_width, img_height))

    if color_mask_present:
        if K.image_dim_ordering() == "th":
            color_mask_shape = (None, None, img_width, img_height)
        else:
            color_mask_shape = (None, img_width, img_height, None)

        color_mask = load_mask(args.color_mask, color_mask_shape, return_mask_img=True)
    else:
        color_mask = None
else:
    color_mask = None

num_iter = args.num_iter
prev_min_val = -1

improvement_threshold = float(args.min_improvement)

for i in range(num_iter):
    print("Starting iteration %d of %d" % ((i + 1), num_iter))
    start_time = time.time()

    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)

    if prev_min_val == -1:
        prev_min_val = min_val

    improvement = (prev_min_val - min_val) / prev_min_val * 100

    print('Current loss value:', min_val, " Improvement : %0.3f" % improvement, "%")
    prev_min_val = min_val

    # save current generated image
    img = deprocess_image(x.copy())

    if preserve_color and content is not None:
        img = original_color_transform(content, img, mask=color_mask)

    if not rescale_image:
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp=args.rescale_method)

    if rescale_image:
        print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
        img = imresize(img, (img_WIDTH, img_HEIGHT), interp=args.rescale_method)

    fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))

    # Bug fix: the original compared floats with `is not 0.0`, an identity
    # check against a literal (not value equality, and a SyntaxWarning on
    # modern Python).  Use numeric comparison for the early-stopping test.
    if improvement_threshold != 0.0:
        if improvement < improvement_threshold and improvement != 0.0:
            print("Improvement (%f) is less than improvement threshold (%f). Early stopping script." % (
                improvement, improvement_threshold))
            exit()
| 36.878594
| 152
| 0.668067
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import time
import argparse
import warnings
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
THEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
TH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'
TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
help='Path to the image to transform.')
parser.add_argument('syle_image_paths', metavar='ref', nargs='+', type=str,
help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
help='Prefix for the saved results.')
parser.add_argument("--style_masks", type=str, default=None, nargs='+',
help='Masks for style images')
parser.add_argument("--content_mask", type=str, default=None,
help='Masks for the content image')
parser.add_argument("--color_mask", type=str, default=None,
help='Mask for color preservation')
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
help="Scale the weighing of the style")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
help="Total Variation weight")
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
help="Number of iterations")
parser.add_argument("--model", default="vgg16", type=str,
help="Choices are 'vgg16' and 'vgg19'")
parser.add_argument("--content_loss_type", default=0, type=int,
help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')
parser.add_argument("--rescale_image", dest="rescale_image", default="False", type=str,
help="Rescale image after execution to original dimentions")
parser.add_argument("--rescale_method", dest="rescale_method", default="bilinear", type=str,
help="Rescale image algorithm")
parser.add_argument("--maintain_aspect_ratio", dest="maintain_aspect_ratio", default="True", type=str,
help="Maintain aspect ratio of loaded images")
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
parser.add_argument("--pool_type", dest="pool", default="max", type=str,
help='Pooling type. Can be "ave" for average pooling or "max" for max pooling')
parser.add_argument('--preserve_color', dest='color', default="False", type=str,
help='Preserve original color in image')
parser.add_argument('--min_improvement', default=0.0, type=float,
help='Defines minimum improvement required to continue script')
def str_to_bool(v):
return v.lower() in ("true", "yes", "t", "1")
args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_paths = args.syle_image_paths
result_prefix = args.result_prefix
style_image_paths = []
for style_image_path in style_reference_image_paths:
style_image_paths.append(style_image_path)
style_masks_present = args.style_masks is not None
mask_paths = []
if style_masks_present:
for mask_path in args.style_masks:
mask_paths.append(mask_path)
if style_masks_present:
    # Bug fix: len() was called on the boolean `style_masks_present`, which
    # raises TypeError when the assert fires; report len(mask_paths) instead.
    assert len(style_image_paths) == len(mask_paths), "Wrong number of style masks provided.\n" \
                                                      "Number of style images = %d, \n" \
                                                      "Number of style mask paths = %d." % \
                                                      (len(style_image_paths), len(mask_paths))
content_mask_present = args.content_mask is not None
content_mask_path = args.content_mask
color_mask_present = args.color_mask is not None
rescale_image = str_to_bool(args.rescale_image)
maintain_aspect_ratio = str_to_bool(args.maintain_aspect_ratio)
preserve_color = str_to_bool(args.color)
content_weight = args.content_weight
total_variation_weight = args.tv_weight
style_weights = []
if len(style_image_paths) != len(args.style_weight):
print("Mismatch in number of style images provided and number of style weights provided. \n"
"Found %d style images and %d style weights. \n"
"Equally distributing weights to all other styles." % (len(style_image_paths), len(args.style_weight)))
weight_sum = sum(args.style_weight) * args.style_scale
count = len(style_image_paths)
for i in range(len(style_image_paths)):
style_weights.append(weight_sum / count)
else:
for style_weight in args.style_weight:
style_weights.append(style_weight * args.style_scale)
pooltype = str(args.pool).lower()
assert pooltype in ["ave", "max"], 'Pooling argument is wrong. Needs to be either "ave" or "max".'
pooltype = 1 if pooltype == "ave" else 0
read_mode = "gray" if args.init_image == "gray" else "color"
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0
assert args.content_loss_type in [0, 1, 2], "Content Loss Type must be one of 0, 1 or 2"
def preprocess_image(image_path, load_dims=False, read_mode="color"):
global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio
mode = "RGB" if read_mode == "color" else "L"
img = imread(image_path, mode=mode)
if mode == "L":
temp = np.zeros(img.shape + (3,), dtype=np.uint8)
temp[:, :, 0] = img
temp[:, :, 1] = img.copy()
temp[:, :, 2] = img.copy()
img = temp
if load_dims:
img_WIDTH = img.shape[0]
img_HEIGHT = img.shape[1]
aspect_ratio = float(img_HEIGHT) / img_WIDTH
img_width = args.img_size
if maintain_aspect_ratio:
img_height = int(img_width * aspect_ratio)
else:
img_height = args.img_size
img = imresize(img, (img_width, img_height)).astype('float32')
img = img[:, :, ::-1]
img[:, :, 0] -= 103.939
img[:, :, 1] -= 116.779
img[:, :, 2] -= 123.68
if K.image_dim_ordering() == "th":
img = img.transpose((2, 0, 1)).astype('float32')
img = np.expand_dims(img, axis=0)
return img
def deprocess_image(x):
if K.image_dim_ordering() == "th":
x = x.reshape((3, img_width, img_height))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((img_width, img_height, 3))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
def original_color_transform(content, generated, mask=None):
generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr')
if mask is None:
generated[:, :, 1:] = content[:, :, 1:]
else:
width, height, channels = generated.shape
for i in range(width):
for j in range(height):
if mask[i, j] == 1:
generated[i, j, 1:] = content[i, j, 1:]
generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB')
return generated
def load_mask(mask_path, shape, return_mask_img=False):
    """Load a grayscale mask, binarize it and normalize it to [0, 1];
    return the 2-D mask or a per-channel replicated mask tensor."""
    if K.image_dim_ordering() == "th":
        _, channels, width, height = shape
    else:
        _, width, height, channels = shape

    mask = imread(mask_path, mode="L")
    mask = imresize(mask, (width, height)).astype('float32')

    mask[mask <= 127] = 0
    mask[mask > 128] = 255

    # Fix: renamed the local from `max`, which shadowed the builtin.
    peak = np.amax(mask)
    mask /= peak

    if return_mask_img:
        return mask

    mask_shape = shape[1:]
    mask_tensor = np.empty(mask_shape)

    for i in range(channels):
        if K.image_dim_ordering() == "th":
            mask_tensor[i, :, :] = mask
        else:
            mask_tensor[:, :, i] = mask

    return mask_tensor
def pooling_func(x):
if pooltype == 1:
return AveragePooling2D((2, 2), strides=(2, 2))(x)
else:
return MaxPooling2D((2, 2), strides=(2, 2))(x)
base_image = K.variable(preprocess_image(base_image_path, True, read_mode=read_mode))
style_reference_images = []
for style_path in style_image_paths:
style_reference_images.append(K.variable(preprocess_image(style_path)))
if K.image_dim_ordering() == 'th':
combination_image = K.placeholder((1, 3, img_width, img_height))
else:
combination_image = K.placeholder((1, img_width, img_height, 3))
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2
input_tensor = K.concatenate(image_tensors, axis=0)
if K.image_dim_ordering() == "th":
shape = (nb_tensors, 3, img_width, img_height)
else:
shape = (nb_tensors, img_width, img_height, 3)
ip = Input(tensor=input_tensor, batch_shape=shape)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_4', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_4', padding='same')(x)
x = pooling_func(x)
model = Model(ip, x)
if K.image_dim_ordering() == "th":
if args.model == "vgg19":
weights = get_file('vgg19_weights_th_dim_ordering_th_kernels_notop.h5', TH_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
if args.model == "vgg19":
weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
model.load_weights(weights)
if K.backend() == 'tensorflow' and K.image_dim_ordering() == "th":
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
print('Model loaded.')
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
def gram_matrix(x):
    """Gram matrix (channel-by-channel feature correlations) of one 3-D map."""
    assert K.ndim(x) == 3
    # Flatten to (channels, pixels); TensorFlow ordering stores channels
    # last, so move that axis to the front before flattening.
    if K.image_dim_ordering() == "th":
        flat = K.batch_flatten(x)
    else:
        flat = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(flat, K.transpose(flat))
def style_loss(style, combination, mask_path=None, nb_channels=None):
    """Style loss between one style feature map and the combination map.

    Mean-squared distance between Gram matrices, optionally restricted by
    content/style masks. Masks are wrapped in stop_gradient so they act
    as constants during optimization. Relies on module-level globals
    `content_mask_path`, `load_mask`, `img_width`, `img_height`.
    """
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    if content_mask_path is not None:
        content_mask = K.variable(load_mask(content_mask_path, nb_channels))
        combination = combination * K.stop_gradient(content_mask)
        del content_mask
    if mask_path is not None:
        style_mask = K.variable(load_mask(mask_path, nb_channels))
        style = style * K.stop_gradient(style_mask)
        # Only mask the combination here if the content mask did not already.
        if content_mask_path is None:
            combination = combination * K.stop_gradient(style_mask)
        del style_mask
    S = gram_matrix(style)
    C = gram_matrix(combination)
    # Normalization from Gatys et al.; channels is fixed at 3 here even
    # though the feature maps have more -- NOTE(review): confirm intended.
    channels = 3
    size = img_width * img_height
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
def content_loss(base, combination):
    """Content loss: scaled sum-of-squares between content and combination
    features at the chosen content layer.

    `args.content_loss_type` selects the normalization: 1 and 2 are common
    neural-style scalings; any other value leaves the raw sum.
    """
    channel_dim = 0 if K.image_dim_ordering() == "th" else -1
    try:
        channels = K.int_shape(base)[channel_dim]
    except TypeError:
        # Static shape unknown; fall back to the symbolic shape tensor.
        channels = K.shape(base)[channel_dim]
    size = img_width * img_height
    if args.content_loss_type == 1:
        multiplier = 1. / (2. * (channels ** 0.5) * (size ** 0.5))
    elif args.content_loss_type == 2:
        multiplier = 1. / (channels * size)
    else:
        multiplier = 1.
    return multiplier * K.sum(K.square(combination - base))
def total_variation_loss(x):
    """Total-variation regularizer: penalizes differences between
    neighbouring pixels to keep the generated image locally coherent."""
    assert K.ndim(x) == 4
    # Squared differences against the down/right neighbour; the slice
    # axes depend on the backend's dimension ordering.
    if K.image_dim_ordering() == 'th':
        down = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
        right = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
    else:
        down = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
        right = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(down + right, 1.25))
# --- Assemble the total loss ---
loss = K.variable(0.)
# Content term: tensor 0 of the concatenated batch is the content image,
# the last tensor is the combination image being optimized.
layer_features = outputs_dict[args.content_layer]
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss += content_weight * content_loss(base_image_features,
                                      combination_features)
# One (optional) mask per style image.
style_masks = []
if style_masks_present:
    style_masks = mask_paths
else:
    style_masks = [None for _ in range(nb_style_images)]
channel_index = 1 if K.image_dim_ordering() == "th" else -1
# Style terms: tensors 1 .. nb_tensors-2 are the style references;
# each style layer contributes equally (divide by len(feature_layers)).
feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in feature_layers:
    layer_features = outputs_dict[layer_name]
    shape = shape_dict[layer_name]
    combination_features = layer_features[nb_tensors - 1, :, :, :]
    style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]
    sl = []
    for j in range(nb_style_images):
        sl.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))
    for j in range(nb_style_images):
        loss += (style_weights[j] / len(feature_layers)) * sl[j]
loss += total_variation_weight * total_variation_loss(combination_image)
# Single backend function returning [loss, grad(s)] in one evaluation.
grads = K.gradients(loss, combination_image)
outputs = [loss]
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Evaluate loss and flattened float64 gradient for a flat image vector."""
    # Reshape the flat L-BFGS parameter vector back into an image batch.
    if K.image_dim_ordering() == 'th':
        batch = x.reshape((1, 3, img_width, img_height))
    else:
        batch = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([batch])
    loss_value = outs[0]
    grad_list = outs[1:]
    if len(grad_list) == 1:
        grad_values = grad_list[0].flatten().astype('float64')
    else:
        grad_values = np.array(grad_list).flatten().astype('float64')
    return loss_value, grad_values
class Evaluator(object):
    """Caches the loss/gradient pair from one combined evaluation.

    scipy's fmin_l_bfgs_b requests the loss and gradient via separate
    callbacks; computing both in loss() and caching the gradient for the
    subsequent grads() call avoids evaluating the network twice.
    """

    def __init__(self):
        self.loss_value = None
        # BUGFIX: was `self.grads_values` (typo) -- an attribute that was
        # never read; `grad_values` is the name loss()/grads() use.
        self.grad_values = None

    def loss(self, x):
        """Evaluate loss and cache the gradient. Must precede grads()."""
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradient cached by the preceding loss() call."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()

# --- Choose the starting image for the optimization ---
if "content" in args.init_image or "gray" in args.init_image:
    x = preprocess_image(base_image_path, True, read_mode=read_mode)
elif "noise" in args.init_image:
    x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.
    if K.image_dim_ordering() == "th":
        x = x.transpose((0, 3, 1, 2))
else:
    print("Using initial image : ", args.init_image)
    x = preprocess_image(args.init_image, read_mode=read_mode)

# Keep the YCbCr content image around if we are preserving its color.
if preserve_color:
    content = imread(base_image_path, mode="YCbCr")
    content = imresize(content, (img_width, img_height))
    if color_mask_present:
        if K.image_dim_ordering() == "th":
            color_mask_shape = (None, None, img_width, img_height)
        else:
            color_mask_shape = (None, img_width, img_height, None)
        color_mask = load_mask(args.color_mask, color_mask_shape, return_mask_img=True)
    else:
        color_mask = None
else:
    color_mask = None

num_iter = args.num_iter
prev_min_val = -1
improvement_threshold = float(args.min_improvement)

for i in range(num_iter):
    print("Starting iteration %d of %d" % ((i + 1), num_iter))
    start_time = time.time()

    # One L-BFGS step; evaluator caches the gradient from the loss call.
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)

    if prev_min_val == -1:
        prev_min_val = min_val

    improvement = (prev_min_val - min_val) / prev_min_val * 100

    print('Current loss value:', min_val, " Improvement : %0.3f" % improvement, "%")
    prev_min_val = min_val
    img = deprocess_image(x.copy())

    if preserve_color and content is not None:
        img = original_color_transform(content, img, mask=color_mask)

    if not rescale_image:
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp=args.rescale_method)

    if rescale_image:
        print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
        img = imresize(img, (img_WIDTH, img_HEIGHT), interp=args.rescale_method)

    fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))

    # BUGFIX: the original used `is not 0.0`, an *identity* test.
    # `float(args.min_improvement)` and the computed `improvement` are
    # fresh float objects, so `is not 0.0` was effectively always True
    # and the "threshold disabled" (0.0) setting never took effect.
    # Value equality is the correct check.
    if improvement_threshold != 0.0:
        if improvement < improvement_threshold and improvement != 0.0:
            print("Improvement (%f) is less than improvement threshold (%f). Early stopping script." % (
                improvement, improvement_threshold))
            exit()
| true
| true
|
f714646378a1286e368211c93a3126a60f12ee58
| 3,625
|
py
|
Python
|
src/varDA.py
|
aerorahul/lorenz-da
|
d8a01f512c974f4d74f8ea06d016956ff165da4b
|
[
"Apache-2.0"
] | 8
|
2017-09-02T07:50:14.000Z
|
2022-03-17T06:48:32.000Z
|
src/varDA.py
|
aerorahul/lorenz-da
|
d8a01f512c974f4d74f8ea06d016956ff165da4b
|
[
"Apache-2.0"
] | 1
|
2020-01-03T04:46:07.000Z
|
2020-02-14T18:29:28.000Z
|
src/varDA.py
|
aerorahul/lorenz-da
|
d8a01f512c974f4d74f8ea06d016956ff165da4b
|
[
"Apache-2.0"
] | 8
|
2017-08-30T03:31:27.000Z
|
2021-03-18T16:03:50.000Z
|
#!/usr/bin/env python
###############################################################
# < next few lines under version control, D O N O T E D I T >
# $Date$
# $Revision$
# $Author$
# $Id$
###############################################################
###############################################################
# varDA.py - driver script for variational DA
###############################################################
###############################################################
__author__ = "Rahul Mahajan"
__email__ = "rahul.mahajan@nasa.gov"
__copyright__ = "Copyright 2012, NASA / GSFC / GMAO"
__license__ = "GPL"
__status__ = "Prototype"
###############################################################
###############################################################
import sys
import numpy as np
from module_Lorenz import *
from module_DA import *
from module_IO import *
from param_varDA import *
###############################################################
###############################################################
def main():
    """Cycle a variational DA experiment with the Lorenz model.

    Reads initial conditions and the climatological covariance from disk,
    then for each assimilation cycle advances truth and analysis, runs the
    variational update over DA.maxouter outer loops, and writes diagnostics.
    (Python 2 source -- print statements are intentional.)
    """
    # insure the same sequence of random numbers EVERY TIME
    np.random.seed(0)
    # check for valid variational data assimilation options
    check_varDA(DA,varDA)
    # get IC's
    [xt, xa] = get_IC(model, restart, Nens=None)
    xb = xa.copy()
    # Load climatological covariance once and for all ...
    Bc = read_clim_cov(model=model,norm=True)
    # Total number of observations over the assimilation window.
    nobs = model.Ndof*varDA.fdvar.nobstimes
    y = np.tile(np.dot(H,xt),[varDA.fdvar.nobstimes,1])
    # create diagnostic file and write initial conditions to the diagnostic file
    create_diag(diag_file, model.Ndof, nobs=nobs, nouter=DA.maxouter)
    for outer in range(DA.maxouter):
        # NOTE(review): np.NaN was removed in NumPy 2.0; np.nan is portable.
        write_diag(diag_file.filename, 0, outer, xt, xb, xa, np.reshape(y,[nobs]), np.diag(H), np.diag(R), niters=np.NaN)
    print 'Cycling ON the attractor ...'
    for k in range(DA.nassim):
        print '========== assimilation time = %5d ========== ' % (k+1)
        # advance truth with the full nonlinear model; set verification values
        xs = model.advance(xt, varDA.fdvar.tbkgd, perfect=True)
        xt = xs[-1,:].copy()
        ver = xt.copy()
        # new observations from noise about truth
        y = create_obs(model,varDA,xt,H,R,yold=y)
        # advance analysis with the full nonlinear model
        xs = model.advance(xa, varDA.fdvar.tbkgd, perfect=False)
        xb = xs[-1,:].copy()
        for outer in range(DA.maxouter):
            # compute static background error cov.
            Bs = compute_B(varDA,Bc,outer=outer)
            # update step
            xa, niters = update_varDA(xb, Bs, y, R, H, varDA, model)
            # write diagnostics to disk for each outer loop (at the beginning of the window)
            write_diag(diag_file.filename, k+1, outer, ver, xb, xa, np.reshape(y,[nobs]), np.diag(H), np.diag(R), niters=niters)
            # update prior for next outer loop
            xb = xa.copy()
        # if doing 4Dvar, step to the next assimilation time from the beginning of assimilation window
        if ( varDA.update == 2 ):
            xs = model.advance(xt, varDA.fdvar.tanal, perfect=True )
            xt = xs[-1,:].copy()
            xs = model.advance(xa, varDA.fdvar.tanal, perfect=False)
            xa = xs[-1,:].copy()
    print '... all done ...'
    sys.exit(0)
###############################################################
###############################################################
if __name__ == "__main__": main()
###############################################################
| 35.539216
| 128
| 0.486069
| false
| true
|
|
f7146499f43a1ececce716dc775d27d50a4ee29c
| 846
|
py
|
Python
|
test/test_item_option.py
|
gstingy/uc_python_api
|
9a0bd3f6e63f616586681518e44fe37c6bae2bba
|
[
"Apache-2.0"
] | null | null | null |
test/test_item_option.py
|
gstingy/uc_python_api
|
9a0bd3f6e63f616586681518e44fe37c6bae2bba
|
[
"Apache-2.0"
] | null | null | null |
test/test_item_option.py
|
gstingy/uc_python_api
|
9a0bd3f6e63f616586681518e44fe37c6bae2bba
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ultracart
from ultracart.rest import ApiException
from ultracart.models.item_option import ItemOption
class TestItemOption(unittest.TestCase):
    """Generated unit-test stubs for the ItemOption model."""

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testItemOption(self):
        """Construction smoke test for ItemOption (stub)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = ultracart.models.item_option.ItemOption()
        pass
if __name__ == '__main__':
unittest.main()
| 18.8
| 79
| 0.680851
|
from __future__ import absolute_import
import os
import sys
import unittest
import ultracart
from ultracart.rest import ApiException
from ultracart.models.item_option import ItemOption
class TestItemOption(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testItemOption(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
f71466828b23b73c75d19b330e7be8b61bb3a893
| 432
|
py
|
Python
|
python-startup.py
|
stinbetz/installify
|
850dfd80300f594cd0366df5201a7915229990b1
|
[
"MIT"
] | null | null | null |
python-startup.py
|
stinbetz/installify
|
850dfd80300f594cd0366df5201a7915229990b1
|
[
"MIT"
] | null | null | null |
python-startup.py
|
stinbetz/installify
|
850dfd80300f594cd0366df5201a7915229990b1
|
[
"MIT"
] | null | null | null |
import sys
import subprocess
def snipe_import_exceptions(exctype, value, traceback):
    """Excepthook: auto-install the missing module on ImportError.

    Any other exception type is forwarded to the default hook.
    """
    if exctype is not ImportError:
        sys.__excepthook__(exctype, value, traceback)
        return
    # ImportError messages end with the module name ("No module named foo").
    missing = str(value).split(" ")[-1]
    install_module(missing)
# Install the hook globally so every uncaught exception passes through it.
sys.excepthook = snipe_import_exceptions
def install_module(module):
    """Best-effort `sudo pip install` of *module* (Python 2 startup helper)."""
    print "installing module", module
    # NOTE(review): shell=True with string interpolation is a command
    # injection risk if the module name comes from an untrusted exception
    # message; prefer subprocess.call(['sudo', 'pip', 'install', module]).
    subprocess.call("sudo pip install %s" %module, shell=True)
| 25.411765
| 62
| 0.706019
|
import sys
import subprocess
def snipe_import_exceptions(exctype, value, traceback):
if exctype == ImportError:
module = str(value).split(" ")[-1:][0]
install_module(module)
else:
sys.__excepthook__(exctype, value, traceback)
sys.excepthook = snipe_import_exceptions
def install_module(module):
print "installing module", module
subprocess.call("sudo pip install %s" %module, shell=True)
| false
| true
|
f714679fa4b4036479edd5366153bd136b63a604
| 8,720
|
py
|
Python
|
pta_sim/pint_sim.py
|
Hazboun6/pta_sim
|
cf8676e23056586ecb35a030dbaad45a1f985764
|
[
"MIT"
] | 1
|
2019-05-22T10:35:49.000Z
|
2019-05-22T10:35:49.000Z
|
pta_sim/pint_sim.py
|
Hazboun6/pta_sim
|
cf8676e23056586ecb35a030dbaad45a1f985764
|
[
"MIT"
] | 1
|
2021-11-15T17:48:32.000Z
|
2021-11-15T17:48:32.000Z
|
pta_sim/pint_sim.py
|
Hazboun6/pta_sim
|
cf8676e23056586ecb35a030dbaad45a1f985764
|
[
"MIT"
] | 2
|
2019-05-23T13:55:53.000Z
|
2021-06-23T13:15:22.000Z
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from pint.residuals import resids
import pint.toa as toa
from pint import models
__all__ = ['make_ideal',
'createfourierdesignmatrix_red',
'add_rednoise',
'add_dm_rednoise',
'add_efac',
'add_equad',
'add_ecorr']
def make_ideal(toas, model, iterations=2):
    '''
    Takes a pint.toas and pint.model object and effectively zeros out the residuals.

    Each pass subtracts the current timing residuals from the TOAs in
    place; a couple of iterations converge to (numerically) zero residuals.
    '''
    for ii in range(iterations):
        rs=resids(toas, model)
        toas.adjust_TOAs(TimeDelta(-1.0*rs.time_resids))
def createfourierdesignmatrix_red(toas, nmodes=30, Tspan=None,
                                  logf=False, fmin=None, fmax=None,
                                  pshift=False, modes=None):
    """
    Build the Fourier design matrix of eq. 11, Lentati et al. (2013).

    Parameters
    ----------
    toas : array
        TOA time series [s].
    nmodes : int
        Number of Fourier frequencies (ignored when `modes` is given).
    Tspan : float
        Optional alternative time span [s]; defaults to max(toas)-min(toas).
    logf : bool
        Use logarithmic frequency spacing.
    fmin, fmax : float
        Lower/upper sampling frequency for the linear/log grids.
    pshift : bool
        Add a random phase shift to each basis function.
    modes : array
        Explicit sampling frequencies; overrides the grid options.

    Returns
    -------
    F : array
        Design matrix, shape (len(toas), 2*nmodes): sine/cosine pairs.
    Ffreqs : array
        Frequency of each column, shape (2*nmodes,).
    """
    span = Tspan if Tspan is not None else toas.max() - toas.min()

    # Choose the sampling frequencies.
    if modes is not None:
        nmodes = len(modes)
        freqs = np.asarray(modes)
    elif fmin is None and fmax is None and not logf:
        # Linear grid k/T so partially overlapping data sets share
        # identical frequencies.
        freqs = np.arange(1, nmodes + 1) / span
    else:
        lo = 1 / span if fmin is None else fmin
        hi = nmodes / span if fmax is None else fmax
        if logf:
            freqs = np.logspace(np.log10(lo), np.log10(hi), nmodes)
        else:
            freqs = np.linspace(lo, hi, nmodes)

    # Optional random phase offset per mode.
    phases = np.random.uniform(0.0, 2 * np.pi, nmodes) if pshift else np.zeros(nmodes)

    # Sine in the even columns, cosine in the odd columns.
    arg = 2 * np.pi * toas[:, None] * freqs[None, :] + phases[None, :]
    F = np.empty((len(toas), 2 * nmodes))
    F[:, ::2] = np.sin(arg)
    F[:, 1::2] = np.cos(arg)

    return F, np.repeat(freqs, 2)
def add_rednoise(TOAs, A, gamma, components=30,
                 seed=None, modes=None, Tspan=None):
    """Add red noise with P(f) = A^2 / (12 pi^2) (f * year)^-gamma,
    using `components` Fourier bases.
    Optionally take a pseudorandom-number-generator seed.

    NOTE(review): `components` and `Tspan` are accepted but never used --
    the basis size comes from createfourierdesignmatrix_red's default (or
    `modes`), and Tspan is recomputed from the TOAs below. Confirm intended.
    """
    # nobs=len(psr.toas)
    nobs = len(TOAs.table)
    day_in_sec = 86400
    year_in_sec = 365.25*day_in_sec
    fyr = 1 / year_in_sec
    if seed is not None:
        np.random.seed(seed)
    if modes is not None:
        # Explicit modes are still passed through below; only linear
        # spacing is supported by this routine.
        print('Must use linear spacing.')
    # TDB TOAs converted from days to seconds.
    toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec #to sec
    Tspan = toas.max() - toas.min()
    F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
    # Power-law prior variance evaluated at each basis frequency.
    prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
    y = np.sqrt(prior) * np.random.randn(freqs.size)
    # Realized red-noise delays, injected by shifting the TOAs.
    dt = np.dot(F,y) * u.s
    TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_dm_rednoise(TOAs, A, gamma, components=30, rf_ref=1400,
                    seed=None, modes=None, Tspan=None, useDM=False):
    """Add chromatic red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma,
    scaled per TOA by (rf_ref / radio_freq)^2.
    Optionally take a pseudorandom-number-generator seed.

    NOTE(review): as in add_rednoise, `components` and `Tspan` are unused.
    """
    # nobs=len(psr.toas)
    nobs = len(TOAs.table)
    radio_freqs = TOAs.table['freq']
    if useDM:
        # presumably the DM dispersion constant in these units -- TODO confirm
        rf_ref = 4.15e3
    # Chromatic scaling factor for each TOA.
    chrom = rf_ref**2 / radio_freqs**2
    day_in_sec = 86400
    year_in_sec = 365.25*day_in_sec
    fyr = 1 / year_in_sec
    if seed is not None:
        np.random.seed(seed)
    # TDB TOAs converted from days to seconds.
    toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec #to sec
    Tspan = toas.max() - toas.min()
    F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
    # Power-law prior variance evaluated at each basis frequency.
    prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
    y = np.sqrt(prior) * np.random.randn(freqs.size)
    # Achromatic realization scaled by the per-TOA chromatic factor.
    dt = chrom.quantity.value * np.dot(F,y) * u.s
    TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_equad(TOAs, equad, flagid=None, flags=None, seed=None):
    """Add quadrature noise of rms `equad` [s].
    Optionally take a pseudorandom-number-generator seed.

    `equad` may be a scalar, or a sequence matched to `flags` for
    per-backend values.
    """
    if seed is not None:
        np.random.seed(seed)
    # default equadvec
    equadvec = np.zeros(TOAs.ntoas)
    # check that equad is scalar if flags is None
    if flags is None:
        if not np.isscalar(equad):
            raise ValueError('ERROR: If flags is None, equad must be a scalar')
        else:
            equadvec = np.ones(TOAs.ntoas) * equad
    if flags is not None and flagid is not None and not np.isscalar(equad):
        if len(equad) == len(flags):
            for ct, flag in enumerate(flags):
                # NOTE(review): matches on the hard-coded flag key 'f'
                # rather than on `flagid` -- confirm intended.
                ind = flag == np.array([f['f'] for f
                                        in TOAs.table['flags'].data])
                equadvec[ind] = equad[ct]
    # Independent Gaussian deviates with per-TOA sigma = equad.
    equadvec = equadvec * u.s * np.random.randn(TOAs.ntoas)
    TOAs.adjust_TOAs(TimeDelta(equadvec.to('day')))
def add_efac(TOAs, efac, flagid=None, flags=None, seed=None):
    """Add white noise scaled by `efac` times each TOA's uncertainty.
    Optionally take a pseudorandom-number-generator seed.

    `efac` may be a scalar, or a sequence matched to `flags` for
    per-backend values. (Docstring previously copy-pasted from add_equad.)
    """
    if seed is not None:
        np.random.seed(seed)
    # default efacvec
    efacvec = np.zeros(TOAs.ntoas)
    # check that efac is scalar if flags is None
    if flags is None:
        if not np.isscalar(efac):
            raise ValueError('ERROR: If flags is None, efac must be a scalar')
        else:
            efacvec = np.ones(TOAs.ntoas) * efac
    if flags is not None and flagid is not None and not np.isscalar(efac):
        if len(efac) == len(flags):
            for ct, flag in enumerate(flags):
                # NOTE(review): matches on the hard-coded flag key 'f'
                # rather than on `flagid` -- confirm intended.
                ind = flag == np.array([f['f'] for f
                                        in TOAs.table['flags'].data])
                efacvec[ind] = efac[ct]
    # Gaussian deviates with per-TOA sigma = efac * (TOA uncertainty).
    dt = efacvec * TOAs.get_errors().to('s') * np.random.randn(TOAs.ntoas)
    TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def quantize(times, flags=None, dt=1.0):
    """Group TOAs into observing epochs.

    A new epoch starts whenever a (time-sorted) TOA is `dt` or more after
    the *first* TOA of the current epoch. Returns the per-epoch mean time,
    optionally the flag of each epoch's first member, and the exploder
    matrix U with U[i, j] = 1 iff TOA i belongs to epoch j.
    """
    order = np.argsort(times)

    epoch_start = [times[order[0]]]
    epochs = [[order[0]]]
    for idx in order[1:]:
        if times[idx] - epoch_start[-1] < dt:
            epochs[-1].append(idx)
        else:
            epoch_start.append(times[idx])
            epochs.append([idx])

    avetoas = np.array([np.mean(times[members]) for members in epochs], 'd')
    if flags is not None:
        aveflags = np.array([flags[members[0]] for members in epochs])

    U = np.zeros((len(times), len(epochs)), 'd')
    for col, members in enumerate(epochs):
        U[members, col] = 1

    if flags is not None:
        return avetoas, aveflags, U
    return avetoas, U
def add_ecorr(TOAs, ecorr, flagid=None, flags=None, coarsegrain=1*u.s, seed=None):
    """Add correlated quadrature noise of rms `ecorr` [s],
    with coarse-graining time `coarsegrain` [days].
    Optionally take a pseudorandom-number-generator seed.

    TOAs are grouped into epochs via quantize(); one Gaussian deviate per
    epoch is shared by every TOA of that epoch.
    """
    if seed is not None:
        np.random.seed(seed)
    # TDB TOAs in days.
    times = np.array(TOAs.table['tdbld'], dtype='float64')
    if flags is None:
        t, U = quantize(times, dt=coarsegrain.to('day').value)
    elif flags is not None and flagid is not None:
        flagvals = np.array([f[flagid] for f in TOAs.table['flags'].data])
        t, f, U = quantize(times, flagvals, dt=coarsegrain.to('day').value)
    # default ecorr value
    ecorrvec = np.zeros(len(t))
    # check that ecorr is scalar if flags is None
    if flags is None:
        if not np.isscalar(ecorr):
            raise ValueError('ERROR: If flags is None, ecorr must be a scalar')
        else:
            ecorrvec = np.ones(len(t)) * ecorr
    if flags is not None and flagid is not None and not np.isscalar(ecorr):
        if len(ecorr) == len(flags):
            for ct, flag in enumerate(flags):
                # NOTE(review): `f` is the per-epoch flag array from
                # quantize(), so selection is per epoch here (unlike
                # add_equad, which selects per TOA) -- confirm intended.
                ind = flag == np.array(f)
                ecorrvec[ind] = ecorr[ct]
    # Expand per-epoch deviates to per-TOA delays: (U * ecorr) @ randn.
    ecorrvec = np.dot(U * ecorrvec, np.random.randn(U.shape[1])) * u.s
    TOAs.adjust_TOAs(TimeDelta(ecorrvec.to('day')))
| 31.142857
| 84
| 0.592431
|
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from pint.residuals import resids
import pint.toa as toa
from pint import models
__all__ = ['make_ideal',
'createfourierdesignmatrix_red',
'add_rednoise',
'add_dm_rednoise',
'add_efac',
'add_equad',
'add_ecorr']
def make_ideal(toas, model, iterations=2):
for ii in range(iterations):
rs=resids(toas, model)
toas.adjust_TOAs(TimeDelta(-1.0*rs.time_resids))
def createfourierdesignmatrix_red(toas, nmodes=30, Tspan=None,
logf=False, fmin=None, fmax=None,
pshift=False, modes=None):
T = Tspan if Tspan is not None else toas.max() - toas.min()
if modes is not None:
nmodes = len(modes)
f = modes
elif fmin is None and fmax is None and not logf:
f = 1.0 * np.arange(1, nmodes + 1) / T
else:
if fmin is None:
fmin = 1 / T
if fmax is None:
fmax = nmodes / T
if logf:
f = np.logspace(np.log10(fmin), np.log10(fmax), nmodes)
else:
f = np.linspace(fmin, fmax, nmodes)
ranphase = (np.random.uniform(0.0, 2 * np.pi, nmodes)
if pshift else np.zeros(nmodes))
Ffreqs = np.repeat(f, 2)
N = len(toas)
F = np.zeros((N, 2 * nmodes))
F[:,::2] = np.sin(2*np.pi*toas[:,None]*f[None,:] +
ranphase[None,:])
F[:,1::2] = np.cos(2*np.pi*toas[:,None]*f[None,:] +
ranphase[None,:])
return F, Ffreqs
def add_rednoise(TOAs, A, gamma, components=30,
seed=None, modes=None, Tspan=None):
nobs = len(TOAs.table)
day_in_sec = 86400
year_in_sec = 365.25*day_in_sec
fyr = 1 / year_in_sec
if seed is not None:
np.random.seed(seed)
if modes is not None:
print('Must use linear spacing.')
toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec
Tspan = toas.max() - toas.min()
F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
y = np.sqrt(prior) * np.random.randn(freqs.size)
dt = np.dot(F,y) * u.s
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_dm_rednoise(TOAs, A, gamma, components=30, rf_ref=1400,
seed=None, modes=None, Tspan=None, useDM=False):
nobs = len(TOAs.table)
radio_freqs = TOAs.table['freq']
if useDM:
rf_ref = 4.15e3
chrom = rf_ref**2 / radio_freqs**2
day_in_sec = 86400
year_in_sec = 365.25*day_in_sec
fyr = 1 / year_in_sec
if seed is not None:
np.random.seed(seed)
toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec
Tspan = toas.max() - toas.min()
F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
y = np.sqrt(prior) * np.random.randn(freqs.size)
dt = chrom.quantity.value * np.dot(F,y) * u.s
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_equad(TOAs, equad, flagid=None, flags=None, seed=None):
if seed is not None:
np.random.seed(seed)
equadvec = np.zeros(TOAs.ntoas)
if flags is None:
if not np.isscalar(equad):
raise ValueError('ERROR: If flags is None, equad must be a scalar')
else:
equadvec = np.ones(TOAs.ntoas) * equad
if flags is not None and flagid is not None and not np.isscalar(equad):
if len(equad) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array([f['f'] for f
in TOAs.table['flags'].data])
equadvec[ind] = equad[ct]
equadvec = equadvec * u.s * np.random.randn(TOAs.ntoas)
TOAs.adjust_TOAs(TimeDelta(equadvec.to('day')))
def add_efac(TOAs, efac, flagid=None, flags=None, seed=None):
if seed is not None:
np.random.seed(seed)
efacvec = np.zeros(TOAs.ntoas)
if flags is None:
if not np.isscalar(efac):
raise ValueError('ERROR: If flags is None, efac must be a scalar')
else:
efacvec = np.ones(TOAs.ntoas) * efac
if flags is not None and flagid is not None and not np.isscalar(efac):
if len(efac) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array([f['f'] for f
in TOAs.table['flags'].data])
efacvec[ind] = efac[ct]
dt = efacvec * TOAs.get_errors().to('s') * np.random.randn(TOAs.ntoas)
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def quantize(times, flags=None, dt=1.0):
isort = np.argsort(times)
bucket_ref = [times[isort[0]]]
bucket_ind = [[isort[0]]]
for i in isort[1:]:
if times[i] - bucket_ref[-1] < dt:
bucket_ind[-1].append(i)
else:
bucket_ref.append(times[i])
bucket_ind.append([i])
avetoas = np.array([np.mean(times[l]) for l in bucket_ind],'d')
if flags is not None:
aveflags = np.array([flags[l[0]] for l in bucket_ind])
U = np.zeros((len(times),len(bucket_ind)),'d')
for i,l in enumerate(bucket_ind):
U[l,i] = 1
if flags is not None:
return avetoas, aveflags, U
else:
return avetoas, U
def add_ecorr(TOAs, ecorr, flagid=None, flags=None, coarsegrain=1*u.s, seed=None):
if seed is not None:
np.random.seed(seed)
times = np.array(TOAs.table['tdbld'], dtype='float64')
if flags is None:
t, U = quantize(times, dt=coarsegrain.to('day').value)
elif flags is not None and flagid is not None:
flagvals = np.array([f[flagid] for f in TOAs.table['flags'].data])
t, f, U = quantize(times, flagvals, dt=coarsegrain.to('day').value)
ecorrvec = np.zeros(len(t))
if flags is None:
if not np.isscalar(ecorr):
raise ValueError('ERROR: If flags is None, ecorr must be a scalar')
else:
ecorrvec = np.ones(len(t)) * ecorr
if flags is not None and flagid is not None and not np.isscalar(ecorr):
if len(ecorr) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array(f)
ecorrvec[ind] = ecorr[ct]
ecorrvec = np.dot(U * ecorrvec, np.random.randn(U.shape[1])) * u.s
TOAs.adjust_TOAs(TimeDelta(ecorrvec.to('day')))
| true
| true
|
f71467a510667cf3558e0f2dd126bccf19a330a0
| 8,753
|
py
|
Python
|
data/external/repositories_2to3/137656/blundercheck-master/combine/data_prep/prepare_pgmodel.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/137656/blundercheck-master/combine/data_prep/prepare_pgmodel.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/137656/blundercheck-master/combine/data_prep/prepare_pgmodel.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
#!/usr/bin/env python
from pandas import *
from numpy import *
from djeval import *
import csv, code
import pickle as pickle
from sklearn.externals import joblib
NUM_GAMES=50000
def shell():
    """Drop into an interactive console for debugging.

    NOTE(review): globals() returns the live module dict, so the
    update(locals()) call injects this function's locals into module
    globals as a side effect -- confirm that is intended.
    """
    vars = globals()
    vars.update(locals())
    shell = code.InteractiveConsole(vars)
    shell.interact()
msg("Hi! Reading eheaders")
eheaders_filename = '/data/eheaders.p'
# NOTE(review): pickle files should be opened in binary mode ('rb');
# text mode happens to work on POSIX/Python 2 but is not portable.
eheaders_file = open(eheaders_filename, 'r')
eheaders = pickle.load(eheaders_file)
# Per-game header lookups keyed by game number (and side, for elos).
elos = eheaders['elos']
result = eheaders['result']
checkmate = eheaders['checkmate']
openings = eheaders['openings']
ocount = eheaders['opening_count']
msg("Hi! Reading crunched movescores from %s" % sys.argv[1])
crunched_path = sys.argv[1]
crunched_df = read_csv(crunched_path, sep=',', engine='c', index_col=['gamenum', 'side'])
do_gb = False
if do_gb:
msg("Hi! Reading GB scores from %s" % sys.argv[2])
gb_path = sys.argv[2]
gb_df = read_csv(gb_path, sep=',', engine='c', index_col=['gamenum'])
msg("Hi! Reading depthstats")
depthstats_path = '/data/depthstats.csv'
columns = [
'gamenum',
'side',
'mean_depth',
'mean_seldepth',
'mean_depths_agreeing_ratio',
'mean_deepest_agree_ratio',
'pct_sanemoves',
'gamelength',
'mean_num_bestmoves',
'mean_num_bestmove_changes',
'mean_bestmove_depths_agreeing',
'mean_deepest_change',
'mean_deepest_change_ratio',
]
depthstats_df = read_csv(depthstats_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
depthstats_df = depthstats_df.set_index(['gamenum', 'side'])
# we have the gamelength column in another df, drop it here to avoid conflicts
depthstats_df.drop('gamelength', axis=1, inplace=True)
do_material = True
if do_material:
msg("Hi! Reading material")
material_path = '/data/material.csv'
columns = [
'gamenum',
'material_break_0',
'material_break_1',
'material_break_2',
'material_break_3',
'material_break_4',
'opening_length',
'midgame_length',
'endgame_length',
'mean_acwsa',
'mean_acwsa_0',
'mean_acwsa_1',
'mean_acwsa_2',
'mean_acwsa_3',
'mean_acwsa_4',
'mean_acwsa_5',
'mean_acwsa_6',
'mean_acwsa_7',
'mean_acwsa_8',
'mean_acwsa_9',
]
material_df = read_csv(material_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
material_df = material_df.set_index(['gamenum'])
material_df = material_df.reindex(list(range(1, NUM_GAMES+1)))
material_df = material_df.fillna(material_df.mean())
msg("Reading ELOscored data")
eloscored_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
'elopath_min',
'elopath_max',
]
eloscored_df = read_csv('/data/data.pgn.eloscored21', sep=',', engine='c', header=None, names=eloscored_cols, index_col=False)
eloscored_df = eloscored_df.set_index(['gamenum'])
msg("Reading ELOscored data 4")
eloscored4_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored4_cols[1:] = [x + '_elo4' for x in eloscored4_cols[1:]]
eloscored4_df = read_csv('/data/data.pgn.eloscored4', sep=',', engine='c', header=None, names=eloscored4_cols, index_col=False)
eloscored4_df = eloscored4_df.set_index(['gamenum'])
msg("Reading ELOscored data 10")
eloscored10_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored10_cols[1:] = [x + '_elo10' for x in eloscored10_cols[1:]]
eloscored10_df = read_csv('/data/data.pgn.eloscored10', sep=',', engine='c', header=None, names=eloscored10_cols, index_col=False)
eloscored10_df = eloscored10_df.set_index(['gamenum'])
do_movemodel=True
if do_movemodel:
msg("Hi! Reading moveaggs")
move_aggs = joblib.load('/data/move_aggs.p')
move_aggs.fillna(move_aggs.mean(), inplace=True)
move_aggs = move_aggs[['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
msg("Hi! Reading wmoveaggs")
wmove_aggs = joblib.load('/data/wmove_aggs.p')
wmove_aggs.fillna(wmove_aggs.mean(), inplace=True)
wmove_aggs.rename(columns={'elo_pred': 'moveelo_weighted'}, inplace=True)
wmove_aggs = wmove_aggs['moveelo_weighted']
do_elochunk = False
if do_elochunk:
ch_agg_df = joblib.load('/data/chunk_aggs.p')
ch_agg_df.index = ch_agg_df.index.droplevel('elo')
ch_agg_df.columns = ['elochunk_' + x for x in ch_agg_df.columns]
msg("Hi! Setting up playergame rows")
if do_elochunk:
elorange_cols = list(ch_agg_df.columns.values)
msg("elorange cols are %s" % elorange_cols)
msg('Preparing ELO df')
elo_rows = [[x[0][0], x[0][1], x[1]] for x in list(elos.items())]
elo_df = DataFrame(elo_rows, columns=['gamenum','side','elo'])
elo_df.set_index(['gamenum','side'], inplace=True)
msg('Joining DFs')
supplemental_dfs = [depthstats_df, elo_df, crunched_df]
if do_movemodel:
supplemental_dfs.extend([move_aggs, wmove_aggs])
if do_elochunk:
supplemental_dfs.append(ch_agg_df)
mega_df = concat(supplemental_dfs, axis=1)
if do_material:
mega_df = mega_df.join(material_df, how='outer')
mega_df = mega_df.join(eloscored_df, how='outer')
mega_df = mega_df.join(eloscored4_df, how='outer')
mega_df = mega_df.join(eloscored10_df, how='outer')
if do_gb:
mega_df = mega_df.join(gb_df, how='outer')
yy_df = mega_df
msg("hi, columns are %s" % yy_df.columns)
# TODO confirm that all columns are there
def opening_feature(opening):
    """Bucket an opening name by how often it appears in the data set.

    Openings seen fewer than 20 times collapse to 'rare', fewer than 200
    to 'uncommon'; popular openings keep their own name as the feature.
    """
    seen = ocount[opening]
    if seen < 20:
        return 'rare'
    elif seen < 200:
        return 'uncommon'
    return opening
msg("Hi! Computing additional features")
yy_df['opening_feature'] = [opening_feature(openings[x]) for x in yy_df.index.get_level_values('gamenum')]
yy_df['opening_count'] = [ocount[openings[x]] for x in yy_df.index.get_level_values('gamenum')]
yy_df['any_grit'] = (yy_df['grit'] > 0)
yy_df['major_grit'] = (yy_df['grit'] > 5)
yy_df['nmerror'] = log((-1 * yy_df['meanerror']).clip(1,60)).clip(1,4) - 2.53
yy_df['premature_quit'] = (yy_df['gameoutcome'] == -1) & (yy_df['my_final_equity'] > -100)
yy_df['drawn_game'] = (yy_df['gameoutcome'] == 0)
yy_df['ended_by_checkmate'] = yy_df['won_by_checkmate'] | yy_df['lost_by_checkmate']
yy_df['noblunders'] = (yy_df['blunderrate'] == 0)
yy_df['final_equity'] = yy_df['my_final_equity'].abs().clip(0,300)
yy_df['early_lead'] = yy_df['early_lead'].clip(0,100)
yy_df['mean_depth_clipped'] = yy_df['mean_depth'].clip(0,25)
yy_df['gamelength_clipped'] = yy_df['gamelength'].clip(20,200)
# prepare opponent_df with selected info about opponent
opponent_columns = ['meanerror', 'blunderrate', 'perfectrate', 'grit', 'meanecho', 'mate_created', 'mate_destroyed', 'q_error_one', 'q_error_two', 'stdeverror', 'elo', 'any_grit', 'noblunders', 'nmerror', 'mean_depths_agreeing_ratio', 'mean_deepest_agree_ratio', 'pct_sanemoves']
if do_elochunk:
opponent_columns.extend(elorange_cols)
opponent_df = yy_df[opponent_columns]
opponent_df = opponent_df.reset_index()
opponent_df['side'] = opponent_df['side'] * -1
opponent_df.set_index(['gamenum', 'side'], inplace=True)
opponent_df.columns = ['opponent_' + x for x in opponent_df.columns]
yy_df = concat([yy_df, opponent_df], axis=1)
# more derived columns that use opponent comparisons
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo']).clip(-500, 500)
yy_df['max_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].max(axis=1)
yy_df['min_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].min(axis=1)
yy_df['max_meanecho'] = yy_df[['meanecho', 'opponent_meanecho']].max(axis=1)
yy_df['elo_avg'] = (yy_df['elo'] + yy_df['opponent_elo'])/2.0
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo'])
yy_df['winner_elo_advantage'] = yy_df['elo_advantage'] * yy_df['gameoutcome']
msg("Hi! Computing dummy variables")
categorical_features = ['opening_feature']
dummies = get_dummies(yy_df[categorical_features]).astype(np.int8)
yy_df = yy_df.join(dummies)
# fill in missing values
msg("Hi! Filling in missing values")
full_index = pandas.MultiIndex.from_product([list(range(1,NUM_GAMES + 1)), [1,-1]], names=['gamenum', 'side'])
yy_df = yy_df.reindex(full_index)
yy_elo = yy_df['elo'].copy(True)
yy_df.fillna(yy_df.mean(numeric_only=True), inplace=True)
yy_df.fillna(False, inplace=True)
yy_df['elo'] = yy_elo
# stupid patch for some stupid opening feature that got assigned to False by fillna ?!!?!?!?
yy_df.loc[yy_df['opening_feature'] == False,'opening_feature'] = 'rare'
msg("Hi! Writing yy_df to disk")
yy_df.to_pickle(sys.argv[3])
msg("Column counts are:")
counts = yy_df.count(axis=0)
print(counts)
| 35.294355
| 280
| 0.69302
|
from pandas import *
from numpy import *
from djeval import *
import csv, code
import pickle as pickle
from sklearn.externals import joblib
NUM_GAMES=50000
def shell():
    """Drop into an interactive console sharing this module's namespace.

    Handy for ad-hoc inspection while debugging the pipeline.
    """
    ns = globals()          # live module dict, so console assignments stick
    ns.update(locals())     # merge the (empty) local scope as well
    code.InteractiveConsole(ns).interact()
# Load the pre-extracted game header data (elos, results, openings, ...).
msg("Hi! Reading eheaders")
eheaders_filename = '/data/eheaders.p'
# Pickle data is binary: the file must be opened in 'rb' mode (text mode
# raises under Python 3). The context manager also closes the handle,
# which the previous code leaked.
with open(eheaders_filename, 'rb') as eheaders_file:
    eheaders = pickle.load(eheaders_file)
elos = eheaders['elos']            # keyed by (gamenum, side) — see elo_rows below
result = eheaders['result']
checkmate = eheaders['checkmate']
openings = eheaders['openings']    # gamenum -> opening name
ocount = eheaders['opening_count']  # opening name -> number of games
msg("Hi! Reading crunched movescores from %s" % sys.argv[1])
crunched_path = sys.argv[1]
crunched_df = read_csv(crunched_path, sep=',', engine='c', index_col=['gamenum', 'side'])
do_gb = False
if do_gb:
msg("Hi! Reading GB scores from %s" % sys.argv[2])
gb_path = sys.argv[2]
gb_df = read_csv(gb_path, sep=',', engine='c', index_col=['gamenum'])
msg("Hi! Reading depthstats")
depthstats_path = '/data/depthstats.csv'
columns = [
'gamenum',
'side',
'mean_depth',
'mean_seldepth',
'mean_depths_agreeing_ratio',
'mean_deepest_agree_ratio',
'pct_sanemoves',
'gamelength',
'mean_num_bestmoves',
'mean_num_bestmove_changes',
'mean_bestmove_depths_agreeing',
'mean_deepest_change',
'mean_deepest_change_ratio',
]
depthstats_df = read_csv(depthstats_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
depthstats_df = depthstats_df.set_index(['gamenum', 'side'])
depthstats_df.drop('gamelength', axis=1, inplace=True)
do_material = True
if do_material:
msg("Hi! Reading material")
material_path = '/data/material.csv'
columns = [
'gamenum',
'material_break_0',
'material_break_1',
'material_break_2',
'material_break_3',
'material_break_4',
'opening_length',
'midgame_length',
'endgame_length',
'mean_acwsa',
'mean_acwsa_0',
'mean_acwsa_1',
'mean_acwsa_2',
'mean_acwsa_3',
'mean_acwsa_4',
'mean_acwsa_5',
'mean_acwsa_6',
'mean_acwsa_7',
'mean_acwsa_8',
'mean_acwsa_9',
]
material_df = read_csv(material_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
material_df = material_df.set_index(['gamenum'])
material_df = material_df.reindex(list(range(1, NUM_GAMES+1)))
material_df = material_df.fillna(material_df.mean())
msg("Reading ELOscored data")
eloscored_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
'elopath_min',
'elopath_max',
]
eloscored_df = read_csv('/data/data.pgn.eloscored21', sep=',', engine='c', header=None, names=eloscored_cols, index_col=False)
eloscored_df = eloscored_df.set_index(['gamenum'])
msg("Reading ELOscored data 4")
eloscored4_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored4_cols[1:] = [x + '_elo4' for x in eloscored4_cols[1:]]
eloscored4_df = read_csv('/data/data.pgn.eloscored4', sep=',', engine='c', header=None, names=eloscored4_cols, index_col=False)
eloscored4_df = eloscored4_df.set_index(['gamenum'])
msg("Reading ELOscored data 10")
eloscored10_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored10_cols[1:] = [x + '_elo10' for x in eloscored10_cols[1:]]
eloscored10_df = read_csv('/data/data.pgn.eloscored10', sep=',', engine='c', header=None, names=eloscored10_cols, index_col=False)
eloscored10_df = eloscored10_df.set_index(['gamenum'])
do_movemodel=True
if do_movemodel:
msg("Hi! Reading moveaggs")
move_aggs = joblib.load('/data/move_aggs.p')
move_aggs.fillna(move_aggs.mean(), inplace=True)
move_aggs = move_aggs[['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
msg("Hi! Reading wmoveaggs")
wmove_aggs = joblib.load('/data/wmove_aggs.p')
wmove_aggs.fillna(wmove_aggs.mean(), inplace=True)
wmove_aggs.rename(columns={'elo_pred': 'moveelo_weighted'}, inplace=True)
wmove_aggs = wmove_aggs['moveelo_weighted']
do_elochunk = False
if do_elochunk:
ch_agg_df = joblib.load('/data/chunk_aggs.p')
ch_agg_df.index = ch_agg_df.index.droplevel('elo')
ch_agg_df.columns = ['elochunk_' + x for x in ch_agg_df.columns]
msg("Hi! Setting up playergame rows")
if do_elochunk:
elorange_cols = list(ch_agg_df.columns.values)
msg("elorange cols are %s" % elorange_cols)
msg('Preparing ELO df')
elo_rows = [[x[0][0], x[0][1], x[1]] for x in list(elos.items())]
elo_df = DataFrame(elo_rows, columns=['gamenum','side','elo'])
elo_df.set_index(['gamenum','side'], inplace=True)
msg('Joining DFs')
supplemental_dfs = [depthstats_df, elo_df, crunched_df]
if do_movemodel:
supplemental_dfs.extend([move_aggs, wmove_aggs])
if do_elochunk:
supplemental_dfs.append(ch_agg_df)
mega_df = concat(supplemental_dfs, axis=1)
if do_material:
mega_df = mega_df.join(material_df, how='outer')
mega_df = mega_df.join(eloscored_df, how='outer')
mega_df = mega_df.join(eloscored4_df, how='outer')
mega_df = mega_df.join(eloscored10_df, how='outer')
if do_gb:
mega_df = mega_df.join(gb_df, how='outer')
yy_df = mega_df
msg("hi, columns are %s" % yy_df.columns)
def opening_feature(opening):
    """Bucket an opening by popularity: 'rare' (<20 games), 'uncommon' (<200), else the opening name itself."""
    if ocount[opening] < 20:
        return 'rare'
    if ocount[opening] < 200:
        return 'uncommon'
    return opening
msg("Hi! Computing additional features")
yy_df['opening_feature'] = [opening_feature(openings[x]) for x in yy_df.index.get_level_values('gamenum')]
yy_df['opening_count'] = [ocount[openings[x]] for x in yy_df.index.get_level_values('gamenum')]
yy_df['any_grit'] = (yy_df['grit'] > 0)
yy_df['major_grit'] = (yy_df['grit'] > 5)
yy_df['nmerror'] = log((-1 * yy_df['meanerror']).clip(1,60)).clip(1,4) - 2.53
yy_df['premature_quit'] = (yy_df['gameoutcome'] == -1) & (yy_df['my_final_equity'] > -100)
yy_df['drawn_game'] = (yy_df['gameoutcome'] == 0)
yy_df['ended_by_checkmate'] = yy_df['won_by_checkmate'] | yy_df['lost_by_checkmate']
yy_df['noblunders'] = (yy_df['blunderrate'] == 0)
yy_df['final_equity'] = yy_df['my_final_equity'].abs().clip(0,300)
yy_df['early_lead'] = yy_df['early_lead'].clip(0,100)
yy_df['mean_depth_clipped'] = yy_df['mean_depth'].clip(0,25)
yy_df['gamelength_clipped'] = yy_df['gamelength'].clip(20,200)
opponent_columns = ['meanerror', 'blunderrate', 'perfectrate', 'grit', 'meanecho', 'mate_created', 'mate_destroyed', 'q_error_one', 'q_error_two', 'stdeverror', 'elo', 'any_grit', 'noblunders', 'nmerror', 'mean_depths_agreeing_ratio', 'mean_deepest_agree_ratio', 'pct_sanemoves']
if do_elochunk:
opponent_columns.extend(elorange_cols)
opponent_df = yy_df[opponent_columns]
opponent_df = opponent_df.reset_index()
opponent_df['side'] = opponent_df['side'] * -1
opponent_df.set_index(['gamenum', 'side'], inplace=True)
opponent_df.columns = ['opponent_' + x for x in opponent_df.columns]
yy_df = concat([yy_df, opponent_df], axis=1)
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo']).clip(-500, 500)
yy_df['max_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].max(axis=1)
yy_df['min_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].min(axis=1)
yy_df['max_meanecho'] = yy_df[['meanecho', 'opponent_meanecho']].max(axis=1)
yy_df['elo_avg'] = (yy_df['elo'] + yy_df['opponent_elo'])/2.0
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo'])
yy_df['winner_elo_advantage'] = yy_df['elo_advantage'] * yy_df['gameoutcome']
msg("Hi! Computing dummy variables")
categorical_features = ['opening_feature']
dummies = get_dummies(yy_df[categorical_features]).astype(np.int8)
yy_df = yy_df.join(dummies)
msg("Hi! Filling in missing values")
full_index = pandas.MultiIndex.from_product([list(range(1,NUM_GAMES + 1)), [1,-1]], names=['gamenum', 'side'])
yy_df = yy_df.reindex(full_index)
yy_elo = yy_df['elo'].copy(True)
yy_df.fillna(yy_df.mean(numeric_only=True), inplace=True)
yy_df.fillna(False, inplace=True)
yy_df['elo'] = yy_elo
yy_df.loc[yy_df['opening_feature'] == False,'opening_feature'] = 'rare'
msg("Hi! Writing yy_df to disk")
yy_df.to_pickle(sys.argv[3])
msg("Column counts are:")
counts = yy_df.count(axis=0)
print(counts)
| true
| true
|
f71467e65dae3f982a9af5237ac320ca8270123d
| 9,283
|
py
|
Python
|
src/transformers/models/mctct/configuration_mctct.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/mctct/configuration_mctct.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/mctct/configuration_mctct.py
|
shangz-ai/transformers
|
75259b44bf2e2b98b5a4d431fb400b7190342a01
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""M-CTC-T model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
    M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the M-CTC-T
    [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 8065):
            Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MCTCTModel`].
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 36):
            Number of hidden layers in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        attention_head_dim (`int`, *optional*, defaults to 384):
            Dimensions of each attention head for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 920):
            The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        layerdrop (`float`, *optional*, defaults to 0.3):
            The probability of dropping an encoder layer during training. The default 0.3 value is used in the original
            implementation.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
            The dropout ratio for the attention probabilities.
        pad_token_id (`int`, *optional*, defaults to 1):
            The tokenizer index of the pad token.
        bos_token_id (`int`, *optional*, defaults to 0):
            The tokenizer index of the bos token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The tokenizer index of the eos token.
        conv_glu_dim (`int`, *optional*, defaults to 1):
            The dimension of the output of the `Conv1dSubsampler` layer in which GLU is applied on. Though the original
            Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences.
        conv_dropout (`float`, *optional*, defaults to 0.3):
            The probability of randomly dropping the `Conv1dSubsampler` layer during training.
        num_conv_layers (`int`, *optional*, defaults to 1):
            Number of convolution layers before applying transformer encoder layers.
        conv_kernel (`List[int]`, *optional*, defaults to `[7]`):
            The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
            to `num_conv_layers`.
        conv_stride (`List[int]`, *optional*, defaults to `[3]`):
            The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal
            to `num_conv_layers`.
        input_feat_per_channel (`int`, *optional*, defaults to 80):
            Feature dimensions of the channels of the input to the Conv1D layer.
        input_channels (`int`, *optional*, defaults to 1):
            Number of input channels of the input to the Conv1D layer.
        conv_channels (`List[int]`, *optional*, defaults to None):
            Channel sizes of intermediate Conv1D layers.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`MCTCTForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`MCTCTForCTC`].

    Example:

    ```python
    >>> from transformers import MCTCTModel, MCTCTConfig

    >>> # Initializing a M-CTC-T mctct-large style configuration
    >>> configuration = MCTCTConfig()

    >>> # Initializing a model from the mctct-large style configuration
    >>> model = MCTCTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
        # The docstring promises the same invariant for conv_stride; validate it
        # too so a mismatched stride list fails fast instead of at model build.
        if len(self.conv_stride) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_stride)` == `config.num_conv_layers` "
                f"but is `len(config.conv_stride) = {len(self.conv_stride)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 49.908602
| 119
| 0.679845
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
}
class MCTCTConfig(PretrainedConfig):
    """Configuration class for [`MCTCTModel`].

    Holds the model hyper-parameters; the defaults correspond to the
    speechbrain/m-ctc-t-large architecture. Inherits serialization and
    common-attribute handling from `PretrainedConfig`.
    """
    model_type = "mctct"
    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs
    ):
        # Special-token ids are forwarded explicitly so PretrainedConfig
        # registers them alongside any extra kwargs.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # Stored as lists (not tuples) so the config exports to JSON cleanly.
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        # One kernel size per convolution layer is required.
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| true
| true
|
f71468717fc5e61acbe354ad1694025b5b1bf250
| 1,649
|
py
|
Python
|
.venv/lib/python3.8/site-packages/opencensus/stats/measurement.py
|
MarkusMeyer13/graph-teams-presence
|
c302b79248f31623a1b209e098afc4f85d96228d
|
[
"MIT"
] | null | null | null |
.venv/lib/python3.8/site-packages/opencensus/stats/measurement.py
|
MarkusMeyer13/graph-teams-presence
|
c302b79248f31623a1b209e098afc4f85d96228d
|
[
"MIT"
] | 1
|
2021-07-28T09:45:24.000Z
|
2021-07-28T09:45:24.000Z
|
.venv/lib/python3.8/site-packages/opencensus/stats/measurement.py
|
MarkusMeyer13/graph-teams-presence
|
c302b79248f31623a1b209e098afc4f85d96228d
|
[
"MIT"
] | null | null | null |
# Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Measurement(object):
    """Pairs a measure with a single recorded value.

    :type measure: :class: '~opencensus.stats.measure.Measure'
    :param measure: the measure this recording belongs to

    :type value: int or float
    :param value: the recorded value
    """

    def __init__(self, measure, value):
        self._value = value
        self._measure = measure

    @property
    def measure(self):
        """The measure associated with this measurement"""
        return self._measure

    @property
    def value(self):
        """The recorded value of this measurement"""
        return self._value
class MeasurementInt(Measurement):
    """Measurement carrying an integer value."""

    def __init__(self, measure, value):
        Measurement.__init__(self, measure, value)
class MeasurementFloat(Measurement):
    """Measurement carrying a floating-point value."""

    def __init__(self, measure, value):
        Measurement.__init__(self, measure, value)
| 32.333333
| 77
| 0.681019
|
class Measurement(object):
    """A measure paired with a recorded value.

    :type measure: :class: '~opencensus.stats.measure.Measure'
    :param measure: the measure being recorded
    :type value: int or float
    :param value: value of the measurement
    """
    def __init__(self, measure, value):
        self._measure = measure
        self._value = value
    @property
    def value(self):
        """The value of the current measurement"""
        return self._value
    @property
    def measure(self):
        """The measure of the current measurement"""
        return self._measure
class MeasurementInt(Measurement):
    """Measurement whose value is an integer."""
    def __init__(self, measure, value):
        super(MeasurementInt, self).__init__(measure, value)
class MeasurementFloat(Measurement):
    """Measurement whose value is a float."""
    def __init__(self, measure, value):
        super(MeasurementFloat, self).__init__(measure, value)
| true
| true
|
f71468baccb7f26415744498bbf3284f96465119
| 26,158
|
py
|
Python
|
tests/system/test_integration.py
|
jhonnysanchezillisaca/apm-server
|
eeae18ef1551769bd03998e6798aadc94dda0a3d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/system/test_integration.py
|
jhonnysanchezillisaca/apm-server
|
eeae18ef1551769bd03998e6798aadc94dda0a3d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/system/test_integration.py
|
jhonnysanchezillisaca/apm-server
|
eeae18ef1551769bd03998e6798aadc94dda0a3d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import os
import unittest
from apmserver import ElasticTest, ExpvarBaseTest
from apmserver import ClientSideElasticTest, SmapIndexBaseTest, SmapCacheBaseTest
from apmserver import SplitIndicesTest
from beat.beat import INTEGRATION_TESTS
import json
import time
class Test(ElasticTest):
    """Backend integration tests: verify the server's documents land in ES
    with the expected template, mappings and content."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_onboarding_doc(self):
        """
        This test starts the beat and checks that the onboarding doc has been published to ES
        """
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        self.wait_until(
            lambda: (self.es.count(index=self.index_name)['count'] == 1)
        )

        # Makes sure no error or warnings were logged
        self.assert_no_logged_warnings()

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_template(self):
        """
        This test starts the beat and checks that the template has been loaded to ES
        """
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        templates = self.es.indices.get_template(self.index_name)
        assert len(templates) == 1
        t = templates[self.index_name]
        total_fields_limit = t['settings']['index']['mapping']['total_fields']['limit']
        assert total_fields_limit == "2000", total_fields_limit

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_transaction(self):
        """
        This test starts the beat with a loaded template and sends transaction data to elasticsearch.
        It verifies that all data make it into ES, means data is compatible with the template
        and data are in expected format.
        """
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()

        # compare existing ES documents for transactions with new ones
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})
        # BUGFIX: the failure message used rs['count'], which is not a key of
        # a search response and raised KeyError instead of reporting the
        # mismatch; the hit count lives under hits.total.
        assert rs['hits']['total'] == 4, "found {} documents".format(rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'transaction.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'transaction')

        # compare existing ES documents for spans with new ones
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "span"}}})
        assert rs['hits']['total'] == 5, "found {} documents".format(rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'spans.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'span')

        self.check_backend_transaction_sourcemap(count=5)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_mark_navigation_timing(self):
        """Only navigationTiming marks should be mapped as scaled_float."""
        self.load_docs_with_template(self.get_transaction_payload_path(), self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()
        mappings = self.es.indices.get_field_mapping(index=self.index_name, fields="transaction.marks.*")
        found_other = False
        for name, metric in mappings[self.index_name]["mappings"]["doc"].items():
            for mapping in metric["mapping"].values():
                mtype = mapping["type"]
                if name.startswith("transaction.marks.navigationTiming."):
                    assert mtype == "scaled_float", name + " mapped as " + mtype + ", not scaled_float"
                else:
                    # only navigation timing marks are scaled floats for now
                    assert mtype != "scaled_float", name + " mapped as scaled_float"
                    found_other = True
        assert found_other, "no non-scaled_float marks found"

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_error(self):
        """
        This test starts the beat with a loaded template and sends error data to elasticsearch.
        It verifies that all data make it into ES means data is compatible with the template.
        """
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url, 'error', 4)
        self.assert_no_logged_warnings()

        # compare existing ES documents for errors with new ones
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        # BUGFIX: rs['count'] does not exist on a search response (see above).
        assert rs['hits']['total'] == 4, "found {} documents".format(rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'error.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'error')

        self.check_backend_error_sourcemap(count=4)

    def check_docs(self, approved, received, doc_type):
        """Match every received ES doc against the approved fixture with the
        same id and compare the relevant sub-documents."""
        for rec_entry in received:
            checked = False
            rec = rec_entry['_source']
            rec_id = rec[doc_type]['id']
            for appr_entry in approved:
                appr = appr_entry['_source']
                if rec_id == appr[doc_type]['id']:
                    checked = True
                    self.assert_docs(rec[doc_type], appr[doc_type])
                    self.assert_docs(rec['context'], appr['context'])
                    self.assert_docs(rec['@timestamp'], appr['@timestamp'])
                    self.assert_docs(rec['processor'], appr['processor'])
            assert checked, "New entry with id {}".format(rec_id)

    def assert_docs(self, received, approved):
        """Assert deep equality, pretty-printing both sides on failure."""
        assert approved == received, "expected:\n{}\nreceived:\n{}".format(self.dump(approved), self.dump(received))

    def dump(self, data):
        """Pretty-print a document for readable assertion messages."""
        return json.dumps(data, indent=4, separators=(',', ': '))
class RumEnabledIntegrationTest(ClientSideElasticTest):
    """RUM (frontend) integration tests: library-frame marking, sourcemap
    enrichment and error grouping behaviour with RUM enabled."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        self.load_docs_with_template(self.get_error_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     4)
        self.check_library_frames({"true": 1, "false": 1, "empty": 2}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_library_frames({"true": 5, "false": 1, "empty": 0}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        self.load_docs_with_template(self.get_transaction_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/transactions',
                                     'transaction',
                                     9)
        self.check_library_frames({"true": 1, "false": 0, "empty": 1}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     2)
        self.check_library_frames({"true": 1, "false": 1, "empty": 0}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_backend_event(self):
        self.load_docs_with_template(self.get_transaction_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/transactions', 'transaction', 9)

        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})

        assert "ip" in rs['hits']['hits'][0]["_source"]["context"]["system"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_rum_event(self):
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)

        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})

        hits = rs['hits']['hits']
        for hit in hits:
            assert "ip" in hit["_source"]["context"]["user"], rs['hits']
            assert "user-agent" in hit["_source"]["context"]["user"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_grouping_key_for_error(self):
        # upload the same error, once via rum, once via backend endpoint
        # check they don't have the same grouping key, as the
        # `rum.exclude_from_grouping` should only be applied to the rum error.
        self.load_docs_with_template(self.get_error_payload_path(),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     1)
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     2)

        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        docs = rs['hits']['hits']
        grouping_key1 = docs[0]["_source"]["error"]["grouping_key"]
        grouping_key2 = docs[1]["_source"]["error"]["grouping_key"]
        assert grouping_key1 != grouping_key2

    def check_library_frames(self, library_frames, event):
        """Tally library_frame flags across all stacktraces of `event` docs
        and compare against the expected counts."""
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": event}}})
        l_frames = {"true": 0, "false": 0, "empty": 0}
        for doc in rs['hits']['hits']:
            if "error" in doc["_source"]:
                err = doc["_source"]["error"]
                if "exception" in err:
                    self.count_library_frames(err["exception"], l_frames)
                if "log" in err:
                    self.count_library_frames(err["log"], l_frames)
            elif "span" in doc["_source"]:
                span = doc["_source"]["span"]
                self.count_library_frames(span, l_frames)
        assert l_frames == library_frames, "found {}, expected {}".format(
            l_frames, library_frames)

    def count_library_frames(self, doc, lf):
        """Increment lf buckets per stacktrace frame: 'true'/'false' for an
        explicit library_frame flag, 'empty' when the key is absent."""
        if "stacktrace" not in doc:
            return
        for frame in doc["stacktrace"]:
            # BUGFIX: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and works on both Python 2 and 3.
            if "library_frame" in frame:
                k = "true" if frame["library_frame"] == True else "false"
                lf[k] += 1
            else:
                lf["empty"] += 1
class SplitIndicesIntegrationTest(SplitIndicesTest):
    """Checks that APM docs are routed into per-event-type indices."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_split_docs_into_separate_indices(self):
        """Errors, transactions and spans each land in their own index."""
        # index one error payload and one transaction payload into ES
        uploads = (
            (self.get_error_payload_path(), self.errors_url, 'error', 4),
            (self.get_transaction_payload_path(), self.transactions_url,
             'transaction', 9),
        )
        for payload, url, event, expected_ct in uploads:
            self.load_docs_with_template(payload, url, event, expected_ct,
                                         query_index="test-apm*")
        # every document indexed exactly once (incl. 1 onboarding doc)
        assert 14 == self.es.count(index="test-apm*")['count']
        # each event type must end up in its own dated index
        per_index = (("error", 4), ("transaction", 4), ("span", 5))
        for event, expected_ct in per_index:
            ct = self.es.count(
                index="test-apm-{}-12-12-2017".format(event),
                body={"query": {"term": {"processor.event": event}}}
            )['count']
            assert expected_ct == ct
class SourcemappingIntegrationTest(ClientSideElasticTest):
    """Sourcemap upload, application, override and caching behaviour.

    These tests drive a strict sequence of ES state changes (upload, wait,
    ingest, delete-index, re-ingest); statement order is significant.
    """
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        """An uploaded sourcemap is applied to a backend-side error."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        # 202: upload accepted by the APM server
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_backend_error_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_duplicated_sourcemap_warning(self):
        """Re-uploading the same sourcemap logs override/multiple warnings."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps()
        # second upload with the same caching key overrides the first one
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(2)
        assert self.log_contains(
            "Overriding sourcemap"), "A log should be written when a sourcemap is overwritten"
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(3)
        assert self.log_contains(
            "Multiple sourcemaps found"), "the 3rd fetch should query ES and find that there are 2 sourcemaps with the same caching key"
        self.assert_no_logged_warnings(
            ["WARN.*Overriding sourcemap", "WARN.*Multiple sourcemaps"])
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        """An uploaded sourcemap is applied to a RUM error."""
        # use an uncleaned path to test that path is cleaned in upload
        path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        """An uploaded sourcemap is applied to backend transaction spans."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     'http://localhost:8200/v1/transactions',
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_backend_transaction_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        """An uploaded sourcemap is applied to RUM transaction spans."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_rum_transaction_sourcemap(True)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_sourcemap(self):
        """Without any uploaded sourcemap, mapping fails with a clear error."""
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_matching_sourcemap(self):
        """A sourcemap without matchings behaves like no sourcemap at all."""
        r = self.upload_sourcemap('bundle_no_mapping.js.map')
        self.assert_no_logged_warnings()
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # reuse the no-sourcemap check: the upload must not match anything
        self.test_no_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_fetch_latest_of_multiple_sourcemaps(self):
        """When several sourcemaps share a caching key, the latest wins."""
        # upload sourcemap file that finds no matchings
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle_no_mapping.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap found for")
        # remove existing document
        self.es.delete_by_query(index=self.index_name,
                                body={"query": {"term": {"processor.name": 'error'}}})
        self.wait_until(
            lambda: (self.es.count(index=self.index_name, body={
                "query": {"term": {"processor.name": 'error'}}}
            )['count'] == 0)
        )
        # upload second sourcemap file with same key,
        # that actually leads to proper matchings
        # this also tests that the cache gets invalidated,
        # as otherwise the former sourcemap would be taken from the cache.
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps(expected_ct=2)
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(True, count=1)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_mapping_cache_usage(self):
        """A cached sourcemap keeps working after its ES index is deleted."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))
        # insert document,
        # fetching sourcemap without errors, so it must be fetched from cache
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingIntegrationChangedConfigTest(SmapIndexBaseTest):
    """Sourcemaps still apply to RUM errors with a changed smap index."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error_changed_index(self):
        """Upload via an uncleaned path; the sourcemap must still apply."""
        # the path deliberately contains '..': upload has to clean it
        smap_path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        resp = self.upload_sourcemap(file_name='bundle.js.map',
                                     bundle_filepath=smap_path)
        assert resp.status_code == 202, resp.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(
            self.get_error_payload_path(), self.errors_url, 'error', 1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingCacheIntegrationTest(SmapCacheBaseTest):
    """Checks that cached sourcemaps expire (base class sets a short TTL)."""
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_cache_expiration(self):
        """After cache expiry a deleted sourcemap is no longer applied."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        # 202: upload accepted by the APM server
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))
        # after cache expiration no sourcemap should be found any more
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")
class ExpvarDisabledIntegrationTest(ExpvarBaseTest):
    """With expvar disabled the debug-vars endpoint must not exist."""
    config_overrides = {"expvar_enabled": "false"}

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar disabled, should 404"""
        resp = self.get_debug_vars()
        assert resp.status_code == 404, resp.status_code
class ExpvarEnabledIntegrationTest(ExpvarBaseTest):
    """With expvar enabled the debug-vars endpoint must be served."""
    config_overrides = {"expvar_enabled": "true"}

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        resp = self.get_debug_vars()
        assert resp.status_code == 200, resp.status_code
class ExpvarCustomUrlIntegrationTest(ExpvarBaseTest):
    """Expvar is served under a custom URL when one is configured."""
    config_overrides = {"expvar_enabled": "true", "expvar_url": "/foo"}
    # point the base-class URL helper at the custom endpoint
    expvar_url = ExpvarBaseTest.expvar_url.replace("/debug/vars", "/foo")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        resp = self.get_debug_vars()
        assert resp.status_code == 200, resp.status_code
class MetricsIntegrationTest(ElasticTest):
    """Verifies metric documents are indexed with the expected mapping."""

    def all_metrics_docs(self):
        """Fetch every indexed doc whose processor.event is 'metric'."""
        query = {"query": {"term": {"processor.event": "metric"}}}
        return self.es.search(index=self.index_name, body=query)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_metric_doc(self):
        """CPU pct metric fields must be mapped as scaled_float."""
        self.load_docs_with_template(
            self.get_metricset_payload_path(), self.metrics_url, 'metric', 1)
        field = "system.process.cpu.total.norm.pct"
        mappings = self.es.indices.get_field_mapping(
            index=self.index_name, fields=field)
        expected_type = "scaled_float"
        actual_type = mappings[self.index_name]["mappings"]["doc"][field]["mapping"]["pct"]["type"]
        assert expected_type == actual_type, "want: {}, got: {}".format(
            expected_type, actual_type)
class PipelineRegisterTest(ElasticTest):
    """The default ingest pipeline is registered (and overwritten) on start."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "true"
    }

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_default_pipeline_registered(self):
        """The apm_user_agent pipeline exists with its default description."""
        self.wait_until(
            lambda: self.log_contains("Pipeline successfully registered"),
            max_timeout=5)
        pid = "apm_user_agent"
        registered = self.es.ingest.get_pipeline(id=pid)
        expected = "Add user agent information for APM events"
        assert registered[pid]['description'] == expected
class PipelineDisableOverwriteTest(ElasticTest):
    """With overwrite disabled an existing pipeline is left untouched."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "false"
    }

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_overwritten(self):
        """The server logs that the pipeline was already registered."""
        already_there = "Pipeline already registered"
        self.wait_until(lambda: self.log_contains(already_there),
                        max_timeout=5)
class PipelineDisableTest(ElasticTest):
    """Without explicit enablement no pipeline registration happens."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_registered(self):
        """The server logs that no pipeline callback was registered."""
        skipped = "No pipeline callback registered"
        self.wait_until(lambda: self.log_contains(skipped),
                        max_timeout=5)
| 45.571429
| 136
| 0.601155
|
import os
import unittest
from apmserver import ElasticTest, ExpvarBaseTest
from apmserver import ClientSideElasticTest, SmapIndexBaseTest, SmapCacheBaseTest
from apmserver import SplitIndicesTest
from beat.beat import INTEGRATION_TESTS
import json
import time
class Test(ElasticTest):
    """End-to-end intake tests: onboarding doc, index template, and
    comparison of indexed transaction/span/error docs with approved files."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_onboarding_doc(self):
        """On start the server writes exactly one onboarding document."""
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        self.wait_until(
            lambda: (self.es.count(index=self.index_name)['count'] == 1)
        )
        self.assert_no_logged_warnings()

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_template(self):
        """The index template is loaded with the expected field limit."""
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        templates = self.es.indices.get_template(self.index_name)
        assert len(templates) == 1
        t = templates[self.index_name]
        total_fields_limit = t['settings']['index']['mapping']['total_fields']['limit']
        assert total_fields_limit == "2000", total_fields_limit

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_transaction(self):
        """Transactions and spans are indexed and match the approved docs."""
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})
        # bugfix: the failure message used rs['count'], which is not a key of
        # an ES search response and raised KeyError instead of reporting.
        assert rs['hits']['total'] == 4, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__),
                                       'transaction.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'transaction')
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "span"}}})
        assert rs['hits']['total'] == 5, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__),
                                       'spans.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'span')
        self.check_backend_transaction_sourcemap(count=5)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_mark_navigation_timing(self):
        """Only navigationTiming marks are mapped as scaled_float."""
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()
        mappings = self.es.indices.get_field_mapping(
            index=self.index_name, fields="transaction.marks.*")
        found_other = False
        for name, metric in mappings[self.index_name]["mappings"]["doc"].items():
            for mapping in metric["mapping"].values():
                mtype = mapping["type"]
                if name.startswith("transaction.marks.navigationTiming."):
                    assert mtype == "scaled_float", (
                        name + " mapped as " + mtype + ", not scaled_float")
                else:
                    assert mtype != "scaled_float", name + " mapped as scaled_float"
                    found_other = True
        assert found_other, "no non-scaled_float marks found"

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_error(self):
        """Errors are indexed and match the approved documents."""
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url, 'error', 4)
        self.assert_no_logged_warnings()
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        # bugfix: failure message previously referenced the missing rs['count']
        assert rs['hits']['total'] == 4, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__),
                                       'error.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'error')
        self.check_backend_error_sourcemap(count=4)

    def check_docs(self, approved, received, doc_type):
        """Assert every received doc has an approved counterpart (same id)
        with identical payload, context, timestamp and processor fields."""
        for rec_entry in received:
            checked = False
            rec = rec_entry['_source']
            rec_id = rec[doc_type]['id']
            for appr_entry in approved:
                appr = appr_entry['_source']
                if rec_id == appr[doc_type]['id']:
                    checked = True
                    self.assert_docs(rec[doc_type], appr[doc_type])
                    self.assert_docs(rec['context'], appr['context'])
                    self.assert_docs(rec['@timestamp'], appr['@timestamp'])
                    self.assert_docs(rec['processor'], appr['processor'])
            assert checked, "New entry with id {}".format(rec_id)

    def assert_docs(self, received, approved):
        """Compare two docs; show a readable JSON diff on mismatch."""
        assert approved == received, "expected:\n{}\nreceived:\n{}".format(
            self.dump(approved), self.dump(received))

    def dump(self, data):
        """Pretty-print `data` as indented JSON for assertion messages."""
        return json.dumps(data, indent=4, separators=(',', ': '))
class RumEnabledIntegrationTest(ClientSideElasticTest):
    """Checks intake via the backend and RUM endpoints, event enrichment,
    grouping keys and library-frame flagging with RUM support enabled."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        """Backend errors keep their original library_frame distribution."""
        self.load_docs_with_template(
            self.get_error_payload_path(name="payload.json"),
            'http://localhost:8200/v1/errors', 'error', 4)
        self.check_library_frames({"true": 1, "false": 1, "empty": 2}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        """RUM errors get additional frames flagged as library frames."""
        self.load_docs_with_template(
            self.get_error_payload_path(), self.errors_url, 'error', 1)
        self.check_library_frames({"true": 5, "false": 1, "empty": 0}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        """Backend transaction spans keep their library_frame flags."""
        self.load_docs_with_template(
            self.get_transaction_payload_path(name="payload.json"),
            'http://localhost:8200/v1/transactions', 'transaction', 9)
        self.check_library_frames({"true": 1, "false": 0, "empty": 1}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        """RUM transaction spans carry updated library_frame flags."""
        self.load_docs_with_template(
            self.get_transaction_payload_path(), self.transactions_url,
            'transaction', 2)
        self.check_library_frames({"true": 1, "false": 1, "empty": 0}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_backend_event(self):
        """Backend transactions are enriched with the system IP."""
        self.load_docs_with_template(
            self.get_transaction_payload_path(name="payload.json"),
            'http://localhost:8200/v1/transactions', 'transaction', 9)
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})
        assert "ip" in rs['hits']['hits'][0]["_source"]["context"]["system"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_rum_event(self):
        """RUM errors are enriched with user IP and user-agent."""
        self.load_docs_with_template(
            self.get_error_payload_path(), self.errors_url, 'error', 1)
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        for hit in rs['hits']['hits']:
            assert "ip" in hit["_source"]["context"]["user"], rs['hits']
            assert "user-agent" in hit["_source"]["context"]["user"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_grouping_key_for_error(self):
        """Upload the same error once via RUM, once via the backend endpoint
        and check they do NOT share a grouping key: the
        `rum.exclude_from_grouping` setting must only apply to the RUM error.
        """
        self.load_docs_with_template(
            self.get_error_payload_path(),
            'http://localhost:8200/v1/errors', 'error', 1)
        self.load_docs_with_template(
            self.get_error_payload_path(), self.errors_url, 'error', 2)
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        docs = rs['hits']['hits']
        grouping_key1 = docs[0]["_source"]["error"]["grouping_key"]
        grouping_key2 = docs[1]["_source"]["error"]["grouping_key"]
        assert grouping_key1 != grouping_key2

    def check_library_frames(self, library_frames, event):
        """Assert that the indexed docs for `event` contain exactly
        `library_frames` stacktrace frames flagged true/false/unset."""
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": event}}})
        l_frames = {"true": 0, "false": 0, "empty": 0}
        for doc in rs['hits']['hits']:
            if "error" in doc["_source"]:
                err = doc["_source"]["error"]
                if "exception" in err:
                    self.count_library_frames(err["exception"], l_frames)
                if "log" in err:
                    self.count_library_frames(err["log"], l_frames)
            elif "span" in doc["_source"]:
                self.count_library_frames(doc["_source"]["span"], l_frames)
        assert l_frames == library_frames, "found {}, expected {}".format(
            l_frames, library_frames)

    def count_library_frames(self, doc, lf):
        """Tally each stacktrace frame of `doc` into `lf` under "true"/"false"
        by its library_frame flag, or "empty" when the flag is unset."""
        if "stacktrace" not in doc:
            return
        for frame in doc["stacktrace"]:
            # bugfix: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and works on Python 2 as well.
            if "library_frame" in frame:
                k = "true" if frame["library_frame"] == True else "false"
                lf[k] += 1
            else:
                lf["empty"] += 1
class SplitIndicesIntegrationTest(SplitIndicesTest):
    """Checks that APM docs are routed into per-event-type indices."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_split_docs_into_separate_indices(self):
        """Errors, transactions and spans each land in their own index."""
        # index one error payload and one transaction payload into ES
        uploads = (
            (self.get_error_payload_path(), self.errors_url, 'error', 4),
            (self.get_transaction_payload_path(), self.transactions_url,
             'transaction', 9),
        )
        for payload, url, event, expected_ct in uploads:
            self.load_docs_with_template(payload, url, event, expected_ct,
                                         query_index="test-apm*")
        # every document indexed exactly once (incl. 1 onboarding doc)
        assert 14 == self.es.count(index="test-apm*")['count']
        # each event type must end up in its own dated index
        per_index = (("error", 4), ("transaction", 4), ("span", 5))
        for event, expected_ct in per_index:
            ct = self.es.count(
                index="test-apm-{}-12-12-2017".format(event),
                body={"query": {"term": {"processor.event": event}}}
            )['count']
            assert expected_ct == ct
class SourcemappingIntegrationTest(ClientSideElasticTest):
    """Sourcemap upload, application, override and caching behaviour.

    These tests drive a strict sequence of ES state changes (upload, wait,
    ingest, delete-index, re-ingest); statement order is significant.
    """
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        """An uploaded sourcemap is applied to a backend-side error."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        # 202: upload accepted by the APM server
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_backend_error_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_duplicated_sourcemap_warning(self):
        """Re-uploading the same sourcemap logs override/multiple warnings."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps()
        # second upload with the same caching key overrides the first one
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(2)
        assert self.log_contains(
            "Overriding sourcemap"), "A log should be written when a sourcemap is overwritten"
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(3)
        assert self.log_contains(
            "Multiple sourcemaps found"), "the 3rd fetch should query ES and find that there are 2 sourcemaps with the same caching key"
        self.assert_no_logged_warnings(
            ["WARN.*Overriding sourcemap", "WARN.*Multiple sourcemaps"])
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        """An uploaded sourcemap is applied to a RUM error."""
        # use an uncleaned path to test that path is cleaned in upload
        path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        """An uploaded sourcemap is applied to backend transaction spans."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     'http://localhost:8200/v1/transactions',
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_backend_transaction_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        """An uploaded sourcemap is applied to RUM transaction spans."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_rum_transaction_sourcemap(True)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_sourcemap(self):
        """Without any uploaded sourcemap, mapping fails with a clear error."""
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_matching_sourcemap(self):
        """A sourcemap without matchings behaves like no sourcemap at all."""
        r = self.upload_sourcemap('bundle_no_mapping.js.map')
        self.assert_no_logged_warnings()
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # reuse the no-sourcemap check: the upload must not match anything
        self.test_no_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_fetch_latest_of_multiple_sourcemaps(self):
        """When several sourcemaps share a caching key, the latest wins."""
        # upload sourcemap file that finds no matchings
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle_no_mapping.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap found for")
        # remove existing document
        self.es.delete_by_query(index=self.index_name,
                                body={"query": {"term": {"processor.name": 'error'}}})
        self.wait_until(
            lambda: (self.es.count(index=self.index_name, body={
                "query": {"term": {"processor.name": 'error'}}}
            )['count'] == 0)
        )
        # upload second sourcemap file with same key,
        # that actually leads to proper matchings
        # this also tests that the cache gets invalidated,
        # as otherwise the former sourcemap would be taken from the cache.
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps(expected_ct=2)
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(True, count=1)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_mapping_cache_usage(self):
        """A cached sourcemap keeps working after its ES index is deleted."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))
        # insert document,
        # fetching sourcemap without errors, so it must be fetched from cache
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingIntegrationChangedConfigTest(SmapIndexBaseTest):
    """Sourcemaps still apply to RUM errors with a changed smap index."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error_changed_index(self):
        """Upload via an uncleaned path; the sourcemap must still apply."""
        # the path deliberately contains '..': upload has to clean it
        smap_path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        resp = self.upload_sourcemap(file_name='bundle.js.map',
                                     bundle_filepath=smap_path)
        assert resp.status_code == 202, resp.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(
            self.get_error_payload_path(), self.errors_url, 'error', 1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingCacheIntegrationTest(SmapCacheBaseTest):
    """Checks that cached sourcemaps expire (base class sets a short TTL)."""
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_cache_expiration(self):
        """After cache expiry a deleted sourcemap is no longer applied."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        # 202: upload accepted by the APM server
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))
        # after cache expiration no sourcemap should be found any more
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")
class ExpvarDisabledIntegrationTest(ExpvarBaseTest):
    """With expvar disabled the debug-vars endpoint must not exist."""
    config_overrides = {"expvar_enabled": "false"}

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar disabled, should 404"""
        resp = self.get_debug_vars()
        assert resp.status_code == 404, resp.status_code
class ExpvarEnabledIntegrationTest(ExpvarBaseTest):
    """With expvar enabled the debug-vars endpoint must be served."""
    config_overrides = {"expvar_enabled": "true"}

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        resp = self.get_debug_vars()
        assert resp.status_code == 200, resp.status_code
class ExpvarCustomUrlIntegrationTest(ExpvarBaseTest):
    """Expvar is served under a custom URL when one is configured."""
    config_overrides = {"expvar_enabled": "true", "expvar_url": "/foo"}
    # point the base-class URL helper at the custom endpoint
    expvar_url = ExpvarBaseTest.expvar_url.replace("/debug/vars", "/foo")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        resp = self.get_debug_vars()
        assert resp.status_code == 200, resp.status_code
class MetricsIntegrationTest(ElasticTest):
    """Verifies metric documents are indexed with the expected mapping."""

    def all_metrics_docs(self):
        """Fetch every indexed doc whose processor.event is 'metric'."""
        query = {"query": {"term": {"processor.event": "metric"}}}
        return self.es.search(index=self.index_name, body=query)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_metric_doc(self):
        """CPU pct metric fields must be mapped as scaled_float."""
        self.load_docs_with_template(
            self.get_metricset_payload_path(), self.metrics_url, 'metric', 1)
        field = "system.process.cpu.total.norm.pct"
        mappings = self.es.indices.get_field_mapping(
            index=self.index_name, fields=field)
        expected_type = "scaled_float"
        actual_type = mappings[self.index_name]["mappings"]["doc"][field]["mapping"]["pct"]["type"]
        assert expected_type == actual_type, "want: {}, got: {}".format(
            expected_type, actual_type)
class PipelineRegisterTest(ElasticTest):
    """The default ingest pipeline is registered (and overwritten) on start."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "true"
    }

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_default_pipeline_registered(self):
        """The apm_user_agent pipeline exists with its default description."""
        self.wait_until(
            lambda: self.log_contains("Pipeline successfully registered"),
            max_timeout=5)
        pid = "apm_user_agent"
        registered = self.es.ingest.get_pipeline(id=pid)
        expected = "Add user agent information for APM events"
        assert registered[pid]['description'] == expected
class PipelineDisableOverwriteTest(ElasticTest):
    """With overwrite disabled an existing pipeline is left untouched."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "false"
    }

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_overwritten(self):
        """The server logs that the pipeline was already registered."""
        already_there = "Pipeline already registered"
        self.wait_until(lambda: self.log_contains(already_there),
                        max_timeout=5)
class PipelineDisableTest(ElasticTest):
    """Without explicit enablement no pipeline registration happens."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_registered(self):
        """The server logs that no pipeline callback was registered."""
        skipped = "No pipeline callback registered"
        self.wait_until(lambda: self.log_contains(skipped),
                        max_timeout=5)
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.