hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73b38066a1eb96becaeb2e3f13ef9ed2f171e6d | 4,711 | py | Python | pandapipes/test/io/test_file_io.py | dcronbach/pandapipes | 312fef81ddd0fb3eb23ec1c5bbc2848d568faa52 | [
"BSD-3-Clause"
] | null | null | null | pandapipes/test/io/test_file_io.py | dcronbach/pandapipes | 312fef81ddd0fb3eb23ec1c5bbc2848d568faa52 | [
"BSD-3-Clause"
] | null | null | null | pandapipes/test/io/test_file_io.py | dcronbach/pandapipes | 312fef81ddd0fb3eb23ec1c5bbc2848d568faa52 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import os
import pandapipes
import pytest
from pandapower.test.toolbox import tempdir
from pandas.testing import assert_frame_equal
# @pytest.fixture()
def load_net():
# create test network
net = pandapipes.create_empty_network("test_net", fluid="lgas")
j1 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15,
name="Connection to External Grid", geodata=(0, 0))
j2 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 2",
geodata=(2, 0))
j3 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 3",
geodata=(7, 4))
j4 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 4",
geodata=(7, -4))
j5 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 5",
geodata=(5, 3))
j6 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 6",
geodata=(5, -3))
pandapipes.create_ext_grid(net, junction=j1, p_bar=1.1, t_k=293.15, name="Grid Connection")
pandapipes.create_pipe_from_parameters(net, from_junction=j1, to_junction=j2, length_km=10,
diameter_m=0.05, name="Pipe 1", geodata=[(0, 0), (2, 0)])
pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j3, length_km=2,
diameter_m=0.05, name="Pipe 2",
geodata=[(2, 0), (2, 4), (7, 4)])
pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j4, length_km=2.5,
diameter_m=0.05, name="Pipe 3",
geodata=[(2, 0), (2, -4), (7, -4)])
pandapipes.create_pipe_from_parameters(net, from_junction=j3, to_junction=j5, length_km=1,
diameter_m=0.05, name="Pipe 4",
geodata=[(7, 4), (7, 3), (5, 3)])
pandapipes.create_pipe_from_parameters(net, from_junction=j4, to_junction=j6, length_km=1,
diameter_m=0.05, name="Pipe 5",
geodata=[(7, -4), (7, -3), (5, -3)])
pandapipes.create_valve(net, from_junction=j5, to_junction=j6, diameter_m=0.05,
opened=True)
pandapipes.create_sink(net, junction=j4, mdot_kg_per_s=5.45e-5, name="Sink 1")
pandapipes.create_source(net, junction=j3, mdot_kg_per_s=3.45e-5)
return net
def test_pickle(tempdir):
"""
Checks if a network saved and reloaded as a pickle file is identical.
:return:
:rtype:
"""
net = load_net()
filename = os.path.join(tempdir, "test_net_1.p")
# save test network
pandapipes.to_pickle(net, filename)
# load test network
net2 = pandapipes.from_pickle(filename)
# check if saved and loaded versions are identical
assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to Pickle."
def test_json(tempdir):
"""
Checks if a network saved and reloaded as a json file is identical.
:return:
:rtype:
"""
net = load_net()
filename = os.path.join(tempdir, "test_net_1.json")
# save test network
pandapipes.to_json(net, filename)
# load test network
net2 = pandapipes.from_json(filename)
# check if saved and loaded versions are identical
assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
del net.pipe_geodata
del net2.pipe_geodata
assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to JSON."
def test_json_string():
"""
Checks if a network saved and reloaded as a json file is identical.
:return:
:rtype:
"""
net = load_net()
# save test network
json_string = pandapipes.to_json(net)
# load test network
net2 = pandapipes.from_json_string(json_string)
# check if saved and loaded versions are identical
assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
del net.pipe_geodata
del net2.pipe_geodata
assert pandapipes.nets_equal(net, net2),\
"Error in comparison after saving to JSON string."
if __name__ == '__main__':
pytest.main(["test_file_io.py"])
| 37.388889 | 100 | 0.610486 |
import os
import pandapipes
import pytest
from pandapower.test.toolbox import tempdir
from pandas.testing import assert_frame_equal
def load_net():
net = pandapipes.create_empty_network("test_net", fluid="lgas")
j1 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15,
name="Connection to External Grid", geodata=(0, 0))
j2 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 2",
geodata=(2, 0))
j3 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 3",
geodata=(7, 4))
j4 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 4",
geodata=(7, -4))
j5 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 5",
geodata=(5, 3))
j6 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 6",
geodata=(5, -3))
pandapipes.create_ext_grid(net, junction=j1, p_bar=1.1, t_k=293.15, name="Grid Connection")
pandapipes.create_pipe_from_parameters(net, from_junction=j1, to_junction=j2, length_km=10,
diameter_m=0.05, name="Pipe 1", geodata=[(0, 0), (2, 0)])
pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j3, length_km=2,
diameter_m=0.05, name="Pipe 2",
geodata=[(2, 0), (2, 4), (7, 4)])
pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j4, length_km=2.5,
diameter_m=0.05, name="Pipe 3",
geodata=[(2, 0), (2, -4), (7, -4)])
pandapipes.create_pipe_from_parameters(net, from_junction=j3, to_junction=j5, length_km=1,
diameter_m=0.05, name="Pipe 4",
geodata=[(7, 4), (7, 3), (5, 3)])
pandapipes.create_pipe_from_parameters(net, from_junction=j4, to_junction=j6, length_km=1,
diameter_m=0.05, name="Pipe 5",
geodata=[(7, -4), (7, -3), (5, -3)])
pandapipes.create_valve(net, from_junction=j5, to_junction=j6, diameter_m=0.05,
opened=True)
pandapipes.create_sink(net, junction=j4, mdot_kg_per_s=5.45e-5, name="Sink 1")
pandapipes.create_source(net, junction=j3, mdot_kg_per_s=3.45e-5)
return net
def test_pickle(tempdir):
net = load_net()
filename = os.path.join(tempdir, "test_net_1.p")
pandapipes.to_pickle(net, filename)
net2 = pandapipes.from_pickle(filename)
assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to Pickle."
def test_json(tempdir):
net = load_net()
filename = os.path.join(tempdir, "test_net_1.json")
pandapipes.to_json(net, filename)
net2 = pandapipes.from_json(filename)
assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
del net.pipe_geodata
del net2.pipe_geodata
assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to JSON."
def test_json_string():
net = load_net()
json_string = pandapipes.to_json(net)
net2 = pandapipes.from_json_string(json_string)
assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
del net.pipe_geodata
del net2.pipe_geodata
assert pandapipes.nets_equal(net, net2),\
"Error in comparison after saving to JSON string."
if __name__ == '__main__':
pytest.main(["test_file_io.py"])
| true | true |
f73b38a2ff5478dc0036b0e5ba52e688a0952586 | 115 | py | Python | setup.py | chappers/formdown | 7481bd69923f5907fe36d9620b4395ddb804fc4a | [
"MIT"
] | 1 | 2017-08-20T22:00:22.000Z | 2017-08-20T22:00:22.000Z | setup.py | chappers/formdown | 7481bd69923f5907fe36d9620b4395ddb804fc4a | [
"MIT"
] | null | null | null | setup.py | chappers/formdown | 7481bd69923f5907fe36d9620b4395ddb804fc4a | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(name='formdown',
version='1.0',
py_modules=['formdown'],
) | 23 | 32 | 0.626087 | from distutils.core import setup
setup(name='formdown',
version='1.0',
py_modules=['formdown'],
) | true | true |
f73b38b52110264b786ff98662d19e9c25afa09e | 429 | py | Python | API_testes/scripts/putting_data.py | tavares-douglas/S206_testes | 49bce4e12d58296250a5af68efe87d9a25e86bad | [
"MIT"
] | null | null | null | API_testes/scripts/putting_data.py | tavares-douglas/S206_testes | 49bce4e12d58296250a5af68efe87d9a25e86bad | [
"MIT"
] | null | null | null | API_testes/scripts/putting_data.py | tavares-douglas/S206_testes | 49bce4e12d58296250a5af68efe87d9a25e86bad | [
"MIT"
] | null | null | null | import requests
def put_data():
headers = {
'Accept': '*/*',
'User-Agent': 'request',
}
data = {
"id":1,
"title":"A test title",
"body":"A test description",
"userId":1,
}
post_id = 1
url = "https://jsonplaceholder.typicode.com/posts/"
response = requests.put(url + str(post_id), data=data, headers=headers)
return response, response.json(), data | 22.578947 | 75 | 0.545455 | import requests
def put_data():
headers = {
'Accept': '*/*',
'User-Agent': 'request',
}
data = {
"id":1,
"title":"A test title",
"body":"A test description",
"userId":1,
}
post_id = 1
url = "https://jsonplaceholder.typicode.com/posts/"
response = requests.put(url + str(post_id), data=data, headers=headers)
return response, response.json(), data | true | true |
f73b39d8786487f9bb8a01448117efe761f6f2a9 | 17,027 | py | Python | ciftify/utils.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | 84 | 2016-09-19T16:34:37.000Z | 2022-03-31T05:47:05.000Z | ciftify/utils.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | 108 | 2016-09-11T15:18:31.000Z | 2022-03-06T07:03:12.000Z | ciftify/utils.py | helloTC/ciftify | ca6b83c8d40cd384de54269d7c62281552b91e21 | [
"MIT"
] | 296 | 2016-09-15T17:18:26.000Z | 2022-01-17T18:16:11.000Z | #!/usr/bin/env python3
"""
A collection of utilities for the epitome pipeline. Mostly for getting
subject numbers/names, checking paths, gathering information, etc.
"""
import os
import sys
import copy
import datetime
import subprocess
import tempfile
import shutil
import logging
import math
import yaml
import ciftify
logger = logging.getLogger(__name__)
def get_subj(path, user_filter=None):
"""
Gets all folder names (i.e., subjects) in a directory (of subjects).
Removes hidden folders.
user_filter option can be used to return only the subjects that contain
the given string.
Warning: Returns a list in python2 and a generator in python3 so always
wrap the returned value in list() if you require a list.
"""
subjects = []
if not os.path.exists(path):
# return empty list if given bad path
return subjects
for subj in next(os.walk(path))[1]:
subjects.append(subj)
subjects.sort()
subjects = filter(lambda x: x.startswith('.') == False, subjects)
if user_filter:
subjects = filter(lambda x: user_filter in x, subjects)
return subjects
def FWHM2Sigma(FWHM):
''' convert the FWHM to a Sigma value '''
if float(FWHM) == 0:
sigma = 0
else:
sigma = float(FWHM) / (2 * math.sqrt(2*math.log(2)))
return(sigma)
def make_dir(dir_name, dry_run=False, suppress_exists_error = False):
# Wait till logging is needed to get logger, so logging configuration
# set in main module is respected
logger = logging.getLogger(__name__)
if dry_run:
logger.debug("Dry-run, skipping creation of directory "\
"{}".format(dir_name))
return
try:
os.makedirs(dir_name)
except PermissionError:
logger.error("You do not have permission to write to {}".format(dir_name))
except FileExistsError:
if not suppress_exists_error:
logger.warning("{} already exists".format(dir_name))
except OSError:
logger.error('Could not create directory {}'.format(dir_name))
def check_output_writable(output_file, exit_on_error = True):
''' will test if the directory for an output_file exists and can be written too '''
logger = logging.getLogger(__name__)
dirname = os.path.dirname(output_file)
dirname = '.' if dirname == '' else dirname
result = os.access(dirname, os.W_OK)
if result == False:
if exit_on_error:
logger.error('Directory for output {} does not exist, '
'or you do not have permission to write there'.format(output_file))
sys.exit(1)
return(result)
def check_input_readable(path, exit_on_error = True):
'''check that path exists and is readable, exits upon failure by default'''
logger = logging.getLogger(__name__)
if not os.access(path, os.R_OK):
logger.error('Input {}, does not exist, or you do not have permission to read it.'
''.format(path))
if exit_on_error:
sys.exit(1)
return(path)
def log_arguments(arguments):
'''send a formatted version of the arguments to the logger'''
logger = logging.getLogger(__name__)
input_args = yaml.dump(arguments, default_flow_style=False)
sep = '{} '.format(os.linesep)
input_args2 = input_args.replace(os.linesep,sep)
input_args3 = input_args2.replace('!!python/object/new:docopt.Dict\ndictitems:','')
logger.info('Arguments:{}{}'.format(sep, input_args3))
def section_header(title):
'''returns a outlined bit to stick in a log file as a section header'''
header = '''
-------------------------------------------------------------
{} : {}
-------------------------------------------------------------
'''.format(datetime.datetime.now(),title)
return(header)
def ciftify_logo():
''' this logo is ascii art with fender font'''
logo = r'''
.|'; || .|';
'' || || '' ||
.|'', || '||' ''||'' || '||' '|| ||`
|| || || || || || `|..||
`|..' .||. .||. `|..' .||. .||. ||
, |'
'' '''
return(logo)
def pint_logo():
''' logo from ascii text with font fender'''
logo = r"""
'||'''|, |''||''| '||\ ||` |''||''|
|| || || ||\\ || ||
||...|' || || \\ || ||
|| || || \\|| ||
.|| |..||..| .|| \||. .||.
"""
return(logo)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass. - Taken from six
to ensure python 2 and 3 class compatibility"""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class TempDir:
def __init__(self):
self.path = None
return
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
if self.path is not None:
shutil.rmtree(self.path)
class TempSceneDir:
"""
A context manager for the temporary scene dir.
A temp dir in the same directory as the hcp data is used for the scene
file due to the fact that scene files contain a large number of relative
paths and the images will come out broken if it is put anywhere else.
"""
def __init__(self, hcp_dir):
self.base = os.path.join(hcp_dir, 'scene')
def __enter__(self):
self.dir = tempfile.mkdtemp(prefix=self.base)
return self.dir
def __exit__(self, type, value, traceback):
shutil.rmtree(self.dir)
class WorkDirSettings:
def __init__(self, arguments):
logger = logging.getLogger(__name__)
try:
temp_dir = arguments['--ciftify-work-dir']
except KeyError:
temp_dir = None
if not temp_dir:
try:
temp_dir = arguments['--hcp-data-dir']
if temp_dir:
logger.warning("Argument --hcp-data-dir has been deprecated. "
"Please instead use --ciftify-work-dir in the future.")
except KeyError:
temp_dir = None
try:
temp_subject = arguments['<subject>']
except KeyError:
temp_subject = None
self.work_dir = self.__set_work_dir(temp_dir, temp_subject)
def __set_work_dir(self, user_dir, subject):
# Wait till logging is needed to get logger, so logging configuration
# set in main module is respected
logger = logging.getLogger(__name__)
if user_dir:
return os.path.realpath(user_dir)
if subject == 'HCP_S1200_GroupAvg':
return None
found_dir = ciftify.config.find_work_dir()
if found_dir is None:
logger.error("Cannot find working directory, exiting.")
sys.exit(1)
return os.path.realpath(found_dir)
def get_registration_mode(arguments):
"""
Insures that the --surf-reg argument is either FS or MSMSulc
"""
if arguments['--surf-reg'] == "MSMSulc":
return 'MSMSulc'
if arguments['--surf-reg'] == "FS":
return 'FS'
else:
logger.error('--surf-reg must be either "MSMSulc" or "FS"')
sys.exit(1)
class WorkFlowSettings(WorkDirSettings):
'''
A convenience class for parsing settings that are shared
by ciftify_recon_all and ciftify_subject_fmri
'''
def __init__(self, arguments):
WorkDirSettings.__init__(self, arguments)
self.FSL_dir = self.__set_FSL_dir()
# Read settings from yaml
self.__config = self.__read_settings(arguments['--ciftify-conf'])
self.high_res = self.get_config_entry('high_res')
self.low_res = self.get_config_entry('low_res')
self.grayord_res = self.get_config_entry('grayord_res')
self.n_cpus = get_number_cpus(arguments['--n_cpus'])
def __set_FSL_dir(self):
fsl_dir = ciftify.config.find_fsl()
if fsl_dir is None:
logger.error("Cannot find FSL dir, exiting.")
sys.exit(1)
fsl_data = os.path.normpath(os.path.join(fsl_dir, 'data'))
if not os.path.exists(fsl_data):
logger.warn("Found {} for FSL path but {} does not exist. May "
"prevent registration files from being found.".format(
fsl_dir, fsl_data))
return fsl_dir
def __read_settings(self, yaml_file):
if yaml_file is None:
yaml_file = os.path.join(ciftify.config.find_ciftify_global(),
'ciftify_workflow_settings.yaml')
if not os.path.exists(yaml_file):
logger.critical("Settings yaml file {} does not exist"
"".format(yaml_file))
sys.exit(1)
try:
with open(yaml_file) as yaml_stream:
config = yaml.load(yaml_stream, Loader=yaml.SafeLoader)
except:
logger.critical("Cannot read yaml config file {}, check formatting."
"".format(yaml_file))
sys.exit(1)
return config
def get_config_entry(self, key):
try:
config_entry = self.__config[key]
except KeyError:
logger.critical("{} not defined in cifti recon settings".format(key))
sys.exit(1)
return config_entry
def get_resolution_config(self, method, standard_res):
"""
Reads the method and resolution settings.
"""
method_config = self.get_config_entry(method)
try:
resolution_config = method_config[standard_res]
except KeyError:
logger.error("Registration resolution {} not defined for method "
"{}".format(standard_res, method))
sys.exit(1)
for key in resolution_config.keys():
## The base dir (FSL_dir currently) may need to change when new
## resolutions/methods are added
reg_item = os.path.join(self.FSL_dir, resolution_config[key])
if not os.path.exists(reg_item):
logger.error("Item required for registration does not exist: "
"{}".format(reg_item))
sys.exit(1)
resolution_config[key] = reg_item
return resolution_config
def get_number_cpus(user_n_cpus = None):
''' reads the number of CPUS available for multithreaded processes
either from a user argument, or from the enviroment'''
if user_n_cpus:
try:
n_cpus = int(user_n_cpus)
except:
logger.critical('Could note read --n_cpus entry {} as integer'.format(user_n_cpus))
sys.exit(1)
else:
n_cpus = os.getenv('OMP_NUM_THREADS')
# if all else fails..set n_cpus to 1
if not n_cpus:
n_cpus = 1
return n_cpus
class VisSettings(WorkDirSettings):
"""
A convenience class. Provides a work_dir and qc_dir attribute and a
function to set each based on the user's input and the environment.
This is intended to be inherited from in each script, so that user
settings can be passed together and easily kept track of.
Arguments: A docopt parsed dictionary of the user's input arguments.
qc_mode: The qc_mode to operate in and the string to include
in the qc output folder name.
Will raise SystemExit if the user hasn't set the ciftify-work-dir/hcp-data-dir
and the environment variable isn't set.
"""
def __init__(self, arguments, qc_mode):
WorkDirSettings.__init__(self, arguments)
try:
temp_qc = arguments['--qcdir']
except KeyError:
temp_qc = None
try:
self.debug_mode = arguments['--debug']
except KeyError:
self.debug_mode = False
self.qc_mode = qc_mode
self.qc_dir = self.__set_qc_dir(temp_qc)
def __set_qc_dir(self, user_qc_dir):
if user_qc_dir:
return user_qc_dir
qc_dir = os.path.join(self.work_dir, 'qc_{}'.format(self.qc_mode))
return qc_dir
def run(cmd, dryrun=False,
suppress_stdout=False,
suppress_echo = False,
suppress_stderr = False,
env = None):
"""
Runs command in default shell, returning the return code and logging the
output. It can take a cmd argument as a string or a list.
If a list is given, it is joined into a string. There are some arguments
for changing the way the cmd is run:
dryrun: Do not actually run the command (for testing) (default:
False)
suppress_echo: echo's command to debug steam (default is info)
suppress_stdout: Any standard output from the function is printed to
the log at "debug" level but not "info"
suppress_stderr: Send error message to stdout...for situations when
program logs info to stderr stream..urg
env: a dict of environment variables to add to the subshell
(this can be a useful may to restrict CPU usage of the subprocess)
"""
# Wait till logging is needed to get logger, so logging configuration
# set in main module is respected
logger = logging.getLogger(__name__)
if type(cmd) is list:
cmd = ' '.join(cmd)
if suppress_echo:
logger.debug("Running: {}".format(cmd))
else:
logger.info("Running: {}".format(cmd))
if dryrun:
logger.info('Doing a dryrun')
return 0
merged_env = os.environ
if env:
merged_env.update(env)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=merged_env)
out, err = p.communicate()
# py3 compability :(
out = out.decode('utf-8')
err = err.decode('utf-8')
if p.returncode:
logger.error('cmd: {} \n Failed with returncode {}'.format(cmd,
p.returncode))
if len(out) > 0:
if suppress_stdout:
logger.debug(out)
else:
logger.info(out)
if len(err) > 0:
if suppress_stderr:
logger.info(err)
else:
logger.warning(err)
return p.returncode
class cd:
"""
A context manager for changing directory. Since best practices dictate
returning to the original directory, saves the original directory and
returns to it after the block has exited.
May raise OSError if the given path doesn't exist (or the current directory
is deleted before switching back)
"""
def __init__(self, path):
user_path = os.path.expanduser(path)
self.new_path = os.path.expandvars(user_path)
def __enter__(self):
self.old_path = os.getcwd()
os.chdir(self.new_path)
def __exit__(self, e, value, traceback):
os.chdir(self.old_path)
def get_stdout(cmd_list, echo=True):
''' run the command given from the cmd list and report the stdout result
Input: A command list'''
logger = logging.getLogger(__name__)
if echo: logger.info('Evaluating: {}'.format(' '.join(cmd_list)))
stdout = subprocess.check_output(cmd_list)
return stdout.decode('utf-8')
def check_output(command, stderr=None, shell = True):
""" Ensures python 3 compatibility by always decoding the return value of
subprocess.check_output
Input: A command string"""
output = subprocess.check_output(command, shell=shell, stderr=stderr)
return output.decode('utf-8')
def ciftify_log_endswith_done(ciftify_log):
'''return true with the ciftify log file exists and ends with the word Done'''
if not os.path.isfile(ciftify_log):
return False
with open(ciftify_log) as f:
lines = f.read().splitlines()
last_line = lines[-3]
is_done = True if 'Done' in last_line else False
return is_done
def has_ciftify_recon_all_run(ciftify_work_dir, subject):
'''determine if ciftify_recon_all has already completed'''
ciftify_log = os.path.join(ciftify_work_dir,
subject,
'cifti_recon_all.log')
return ciftify_log_endswith_done(ciftify_log)
def has_ciftify_fmri_run(subject, fmriname, ciftify_work_dir):
'''determine if ciftify_recon_all has already completed'''
ciftify_log = os.path.join(ciftify_work_dir,
subject,
'MNINonLinear', 'Results', fmriname,
'ciftify_subject_fmri.log')
# print('ciftify_subject_fmri done {}'.format(ciftify_log_endswith_done(ciftify_log)))
return ciftify_log_endswith_done(ciftify_log)
| 34.891393 | 95 | 0.611499 |
import os
import sys
import copy
import datetime
import subprocess
import tempfile
import shutil
import logging
import math
import yaml
import ciftify
logger = logging.getLogger(__name__)
def get_subj(path, user_filter=None):
subjects = []
if not os.path.exists(path):
return subjects
for subj in next(os.walk(path))[1]:
subjects.append(subj)
subjects.sort()
subjects = filter(lambda x: x.startswith('.') == False, subjects)
if user_filter:
subjects = filter(lambda x: user_filter in x, subjects)
return subjects
def FWHM2Sigma(FWHM):
if float(FWHM) == 0:
sigma = 0
else:
sigma = float(FWHM) / (2 * math.sqrt(2*math.log(2)))
return(sigma)
def make_dir(dir_name, dry_run=False, suppress_exists_error = False):
logger = logging.getLogger(__name__)
if dry_run:
logger.debug("Dry-run, skipping creation of directory "\
"{}".format(dir_name))
return
try:
os.makedirs(dir_name)
except PermissionError:
logger.error("You do not have permission to write to {}".format(dir_name))
except FileExistsError:
if not suppress_exists_error:
logger.warning("{} already exists".format(dir_name))
except OSError:
logger.error('Could not create directory {}'.format(dir_name))
def check_output_writable(output_file, exit_on_error = True):
logger = logging.getLogger(__name__)
dirname = os.path.dirname(output_file)
dirname = '.' if dirname == '' else dirname
result = os.access(dirname, os.W_OK)
if result == False:
if exit_on_error:
logger.error('Directory for output {} does not exist, '
'or you do not have permission to write there'.format(output_file))
sys.exit(1)
return(result)
def check_input_readable(path, exit_on_error = True):
logger = logging.getLogger(__name__)
if not os.access(path, os.R_OK):
logger.error('Input {}, does not exist, or you do not have permission to read it.'
''.format(path))
if exit_on_error:
sys.exit(1)
return(path)
def log_arguments(arguments):
logger = logging.getLogger(__name__)
input_args = yaml.dump(arguments, default_flow_style=False)
sep = '{} '.format(os.linesep)
input_args2 = input_args.replace(os.linesep,sep)
input_args3 = input_args2.replace('!!python/object/new:docopt.Dict\ndictitems:','')
logger.info('Arguments:{}{}'.format(sep, input_args3))
def section_header(title):
header = '''
-------------------------------------------------------------
{} : {}
-------------------------------------------------------------
'''.format(datetime.datetime.now(),title)
return(header)
def ciftify_logo():
logo = r'''
.|'; || .|';
'' || || '' ||
.|'', || '||' ''||'' || '||' '|| ||`
|| || || || || || `|..||
`|..' .||. .||. `|..' .||. .||. ||
, |'
'' '''
return(logo)
def pint_logo():
logo = r"""
'||'''|, |''||''| '||\ ||` |''||''|
|| || || ||\\ || ||
||...|' || || \\ || ||
|| || || \\|| ||
.|| |..||..| .|| \||. .||.
"""
return(logo)
def add_metaclass(metaclass):
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class TempDir:
def __init__(self):
self.path = None
return
def __enter__(self):
self.path = tempfile.mkdtemp()
return self.path
def __exit__(self, type, value, traceback):
if self.path is not None:
shutil.rmtree(self.path)
class TempSceneDir:
def __init__(self, hcp_dir):
self.base = os.path.join(hcp_dir, 'scene')
def __enter__(self):
self.dir = tempfile.mkdtemp(prefix=self.base)
return self.dir
def __exit__(self, type, value, traceback):
shutil.rmtree(self.dir)
class WorkDirSettings:
def __init__(self, arguments):
logger = logging.getLogger(__name__)
try:
temp_dir = arguments['--ciftify-work-dir']
except KeyError:
temp_dir = None
if not temp_dir:
try:
temp_dir = arguments['--hcp-data-dir']
if temp_dir:
logger.warning("Argument --hcp-data-dir has been deprecated. "
"Please instead use --ciftify-work-dir in the future.")
except KeyError:
temp_dir = None
try:
temp_subject = arguments['<subject>']
except KeyError:
temp_subject = None
self.work_dir = self.__set_work_dir(temp_dir, temp_subject)
def __set_work_dir(self, user_dir, subject):
# Wait till logging is needed to get logger, so logging configuration
# set in main module is respected
logger = logging.getLogger(__name__)
if user_dir:
return os.path.realpath(user_dir)
if subject == 'HCP_S1200_GroupAvg':
return None
found_dir = ciftify.config.find_work_dir()
if found_dir is None:
logger.error("Cannot find working directory, exiting.")
sys.exit(1)
return os.path.realpath(found_dir)
def get_registration_mode(arguments):
if arguments['--surf-reg'] == "MSMSulc":
return 'MSMSulc'
if arguments['--surf-reg'] == "FS":
return 'FS'
else:
logger.error('--surf-reg must be either "MSMSulc" or "FS"')
sys.exit(1)
class WorkFlowSettings(WorkDirSettings):
def __init__(self, arguments):
WorkDirSettings.__init__(self, arguments)
self.FSL_dir = self.__set_FSL_dir()
# Read settings from yaml
self.__config = self.__read_settings(arguments['--ciftify-conf'])
self.high_res = self.get_config_entry('high_res')
self.low_res = self.get_config_entry('low_res')
self.grayord_res = self.get_config_entry('grayord_res')
self.n_cpus = get_number_cpus(arguments['--n_cpus'])
def __set_FSL_dir(self):
fsl_dir = ciftify.config.find_fsl()
if fsl_dir is None:
logger.error("Cannot find FSL dir, exiting.")
sys.exit(1)
fsl_data = os.path.normpath(os.path.join(fsl_dir, 'data'))
if not os.path.exists(fsl_data):
logger.warn("Found {} for FSL path but {} does not exist. May "
"prevent registration files from being found.".format(
fsl_dir, fsl_data))
return fsl_dir
def __read_settings(self, yaml_file):
if yaml_file is None:
yaml_file = os.path.join(ciftify.config.find_ciftify_global(),
'ciftify_workflow_settings.yaml')
if not os.path.exists(yaml_file):
logger.critical("Settings yaml file {} does not exist"
"".format(yaml_file))
sys.exit(1)
try:
with open(yaml_file) as yaml_stream:
config = yaml.load(yaml_stream, Loader=yaml.SafeLoader)
except:
logger.critical("Cannot read yaml config file {}, check formatting."
"".format(yaml_file))
sys.exit(1)
return config
def get_config_entry(self, key):
try:
config_entry = self.__config[key]
except KeyError:
logger.critical("{} not defined in cifti recon settings".format(key))
sys.exit(1)
return config_entry
def get_resolution_config(self, method, standard_res):
method_config = self.get_config_entry(method)
try:
resolution_config = method_config[standard_res]
except KeyError:
logger.error("Registration resolution {} not defined for method "
"{}".format(standard_res, method))
sys.exit(1)
for key in resolution_config.keys():
## The base dir (FSL_dir currently) may need to change when new
## resolutions/methods are added
reg_item = os.path.join(self.FSL_dir, resolution_config[key])
if not os.path.exists(reg_item):
logger.error("Item required for registration does not exist: "
"{}".format(reg_item))
sys.exit(1)
resolution_config[key] = reg_item
return resolution_config
def get_number_cpus(user_n_cpus = None):
if user_n_cpus:
try:
n_cpus = int(user_n_cpus)
except:
logger.critical('Could note read --n_cpus entry {} as integer'.format(user_n_cpus))
sys.exit(1)
else:
n_cpus = os.getenv('OMP_NUM_THREADS')
# if all else fails..set n_cpus to 1
if not n_cpus:
n_cpus = 1
return n_cpus
class VisSettings(WorkDirSettings):
def __init__(self, arguments, qc_mode):
WorkDirSettings.__init__(self, arguments)
try:
temp_qc = arguments['--qcdir']
except KeyError:
temp_qc = None
try:
self.debug_mode = arguments['--debug']
except KeyError:
self.debug_mode = False
self.qc_mode = qc_mode
self.qc_dir = self.__set_qc_dir(temp_qc)
def __set_qc_dir(self, user_qc_dir):
if user_qc_dir:
return user_qc_dir
qc_dir = os.path.join(self.work_dir, 'qc_{}'.format(self.qc_mode))
return qc_dir
def run(cmd, dryrun=False,
suppress_stdout=False,
suppress_echo = False,
suppress_stderr = False,
env = None):
# Wait till logging is needed to get logger, so logging configuration
# set in main module is respected
logger = logging.getLogger(__name__)
if type(cmd) is list:
cmd = ' '.join(cmd)
if suppress_echo:
logger.debug("Running: {}".format(cmd))
else:
logger.info("Running: {}".format(cmd))
if dryrun:
logger.info('Doing a dryrun')
return 0
merged_env = os.environ
if env:
merged_env.update(env)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=merged_env)
out, err = p.communicate()
# py3 compability :(
out = out.decode('utf-8')
err = err.decode('utf-8')
if p.returncode:
logger.error('cmd: {} \n Failed with returncode {}'.format(cmd,
p.returncode))
if len(out) > 0:
if suppress_stdout:
logger.debug(out)
else:
logger.info(out)
if len(err) > 0:
if suppress_stderr:
logger.info(err)
else:
logger.warning(err)
return p.returncode
class cd:
def __init__(self, path):
user_path = os.path.expanduser(path)
self.new_path = os.path.expandvars(user_path)
def __enter__(self):
self.old_path = os.getcwd()
os.chdir(self.new_path)
def __exit__(self, e, value, traceback):
os.chdir(self.old_path)
def get_stdout(cmd_list, echo=True):
logger = logging.getLogger(__name__)
if echo: logger.info('Evaluating: {}'.format(' '.join(cmd_list)))
stdout = subprocess.check_output(cmd_list)
return stdout.decode('utf-8')
def check_output(command, stderr=None, shell = True):
output = subprocess.check_output(command, shell=shell, stderr=stderr)
return output.decode('utf-8')
def ciftify_log_endswith_done(ciftify_log):
if not os.path.isfile(ciftify_log):
return False
with open(ciftify_log) as f:
lines = f.read().splitlines()
last_line = lines[-3]
is_done = True if 'Done' in last_line else False
return is_done
def has_ciftify_recon_all_run(ciftify_work_dir, subject):
ciftify_log = os.path.join(ciftify_work_dir,
subject,
'cifti_recon_all.log')
return ciftify_log_endswith_done(ciftify_log)
def has_ciftify_fmri_run(subject, fmriname, ciftify_work_dir):
ciftify_log = os.path.join(ciftify_work_dir,
subject,
'MNINonLinear', 'Results', fmriname,
'ciftify_subject_fmri.log')
# print('ciftify_subject_fmri done {}'.format(ciftify_log_endswith_done(ciftify_log)))
return ciftify_log_endswith_done(ciftify_log)
| true | true |
f73b3bf8742aec4a5fa706282b69adaa982fe8a4 | 2,433 | py | Python | tests/parsers/winfirewall.py | rgayon/plaso | 5f1d0f2da19a28a00ab62c276162483e79a42efb | [
"Apache-2.0"
] | 1 | 2020-12-04T10:26:34.000Z | 2020-12-04T10:26:34.000Z | tests/parsers/winfirewall.py | dvntaudio/plaso | 6debdabbce3619b3210efa2a2cbc91242c02d4e3 | [
"Apache-2.0"
] | null | null | null | tests/parsers/winfirewall.py | dvntaudio/plaso | 6debdabbce3619b3210efa2a2cbc91242c02d4e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows firewall log parser."""
from __future__ import unicode_literals
import unittest
from plaso.parsers import winfirewall
from tests.parsers import test_lib
class WinFirewallParserTest(test_lib.ParserTestCase):
"""Tests for the Windows firewall log parser."""
def testParse(self):
"""Tests the Parse function."""
parser = winfirewall.WinFirewallParser()
storage_writer = self._ParseFile(['firewall.log'], parser)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 15)
events = list(storage_writer.GetSortedEvents())
event = events[4]
self.CheckTimestamp(event.timestamp, '2005-04-11 08:06:02.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.source_ip, '123.45.78.90')
self.assertEqual(event_data.dest_ip, '123.156.78.90')
event = events[7]
self.CheckTimestamp(event.timestamp, '2005-04-11 08:06:26.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.size, 576)
self.assertEqual(event_data.flags, 'A')
self.assertEqual(event_data.tcp_ack, 987654321)
expected_message = (
'DROP [ TCP RECEIVE ] '
'From: 123.45.78.90 :80 > 123.156.78.90 :1774 '
'Size (bytes): 576 '
'Flags [A] '
'TCP Seq Number: 123456789 '
'TCP ACK Number: 987654321 '
'TCP Window Size (bytes): 12345')
expected_short_message = (
'DROP [TCP] 123.45.78.90 : 80 > 123.156.78.90 : 1774')
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
event = events[9]
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.icmp_type, 8)
self.assertEqual(event_data.icmp_code, 0)
def testParseWithTimeZone(self):
"""Tests the Parse function with a time zone."""
parser = winfirewall.WinFirewallParser()
storage_writer = self._ParseFile(['firewall.log'], parser, timezone='CET')
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 15)
events = list(storage_writer.GetSortedEvents())
event = events[4]
self.CheckTimestamp(event.timestamp, '2005-04-11 06:06:02.000000')
if __name__ == '__main__':
unittest.main()
| 30.037037 | 78 | 0.704891 |
from __future__ import unicode_literals
import unittest
from plaso.parsers import winfirewall
from tests.parsers import test_lib
class WinFirewallParserTest(test_lib.ParserTestCase):
def testParse(self):
parser = winfirewall.WinFirewallParser()
storage_writer = self._ParseFile(['firewall.log'], parser)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 15)
events = list(storage_writer.GetSortedEvents())
event = events[4]
self.CheckTimestamp(event.timestamp, '2005-04-11 08:06:02.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.source_ip, '123.45.78.90')
self.assertEqual(event_data.dest_ip, '123.156.78.90')
event = events[7]
self.CheckTimestamp(event.timestamp, '2005-04-11 08:06:26.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.size, 576)
self.assertEqual(event_data.flags, 'A')
self.assertEqual(event_data.tcp_ack, 987654321)
expected_message = (
'DROP [ TCP RECEIVE ] '
'From: 123.45.78.90 :80 > 123.156.78.90 :1774 '
'Size (bytes): 576 '
'Flags [A] '
'TCP Seq Number: 123456789 '
'TCP ACK Number: 987654321 '
'TCP Window Size (bytes): 12345')
expected_short_message = (
'DROP [TCP] 123.45.78.90 : 80 > 123.156.78.90 : 1774')
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
event = events[9]
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.icmp_type, 8)
self.assertEqual(event_data.icmp_code, 0)
def testParseWithTimeZone(self):
parser = winfirewall.WinFirewallParser()
storage_writer = self._ParseFile(['firewall.log'], parser, timezone='CET')
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 15)
events = list(storage_writer.GetSortedEvents())
event = events[4]
self.CheckTimestamp(event.timestamp, '2005-04-11 06:06:02.000000')
if __name__ == '__main__':
unittest.main()
| true | true |
f73b3e10fe23095d94d6bb5a0219edef3d24254b | 6,046 | py | Python | thenewboston_node/business_logic/tests/test_memory_blockchain/test_partial_blockchain.py | fonar/thenewboston-node | e8b574b32b3f0ff6d19a764105558ba1f3b31bc2 | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/tests/test_memory_blockchain/test_partial_blockchain.py | fonar/thenewboston-node | e8b574b32b3f0ff6d19a764105558ba1f3b31bc2 | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/tests/test_memory_blockchain/test_partial_blockchain.py | fonar/thenewboston-node | e8b574b32b3f0ff6d19a764105558ba1f3b31bc2 | [
"MIT"
] | null | null | null | from datetime import datetime
from thenewboston_node.business_logic.blockchain.memory_blockchain import MemoryBlockchain
from thenewboston_node.business_logic.models import (
BlockchainStateMessage, CoinTransferSignedChangeRequest, PrimaryValidatorSchedule
)
from thenewboston_node.business_logic.models.account_state import AccountState
from thenewboston_node.business_logic.models.blockchain_state import BlockchainState
from thenewboston_node.business_logic.node import get_node_signing_key
from thenewboston_node.core.utils.cryptography import generate_key_pair
from thenewboston_node.core.utils.types import hexstr
def test_partial_blockchain(primary_validator, preferred_node, node_identifier):
account1_key_pair = generate_key_pair()
account2_key_pair = generate_key_pair()
account3_key_pair = generate_key_pair()
new_account_key_pair = generate_key_pair()
fake_lock1, _ = generate_key_pair()
fake_lock2, _ = generate_key_pair()
fake_lock3, _ = generate_key_pair()
base_blockchain_state = BlockchainState(
message=BlockchainStateMessage(
account_states={
account1_key_pair.public:
AccountState(balance=1000, balance_lock=fake_lock1),
account2_key_pair.public:
AccountState(balance=2000, balance_lock=fake_lock2),
account3_key_pair.public:
AccountState(balance=3000, balance_lock=fake_lock3),
primary_validator.identifier:
AccountState(
node=primary_validator,
primary_validator_schedule=PrimaryValidatorSchedule(
begin_block_number=0, end_block_number=9999
)
),
},
last_block_number=1234,
last_block_identifier=hexstr('23203d245b5e128465669223b5220b3061af1e2e72b0429ef26b07ce3a2282e7'),
last_block_timestamp=datetime.utcnow(),
next_block_identifier=hexstr('626dea61c1a6480d6a4c9cd657c7d7be52ddc38e5f2ec590b609ac01edde62fd'),
),
signer=node_identifier,
)
blockchain = MemoryBlockchain()
blockchain.add_blockchain_state(base_blockchain_state)
primary_validator = blockchain.get_primary_validator()
assert primary_validator
assert blockchain.get_block_count() == 0
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 0
blockchain.validate()
signed_change_request1 = CoinTransferSignedChangeRequest.from_main_transaction(
blockchain=blockchain,
recipient=account2_key_pair.public,
amount=10,
signing_key=account1_key_pair.private,
node=preferred_node
)
signed_change_request1.validate(blockchain, blockchain.get_next_block_number())
blockchain.add_block_from_signed_change_request(signed_change_request1, get_node_signing_key())
blockchain.validate()
pv_fee = primary_validator.fee_amount
node_fee = preferred_node.fee_amount
assert pv_fee > 0
assert node_fee > 0
assert pv_fee != node_fee
total_fees = pv_fee + node_fee
assert blockchain.get_block_count() == 1
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 0
signed_change_request2 = CoinTransferSignedChangeRequest.from_main_transaction(
blockchain=blockchain,
recipient=new_account_key_pair.public,
amount=20,
signing_key=account2_key_pair.private,
node=preferred_node
)
signed_change_request2.validate(blockchain, blockchain.get_next_block_number())
blockchain.add_block_from_signed_change_request(signed_change_request2, get_node_signing_key())
blockchain.validate()
assert blockchain.get_block_count() == 2
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10 - 20 - total_fees
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 20
blockchain.snapshot_blockchain_state()
blockchain.validate()
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10 - 20 - total_fees
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 20
signed_change_request3 = CoinTransferSignedChangeRequest.from_main_transaction(
blockchain=blockchain,
recipient=account2_key_pair.public,
amount=30,
signing_key=account3_key_pair.private,
node=preferred_node
)
signed_change_request3.validate(blockchain, blockchain.get_next_block_number())
blockchain.add_block_from_signed_change_request(signed_change_request3, get_node_signing_key())
blockchain.validate()
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10 - 20 - total_fees + 30
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000 - 30 - total_fees
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 20
| 48.368 | 111 | 0.761164 | from datetime import datetime
from thenewboston_node.business_logic.blockchain.memory_blockchain import MemoryBlockchain
from thenewboston_node.business_logic.models import (
BlockchainStateMessage, CoinTransferSignedChangeRequest, PrimaryValidatorSchedule
)
from thenewboston_node.business_logic.models.account_state import AccountState
from thenewboston_node.business_logic.models.blockchain_state import BlockchainState
from thenewboston_node.business_logic.node import get_node_signing_key
from thenewboston_node.core.utils.cryptography import generate_key_pair
from thenewboston_node.core.utils.types import hexstr
def test_partial_blockchain(primary_validator, preferred_node, node_identifier):
account1_key_pair = generate_key_pair()
account2_key_pair = generate_key_pair()
account3_key_pair = generate_key_pair()
new_account_key_pair = generate_key_pair()
fake_lock1, _ = generate_key_pair()
fake_lock2, _ = generate_key_pair()
fake_lock3, _ = generate_key_pair()
base_blockchain_state = BlockchainState(
message=BlockchainStateMessage(
account_states={
account1_key_pair.public:
AccountState(balance=1000, balance_lock=fake_lock1),
account2_key_pair.public:
AccountState(balance=2000, balance_lock=fake_lock2),
account3_key_pair.public:
AccountState(balance=3000, balance_lock=fake_lock3),
primary_validator.identifier:
AccountState(
node=primary_validator,
primary_validator_schedule=PrimaryValidatorSchedule(
begin_block_number=0, end_block_number=9999
)
),
},
last_block_number=1234,
last_block_identifier=hexstr('23203d245b5e128465669223b5220b3061af1e2e72b0429ef26b07ce3a2282e7'),
last_block_timestamp=datetime.utcnow(),
next_block_identifier=hexstr('626dea61c1a6480d6a4c9cd657c7d7be52ddc38e5f2ec590b609ac01edde62fd'),
),
signer=node_identifier,
)
blockchain = MemoryBlockchain()
blockchain.add_blockchain_state(base_blockchain_state)
primary_validator = blockchain.get_primary_validator()
assert primary_validator
assert blockchain.get_block_count() == 0
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 0
blockchain.validate()
signed_change_request1 = CoinTransferSignedChangeRequest.from_main_transaction(
blockchain=blockchain,
recipient=account2_key_pair.public,
amount=10,
signing_key=account1_key_pair.private,
node=preferred_node
)
signed_change_request1.validate(blockchain, blockchain.get_next_block_number())
blockchain.add_block_from_signed_change_request(signed_change_request1, get_node_signing_key())
blockchain.validate()
pv_fee = primary_validator.fee_amount
node_fee = preferred_node.fee_amount
assert pv_fee > 0
assert node_fee > 0
assert pv_fee != node_fee
total_fees = pv_fee + node_fee
assert blockchain.get_block_count() == 1
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 0
signed_change_request2 = CoinTransferSignedChangeRequest.from_main_transaction(
blockchain=blockchain,
recipient=new_account_key_pair.public,
amount=20,
signing_key=account2_key_pair.private,
node=preferred_node
)
signed_change_request2.validate(blockchain, blockchain.get_next_block_number())
blockchain.add_block_from_signed_change_request(signed_change_request2, get_node_signing_key())
blockchain.validate()
assert blockchain.get_block_count() == 2
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10 - 20 - total_fees
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 20
blockchain.snapshot_blockchain_state()
blockchain.validate()
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10 - 20 - total_fees
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 20
signed_change_request3 = CoinTransferSignedChangeRequest.from_main_transaction(
blockchain=blockchain,
recipient=account2_key_pair.public,
amount=30,
signing_key=account3_key_pair.private,
node=preferred_node
)
signed_change_request3.validate(blockchain, blockchain.get_next_block_number())
blockchain.add_block_from_signed_change_request(signed_change_request3, get_node_signing_key())
blockchain.validate()
assert blockchain.get_account_current_balance(account1_key_pair.public) == 1000 - 10 - total_fees
assert blockchain.get_account_current_balance(account2_key_pair.public) == 2000 + 10 - 20 - total_fees + 30
assert blockchain.get_account_current_balance(account3_key_pair.public) == 3000 - 30 - total_fees
assert blockchain.get_account_current_balance(new_account_key_pair.public) == 20
| true | true |
f73b3ea9a7dd613af4760157de8e5ba50e49db44 | 402 | py | Python | courses/migrations/0026_course_last_taught.py | Isaacli0520/msnmatch | 228c6d546e16bd54dc8c7e0803f0f8c408cb0219 | [
"MIT"
] | null | null | null | courses/migrations/0026_course_last_taught.py | Isaacli0520/msnmatch | 228c6d546e16bd54dc8c7e0803f0f8c408cb0219 | [
"MIT"
] | 18 | 2020-03-11T18:57:27.000Z | 2022-02-26T11:14:38.000Z | courses/migrations/0026_course_last_taught.py | Isaacli0520/msnmatch | 228c6d546e16bd54dc8c7e0803f0f8c408cb0219 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-08-29 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0025_courseuser_section'),
]
operations = [
migrations.AddField(
model_name='course',
name='last_taught',
field=models.CharField(default='', max_length=100),
),
]
| 21.157895 | 63 | 0.60199 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0025_courseuser_section'),
]
operations = [
migrations.AddField(
model_name='course',
name='last_taught',
field=models.CharField(default='', max_length=100),
),
]
| true | true |
f73b3f6f5fa09fc7ce1ff4de711ca83ebc8dceb2 | 1,220 | py | Python | djangocms_youtube/views.py | katlings/djangocms-youtube | 662566b15e68b57e3af5a825829bb5b99b0e91e5 | [
"BSD-3-Clause"
] | 42 | 2015-08-25T08:21:32.000Z | 2022-01-25T21:13:44.000Z | djangocms_youtube/views.py | katlings/djangocms-youtube | 662566b15e68b57e3af5a825829bb5b99b0e91e5 | [
"BSD-3-Clause"
] | 12 | 2016-01-20T15:25:45.000Z | 2018-06-21T11:38:54.000Z | djangocms_youtube/views.py | katlings/djangocms-youtube | 662566b15e68b57e3af5a825829bb5b99b0e91e5 | [
"BSD-3-Clause"
] | 25 | 2015-08-24T13:23:50.000Z | 2021-12-16T01:09:59.000Z | from django.contrib.sitemaps.views import x_robots_tag
from django.contrib.sites.shortcuts import get_current_site
from django.template.response import TemplateResponse
from .video_sitemap import VideoElement
@x_robots_tag
def video_sitemap(request, sitemaps,
template_name='djangocms_youtube/sitemap.xml',
content_type='application/xml'):
protocol = request.scheme
site = get_current_site(request)
domain = site.domain
urls = []
for section, site in sitemaps.items():
if callable(site):
site = site()
for url in site.get_urls():
location = '{protocol}://{domain}{location}'.format(
protocol=protocol, domain=domain, location=url.get('location'))
videos = []
for plugin in url.get('youtube_plugins', []):
videos.append(VideoElement(plugin))
if videos:
url_info = {
'location': location,
'videos': videos,
}
urls.append(url_info)
response = TemplateResponse(
request, template_name, {'urlset': urls}, content_type=content_type)
return response
| 30.5 | 79 | 0.606557 | from django.contrib.sitemaps.views import x_robots_tag
from django.contrib.sites.shortcuts import get_current_site
from django.template.response import TemplateResponse
from .video_sitemap import VideoElement
@x_robots_tag
def video_sitemap(request, sitemaps,
template_name='djangocms_youtube/sitemap.xml',
content_type='application/xml'):
protocol = request.scheme
site = get_current_site(request)
domain = site.domain
urls = []
for section, site in sitemaps.items():
if callable(site):
site = site()
for url in site.get_urls():
location = '{protocol}://{domain}{location}'.format(
protocol=protocol, domain=domain, location=url.get('location'))
videos = []
for plugin in url.get('youtube_plugins', []):
videos.append(VideoElement(plugin))
if videos:
url_info = {
'location': location,
'videos': videos,
}
urls.append(url_info)
response = TemplateResponse(
request, template_name, {'urlset': urls}, content_type=content_type)
return response
| true | true |
f73b3f81e1d6df883b47edd8ccfb0d53dd5a0962 | 6,406 | py | Python | tool.py | YaoJusheng/blog_photos | 77d85598f2c296fe845170628280f6d8b8324472 | [
"Apache-2.0"
] | null | null | null | tool.py | YaoJusheng/blog_photos | 77d85598f2c296fe845170628280f6d8b8324472 | [
"Apache-2.0"
] | null | null | null | tool.py | YaoJusheng/blog_photos | 77d85598f2c296fe845170628280f6d8b8324472 | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/python3
# -*- coding: utf-8 -*-
from PIL import Image
import os
import sys
import json
from datetime import datetime
from ImageProcess import Graphics
# 定义压缩比,数值越大,压缩越小
SIZE_normal = 1.0
SIZE_small = 1.5
SIZE_more_small = 2.0
SIZE_more_small_small = 3.0
def make_directory(directory):
"""创建目录"""
os.makedirs(directory)
def directory_exists(directory):
"""判断目录是否存在"""
if os.path.exists(directory):
return True
else:
return False
def list_img_file(directory):
"""列出目录下所有文件,并筛选出图片文件列表返回"""
# old_list = os.listdir(directory)
img_type = ["jpg", "jpeg", "png", "gif"]
new_list = []
for root, dirs, files in os.walk(directory):
for file in files:
filepath = os.path.join(root, file)[len(directory):]
# print(f'filename: => {file} | filepath: => {filepath}')
name, fileformat = file.split(".")
if fileformat.lower() in img_type:
new_list.append(filepath)
# for filename in old_list:
# print('filename: => ', filename)
# name, fileformat = filename.split(".")
# if fileformat.lower() in img_type:
# new_list.append(filename)
return new_list
def print_help():
print(
"""
This program helps compress many image files
you can choose which scale you want to compress your img(jpg/png/etc)
1) normal compress(4M to 1M around)
2) small compress(4M to 500K around)
3) smaller compress(4M to 300K around)
4) smallest compress(unkown)
"""
)
num = 4
try:
print('请输入压缩模式:')
num = int(sys.stdin.readline().strip())
print(f"你的选择是:{num}")
except Exception as e:
print(e)
return num
def compress(choose, des_dir, src_dir, file_list):
"""
压缩算法,img.thumbnail对图片进行压缩,
参数
-----------
choose: str
选择压缩的比例,有4个选项,越大压缩后的图片越小
"""
if choose == 1:
scale = SIZE_normal
if choose == 2:
scale = SIZE_small
if choose == 3:
scale = SIZE_more_small
if choose == 4:
scale = SIZE_more_small_small
for infile in file_list:
img = Image.open(src_dir+infile)
# size_of_file = os.path.getsize(infile)
w, h = img.size
img.thumbnail((int(w/scale), int(h/scale)))
tagPath = des_dir + infile
fileDir = os.sep.join(tagPath.split(os.sep)[:-1])
# 子目录创建
if fileDir and not directory_exists(fileDir):
make_directory(fileDir)
print(f"infile: {infile} ===> tagPath: {tagPath}")
img.save(tagPath)
def compress_photo():
'''
调用压缩图片的函数
'''
src_dir, des_dir = "photos/", "min_photos/"
file_list_src, file_list_des = [], []
# photos 目录判断
if not directory_exists(src_dir):
make_directory(src_dir)
else:
# business logic
file_list_src = list_img_file(src_dir)
# min_photos 目录判断
if directory_exists(des_dir):
file_list_des = list_img_file(des_dir)
else:
make_directory(des_dir)
'''如果已经压缩了,就不再压缩'''
for i in range(len(file_list_des)):
if file_list_des[i] in file_list_src:
file_list_src.remove(file_list_des[i])
if len(file_list_src) == 0:
print("=====没有新文件需要压缩=======")
num = print_help()
compress(num, des_dir, src_dir, file_list_src)
def handle_photo():
'''
根据图片的文件名处理成需要的json格式的数据
-----------
最后将data.json文件存到博客的source/photos文件夹下
'''
src_dir, des_dir = "photos/", "min_photos/"
file_list = list_img_file(src_dir)
list_info = []
file_list.sort(key=lambda x: x.split(os.sep)[-1].split('_')[0]) # 按照日期排序
for i in range(len(file_list)):
filename = file_list[i]
date_info, info = filename.split("_")
if len(date_info.split(os.sep)) == 2:
_dir, date_str = date_info.split(os.sep)
else:
date_str = date_info
info, _ = info.split(".")
date = datetime.strptime(date_str, "%Y-%m-%d")
year_month = date_str[0:7]
if i == 0: # 处理第一个文件
new_dict = {
"date": year_month,
"arr": {
'year': date.year,
'month': date.month,
'link': [filename],
'text': [info],
'type': ['image']
}
}
list_info.append(new_dict)
elif year_month != list_info[-1]['date']: # 不是最后的一个日期,就新建一个dict
new_dict = {
"date": year_month,
"arr": {
'year': date.year,
'month': date.month,
'link': [filename],
'text': [info],
'type': ['image']
}
}
list_info.append(new_dict)
else: # 同一个日期
list_info[-1]['arr']['link'].append(filename)
list_info[-1]['arr']['text'].append(info)
list_info[-1]['arr']['type'].append('image')
list_info.reverse() # 翻转
final_dict = {"list": list_info}
with open("data.json", "w") as fp:
json.dump(final_dict, fp)
def cut_photo() -> bool:
"""
裁剪算法
----------
调用Graphics类中的裁剪算法,将src_dir目录下的文件进行裁剪(裁剪成正方形)
"""
flag = True
src_dir = "photos/"
if directory_exists(src_dir):
# business logic
file_list = list_img_file(src_dir)
# print file_list
if file_list:
# num = print_help()
for infile in file_list:
# img = Image.open(src_dir+infile)
Graphics(infile=src_dir+infile,
outfile=src_dir + infile).cut_by_ratio()
else:
pass
else:
flag = False
print("source directory not exist!")
return flag
def git_operation():
'''
git 命令行函数,将仓库提交
----------
需要安装git命令行工具,并且添加到环境变量中
'''
os.system('git add --all')
os.system('git commit -m "update photos"')
os.system('git push origin master')
def main():
"""主程序入口"""
res = cut_photo() # 裁剪图片,裁剪成正方形,去中间部分
if res:
compress_photo() # 压缩图片,并保存到 mini_photos 文件夹下
git_operation() # 提交到 github/gitee 仓库
handle_photo() # 将文件处理成 json 格式,存到博客仓库中
if __name__ == "__main__":
main()
| 25.726908 | 78 | 0.547924 |
from PIL import Image
import os
import sys
import json
from datetime import datetime
from ImageProcess import Graphics
SIZE_normal = 1.0
SIZE_small = 1.5
SIZE_more_small = 2.0
SIZE_more_small_small = 3.0
def make_directory(directory):
os.makedirs(directory)
def directory_exists(directory):
if os.path.exists(directory):
return True
else:
return False
def list_img_file(directory):
img_type = ["jpg", "jpeg", "png", "gif"]
new_list = []
for root, dirs, files in os.walk(directory):
for file in files:
filepath = os.path.join(root, file)[len(directory):]
name, fileformat = file.split(".")
if fileformat.lower() in img_type:
new_list.append(filepath)
return new_list
def print_help():
print(
"""
This program helps compress many image files
you can choose which scale you want to compress your img(jpg/png/etc)
1) normal compress(4M to 1M around)
2) small compress(4M to 500K around)
3) smaller compress(4M to 300K around)
4) smallest compress(unkown)
"""
)
num = 4
try:
print('请输入压缩模式:')
num = int(sys.stdin.readline().strip())
print(f"你的选择是:{num}")
except Exception as e:
print(e)
return num
def compress(choose, des_dir, src_dir, file_list):
if choose == 1:
scale = SIZE_normal
if choose == 2:
scale = SIZE_small
if choose == 3:
scale = SIZE_more_small
if choose == 4:
scale = SIZE_more_small_small
for infile in file_list:
img = Image.open(src_dir+infile)
w, h = img.size
img.thumbnail((int(w/scale), int(h/scale)))
tagPath = des_dir + infile
fileDir = os.sep.join(tagPath.split(os.sep)[:-1])
if fileDir and not directory_exists(fileDir):
make_directory(fileDir)
print(f"infile: {infile} ===> tagPath: {tagPath}")
img.save(tagPath)
def compress_photo():
src_dir, des_dir = "photos/", "min_photos/"
file_list_src, file_list_des = [], []
if not directory_exists(src_dir):
make_directory(src_dir)
else:
file_list_src = list_img_file(src_dir)
if directory_exists(des_dir):
file_list_des = list_img_file(des_dir)
else:
make_directory(des_dir)
for i in range(len(file_list_des)):
if file_list_des[i] in file_list_src:
file_list_src.remove(file_list_des[i])
if len(file_list_src) == 0:
print("=====没有新文件需要压缩=======")
num = print_help()
compress(num, des_dir, src_dir, file_list_src)
def handle_photo():
src_dir, des_dir = "photos/", "min_photos/"
file_list = list_img_file(src_dir)
list_info = []
file_list.sort(key=lambda x: x.split(os.sep)[-1].split('_')[0])
for i in range(len(file_list)):
filename = file_list[i]
date_info, info = filename.split("_")
if len(date_info.split(os.sep)) == 2:
_dir, date_str = date_info.split(os.sep)
else:
date_str = date_info
info, _ = info.split(".")
date = datetime.strptime(date_str, "%Y-%m-%d")
year_month = date_str[0:7]
if i == 0:
new_dict = {
"date": year_month,
"arr": {
'year': date.year,
'month': date.month,
'link': [filename],
'text': [info],
'type': ['image']
}
}
list_info.append(new_dict)
elif year_month != list_info[-1]['date']:
new_dict = {
"date": year_month,
"arr": {
'year': date.year,
'month': date.month,
'link': [filename],
'text': [info],
'type': ['image']
}
}
list_info.append(new_dict)
else:
list_info[-1]['arr']['link'].append(filename)
list_info[-1]['arr']['text'].append(info)
list_info[-1]['arr']['type'].append('image')
list_info.reverse()
final_dict = {"list": list_info}
with open("data.json", "w") as fp:
json.dump(final_dict, fp)
def cut_photo() -> bool:
flag = True
src_dir = "photos/"
if directory_exists(src_dir):
file_list = list_img_file(src_dir)
if file_list:
for infile in file_list:
Graphics(infile=src_dir+infile,
outfile=src_dir + infile).cut_by_ratio()
else:
pass
else:
flag = False
print("source directory not exist!")
return flag
def git_operation():
os.system('git add --all')
os.system('git commit -m "update photos"')
os.system('git push origin master')
def main():
res = cut_photo()
if res:
compress_photo()
git_operation()
handle_photo()
if __name__ == "__main__":
main()
| true | true |
f73b3fed9de3eefd4ba51684c70c0f9b43344685 | 5,702 | py | Python | ethereum_parser.py | TheCharlatan/blockchain-parser | 34d4c85ffee0e39978e5dea1f50cfd995a83a321 | [
"MIT"
] | null | null | null | ethereum_parser.py | TheCharlatan/blockchain-parser | 34d4c85ffee0e39978e5dea1f50cfd995a83a321 | [
"MIT"
] | null | null | null | ethereum_parser.py | TheCharlatan/blockchain-parser | 34d4c85ffee0e39978e5dea1f50cfd995a83a321 | [
"MIT"
] | null | null | null | import threading
from typing import NamedTuple
import zmq
from database import BLOCKCHAIN, DATATYPE, Database
from ethereum_blockchain_iterator import (
ParseEthereumBlockBodies,
ParseEthereumBlockHeaders,
)
from parser import DataExtractor
from pathlib import Path
ERC20_TRANSFER_METHOD_ID = bytes.fromhex("a9059cbb")
ERC20_APPROVE_METHOD_ID = bytes.fromhex("095ea7b3")
ERC20_TRANSFER_FROM_METHOD_ID = bytes.fromhex("23b872dd")
ETH_LEADING_12_ZERO_BYTES = bytes.fromhex("0" * 24)
def check_if_template_contract_call(tx_data: bytes) -> bool:
""""Checks if the provide tx_data contains one of the standard ERC20 function invocations
:param tx_data: The tx data to be analyzed
:type tx_data: bytes"""
if len(tx_data) < 5:
return False
# transfer(address _to, uint256 _value) method_id: 0xa9059cbb
# approve(address _spender, uint256 _value) method_id: 0x095ea7b3
if (
tx_data[0:4] == ERC20_TRANSFER_METHOD_ID
or tx_data[0:4] == ERC20_APPROVE_METHOD_ID
):
# the length of these contract calls is exactly 68 bytes
if len(tx_data) != 68:
return False
# check that the address is present, by checking the number of zeroes
if not (tx_data[4:16] == ETH_LEADING_12_ZERO_BYTES):
return False
return True
# transferFrom(address _from, address _to, uint256 _value)
if tx_data[0:4] == ERC20_TRANSFER_FROM_METHOD_ID:
# the length of this contract call is exactly 100 bytes
if len(tx_data) != 100:
return False
# check that the addresses are present, by checking the number of zeroes
if not (
tx_data[4:16] == ETH_LEADING_12_ZERO_BYTES
and tx_data[36:48] == ETH_LEADING_12_ZERO_BYTES
):
return False
return True
return False
class EthereumDataMessage(NamedTuple):
"""Named Tuple for the zmq message for writing prased and extracted data to the database"""
data: bytes
txid: bytes
data_type: DATATYPE
block_height: int
class DatabaseWriter(threading.Thread):
"""DatabaseWriter acts as a worker thread for writing to the sql database
and receives from a zmq socket"""
def __init__(
self, database: Database, receiver: zmq.Socket, blockchain: BLOCKCHAIN
):
"""
:param database: Database to be written into
:type database: Database
:param receiver: Receives parsed extra bytes and monero transaction indices
:type receiver: zmq.Socket
:param blockchain: Some Ethereum-compatible blockchain
:type blockchain: BLOCKCHAIN"""
self._db = database
self._receiver = receiver
self._blockchain = blockchain
threading.Thread.__init__(self)
def run(self) -> None:
records = []
while True:
message: EthereumDataMessage = self._receiver.recv_pyobj()
records.append(
(
message.data,
message.txid,
self._blockchain.value,
message.data_type.value,
message.block_height,
0,
)
)
if len(records) > 500:
print("ethereum writing to DB...", records[0])
self._db.insert_records(records)
records = []
class EthereumParser(DataExtractor):
def __init__(self, chaindata_path: Path, blockchain: BLOCKCHAIN):
"""
:param blockchain_path: Path to the Ethereum blockchain (e.g. /home/user/.ethereum/geth/chaindata).
:type blockchain_path: str
:param blockchain: One of the Ethereum compatible blockchains.
:type blockchain: BLOCKCHAIN
"""
self._chaindata_path = str(chaindata_path.expanduser()) + "/geth/chaindata"
self._ancient_chaindata_path = self._chaindata_path + "/ancient"
self._blockchain = blockchain
def parse_and_extract_blockchain(self, database: Database) -> None:
"""Parse the blockchain with the previously constructed options
:param database: Database to be written into.
:type database: Database
"""
context = zmq.Context()
database_event_sender = context.socket(zmq.PAIR)
database_event_receiver = context.socket(zmq.PAIR)
database_event_sender.bind("inproc://ethereum_dbbridge")
database_event_receiver.connect("inproc://ethereum_dbbridge")
writer = DatabaseWriter(database, database_event_receiver, self._blockchain)
writer.start()
for height, block_body in enumerate(
ParseEthereumBlockBodies(self._ancient_chaindata_path, self._chaindata_path)
):
for (tx_index, tx) in enumerate(block_body.Transactions):
if len(tx.data) < 2:
continue
if check_if_template_contract_call(tx.data):
continue
database_event_sender.send_pyobj(
EthereumDataMessage(tx.data, tx.hash(), DATATYPE.TX_DATA, height)
)
print("done parsing ethereum blocks, now parsing ethereum headers")
for height, header in enumerate(
ParseEthereumBlockHeaders(
self._ancient_chaindata_path, self._chaindata_path
)
):
if len(header.Extra) > 0:
database_event_sender.send_pyobj(
EthereumDataMessage(
header.Extra, header.TxHash, DATATYPE.TX_DATA, height
)
)
print("\n\n Completed Ethereum Parsing \n\n")
| 36.088608 | 107 | 0.63469 | import threading
from typing import NamedTuple
import zmq
from database import BLOCKCHAIN, DATATYPE, Database
from ethereum_blockchain_iterator import (
ParseEthereumBlockBodies,
ParseEthereumBlockHeaders,
)
from parser import DataExtractor
from pathlib import Path
ERC20_TRANSFER_METHOD_ID = bytes.fromhex("a9059cbb")
ERC20_APPROVE_METHOD_ID = bytes.fromhex("095ea7b3")
ERC20_TRANSFER_FROM_METHOD_ID = bytes.fromhex("23b872dd")
ETH_LEADING_12_ZERO_BYTES = bytes.fromhex("0" * 24)
def check_if_template_contract_call(tx_data: bytes) -> bool:
if len(tx_data) < 5:
return False
if (
tx_data[0:4] == ERC20_TRANSFER_METHOD_ID
or tx_data[0:4] == ERC20_APPROVE_METHOD_ID
):
if len(tx_data) != 68:
return False
if not (tx_data[4:16] == ETH_LEADING_12_ZERO_BYTES):
return False
return True
if tx_data[0:4] == ERC20_TRANSFER_FROM_METHOD_ID:
if len(tx_data) != 100:
return False
if not (
tx_data[4:16] == ETH_LEADING_12_ZERO_BYTES
and tx_data[36:48] == ETH_LEADING_12_ZERO_BYTES
):
return False
return True
return False
class EthereumDataMessage(NamedTuple):
data: bytes
txid: bytes
data_type: DATATYPE
block_height: int
class DatabaseWriter(threading.Thread):
def __init__(
self, database: Database, receiver: zmq.Socket, blockchain: BLOCKCHAIN
):
self._db = database
self._receiver = receiver
self._blockchain = blockchain
threading.Thread.__init__(self)
def run(self) -> None:
records = []
while True:
message: EthereumDataMessage = self._receiver.recv_pyobj()
records.append(
(
message.data,
message.txid,
self._blockchain.value,
message.data_type.value,
message.block_height,
0,
)
)
if len(records) > 500:
print("ethereum writing to DB...", records[0])
self._db.insert_records(records)
records = []
class EthereumParser(DataExtractor):
def __init__(self, chaindata_path: Path, blockchain: BLOCKCHAIN):
self._chaindata_path = str(chaindata_path.expanduser()) + "/geth/chaindata"
self._ancient_chaindata_path = self._chaindata_path + "/ancient"
self._blockchain = blockchain
def parse_and_extract_blockchain(self, database: Database) -> None:
context = zmq.Context()
database_event_sender = context.socket(zmq.PAIR)
database_event_receiver = context.socket(zmq.PAIR)
database_event_sender.bind("inproc://ethereum_dbbridge")
database_event_receiver.connect("inproc://ethereum_dbbridge")
writer = DatabaseWriter(database, database_event_receiver, self._blockchain)
writer.start()
for height, block_body in enumerate(
ParseEthereumBlockBodies(self._ancient_chaindata_path, self._chaindata_path)
):
for (tx_index, tx) in enumerate(block_body.Transactions):
if len(tx.data) < 2:
continue
if check_if_template_contract_call(tx.data):
continue
database_event_sender.send_pyobj(
EthereumDataMessage(tx.data, tx.hash(), DATATYPE.TX_DATA, height)
)
print("done parsing ethereum blocks, now parsing ethereum headers")
for height, header in enumerate(
ParseEthereumBlockHeaders(
self._ancient_chaindata_path, self._chaindata_path
)
):
if len(header.Extra) > 0:
database_event_sender.send_pyobj(
EthereumDataMessage(
header.Extra, header.TxHash, DATATYPE.TX_DATA, height
)
)
print("\n\n Completed Ethereum Parsing \n\n")
| true | true |
f73b402cabae49c31df20d71d6b05fb65aeb8bf0 | 7,184 | py | Python | scripts/compare_speed_with_pytorch.py | LiuHaolan/models | 1639b3039237c3997c51ff87f0b6113bb2e8d236 | [
"Apache-2.0"
] | 43 | 2021-06-03T09:07:08.000Z | 2022-03-31T15:21:48.000Z | scripts/compare_speed_with_pytorch.py | LiuHaolan/models | 1639b3039237c3997c51ff87f0b6113bb2e8d236 | [
"Apache-2.0"
] | 64 | 2021-05-31T10:34:06.000Z | 2022-01-17T03:44:58.000Z | scripts/compare_speed_with_pytorch.py | LiuHaolan/models | 1639b3039237c3997c51ff87f0b6113bb2e8d236 | [
"Apache-2.0"
] | 37 | 2021-07-04T03:13:18.000Z | 2022-03-25T07:30:47.000Z | import numpy as np
import time
import tempfile
import os
import importlib.util
import argparse
from typing import Sequence
import subprocess
import re
import oneflow as flow
import oneflow._oneflow_internal as oneflow_internal
DEFAULT_TIMES = 20
gpu_memory_used_by_oneflow = 0
def import_file(path):
spec = importlib.util.spec_from_file_location("mod", path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def sync(x):
if test_oneflow:
x.numpy()
else:
x.cpu()
def gpu_memory_used():
output = subprocess.check_output(
[
"nvidia-smi",
"--query-compute-apps=pid,used_gpu_memory",
"--format=csv,noheader",
]
)
output = output.decode("utf-8").strip()
my_pid = os.getpid()
mem_used_by_me = 0
for line in output.split("\n"):
pid, mem_used = map(int, re.split(",? ", line)[:2])
if pid == my_pid:
mem_used_by_me += mem_used
return mem_used_by_me
def print_rank_0(*args, **kwargs):
rank = int(os.getenv("RANK", "0"))
if rank == 0:
print(*args, **kwargs)
def test(
model_path: str,
module_name: str,
input_shape: Sequence[int],
disable_backward=False,
times=DEFAULT_TIMES,
no_verbose=False,
ddp=False,
ddp_broadcast_buffers=False,
show_memory=True,
):
framework_name = "OneFlow" if test_oneflow else "PyTorch"
if test_oneflow:
python_module = import_file(model_path)
torch = flow
else:
with open(model_path) as f:
buf = f.read()
lines = buf.split("\n")
for i, line in enumerate(lines):
if "import" not in line and len(line.strip()) != 0:
break
lines = (
lines[:i]
+ [
"import torch as flow",
"import torch.nn as nn",
"from torch import Tensor",
"from torch.nn import Parameter",
]
+ lines[i:]
)
buf = "\n".join(lines)
with tempfile.NamedTemporaryFile("w", suffix=".py") as f:
f.write(buf)
f.flush()
python_module = import_file(f.name)
import torch
if ddp:
import torch.distributed as dist
local_rank_env_var = os.getenv("LOCAL_RANK")
assert local_rank_env_var is not None
rank = int(local_rank_env_var)
torch.cuda.set_device(rank)
dist.init_process_group(backend="nccl", init_method="env://")
Net = getattr(python_module, module_name)
warmup_times = 5
m = Net()
m = m.to("cuda")
if ddp:
if test_oneflow:
m = torch.nn.parallel.DistributedDataParallel(
m, broadcast_buffers=ddp_broadcast_buffers
)
else:
m = torch.nn.parallel.DistributedDataParallel(
m, device_ids=[rank], broadcast_buffers=ddp_broadcast_buffers
)
def run_model(m, x):
if disable_backward:
with torch.no_grad():
return m(x)
else:
return m(x)
learning_rate = 0.01
mom = 0.9
optimizer = torch.optim.SGD(m.parameters(), lr=learning_rate, momentum=mom)
# input tensor of OneFlow should set requires_grad=False due to a bug
x = torch.tensor(
np.ones(input_shape).astype(np.float32), requires_grad=not test_oneflow
).to("cuda")
for i in range(warmup_times + times):
if i == warmup_times:
start = time.time()
y = run_model(m, x)
if not disable_backward:
y = y.sum()
y.backward()
optimizer.zero_grad()
optimizer.step()
sync(y)
end = time.time()
total_time_ms = (end - start) * 1000
time_per_run_ms = total_time_ms / times
if no_verbose:
print_rank_0(f"{framework_name}: {time_per_run_ms:.1f}ms")
else:
print_rank_0(
f"{framework_name} {module_name} time: {time_per_run_ms:.1f}ms (= {total_time_ms:.1f}ms / {times}, input_shape={input_shape}{', backward is disabled' if disable_backward else ''}{', ddp' if ddp else ''}{', ddp_broadcast_buffers is disabled' if not ddp_broadcast_buffers else ''}{f', world size={flow.env.get_world_size()}' if flow.env.get_world_size() != 1 else ''})"
)
if show_memory:
global gpu_memory_used_by_oneflow
if test_oneflow:
gpu_memory_used_by_oneflow = gpu_memory_used()
print_rank_0(
f"{framework_name} GPU used (rank 0): {gpu_memory_used_by_oneflow} MiB"
)
else:
print_rank_0(
f"{framework_name} GPU used (rank 0, estimated): {gpu_memory_used() - gpu_memory_used_by_oneflow} MiB"
)
if ddp and not test_oneflow:
import torch.distributed as dist
dist.destroy_process_group()
return time_per_run_ms
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model_path", type=str)
parser.add_argument("module_name", type=str)
parser.add_argument("input_shape", type=str)
parser.add_argument("--times", type=int, default=DEFAULT_TIMES)
parser.add_argument("--disable-backward", action="store_true")
parser.add_argument("--no-verbose", action="store_true")
parser.add_argument("--ddp", action="store_true")
parser.add_argument("--ddp-no-broadcast-buffers", action="store_true")
parser.add_argument("--only-oneflow", action="store_true")
parser.add_argument("--only-pytorch", action="store_true")
parser.add_argument("--no-show-memory", action="store_true")
args = parser.parse_args()
input_shape = list(map(int, args.input_shape.split("x")))
global test_oneflow
if not args.only_pytorch:
# NOTE: PyTorch must run after OneFlow for correct memory usage
test_oneflow = True
oneflow_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_oneflow:
test_oneflow = False
pytorch_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_pytorch and not args.only_oneflow:
relative_speed = pytorch_time / oneflow_time
if args.no_verbose:
print_rank_0(f"Relative speed: {relative_speed:.2f}")
else:
print_rank_0(
f"Relative speed: {relative_speed:.2f} (= {pytorch_time:.1f}ms / {oneflow_time:.1f}ms)"
)
| 30.312236 | 379 | 0.605651 | import numpy as np
import time
import tempfile
import os
import importlib.util
import argparse
from typing import Sequence
import subprocess
import re
import oneflow as flow
import oneflow._oneflow_internal as oneflow_internal
DEFAULT_TIMES = 20
gpu_memory_used_by_oneflow = 0
def import_file(path):
spec = importlib.util.spec_from_file_location("mod", path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def sync(x):
if test_oneflow:
x.numpy()
else:
x.cpu()
def gpu_memory_used():
output = subprocess.check_output(
[
"nvidia-smi",
"--query-compute-apps=pid,used_gpu_memory",
"--format=csv,noheader",
]
)
output = output.decode("utf-8").strip()
my_pid = os.getpid()
mem_used_by_me = 0
for line in output.split("\n"):
pid, mem_used = map(int, re.split(",? ", line)[:2])
if pid == my_pid:
mem_used_by_me += mem_used
return mem_used_by_me
def print_rank_0(*args, **kwargs):
rank = int(os.getenv("RANK", "0"))
if rank == 0:
print(*args, **kwargs)
def test(
model_path: str,
module_name: str,
input_shape: Sequence[int],
disable_backward=False,
times=DEFAULT_TIMES,
no_verbose=False,
ddp=False,
ddp_broadcast_buffers=False,
show_memory=True,
):
framework_name = "OneFlow" if test_oneflow else "PyTorch"
if test_oneflow:
python_module = import_file(model_path)
torch = flow
else:
with open(model_path) as f:
buf = f.read()
lines = buf.split("\n")
for i, line in enumerate(lines):
if "import" not in line and len(line.strip()) != 0:
break
lines = (
lines[:i]
+ [
"import torch as flow",
"import torch.nn as nn",
"from torch import Tensor",
"from torch.nn import Parameter",
]
+ lines[i:]
)
buf = "\n".join(lines)
with tempfile.NamedTemporaryFile("w", suffix=".py") as f:
f.write(buf)
f.flush()
python_module = import_file(f.name)
import torch
if ddp:
import torch.distributed as dist
local_rank_env_var = os.getenv("LOCAL_RANK")
assert local_rank_env_var is not None
rank = int(local_rank_env_var)
torch.cuda.set_device(rank)
dist.init_process_group(backend="nccl", init_method="env://")
Net = getattr(python_module, module_name)
warmup_times = 5
m = Net()
m = m.to("cuda")
if ddp:
if test_oneflow:
m = torch.nn.parallel.DistributedDataParallel(
m, broadcast_buffers=ddp_broadcast_buffers
)
else:
m = torch.nn.parallel.DistributedDataParallel(
m, device_ids=[rank], broadcast_buffers=ddp_broadcast_buffers
)
def run_model(m, x):
if disable_backward:
with torch.no_grad():
return m(x)
else:
return m(x)
learning_rate = 0.01
mom = 0.9
optimizer = torch.optim.SGD(m.parameters(), lr=learning_rate, momentum=mom)
x = torch.tensor(
np.ones(input_shape).astype(np.float32), requires_grad=not test_oneflow
).to("cuda")
for i in range(warmup_times + times):
if i == warmup_times:
start = time.time()
y = run_model(m, x)
if not disable_backward:
y = y.sum()
y.backward()
optimizer.zero_grad()
optimizer.step()
sync(y)
end = time.time()
total_time_ms = (end - start) * 1000
time_per_run_ms = total_time_ms / times
if no_verbose:
print_rank_0(f"{framework_name}: {time_per_run_ms:.1f}ms")
else:
print_rank_0(
f"{framework_name} {module_name} time: {time_per_run_ms:.1f}ms (= {total_time_ms:.1f}ms / {times}, input_shape={input_shape}{', backward is disabled' if disable_backward else ''}{', ddp' if ddp else ''}{', ddp_broadcast_buffers is disabled' if not ddp_broadcast_buffers else ''}{f', world size={flow.env.get_world_size()}' if flow.env.get_world_size() != 1 else ''})"
)
if show_memory:
global gpu_memory_used_by_oneflow
if test_oneflow:
gpu_memory_used_by_oneflow = gpu_memory_used()
print_rank_0(
f"{framework_name} GPU used (rank 0): {gpu_memory_used_by_oneflow} MiB"
)
else:
print_rank_0(
f"{framework_name} GPU used (rank 0, estimated): {gpu_memory_used() - gpu_memory_used_by_oneflow} MiB"
)
if ddp and not test_oneflow:
import torch.distributed as dist
dist.destroy_process_group()
return time_per_run_ms
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model_path", type=str)
parser.add_argument("module_name", type=str)
parser.add_argument("input_shape", type=str)
parser.add_argument("--times", type=int, default=DEFAULT_TIMES)
parser.add_argument("--disable-backward", action="store_true")
parser.add_argument("--no-verbose", action="store_true")
parser.add_argument("--ddp", action="store_true")
parser.add_argument("--ddp-no-broadcast-buffers", action="store_true")
parser.add_argument("--only-oneflow", action="store_true")
parser.add_argument("--only-pytorch", action="store_true")
parser.add_argument("--no-show-memory", action="store_true")
args = parser.parse_args()
input_shape = list(map(int, args.input_shape.split("x")))
global test_oneflow
if not args.only_pytorch:
test_oneflow = True
oneflow_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_oneflow:
test_oneflow = False
pytorch_time = test(
args.model_path,
args.module_name,
input_shape,
disable_backward=args.disable_backward,
times=args.times,
no_verbose=args.no_verbose,
ddp=args.ddp,
ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,
show_memory=not args.no_show_memory,
)
if not args.only_pytorch and not args.only_oneflow:
relative_speed = pytorch_time / oneflow_time
if args.no_verbose:
print_rank_0(f"Relative speed: {relative_speed:.2f}")
else:
print_rank_0(
f"Relative speed: {relative_speed:.2f} (= {pytorch_time:.1f}ms / {oneflow_time:.1f}ms)"
)
| true | true |
f73b41134fa2ad59b6f89c5c8c24a732c2c22fdc | 444 | py | Python | dictol/base.py | ksasi/DICTOL_python | d2ea3f2a2fdb07c76e63d75e11edf9c8b11d9e69 | [
"MIT"
] | 62 | 2017-02-17T13:30:45.000Z | 2022-03-14T04:04:17.000Z | dictol/base.py | ksasi/DICTOL_python | d2ea3f2a2fdb07c76e63d75e11edf9c8b11d9e69 | [
"MIT"
] | 7 | 2019-03-03T00:47:28.000Z | 2022-03-21T09:37:12.000Z | dictol/base.py | ksasi/DICTOL_python | d2ea3f2a2fdb07c76e63d75e11edf9c8b11d9e69 | [
"MIT"
] | 31 | 2017-07-27T01:36:48.000Z | 2022-03-31T09:08:50.000Z | from __future__ import print_function
import numpy as np
class BaseModel(object):
"""
base dictionary learning model for classification
"""
# def __init__(self)
def predict(self, data):
raise NotImplementedError
def evaluate(self, data, label):
pred = self.predict(data)
acc = np.sum(pred == label)/float(len(label))
print('accuracy = {:.2f} %'.format(100 * acc))
return acc
| 22.2 | 54 | 0.628378 | from __future__ import print_function
import numpy as np
class BaseModel(object):
def predict(self, data):
raise NotImplementedError
def evaluate(self, data, label):
pred = self.predict(data)
acc = np.sum(pred == label)/float(len(label))
print('accuracy = {:.2f} %'.format(100 * acc))
return acc
| true | true |
f73b4114abe493c623045d4db9002ec8b758e179 | 362 | py | Python | src/mailing_list/management/commands/create_email_recipients.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/mailing_list/management/commands/create_email_recipients.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/mailing_list/management/commands/create_email_recipients.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | from django.core.management.base import BaseCommand
from user.models import User
from mailing_list.models import EmailRecipient
class Command(BaseCommand):
def handle(self, *args, **options):
for user in User.objects.filter(emailrecipient__isnull=True, email__isnull=False):
EmailRecipient.objects.create(user=user, email=user.email)
| 30.166667 | 90 | 0.765193 | from django.core.management.base import BaseCommand
from user.models import User
from mailing_list.models import EmailRecipient
class Command(BaseCommand):
def handle(self, *args, **options):
for user in User.objects.filter(emailrecipient__isnull=True, email__isnull=False):
EmailRecipient.objects.create(user=user, email=user.email)
| true | true |
f73b42c9eac942cc2b14af35f4cf34df15fc74c4 | 3,070 | py | Python | src/livecli/cache.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | 1 | 2019-12-04T11:54:52.000Z | 2019-12-04T11:54:52.000Z | src/livecli/cache.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | null | null | null | src/livecli/cache.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | null | null | null | import json
import os
import shutil
import tempfile
from time import time
from .compat import is_win32
try:
import xbmc
import xbmcvfs
is_kodi = True
except ImportError:
is_kodi = False
if is_win32 and not is_kodi:
xdg_cache = os.environ.get("APPDATA",
os.path.expanduser("~"))
elif is_kodi:
xdg_cache = xbmc.translatePath("special://profile/addon_data/script.module.livecli").encode("utf-8")
temp_dir = xbmc.translatePath("special://temp").encode("utf-8")
else:
xdg_cache = os.environ.get("XDG_CACHE_HOME",
os.path.expanduser("~/.cache"))
cache_dir = os.path.join(xdg_cache, "livecli")
if is_kodi:
# Kodi - script.module.livecli
temp_livecli = os.path.join(temp_dir, "script.module.livecli")
if not xbmcvfs.exists(cache_dir):
xbmcvfs.mkdirs(cache_dir)
if not xbmcvfs.exists(temp_livecli):
xbmcvfs.mkdirs(temp_livecli)
class Cache(object):
"""Caches Python values as JSON and prunes expired entries."""
def __init__(self, filename, key_prefix=""):
self.key_prefix = key_prefix
self.filename = os.path.join(cache_dir, filename)
self._cache = {}
def _load(self):
if os.path.exists(self.filename):
try:
with open(self.filename, "r") as fd:
self._cache = json.load(fd)
except Exception:
self._cache = {}
else:
self._cache = {}
def _prune(self):
now = time()
pruned = []
for key, value in self._cache.items():
expires = value.get("expires", time())
if expires <= now:
pruned.append(key)
for key in pruned:
self._cache.pop(key, None)
return len(pruned) > 0
def _save(self):
if is_kodi:
fd, tempname = tempfile.mkstemp(dir=temp_livecli)
else:
fd, tempname = tempfile.mkstemp()
fd = os.fdopen(fd, "w")
json.dump(self._cache, fd, indent=2, separators=(",", ": "))
fd.close()
# Silently ignore errors
try:
if not os.path.exists(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename))
shutil.move(tempname, self.filename)
except (IOError, OSError):
os.remove(tempname)
def set(self, key, value, expires=60 * 60 * 24 * 7):
self._load()
self._prune()
if self.key_prefix:
key = "{0}:{1}".format(self.key_prefix, key)
expires += time()
self._cache[key] = dict(value=value, expires=expires)
self._save()
def get(self, key, default=None):
self._load()
if self._prune():
self._save()
if self.key_prefix:
key = "{0}:{1}".format(self.key_prefix, key)
if key in self._cache and "value" in self._cache[key]:
return self._cache[key]["value"]
else:
return default
__all__ = ["Cache"]
| 26.465517 | 104 | 0.56873 | import json
import os
import shutil
import tempfile
from time import time
from .compat import is_win32
try:
import xbmc
import xbmcvfs
is_kodi = True
except ImportError:
is_kodi = False
if is_win32 and not is_kodi:
xdg_cache = os.environ.get("APPDATA",
os.path.expanduser("~"))
elif is_kodi:
xdg_cache = xbmc.translatePath("special://profile/addon_data/script.module.livecli").encode("utf-8")
temp_dir = xbmc.translatePath("special://temp").encode("utf-8")
else:
xdg_cache = os.environ.get("XDG_CACHE_HOME",
os.path.expanduser("~/.cache"))
cache_dir = os.path.join(xdg_cache, "livecli")
if is_kodi:
temp_livecli = os.path.join(temp_dir, "script.module.livecli")
if not xbmcvfs.exists(cache_dir):
xbmcvfs.mkdirs(cache_dir)
if not xbmcvfs.exists(temp_livecli):
xbmcvfs.mkdirs(temp_livecli)
class Cache(object):
def __init__(self, filename, key_prefix=""):
self.key_prefix = key_prefix
self.filename = os.path.join(cache_dir, filename)
self._cache = {}
def _load(self):
if os.path.exists(self.filename):
try:
with open(self.filename, "r") as fd:
self._cache = json.load(fd)
except Exception:
self._cache = {}
else:
self._cache = {}
def _prune(self):
now = time()
pruned = []
for key, value in self._cache.items():
expires = value.get("expires", time())
if expires <= now:
pruned.append(key)
for key in pruned:
self._cache.pop(key, None)
return len(pruned) > 0
def _save(self):
if is_kodi:
fd, tempname = tempfile.mkstemp(dir=temp_livecli)
else:
fd, tempname = tempfile.mkstemp()
fd = os.fdopen(fd, "w")
json.dump(self._cache, fd, indent=2, separators=(",", ": "))
fd.close()
try:
if not os.path.exists(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename))
shutil.move(tempname, self.filename)
except (IOError, OSError):
os.remove(tempname)
def set(self, key, value, expires=60 * 60 * 24 * 7):
self._load()
self._prune()
if self.key_prefix:
key = "{0}:{1}".format(self.key_prefix, key)
expires += time()
self._cache[key] = dict(value=value, expires=expires)
self._save()
def get(self, key, default=None):
self._load()
if self._prune():
self._save()
if self.key_prefix:
key = "{0}:{1}".format(self.key_prefix, key)
if key in self._cache and "value" in self._cache[key]:
return self._cache[key]["value"]
else:
return default
__all__ = ["Cache"]
| true | true |
f73b4384882e3c3e76ca54a7bef8e31c44db8bd4 | 586 | py | Python | simpleticket/urls.py | jalvinronnie/SU-Portal | 7c881cd13e32eaccc1095095324b7ff084fbf403 | [
"MIT"
] | null | null | null | simpleticket/urls.py | jalvinronnie/SU-Portal | 7c881cd13e32eaccc1095095324b7ff084fbf403 | [
"MIT"
] | 1 | 2020-06-05T22:09:04.000Z | 2020-06-05T22:09:04.000Z | simpleticket/urls.py | jalvinronnie/SU-Portal | 7c881cd13e32eaccc1095095324b7ff084fbf403 | [
"MIT"
] | 1 | 2019-07-28T08:59:26.000Z | 2019-07-28T08:59:26.000Z | from django.urls import path
from simpleticket import views
urlpatterns = [
path('', views.view_all),
path('view/<int:ticket_id>/', views.view),
path('new/', views.create),
path('submit_ticket/', views.submit_ticket),
path('update/<int:ticket_id>/', views.update),
path('update_ticket/<int:ticket_id>/', views.update_ticket),
path('submit_comment/<int:ticket_id>/', views.submit_comment),
path('delete/<int:ticket_id>/', views.delete_ticket),
path('delete_comment/<int:comment_id>/', views.delete_comment),
# path('project/', views.project),
]
| 34.470588 | 67 | 0.687713 | from django.urls import path
from simpleticket import views
urlpatterns = [
path('', views.view_all),
path('view/<int:ticket_id>/', views.view),
path('new/', views.create),
path('submit_ticket/', views.submit_ticket),
path('update/<int:ticket_id>/', views.update),
path('update_ticket/<int:ticket_id>/', views.update_ticket),
path('submit_comment/<int:ticket_id>/', views.submit_comment),
path('delete/<int:ticket_id>/', views.delete_ticket),
path('delete_comment/<int:comment_id>/', views.delete_comment),
]
| true | true |
f73b455a7ce132aa0f3b300ddb962738610ca1f9 | 14,157 | py | Python | vulnerabilities/tests/test_api.py | keshav-space/vulnerablecode | 8ccb2026c084c42d075c629fe182feaa22c07830 | [
"Apache-2.0"
] | null | null | null | vulnerabilities/tests/test_api.py | keshav-space/vulnerablecode | 8ccb2026c084c42d075c629fe182feaa22c07830 | [
"Apache-2.0"
] | null | null | null | vulnerabilities/tests/test_api.py | keshav-space/vulnerablecode | 8ccb2026c084c42d075c629fe182feaa22c07830 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
import os
from collections import OrderedDict
from random import choices
from unittest.mock import MagicMock
from urllib.parse import quote
from django.test import TestCase
from django.test.client import RequestFactory
from rest_framework.test import APIClient
from rest_framework.test import APIRequestFactory
from vulnerabilities.api import PackageSerializer
from vulnerabilities.models import Package
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA = os.path.join(BASE_DIR, "test_data/")
def cleaned_response(response):
    """
    Return a cleaned response suitable for comparison in tests in particular:
    - sort lists with a stable order

    Packages are ordered by purl; each package's vulnerability lists are
    ordered by vulnerability id, with references sorted by (id, url) and
    scores by (value, scoring_system).
    Note: the nested lists inside ``response`` are updated in place.
    """

    def _sort_vulnerabilities(vulnerabilities):
        # Shared sorting logic for both the resolved and unresolved lists,
        # which the previous implementation duplicated verbatim.
        vulnerabilities = sorted(vulnerabilities, key=lambda x: x["vulnerability_id"])
        for vulnerability in vulnerabilities:
            references = sorted(
                vulnerability["references"], key=lambda x: (x["reference_id"], x["url"])
            )
            for reference in references:
                reference["scores"] = sorted(
                    reference["scores"], key=lambda x: (x["value"], x["scoring_system"])
                )
            vulnerability["references"] = references
        return vulnerabilities

    cleaned_response = []
    for package_data in sorted(response, key=lambda x: x.get("purl", "")):
        package_data["unresolved_vulnerabilities"] = _sort_vulnerabilities(
            package_data["unresolved_vulnerabilities"]
        )
        package_data["resolved_vulnerabilities"] = _sort_vulnerabilities(
            package_data["resolved_vulnerabilities"]
        )
        cleaned_response.append(package_data)
    return cleaned_response
class TestDebianResponse(TestCase):
    """Filtering tests for the packages API, backed by the debian.json fixture."""
    fixtures = ["debian.json"]
    @classmethod
    def setUpTestData(cls):
        """Add one non-Debian "mimetex" package to verify namespace filtering."""
        # create one non-debian package called "mimetex" to verify filtering
        Package.objects.create(name="mimetex", version="1.50-1.1", type="deb", namespace="ubuntu")
    def test_query_qualifier_filtering(self):
        """Qualifier filtering matches irrespective of qualifier order and subsets."""
        # packages to check filtering with single/multiple and unordered qualifier filtering
        pk_multi_qf = Package.objects.create(
            name="vlc", version="1.50-1.1", type="deb", qualifiers={"foo": "bar", "tar": "ball"}
        )
        pk_single_qf = Package.objects.create(
            name="vlc", version="1.50-1.1", type="deb", qualifiers={"foo": "bar"}
        )
        # check filtering when qualifiers are not normalized
        test_purl = quote("pkg:deb/vlc@1.50-1.1?foo=bar&tar=ball")
        response = self.client.get(f"/api/packages/?purl={test_purl}", format="json").data
        self.assertEqual(1, response["count"])
        self.assertEqual(pk_multi_qf.qualifiers, response["results"][0]["qualifiers"])
        test_purl = quote("pkg:deb/vlc@1.50-1.1?tar=ball&foo=bar")
        response = self.client.get(f"/api/packages/?purl={test_purl}", format="json").data
        self.assertEqual(1, response["count"])
        self.assertEqual(pk_multi_qf.qualifiers, response["results"][0]["qualifiers"])
        # check filtering when there is intersection of qualifiers between packages
        test_purl = quote("pkg:deb/vlc@1.50-1.1?foo=bar")
        response = self.client.get(f"/api/packages/?purl={test_purl}", format="json").data
        self.assertEqual(1, response["count"])
    def test_query_by_name(self):
        """Name filtering returns every version/namespace of "mimetex"."""
        response = self.client.get("/api/packages/?name=mimetex", format="json").data
        self.assertEqual(3, response["count"])
        first_result = response["results"][0]
        self.assertEqual("mimetex", first_result["name"])
        versions = {r["version"] for r in response["results"]}
        self.assertIn("1.50-1.1", versions)
        self.assertIn("1.74-1", versions)
        purls = {r["purl"] for r in response["results"]}
        self.assertIn("pkg:deb/debian/mimetex@1.50-1.1?distro=jessie", purls)
        self.assertIn("pkg:deb/debian/mimetex@1.74-1?distro=jessie", purls)
    def test_query_by_invalid_package_url(self):
        """An unparsable purl yields HTTP 400 with an error mentioning the input."""
        url = "/api/packages/?purl=invalid_purl"
        response = self.client.get(url, format="json")
        self.assertEqual(400, response.status_code)
        self.assertIn("error", response.data)
        error = response.data["error"]
        self.assertIn("invalid_purl", error)
    def test_query_by_package_url(self):
        """A fully qualified purl matches exactly one package."""
        url = "/api/packages/?purl=pkg:deb/debian/mimetex@1.50-1.1?distro=jessie"
        response = self.client.get(url, format="json").data
        self.assertEqual(1, response["count"])
        first_result = response["results"][0]
        self.assertEqual("mimetex", first_result["name"])
        versions = {r["version"] for r in response["results"]}
        self.assertIn("1.50-1.1", versions)
        self.assertNotIn("1.74-1", versions)
    def test_query_by_package_url_without_namespace(self):
        """A purl without a namespace matches packages across namespaces."""
        url = "/api/packages/?purl=pkg:deb/mimetex@1.50-1.1"
        response = self.client.get(url, format="json").data
        self.assertEqual(2, response["count"])
        first_result = response["results"][0]
        self.assertEqual("mimetex", first_result["name"])
        purls = {r["purl"] for r in response["results"]}
        self.assertIn("pkg:deb/debian/mimetex@1.50-1.1?distro=jessie", purls)
        self.assertIn("pkg:deb/ubuntu/mimetex@1.50-1.1", purls)
class APIResponseRelations(TestCase):
    """Cross-endpoint consistency checks between packages and vulnerabilities."""
    fixtures = ["openssl.json"]
    def test_vulnerability_package_relations(self):
        """Each sampled package must be listed back by its own vulnerabilities."""
        # NOTE(review): random.choices samples with replacement, so the tested
        # packages vary per run — confirm this nondeterminism is intended.
        test_pkgs = choices(Package.objects.all(), k=5)
        for test_pkg in test_pkgs:
            pkg_response = self.client.get(f"/api/packages/{test_pkg.id}/", format="json").data
            resolved_vulns = {
                vuln["vulnerability_id"] for vuln in pkg_response["resolved_vulnerabilities"]
            }
            unresolved_vulns = {
                vuln["vulnerability_id"] for vuln in pkg_response["unresolved_vulnerabilities"]
            }
            for vuln in resolved_vulns:
                vuln_resp = self.client.get(
                    f"/api/vulnerabilities/?vulnerability_id={vuln}", format="json"
                ).data
                # Skip vulnerabilities the endpoint does not return results for.
                if not vuln_resp["results"]:
                    continue
                resolved_purls = {
                    package["purl"] for package in vuln_resp["results"][0]["resolved_packages"]
                }
                self.assertIn(test_pkg.package_url, resolved_purls)
            for vuln in unresolved_vulns:
                vuln_resp = self.client.get(
                    f"/api/vulnerabilities/?vulnerability_id={vuln}", format="json"
                ).data
                if not vuln_resp["results"]:
                    continue
                unresolved_purls = {
                    package["purl"] for package in vuln_resp["results"][0]["unresolved_packages"]
                }
                self.assertIn(test_pkg.package_url, unresolved_purls)
class TestSerializers(TestCase):
    """Direct serializer tests (no HTTP layer involved)."""
    fixtures = ["debian.json"]
    def test_package_serializer(self):
        """PackageSerializer serializes all "mimetex" fixture packages."""
        pk = Package.objects.filter(name="mimetex")
        # A request is required in context so hyperlinked fields can build URLs.
        mock_request = RequestFactory().get("/api")
        response = PackageSerializer(pk, many=True, context={"request": mock_request}).data
        self.assertEqual(2, len(response))
        first_result = response[0]
        self.assertEqual("mimetex", first_result["name"])
        versions = {r["version"] for r in response}
        self.assertIn("1.50-1.1", versions)
        self.assertIn("1.74-1", versions)
        purls = {r["purl"] for r in response}
        self.assertIn("pkg:deb/debian/mimetex@1.50-1.1?distro=jessie", purls)
        self.assertIn("pkg:deb/debian/mimetex@1.74-1?distro=jessie", purls)
class TestBulkAPIResponse(TestCase):
    """Tests for the /api/packages/bulk_search/ endpoint."""
    fixtures = ["github.json"]
    def test_bulk_packages_api(self):
        """Bulk search returns entries for both unknown and known purls."""
        request_body = {
            "purls": [
                "pkg:deb/debian/doesnotexist@0.9.7-10?distro=jessie",
                "pkg:maven/com.datadoghq/datadog-api-client@1.0.0-beta.7",
            ]
        }
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=request_body,
            content_type="application/json",
        ).json()
        expected_response = [
            {
                "name": "doesnotexist",
                "namespace": "debian",
                "purl": "pkg:deb/debian/doesnotexist@0.9.7-10?distro=jessie",
                "qualifiers": {"distro": "jessie"},
                "resolved_vulnerabilities": [],
                "subpath": None,
                "type": "deb",
                "unresolved_vulnerabilities": [],
                "version": "0.9.7-10",
            },
            {
                "name": "datadog-api-client",
                "namespace": "com.datadoghq",
                "purl": "pkg:maven/com.datadoghq/datadog-api-client@1.0.0-beta.7",
                "qualifiers": {},
                "resolved_vulnerabilities": [],
                "subpath": "",
                "type": "maven",
                "unresolved_vulnerabilities": [
                    {
                        "references": [
                            {
                                "reference_id": "",
                                "scores": [],
                                "url": "https://nvd.nist.gov/vuln/detail/CVE-2021-21331",
                            },
                            {
                                "reference_id": "GHSA-2cxf-6567-7pp6",
                                "scores": [{"scoring_system": "cvssv3.1_qr", "value": "LOW"}],
                                "url": "https://github.com/DataDog/datadog-api-client-java/security/advisories/GHSA-2cxf-6567-7pp6",
                            },
                            {
                                "reference_id": "GHSA-2cxf-6567-7pp6",
                                "scores": [],
                                "url": "https://github.com/advisories/GHSA-2cxf-6567-7pp6",
                            },
                        ],
                        "summary": "Local Information Disclosure " "Vulnerability",
                        "url": "http://testserver/api/vulnerabilities/60",
                        "vulnerability_id": "CVE-2021-21331",
                    }
                ],
                "url": "http://testserver/api/packages/3467",
                "version": "1.0.0-beta.7",
            },
        ]
        # Both sides pass through cleaned_response() so list ordering is stable.
        assert cleaned_response(expected_response) == cleaned_response(response)
    def test_invalid_request_bulk_packages(self):
        """Invalid bodies (wrong key, wrong type, bad purl) get explicit errors."""
        error_response = {
            "Error": "A non-empty 'purls' list of package URLs is required." # nopep8
        }
        invalid_key_request_data = {"pkg": []}
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=invalid_key_request_data,
            content_type="application/json",
        ).data
        assert response == error_response
        valid_key_invalid_datatype_request_data = {"packages": {}}
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=valid_key_invalid_datatype_request_data,
            content_type="application/json",
        ).data
        assert response == error_response
        invalid_purl_request_data = {
            "purls": [
                "pkg:deb/debian/librsync@0.9.7-10?distro=jessie",
                "pg:deb/debian/mimetex@1.50-1.1?distro=jessie",
            ]
        }
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=invalid_purl_request_data,
            content_type="application/json",
        ).data
        # NOTE(review): "purl_error_respones" is a typo for "responses"
        # (local name only, no behavior impact).
        purl_error_respones = {
            "Error": "Invalid Package URL: pg:deb/debian/mimetex@1.50-1.1?distro=jessie"
        }
        assert response == purl_error_respones
| 41.034783 | 132 | 0.594335 |
import os
from collections import OrderedDict
from random import choices
from unittest.mock import MagicMock
from urllib.parse import quote
from django.test import TestCase
from django.test.client import RequestFactory
from rest_framework.test import APIClient
from rest_framework.test import APIRequestFactory
from vulnerabilities.api import PackageSerializer
from vulnerabilities.models import Package
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA = os.path.join(BASE_DIR, "test_data/")
def cleaned_response(response):
    """
    Return *response* with every nested list sorted into a stable order so two
    API responses can be compared for equality in tests.

    Packages are ordered by purl; within each package both vulnerability lists
    are ordered by vulnerability id, references by (id, url), and scores by
    (value, scoring_system). Nested dicts are updated in place.
    """
    cleaned = []
    for package_data in sorted(response, key=lambda pkg: pkg.get("purl", "")):
        for key in ("unresolved_vulnerabilities", "resolved_vulnerabilities"):
            vulns = sorted(package_data[key], key=lambda v: v["vulnerability_id"])
            for vuln in vulns:
                refs = sorted(
                    vuln["references"], key=lambda r: (r["reference_id"], r["url"])
                )
                for ref in refs:
                    ref["scores"] = sorted(
                        ref["scores"], key=lambda s: (s["value"], s["scoring_system"])
                    )
                vuln["references"] = refs
            package_data[key] = vulns
        cleaned.append(package_data)
    return cleaned
class TestDebianResponse(TestCase):
    """Filtering tests for the packages API, backed by the debian.json fixture."""
    fixtures = ["debian.json"]
    @classmethod
    def setUpTestData(cls):
        """Add one non-Debian "mimetex" package to verify namespace filtering."""
        Package.objects.create(name="mimetex", version="1.50-1.1", type="deb", namespace="ubuntu")
    def test_query_qualifier_filtering(self):
        """Qualifier filtering matches irrespective of qualifier order and subsets."""
        pk_multi_qf = Package.objects.create(
            name="vlc", version="1.50-1.1", type="deb", qualifiers={"foo": "bar", "tar": "ball"}
        )
        pk_single_qf = Package.objects.create(
            name="vlc", version="1.50-1.1", type="deb", qualifiers={"foo": "bar"}
        )
        # Unnormalized qualifier order must still match the multi-qualifier package.
        test_purl = quote("pkg:deb/vlc@1.50-1.1?foo=bar&tar=ball")
        response = self.client.get(f"/api/packages/?purl={test_purl}", format="json").data
        self.assertEqual(1, response["count"])
        self.assertEqual(pk_multi_qf.qualifiers, response["results"][0]["qualifiers"])
        test_purl = quote("pkg:deb/vlc@1.50-1.1?tar=ball&foo=bar")
        response = self.client.get(f"/api/packages/?purl={test_purl}", format="json").data
        self.assertEqual(1, response["count"])
        self.assertEqual(pk_multi_qf.qualifiers, response["results"][0]["qualifiers"])
        # Overlapping qualifiers between packages: only the exact match is returned.
        test_purl = quote("pkg:deb/vlc@1.50-1.1?foo=bar")
        response = self.client.get(f"/api/packages/?purl={test_purl}", format="json").data
        self.assertEqual(1, response["count"])
    def test_query_by_name(self):
        """Name filtering returns every version/namespace of "mimetex"."""
        response = self.client.get("/api/packages/?name=mimetex", format="json").data
        self.assertEqual(3, response["count"])
        first_result = response["results"][0]
        self.assertEqual("mimetex", first_result["name"])
        versions = {r["version"] for r in response["results"]}
        self.assertIn("1.50-1.1", versions)
        self.assertIn("1.74-1", versions)
        purls = {r["purl"] for r in response["results"]}
        self.assertIn("pkg:deb/debian/mimetex@1.50-1.1?distro=jessie", purls)
        self.assertIn("pkg:deb/debian/mimetex@1.74-1?distro=jessie", purls)
    def test_query_by_invalid_package_url(self):
        """An unparsable purl yields HTTP 400 with an error mentioning the input."""
        url = "/api/packages/?purl=invalid_purl"
        response = self.client.get(url, format="json")
        self.assertEqual(400, response.status_code)
        self.assertIn("error", response.data)
        error = response.data["error"]
        self.assertIn("invalid_purl", error)
    def test_query_by_package_url(self):
        """A fully qualified purl matches exactly one package."""
        url = "/api/packages/?purl=pkg:deb/debian/mimetex@1.50-1.1?distro=jessie"
        response = self.client.get(url, format="json").data
        self.assertEqual(1, response["count"])
        first_result = response["results"][0]
        self.assertEqual("mimetex", first_result["name"])
        versions = {r["version"] for r in response["results"]}
        self.assertIn("1.50-1.1", versions)
        self.assertNotIn("1.74-1", versions)
    def test_query_by_package_url_without_namespace(self):
        """A purl without a namespace matches packages across namespaces."""
        url = "/api/packages/?purl=pkg:deb/mimetex@1.50-1.1"
        response = self.client.get(url, format="json").data
        self.assertEqual(2, response["count"])
        first_result = response["results"][0]
        self.assertEqual("mimetex", first_result["name"])
        purls = {r["purl"] for r in response["results"]}
        self.assertIn("pkg:deb/debian/mimetex@1.50-1.1?distro=jessie", purls)
        self.assertIn("pkg:deb/ubuntu/mimetex@1.50-1.1", purls)
class APIResponseRelations(TestCase):
    """Cross-endpoint consistency checks between packages and vulnerabilities."""
    fixtures = ["openssl.json"]
    def test_vulnerability_package_relations(self):
        """Each sampled package must be listed back by its own vulnerabilities."""
        # NOTE(review): random.choices samples with replacement, so the tested
        # packages vary per run — confirm this nondeterminism is intended.
        test_pkgs = choices(Package.objects.all(), k=5)
        for test_pkg in test_pkgs:
            pkg_response = self.client.get(f"/api/packages/{test_pkg.id}/", format="json").data
            resolved_vulns = {
                vuln["vulnerability_id"] for vuln in pkg_response["resolved_vulnerabilities"]
            }
            unresolved_vulns = {
                vuln["vulnerability_id"] for vuln in pkg_response["unresolved_vulnerabilities"]
            }
            for vuln in resolved_vulns:
                vuln_resp = self.client.get(
                    f"/api/vulnerabilities/?vulnerability_id={vuln}", format="json"
                ).data
                # Skip vulnerabilities the endpoint returns no results for.
                if not vuln_resp["results"]:
                    continue
                resolved_purls = {
                    package["purl"] for package in vuln_resp["results"][0]["resolved_packages"]
                }
                self.assertIn(test_pkg.package_url, resolved_purls)
            for vuln in unresolved_vulns:
                vuln_resp = self.client.get(
                    f"/api/vulnerabilities/?vulnerability_id={vuln}", format="json"
                ).data
                if not vuln_resp["results"]:
                    continue
                unresolved_purls = {
                    package["purl"] for package in vuln_resp["results"][0]["unresolved_packages"]
                }
                self.assertIn(test_pkg.package_url, unresolved_purls)
class TestSerializers(TestCase):
    """Direct serializer tests (no HTTP layer involved)."""
    fixtures = ["debian.json"]
    def test_package_serializer(self):
        """PackageSerializer serializes all "mimetex" fixture packages."""
        pk = Package.objects.filter(name="mimetex")
        # A request is required in context so hyperlinked fields can build URLs.
        mock_request = RequestFactory().get("/api")
        response = PackageSerializer(pk, many=True, context={"request": mock_request}).data
        self.assertEqual(2, len(response))
        first_result = response[0]
        self.assertEqual("mimetex", first_result["name"])
        versions = {r["version"] for r in response}
        self.assertIn("1.50-1.1", versions)
        self.assertIn("1.74-1", versions)
        purls = {r["purl"] for r in response}
        self.assertIn("pkg:deb/debian/mimetex@1.50-1.1?distro=jessie", purls)
        self.assertIn("pkg:deb/debian/mimetex@1.74-1?distro=jessie", purls)
class TestBulkAPIResponse(TestCase):
    """Tests for the /api/packages/bulk_search/ endpoint."""
    fixtures = ["github.json"]
    def test_bulk_packages_api(self):
        """Bulk search returns entries for both unknown and known purls."""
        request_body = {
            "purls": [
                "pkg:deb/debian/doesnotexist@0.9.7-10?distro=jessie",
                "pkg:maven/com.datadoghq/datadog-api-client@1.0.0-beta.7",
            ]
        }
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=request_body,
            content_type="application/json",
        ).json()
        expected_response = [
            {
                "name": "doesnotexist",
                "namespace": "debian",
                "purl": "pkg:deb/debian/doesnotexist@0.9.7-10?distro=jessie",
                "qualifiers": {"distro": "jessie"},
                "resolved_vulnerabilities": [],
                "subpath": None,
                "type": "deb",
                "unresolved_vulnerabilities": [],
                "version": "0.9.7-10",
            },
            {
                "name": "datadog-api-client",
                "namespace": "com.datadoghq",
                "purl": "pkg:maven/com.datadoghq/datadog-api-client@1.0.0-beta.7",
                "qualifiers": {},
                "resolved_vulnerabilities": [],
                "subpath": "",
                "type": "maven",
                "unresolved_vulnerabilities": [
                    {
                        "references": [
                            {
                                "reference_id": "",
                                "scores": [],
                                "url": "https://nvd.nist.gov/vuln/detail/CVE-2021-21331",
                            },
                            {
                                "reference_id": "GHSA-2cxf-6567-7pp6",
                                "scores": [{"scoring_system": "cvssv3.1_qr", "value": "LOW"}],
                                "url": "https://github.com/DataDog/datadog-api-client-java/security/advisories/GHSA-2cxf-6567-7pp6",
                            },
                            {
                                "reference_id": "GHSA-2cxf-6567-7pp6",
                                "scores": [],
                                "url": "https://github.com/advisories/GHSA-2cxf-6567-7pp6",
                            },
                        ],
                        "summary": "Local Information Disclosure " "Vulnerability",
                        "url": "http://testserver/api/vulnerabilities/60",
                        "vulnerability_id": "CVE-2021-21331",
                    }
                ],
                "url": "http://testserver/api/packages/3467",
                "version": "1.0.0-beta.7",
            },
        ]
        # Both sides pass through cleaned_response() so list ordering is stable.
        assert cleaned_response(expected_response) == cleaned_response(response)
    def test_invalid_request_bulk_packages(self):
        """Invalid bodies (wrong key, wrong type, bad purl) get explicit errors."""
        error_response = {
            "Error": "A non-empty 'purls' list of package URLs is required."
        }
        invalid_key_request_data = {"pkg": []}
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=invalid_key_request_data,
            content_type="application/json",
        ).data
        assert response == error_response
        valid_key_invalid_datatype_request_data = {"packages": {}}
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=valid_key_invalid_datatype_request_data,
            content_type="application/json",
        ).data
        assert response == error_response
        invalid_purl_request_data = {
            "purls": [
                "pkg:deb/debian/librsync@0.9.7-10?distro=jessie",
                "pg:deb/debian/mimetex@1.50-1.1?distro=jessie",
            ]
        }
        response = self.client.post(
            "/api/packages/bulk_search/",
            data=invalid_purl_request_data,
            content_type="application/json",
        ).data
        # NOTE(review): "purl_error_respones" is a typo for "responses"
        # (local name only, no behavior impact).
        purl_error_respones = {
            "Error": "Invalid Package URL: pg:deb/debian/mimetex@1.50-1.1?distro=jessie"
        }
        assert response == purl_error_respones
| true | true |
f73b45ab3dcd1386e68fd473096beef00d83178e | 801 | py | Python | Codes/gracekoo/401_binary-watch.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 256 | 2017-10-25T13:02:15.000Z | 2022-02-25T13:47:59.000Z | Codes/gracekoo/401_binary-watch.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 56 | 2017-10-27T01:34:20.000Z | 2022-03-01T00:20:55.000Z | Codes/gracekoo/401_binary-watch.py | liuxiaohui1221/algorithm | d80e64185ceb4798ac5389bfbd226dc1d406f6b5 | [
"Apache-2.0"
] | 83 | 2017-10-25T12:51:53.000Z | 2022-02-15T08:27:03.000Z | # -*- coding: utf-8 -*-
# @Time: 2020/4/20 12:38
# @Author: GraceKoo
# @File: 401_binary-watch.py
# @Desc:https://leetcode-cn.com/problems/binary-watch/
from typing import List
class Solution:
    """Solution for LeetCode 401: Binary Watch."""

    def readBinaryWatch(self, num: int) -> List[str]:
        """
        Return every "h:mm" time a binary watch can show with ``num`` LEDs lit.

        Hours range over 0-11 and minutes over 0-59; a time is valid when the
        total number of set bits in the hour plus the minute equals ``num``.
        Results are ordered hour-major, minute-minor.
        """
        return [
            # Minutes are zero-padded to two digits ("3:07"); hours are not.
            f"{hour}:{minute:02d}"
            for hour in range(12)
            for minute in range(60)
            if bin(hour).count("1") + bin(minute).count("1") == num
        ]
# Quick manual check: print the ten times shown with exactly one LED lit.
print(Solution().readBinaryWatch(1))
| 27.62069 | 63 | 0.533084 |
from typing import List
class Solution:
def readBinaryWatch(self, num: int) -> List[str]:
def count_binary_1(i):
return bin(i).count("1")
dict_binary = {i: count_binary_1(i) for i in range(60)}
res = []
for h in range(12):
for m in range(60):
if dict_binary[h] + dict_binary[m] == num:
hour = str(h)
m = str(m) if m > 9 else "0" + str(m)
res.append(hour + ":" + m)
return res
# Quick manual check: print the ten times shown with exactly one LED lit.
print(Solution().readBinaryWatch(1))
| true | true |
f73b45f07187cda26b47f1f1bb05b9510b5120ea | 25,069 | py | Python | tags/commands.py | zephyrkul/phen-cogs | 656cbab522ed085d70850da1a066c170cbbe92ed | [
"MIT"
] | null | null | null | tags/commands.py | zephyrkul/phen-cogs | 656cbab522ed085d70850da1a066c170cbbe92ed | [
"MIT"
] | null | null | null | tags/commands.py | zephyrkul/phen-cogs | 656cbab522ed085d70850da1a066c170cbbe92ed | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import logging
import re
import time
import types
from typing import Dict, List, Optional, Set, Union
from urllib.parse import quote_plus
import bs4
import discord
import TagScriptEngine as tse
from redbot.core import commands
from redbot.core.config import Config
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import humanize_list, inline, pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu, start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
from .abc import MixinMeta
from .blocks import ContextVariableBlock, ConverterBlock
from .converters import (
GlobalTagConverter,
GuildTagConverter,
TagConverter,
TagName,
TagScriptConverter,
)
from .errors import TagFeedbackError
from .objects import Tag
TAG_GUILD_LIMIT = 250
TAG_GLOBAL_LIMIT = 250
TAG_RE = re.compile(r"(?i)(\[p\])?\btag'?s?\b")
DOCS_URL = "https://phen-cogs.readthedocs.io/en/latest/"
log = logging.getLogger("red.phenom4n4n.tags.commands")
def _sub(match: re.Match) -> str:
    """
    Regex replacement callback used by copy_doc to rewrite "tag" wording.

    A match with the "[p]" prefix group becomes "[p]tag global"; any other
    "tag"/"tags" match gets a "global " prefix, title-cased when the matched
    word was title-cased.
    """
    if match.group(1):
        return "[p]tag global"
    name = match.group(0)
    replacement = f"global {name}"
    return replacement.title() if name.istitle() else replacement
def copy_doc(original: Union[commands.Command, types.FunctionType]):
    """
    Decorator factory that copies *original*'s help text onto the decorated
    object, rewriting "tag" references via TAG_RE/_sub (e.g. for the global
    variants of tag commands).
    """
    def decorator(overriden: Union[commands.Command, types.FunctionType]):
        # Commands store their help on .help; plain functions on __doc__.
        doc = original.help if isinstance(original, commands.Command) else original.__doc__
        doc = TAG_RE.sub(_sub, doc)
        if isinstance(overriden, commands.Command):
            # Store on a custom attribute; presumably read by the cog's help
            # formatting — TODO confirm where _help_override is consumed.
            overriden._help_override = doc
        else:
            overriden.__doc__ = doc
        return overriden
    return decorator
class Commands(MixinMeta):
    def __init__(self):
        # Separate interpreter with the context-variable and converter blocks,
        # used for custom-command style processing.
        self.custom_command_engine = tse.Interpreter([ContextVariableBlock(), ConverterBlock()])
        super().__init__()
@staticmethod
def generate_tag_list(tags: Set[Tag]) -> Dict[str, List[str]]:
aliases = []
description = []
for tag in tags:
aliases.extend(tag.aliases)
tagscript = tag.tagscript.replace("\n", " ")
if len(tagscript) > 23:
tagscript = tagscript[:20] + "..."
tagscript = discord.utils.escape_markdown(tagscript)
description.append(f"`{tag}` - {tagscript}")
return {"aliases": aliases, "description": description}
@commands.command(usage="<tag_name> [args]")
async def invoketag(
self,
ctx: commands.Context,
response: Optional[bool],
tag_name: str,
*,
args: Optional[str] = "",
):
"""
Manually invoke a tag with its name and arguments.
Restricting this command with permissions in servers will restrict all members from invoking tags.
**Examples:**
`[p]invoketag searchitem trophy`
`[p]invoketag donate`
"""
response = response or True
try:
_tag = await TagConverter(check_global=True).convert(ctx, tag_name)
except commands.BadArgument as e:
if response is True:
await ctx.send(e)
else:
seed = {"args": tse.StringAdapter(args)}
await self.process_tag(ctx, _tag, seed_variables=seed)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def tags(self, ctx: commands.Context):
"""
View all tags and aliases.
This command will show global tags if run in DMs.
**Example:**
`[p]tags`
"""
guild = ctx.guild
path = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache
if not path:
return await ctx.send(
"This server has no tags." if guild else "No global tags have been added."
)
tags = path.keys()
title = f"Tags in {guild}" if guild else "Global Tags"
embed = discord.Embed(color=await ctx.embed_color(), title=title)
footer = f"{len(tags)} tags"
embeds = []
description = humanize_list([inline(tag) for tag in tags])
pages = list(pagify(description))
for index, page in enumerate(pages, 1):
e = embed.copy()
e.description = page
e.set_footer(text=f"{index}/{len(pages)} | {footer}")
embeds.append(e)
await menu(ctx, embeds, DEFAULT_CONTROLS)
    # Parent command group; subcommands below implement the behavior.
    # The docstring doubles as the user-facing help text shown by Discord.
    @commands.guild_only()
    @commands.group(aliases=["customcom", "cc", "alias"])
    async def tag(self, ctx: commands.Context):
        """
        Tag management with TagScript.
        These commands use TagScriptEngine.
        Read the [TagScript documentation](https://phen-cogs.readthedocs.io/en/latest/) to learn how to use TagScript blocks.
        """
    @commands.mod_or_permissions(manage_guild=True)
    @tag.command(name="add", aliases=["create", "+"])
    async def tag_add(
        self,
        ctx: commands.Context,
        tag_name: TagName(allow_named_tags=True),
        *,
        tagscript: TagScriptConverter,
    ):
        """
        Add a tag with TagScript.
        [Tag usage guide](https://phen-cogs.readthedocs.io/en/latest/blocks.html#usage)
        **Example:**
        `[p]tag add lawsofmotion {embed(title):Newton's Laws of motion}
        {embed(description): According to all known laws of aviation, there is no way a bee should be able to fly.`
        """
        # Creation/overwrite confirmation logic is shared with global tags.
        await self.create_tag(ctx, tag_name, tagscript)
def validate_tag_count(self, guild: discord.Guild):
tag_count = len(self.get_unique_tags(guild))
if guild:
if tag_count >= TAG_GUILD_LIMIT:
raise TagFeedbackError(
f"This server has reached the limit of **{TAG_GUILD_LIMIT}** tags."
)
else:
if tag_count >= TAG_GLOBAL_LIMIT:
raise TagFeedbackError(
f"You have reached the limit of **{TAG_GLOBAL_LIMIT}** global tags."
)
    async def create_tag(
        self, ctx: commands.Context, tag_name: str, tagscript: str, *, global_tag: bool = False
    ):
        """
        Create (or, after confirmation, overwrite) a guild or global tag.

        Checks the relevant tag limit first; if a tag with this name already
        exists, asks the invoker for a yes/no reaction before editing it.
        """
        kwargs = {"author_id": ctx.author.id}
        if global_tag:
            guild = None
            tag = self.get_tag(None, tag_name, global_priority=True)
        else:
            guild = ctx.guild
            tag = self.get_tag(guild, tag_name, check_global=False)
            kwargs["guild_id"] = guild.id
        # Raises TagFeedbackError if the guild/global limit is reached.
        self.validate_tag_count(guild)
        if tag:
            # Existing tag: require explicit confirmation before overwriting.
            tag_prefix = tag.name_prefix
            msg = await ctx.send(
                f"`{tag_name}` is already a registered {tag_prefix.lower()}. Would you like to overwrite it?"
            )
            start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
            pred = ReactionPredicate.yes_or_no(msg, ctx.author)
            try:
                await ctx.bot.wait_for("reaction_add", check=pred, timeout=30)
            except asyncio.TimeoutError:
                # No reaction within 30 seconds counts as a refusal.
                return await ctx.send(f"{tag_prefix} edit cancelled.")
            if pred.result is False:
                return await ctx.send(f"{tag_prefix} edit cancelled.")
            await ctx.send(await tag.edit_tagscript(tagscript))
            return
        tag = Tag(self, tag_name, tagscript, **kwargs)
        await ctx.send(await tag.initialize())
    @commands.mod_or_permissions(manage_guild=True)
    @tag.command(name="alias")
    async def tag_alias(self, ctx: commands.Context, tag: GuildTagConverter, alias: TagName):
        """
        Add an alias for a tag.
        Adding an alias to the tag will make the tag invokable using the alias or the tag name.
        In the example below, running `[p]donation` will invoke the `donate` tag.
        **Example:**
        `[p]tag alias donate donation`
        """
        # Tag.add_alias returns the user-facing confirmation message.
        await ctx.send(await tag.add_alias(alias))
    @commands.mod_or_permissions(manage_guild=True)
    @tag.command(name="unalias")
    async def tag_unalias(
        self, ctx: commands.Context, tag: GuildTagConverter, alias: TagName(allow_named_tags=True)
    ):
        """
        Remove an alias for a tag.
        The tag will still be able to be used under its original name.
        You can delete the original tag with the `[p]tag remove` command.
        **Example:**
        `tag unalias donate donation`
        """
        # Tag.remove_alias returns the user-facing confirmation message.
        await ctx.send(await tag.remove_alias(alias))
    @commands.mod_or_permissions(manage_guild=True)
    @tag.command(name="edit", aliases=["e"])
    async def tag_edit(
        self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter
    ):
        """
        Edit a tag's TagScript.
        The passed tagscript will replace the tag's current tagscript.
        View the [TagScript docs](https://phen-cogs.readthedocs.io/en/latest/blocks.html) to find information on how to write valid tagscript.
        **Example:**
        `[p]tag edit rickroll Never gonna give you up!`
        """
        # TagScriptConverter has already validated the script at this point.
        await ctx.send(await tag.edit_tagscript(tagscript))
    @commands.mod_or_permissions(manage_guild=True)
    @tag.command(name="remove", aliases=["delete", "-"])
    async def tag_remove(self, ctx: commands.Context, tag: GuildTagConverter):
        """
        Permanently delete a tag.
        If you want to remove a tag's alias, use `[p]tag unalias`.
        **Example:**
        `[p]tag remove RickRoll`
        """
        # Tag.delete returns the user-facing confirmation message.
        await ctx.send(await tag.delete())
@tag.command(name="info")
async def tag_info(self, ctx: commands.Context, tag: TagConverter):
"""
Show information about a tag.
You can view meta information for a tag on this server or a global tag.
If a tag on this server has the same name as a global tag, it will show the server tag.
**Example:**
`[p]tag info notsupport`
"""
await tag.send_info(ctx)
@tag.command(name="raw")
async def tag_raw(self, ctx: commands.Context, tag: GuildTagConverter):
"""
Get a tag's raw content.
The sent TagScript will be escaped from Discord style formatting characters.
**Example:**
`[p]tag raw noping`
"""
await tag.send_raw_tagscript(ctx)
@tag.command(name="list")
async def tag_list(self, ctx: commands.Context):
"""
View all stored tags on this server.
To view info on a specific tag, use `[p]tag info`.
**Example:**
`[p]tag list`
"""
tags = self.get_unique_tags(ctx.guild)
if not tags:
return await ctx.send("There are no stored tags on this server.")
data = self.generate_tag_list(tags)
aliases = data["aliases"]
description = data["description"]
e = discord.Embed(color=await ctx.embed_color())
e.set_author(name="Stored Tags", icon_url=ctx.guild.icon_url)
embeds = []
pages = list(pagify("\n".join(description)))
footer = f"{len(tags)} tags | {len(aliases)} aliases"
for index, page in enumerate(pages, 1):
embed = e.copy()
embed.description = page
embed.set_footer(text=f"{index}/{len(pages)} | {footer}")
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
    async def doc_fetch(self):
        """Download and cache all anchor tags from the TagScript docs index page."""
        # from https://github.com/eunwoo1104/slash-bot/blob/8162fd5a0b6ac6c372486438e498a3140b5970bb/modules/sphinx_parser.py#L5
        async with self.session.get(f"{DOCS_URL}genindex.html") as response:
            text = await response.read()
        soup = bs4.BeautifulSoup(text, "html.parser")
        # Cached for later searches by doc_search.
        self.docs = soup.findAll("a")
async def doc_search(self, keyword: str) -> List[bs4.Tag]:
keyword = keyword.lower()
if not self.docs:
await self.doc_fetch()
return [x for x in self.docs if keyword in str(x).lower()]
@tag.command(name="docs")
async def tag_docs(self, ctx: commands.Context, keyword: str = None):
"""
Search the TagScript documentation for a block.
https://phen-cogs.readthedocs.io/en/latest/
**Example:**
`[p]tag docs embed`
"""
await ctx.trigger_typing()
e = discord.Embed(color=await ctx.embed_color(), title="Tags Documentation")
if keyword:
doc_tags = await self.doc_search(keyword)
description = [f"Search for: `{keyword}`"]
for doc_tag in doc_tags:
href = doc_tag.get("href")
description.append(f"[`{doc_tag.text}`]({DOCS_URL}{href})")
url = f"{DOCS_URL}search.html?q={quote_plus(keyword)}&check_keywords=yes&area=default"
e.url = url
embeds = []
description = "\n".join(description)
for page in pagify(description):
embed = e.copy()
embed.description = page
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
else:
e.url = DOCS_URL
await ctx.send(embed=e)
@commands.is_owner()
@tag.command(name="run", aliases=["execute"])
async def tag_run(self, ctx: commands.Context, *, tagscript: str):
"""
Execute TagScript without storing.
The variables and actions fields display debugging information.
**Example:**
`[p]tag run {#:yes,no}`
"""
start = time.monotonic()
seed = self.get_seed_from_context(ctx)
output = self.engine.process(tagscript, seed_variables=seed)
end = time.monotonic()
actions = output.actions
content = output.body[:2000] if output.body else None
await self.send_tag_response(ctx, actions, content)
e = discord.Embed(
color=await ctx.embed_color(),
title="TagScriptEngine",
description=f"Executed in **{round((end - start) * 1000, 3)}** ms",
)
for page in pagify(tagscript, page_length=1024):
e.add_field(name="Input", value=page, inline=False)
if actions:
e.add_field(name="Actions", value=actions, inline=False)
if output.variables:
variables = "\n".join(
f"`{name}`: {type(adapter).__name__}" for name, adapter in output.variables.items()
)
for page in pagify(variables, page_length=1024):
e.add_field(name="Variables", value=page, inline=False)
await ctx.send(embed=e)
@commands.is_owner()
@tag.command(name="process")
async def tag_process(self, ctx: commands.Context, *, tagscript: str):
"""
Process a temporary Tag without storing.
This differs from `[p]tag run` as it creates a fake tag and properly handles actions for all blocks.
The `{args}` block is not supported.
**Example:**
`[p]tag run {require(Admin):You must be admin to use this tag.} Congrats on being an admin!`
"""
tag = Tag(
self,
"processed_tag",
tagscript,
author_id=ctx.author.id,
real=False,
)
await self.process_tag(ctx, tag)
await ctx.tick()
@commands.is_owner()
@tag.group(name="global")
@copy_doc(tag)
async def tag_global(self, ctx: commands.Context):
pass
@tag_global.command(name="add", aliases=["create", "+"])
@copy_doc(tag_add)
async def tag_global_add(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True),
*,
tagscript: TagScriptConverter,
):
await self.create_tag(ctx, tag_name, tagscript, global_tag=True)
@tag_global.command(name="alias")
@copy_doc(tag_alias)
async def tag_global_alias(
self, ctx: commands.Context, tag: GlobalTagConverter, alias: TagName
):
await ctx.send(await tag.add_alias(alias))
@tag_global.command(name="unalias")
@copy_doc(tag_unalias)
async def tag_global_unalias(
self, ctx: commands.Context, tag: GlobalTagConverter, alias: TagName(allow_named_tags=True)
):
await ctx.send(await tag.remove_alias(alias))
@tag_global.command(name="edit", aliases=["e"])
@copy_doc(tag_edit)
async def tag_global_edit(
self,
ctx: commands.Context,
tag: GlobalTagConverter,
*,
tagscript: TagScriptConverter,
):
await ctx.send(await tag.edit_tagscript(tagscript))
@tag_global.command(name="remove", aliases=["delete", "-"])
@copy_doc(tag_remove)
async def tag_global_remove(self, ctx: commands.Context, tag: GlobalTagConverter):
await ctx.send(await tag.delete())
@tag_global.command(name="raw")
@copy_doc(tag_raw)
async def tag_global_raw(self, ctx: commands.Context, tag: GlobalTagConverter):
await tag.send_raw_tagscript(ctx)
@tag_global.command(name="list")
@copy_doc(tag_list)
async def tag_global_list(self, ctx: commands.Context):
tags = self.get_unique_tags()
if not tags:
return await ctx.send("There are no global tags.")
data = self.generate_tag_list(tags)
aliases = data["aliases"]
description = data["description"]
e = discord.Embed(color=await ctx.embed_color())
e.set_author(name="Global Tags", icon_url=ctx.me.avatar_url)
embeds = []
pages = list(pagify("\n".join(description)))
footer = f"{len(tags)} tags | {len(aliases)} aliases"
for index, page in enumerate(pages, 1):
embed = e.copy()
embed.description = page
embed.set_footer(text=f"{index}/{len(pages)} | {footer}")
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.is_owner()
@commands.command()
async def migratealias(self, ctx: commands.Context):
"""
Migrate the Alias cog's global and server aliases into tags.
This converts all aliases created with the Alias cog into tags with command blocks.
This action cannot be undone.
**Example:**
`[p]migratealias`
"""
await ctx.send(f"Are you sure you want to migrate Alias data to tags? (Y/n)")
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=30)
except asyncio.TimeoutError:
return await ctx.send("Query timed out, not migrating alias to tags.")
if pred.result is False:
return await ctx.send("Migration cancelled.")
migrated_guilds = 0
migrated_guild_alias = 0
alias_config = Config.get_conf(
None, 8927348724, cog_name="Alias" # core cog doesn't use force_registration=True smh
) # Red can't change these values without breaking data
# so while this is sus it is technically safe to use
alias_config.register_global(entries=[])
all_guild_data: dict = await alias_config.all_guilds()
async for guild_data in AsyncIter(all_guild_data.values(), steps=100):
if not guild_data["entries"]:
continue
migrated_guilds += 1
for alias in guild_data["entries"]:
tagscript = "{c:%s {args}}" % alias["command"]
tag = Tag(
self,
alias["name"],
tagscript,
author_id=alias["creator"],
guild_id=alias["guild"],
uses=alias["uses"],
)
await tag.initialize()
migrated_guild_alias += 1
await ctx.send(
f"Migrated {migrated_guild_alias} aliases from {migrated_guilds} "
"servers to tags. Moving on to global aliases.."
)
migrated_global_alias = 0
async for entry in AsyncIter(await alias_config.entries(), steps=50):
tagscript = "{c:%s {args}}" % entry["command"]
global_tag = Tag(
self,
entry["name"],
tagscript,
author_id=entry["creator"],
uses=entry["uses"],
)
await global_tag.initialize()
migrated_global_alias += 1
await ctx.send(f"Migrated {migrated_global_alias} global aliases to tags.")
def parse_cc_text(self, content: str) -> str:
output = self.custom_command_engine.process(content)
return output.body
def convert_customcommand(self, guild_id: int, name: str, custom_command: dict) -> Tag:
author_id = custom_command.get("author", {"id": None})["id"]
response = custom_command["response"]
if isinstance(response, str):
tagscript = self.parse_cc_text(response)
else:
tag_lines = []
indices = []
for index, response_text in enumerate(response, 1):
script = self.parse_cc_text(response_text)
tag_lines.append("{=(choice.%s):%s}" % (index, script))
indices.append(index)
random_block = "{#:%s}" % ",".join(str(i) for i in indices)
tag_lines.append("{=(chosen):%s}" % random_block)
tag_lines.append("{choice.{chosen}}")
tagscript = "\n".join(tag_lines)
return Tag(self, name, tagscript, guild_id=guild_id, author_id=author_id)
@commands.is_owner()
@commands.command(aliases=["migratecustomcommands"])
async def migratecustomcom(self, ctx: commands.Context):
"""
Migrate the CustomCommand cog's server commands into tags.
This converts all custom commands created into tags with the command text as TagScript.
Randomized commands are converted into random blocks.
Commands with converters are converted into indexed args blocks.
This action cannot be undone.
**Example:**
`[p]migratecustomcom`
"""
await ctx.send(f"Are you sure you want to migrate CustomCommands data to tags? (Y/n)")
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=30)
except asyncio.TimeoutError:
return await ctx.send("Query timed out, not migrating CustomCommands to tags.")
if pred.result is False:
return await ctx.send("Migration cancelled.")
cc_config = Config.get_conf(None, 414589031223512, cog_name="CustomCommands")
migrated_guilds = 0
migrated_ccs = 0
all_guild_data: dict = await cc_config.all_guilds()
async for guild_id, guild_data in AsyncIter(all_guild_data.items(), steps=100):
if not guild_data["commands"]:
continue
migrated_guilds += 1
for name, command in guild_data["commands"].items():
if not command:
continue # some keys in custom commands config are None instead of being deleted
try:
tag = self.convert_customcommand(guild_id, name, command)
except Exception as exc:
log.exception(
"An exception occured while converting custom command %s (%r) from guild %s"
% (name, command, guild_id),
exc_info=exc,
)
return await ctx.send(
f"An exception occured while converting custom command `{name}` from "
f"server {guild_id}. Check your logs for more details and report this to the cog author."
)
await tag.initialize()
migrated_ccs += 1
await ctx.send(
f"Migrated {migrated_ccs} custom commands from {migrated_guilds} servers to tags."
)
| 36.704246 | 142 | 0.610475 |
import asyncio
import logging
import re
import time
import types
from typing import Dict, List, Optional, Set, Union
from urllib.parse import quote_plus
import bs4
import discord
import TagScriptEngine as tse
from redbot.core import commands
from redbot.core.config import Config
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import humanize_list, inline, pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu, start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
from .abc import MixinMeta
from .blocks import ContextVariableBlock, ConverterBlock
from .converters import (
GlobalTagConverter,
GuildTagConverter,
TagConverter,
TagName,
TagScriptConverter,
)
from .errors import TagFeedbackError
from .objects import Tag
TAG_GUILD_LIMIT = 250
TAG_GLOBAL_LIMIT = 250
TAG_RE = re.compile(r"(?i)(\[p\])?\btag'?s?\b")
DOCS_URL = "https://phen-cogs.readthedocs.io/en/latest/"
log = logging.getLogger("red.phenom4n4n.tags.commands")
def _sub(match: re.Match) -> str:
if match.group(1):
return "[p]tag global"
repl = "global "
name = match.group(0)
repl += name
if name.istitle():
repl = repl.title()
return repl
def copy_doc(original: Union[commands.Command, types.FunctionType]):
def decorator(overriden: Union[commands.Command, types.FunctionType]):
doc = original.help if isinstance(original, commands.Command) else original.__doc__
doc = TAG_RE.sub(_sub, doc)
if isinstance(overriden, commands.Command):
overriden._help_override = doc
else:
overriden.__doc__ = doc
return overriden
return decorator
class Commands(MixinMeta):
def __init__(self):
self.custom_command_engine = tse.Interpreter([ContextVariableBlock(), ConverterBlock()])
super().__init__()
@staticmethod
def generate_tag_list(tags: Set[Tag]) -> Dict[str, List[str]]:
aliases = []
description = []
for tag in tags:
aliases.extend(tag.aliases)
tagscript = tag.tagscript.replace("\n", " ")
if len(tagscript) > 23:
tagscript = tagscript[:20] + "..."
tagscript = discord.utils.escape_markdown(tagscript)
description.append(f"`{tag}` - {tagscript}")
return {"aliases": aliases, "description": description}
@commands.command(usage="<tag_name> [args]")
async def invoketag(
self,
ctx: commands.Context,
response: Optional[bool],
tag_name: str,
*,
args: Optional[str] = "",
):
response = response or True
try:
_tag = await TagConverter(check_global=True).convert(ctx, tag_name)
except commands.BadArgument as e:
if response is True:
await ctx.send(e)
else:
seed = {"args": tse.StringAdapter(args)}
await self.process_tag(ctx, _tag, seed_variables=seed)
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def tags(self, ctx: commands.Context):
guild = ctx.guild
path = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache
if not path:
return await ctx.send(
"This server has no tags." if guild else "No global tags have been added."
)
tags = path.keys()
title = f"Tags in {guild}" if guild else "Global Tags"
embed = discord.Embed(color=await ctx.embed_color(), title=title)
footer = f"{len(tags)} tags"
embeds = []
description = humanize_list([inline(tag) for tag in tags])
pages = list(pagify(description))
for index, page in enumerate(pages, 1):
e = embed.copy()
e.description = page
e.set_footer(text=f"{index}/{len(pages)} | {footer}")
embeds.append(e)
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.guild_only()
@commands.group(aliases=["customcom", "cc", "alias"])
async def tag(self, ctx: commands.Context):
@commands.mod_or_permissions(manage_guild=True)
@tag.command(name="add", aliases=["create", "+"])
async def tag_add(
self,
ctx: commands.Context,
tag_name: TagName(allow_named_tags=True),
*,
tagscript: TagScriptConverter,
):
await self.create_tag(ctx, tag_name, tagscript)
def validate_tag_count(self, guild: discord.Guild):
tag_count = len(self.get_unique_tags(guild))
if guild:
if tag_count >= TAG_GUILD_LIMIT:
raise TagFeedbackError(
f"This server has reached the limit of **{TAG_GUILD_LIMIT}** tags."
)
else:
if tag_count >= TAG_GLOBAL_LIMIT:
raise TagFeedbackError(
f"You have reached the limit of **{TAG_GLOBAL_LIMIT}** global tags."
)
async def create_tag(
self, ctx: commands.Context, tag_name: str, tagscript: str, *, global_tag: bool = False
):
kwargs = {"author_id": ctx.author.id}
if global_tag:
guild = None
tag = self.get_tag(None, tag_name, global_priority=True)
else:
guild = ctx.guild
tag = self.get_tag(guild, tag_name, check_global=False)
kwargs["guild_id"] = guild.id
self.validate_tag_count(guild)
if tag:
tag_prefix = tag.name_prefix
msg = await ctx.send(
f"`{tag_name}` is already a registered {tag_prefix.lower()}. Would you like to overwrite it?"
)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=30)
except asyncio.TimeoutError:
return await ctx.send(f"{tag_prefix} edit cancelled.")
if pred.result is False:
return await ctx.send(f"{tag_prefix} edit cancelled.")
await ctx.send(await tag.edit_tagscript(tagscript))
return
tag = Tag(self, tag_name, tagscript, **kwargs)
await ctx.send(await tag.initialize())
@commands.mod_or_permissions(manage_guild=True)
@tag.command(name="alias")
async def tag_alias(self, ctx: commands.Context, tag: GuildTagConverter, alias: TagName):
await ctx.send(await tag.add_alias(alias))
@commands.mod_or_permissions(manage_guild=True)
@tag.command(name="unalias")
async def tag_unalias(
self, ctx: commands.Context, tag: GuildTagConverter, alias: TagName(allow_named_tags=True)
):
await ctx.send(await tag.remove_alias(alias))
@commands.mod_or_permissions(manage_guild=True)
@tag.command(name="edit", aliases=["e"])
async def tag_edit(
self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter
):
await ctx.send(await tag.edit_tagscript(tagscript))
@commands.mod_or_permissions(manage_guild=True)
@tag.command(name="remove", aliases=["delete", "-"])
async def tag_remove(self, ctx: commands.Context, tag: GuildTagConverter):
await ctx.send(await tag.delete())
@tag.command(name="info")
async def tag_info(self, ctx: commands.Context, tag: TagConverter):
await tag.send_info(ctx)
@tag.command(name="raw")
async def tag_raw(self, ctx: commands.Context, tag: GuildTagConverter):
await tag.send_raw_tagscript(ctx)
@tag.command(name="list")
async def tag_list(self, ctx: commands.Context):
tags = self.get_unique_tags(ctx.guild)
if not tags:
return await ctx.send("There are no stored tags on this server.")
data = self.generate_tag_list(tags)
aliases = data["aliases"]
description = data["description"]
e = discord.Embed(color=await ctx.embed_color())
e.set_author(name="Stored Tags", icon_url=ctx.guild.icon_url)
embeds = []
pages = list(pagify("\n".join(description)))
footer = f"{len(tags)} tags | {len(aliases)} aliases"
for index, page in enumerate(pages, 1):
embed = e.copy()
embed.description = page
embed.set_footer(text=f"{index}/{len(pages)} | {footer}")
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
async def doc_fetch(self):
# from https://github.com/eunwoo1104/slash-bot/blob/8162fd5a0b6ac6c372486438e498a3140b5970bb/modules/sphinx_parser.py#L5
async with self.session.get(f"{DOCS_URL}genindex.html") as response:
text = await response.read()
soup = bs4.BeautifulSoup(text, "html.parser")
self.docs = soup.findAll("a")
async def doc_search(self, keyword: str) -> List[bs4.Tag]:
keyword = keyword.lower()
if not self.docs:
await self.doc_fetch()
return [x for x in self.docs if keyword in str(x).lower()]
@tag.command(name="docs")
async def tag_docs(self, ctx: commands.Context, keyword: str = None):
await ctx.trigger_typing()
e = discord.Embed(color=await ctx.embed_color(), title="Tags Documentation")
if keyword:
doc_tags = await self.doc_search(keyword)
description = [f"Search for: `{keyword}`"]
for doc_tag in doc_tags:
href = doc_tag.get("href")
description.append(f"[`{doc_tag.text}`]({DOCS_URL}{href})")
url = f"{DOCS_URL}search.html?q={quote_plus(keyword)}&check_keywords=yes&area=default"
e.url = url
embeds = []
description = "\n".join(description)
for page in pagify(description):
embed = e.copy()
embed.description = page
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
else:
e.url = DOCS_URL
await ctx.send(embed=e)
@commands.is_owner()
@tag.command(name="run", aliases=["execute"])
async def tag_run(self, ctx: commands.Context, *, tagscript: str):
start = time.monotonic()
seed = self.get_seed_from_context(ctx)
output = self.engine.process(tagscript, seed_variables=seed)
end = time.monotonic()
actions = output.actions
content = output.body[:2000] if output.body else None
await self.send_tag_response(ctx, actions, content)
e = discord.Embed(
color=await ctx.embed_color(),
title="TagScriptEngine",
description=f"Executed in **{round((end - start) * 1000, 3)}** ms",
)
for page in pagify(tagscript, page_length=1024):
e.add_field(name="Input", value=page, inline=False)
if actions:
e.add_field(name="Actions", value=actions, inline=False)
if output.variables:
variables = "\n".join(
f"`{name}`: {type(adapter).__name__}" for name, adapter in output.variables.items()
)
for page in pagify(variables, page_length=1024):
e.add_field(name="Variables", value=page, inline=False)
await ctx.send(embed=e)
@commands.is_owner()
@tag.command(name="process")
async def tag_process(self, ctx: commands.Context, *, tagscript: str):
tag = Tag(
self,
"processed_tag",
tagscript,
author_id=ctx.author.id,
real=False,
)
await self.process_tag(ctx, tag)
await ctx.tick()
@commands.is_owner()
@tag.group(name="global")
@copy_doc(tag)
async def tag_global(self, ctx: commands.Context):
pass
@tag_global.command(name="add", aliases=["create", "+"])
@copy_doc(tag_add)
async def tag_global_add(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True),
*,
tagscript: TagScriptConverter,
):
await self.create_tag(ctx, tag_name, tagscript, global_tag=True)
@tag_global.command(name="alias")
@copy_doc(tag_alias)
async def tag_global_alias(
self, ctx: commands.Context, tag: GlobalTagConverter, alias: TagName
):
await ctx.send(await tag.add_alias(alias))
@tag_global.command(name="unalias")
@copy_doc(tag_unalias)
async def tag_global_unalias(
self, ctx: commands.Context, tag: GlobalTagConverter, alias: TagName(allow_named_tags=True)
):
await ctx.send(await tag.remove_alias(alias))
@tag_global.command(name="edit", aliases=["e"])
@copy_doc(tag_edit)
async def tag_global_edit(
self,
ctx: commands.Context,
tag: GlobalTagConverter,
*,
tagscript: TagScriptConverter,
):
await ctx.send(await tag.edit_tagscript(tagscript))
@tag_global.command(name="remove", aliases=["delete", "-"])
@copy_doc(tag_remove)
async def tag_global_remove(self, ctx: commands.Context, tag: GlobalTagConverter):
await ctx.send(await tag.delete())
@tag_global.command(name="raw")
@copy_doc(tag_raw)
async def tag_global_raw(self, ctx: commands.Context, tag: GlobalTagConverter):
await tag.send_raw_tagscript(ctx)
@tag_global.command(name="list")
@copy_doc(tag_list)
async def tag_global_list(self, ctx: commands.Context):
tags = self.get_unique_tags()
if not tags:
return await ctx.send("There are no global tags.")
data = self.generate_tag_list(tags)
aliases = data["aliases"]
description = data["description"]
e = discord.Embed(color=await ctx.embed_color())
e.set_author(name="Global Tags", icon_url=ctx.me.avatar_url)
embeds = []
pages = list(pagify("\n".join(description)))
footer = f"{len(tags)} tags | {len(aliases)} aliases"
for index, page in enumerate(pages, 1):
embed = e.copy()
embed.description = page
embed.set_footer(text=f"{index}/{len(pages)} | {footer}")
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.is_owner()
@commands.command()
async def migratealias(self, ctx: commands.Context):
await ctx.send(f"Are you sure you want to migrate Alias data to tags? (Y/n)")
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=30)
except asyncio.TimeoutError:
return await ctx.send("Query timed out, not migrating alias to tags.")
if pred.result is False:
return await ctx.send("Migration cancelled.")
migrated_guilds = 0
migrated_guild_alias = 0
alias_config = Config.get_conf(
None, 8927348724, cog_name="Alias" # core cog doesn't use force_registration=True smh
)
# so while this is sus it is technically safe to use
alias_config.register_global(entries=[])
all_guild_data: dict = await alias_config.all_guilds()
async for guild_data in AsyncIter(all_guild_data.values(), steps=100):
if not guild_data["entries"]:
continue
migrated_guilds += 1
for alias in guild_data["entries"]:
tagscript = "{c:%s {args}}" % alias["command"]
tag = Tag(
self,
alias["name"],
tagscript,
author_id=alias["creator"],
guild_id=alias["guild"],
uses=alias["uses"],
)
await tag.initialize()
migrated_guild_alias += 1
await ctx.send(
f"Migrated {migrated_guild_alias} aliases from {migrated_guilds} "
"servers to tags. Moving on to global aliases.."
)
migrated_global_alias = 0
async for entry in AsyncIter(await alias_config.entries(), steps=50):
tagscript = "{c:%s {args}}" % entry["command"]
global_tag = Tag(
self,
entry["name"],
tagscript,
author_id=entry["creator"],
uses=entry["uses"],
)
await global_tag.initialize()
migrated_global_alias += 1
await ctx.send(f"Migrated {migrated_global_alias} global aliases to tags.")
def parse_cc_text(self, content: str) -> str:
output = self.custom_command_engine.process(content)
return output.body
def convert_customcommand(self, guild_id: int, name: str, custom_command: dict) -> Tag:
author_id = custom_command.get("author", {"id": None})["id"]
response = custom_command["response"]
if isinstance(response, str):
tagscript = self.parse_cc_text(response)
else:
tag_lines = []
indices = []
for index, response_text in enumerate(response, 1):
script = self.parse_cc_text(response_text)
tag_lines.append("{=(choice.%s):%s}" % (index, script))
indices.append(index)
random_block = "{#:%s}" % ",".join(str(i) for i in indices)
tag_lines.append("{=(chosen):%s}" % random_block)
tag_lines.append("{choice.{chosen}}")
tagscript = "\n".join(tag_lines)
return Tag(self, name, tagscript, guild_id=guild_id, author_id=author_id)
@commands.is_owner()
@commands.command(aliases=["migratecustomcommands"])
async def migratecustomcom(self, ctx: commands.Context):
await ctx.send(f"Are you sure you want to migrate CustomCommands data to tags? (Y/n)")
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=30)
except asyncio.TimeoutError:
return await ctx.send("Query timed out, not migrating CustomCommands to tags.")
if pred.result is False:
return await ctx.send("Migration cancelled.")
cc_config = Config.get_conf(None, 414589031223512, cog_name="CustomCommands")
migrated_guilds = 0
migrated_ccs = 0
all_guild_data: dict = await cc_config.all_guilds()
async for guild_id, guild_data in AsyncIter(all_guild_data.items(), steps=100):
if not guild_data["commands"]:
continue
migrated_guilds += 1
for name, command in guild_data["commands"].items():
if not command:
continue # some keys in custom commands config are None instead of being deleted
try:
tag = self.convert_customcommand(guild_id, name, command)
except Exception as exc:
log.exception(
"An exception occured while converting custom command %s (%r) from guild %s"
% (name, command, guild_id),
exc_info=exc,
)
return await ctx.send(
f"An exception occured while converting custom command `{name}` from "
f"server {guild_id}. Check your logs for more details and report this to the cog author."
)
await tag.initialize()
migrated_ccs += 1
await ctx.send(
f"Migrated {migrated_ccs} custom commands from {migrated_guilds} servers to tags."
)
| true | true |
f73b4603424ab56c8da38e2187352b7ea67f7ba4 | 1,087 | py | Python | invenio_app_ils/ill/serializers/custom_fields.py | NRodriguezcuellar/invenio-app-ils | 144a25a6c56330b214c6fd0b832220fa71f2e68a | [
"MIT"
] | 41 | 2018-09-04T13:00:46.000Z | 2022-03-24T20:45:56.000Z | invenio_app_ils/ill/serializers/custom_fields.py | NRodriguezcuellar/invenio-app-ils | 144a25a6c56330b214c6fd0b832220fa71f2e68a | [
"MIT"
] | 720 | 2017-03-10T08:02:41.000Z | 2022-01-14T15:36:37.000Z | invenio_app_ils/ill/serializers/custom_fields.py | NRodriguezcuellar/invenio-app-ils | 144a25a6c56330b214c6fd0b832220fa71f2e68a | [
"MIT"
] | 54 | 2017-03-09T16:05:29.000Z | 2022-03-17T08:34:51.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Loan custom serializer functions."""
from invenio_circulation.proxies import current_circulation
from invenio_pidstore.errors import PIDDeletedError, PIDDoesNotExistError
from invenio_app_ils.records.jsonresolvers.api import pick
def field_loan(metadata):
"""Get the loan object and add it to the metadata."""
loan_pid = metadata.get("patron_loan", {}).get("pid")
if not loan_pid:
return
Loan = current_circulation.loan_record_cls
try:
loan = Loan.get_record_by_pid(loan_pid)
except PIDDeletedError:
metadata["patron_loan"]["loan"] = {"pid": "This loan was deleted."}
return
except PIDDoesNotExistError:
metadata["patron_loan"]["loan"] = {"pid": "Invalid Loan PID."}
return
metadata["patron_loan"]["loan"] = pick(
loan, "pid", "start_date", "end_date", "state", "extension_count"
)
| 32.939394 | 76 | 0.689052 |
from invenio_circulation.proxies import current_circulation
from invenio_pidstore.errors import PIDDeletedError, PIDDoesNotExistError
from invenio_app_ils.records.jsonresolvers.api import pick
def field_loan(metadata):
loan_pid = metadata.get("patron_loan", {}).get("pid")
if not loan_pid:
return
Loan = current_circulation.loan_record_cls
try:
loan = Loan.get_record_by_pid(loan_pid)
except PIDDeletedError:
metadata["patron_loan"]["loan"] = {"pid": "This loan was deleted."}
return
except PIDDoesNotExistError:
metadata["patron_loan"]["loan"] = {"pid": "Invalid Loan PID."}
return
metadata["patron_loan"]["loan"] = pick(
loan, "pid", "start_date", "end_date", "state", "extension_count"
)
| true | true |
f73b4671798894fdb3235d53eb60a03b2707502f | 23,875 | py | Python | tests/integration/test_keeper_back_to_back/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 15,577 | 2019-09-23T11:57:53.000Z | 2022-03-31T18:21:48.000Z | tests/integration/test_keeper_back_to_back/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 16,476 | 2019-09-23T11:47:00.000Z | 2022-03-31T23:06:01.000Z | tests/integration/test_keeper_back_to_back/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 3,633 | 2019-09-23T12:18:28.000Z | 2022-03-31T15:55:48.000Z | import pytest
from helpers.cluster import ClickHouseCluster
import random
import string
import os
import time
from multiprocessing.dummy import Pool
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=['configs/enable_keeper.xml'], with_zookeeper=True, use_keeper=False)
from kazoo.client import KazooClient, KazooState, KeeperState
def get_genuine_zk():
print("Zoo1", cluster.get_instance_ip("zoo1"))
return cluster.get_kazoo_client('zoo1')
def get_fake_zk():
print("node", cluster.get_instance_ip("node"))
_fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181", timeout=30.0)
def reset_last_zxid_listener(state):
print("Fake zk callback called for state", state)
nonlocal _fake_zk_instance
if state != KazooState.CONNECTED:
_fake_zk_instance._reset()
_fake_zk_instance.add_listener(reset_last_zxid_listener)
_fake_zk_instance.start()
return _fake_zk_instance
def random_string(length):
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
def create_random_path(prefix="", depth=1):
if depth == 0:
return prefix
return create_random_path(os.path.join(prefix, random_string(3)), depth - 1)
def stop_zk(zk):
try:
if zk:
zk.stop()
zk.close()
except:
pass
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_simple_commands(started_cluster):
try:
genuine_zk = get_genuine_zk()
fake_zk = get_fake_zk()
for zk in [genuine_zk, fake_zk]:
zk.create("/test_simple_commands", b"")
zk.create("/test_simple_commands/somenode1", b"hello")
zk.set("/test_simple_commands/somenode1", b"world")
for zk in [genuine_zk, fake_zk]:
assert zk.exists("/test_simple_commands")
assert zk.exists("/test_simple_commands/somenode1")
print(zk.get("/test_simple_commands/somenode1"))
assert zk.get("/test_simple_commands/somenode1")[0] == b"world"
finally:
for zk in [genuine_zk, fake_zk]:
stop_zk(zk)
def test_sequential_nodes(started_cluster):
try:
genuine_zk = get_genuine_zk()
fake_zk = get_fake_zk()
genuine_zk.create("/test_sequential_nodes")
fake_zk.create("/test_sequential_nodes")
for i in range(1, 11):
genuine_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True)
genuine_zk.create("/test_sequential_nodes/" + ("b" * i))
fake_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True)
fake_zk.create("/test_sequential_nodes/" + ("b" * i))
genuine_childs = list(sorted(genuine_zk.get_children("/test_sequential_nodes")))
fake_childs = list(sorted(fake_zk.get_children("/test_sequential_nodes")))
assert genuine_childs == fake_childs
genuine_zk.create("/test_sequential_nodes_1")
fake_zk.create("/test_sequential_nodes_1")
genuine_zk.create("/test_sequential_nodes_1/a", sequence=True)
fake_zk.create("/test_sequential_nodes_1/a", sequence=True)
genuine_zk.create("/test_sequential_nodes_1/a0000000002")
fake_zk.create("/test_sequential_nodes_1/a0000000002")
genuine_throw = False
fake_throw = False
try:
genuine_zk.create("/test_sequential_nodes_1/a", sequence=True)
except Exception as ex:
genuine_throw = True
try:
fake_zk.create("/test_sequential_nodes_1/a", sequence=True)
except Exception as ex:
fake_throw = True
assert genuine_throw == True
assert fake_throw == True
genuine_childs_1 = list(sorted(genuine_zk.get_children("/test_sequential_nodes_1")))
fake_childs_1 = list(sorted(fake_zk.get_children("/test_sequential_nodes_1")))
assert genuine_childs_1 == fake_childs_1
genuine_zk.create("/test_sequential_nodes_2")
fake_zk.create("/test_sequential_nodes_2")
genuine_zk.create("/test_sequential_nodes_2/node")
fake_zk.create("/test_sequential_nodes_2/node")
genuine_zk.create("/test_sequential_nodes_2/node", sequence=True)
fake_zk.create("/test_sequential_nodes_2/node", sequence=True)
genuine_childs_2 = list(sorted(genuine_zk.get_children("/test_sequential_nodes_2")))
fake_childs_2 = list(sorted(fake_zk.get_children("/test_sequential_nodes_2")))
assert genuine_childs_2 == fake_childs_2
finally:
for zk in [genuine_zk, fake_zk]:
stop_zk(zk)
def assert_eq_stats(stat1, stat2):
    """Assert that two znode Stat structures agree on the compared fields.

    Only version counters, data length and child count are compared;
    server-specific fields (zxids, timestamps) are deliberately ignored
    because they legitimately differ between Keeper and ZooKeeper.
    """
    assert stat1.version == stat2.version
    assert stat1.cversion == stat2.cversion
    # The original asserted aversion twice; once is enough.
    assert stat1.aversion == stat2.aversion
    assert stat1.dataLength == stat2.dataLength
    assert stat1.numChildren == stat2.numChildren
def test_stats(started_cluster):
    """Check that znode Stat fields stay in sync between genuine ZooKeeper
    and ClickHouse Keeper across creates, deletes and sets."""
    try:
        genuine_zk = get_genuine_zk()
        fake_zk = get_fake_zk()
        genuine_zk.create("/test_stats_nodes")
        fake_zk.create("/test_stats_nodes")
        # exists() returns the Stat of the node (or None).
        genuine_stats = genuine_zk.exists("/test_stats_nodes")
        fake_stats = fake_zk.exists("/test_stats_nodes")
        assert_eq_stats(genuine_stats, fake_stats)
        # Create 10 sequential + 10 plain children on both servers.
        for i in range(1, 11):
            genuine_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True)
            genuine_zk.create("/test_stats_nodes/" + ("b" * i))
            fake_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True)
            fake_zk.create("/test_stats_nodes/" + ("b" * i))
        genuine_stats = genuine_zk.exists("/test_stats_nodes")
        fake_stats = fake_zk.exists("/test_stats_nodes")
        assert_eq_stats(genuine_stats, fake_stats)
        # Delete every child; sequential names carry the 10-digit suffix
        # (i-1)*2 because creates alternate sequential/plain.
        for i in range(1, 11):
            print("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2))
            genuine_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2))
            genuine_zk.delete("/test_stats_nodes/" + ("b" * i))
            fake_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2))
            fake_zk.delete("/test_stats_nodes/" + ("b" * i))
        genuine_stats = genuine_zk.exists("/test_stats_nodes")
        fake_stats = fake_zk.exists("/test_stats_nodes")
        print(genuine_stats)
        print(fake_stats)
        assert_eq_stats(genuine_stats, fake_stats)
        # 100 sets of growing payloads bump version and dataLength in lockstep.
        for i in range(100):
            genuine_zk.set("/test_stats_nodes", ("q" * i).encode())
            fake_zk.set("/test_stats_nodes", ("q" * i).encode())
        genuine_stats = genuine_zk.exists("/test_stats_nodes")
        fake_stats = fake_zk.exists("/test_stats_nodes")
        print(genuine_stats)
        print(fake_stats)
        assert_eq_stats(genuine_stats, fake_stats)
    finally:
        for zk in [genuine_zk, fake_zk]:
            stop_zk(zk)
def test_watchers(started_cluster):
    """Compare data watches and child watches between genuine ZooKeeper and
    ClickHouse Keeper: the same events must fire with the same payloads."""
    try:
        genuine_zk = get_genuine_zk()
        fake_zk = get_fake_zk()
        genuine_zk.create("/test_data_watches")
        fake_zk.create("/test_data_watches")
        genuine_data_watch_data = None
        def genuine_callback(event):
            print("Genuine data watch called")
            nonlocal genuine_data_watch_data
            genuine_data_watch_data = event
        fake_data_watch_data = None
        def fake_callback(event):
            print("Fake data watch called")
            nonlocal fake_data_watch_data
            fake_data_watch_data = event
        # get(..., watch=...) arms a one-shot data watch on the node.
        genuine_zk.get("/test_data_watches", watch=genuine_callback)
        fake_zk.get("/test_data_watches", watch=fake_callback)
        print("Calling set genuine")
        genuine_zk.set("/test_data_watches", b"a")
        print("Calling set fake")
        fake_zk.set("/test_data_watches", b"a")
        # Timing-based: give the async watch callbacks time to fire.
        time.sleep(3)
        print("Genuine data", genuine_data_watch_data)
        print("Fake data", fake_data_watch_data)
        assert genuine_data_watch_data == fake_data_watch_data
        genuine_zk.create("/test_data_watches/child", b"a")
        fake_zk.create("/test_data_watches/child", b"a")
        genuine_children = None
        def genuine_child_callback(event):
            print("Genuine child watch called")
            nonlocal genuine_children
            genuine_children = event
        fake_children = None
        def fake_child_callback(event):
            print("Fake child watch called")
            nonlocal fake_children
            fake_children = event
        genuine_zk.get_children("/test_data_watches", watch=genuine_child_callback)
        fake_zk.get_children("/test_data_watches", watch=fake_child_callback)
        # Data-only changes must NOT trigger a child watch.
        print("Calling non related genuine child")
        genuine_zk.set("/test_data_watches/child", b"q")
        genuine_zk.set("/test_data_watches", b"q")
        print("Calling non related fake child")
        fake_zk.set("/test_data_watches/child", b"q")
        fake_zk.set("/test_data_watches", b"q")
        time.sleep(3)
        assert genuine_children == None
        assert fake_children == None
        # Creating a new child DOES trigger the child watch.
        print("Calling genuine child")
        genuine_zk.create("/test_data_watches/child_new", b"b")
        print("Calling fake child")
        fake_zk.create("/test_data_watches/child_new", b"b")
        time.sleep(3)
        print("Genuine children", genuine_children)
        print("Fake children", fake_children)
        assert genuine_children == fake_children
        genuine_children_delete = None
        def genuine_child_delete_callback(event):
            print("Genuine child watch called")
            nonlocal genuine_children_delete
            genuine_children_delete = event
        fake_children_delete = None
        def fake_child_delete_callback(event):
            print("Fake child watch called")
            nonlocal fake_children_delete
            fake_children_delete = event
        genuine_child_delete = None
        def genuine_own_delete_callback(event):
            print("Genuine child watch called")
            nonlocal genuine_child_delete
            genuine_child_delete = event
        fake_child_delete = None
        def fake_own_delete_callback(event):
            print("Fake child watch called")
            nonlocal fake_child_delete
            fake_child_delete = event
        # Watch both the parent and the child itself, then delete the child:
        # the parent gets a CHILD event, the node gets a DELETED event.
        genuine_zk.get_children("/test_data_watches", watch=genuine_child_delete_callback)
        fake_zk.get_children("/test_data_watches", watch=fake_child_delete_callback)
        genuine_zk.get_children("/test_data_watches/child", watch=genuine_own_delete_callback)
        fake_zk.get_children("/test_data_watches/child", watch=fake_own_delete_callback)
        print("Calling genuine child delete")
        genuine_zk.delete("/test_data_watches/child")
        print("Calling fake child delete")
        fake_zk.delete("/test_data_watches/child")
        time.sleep(3)
        print("Genuine children delete", genuine_children_delete)
        print("Fake children delete", fake_children_delete)
        assert genuine_children_delete == fake_children_delete
        print("Genuine child delete", genuine_child_delete)
        print("Fake child delete", fake_child_delete)
        assert genuine_child_delete == fake_child_delete
    finally:
        for zk in [genuine_zk, fake_zk]:
            stop_zk(zk)
def test_multitransactions(started_cluster):
    """Multi-op transactions must behave identically on both servers,
    including rollback when one op in the batch fails."""
    try:
        genuine_zk = get_genuine_zk()
        fake_zk = get_fake_zk()
        for zk in [genuine_zk, fake_zk]:
            zk.create('/test_multitransactions')
            t = zk.transaction()
            t.create('/test_multitransactions/freddy')
            t.create('/test_multitransactions/fred', ephemeral=True)
            t.create('/test_multitransactions/smith', sequence=True)
            results = t.commit()
            assert len(results) == 3
            assert results[0] == '/test_multitransactions/freddy'
            assert results[2].startswith('/test_multitransactions/smith0') is True
        from kazoo.exceptions import RolledBackError, NoNodeError
        for i, zk in enumerate([genuine_zk, fake_zk]):
            print("Processing ZK", i)
            # The delete of a nonexistent node fails -> whole txn rolls back:
            # earlier ops report RolledBackError, the failing op NoNodeError.
            t = zk.transaction()
            t.create('/test_multitransactions/q')
            t.delete('/test_multitransactions/a')
            t.create('/test_multitransactions/x')
            results = t.commit()
            print("Results", results)
            assert results[0].__class__ == RolledBackError
            assert results[1].__class__ == NoNodeError
            # Nothing from the rolled-back transaction may exist.
            assert zk.exists('/test_multitransactions/q') is None
            assert zk.exists('/test_multitransactions/a') is None
            assert zk.exists('/test_multitransactions/x') is None
    finally:
        for zk in [genuine_zk, fake_zk]:
            stop_zk(zk)
def exists(zk, path):
    """Return True when *path* exists on *zk*, False otherwise."""
    return zk.exists(path) is not None
def get(zk, path):
    """Return only the data payload of *path*, dropping the Stat."""
    return zk.get(path)[0]
def get_children(zk, path):
    """Return the sorted children of *path*, hiding the system nodes."""
    hidden = ('clickhouse', 'zookeeper')
    children = sorted(zk.get_children(path))
    return [child for child in children if child not in hidden]
# (name, callable) pairs for the read-only operations used by the
# randomized workload generator below.
READ_REQUESTS = [
    ("exists", exists),
    ("get", get),
    ("get_children", get_children),
]
def create(zk, path, data):
    """Create *path* on *zk* holding *data* encoded as bytes."""
    payload = data.encode()
    zk.create(path, payload)
def set_data(zk, path, data):
    """Overwrite the payload of *path* with *data* encoded as bytes."""
    payload = data.encode()
    zk.set(path, payload)
# (name, callable) pairs for the mutating operations of the workload.
WRITE_REQUESTS = [
    ("create", create),
    ("set_data", set_data),
]
def delete(zk, path):
    """Remove the node at *path* from *zk*."""
    zk.delete(path)
# (name, callable) pair for the delete operation of the workload.
DELETE_REQUESTS = [
    ("delete", delete)
]
class Request(object):
    """A single recorded ZooKeeper operation for the randomized driver."""

    def __init__(self, name, arguments, callback, is_return):
        self.name = name            # operation name, e.g. "create"
        self.arguments = arguments  # human-readable argument dict for logging
        self.callback = callback    # callable(zk) that performs the operation
        self.is_return = is_return  # True when results must match across servers

    def __str__(self):
        formatted = ', '.join("{}={}".format(key, value) for key, value in self.arguments.items())
        return "ZKRequest name {} with arguments {}".format(self.name, formatted)
def generate_requests(prefix="/", iters=1):
    """Build a randomized mixed workload of ZooKeeper requests.

    Per iteration: 100 creates of randomly nested paths under *prefix*,
    100 sets, 100 reads, 30 deletes and 100 more reads, each targeting a
    path created earlier. Returns the Request objects in generation order.
    """
    requests = []
    existing_paths = []
    for i in range(iters):
        # Create nodes at a random depth (0-9) and remember their paths.
        for _ in range(100):
            rand_length = random.randint(0, 10)
            path = prefix
            for j in range(1, rand_length):
                path = create_random_path(path, 1)
            existing_paths.append(path)
            value = random_string(1000)
            request = Request("create", {"path": path, "value": value[0:10]},
                              lambda zk, path=path, value=value: create(zk, path, value), False)
            requests.append(request)
        for _ in range(100):
            path = random.choice(existing_paths)
            value = random_string(100)
            request = Request("set", {"path": path, "value": value[0:10]},
                              lambda zk, path=path, value=value: set_data(zk, path, value), False)
            requests.append(request)
        for _ in range(100):
            path = random.choice(existing_paths)
            callback = random.choice(READ_REQUESTS)
            def read_func1(zk, path=path, callback=callback):
                return callback[1](zk, path)
            request = Request(callback[0], {"path": path}, read_func1, True)
            requests.append(request)
        for _ in range(30):
            path = random.choice(existing_paths)
            request = Request("delete", {"path": path},
                              lambda zk, path=path: delete(zk, path), False)
            # BUG FIX: the original built the delete request but never
            # queued it, so deletes were silently dropped from the workload.
            requests.append(request)
        for _ in range(100):
            path = random.choice(existing_paths)
            callback = random.choice(READ_REQUESTS)
            def read_func2(zk, path=path, callback=callback):
                return callback[1](zk, path)
            request = Request(callback[0], {"path": path}, read_func2, True)
            requests.append(request)
    return requests
def test_random_requests(started_cluster):
    """Replay one identical random workload against genuine ZooKeeper and
    ClickHouse Keeper; results, raised errors and the final tree must match."""
    try:
        requests = generate_requests("/test_random_requests", 10)
        print("Generated", len(requests), "requests")
        genuine_zk = get_genuine_zk()
        fake_zk = get_fake_zk()
        genuine_zk.create("/test_random_requests")
        fake_zk.create("/test_random_requests")
        for i, request in enumerate(requests):
            genuine_throw = False
            fake_throw = False
            fake_result = None
            genuine_result = None
            try:
                genuine_result = request.callback(genuine_zk)
            except Exception as ex:
                print("i", i, "request", request)
                print("Genuine exception", str(ex))
                genuine_throw = True
            try:
                fake_result = request.callback(fake_zk)
            except Exception as ex:
                print("i", i, "request", request)
                print("Fake exception", str(ex))
                fake_throw = True
            # BUG FIX: the original message contained a bare '{}' placeholder
            # that was never formatted with the failing request.
            assert fake_throw == genuine_throw, \
                "Fake throw genuine not or vise versa request {}".format(request)
            assert fake_result == genuine_result, "Zookeeper results differ"
        # Final sanity check: both servers hold the same root children,
        # ignoring the system nodes.
        root_children_genuine = [elem for elem in list(sorted(genuine_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')]
        root_children_fake = [elem for elem in list(sorted(fake_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')]
        assert root_children_fake == root_children_genuine
    finally:
        for zk in [genuine_zk, fake_zk]:
            stop_zk(zk)
def test_end_of_session(started_cluster):
    """Ephemeral nodes must vanish (and fire DELETED watches) on both servers
    when the owning session closes."""
    fake_zk1 = None
    fake_zk2 = None
    genuine_zk1 = None
    genuine_zk2 = None
    try:
        # Two independent sessions per server: zk1 owns the ephemeral node,
        # zk2 observes it.
        fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
        fake_zk1.start()
        fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
        fake_zk2.start()
        genuine_zk1 = cluster.get_kazoo_client('zoo1')
        genuine_zk1.start()
        genuine_zk2 = cluster.get_kazoo_client('zoo1')
        genuine_zk2.start()
        fake_zk1.create("/test_end_of_session")
        genuine_zk1.create("/test_end_of_session")
        fake_ephemeral_event = None
        def fake_ephemeral_callback(event):
            print("Fake watch triggered")
            nonlocal fake_ephemeral_event
            fake_ephemeral_event = event
        genuine_ephemeral_event = None
        def genuine_ephemeral_callback(event):
            print("Genuine watch triggered")
            nonlocal genuine_ephemeral_event
            genuine_ephemeral_event = event
        assert fake_zk2.exists("/test_end_of_session") is not None
        assert genuine_zk2.exists("/test_end_of_session") is not None
        fake_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True)
        genuine_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True)
        # Arm exists-watches from the observer sessions.
        assert fake_zk2.exists("/test_end_of_session/ephemeral_node", watch=fake_ephemeral_callback) is not None
        assert genuine_zk2.exists("/test_end_of_session/ephemeral_node", watch=genuine_ephemeral_callback) is not None
        # Closing the owner session must delete the ephemeral node.
        print("Stopping genuine zk")
        genuine_zk1.stop()
        print("Closing genuine zk")
        genuine_zk1.close()
        print("Stopping fake zk")
        fake_zk1.stop()
        print("Closing fake zk")
        fake_zk1.close()
        assert fake_zk2.exists("/test_end_of_session/ephemeral_node") is None
        assert genuine_zk2.exists("/test_end_of_session/ephemeral_node") is None
        assert fake_ephemeral_event == genuine_ephemeral_event
    finally:
        for zk in [fake_zk1, fake_zk2, genuine_zk1, genuine_zk2]:
            stop_zk(zk)
def test_end_of_watches_session(started_cluster):
    """Watches armed by a closed session must NOT fire; only the two watches
    armed by the surviving session may trigger."""
    fake_zk1 = None
    fake_zk2 = None
    try:
        fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
        fake_zk1.start()
        fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
        fake_zk2.start()
        fake_zk1.create("/test_end_of_watches_session")
        dummy_set = 0
        def dummy_callback(event):
            nonlocal dummy_set
            dummy_set += 1
            print(event)
        # Session 1 arms 100 child watches, session 2 only two of them.
        for child_node in range(100):
            fake_zk1.create("/test_end_of_watches_session/" + str(child_node))
            fake_zk1.get_children("/test_end_of_watches_session/" + str(child_node), watch=dummy_callback)
        fake_zk2.get_children("/test_end_of_watches_session/" + str(0), watch=dummy_callback)
        fake_zk2.get_children("/test_end_of_watches_session/" + str(1), watch=dummy_callback)
        # Kill session 1: its 100 watches must be discarded by the server.
        fake_zk1.stop()
        fake_zk1.close()
        for child_node in range(100):
            fake_zk2.create("/test_end_of_watches_session/" + str(child_node) + "/" + str(child_node), b"somebytes")
        # Only session 2's two watches may have fired.
        assert dummy_set == 2
    finally:
        for zk in [fake_zk1, fake_zk2]:
            stop_zk(zk)
def test_concurrent_watches(started_cluster):
    """Stress Keeper with 10 threads concurrently arming and triggering data
    watches; every armed watch must eventually fire exactly once."""
    try:
        fake_zk = get_fake_zk()
        fake_zk.restart()
        global_path = "/test_concurrent_watches_0"
        fake_zk.create(global_path)
        # Shared (closure) state mutated from the worker threads.
        dumb_watch_triggered_counter = 0
        all_paths_triggered = []
        existing_path = []
        all_paths_created = []
        watches_created = 0
        def create_path_and_watch(i):
            # Arm a fresh one-shot data watch on node <global_path>/<i>.
            nonlocal watches_created
            nonlocal all_paths_created
            fake_zk.ensure_path(global_path + "/" + str(i))
            # new function each time
            def dumb_watch(event):
                nonlocal dumb_watch_triggered_counter
                dumb_watch_triggered_counter += 1
                nonlocal all_paths_triggered
                all_paths_triggered.append(event.path)
            fake_zk.get(global_path + "/" + str(i), watch=dumb_watch)
            all_paths_created.append(global_path + "/" + str(i))
            watches_created += 1
            existing_path.append(i)
        trigger_called = 0
        def trigger_watch(i):
            # A set() fires the pending data watch on the node.
            nonlocal trigger_called
            trigger_called += 1
            fake_zk.set(global_path + "/" + str(i), b"somevalue")
            try:
                existing_path.remove(i)
            except:
                # Another thread may already have consumed this index.
                pass
        def call(total):
            # Worker body: arm watches and trigger random pending ones,
            # then drain whatever is left.
            for i in range(total):
                create_path_and_watch(random.randint(0, 1000))
                time.sleep(random.random() % 0.5)
                try:
                    rand_num = random.choice(existing_path)
                    trigger_watch(rand_num)
                except:
                    pass
            while existing_path:
                try:
                    rand_num = random.choice(existing_path)
                    trigger_watch(rand_num)
                except:
                    pass
        p = Pool(10)
        arguments = [100] * 10
        watches_must_be_created = sum(arguments)
        watches_trigger_must_be_called = sum(arguments)
        watches_must_be_triggered = sum(arguments)
        p.map(call, arguments)
        p.close()
        # waiting for late watches
        for i in range(50):
            if dumb_watch_triggered_counter == watches_must_be_triggered:
                break
            time.sleep(0.1)
        assert watches_created == watches_must_be_created
        assert trigger_called >= watches_trigger_must_be_called
        assert len(existing_path) == 0
        if dumb_watch_triggered_counter != watches_must_be_triggered:
            # Dump diagnostics before the final assertion fails.
            print("All created paths", all_paths_created)
            print("All triggerred paths", all_paths_triggered)
            print("All paths len", len(all_paths_created))
            print("All triggered len", len(all_paths_triggered))
            print("Diff", list(set(all_paths_created) - set(all_paths_triggered)))
        assert dumb_watch_triggered_counter == watches_must_be_triggered
    finally:
        stop_zk(fake_zk)
import pytest
from helpers.cluster import ClickHouseCluster
import random
import string
import os
import time
from multiprocessing.dummy import Pool
# Single ClickHouse instance with Keeper enabled, plus a genuine ZooKeeper
# ensemble (with_zookeeper=True) used as the behavioural reference.
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=['configs/enable_keeper.xml'], with_zookeeper=True, use_keeper=False)
from kazoo.client import KazooClient, KazooState, KeeperState
def get_genuine_zk():
    """Return a kazoo client connected to the genuine ZooKeeper node zoo1."""
    print("Zoo1", cluster.get_instance_ip("zoo1"))
    client = cluster.get_kazoo_client('zoo1')
    return client
def get_fake_zk():
    # Connect a kazoo client to ClickHouse Keeper (the "fake" ZooKeeper)
    # on port 9181 of the ClickHouse node.
    print("node", cluster.get_instance_ip("node"))
    _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181", timeout=30.0)
    def reset_last_zxid_listener(state):
        # On any non-CONNECTED transition, call kazoo's internal _reset();
        # presumably this clears remembered session/zxid state so a
        # reconnect is accepted — NOTE(review): relies on a private API.
        print("Fake zk callback called for state", state)
        nonlocal _fake_zk_instance
        if state != KazooState.CONNECTED:
            _fake_zk_instance._reset()
    _fake_zk_instance.add_listener(reset_last_zxid_listener)
    _fake_zk_instance.start()
    return _fake_zk_instance
def random_string(length):
    """Return a random string of *length* lowercase letters and digits."""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=length))
def create_random_path(prefix="", depth=1):
    """Extend *prefix* with *depth* random 3-character path components."""
    path = prefix
    for _ in range(depth):
        path = os.path.join(path, random_string(3))
    return path
def stop_zk(zk):
    """Best-effort shutdown of a kazoo client; swallows every error so
    teardown never masks the real test failure."""
    try:
        if zk:
            zk.stop()
            zk.close()
    except:
        pass
@pytest.fixture(scope="module")
def started_cluster():
    # Start the docker cluster once per test module and guarantee shutdown
    # even when a test fails.
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def test_simple_commands(started_cluster):
    """Smoke test: create/set/exists/get behave the same on genuine
    ZooKeeper and on ClickHouse Keeper."""
    try:
        genuine_zk = get_genuine_zk()
        fake_zk = get_fake_zk()
        for zk in [genuine_zk, fake_zk]:
            zk.create("/test_simple_commands", b"")
            zk.create("/test_simple_commands/somenode1", b"hello")
            zk.set("/test_simple_commands/somenode1", b"world")
        for zk in [genuine_zk, fake_zk]:
            assert zk.exists("/test_simple_commands")
            assert zk.exists("/test_simple_commands/somenode1")
            print(zk.get("/test_simple_commands/somenode1"))
            # get() returns (data, stat); compare only the payload.
            assert zk.get("/test_simple_commands/somenode1")[0] == b"world"
    finally:
        for zk in [genuine_zk, fake_zk]:
            stop_zk(zk)
def test_sequential_nodes(started_cluster):
    """Sequential-node numbering and collision behaviour must match between
    genuine ZooKeeper and ClickHouse Keeper."""
    try:
        genuine_zk = get_genuine_zk()
        fake_zk = get_fake_zk()
        genuine_zk.create("/test_sequential_nodes")
        fake_zk.create("/test_sequential_nodes")
        # Interleave sequential and plain creates; child lists must match.
        for i in range(1, 11):
            genuine_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True)
            genuine_zk.create("/test_sequential_nodes/" + ("b" * i))
            fake_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True)
            fake_zk.create("/test_sequential_nodes/" + ("b" * i))
        genuine_childs = list(sorted(genuine_zk.get_children("/test_sequential_nodes")))
        fake_childs = list(sorted(fake_zk.get_children("/test_sequential_nodes")))
        assert genuine_childs == fake_childs
        # A sequential create that collides with a manually created
        # "a0000000002" must raise on both servers.
        genuine_zk.create("/test_sequential_nodes_1")
        fake_zk.create("/test_sequential_nodes_1")
        genuine_zk.create("/test_sequential_nodes_1/a", sequence=True)
        fake_zk.create("/test_sequential_nodes_1/a", sequence=True)
        genuine_zk.create("/test_sequential_nodes_1/a0000000002")
        fake_zk.create("/test_sequential_nodes_1/a0000000002")
        genuine_throw = False
        fake_throw = False
        try:
            genuine_zk.create("/test_sequential_nodes_1/a", sequence=True)
        except Exception as ex:
            genuine_throw = True
        try:
            fake_zk.create("/test_sequential_nodes_1/a", sequence=True)
        except Exception as ex:
            fake_throw = True
        assert genuine_throw == True
        assert fake_throw == True
        genuine_childs_1 = list(sorted(genuine_zk.get_children("/test_sequential_nodes_1")))
        fake_childs_1 = list(sorted(fake_zk.get_children("/test_sequential_nodes_1")))
        assert genuine_childs_1 == fake_childs_1
        # Plain node and sequential node with the same base name may coexist.
        genuine_zk.create("/test_sequential_nodes_2")
        fake_zk.create("/test_sequential_nodes_2")
        genuine_zk.create("/test_sequential_nodes_2/node")
        fake_zk.create("/test_sequential_nodes_2/node")
        genuine_zk.create("/test_sequential_nodes_2/node", sequence=True)
        fake_zk.create("/test_sequential_nodes_2/node", sequence=True)
        genuine_childs_2 = list(sorted(genuine_zk.get_children("/test_sequential_nodes_2")))
        fake_childs_2 = list(sorted(fake_zk.get_children("/test_sequential_nodes_2")))
        assert genuine_childs_2 == fake_childs_2
    finally:
        for zk in [genuine_zk, fake_zk]:
            stop_zk(zk)
def assert_eq_stats(stat1, stat2):
    """Assert that two znode Stat structures agree on the compared fields.

    Only version counters, data length and child count are compared;
    server-specific fields (zxids, timestamps) are deliberately ignored
    because they legitimately differ between Keeper and ZooKeeper.
    """
    assert stat1.version == stat2.version
    assert stat1.cversion == stat2.cversion
    # The original asserted aversion twice; once is enough.
    assert stat1.aversion == stat2.aversion
    assert stat1.dataLength == stat2.dataLength
    assert stat1.numChildren == stat2.numChildren
def test_stats(started_cluster):
try:
genuine_zk = get_genuine_zk()
fake_zk = get_fake_zk()
genuine_zk.create("/test_stats_nodes")
fake_zk.create("/test_stats_nodes")
genuine_stats = genuine_zk.exists("/test_stats_nodes")
fake_stats = fake_zk.exists("/test_stats_nodes")
assert_eq_stats(genuine_stats, fake_stats)
for i in range(1, 11):
genuine_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True)
genuine_zk.create("/test_stats_nodes/" + ("b" * i))
fake_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True)
fake_zk.create("/test_stats_nodes/" + ("b" * i))
genuine_stats = genuine_zk.exists("/test_stats_nodes")
fake_stats = fake_zk.exists("/test_stats_nodes")
assert_eq_stats(genuine_stats, fake_stats)
for i in range(1, 11):
print("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2))
genuine_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2))
genuine_zk.delete("/test_stats_nodes/" + ("b" * i))
fake_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2))
fake_zk.delete("/test_stats_nodes/" + ("b" * i))
genuine_stats = genuine_zk.exists("/test_stats_nodes")
fake_stats = fake_zk.exists("/test_stats_nodes")
print(genuine_stats)
print(fake_stats)
assert_eq_stats(genuine_stats, fake_stats)
for i in range(100):
genuine_zk.set("/test_stats_nodes", ("q" * i).encode())
fake_zk.set("/test_stats_nodes", ("q" * i).encode())
genuine_stats = genuine_zk.exists("/test_stats_nodes")
fake_stats = fake_zk.exists("/test_stats_nodes")
print(genuine_stats)
print(fake_stats)
assert_eq_stats(genuine_stats, fake_stats)
finally:
for zk in [genuine_zk, fake_zk]:
stop_zk(zk)
def test_watchers(started_cluster):
try:
genuine_zk = get_genuine_zk()
fake_zk = get_fake_zk()
genuine_zk.create("/test_data_watches")
fake_zk.create("/test_data_watches")
genuine_data_watch_data = None
def genuine_callback(event):
print("Genuine data watch called")
nonlocal genuine_data_watch_data
genuine_data_watch_data = event
fake_data_watch_data = None
def fake_callback(event):
print("Fake data watch called")
nonlocal fake_data_watch_data
fake_data_watch_data = event
genuine_zk.get("/test_data_watches", watch=genuine_callback)
fake_zk.get("/test_data_watches", watch=fake_callback)
print("Calling set genuine")
genuine_zk.set("/test_data_watches", b"a")
print("Calling set fake")
fake_zk.set("/test_data_watches", b"a")
time.sleep(3)
print("Genuine data", genuine_data_watch_data)
print("Fake data", fake_data_watch_data)
assert genuine_data_watch_data == fake_data_watch_data
genuine_zk.create("/test_data_watches/child", b"a")
fake_zk.create("/test_data_watches/child", b"a")
genuine_children = None
def genuine_child_callback(event):
print("Genuine child watch called")
nonlocal genuine_children
genuine_children = event
fake_children = None
def fake_child_callback(event):
print("Fake child watch called")
nonlocal fake_children
fake_children = event
genuine_zk.get_children("/test_data_watches", watch=genuine_child_callback)
fake_zk.get_children("/test_data_watches", watch=fake_child_callback)
print("Calling non related genuine child")
genuine_zk.set("/test_data_watches/child", b"q")
genuine_zk.set("/test_data_watches", b"q")
print("Calling non related fake child")
fake_zk.set("/test_data_watches/child", b"q")
fake_zk.set("/test_data_watches", b"q")
time.sleep(3)
assert genuine_children == None
assert fake_children == None
print("Calling genuine child")
genuine_zk.create("/test_data_watches/child_new", b"b")
print("Calling fake child")
fake_zk.create("/test_data_watches/child_new", b"b")
time.sleep(3)
print("Genuine children", genuine_children)
print("Fake children", fake_children)
assert genuine_children == fake_children
genuine_children_delete = None
def genuine_child_delete_callback(event):
print("Genuine child watch called")
nonlocal genuine_children_delete
genuine_children_delete = event
fake_children_delete = None
def fake_child_delete_callback(event):
print("Fake child watch called")
nonlocal fake_children_delete
fake_children_delete = event
genuine_child_delete = None
def genuine_own_delete_callback(event):
print("Genuine child watch called")
nonlocal genuine_child_delete
genuine_child_delete = event
fake_child_delete = None
def fake_own_delete_callback(event):
print("Fake child watch called")
nonlocal fake_child_delete
fake_child_delete = event
genuine_zk.get_children("/test_data_watches", watch=genuine_child_delete_callback)
fake_zk.get_children("/test_data_watches", watch=fake_child_delete_callback)
genuine_zk.get_children("/test_data_watches/child", watch=genuine_own_delete_callback)
fake_zk.get_children("/test_data_watches/child", watch=fake_own_delete_callback)
print("Calling genuine child delete")
genuine_zk.delete("/test_data_watches/child")
print("Calling fake child delete")
fake_zk.delete("/test_data_watches/child")
time.sleep(3)
print("Genuine children delete", genuine_children_delete)
print("Fake children delete", fake_children_delete)
assert genuine_children_delete == fake_children_delete
print("Genuine child delete", genuine_child_delete)
print("Fake child delete", fake_child_delete)
assert genuine_child_delete == fake_child_delete
finally:
for zk in [genuine_zk, fake_zk]:
stop_zk(zk)
def test_multitransactions(started_cluster):
try:
genuine_zk = get_genuine_zk()
fake_zk = get_fake_zk()
for zk in [genuine_zk, fake_zk]:
zk.create('/test_multitransactions')
t = zk.transaction()
t.create('/test_multitransactions/freddy')
t.create('/test_multitransactions/fred', ephemeral=True)
t.create('/test_multitransactions/smith', sequence=True)
results = t.commit()
assert len(results) == 3
assert results[0] == '/test_multitransactions/freddy'
assert results[2].startswith('/test_multitransactions/smith0') is True
from kazoo.exceptions import RolledBackError, NoNodeError
for i, zk in enumerate([genuine_zk, fake_zk]):
print("Processing ZK", i)
t = zk.transaction()
t.create('/test_multitransactions/q')
t.delete('/test_multitransactions/a')
t.create('/test_multitransactions/x')
results = t.commit()
print("Results", results)
assert results[0].__class__ == RolledBackError
assert results[1].__class__ == NoNodeError
assert zk.exists('/test_multitransactions/q') is None
assert zk.exists('/test_multitransactions/a') is None
assert zk.exists('/test_multitransactions/x') is None
finally:
for zk in [genuine_zk, fake_zk]:
stop_zk(zk)
def exists(zk, path):
result = zk.exists(path)
return result is not None
def get(zk, path):
result = zk.get(path)
return result[0]
def get_children(zk, path):
return [elem for elem in list(sorted(zk.get_children(path))) if elem not in ('clickhouse', 'zookeeper')]
READ_REQUESTS = [
("exists", exists),
("get", get),
("get_children", get_children),
]
def create(zk, path, data):
zk.create(path, data.encode())
def set_data(zk, path, data):
zk.set(path, data.encode())
WRITE_REQUESTS = [
("create", create),
("set_data", set_data),
]
def delete(zk, path):
zk.delete(path)
DELETE_REQUESTS = [
("delete", delete)
]
class Request(object):
def __init__(self, name, arguments, callback, is_return):
self.name = name
self.arguments = arguments
self.callback = callback
self.is_return = is_return
def __str__(self):
arg_str = ', '.join([str(k) + "=" + str(v) for k, v in self.arguments.items()])
return "ZKRequest name {} with arguments {}".format(self.name, arg_str)
def generate_requests(prefix="/", iters=1):
    """Build a randomized mixed workload of ZooKeeper requests.

    Per iteration: 100 creates of randomly nested paths under *prefix*,
    100 sets, 100 reads, 30 deletes and 100 more reads, each targeting a
    path created earlier. Returns the Request objects in generation order.
    """
    requests = []
    existing_paths = []
    for i in range(iters):
        # Create nodes at a random depth (0-9) and remember their paths.
        for _ in range(100):
            rand_length = random.randint(0, 10)
            path = prefix
            for j in range(1, rand_length):
                path = create_random_path(path, 1)
            existing_paths.append(path)
            value = random_string(1000)
            request = Request("create", {"path": path, "value": value[0:10]},
                              lambda zk, path=path, value=value: create(zk, path, value), False)
            requests.append(request)
        for _ in range(100):
            path = random.choice(existing_paths)
            value = random_string(100)
            request = Request("set", {"path": path, "value": value[0:10]},
                              lambda zk, path=path, value=value: set_data(zk, path, value), False)
            requests.append(request)
        for _ in range(100):
            path = random.choice(existing_paths)
            callback = random.choice(READ_REQUESTS)
            def read_func1(zk, path=path, callback=callback):
                return callback[1](zk, path)
            request = Request(callback[0], {"path": path}, read_func1, True)
            requests.append(request)
        for _ in range(30):
            path = random.choice(existing_paths)
            request = Request("delete", {"path": path},
                              lambda zk, path=path: delete(zk, path), False)
            # BUG FIX: the original built the delete request but never
            # queued it, so deletes were silently dropped from the workload.
            requests.append(request)
        for _ in range(100):
            path = random.choice(existing_paths)
            callback = random.choice(READ_REQUESTS)
            def read_func2(zk, path=path, callback=callback):
                return callback[1](zk, path)
            request = Request(callback[0], {"path": path}, read_func2, True)
            requests.append(request)
    return requests
def test_random_requests(started_cluster):
    """Replay one identical random workload against genuine ZooKeeper and
    ClickHouse Keeper; results, raised errors and the final tree must match."""
    try:
        requests = generate_requests("/test_random_requests", 10)
        print("Generated", len(requests), "requests")
        genuine_zk = get_genuine_zk()
        fake_zk = get_fake_zk()
        genuine_zk.create("/test_random_requests")
        fake_zk.create("/test_random_requests")
        for i, request in enumerate(requests):
            genuine_throw = False
            fake_throw = False
            fake_result = None
            genuine_result = None
            try:
                genuine_result = request.callback(genuine_zk)
            except Exception as ex:
                print("i", i, "request", request)
                print("Genuine exception", str(ex))
                genuine_throw = True
            try:
                fake_result = request.callback(fake_zk)
            except Exception as ex:
                print("i", i, "request", request)
                print("Fake exception", str(ex))
                fake_throw = True
            # BUG FIX: the original message contained a bare '{}' placeholder
            # that was never formatted with the failing request.
            assert fake_throw == genuine_throw, \
                "Fake throw genuine not or vise versa request {}".format(request)
            assert fake_result == genuine_result, "Zookeeper results differ"
        # Final sanity check: both servers hold the same root children,
        # ignoring the system nodes.
        root_children_genuine = [elem for elem in list(sorted(genuine_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')]
        root_children_fake = [elem for elem in list(sorted(fake_zk.get_children("/test_random_requests"))) if elem not in ('clickhouse', 'zookeeper')]
        assert root_children_fake == root_children_genuine
    finally:
        for zk in [genuine_zk, fake_zk]:
            stop_zk(zk)
def test_end_of_session(started_cluster):
fake_zk1 = None
fake_zk2 = None
genuine_zk1 = None
genuine_zk2 = None
try:
fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
fake_zk1.start()
fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
fake_zk2.start()
genuine_zk1 = cluster.get_kazoo_client('zoo1')
genuine_zk1.start()
genuine_zk2 = cluster.get_kazoo_client('zoo1')
genuine_zk2.start()
fake_zk1.create("/test_end_of_session")
genuine_zk1.create("/test_end_of_session")
fake_ephemeral_event = None
def fake_ephemeral_callback(event):
print("Fake watch triggered")
nonlocal fake_ephemeral_event
fake_ephemeral_event = event
genuine_ephemeral_event = None
def genuine_ephemeral_callback(event):
print("Genuine watch triggered")
nonlocal genuine_ephemeral_event
genuine_ephemeral_event = event
assert fake_zk2.exists("/test_end_of_session") is not None
assert genuine_zk2.exists("/test_end_of_session") is not None
fake_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True)
genuine_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True)
assert fake_zk2.exists("/test_end_of_session/ephemeral_node", watch=fake_ephemeral_callback) is not None
assert genuine_zk2.exists("/test_end_of_session/ephemeral_node", watch=genuine_ephemeral_callback) is not None
print("Stopping genuine zk")
genuine_zk1.stop()
print("Closing genuine zk")
genuine_zk1.close()
print("Stopping fake zk")
fake_zk1.stop()
print("Closing fake zk")
fake_zk1.close()
assert fake_zk2.exists("/test_end_of_session/ephemeral_node") is None
assert genuine_zk2.exists("/test_end_of_session/ephemeral_node") is None
assert fake_ephemeral_event == genuine_ephemeral_event
finally:
for zk in [fake_zk1, fake_zk2, genuine_zk1, genuine_zk2]:
stop_zk(zk)
def test_end_of_watches_session(started_cluster):
fake_zk1 = None
fake_zk2 = None
try:
fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
fake_zk1.start()
fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181")
fake_zk2.start()
fake_zk1.create("/test_end_of_watches_session")
dummy_set = 0
def dummy_callback(event):
nonlocal dummy_set
dummy_set += 1
print(event)
for child_node in range(100):
fake_zk1.create("/test_end_of_watches_session/" + str(child_node))
fake_zk1.get_children("/test_end_of_watches_session/" + str(child_node), watch=dummy_callback)
fake_zk2.get_children("/test_end_of_watches_session/" + str(0), watch=dummy_callback)
fake_zk2.get_children("/test_end_of_watches_session/" + str(1), watch=dummy_callback)
fake_zk1.stop()
fake_zk1.close()
for child_node in range(100):
fake_zk2.create("/test_end_of_watches_session/" + str(child_node) + "/" + str(child_node), b"somebytes")
assert dummy_set == 2
finally:
for zk in [fake_zk1, fake_zk2]:
stop_zk(zk)
def test_concurrent_watches(started_cluster):
    """Stress-test watch delivery: every watch that is set must eventually fire.

    Ten pool workers each create ~100 randomly-numbered nodes with a one-shot
    data watch, then trigger the watches by setting node data.  At the end the
    number of fired watches must equal the number of watches created.

    NOTE(review): the worker closures mutate `nonlocal` counters and shared
    lists — this presumably relies on `Pool` being a thread pool
    (multiprocessing.dummy); verify the import at the top of the file.
    """
    try:
        # NOTE(review): if get_fake_zk() raises, `fake_zk` is unbound in the
        # finally block — pre-initializing it to None would be safer.
        fake_zk = get_fake_zk()
        fake_zk.restart()
        global_path = "/test_concurrent_watches_0"
        fake_zk.create(global_path)
        dumb_watch_triggered_counter = 0   # watch deliveries observed
        all_paths_triggered = []           # paths whose watch fired
        existing_path = []                 # node ids with a pending (unfired) watch
        all_paths_created = []             # every path a watch was set on
        watches_created = 0                # watches registered so far
        def create_path_and_watch(i):
            # create node i (idempotent) and register a one-shot data watch on it
            nonlocal watches_created
            nonlocal all_paths_created
            fake_zk.ensure_path(global_path + "/" + str(i))
            def dumb_watch(event):
                nonlocal dumb_watch_triggered_counter
                dumb_watch_triggered_counter += 1
                nonlocal all_paths_triggered
                all_paths_triggered.append(event.path)
            fake_zk.get(global_path + "/" + str(i), watch=dumb_watch)
            all_paths_created.append(global_path + "/" + str(i))
            watches_created += 1
            existing_path.append(i)
        trigger_called = 0
        def trigger_watch(i):
            # fire the watch on node i by writing its data
            nonlocal trigger_called
            trigger_called += 1
            fake_zk.set(global_path + "/" + str(i), b"somevalue")
            try:
                existing_path.remove(i)
            except:   # noqa: E722 — another worker may have removed i already
                pass
        def call(total):
            # interleave watch creation with random triggering ...
            for i in range(total):
                create_path_and_watch(random.randint(0, 1000))
                time.sleep(random.random() % 0.5)
                try:
                    rand_num = random.choice(existing_path)
                    trigger_watch(rand_num)
                except:   # noqa: E722 — existing_path may be empty / raced
                    pass
            # ... then drain every remaining pending watch
            while existing_path:
                try:
                    rand_num = random.choice(existing_path)
                    trigger_watch(rand_num)
                except:
                    pass
        p = Pool(10)
        arguments = [100] * 10
        watches_must_be_created = sum(arguments)
        watches_trigger_must_be_called = sum(arguments)
        watches_must_be_triggered = sum(arguments)
        p.map(call, arguments)
        p.close()
        # allow up to ~5s for asynchronous watch callbacks to arrive
        for i in range(50):
            if dumb_watch_triggered_counter == watches_must_be_triggered:
                break
            time.sleep(0.1)
        assert watches_created == watches_must_be_created
        # trigger_watch may be called more than once per node due to races
        assert trigger_called >= watches_trigger_must_be_called
        assert len(existing_path) == 0
        # dump diagnostics before the final (possibly failing) assertion
        if dumb_watch_triggered_counter != watches_must_be_triggered:
            print("All created paths", all_paths_created)
            print("All triggerred paths", all_paths_triggered)
            print("All paths len", len(all_paths_created))
            print("All triggered len", len(all_paths_triggered))
            print("Diff", list(set(all_paths_created) - set(all_paths_triggered)))
        assert dumb_watch_triggered_counter == watches_must_be_triggered
    finally:
        stop_zk(fake_zk)
| true | true |
f73b46b26aa68b33f42d16333517bedecfca15eb | 2,222 | py | Python | growth/cells/cells.py | sebastianbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | 1 | 2022-03-01T14:48:14.000Z | 2022-03-01T14:48:14.000Z | growth/cells/cells.py | sbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | null | null | null | growth/cells/cells.py | sbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | null | null | null | from os.path import join
import numpy as np
from functools import reduce
from operator import add
class Cell:
def __init__(self, xy=None, chromosomes=None, lineage=''):
# set generation
self.lineage = lineage
# set chromosomes
if chromosomes is None:
chromosomes = np.array([0, 1])
self.chromosomes = chromosomes
# set position
if xy is None:
xy = np.zeros(2, dtype=float)
self.xy = xy
@property
def generation(self):
return len(self.lineage)
@property
def genotype(self):
return self.chromosomes.sum()
@property
def phenotype(self):
return np.random.normal(loc=self.genotype, scale=1.)
def copy(self):
""" Returns copy of cell. """
return self.__class__(self.xy, self.chromosomes, self.lineage)
def set_xy(self, xy):
self.xy = xy
def recombine(self, rate=0.):
# duplicate chromosomes
chromosomes = np.tile(self.chromosomes, 2)
# recombination
if np.random.random() <= rate:
chromosomes.sort()
return chromosomes
def divide(self, recombination_rate=0., reference_population=1000):
# set average spacing between cells
spacing = np.sqrt(2/reference_population) / 1e5
# perform recombination
chromosomes = self.recombine(rate=recombination_rate)
# determine child positions
jitter = np.random.normal(scale=spacing, size=(2, 2))
xy_a, xy_b = self.xy+jitter[0], self.xy+jitter[1]
# instantiate children
daughter_a = self.__class__(xy_a, chromosomes[:2], self.lineage+'0')
daughter_b = self.__class__(xy_b, chromosomes[2:], self.lineage+'1')
return [daughter_a, daughter_b]
def grow(self, max_generation=3, **kwargs):
"""
Recursive growth.
"""
# stopping criterion
if self.generation >= max_generation:
return [self]
# divide
else:
children = self.divide(**kwargs)
recurse = lambda x: x.grow(max_generation=max_generation, **kwargs)
return reduce(add, map(recurse, children))
| 25.837209 | 79 | 0.60486 | from os.path import join
import numpy as np
from functools import reduce
from operator import add
class Cell:
    """A cell with a 2-D position, a chromosome pair, and a binary lineage string."""
    def __init__(self, xy=None, chromosomes=None, lineage=''):
        """xy: 2-vector position (origin by default); chromosomes: pair,
        defaults to [0, 1]; lineage: binary string of division history."""
        self.lineage = lineage
        if chromosomes is None:
            chromosomes = np.array([0, 1])
        self.chromosomes = chromosomes
        if xy is None:
            xy = np.zeros(2, dtype=float)
        self.xy = xy
    @property
    def generation(self):
        """Number of divisions since the founder cell."""
        return len(self.lineage)
    @property
    def genotype(self):
        """Sum of chromosome values."""
        return self.chromosomes.sum()
    @property
    def phenotype(self):
        """Genotype plus unit-variance Gaussian noise (stochastic)."""
        return np.random.normal(loc=self.genotype, scale=1.)
    def copy(self):
        """Return a copy sharing position/chromosome arrays."""
        return self.__class__(self.xy, self.chromosomes, self.lineage)
    def set_xy(self, xy):
        """Overwrite the cell position."""
        self.xy = xy
    def recombine(self, rate=0.):
        """Duplicate the chromosome pair; sort the 4 copies with probability ``rate``."""
        chromosomes = np.tile(self.chromosomes, 2)
        if np.random.random() <= rate:
            chromosomes.sort()
        return chromosomes
    def divide(self, recombination_rate=0., reference_population=1000):
        """Split into two daughter cells jittered around the parent position."""
        # mean spacing between cells, scaled from the reference population size
        spacing = np.sqrt(2/reference_population) / 1e5
        chromosomes = self.recombine(rate=recombination_rate)
        jitter = np.random.normal(scale=spacing, size=(2, 2))
        xy_a, xy_b = self.xy+jitter[0], self.xy+jitter[1]
        # daughters extend the lineage with '0' and '1' respectively
        daughter_a = self.__class__(xy_a, chromosomes[:2], self.lineage+'0')
        daughter_b = self.__class__(xy_b, chromosomes[2:], self.lineage+'1')
        return [daughter_a, daughter_b]
    def grow(self, max_generation=3, **kwargs):
        """Recursively divide until ``max_generation``; return the flat list of leaves."""
        if self.generation >= max_generation:
            return [self]
        else:
            children = self.divide(**kwargs)
            recurse = lambda x: x.grow(max_generation=max_generation, **kwargs)
            # reduce/add concatenates the per-daughter leaf lists
            return reduce(add, map(recurse, children))
return reduce(add, map(recurse, children))
| true | true |
f73b47001626570e7f6ff47c0c386a4ef1d7d657 | 392 | py | Python | recursion/tail/power_of_two.py | MelkiyHondavod/computations | 5ccb28c2021270e25c23ae9836979ae63febc66a | [
"Apache-2.0"
] | null | null | null | recursion/tail/power_of_two.py | MelkiyHondavod/computations | 5ccb28c2021270e25c23ae9836979ae63febc66a | [
"Apache-2.0"
] | null | null | null | recursion/tail/power_of_two.py | MelkiyHondavod/computations | 5ccb28c2021270e25c23ae9836979ae63febc66a | [
"Apache-2.0"
] | 4 | 2020-12-16T18:12:02.000Z | 2021-06-05T08:45:26.000Z | #Дано натуральное число N. Выведите слово YES, если число N является точной степенью двойки, или слово NO в противном случае.
#The exponentiation operation must not be used!
# Implement both a plain and a tail-recursive version
# Convert the tail-recursive version into a while loop
def is_power_of_two(N):
    """Return 'YES' if natural number N is an exact power of two, else 'NO'.

    Plain (non-tail) recursion: halve N until it reaches 1 (power of two)
    or becomes odd / drops below 1 (not a power of two).  No exponentiation
    is used, as the exercise requires.  Previously this was a stub that
    always returned 'YES'.
    """
    if N == 1:
        return 'YES'
    if N < 1 or N % 2 != 0:
        return 'NO'
    return is_power_of_two(N // 2)
def is_power_of_two_tail(N, n):
    """Tail-recursive power-of-two check; call with n=1.

    Doubles the accumulator ``n`` until it meets or exceeds N.  Returns
    'YES' when some power of two equals N, else 'NO'.  Equivalent while
    loop: ``while n < N: n *= 2`` then compare ``n == N``.  Previously a
    stub that always returned 'YES'.
    """
    if n == N:
        return 'YES'
    if n > N:
        return 'NO'
    return is_power_of_two_tail(N, n * 2)
| 28 | 126 | 0.75 |
def is_power_of_two(N):
    """Return 'YES' if natural number N is an exact power of two, else 'NO'.

    Plain recursion: halve N until it reaches 1 or becomes odd / < 1.
    Previously a stub that always returned 'YES'.
    """
    if N == 1:
        return 'YES'
    if N < 1 or N % 2 != 0:
        return 'NO'
    return is_power_of_two(N // 2)
def is_power_of_two_tail(N, n):
    """Tail-recursive power-of-two check; call with n=1.

    Doubles ``n`` until it meets or exceeds N; 'YES' iff n lands exactly
    on N.  Previously a stub that always returned 'YES'.
    """
    if n == N:
        return 'YES'
    if n > N:
        return 'NO'
    return is_power_of_two_tail(N, n * 2)
| true | true |
f73b4774199c20095648e24e622c1ae1a33199bc | 5,614 | py | Python | google/ads/google_ads/v2/proto/services/shopping_performance_view_service_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v2/proto/services/shopping_performance_view_service_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v2/proto/services/shopping_performance_view_service_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2020-09-30T17:04:06.000Z | 2020-09-30T17:04:06.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/services/shopping_performance_view_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v2.proto.resources import shopping_performance_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_shopping__performance__view__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/services/shopping_performance_view_service.proto',
package='google.ads.googleads.v2.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v2.servicesB#ShoppingPerformanceViewServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V2.Services\312\002 Google\\Ads\\GoogleAds\\V2\\Services\352\002$Google::Ads::GoogleAds::V2::Services'),
serialized_pb=_b('\nNgoogle/ads/googleads_v2/proto/services/shopping_performance_view_service.proto\x12 google.ads.googleads.v2.services\x1aGgoogle/ads/googleads_v2/proto/resources/shopping_performance_view.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\":\n!GetShoppingPerformanceViewRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\x9e\x02\n\x1eShoppingPerformanceViewService\x12\xde\x01\n\x1aGetShoppingPerformanceView\x12\x43.google.ads.googleads.v2.services.GetShoppingPerformanceViewRequest\x1a:.google.ads.googleads.v2.resources.ShoppingPerformanceView\"?\x82\xd3\xe4\x93\x02\x39\x12\x37/v2/{resource_name=customers/*/shoppingPerformanceView}\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x8a\x02\n$com.google.ads.googleads.v2.servicesB#ShoppingPerformanceViewServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V2.Services\xca\x02 Google\\Ads\\GoogleAds\\V2\\Services\xea\x02$Google::Ads::GoogleAds::V2::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_shopping__performance__view__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,])
_GETSHOPPINGPERFORMANCEVIEWREQUEST = _descriptor.Descriptor(
name='GetShoppingPerformanceViewRequest',
full_name='google.ads.googleads.v2.services.GetShoppingPerformanceViewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.services.GetShoppingPerformanceViewRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=244,
serialized_end=302,
)
DESCRIPTOR.message_types_by_name['GetShoppingPerformanceViewRequest'] = _GETSHOPPINGPERFORMANCEVIEWREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetShoppingPerformanceViewRequest = _reflection.GeneratedProtocolMessageType('GetShoppingPerformanceViewRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSHOPPINGPERFORMANCEVIEWREQUEST,
__module__ = 'google.ads.googleads_v2.proto.services.shopping_performance_view_service_pb2'
,
__doc__ = """Request message for
[ShoppingPerformanceViewService.GetShoppingPerformanceView][google.ads.googleads.v2.services.ShoppingPerformanceViewService.GetShoppingPerformanceView].
Attributes:
resource_name:
The resource name of the Shopping performance view to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.GetShoppingPerformanceViewRequest)
))
_sym_db.RegisterMessage(GetShoppingPerformanceViewRequest)
DESCRIPTOR._options = None
_SHOPPINGPERFORMANCEVIEWSERVICE = _descriptor.ServiceDescriptor(
name='ShoppingPerformanceViewService',
full_name='google.ads.googleads.v2.services.ShoppingPerformanceViewService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=305,
serialized_end=591,
methods=[
_descriptor.MethodDescriptor(
name='GetShoppingPerformanceView',
full_name='google.ads.googleads.v2.services.ShoppingPerformanceViewService.GetShoppingPerformanceView',
index=0,
containing_service=None,
input_type=_GETSHOPPINGPERFORMANCEVIEWREQUEST,
output_type=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_shopping__performance__view__pb2._SHOPPINGPERFORMANCEVIEW,
serialized_options=_b('\202\323\344\223\0029\0227/v2/{resource_name=customers/*/shoppingPerformanceView}'),
),
])
_sym_db.RegisterServiceDescriptor(_SHOPPINGPERFORMANCEVIEWSERVICE)
DESCRIPTOR.services_by_name['ShoppingPerformanceViewService'] = _SHOPPINGPERFORMANCEVIEWSERVICE
# @@protoc_insertion_point(module_scope)
| 51.504587 | 1,045 | 0.82508 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v2.proto.resources import shopping_performance_view_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_shopping__performance__view__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/services/shopping_performance_view_service.proto',
package='google.ads.googleads.v2.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v2.servicesB#ShoppingPerformanceViewServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V2.Services\312\002 Google\\Ads\\GoogleAds\\V2\\Services\352\002$Google::Ads::GoogleAds::V2::Services'),
serialized_pb=_b('\nNgoogle/ads/googleads_v2/proto/services/shopping_performance_view_service.proto\x12 google.ads.googleads.v2.services\x1aGgoogle/ads/googleads_v2/proto/resources/shopping_performance_view.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\":\n!GetShoppingPerformanceViewRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\x9e\x02\n\x1eShoppingPerformanceViewService\x12\xde\x01\n\x1aGetShoppingPerformanceView\x12\x43.google.ads.googleads.v2.services.GetShoppingPerformanceViewRequest\x1a:.google.ads.googleads.v2.resources.ShoppingPerformanceView\"?\x82\xd3\xe4\x93\x02\x39\x12\x37/v2/{resource_name=customers/*/shoppingPerformanceView}\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x8a\x02\n$com.google.ads.googleads.v2.servicesB#ShoppingPerformanceViewServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V2.Services\xca\x02 Google\\Ads\\GoogleAds\\V2\\Services\xea\x02$Google::Ads::GoogleAds::V2::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_shopping__performance__view__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,])
_GETSHOPPINGPERFORMANCEVIEWREQUEST = _descriptor.Descriptor(
name='GetShoppingPerformanceViewRequest',
full_name='google.ads.googleads.v2.services.GetShoppingPerformanceViewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.services.GetShoppingPerformanceViewRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=244,
serialized_end=302,
)
DESCRIPTOR.message_types_by_name['GetShoppingPerformanceViewRequest'] = _GETSHOPPINGPERFORMANCEVIEWREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetShoppingPerformanceViewRequest = _reflection.GeneratedProtocolMessageType('GetShoppingPerformanceViewRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSHOPPINGPERFORMANCEVIEWREQUEST,
__module__ = 'google.ads.googleads_v2.proto.services.shopping_performance_view_service_pb2'
,
__doc__ = """Request message for
[ShoppingPerformanceViewService.GetShoppingPerformanceView][google.ads.googleads.v2.services.ShoppingPerformanceViewService.GetShoppingPerformanceView].
Attributes:
resource_name:
The resource name of the Shopping performance view to fetch.
""",
))
_sym_db.RegisterMessage(GetShoppingPerformanceViewRequest)
DESCRIPTOR._options = None
_SHOPPINGPERFORMANCEVIEWSERVICE = _descriptor.ServiceDescriptor(
name='ShoppingPerformanceViewService',
full_name='google.ads.googleads.v2.services.ShoppingPerformanceViewService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=305,
serialized_end=591,
methods=[
_descriptor.MethodDescriptor(
name='GetShoppingPerformanceView',
full_name='google.ads.googleads.v2.services.ShoppingPerformanceViewService.GetShoppingPerformanceView',
index=0,
containing_service=None,
input_type=_GETSHOPPINGPERFORMANCEVIEWREQUEST,
output_type=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_shopping__performance__view__pb2._SHOPPINGPERFORMANCEVIEW,
serialized_options=_b('\202\323\344\223\0029\0227/v2/{resource_name=customers/*/shoppingPerformanceView}'),
),
])
_sym_db.RegisterServiceDescriptor(_SHOPPINGPERFORMANCEVIEWSERVICE)
DESCRIPTOR.services_by_name['ShoppingPerformanceViewService'] = _SHOPPINGPERFORMANCEVIEWSERVICE
| true | true |
f73b4856009641882adbb56e61af6046f93e30fe | 4,033 | py | Python | evaluation/Traditional/eval_metrics/rouge/rouge.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
] | 100 | 2020-01-30T08:14:25.000Z | 2022-03-30T08:59:33.000Z | evaluation/Traditional/eval_metrics/rouge/rouge.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
] | 4 | 2021-06-08T22:34:33.000Z | 2022-03-12T00:50:13.000Z | evaluation/Traditional/eval_metrics/rouge/rouge.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
] | 15 | 2020-04-13T22:56:27.000Z | 2022-03-10T02:44:26.000Z | #!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovey (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <vrama91@vt.edu>
import numpy as np
import pdb
def my_lcs(string, sub):
    """
    Length of the longest common subsequence of two tokenized strings.

    :param string: list of str : tokens from a string split using whitespace
    :param sub: list of str : shorter string, also split using whitespace
    :returns: int : LCS length only — the subsequence itself is not recovered
    """
    # make sure `sub` refers to the shorter of the two sequences
    if len(string) < len(sub):
        string, sub = sub, string

    # dp[i][j] = LCS length of string[:i] and sub[:j]
    dp = [[0] * (len(sub) + 1) for _ in range(len(string) + 1)]
    for i, tok_long in enumerate(string, start=1):
        for j, tok_short in enumerate(sub, start=1):
            if tok_long == tok_short:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
class Rouge():
    '''
    Computes the ROUGE-L metric (Lin & Hovy, 2004) for candidate captions
    against reference captions, as used for the MS COCO test set.
    '''

    def __init__(self):
        # beta weights recall over precision in the F-measure
        # (value chosen per discussion with Hovy)
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """
        ROUGE-L for one candidate against its reference set.

        :param candidate: single-element list holding the candidate sentence
        :param refs: non-empty list of reference sentences for the image
        :returns: float : ROUGE-L score of the candidate vs. the references
        """
        assert(len(candidate) == 1)
        assert(len(refs) > 0)
        cand_tokens = candidate[0].split(" ")
        precisions, recalls = [], []
        for ref_sentence in refs:
            ref_tokens = ref_sentence.split(" ")
            lcs_len = my_lcs(ref_tokens, cand_tokens)
            precisions.append(lcs_len / float(len(cand_tokens)))
            recalls.append(lcs_len / float(len(ref_tokens)))
        best_prec = max(precisions)
        best_rec = max(recalls)
        if best_prec == 0 or best_rec == 0:
            return 0.0
        beta_sq = self.beta ** 2
        # recall-weighted F-measure over the best precision/recall pair
        return ((1 + beta_sq) * best_prec * best_rec) / float(best_rec + beta_sq * best_prec)

    def compute_score(self, gts, res):
        """
        Mean ROUGE-L over a dataset keyed by image id.

        :param gts: dict : reference sentences per image id
        :param res: dict : single-candidate lists per image id (same keys)
        :returns: (float mean score, np.ndarray of per-image scores)
        """
        assert(gts.keys() == res.keys())
        scores = []
        for img_id in gts.keys():
            hypo = res[img_id]
            ref = gts[img_id]
            scores.append(self.calc_score(hypo, ref))
            # sanity checks on input structure
            assert(type(hypo) is list)
            assert(len(hypo) == 1)
            assert(type(ref) is list)
            assert(len(ref) > 0)
        per_image = np.array(scores)
        return np.mean(per_image), per_image

    def method(self):
        return "Rouge"
# if __name__ == "__main__":
#
# cand_1 = "A boy picks an apple tree and places it into bags."
# cand_2 = "Two girls pick many red apples from trees and place them in a large bag."
# ref = "A boy picks an apple from a tree and places it into bags."
# concepts = ["pick", "apple", "tree", "place", "bag"]
#
#
# rouge = Rouge()
# print rouge.calc_score([cand_1], ref) | 34.767241 | 123 | 0.604513 |
import numpy as np
import pdb
def my_lcs(string, sub):
    """Length of the longest common subsequence of two token lists.

    :param string: list of str - tokens split on whitespace
    :param sub: list of str - the (nominally) shorter token list
    :returns: int - LCS length only; the subsequence itself is not recovered
    """
    # ensure `sub` is the shorter sequence
    if(len(string)< len(sub)):
        sub, string = string, sub
    # lengths[i][j] = LCS length of string[:i] and sub[:j]
    lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
    for j in range(1,len(sub)+1):
        for i in range(1,len(string)+1):
            if(string[i-1] == sub[j-1]):
                lengths[i][j] = lengths[i-1][j-1] + 1
            else:
                lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
    return lengths[len(string)][len(sub)]
class Rouge():
    """Computes the ROUGE-L metric for candidate vs. reference sentences."""
    def __init__(self):
        # beta weights recall over precision in the F-measure
        self.beta = 1.2
    def calc_score(self, candidate, refs):
        """ROUGE-L for a single-element candidate list against its references."""
        assert(len(candidate)==1)
        assert(len(refs)>0)
        prec = []
        rec = []
        token_c = candidate[0].split(" ")
        for reference in refs:
            token_r = reference.split(" ")
            lcs = my_lcs(token_r, token_c)
            prec.append(lcs/float(len(token_c)))
            rec.append(lcs/float(len(token_r)))
        prec_max = max(prec)
        rec_max = max(rec)
        if(prec_max!=0 and rec_max !=0):
            # recall-weighted F-measure over the best precision/recall pair
            score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
        else:
            score = 0.0
        return score
    def compute_score(self, gts, res):
        """Mean ROUGE-L over {image_id: [sentence]} dicts with matching keys.

        :returns: (float mean score, np.ndarray of per-image scores)
        """
        assert(gts.keys() == res.keys())
        imgIds = gts.keys()
        score = []
        for id in imgIds:
            hypo = res[id]
            ref = gts[id]
            score.append(self.calc_score(hypo, ref))
            # sanity checks on input structure
            assert(type(hypo) is list)
            assert(len(hypo) == 1)
            assert(type(ref) is list)
            assert(len(ref) > 0)
        average_score = np.mean(np.array(score))
        return average_score, np.array(score)
    def method(self):
        return "Rouge"
| true | true |
f73b486dbb5003f07d093a2a0505b70af55d1f5c | 432 | py | Python | data/gunicorn.conf.py | Fenthick/sla_dashboard_webapp | fe7409b916f2c0504dd8fd097a2f6c5ba5f77b60 | [
"Apache-2.0"
] | null | null | null | data/gunicorn.conf.py | Fenthick/sla_dashboard_webapp | fe7409b916f2c0504dd8fd097a2f6c5ba5f77b60 | [
"Apache-2.0"
] | null | null | null | data/gunicorn.conf.py | Fenthick/sla_dashboard_webapp | fe7409b916f2c0504dd8fd097a2f6c5ba5f77b60 | [
"Apache-2.0"
] | null | null | null | # For more settings, see: https://docs.gunicorn.org/en/stable/settings.html
import multiprocessing
# Gunicorn obtains the WSGI callable by evaluating main:run()
wsgi_app = "main:run()"
# a single worker process; concurrency comes from the threads setting below
workers = 1
worker_connections = 100
# listen on all interfaces, port 8050
bind = ":8050"
# seconds before an unresponsive worker is killed and restarted
timeout = 30
# Worker is changed to prevent worker timeouts
# See: https://github.com/benoitc/gunicorn/issues/1801#issuecomment-585886471
worker_class = "gthread"
threads = 2 * multiprocessing.cpu_count() + 1 # this formula is suggested in gunicorn docs
| 30.857143 | 91 | 0.761574 |
import multiprocessing
wsgi_app = "main:run()"
workers = 1
worker_connections = 100
bind = ":8050"
timeout = 30
d"
threads = 2 * multiprocessing.cpu_count() + 1
| true | true |
f73b48a4775565b9308c93076603d2d095dda06f | 1,050 | py | Python | doc/tutorials/examples/gateways/appengine/demo/simplejson/tests/test_unicode.py | elmordo/Py3AMF | ac12211459d6e11de3fb4f03a43bc0e688c6c1f6 | [
"MIT"
] | 87 | 2015-01-25T14:54:00.000Z | 2021-11-16T13:12:40.000Z | doc/tutorials/examples/gateways/appengine/demo/simplejson/tests/test_unicode.py | thijstriemstra/pyamf | d13915dfc68d06eb69ffc3e4e2a23257383568cc | [
"MIT"
] | 36 | 2015-01-05T01:24:59.000Z | 2021-09-15T20:40:33.000Z | doc/tutorials/examples/gateways/appengine/demo/simplejson/tests/test_unicode.py | thijstriemstra/pyamf | d13915dfc68d06eb69ffc3e4e2a23257383568cc | [
"MIT"
] | 37 | 2015-01-04T03:31:28.000Z | 2022-01-20T04:38:49.000Z | import simplejson as S
def test_encoding1():
    """A unicode string and its UTF-8 byte string must encode to identical
    JSON via JSONEncoder(encoding=...) (Python 2-era simplejson API)."""
    encoder = S.JSONEncoder(encoding='utf-8')
    u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
    s = u.encode('utf-8')
    ju = encoder.encode(u)
    js = encoder.encode(s)
    assert ju == js
def test_encoding2():
    """Same as test_encoding1 but via the dumps(encoding=...) shortcut."""
    u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
    s = u.encode('utf-8')
    ju = S.dumps(u, encoding='utf-8')
    js = S.dumps(s, encoding='utf-8')
    assert ju == js
def test_big_unicode_encode():
    """An astral-plane codepoint is emitted as a \\uXXXX surrogate pair,
    even when ensure_ascii=False (simplejson-specific behavior)."""
    u = u'\U0001d120'
    assert S.dumps(u) == '"\\ud834\\udd20"'
    assert S.dumps(u, ensure_ascii=False) == '"\\ud834\\udd20"'
def test_big_unicode_decode():
    """Decoding accepts an astral character both raw and as a surrogate-pair
    escape, producing the same unicode string."""
    u = u'z\U0001d120x'
    assert S.loads('"' + u + '"') == u
    assert S.loads('"z\\ud834\\udd20x"') == u
def test_unicode_decode():
    """Round-trip every BMP codepoint below the surrogate range through a
    \\uXXXX escape."""
    # NOTE(review): `unichr` is Python 2 only — this file predates Python 3
    for i in range(0, 0xd7ff):
        u = unichr(i)
        json = '"\\u%04x"' % (i,)
        res = S.loads(json)
        assert res == u, 'S.loads(%r) != %r got %r' % (json, u, res)
if __name__ == '__main__':
test_unicode_decode()
| 28.378378 | 69 | 0.58381 | import simplejson as S
def test_encoding1():
    """A unicode string and its UTF-8 byte string must encode to identical
    JSON via JSONEncoder(encoding=...) (Python 2-era simplejson API)."""
    encoder = S.JSONEncoder(encoding='utf-8')
    u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
    s = u.encode('utf-8')
    ju = encoder.encode(u)
    js = encoder.encode(s)
    assert ju == js
def test_encoding2():
    """Same as test_encoding1 but via the dumps(encoding=...) shortcut."""
    u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
    s = u.encode('utf-8')
    ju = S.dumps(u, encoding='utf-8')
    js = S.dumps(s, encoding='utf-8')
    assert ju == js
def test_big_unicode_encode():
    """An astral-plane codepoint is emitted as a \\uXXXX surrogate pair,
    even when ensure_ascii=False (simplejson-specific behavior)."""
    u = u'\U0001d120'
    assert S.dumps(u) == '"\\ud834\\udd20"'
    assert S.dumps(u, ensure_ascii=False) == '"\\ud834\\udd20"'
def test_big_unicode_decode():
    """Decoding accepts an astral character both raw and as a surrogate-pair
    escape, producing the same unicode string."""
    u = u'z\U0001d120x'
    assert S.loads('"' + u + '"') == u
    assert S.loads('"z\\ud834\\udd20x"') == u
def test_unicode_decode():
    """Round-trip every BMP codepoint below the surrogate range through a
    \\uXXXX escape."""
    # NOTE(review): `unichr` is Python 2 only — this file predates Python 3
    for i in range(0, 0xd7ff):
        u = unichr(i)
        json = '"\\u%04x"' % (i,)
        res = S.loads(json)
        assert res == u, 'S.loads(%r) != %r got %r' % (json, u, res)
if __name__ == '__main__':
test_unicode_decode()
| true | true |
f73b48aee603d440f4c06e828cbd0f393ff152ca | 10,179 | py | Python | custom_components/emporia_vue/__init__.py | nkm8/ha-emporia-vue | e21924e9edf19206c481f9cc4098388bce896122 | [
"MIT"
] | null | null | null | custom_components/emporia_vue/__init__.py | nkm8/ha-emporia-vue | e21924e9edf19206c481f9cc4098388bce896122 | [
"MIT"
] | null | null | null | custom_components/emporia_vue/__init__.py | nkm8/ha-emporia-vue | e21924e9edf19206c481f9cc4098388bce896122 | [
"MIT"
] | null | null | null | """The Emporia Vue integration."""
import asyncio
from datetime import datetime, timedelta
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
import logging
from pyemvue import PyEmVue
from pyemvue.device import VueDeviceChannel
from pyemvue.enums import Scale
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from .const import DOMAIN, VUE_DATA, ENABLE_1M, ENABLE_1D, ENABLE_1MON
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(ENABLE_1M, default=True): cv.boolean,
vol.Optional(ENABLE_1D, default=True): cv.boolean,
vol.Optional(ENABLE_1MON, default=True): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "switch"]
device_gids = []
device_information = {}
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Emporia Vue component from YAML configuration.

    Imports any ``emporia_vue:`` YAML section into a config entry via the
    import flow; returns True immediately when no YAML section is present.
    """
    hass.data.setdefault(DOMAIN, {})
    conf = config.get(DOMAIN)
    if not conf:
        return True
    # Fix: SOURCE_IMPORT was previously referenced without being imported,
    # raising NameError whenever a YAML section was present.
    from homeassistant.config_entries import SOURCE_IMPORT
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_IMPORT},
            data={
                CONF_EMAIL: conf[CONF_EMAIL],
                CONF_PASSWORD: conf[CONF_PASSWORD],
                ENABLE_1M: conf[ENABLE_1M],
                ENABLE_1D: conf[ENABLE_1D],
                ENABLE_1MON: conf[ENABLE_1MON],
            },
        )
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Emporia Vue from a config entry.

    Authenticates against the Emporia cloud API, discovers devices and
    channels, and creates one DataUpdateCoordinator per enabled polling
    scale: a 1-minute coordinator and a shared hourly coordinator for
    day/month totals.  Raises ConfigEntryNotReady on transient failures so
    Home Assistant retries the setup.
    """
    global device_gids
    global device_information
    device_gids = []
    device_information = {}
    entry_data = entry.data
    email = entry_data[CONF_EMAIL]
    password = entry_data[CONF_PASSWORD]
    vue = PyEmVue()
    loop = asyncio.get_event_loop()
    try:
        result = await loop.run_in_executor(None, vue.login, email, password)
        if not result:
            raise Exception("Could not authenticate with Emporia API")
    except Exception:
        _LOGGER.error("Could not authenticate with Emporia API")
        return False

    scales_1hr = []
    # Fix: both coordinators are pre-initialized; previously `coordinator_1hr`
    # was unbound (NameError at the hass.data assignment below) when both the
    # 1-day and 1-month scales were disabled in the config entry.
    coordinator_1min = None
    coordinator_1hr = None
    try:
        devices = await loop.run_in_executor(None, vue.get_devices)
        total_channels = 0
        for d in devices:
            total_channels += len(d.channels)
        _LOGGER.info(
            "Found {0} Emporia devices with {1} total channels".format(
                len(devices), total_channels
            )
        )
        for device in devices:
            if not device.device_gid in device_gids:
                device_gids.append(device.device_gid)
                await loop.run_in_executor(None, vue.populate_device_properties, device)
                device_information[device.device_gid] = device
            else:
                # the same gid can be reported more than once; merge the extra
                # channels into the record discovered first
                device_information[device.device_gid].channels += device.channels

        async def async_update_data_1min():
            """Fetch minute-scale usage for all devices (1-minute poll)."""
            return await update_sensors(vue, [Scale.MINUTE.value])

        async def async_update_data_1hr():
            """Fetch day/month-scale usage for all devices (hourly poll)."""
            return await update_sensors(vue, scales_1hr)

        if ENABLE_1D not in entry_data or entry_data[ENABLE_1D]:
            scales_1hr.append(Scale.DAY.value)
        if ENABLE_1MON not in entry_data or entry_data[ENABLE_1MON]:
            scales_1hr.append(Scale.MONTH.value)

        if ENABLE_1M not in entry_data or entry_data[ENABLE_1M]:
            coordinator_1min = DataUpdateCoordinator(
                hass,
                _LOGGER,
                # Name of the data. For logging purposes.
                name="sensor",
                update_method=async_update_data_1min,
                # Polling interval. Will only be polled if there are subscribers.
                update_interval=timedelta(minutes=1),
            )
            await coordinator_1min.async_config_entry_first_refresh()
            _LOGGER.info(f"1min Update data: {coordinator_1min.data}")
        if scales_1hr:
            coordinator_1hr = DataUpdateCoordinator(
                hass,
                _LOGGER,
                # Name of the data. For logging purposes.
                name="sensor",
                update_method=async_update_data_1hr,
                # Polling interval. Will only be polled if there are subscribers.
                update_interval=timedelta(hours=1),
            )
            await coordinator_1hr.async_config_entry_first_refresh()
            _LOGGER.info(f"1hr Update data: {coordinator_1hr.data}")
    except Exception as err:
        _LOGGER.warn(f"Exception while setting up Emporia Vue. Will retry. {err}")
        raise ConfigEntryNotReady(
            f"Exception while setting up Emporia Vue. Will retry. {err}"
        )

    hass.data[DOMAIN][entry.entry_id] = {
        VUE_DATA: vue,
        "coordinator_1min": coordinator_1min,
        "coordinator_1hr": coordinator_1hr,
    }

    try:
        for component in PLATFORMS:
            hass.async_create_task(
                hass.config_entries.async_forward_entry_setup(entry, component)
            )
    except Exception as err:
        _LOGGER.warn(f"Error setting up platforms: {err}")
        raise ConfigEntryNotReady(f"Error setting up platforms: {err}")

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry, dropping its cached coordinators on success."""
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(entry, platform)
        for platform in PLATFORMS
    ]
    results = await asyncio.gather(*unload_tasks)
    unloaded = all(results)
    if unloaded:
        # Only discard the stored vue client/coordinators once every
        # platform unloaded cleanly.
        hass.data[DOMAIN].pop(entry.entry_id)
    return unloaded
def _ensure_mains_channel(info, channel):
    """Ensure *info* carries a channel entry for a synthetic mains channel.

    The "MainsFromGrid"/"MainsToGrid" channels reported by the usage API are
    not always present in the device's channel list; when missing, clone the
    multiplier and type gid from the "1,2,3" aggregate channel.
    """
    channel_123 = None
    for existing in info.channels:
        if existing.channel_num == channel.channel_num:
            # Already known; nothing to do.
            return
        if existing.channel_num == "1,2,3":
            channel_123 = existing
    if channel_123 is None:
        # Bug fix: previously channel_123 was dereferenced unconditionally,
        # raising AttributeError when no "1,2,3" channel existed.
        _LOGGER.warning(
            "No '1,2,3' channel found for device %s; cannot add channel %s",
            channel.device_gid,
            channel.channel_num,
        )
        return
    _LOGGER.info(
        "Adding channel for channel %s-%s", channel.device_gid, channel.channel_num
    )
    info.channels.append(
        VueDeviceChannel(
            gid=channel.device_gid,
            name=None,
            channelNum=channel.channel_num,
            channelMultiplier=channel_123.channel_multiplier,
            channelTypeGid=channel_123.channel_type_gid,
        )
    )


async def update_sensors(vue, scales):
    """Poll the Emporia API for usage at each *scale* and build a lookup table.

    Returns a dict keyed by "<device_gid>-<channel_num>-<scale>" whose values
    hold the usage (converted to a watt rate for sub-hour scales) plus the
    cached device info.  Raises UpdateFailed when the API returns no channels
    or any other error occurs.
    """
    # Note: asyncio.TimeoutError and aiohttp.ClientError are already
    # handled by the data update coordinator.
    try:
        data = {}
        loop = asyncio.get_event_loop()
        for scale in scales:
            now = datetime.utcnow()
            # pyemvue is synchronous; run it in the default executor.
            channels = await loop.run_in_executor(
                None, vue.get_devices_usage, device_gids, now, scale
            )
            if not channels:
                # The API intermittently returns nothing; retry once.
                # (Logger.warn is a deprecated alias; use warning.)
                _LOGGER.warning(
                    "No channels found during update for scale %s. Retrying...", scale
                )
                channels = await loop.run_in_executor(
                    None, vue.get_devices_usage, device_gids, now, scale
                )
            if not channels:
                raise UpdateFailed(f"No channels found during update for scale {scale}")
            for channel in channels:
                # Renamed from `id` to avoid shadowing the builtin.
                channel_id = "{0}-{1}-{2}".format(
                    channel.device_gid, channel.channel_num, scale
                )
                usage = round(channel.usage, 3)
                # Sub-hour scales report kWh per interval; convert to a watt rate.
                if scale == Scale.MINUTE.value:
                    usage = round(60 * 1000 * channel.usage)
                elif scale == Scale.SECOND.value:
                    usage = round(3600 * 1000 * channel.usage)
                elif scale == Scale.MINUTES_15.value:
                    # This might never be used but, for safety, convert to rate.
                    usage = round(4 * 1000 * channel.usage)
                info = None
                if channel.device_gid in device_information:
                    info = device_information[channel.device_gid]
                    if channel.channel_num in ("MainsFromGrid", "MainsToGrid"):
                        _ensure_mains_channel(info, channel)
                data[channel_id] = {
                    "device_gid": channel.device_gid,
                    "channel_num": channel.channel_num,
                    "usage": usage,
                    "scale": scale,
                    "info": info,
                }
        return data
    except UpdateFailed:
        # Don't re-wrap (and mislog as a communication error) failures we
        # raised ourselves above.
        raise
    except Exception as err:
        _LOGGER.error(f"Error communicating with Emporia API: {err}")
        raise UpdateFailed(f"Error communicating with Emporia API: {err}")
| 37.840149 | 108 | 0.558896 | import asyncio
from datetime import datetime, timedelta
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
import logging
from pyemvue import PyEmVue
from pyemvue.device import VueDeviceChannel
from pyemvue.enums import Scale
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from .const import DOMAIN, VUE_DATA, ENABLE_1M, ENABLE_1D, ENABLE_1MON
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(ENABLE_1M, default=True): cv.boolean,
vol.Optional(ENABLE_1D, default=True): cv.boolean,
vol.Optional(ENABLE_1MON, default=True): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "switch"]
device_gids = []
device_information = {}
async def async_setup(hass: HomeAssistant, config: dict):
hass.data.setdefault(DOMAIN, {})
conf = config.get(DOMAIN)
if not conf:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_EMAIL: conf[CONF_EMAIL],
CONF_PASSWORD: conf[CONF_PASSWORD],
ENABLE_1M: conf[ENABLE_1M],
ENABLE_1D: conf[ENABLE_1D],
ENABLE_1MON: conf[ENABLE_1MON],
},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
global device_gids
global device_information
device_gids = []
device_information = {}
entry_data = entry.data
email = entry_data[CONF_EMAIL]
password = entry_data[CONF_PASSWORD]
vue = PyEmVue()
loop = asyncio.get_event_loop()
try:
result = await loop.run_in_executor(None, vue.login, email, password)
if not result:
raise Exception("Could not authenticate with Emporia API")
except Exception:
_LOGGER.error("Could not authenticate with Emporia API")
return False
scales_1hr = []
try:
devices = await loop.run_in_executor(None, vue.get_devices)
total_channels = 0
for d in devices:
total_channels += len(d.channels)
_LOGGER.info(
"Found {0} Emporia devices with {1} total channels".format(
len(devices), total_channels
)
)
for device in devices:
if not device.device_gid in device_gids:
device_gids.append(device.device_gid)
await loop.run_in_executor(None, vue.populate_device_properties, device)
device_information[device.device_gid] = device
else:
device_information[device.device_gid].channels += device.channels
async def async_update_data_1min():
return await update_sensors(vue, [Scale.MINUTE.value])
async def async_update_data_1hr():
return await update_sensors(vue, scales_1hr)
if ENABLE_1D not in entry_data or entry_data[ENABLE_1D]:
scales_1hr.append(Scale.DAY.value)
if ENABLE_1MON not in entry_data or entry_data[ENABLE_1MON]:
scales_1hr.append(Scale.MONTH.value)
coordinator_1min = None
if ENABLE_1M not in entry_data or entry_data[ENABLE_1M]:
coordinator_1min = DataUpdateCoordinator(
hass,
_LOGGER,
name="sensor",
update_method=async_update_data_1min,
update_interval=timedelta(minutes=1),
)
await coordinator_1min.async_config_entry_first_refresh()
_LOGGER.info(f"1min Update data: {coordinator_1min.data}")
if scales_1hr:
coordinator_1hr = DataUpdateCoordinator(
hass,
_LOGGER,
name="sensor",
update_method=async_update_data_1hr,
update_interval=timedelta(hours=1),
)
await coordinator_1hr.async_config_entry_first_refresh()
_LOGGER.info(f"1hr Update data: {coordinator_1hr.data}")
except Exception as err:
_LOGGER.warn(f"Exception while setting up Emporia Vue. Will retry. {err}")
raise ConfigEntryNotReady(
f"Exception while setting up Emporia Vue. Will retry. {err}"
)
hass.data[DOMAIN][entry.entry_id] = {
VUE_DATA: vue,
"coordinator_1min": coordinator_1min,
"coordinator_1hr": coordinator_1hr
}
try:
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
except Exception as err:
_LOGGER.warn(f"Error setting up platforms: {err}")
raise ConfigEntryNotReady(f"Error setting up platforms: {err}")
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def update_sensors(vue, scales):
try:
data = {}
loop = asyncio.get_event_loop()
for scale in scales:
now = datetime.utcnow()
channels = await loop.run_in_executor(
None, vue.get_devices_usage, device_gids, now, scale
)
if not channels:
_LOGGER.warn(
f"No channels found during update for scale {scale}. Retrying..."
)
channels = await loop.run_in_executor(
None, vue.get_devices_usage, device_gids, now, scale
)
if channels:
for channel in channels:
id = "{0}-{1}-{2}".format(
channel.device_gid, channel.channel_num, scale
)
usage = round(channel.usage, 3)
if scale == Scale.MINUTE.value:
usage = round(
60 * 1000 * channel.usage
)
elif scale == Scale.SECOND.value:
usage = round(3600 * 1000 * channel.usage)
elif scale == Scale.MINUTES_15.value:
usage = round(
4 * 1000 * channel.usage
)
info = None
if channel.device_gid in device_information:
info = device_information[channel.device_gid]
if channel.channel_num in ["MainsFromGrid", "MainsToGrid"]:
found = False
channel_123 = None
for channel2 in info.channels:
if channel2.channel_num == channel.channel_num:
found = True
break
elif channel2.channel_num == "1,2,3":
channel_123 = channel2
if not found:
_LOGGER.info(
f"Adding channel for channel {channel.device_gid}-{channel.channel_num}"
)
info.channels.append(
VueDeviceChannel(
gid=channel.device_gid,
name=None,
channelNum=channel.channel_num,
channelMultiplier=channel_123.channel_multiplier,
channelTypeGid=channel_123.channel_type_gid,
)
)
data[id] = {
"device_gid": channel.device_gid,
"channel_num": channel.channel_num,
"usage": usage,
"scale": scale,
"info": info,
}
else:
raise UpdateFailed(f"No channels found during update for scale {scale}")
return data
except Exception as err:
_LOGGER.error(f"Error communicating with Emporia API: {err}")
raise UpdateFailed(f"Error communicating with Emporia API: {err}")
| true | true |
f73b4969d19bb8629ca74db252d01808d03ce2f3 | 838 | py | Python | tests/system/verbs/catkin_init/test_init.py | lucasw/catkin_tools | 0918ea5865014053bd5786b629001d70e02a314c | [
"Apache-2.0"
] | 1 | 2020-07-26T12:55:29.000Z | 2020-07-26T12:55:29.000Z | tests/system/verbs/catkin_init/test_init.py | lucasw/catkin_tools | 0918ea5865014053bd5786b629001d70e02a314c | [
"Apache-2.0"
] | null | null | null | tests/system/verbs/catkin_init/test_init.py | lucasw/catkin_tools | 0918ea5865014053bd5786b629001d70e02a314c | [
"Apache-2.0"
] | null | null | null | import os
from ....utils import catkin_success
from ....utils import in_temporary_directory
from ....utils import redirected_stdio
from ....workspace_assertions import assert_no_warnings
from ....workspace_assertions import assert_warning_message
from ....workspace_assertions import assert_workspace_initialized
@in_temporary_directory
def test_init_local_no_src():
    """`catkin init` in an empty directory warns about the missing source space."""
    with redirected_stdio() as (out, err):
        assert catkin_success(['init'])
    assert_warning_message(out, 'Source space .+ does not yet exist')
    assert_workspace_initialized('.')
@in_temporary_directory
def test_init_local_empty_src():
    """`catkin init` with an existing (empty) src dir emits no warnings."""
    os.mkdir(os.path.join(os.getcwd(), 'src'))
    with redirected_stdio() as (out, err):
        assert catkin_success(['init'])
    assert_no_warnings(out)
    assert_workspace_initialized('.')
| 31.037037 | 73 | 0.747017 | import os
from ....utils import catkin_success
from ....utils import in_temporary_directory
from ....utils import redirected_stdio
from ....workspace_assertions import assert_no_warnings
from ....workspace_assertions import assert_warning_message
from ....workspace_assertions import assert_workspace_initialized
@in_temporary_directory
def test_init_local_no_src():
with redirected_stdio() as (out, err):
assert catkin_success(['init'])
assert_warning_message(out, 'Source space .+ does not yet exist')
assert_workspace_initialized('.')
@in_temporary_directory
def test_init_local_empty_src():
cwd = os.getcwd()
os.mkdir(os.path.join(cwd, 'src'))
with redirected_stdio() as (out, err):
assert catkin_success(['init'])
assert_no_warnings(out)
assert_workspace_initialized('.')
| true | true |
f73b4b3e6edad5363e7eb09c70bb78ae1f39dea3 | 149,485 | py | Python | src/azure-cli/azure/cli/command_modules/acs/custom.py | avoidik/azure-cli | 93a50bb7beb93834978e57141dd07572b98809ac | [
"MIT"
] | 1 | 2021-09-07T18:53:01.000Z | 2021-09-07T18:53:01.000Z | src/azure-cli/azure/cli/command_modules/acs/custom.py | andyliuliming/azure-cli | fd1c96077c8dd57b168439b6280009d84d40c7f5 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/custom.py | andyliuliming/azure-cli | fd1c96077c8dd57b168439b6280009d84d40c7f5 | [
"MIT"
] | 1 | 2020-11-12T01:49:27.000Z | 2020-11-12T01:49:27.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from ._helpers import _populate_api_server_access_profile, _set_load_balancer_sku, _set_vm_set_type
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedCluster
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import AgentPool
from azure.mgmt.containerservice.v2019_08_01.models import ResourceReference
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Locate *binary* on the PATH; return its full path or None if absent."""
    if platform.system() == 'Windows':
        binary = binary + '.exe'
        separator = ';'
    else:
        separator = ':'
    for directory in os.getenv('PATH').split(separator):
        candidate = os.path.join(directory, binary)
        # isfile implies existence; also require the execute bit.
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
def wait_then_open(url):
    """
    Wait for *url* to become reachable (polling up to 9 times, one second
    apart), then open it in a new browser tab.  Useful for waiting for a
    local proxy to come up before opening its UI.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            # Not up yet -- sleep briefly and poll again.  (Bug fix: the
            # previous code executed `break` unconditionally after the
            # try/except, so it never actually retried.)
            time.sleep(1)
        else:
            break
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawn a daemon thread that waits for *url* to come up and then opens it
    in the browser, without blocking the caller.
    """
    # Bug fix: pass args as a tuple; the original used a one-element set
    # literal `({url})`, which only worked by accident of set iteration.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """
    Open a browser to the web interface for the cluster orchestrator.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: If set, a path to an SSH key to use; only applies to DC/OS.
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, cluster_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch browse to the orchestrator-specific implementation."""
    orchestrator = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    normalized = str(orchestrator).lower()
    # A cluster is treated as Kubernetes if the profile says so (string or
    # enum) or if a custom profile declares the kubernetes orchestrator.
    is_kubernetes = (
        normalized == 'kubernetes' or
        orchestrator == ContainerServiceOrchestratorTypes.kubernetes or
        (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'))  # pylint: disable=no-member
    if is_kubernetes:
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    if normalized == 'dcos' or orchestrator == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Launch a proxy and browse the Kubernetes web UI.

    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, cluster_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Write a fresh kubeconfig for the cluster and run `kubectl proxy` against it."""
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    kubeconfig = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Start from a clean config file so stale credentials are never reused.
    if os.path.exists(kubeconfig):
        os.remove(kubeconfig)
    _k8s_get_credentials_internal(name, acs_info, kubeconfig, ssh_key_file, False)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    # Blocks until the user interrupts the proxy.
    subprocess.call(["kubectl", "--kubeconfig", kubeconfig, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Create an SSH tunnel to the Azure container service and open the
    Mesosphere DC/OS dashboard in the browser.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use.
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(cluster_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH to the DC/OS master, start an octarine proxy there, tunnel it to a
    local port, set it as the local HTTP proxy, and open the dashboard.

    Blocks until the user interrupts the tunnel; the HTTP proxy setting is
    restored on exit.
    """
    if not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
    # octarine ships with DC/OS and provides the HTTP proxy into the cluster.
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
    # Start the proxy server on the master under a random id so concurrent
    # sessions don't collide.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # Parse the output to get the remote PORT the proxy is listening on.
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Set the local HTTP proxy so browser traffic flows through the tunnel.
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        # Always restore the proxy settings, even on interrupt/error.
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the CLI matching the cluster's orchestrator (kubectl or dcos).

    :param install_location: Destination path for the downloaded binary.
    :param client_version: Optional client version; each installer has its own default.
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    # Bug fix: `cmd` is a required positional parameter of both installers and
    # was previously omitted, causing a TypeError at runtime.
    if orchestrator_type == 'kubernetes':
        return k8s_install_cli(cmd, **kwargs)
    if orchestrator_type == 'dcos':
        return dcos_install_cli(cmd, **kwargs)
    raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
    """Build an SSL context appropriate for the running interpreter/platform."""
    # Old interpreters and Cloud Shell on Windows need an explicitly
    # constructed (more permissive) context.
    needs_legacy_context = (
        sys.version_info < (3, 4) or
        (in_cloud_console() and platform.system() == 'Windows'))
    if needs_legacy_context:
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _urlretrieve(url, filename):
    """Download *url* and write the response body to *filename*."""
    response = urlopen(url, context=_ssl_context())
    payload = response.read()
    with open(filename, "wb") as out_file:
        out_file.write(payload)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Download the dcos command line client from Mesosphere.

    :param install_location: Full destination path for the binary (required).
    :param client_version: DC/OS CLI version to download.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    # TODO Support ARM CPU here
    platform_info = {
        'Windows': ('windows', 'dcos.exe'),
        'Linux': ('linux', 'dcos'),
        'Darwin': ('darwin', 'dcos'),
    }.get(system)
    if platform_info is None:
        # Bug fix: the previous message ("Proxy server ... does not exist on
        # the cluster") was copy-pasted from the DC/OS browse code path.
        raise CLIError("The current platform '{}' is not supported for install-cli.".format(system))
    file_url = base_url.format(platform_info[0], client_version, platform_info[1])
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
    """Install kubectl, a command-line interface for Kubernetes clusters.

    :param client_version: kubectl version to install, or 'latest' to resolve
        the current stable release from the download mirror.
    :param install_location: Full path (directory + file name) to write the
        kubectl binary to; the directory is created if needed.
    """
    source_url = "https://storage.googleapis.com/kubernetes-release/release"
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        # Use the Azure China mirror, which is reachable from that cloud.
        source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # Bug fix: the previous message ("Proxy server ... does not exist on
        # the cluster") was copy-pasted from the DC/OS browse code path.
        raise CLIError("The current platform '{}' is not supported for install-cli.".format(system))
    logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError('Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           '    1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           '    2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                          location=None, service_principal=None, client_secret=None,
                          chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
    """Deploy the ACI connector chart to an AKS cluster.

    Thin wrapper around the shared install/upgrade helper with the Helm
    verb fixed to "install".
    """
    _k8s_install_or_upgrade_connector(
        "install", cmd, client, name, resource_group_name, connector_name,
        location, service_principal, client_secret, chart_url, os_type,
        image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                          location=None, service_principal=None, client_secret=None,
                          chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
    """Upgrade an existing ACI connector deployment on an AKS cluster.

    Thin wrapper around the shared install/upgrade helper with the Helm
    verb fixed to "upgrade".
    """
    _k8s_install_or_upgrade_connector(
        "upgrade", cmd, client, name, resource_group_name, connector_name,
        location, service_principal, client_secret, chart_url, os_type,
        image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
                                      location, service_principal, client_secret, chart_url, os_type,
                                      image_tag, aci_resource_group):
    """Shared implementation for installing/upgrading the ACI connector.

    *helm_cmd* is either "install" or "upgrade" and is forwarded to the
    per-OS Helm deployment helper.  Deploys a Linux and/or Windows connector
    depending on *os_type* ('Linux', 'Windows' or 'Both').
    """
    from subprocess import PIPE, Popen
    instance = client.get(resource_group_name, name)
    helm_not_installed = 'Helm not detected, please verify if it is installed.'
    url_chart = chart_url
    if image_tag is None:
        image_tag = 'latest'
    # Check if Helm is installed locally (spawning it is the cheapest probe)
    try:
        Popen(["helm"], stdout=PIPE, stderr=PIPE)
    except OSError:
        raise CLIError(helm_not_installed)
    # If SPN is specified, the secret should also be specified
    if service_principal is not None and client_secret is None:
        raise CLIError('--client-secret must be specified when --service-principal is specified')
    # Validate if the RG exists (raises if it does not)
    rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
    # Auto assign the location from the resource group when not given
    if location is None:
        location = rg_location
    norm_location = location.replace(' ', '').lower()
    # Validate the location against the ACI available regions
    _validate_aci_location(norm_location)
    # Get the credentials from the AKS instance into a throwaway kubeconfig
    # so the helm calls below target the right cluster
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Get the TenantID from the active login
    profile = Profile(cli_ctx=cmd.cli_ctx)
    _, _, tenant_id = profile.get_login_credentials()
    # Check if we want the linux connector
    if os_type.lower() in ['linux', 'both']:
        _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                               client_secret, subscription_id, tenant_id, aci_resource_group,
                                               norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
    # Check if we want the windows connector
    if os_type.lower() in ['windows', 'both']:
        _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                               client_secret, subscription_id, tenant_id, aci_resource_group,
                                               norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                           client_secret, subscription_id, tenant_id, aci_resource_group,
                                           norm_location, os_type, use_rbac, masterFqdn):
    """Run `helm install`/`helm upgrade` for one OS flavor of the ACI connector.

    Builds the chart's `--set` value string from the supplied credentials and
    location, then shells out to helm with *helm_cmd* ("install" or "upgrade").
    The release/node names encode connector name, OS and region so multiple
    connectors can coexist in one cluster.
    """
    rbac_install = "true" if use_rbac else "false"
    node_taint = 'azure.com/aci'
    helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
    node_name = 'virtual-kubelet-' + helm_release_name
    k8s_master = 'https://{}'.format(masterFqdn)
    logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
    try:
        # Only append optional values when present so the chart defaults apply.
        values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
            node_name, node_taint, os_type, image_tag, rbac_install)
        if service_principal:
            values += ",env.azureClientId=" + service_principal
        if client_secret:
            values += ",env.azureClientKey=" + client_secret
        if subscription_id:
            values += ",env.azureSubscriptionId=" + subscription_id
        if tenant_id:
            values += ",env.azureTenantId=" + tenant_id
        if aci_resource_group:
            values += ",env.aciResourceGroup=" + aci_resource_group
        if norm_location:
            values += ",env.aciRegion=" + norm_location
        # Currently, we need to set the master FQDN.
        # This is temporary and we should remove it when possible
        values += ",env.masterUri=" + k8s_master
        if helm_cmd == "install":
            subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
        elif helm_cmd == "upgrade":
            subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                            location=None, graceful=False, os_type='Linux'):
    """Remove the ACI connector (Linux and/or Windows flavor) from an AKS cluster.

    When *graceful* is set, the virtual-kubelet node is drained before the
    Helm release is deleted.
    """
    from subprocess import PIPE, Popen
    helm_not_installed = "Error : Helm not detected, please verify if it is installed."
    # Check if Helm is installed locally (spawning it is the cheapest probe)
    try:
        Popen(["helm"], stdout=PIPE, stderr=PIPE)
    except OSError:
        raise CLIError(helm_not_installed)
    # Get the credentials from the AKS instance into a throwaway kubeconfig
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # Validate if the RG exists (raises if it does not)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    # Auto assign the location from the resource group when not given
    if location is None:
        location = rg_location
    norm_location = location.replace(' ', '').lower()
    # The release/node names below must mirror the ones used at install time.
    if os_type.lower() in ['linux', 'both']:
        helm_release_name = connector_name.lower() + '-linux-' + norm_location
        node_name = 'virtual-kubelet-' + helm_release_name
        _undeploy_connector(graceful, node_name, helm_release_name)
    if os_type.lower() in ['windows', 'both']:
        helm_release_name = connector_name.lower() + '-windows-' + norm_location
        node_name = 'virtual-kubelet-' + helm_release_name
        _undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
    """Remove a single connector Helm release and its virtual-kubelet node.

    With *graceful* set, the node is drained via kubectl first so that pods
    get rescheduled before the release is purged.
    """
    if graceful:
        logger.warning('Graceful option selected, will try to drain the node first')
        from subprocess import PIPE, Popen
        # kubectl is only required for the graceful drain path.
        try:
            Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
        except OSError:
            raise CLIError('Kubectl not detected, please verify if it is installed.')
        try:
            drain_output = subprocess.check_output(
                ['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
                universal_newlines=True)
        except subprocess.CalledProcessError as err:
            raise CLIError('Could not find the node, make sure you are using the correct'
                           ' --connector-name, --location and --os-type options: {}'.format(err))
        # Empty drain output means the node was not found at all.
        if not drain_output:
            raise CLIError('Could not find the node, make sure you'
                           ' are using the correct --os-type')
    logger.warning("Undeploying the '%s' using Helm", helm_release_name)
    try:
        subprocess.call(['helm', 'del', helm_release_name, '--purge'])
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
    try:
        subprocess.check_output(
            ['kubectl', 'delete', 'node', node_name],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not delete the node, make sure you are using the correct'
                       ' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application plus service principal, retrying while AAD propagates.

    :param rbac_client: graph rbac management client.
    :param name: display name for the new application.
    :param url: homepage / identifier URI for the application.
    :param client_secret: password credential for the application.
    :returns: the service principal's app id, or False when creation still
        fails after all retries.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # BUG FIX: the keyword was misspelled 'messsage', so the initial progress
    # message was silently dropped by the progress controller.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    # AAD replication can lag behind; retry with a linear backoff.
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        return False
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
    """Grant *role* to *service_principal*, retrying while AAD data propagates.

    :returns: True on success (or when the assignment already exists),
        False when every retry failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * attempt, total_val=1.0)
        assigned = False
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal, scope=scope)
            assigned = True
        except CloudError as ex:
            # An existing identical assignment counts as success.
            if ex.message == 'The role assignment already exists.':
                assigned = True
            else:
                logger.info(ex.message)
        except:  # pylint: disable=bare-except
            pass
        if assigned:
            break
        time.sleep(delay + delay * attempt)
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments, either by explicit ids or by assignee/role/scope filters.

    When no filter at all is given the user is prompted before wiping every
    assignment in the subscription (pass *yes* to skip the prompt).

    :raises CLIError: when ids are combined with other filter parameters.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # BUG FIX: 'assignee' was listed twice in this condition; once is enough.
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        from knack.prompting import prompt_y_n
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Remove the given role assignment, retrying while AAD data propagates.

    :returns: True on success, False when every retry failed.
    :raises CLIError: immediately, since CLI-level failures are not retryable.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * attempt, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
        except CLIError:
            raise
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * attempt)
            continue
        break
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments filtered by scope, assignee and/or role.

    :param scope: when set, assignments are listed at that scope (works
        beyond the subscription, e.g. management groups).
    :param assignee: principal identifier; resolved to an AAD object id.
    :param include_inherited: also keep assignments whose scope is a parent
        of the requested scope.
    :param include_groups: include assignments granted via group membership
        (uses the assignedTo() filter instead of a principalId match).
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # Post-filter by scope: keep everything when no scope was requested;
        # otherwise keep exact scope matches, plus parent-scope prefix matches
        # when include_inherited is set. Note 'and' binds tighter than 'or'.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the Azure regions where ACS is available, split by rollout stage."""
    regions = {}
    regions["productionRegions"] = regions_in_prod
    regions["previewRegions"] = regions_in_preview
    return regions
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                  master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                  master_first_consecutive_static_ip, master_storage_profile):
    """Build the ARM masterProfile section, merging user overrides over defaults."""
    defaults = {
        "count": int(master_count),
        "dnsPrefix": dns_name_prefix + 'mgmt',
    }
    if api_version == "2017-07-01":
        # The newer API surface exposes additional master knobs.
        defaults = _update_dict(defaults, {
            "count": int(master_count),
            "dnsPrefix": dns_name_prefix + 'mgmt',
            "vmSize": master_vm_size,
            "osDiskSizeGB": int(master_osdisk_size),
            "vnetSubnetID": master_vnet_subnet_id,
            "firstConsecutiveStaticIP": master_first_consecutive_static_ip,
            "storageProfile": master_storage_profile,
        })
    # A user-supplied profile wins over any default of the same key.
    if master_profile:
        return _update_dict(defaults, master_profile)
    return defaults
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the ARM agentPoolProfiles list, merging user overrides over defaults."""
    defaults = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    if api_version == "2017-07-01":
        # The newer API surface exposes additional agent-pool knobs.
        defaults = _update_dict(defaults, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(defaults, {"name": "agentpool0"})]
    # override agentPoolProfiles by using the passed in agent_profiles
    pools = []
    for idx, profile in enumerate(agent_profiles):
        # honor a user-specified dnsPrefix; otherwise suffix with the index
        # to avoid duplicate DNS names across pools
        merged = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, profile)
        pools.append(_update_dict(defaults, merged))
    return pools
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
    is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
    The concatenation of the domain name and the regionalized DNS zone
    make up the fully qualified domain name associated with the public
    IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
    public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
    in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
    applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
    <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`DeploymentExtended
    <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31" # 2017-01-31 applied to other locations
    if orchestrator_type.lower() == 'kubernetes':
        # Kubernetes requires a service principal; create or reuse one.
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    # Assemble a single-resource ARM template around the computed properties.
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    if service_principal is not None and client_secret is not None:
        # Pass the client secret as a secureString template parameter so it
        # never appears in plain text inside the template body.
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # Only retry errors consistent with SPN propagation delays.
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal/secret for *subscription_id* in the CLI config dir.

    The file is created (or truncated) with 0600 permissions since it holds
    a credential; existing entries for other subscriptions are preserved.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    full_config = load_service_principals(config_path=config_path) or {}
    full_config[subscription_id] = entry
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(full_config, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path)
    return all_principals.get(subscription_id) if all_principals else None
def load_service_principals(config_path):
    """Load the cached service-principal map from *config_path*.

    :returns: the parsed dict, or None when the file is missing or
        unreadable/corrupt (callers treat that as "no cached credentials").
    """
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as f:
            return shell_safe_json_parse(f.read())
    except Exception:  # pylint: disable=broad-except
        # BUG FIX: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed; any IO/parse error still
        # degrades to "no cached credentials".
        return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Submit (or just validate) an incremental ARM deployment of *template*.

    :param validate: when True, only run template validation and return its result.
    :param no_wait: when True, do not block on the long-running create operation.
    :param subscription_id: deploy into a specific subscription; defaults to
        the client's current subscription.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        # Log the full template for troubleshooting before validating.
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master
    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    :param overwrite_existing: Overwrite existing entries of the same name in the kubeconfig
    :type overwrite_existing: bool
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the kubeconfig from the cluster master over SSH and merge it into *path*."""
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))
    # Never clobber an existing file: probe path, then path-name-1, path-name-2, ...
    path_candidate = path
    attempt = 0
    while os.path.exists(path_candidate):
        attempt += 1
        path_candidate = '{}-{}-{}'.format(path, name, attempt)
    # TODO: this only works for public cloud, need other casing for national clouds
    master_fqdn = '{}.{}.cloudapp.azure.com'.format(dns_prefix, location)
    acs_client.secure_copy(user, master_fqdn,
                           '.kube/config', path_candidate, key_filename=ssh_key_file)
    # merge things
    if path_candidate == path:
        return
    try:
        merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
    except yaml.YAMLError as exc:
        logger.warning('Failed to merge credentials to kube config file: %s', exc)
        logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n, NoTTYException
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Parse *filename* as a YAML kubeconfig, mapping low-level errors to CLIError."""
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
    except (IOError, OSError) as ex:
        # A missing file gets a friendly message; other OS errors propagate.
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        raise
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file*.

    Renames contexts when *context_name* is given, keeps admin contexts from
    clobbering same-named user contexts, warns on unsafe file permissions,
    writes the merged config back, and prints the resulting current context.

    :raises CLIError: when the addition file cannot be loaded or parsed.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # BUG FIX: validate before use -- the original dereferenced 'addition'
    # (subscripting and .get) before its None check, so a failed load raised
    # TypeError/AttributeError instead of the intended CLIError.
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Gets the ContainerService object from Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    :returns: the ContainerService resource as returned by the management SDK
    """
    container_services = cf_container_services(cli_ctx, None)
    return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an existing container service to *new_agent_count*."""
    instance = client.get(resource_group_name, container_service_name)
    instance.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member
    # null out the service principal because otherwise validation complains
    if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        instance.service_principal_profile = None
    # null out the windows profile so that validation doesn't complain about not having the admin password
    instance.windows_profile = None
    return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    # Scope to the resource group when one is given, otherwise list all.
    if resource_group_name:
        svc_list = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        svc_list = client.list()
    return list(svc_list)
def show_service_principal(client, identifier):
    """Resolve *identifier* (SPN name or object id) and return the service principal."""
    return client.get(_resolve_service_principal(client, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Create an AAD application with password and/or key credentials.

    :param client: graph rbac applications operations client.
    :param identifier_uris: list of unique URIs identifying the application.
    :param password: shared-secret credential (mutually exclusive with key_value).
    :param key_value: certificate credential value.
    :raises CLIError: when the signed-in user lacks directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds,
                                                   required_resource_access=required_resource_accesses)
    try:
        return client.create(app_create_param)
    except GraphErrorException as ex:
        # Registering an app needs directory permission; translate the opaque
        # Graph error into actionable guidance with a how-to link.
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and reply URLs of an existing AAD application.

    Only credentials and reply URLs are actually patched; the remaining
    parameters mirror create_application's signature for symmetry.

    :param object_id: AAD object id of the application to update.
    :raises CLIError: when the signed-in user lacks directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        # Same permission translation as create_application.
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build the (password_credentials, key_credentials) pair for an AAD app.

    At most one of *password*/*key_value* is accepted; the unused slot of the
    returned pair is None. Dates default to [utcnow, utcnow + 1 year] and ISO
    date strings are parsed.

    :raises CLIError: when both a password and a key value are supplied.
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')

    def _as_datetime(value, fallback):
        # Normalize a missing or string-typed date to a datetime.
        if not value:
            return fallback
        if isinstance(value, str):
            return dateutil.parser.parse(value)
        return value

    start_date = _as_datetime(start_date, datetime.datetime.utcnow())
    end_date = _as_datetime(end_date, start_date + relativedelta(years=1))
    if password:
        creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                    key_id=str(uuid.uuid4()), value=password)]
        return (creds, None)
    if key_value:
        creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                               key_id=str(uuid.uuid4()),
                               usage=key_usage or 'Verify',
                               type=key_type or 'AsymmetricX509Cert')]
        return (None, creds)
    return (None, None)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an existing AAD application.

    *identifier* may be an appId GUID, an identifier URI, or an application
    object id; with ``resolve_app`` it is resolved to the appId first by
    trying each interpretation in turn.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if resolve_app:
        try:
            # A parseable GUID is interpreted as an appId ...
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            # ... anything else is first tried as an identifier URI.
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
    """Public wrapper: create a role assignment, resolving *assignee* by default."""
    return _create_role_assignment(
        cli_ctx, role, assignee,
        resource_group_name=resource_group_name, scope=scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
    """Assign *role* to *assignee* at the computed scope.

    *role* may be a role-definition GUID or a role name; *assignee* may be an
    object id, or (when *resolve_assignee*) a user principal name or
    service-principal name that is resolved against the directory.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    # Scope defaults to the subscription, optionally narrowed to a resource group.
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    # Resolve the parameter model from the SDK profile active for this cloud.
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # Assignment names are arbitrary but must be unique GUIDs.
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
    """Resolve *role* (a GUID or a role name) to its role-definition id at *scope*.

    :raises CLIError: if a role name matches zero or more than one definition.
    """
    role_id = None
    try:
        # A parseable GUID is already a role-definition id; use it verbatim.
        uuid.UUID(role)
        role_id = role
    except ValueError:
        pass
    if not role_id:  # retrieve role id
        role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
        if not role_defs:
            raise CLIError("Role '{}' doesn't exist.".format(role))
        if len(role_defs) > 1:
            # Ambiguous name: make the user pick a concrete definition id.
            ids = [r.id for r in role_defs]
            err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
            raise CLIError(err.format(role, ids))
        role_id = role_defs[0].id
    return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve a UPN, service-principal name, or object id to an AAD object id.

    Lookups cascade: user principal name (when *assignee* contains '@'),
    then service-principal name, then treat the value as an object id and
    verify it exists.

    :raises CLIError: if nothing in the directory matches *assignee*.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if assignee.find('@') >= 0:  # looks like a user principal name
        result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory-object stubs for the given list of object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    stubs = graph_client.objects.get_objects_by_object_ids(query)
    return list(stubs)
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True if 'Network Contributor' is already assigned exactly at *scope*."""
    # Built-in role-definition GUID for 'Network Contributor'.
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    factory = get_auth_management_client(cli_ctx, scope)
    assignments = factory.role_assignments.list_for_scope(scope=scope, filter='atScope()')
    return any(
        item.scope == scope and item.role_definition_id.endswith(network_contributor_role_id)
        for item in assignments)
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard of a managed cluster through a local
    ``kubectl port-forward`` tunnel.

    Requires ``kubectl`` on PATH and the kube-dashboard addon enabled. In
    Cloud Shell the tunnel is exposed through the console proxy instead of a
    local browser.

    :raises CLIError: if kubectl is missing, the addon is disabled, or the
        dashboard pod/port cannot be discovered.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # An absent profile means the addon was never configured, which defaults to enabled.
    addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
    if not addon_profile.enabled:
        raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
                       'To use "az aks browse" first enable the add-on\n'
                       'by running "az aks enable-addons --addons kube-dashboard".')
    # Fetch user credentials into a throw-away kubeconfig used by the kubectl calls below.
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = '{0}://{1}:{2}/'.format(protocol, listen_address, listen_port)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
                                     json={"url": result['url']})
        logger.warning('To view the console, please open %s in a new tab', result['url'])
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(proxy_url)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                     "port-forward", "--address", listen_address, dashboard_pod,
                                     "{0}:{1}".format(listen_port, dashboard_port)], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # BUGFIX: bytes.find() returns -1 (truthy) when the text is absent and
            # 0 (falsy) when it is the prefix, so the original truthiness check was
            # inverted; use a membership test to detect old kubectl versions that
            # lack the --address flag, then retry without it.
            if b'unknown flag: --address' in err.output:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                 "port-forward", dashboard_pod, "{0}:{1}".format(listen_port, dashboard_port)])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # BUGFIX: close the port that was actually opened rather than a
            # hard-coded 8001, so custom --listen-port values are cleaned up too.
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
    """Raise CLIError unless --no-ssh-key was passed or the value is a valid RSA public key."""
    if no_ssh_key:
        # SSH access was explicitly opted out of; nothing to validate.
        return
    try:
        is_valid = bool(ssh_key_value) and is_valid_ssh_rsa_public_key(ssh_key_value)
        if not is_valid:
            raise ValueError()
    except (TypeError, ValueError):
        shortened_key = truncate_text(ssh_key_value)
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value,  # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_size=0,
               node_count=3,
               nodepool_name="nodepool1",
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               max_pods=0,
               min_count=None,
               max_count=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               zones=None,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               api_server_authorized_ip_ranges=None,
               attach_acr=None,
               no_wait=False):
    """Create a managed Kubernetes (AKS) cluster.

    Assembles a ManagedCluster model from the many CLI options (agent pool,
    SSH/linux profile, service principal, networking, addons, AAD, API-server
    access), then submits create_or_update, retrying for a while to ride out
    AAD service-principal replication latency.

    :raises CLIError: on invalid option combinations (e.g. authorized IP
        ranges with a basic load balancer, both --disable-rbac and
        --enable-rbac, pod CIDR with the azure network plugin).
    """
    _validate_ssh_key(no_ssh_key, ssh_key_value)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    # Default the cluster location to the resource group's location.
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = _set_load_balancer_sku(load_balancer_sku, kubernetes_version)
    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name),  # Must be 12 chars or less before ACS RP adds to it
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        availability_zones=zones,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    # Validates/applies min-count/max-count when the autoscaler is requested.
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    # Reuse the given service principal or create one on the caller's behalf.
    principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                  service_principal=service_principal, client_secret=client_secret,
                                                  subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                  location=location, name=name)
    service_principal_profile = ManagedClusterServicePrincipalProfile(
        client_id=principal_obj.get("service_principal"),
        secret=principal_obj.get("client_secret"),
        key_vault_secret_ref=None)
    # Grant the cluster SP access to a custom subnet, unless told to skip or already granted.
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        scope = vnet_subnet_id
        if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                    service_principal_profile.client_id, scope=scope):
            logger.warning('Could not create a role assignment for subnet. '
                           'Are you an Owner on this subscription?')
    load_balancer_profile = _get_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes)
    if attach_acr:
        # Give the cluster SP pull access to the requested container registry.
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=service_principal_profile.client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    network_profile = None
    if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
        # Any advanced-networking option requires the plugin to be stated explicitly.
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
        )
    else:
        # A standard LB (or explicit LB profile) still needs a network profile to carry it.
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
            )
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id
    )
    monitoring = False
    if 'omsagent' in addon_profiles:
        # Monitoring requires the Container Insights solution in the workspace.
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
    aad_profile = None
    if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
        # Default the AAD tenant to the signed-in user's tenant.
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cmd.cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
        aad_profile = ManagedClusterAADProfile(
            client_app_id=aad_client_app_id,
            server_app_id=aad_server_app_id,
            server_app_secret=aad_server_app_secret,
            tenant_id=aad_tenant_id
        )
    api_server_access_profile = None
    if api_server_authorized_ip_ranges:
        api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    mc = ManagedCluster(
        location=location,
        tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=not disable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        api_server_access_profile=api_server_access_profile
    )
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            result = sdk_no_wait(no_wait,
                                 client.create_or_update,
                                 resource_group_name=resource_group_name,
                                 resource_name=name, parameters=mc)
            # add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource
            # mdm metrics supported only in azure public cloud so add the role assignment only in this cloud
            cloud_name = cmd.cli_ctx.cloud.name
            if cloud_name.lower() == 'azurecloud' and monitoring:
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                            service_principal_profile.client_id, scope=cluster_resource_id):
                    logger.warning('Could not create a role assignment for monitoring addon. '
                                   'Are you an Owner on this subscription?')
            return result
        except CloudError as ex:
            retry_exception = ex
            # SP not replicated to the region yet; back off briefly and retry.
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated addons on a managed cluster."""
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = client.get(resource_group_name, name)
    instance = _update_addons(
        cmd,
        instance,
        subscription_id,
        resource_group_name,
        addons,
        enable=False,
        no_wait=no_wait
    )
    # Persist the mutated managed-cluster model to apply the addon changes.
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, no_wait=False):
    """Enable the given comma-separated addons on a managed cluster.

    For the monitoring addon this also provisions the Container Insights
    solution and, on Azure public cloud, grants the cluster SP the
    'Monitoring Metrics Publisher' role on the cluster resource.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    service_principal_client_id = instance.service_principal_profile.client_id
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
    if 'omsagent' in instance.addon_profiles:
        _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                        service_principal_client_id, scope=cluster_resource_id):
                logger.warning('Could not create a role assignment for Monitoring addon. '
                               'Are you an Owner on this subscription?')
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
    """List Kubernetes orchestrator versions available for managed clusters in *location*."""
    return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch cluster credentials and print or merge them into a kubeconfig file.

    NOTE(review): the *path* default is evaluated once at import time (standard
    Python default-argument behavior); fine as long as HOME does not change
    within the process.

    :raises CLIError: if the service returns no credentials or the kubeconfig
        payload cannot be decoded.
    """
    credentialResults = None
    if admin:
        credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
    else:
        credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        # The service returns a list of kubeconfigs; only the first is used.
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# Map of user-facing --addons option names to the ARM addon-profile keys they control.
ADDONS = {
    'http_application_routing': 'httpApplicationRouting',
    'monitoring': 'omsagent',
    'virtual-node': 'aciConnector',
    'kube-dashboard': 'kubeDashboard'
}
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, optionally restricted to a single resource group."""
    if resource_group_name:
        clusters = client.list_by_resource_group(resource_group_name)
    else:
        clusters = client.list()
    # Strip null/empty fields from the output for readability.
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Fetch a single managed cluster with null/empty fields pruned."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service-principal or the AAD profile of a cluster.

    Exactly one of --reset-service-principal / --reset-aad must be chosen,
    each with its own set of required companion arguments.

    :raises CLIError: on missing or conflicting argument combinations.
    """
    # Exactly one of the two reset modes must be selected.
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        return sdk_no_wait(no_wait,
                           client.reset_service_principal_profile,
                           resource_group_name,
                           name, service_principal, client_secret)
    # Remaining path is the AAD reset; tenant id alone is optional.
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.reset_aad_profile,
                       resource_group_name,
                       name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the named (or sole) agent pool of a managed cluster to *node_count* nodes.

    :raises CLIError: for ambiguous/unknown pool names or a zero node count.
    """
    instance = client.get(resource_group_name, name)
    # With multiple pools the target pool must be named explicitly.
    if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    if node_count == 0:
        raise CLIError("Can't scale down to 0 nodes.")
    for agent_profile in instance.agent_pool_profiles:
        # Match by name, or take the only pool when no name was given.
        if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
            agent_profile.count = int(node_count)  # pylint: disable=no-member
            # null out the SP and AAD profile because otherwise validation complains
            instance.service_principal_profile = None
            instance.aad_profile = None
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               min_count=None, max_count=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               no_wait=False):
    """Update cluster-level settings: autoscaler, load-balancer outbound
    configuration, ACR attach/detach, and API-server authorized IP ranges.

    At least one update option must be given; autoscaler flags are mutually
    exclusive and only supported here for single-pool clusters.

    :raises CLIError: on missing/conflicting options or invalid autoscaler
        count combinations.
    """
    # Booleans sum to the number of autoscaler flags supplied (must be exactly one
    # if any autoscaler change is requested).
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    update_lb_profile = load_balancer_managed_outbound_ip_count is not None or \
        load_balancer_outbound_ips is not None or load_balancer_outbound_ip_prefixes is not None
    if (update_autoscaler != 1 and not update_lb_profile and
            not attach_acr and
            not detach_acr and
            api_server_authorized_ip_ranges is None):
        # BUGFIX: corrected '--dettach-acr' typo and the misplaced quotes around
        # '--api-server-authorized-ip-ranges' in this user-facing message.
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--load-balancer-managed-outbound-ip-count",'
                       '"--load-balancer-outbound-ips",'
                       '"--load-balancer-outbound-ip-prefixes",'
                       '"--attach-acr" or "--detach-acr",'
                       '"--api-server-authorized-ip-ranges"')
    instance = client.get(resource_group_name, name)
    # For multi-agent pool, use the az aks nodepool command
    if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
    node_count = instance.agent_pool_profiles[0].count
    _validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this node pool.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    subscription_id = get_subscription_id(cmd.cli_ctx)
    client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)
    load_balancer_profile = _get_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes)
    if load_balancer_profile:
        instance.network_profile.load_balancer_profile = load_balancer_profile
    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
                no_wait=False, **kwargs):
    """Upgrade a managed cluster's control plane (and optionally all node pools).

    Legacy clusters (fewer than 8 max agent pools, or VMAS pools) do not
    support control-plane-only upgrades; node pools are always upgraded with
    the control plane there. The user is prompted to confirm the chosen
    upgrade breadth.
    """
    instance = client.get(resource_group_name, name)
    if instance.kubernetes_version == kubernetes_version:
        # Same-version "upgrade": a no-op on healthy clusters, a recovery
        # attempt on failed ones — warn accordingly either way.
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)
    from knack.prompting import prompt_y_n
    upgrade_all = False
    instance.kubernetes_version = kubernetes_version
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break
    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not prompt_y_n(msg, default="n"):
                return None
    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# Name and entry module of the CLI extension that implements the dev-spaces commands.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
    :type space_name: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # Lazily install/upgrade the dev-spaces CLI extension, then delegate to it.
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
        except TypeError:
            # An older extension version lacks the newer parameters.
            raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
        except AttributeError as ae:
            raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    # Ensure the dev-spaces extension is available, then delegate removal to it.
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
        except AttributeError as ae:
            raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the cluster's TLS certificates; long-running, so defaults to no-wait."""
    return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
                   subnet_name=None, no_wait=False):
    """Mutate *instance*'s addon profiles, enabling or disabling the given addons.

    *addons* is the user-facing comma-separated list (keys of ``ADDONS``).
    Returns the mutated instance with SP/AAD profiles nulled so the subsequent
    create_or_update passes validation.

    :raises CLIError: when enabling an already-enabled monitoring/virtual-node
        addon, when aci-connector lacks a subnet, or when disabling an addon
        that is not installed.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    # Clusters created before the kubeDashboard profile existed implicitly had it enabled.
    if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
        addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        addon = ADDONS[addon_arg]
        if addon == 'aciConnector':
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # addon name is case insensitive
        addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == 'omsagent':
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # Normalize to a single-leading-slash, no-trailing-slash resource id.
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
            elif addon.lower() == ('aciConnector' + os_type).lower():
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {'SubnetName': subnet_name}
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return *module_name* from the installed CLI extension *extension_name*.

    :raises CLIError: if the extension or its module cannot be imported.
    """
    try:
        # Make the extension's install directory importable, then load its module.
        from azure.cli.core.extension.operations import add_extension_to_path
        add_extension_to_path(extension_name)
        from importlib import import_module
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None):
    """Translate a comma-separated --enable-addons value into addon profiles.

    Recognized addons: http_application_routing, kube-dashboard, monitoring.
    :raises CLIError: on an unrecognized addon name, or when
        --workspace-resource-id is given without the monitoring addon.
    :return: dict mapping addon profile keys to ManagedClusterAddonProfile.
    """
    addon_profiles = addon_profiles or {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addons.remove('http_application_routing')
        addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
    if 'kube-dashboard' in addons:
        addons.remove('kube-dashboard')
        addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # Fall back to the subscription's default Log Analytics
            # workspace, creating it on demand.
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        # Normalize: ensure a leading '/', drop any trailing '/'.
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles['omsagent'] = ManagedClusterAddonProfile(
            enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update an installed Dev Spaces CLI extension and reload it.

    :return: False when the extension is not installed or its module cannot
        be loaded; True otherwise (a CLIError during update is only logged,
        not treated as fatal).
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as err:
        # Update errors are non-fatal; the existing extension keeps working.
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure a CLI extension is installed, optionally updating it.

    :return: True when the extension is available; False when an
        install/update attempt failed.
    """
    from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
    except ExtensionNotInstalledException:
        # Not present yet: attempt a fresh install.
        return _install_dev_spaces_extension(cmd, extension_name)
    if update:
        return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the subscription's default Log Analytics
    workspace for AKS monitoring, creating the workspace (and its default
    resource group) when neither exists yet.

    The workspace region is derived from the target resource group's
    location via the per-cloud mapping tables below, and the workspace is
    named 'DefaultWorkspace-<subscription>-<regionCode>' inside
    'DefaultResourceGroup-<regionCode>'.

    :raises CLIError: when the resource group's region belongs to a
        different cloud than the currently configured one.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2"
    }
    # Maps an arbitrary RG region to the nearest region where Log Analytics
    # is available in the public cloud.
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "southcentralus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "eastus",
        "northeurope": "northeurope",
        "southafricanorth": "westeurope",
        "southafricawest": "westeurope",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2"
    }
    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    workspace_region = "eastus"
    workspace_region_code = "EUS"
    # sanity check that locations and clouds match.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    # Resolve the workspace region (and its short code) for the active cloud,
    # falling back to each cloud's default region on an unknown location.
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 means the RG exists but the workspace doesn't yet: fall
            # through and create it below. Anything else is a real failure.
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)
    # Poll in 15-second slices until the workspace deployment completes.
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution into the Log Analytics
    workspace referenced by the monitoring addon's configuration.

    :param addon: the omsagent addon profile; its config must carry
        'logAnalyticsWorkspaceResourceID'.
    :return: result of the ARM deployment that publishes the solution.
    :raises CLIError: when the workspace resource ID cannot be parsed.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    if 'loganalyticsworkspaceresourceid' in addon.config:
        addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
    workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
    # Normalize: ensure a single leading '/' and no trailing '/'.
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # Millisecond timestamp keeps deployment names unique per invocation.
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
    # Nested ARM template: the outer deployment targets the workspace's own
    # subscription/RG (which may differ from the cluster's) and the inner
    # template creates the ContainerInsights OperationsManagement solution.
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,
                    detach=False):
    """Grant (or, with detach=True, revoke) the cluster service principal's
    pull access to an Azure Container Registry.

    ``acr_name_or_id`` may be a full ACR resource ID or a bare registry
    name, which is then searched across all resource groups.
    :raises CLIError: when the registry cannot be resolved.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # Caller supplied a full resource ID: resolve it directly in the
        # registry's own subscription.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
        return
    # Otherwise treat the value as a registry name and search across all
    # resource groups of the subscription.
    try:
        registry = get_resource_by_name(cli_ctx, acr_name_or_id, 'Microsoft.ContainerRegistry/registries')
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(acr_name_or_id))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the named agent pool of a managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """Return all agent pools of the given managed cluster."""
    return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      node_vm_size=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      no_wait=False):
    """Add a new agent (node) pool to an existing managed cluster.

    :raises CLIError: when a pool with the same name already exists, or a
        Windows pool is requested without an explicit VM size.
    :return: the (possibly still-running) create_or_update operation.
    """
    # Reject duplicate pool names up front (case-sensitive match here).
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    taints_array = []
    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            # NOTE(review): str.strip()/list.append() never raise ValueError,
            # so this handler looks unreachable — confirm intent.
            except ValueError:
                raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
    if node_vm_size is None:
        # Windows pools are only rejected when no VM size was supplied.
        if os_type.lower() == "windows":
            raise CLIError('Windows nodepool is not supported')
        node_vm_size = "Standard_DS2_v2"
    agent_pool = AgentPool(
        name=nodepool_name,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        node_taints=taints_array
    )
    # Validates min/max counts and applies autoscaler settings to the pool.
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale an existing node pool to ``node_count`` nodes.

    :raises CLIError: when the target count is zero or already matches the
        current pool size.
    """
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    target_count = int(node_count)
    if target_count == 0:
        raise CLIError("Can't scale down to 0 nodes.")
    if target_count == pool.count:
        raise CLIError("The new node count is the same as the current node count.")
    pool.count = target_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, pool)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          kubernetes_version,
                          nodepool_name,
                          no_wait=False):
    """Upgrade a node pool to the requested Kubernetes version."""
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    pool.orchestrator_version = kubernetes_version
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, pool)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         no_wait=False):
    """Enable, disable, or re-configure the cluster autoscaler on a node pool.

    Exactly one of the three autoscaler flags must be set. Returns None
    (after a warning) when the requested state is already in effect.
    """
    # Exactly one mode flag may be set; bools sum as 0/1 integers.
    if enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler != 1:
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    _validate_autoscaler_update_counts(min_count, max_count, instance.count,
                                       enable_cluster_autoscaler or update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    elif update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    else:  # disable_cluster_autoscaler
        if not instance.enable_auto_scaling:
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete a node pool from a managed cluster.

    :raises CLIError: when no pool with that (case-insensitive) name exists.
    """
    wanted = nodepool_name.lower()
    pools = client.list(resource_group_name, cluster_name)
    if not any(pool.name.lower() == wanted for pool in pools):
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create (or, with detach=True, delete) the 'acrpull' role assignment
    for ``client_id`` scoped to the given container registry.

    :raises CLIError: when the role-assignment operation reports failure.
    """
    if detach:
        deleted = _delete_role_assignments(cli_ctx, 'acrpull', client_id, scope=registry_id)
        if not deleted:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    created = _add_role_assignment(cli_ctx, 'acrpull', client_id, scope=registry_id)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Resolve (load, validate, or create) the service principal for an AKS
    cluster, persisting it to 'aksServicePrincipal.json' on local disk.

    :raises CLIError: when SP creation fails, or --service-principal was
        given without --client-secret.
    :return: the stored principal object loaded back from disk.
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = _create_client_secret()
            # Random salt keeps the placeholder homepage URL unique.
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    # Persist whatever we resolved, then reload it as the return value.
    store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_osa_aad(cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Build the AAD identity provider for an OpenShift managed cluster,
    creating or updating the backing AAD application when ``create`` is True.

    :param identifier: cluster public hostname used for the OAuth reply URL
        on update; None on first creation (the real FQDN is not known yet).
    :return: an OpenShiftManagedClusterAADIdentityProvider.
    """
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")
        # Look for an existing AAD application with our identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result = create_application(client=rbac_client.applications,
                                        display_name=name,
                                        identifier_uris=[app_id_name],
                                        homepage=app_id_name,
                                        password=aad_client_app_secret,
                                        required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
    # Get the TenantID from the current login when none was supplied.
    if aad_tenant_id is None:
        profile = Profile(cli_ctx=cli_ctx)
        _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Resolve (load, validate, or create) the service principal for an ACS
    cluster, persisting it to the default on-disk store.

    Unlike the AKS variant, a newly created SP is also granted the
    'Contributor' role on the subscription.
    :raises CLIError: when SP creation fails, or --service-principal was
        given without --client-secret.
    :return: the stored principal object loaded back from disk.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = _create_client_secret()
            # Random salt keeps the placeholder homepage URL unique.
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # add role first before save it
            if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
                logger.warning('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    # Persist whatever we resolved, then reload it as the return value.
    store_acs_service_principal(subscription_id, client_secret, service_principal)
    return load_acs_service_principal(subscription_id)
def _create_client_secret():
# Add a special character to satsify AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of a resource group.

    The get call doubles as an existence check: it raises when the
    resource group does not exist.
    """
    groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, node_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("Current node count '{}' is not in the range of min-count and max-count.".format(node_count))
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Tolerate a concurrent creation of the same directory.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create with owner-only permissions (0600): kubeconfig holds credentials.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        # Write the new config to a temp file, then merge it into `path`.
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        # Always remove the temp file, even when the merge failed.
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, optionally scoped to one resource group."""
    if resource_group_name:
        clusters = client.list_by_resource_group(resource_group_name)
    else:
        clusters = client.list()
    # Strip null-only fields so the JSON output stays readable.
    return _remove_osa_nulls(list(clusters))
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
# Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
try:
client.get(resource_group_name, name)
except CloudError:
# Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Return the OpenShift managed cluster, with null-valued fields stripped for display."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([managed_cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Scale the 'compute' agent pool of an OpenShift managed cluster to *compute_count* nodes."""
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    idx = 0
    for pool_index, pool in enumerate(instance.agent_pool_profiles):
        if pool.name.lower() == "compute":
            idx = pool_index
            break
    instance.agent_pool_profiles[idx].count = int(compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
    """parse load balancer profile outbound IP ids and return an array of references to the outbound IP resources"""
    if not load_balancer_outbound_ips:
        return None
    return [ResourceReference(id=ip_id.strip())
            for ip_id in load_balancer_outbound_ips.split(',')]
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
    """parse load balancer profile outbound IP prefix ids and return an array \
of references to the outbound IP prefix resources"""
    if not load_balancer_outbound_ip_prefixes:
        return None
    return [ResourceReference(id=prefix_id.strip())
            for prefix_id in load_balancer_outbound_ip_prefixes.split(',')]
def _get_load_balancer_profile(load_balancer_managed_outbound_ip_count,
                               load_balancer_outbound_ips,
                               load_balancer_outbound_ip_prefixes):
    """parse and build load balancer profile"""
    outbound_ip_resources = _get_load_balancer_outbound_ips(load_balancer_outbound_ips)
    outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(
        load_balancer_outbound_ip_prefixes)
    # Only build a profile when at least one outbound option was supplied.
    if not (load_balancer_managed_outbound_ip_count or
            outbound_ip_resources or
            outbound_ip_prefix_resources):
        return None
    profile = ManagedClusterLoadBalancerProfile()
    if load_balancer_managed_outbound_ip_count:
        profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
            count=load_balancer_managed_outbound_ip_count
        )
    if outbound_ip_resources:
        profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
            public_ips=outbound_ip_resources
        )
    if outbound_ip_prefix_resources:
        profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
            public_ip_prefixes=outbound_ip_prefix_resources
        )
    return profile
| 45.202601 | 222 | 0.658347 |
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError
from ._helpers import _populate_api_server_access_profile, _set_load_balancer_sku, _set_vm_set_type
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedCluster
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import AgentPool
from azure.mgmt.containerservice.v2019_08_01.models import ResourceReference
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
logger = get_logger(__name__)
def which(binary):
    """Search the PATH environment variable for *binary*.

    Returns the full path of the first existing, executable match, or None.
    On Windows the '.exe' suffix is appended and ';' is used as the separator.
    """
    # BUG FIX: os.getenv('PATH') can return None (unset environment), which
    # crashed on .split(); fall back to an empty search path instead.
    path_var = os.getenv('PATH') or ''
    if platform.system() == 'Windows':
        binary = binary + '.exe'
        parts = path_var.split(';')
    else:
        parts = path_var.split(':')
    for part in parts:
        bin_path = os.path.join(part, binary)
        if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def wait_then_open(url):
    """Poll *url* until it responds (up to ~9 s), then open it in a browser.

    Useful for waiting for a local proxy to come up before opening its UI.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            # BUG FIX: the break previously sat in the except handler, so a
            # successful probe kept polling; stop as soon as the URL answers.
            break
        except URLError:
            time.sleep(1)
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """Open *url* in a browser from a daemon thread once it starts responding."""
    # BUG FIX: args was '({url})' — a one-element set literal that only worked
    # by accident. threading.Thread documents args as a tuple.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """Look up the ACS cluster and open its orchestrator UI in a browser."""
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, cluster_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch to the orchestrator-specific browse implementation (Kubernetes or DC/OS)."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type
    is_kubernetes = (str(orchestrator_type).lower() == 'kubernetes' or
                     orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or
                     (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'))
    if is_kubernetes:
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    is_dcos = (str(orchestrator_type).lower() == 'dcos' or
               orchestrator_type == ContainerServiceOrchestratorTypes.dcos)
    if is_dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """Launch the Kubernetes dashboard for an ACS Kubernetes cluster via kubectl proxy."""
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, cluster_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch cluster credentials into a throwaway kubeconfig and run `kubectl proxy`.

    Blocks until the user interrupts the proxy; optionally opens the dashboard
    URL in a browser once the proxy answers. Requires `kubectl` on PATH.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Drop any stale kubeconfig left over from a previous browse session.
    if os.path.exists(browse_path):
        os.remove(browse_path)
    _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        # Open the tab asynchronously so it loads once the proxy below is serving.
        wait_then_open_async('http://127.0.0.1:8001/ui')
    # Blocking call: runs until the user kills the proxy.
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """Open the DC/OS web UI through an SSH tunnel to the cluster."""
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(cluster_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH into the DC/OS master, start the octarine proxy, tunnel it locally,
    and open the UI in a browser.

    Blocks inside create_tunnel until interrupted; the local HTTP proxy setting
    is always reverted in the finally clause.
    """
    if not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
    # octarine is the proxy binary shipped on DC/OS masters.
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
    # Start a uniquely-named proxy instance in the background on the master...
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # ...then ask it which remote port it bound to.
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Route local HTTP traffic through the tunnel endpoint.
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the client CLI matching the cluster's orchestrator (kubectl or dcos)."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    installers = {'kubernetes': k8s_install_cli, 'dcos': dcos_install_cli}
    installer = installers.get(orchestrator_type)
    if installer is None:
        raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
    return installer(**kwargs)
def _ssl_context():
    """Build an SSL context, falling back to explicit TLS protocol selection on
    old Pythons and on Cloud Shell under Windows."""
    needs_legacy_context = sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows')
    if not needs_legacy_context:
        return ssl.create_default_context()
    try:
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        # Very old ssl modules lack PROTOCOL_TLS.
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _urlretrieve(url, filename):
    """Download *url* to *filename* using this module's SSL context."""
    response = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as out_file:
        out_file.write(response.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """Download the dcos CLI binary for the current platform to *install_location*
    and mark it executable.

    Raises CLIError when no install location is given, the platform is
    unsupported, or the download fails.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # BUG FIX: the previous message ("Proxy server ... does not exist on the
        # cluster") was copy-pasted from the DC/OS tunnel code and did not
        # describe this failure at all.
        raise CLIError('Unsupported platform "{}" for install-cli.'.format(system))
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
    """Download the kubectl binary for the current platform to *install_location*.

    Resolves 'latest' against the upstream stable channel (mirrored for Azure
    China), marks the file executable, and warns if the install directory is
    not on PATH.
    """
    source_url = "https://storage.googleapis.com/kubernetes-release/release"
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurechinacloud':
        source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # BUG FIX: the previous message ("Proxy server ... does not exist on the
        # cluster") was copy-pasted from the DC/OS tunnel code and made no sense
        # for an unsupported download platform.
        raise CLIError('Unsupported platform "{}" for install-cli.'.format(system))
    logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError('Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':
        # Warn when the install dir is not already on PATH (case-insensitive,
        # trailing-backslash-insensitive comparison).
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           '    1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           '    2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                          location=None, service_principal=None, client_secret=None,
                          chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
    """Install the ACI connector Helm chart on the cluster (thin wrapper over the shared helper)."""
    _k8s_install_or_upgrade_connector(
        "install", cmd, client, name, resource_group_name, connector_name,
        location, service_principal, client_secret, chart_url, os_type,
        image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                          location=None, service_principal=None, client_secret=None,
                          chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
    """Upgrade the ACI connector Helm chart on the cluster (thin wrapper over the shared helper)."""
    _k8s_install_or_upgrade_connector(
        "upgrade", cmd, client, name, resource_group_name, connector_name,
        location, service_principal, client_secret, chart_url, os_type,
        image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
                                      location, service_principal, client_secret, chart_url, os_type,
                                      image_tag, aci_resource_group):
    """Shared implementation for installing/upgrading the ACI connector chart.

    helm_cmd is 'install' or 'upgrade'. Validates prerequisites (helm on PATH,
    SPN+secret pairing, ACI region), fetches AKS credentials into a temp
    kubeconfig, then deploys one connector per requested OS flavor.
    """
    from subprocess import PIPE, Popen
    instance = client.get(resource_group_name, name)
    helm_not_installed = 'Helm not detected, please verify if it is installed.'
    url_chart = chart_url
    if image_tag is None:
        image_tag = 'latest'
    # Check if Helm is installed locally
    try:
        Popen(["helm"], stdout=PIPE, stderr=PIPE)
    except OSError:
        raise CLIError(helm_not_installed)
    # If SPN is specified, the secret should also be specified
    if service_principal is not None and client_secret is None:
        raise CLIError('--client-secret must be specified when --service-principal is specified')
    # Validate if the RG exists
    rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
    # Auto assign the location
    if location is None:
        location = rg_location
    norm_location = location.replace(' ', '').lower()
    # Validate the location upon the ACI avaiable regions
    _validate_aci_location(norm_location)
    # Get the credentials from a AKS instance
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Get the TenantID
    profile = Profile(cli_ctx=cmd.cli_ctx)
    _, _, tenant_id = profile.get_login_credentials()
    # Check if we want the linux connector
    if os_type.lower() in ['linux', 'both']:
        _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                               client_secret, subscription_id, tenant_id, aci_resource_group,
                                               norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
    # Check if we want the windows connector
    if os_type.lower() in ['windows', 'both']:
        _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                               client_secret, subscription_id, tenant_id, aci_resource_group,
                                               norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
                                           client_secret, subscription_id, tenant_id, aci_resource_group,
                                           norm_location, os_type, use_rbac, masterFqdn):
    """Run `helm install` or `helm upgrade` for one ACI connector deployment.

    Builds the chart's --set value string from the supplied Azure identity and
    placement parameters; the release name encodes connector name, OS flavor
    and region so multiple connectors can coexist.
    """
    rbac_install = "true" if use_rbac else "false"
    node_taint = 'azure.com/aci'
    helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
    node_name = 'virtual-kubelet-' + helm_release_name
    k8s_master = 'https://{}'.format(masterFqdn)
    logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
    try:
        # Mandatory chart values; optional identity/placement values appended below.
        values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
            node_name, node_taint, os_type, image_tag, rbac_install)
        if service_principal:
            values += ",env.azureClientId=" + service_principal
        if client_secret:
            values += ",env.azureClientKey=" + client_secret
        if subscription_id:
            values += ",env.azureSubscriptionId=" + subscription_id
        if tenant_id:
            values += ",env.azureTenantId=" + tenant_id
        if aci_resource_group:
            values += ",env.aciResourceGroup=" + aci_resource_group
        if norm_location:
            values += ",env.aciRegion=" + norm_location
        # Currently, we need to set the master FQDN.
        # This is temporary and we should remove it when possible
        values += ",env.masterUri=" + k8s_master
        if helm_cmd == "install":
            subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
        elif helm_cmd == "upgrade":
            subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
                            location=None, graceful=False, os_type='Linux'):
    """Uninstall the ACI connector release(s) from the cluster.

    Removes the Linux and/or Windows connector depending on *os_type*; when
    *graceful* is set the virtual node is drained before deletion.
    """
    from subprocess import PIPE, Popen
    helm_not_installed = "Error : Helm not detected, please verify if it is installed."
    # Check if Helm is installed locally
    try:
        Popen(["helm"], stdout=PIPE, stderr=PIPE)
    except OSError:
        raise CLIError(helm_not_installed)
    # Get the credentials from a AKS instance
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # Validate if the RG exists
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    # Auto assign the location
    if location is None:
        location = rg_location
    norm_location = location.replace(' ', '').lower()
    # Release/node names must match the ones used at install time.
    if os_type.lower() in ['linux', 'both']:
        helm_release_name = connector_name.lower() + '-linux-' + norm_location
        node_name = 'virtual-kubelet-' + helm_release_name
        _undeploy_connector(graceful, node_name, helm_release_name)
    if os_type.lower() in ['windows', 'both']:
        helm_release_name = connector_name.lower() + '-windows-' + norm_location
        node_name = 'virtual-kubelet-' + helm_release_name
        _undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
    """Remove one ACI connector: optionally drain its virtual node, purge the
    Helm release, then delete the node object.

    Raises CLIError when kubectl/helm are missing or any step fails (usually a
    wrong --connector-name/--location/--os-type combination).
    """
    if graceful:
        logger.warning('Graceful option selected, will try to drain the node first')
        from subprocess import PIPE, Popen
        kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
        try:
            Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
        except OSError:
            raise CLIError(kubectl_not_installed)
        try:
            drain_node = subprocess.check_output(
                ['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
                universal_newlines=True)
            # Empty output means kubectl did not find the node to drain.
            if not drain_node:
                raise CLIError('Could not find the node, make sure you' +
                               ' are using the correct --os-type')
        except subprocess.CalledProcessError as err:
            raise CLIError('Could not find the node, make sure you are using the correct' +
                           ' --connector-name, --location and --os-type options: {}'.format(err))
    logger.warning("Undeploying the '%s' using Helm", helm_release_name)
    try:
        subprocess.call(['helm', 'del', helm_release_name, '--purge'])
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
    try:
        subprocess.check_output(
            ['kubectl', 'delete', 'node', node_name],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not delete the node, make sure you are using the correct' +
                       ' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal, retrying SP
    creation while AAD data propagates.

    Returns the new application's app_id on success, or False when every
    retry failed.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # BUG FIX: keyword was misspelled 'messsage', so this initial progress
    # message was silently dropped.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            # Back off a little longer on each attempt while AAD propagates.
            time.sleep(2 + 2 * x)
    else:
        # Loop exhausted without a successful creation.
        return False
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
    """Assign *role* to *service_principal*, retrying while AAD data propagates.

    Returns True on success (or when the assignment already exists), False
    when all retries are exhausted.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for attempt in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * attempt, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal, scope=scope)
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            # Deliberate best-effort: swallow transient failures and retry.
            pass
        time.sleep(delay + delay * attempt)
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments, either by explicit ids or by assignee/role/scope filters.

    With no filters at all, prompts for confirmation before wiping every
    assignment in the subscription (unless *yes* is set).
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        # Ids are fully qualified; mixing them with filters is ambiguous.
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # BUG FIX: 'assignee' was listed twice in this check; duplicate removed.
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        from knack.prompting import prompt_y_n
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete the role assignment for *service_principal*, retrying while AAD
    data propagates.

    Returns True on success, False when all retries are exhausted; CLIErrors
    from the deletion are re-raised immediately.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for attempt in range(0, 10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * attempt, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            raise ex
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * attempt)
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments, then filter client-side by scope, role and assignee.

    When *scope* is given it drives the server-side listing; otherwise the
    listing is by assignee (optionally including group-inherited assignments)
    or unfiltered.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # Keep an assignment when: no scope filter, OR (inherited allowed AND the
        # assignment's scope is a prefix of the requested scope), OR exact scope
        # match. Relies on 'and' binding tighter than 'or'.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the regions where ACS is available, split into production and preview."""
    return dict(productionRegions=regions_in_prod,
                previewRegions=regions_in_preview)
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                  master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                  master_first_consecutive_static_ip, master_storage_profile):
    """Build the master pool profile dict, overlaying any user-supplied *master_profile*."""
    default_master_pool_profile = {
        "count": int(master_count),
        "dnsPrefix": dns_name_prefix + 'mgmt',
    }
    if api_version == "2017-07-01":
        # The newer API surface accepts additional VM/network settings.
        default_master_pool_profile = _update_dict(default_master_pool_profile, {
            "count": int(master_count),
            "dnsPrefix": dns_name_prefix + 'mgmt',
            "vmSize": master_vm_size,
            "osDiskSizeGB": int(master_osdisk_size),
            "vnetSubnetID": master_vnet_subnet_id,
            "firstConsecutiveStaticIP": master_first_consecutive_static_ip,
            "storageProfile": master_storage_profile,
        })
    if master_profile:
        return _update_dict(default_master_pool_profile, master_profile)
    return default_master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the ``agentPoolProfiles`` list for the ACS resource payload.

    Without explicit *agent_profiles*, a single default pool is emitted;
    otherwise each supplied profile is overlaid on the defaults.
    """
    default_profile = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    # The 2017-07-01 API surface exposes additional agent configuration fields.
    if api_version == "2017-07-01":
        default_profile = _update_dict(default_profile, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(default_profile, {"name": "agentpool0"})]
    # Honor a user-specified dnsPrefix; otherwise default it with the pool
    # index so the pools do not produce duplicate DNS names.
    profiles = []
    for idx, overrides in enumerate(agent_profiles):
        merged = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, overrides)
        profiles.append(_update_dict(default_profile, merged))
    return profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a (classic) Azure Container Service via an ARM template deployment.

    Builds an inline ARM template containing a single
    Microsoft.ContainerService/containerServices resource from the master/agent
    profile arguments, then deploys (or, with *validate*, only validates) it.
    For Kubernetes clusters a service principal is ensured first; the secret is
    passed to ARM as a secureString parameter rather than embedded in the
    template. Returns the deployment result from ``_invoke_deployment``.

    :raises CLIError: for an invalid ssh key, or --windows on a non-Kubernetes
        orchestrator.
    """
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31" # 2017-01-31 applied to other locations
    if orchestrator_type.lower() == 'kubernetes':
        # Kubernetes needs a service principal for the cloud provider integration.
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    if service_principal is not None and client_secret is not None:
        # Keep the secret out of the template body: pass it as a secureString
        # deployment parameter instead.
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Retry the deployment: a freshly-created service principal may not yet be
    # visible in Active Directory, which surfaces as the errors matched below.
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal/secret for *subscription_id* in the CLI config dir.

    The whole file is rewritten with owner-only (0600) permissions since it
    stores a secret; entries for other subscriptions are preserved.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    # Create/truncate with restrictive permissions before writing the secret.
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the cached service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path)
    return all_principals.get(subscription_id) if all_principals else None
def load_service_principals(config_path):
    """Load the cached service principal file at *config_path*.

    Returns the parsed content, or None when the file is missing, unreadable,
    or not valid JSON (a corrupt cache behaves like a missing one).
    """
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as f:
            return shell_safe_json_parse(f.read())
    except Exception:  # pylint: disable=broad-except
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior without
        # trapping interpreter-control exceptions.
        return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Run (or, with *validate*, only validate) an incremental ARM deployment.

    When validating, the template is also logged at INFO level for inspection.
    Returns the validation result or the (optionally non-waiting) deployment.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    # Resolve the DeploymentProperties model class for the active API profile.
    DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download the kubeconfig of an ACS Kubernetes cluster and merge it into *path*."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy ``.kube/config`` from the cluster's master over SSH and merge it into *path*.

    The file is first copied to a non-clobbering candidate path; if that differs
    from *path* (i.e. *path* already existed), the two configs are merged.

    :raises CLIError: when *ssh_key_file* is given but does not exist.
    """
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
    dns_prefix = acs_info.master_profile.dns_prefix
    location = acs_info.location
    user = acs_info.linux_profile.admin_username
    _mkdir_p(os.path.dirname(path))
    # Find a destination that does not overwrite an existing file.
    path_candidate = path
    ix = 0
    while os.path.exists(path_candidate):
        ix += 1
        path_candidate = '{}-{}-{}'.format(path, name, ix)
    # Fetch the kubeconfig from the master over SCP.
    acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
                           '.kube/config', path_candidate, key_filename=ssh_key_file)
    # If it was downloaded to a temp candidate, merge into the requested path.
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            logger.warning('Failed to merge credentials to kube config file: %s', exc)
            logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
    """Merge the *key* list (e.g. 'clusters', 'users', 'contexts') of *addition*
    into *existing*, both kubeconfig-shaped dicts.

    Same-named entries are replaced when *replace* is true or the entries are
    equal; otherwise the user is prompted to overwrite, and declining (or
    having no TTY and answering nothing) raises CLIError.
    """
    if not addition.get(key, False):
        return
    if existing[key] is None:
        # Nothing on our side: adopt the addition's list wholesale.
        existing[key] = addition[key]
        return
    for i in addition[key]:
        for j in existing[key]:
            # Only entries that carry a 'name' participate in conflict handling.
            if not i.get('name', False) or not j.get('name', False):
                continue
            if i['name'] == j['name']:
                if replace or i == j:
                    # NOTE(review): removes from existing[key] while iterating it;
                    # appears to assume at most one same-named entry — confirm.
                    existing[key].remove(j)
                else:
                    from knack.prompting import prompt_y_n, NoTTYException
                    msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
                    overwrite = False
                    try:
                        overwrite = prompt_y_n(msg.format(i['name']))
                    except NoTTYException:
                        pass
                    if overwrite:
                        existing[key].remove(j)
                    else:
                        msg = 'A different object named {} already exists in {} in your kubeconfig file.'
                        raise CLIError(msg.format(i['name'], key))
        existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Parse the kubeconfig YAML file at *filename*.

    :raises CLIError: when the file does not exist or cannot be parsed; other
        I/O errors propagate unchanged.
    """
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) != errno.ENOENT:
            raise
        raise CLIError('{} does not exist'.format(filename))
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file*.

    Optionally renames the added context/cluster to *context_name*, prefers a
    clusterAdmin context as current-context, warns when the resulting file is
    not owner-only (0600), and prints the merged current context.

    :raises CLIError: when the addition file cannot be loaded.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # Bug fix: this check previously ran AFTER `addition` was indexed below,
    # so a None addition raised TypeError/AttributeError instead of CLIError.
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # Prefer the clusterAdmin context (if any) and mark it as '-admin'.
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
def _get_host_name(acs_info):
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """Fetch the ContainerService model for *name* in *resource_group_name*."""
    container_services = cf_container_services(cli_ctx, None)
    return container_services.get(resource_group_name, name)
def _rand_str(n):
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an ACS cluster to *new_agent_count* nodes."""
    instance = client.get(resource_group_name, container_service_name)
    instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
    # null out the service principal because otherwise validation complains
    if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        instance.service_principal_profile = None
    # null out the windows profile so that validation doesn't complain about not having the admin password
    instance.windows_profile = None
    return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
    """List ACS container services, scoped to *resource_group_name* when given."""
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
def show_service_principal(client, identifier):
    """Show the service principal identified by SPN or object id."""
    object_id = _resolve_service_principal(client, identifier)
    return client.get(object_id)
def _resolve_service_principal(client, identifier):
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Create an AAD application registration with optional password/key credentials.

    :raises CLIError: with setup guidance when the caller lacks directory
        permission to register applications; other graph errors propagate.
    """
    from azure.graphrbac.models import GraphErrorException
    # Build at most one of the password/key credential lists (mutually exclusive).
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds,
                                                   required_resource_access=required_resource_accesses)
    try:
        return client.create(app_create_param)
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and reply URLs of an existing AAD application.

    Only key credentials, password credentials, and reply URLs are pushed;
    the remaining parameters are accepted for interface parity but unused here.

    :raises CLIError: with setup guidance when the caller lacks directory
        permission; other graph errors propagate.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create an AAD service principal for the application named by *identifier*.

    *identifier* may be an appId GUID, an identifier URI, or — as a last
    resort — an application object id. With ``resolve_app=False`` it is used
    as the appId directly.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not resolve_app:
        app_id = identifier
    else:
        try:
            uuid.UUID(identifier)
            matches = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            matches = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not matches:  # assume we get an object id
            matches = [rbac_client.applications.get(identifier)]
        app_id = matches[0].app_id
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
    """Assign *role* to *assignee* at the given scope (or resource group/subscription)."""
    return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
    """Create an RBAC role assignment.

    *role* may be a role name or a role-definition GUID; *assignee* a UPN,
    SPN, or object id (resolved unless *resolve_assignee* is False). The
    scope defaults to the resource group or the whole subscription.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    # Resolve the parameter model for the active API profile.
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # Assignment names must be unique GUIDs within the scope.
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN, or object id) to an AAD object id.

    Tries users (when the value looks like a UPN), then service principals,
    then a direct object-id stub lookup.

    :raises CLIError: when nothing in the graph matches.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if '@' in assignee:
        # Looks like a user principal name.
        result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:
        result = _get_object_stubs(client, [assignee])
    if not result:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Look up directory-object stubs for the given object ids via the graph API."""
    params = GetObjectsParameters(include_directory_object_references=True,
                                  object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when some assignment directly at *scope* grants Network Contributor."""
    # Well-known built-in role-definition id for "Network Contributor".
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    factory = get_auth_management_client(cli_ctx, scope)
    assignments = factory.role_assignments.list_for_scope(scope=scope, filter='atScope()')
    return any(a.scope == scope and a.role_definition_id.endswith(network_contributor_role_id)
               for a in assignments)
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard of an AKS cluster via a local kubectl port-forward.

    Fetches user credentials to a temp kubeconfig, locates the dashboard pod
    and its container port, then runs ``kubectl port-forward`` (with Cloud
    Shell tunnel support) and optionally opens a browser.

    :raises CLIError: when kubectl is missing, the dashboard addon is
        disabled, or the dashboard pod/port cannot be found.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
    if not addon_profile.enabled:
        raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
                       'To use "az aks browse" first enable the add-on\n'
                       'by running "az aks enable-addons --addons kube-dashboard".')
    # Write the cluster credentials into a throwaway kubeconfig.
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = '{0}://{1}:{2}/'.format(protocol, listen_address, listen_port)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
                                     json={"url": result['url']})
        logger.warning('To view the console, please open %s in a new tab', result['url'])
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(proxy_url)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                     "port-forward", "--address", listen_address, dashboard_pod,
                                     "{0}:{1}".format(listen_port, dashboard_port)], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # Older kubectl (<1.13) has no --address flag; retry without it.
            if err.output.find(b'unknown flag: --address'):
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                 "port-forward", dashboard_pod, "{0}:{1}".format(listen_port, dashboard_port)])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_size=0,
               node_count=3,
               nodepool_name="nodepool1",
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               max_pods=0,
               min_count=None,
               max_count=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               zones=None,
               generate_ssh_keys=False, # pylint: disable=unused-argument
               api_server_authorized_ip_ranges=None,
               attach_acr=None,
               no_wait=False):
    """Create a managed Kubernetes (AKS) cluster.

    Assembles a ManagedCluster model from the agent pool, SSH/Linux, service
    principal, network, addon, AAD, and API-server-access arguments, then
    submits it with create_or_update — retrying while a freshly-created
    service principal propagates through Active Directory. Also creates role
    assignments for a supplied subnet, an attached ACR, and (on public Azure)
    the monitoring addon. Returns the create_or_update result (or the LRO
    when *no_wait* is set).

    :raises CLIError: for invalid argument combinations (e.g. bad ssh key,
        authorized IP ranges with a basic load balancer, pod CIDR with the
        azure plugin, both --disable-rbac and --enable-rbac).
    """
    _validate_ssh_key(no_ssh_key, ssh_key_value)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # Defaults for these depend on the requested Kubernetes version.
    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = _set_load_balancer_sku(load_balancer_sku, kubernetes_version)
    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        availability_zones=zones,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    # Validates and applies the autoscaler min/max settings on the pool.
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    # Reuse or create the cluster's service principal.
    principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                  service_principal=service_principal, client_secret=client_secret,
                                                  subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                  location=location, name=name)
    service_principal_profile = ManagedClusterServicePrincipalProfile(
        client_id=principal_obj.get("service_principal"),
        secret=principal_obj.get("client_secret"),
        key_vault_secret_ref=None)
    # Grant the cluster's principal access to a caller-supplied subnet.
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        scope = vnet_subnet_id
        if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                    service_principal_profile.client_id, scope=scope):
            logger.warning('Could not create a role assignment for subnet. '
                           'Are you an Owner on this subscription?')
    load_balancer_profile = _get_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes)
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=service_principal_profile.client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    network_profile = None
    if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
        )
    else:
        # No explicit networking args: a profile is still needed to carry a
        # standard load balancer configuration.
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
            )
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id
    )
    monitoring = False
    if 'omsagent' in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
    aad_profile = None
    if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
        # Default the tenant from the current login when not given explicitly.
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cmd.cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
        aad_profile = ManagedClusterAADProfile(
            client_app_id=aad_client_app_id,
            server_app_id=aad_server_app_id,
            server_app_secret=aad_server_app_secret,
            tenant_id=aad_tenant_id
        )
    api_server_access_profile = None
    if api_server_authorized_ip_ranges:
        api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    mc = ManagedCluster(
        location=location,
        tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=not disable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        api_server_access_profile=api_server_access_profile
    )
    # Retry: a freshly-created service principal may not yet be visible in AAD.
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            result = sdk_no_wait(no_wait,
                                 client.create_or_update,
                                 resource_group_name=resource_group_name,
                                 resource_name=name, parameters=mc)
            cloud_name = cmd.cli_ctx.cloud.name
            if cloud_name.lower() == 'azurecloud' and monitoring:
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                            service_principal_profile.client_id, scope=cluster_resource_id):
                    logger.warning('Could not create a role assignment for monitoring addon. '
                                   'Are you an Owner on this subscription?')
            return result
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable one or more addons (comma-separated) on an existing managed cluster."""
    mc = client.get(resource_group_name, name)
    sub_id = get_subscription_id(cmd.cli_ctx)
    # Flip the requested addon profiles off, then push the mutated cluster back.
    mc = _update_addons(cmd, mc, sub_id, resource_group_name, addons,
                        enable=False, no_wait=no_wait)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, mc)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, no_wait=False):
    """Enable one or more addons (comma-separated) on an existing managed cluster.

    When the monitoring addon ('omsagent') ends up enabled, the Container
    Insights solution is deployed to the workspace and, on public Azure, the
    cluster's service principal is granted 'Monitoring Metrics Publisher' on
    the cluster resource.
    """
    mc = client.get(resource_group_name, name)
    sub_id = get_subscription_id(cmd.cli_ctx)
    sp_client_id = mc.service_principal_profile.client_id
    mc = _update_addons(cmd, mc, sub_id, resource_group_name, addons, enable=True,
                        workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
    if 'omsagent' in mc.addon_profiles:
        _ensure_container_insights_for_monitoring(cmd, mc.addon_profiles['omsagent'])
        # The metrics-publisher role assignment is only attempted on public cloud.
        if cmd.cli_ctx.cloud.name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_id = resource_id(
                subscription=sub_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            granted = _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                           sp_client_id, scope=cluster_id)
            if not granted:
                logger.warning('Could not create a role assignment for Monitoring addon. '
                               'Are you an Owner on this subscription?')
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, mc)
def aks_get_versions(cmd, client, location):
    """List the Kubernetes orchestrator versions available for managed clusters in *location*."""
    return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch the cluster kubeconfig and print it or merge it into *path*.

    Uses the admin credential endpoint when *admin* is set, otherwise the
    user credential endpoint.
    """
    fetch = client.list_cluster_admin_credentials if admin else client.list_cluster_user_credentials
    credentialResults = fetch(resource_group_name, name)
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        # The service returns a list of kubeconfigs; only the first is used.
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# Maps the user-facing addon names accepted by --enable-addons/--disable-addons
# to the addon-profile keys used by the AKS service.
ADDONS = {
    'http_application_routing': 'httpApplicationRouting',
    'monitoring': 'omsagent',
    'virtual-node': 'aciConnector',
    'kube-dashboard': 'kubeDashboard'
}
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, optionally restricted to one resource group."""
    if resource_group_name:
        found = client.list_by_resource_group(resource_group_name)
    else:
        found = client.list()
    # Strip null/empty fields for cleaner CLI output.
    return _remove_nulls(list(found))
def aks_show(cmd, client, resource_group_name, name):
    """Show one managed cluster, with null/empty fields stripped."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service-principal or the AAD profile of a cluster.

    Exactly one of --reset-service-principal / --reset-aad must be chosen.
    """
    # Exactly one reset mode must be selected (not both, not neither).
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        return sdk_no_wait(no_wait, client.reset_service_principal_profile,
                           resource_group_name, name, service_principal, client_secret)
    # reset_aad path: the three AAD app arguments are mandatory, tenant is optional.
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = dict(clientAppID=aad_client_app_id,
                      serverAppID=aad_server_app_id,
                      serverAppSecret=aad_server_app_secret,
                      tenantID=aad_tenant_id)
    return sdk_no_wait(no_wait, client.reset_aad_profile,
                       resource_group_name, name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the node count of a single agent pool in a managed cluster."""
    instance = client.get(resource_group_name, name)
    pools = instance.agent_pool_profiles
    # A multi-pool cluster needs an explicit pool name to know what to scale.
    if len(pools) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    if node_count == 0:
        raise CLIError("Can't scale down to 0 nodes.")
    for pool in pools:
        matched = pool.name == nodepool_name or (nodepool_name == "" and len(pools) == 1)
        if not matched:
            continue
        pool.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               min_count=None, max_count=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               no_wait=False):
    """Update cluster-wide settings: the cluster autoscaler, the load-balancer
    outbound-IP profile, ACR attach/detach and API-server authorized IP ranges.

    Exactly one autoscaler flag may be set at a time; autoscaler changes are
    only supported here for single-node-pool clusters (use 'az aks nodepool'
    for per-pool settings).
    """
    update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    update_lb_profile = load_balancer_managed_outbound_ip_count is not None or \
        load_balancer_outbound_ips is not None or load_balancer_outbound_ip_prefixes is not None
    # At least one kind of update must be requested (and at most one autoscaler flag).
    # NOTE: fixed the usage message below — '--dettach-acr' typo and the
    # misquoted '"--"api-server-authorized-ip-ranges' option name.
    if (update_autoscaler != 1 and not update_lb_profile and
            not attach_acr and
            not detach_acr and
            api_server_authorized_ip_ranges is None):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--load-balancer-managed-outbound-ip-count",'
                       '"--load-balancer-outbound-ips",'
                       '"--load-balancer-outbound-ip-prefixes",'
                       '"--attach-acr" or "--detach-acr",'
                       '"--api-server-authorized-ip-ranges"')
    instance = client.get(resource_group_name, name)
    # For multi-agent pool, use the az aks nodepool command
    if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
    node_count = instance.agent_pool_profiles[0].count
    _validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            # Already enabled: warn and bail out without touching the cluster.
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this node pool.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    subscription_id = get_subscription_id(cmd.cli_ctx)
    client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)
    load_balancer_profile = _get_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes)
    if load_balancer_profile:
        instance.network_profile.load_balancer_profile = load_balancer_profile
    # None means "leave unchanged"; any other value replaces the access profile.
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
                no_wait=False, **kwargs):
    """Upgrade a managed cluster (and, unless control-plane-only, its node pools).

    Legacy clusters (fewer than 8 max agent pools, or any AvailabilitySet
    pool) do not support control-plane-only upgrades; for them every pool is
    upgraded together. The user is prompted before any pool upgrade.
    """
    instance = client.get(resource_group_name, name)
    # Upgrading to the current version is a no-op unless the cluster is in a
    # failed state, where re-submitting can resolve it; warn either way.
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)
    from knack.prompting import prompt_y_n
    upgrade_all = False
    instance.kubernetes_version = kubernetes_version
    # Detect legacy AvailabilitySet-based pools — they force an all-pool upgrade.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not prompt_y_n(msg, default="n"):
                return None
    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
    # Null out the SP and AAD profile because otherwise validation complains.
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# Name and entry module of the CLI extension implementing the
# 'az aks use-dev-spaces' / 'remove-dev-spaces' commands.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
    """Enable Azure Dev Spaces on a cluster via the dev-spaces CLI extension."""
    # Guard clause: nothing to do if the extension cannot be installed/updated.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
    except TypeError:
        # Signature mismatch means the installed extension is too old.
        raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
    except AttributeError as ae:
        raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """Remove Azure Dev Spaces from a cluster via the dev-spaces CLI extension."""
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
    except AttributeError as ae:
        raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the cluster's TLS certificates (long-running service operation)."""
    return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
                   subnet_name=None, no_wait=False):
    """Enable or disable the comma-separated *addons* on *instance* in place.

    Returns the mutated ManagedCluster with the SP and AAD profiles nulled
    out so the subsequent create_or_update call passes validation.
    Raises CLIError on unknown/uninstalled addons or invalid configuration.

    Fixes over the previous revision: the monitoring "already enabled" message
    was missing a space between its concatenated parts, and the virtual-node
    message contained a literal ``{resource_group_name}`` placeholder that was
    never formatted.
    """
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    # Materialize a kubeDashboard profile so the generic logic below can act on it.
    if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
        addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
    os_type = 'Linux'
    for addon_arg in addon_args:
        addon = ADDONS[addon_arg]
        if addon == 'aciConnector':
            # The ACI connector profile key is suffixed with the node OS type.
            addon += os_type
        # Match the existing profile key case-insensitively, if one exists.
        addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
        if enable:
            # Add a new addon profile or update/enable an existing one.
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            if addon == 'omsagent':
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # Normalize the workspace ID: leading '/', no trailing '/', no whitespace.
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
            elif addon.lower() == ('aciConnector' + os_type).lower():
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.'.format(resource_group_name=resource_group_name))
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {'SubnetName': subnet_name}
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # Null out the SP and AAD profile because otherwise validation complains.
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return *module_name* from the installed CLI extension."""
    try:
        # Putting the extension on sys.path makes its package importable.
        from azure.cli.core.extension.operations import add_extension_to_path
        add_extension_to_path(extension_name)
        from importlib import import_module
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None):
    """Translate a comma-separated --enable-addons string into addon profiles.

    Unrecognized addon names, or --workspace-resource-id without the
    monitoring addon, raise CLIError.
    """
    addon_profiles = addon_profiles or {}
    remaining = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in remaining:
        addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
        remaining.remove('http_application_routing')
    if 'kube-dashboard' in remaining:
        addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
        remaining.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in remaining:
        if not workspace_resource_id:
            # No workspace given: create or reuse the subscription default.
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        # Canonicalize: leading '/', no trailing '/', no surrounding whitespace.
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles['omsagent'] = ManagedClusterAddonProfile(
            enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
        remaining.remove('monitoring')
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    # Error out if any (unrecognized) addons remain.
    if remaining:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(remaining), "are" if len(remaining) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update and reload an installed CLI extension.

    Returns True on success (a CLIError during update is logged but treated
    as success); returns False if the extension is not installed or its
    module fails to load.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as err:
        # Non-fatal: e.g. already up to date — fall through and return True.
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure the extension is installed (and optionally updated); True on success."""
    from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
    except ExtensionNotInstalledException:
        # Not installed yet: install it now.
        return _install_dev_spaces_extension(cmd, extension_name)
    if update:
        return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the default Log Analytics workspace for the
    cluster's region, creating the workspace (and its resource group) if
    needed.

    The default workspace lives in 'DefaultResourceGroup-<code>' and is named
    'DefaultWorkspace-<subscription>-<code>', where <code> is derived from the
    resource group's region via the per-cloud maps below.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2"
    }
    # Maps any cluster region to the nearest region that supports workspaces.
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "southcentralus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "eastus",
        "northeurope": "northeurope",
        "southafricanorth": "westeurope",
        "southafricawest": "westeurope",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2"
    }
    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    workspace_region = "eastus"
    workspace_region_code = "EUS"
    # sanity check that locations and clouds match.
    # A region belonging to a different cloud's map means the CLI's cloud
    # setting and the resource group's region disagree.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    # Pick the workspace region/code for the active cloud, with per-cloud fallbacks.
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            # Reuse the workspace if it already exists; 404 means create it below.
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    # Create the workspace and poll (15s intervals) until it reports done.
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution into the Log Analytics workspace
    referenced by the monitoring addon profile.

    Reads the workspace resource ID from ``addon.config``, normalizes it, and
    submits an ARM deployment (into the workspace's own resource group /
    subscription) that installs the 'ContainerInsights' OMS solution.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    if 'loganalyticsworkspaceresourceid' in addon.config:
        addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
    workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
    # Normalize: leading '/', no trailing '/', no surrounding whitespace.
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # Millisecond timestamp makes the deployment names unique per invocation.
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
    # Nested-deployment ARM template: the inner deployment targets the
    # workspace's subscription/resource group (parsed from its resource ID)
    # and installs the ContainerInsights solution there.
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,
                    detach=False):
    """Grant (or, when *detach*, revoke) the cluster SP's access to an ACR.

    *acr_name_or_id* may be a full resource ID or a bare registry name; a
    bare name is resolved across all resource groups in the subscription.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # Resolve the registry directly from its resource ID.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        # Look the registry up by name across all resource groups.
        registry_name = acr_name_or_id
        registry_resource = 'Microsoft.ContainerRegistry/registries'
        try:
            registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
            raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Show the details of a single agent pool."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List every agent pool of a managed cluster."""
    return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      node_vm_size=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      no_wait=False):
    """Add a new VMSS-backed agent pool to an existing managed cluster."""
    # Refuse to clobber an existing pool of the same name.
    for existing in client.list(resource_group_name, cluster_name):
        if existing.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    # Taints arrive as a comma-separated string; pass them through stripped.
    taints_array = []
    if node_taints is not None:
        taints_array = [taint.strip() for taint in node_taints.split(',')]
    if node_vm_size is None:
        if os_type.lower() == "windows":
            raise CLIError('Windows nodepool is not supported')
        node_vm_size = "Standard_DS2_v2"
    agent_pool = AgentPool(
        name=nodepool_name,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        node_taints=taints_array
    )
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale one agent pool to *node_count* nodes (must be non-zero and different)."""
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    requested = int(node_count)
    if requested == 0:
        raise CLIError("Can't scale down to 0 nodes.")
    if requested == instance.count:
        raise CLIError("The new node count is the same as the current node count.")
    instance.count = requested
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          kubernetes_version,
                          nodepool_name,
                          no_wait=False):
    """Upgrade a single agent pool to the given Kubernetes version."""
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    pool.orchestrator_version = kubernetes_version
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, pool)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         no_wait=False):
    """Enable, disable or re-tune the cluster autoscaler on one node pool.

    Exactly one of the three autoscaler flags must be supplied.  ``min_count``
    and ``max_count`` are validated against the pool's current node count when
    enabling or updating.  Returns the create_or_update result, or None when
    the request is a no-op (already enabled / already disabled).
    """
    # Booleans sum as 0/1, so this enforces "exactly one flag was passed".
    update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    if update_flags != 1:
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    node_count = instance.count
    # Counts are mandatory for enable/update, optional (and checked) otherwise.
    _validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # No-op: warn instead of failing so repeated invocations are safe.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            # No-op: already disabled.
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        # Clear the bounds along with the flag so the PUT payload is consistent.
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete an agent pool after verifying it exists on the cluster.

    Raises CLIError (with a hint to run 'aks nodepool list') when no pool
    with the given name is found; the comparison is case-insensitive.
    """
    wanted = nodepool_name.lower()
    agentpool_exists = any(
        agentpool_profile.name.lower() == wanted
        for agentpool_profile in client.list(resource_group_name, cluster_name)
    )
    if not agentpool_exists:
        # Fixed typo in the original message: "doesnt" -> "doesn't".
        raise CLIError("Node pool {} doesn't exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Grant (or, with detach=True, revoke) the 'acrpull' role on an ACR.

    Raises CLIError when the role assignment cannot be created or deleted.
    """
    if detach:
        removed = _delete_role_assignments(cli_ctx, 'acrpull', client_id,
                                           scope=registry_id)
        if not removed:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    added = _add_role_assignment(cli_ctx, 'acrpull', client_id,
                                 scope=registry_id)
    if not added:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Load or create the service principal used by an AKS cluster.

    Resolution order: explicit --service-principal argument, then the cached
    aksServicePrincipal.json for this subscription, then a newly created SP.
    The result is persisted back to the cache file and returned.
    """
    file_name_aks = 'aksServicePrincipal.json'
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # Try the on-disk cache first.
        principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing cached: create a fresh SP with a random homepage URL.
            if not client_secret:
                client_secret = _create_client_secret()
            # Random salt keeps the placeholder URL unique per invocation.
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    # Persist whatever we ended up with, then reload it for a uniform return shape.
    store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_osa_aad(cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Create or update the AAD application backing OSA cluster sign-in.

    When ``create`` is True, an AAD app named after the cluster is created,
    or updated in place if one with the same identifier URI already exists
    (the update path also sets the real reply URL from ``identifier``).
    Returns an OpenShiftManagedClusterAADIdentityProvider describing the app.
    """
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")
        # Look for an existing app registered under the same identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result = create_application(client=rbac_client.applications,
                                        display_name=name,
                                        identifier_uris=[app_id_name],
                                        homepage=app_id_name,
                                        password=aad_client_app_secret,
                                        required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
        # Get the TenantID from the current login when not supplied explicitly.
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Load or create the service principal used by an ACS cluster.

    Same flow as _ensure_aks_service_principal but uses the default ACS
    cache file and additionally grants the new SP the Contributor role.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = _create_client_secret()
            # Random salt keeps the placeholder URL unique per invocation.
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # add role first before save it
            if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
                logger.warning('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    # Persist, then reload for a uniform return shape.
    store_acs_service_principal(subscription_id, client_secret, service_principal)
    return load_acs_service_principal(subscription_id)
def _create_client_secret():
    """Return a random client secret: 20 hex characters plus a trailing '$'.

    A special character is appended to satisfy AAD service principal
    secret complexity requirements.
    """
    special_char = '$'
    # os.urandom is a CSPRNG; bytes.hex() is the modern binascii.b2a_hex.
    return os.urandom(10).hex() + special_char
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of a resource group.

    The underlying GET errors out if the group does not exist, which is the
    desired validation side effect.
    """
    groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
                                   min_count,
                                   max_count,
                                   node_count,
                                   agent_pool_profile):
    """Validate autoscaler bounds and, when enabling, stamp them on the profile.

    Raises CLIError when counts are missing/inconsistent, or when min/max are
    supplied without --enable-cluster-autoscaler.
    """
    if not enable_cluster_autoscaler:
        # Counts only make sense together with the enable flag.
        if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
        return
    if min_count is None or max_count is None:
        raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
    lo, hi = int(min_count), int(max_count)
    if lo > hi:
        raise CLIError('Value of min-count should be less than or equal to value of max-count')
    if not lo <= int(node_count) <= hi:
        raise CLIError('node-count is not in the range of min-count and max-count')
    agent_pool_profile.min_count = lo
    agent_pool_profile.max_count = hi
    agent_pool_profile.enable_auto_scaling = True
def _validate_autoscaler_update_counts(min_count, max_count, node_count, is_enable_or_update):
    """Sanity-check min/max/node counts for autoscaler enable/update requests.

    Missing counts are only an error when enabling or updating; when both are
    present they must bracket the current node count.
    """
    if min_count is None or max_count is None:
        if is_enable_or_update:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
        return
    lo, hi, current = int(min_count), int(max_count), int(node_count)
    if lo > hi:
        raise CLIError('Value of min-count should be less than or equal to value of max-count.')
    if current < lo or current > hi:
        raise CLIError("Current node count '{}' is not in the range of min-count and max-count.".format(node_count))
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Write a kubeconfig to stdout (path == "-") or merge it into ``path``.

    The target file is created with 0600 permissions if it does not exist;
    the new config is staged in a temp file and merged so that existing
    contexts are preserved.  YAML merge failures are logged, not raised.
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Tolerate a concurrent mkdir; re-raise anything else.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # 0o600: the kubeconfig holds credentials, keep it owner-only.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        # Always close and delete the staging file, even on failure.
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
    """Delete selected attributes whose value is None from AKS cluster models.

    Mutates the models in place and returns the same list.
    """
    cluster_attrs = ['tags']
    pool_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
    sp_attrs = ['secret']

    def _drop_none(obj, names):
        # Mirrors the original getattr/delattr behaviour on SDK models.
        for attr in names:
            if getattr(obj, attr, None) is None:
                delattr(obj, attr)

    for managed_cluster in managed_clusters:
        _drop_none(managed_cluster, cluster_attrs)
        for ap_profile in managed_cluster.agent_pool_profiles:
            _drop_none(ap_profile, pool_attrs)
        _drop_none(managed_cluster.service_principal_profile, sp_attrs)
    return managed_clusters
def _remove_osa_nulls(managed_clusters):
    """Delete selected attributes whose value is None from OSA cluster models.

    Mutates the models in place and returns the same list.
    """
    cluster_attrs = ['tags', 'plan', 'type', 'id']
    master_attrs = ['name', 'os_type']
    net_attrs = ['peer_vnet_id']

    def _drop_none(obj, names):
        # Mirrors the original getattr/delattr behaviour on SDK models.
        for attr in names:
            if getattr(obj, attr, None) is None:
                delattr(obj, attr)

    for managed_cluster in managed_clusters:
        _drop_none(managed_cluster, cluster_attrs)
        _drop_none(managed_cluster.master_pool_profile, master_attrs)
        _drop_none(managed_cluster.network_profile, net_attrs)
    return managed_clusters
def _validate_aci_location(norm_location):
    """Raise CLIError if Azure Container Instances is unavailable in the region.

    ``norm_location`` is expected to be an already-normalized location name
    (lowercase, no spaces).
    """
    aci_locations = [
        "australiaeast",
        "canadacentral",
        "centralindia",
        "centralus",
        "eastasia",
        "eastus",
        "eastus2",
        "eastus2euap",
        "japaneast",
        "northcentralus",
        "northeurope",
        "southcentralus",
        "southeastasia",
        "southindia",
        "uksouth",
        "westcentralus",
        "westus",
        "westus2",
        "westeurope"
    ]
    if norm_location in aci_locations:
        return
    raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
                   ' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, optionally scoped to one resource group."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    # Materialize the paged result and strip None attributes for clean output.
    return _remove_osa_nulls(list(clusters))
def openshift_create(cmd, client, resource_group_name, name,  # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """Create an OpenShift managed cluster (OSA v3.11).

    Builds compute/infra/master agent pool profiles, provisions (or reuses)
    an AAD application for sign-in, optionally wires up VNet peering and Log
    Analytics monitoring, then submits the cluster to the resource provider.

    Bug fix: ``osa_aad_identity`` is now initialised to None.  It was
    previously bound only inside the ``except CloudError`` branch (cluster
    does not exist yet), so re-running against an existing cluster raised
    NameError at the post-provision ``_ensure_osa_aad`` call instead of
    completing the update.
    """
    if location is None:
        location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    # Pool names must be 12 chars or less before the ACS RP appends to them.
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    osa_aad_identity = None
    # AAD rotation is not supported on OSA, so only touch AAD when the
    # cluster does not exist yet (client.get raises CloudError).
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Create a brand-new AAD app only when none of the AAD args were given.
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
        osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
                                           aad_client_app_id=aad_client_app_id,
                                           aad_client_app_secret=aad_client_app_secret,
                                           aad_tenant_id=aad_tenant_id, identifier=None,
                                           name=name, create=create_aad,
                                           customer_admin_group_id=customer_admin_group_id)
        identity_providers.append(
            OpenShiftManagedClusterIdentityProvider(
                name='Azure AD',
                provider=osa_aad_identity
            )
        )
    auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        # Accept either a full resource ID or a bare VNet name in this group.
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    if workspace_id is not None:
        # Normalise the workspace resource ID: leading slash, no trailing slash.
        workspace_id = workspace_id.strip()
        if not workspace_id.startswith('/'):
            workspace_id = '/' + workspace_id
        if workspace_id.endswith('/'):
            workspace_id = workspace_id.rstrip('/')
        monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        if osa_aad_identity is not None:
            # Second pass: the AAD reply URL needs the cluster's public
            # hostname, which is only known after provisioning completes.
            _ensure_osa_aad(cmd.cli_ctx,
                            aad_client_app_id=osa_aad_identity.client_id,
                            aad_client_app_secret=osa_aad_identity.secret,
                            aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                            name=name, create=create_aad)
    except CloudError as ex:
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')
        if "No registered resource provider found for location" in ex.message:
            raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Fetch a single OpenShift managed cluster with None attributes removed."""
    cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Resize the 'compute' agent pool of an OpenShift managed cluster.

    NOTE(review): if no pool named "compute" exists, this scales pool 0 —
    same as the original behaviour; confirm whether that is intentional.
    """
    instance = client.get(resource_group_name, name)
    idx = 0
    for pos, profile in enumerate(instance.agent_pool_profiles):
        if profile.name.lower() == "compute":
            idx = pos
            break
    instance.agent_pool_profiles[idx].count = int(compute_count)
    # NOTE(review): the master pool name reset and dropped auth_profile mirror
    # the original; presumably the GET payload cannot be PUT back verbatim —
    # confirm against the RP contract.
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
    """Parse comma-separated public-IP resource IDs into ResourceReference objects.

    Returns None when the argument is empty/None.
    """
    if not load_balancer_outbound_ips:
        return None
    return [ResourceReference(id=part.strip())
            for part in load_balancer_outbound_ips.split(',')]
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
    """Parse comma-separated IP-prefix resource IDs into ResourceReference objects.

    Returns None when the argument is empty/None.
    """
    if not load_balancer_outbound_ip_prefixes:
        return None
    return [ResourceReference(id=part.strip())
            for part in load_balancer_outbound_ip_prefixes.split(',')]
def _get_load_balancer_profile(load_balancer_managed_outbound_ip_count,
                               load_balancer_outbound_ips,
                               load_balancer_outbound_ip_prefixes):
    """Assemble a ManagedClusterLoadBalancerProfile from CLI arguments.

    Returns None when none of the three outbound options were supplied.
    """
    outbound_ips = _get_load_balancer_outbound_ips(load_balancer_outbound_ips)
    outbound_ip_prefixes = _get_load_balancer_outbound_ip_prefixes(
        load_balancer_outbound_ip_prefixes)
    if not any([load_balancer_managed_outbound_ip_count,
                outbound_ips,
                outbound_ip_prefixes]):
        return None
    profile = ManagedClusterLoadBalancerProfile()
    if load_balancer_managed_outbound_ip_count:
        profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
            count=load_balancer_managed_outbound_ip_count
        )
    if outbound_ips:
        profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
            public_ips=outbound_ips
        )
    if outbound_ip_prefixes:
        profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
            public_ip_prefixes=outbound_ip_prefixes
        )
    return profile
| true | true |
f73b4b54ccb5ccc4dfa18a8a22dfa8a4d5a1fa3e | 1,971 | py | Python | selfdrive/debug/cycle_alerts.py | tobylinjs/openpilot | 6c525691e9943cb88096b1d94c1c29cc4b22369f | [
"MIT"
] | null | null | null | selfdrive/debug/cycle_alerts.py | tobylinjs/openpilot | 6c525691e9943cb88096b1d94c1c29cc4b22369f | [
"MIT"
] | null | null | null | selfdrive/debug/cycle_alerts.py | tobylinjs/openpilot | 6c525691e9943cb88096b1d94c1c29cc4b22369f | [
"MIT"
] | 2 | 2020-11-30T23:10:07.000Z | 2020-12-01T15:53:03.000Z | #!/usr/bin/env python3
# flake8: noqa
# pylint: skip-file
# type: ignore
import time
import cereal.messaging as messaging
from selfdrive.car.honda.interface import CarInterface
from selfdrive.controls.lib.events import ET, EVENTS, Events
from selfdrive.controls.lib.alertmanager import AlertManager
def cycle_alerts(duration=200, is_metric=False):
  """Cycle through every known alert so their on-screen rendering can be checked.

  Switches to the next entry of EVENTS every ``duration`` frames and
  publishes matching controlsState/thermal messages at roughly 100 Hz.
  Runs forever; intended as a manual debugging aid.
  """
  alerts = list(EVENTS.keys())
  print(alerts)
  # NOTE(review): any valid CarParams presumably works here — alert creation
  # just needs a CP object; confirm if a different platform is used.
  CP = CarInterface.get_params("HONDA CIVIC 2016 TOURING")
  sm = messaging.SubMaster(['thermal', 'health', 'frame', 'model', 'liveCalibration',
                            'dMonitoringState', 'plan', 'pathPlan', 'liveLocationKalman'])
  controls_state = messaging.pub_sock('controlsState')
  thermal = messaging.pub_sock('thermal')
  # last_alert_millis and alert are unused leftovers; kept for parity.
  idx, last_alert_millis = 0, 0
  alert = alerts[0]
  events = Events()
  AM = AlertManager()
  frame = 0
  while 1:
    # Advance to the next alert every `duration` frames.
    if frame % duration == 0:
      idx = (idx + 1) % len(alerts)
      events.clear()
      events.add(alerts[idx])
    # Request alerts of every type so each event's variants are exercised.
    current_alert_types = [ET.PERMANENT, ET.USER_DISABLE, ET.IMMEDIATE_DISABLE,
                           ET.SOFT_DISABLE, ET.PRE_ENABLE, ET.NO_ENTRY,
                           ET.ENABLE, ET.WARNING]
    a = events.create_alerts(current_alert_types, [CP, sm, is_metric])
    AM.add_many(frame, a)
    AM.process_alerts(frame)
    # Publish the alert fields the UI reads from controlsState.
    dat = messaging.new_message()
    dat.init('controlsState')
    dat.controlsState.alertText1 = AM.alert_text_1
    dat.controlsState.alertText2 = AM.alert_text_2
    dat.controlsState.alertSize = AM.alert_size
    dat.controlsState.alertStatus = AM.alert_status
    dat.controlsState.alertBlinkingRate = AM.alert_rate
    dat.controlsState.alertType = AM.alert_type
    dat.controlsState.alertSound = AM.audible_alert
    controls_state.send(dat.to_bytes())
    # thermal.started = True keeps the UI in the "onroad" state.
    dat = messaging.new_message()
    dat.init('thermal')
    dat.thermal.started = True
    thermal.send(dat.to_bytes())
    frame += 1
    time.sleep(0.01)
if __name__ == '__main__':
  cycle_alerts()
| 28.565217 | 90 | 0.68899 |
import time
import cereal.messaging as messaging
from selfdrive.car.honda.interface import CarInterface
from selfdrive.controls.lib.events import ET, EVENTS, Events
from selfdrive.controls.lib.alertmanager import AlertManager
def cycle_alerts(duration=200, is_metric=False):
  """Cycle through every known alert, publishing controlsState/thermal messages.

  Switches to the next entry of EVENTS every ``duration`` frames and loops
  forever at roughly 100 Hz; a manual debugging aid for alert rendering.
  """
  alerts = list(EVENTS.keys())
  print(alerts)
  CP = CarInterface.get_params("HONDA CIVIC 2016 TOURING")
  sm = messaging.SubMaster(['thermal', 'health', 'frame', 'model', 'liveCalibration',
                            'dMonitoringState', 'plan', 'pathPlan', 'liveLocationKalman'])
  controls_state = messaging.pub_sock('controlsState')
  thermal = messaging.pub_sock('thermal')
  # last_alert_millis and alert are unused leftovers; kept for parity.
  idx, last_alert_millis = 0, 0
  alert = alerts[0]
  events = Events()
  AM = AlertManager()
  frame = 0
  while 1:
    # Advance to the next alert every `duration` frames.
    if frame % duration == 0:
      idx = (idx + 1) % len(alerts)
      events.clear()
      events.add(alerts[idx])
    current_alert_types = [ET.PERMANENT, ET.USER_DISABLE, ET.IMMEDIATE_DISABLE,
                           ET.SOFT_DISABLE, ET.PRE_ENABLE, ET.NO_ENTRY,
                           ET.ENABLE, ET.WARNING]
    a = events.create_alerts(current_alert_types, [CP, sm, is_metric])
    AM.add_many(frame, a)
    AM.process_alerts(frame)
    dat = messaging.new_message()
    dat.init('controlsState')
    dat.controlsState.alertText1 = AM.alert_text_1
    dat.controlsState.alertText2 = AM.alert_text_2
    dat.controlsState.alertSize = AM.alert_size
    dat.controlsState.alertStatus = AM.alert_status
    dat.controlsState.alertBlinkingRate = AM.alert_rate
    dat.controlsState.alertType = AM.alert_type
    dat.controlsState.alertSound = AM.audible_alert
    controls_state.send(dat.to_bytes())
    dat = messaging.new_message()
    dat.init('thermal')
    dat.thermal.started = True
    thermal.send(dat.to_bytes())
    frame += 1
    time.sleep(0.01)
if __name__ == '__main__':
  cycle_alerts()
| true | true |
f73b4b9a37043f259e772fd116cf096efcbdb51e | 3,374 | py | Python | detectron2/modeling/proposal_generator/rrpn.py | dnandha/grraspn | 0a660d3f73487ea2f8caabf791809de283e8b806 | [
"Apache-2.0"
] | null | null | null | detectron2/modeling/proposal_generator/rrpn.py | dnandha/grraspn | 0a660d3f73487ea2f8caabf791809de283e8b806 | [
"Apache-2.0"
] | null | null | null | detectron2/modeling/proposal_generator/rrpn.py | dnandha/grraspn | 0a660d3f73487ea2f8caabf791809de283e8b806 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Dict
import torch
from detectron2.layers import ShapeSpec
from ..box_regression import Box2BoxTransformRotated
from .build import PROPOSAL_GENERATOR_REGISTRY
from .rpn import RPN
from .rrpn_outputs import RRPNOutputs, find_top_rrpn_proposals
logger = logging.getLogger(__name__)
@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
    """
    Rotated RPN subnetwork.
    Please refer to https://arxiv.org/pdf/1703.01086.pdf for the original RRPN paper:
    Ma, J., Shao, W., Ye, H., Wang, L., Wang, H., Zheng, Y., & Xue, X. (2018).
    Arbitrary-oriented scene text detection via rotation proposals.
    IEEE Transactions on Multimedia, 20(11), 3111-3122.
    """

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        # Reuse the standard RPN setup, but swap in the rotated box transform
        # so deltas are interpreted as (x, y, w, h, angle).
        super().__init__(cfg, input_shape)
        self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)

    def forward(self, images, features, gt_instances=None):
        """
        Args:
            images (ImageList): input images of length `N`
            features (dict[str: Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
                Each `Instances` stores ground-truth instances for the corresponding image.
        Returns:
            proposals: list[Instances] or None
            loss: dict[Tensor]
        """
        # RPN training only needs the ground-truth boxes, not full instances.
        gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None
        del gt_instances
        features = [features[f] for f in self.in_features]
        pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
        anchors = self.anchor_generator(features)
        outputs = RRPNOutputs(
            self.box2box_transform,
            self.anchor_matcher,
            self.batch_size_per_image,
            self.positive_fraction,
            images,
            pred_objectness_logits,
            pred_anchor_deltas,
            anchors,
            self.boundary_threshold,
            gt_boxes,
            self.smooth_l1_beta,
            self.lambda_
        )
        if self.training:
            losses = outputs.losses()
        else:
            losses = {}
        with torch.no_grad():
            # Find the top proposals by applying NMS and removing boxes that
            # are too small. The proposals are treated as fixed for approximate
            # joint training with roi heads. This approach ignores the derivative
            # w.r.t. the proposal boxes’ coordinates that are also network
            # responses, so is approximate.
            proposals = find_top_rrpn_proposals(
                outputs.predict_proposals(),
                outputs.predict_objectness_logits(),
                images,
                self.nms_thresh,
                self.pre_nms_topk[self.training],
                self.post_nms_topk[self.training],
                self.min_box_side_len,
                self.training,
            )
        return proposals, losses
| 37.910112 | 96 | 0.630113 |
import logging
from typing import Dict
import torch
from detectron2.layers import ShapeSpec
from ..box_regression import Box2BoxTransformRotated
from .build import PROPOSAL_GENERATOR_REGISTRY
from .rpn import RPN
from .rrpn_outputs import RRPNOutputs, find_top_rrpn_proposals
logger = logging.getLogger(__name__)
@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
    """Rotated region proposal network (RRPN).

    Variant of RPN that regresses rotated (x, y, w, h, angle) boxes; see
    Ma et al., "Arbitrary-oriented scene text detection via rotation
    proposals", IEEE TMM 2018 (https://arxiv.org/pdf/1703.01086.pdf).
    """
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        # Standard RPN setup, but with the rotated 5-dof box transform.
        super().__init__(cfg, input_shape)
        self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
    def forward(self, images, features, gt_instances=None):
        """Compute rotated proposals and, in training mode, RPN losses."""
        # Only ground-truth boxes are needed for RPN supervision.
        gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None
        del gt_instances
        features = [features[f] for f in self.in_features]
        pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
        anchors = self.anchor_generator(features)
        outputs = RRPNOutputs(
            self.box2box_transform,
            self.anchor_matcher,
            self.batch_size_per_image,
            self.positive_fraction,
            images,
            pred_objectness_logits,
            pred_anchor_deltas,
            anchors,
            self.boundary_threshold,
            gt_boxes,
            self.smooth_l1_beta,
            self.lambda_
        )
        if self.training:
            losses = outputs.losses()
        else:
            losses = {}
        with torch.no_grad():
            # Proposals are treated as fixed (no gradient through their
            # coordinates) for approximate joint training with the ROI heads.
            proposals = find_top_rrpn_proposals(
                outputs.predict_proposals(),
                outputs.predict_objectness_logits(),
                images,
                self.nms_thresh,
                self.pre_nms_topk[self.training],
                self.post_nms_topk[self.training],
                self.min_box_side_len,
                self.training,
            )
        return proposals, losses
| true | true |
f73b4ba632336be8c5be6b4dd6a69eee2a3f8f66 | 5,307 | py | Python | dcbench/common/modeling.py | data-centric-ai/dcbench | 831ab2359d686739d0b0c7a589974ce08448e58d | [
"Apache-2.0"
] | 40 | 2021-11-06T00:16:20.000Z | 2022-03-17T11:45:00.000Z | dcbench/common/modeling.py | data-centric-ai/dcbench | 831ab2359d686739d0b0c7a589974ce08448e58d | [
"Apache-2.0"
] | 1 | 2022-01-01T14:40:19.000Z | 2022-01-04T05:25:04.000Z | dcbench/common/modeling.py | data-centric-ai/dcbench | 831ab2359d686739d0b0c7a589974ce08448e58d | [
"Apache-2.0"
] | 5 | 2021-11-17T17:15:41.000Z | 2021-12-31T06:45:13.000Z | from abc import abstractmethod
import PIL
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.hub import load_state_dict_from_url
from torchvision.models import DenseNet as _DenseNet
from torchvision.models import ResNet as _ResNet
from torchvision.models.densenet import _load_state_dict
from torchvision.models.densenet import model_urls as densenet_model_urls
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls as resnet_model_urls
class Model(pl.LightningModule):
    """Base LightningModule that merges a user config over DEFAULT_CONFIG.

    Subclasses override DEFAULT_CONFIG and implement _set_model() to build
    their network from the merged ``self.config``.
    """

    DEFAULT_CONFIG = {}

    def __init__(self, config: dict = None):
        super().__init__()
        # Start from the class defaults and layer the user's overrides on top.
        merged = dict(self.DEFAULT_CONFIG)
        merged.update(config or {})
        self.config = merged
        self._set_model()

    @abstractmethod
    def _set_model(self):
        """Build the underlying network from self.config (subclass hook)."""
        raise NotImplementedError()
class ResNet(_ResNet):
    """torchvision ResNet with a dropout + linear classification head.

    Supports resnet18/resnet50, optionally loading ImageNet-pretrained
    weights before the final layer is replaced with a fresh
    ``num_classes``-way head.  (Removed a stale commented-out version of
    the head assignment.)
    """
    # Channel widths / spatial sizes of the four residual stages (resnet18).
    ACTIVATION_DIMS = [64, 128, 256, 512]
    ACTIVATION_WIDTH_HEIGHT = [64, 32, 16, 8]
    # Blocks-per-stage for each supported architecture name.
    RESNET_TO_ARCH = {"resnet18": [2, 2, 2, 2], "resnet50": [3, 4, 6, 3]}

    def __init__(
        self,
        num_classes: int,
        arch: str = "resnet18",
        dropout: float = 0.0,
        pretrained: bool = True,
    ):
        """Build the backbone; ``arch`` must be a key of RESNET_TO_ARCH."""
        if arch not in self.RESNET_TO_ARCH:
            raise ValueError(
                f"config['classifier'] must be one of: {self.RESNET_TO_ARCH.keys()}"
            )

        # resnet18 uses BasicBlock; deeper variants use Bottleneck.
        block = BasicBlock if arch == "resnet18" else Bottleneck
        super().__init__(block, self.RESNET_TO_ARCH[arch])

        if pretrained:
            state_dict = load_state_dict_from_url(
                resnet_model_urls[arch], progress=True
            )
            # Loaded before the head swap so the fc weights line up.
            self.load_state_dict(state_dict)

        # Replace the ImageNet head with dropout + task-specific linear layer.
        self.fc = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(512 * block.expansion, num_classes)
        )
def default_transform(img: PIL.Image.Image):
    """Standard ImageNet eval preprocessing: resize, center-crop, normalize."""
    pipeline = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    return pipeline(img)
def default_train_transform(img: PIL.Image.Image):
    """ImageNet training augmentation: random resized crop, flip, normalize."""
    pipeline = transforms.Compose(
        [
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    return pipeline(img)
class DenseNet(_DenseNet):
    """torchvision DenseNet with a replaceable classification head.

    Optionally loads ImageNet-pretrained weights, then swaps the final
    classifier for a fresh ``num_classes``-way linear layer.
    """
    # Constructor kwargs for each supported architecture name.
    DENSENET_TO_ARCH = {
        "densenet121": {
            "growth_rate": 32,
            "block_config": (6, 12, 24, 16),
            "num_init_features": 64,
        }
    }
    def __init__(
        self, num_classes: int, arch: str = "densenet121", pretrained: bool = True
    ):
        # ``arch`` must be a key of DENSENET_TO_ARCH.
        if arch not in self.DENSENET_TO_ARCH:
            raise ValueError(
                f"config['classifier'] must be one of: {self.DENSENET_TO_ARCH.keys()}"
            )
        super().__init__(**self.DENSENET_TO_ARCH[arch])
        if pretrained:
            _load_state_dict(self, densenet_model_urls[arch], progress=True)
        # Replace the ImageNet head with a task-specific one.
        self.classifier = nn.Linear(self.classifier.in_features, num_classes)
class VisionClassifier(Model):
    """LightningModule wrapping a ResNet or DenseNet image classifier.

    Trains with cross-entropy; batches are dicts with "input", "target"
    and (for training) "id" keys.
    """
    DEFAULT_CONFIG = {
        "lr": 1e-4,
        "model_name": "resnet",
        "arch": "resnet18",
        "pretrained": True,
        "num_classes": 2,
        "transform": default_transform,
        "train_transform": default_train_transform,
    }
    def _set_model(self):
        # Instantiate the backbone selected by config["model_name"].
        if self.config["model_name"] == "resnet":
            self.model = ResNet(
                num_classes=self.config["num_classes"],
                arch=self.config["arch"],
                pretrained=self.config["pretrained"],
            )
        elif self.config["model_name"] == "densenet":
            self.model = DenseNet(
                num_classes=self.config["num_classes"], arch=self.config["arch"]
            )
        else:
            raise ValueError(f"Model name {self.config['model_name']} not supported.")
    def forward(self, x):
        # Delegate to the wrapped backbone; returns class logits.
        return self.model(x)
    def training_step(self, batch, batch_idx):
        # "id" is unpacked but unused here.
        inputs, targets, _ = batch["input"], batch["target"], batch["id"]
        outs = self.forward(inputs)
        loss = nn.functional.cross_entropy(outs, targets)
        self.log("train_loss", loss, on_step=True, logger=True)
        return loss
    def validation_step(self, batch, batch_idx):
        inputs, targets = batch["input"], batch["target"]
        outs = self.forward(inputs)
        loss = nn.functional.cross_entropy(outs, targets)
        self.log("valid_loss", loss)
    def validation_epoch_end(self, outputs) -> None:
        # NOTE(review): self.metrics is never assigned in this class or its
        # visible base; presumably it is attached externally before
        # validation runs — confirm, otherwise this raises AttributeError.
        for metric_name, metric in self.metrics.items():
            self.log(f"valid_{metric_name}", metric.compute())
            metric.reset()
    def test_epoch_end(self, outputs) -> None:
        # Tests reuse the validation aggregation.
        return self.validation_epoch_end(outputs)
    def test_step(self, batch, batch_idx):
        return self.validation_step(batch, batch_idx)
    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=self.config["lr"])
        return optimizer
| 31.035088 | 88 | 0.618805 | from abc import abstractmethod
import PIL
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.hub import load_state_dict_from_url
from torchvision.models import DenseNet as _DenseNet
from torchvision.models import ResNet as _ResNet
from torchvision.models.densenet import _load_state_dict
from torchvision.models.densenet import model_urls as densenet_model_urls
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls as resnet_model_urls
class Model(pl.LightningModule):
    """Base LightningModule: merges a user config over ``DEFAULT_CONFIG`` and
    defers model construction to the subclass hook ``_set_model``."""
    # Subclasses override this with their default hyperparameters.
    DEFAULT_CONFIG = {}
    def __init__(self, config: dict = None):
        super().__init__()
        # Copy so per-instance updates never mutate the class-level defaults.
        self.config = self.DEFAULT_CONFIG.copy()
        if config is not None:
            self.config.update(config)
        self._set_model()
    @abstractmethod
    def _set_model(self):
        """Build the underlying model; must be implemented by subclasses."""
        raise NotImplementedError()
class ResNet(_ResNet):
    """torchvision ResNet with a dropout + linear head for ``num_classes``."""
    # Per-stage activation constants (unused in this class; presumably
    # consumed by callers inspecting intermediate activations -- confirm).
    ACTIVATION_DIMS = [64, 128, 256, 512]
    ACTIVATION_WIDTH_HEIGHT = [64, 32, 16, 8]
    # Maps arch name to the per-stage block counts expected by _ResNet.
    RESNET_TO_ARCH = {"resnet18": [2, 2, 2, 2], "resnet50": [3, 4, 6, 3]}
    def __init__(
        self,
        num_classes: int,
        arch: str = "resnet18",
        dropout: float = 0.0,
        pretrained: bool = True,
    ):
        """Build the backbone and optionally load pretrained ImageNet weights.

        :raises ValueError: if ``arch`` is not in ``RESNET_TO_ARCH``.
        """
        if arch not in self.RESNET_TO_ARCH:
            raise ValueError(
                f"config['classifier'] must be one of: {self.RESNET_TO_ARCH.keys()}"
            )
        # resnet18 uses BasicBlock; resnet50 uses the Bottleneck block.
        block = BasicBlock if arch == "resnet18" else Bottleneck
        super().__init__(block, self.RESNET_TO_ARCH[arch])
        if pretrained:
            # Load weights before replacing the head so shapes match.
            state_dict = load_state_dict_from_url(
                resnet_model_urls[arch], progress=True
            )
            self.load_state_dict(state_dict)
        # Dropout + num_classes-way linear layer replaces the stock fc.
        self.fc = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(512 * block.expansion, num_classes)
        )
def default_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
def default_train_transform(img: PIL.Image.Image):
return transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(img)
class DenseNet(_DenseNet):
DENSENET_TO_ARCH = {
"densenet121": {
"growth_rate": 32,
"block_config": (6, 12, 24, 16),
"num_init_features": 64,
}
}
def __init__(
self, num_classes: int, arch: str = "densenet121", pretrained: bool = True
):
if arch not in self.DENSENET_TO_ARCH:
raise ValueError(
f"config['classifier'] must be one of: {self.DENSENET_TO_ARCH.keys()}"
)
super().__init__(**self.DENSENET_TO_ARCH[arch])
if pretrained:
_load_state_dict(self, densenet_model_urls[arch], progress=True)
self.classifier = nn.Linear(self.classifier.in_features, num_classes)
class VisionClassifier(Model):
    """LightningModule that trains/evaluates a ResNet or DenseNet classifier.

    All behavior is driven by ``self.config`` (see ``DEFAULT_CONFIG``).
    """
    DEFAULT_CONFIG = {
        "lr": 1e-4,
        "model_name": "resnet",
        "arch": "resnet18",
        "pretrained": True,
        "num_classes": 2,
        "transform": default_transform,
        "train_transform": default_train_transform,
    }
    def _set_model(self):
        """Instantiate the backbone selected by ``config["model_name"]``."""
        if self.config["model_name"] == "resnet":
            self.model = ResNet(
                num_classes=self.config["num_classes"],
                arch=self.config["arch"],
                pretrained=self.config["pretrained"],
            )
        elif self.config["model_name"] == "densenet":
            # Fix: forward the "pretrained" flag to DenseNet too; previously
            # it was dropped and config["pretrained"] had no effect here.
            self.model = DenseNet(
                num_classes=self.config["num_classes"],
                arch=self.config["arch"],
                pretrained=self.config["pretrained"],
            )
        else:
            raise ValueError(f"Model name {self.config['model_name']} not supported.")
    def forward(self, x):
        return self.model(x)
    def training_step(self, batch, batch_idx):
        # Batches are dicts with "input", "target" and an "id" (unused here).
        inputs, targets, _ = batch["input"], batch["target"], batch["id"]
        outs = self.forward(inputs)
        loss = nn.functional.cross_entropy(outs, targets)
        self.log("train_loss", loss, on_step=True, logger=True)
        return loss
    def validation_step(self, batch, batch_idx):
        inputs, targets = batch["input"], batch["target"]
        outs = self.forward(inputs)
        loss = nn.functional.cross_entropy(outs, targets)
        self.log("valid_loss", loss)
    def validation_epoch_end(self, outputs) -> None:
        # NOTE(review): self.metrics is not defined in this class or in Model;
        # presumably attached externally -- confirm before relying on it.
        for metric_name, metric in self.metrics.items():
            self.log(f"valid_{metric_name}", metric.compute())
            metric.reset()
    def test_epoch_end(self, outputs) -> None:
        return self.validation_epoch_end(outputs)
    def test_step(self, batch, batch_idx):
        return self.validation_step(batch, batch_idx)
    def configure_optimizers(self):
        """Adam over all parameters with the configured learning rate."""
        optimizer = torch.optim.Adam(self.parameters(), lr=self.config["lr"])
        return optimizer
| true | true |
f73b4bb06333f21f4b607945e4cbb10da1175c58 | 6,389 | py | Python | autossl/server/incapsula.py | rootheory/AutoSSL | d0f28653f68dbc5455f5027c1f35e2978275b3ec | [
"MIT"
] | null | null | null | autossl/server/incapsula.py | rootheory/AutoSSL | d0f28653f68dbc5455f5027c1f35e2978275b3ec | [
"MIT"
] | null | null | null | autossl/server/incapsula.py | rootheory/AutoSSL | d0f28653f68dbc5455f5027c1f35e2978275b3ec | [
"MIT"
] | null | null | null | import base64
import datetime
import json
import logging
import time
import requests
from .. import exception, ssl
from . import base
logger = logging.getLogger(__name__)
class IncapsulaSite(base.Server):
    """Incapsula (Imperva) protected site; deploys SSL certificates through
    the Incapsula provisioning API."""
    BASE_URL = "https://my.incapsula.com:443"
    def __init__(self, api_key, api_id, site_id, crt_name, **kwargs):
        """
        :param api_key: Incapsula API key
        :param api_id: Incapsula API id
        :param site_id: identifier of the Incapsula site to manage
        :param crt_name: certificate name (forwarded to base.Server)
        """
        super(IncapsulaSite, self).__init__(crt_name=crt_name, **kwargs)
        self.api_key = api_key
        self.api_id = api_id
        self.site_id = site_id
        # The certificate must contain the full chain - root CA, intermediate CA, and the origin server certificates.
        # see https://docs.imperva.com/bundle/cloud-application-security/page/more/upload-ssl.htm
        self.deploy_full_chain = True
        # enable retry on :
        # - failed DNS lookups
        # - socket connections
        # - connection timeouts
        # but never to requests where data has made it to the server
        self.session = requests.Session()
        self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=5))
        # all queries must contain at least this parameters
        self.basic_params = {'api_key': self.api_key, 'api_id': self.api_id, 'site_id': self.site_id}
        self.site_status = self.session.post(
            url='{}/api/prov/v1/sites/status'.format(self.BASE_URL),
            data=self.basic_params,
        ).json()
    def get_description(self):
        """Human-readable identifier of this server, used in logs and errors."""
        return "[{} - {} ({})]".format(self.__class__.__name__, self.site_status['domain'], self.site_id)
    def deploy_cert(self, key, cert, **kwargs):
        """Upload the private key / full-chain certificate pair to the site.

        :param key: path-like object for the private key (read as bytes)
        :param cert: path-like object for the full-chain certificate
        :raises autossl.exception.DeployCertificateError: on API or network failure
        """
        # authentication data and site ID
        parameters = self.basic_params.copy()
        parameters.update({
            'certificate': base64.b64encode(cert.read_bytes()),
            'private_key': base64.b64encode(key.read_bytes()),
        })
        try:
            json_response = {}
            for _ in range(5):
                json_response = self.session.post(
                    url='{}/api/prov/v1/sites/customCertificate/upload'.format(self.BASE_URL),
                    data=parameters,
                ).json()
                # deployment is a success
                if json_response['res'] == 0:
                    break
                # specific behavior for Internal error code (3015), that is returned quite
                # often by Incapsula... => just retry, it generally works the 2nd or 3rd time
                elif json_response['res'] == 3015:
                    time.sleep(1)
                    continue
                # no success, just raise exception with last error message
                else:
                    raise exception.DeployCertificateError('Unable to deploy new certificate on {}: {}'.format(
                        self.get_description(), json.dumps(json_response, indent=4)))
            else:
                # Bug fix: previously, exhausting all retries on error 3015 fell
                # through and logged success; now it fails explicitly.
                raise exception.DeployCertificateError('Unable to deploy new certificate on {} after 5 attempts: {}'.format(
                    self.get_description(), json.dumps(json_response, indent=4)))
            # Upload successful
            logger.info("Certificate/Key %s updated successfully on %s.", self.crt_name, self.get_description())
        except requests.exceptions.RequestException as ex:
            raise exception.DeployCertificateError(
                msg='Unable to deploy new certificate on {}: {}'.format(self.get_description(), str(ex)),
                original_exception=ex,
            )
    def is_same(self, common_name=None, sans=None, exact_match=False):
        """Check if domain for targeted Incapsula site is part of specified domains

        :param common_name: Common name
        :type common_name: str
        :param sans: list of Subject Alternate Names
        :type sans: list
        :param exact_match: if True, certificate must exactly match input domains
            if False, input domain will also match wildcard certificate and additional domains in certificate will
            be ignored
        :type exact_match: bool
        :return: True if Incapsula site domain is covered by input domains
        """
        blueprint_domains = ssl.get_domains(common_name=common_name, sans=sans)
        # check if Incapsula domain is included in input domains
        for blueprint_domain in blueprint_domains:
            if ssl.is_domain_matching(domain_to_check=self.site_status['domain'],
                                      reference_domain=blueprint_domain,
                                      exact_match=exact_match):
                return True
        return False
    def get_certificate_information(self):
        """Retrieve certificate information from Incapsula site.

        :return: SSL certificate information
        :rtype: autossl.ssl.SslCertificate
        :raise autossl.exception.CertificateNotFound: if certificate does not exist yet on server
        """
        custom_certificate = self.site_status.get('ssl', {}).get('custom_certificate')
        # if invalid incapsula response or no custom_certificate deployed yet
        if self.site_status['res'] != 0 or custom_certificate is None or custom_certificate.get('active') is not True:
            raise exception.CertificateNotFound("No certificate found for site ID {}".format(self.get_description()))
        # Get expiration date (in milliseconds, since 1970) from site configuration
        expiration_date_ms = custom_certificate.get('expirationDate')
        if expiration_date_ms is None:
            raise RuntimeError(
                "Unable to get certificate expiration date (path: ssl.custom_certificate.expirationDate)"
                " for site ID {} in api response {}".format(self.get_description(),
                                                            json.dumps(self.site_status, indent=4))
            )
        return ssl.SslCertificate(
            common_name=self.site_status['domain'],
            sans=[],
            expiration=datetime.datetime.utcfromtimestamp(float(expiration_date_ms)/1000),
        )
    def create_acme_challenge(self, token, key_authorization):
        """Create token on server with specified value

        :param token: challenge key
        :param key_authorization: challenge value
        """
        logger.debug("No challenge to deploy for Incapsula that is just acting as a proxy to real server.")
    def delete_acme_challenge(self, token):
        """Delete challenge created on server

        :param token: challenge key to delete from server
        """
        logger.debug("No challenge to cleanup for Incapsula that is just acting as a proxy to real server.")
| 42.593333 | 118 | 0.631085 | import base64
import datetime
import json
import logging
import time
import requests
from .. import exception, ssl
from . import base
logger = logging.getLogger(__name__)
class IncapsulaSite(base.Server):
    """Incapsula (Imperva) protected site; deploys SSL certificates through
    the Incapsula provisioning API."""
    BASE_URL = "https://my.incapsula.com:443"
    def __init__(self, api_key, api_id, site_id, crt_name, **kwargs):
        """
        :param api_key: Incapsula API key
        :param api_id: Incapsula API id
        :param site_id: identifier of the Incapsula site to manage
        :param crt_name: certificate name (forwarded to base.Server)
        """
        super(IncapsulaSite, self).__init__(crt_name=crt_name, **kwargs)
        self.api_key = api_key
        self.api_id = api_id
        self.site_id = site_id
        # Incapsula requires the full chain (root + intermediate + origin).
        self.deploy_full_chain = True
        # Transport-level retries (DNS, connect, timeouts) -- never retried
        # once data has reached the server.
        self.session = requests.Session()
        self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=5))
        # Every API query carries at least these parameters.
        self.basic_params = {'api_key': self.api_key, 'api_id': self.api_id, 'site_id': self.site_id}
        self.site_status = self.session.post(
            url='{}/api/prov/v1/sites/status'.format(self.BASE_URL),
            data=self.basic_params,
        ).json()
    def get_description(self):
        """Human-readable identifier of this server, used in logs and errors."""
        return "[{} - {} ({})]".format(self.__class__.__name__, self.site_status['domain'], self.site_id)
    def deploy_cert(self, key, cert, **kwargs):
        """Upload the private key / full-chain certificate pair to the site.

        :raises autossl.exception.DeployCertificateError: on API or network failure
        """
        parameters = self.basic_params.copy()
        parameters.update({
            'certificate': base64.b64encode(cert.read_bytes()),
            'private_key': base64.b64encode(key.read_bytes()),
        })
        try:
            json_response = {}
            for _ in range(5):
                json_response = self.session.post(
                    url='{}/api/prov/v1/sites/customCertificate/upload'.format(self.BASE_URL),
                    data=parameters,
                ).json()
                if json_response['res'] == 0:
                    break
                elif json_response['res'] == 3015:
                    # Transient internal error from Incapsula: wait and retry.
                    time.sleep(1)
                    continue
                else:
                    raise exception.DeployCertificateError('Unable to deploy new certificate on {}: {}'.format(
                        self.get_description(), json.dumps(json_response, indent=4)))
            else:
                # Bug fix: previously, exhausting all retries on error 3015 fell
                # through and logged success; now it fails explicitly.
                raise exception.DeployCertificateError('Unable to deploy new certificate on {} after 5 attempts: {}'.format(
                    self.get_description(), json.dumps(json_response, indent=4)))
            logger.info("Certificate/Key %s updated successfully on %s.", self.crt_name, self.get_description())
        except requests.exceptions.RequestException as ex:
            raise exception.DeployCertificateError(
                msg='Unable to deploy new certificate on {}: {}'.format(self.get_description(), str(ex)),
                original_exception=ex,
            )
    def is_same(self, common_name=None, sans=None, exact_match=False):
        """Return True if this site's domain is covered by the input domains."""
        blueprint_domains = ssl.get_domains(common_name=common_name, sans=sans)
        for blueprint_domain in blueprint_domains:
            if ssl.is_domain_matching(domain_to_check=self.site_status['domain'],
                                      reference_domain=blueprint_domain,
                                      exact_match=exact_match):
                return True
        return False
    def get_certificate_information(self):
        """Return the deployed certificate info as an autossl.ssl.SslCertificate.

        :raises autossl.exception.CertificateNotFound: if no active custom certificate
        """
        custom_certificate = self.site_status.get('ssl', {}).get('custom_certificate')
        if self.site_status['res'] != 0 or custom_certificate is None or custom_certificate.get('active') is not True:
            raise exception.CertificateNotFound("No certificate found for site ID {}".format(self.get_description()))
        # Expiration is reported in milliseconds since the epoch.
        expiration_date_ms = custom_certificate.get('expirationDate')
        if expiration_date_ms is None:
            raise RuntimeError(
                "Unable to get certificate expiration date (path: ssl.custom_certificate.expirationDate)"
                " for site ID {} in api response {}".format(self.get_description(),
                                                            json.dumps(self.site_status, indent=4))
            )
        return ssl.SslCertificate(
            common_name=self.site_status['domain'],
            sans=[],
            expiration=datetime.datetime.utcfromtimestamp(float(expiration_date_ms)/1000),
        )
    def create_acme_challenge(self, token, key_authorization):
        """No-op: Incapsula only proxies challenges to the real server."""
        logger.debug("No challenge to deploy for Incapsula that is just acting as a proxy to real server.")
    def delete_acme_challenge(self, token):
        """No-op: Incapsula only proxies challenges to the real server."""
        logger.debug("No challenge to cleanup for Incapsula that is just acting as a proxy to real server.")
| true | true |
f73b4ca6b926417368bedf7c1e713ad258349646 | 16,911 | py | Python | SpiceyPy/support_types.py | johnnycakes79/SpiceyPy | 7b63a1555df0adb7926cf5a6cfff14746a9dc4c1 | [
"MIT"
] | null | null | null | SpiceyPy/support_types.py | johnnycakes79/SpiceyPy | 7b63a1555df0adb7926cf5a6cfff14746a9dc4c1 | [
"MIT"
] | null | null | null | SpiceyPy/support_types.py | johnnycakes79/SpiceyPy | 7b63a1555df0adb7926cf5a6cfff14746a9dc4c1 | [
"MIT"
] | null | null | null | # Collection of supporting functions for wrapper functions
__author__ = 'AndrewAnnex'
from ctypes import c_char_p, c_bool, c_int, c_double, c_char, c_void_p, sizeof, \
POINTER, pointer, Array, create_string_buffer, create_unicode_buffer, cast, Structure, \
CFUNCTYPE, string_at
import numpy
from numpy import ctypeslib as numpc
import six
errorformat = """
================================================================================
Toolkit version: {tkvsn}
{short} --
{explain}
{long}
{traceback}
================================================================================\
"""
class SpiceyError(Exception):
    """Exception raised to surface CSPICE toolkit errors to Python callers.

    :type value: str
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return self.value
def toDoubleVector(x):
    # Convert a python/numpy sequence to a ctypes double array (see DoubleArrayType).
    return DoubleArray.from_param(param=x)
def toDoubleMatrix(x):
    # Convert a nested sequence / 2-D numpy array to a ctypes double matrix.
    return DoubleMatrix.from_param(param=x)
def toIntVector(x):
    # Convert a python/numpy sequence to a ctypes int array (see IntArrayType).
    return IntArray.from_param(param=x)
def toBoolVector(x):
    # Convert a python/numpy sequence to a ctypes bool array (see BoolArrayType).
    return BoolArray.from_param(param=x)
def toPythonString(inString):
    """Convert a ctypes char pointer/buffer to a native python string.

    On python 2 the raw (byte) string is returned; on python 3 the bytes read
    from the buffer are decoded to ``str``.
    """
    if six.PY2:
        if isinstance(inString, c_char_p):
            # Unwrap the pointer and recurse on the raw value.
            return toPythonString(inString.value)
        return string_at(inString)
    elif six.PY3:
        if isinstance(inString, c_char_p):
            return toPythonString(inString.value)
        # string_at reads up to the first NUL byte; decode to str.
        return bytes.decode(string_at(inString))
def listtocharvector(x):
    # Build a ctypes array of char pointers from a list of python strings.
    assert (isinstance(x, list))
    return (c_char_p * len(x))(*[stringToCharP(y) for y in x])
def charvector(ndim=1, lenvals=10):
    # Allocate a blank 2-D ctypes char array: ndim rows of lenvals chars.
    return ((c_char * lenvals) * ndim)()
def listtodoublematrix(data, x=3, y=3):
    """Copy a nested python sequence into a y-row by x-column ctypes double matrix."""
    matrix = ((c_double * x) * y)()
    for row_index, row in enumerate(data):
        matrix[row_index] = tuple(row)
    return matrix
def emptyCharArray(xLen=None, yLen=None):
    """Allocate a blank 2-D ctypes char array of yLen rows by xLen chars.

    Either dimension may be an int or a c_int; missing/falsy dimensions
    default to 1.
    """
    xLen = xLen or 1
    yLen = yLen or 1
    if isinstance(xLen, c_int):
        xLen = xLen.value
    if isinstance(yLen, c_int):
        yLen = yLen.value
    return ((c_char * xLen) * yLen)()
def emptyDoubleMatrix(x=3, y=3):
    # Zero-filled y-by-x ctypes double matrix (default 3x3).
    return ((c_double * x) * y)()
def emptyDoubleVector(n):
    """Allocate a zero-initialized ctypes double array of length *n* (int or c_int)."""
    if isinstance(n, c_int):
        n = n.value
    assert isinstance(n, int)
    return (c_double * n)()
def emptyIntVector(n):
    """Allocate a zero-initialized ctypes int array of length *n* (int or c_int)."""
    if isinstance(n, c_int):
        n = n.value
    assert isinstance(n, int)
    return (c_int * n)()
def vectorToList(x):
    """Convert a ctypes array to a numpy array (or a list of str for char data).

    The element type is sniffed from the first entry, so ``x`` must be
    non-empty: bool/int/float elements become numpy arrays; char buffers
    (elements exposing ``.value`` bytes) become a list of python strings.
    """
    if isinstance(x[0], bool):
        # Fix: numpy.bool (deprecated alias) was removed in numpy>=1.24;
        # the builtin bool is the documented replacement.
        return numpy.fromiter(x, bool, count=len(x))
    elif isinstance(x[0], int):
        return numpy.fromiter(x, numpy.int_, count=len(x))
    elif isinstance(x[0], float):
        return numpy.fromiter(x, numpy.float64, count=len(x))
    elif isinstance(x[0].value, bytes):
        return [toPythonString(y) for y in x]
def matrixToList(x):
    # View a 2-D ctypes array as a numpy array (shares the underlying memory).
    return numpc.as_array(x)
def stringToCharP(inobject, inlen=None):
    """Coerce *inobject* into ctypes character data.

    :param inobject: a str (UTF-8 encoded), bytes (returned unchanged), or an
        int/c_int requesting a blank string of that many spaces.
    :param inlen: optional fixed buffer length for str inputs.
    :return: a ctypes char buffer/pointer (or the original bytes object).
    """
    if inlen and isinstance(inobject, str):
        # A fixed-size buffer was requested alongside a python string.
        return create_string_buffer(inobject.encode(encoding='UTF-8'), inlen)
    if isinstance(inobject, bytes):
        # Already raw bytes; hand back unchanged.
        return inobject
    if isinstance(inobject, c_int):
        inobject = inobject.value
    if isinstance(inobject, int):
        # An int means "give me a blank string of this many spaces".
        return stringToCharP(" " * inobject)
    return c_char_p(inobject.encode(encoding='UTF-8'))
def listToCharArray(inList, xLen=None, yLen=None):
    """Pack a list of python strings into a 2-D ctypes char array.

    :param inList: strings to pack, one per row.
    :param xLen: row width; defaults to the longest string + 1 (for the NUL).
    :param yLen: row count; defaults to len(inList).
    """
    assert (isinstance(inList, list))
    if not yLen:
        yLen = len(inList)
    if not xLen:
        xLen = max(len(s) for s in inList) + 1
    if isinstance(xLen, c_int):
        xLen = xLen.value
    if isinstance(yLen, c_int):
        yLen = yLen.value
    return ((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList])
def listToCharArrayPtr(inList, xLen=None, yLen=None):
    """Pack a list of python strings into a 2-D char array, cast to c_char_p.

    Same layout as listToCharArray, but returned as a char pointer for CSPICE
    functions that take a flattened string buffer.
    """
    assert (isinstance(inList, list))
    if not yLen:
        yLen = len(inList)
    if not xLen:
        xLen = max(len(s) for s in inList) + 1
    if isinstance(xLen, c_int):
        xLen = xLen.value
    if isinstance(yLen, c_int):
        yLen = yLen.value
    return cast(((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList]), c_char_p)
class DoubleArrayType:
    """Adapter converting lists, tuples, numpy arrays and array.array objects
    into ctypes double arrays suitable as CSPICE function arguments."""
    # Class type that will handle all double vectors, inspiration from python cookbook 3rd edition
    def from_param(self, param):
        # Dispatch on the runtime type name; ctypes Arrays pass through as-is.
        typename = type(param).__name__
        if hasattr(self, 'from_' + typename):
            return getattr(self, 'from_' + typename)(param)
        elif isinstance(param, Array):
            return param
        else:
            raise TypeError("Can't convert %s" % typename)
    # Cast from lists/tuples
    def from_list(self, param):
        val = ((c_double) * len(param))(*param)
        return val
    # Cast from Tuple
    def from_tuple(self, param):
        val = ((c_double) * len(param))(*param)
        return val
    # Cast from a numpy array,
    def from_ndarray(self, param):
        # return param.data_as(POINTER(c_double))
        # the above older method does not work with functions which take vectors of known size
        return numpy.ctypeslib.as_ctypes(param)
    # Cast from array.array objects
    def from_array(self, param):
        if param.typecode != 'd':
            raise TypeError('must be an array of doubles')
        # Wrap the existing buffer without copying.
        ptr, _ = param.buffer_info()
        return cast(ptr, POINTER(c_double))
class DoubleMatrixType:
# Class type that will handle all double matricies, inspiration from python cookbook 3rd edition
def from_param(self, param):
typename = type(param).__name__
if hasattr(self, 'from_' + typename):
return getattr(self, 'from_' + typename)(param)
elif isinstance(param, Array):
return param
else:
raise TypeError("Can't convert %s" % typename)
# Cast from lists/tuples
def from_list(self, param):
val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])
return val
# Cast from Tuple
def from_tuple(self, param):
val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])
return val
# Cast from a numpy array
def from_ndarray(self, param):
#return param.data_as(POINTER(c_double))
return numpy.ctypeslib.as_ctypes(param)
# Cast from a numpy matrix
def from_matrix(self, param):
#return param.data_as(POINTER(c_double))
return numpy.ctypeslib.as_ctypes(param)
class IntArrayType:
# Class type that will handle all int vectors, inspiration from python cookbook 3rd edition
def from_param(self, param):
typename = type(param).__name__
if hasattr(self, 'from_' + typename):
return getattr(self, 'from_' + typename)(param)
elif isinstance(param, Array):
return param
else:
raise TypeError("Can't convert %s" % typename)
# Cast from lists/tuples
def from_list(self, param):
val = ((c_int) * len(param))(*param)
return val
# Cast from Tuple
def from_tuple(self, param):
val = ((c_int) * len(param))(*param)
return val
# Cast from a numpy array
def from_ndarray(self, param):
#return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be..
#return numpy.ctypeslib.as_ctypes(param)
return self.from_param(param.tolist())
# Cast from array.array objects
def from_array(self, param):
if param.typecode != 'i':
raise TypeError('must be an array of ints')
ptr, _ = param.buffer_info()
return cast(ptr, POINTER(c_int))
class BoolArrayType:
# Class type that will handle all int vectors, inspiration from python cookbook 3rd edition
def from_param(self, param):
typename = type(param).__name__
if hasattr(self, 'from_' + typename):
return getattr(self, 'from_' + typename)(param)
elif isinstance(param, Array):
return param
else:
raise TypeError("Can't convert %s" % typename)
# Cast from lists/tuples
def from_list(self, param):
val = ((c_bool) * len(param))(*param)
return val
# Cast from Tuple
def from_tuple(self, param):
val = ((c_bool) * len(param))(*param)
return val
# Cast from a numpy array
def from_ndarray(self, param):
#return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be..
#return numpy.ctypeslib.as_ctypes(param)
return self.from_param(param.tolist())
DoubleArray = DoubleArrayType()
IntArray = IntArrayType()
BoolArray = BoolArrayType()
DoubleMatrix = DoubleMatrixType()
class Plane(Structure):
    """ctypes mirror of the CSPICE SpicePlane struct (normal vector + constant)."""
    _fields_ = [
        ('_normal', c_double * 3),
        ('_constant', c_double)
    ]
    @property
    def normal(self):
        # Plane normal vector, exposed as a numpy array.
        return vectorToList(self._normal)
    @property
    def constant(self):
        return self._constant
    def __str__(self):
        return '<Plane: normal=%s; constant=%s>' % (', '.join([str(x) for x in self._normal]), self._constant)
class Ellipse(Structure):
    """ctypes mirror of the CSPICE SpiceEllipse struct (center + two semi-axes)."""
    _fields_ = [
        ('_center', c_double * 3),
        ('_semi_major', c_double * 3),
        ('_semi_minor', c_double * 3)
    ]
    @property
    def center(self):
        # Ellipse center, exposed as a numpy array.
        return vectorToList(self._center)
    @property
    def semi_major(self):
        return vectorToList(self._semi_major)
    @property
    def semi_minor(self):
        return vectorToList(self._semi_minor)
    def __str__(self):
        return '<SpiceEllipse: center = %s, semi_major = %s, semi_minor = %s>' % \
               (self.center, self.semi_major, self.semi_minor)
class DataType(object):
SPICE_CHR = 0
SPICE_DP = 1
SPICE_INT = 2
SPICE_TIME = 3
SPICE_BOOL = 4
CHR = 0
DP = 1
INT = 2
TIME = 3
BOOL = 4
def __init__(self):
pass
class SpiceEKDataType(c_int):
_fields_ = [
('SPICE_CHR', c_int(0)),
('SPICE_DP', c_int(1)),
('SPICE_INT', c_int(2)),
('SPICE_TIME', c_int(3)),
('SPICE_BOOL', c_int(4)),
]
class SpiceEKExprClass(c_int):
_fields_ = [
('SPICE_EK_EXP_COL', c_int(0)),
('SPICE_EK_EXP_FUNC', c_int(1)),
('SPICE_EK_EXP_EXPR', c_int(2))
]
class SpiceEKAttDsc(Structure):
_fields_ = [
('_cclass', c_int),
('_dtype', SpiceEKDataType),
('_strlen', c_int),
('_size', c_int),
('_indexd', c_bool),
('_nullok', c_bool)
]
@property
def cclass(self):
return self._cclass
@property
def dtype(self):
return self._dtype.value
@property
def strlen(self):
return self._strlen
@property
def size(self):
return self._size
@property
def indexd(self):
return self._indexd
@property
def nullok(self):
return self._nullok
def __str__(self):
return '<SpiceEKAttDsc cclass = %s, dtype = %s, strlen = %s, size = %s, indexd = %s, nullok = %s >' % \
(self.cclass, self.dtype, self.strlen, self.size, self.indexd, self.nullok)
class SpiceEKSegSum(Structure):
_fields_ = [
('_tabnam', c_char * 65),
('_nrows', c_int),
('_ncols', c_int),
('_cnames', (c_char * 100) * 33),
('_cdescrs', SpiceEKAttDsc * 100)
]
@property
def tabnam(self):
return toPythonString(self._tabnam)
@property
def nrows(self):
return self._nrows
@property
def ncols(self):
return self._ncols
@property
def cnames(self):
return vectorToList(self._cnames)[0:self.ncols]
@property
def cdescrs(self):
return self._cdescrs[0:self.ncols]
def __str__(self):
return '<SpiceEKSegSum tabnam = %s, nrows = %s, ncols = %s, cnames = %s, cdescrs = %s >' % (self.tabnam, self.nrows, self.ncols, self.cnames, self.cdescrs)
#SpiceCell implementation below is inpart from github.com/DaRasch/spiceminer/
# and modified as needed for this author, maybe we should work together?
### helper classes/functions ###
BITSIZE = {'char': sizeof(c_char), 'int': sizeof(c_int), 'double': sizeof(c_double)}
def _char_getter(data_p, index, length):
return toPythonString((c_char * length).from_address(data_p + index * length * BITSIZE['char']))
def _double_getter(data_p, index, length):
return c_double.from_address(data_p + index * BITSIZE['double']).value
def _int_getter(data_p, index, length):
return c_int.from_address(data_p + index * BITSIZE['int']).value
def SPICEDOUBLE_CELL(size):
return SpiceCell.double(size)
def SPICEINT_CELL(size):
return SpiceCell.integer(size)
def SPICECHAR_CELL(size, length):
return SpiceCell.character(size, length)
class SpiceCell(Structure):
    """ctypes mirror of the CSPICE SpiceCell struct.

    A cell is a fixed-capacity vector of char/double/int data preceded by a
    6-slot control area: ``base`` points at the whole buffer, ``data`` at the
    first user element, ``size`` is the capacity and ``card`` the number of
    elements currently in use.
    """
    #Most written by DaRasch
    DATATYPES_ENUM = {'char': 0, 'double': 1, 'int': 2, 'time': 3, 'bool': 4}
    # Element accessors indexed by dtype (time/bool are stored as ints).
    DATATYPES_GET = [_char_getter, _double_getter] + [_int_getter] * 3
    baseSize = 6
    minCharLen = 6
    # Number of control-area slots that precede the data area.
    CTRLBLOCK = 6
    _fields_ = [
        ('dtype', c_int),
        ('length', c_int),
        ('size', c_int),
        ('card', c_int),
        ('isSet', c_int),
        ('adjust', c_int),
        ('init', c_int),
        ('base', c_void_p),
        ('data', c_void_p)
    ]
    def __init__(self, dtype=None, length=None, size=None, card=None, isSet=None, base=None, data=None):
        """Raw constructor; prefer the character/integer/double classmethods."""
        super(SpiceCell, self).__init__()
        self.dtype = dtype
        self.length = length
        self.size = size
        self.card = card
        self.isSet = isSet
        self.adjust = 0  # Always False, because not implemented
        self.init = 0  # Always False, because this is the constructor
        self.base = base  # void pointer
        self.data = data
    def __str__(self):
        return '<SpiceCell dtype = %s, length = %s, size = %s, card = %s, isSet = %s, adjust = %s, init = %s, base = %s, data = %s>' % (self.dtype, self.length, self.size, self.card, self.isSet, self.adjust, self.init, self.base, self.data)
    def is_int(self):
        return self.dtype == 2
    def is_double(self):
        return self.dtype == 1
    def is_char(self):
        return self.dtype == 0
    def is_time(self):
        return self.dtype == 3
    def is_bool(self):
        return self.dtype == 4
    def is_set(self):
        return self.isSet == 1
    @classmethod
    def character(cls, size, length):
        """Alternate constructor: a char cell of *size* strings of *length* chars."""
        base = (c_char * ((cls.CTRLBLOCK + size) * length))()
        data = (c_char * (size * length)).from_buffer(
            base, cls.CTRLBLOCK * BITSIZE['char'] * length)
        instance = cls(cls.DATATYPES_ENUM['char'], length, size, 0, 1,
                       cast(base, c_void_p),
                       cast(data, c_void_p))
        return instance
    @classmethod
    def integer(cls, size):
        """Alternate constructor: an int cell holding up to *size* values."""
        base = (c_int * (cls.CTRLBLOCK + size))()
        data = (c_int * size).from_buffer(
            base, cls.CTRLBLOCK * BITSIZE['int'])
        instance = cls(cls.DATATYPES_ENUM['int'], 0, size, 0, 1,
                       cast(base, c_void_p),
                       cast(data, c_void_p))
        return instance
    @classmethod
    def double(cls, size):
        """Alternate constructor: a double cell holding up to *size* values."""
        base = (c_double * (cls.CTRLBLOCK + size))()
        data = (c_double * size).from_buffer(
            base, cls.CTRLBLOCK * BITSIZE['double'])
        instance = cls(cls.DATATYPES_ENUM['double'], 0, size, 0, 1,
                       cast(base, c_void_p),
                       cast(data, c_void_p))
        return instance
    def __len__(self):
        # Length is the cardinality (elements in use), not the capacity.
        return self.card
    def __iter__(self):
        getter = SpiceCell.DATATYPES_GET[self.dtype]
        length, card, data = self.length, self.card, self.data
        for i in range(card):
            yield (getter(data, i, length))
    def __contains__(self, key):
        return key in self.__iter__()
    def __getitem__(self, key):
        getter = SpiceCell.DATATYPES_GET[self.dtype]
        length, card, data = self.length, self.card, self.data
        if isinstance(key, slice):
            start, stop, step = key.start or 0, key.stop or -1, key.step or 1
            #TODO Typechecking
            if card == 0:
                return []
            else:
                return list(getter(data, i, length)
                            for i in range(start % card, stop % card + 1, step))
        if key in range(-card, card):
            return getter(data, key, length)
        elif not isinstance(key, int):
            # NOTE(review): "inices" typo in the message text; left as-is
            # because it is a runtime string.
            msg = 'SpiceCell inices must be integers, not {}'.format(type(key))
            raise TypeError(msg)
        else:
            raise IndexError('SpiceCell index out of range')
    def reset(self):
        # Empty the cell (capacity unchanged) and clear the init flag.
        self.card = 0
        self.init = 0
__author__ = 'AndrewAnnex'
from ctypes import c_char_p, c_bool, c_int, c_double, c_char, c_void_p, sizeof, \
POINTER, pointer, Array, create_string_buffer, create_unicode_buffer, cast, Structure, \
CFUNCTYPE, string_at
import numpy
from numpy import ctypeslib as numpc
import six
errorformat = """
================================================================================
Toolkit version: {tkvsn}
{short} --
{explain}
{long}
{traceback}
================================================================================\
"""
class SpiceyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def toDoubleVector(x):
return DoubleArray.from_param(param=x)
def toDoubleMatrix(x):
return DoubleMatrix.from_param(param=x)
def toIntVector(x):
return IntArray.from_param(param=x)
def toBoolVector(x):
return BoolArray.from_param(param=x)
def toPythonString(inString):
if six.PY2:
if isinstance(inString, c_char_p):
return toPythonString(inString.value)
return string_at(inString)
elif six.PY3:
if isinstance(inString, c_char_p):
return toPythonString(inString.value)
return bytes.decode(string_at(inString))
def listtocharvector(x):
assert (isinstance(x, list))
return (c_char_p * len(x))(*[stringToCharP(y) for y in x])
def charvector(ndim=1, lenvals=10):
return ((c_char * lenvals) * ndim)()
def listtodoublematrix(data, x=3, y=3):
matrix = ((c_double * x) * y)()
for i, row in enumerate(data):
matrix[i] = tuple(row)
return matrix
def emptyCharArray(xLen=None, yLen=None):
if not yLen:
yLen = 1
if not xLen:
xLen = 1
if isinstance(xLen, c_int):
xLen = xLen.value
if isinstance(yLen, c_int):
yLen = yLen.value
return ((c_char * xLen) * yLen)()
def emptyDoubleMatrix(x=3, y=3):
return ((c_double * x) * y)()
def emptyDoubleVector(n):
if isinstance(n, c_int):
n = n.value
assert(isinstance(n, int))
return (c_double * n)()
def emptyIntVector(n):
if isinstance(n, c_int):
n = n.value
assert (isinstance(n, int))
return (c_int * n)()
def vectorToList(x):
if isinstance(x[0], bool):
return numpy.fromiter(x, numpy.bool, count=len(x))
elif isinstance(x[0], int):
return numpy.fromiter(x, numpy.int_, count=len(x))
elif isinstance(x[0], float):
return numpy.fromiter(x, numpy.float64, count=len(x))
elif isinstance(x[0].value, bytes):
return [toPythonString(y) for y in x]
def matrixToList(x):
return numpc.as_array(x)
def stringToCharP(inobject, inlen=None):
if inlen and isinstance(inobject, str):
return create_string_buffer(inobject.encode(encoding='UTF-8'), inlen)
if isinstance(inobject, bytes):
return inobject
if isinstance(inobject, c_int):
return stringToCharP(" " * inobject.value)
if isinstance(inobject, int):
return stringToCharP(" " * inobject)
return c_char_p(inobject.encode(encoding='UTF-8'))
def listToCharArray(inList, xLen=None, yLen=None):
assert (isinstance(inList, list))
if not yLen:
yLen = len(inList)
if not xLen:
xLen = max(len(s) for s in inList) + 1
if isinstance(xLen, c_int):
xLen = xLen.value
if isinstance(yLen, c_int):
yLen = yLen.value
return ((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList])
def listToCharArrayPtr(inList, xLen=None, yLen=None):
    """Pack a list of strings into a 2-D char array and cast it to char*.

    Same layout rules as :func:`listToCharArray`, but the result is cast
    to ``c_char_p`` for APIs that expect a flat pointer.
    """
    assert isinstance(inList, list)
    rows = yLen if yLen else len(inList)
    width = xLen if xLen else max(len(s) for s in inList) + 1
    if isinstance(width, c_int):
        width = width.value
    if isinstance(rows, c_int):
        rows = rows.value
    buffers = [stringToCharP(item, inlen=width) for item in inList]
    packed = ((c_char * width) * rows)(*buffers)
    return cast(packed, c_char_p)
class DoubleArrayType:
    """ctypes ``argtypes`` adapter that accepts list, tuple, numpy ndarray
    or ``array.array('d')`` wherever a C double vector is expected.

    Dispatch is by type name via ``from_<typename>`` methods (Python
    Cookbook, 3rd edition pattern); the method names are part of the
    dispatch contract and must not be renamed.
    """

    def from_param(self, param):
        typename = type(param).__name__
        converter = getattr(self, 'from_' + typename, None)
        if converter is not None:
            return converter(param)
        if isinstance(param, Array):
            # Already a ctypes array: pass through untouched.
            return param
        raise TypeError("Can't convert %s" % typename)

    def from_list(self, param):
        return (c_double * len(param))(*param)

    def from_tuple(self, param):
        return (c_double * len(param))(*param)

    def from_ndarray(self, param):
        # numpy's ctypes bridge hands over a correctly sized ctypes array;
        # (a raw data pointer would not satisfy fixed-size signatures).
        return numpy.ctypeslib.as_ctypes(param)

    def from_array(self, param):
        if param.typecode != 'd':
            raise TypeError('must be an array of doubles')
        ptr, _ = param.buffer_info()
        return cast(ptr, POINTER(c_double))
class DoubleMatrixType:
    """ctypes ``argtypes`` adapter for 2-D double matrices.

    Accepts list-of-rows, tuple-of-rows, numpy ndarray or numpy matrix;
    dispatch is by type name via ``from_<typename>`` methods.
    """
    # Class type that will handle all double matricies, inspiration from python cookbook 3rd edition
    def from_param(self, param):
        typename = type(param).__name__
        if hasattr(self, 'from_' + typename):
            return getattr(self, 'from_' + typename)(param)
        elif isinstance(param, Array):
            # Already a ctypes array: pass through untouched.
            return param
        else:
            raise TypeError("Can't convert %s" % typename)
    def from_list(self, param):
        # Each row is converted through the module-level DoubleArray adapter.
        val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])
        return val
    def from_tuple(self, param):
        val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])
        return val
    def from_ndarray(self, param):
        return numpy.ctypeslib.as_ctypes(param)
    def from_matrix(self, param):
        # numpy.matrix instances share the ndarray conversion path.
        return numpy.ctypeslib.as_ctypes(param)
class IntArrayType:
    """ctypes ``argtypes`` adapter that accepts list, tuple, numpy ndarray
    or ``array.array('i')`` wherever a C int vector is expected.

    Dispatch is by type name via ``from_<typename>`` methods; the method
    names are part of the dispatch contract and must not be renamed.
    """

    def from_param(self, param):
        typename = type(param).__name__
        converter = getattr(self, 'from_' + typename, None)
        if converter is not None:
            return converter(param)
        if isinstance(param, Array):
            # Already a ctypes array: pass through untouched.
            return param
        raise TypeError("Can't convert %s" % typename)

    def from_list(self, param):
        return (c_int * len(param))(*param)

    def from_tuple(self, param):
        return (c_int * len(param))(*param)

    def from_ndarray(self, param):
        # Route through a plain Python list: numpy integer widths do not
        # always match C int, so a direct ctypes view is not used here.
        return self.from_param(param.tolist())

    def from_array(self, param):
        if param.typecode != 'i':
            raise TypeError('must be an array of ints')
        ptr, _ = param.buffer_info()
        return cast(ptr, POINTER(c_int))
class BoolArrayType:
    """ctypes ``argtypes`` adapter for boolean vectors.

    Accepts list, tuple or numpy ndarray; mirrors IntArrayType and
    DoubleArrayType (Python Cookbook, 3rd edition pattern).
    """
    # Class type that will handle all int vectors, inspiration from python cookbook 3rd edition
    def from_param(self, param):
        typename = type(param).__name__
        if hasattr(self, 'from_' + typename):
            return getattr(self, 'from_' + typename)(param)
        elif isinstance(param, Array):
            return param
        else:
            raise TypeError("Can't convert %s" % typename)

    def from_list(self, param):
        val = ((c_bool) * len(param))(*param)
        return val

    def from_tuple(self, param):
        val = ((c_bool) * len(param))(*param)
        return val

    def from_ndarray(self, param):
        # BUG FIX: the original body was truncated to the fragment
        # "list())".  Restored to mirror IntArrayType.from_ndarray:
        # route through a plain Python list and re-dispatch.
        return self.from_param(param.tolist())
# Module-level singleton adapter instances, intended for use as entries
# in ctypes function ``argtypes`` lists.
DoubleArray = DoubleArrayType()
IntArray = IntArrayType()
BoolArray = BoolArrayType()
DoubleMatrix = DoubleMatrixType()
class Plane(Structure):
    """ctypes mirror of the CSPICE SpicePlane: a unit normal vector plus a
    plane constant."""
    _fields_ = [
        ('_normal', c_double * 3),
        ('_constant', c_double)
    ]
    @property
    def normal(self):
        # Converted through vectorToList, so callers see a numpy array.
        return vectorToList(self._normal)
    @property
    def constant(self):
        return self._constant
    def __str__(self):
        return '<Plane: normal=%s; constant=%s>' % (', '.join([str(x) for x in self._normal]), self._constant)
class Ellipse(Structure):
    """ctypes mirror of the CSPICE SpiceEllipse: a center point and two
    semi-axis vectors."""
    _fields_ = [
        ('_center', c_double * 3),
        ('_semi_major', c_double * 3),
        ('_semi_minor', c_double * 3)
    ]
    @property
    def center(self):
        # Converted through vectorToList, so callers see a numpy array.
        return vectorToList(self._center)
    @property
    def semi_major(self):
        return vectorToList(self._semi_major)
    @property
    def semi_minor(self):
        return vectorToList(self._semi_minor)
    def __str__(self):
        return '<SpiceEllipse: center = %s, semi_major = %s, semi_minor = %s>' % \
               (self.center, self.semi_major, self.semi_minor)
class DataType(object):
    """Integer codes for SPICE EK column data types, with short aliases
    (CHR == SPICE_CHR, etc.)."""
    SPICE_CHR = 0
    SPICE_DP = 1
    SPICE_INT = 2
    SPICE_TIME = 3
    SPICE_BOOL = 4
    CHR = 0
    DP = 1
    INT = 2
    TIME = 3
    BOOL = 4
    def __init__(self):
        pass
class SpiceEKDataType(c_int):
    """EK column data type code, stored as a C int.

    NOTE(review): ``_fields_`` on a ``c_int`` subclass has no ctypes
    layout effect; presumably it only documents the enumeration values —
    confirm intent upstream.
    """
    _fields_ = [
        ('SPICE_CHR', c_int(0)),
        ('SPICE_DP', c_int(1)),
        ('SPICE_INT', c_int(2)),
        ('SPICE_TIME', c_int(3)),
        ('SPICE_BOOL', c_int(4)),
    ]
class SpiceEKExprClass(c_int):
    """EK query expression class code, stored as a C int.

    NOTE(review): as with SpiceEKDataType, ``_fields_`` here has no
    ctypes layout effect and serves only as documentation of the values.
    """
    _fields_ = [
        ('SPICE_EK_EXP_COL', c_int(0)),
        ('SPICE_EK_EXP_FUNC', c_int(1)),
        ('SPICE_EK_EXP_EXPR', c_int(2))
    ]
class SpiceEKAttDsc(Structure):
    """ctypes mirror of the CSPICE SpiceEKAttDsc (EK column attribute
    descriptor).  Raw fields are private; read-only properties expose
    Python-friendly values."""
    _fields_ = [
        ('_cclass', c_int),
        ('_dtype', SpiceEKDataType),
        ('_strlen', c_int),
        ('_size', c_int),
        ('_indexd', c_bool),
        ('_nullok', c_bool)
    ]
    @property
    def cclass(self):
        # Column class code.
        return self._cclass
    @property
    def dtype(self):
        # Unwrap the c_int subclass to a plain Python int.
        return self._dtype.value
    @property
    def strlen(self):
        return self._strlen
    @property
    def size(self):
        return self._size
    @property
    def indexd(self):
        return self._indexd
    @property
    def nullok(self):
        return self._nullok
    def __str__(self):
        return '<SpiceEKAttDsc cclass = %s, dtype = %s, strlen = %s, size = %s, indexd = %s, nullok = %s >' % \
               (self.cclass, self.dtype, self.strlen, self.size, self.indexd, self.nullok)
class SpiceEKSegSum(Structure):
    """ctypes mirror of the CSPICE SpiceEKSegSum (EK segment summary).

    The fixed-size C arrays hold up to 100 columns; the properties slice
    them down to the ``ncols`` actually present.
    """
    _fields_ = [
        ('_tabnam', c_char * 65),
        ('_nrows', c_int),
        ('_ncols', c_int),
        ('_cnames', (c_char * 100) * 33),
        ('_cdescrs', SpiceEKAttDsc * 100)
    ]
    @property
    def tabnam(self):
        # Table name, decoded to a Python str.
        return toPythonString(self._tabnam)
    @property
    def nrows(self):
        return self._nrows
    @property
    def ncols(self):
        return self._ncols
    @property
    def cnames(self):
        # Only the first ncols entries of the fixed buffer are valid.
        return vectorToList(self._cnames)[0:self.ncols]
    @property
    def cdescrs(self):
        return self._cdescrs[0:self.ncols]
    def __str__(self):
        return '<SpiceEKSegSum tabnam = %s, nrows = %s, ncols = %s, cnames = %s, cdescrs = %s >' % (self.tabnam, self.nrows, self.ncols, self.cnames, self.cdescrs)
sizeof(c_double)}
def _char_getter(data_p, index, length):
    # Read the index-th fixed-width (length chars) string from a raw char
    # buffer at address data_p and decode it to a Python str.
    return toPythonString((c_char * length).from_address(data_p + index * length * BITSIZE['char']))
def _double_getter(data_p, index, length):
    # Read the index-th double from a raw buffer at address data_p.
    # *length* is unused but kept so all getters share one signature.
    return c_double.from_address(data_p + index * BITSIZE['double']).value
def _int_getter(data_p, index, length):
    # Read the index-th int from a raw buffer at address data_p.
    # *length* is unused but kept so all getters share one signature.
    return c_int.from_address(data_p + index * BITSIZE['int']).value
def SPICEDOUBLE_CELL(size):
    # Convenience constructor mirroring the CSPICE macro of the same name.
    return SpiceCell.double(size)
def SPICEINT_CELL(size):
    # Convenience constructor mirroring the CSPICE macro of the same name.
    return SpiceCell.integer(size)
def SPICECHAR_CELL(size, length):
    # Convenience constructor mirroring the CSPICE macro of the same name.
    return SpiceCell.character(size, length)
class SpiceCell(Structure):
    """ctypes mirror of the CSPICE SpiceCell container.

    A cell is a fixed-capacity vector: a 6-slot control block followed by
    the data area.  ``size`` is the capacity, ``card`` the number of
    valid items, ``dtype`` the element type code (see DATATYPES_ENUM),
    and ``length`` the per-string width for char cells.  Use the
    ``character``/``integer``/``double`` classmethods to construct one.
    """
    DATATYPES_ENUM = {'char': 0, 'double': 1, 'int': 2, 'time': 3, 'bool': 4}
    # One getter per dtype code; time and bool share the int getter.
    DATATYPES_GET = [_char_getter, _double_getter] + [_int_getter] * 3
    baseSize = 6
    minCharLen = 6
    CTRLBLOCK = 6
    _fields_ = [
        ('dtype', c_int),
        ('length', c_int),
        ('size', c_int),
        ('card', c_int),
        ('isSet', c_int),
        ('adjust', c_int),
        ('init', c_int),
        ('base', c_void_p),
        ('data', c_void_p)
    ]
    def __init__(self, dtype=None, length=None, size=None, card=None, isSet=None, base=None, data=None):
        """Set the raw cell fields directly; prefer the classmethod
        constructors, which also allocate the backing buffers."""
        super(SpiceCell, self).__init__()
        self.dtype = dtype
        self.length = length
        self.size = size
        self.card = card
        self.isSet = isSet
        self.adjust = 0
        self.init = 0
        self.base = base
        self.data = data
    def __str__(self):
        return '<SpiceCell dtype = %s, length = %s, size = %s, card = %s, isSet = %s, adjust = %s, init = %s, base = %s, data = %s>' % (self.dtype, self.length, self.size, self.card, self.isSet, self.adjust, self.init, self.base, self.data)
    def is_int(self):
        return self.dtype == 2
    def is_double(self):
        return self.dtype == 1
    def is_char(self):
        return self.dtype == 0
    def is_time(self):
        return self.dtype == 3
    def is_bool(self):
        return self.dtype == 4
    def is_set(self):
        return self.isSet == 1
    @classmethod
    def character(cls, size, length):
        """Allocate an empty char cell holding *size* strings of width
        *length*; the data pointer skips the 6-slot control block."""
        base = (c_char * ((cls.CTRLBLOCK + size) * length))()
        data = (c_char * (size * length)).from_buffer(
            base, cls.CTRLBLOCK * BITSIZE['char'] * length)
        instance = cls(cls.DATATYPES_ENUM['char'], length, size, 0, 1,
                       cast(base, c_void_p),
                       cast(data, c_void_p))
        return instance
    @classmethod
    def integer(cls, size):
        """Allocate an empty int cell of capacity *size*."""
        base = (c_int * (cls.CTRLBLOCK + size))()
        data = (c_int * size).from_buffer(
            base, cls.CTRLBLOCK * BITSIZE['int'])
        instance = cls(cls.DATATYPES_ENUM['int'], 0, size, 0, 1,
                       cast(base, c_void_p),
                       cast(data, c_void_p))
        return instance
    @classmethod
    def double(cls, size):
        """Allocate an empty double cell of capacity *size*."""
        base = (c_double * (cls.CTRLBLOCK + size))()
        data = (c_double * size).from_buffer(
            base, cls.CTRLBLOCK * BITSIZE['double'])
        instance = cls(cls.DATATYPES_ENUM['double'], 0, size, 0, 1,
                       cast(base, c_void_p),
                       cast(data, c_void_p))
        return instance
    def __len__(self):
        # Length is the cardinality (valid items), not the capacity.
        return self.card
    def __iter__(self):
        getter = SpiceCell.DATATYPES_GET[self.dtype]
        length, card, data = self.length, self.card, self.data
        for i in range(card):
            yield (getter(data, i, length))
    def __contains__(self, key):
        return key in self.__iter__()
    def __getitem__(self, key):
        """Index or slice the valid items.

        NOTE(review): slice bounds are taken modulo ``card`` (stop
        defaults to -1, i.e. the last element inclusive) — presumably
        intentional wrap-around semantics; confirm before relying on it.
        """
        getter = SpiceCell.DATATYPES_GET[self.dtype]
        length, card, data = self.length, self.card, self.data
        if isinstance(key, slice):
            start, stop, step = key.start or 0, key.stop or -1, key.step or 1
            if card == 0:
                return []
            else:
                return list(getter(data, i, length)
                            for i in range(start % card, stop % card + 1, step))
        if key in range(-card, card):
            return getter(data, key, length)
        elif not isinstance(key, int):
            # NOTE(review): "inices" is a typo in this runtime message;
            # left untouched here since it is program output.
            msg = 'SpiceCell inices must be integers, not {}'.format(type(key))
            raise TypeError(msg)
        else:
            raise IndexError('SpiceCell index out of range')
    def reset(self):
        # Mark the cell empty; the backing buffers are left allocated.
        self.card = 0
        self.init = 0
f73b4d3ca2fe5333ca7d75c1015f0972eed3cc83 | 1,993 | py | Python | NewClassifier.py | Youssefares/IMDB-sentiment-analysis | 18e6037c15c2011aa49b6118fb1182de548ff9ba | [
"MIT"
] | null | null | null | NewClassifier.py | Youssefares/IMDB-sentiment-analysis | 18e6037c15c2011aa49b6118fb1182de548ff9ba | [
"MIT"
] | null | null | null | NewClassifier.py | Youssefares/IMDB-sentiment-analysis | 18e6037c15c2011aa49b6118fb1182de548ff9ba | [
"MIT"
] | null | null | null | from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from itertools import product
class Classifier:
    """Thin wrapper around scikit-learn classifiers with grid tuning.

    :param clf_strategy: key into the supported-classifier table
        ('KNN', 'SVC', 'DecisionTree', 'RandomForestClassifier',
        'LogisticRegression', 'MLP', 'AdaBoost', 'Bagging').
    :param fit_data: training features passed to ``fit``.
    :param fit_labels: training targets passed to ``fit``.
    :raises KeyError: if *clf_strategy* is not a supported key.
    """
    def __init__(self, clf_strategy, fit_data, fit_labels):
        classifers = {
            'KNN': KNeighborsClassifier,
            'SVC': SVC,
            'DecisionTree': DecisionTreeClassifier,
            'RandomForestClassifier': RandomForestClassifier,
            'LogisticRegression': LogisticRegression,
            'MLP': MLPClassifier,
            'AdaBoost': AdaBoostClassifier,
            'Bagging': BaggingClassifier
        }
        self.clf_call = classifers[clf_strategy]
        self.fit_data = fit_data
        self.fit_labels = fit_labels

    def tune(self, params_ranges, tst_data, tst_labels, max_only=True):
        """Grid-search over the cartesian product of *params_ranges*.

        :param params_ranges: dict mapping parameter name -> iterable of
            candidate values.
        :param tst_data: held-out features used for scoring.
        :param tst_labels: held-out targets used for scoring.
        :param max_only: if True, return ``{best_values_tuple: best_score}``;
            otherwise return every ``{values_tuple: score}`` tried.
        """
        max_score = 0.0
        max_params = None
        max_values = None
        all_params = {}
        keys = list(params_ranges.keys())
        for values in product(*params_ranges.values()):
            # classification & score with params
            params = {keys[i]: value for i, value in enumerate(values)}
            clf = self.clf_call(**params).fit(self.fit_data, self.fit_labels)
            score = clf.score(tst_data, tst_labels)
            # book keeping
            all_params[values] = score
            if max_score < score:
                max_score = score
                max_params = params
                max_values = values
        # BUG FIX: the original assigned ``self.score = max_score``, which
        # shadowed (and permanently broke) the ``score`` method on this
        # instance; the best score is now stored under ``best_score``.
        self.max_params = max_params
        self.best_score = max_score
        if max_only:
            # BUG FIX: the original returned ``{max(all_params): max_score}``,
            # i.e. the lexicographically largest parameter tuple, not the
            # tuple that actually achieved the best score.
            return {max_values: max_score}
        else:
            return all_params

    def score(self, params, tst_data, tst_labels):
        """Fit a fresh classifier with *params* and return its accuracy on
        the given test split."""
        clf = self.clf_call(**params).fit(self.fit_data, self.fit_labels)
        return clf.score(tst_data, tst_labels)
def string_from_dict(dictionary):
    """Render a dict as ``"k1=v1,k2=v2,..."`` in insertion order.

    :param dictionary: mapping whose keys and values are str()-able.
    :return: comma-separated ``key=value`` pairs; '' for an empty dict.
    """
    # Idiom fix: str.join replaces the original manual loop with
    # quadratic string concatenation and an index counter.
    return ",".join(str(key) + "=" + str(val) for key, val in dictionary.items())
| 30.661538 | 90 | 0.696939 | from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from itertools import product
class Classifier:
def __init__(self, clf_strategy, fit_data, fit_labels):
classifers = {
'KNN': KNeighborsClassifier,
'SVC': SVC,
'DecisionTree': DecisionTreeClassifier,
'RandomForestClassifier': RandomForestClassifier,
'LogisticRegression': LogisticRegression,
'MLP': MLPClassifier,
'AdaBoost': AdaBoostClassifier,
'Bagging': BaggingClassifier
}
self.clf_call = classifers[clf_strategy]
self.fit_data = fit_data
self.fit_labels = fit_labels
def tune(self, params_ranges, tst_data, tst_labels, max_only=True):
max_score = 0.0
max_params = None
all_params = {}
keys = list(params_ranges.keys())
for values in product(*params_ranges.values()):
params = {keys[i]:value for i, value in enumerate(values)}
clf = self.clf_call(**params).fit(self.fit_data, self.fit_labels)
score = clf.score(tst_data, tst_labels)
all_params[values] = score
if max_score < score:
max_score = score
max_params = params
self.max_params = max_params
self.score = max_score
if max_only:
return {max(all_params):max_score}
else:
return all_params
def score(self, params, tst_data, tst_labels):
clf = self.clf_call(**params).fit(self.fit_data, self.fit_labels)
return clf.score(tst_data, tst_labels)
def string_from_dict(dictionary):
s = ""
i = 0
for key, val in dictionary.items():
if i > 0:
s += ","
s += str(key)+"="+str(val)
i += 1
return s
| true | true |
f73b4ee6ffa382f136f373d379b64ccf08de3d1e | 22,210 | py | Python | python/ccxt/async_support/gateio.py | Bhanditz/ccxt | 5b38d79a315ca9ce70cc7c659033caa831c27fc6 | [
"MIT"
] | 1 | 2020-12-28T05:10:32.000Z | 2020-12-28T05:10:32.000Z | python/ccxt/async_support/gateio.py | Bhanditz/ccxt | 5b38d79a315ca9ce70cc7c659033caa831c27fc6 | [
"MIT"
] | null | null | null | python/ccxt/async_support/gateio.py | Bhanditz/ccxt | 5b38d79a315ca9ce70cc7c659033caa831c27fc6 | [
"MIT"
] | 2 | 2019-03-31T00:03:07.000Z | 2019-04-08T21:27:44.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2: the builtin exists and is left untouched
except NameError:
    basestring = str  # Python 3: alias the removed builtin to str
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
class gateio (Exchange):
    def describe(self):
        """Return the static exchange description: endpoints, supported
        unified methods, fee schedule, error-code mappings and per-quote
        minimum cost defaults."""
        return self.deep_extend(super(gateio, self).describe(), {
            'id': 'gateio',
            'name': 'Gate.io',
            'countries': ['CN'],
            'version': '2',
            'rateLimit': 1000,
            'has': {
                'CORS': False,
                'createMarketOrder': False,
                'fetchTickers': True,
                'withdraw': True,
                'createDepositAddress': True,
                'fetchDepositAddress': True,
                'fetchClosedOrders': True,
                'fetchOpenOrders': True,
                'fetchOrderTrades': True,
                'fetchOrders': True,
                'fetchOrder': True,
                'fetchMyTrades': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
                'api': {
                    'public': 'https://data.gate.io/api',
                    'private': 'https://data.gate.io/api',
                },
                'www': 'https://gate.io/',
                'doc': 'https://gate.io/api2',
                'fees': [
                    'https://gate.io/fee',
                    'https://support.gate.io/hc/en-us/articles/115003577673',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        'pairs',
                        'marketinfo',
                        'marketlist',
                        'tickers',
                        'ticker/{id}',
                        'orderBook/{id}',
                        'trade/{id}',
                        'tradeHistory/{id}',
                        'tradeHistory/{id}/{tid}',
                    ],
                },
                'private': {
                    'post': [
                        'balances',
                        'depositAddress',
                        'newAddress',
                        'depositsWithdrawals',
                        'buy',
                        'sell',
                        'cancelOrder',
                        'cancelAllOrders',
                        'getOrder',
                        'openOrders',
                        'tradeHistory',
                        'withdraw',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': True,
                    'percentage': True,
                    'maker': 0.002,
                    'taker': 0.002,
                },
            },
            # Maps gate.io numeric error codes onto unified exceptions;
            # see handle_errors().
            'exceptions': {
                '4': DDoSProtection,
                '7': NotSupported,
                '8': NotSupported,
                '9': NotSupported,
                '15': DDoSProtection,
                '16': OrderNotFound,
                '17': OrderNotFound,
                '21': InsufficientFunds,
            },
            # https://gate.io/api2#errCode
            'errorCodeNames': {
                '1': 'Invalid request',
                '2': 'Invalid version',
                '3': 'Invalid request',
                '4': 'Too many attempts',
                '5': 'Invalid sign',
                '6': 'Invalid sign',
                '7': 'Currency is not supported',
                '8': 'Currency is not supported',
                '9': 'Currency is not supported',
                '10': 'Verified failed',
                '11': 'Obtaining address failed',
                '12': 'Empty params',
                '13': 'Internal error, please report to administrator',
                '14': 'Invalid user',
                '15': 'Cancel order too fast, please wait 1 min and try again',
                '16': 'Invalid order id or order is already closed',
                '17': 'Invalid orderid',
                '18': 'Invalid amount',
                '19': 'Not permitted or trade is disabled',
                '20': 'Your order size is too small',
                '21': 'You don\'t have enough fund',
            },
            'options': {
                # Fallback minimum order cost per quote currency, used by
                # fetch_markets when computing cost limits.
                'limits': {
                    'cost': {
                        'min': {
                            'BTC': 0.0001,
                            'ETH': 0.001,
                            'USDT': 1,
                        },
                    },
                },
            },
        })
    async def fetch_markets(self, params={}):
        """Fetch and normalize the list of markets from GET marketinfo.

        :returns: list of unified market dicts.
        :raises ExchangeError: if the response has no 'pairs' key.
        """
        response = await self.publicGetMarketinfo()
        markets = self.safe_value(response, 'pairs')
        if not markets:
            raise ExchangeError(self.id + ' fetchMarkets got an unrecognized response')
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            # Each entry is a single-key dict: {"<base>_<quote>": {...details...}}
            keys = list(market.keys())
            id = keys[0]
            details = market[id]
            baseId, quoteId = id.split('_')
            base = baseId.upper()
            quote = quoteId.upper()
            base = self.common_currency_code(base)
            quote = self.common_currency_code(quote)
            symbol = base + '/' + quote
            precision = {
                'amount': 8,
                'price': details['decimal_places'],
            }
            amountLimits = {
                'min': details['min_amount'],
                'max': None,
            }
            priceLimits = {
                # Smallest representable price at the market's precision.
                'min': math.pow(10, -details['decimal_places']),
                'max': None,
            }
            defaultCost = amountLimits['min'] * priceLimits['min']
            # Per-quote minimum cost override from options (see describe()).
            minCost = self.safe_float(self.options['limits']['cost']['min'], quote, defaultCost)
            costLimits = {
                'min': minCost,
                'max': None,
            }
            limits = {
                'amount': amountLimits,
                'price': priceLimits,
                'cost': costLimits,
            }
            active = True
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'info': market,
                'active': active,
                # The API reports the fee as a percentage.
                'maker': details['fee'] / 100,
                'taker': details['fee'] / 100,
                'precision': precision,
                'limits': limits,
            })
        return result
    async def fetch_balance(self, params={}):
        """Fetch account balances and map them onto the unified structure.

        The API returns two optional maps, 'available' and 'locked',
        keyed by currency id; currencies absent from both come out with
        zeroed accounts.
        """
        await self.load_markets()
        balance = await self.privatePostBalances()
        result = {'info': balance}
        currencies = list(self.currencies.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            code = self.common_currency_code(currency)
            account = self.account()
            if 'available' in balance:
                if currency in balance['available']:
                    account['free'] = float(balance['available'][currency])
            if 'locked' in balance:
                if currency in balance['locked']:
                    account['used'] = float(balance['locked'][currency])
            account['total'] = self.sum(account['free'], account['used'])
            result[code] = account
        return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
    def parse_ticker(self, ticker, market=None):
        """Normalize a raw gate.io ticker into the unified structure.

        The API does not provide an open price; it is derived from the
        last price and the 24h percent change when both are present.
        """
        # The API provides no timestamp, so the local clock is used.
        timestamp = self.milliseconds()
        symbol = None
        if market:
            symbol = market['symbol']
        last = self.safe_float(ticker, 'last')
        percentage = self.safe_float(ticker, 'percentChange')
        open = None
        change = None
        average = None
        if (last is not None) and(percentage is not None):
            # open = last / (1 + change%); change and average follow.
            relativeChange = percentage / 100
            open = last / self.sum(1, relativeChange)
            change = last - open
            average = self.sum(last, open) / 2
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high24hr'),
            'low': self.safe_float(ticker, 'low24hr'),
            'bid': self.safe_float(ticker, 'highestBid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'lowestAsk'),
            'askVolume': None,
            'vwap': None,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': change,
            'percentage': percentage,
            'average': average,
            # NOTE(review): field names look deliberately crossed —
            # presumably gate.io reports base/quote volumes under swapped
            # keys; confirm against the exchange docs before "fixing".
            'baseVolume': self.safe_float(ticker, 'quoteVolume'),
            'quoteVolume': self.safe_float(ticker, 'baseVolume'),
            'info': ticker,
        }
    def handle_errors(self, code, reason, url, method, headers, body, response):
        """Map gate.io error codes onto unified ccxt exceptions.

        Only JSON bodies whose 'result' field is the string 'false' are
        inspected; everything else falls through to request().
        """
        if len(body) <= 0:
            return
        if body[0] != '{':
            # Not a JSON object body: nothing to inspect here.
            return
        resultString = self.safe_string(response, 'result', '')
        if resultString != 'false':
            return
        errorCode = self.safe_string(response, 'code')
        if errorCode is not None:
            exceptions = self.exceptions
            errorCodeNames = self.errorCodeNames
            if errorCode in exceptions:
                message = ''
                if errorCode in errorCodeNames:
                    # Prefer the documented message for this code.
                    message = errorCodeNames[errorCode]
                else:
                    message = self.safe_string(response, 'message', '(unknown)')
                raise exceptions[errorCode](message)
    async def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for all markets in one call.

        *symbols* is accepted for interface compatibility but the result
        always covers every id the API returns.
        """
        await self.load_markets()
        tickers = await self.publicGetTickers(params)
        result = {}
        ids = list(tickers.keys())
        for i in range(0, len(ids)):
            id = ids[i]
            baseId, quoteId = id.split('_')
            base = baseId.upper()
            quote = quoteId.upper()
            base = self.common_currency_code(base)
            quote = self.common_currency_code(quote)
            symbol = base + '/' + quote
            ticker = tickers[id]
            # Resolve the market by symbol first, then by raw id.
            market = None
            if symbol in self.markets:
                market = self.markets[symbol]
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
            result[symbol] = self.parse_ticker(ticker, market)
        return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
    def parse_trade(self, trade, market):
        """Normalize a raw trade (public or private shape) into the
        unified structure.  *market* is required for the symbol."""
        # public fetchTrades
        timestamp = self.safe_integer(trade, 'timestamp')
        # private fetchMyTrades
        timestamp = self.safe_integer(trade, 'time_unix', timestamp)
        if timestamp is not None:
            # The API reports seconds; unified trades use milliseconds.
            timestamp *= 1000
        id = self.safe_string(trade, 'tradeID')
        id = self.safe_string(trade, 'id', id)
        # take either of orderid or orderId
        orderId = self.safe_string(trade, 'orderid')
        orderId = self.safe_string(trade, 'orderNumber', orderId)
        price = self.safe_float(trade, 'rate')
        amount = self.safe_float(trade, 'amount')
        cost = None
        if price is not None:
            if amount is not None:
                cost = price * amount
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'order': orderId,
            'type': None,
            'side': trade['type'],
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': None,
        }
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
    async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        # NOTE(review): delegates to the openOrders endpoint, so despite
        # the name only open orders are returned, and *symbol* is not
        # forwarded to the API.
        response = await self.privatePostOpenOrders(params)
        return self.parse_orders(response['orders'], None, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostGetOrder(self.extend({
'orderNumber': id,
'currencyPair': self.market_id(symbol),
}, params))
return self.parse_order(response['order'])
def parse_order_status(self, status):
statuses = {
'cancelled': 'canceled',
# 'closed': 'closed', # these two statuses aren't actually needed
# 'open': 'open', # as they are mapped one-to-one
}
if status in statuses:
return statuses[status]
return status
    def parse_order(self, order, market=None):
        """Normalize a raw gate.io order into the unified structure."""
        #
        # {'amount': '0.00000000',
        #  'currencyPair': 'xlm_usdt',
        #  'fee': '0.0113766632239302 USDT',
        #  'feeCurrency': 'USDT',
        #  'feePercentage': 0.18,
        #  'feeValue': '0.0113766632239302',
        #  'filledAmount': '30.14004987',
        #  'filledRate': 0.2097,
        #  'initialAmount': '30.14004987',
        #  'initialRate': '0.2097',
        #  'left': 0,
        #  'orderNumber': '998307286',
        #  'rate': '0.2097',
        #  'status': 'closed',
        #  'timestamp': 1531158583,
        #  'type': 'sell'},
        #
        id = self.safe_string(order, 'orderNumber')
        symbol = None
        marketId = self.safe_string(order, 'currencyPair')
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
        if market is not None:
            symbol = market['symbol']
        timestamp = self.safe_integer(order, 'timestamp')
        if timestamp is not None:
            # Seconds -> milliseconds.
            timestamp *= 1000
        status = self.parse_order_status(self.safe_string(order, 'status'))
        side = self.safe_string(order, 'type')
        price = self.safe_float(order, 'filledRate')
        amount = self.safe_float(order, 'initialAmount')
        filled = self.safe_float(order, 'filledAmount')
        remaining = self.safe_float(order, 'leftAmount')
        if remaining is None:
            # In the order status response, self field has a different name.
            remaining = self.safe_float(order, 'left')
        feeCost = self.safe_float(order, 'feeValue')
        feeCurrency = self.safe_string(order, 'feeCurrency')
        feeRate = self.safe_float(order, 'feePercentage')
        if feeRate is not None:
            # API reports the fee rate as a percentage.
            feeRate = feeRate / 100
        if feeCurrency is not None:
            if feeCurrency in self.currencies_by_id:
                feeCurrency = self.currencies_by_id[feeCurrency]['code']
        return {
            'id': id,
            'datetime': self.iso8601(timestamp),
            'timestamp': timestamp,
            'status': status,
            'symbol': symbol,
            # gate.io only supports limit orders (see create_order).
            'type': 'limit',
            'side': side,
            'price': price,
            'cost': None,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'trades': None,
            'fee': {
                'cost': feeCost,
                'currency': feeCurrency,
                'rate': feeRate,
            },
            'info': order,
        }
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order.

        :raises ExchangeError: for type == 'market' — not supported by
            the gate.io API.
        """
        if type == 'market':
            raise ExchangeError(self.id + ' allows limit orders only')
        await self.load_markets()
        # 'buy' -> privatePostBuy, 'sell' -> privatePostSell
        method = 'privatePost' + self.capitalize(side)
        market = self.market(symbol)
        order = {
            'currencyPair': market['id'],
            'rate': price,
            'amount': amount,
        }
        response = await getattr(self, method)(self.extend(order, params))
        # The endpoint echoes only partial order info; fill in the fields
        # already known locally before parsing.
        return self.parse_order(self.extend({
            'status': 'open',
            'type': side,
            'initialAmount': amount,
        }, response), market)
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires symbol argument')
await self.load_markets()
return await self.privatePostCancelOrder({
'orderNumber': id,
'currencyPair': self.market_id(symbol),
})
    async def query_deposit_address(self, method, code, params={}):
        """Shared implementation for the New/Deposit address endpoints.

        :param method: endpoint name fragment, 'New' or 'Deposit'.
        :param code: unified currency code.
        :raises InvalidAddress: when the API returns an explanatory
            sentence (containing the word 'address') instead of an
            actual address.
        """
        await self.load_markets()
        currency = self.currency(code)
        method = 'privatePost' + method + 'Address'
        response = await getattr(self, method)(self.extend({
            'currency': currency['id'],
        }, params))
        address = self.safe_string(response, 'addr')
        tag = None
        if (address is not None) and(address.find('address') >= 0):
            raise InvalidAddress(self.id + ' queryDepositAddress ' + address)
        if code == 'XRP':
            # XRP addresses arrive as "<address> <tag>".
            parts = address.split(' ')
            address = parts[0]
            tag = parts[1]
        return {
            'currency': currency,
            'address': address,
            'tag': tag,
            'info': response,
        }
    async def create_deposit_address(self, code, params={}):
        """Generate a fresh deposit address for currency *code*."""
        return await self.query_deposit_address('New', code, params)
    async def fetch_deposit_address(self, code, params={}):
        """Fetch the existing deposit address for currency *code*."""
        return await self.query_deposit_address('Deposit', code, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = await self.privatePostOpenOrders()
return self.parse_orders(response['orders'], market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
response = await self.privatePostTradeHistory(self.extend({
'currencyPair': market['id'],
'orderNumber': id,
}, params))
return self.parse_trades(response['trades'], market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchMyTrades requires symbol param')
await self.load_markets()
market = self.market(symbol)
id = market['id']
response = await self.privatePostTradeHistory(self.extend({'currencyPair': id}, params))
return self.parse_trades(response['trades'], market, since, limit)
    async def withdraw(self, code, amount, address, tag=None, params={}):
        """Request a withdrawal of *amount* of *code* to *address*.

        The API does not return a transaction id, so 'id' is None.
        """
        self.check_address(address)
        await self.load_markets()
        currency = self.currency(code)
        response = await self.privatePostWithdraw(self.extend({
            'currency': currency['id'],
            'amount': amount,
            'address': address,  # Address must exist in you AddressBook in security settings
        }, params))
        return {
            'info': response,
            'id': None,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL, body and headers for a public or private call.

        Private calls are authenticated with an HMAC-SHA512 signature of
        the urlencoded body (nonce + query parameters).
        """
        prefix = (api + '/') if (api == 'private') else ''
        url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            request = {'nonce': nonce}
            body = self.urlencode(self.extend(request, query))
            signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
            headers = {
                'Key': self.apiKey,
                'Sign': signature,
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue the HTTP call and raise ExchangeError when the response
        'result' flag signals failure.

        'result' may be absent, None, a boolean, or the strings
        'true'/'false' depending on the endpoint.
        """
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'result' in response:
            result = response['result']
            message = self.id + ' ' + self.json(response)
            if result is None:
                raise ExchangeError(message)
            if isinstance(result, basestring):
                if result != 'true':
                    raise ExchangeError(message)
            elif not result:
                raise ExchangeError(message)
        return response
| 38.35924 | 126 | 0.509005 |
rt.base.exchange import Exchange
try:
basestring
except NameError:
basestring = str
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
class gateio (Exchange):
def describe(self):
return self.deep_extend(super(gateio, self).describe(), {
'id': 'gateio',
'name': 'Gate.io',
'countries': ['CN'],
'version': '2',
'rateLimit': 1000,
'has': {
'CORS': False,
'createMarketOrder': False,
'fetchTickers': True,
'withdraw': True,
'createDepositAddress': True,
'fetchDepositAddress': True,
'fetchClosedOrders': True,
'fetchOpenOrders': True,
'fetchOrderTrades': True,
'fetchOrders': True,
'fetchOrder': True,
'fetchMyTrades': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/31784029-0313c702-b509-11e7-9ccc-bc0da6a0e435.jpg',
'api': {
'public': 'https://data.gate.io/api',
'private': 'https://data.gate.io/api',
},
'www': 'https://gate.io/',
'doc': 'https://gate.io/api2',
'fees': [
'https://gate.io/fee',
'https://support.gate.io/hc/en-us/articles/115003577673',
],
},
'api': {
'public': {
'get': [
'pairs',
'marketinfo',
'marketlist',
'tickers',
'ticker/{id}',
'orderBook/{id}',
'trade/{id}',
'tradeHistory/{id}',
'tradeHistory/{id}/{tid}',
],
},
'private': {
'post': [
'balances',
'depositAddress',
'newAddress',
'depositsWithdrawals',
'buy',
'sell',
'cancelOrder',
'cancelAllOrders',
'getOrder',
'openOrders',
'tradeHistory',
'withdraw',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'maker': 0.002,
'taker': 0.002,
},
},
'exceptions': {
'4': DDoSProtection,
'7': NotSupported,
'8': NotSupported,
'9': NotSupported,
'15': DDoSProtection,
'16': OrderNotFound,
'17': OrderNotFound,
'21': InsufficientFunds,
},
'errorCodeNames': {
'1': 'Invalid request',
'2': 'Invalid version',
'3': 'Invalid request',
'4': 'Too many attempts',
'5': 'Invalid sign',
'6': 'Invalid sign',
'7': 'Currency is not supported',
'8': 'Currency is not supported',
'9': 'Currency is not supported',
'10': 'Verified failed',
'11': 'Obtaining address failed',
'12': 'Empty params',
'13': 'Internal error, please report to administrator',
'14': 'Invalid user',
'15': 'Cancel order too fast, please wait 1 min and try again',
'16': 'Invalid order id or order is already closed',
'17': 'Invalid orderid',
'18': 'Invalid amount',
'19': 'Not permitted or trade is disabled',
'20': 'Your order size is too small',
'21': 'You don\'t have enough fund',
},
'options': {
'limits': {
'cost': {
'min': {
'BTC': 0.0001,
'ETH': 0.001,
'USDT': 1,
},
},
},
},
})
    async def fetch_markets(self, params={}):
        """Fetch all markets from the exchange and return ccxt market dicts.

        Raises ExchangeError when the marketinfo payload has no 'pairs' key.
        """
        response = await self.publicGetMarketinfo()
        markets = self.safe_value(response, 'pairs')
        if not markets:
            raise ExchangeError(self.id + ' fetchMarkets got an unrecognized response')
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            # each entry is a one-key dict: {marketId: details}
            keys = list(market.keys())
            id = keys[0]
            details = market[id]
            baseId, quoteId = id.split('_')
            base = baseId.upper()
            quote = quoteId.upper()
            base = self.common_currency_code(base)
            quote = self.common_currency_code(quote)
            symbol = base + '/' + quote
            precision = {
                'amount': 8,
                'price': details['decimal_places'],
            }
            amountLimits = {
                'min': details['min_amount'],
                'max': None,
            }
            priceLimits = {
                # smallest representable price given the quoted decimal places
                'min': math.pow(10, -details['decimal_places']),
                'max': None,
            }
            defaultCost = amountLimits['min'] * priceLimits['min']
            # per-quote-currency overrides come from options['limits']['cost']['min']
            minCost = self.safe_float(self.options['limits']['cost']['min'], quote, defaultCost)
            costLimits = {
                'min': minCost,
                'max': None,
            }
            limits = {
                'amount': amountLimits,
                'price': priceLimits,
                'cost': costLimits,
            }
            active = True
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'info': market,
                'active': active,
                'maker': details['fee'] / 100,
                'taker': details['fee'] / 100,
                'precision': precision,
                'limits': limits,
            })
        return result
async def fetch_balance(self, params={}):
await self.load_markets()
balance = await self.privatePostBalances()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
code = self.common_currency_code(currency)
account = self.account()
if 'available' in balance:
if currency in balance['available']:
account['free'] = float(balance['available'][currency])
if 'locked' in balance:
if currency in balance['locked']:
account['used'] = float(balance['locked'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderBookId(self.extend({
'id': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
    def parse_ticker(self, ticker, market=None):
        """Translate a raw exchange ticker into ccxt's unified ticker structure."""
        timestamp = self.milliseconds()  # the payload carries no timestamp of its own
        symbol = None
        if market:
            symbol = market['symbol']
        last = self.safe_float(ticker, 'last')
        percentage = self.safe_float(ticker, 'percentChange')
        open = None
        change = None
        average = None
        if (last is not None) and(percentage is not None):
            # derive open/change/average from the last price and the percent change
            relativeChange = percentage / 100
            open = last / self.sum(1, relativeChange)
            change = last - open
            average = self.sum(last, open) / 2
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high24hr'),
            'low': self.safe_float(ticker, 'low24hr'),
            'bid': self.safe_float(ticker, 'highestBid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'lowestAsk'),
            'askVolume': None,
            'vwap': None,
            'open': open,
            'close': last,
            'last': last,
            'previousClose': None,
            'change': change,
            'percentage': percentage,
            'average': average,
            # NOTE(review): base/quote volume fields are crossed here, presumably
            # because the exchange reports them reversed - confirm against API docs
            'baseVolume': self.safe_float(ticker, 'quoteVolume'),
            'quoteVolume': self.safe_float(ticker, 'baseVolume'),
            'info': ticker,
        }
    def handle_errors(self, code, reason, url, method, headers, body, response):
        """Inspect a raw HTTP response and raise a mapped exchange exception.

        Only JSON-object bodies whose 'result' field equals the string 'false'
        are treated as errors; the numeric error code is then mapped through
        self.exceptions (exception class) and self.errorCodeNames (message).
        """
        if len(body) <= 0:
            return
        if body[0] != '{':
            # not a JSON object payload - nothing to interpret here
            return
        resultString = self.safe_string(response, 'result', '')
        if resultString != 'false':
            return
        errorCode = self.safe_string(response, 'code')
        if errorCode is not None:
            exceptions = self.exceptions
            errorCodeNames = self.errorCodeNames
            if errorCode in exceptions:
                message = ''
                if errorCode in errorCodeNames:
                    message = errorCodeNames[errorCode]
                else:
                    # fall back to whatever message the server sent
                    message = self.safe_string(response, 'message', '(unknown)')
                raise exceptions[errorCode](message)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
ticker = tickers[id]
market = None
if symbol in self.markets:
market = self.markets[symbol]
if id in self.markets_by_id:
market = self.markets_by_id[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTickerId(self.extend({
'id': market['id'],
}, params))
return self.parse_ticker(ticker, market)
    def parse_trade(self, trade, market):
        """Translate one raw trade (public or private format) into ccxt form.

        Public and private endpoints use different field names, hence the
        chained safe_* lookups with fallbacks below.
        """
        # public fetchTrades
        timestamp = self.safe_integer(trade, 'timestamp')
        # private fetchMyTrades
        timestamp = self.safe_integer(trade, 'time_unix', timestamp)
        if timestamp is not None:
            timestamp *= 1000  # exchange reports seconds, ccxt uses milliseconds
        id = self.safe_string(trade, 'tradeID')
        id = self.safe_string(trade, 'id', id)
        # take either of orderid or orderId
        orderId = self.safe_string(trade, 'orderid')
        orderId = self.safe_string(trade, 'orderNumber', orderId)
        price = self.safe_float(trade, 'rate')
        amount = self.safe_float(trade, 'amount')
        cost = None
        if price is not None:
            if amount is not None:
                cost = price * amount
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'order': orderId,
            'type': None,
            'side': trade['type'],  # presumably 'buy'/'sell' - not normalized here
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': None,
        }
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetTradeHistoryId(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
    async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch orders via the open-orders endpoint.

        NOTE(review): *symbol* is accepted but not forwarded to the endpoint
        and no market is passed to parse_orders - confirm this is intended.
        """
        response = await self.privatePostOpenOrders(params)
        return self.parse_orders(response['orders'], None, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostGetOrder(self.extend({
'orderNumber': id,
'currencyPair': self.market_id(symbol),
}, params))
return self.parse_order(response['order'])
def parse_order_status(self, status):
statuses = {
'cancelled': 'canceled',
# 'closed': 'closed', # these two statuses aren't actually needed
statuses:
return statuses[status]
return status
    def parse_order(self, order, market=None):
        """Translate a raw exchange order structure into ccxt's unified order dict."""
        id = self.safe_string(order, 'orderNumber')
        symbol = None
        marketId = self.safe_string(order, 'currencyPair')
        if marketId in self.markets_by_id:
            market = self.markets_by_id[marketId]
        if market is not None:
            symbol = market['symbol']
        timestamp = self.safe_integer(order, 'timestamp')
        if timestamp is not None:
            timestamp *= 1000  # seconds -> milliseconds
        status = self.parse_order_status(self.safe_string(order, 'status'))
        side = self.safe_string(order, 'type')
        price = self.safe_float(order, 'filledRate')
        amount = self.safe_float(order, 'initialAmount')
        filled = self.safe_float(order, 'filledAmount')
        # the remaining amount appears under two different keys
        remaining = self.safe_float(order, 'leftAmount')
        if remaining is None:
            remaining = self.safe_float(order, 'left')
        feeCost = self.safe_float(order, 'feeValue')
        feeCurrency = self.safe_string(order, 'feeCurrency')
        feeRate = self.safe_float(order, 'feePercentage')
        if feeRate is not None:
            feeRate = feeRate / 100  # percentage -> fraction
        if feeCurrency is not None:
            if feeCurrency in self.currencies_by_id:
                feeCurrency = self.currencies_by_id[feeCurrency]['code']
        return {
            'id': id,
            'datetime': self.iso8601(timestamp),
            'timestamp': timestamp,
            'status': status,
            'symbol': symbol,
            'type': 'limit',  # this exchange only supports limit orders (see create_order)
            'side': side,
            'price': price,
            'cost': None,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'trades': None,
            'fee': {
                'cost': feeCost,
                'currency': feeCurrency,
                'rate': feeRate,
            },
            'info': order,
        }
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
await self.load_markets()
method = 'privatePost' + self.capitalize(side)
market = self.market(symbol)
order = {
'currencyPair': market['id'],
'rate': price,
'amount': amount,
}
response = await getattr(self, method)(self.extend(order, params))
return self.parse_order(self.extend({
'status': 'open',
'type': side,
'initialAmount': amount,
}, response), market)
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires symbol argument')
await self.load_markets()
return await self.privatePostCancelOrder({
'orderNumber': id,
'currencyPair': self.market_id(symbol),
})
async def query_deposit_address(self, method, code, params={}):
await self.load_markets()
currency = self.currency(code)
method = 'privatePost' + method + 'Address'
response = await getattr(self, method)(self.extend({
'currency': currency['id'],
}, params))
address = self.safe_string(response, 'addr')
tag = None
if (address is not None) and(address.find('address') >= 0):
raise InvalidAddress(self.id + ' queryDepositAddress ' + address)
if code == 'XRP':
parts = address.split(' ')
address = parts[0]
tag = parts[1]
return {
'currency': currency,
'address': address,
'tag': tag,
'info': response,
}
    async def create_deposit_address(self, code, params={}):
        """Generate a fresh deposit address for currency *code*."""
        return await self.query_deposit_address('New', code, params)
    async def fetch_deposit_address(self, code, params={}):
        """Fetch the existing deposit address for currency *code*."""
        return await self.query_deposit_address('Deposit', code, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = await self.privatePostOpenOrders()
return self.parse_orders(response['orders'], market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
response = await self.privatePostTradeHistory(self.extend({
'currencyPair': market['id'],
'orderNumber': id,
}, params))
return self.parse_trades(response['trades'], market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchMyTrades requires symbol param')
await self.load_markets()
market = self.market(symbol)
id = market['id']
response = await self.privatePostTradeHistory(self.extend({'currencyPair': id}, params))
return self.parse_trades(response['trades'], market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
response = await self.privatePostWithdraw(self.extend({
'currency': currency['id'],
'amount': amount,
'address': address,
}, params))
return {
'info': response,
'id': None,
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL/headers/body for a request.

        Public calls put leftover params in the query string; private calls
        urlencode them into the body and sign it with HMAC-SHA512.
        """
        prefix = (api + '/') if (api == 'private') else ''
        url = self.urls['api'][api] + self.version + '/1/' + prefix + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            request = {'nonce': nonce}
            body = self.urlencode(self.extend(request, query))
            # signature over the urlencoded body, keyed with the API secret
            signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
            headers = {
                'Key': self.apiKey,
                'Sign': signature,
                'Content-Type': 'application/x-www-form-urlencoded',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Perform the HTTP request and reject responses whose 'result' flag signals failure."""
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'result' in response:
            result = response['result']
            message = self.id + ' ' + self.json(response)
            if result is None:
                raise ExchangeError(message)
            # NOTE(review): `basestring` is the Python 2 builtin name - presumably
            # provided by a compatibility shim imported at the top of this file; confirm
            if isinstance(result, basestring):
                if result != 'true':
                    raise ExchangeError(message)
            elif not result:
                raise ExchangeError(message)
        return response
| true | true |
f73b4f1ffaf332c07660ce22f9e71bf93045384a | 632 | py | Python | www/apps/profiles/admin/job.py | un33k/outsourcefactor | c48dbd11b74ba5fb72b85f05c431a16287f62507 | [
"MIT"
] | 2 | 2018-12-23T04:14:32.000Z | 2018-12-23T15:02:08.000Z | www/apps/profiles/admin/job.py | un33k/outsourcefactor | c48dbd11b74ba5fb72b85f05c431a16287f62507 | [
"MIT"
] | null | null | null | www/apps/profiles/admin/job.py | un33k/outsourcefactor | c48dbd11b74ba5fb72b85f05c431a16287f62507 | [
"MIT"
] | 1 | 2019-11-17T19:53:07.000Z | 2019-11-17T19:53:07.000Z | from django.contrib import admin
class JobPostAdmin(admin.ModelAdmin):
    """Admin configuration for job posts.

    Fix: 'bookmarked' was listed twice in list_display; the duplicate
    column has been removed.
    """
    # Columns shown on the change-list page.
    list_display = (
        'id',
        'user',
        'title',
        'slug',
        'employment_option',
        'wage_salary',
        'start_date',
        'bookmarked',
        'is_active',
        'keywords',
        'created_at',
        'updated_at',
    )
    # Fields searched by the admin search box.
    search_fields = [
        'id',
        'user__username',
        'employment_option',
        'start_date',
        'is_active',
        'keywords',
    ]
    list_per_page = 25
| 19.75 | 37 | 0.427215 | from django.contrib import admin
class JobPostAdmin(admin.ModelAdmin):
    """Admin configuration for job posts.

    Fix: 'bookmarked' was listed twice in list_display; the duplicate
    column has been removed.
    """
    # Columns shown on the change-list page.
    list_display = (
        'id',
        'user',
        'title',
        'slug',
        'employment_option',
        'wage_salary',
        'start_date',
        'bookmarked',
        'is_active',
        'keywords',
        'created_at',
        'updated_at',
    )
    # Fields searched by the admin search box.
    search_fields = [
        'id',
        'user__username',
        'employment_option',
        'start_date',
        'is_active',
        'keywords',
    ]
    list_per_page = 25
| true | true |
f73b4f408bc99bcd38ca01772eb08e3c31efec72 | 2,915 | py | Python | memote/experimental/growth.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | memote/experimental/growth.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | memote/experimental/growth.py | matthiaskoenig/memote | 7c14cd304523dda83eaf4835ee007243e8673f85 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide an interface for growth experiments."""
from __future__ import absolute_import
import logging
from pandas import DataFrame
from memote.experimental.experiment import Experiment
__all__ = ("GrowthExperiment",)
LOGGER = logging.getLogger(__name__)
class GrowthExperiment(Experiment):
    """Represent a growth experiment."""
    SCHEMA = "growth.json"  # JSON schema used to validate the data table
    def __init__(self, **kwargs):
        """
        Initialize a growth experiment.
        Parameters
        ----------
        kwargs
        """
        super(GrowthExperiment, self).__init__(**kwargs)
    def load(self, dtype_conversion=None):
        """
        Load the data table and corresponding validation schema.
        Parameters
        ----------
        dtype_conversion : dict
            Column names as keys and corresponding type for loading the data.
            Please take a look at the `pandas documentation
            <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
            for detailed explanations.
        """
        if dtype_conversion is None:
            dtype_conversion = {"growth": str}
        super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion)
        # normalize the growth column to booleans via the accepted truthy strings
        self.data["growth"] = self.data["growth"].isin(self.TRUTHY)
    def evaluate(self, model, threshold=0.1):
        """Evaluate in silico growth rates."""
        with model:
            if self.medium is not None:
                self.medium.apply(model)
            if self.objective is not None:
                model.objective = self.objective
            model.add_cons_vars(self.constraints)
            # convert the relative threshold into an absolute growth rate
            threshold *= model.slim_optimize()
            growth = list()
            for row in self.data.itertuples(index=False):
                with model:
                    exchange = model.reactions.get_by_id(row.exchange)
                    # uptake direction depends on how the exchange reaction is written
                    if bool(exchange.reactants):
                        exchange.lower_bound = -row.uptake
                    else:
                        exchange.upper_bound = row.uptake
                    growth.append(model.slim_optimize() >= threshold)
        return DataFrame({
            "exchange": self.data["exchange"],
            "growth": growth
        })
| 32.752809 | 98 | 0.630189 |
from __future__ import absolute_import
import logging
from pandas import DataFrame
from memote.experimental.experiment import Experiment
__all__ = ("GrowthExperiment",)
LOGGER = logging.getLogger(__name__)
class GrowthExperiment(Experiment):
    """Represent a growth experiment."""
    SCHEMA = "growth.json"  # JSON schema used to validate the data table
    def __init__(self, **kwargs):
        """Initialize a growth experiment, forwarding all options to Experiment."""
        super(GrowthExperiment, self).__init__(**kwargs)
    def load(self, dtype_conversion=None):
        """Load the data table; the 'growth' column is normalized to booleans."""
        if dtype_conversion is None:
            dtype_conversion = {"growth": str}
        super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion)
        # normalize the growth column to booleans via the accepted truthy strings
        self.data["growth"] = self.data["growth"].isin(self.TRUTHY)
    def evaluate(self, model, threshold=0.1):
        """Evaluate in silico growth rates against the relative *threshold*."""
        with model:
            if self.medium is not None:
                self.medium.apply(model)
            if self.objective is not None:
                model.objective = self.objective
            model.add_cons_vars(self.constraints)
            # convert the relative threshold into an absolute growth rate
            threshold *= model.slim_optimize()
            growth = list()
            for row in self.data.itertuples(index=False):
                with model:
                    exchange = model.reactions.get_by_id(row.exchange)
                    # uptake direction depends on how the exchange reaction is written
                    if bool(exchange.reactants):
                        exchange.lower_bound = -row.uptake
                    else:
                        exchange.upper_bound = row.uptake
                    growth.append(model.slim_optimize() >= threshold)
        return DataFrame({
            "exchange": self.data["exchange"],
            "growth": growth
        })
})
| true | true |
f73b50821cf4fcc5259c856d808d3de247a21219 | 1,321 | py | Python | platon_env/utils/key/keytool.py | shinnng/platon-env | c1adba15a53ea2757fc94904c12b606a9d220b76 | [
"MIT"
] | null | null | null | platon_env/utils/key/keytool.py | shinnng/platon-env | c1adba15a53ea2757fc94904c12b606a9d220b76 | [
"MIT"
] | null | null | null | platon_env/utils/key/keytool.py | shinnng/platon-env | c1adba15a53ea2757fc94904c12b606a9d220b76 | [
"MIT"
] | null | null | null | import os
import sys
from platon_keys import keys
from platon_utils.curried import keccak, text_if_str, to_bytes
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def gen_node_keypair(extra_entropy=''):
    """Generate a node keypair; returns (public_key_hex, private_key_hex) without the 0x prefix."""
    extra_key_bytes = text_if_str(to_bytes, extra_entropy)
    # mix caller-supplied entropy into 32 random bytes via keccak
    key_bytes = keccak(os.urandom(32) + extra_key_bytes)
    private_key = keys.PrivateKey(key_bytes)
    return keys.private_key_to_public_key(private_key).to_hex()[2:], private_key.to_hex()[2:],
def gen_bls_keypair():
    """Generate a BLS keypair via the bundled ``keytool`` binary.

    Returns:
        tuple: (public_key, private_key) hex strings as printed by the tool.

    Raises:
        Exception: if the platform is unsupported, or the tool output
            cannot be parsed into a keypair.
    """
    if sys.platform.startswith('linux'):
        tool_file = os.path.abspath(os.path.join(BASE_DIR, 'bin/keytool'))
        # the packaged binary may not carry the executable bit
        execute_cmd('chmod +x {}'.format(tool_file))
    elif sys.platform.startswith('win'):
        # Fix: the original test was `'win' in sys.platform`, which also
        # matched macOS ('darwin'); startswith() restricts this to Windows.
        tool_file = os.path.abspath(os.path.join(BASE_DIR, 'bin/keytool.exe'))
    else:
        raise Exception('This platform is not supported currently')
    keypair = execute_cmd(f'{tool_file} genblskeypair')
    if not keypair:
        raise Exception('Unable generate bls keypair')
    lines = keypair.split('\n')
    private_key = lines[0].split(':')[1].strip()
    public_key = lines[1].split(':')[1].strip()
    if not private_key or not public_key:
        raise Exception('Incorrect bls keypair')
    return public_key, private_key
def execute_cmd(cmd):
    """Run *cmd* in a shell and return its captured stdout as a string."""
    with os.popen(cmd) as pipe:
        return pipe.read()
| 32.219512 | 94 | 0.692657 | import os
import sys
from platon_keys import keys
from platon_utils.curried import keccak, text_if_str, to_bytes
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def gen_node_keypair(extra_entropy=''):
    """Generate a node keypair; returns (public_key_hex, private_key_hex) without the 0x prefix."""
    extra_key_bytes = text_if_str(to_bytes, extra_entropy)
    # mix caller-supplied entropy into 32 random bytes via keccak
    key_bytes = keccak(os.urandom(32) + extra_key_bytes)
    private_key = keys.PrivateKey(key_bytes)
    return keys.private_key_to_public_key(private_key).to_hex()[2:], private_key.to_hex()[2:],
def gen_bls_keypair():
    """Generate a BLS keypair via the bundled ``keytool`` binary.

    Returns:
        tuple: (public_key, private_key) hex strings as printed by the tool.

    Raises:
        Exception: if the platform is unsupported, or the tool output
            cannot be parsed into a keypair.
    """
    if sys.platform.startswith('linux'):
        tool_file = os.path.abspath(os.path.join(BASE_DIR, 'bin/keytool'))
        # the packaged binary may not carry the executable bit
        execute_cmd('chmod +x {}'.format(tool_file))
    elif sys.platform.startswith('win'):
        # Fix: the original test was `'win' in sys.platform`, which also
        # matched macOS ('darwin'); startswith() restricts this to Windows.
        tool_file = os.path.abspath(os.path.join(BASE_DIR, 'bin/keytool.exe'))
    else:
        raise Exception('This platform is not supported currently')
    keypair = execute_cmd(f'{tool_file} genblskeypair')
    if not keypair:
        raise Exception('Unable generate bls keypair')
    lines = keypair.split('\n')
    private_key = lines[0].split(':')[1].strip()
    public_key = lines[1].split(':')[1].strip()
    if not private_key or not public_key:
        raise Exception('Incorrect bls keypair')
    return public_key, private_key
def execute_cmd(cmd):
    """Run *cmd* in a shell and return its captured stdout as a string."""
    with os.popen(cmd) as pipe:
        return pipe.read()
| true | true |
f73b50b00c8b1f0c9fd9f703afa444be20424b30 | 999 | py | Python | examples/data.py | LilJohny/CourseProject | 8c16a44516a7de4086bb124eac9ebc6f9d3b606a | [
"MIT"
] | null | null | null | examples/data.py | LilJohny/CourseProject | 8c16a44516a7de4086bb124eac9ebc6f9d3b606a | [
"MIT"
] | 7 | 2020-04-04T07:37:09.000Z | 2022-03-11T23:42:09.000Z | examples/data.py | LilJohny/Shorty | 8c16a44516a7de4086bb124eac9ebc6f9d3b606a | [
"MIT"
] | null | null | null | import os
import nltk
import wget
def download_data():
    """Fetch the NLTK corpora and the GloVe embedding pickle required by the examples."""
    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        nltk.download("punkt")
    try:
        nltk.data.find("corpora/stopwords")
    except LookupError:
        nltk.download("stopwords")
    # create the local data directory on first run
    if "data" not in os.listdir(os.getcwd()):
        os.mkdir("data/")
    if "glove.6B.100d.pkl" not in os.listdir(os.sep.join([os.getcwd(), "data"])):
        # NOTE(review): this is a signed, time-limited Google Docs URL and has
        # most likely expired - confirm and replace with a stable mirror
        wget.download("https://doc-14-68-docs.googleusercontent.com/docs/securesc/dg109s2vqv57kb64g6hfhghsu9m671u8"
                      "/ola8a3lkirqch2ae9c9sk6pvo4i6ri5h/1558180800000/17248041771668612386/17248041771668612386"
                      "/1WtZCbj8mHUx_QMVYKbkUBzfSd4gkh3wB?e=download&h=06751138000822233008&nonce=q71lkqthtongs&user"
                      "=17248041771668612386&hash=5o2kqo65fqbnma4negmlr02c32bkg3v3", "glove.6B.100d.pkl")
        os.rename("glove.6B.100d.pkl", "data/glove.6B.100d.pkl")
if __name__ == "__main__":
    download_data()
| 35.678571 | 117 | 0.670671 | import os
import nltk
import wget
def download_data():
    """Fetch the NLTK corpora and the GloVe embedding pickle required by the examples."""
    try:
        nltk.data.find("tokenizers/punkt")
    except LookupError:
        nltk.download("punkt")
    try:
        nltk.data.find("corpora/stopwords")
    except LookupError:
        nltk.download("stopwords")
    # create the local data directory on first run
    if "data" not in os.listdir(os.getcwd()):
        os.mkdir("data/")
    if "glove.6B.100d.pkl" not in os.listdir(os.sep.join([os.getcwd(), "data"])):
        # NOTE(review): this is a signed, time-limited Google Docs URL and has
        # most likely expired - confirm and replace with a stable mirror
        wget.download("https://doc-14-68-docs.googleusercontent.com/docs/securesc/dg109s2vqv57kb64g6hfhghsu9m671u8"
                      "/ola8a3lkirqch2ae9c9sk6pvo4i6ri5h/1558180800000/17248041771668612386/17248041771668612386"
                      "/1WtZCbj8mHUx_QMVYKbkUBzfSd4gkh3wB?e=download&h=06751138000822233008&nonce=q71lkqthtongs&user"
                      "=17248041771668612386&hash=5o2kqo65fqbnma4negmlr02c32bkg3v3", "glove.6B.100d.pkl")
        os.rename("glove.6B.100d.pkl", "data/glove.6B.100d.pkl")
if __name__ == "__main__":
    download_data()
| true | true |
f73b51ab5aa95f3f229d561ec32e95014e446968 | 4,489 | py | Python | build/PureCloudPlatformClientV2/models/bu_average_speed_of_answer.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | [
"MIT"
] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | libs/PureCloudPlatformClientV2/models/bu_average_speed_of_answer.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | libs/PureCloudPlatformClientV2/models/bu_average_speed_of_answer.py | rocketbot-cl/genesysCloud | dd9d9b5ebb90a82bab98c0d88b9585c22c91f333 | [
"MIT"
] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class BuAverageSpeedOfAnswer(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        BuAverageSpeedOfAnswer - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # maps attribute name -> declared swagger type
        self.swagger_types = {
            'include': 'bool',
            'seconds': 'int'
        }
        # maps attribute name -> JSON key in the wire format
        self.attribute_map = {
            'include': 'include',
            'seconds': 'seconds'
        }
        self._include = None
        self._seconds = None
    @property
    def include(self):
        """
        Gets the include of this BuAverageSpeedOfAnswer.
        Whether to include average speed of answer (ASA) in the associated configuration
        :return: The include of this BuAverageSpeedOfAnswer.
        :rtype: bool
        """
        return self._include
    @include.setter
    def include(self, include):
        """
        Sets the include of this BuAverageSpeedOfAnswer.
        Whether to include average speed of answer (ASA) in the associated configuration
        :param include: The include of this BuAverageSpeedOfAnswer.
        :type: bool
        """
        self._include = include
    @property
    def seconds(self):
        """
        Gets the seconds of this BuAverageSpeedOfAnswer.
        The target average speed of answer (ASA) in seconds. Required if include == true
        :return: The seconds of this BuAverageSpeedOfAnswer.
        :rtype: int
        """
        return self._seconds
    @seconds.setter
    def seconds(self, seconds):
        """
        Sets the seconds of this BuAverageSpeedOfAnswer.
        The target average speed of answer (ASA) in seconds. Required if include == true
        :param seconds: The seconds of this BuAverageSpeedOfAnswer.
        :type: int
        """
        self._seconds = seconds
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # recursively serialize nested models, lists and dicts
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 28.592357 | 88 | 0.580085 |
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class BuAverageSpeedOfAnswer(object):
    """Swagger-generated model for a business-unit average-speed-of-answer setting."""
    def __init__(self):
        """Initialize an empty model with its swagger type/attribute maps."""
        # maps attribute name -> declared swagger type
        self.swagger_types = {
            'include': 'bool',
            'seconds': 'int'
        }
        # maps attribute name -> JSON key in the wire format
        self.attribute_map = {
            'include': 'include',
            'seconds': 'seconds'
        }
        self._include = None
        self._seconds = None
    @property
    def include(self):
        """Whether average speed of answer is included in the configuration."""
        return self._include
    @include.setter
    def include(self, include):
        """Set whether average speed of answer is included in the configuration."""
        self._include = include
    @property
    def seconds(self):
        """The target average speed of answer in seconds."""
        return self._seconds
    @seconds.setter
    def seconds(self, seconds):
        """Set the target average speed of answer in seconds."""
        self._seconds = seconds
    def to_dict(self):
        """Return the model properties as a dict, serializing nested models."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_json(self):
        """Return the model as raw JSON."""
        return json.dumps(sanitize_for_serialization(self.to_dict()))
    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects have equal attribute dicts."""
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True if the objects are not equal."""
        return not self == other
| true | true |
f73b51db43b0ca9bcac7c235f6d853779c88c6b8 | 885 | py | Python | resources/lib/common.py | torstehu/Transmission-XBMC | 10f319deb0d9e5839e62f86c8c6ed5a9175d26fc | [
"MIT"
] | 22 | 2015-02-04T19:58:02.000Z | 2021-07-29T05:25:08.000Z | resources/lib/common.py | torstehu/Transmission-XBMC | 10f319deb0d9e5839e62f86c8c6ed5a9175d26fc | [
"MIT"
] | 14 | 2015-01-07T00:08:28.000Z | 2019-10-24T00:27:48.000Z | resources/lib/common.py | torstehu/Transmission-XBMC | 10f319deb0d9e5839e62f86c8c6ed5a9175d26fc | [
"MIT"
] | 15 | 2015-02-26T15:01:04.000Z | 2020-12-02T09:14:44.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Artem Glebov
import sys
import transmissionrpc
__settings__ = sys.modules[ "__main__" ].__settings__
def get_settings():
    """Read Transmission RPC connection settings plus add-on behavior options.

    Returns the same dict as get_params() with 'stop_all_on_playback' added;
    delegating removes the duplicated key list that previously had to be
    kept in sync between the two functions.
    """
    params = get_params()
    params['stop_all_on_playback'] = __settings__.getSetting('stop_all_on_playback')
    return params
def get_params():
    """Read Transmission RPC connection parameters from the add-on settings.

    Returns a dict suitable for transmissionrpc.Client(**params).
    """
    params = {
        'address': __settings__.getSetting('rpc_host'),
        'port': __settings__.getSetting('rpc_port'),
        'user': __settings__.getSetting('rpc_user'),
        'password': __settings__.getSetting('rpc_password'),
    }
    return params
def get_rpc_client():
    """Construct a transmissionrpc.Client from the configured RPC settings."""
    return transmissionrpc.Client(**get_params())
| 28.548387 | 79 | 0.670056 |
import sys
import transmissionrpc
__settings__ = sys.modules[ "__main__" ].__settings__
def get_settings():
    """Read Transmission RPC connection settings plus add-on behavior options."""
    params = {
        'address': __settings__.getSetting('rpc_host'),
        'port': __settings__.getSetting('rpc_port'),
        'user': __settings__.getSetting('rpc_user'),
        'password': __settings__.getSetting('rpc_password'),
        'stop_all_on_playback': __settings__.getSetting('stop_all_on_playback')
    }
    return params
def get_params():
    """Read Transmission RPC connection parameters from the add-on settings.

    Returns a dict suitable for transmissionrpc.Client(**params).
    """
    params = {
        'address': __settings__.getSetting('rpc_host'),
        'port': __settings__.getSetting('rpc_port'),
        'user': __settings__.getSetting('rpc_user'),
        'password': __settings__.getSetting('rpc_password'),
    }
    return params
def get_rpc_client():
    """Construct a transmissionrpc.Client from the configured RPC settings."""
    params = get_params()
    return transmissionrpc.Client(**params)
| true | true |
f73b53714d4d12cfa40e796ff768275de8fa15d4 | 1,516 | py | Python | heron/instance/tests/python/utils/log_unittest.py | takeratta/heron | 7b7c38594186f009741c62d379364b9b45d82b61 | [
"Apache-2.0"
] | 1 | 2021-06-29T07:00:10.000Z | 2021-06-29T07:00:10.000Z | heron/instance/tests/python/utils/log_unittest.py | kalimfaria/heron | d59bd016b826006e2af22c7a6452342f5e7d637c | [
"Apache-2.0"
] | null | null | null | heron/instance/tests/python/utils/log_unittest.py | kalimfaria/heron | d59bd016b826006e2af22c7a6452342f5e7d637c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import unittest
from mock import patch, Mock, call
from io import StringIO
from heron.common.src.python.utils import proc
class LogTest(unittest.TestCase):
  """Tests for the heron proc stdout-streaming helpers."""
  def setUp(self):
    pass
  def test_stream_process_stdout(self):
    """stream_process_stdout forwards each stdout line to the log function."""
    ret = StringIO(u'hello\nworld\n')
    log_fn = Mock()
    with patch("subprocess.Popen") as mock_process:
      mock_process.stdout = ret
      proc.stream_process_stdout(mock_process, log_fn)
      log_fn.assert_has_calls([call(u'hello\n'), call(u'world\n')])
  def test_async_stream_process_stdout(self):
    """The async variant streams lines from a background thread; join before asserting."""
    ret = StringIO(u'hello\nworld\n')
    log_fn = Mock()
    with patch("subprocess.Popen") as mock_process:
      mock_process.stdout = ret
      thread = proc.async_stream_process_stdout(mock_process, log_fn)
      thread.join()
      log_fn.assert_has_calls([call(u'hello\n'), call(u'world\n')])
| 31.583333 | 74 | 0.730211 |
import unittest
from mock import patch, Mock, call
from io import StringIO
from heron.common.src.python.utils import proc
class LogTest(unittest.TestCase):
def setUp(self):
pass
def test_stream_process_stdout(self):
ret = StringIO(u'hello\nworld\n')
log_fn = Mock()
with patch("subprocess.Popen") as mock_process:
mock_process.stdout = ret
proc.stream_process_stdout(mock_process, log_fn)
log_fn.assert_has_calls([call(u'hello\n'), call(u'world\n')])
def test_async_stream_process_stdout(self):
ret = StringIO(u'hello\nworld\n')
log_fn = Mock()
with patch("subprocess.Popen") as mock_process:
mock_process.stdout = ret
thread = proc.async_stream_process_stdout(mock_process, log_fn)
thread.join()
log_fn.assert_has_calls([call(u'hello\n'), call(u'world\n')])
| true | true |
f73b569fb6f11e4627e4e8c4c171d62600f128c0 | 3,606 | py | Python | Examples/run_protocol_testing.py | nshelch/NCams | a2027a739337df8b620b2454cf83bb2516db8a00 | [
"MIT"
] | null | null | null | Examples/run_protocol_testing.py | nshelch/NCams | a2027a739337df8b620b2454cf83bb2516db8a00 | [
"MIT"
] | null | null | null | Examples/run_protocol_testing.py | nshelch/NCams | a2027a739337df8b620b2454cf83bb2516db8a00 | [
"MIT"
] | null | null | null | """
NCams Toolbox
Copyright 2019-2020 Charles M Greenspon, Anton Sobinov
https://github.com/CMGreenspon/NCams
"""
import os
import time
import math
import pylab
import ncams
BASE_DIR = os.path.join('C:\\', 'FLIR_cameras', 'PublicExample')
def main():
cdatetime = '2019.12.19_10.38.38';
camera_config_dir = os.path.join(BASE_DIR, 'camconf_'+cdatetime)
camera_config = ncams.yaml_to_config(os.path.join(camera_config_dir, 'config.yaml'))
calibration_config, pose_estimation_config = ncams.load_camera_config(camera_config)
session_shortnames = (
'exp_session_2019.12.20_09.49.42_AS_CMG_1',
'exp_session_2019.12.20_09.56.37_AS_CMG_2',
'exp_session_2019.12.20_09.57.31_AS_CMG_3',
'exp_session_2019.12.20_09.58.36_AS_CMG_4',
'exp_session_2019.12.20_10.09.44_AS_CMG_5',
'exp_session_2019.12.20_10.16.13_AS_CMG_6',
'exp_session_2019.12.20_10.34.40_AS_CMG_7',
'exp_session_2019.12.20_10.39.45_AS_CMG_8',
'exp_session_2019.12.20_10.45.01_AS_CMG_9',
'exp_session_2019.12.20_10.51.06_AS_CMG_10',
'exp_session_2019.12.20_11.11.21_AS_CMG_11',
'exp_session_2019.12.20_11.17.24_AS_CMG_12',
'exp_session_2019.12.20_11.21.52_AS_CMG_13',
)
for session_shortname in session_shortnames:
print('Processing session {}'.format(session_shortname))
session_full_filename = os.path.join(BASE_DIR, session_shortname, 'session_config.yaml')
session_config = ncams.import_session_config(session_full_filename)
session_config['video_path'] = 'videos'
session_config['ud_video_path'] = 'undistorted_videos'
for p in (os.path.join(session_config['session_path'], session_config['video_path']),
os.path.join(session_config['session_path'], session_config['ud_video_path'])):
if not os.path.isdir(p):
print('Making dir {}'.format(p))
os.mkdir(p)
for serial in camera_config['serials']:
session_config['cam_dicts'][serial]['pic_dir'] = session_config['cam_dicts'][serial]['name']
session_config['cam_dicts'][serial]['video'] = os.path.join(
session_config['video_path'], session_config['cam_dicts'][serial]['name']+'.mp4')
session_config['cam_dicts'][serial]['ud_video'] = os.path.join(
session_config['ud_video_path'], session_config['cam_dicts'][serial]['name']+'.mp4')
for cam_dict in session_config['cam_dicts'].values():
image_list = ncams.utils.get_image_list(
sort=True, path=os.path.join(session_config['session_path'], cam_dict['pic_dir']))
print('\tMaking a video for camera {} from {} images.'.format(
cam_dict['name'], len(image_list)))
ncams.images_to_video(
image_list, cam_dict['video'], fps=session_config['frame_rate'],
output_folder=session_config['session_path'])
for icam, serial in enumerate(camera_config['serials']):
cam_dict = session_config['cam_dicts'][serial]
ncams.undistort_video(
os.path.join(session_config['session_path'], cam_dict['video']),
calibration_config['dicts'][serial],
crop_and_resize=False,
output_filename=os.path.join(session_config['session_path'], cam_dict['ud_video']))
print('\tCamera {} video undistorted.'.format(cam_dict['name']))
ncams.export_session_config(session_config)
# Script entry point: process all sessions, then display any queued figures.
if __name__ == '__main__':
    main()
    pylab.show()
| 42.423529 | 104 | 0.660843 | import os
import time
import math
import pylab
import ncams
BASE_DIR = os.path.join('C:\\', 'FLIR_cameras', 'PublicExample')
def main():
cdatetime = '2019.12.19_10.38.38';
camera_config_dir = os.path.join(BASE_DIR, 'camconf_'+cdatetime)
camera_config = ncams.yaml_to_config(os.path.join(camera_config_dir, 'config.yaml'))
calibration_config, pose_estimation_config = ncams.load_camera_config(camera_config)
session_shortnames = (
'exp_session_2019.12.20_09.49.42_AS_CMG_1',
'exp_session_2019.12.20_09.56.37_AS_CMG_2',
'exp_session_2019.12.20_09.57.31_AS_CMG_3',
'exp_session_2019.12.20_09.58.36_AS_CMG_4',
'exp_session_2019.12.20_10.09.44_AS_CMG_5',
'exp_session_2019.12.20_10.16.13_AS_CMG_6',
'exp_session_2019.12.20_10.34.40_AS_CMG_7',
'exp_session_2019.12.20_10.39.45_AS_CMG_8',
'exp_session_2019.12.20_10.45.01_AS_CMG_9',
'exp_session_2019.12.20_10.51.06_AS_CMG_10',
'exp_session_2019.12.20_11.11.21_AS_CMG_11',
'exp_session_2019.12.20_11.17.24_AS_CMG_12',
'exp_session_2019.12.20_11.21.52_AS_CMG_13',
)
for session_shortname in session_shortnames:
print('Processing session {}'.format(session_shortname))
session_full_filename = os.path.join(BASE_DIR, session_shortname, 'session_config.yaml')
session_config = ncams.import_session_config(session_full_filename)
session_config['video_path'] = 'videos'
session_config['ud_video_path'] = 'undistorted_videos'
for p in (os.path.join(session_config['session_path'], session_config['video_path']),
os.path.join(session_config['session_path'], session_config['ud_video_path'])):
if not os.path.isdir(p):
print('Making dir {}'.format(p))
os.mkdir(p)
for serial in camera_config['serials']:
session_config['cam_dicts'][serial]['pic_dir'] = session_config['cam_dicts'][serial]['name']
session_config['cam_dicts'][serial]['video'] = os.path.join(
session_config['video_path'], session_config['cam_dicts'][serial]['name']+'.mp4')
session_config['cam_dicts'][serial]['ud_video'] = os.path.join(
session_config['ud_video_path'], session_config['cam_dicts'][serial]['name']+'.mp4')
for cam_dict in session_config['cam_dicts'].values():
image_list = ncams.utils.get_image_list(
sort=True, path=os.path.join(session_config['session_path'], cam_dict['pic_dir']))
print('\tMaking a video for camera {} from {} images.'.format(
cam_dict['name'], len(image_list)))
ncams.images_to_video(
image_list, cam_dict['video'], fps=session_config['frame_rate'],
output_folder=session_config['session_path'])
for icam, serial in enumerate(camera_config['serials']):
cam_dict = session_config['cam_dicts'][serial]
ncams.undistort_video(
os.path.join(session_config['session_path'], cam_dict['video']),
calibration_config['dicts'][serial],
crop_and_resize=False,
output_filename=os.path.join(session_config['session_path'], cam_dict['ud_video']))
print('\tCamera {} video undistorted.'.format(cam_dict['name']))
ncams.export_session_config(session_config)
if __name__ == '__main__':
main()
pylab.show()
| true | true |
f73b56a3f41cd7c137338ddf12bd233fed94d049 | 1,130 | py | Python | tests/storage/cases/test_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2021-05-20T16:52:08.000Z | 2021-05-20T16:52:08.000Z | tests/storage/cases/test_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/storage/cases/test_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW.json')
def test_storage_encoding_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| 40.357143 | 112 | 0.748673 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW.json')
def test_storage_encoding_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1DkY42MwUA1eC9jHNU2MVHqcLqhLvvbxaW(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| true | true |
f73b57a230b5efc551fa33033f4049348ad3ba48 | 174 | py | Python | server/user.py | cprochnow28/cornHacks2019 | a6b707aa2b80c949ddeb68034e006b2a9c4b7faf | [
"MIT"
] | null | null | null | server/user.py | cprochnow28/cornHacks2019 | a6b707aa2b80c949ddeb68034e006b2a9c4b7faf | [
"MIT"
] | 1 | 2021-06-01T23:23:27.000Z | 2021-06-01T23:23:27.000Z | server/user.py | cprochnow28/cornHacks2019 | a6b707aa2b80c949ddeb68034e006b2a9c4b7faf | [
"MIT"
] | null | null | null | class User():
    def __init__(self, id, username, password):
        """Create a user record.

        Args:
            id: unique identifier for the user (note: shadows builtin ``id``).
            username: the user's name as provided by the caller.
            password: the user's password as provided.
                NOTE(review): stored as-is here; confirm hashing happens upstream.
        """
        self.id = id
        self.username = username
        self.password = password
        # Rooms the user has joined; starts empty.
        self.rooms = []
| 21.75 | 47 | 0.568966 | class User():
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
self.rooms = []
| true | true |
f73b58455b1e82b17ef668ac999b3ff19c03f343 | 1,288 | py | Python | DATA_SORT/3cities/SCAM_CLMINIT_60days_withclearsky/outputcesmscam_FLNSC_CLM5_CLM5F_001.py | islasimpson/snowpaper_2022 | d6ee677f696d7fd6e7cadef8168ce4fd8b184cac | [
"Apache-2.0"
] | null | null | null | DATA_SORT/3cities/SCAM_CLMINIT_60days_withclearsky/outputcesmscam_FLNSC_CLM5_CLM5F_001.py | islasimpson/snowpaper_2022 | d6ee677f696d7fd6e7cadef8168ce4fd8b184cac | [
"Apache-2.0"
] | null | null | null | DATA_SORT/3cities/SCAM_CLMINIT_60days_withclearsky/outputcesmscam_FLNSC_CLM5_CLM5F_001.py | islasimpson/snowpaper_2022 | d6ee677f696d7fd6e7cadef8168ce4fd8b184cac | [
"Apache-2.0"
] | null | null | null | import importlib
import xarray as xr
import numpy as np
import pandas as pd
import sys
from CASutils import filter_utils as filt
from CASutils import readdata_utils as read
from CASutils import calendar_utils as cal
# Force-reload the CAS helper modules (handy when iterating interactively).
importlib.reload(filt)
importlib.reload(read)
importlib.reload(cal)
# SCAM case names, one per city; index-aligned with cityname/citylon/citylat.
expname=['SASK_CLM5_CLM5F_01.001.FSCAM.sask_1979_2014',
         'TOR_CLM5_CLM5F_01.001.FSCAM.tor_1979_2014',
         'SID_CLM5_CLM5F_01.001.FSCAM.sid_1979_2014']
outname='SCAM_CLM5_CLM5F_01'
cityname=['Saskatoon','Toronto','Siderovsk']
citylon=[253.330, 280.617, 82.3139]
citylat=[52.1579, 43.6532, 66.5973]
# Gather the FLNSC field for each city into one (time, city) DataArray and
# save it as netCDF.
for icity in np.arange(0,3,1):
    basedir="/project/cas02/islas/CLM5_CLM4/raw/SCAM_CLM_INIT_60days_withclearsky/"
    pathout="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/SCAM_CLMINIT_60days_withclearsky/"
    fpath=basedir+expname[icity]+"/atm/hist/h0concat.nc"
    print(fpath)
    # Read surface fields for the 1979-2014 window (project helper).
    dat = read.read_sfc_cesm(fpath,"1979-01-01T12:00:00","2014-12-31T12:00:00")
    # Allocate the output array on the first pass, once the time axis is known.
    if (icity == 0):
        flnsc = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],
                             dims=['time','city'], name='flnsc')
    # Single-column run: take the sole lon/lat point.
    flnsc[:,icity] = dat.FLNSC.isel(lon=0,lat=0)
    # NOTE(review): this write executes on every loop pass, rewriting the same
    # file each time; moving it after the loop would write once -- confirm.
    flnsc.to_netcdf(path=pathout+"FLNSC_"+outname+".nc")
| 30.666667 | 106 | 0.706522 | import importlib
import xarray as xr
import numpy as np
import pandas as pd
import sys
from CASutils import filter_utils as filt
from CASutils import readdata_utils as read
from CASutils import calendar_utils as cal
importlib.reload(filt)
importlib.reload(read)
importlib.reload(cal)
expname=['SASK_CLM5_CLM5F_01.001.FSCAM.sask_1979_2014',
'TOR_CLM5_CLM5F_01.001.FSCAM.tor_1979_2014',
'SID_CLM5_CLM5F_01.001.FSCAM.sid_1979_2014']
outname='SCAM_CLM5_CLM5F_01'
cityname=['Saskatoon','Toronto','Siderovsk']
citylon=[253.330, 280.617, 82.3139]
citylat=[52.1579, 43.6532, 66.5973]
for icity in np.arange(0,3,1):
basedir="/project/cas02/islas/CLM5_CLM4/raw/SCAM_CLM_INIT_60days_withclearsky/"
pathout="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/SCAM_CLMINIT_60days_withclearsky/"
fpath=basedir+expname[icity]+"/atm/hist/h0concat.nc"
print(fpath)
dat = read.read_sfc_cesm(fpath,"1979-01-01T12:00:00","2014-12-31T12:00:00")
if (icity == 0):
flnsc = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],
dims=['time','city'], name='flnsc')
flnsc[:,icity] = dat.FLNSC.isel(lon=0,lat=0)
flnsc.to_netcdf(path=pathout+"FLNSC_"+outname+".nc")
| true | true |
f73b58b1b56f2779b977ddbb3e54942f7b71a453 | 534 | py | Python | parsec/commands/histories/delete_dataset_collection.py | abretaud/parsec | 8ebcafac34b5d6df45de4cecc882b129bb604170 | [
"Apache-2.0"
] | null | null | null | parsec/commands/histories/delete_dataset_collection.py | abretaud/parsec | 8ebcafac34b5d6df45de4cecc882b129bb604170 | [
"Apache-2.0"
] | null | null | null | parsec/commands/histories/delete_dataset_collection.py | abretaud/parsec | 8ebcafac34b5d6df45de4cecc882b129bb604170 | [
"Apache-2.0"
] | null | null | null | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output, _arg_split
@click.command('delete_dataset_collection')
@click.argument("history_id", type=str)
@click.argument("dataset_collection_id", type=str)
@pass_context
@custom_exception
@dict_output
def cli(ctx, history_id, dataset_collection_id):
    """Mark corresponding dataset collection as deleted.

    Delegates to ``ctx.gi.histories.delete_dataset_collection`` (presumably a
    bioblend-style Galaxy client -- verify against the parsec context); the
    ``dict_output`` decorator renders the returned dict for the CLI.

    Output:
        dict describing the deleted dataset collection, as returned by the API.
    """
    return ctx.gi.histories.delete_dataset_collection(history_id, dataset_collection_id)
| 25.428571 | 88 | 0.799625 | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output, _arg_split
@click.command('delete_dataset_collection')
@click.argument("history_id", type=str)
@click.argument("dataset_collection_id", type=str)
@pass_context
@custom_exception
@dict_output
def cli(ctx, history_id, dataset_collection_id):
return ctx.gi.histories.delete_dataset_collection(history_id, dataset_collection_id)
| true | true |
f73b5aacf2a5d4bf0de1663870b4a219a87d4232 | 4,265 | py | Python | python_excel.py | mmmaaaggg/fund_evaluation | 124b9873658d3f9bcd126819c8d7738989b1b011 | [
"MIT"
] | null | null | null | python_excel.py | mmmaaaggg/fund_evaluation | 124b9873658d3f9bcd126819c8d7738989b1b011 | [
"MIT"
] | null | null | null | python_excel.py | mmmaaaggg/fund_evaluation | 124b9873658d3f9bcd126819c8d7738989b1b011 | [
"MIT"
] | 1 | 2019-05-14T01:48:54.000Z | 2019-05-14T01:48:54.000Z | import xlwt
# 创建excel工作表
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('sheet1')
# 设置表头
worksheet.write(0, 0, label='产品名称') # product_name
worksheet.write(0, 1, label='产品规模(万)') # volume
worksheet.write(0, 2, label='基金成立日期') # setup_date
worksheet.write(0, 3, label='所投资管计划/信托计划名称') # sub_product_list
worksheet.write(0, 4, label='子基金所投规模(万)') # volume
worksheet.write(0, 5, label='子基金净值') # nav
worksheet.write(0, 6, label='子基金上期净值') # nav_last
worksheet.write(0, 7, label='子基金净值变动率') # nav_chg
worksheet.write(0, 8, label='子基金收益率(年化)') # rr
worksheet.write(0, 9, label='子基金持仓比例') # vol_pct
worksheet.write(0, 10, label='基金净值') # nav
worksheet.write(0, 11, label='上期净值') # nav_last
worksheet.write(0, 12, label='收益率(年化)') # rr
worksheet.write(0, 13, label='净值变动率') # nav_chg
# 将数据写入excel
row_num = 0
for list_item in data_list:
row_num += 1
row_sub_num = -1
for key, value in list_item.items():
if key == "product_name":
worksheet.write(row_num, 0, value)
elif key == "volume":
worksheet.write(row_num, 1, value)
elif key == "setup_date":
worksheet.write(row_num, 2, value)
elif key == "nav":
worksheet.write(row_num, 10, value)
elif key == "nav_last":
worksheet.write(row_num, 11, value)
elif key == "rr":
worksheet.write(row_num, 12, value)
elif key == "nav_chg":
worksheet.write(row_num, 13, value)
elif key == "sub_product_list":
for list_item_sub in data_list[0]['sub_product_list']:
row_sub_num += 1
for key, value in list_item_sub.items():
row_real_num = row_num + row_sub_num
if key == "product_name":
worksheet.write(row_real_num, 3, value)
elif key == "volume":
worksheet.write(row_real_num, 4, value)
elif key == "nav":
worksheet.write(row_real_num, 5, value)
elif key == "nav_last":
worksheet.write(row_real_num, 6, value)
elif key == "nav_chg":
worksheet.write(row_real_num, 7, value)
elif key == "rr":
worksheet.write(row_real_num, 8, value)
elif key == "vol_pct":
worksheet.write(row_real_num, 9, value)
else:
pass
if row_sub_num >= 0:
row_num += row_sub_num
# 保存
workbook.save(r'D:\OK.xls')
data_list = [
{
'product_name': '复华财通定增投资基金',
'volume': 3924.53,
'setup_date': '2013/12/31',
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
'sub_product_list': [
{
'product_name': '展弘稳进1号',
'volume': 400.00,
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
'vol_pct': 0.1, # 持仓比例
},
{
'product_name': '新萌亮点1号',
'volume': 800.00,
'nav': 1.1592,
'nav_last': 1.1721,
'nav_chg': 0.0025,
'rr': 0.1425,
'vol_pct': 0.2, # 持仓比例
},
],
},
{
'product_name': '鑫隆稳进FOF',
'volume': 3924.53,
'setup_date': '2013/12/31',
'sub_product_list': [
{
'product_name': '展弘稳进1号',
'volume': 400.00,
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
'vol_pct': 0.1, # 持仓比例
},
{
'product_name': '新萌亮点1号',
'volume': 800.00,
'nav': 1.1592,
'nav_last': 1.1721,
'nav_chg': 0.0025,
'rr': 0.1425,
'vol_pct': 0.2, # 持仓比例
},
],
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
},
]
| 29.413793 | 66 | 0.472216 | import xlwt
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('sheet1')
worksheet.write(0, 0, label='产品名称')
worksheet.write(0, 1, label='产品规模(万)')
worksheet.write(0, 2, label='基金成立日期')
worksheet.write(0, 3, label='所投资管计划/信托计划名称')
worksheet.write(0, 4, label='子基金所投规模(万)')
worksheet.write(0, 5, label='子基金净值')
worksheet.write(0, 6, label='子基金上期净值')
worksheet.write(0, 7, label='子基金净值变动率')
worksheet.write(0, 8, label='子基金收益率(年化)')
worksheet.write(0, 9, label='子基金持仓比例')
worksheet.write(0, 10, label='基金净值')
worksheet.write(0, 11, label='上期净值')
worksheet.write(0, 12, label='收益率(年化)')
worksheet.write(0, 13, label='净值变动率')
row_num = 0
for list_item in data_list:
row_num += 1
row_sub_num = -1
for key, value in list_item.items():
if key == "product_name":
worksheet.write(row_num, 0, value)
elif key == "volume":
worksheet.write(row_num, 1, value)
elif key == "setup_date":
worksheet.write(row_num, 2, value)
elif key == "nav":
worksheet.write(row_num, 10, value)
elif key == "nav_last":
worksheet.write(row_num, 11, value)
elif key == "rr":
worksheet.write(row_num, 12, value)
elif key == "nav_chg":
worksheet.write(row_num, 13, value)
elif key == "sub_product_list":
for list_item_sub in data_list[0]['sub_product_list']:
row_sub_num += 1
for key, value in list_item_sub.items():
row_real_num = row_num + row_sub_num
if key == "product_name":
worksheet.write(row_real_num, 3, value)
elif key == "volume":
worksheet.write(row_real_num, 4, value)
elif key == "nav":
worksheet.write(row_real_num, 5, value)
elif key == "nav_last":
worksheet.write(row_real_num, 6, value)
elif key == "nav_chg":
worksheet.write(row_real_num, 7, value)
elif key == "rr":
worksheet.write(row_real_num, 8, value)
elif key == "vol_pct":
worksheet.write(row_real_num, 9, value)
else:
pass
if row_sub_num >= 0:
row_num += row_sub_num
workbook.save(r'D:\OK.xls')
data_list = [
{
'product_name': '复华财通定增投资基金',
'volume': 3924.53,
'setup_date': '2013/12/31',
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
'sub_product_list': [
{
'product_name': '展弘稳进1号',
'volume': 400.00,
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
'vol_pct': 0.1,
},
{
'product_name': '新萌亮点1号',
'volume': 800.00,
'nav': 1.1592,
'nav_last': 1.1721,
'nav_chg': 0.0025,
'rr': 0.1425,
'vol_pct': 0.2,
},
],
},
{
'product_name': '鑫隆稳进FOF',
'volume': 3924.53,
'setup_date': '2013/12/31',
'sub_product_list': [
{
'product_name': '展弘稳进1号',
'volume': 400.00,
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
'vol_pct': 0.1,
},
{
'product_name': '新萌亮点1号',
'volume': 800.00,
'nav': 1.1592,
'nav_last': 1.1721,
'nav_chg': 0.0025,
'rr': 0.1425,
'vol_pct': 0.2,
},
],
'nav': 1.1492,
'nav_last': 1.1521,
'nav_chg': 0.0025,
'rr': 0.1325,
},
]
| true | true |
f73b5b866ffbb95b8ea5bb63ec08368120ffb5cb | 6,400 | py | Python | contrib/DomainAdaptation/train.py | DaoDaoer/PaddleSeg | 7fe2e41de0f192494b8f2088ee500bb55d17708e | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-06-12T05:14:37.000Z | 2021-12-20T12:31:32.000Z | contrib/DomainAdaptation/train.py | DaoDaoer/PaddleSeg | 7fe2e41de0f192494b8f2088ee500bb55d17708e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | contrib/DomainAdaptation/train.py | DaoDaoer/PaddleSeg | 7fe2e41de0f192494b8f2088ee500bb55d17708e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import argparse
import numpy as np
import paddle
from paddleseg.utils import logger, get_sys_env
import utils
from cvlibs import Config
from script.train import Trainer
from datasets import CityDataset, GTA5Dataset, SYNTHIADataset
def parse_args():
    """Parse command-line arguments for model training.

    Returns:
        argparse.Namespace holding training hyper-parameters (iters,
        batch_size, learning_rate, seed), I/O paths (config, resume_model,
        save_dir), and logging/evaluation switches (do_eval, use_vdl, ...).
    """
    parser = argparse.ArgumentParser(description='Model training')
    # params of training
    parser.add_argument(
        "--config", dest="cfg", help="The config file.", default=None, type=str)
    parser.add_argument(
        '--iters',
        dest='iters',
        help='iters for training',
        type=int,
        default=None)
    parser.add_argument(
        '--batch_size',
        dest='batch_size',
        help='Mini batch size of one gpu or cpu',
        type=int,
        default=None)
    parser.add_argument(
        '--learning_rate',
        dest='learning_rate',
        help='Learning rate',
        type=float,
        default=None)
    parser.add_argument(
        '--save_interval',
        dest='save_interval',
        help='How many iters to save a model snapshot once during training.',
        type=int,
        default=1000)
    parser.add_argument(
        '--resume_model',
        dest='resume_model',
        help='The path of resume model',
        type=str,
        default=None)
    parser.add_argument(
        '--save_dir',
        dest='save_dir',
        help='The directory for saving the model snapshot',
        type=str,
        default='./output')
    parser.add_argument(
        '--keep_checkpoint_max',
        dest='keep_checkpoint_max',
        help='Maximum number of checkpoints to save',
        type=int,
        default=5)
    parser.add_argument(
        '--num_workers',
        dest='num_workers',
        help='Num workers for data loader',
        type=int,
        default=0)
    parser.add_argument(
        '--do_eval',
        dest='do_eval',
        help='Eval while training',
        action='store_true')
    parser.add_argument(
        '--log_iters',
        dest='log_iters',
        help='Display logging information at every log_iters',
        default=10,
        type=int)
    parser.add_argument(
        '--use_vdl',
        dest='use_vdl',
        help='Whether to record the data to VisualDL during training',
        action='store_true')
    parser.add_argument(
        '--seed',
        dest='seed',
        help='Set the random seed during training.',
        default=42,
        type=int)
    # FIX: corrected user-facing help-string typo ("Whther" -> "Whether").
    parser.add_argument(
        '--fp16', dest='fp16', help='Whether to use amp', action='store_true')
    parser.add_argument(
        '--data_format',
        dest='data_format',
        help=
        'Data format that specifies the layout of input. It can be "NCHW" or "NHWC". Default: "NCHW".',
        type=str,
        default='NCHW')
    return parser.parse_args()
def main(args):
    """Set up the environment and run domain-adaptation training.

    Args:
        args: parsed CLI namespace from ``parse_args``.

    Raises:
        RuntimeError: if no config file was given.
        NotImplementedError: for unsupported source/target dataset names.
        ValueError: if the source training dataset is empty.
    """
    # Seed every RNG (paddle / numpy / random) for reproducibility.
    if args.seed is not None:
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)
        logger.info('Set seed to {}'.format(args.seed))
    # Log a boxed summary of the runtime environment.
    env_info = get_sys_env()
    info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    info = '\n'.join(['', format('Environment Information', '-^48s')] + info +
                     ['-' * 48])
    logger.info(info)
    # Use GPU only when paddle was built with CUDA and GPUs are present.
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    # CLI values (when given) override the corresponding config-file entries.
    cfg = Config(
        args.cfg,
        learning_rate=args.learning_rate,
        iters=args.iters,
        batch_size=args.batch_size)
    # Build the source-domain train/val datasets (synthetic data).
    if cfg.dic["data"]["source"]["dataset"] == 'synthia':
        train_dataset_src = SYNTHIADataset(
            split='train', **cfg.dic["data"]["source"]["kwargs"])
        val_dataset_src = SYNTHIADataset(
            split='val', **cfg.dic["data"]["source"]["kwargs"])
    elif cfg.dic["data"]["source"]["dataset"] == 'gta5':
        train_dataset_src = GTA5Dataset(
            split='train', **cfg.dic["data"]["source"]["kwargs"])
        val_dataset_src = GTA5Dataset(
            split='val', **cfg.dic["data"]["source"]["kwargs"])
    else:
        raise NotImplementedError()
    # Build the target-domain train/val datasets (real data).
    if cfg.dic["data"]["target"]["dataset"] == 'cityscapes':
        train_dataset_tgt = CityDataset(
            split='train', **cfg.dic["data"]["target"]["kwargs"])
        val_dataset_tgt = CityDataset(
            split='val', **cfg.dic["data"]["target"]["kwargs"])
    else:
        raise NotImplementedError()
    # NOTE(review): val datasets are constructed even without --do_eval and
    # then discarded here; building them lazily would avoid that cost.
    val_dataset_tgt = val_dataset_tgt if args.do_eval else None
    val_dataset_src = val_dataset_src if args.do_eval else None
    # NOTE(review): train_dataset_src is always assigned above or an exception
    # was raised, so this None check appears unreachable -- confirm.
    if train_dataset_src is None:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    elif len(train_dataset_src) == 0:
        raise ValueError(
            'The length of train_dataset is 0. Please check if your dataset is valid'
        )
    # Log the resolved configuration.
    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)
    # Kick off training with the assembled datasets and optimizer.
    trainer = Trainer(model=cfg.model, cfg=cfg.dic)
    trainer.train(
        train_dataset_src,
        train_dataset_tgt,
        val_dataset_tgt=val_dataset_tgt,
        val_dataset_src=val_dataset_src,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        resume_model=args.resume_model,
        save_interval=args.save_interval,
        log_iters=args.log_iters,
        num_workers=args.num_workers,
        use_vdl=args.use_vdl,
        keep_checkpoint_max=args.keep_checkpoint_max,
        test_config=cfg.test_config)
# Script entry point: parse CLI arguments and launch training.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 31.683168 | 103 | 0.61 |
import random
import argparse
import numpy as np
import paddle
from paddleseg.utils import logger, get_sys_env
import utils
from cvlibs import Config
from script.train import Trainer
from datasets import CityDataset, GTA5Dataset, SYNTHIADataset
def parse_args():
parser = argparse.ArgumentParser(description='Model training')
parser.add_argument(
"--config", dest="cfg", help="The config file.", default=None, type=str)
parser.add_argument(
'--iters',
dest='iters',
help='iters for training',
type=int,
default=None)
parser.add_argument(
'--batch_size',
dest='batch_size',
help='Mini batch size of one gpu or cpu',
type=int,
default=None)
parser.add_argument(
'--learning_rate',
dest='learning_rate',
help='Learning rate',
type=float,
default=None)
parser.add_argument(
'--save_interval',
dest='save_interval',
help='How many iters to save a model snapshot once during training.',
type=int,
default=1000)
parser.add_argument(
'--resume_model',
dest='resume_model',
help='The path of resume model',
type=str,
default=None)
parser.add_argument(
'--save_dir',
dest='save_dir',
help='The directory for saving the model snapshot',
type=str,
default='./output')
parser.add_argument(
'--keep_checkpoint_max',
dest='keep_checkpoint_max',
help='Maximum number of checkpoints to save',
type=int,
default=5)
parser.add_argument(
'--num_workers',
dest='num_workers',
help='Num workers for data loader',
type=int,
default=0)
parser.add_argument(
'--do_eval',
dest='do_eval',
help='Eval while training',
action='store_true')
parser.add_argument(
'--log_iters',
dest='log_iters',
help='Display logging information at every log_iters',
default=10,
type=int)
parser.add_argument(
'--use_vdl',
dest='use_vdl',
help='Whether to record the data to VisualDL during training',
action='store_true')
parser.add_argument(
'--seed',
dest='seed',
help='Set the random seed during training.',
default=42,
type=int)
parser.add_argument(
'--fp16', dest='fp16', help='Whther to use amp', action='store_true')
parser.add_argument(
'--data_format',
dest='data_format',
help=
'Data format that specifies the layout of input. It can be "NCHW" or "NHWC". Default: "NCHW".',
type=str,
default='NCHW')
return parser.parse_args()
def main(args):
if args.seed is not None:
paddle.seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
logger.info('Set seed to {}'.format(args.seed))
env_info = get_sys_env()
info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
info = '\n'.join(['', format('Environment Information', '-^48s')] + info +
['-' * 48])
logger.info(info)
place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
'GPUs used'] else 'cpu'
paddle.set_device(place)
if not args.cfg:
raise RuntimeError('No configuration file specified.')
cfg = Config(
args.cfg,
learning_rate=args.learning_rate,
iters=args.iters,
batch_size=args.batch_size)
if cfg.dic["data"]["source"]["dataset"] == 'synthia':
train_dataset_src = SYNTHIADataset(
split='train', **cfg.dic["data"]["source"]["kwargs"])
val_dataset_src = SYNTHIADataset(
split='val', **cfg.dic["data"]["source"]["kwargs"])
elif cfg.dic["data"]["source"]["dataset"] == 'gta5':
train_dataset_src = GTA5Dataset(
split='train', **cfg.dic["data"]["source"]["kwargs"])
val_dataset_src = GTA5Dataset(
split='val', **cfg.dic["data"]["source"]["kwargs"])
else:
raise NotImplementedError()
if cfg.dic["data"]["target"]["dataset"] == 'cityscapes':
train_dataset_tgt = CityDataset(
split='train', **cfg.dic["data"]["target"]["kwargs"])
val_dataset_tgt = CityDataset(
split='val', **cfg.dic["data"]["target"]["kwargs"])
else:
raise NotImplementedError()
val_dataset_tgt = val_dataset_tgt if args.do_eval else None
val_dataset_src = val_dataset_src if args.do_eval else None
if train_dataset_src is None:
raise RuntimeError(
'The training dataset is not specified in the configuration file.')
elif len(train_dataset_src) == 0:
raise ValueError(
'The length of train_dataset is 0. Please check if your dataset is valid'
)
msg = '\n---------------Config Information---------------\n'
msg += str(cfg)
msg += '------------------------------------------------'
logger.info(msg)
trainer = Trainer(model=cfg.model, cfg=cfg.dic)
trainer.train(
train_dataset_src,
train_dataset_tgt,
val_dataset_tgt=val_dataset_tgt,
val_dataset_src=val_dataset_src,
optimizer=cfg.optimizer,
save_dir=args.save_dir,
iters=cfg.iters,
batch_size=cfg.batch_size,
resume_model=args.resume_model,
save_interval=args.save_interval,
log_iters=args.log_iters,
num_workers=args.num_workers,
use_vdl=args.use_vdl,
keep_checkpoint_max=args.keep_checkpoint_max,
test_config=cfg.test_config)
if __name__ == '__main__':
args = parse_args()
main(args)
| true | true |
f73b5bdd9a518c6bca8d2650aebf8ff0e3ae1b69 | 270 | py | Python | ABC/186/b_ans.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | ABC/186/b_ans.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | ABC/186/b_ans.py | fumiyanll23/AtCoder | 362ca9fcacb5415c1458bc8dee5326ba2cc70b65 | [
"MIT"
] | null | null | null | import numpy as np
def main():
    """Read an H x W integer grid from stdin and print the levelling cost.

    Input format: first line ``H W``, then H lines of W integers each.
    """
    # input (W is part of the format but not needed for the computation)
    H, W = map(int, input().split())
    Ass = [[*map(int, input().split())] for _ in range(H)]
    # compute / output -- delegate the arithmetic so it is unit-testable
    print(solve(Ass))


def solve(grid):
    """Return sum of (cell - min(grid)) over all cells of a 2-D int grid.

    This is the number of single-step operations needed to lower every
    square down to the height of the lowest square.
    """
    arr = np.array(grid)
    return np.sum(arr - np.min(arr))


if __name__ == '__main__':
    main()
| 15.882353 | 58 | 0.537037 | import numpy as np
def main():
    """Read an H x W integer grid from stdin and print the levelling cost."""
    # First line: grid dimensions (W is read but not otherwise needed).
    H, W = map(int, input().split())
    # Next H lines: one row of W integers each.
    Ass = [[*map(int, input().split())] for _ in range(H)]
    Ass = np.array(Ass)
    # Total of (cell - global minimum) over all cells.
    print(np.sum(Ass - np.min(Ass)))
if __name__ == '__main__':
    main()
| true | true |
f73b5bf2af9066b425c3ca2403a9dd39d0dc81aa | 22,549 | py | Python | tripleo_workflows/tests/actions/test_deployment.py | bcrochet/tripleo-workflows | 3ac8aa6a47ef005358c98c987ddb17c31fd1d1a4 | [
"Apache-2.0"
] | null | null | null | tripleo_workflows/tests/actions/test_deployment.py | bcrochet/tripleo-workflows | 3ac8aa6a47ef005358c98c987ddb17c31fd1d1a4 | [
"Apache-2.0"
] | null | null | null | tripleo_workflows/tests/actions/test_deployment.py | bcrochet/tripleo-workflows | 3ac8aa6a47ef005358c98c987ddb17c31fd1d1a4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import tempfile
import yaml
from heatclient import exc as heat_exc
from mistral_lib import actions
from swiftclient import exceptions as swiftexceptions
from tripleo_workflows.actions import deployment
from tripleo_workflows import constants
from tripleo_workflows.tests import base
class OrchestrationDeployActionTest(base.TestCase):
    """Unit tests for deployment.OrchestrationDeployAction.

    All collaborators (swift object client, heat orchestration client and
    the heatclient temp-URL helpers) are mocked; only the action's own
    orchestration logic and Result wiring is exercised.
    """
    def setUp(self,):
        super(OrchestrationDeployActionTest, self).setUp()
        # Shared constructor arguments reused by the tests below.
        self.server_id = 'server_id'
        self.config = 'config'
        self.name = 'name'
        self.input_values = []
        self.action = 'CREATE'
        self.signal_transport = 'TEMP_URL_SIGNAL'
        self.timeout = 300
        self.group = 'script'
    def test_extract_container_object_from_swift_url(self):
        # The (container, object) pair is parsed out of a Swift temp URL.
        swift_url = 'https://example.com' + \
            '/v1/a422b2-91f3-2f46-74b7-d7c9e8958f5d30/container/object' + \
            '?temp_url_sig=da39a3ee5e6b4&temp_url_expires=1323479485'
        action = deployment.OrchestrationDeployAction(self.server_id,
                                                      self.config, self.name,
                                                      self.timeout)
        self.assertEqual(('container', 'object'),
                         action._extract_container_object_from_swift_url(
                             swift_url))
    @mock.patch(
        'heatclient.common.deployment_utils.build_derived_config_params')
    def test_build_sc_params(self, build_derived_config_params_mock):
        # _build_sc_params should forward to the heatclient helper once.
        build_derived_config_params_mock.return_value = 'built_params'
        action = deployment.OrchestrationDeployAction(self.server_id,
                                                      self.config, self.name)
        self.assertEqual('built_params', action._build_sc_params('swift_url'))
        build_derived_config_params_mock.assert_called_once()
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    def test_wait_for_data(self, get_obj_client_mock):
        # Data already present in swift is returned on the first poll.
        mock_ctx = mock.MagicMock()
        swift = mock.MagicMock()
        swift.get_object.return_value = ({}, 'body')
        get_obj_client_mock.return_value = swift
        action = deployment.OrchestrationDeployAction(self.server_id,
                                                      self.config, self.name)
        self.assertEqual('body', action._wait_for_data('container',
                                                       'object',
                                                       context=mock_ctx))
        get_obj_client_mock.assert_called_once()
        swift.get_object.assert_called_once_with('container', 'object')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('time.sleep')
    def test_wait_for_data_timeout(self, sleep, get_obj_client_mock):
        # When the object body never arrives, polling gives up with None.
        # time.sleep is patched so the test runs instantly.
        mock_ctx = mock.MagicMock()
        swift = mock.MagicMock()
        swift.get_object.return_value = ({}, None)
        get_obj_client_mock.return_value = swift
        action = deployment.OrchestrationDeployAction(self.server_id,
                                                      self.config, self.name,
                                                      timeout=10)
        self.assertIsNone(action._wait_for_data('container',
                                                'object',
                                                context=mock_ctx))
        get_obj_client_mock.assert_called_once()
        swift.get_object.assert_called_with('container', 'object')
        # Trying every 3 seconds, so 4 times for a timeout of 10 seconds
        self.assertEqual(swift.get_object.call_count, 4)
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('heatclient.common.deployment_utils.create_temp_url')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction.'
                '_extract_container_object_from_swift_url')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction._build_sc_params')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction._wait_for_data')
    def test_run(self, wait_for_data_mock, build_sc_params_mock,
                 extract_from_swift_url_mock, create_temp_url_mock,
                 get_heat_mock, get_obj_client_mock):
        # Happy path: deploy_status_code 0 yields a Result with no error and
        # the temporary heat config/deployment objects are deleted.
        extract_from_swift_url_mock.return_value = ('container', 'object')
        mock_ctx = mock.MagicMock()
        build_sc_params_mock.return_value = {'foo': 'bar'}
        config = mock.MagicMock()
        sd = mock.MagicMock()
        get_heat_mock().software_configs.create.return_value = config
        get_heat_mock().software_deployments.create.return_value = sd
        wait_for_data_mock.return_value = '{"deploy_status_code": 0}'
        action = deployment.OrchestrationDeployAction(self.server_id,
                                                      self.config, self.name)
        expected = actions.Result(
            data={"deploy_status_code": 0},
            error=None)
        self.assertEqual(expected, action.run(context=mock_ctx))
        create_temp_url_mock.assert_called_once()
        extract_from_swift_url_mock.assert_called_once()
        build_sc_params_mock.assert_called_once()
        get_obj_client_mock.assert_called_once()
        wait_for_data_mock.assert_called_once()
        sd.delete.assert_called_once()
        config.delete.assert_called_once()
        # NOTE(review): 'called_once_with' is not a Mock assertion method --
        # these two lines auto-create mock attributes and verify nothing.
        # They presumably should be assert_called_once_with(...) on the
        # swift client mock; confirm against deployment.py before fixing.
        get_obj_client_mock.delete_object.called_once_with('container',
                                                           'object')
        get_obj_client_mock.delete_container.called_once_with('container')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('heatclient.common.deployment_utils.create_temp_url')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction.'
                '_extract_container_object_from_swift_url')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction._build_sc_params')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction._wait_for_data')
    def test_run_timeout(self, wait_for_data_mock, build_sc_params_mock,
                         extract_from_swift_url_mock, create_temp_url_mock,
                         get_heat_mock, get_obj_client_mock):
        # _wait_for_data returning None means the deployment timed out; the
        # Result must carry a timeout error but cleanup still happens.
        extract_from_swift_url_mock.return_value = ('container', 'object')
        mock_ctx = mock.MagicMock()
        config = mock.MagicMock()
        sd = mock.MagicMock()
        get_heat_mock().software_configs.create.return_value = config
        get_heat_mock().software_deployments.create.return_value = sd
        wait_for_data_mock.return_value = None
        action = deployment.OrchestrationDeployAction(self.server_id,
                                                      self.config, self.name)
        expected = actions.Result(
            data={},
            error="Timeout for heat deployment 'name'")
        self.assertEqual(expected, action.run(mock_ctx))
        sd.delete.assert_called_once()
        config.delete.assert_called_once()
        # NOTE(review): no-op pseudo-assertions, see test_run above.
        get_obj_client_mock.delete_object.called_once_with('container',
                                                           'object')
        get_obj_client_mock.delete_container.called_once_with('container')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('heatclient.common.deployment_utils.create_temp_url')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction.'
                '_extract_container_object_from_swift_url')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction._build_sc_params')
    @mock.patch('tripleo_workflows.actions.deployment.'
                'OrchestrationDeployAction._wait_for_data')
    def test_run_failed(self, wait_for_data_mock, build_sc_params_mock,
                        extract_from_swift_url_mock, create_temp_url_mock,
                        get_heat_mock, get_obj_client_mock):
        # A non-zero deploy_status_code is reported as a deployment failure.
        extract_from_swift_url_mock.return_value = ('container', 'object')
        mock_ctx = mock.MagicMock()
        config = mock.MagicMock()
        sd = mock.MagicMock()
        get_heat_mock().software_configs.create.return_value = config
        get_heat_mock().software_deployments.create.return_value = sd
        wait_for_data_mock.return_value = '{"deploy_status_code": 1}'
        action = deployment.OrchestrationDeployAction(self.server_id,
                                                      self.config, self.name)
        expected = actions.Result(
            data={"deploy_status_code": 1},
            error="Heat deployment failed for 'name'")
        self.assertEqual(expected, action.run(mock_ctx))
        sd.delete.assert_called_once()
        config.delete.assert_called_once()
        # NOTE(review): no-op pseudo-assertions, see test_run above.
        get_obj_client_mock.delete_object.called_once_with('container',
                                                           'object')
        get_obj_client_mock.delete_container.called_once_with('container')
class DeployStackActionTest(base.TestCase):
    """Unit tests for deployment.DeployStackAction.

    Swift, heat and the heatclient template helpers are mocked; the tests
    compare the exact plan-environment YAML the action writes back and the
    stack-create call it issues. Note mock.patch decorators are applied
    bottom-up, so the argument order of each test mirrors that.
    """
    def setUp(self,):
        super(DeployStackActionTest, self).setUp()
    @mock.patch('tripleo_workflows.actions.deployment.time')
    @mock.patch('heatclient.common.template_utils.'
                'process_multiple_environments_and_files')
    @mock.patch('heatclient.common.template_utils.get_template_contents')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch(
        'tripleo_workflows.actions.base.TripleOAction.'
        'get_orchestration_client')
    def test_run(self, get_orchestration_client_mock,
                 mock_get_object_client, mock_get_template_contents,
                 mock_process_multiple_environments_and_files,
                 mock_time):
        """Default run: DeployIdentifier is injected and the stack created."""
        mock_ctx = mock.MagicMock()
        # setup swift
        swift = mock.MagicMock(url="http://test.com")
        mock_env = yaml.safe_dump({
            'name': 'overcloud',
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}],
            'parameter_defaults': {'random_existing_data': 'a_value'},
        }, default_flow_style=False)
        # Two successful plan-environment reads, then a miss (third lookup
        # is expected to fail with a ClientException).
        swift.get_object.side_effect = (
            ({}, mock_env),
            ({}, mock_env),
            swiftexceptions.ClientException('atest2')
        )
        mock_get_object_client.return_value = swift
        # heat.stacks.get returning None means no pre-existing stack, so the
        # action takes the CREATE path.
        heat = mock.MagicMock()
        heat.stacks.get.return_value = None
        get_orchestration_client_mock.return_value = heat
        mock_get_template_contents.return_value = ({}, {
            'heat_template_version': '2016-04-30'
        })
        mock_process_multiple_environments_and_files.return_value = ({}, {})
        # freeze time at datetime.datetime(2016, 9, 8, 16, 24, 24)
        mock_time.time.return_value = 1473366264
        action = deployment.DeployStackAction(1, 'overcloud')
        action.run(mock_ctx)
        # verify parameters are as expected
        expected_defaults = {'DeployIdentifier': 1473366264,
                             'StackAction': 'CREATE',
                             'UpdateIdentifier': '',
                             'random_existing_data': 'a_value'}
        mock_env_updated = yaml.safe_dump({
            'name': 'overcloud',
            'temp_environment': 'temp_environment',
            'parameter_defaults': expected_defaults,
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)
        swift.put_object.assert_called_once_with(
            'overcloud',
            constants.PLAN_ENVIRONMENT,
            mock_env_updated
        )
        heat.stacks.create.assert_called_once_with(
            environment={},
            files={},
            stack_name='overcloud',
            template={'heat_template_version': '2016-04-30'},
            timeout_mins=1,
        )
        # The swift-rings backup object is rotated with a timestamp suffix.
        swift.delete_object.assert_called_once_with(
            "overcloud-swift-rings", "swift-rings.tar.gz")
        swift.copy_object.assert_called_once_with(
            "overcloud-swift-rings", "swift-rings.tar.gz",
            "overcloud-swift-rings/swift-rings.tar.gz-%d" % 1473366264)
    @mock.patch('tripleo_workflows.actions.deployment.time')
    @mock.patch('heatclient.common.template_utils.'
                'process_multiple_environments_and_files')
    @mock.patch('heatclient.common.template_utils.get_template_contents')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch(
        'tripleo_workflows.actions.base.TripleOAction.'
        'get_orchestration_client')
    def test_run_skip_deploy_identifier(
            self, get_orchestration_client_mock,
            mock_get_object_client, mock_get_template_contents,
            mock_process_multiple_environments_and_files,
            mock_time):
        """With skip_deploy_identifier=True no DeployIdentifier is written."""
        mock_ctx = mock.MagicMock()
        # setup swift
        swift = mock.MagicMock(url="http://test.com")
        mock_get_object_client.return_value = swift
        heat = mock.MagicMock()
        heat.stacks.get.return_value = None
        get_orchestration_client_mock.return_value = heat
        mock_env = yaml.safe_dump({
            'name': constants.DEFAULT_CONTAINER_NAME,
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}],
            'parameter_defaults': {'random_existing_data': 'a_value'},
        }, default_flow_style=False)
        swift.get_object.side_effect = (
            ({}, mock_env),
            ({}, mock_env),
            swiftexceptions.ClientException('atest2')
        )
        mock_get_template_contents.return_value = ({}, {
            'heat_template_version': '2016-04-30'
        })
        mock_process_multiple_environments_and_files.return_value = ({}, {})
        # freeze time at datetime.datetime(2016, 9, 8, 16, 24, 24)
        mock_time.time.return_value = 1473366264
        action = deployment.DeployStackAction(1, 'overcloud',
                                              skip_deploy_identifier=True)
        action.run(mock_ctx)
        # verify parameters are as expected
        mock_env_updated = yaml.safe_dump({
            'name': constants.DEFAULT_CONTAINER_NAME,
            'temp_environment': 'temp_environment',
            'parameter_defaults': {'StackAction': 'CREATE',
                                   'UpdateIdentifier': '',
                                   'random_existing_data': 'a_value'},
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)
        swift.put_object.assert_called_once_with(
            constants.DEFAULT_CONTAINER_NAME,
            constants.PLAN_ENVIRONMENT,
            mock_env_updated
        )
        heat.stacks.create.assert_called_once_with(
            environment={},
            files={},
            stack_name='overcloud',
            template={'heat_template_version': '2016-04-30'},
            timeout_mins=1,
        )
        swift.delete_object.assert_called_once_with(
            "overcloud-swift-rings", "swift-rings.tar.gz")
        swift.copy_object.assert_called_once_with(
            "overcloud-swift-rings", "swift-rings.tar.gz",
            "overcloud-swift-rings/swift-rings.tar.gz-%d" % 1473366264)
    def test_set_tls_parameters_no_ca_found(self):
        """A missing local CA file leaves the parameters untouched."""
        action = deployment.DeployStackAction(1, 'overcloud',
                                              skip_deploy_identifier=True)
        my_params = {}
        my_env = {'parameter_defaults': {}}
        action.set_tls_parameters(parameters=my_params, env=my_env,
                                  local_ca_path='/tmp/my-unexistent-file.txt')
        self.assertEqual(my_params, {})
    def test_set_tls_parameters_ca_found_no_camap_provided(self):
        """An existing CA file is injected as CAMap['undercloud-ca']."""
        action = deployment.DeployStackAction(1, 'overcloud',
                                              skip_deploy_identifier=True)
        my_params = {}
        my_env = {'parameter_defaults': {}}
        with tempfile.NamedTemporaryFile() as ca_file:
            # Write test data
            ca_file.write(b'FAKE CA CERT')
            ca_file.flush()
            # Test
            action.set_tls_parameters(parameters=my_params, env=my_env,
                                      local_ca_path=ca_file.name)
            self.assertIn('CAMap', my_params)
            self.assertIn('undercloud-ca', my_params['CAMap'])
            self.assertIn('content', my_params['CAMap']['undercloud-ca'])
            self.assertEqual('FAKE CA CERT',
                             my_params['CAMap']['undercloud-ca']['content'])
    def test_set_tls_parameters_ca_found_camap_provided(self):
        """A pre-existing CAMap entry in the env is merged, not replaced."""
        action = deployment.DeployStackAction(1, 'overcloud',
                                              skip_deploy_identifier=True)
        my_params = {}
        my_env = {
            'parameter_defaults': {
                'CAMap': {'overcloud-ca': {'content': 'ANOTER FAKE CERT'}}}}
        with tempfile.NamedTemporaryFile() as ca_file:
            # Write test data
            ca_file.write(b'FAKE CA CERT')
            ca_file.flush()
            # Test
            action.set_tls_parameters(parameters=my_params, env=my_env,
                                      local_ca_path=ca_file.name)
            self.assertIn('CAMap', my_params)
            self.assertIn('undercloud-ca', my_params['CAMap'])
            self.assertIn('content', my_params['CAMap']['undercloud-ca'])
            self.assertEqual('FAKE CA CERT',
                             my_params['CAMap']['undercloud-ca']['content'])
            self.assertIn('overcloud-ca', my_params['CAMap'])
            self.assertIn('content', my_params['CAMap']['overcloud-ca'])
            self.assertEqual('ANOTER FAKE CERT',
                             my_params['CAMap']['overcloud-ca']['content'])
class OvercloudRcActionTestCase(base.TestCase):
    """Unit tests for deployment.OvercloudRcAction error and success paths.

    mock.patch decorators apply bottom-up, so in each test the first mock
    argument corresponds to the decorator closest to the def line.
    """
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_orchestration_client')
    def test_no_stack(self, mock_get_orchestration, mock_get_object):
        """A missing heat stack produces a descriptive error Result."""
        mock_ctx = mock.MagicMock()
        not_found = heat_exc.HTTPNotFound()
        mock_get_orchestration.return_value.stacks.get.side_effect = not_found
        action = deployment.OvercloudRcAction("overcast")
        result = action.run(mock_ctx)
        self.assertEqual(result.error, (
            "The Heat stack overcast could not be found. Make sure you have "
            "deployed before calling this action."
        ))
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_orchestration_client')
    def test_no_env(self, mock_get_orchestration, mock_get_object):
        """A swift failure while fetching the plan env is reported."""
        mock_ctx = mock.MagicMock()
        mock_get_object.return_value.get_object.side_effect = (
            swiftexceptions.ClientException("overcast"))
        action = deployment.OvercloudRcAction("overcast")
        result = action.run(mock_ctx)
        self.assertEqual(result.error, "Error retrieving environment for plan "
                                       "overcast: overcast")
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_orchestration_client')
    def test_no_password(self, mock_get_orchestration, mock_get_object):
        """A plan env without an AdminPassword yields an error Result."""
        mock_ctx = mock.MagicMock()
        mock_get_object.return_value.get_object.return_value = (
            {}, "version: 1.0")
        action = deployment.OvercloudRcAction("overcast")
        result = action.run(mock_ctx)
        self.assertEqual(
            result.error,
            "Unable to find the AdminPassword in the plan environment.")
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_object_client')
    @mock.patch('tripleo_workflows.utils.overcloudrc.create_overcloudrc')
    @mock.patch('tripleo_workflows.actions.base.TripleOAction.'
                'get_orchestration_client')
    def test_success(self, mock_get_orchestration, mock_create_overcloudrc,
                     mock_get_object):
        """With a valid plan env the overcloudrc helper output is returned."""
        mock_ctx = mock.MagicMock()
        # Plan environment as stored in swift (YAML text, including the
        # AdminPassword the action needs).
        mock_env = """
        version: 1.0

        template: overcloud.yaml
        environments:
        - path: overcloud-resource-registry-puppet.yaml
        - path: environments/services/sahara.yaml
        parameter_defaults:
          BlockStorageCount: 42
          OvercloudControlFlavor: yummy
        passwords:
          AdminPassword: SUPERSECUREPASSWORD
        """
        mock_get_object.return_value.get_object.return_value = ({}, mock_env)
        mock_create_overcloudrc.return_value = {
            "overcloudrc": "fake overcloudrc"
        }
        action = deployment.OvercloudRcAction("overcast")
        result = action.run(mock_ctx)
        self.assertEqual(result, {"overcloudrc": "fake overcloudrc"})
| 43.784466 | 79 | 0.625571 |
import mock
import tempfile
import yaml
from heatclient import exc as heat_exc
from mistral_lib import actions
from swiftclient import exceptions as swiftexceptions
from tripleo_workflows.actions import deployment
from tripleo_workflows import constants
from tripleo_workflows.tests import base
class OrchestrationDeployActionTest(base.TestCase):
def setUp(self,):
super(OrchestrationDeployActionTest, self).setUp()
self.server_id = 'server_id'
self.config = 'config'
self.name = 'name'
self.input_values = []
self.action = 'CREATE'
self.signal_transport = 'TEMP_URL_SIGNAL'
self.timeout = 300
self.group = 'script'
def test_extract_container_object_from_swift_url(self):
swift_url = 'https://example.com' + \
'/v1/a422b2-91f3-2f46-74b7-d7c9e8958f5d30/container/object' + \
'?temp_url_sig=da39a3ee5e6b4&temp_url_expires=1323479485'
action = deployment.OrchestrationDeployAction(self.server_id,
self.config, self.name,
self.timeout)
self.assertEqual(('container', 'object'),
action._extract_container_object_from_swift_url(
swift_url))
@mock.patch(
'heatclient.common.deployment_utils.build_derived_config_params')
def test_build_sc_params(self, build_derived_config_params_mock):
build_derived_config_params_mock.return_value = 'built_params'
action = deployment.OrchestrationDeployAction(self.server_id,
self.config, self.name)
self.assertEqual('built_params', action._build_sc_params('swift_url'))
build_derived_config_params_mock.assert_called_once()
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
def test_wait_for_data(self, get_obj_client_mock):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock()
swift.get_object.return_value = ({}, 'body')
get_obj_client_mock.return_value = swift
action = deployment.OrchestrationDeployAction(self.server_id,
self.config, self.name)
self.assertEqual('body', action._wait_for_data('container',
'object',
context=mock_ctx))
get_obj_client_mock.assert_called_once()
swift.get_object.assert_called_once_with('container', 'object')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('time.sleep')
def test_wait_for_data_timeout(self, sleep, get_obj_client_mock):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock()
swift.get_object.return_value = ({}, None)
get_obj_client_mock.return_value = swift
action = deployment.OrchestrationDeployAction(self.server_id,
self.config, self.name,
timeout=10)
self.assertIsNone(action._wait_for_data('container',
'object',
context=mock_ctx))
get_obj_client_mock.assert_called_once()
swift.get_object.assert_called_with('container', 'object')
self.assertEqual(swift.get_object.call_count, 4)
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('heatclient.common.deployment_utils.create_temp_url')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction.'
'_extract_container_object_from_swift_url')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction._build_sc_params')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction._wait_for_data')
def test_run(self, wait_for_data_mock, build_sc_params_mock,
extract_from_swift_url_mock, create_temp_url_mock,
get_heat_mock, get_obj_client_mock):
extract_from_swift_url_mock.return_value = ('container', 'object')
mock_ctx = mock.MagicMock()
build_sc_params_mock.return_value = {'foo': 'bar'}
config = mock.MagicMock()
sd = mock.MagicMock()
get_heat_mock().software_configs.create.return_value = config
get_heat_mock().software_deployments.create.return_value = sd
wait_for_data_mock.return_value = '{"deploy_status_code": 0}'
action = deployment.OrchestrationDeployAction(self.server_id,
self.config, self.name)
expected = actions.Result(
data={"deploy_status_code": 0},
error=None)
self.assertEqual(expected, action.run(context=mock_ctx))
create_temp_url_mock.assert_called_once()
extract_from_swift_url_mock.assert_called_once()
build_sc_params_mock.assert_called_once()
get_obj_client_mock.assert_called_once()
wait_for_data_mock.assert_called_once()
sd.delete.assert_called_once()
config.delete.assert_called_once()
get_obj_client_mock.delete_object.called_once_with('container',
'object')
get_obj_client_mock.delete_container.called_once_with('container')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('heatclient.common.deployment_utils.create_temp_url')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction.'
'_extract_container_object_from_swift_url')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction._build_sc_params')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction._wait_for_data')
def test_run_timeout(self, wait_for_data_mock, build_sc_params_mock,
extract_from_swift_url_mock, create_temp_url_mock,
get_heat_mock, get_obj_client_mock):
extract_from_swift_url_mock.return_value = ('container', 'object')
mock_ctx = mock.MagicMock()
config = mock.MagicMock()
sd = mock.MagicMock()
get_heat_mock().software_configs.create.return_value = config
get_heat_mock().software_deployments.create.return_value = sd
wait_for_data_mock.return_value = None
action = deployment.OrchestrationDeployAction(self.server_id,
self.config, self.name)
expected = actions.Result(
data={},
error="Timeout for heat deployment 'name'")
self.assertEqual(expected, action.run(mock_ctx))
sd.delete.assert_called_once()
config.delete.assert_called_once()
get_obj_client_mock.delete_object.called_once_with('container',
'object')
get_obj_client_mock.delete_container.called_once_with('container')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('heatclient.common.deployment_utils.create_temp_url')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction.'
'_extract_container_object_from_swift_url')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction._build_sc_params')
@mock.patch('tripleo_workflows.actions.deployment.'
'OrchestrationDeployAction._wait_for_data')
def test_run_failed(self, wait_for_data_mock, build_sc_params_mock,
extract_from_swift_url_mock, create_temp_url_mock,
get_heat_mock, get_obj_client_mock):
extract_from_swift_url_mock.return_value = ('container', 'object')
mock_ctx = mock.MagicMock()
config = mock.MagicMock()
sd = mock.MagicMock()
get_heat_mock().software_configs.create.return_value = config
get_heat_mock().software_deployments.create.return_value = sd
wait_for_data_mock.return_value = '{"deploy_status_code": 1}'
action = deployment.OrchestrationDeployAction(self.server_id,
self.config, self.name)
expected = actions.Result(
data={"deploy_status_code": 1},
error="Heat deployment failed for 'name'")
self.assertEqual(expected, action.run(mock_ctx))
sd.delete.assert_called_once()
config.delete.assert_called_once()
get_obj_client_mock.delete_object.called_once_with('container',
'object')
get_obj_client_mock.delete_container.called_once_with('container')
class DeployStackActionTest(base.TestCase):
def setUp(self,):
super(DeployStackActionTest, self).setUp()
@mock.patch('tripleo_workflows.actions.deployment.time')
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch(
'tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
def test_run(self, get_orchestration_client_mock,
mock_get_object_client, mock_get_template_contents,
mock_process_multiple_environments_and_files,
mock_time):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': 'overcloud',
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
'parameter_defaults': {'random_existing_data': 'a_value'},
}, default_flow_style=False)
swift.get_object.side_effect = (
({}, mock_env),
({}, mock_env),
swiftexceptions.ClientException('atest2')
)
mock_get_object_client.return_value = swift
heat = mock.MagicMock()
heat.stacks.get.return_value = None
get_orchestration_client_mock.return_value = heat
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_process_multiple_environments_and_files.return_value = ({}, {})
mock_time.time.return_value = 1473366264
action = deployment.DeployStackAction(1, 'overcloud')
action.run(mock_ctx)
expected_defaults = {'DeployIdentifier': 1473366264,
'StackAction': 'CREATE',
'UpdateIdentifier': '',
'random_existing_data': 'a_value'}
mock_env_updated = yaml.safe_dump({
'name': 'overcloud',
'temp_environment': 'temp_environment',
'parameter_defaults': expected_defaults,
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.put_object.assert_called_once_with(
'overcloud',
constants.PLAN_ENVIRONMENT,
mock_env_updated
)
heat.stacks.create.assert_called_once_with(
environment={},
files={},
stack_name='overcloud',
template={'heat_template_version': '2016-04-30'},
timeout_mins=1,
)
swift.delete_object.assert_called_once_with(
"overcloud-swift-rings", "swift-rings.tar.gz")
swift.copy_object.assert_called_once_with(
"overcloud-swift-rings", "swift-rings.tar.gz",
"overcloud-swift-rings/swift-rings.tar.gz-%d" % 1473366264)
@mock.patch('tripleo_workflows.actions.deployment.time')
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch(
'tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
def test_run_skip_deploy_identifier(
self, get_orchestration_client_mock,
mock_get_object_client, mock_get_template_contents,
mock_process_multiple_environments_and_files,
mock_time):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_get_object_client.return_value = swift
heat = mock.MagicMock()
heat.stacks.get.return_value = None
get_orchestration_client_mock.return_value = heat
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
'parameter_defaults': {'random_existing_data': 'a_value'},
}, default_flow_style=False)
swift.get_object.side_effect = (
({}, mock_env),
({}, mock_env),
swiftexceptions.ClientException('atest2')
)
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_process_multiple_environments_and_files.return_value = ({}, {})
mock_time.time.return_value = 1473366264
action = deployment.DeployStackAction(1, 'overcloud',
skip_deploy_identifier=True)
action.run(mock_ctx)
mock_env_updated = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'parameter_defaults': {'StackAction': 'CREATE',
'UpdateIdentifier': '',
'random_existing_data': 'a_value'},
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.put_object.assert_called_once_with(
constants.DEFAULT_CONTAINER_NAME,
constants.PLAN_ENVIRONMENT,
mock_env_updated
)
heat.stacks.create.assert_called_once_with(
environment={},
files={},
stack_name='overcloud',
template={'heat_template_version': '2016-04-30'},
timeout_mins=1,
)
swift.delete_object.assert_called_once_with(
"overcloud-swift-rings", "swift-rings.tar.gz")
swift.copy_object.assert_called_once_with(
"overcloud-swift-rings", "swift-rings.tar.gz",
"overcloud-swift-rings/swift-rings.tar.gz-%d" % 1473366264)
def test_set_tls_parameters_no_ca_found(self):
action = deployment.DeployStackAction(1, 'overcloud',
skip_deploy_identifier=True)
my_params = {}
my_env = {'parameter_defaults': {}}
action.set_tls_parameters(parameters=my_params, env=my_env,
local_ca_path='/tmp/my-unexistent-file.txt')
self.assertEqual(my_params, {})
def test_set_tls_parameters_ca_found_no_camap_provided(self):
action = deployment.DeployStackAction(1, 'overcloud',
skip_deploy_identifier=True)
my_params = {}
my_env = {'parameter_defaults': {}}
with tempfile.NamedTemporaryFile() as ca_file:
ca_file.write(b'FAKE CA CERT')
ca_file.flush()
action.set_tls_parameters(parameters=my_params, env=my_env,
local_ca_path=ca_file.name)
self.assertIn('CAMap', my_params)
self.assertIn('undercloud-ca', my_params['CAMap'])
self.assertIn('content', my_params['CAMap']['undercloud-ca'])
self.assertEqual('FAKE CA CERT',
my_params['CAMap']['undercloud-ca']['content'])
def test_set_tls_parameters_ca_found_camap_provided(self):
action = deployment.DeployStackAction(1, 'overcloud',
skip_deploy_identifier=True)
my_params = {}
my_env = {
'parameter_defaults': {
'CAMap': {'overcloud-ca': {'content': 'ANOTER FAKE CERT'}}}}
with tempfile.NamedTemporaryFile() as ca_file:
ca_file.write(b'FAKE CA CERT')
ca_file.flush()
action.set_tls_parameters(parameters=my_params, env=my_env,
local_ca_path=ca_file.name)
self.assertIn('CAMap', my_params)
self.assertIn('undercloud-ca', my_params['CAMap'])
self.assertIn('content', my_params['CAMap']['undercloud-ca'])
self.assertEqual('FAKE CA CERT',
my_params['CAMap']['undercloud-ca']['content'])
self.assertIn('overcloud-ca', my_params['CAMap'])
self.assertIn('content', my_params['CAMap']['overcloud-ca'])
self.assertEqual('ANOTER FAKE CERT',
my_params['CAMap']['overcloud-ca']['content'])
class OvercloudRcActionTestCase(base.TestCase):
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
def test_no_stack(self, mock_get_orchestration, mock_get_object):
mock_ctx = mock.MagicMock()
not_found = heat_exc.HTTPNotFound()
mock_get_orchestration.return_value.stacks.get.side_effect = not_found
action = deployment.OvercloudRcAction("overcast")
result = action.run(mock_ctx)
self.assertEqual(result.error, (
"The Heat stack overcast could not be found. Make sure you have "
"deployed before calling this action."
))
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
def test_no_env(self, mock_get_orchestration, mock_get_object):
mock_ctx = mock.MagicMock()
mock_get_object.return_value.get_object.side_effect = (
swiftexceptions.ClientException("overcast"))
action = deployment.OvercloudRcAction("overcast")
result = action.run(mock_ctx)
self.assertEqual(result.error, "Error retrieving environment for plan "
"overcast: overcast")
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
def test_no_password(self, mock_get_orchestration, mock_get_object):
mock_ctx = mock.MagicMock()
mock_get_object.return_value.get_object.return_value = (
{}, "version: 1.0")
action = deployment.OvercloudRcAction("overcast")
result = action.run(mock_ctx)
self.assertEqual(
result.error,
"Unable to find the AdminPassword in the plan environment.")
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_workflows.utils.overcloudrc.create_overcloudrc')
@mock.patch('tripleo_workflows.actions.base.TripleOAction.'
'get_orchestration_client')
def test_success(self, mock_get_orchestration, mock_create_overcloudrc,
mock_get_object):
mock_ctx = mock.MagicMock()
mock_env = """
version: 1.0
template: overcloud.yaml
environments:
- path: overcloud-resource-registry-puppet.yaml
- path: environments/services/sahara.yaml
parameter_defaults:
BlockStorageCount: 42
OvercloudControlFlavor: yummy
passwords:
AdminPassword: SUPERSECUREPASSWORD
"""
mock_get_object.return_value.get_object.return_value = ({}, mock_env)
mock_create_overcloudrc.return_value = {
"overcloudrc": "fake overcloudrc"
}
action = deployment.OvercloudRcAction("overcast")
result = action.run(mock_ctx)
self.assertEqual(result, {"overcloudrc": "fake overcloudrc"})
| true | true |
f73b5c1e79c92fa6ef6f381bd62a1507aff098fb | 1,393 | py | Python | gpipsfs/tests/test_main.py | brynickson/gpipsfs | 88b352d8767dae61c90e6d7904d9af42bc3ec4b2 | [
"BSD-3-Clause"
] | 13 | 2017-04-18T21:39:22.000Z | 2022-01-06T03:54:16.000Z | gpipsfs/tests/test_main.py | brynickson/gpipsfs | 88b352d8767dae61c90e6d7904d9af42bc3ec4b2 | [
"BSD-3-Clause"
] | 4 | 2015-05-20T14:06:55.000Z | 2018-07-20T00:57:49.000Z | gpipsfs/tests/test_main.py | brynickson/gpipsfs | 88b352d8767dae61c90e6d7904d9af42bc3ec4b2 | [
"BSD-3-Clause"
] | 10 | 2015-05-19T23:43:12.000Z | 2022-01-06T03:52:37.000Z | import gpipsfs
def test_coron():
gpi = gpipsfs.GPI()
gpi.obsmode='H_coron'
psf = gpi.calc_psf(monochromatic=1.6e-6)
assert psf[0].data.sum() < 5e-4
def test_direct():
gpi = gpipsfs.GPI()
gpi.obsmode='H_direct'
psf = gpi.calc_psf(monochromatic=1.6e-6)
assert psf[0].data.sum() > 0.99
def test_unblocked():
gpi = gpipsfs.GPI()
gpi.obsmode='H_unblocked'
psf = gpi.calc_psf(monochromatic=1.6e-6)
assert psf[0].data.sum() > 0.35
assert psf[0].data.sum() < 0.40
def test_obsmode():
def check_modes(gpi, apod, occ, lyot, filt):
assert gpi.apodizer == apod, 'Got unexpected apodizer value. Was {}, expected {}'.format(gpi.apodizer, apod)
assert gpi.occulter == occ, 'Got unexpected occulter value. Was {}, expected {}'.format(gpi.occulter, occ)
assert gpi.lyotmask == lyot, 'Got unexpected lyotmask value. Was {}, expected {}'.format(gpi.lyotmask, lyot)
assert gpi.filter == filt, 'Got unexpected filter value. Was {}, expected {}'.format(gpi.filter, filt)
gpi = gpipsfs.GPI()
gpi.obsmode='H_direct'
check_modes(gpi, 'CLEAR','SCIENCE','Open', 'H')
gpi.obsmode='H_coron'
check_modes(gpi, 'H','H','080m12_04', 'H')
gpi.obsmode='K1_unblocked'
check_modes(gpi, 'K1','SCIENCE','080m12_06_03', 'K1')
gpi.obsmode='NRM_J'
check_modes(gpi, 'NRM','SCIENCE','Open', 'J')
| 27.86 | 116 | 0.637473 | import gpipsfs
def test_coron():
gpi = gpipsfs.GPI()
gpi.obsmode='H_coron'
psf = gpi.calc_psf(monochromatic=1.6e-6)
assert psf[0].data.sum() < 5e-4
def test_direct():
gpi = gpipsfs.GPI()
gpi.obsmode='H_direct'
psf = gpi.calc_psf(monochromatic=1.6e-6)
assert psf[0].data.sum() > 0.99
def test_unblocked():
gpi = gpipsfs.GPI()
gpi.obsmode='H_unblocked'
psf = gpi.calc_psf(monochromatic=1.6e-6)
assert psf[0].data.sum() > 0.35
assert psf[0].data.sum() < 0.40
def test_obsmode():
def check_modes(gpi, apod, occ, lyot, filt):
assert gpi.apodizer == apod, 'Got unexpected apodizer value. Was {}, expected {}'.format(gpi.apodizer, apod)
assert gpi.occulter == occ, 'Got unexpected occulter value. Was {}, expected {}'.format(gpi.occulter, occ)
assert gpi.lyotmask == lyot, 'Got unexpected lyotmask value. Was {}, expected {}'.format(gpi.lyotmask, lyot)
assert gpi.filter == filt, 'Got unexpected filter value. Was {}, expected {}'.format(gpi.filter, filt)
gpi = gpipsfs.GPI()
gpi.obsmode='H_direct'
check_modes(gpi, 'CLEAR','SCIENCE','Open', 'H')
gpi.obsmode='H_coron'
check_modes(gpi, 'H','H','080m12_04', 'H')
gpi.obsmode='K1_unblocked'
check_modes(gpi, 'K1','SCIENCE','080m12_06_03', 'K1')
gpi.obsmode='NRM_J'
check_modes(gpi, 'NRM','SCIENCE','Open', 'J')
| true | true |
f73b5c7e264fbb04fc6fa186f79cd693449b5d9f | 1,115 | py | Python | backend/transaction/schema.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | 2 | 2018-10-23T00:40:53.000Z | 2021-05-31T08:19:40.000Z | backend/transaction/schema.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | null | null | null | backend/transaction/schema.py | elielagmay/react-budgeteer | 49a25dbd6dd6ea5d8bc93421eefbc12808f585af | [
"Unlicense"
] | null | null | null | import graphene
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql_relay.node.node import from_global_id
from . import models
class TransactionNode(DjangoObjectType):
class Meta:
model = models.Transaction
filter_fields = {
'payee': ['exact'],
'date': ['exact', 'lt', 'lte', 'gt', 'gte']
}
interfaces = (graphene.relay.Node, )
class EntryNode(DjangoObjectType):
class Meta:
model = models.Entry
filter_fields = ['account', 'is_cleared']
interfaces = (graphene.relay.Node, )
class Query(object):
transaction_list = DjangoFilterConnectionField(
TransactionNode,
ledger_id=graphene.ID(required=True)
)
def resolve_transaction_list(self, info, **kwargs):
node, ledger_id = from_global_id(kwargs.get('ledger_id'))
assert node == 'LedgerNode'
return models.Transaction.objects.filter(
ledger_id=ledger_id,
ledger__creator=info.context.user
).order_by('-date', 'id')
| 29.342105 | 65 | 0.66009 | import graphene
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from graphql_relay.node.node import from_global_id
from . import models
class TransactionNode(DjangoObjectType):
class Meta:
model = models.Transaction
filter_fields = {
'payee': ['exact'],
'date': ['exact', 'lt', 'lte', 'gt', 'gte']
}
interfaces = (graphene.relay.Node, )
class EntryNode(DjangoObjectType):
class Meta:
model = models.Entry
filter_fields = ['account', 'is_cleared']
interfaces = (graphene.relay.Node, )
class Query(object):
transaction_list = DjangoFilterConnectionField(
TransactionNode,
ledger_id=graphene.ID(required=True)
)
def resolve_transaction_list(self, info, **kwargs):
node, ledger_id = from_global_id(kwargs.get('ledger_id'))
assert node == 'LedgerNode'
return models.Transaction.objects.filter(
ledger_id=ledger_id,
ledger__creator=info.context.user
).order_by('-date', 'id')
| true | true |
f73b5cbe03c416b9c5425c7ecbdfdda0e06c6149 | 238 | py | Python | sdk/python/pulumi_kubernetes/extensions/__init__.py | kulado/kulado-kubernetes | ecb72f9b25f6dbbae41f00c82388b1ca32329cc7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/extensions/__init__.py | kulado/kulado-kubernetes | ecb72f9b25f6dbbae41f00c82388b1ca32329cc7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/extensions/__init__.py | kulado/kulado-kubernetes | ecb72f9b25f6dbbae41f00c82388b1ca32329cc7 | [
"Apache-2.0"
] | 1 | 2019-08-20T22:51:57.000Z | 2019-08-20T22:51:57.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Kulado Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Make subpackages available:
__all__ = [
"v1beta1",
]
| 26.444444 | 81 | 0.680672 |
# Make subpackages available:
__all__ = [
"v1beta1",
]
| true | true |
f73b5ced4b615cbfe356275292c3fc0b19c10053 | 11,037 | py | Python | as_download.py | frozenburst/download_audioset | a4ce2fbdeaf23c155717800bd17a986b5c1f51ad | [
"Apache-2.0"
] | null | null | null | as_download.py | frozenburst/download_audioset | a4ce2fbdeaf23c155717800bd17a986b5c1f51ad | [
"Apache-2.0"
] | null | null | null | as_download.py | frozenburst/download_audioset | a4ce2fbdeaf23c155717800bd17a986b5c1f51ad | [
"Apache-2.0"
] | null | null | null | '''
================================================
DOWNLOAD_AUDIOSET REPOSITORY
================================================
Original:
repository name: download_audioset
repository version: 1.0
repository link: https://github.com/jim-schwoebel/download_audioset
author: Jim Schwoebel
author contact: js@neurolex.co
description: downloads the raw audio files from AudioSet (released by Google).
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-11-08
Edit:
repository name: download_audioset
repository version: 1.1
repository link: https://github.com/frozenburst/download_audioset
author: POYU WU
release date: 2020-11-10
This code (download_audioset) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
SPECIAL NOTES
================================================
This script parses through the entire balanced audioset dataset and downloads
all the raw audio files. The files are arranged in folders according to their
representative classes.
Please ensure that you have roughly 35GB of free space on your computer before
downloading the files. Note that it may take up to 2 days to fully download
all the files.
Enjoy! - :)
#-Jim
================================================
LICENSE TERMS
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
SERVICE STATEMENT
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ develop@neurolex.co.
usage: as_download.py [options]
options:
--data_pth=<data path>
--label_pth=<labels.xlsx>
--segment_file=<xlsx file>
--partial=<0, 1, 2, ...> # The unbalance csv could split to parts for parallel.
'''
################################################################################
## IMPORT STATEMENTS ##
################################################################################
import pafy, os, shutil, time, ffmpy
import os.path as op
import pandas as pd
import soundfile as sf
from natsort import natsorted
from tqdm import tqdm
from pathlib import Path
from docopt import docopt
################################################################################
## HELPER FUNCTIONS ##
################################################################################
#function to clean labels
def convertlabels(sortlist,labels,textlabels):
clabels=list()
# Debug for sortlist data type, split with each label ids.
sortlist = sortlist.split(',')
for i in range(len(sortlist)):
#find index in list corresponding
index=labels.index(sortlist[i])
clabel=textlabels[index]
#pull out converted label
clabels.append(clabel)
return clabels
def download_audio(link):
listdir=os.listdir()
cmd = f"youtube-dl --quiet -f 'bestaudio[ext=m4a]' '{link}'"
print(cmd)
os.system(cmd)
listdir2=os.listdir()
filename=''
for i in range(len(listdir2)):
if listdir2[i] not in listdir and listdir2[i].endswith('.m4a'):
filename=listdir2[i]
break
return filename
################################################################################
## MAIN SCRIPT ##
################################################################################
if __name__ == '__main__':
args = docopt(__doc__)
print(args)
data_pth = args['--data_pth']
label_pth = args['--label_pth']
segment_file = args['--segment_file']
partial = args['--partial']
if data_pth is None:
raise ValueError("Please set the path for model's output.")
if label_pth is None:
raise ValueError("Please set the path for model's output.")
if segment_file is None:
raise ValueError("Please set the path for model's output.")
if partial is not None:
print("Partial detected. The naming of wav would follow the partial name.")
defaultdir=os.getcwd()
os.chdir(defaultdir)
#load labels of the videos
#number, label, words
loadfile=pd.read_excel(label_pth)
number=loadfile.iloc[:,0].tolist()
labels=loadfile.iloc[:,1].tolist()
textlabels=loadfile.iloc[:,2].tolist()
#remove spaces for folders
for i in range(len(textlabels)):
textlabels[i]=textlabels[i].replace(' ','')
#now load data for download
xlsx_filename = segment_file
if op.isfile(xlsx_filename) is False:
raise ValueError("Xlsx file of segment is not exits with value:", xlsx_filename)
loadfile2=pd.read_excel(xlsx_filename)
# ylabels have to be cleaned to make a good list (CSV --> LIST)
yid=loadfile2.iloc[:,0].tolist()[2:]
ystart=loadfile2.iloc[:,1].tolist()[2:]
yend=loadfile2.iloc[:,2].tolist()[2:]
ylabels=loadfile2.iloc[:,3].tolist()[2:]
dataset_dir = data_pth
if op.isdir(dataset_dir) is False:
raise ValueError("Dataset directory is not exits with path:", dataset_dir)
#make folders
if partial is not None:
# segment_folder_name = op.basename(xlsx_filename).split('.')[0]
# Easy method is the best solution.
segment_folder_name = 'unbalanced_train_segments'
else:
segment_folder_name = op.basename(xlsx_filename).split('.')[0]
try:
defaultdir2=op.join(dataset_dir, segment_folder_name)
os.chdir(defaultdir2)
except:
defaultdir2=op.join(dataset_dir, segment_folder_name)
os.mkdir(defaultdir2)
os.chdir(defaultdir2)
# Should implement the check of existed file as well.
# Implemented by frozenburst
existing_wavfiles=list()
for dirname in tqdm(sorted(Path(defaultdir2).glob('*'))):
if partial is not None:
for filename in sorted(Path(dirname).glob(f'{partial}_*')):
existing_wavfiles.append(op.basename(filename))
else:
for filename in sorted(Path(dirname).glob(f'*')):
existing_wavfiles.append(op.basename(filename))
# get last file checkpoint to leave off
existing_wavfiles=natsorted(existing_wavfiles)
print(existing_wavfiles)
try:
lastfile=int(existing_wavfiles[-1].split('.')[0][7:])
except:
lastfile=0
#iterate through entire CSV file, look for '--' if found, find index, delete section, then go to next index
slink='https://www.youtube.com/watch?v='
for i in tqdm(range(len(yid))):
if i < lastfile:
# print('Skipping, already downloaded file...')
continue
else:
link=slink+yid[i]
start=float(ystart[i])
end=float(yend[i])
# print(ylabels[i])
clabels=convertlabels(ylabels[i],labels,textlabels)
# print(clabels)
if clabels != []:
#change to the right directory
for j in range(len(clabels)):
newdir = op.join(defaultdir2, clabels[j])
if op.isdir(newdir) is False:
os.mkdir(newdir)
os.chdir(newdir)
#if it is the first download, pursue this path to download video
lastdir=os.getcwd()
if partial is not None:
filename_check = f'{partial}_snipped'+str(i)+'.wav'
else:
filename_check = 'snipped'+str(i)+'.wav'
if filename_check not in os.listdir():
try:
# use YouTube DL to download audio
filename=download_audio(link)
extension='.m4a'
#get file extension and convert to .wav for processing later
os.rename(filename,'%s%s'%(str(i),extension))
filename='%s%s'%(str(i),extension)
if extension not in ['.wav']:
xindex=filename.find(extension)
filename=filename[0:xindex]
ff=ffmpy.FFmpeg(
inputs={filename+extension:None},
outputs={filename+'.wav':None}
)
ff.run()
os.remove(filename+extension)
file=filename+'.wav'
data,samplerate=sf.read(file)
totalframes=len(data)
totalseconds=totalframes/samplerate
startsec=start
startframe=samplerate*startsec
endsec=end
endframe=samplerate*endsec
# print(startframe)
# print(endframe)
if partial is not None:
newname = f'{partial}_snipped'+file
else:
newname = 'snipped'+file
sf.write(newname, data[int(startframe):int(endframe)], samplerate)
snippedfile=newname
os.remove(file)
except:
print('no urls')
#sleep 3 second sleep to prevent IP from getting banned
time.sleep(2)
else:
print('skipping, already downloaded file...')
| 37.287162 | 111 | 0.542358 | true | true | |
f73b5e0ff24c3074a9cb2314daee337c069474cd | 7,366 | py | Python | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/drawing/tests/test_shape.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/drawing/tests/test_shape.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | null | null | null | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/drawing/tests/test_shape.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
import pytest
from openpyxl.xml.constants import CHART_DRAWING_NS
from openpyxl.xml.functions import Element, fromstring, tostring
from openpyxl.tests.helper import compare_xml
class DummyDrawing(object):
"""Shapes need charts which need drawings"""
width = 10
height = 20
class DummyChart(object):
"""Shapes need a chart to calculate their coordinates"""
width = 100
height = 100
def __init__(self):
self.drawing = DummyDrawing()
def _get_margin_left(self):
return 10
def _get_margin_top(self):
return 5
def get_x_units(self):
return 25
def get_y_units(self):
return 15
class TestShape(object):
def setup(self):
from ..shape import Shape
self.shape = Shape(chart=DummyChart())
def test_ctor(self):
s = self.shape
assert s.axis_coordinates == ((0, 0), (1, 1))
assert s.text is None
assert s.scheme == "accent1"
assert s.style == "rect"
assert s.border_color == "000000"
assert s.color == "FFFFFF"
assert s.text_color == "000000"
assert s.border_width == 0
def test_border_color(self):
s = self.shape
s.border_color = "BBBBBB"
assert s.border_color == "BBBBBB"
def test_color(self):
s = self.shape
s.color = "000000"
assert s.color == "000000"
def test_text_color(self):
s = self.shape
s.text_color = "FF0000"
assert s.text_color == "FF0000"
def test_border_width(self):
s = self.shape
s.border_width = 50
assert s.border_width == 50
def test_coordinates(self):
s = self.shape
s.coordinates = ((0, 0), (60, 80))
assert s.axis_coordinates == ((0, 0), (60, 80))
assert s.coordinates == (1, 1, 1, 1)
def test_pct(self):
s = self.shape
assert s._norm_pct(10) == 1
assert s._norm_pct(0.5) == 0.5
assert s._norm_pct(-10) == 0
class TestShapeWriter(object):
def setup(self):
from ..shape import ShapeWriter
from ..shape import Shape
chart = DummyChart()
self.shape = Shape(chart=chart, text="My first chart")
self.sw = ShapeWriter(shapes=[self.shape])
def test_write(self):
xml = self.sw.write(0)
tree = fromstring(xml)
expected = """
<c:userShapes xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart">
<cdr:relSizeAnchor xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing">
<cdr:from>
<cdr:x>1</cdr:x>
<cdr:y>1</cdr:y>
</cdr:from>
<cdr:to>
<cdr:x>1</cdr:x>
<cdr:y>1</cdr:y>
</cdr:to>
<cdr:sp macro="" textlink="">
<cdr:nvSpPr>
<cdr:cNvPr id="0" name="shape 0" />
<cdr:cNvSpPr />
</cdr:nvSpPr>
<cdr:spPr>
<a:xfrm xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:off x="0" y="0" />
<a:ext cx="0" cy="0" />
</a:xfrm>
<a:prstGeom prst="rect" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:avLst />
</a:prstGeom>
<a:solidFill xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:srgbClr val="FFFFFF" />
</a:solidFill>
<a:ln w="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:solidFill>
<a:srgbClr val="000000" />
</a:solidFill>
</a:ln>
</cdr:spPr>
<cdr:style>
<a:lnRef idx="2" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1">
<a:shade val="50000" />
</a:schemeClr>
</a:lnRef>
<a:fillRef idx="1" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1" />
</a:fillRef>
<a:effectRef idx="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1" />
</a:effectRef>
<a:fontRef idx="minor" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="lt1" />
</a:fontRef>
</cdr:style>
<cdr:txBody>
<a:bodyPr vertOverflow="clip" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" />
<a:lstStyle xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" />
<a:p xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:r>
<a:rPr lang="en-US">
<a:solidFill>
<a:srgbClr val="000000" />
</a:solidFill>
</a:rPr>
<a:t>My first chart</a:t>
</a:r>
</a:p>
</cdr:txBody>
</cdr:sp>
</cdr:relSizeAnchor>
</c:userShapes>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_write_text(self):
root = Element("{%s}test" % CHART_DRAWING_NS)
self.sw._write_text(root, self.shape)
xml = tostring(root)
expected = """<cdr:test xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"><cdr:txBody><a:bodyPr vertOverflow="clip" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" /><a:lstStyle xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" /><a:p xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:r><a:rPr lang="en-US"><a:solidFill><a:srgbClr val="000000" /></a:solidFill></a:rPr><a:t>My first chart</a:t></a:r></a:p></cdr:txBody></cdr:test>"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_write_style(self):
root = Element("{%s}test" % CHART_DRAWING_NS)
self.sw._write_style(root)
xml = tostring(root)
expected = """<cdr:test xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"><cdr:style><a:lnRef idx="2" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1"><a:shade val="50000" /></a:schemeClr></a:lnRef><a:fillRef idx="1" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1" /></a:fillRef><a:effectRef idx="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1" /></a:effectRef><a:fontRef idx="minor" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="lt1" /></a:fontRef></cdr:style></cdr:test>"""
diff = compare_xml(xml, expected)
assert diff is None, diff
| 38.768421 | 674 | 0.543171 | from __future__ import absolute_import
import pytest
from openpyxl.xml.constants import CHART_DRAWING_NS
from openpyxl.xml.functions import Element, fromstring, tostring
from openpyxl.tests.helper import compare_xml
class DummyDrawing(object):
width = 10
height = 20
class DummyChart(object):
width = 100
height = 100
def __init__(self):
self.drawing = DummyDrawing()
def _get_margin_left(self):
return 10
def _get_margin_top(self):
return 5
def get_x_units(self):
return 25
def get_y_units(self):
return 15
class TestShape(object):
def setup(self):
from ..shape import Shape
self.shape = Shape(chart=DummyChart())
def test_ctor(self):
s = self.shape
assert s.axis_coordinates == ((0, 0), (1, 1))
assert s.text is None
assert s.scheme == "accent1"
assert s.style == "rect"
assert s.border_color == "000000"
assert s.color == "FFFFFF"
assert s.text_color == "000000"
assert s.border_width == 0
def test_border_color(self):
s = self.shape
s.border_color = "BBBBBB"
assert s.border_color == "BBBBBB"
def test_color(self):
s = self.shape
s.color = "000000"
assert s.color == "000000"
def test_text_color(self):
s = self.shape
s.text_color = "FF0000"
assert s.text_color == "FF0000"
def test_border_width(self):
s = self.shape
s.border_width = 50
assert s.border_width == 50
def test_coordinates(self):
s = self.shape
s.coordinates = ((0, 0), (60, 80))
assert s.axis_coordinates == ((0, 0), (60, 80))
assert s.coordinates == (1, 1, 1, 1)
def test_pct(self):
s = self.shape
assert s._norm_pct(10) == 1
assert s._norm_pct(0.5) == 0.5
assert s._norm_pct(-10) == 0
class TestShapeWriter(object):
def setup(self):
from ..shape import ShapeWriter
from ..shape import Shape
chart = DummyChart()
self.shape = Shape(chart=chart, text="My first chart")
self.sw = ShapeWriter(shapes=[self.shape])
def test_write(self):
xml = self.sw.write(0)
tree = fromstring(xml)
expected = """
<c:userShapes xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart">
<cdr:relSizeAnchor xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing">
<cdr:from>
<cdr:x>1</cdr:x>
<cdr:y>1</cdr:y>
</cdr:from>
<cdr:to>
<cdr:x>1</cdr:x>
<cdr:y>1</cdr:y>
</cdr:to>
<cdr:sp macro="" textlink="">
<cdr:nvSpPr>
<cdr:cNvPr id="0" name="shape 0" />
<cdr:cNvSpPr />
</cdr:nvSpPr>
<cdr:spPr>
<a:xfrm xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:off x="0" y="0" />
<a:ext cx="0" cy="0" />
</a:xfrm>
<a:prstGeom prst="rect" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:avLst />
</a:prstGeom>
<a:solidFill xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:srgbClr val="FFFFFF" />
</a:solidFill>
<a:ln w="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:solidFill>
<a:srgbClr val="000000" />
</a:solidFill>
</a:ln>
</cdr:spPr>
<cdr:style>
<a:lnRef idx="2" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1">
<a:shade val="50000" />
</a:schemeClr>
</a:lnRef>
<a:fillRef idx="1" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1" />
</a:fillRef>
<a:effectRef idx="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1" />
</a:effectRef>
<a:fontRef idx="minor" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="lt1" />
</a:fontRef>
</cdr:style>
<cdr:txBody>
<a:bodyPr vertOverflow="clip" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" />
<a:lstStyle xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" />
<a:p xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:r>
<a:rPr lang="en-US">
<a:solidFill>
<a:srgbClr val="000000" />
</a:solidFill>
</a:rPr>
<a:t>My first chart</a:t>
</a:r>
</a:p>
</cdr:txBody>
</cdr:sp>
</cdr:relSizeAnchor>
</c:userShapes>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_write_text(self):
root = Element("{%s}test" % CHART_DRAWING_NS)
self.sw._write_text(root, self.shape)
xml = tostring(root)
expected = """<cdr:test xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"><cdr:txBody><a:bodyPr vertOverflow="clip" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" /><a:lstStyle xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" /><a:p xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:r><a:rPr lang="en-US"><a:solidFill><a:srgbClr val="000000" /></a:solidFill></a:rPr><a:t>My first chart</a:t></a:r></a:p></cdr:txBody></cdr:test>"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_write_style(self):
root = Element("{%s}test" % CHART_DRAWING_NS)
self.sw._write_style(root)
xml = tostring(root)
expected = """<cdr:test xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"><cdr:style><a:lnRef idx="2" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1"><a:shade val="50000" /></a:schemeClr></a:lnRef><a:fillRef idx="1" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1" /></a:fillRef><a:effectRef idx="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1" /></a:effectRef><a:fontRef idx="minor" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="lt1" /></a:fontRef></cdr:style></cdr:test>"""
diff = compare_xml(xml, expected)
assert diff is None, diff
| true | true |
f73b5e276b69634a1739f519d1cdbf87ab8c964d | 9,862 | py | Python | test_app/tests/test_lare_request.py | iekadou/lare.js | 9f01fcb585d3532d4ab6d169978d3abde1f5796a | [
"MIT"
] | null | null | null | test_app/tests/test_lare_request.py | iekadou/lare.js | 9f01fcb585d3532d4ab6d169978d3abde1f5796a | [
"MIT"
] | 1 | 2016-05-20T08:31:09.000Z | 2016-05-20T08:31:09.000Z | test_app/tests/test_lare_request.py | lare-team/lare.js | 9f01fcb585d3532d4ab6d169978d3abde1f5796a | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from selenium.common.exceptions import NoSuchElementException
from .helpers import SeleniumTestCase
class LareRequestTest(SeleniumTestCase):
    """End-to-end tests for lare.js partial page loads, driven by Selenium.

    Each test starts on the index page, triggers a lare navigation (either a
    link click or the JavaScript API), then verifies that title, content and
    meta tags were swapped in and that the lare lifecycle events were
    recorded as body attributes by the test page's event handlers.
    """

    # --- helpers ----------------------------------------------------------

    def _wait_for_title(self, title):
        """Block until the browser title equals ``title``."""
        self.wait.until(lambda browser: browser.title == title)

    def _assert_page(self, prefix):
        """Assert title and content of the page named ``prefix``."""
        self.assert_title(prefix + '-title')
        self.assert_content(prefix + '-content')

    def _open_index(self, check_script=True):
        """Load the index page and verify its pristine, pre-lare state.

        ``check_script`` also asserts the presence of the marker script tag
        that lare is expected to remove on navigation.
        """
        self.browser_get_reverse('index')
        self._assert_page('index')
        self.assert_body_attr('lare-done', None)
        if check_script:
            self.browser.find_element_by_class_name('to_be_removed_script')

    def _click_through(self, link_selector, title):
        """Click ``link_selector`` and wait for the page titled ``title``."""
        self.browser.find_element_by_css_selector(link_selector).click()
        self._wait_for_title(title)

    def _request_about_via_js(self):
        """Navigate to the about page through the lare JavaScript API."""
        self.browser.execute_script(
            "$(document).lare.request('{0}')".format(reverse('about')))
        self._wait_for_title('about-title')

    def _assert_about_swapped_in(self):
        """Assert the about page replaced the index content via lare."""
        self._assert_page('about')
        keywords_metatag = self.browser.find_element_by_css_selector(
            'meta[name="keywords"]')
        self.assertEqual(
            keywords_metatag.get_attribute('content'), 'This is a test')
        # lare must have removed the index-only marker script.
        with self.assertRaises(NoSuchElementException):
            self.browser.find_element_by_class_name('to_be_removed_script')

    def _assert_lifecycle_attrs(self, click):
        """Assert the full set of lare lifecycle attributes for a success.

        ``click`` is ``'true'`` for link-click navigation and ``None`` for
        navigation through the JavaScript API (no click event fires).
        """
        self.assert_body_attr('lare-click', click)
        self.assert_body_attr('lare-before-send', 'true')
        self.assert_body_attr('lare-send', 'true')
        self.assert_body_attr('lare-timeout', None)
        self.assert_body_attr('lare-start', 'true')
        self.assert_body_attr('lare-success', 'true')
        self.assert_body_attr('lare-done', 'true')
        self.assert_body_attr('lare-fail', None)
        self.assert_body_attr('lare-always', 'true')
        self.assert_body_attr('lare-end', 'true')

    def _assert_lifecycle_attrs_cleared(self):
        """Assert every lare lifecycle body attribute has been reset."""
        for attr in ('lare-click', 'lare-before-send', 'lare-send',
                     'lare-timeout', 'lare-start', 'lare-success',
                     'lare-done', 'lare-fail', 'lare-always', 'lare-end'):
            self.assert_body_attr(attr, None)

    def _check_done_and_reset(self):
        """Assert lare finished, then clear the lifecycle attributes."""
        self.assert_body_attr('lare-done', 'true')
        self.reset_body_attrs()
        self.assert_body_attr('lare-done', None)

    # --- tests ------------------------------------------------------------

    def test_lare_request_depth_1(self):
        self._open_index()
        self._click_through('#about-link', 'about-title')
        self._assert_about_swapped_in()
        self._assert_lifecycle_attrs(click='true')
        self.reset_body_attrs()
        self._assert_lifecycle_attrs_cleared()

    def test_lare_request_depth_1_and_back(self):
        self._open_index()
        self._click_through('#about-link', 'about-title')
        self._assert_about_swapped_in()
        self._check_done_and_reset()
        self.browser_go_back()
        self._assert_page('index')
        self.assert_body_attr('lare-done', None)
        self.browser.find_element_by_class_name('to_be_removed_script')

    def test_lare_request_javascript(self):
        self._open_index()
        self._request_about_via_js()
        self._assert_about_swapped_in()
        # No click event: navigation was triggered programmatically.
        self._assert_lifecycle_attrs(click=None)

    def test_lare_request_javascript_and_back(self):
        self._open_index()
        self._request_about_via_js()
        self._assert_about_swapped_in()
        self._check_done_and_reset()
        self.browser_go_back()
        self._assert_page('index')
        self.assert_body_attr('lare-done', None)
        self.browser.find_element_by_class_name('to_be_removed_script')

    def test_lare_request_depth_2_to_no_lare_and_back(self):
        self._open_index(check_script=False)
        self._click_through('#about-link', 'about-title')
        self._assert_page('about')
        self._check_done_and_reset()
        self._click_through('#project-link', 'project-title')
        self._assert_page('project')
        self._check_done_and_reset()
        self._click_through('#no-lare-response-link', 'no-lare-response-title')
        self._assert_page('no-lare-response')
        self.assert_body_attr('lare-done', None)
        self.browser_go_back()
        # On Chrome/Webkit caching will return the content out of the lare
        # xhr-response, not a new initial content.
        # fix #16 - lare load + fallback
        self.wait.until(
            lambda browser: browser.find_element_by_css_selector('#site'))
        self._wait_for_title('project-title')
        self._assert_page('project')
        self.assert_body_attr('lare-done', None)

    def test_lare_request_depth_2_to_no_lare_through_hard_load_and_back(self):
        self._open_index(check_script=False)
        self._click_through('#about-link', 'about-title')
        self._assert_page('about')
        self._check_done_and_reset()
        self._click_through('#project-link', 'project-title')
        self._assert_page('project')
        self._check_done_and_reset()
        # Leave lare entirely by forcing a full browser navigation.
        self.browser.execute_script(
            'location.href="{0}"'.format(reverse('no_lare_response')))
        self._wait_for_title('no-lare-response-title')
        self._assert_page('no-lare-response')
        self.assert_body_attr('lare-done', None)
        self.browser_go_back()
        # On Chrome/Webkit caching will return the content out of the lare
        # xhr-response, not a new initial content.
        # fix #16 - hard load
        self.wait.until(
            lambda browser: browser.find_element_by_css_selector('#site'))
        self._wait_for_title('project-title')
        self._assert_page('project')
        self.assert_body_attr('lare-done', None)
| 40.752066 | 115 | 0.685054 | from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from selenium.common.exceptions import NoSuchElementException
from .helpers import SeleniumTestCase
class LareRequestTest(SeleniumTestCase):
def test_lare_request_depth_1(self):
self.browser_get_reverse('index')
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
self.browser.find_element_by_class_name('to_be_removed_script')
about_link = self.browser.find_element_by_css_selector('#about-link')
about_link.click()
self.wait.until(lambda browser: browser.title == 'about-title')
self.assert_title('about-title')
self.assert_content('about-content')
keywords_metatag = self.browser.find_element_by_css_selector('meta[name="keywords"]')
self.assertEqual(keywords_metatag.get_attribute('content'), 'This is a test')
with self.assertRaises(NoSuchElementException):
self.browser.find_element_by_class_name('to_be_removed_script')
self.assert_body_attr('lare-click', 'true')
self.assert_body_attr('lare-before-send', 'true')
self.assert_body_attr('lare-send', 'true')
self.assert_body_attr('lare-timeout', None)
self.assert_body_attr('lare-start', 'true')
self.assert_body_attr('lare-success', 'true')
self.assert_body_attr('lare-done', 'true')
self.assert_body_attr('lare-fail', None)
self.assert_body_attr('lare-always', 'true')
self.assert_body_attr('lare-end', 'true')
self.reset_body_attrs()
self.assert_body_attr('lare-click', None)
self.assert_body_attr('lare-before-send', None)
self.assert_body_attr('lare-send', None)
self.assert_body_attr('lare-timeout', None)
self.assert_body_attr('lare-start', None)
self.assert_body_attr('lare-success', None)
self.assert_body_attr('lare-done', None)
self.assert_body_attr('lare-fail', None)
self.assert_body_attr('lare-always', None)
self.assert_body_attr('lare-end', None)
def test_lare_request_depth_1_and_back(self):
self.browser_get_reverse('index')
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
self.browser.find_element_by_class_name('to_be_removed_script')
about_link = self.browser.find_element_by_css_selector('#about-link')
about_link.click()
self.wait.until(lambda browser: browser.title == 'about-title')
self.assert_title('about-title')
self.assert_content('about-content')
keywords_metatag = self.browser.find_element_by_css_selector('meta[name="keywords"]')
self.assertEqual(keywords_metatag.get_attribute('content'), 'This is a test')
with self.assertRaises(NoSuchElementException):
self.browser.find_element_by_class_name('to_be_removed_script')
self.assert_body_attr('lare-done', 'true')
self.reset_body_attrs()
self.assert_body_attr('lare-done', None)
self.browser_go_back()
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
self.browser.find_element_by_class_name('to_be_removed_script')
def test_lare_request_javascript(self):
self.browser_get_reverse('index')
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
self.browser.find_element_by_class_name('to_be_removed_script')
self.browser.execute_script("$(document).lare.request('{0}')".format(reverse('about')))
self.wait.until(lambda browser: browser.title == 'about-title')
self.assert_title('about-title')
self.assert_content('about-content')
keywords_metatag = self.browser.find_element_by_css_selector('meta[name="keywords"]')
self.assertEqual(keywords_metatag.get_attribute('content'), 'This is a test')
with self.assertRaises(NoSuchElementException):
self.browser.find_element_by_class_name('to_be_removed_script')
self.assert_body_attr('lare-click', None)
self.assert_body_attr('lare-before-send', 'true')
self.assert_body_attr('lare-send', 'true')
self.assert_body_attr('lare-timeout', None)
self.assert_body_attr('lare-start', 'true')
self.assert_body_attr('lare-success', 'true')
self.assert_body_attr('lare-done', 'true')
self.assert_body_attr('lare-fail', None)
self.assert_body_attr('lare-always', 'true')
self.assert_body_attr('lare-end', 'true')
def test_lare_request_javascript_and_back(self):
self.browser_get_reverse('index')
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
self.browser.find_element_by_class_name('to_be_removed_script')
self.browser.execute_script("$(document).lare.request('{0}')".format(reverse('about')))
self.wait.until(lambda browser: browser.title == 'about-title')
self.assert_title('about-title')
self.assert_content('about-content')
keywords_metatag = self.browser.find_element_by_css_selector('meta[name="keywords"]')
self.assertEqual(keywords_metatag.get_attribute('content'), 'This is a test')
with self.assertRaises(NoSuchElementException):
self.browser.find_element_by_class_name('to_be_removed_script')
self.assert_body_attr('lare-done', 'true')
self.reset_body_attrs()
self.assert_body_attr('lare-done', None)
self.browser_go_back()
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
self.browser.find_element_by_class_name('to_be_removed_script')
def test_lare_request_depth_2_to_no_lare_and_back(self):
self.browser_get_reverse('index')
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
about_link = self.browser.find_element_by_css_selector('#about-link')
about_link.click()
self.wait.until(lambda browser: browser.title == 'about-title')
self.assert_title('about-title')
self.assert_content('about-content')
self.assert_body_attr('lare-done', 'true')
self.reset_body_attrs()
self.assert_body_attr('lare-done', None)
project_link = self.browser.find_element_by_css_selector('#project-link')
project_link.click()
self.wait.until(lambda browser: browser.title == 'project-title')
self.assert_title('project-title')
self.assert_content('project-content')
self.assert_body_attr('lare-done', 'true')
self.reset_body_attrs()
self.assert_body_attr('lare-done', None)
no_lare_response_link = self.browser.find_element_by_css_selector('#no-lare-response-link')
no_lare_response_link.click()
self.wait.until(lambda browser: browser.title == 'no-lare-response-title')
self.assert_title('no-lare-response-title')
self.assert_content('no-lare-response-content')
self.assert_body_attr('lare-done', None)
self.browser_go_back()
ambda browser: browser.find_element_by_css_selector('#site'))
self.wait.until(lambda browser: browser.title == 'project-title')
self.assert_title('project-title')
self.assert_content('project-content')
self.assert_body_attr('lare-done', None)
def test_lare_request_depth_2_to_no_lare_through_hard_load_and_back(self):
self.browser_get_reverse('index')
self.assert_title('index-title')
self.assert_content('index-content')
self.assert_body_attr('lare-done', None)
about_link = self.browser.find_element_by_css_selector('#about-link')
about_link.click()
self.wait.until(lambda browser: browser.title == 'about-title')
self.assert_title('about-title')
self.assert_content('about-content')
self.assert_body_attr('lare-done', 'true')
self.reset_body_attrs()
self.assert_body_attr('lare-done', None)
project_link = self.browser.find_element_by_css_selector('#project-link')
project_link.click()
self.wait.until(lambda browser: browser.title == 'project-title')
self.assert_title('project-title')
self.assert_content('project-content')
self.assert_body_attr('lare-done', 'true')
self.reset_body_attrs()
self.assert_body_attr('lare-done', None)
self.browser.execute_script('location.href="{0}"'.format(reverse('no_lare_response')))
self.wait.until(lambda browser: browser.title == 'no-lare-response-title')
self.assert_title('no-lare-response-title')
self.assert_content('no-lare-response-content')
self.assert_body_attr('lare-done', None)
self.browser_go_back()
ait.until(lambda browser: browser.find_element_by_css_selector('#site'))
self.wait.until(lambda browser: browser.title == 'project-title')
self.assert_title('project-title')
self.assert_content('project-content')
self.assert_body_attr('lare-done', None)
| true | true |
f73b5e8ec3430e389c2cf9ed9eb6b37bcc781a92 | 1,001 | py | Python | opencv/q23.py | wuwuwuyuanhang/python | eb5ac23cb46c4beeab1638fda963dd154b9db1b7 | [
"MIT"
] | 1 | 2021-08-02T09:22:34.000Z | 2021-08-02T09:22:34.000Z | opencv/q23.py | wuwuwuyuanhang/python | eb5ac23cb46c4beeab1638fda963dd154b9db1b7 | [
"MIT"
] | null | null | null | opencv/q23.py | wuwuwuyuanhang/python | eb5ac23cb46c4beeab1638fda963dd154b9db1b7 | [
"MIT"
] | null | null | null | # @Auther : wuwuwu
# @Time : 2020/4/15
# @File : q23.py
# @Description : 直方图均衡化
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
def histogramEqualization(img, Zmax=255):
    """Histogram equalization over all channels of ``img``.

    The cumulative histogram of intensity levels is used to remap every
    pixel so the output values are spread across ``[0, Zmax]``.

    :param img: H x W x C image array with intensity values in 0..255
    :param Zmax: maximum output intensity (default 255)
    :return: equalized image as uint8
    """
    H, W, C = img.shape
    S = H * W * C  # total number of samples across all channels
    dst = img.copy()
    sum_h = 0  # cumulative count of pixels with value <= current level
    # Bug fix: the original loop used range(1, 255), which skipped the
    # levels 0 and 255 and left those pixels unmapped in the output.
    for i in range(256):
        index = np.where(img == i)
        sum_h += len(img[index])
        dst[index] = Zmax / S * sum_h
    return np.clip(dst, 0, 255).astype(np.uint8)
if __name__ == '__main__':
    # Demo: equalize a sample image and compare input/output histograms.
    source = cv.imread('lenna.jpg')
    equalized = histogramEqualization(source, Zmax=255)
    for label, picture in (('input', source), ('output', equalized)):
        plt.figure()
        plt.hist(picture.flatten(), bins=255, rwidth=0.8, range=(0, 255))
        plt.title(label + ' histogram')
    plt.show()
    cv.imshow('input', source)
    cv.imshow('output', equalized)
    cv.waitKey(0)
cv.destroyAllWindows() | 22.75 | 65 | 0.59041 |
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
def histogramEqualization(img, Zmax=255):
H, W, C = img.shape
S = H * W * C
dst = img.copy()
sum_h = 0
for i in range(1, 255):
index = np.where(img == i)
sum_h += len(img[index])
dst[index] = Zmax / S * sum_h
return np.clip(dst, 0, 255).astype(np.uint8)
if __name__ == '__main__':
img = cv.imread('lenna.jpg')
dst = histogramEqualization(img, Zmax=255)
plt.figure()
plt.hist(img.flatten(), bins=255, rwidth=0.8, range=(0, 255))
plt.title('input histogram')
plt.figure()
plt.hist(dst.flatten(), bins=255, rwidth=0.8, range=(0, 255))
plt.title('output histogram')
plt.show()
cv.imshow('input', img)
cv.imshow('output', dst)
cv.waitKey(0)
cv.destroyAllWindows() | true | true |
f73b5ef50666564fef665b6054b2cf691e77900f | 2,187 | py | Python | othello/fast_bit_board_tester.py | yuishihara/Pythello | 1b4b91bfc8bd5d1bd0bdc96d3d289d8058c08b84 | [
"MIT"
] | null | null | null | othello/fast_bit_board_tester.py | yuishihara/Pythello | 1b4b91bfc8bd5d1bd0bdc96d3d289d8058c08b84 | [
"MIT"
] | null | null | null | othello/fast_bit_board_tester.py | yuishihara/Pythello | 1b4b91bfc8bd5d1bd0bdc96d3d289d8058c08b84 | [
"MIT"
] | null | null | null | from board import Board
from logging import getLogger, DEBUG, basicConfig
from libfastbb import FastBitBoard
import numpy as np
import utilities
class FastBitBoardTester(Board):
    """Board adapter that forwards every query to the native FastBitBoard.

    Used to exercise the compiled extension from Python and to dump its
    state for debugging.
    """

    def __init__(self, rows=8, columns=8):
        super(FastBitBoardTester, self).__init__()
        self._impl = FastBitBoard()
        self._rows = rows
        self._columns = columns
        self._logger = getLogger(__name__)
        self.shape = (rows, columns)

    def is_valid_move(self, move, player_color):
        """Delegate to FastBitBoard.is_valid_move."""
        return self._impl.is_valid_move(move, player_color)

    def apply_new_move(self, move, player_color):
        """Delegate to FastBitBoard.apply_valid_move."""
        return self._impl.apply_valid_move(move, player_color)

    def has_valid_move(self, player_color):
        """Delegate to FastBitBoard.has_valid_move."""
        return self._impl.has_valid_move(player_color)

    def is_end_state(self):
        """Delegate to FastBitBoard.is_end_state."""
        return self._impl.is_end_state()

    def is_empty_position(self, position):
        """Delegate to FastBitBoard.is_empty_position."""
        return self._impl.is_empty_position(position)

    def list_all_valid_moves(self, player_color):
        """Delegate to FastBitBoard.list_all_valid_moves."""
        return self._impl.list_all_valid_moves(player_color)

    def list_all_next_states(self, player_color):
        """Delegate to FastBitBoard.list_all_next_states."""
        return self._impl.list_all_next_states(player_color)

    def list_all_empty_positions(self):
        """Delegate to FastBitBoard.list_all_empty_positions."""
        return self._impl.list_all_empty_positions()

    def print_bit_board(self, binary):
        """Log ``binary`` as a rows x columns grid of 0/1 characters."""
        width = self._rows * self._columns
        bits = format(binary, '0' + str(width) + 'b')
        lines = []
        for row in range(self._rows):
            start = row * self._columns
            lines.append(str(row) + ":" + bits[start:start + self._columns] + '\n')
        self._logger.info("\n ABCDEFGH\n" + ''.join(lines))

    def as_numpy_matrix(self):
        """Delegate to FastBitBoard.as_numpy_matrix."""
        return self._impl.as_numpy_matrix()

    def next_board_state(self, move, player_color):
        """Delegate to FastBitBoard.next_board_state."""
        return self._impl.next_board_state(move, player_color)
if __name__ == "__main__":
    # Smoke test: dump the initial board and every board black can reach.
    basicConfig(level=DEBUG)
    log = getLogger(__name__)
    board = FastBitBoardTester()
    log.info("As numpy:\n %s", str(board.as_numpy_matrix()))
    for successor in board.list_all_next_states('black'):
        log.info("Next state:\n %s", str(successor.as_numpy_matrix()))
| 32.161765 | 73 | 0.677641 | from board import Board
from logging import getLogger, DEBUG, basicConfig
from libfastbb import FastBitBoard
import numpy as np
import utilities
class FastBitBoardTester(Board):
def __init__(self, rows=8, columns=8):
super(FastBitBoardTester, self).__init__()
self._impl = FastBitBoard()
self._rows = rows
self._columns = columns
self._logger = getLogger(__name__)
self.shape = (rows, columns)
def is_valid_move(self, move, player_color):
return self._impl.is_valid_move(move, player_color)
def apply_new_move(self, move, player_color):
return self._impl.apply_valid_move(move, player_color)
def has_valid_move(self, player_color):
return self._impl.has_valid_move(player_color)
def is_end_state(self):
return self._impl.is_end_state()
def is_empty_position(self, position):
return self._impl.is_empty_position(position)
def list_all_valid_moves(self, player_color):
return self._impl.list_all_valid_moves(player_color)
def list_all_next_states(self, player_color):
return self._impl.list_all_next_states(player_color)
def list_all_empty_positions(self):
return self._impl.list_all_empty_positions()
def print_bit_board(self, binary):
binary_string = str(
format(binary, '0' + str(self._rows * self._columns) + 'b'))
board = ''
for i in range(self._rows):
board += str(i) + ":" + \
binary_string[i * self._columns: (i + 1) * self._columns]
board += '\n'
self._logger.info("\n ABCDEFGH\n" + board)
def as_numpy_matrix(self):
return self._impl.as_numpy_matrix()
def next_board_state(self, move, player_color):
return self._impl.next_board_state(move, player_color)
if __name__ == "__main__":
basicConfig(level=DEBUG)
logger = getLogger(__name__)
fast_bb = FastBitBoardTester()
logger.info("As numpy:\n %s", str(fast_bb.as_numpy_matrix()))
next_states = fast_bb.list_all_next_states('black')
for state in next_states:
logger.info("Next state:\n %s", str(state.as_numpy_matrix()))
| true | true |
f73b5f789bb033d53575ff2de4de2cf83d105738 | 7,902 | py | Python | utils.py | voodoohop/Creative-Adversarial-Networks | 7d8632b7bfe12a698f61c442aa9c1a07d68d21c9 | [
"MIT"
] | 1 | 2020-10-20T02:15:10.000Z | 2020-10-20T02:15:10.000Z | utils.py | Chediak/Creative-Adversarial-Networks | 7d8632b7bfe12a698f61c442aa9c1a07d68d21c9 | [
"MIT"
] | null | null | null | utils.py | Chediak/Creative-Adversarial-Networks | 7d8632b7bfe12a698f61c442aa9c1a07d68d21c9 | [
"MIT"
] | 1 | 2020-10-20T02:13:10.000Z | 2020-10-20T02:13:10.000Z | """
Some codes from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import json
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
from glob import glob
import cv2
import imageio
import tensorflow as tf
import tensorflow.contrib.slim as slim
pp = pprint.PrettyPrinter()
get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
def show_all_variables():
    """Print every trainable TF variable with its shape and size via slim."""
    trainable = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(trainable, print_info=True)
def get_image(image_path, input_height, input_width,
              resize_height=64, resize_width=64,
              crop=True, grayscale=False):
    """Load an image from disk, crop/resize it and normalize to [-1, 1].

    On a bad image the path is printed and None is returned.
    """
    raw = imread(image_path, grayscale)
    try:
        return transform(raw, input_height, input_width,
                         resize_height, resize_width, crop)
    except (ValueError, AttributeError):
        # Both failure modes were handled identically in separate branches.
        print("Bad image. filepath: ", image_path)
def save_images(images, size, image_path):
    """Denormalize ``images`` from [-1, 1] and write them as one grid file."""
    rescaled = inverse_transform(images)
    return imsave(rescaled, size, image_path)
def imread(path, grayscale = False):
    """Read an image file into a float numpy array.

    Color reads are returned as RGB (OpenCV decodes BGR); grayscale reads
    return a single-channel array.  On a TypeError the path is printed and
    None is returned, matching the historical behavior.
    """
    try:
        img = cv2.imread(path)
        if grayscale:
            # Bug fix: the old branch passed the decoded array back into
            # cv2.imread(..., flatten=True) -- a scipy.misc.imread signature
            # that cv2 does not have -- so it always raised TypeError and
            # returned None.  Convert to grayscale instead.
            # np.float64 replaces the np.float alias removed in NumPy 1.24.
            return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float64)
        else:
            new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return new_img.astype(np.float64)
    except(TypeError):
        print(path)
#Do
def test_images(path_glob):
    """Attempt to decode every path in ``path_glob``; bad files print their path."""
    for image_path in path_glob:
        imread(image_path)
def merge_images(images, size):
    """Denormalize ``images`` to [0, 1]; ``size`` is accepted but unused."""
    return inverse_transform(images)
def merge(images, size):
    """Tile a batch of images into one size[0] x size[1] grid.

    ``images`` has shape (N, h, w, c) with c in {1, 3, 4}; single-channel
    batches yield a 2-D grid, color batches keep their channel axis.
    Raises ValueError for any other channel count.
    """
    h, w = images.shape[1], images.shape[2]
    channels = images.shape[3]
    if channels in (3, 4):
        canvas = np.zeros((h * size[0], w * size[1], channels))
        for idx, image in enumerate(images):
            row, col = divmod(idx, size[1])
            canvas[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = image
        return canvas
    if channels == 1:
        canvas = np.zeros((h * size[0], w * size[1]))
        for idx, image in enumerate(images):
            row, col = divmod(idx, size[1])
            canvas[row * h:(row + 1) * h, col * w:(col + 1) * w] = image[:, :, 0]
        return canvas
    raise ValueError('in merge(images,size) images parameter '
                     'must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
    """Merge ``images`` into one grid and write it to ``path``.

    ``images`` are expected in [0, 1] (i.e. already inverse-transformed).
    """
    image = np.squeeze(merge(images, size))
    # scipy.misc.imsave was removed in SciPy 1.2; write through imageio,
    # which this module already imports.  Values are rescaled to uint8
    # explicitly because imageio does not auto-scale float input.
    return imageio.imwrite(path, np.clip(image * 255.0, 0, 255).astype(np.uint8))
def center_crop(x, crop_h, crop_w,
                resize_h=64, resize_w=64):
    """Crop the centered crop_h x crop_w patch of ``x`` and resize it.

    ``crop_w`` defaults to ``crop_h`` (square crop) when None.
    """
    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    j = int(round((h - crop_h)/2.))
    i = int(round((w - crop_w)/2.))
    # scipy.misc.imresize was removed in SciPy 1.3; use cv2.resize, which
    # transform() in this module already relies on.  Note that cv2 takes
    # dsize as (width, height).
    return cv2.resize(x[j:j+crop_h, i:i+crop_w], (resize_w, resize_h))
def transform(image, input_height, input_width,
              resize_height=64, resize_width=64, crop=True):
    """Crop/resize ``image`` and normalize pixel values to [-1, 1]."""
    if crop:
        resized = center_crop(image, input_height, input_width,
                              resize_height, resize_width)
    else:
        # NOTE(review): cv2.resize takes dsize as (width, height); passing
        # (resize_height, resize_width) only matches for square targets --
        # preserved as-is, confirm intended for non-square sizes.
        resized = cv2.resize(image, (resize_height, resize_width))
    return np.array(resized) / 127.5 - 1.
def inverse_transform(images):
    """Map images from the [-1, 1] training range back to [0, 1]."""
    return (images + 1.) * 0.5
def make_gif(images, fname, duration=2, true_image=False):
    """Write ``images`` as an animated GIF spanning ``duration`` seconds.

    When ``true_image`` is False the frames are assumed to lie in [-1, 1]
    and are rescaled to uint8 [0, 255] before writing.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Map a timestamp to a frame index; clamp to the last frame when
        # t == duration pushes the index out of range.
        try:
            x = images[int(len(images)/duration*t)]
        except IndexError:  # was a bare except; only out-of-range is expected
            x = images[-1]

        if true_image:
            return x.astype(np.uint8)
        else:
            return ((x+1)/2*255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps = len(images) / duration)
def visualize(sess, dcgan, config, option):
    """Sample images from a trained GAN and save them to disk.

    Args:
        sess: live tf.Session holding the trained model.
        dcgan: model exposing ``sampler``, ``z``, ``z_dim`` and, for the
            conditional datasets, ``y``.
        config: run configuration providing ``batch_size`` and ``dataset``.
        option: 0 = one random batch; 1 = per-dimension z sweep;
            2 = sweeps from a random base z, saved as GIFs when possible;
            3 = sweeps saved as GIFs; 4 = one merged looping GIF.
    """
    image_frame_dim = int(math.ceil(config.batch_size**.5))
    if option == 0:
        z_sample = np.random.normal(0, 1, size=(config.batch_size, dcgan.z_dim))
        z_sample /= np.linalg.norm(z_sample, axis=0)
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
        # NOTE(review): option 0 writes to a hard-coded Google-Drive path
        # while all other options use ./samples/ -- confirm this is intended.
        save_images(samples, [image_frame_dim, image_frame_dim], '/content/gdrive/My Drive/samples/test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime()))
    elif option == 1:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            # NOTE(review): datasets other than mnist/wikiart leave `samples`
            # unbound and would raise NameError below -- confirm only these
            # two datasets reach this code path.
            if config.dataset == "mnist":
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            elif config.dataset == 'wikiart':
                y = np.random.choice(27, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 27))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx))
    elif option == 2:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in [random.randint(0, 99) for _ in xrange(100)]:
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (config.batch_size, 1))
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            if config.dataset == "mnist":
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            elif config.dataset == 'wikiart':
                y = np.random.choice(27, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 27))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            try:
                make_gif(samples, './samples/test_gif_%s.gif' % (idx))
            except Exception:  # was a bare except; GIF writing is best-effort
                save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime()))
    elif option == 3:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
    elif option == 4:
        image_set = []
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
        # Bug fix: range objects cannot be concatenated with + on Python 3;
        # materialize them as lists (forward sweep, then reverse, for a loop).
        new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10])
                         for idx in list(range(64)) + list(range(63, -1, -1))]
        make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
def get_max_end(path_dir, num_len=3, fname_pattern='*.jpg'):
    """Return the largest numeric suffix among files matching the pattern.

    File names are expected to end with ``num_len`` digits followed by a
    four-character extension (e.g. ``img_042.jpg``).  Returns 0 when no
    file matches.
    """
    best = 0
    for fname in glob(path_dir + fname_pattern):
        suffix = int(fname[-num_len - 4:-4])
        if suffix > best:
            best = suffix
    return best
def image_manifold_size(num_images):
    """Return the (rows, cols) of the most-square grid holding ``num_images``.

    Raises AssertionError when ``num_images`` cannot be laid out as
    floor(sqrt(n)) x ceil(sqrt(n)) images.
    """
    # Dropped the stray debug print(num_images) left in the original.
    manifold_h = int(np.floor(np.sqrt(num_images)))
    manifold_w = int(np.ceil(np.sqrt(num_images)))
    assert manifold_h * manifold_w == num_images
    return manifold_h, manifold_w
if __name__ == '__main__':
    # Manual benchmark: time a single get_image call on a sample file.
    print('Getting image!')
    import time
    t0 = time.time()
    get_image("albert-gleizes_acrobats-1916.jpg", 256, 256, 256, 256)
    end = time.time() - t0
    print ('Took : {:.4f}'.format(end)) | 34.50655 | 145 | 0.649709 | from __future__ import division
import math
import json
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
from glob import glob
import cv2
import imageio
import tensorflow as tf
import tensorflow.contrib.slim as slim
pp = pprint.PrettyPrinter()
get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def get_image(image_path, input_height, input_width,
resize_height=64, resize_width=64,
crop=True, grayscale=False):
image = imread(image_path, grayscale)
try:
return transform(image, input_height, input_width,
resize_height, resize_width, crop)
except ValueError :
print("Bad image. filepath: ", image_path)
except AttributeError:
print("Bad image. filepath: ", image_path)
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, grayscale = False):
try:
if (grayscale):
img = cv2.imread(path)
new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return cv2.imread(new_img, flatten = True).astype(np.float)
else:
img = cv2.imread(path)
new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return new_img.astype(np.float)
except(TypeError):
print(path)
def test_images(path_glob):
for path in path_glob:
imread(path)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter '
'must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def center_crop(x, crop_h, crop_w,
resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(
x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def transform(image, input_height, input_width,
resize_height=64, resize_width=64, crop=True):
if crop:
cropped_image = center_crop(
image, input_height, input_width,
resize_height, resize_width)
else:
cropped_image = cv2.resize(image, (resize_height, resize_width))
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return (images+1.)/2.
def make_gif(images, fname, duration=2, true_image=False):
import moviepy.editor as mpy
def make_frame(t):
try:
x = images[int(len(images)/duration*t)]
except:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x+1)/2*255).astype(np.uint8)
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_gif(fname, fps = len(images) / duration)
def visualize(sess, dcgan, config, option):
    """Sample the generator in several demo modes and save images/GIFs.

    Args:
        sess: TensorFlow session used to run dcgan.sampler.
        dcgan: model exposing .sampler, .z, .z_dim and (for conditional
            datasets) .y.
        config: run configuration with .batch_size and .dataset.
        option: 0 = one random batch as a PNG grid;
                1 = per-dimension z sweeps (conditional PNG grids);
                2 = random-z sweeps saved as GIFs (PNG fallback);
                3 = z sweeps saved as GIFs;
                4 = one merged GIF over all sweeps.

    Fixes vs. original: xrange() and range()+range() were Python-2-only
    and crashed under Python 3; replaced with range()/list concatenation.
    """
    image_frame_dim = int(math.ceil(config.batch_size**.5))
    if option == 0:
        z_sample = np.random.normal(0, 1, size=(config.batch_size, dcgan.z_dim))
        z_sample /= np.linalg.norm(z_sample, axis=0)
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
        save_images(samples, [image_frame_dim, image_frame_dim], '/content/gdrive/My Drive/samples/test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime()))
    elif option == 1:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in range(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            # Sweep dimension idx of z across the batch.
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            # NOTE(review): if config.dataset is neither "mnist" nor
            # "wikiart", `samples` is never assigned and save_images below
            # raises NameError — presumably only those datasets are used.
            if config.dataset == "mnist":
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            elif config.dataset == 'wikiart':
                y = np.random.choice(27, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 27))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx))
    elif option == 2:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in [random.randint(0, 99) for _ in range(100)]:
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (config.batch_size, 1))
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            if config.dataset == "mnist":
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            elif config.dataset == 'wikiart':
                y = np.random.choice(27, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 27))
                y_one_hot[np.arange(config.batch_size), y] = 1
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            try:
                make_gif(samples, './samples/test_gif_%s.gif' % (idx))
            except Exception:  # deliberate best-effort: fall back to a PNG
                save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime()))
    elif option == 3:
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in range(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
    elif option == 4:
        image_set = []
        values = np.arange(0, 1, 1./config.batch_size)
        for idx in range(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]
            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
        # Forward then backward sweep; list() calls are required on
        # Python 3 where range objects cannot be concatenated with +.
        new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10])
                         for idx in list(range(64)) + list(range(63, -1, -1))]
        make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
def get_max_end(path_dir, num_len=3, fname_pattern='*.jpg'):
    """Return the largest numeric suffix among files matching the pattern.

    File names are expected to end with *num_len* digits followed by a
    4-character extension (e.g. 'frame042.jpg'); returns 0 when no file
    matches.
    """
    highest = 0
    for fname in glob(path_dir + fname_pattern):
        # Digits sit just before the 4-char extension (".jpg" etc.).
        suffix = int(fname[-num_len - 4:-4])
        highest = max(highest, suffix)
    return highest
def image_manifold_size(num_images):
    """Return a (rows, cols) grid shape that exactly tiles *num_images*.

    rows = floor(sqrt(n)), cols = ceil(sqrt(n)); the assertion requires
    rows * cols == n, which holds e.g. for perfect squares and n = k*(k+1).
    The stray debug print(num_images) was removed.
    """
    manifold_h = int(np.floor(np.sqrt(num_images)))
    manifold_w = int(np.ceil(np.sqrt(num_images)))
    assert manifold_h * manifold_w == num_images
    return manifold_h, manifold_w
if __name__ == '__main__':
    # Quick manual benchmark of get_image on a sample file.
    print('Getting image!')
    import time
    start = time.time()
    get_image("albert-gleizes_acrobats-1916.jpg", 256, 256, 256, 256)
    end = (time.time() - start)
    # Fixed format spec: '{.:%4f}' is invalid and raised ValueError at
    # runtime; '{:.4f}' prints the elapsed seconds with 4 decimals.
    print('Took : {:.4f}'.format(end))
f73b6022673fcd9dfb2e398bb51a7bbf33d4bb06 | 2,467 | py | Python | alien_tag.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | [
"MIT"
] | 1 | 2021-03-21T13:52:00.000Z | 2021-03-21T13:52:00.000Z | alien_tag.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | [
"MIT"
] | null | null | null | alien_tag.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | [
"MIT"
] | 2 | 2015-10-12T10:02:50.000Z | 2020-03-09T13:30:12.000Z | __author__ = 'joesacher'
import datetime as dt
class AlienTag(object):
def __init__(self, taglist_entry):
self.disc = 0
self.last = 0
self.last_last = 0
self.id = 0
self.ant = 0
self.count = 0
self.proto = 0
self.rssi = 0
self.freq = 0
# self.speed = 0
# self.speed_smooth = 0
# self.speed_last = 0
# self.pos_smooth = 0
# self.pos_last = 0
# self.pos_min = 0
self.create(taglist_entry)
def __str__(self):
return self.id
def __gt__(self, other):
return self.id > other.id
def create(self, taglist_entry):
"""
Try to parse a taglist entry into a set of Tag object variables.
Uses a simple mapping from Alien 'text' format:
Tag:0102 0304 0506 0708 0900 0A0B, Disc:2008/10/28 10:49:35, Last:2008/10/28 10:49:35, Count:1, Ant:3, Proto:2
*rssi* and *speed* attributes are not included in the default text format.
In order to have them parsed correctly the _TagListFormat_ must be set to _custom_ and
the _TagListCustomFormat_ fields must be separated by the following text tokens:
'tag:', 'disc:', 'last:', 'count:', 'ant:', 'proto:', 'speed:', 'rssi:'
For example:
@rdr.taglistcustomformat("Tag:%i, Disc:${DATE1} ${TIME1}, Last:${DATE2} ${TIME2}, Count:${COUNT}, Ant:${TX}, Proto:${PROTO#}, Speed:${SPEED}, rssi:${RSSI})"
@rdr.taglistformat("custom")
"""
self.id = ""
if taglist_entry == "(No Tags)":
return
tagline = taglist_entry.split('\r\n')[0]
tagbits = {}
for keyval in tagline.split(", "):
key, val = keyval.split(":", 1)
# TODO: Raise Error on Bad Key Val parse
tagbits[key.lower()] = val
self.id = tagbits.get('tag', 'NO TAG ID')
self.ant = tagbits.get('ant', 0)
self.count = tagbits.get('count', 0)
self.disc = tagbits.get('disc', 0)
self.last = tagbits.get('last', 0)
# TODO: Convert self.last into datetime
self.last_last = self.last
self.proto = tagbits.get('proto', 0)
self.rssi = tagbits.get('rssi', 0)
self.freq = tagbits.get('freq', 0)
self.speed = tagbits.get('speed', 0)
def update(self, new_tag):
self.last = new_tag.last
self.count += new_tag.count
self.last_last = self.last
| 31.628205 | 165 | 0.565464 | __author__ = 'joesacher'
import datetime as dt
class AlienTag(object):
def __init__(self, taglist_entry):
self.disc = 0
self.last = 0
self.last_last = 0
self.id = 0
self.ant = 0
self.count = 0
self.proto = 0
self.rssi = 0
self.freq = 0
self.create(taglist_entry)
def __str__(self):
return self.id
def __gt__(self, other):
return self.id > other.id
def create(self, taglist_entry):
self.id = ""
if taglist_entry == "(No Tags)":
return
tagline = taglist_entry.split('\r\n')[0]
tagbits = {}
for keyval in tagline.split(", "):
key, val = keyval.split(":", 1)
tagbits[key.lower()] = val
self.id = tagbits.get('tag', 'NO TAG ID')
self.ant = tagbits.get('ant', 0)
self.count = tagbits.get('count', 0)
self.disc = tagbits.get('disc', 0)
self.last = tagbits.get('last', 0)
self.last_last = self.last
self.proto = tagbits.get('proto', 0)
self.rssi = tagbits.get('rssi', 0)
self.freq = tagbits.get('freq', 0)
self.speed = tagbits.get('speed', 0)
def update(self, new_tag):
self.last = new_tag.last
self.count += new_tag.count
self.last_last = self.last
| true | true |
f73b60439e1031a0d4a27c44af81f40197e78d1f | 1,048 | py | Python | clemency_fix.py | cseagle/ida_clemency | 7101ce142df676c7b62b176a96f43f8df01ce3d6 | [
"MIT"
] | 66 | 2017-07-31T03:44:15.000Z | 2022-03-27T07:58:13.000Z | clemency_fix.py | cseagle/ida_clemency | 7101ce142df676c7b62b176a96f43f8df01ce3d6 | [
"MIT"
] | null | null | null | clemency_fix.py | cseagle/ida_clemency | 7101ce142df676c7b62b176a96f43f8df01ce3d6 | [
"MIT"
] | 14 | 2017-07-31T13:39:10.000Z | 2021-12-10T03:16:01.000Z | from idaapi import *
'''
Author: Chris Eagle
Name: Clemency function fixup plugin defcon 25
How: Install into <idadir>/plugins
Activate within a function using Alt-8
'''
class clemency_plugin_t(plugin_t):
flags = 0
wanted_name = "Fix Clemency Functions"
wanted_hotkey = "Alt-8"
comment = ""
help = ""
def init(self):
return PLUGIN_OK
def term(self):
pass
def run(self, arg):
f = get_func(ScreenEA())
if f is not None:
fitems = FuncItems(f.startEA)
for a in fitems:
for x in XrefsFrom(a, XREF_FAR):
if x.type == fl_JN:
if not isCode(GetFlags(x.to)):
do_unknown(ItemHead(x.to), 0)
MakeCode(x.to)
elif x.type == fl_CN:
if not isCode(GetFlags(x.to)):
do_unknown(ItemHead(x.to), 0)
MakeCode(x.to)
MakeFunction(x.to, BADADDR)
def PLUGIN_ENTRY():
    # IDA locates this factory function by name when loading the plugin.
    return clemency_plugin_t()
| 25.560976 | 66 | 0.537214 | from idaapi import *
class clemency_plugin_t(plugin_t):
flags = 0
wanted_name = "Fix Clemency Functions"
wanted_hotkey = "Alt-8"
comment = ""
help = ""
def init(self):
return PLUGIN_OK
def term(self):
pass
def run(self, arg):
f = get_func(ScreenEA())
if f is not None:
fitems = FuncItems(f.startEA)
for a in fitems:
for x in XrefsFrom(a, XREF_FAR):
if x.type == fl_JN:
if not isCode(GetFlags(x.to)):
do_unknown(ItemHead(x.to), 0)
MakeCode(x.to)
elif x.type == fl_CN:
if not isCode(GetFlags(x.to)):
do_unknown(ItemHead(x.to), 0)
MakeCode(x.to)
MakeFunction(x.to, BADADDR)
def PLUGIN_ENTRY():
return clemency_plugin_t()
| true | true |
f73b6077afbbe4aea1500a7d721be460eba78b98 | 1,235 | py | Python | config_app/migrations/0001_initial.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | config_app/migrations/0001_initial.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | config_app/migrations/0001_initial.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-23 19:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Network, System and
    # Users tables for this app.

    initial = True

    dependencies = [
    ]

    operations = [
        # Network: an IP network address and its subnet, stored as text.
        migrations.CreateModel(
            name='Network',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('network_ip', models.TextField()),
                ('subnet', models.TextField()),
            ],
        ),
        # System: per-device OS and the discovery protocol in use.
        migrations.CreateModel(
            name='System',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('network_device_os', models.TextField()),
                ('discovery_protocol', models.TextField()),
            ],
        ),
        # Users: device credentials.
        # NOTE(review): password/secret are plain TextFields — confirm
        # that hashing/encryption is applied elsewhere before storage.
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.TextField()),
                ('password', models.TextField()),
                ('secret', models.TextField()),
            ],
        ),
    ]
| 30.875 | 114 | 0.527126 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Network',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('network_ip', models.TextField()),
('subnet', models.TextField()),
],
),
migrations.CreateModel(
name='System',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('network_device_os', models.TextField()),
('discovery_protocol', models.TextField()),
],
),
migrations.CreateModel(
name='Users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.TextField()),
('password', models.TextField()),
('secret', models.TextField()),
],
),
]
| true | true |
f73b610d1eca0381b83984305cd68fb9135b1ecd | 5,834 | py | Python | test/azure/low-level/Expected/AcceptanceTests/AzureSpecialsLowLevel/azurespecialpropertieslowlevel/rest/api_version_local/_request_builders_py3.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | test/azure/low-level/Expected/AcceptanceTests/AzureSpecialsLowLevel/azurespecialpropertieslowlevel/rest/api_version_local/_request_builders_py3.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | test/azure/low-level/Expected/AcceptanceTests/AzureSpecialsLowLevel/azurespecialpropertieslowlevel/rest/api_version_local/_request_builders_py3.py | Azure/autorest.python | c36f5c1a2d614a1eeba6fec6a2c02517f2d1cce7 | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional
from azure.core.rest import HttpRequest
from msrest import Serializer
from ..._vendor import _format_url_section
_SERIALIZER = Serializer()
def build_get_method_local_valid_request(**kwargs: Any) -> HttpRequest:
    """Get method with api-version modeled in the method. pass in api-version =
    '2.0' to succeed.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # api-version is fixed for this endpoint.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters["api-version"] = _SERIALIZER.query("api_version", "2.0", "str")

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["Accept"] = _SERIALIZER.header("accept", "application/json", "str")

    return HttpRequest(
        method="GET",
        url=kwargs.pop("template_url", "/azurespecials/apiVersion/method/string/none/query/local/2.0"),
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_method_local_null_request(*, api_version: Optional[str] = None, **kwargs: Any) -> HttpRequest:
    """Get method with api-version modeled in the method. pass in api-version =
    null to succeed.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :keyword api_version: This should appear as a method parameter, use value null, this should
     result in no serialized parameter.
    :paramtype api_version: str
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    # None means "send no api-version parameter at all".
    if api_version is not None:
        query_parameters["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["Accept"] = _SERIALIZER.header("accept", "application/json", "str")

    return HttpRequest(
        method="GET",
        url=kwargs.pop("template_url", "/azurespecials/apiVersion/method/string/none/query/local/null"),
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_path_local_valid_request(**kwargs: Any) -> HttpRequest:
    """Get method with api-version modeled in the method. pass in api-version =
    '2.0' to succeed.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # api-version is fixed for this endpoint.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters["api-version"] = _SERIALIZER.query("api_version", "2.0", "str")

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["Accept"] = _SERIALIZER.header("accept", "application/json", "str")

    return HttpRequest(
        method="GET",
        url=kwargs.pop("template_url", "/azurespecials/apiVersion/path/string/none/query/local/2.0"),
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_swagger_local_valid_request(**kwargs: Any) -> HttpRequest:
    """Get method with api-version modeled in the method. pass in api-version =
    '2.0' to succeed.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
    into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest
    """
    # api-version is fixed for this endpoint.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters["api-version"] = _SERIALIZER.query("api_version", "2.0", "str")

    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["Accept"] = _SERIALIZER.header("accept", "application/json", "str")

    return HttpRequest(
        method="GET",
        url=kwargs.pop("template_url", "/azurespecials/apiVersion/swagger/string/none/query/local/2.0"),
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
| 44.534351 | 108 | 0.700034 |
from typing import Any, Optional
from azure.core.rest import HttpRequest
from msrest import Serializer
from ..._vendor import _format_url_section
_SERIALIZER = Serializer()
def build_get_method_local_valid_request(**kwargs: Any) -> HttpRequest:
api_version = "2.0"
accept = "application/json"
url = kwargs.pop("template_url", "/azurespecials/apiVersion/method/string/none/query/local/2.0")
query_parameters = kwargs.pop("params", {})
query_parameters["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
header_parameters = kwargs.pop("headers", {})
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_get_method_local_null_request(*, api_version: Optional[str] = None, **kwargs: Any) -> HttpRequest:
accept = "application/json"
url = kwargs.pop("template_url", "/azurespecials/apiVersion/method/string/none/query/local/null")
query_parameters = kwargs.pop("params", {})
if api_version is not None:
query_parameters["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
header_parameters = kwargs.pop("headers", {})
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_get_path_local_valid_request(**kwargs: Any) -> HttpRequest:
api_version = "2.0"
accept = "application/json"
url = kwargs.pop("template_url", "/azurespecials/apiVersion/path/string/none/query/local/2.0")
query_parameters = kwargs.pop("params", {})
query_parameters["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
header_parameters = kwargs.pop("headers", {})
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
def build_get_swagger_local_valid_request(**kwargs: Any) -> HttpRequest:
api_version = "2.0"
accept = "application/json"
url = kwargs.pop("template_url", "/azurespecials/apiVersion/swagger/string/none/query/local/2.0")
query_parameters = kwargs.pop("params", {})
query_parameters["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
header_parameters = kwargs.pop("headers", {})
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs)
| true | true |
f73b619dc67c20e47d8c1c61f60f735298ab7699 | 11,260 | py | Python | src/tests/ftest/container/attribute.py | fedepad/daos | ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | [
"BSD-2-Clause-Patent"
] | 429 | 2016-09-28T20:43:20.000Z | 2022-03-25T01:22:50.000Z | src/tests/ftest/container/attribute.py | fedepad/daos | ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | [
"BSD-2-Clause-Patent"
] | 6,341 | 2016-11-24T12:34:26.000Z | 2022-03-31T23:53:46.000Z | src/tests/ftest/container/attribute.py | fedepad/daos | ac71a320b8426b1eeb1457b0b6f5e6e115dfc9aa | [
"BSD-2-Clause-Patent"
] | 202 | 2016-10-30T14:47:53.000Z | 2022-03-30T21:29:11.000Z | #!/usr/bin/python3
'''
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
import traceback
import threading
import random
import base64
from apricot import TestWithServers
from general_utils import DaosTestError, get_random_bytes
from pydaos.raw import DaosApiError
from daos_utils import DaosCommand
# pylint: disable = global-variable-not-assigned, global-statement
GLOB_SIGNAL = None
GLOB_RC = -99000000
def cb_func(event):
    """Call back Function for asynchronous mode."""
    global GLOB_SIGNAL
    global GLOB_RC
    # Publish the event's error code for the waiting test thread, then
    # release it (the test blocks on GLOB_SIGNAL.wait()).
    GLOB_RC = event.event.ev_error
    GLOB_SIGNAL.set()
class ContainerAttributeTest(TestWithServers):
"""
Tests DAOS container attribute get/set/list.
:avocado: recursive
"""
    def __init__(self, *args, **kwargs):
        """Initialize a ContainerAttributeTest object."""
        super().__init__(*args, **kwargs)
        # Populated by the individual tests; daos_cmd wraps the daos CLI.
        self.expected_cont_uuid = None
        self.daos_cmd = None
@staticmethod
def create_data_set():
"""Create the large attribute dictionary.
Returns:
dict: a large attribute dictionary
"""
data_set = {}
for index in range(1024):
size = random.randint(1, 100) #nosec
key = str(index).encode("utf-8")
data_set[key] = get_random_bytes(size)
return data_set
    def verify_list_attr(self, indata, attributes_list, mode="sync"):
        """
        Args:
            indata: Dict used to set attr
            attributes_list: List obtained from list attr
            mode: sync or async

        Verify the length of the Attribute names
        """
        # Total expected size: sum of the name lengths; in async mode each
        # name counts one extra byte (presumably a terminator — TODO confirm).
        length = 0
        for key in indata.keys():
            length += len(key)
            if mode == "async":
                length += 1
        # Total size actually returned by list-attr.
        size = 0
        for attr in attributes_list:
            size += len(attr)
        self.log.info("Verifying list_attr output:")
        self.log.info("  set_attr names: %s", list(indata.keys()))
        self.log.info("  set_attr size: %s", length)
        self.log.info("  list_attr names: %s", attributes_list)
        self.log.info("  list_attr size: %s", size)
        if length != size:
            self.fail(
                "FAIL: Size is not matching for Names in list attr, Expected "
                "len={} and received len={}".format(length, size))
        # verify the Attributes names in list_attr retrieve
        for key in list(indata.keys()):
            found = False
            for attr in attributes_list:
                # Workaround
                # decode() is used to be able to compare bytes with strings
                # We get strings from daos_command, while in pydaos we get bytes
                if key.decode() == attr:
                    found = True
                    break
            if not found:
                self.fail(
                    "FAIL: Name does not match after list attr, Expected "
                    "buf={} and received buf={}".format(key, attributes_list))
def verify_get_attr(self, indata, outdata):
"""
verify the Attributes value after get_attr
"""
decoded = {}
for key, val in outdata.items():
if isinstance(val, bytes):
# The API returns the values as bytes already.
decoded[key.decode()] = val
else:
# The JSON output encodes the bytes as base64, so
# we need to decode them for comparison.
decoded[key] = base64.b64decode(val)
self.log.info("Verifying get_attr output:")
self.log.info(" get_attr data: %s", indata)
self.log.info(" set_attr data: %s", decoded)
for attr, value in indata.items():
if value != decoded.get(attr.decode(), None):
self.fail(
"FAIL: Value does not match after get({}), Expected "
"val={} and received val={}".format(attr, value,
decoded.get(attr.decode(), None)))
    def test_container_large_attributes(self):
        """
        Test ID: DAOS-1359

        Test description: Test large randomly created container attribute.

        :avocado: tags=all,full_regression
        :avocado: tags=container,attribute
        :avocado: tags=large_conattribute
        :avocado: tags=container_attribute
        """
        # Create a pool and an open container to attach attributes to.
        self.add_pool()
        self.add_container(self.pool)
        self.container.open()
        self.daos_cmd = DaosCommand(self.bin)
        attr_dict = self.create_data_set()

        try:
            self.container.container.set_attr(data=attr_dict)

            # Workaround
            # Due to DAOS-7093 skip the usage of pydaos cont list attr
            # size, buf = self.container.container.list_attr()
            # verbose=False returns only the attribute names.
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid,
                verbose=False)
            self.verify_list_attr(attr_dict, data['response'])

            # verbose=True also returns the values (base64-encoded in the
            # CLI JSON; see verify_get_attr which decodes them).
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid,
                verbose=True)
            self.verify_get_attr(attr_dict, data['response'])
        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
    def test_container_attribute(self):
        """
        Test basic container attribute tests.

        :avocado: tags=all,tiny,full_regression
        :avocado: tags=container,attribute
        :avocado: tags=sync_conattribute
        :avocado: tags=container_attribute
        """
        self.add_pool()
        self.add_container(self.pool)
        self.container.open()
        self.daos_cmd = DaosCommand(self.bin)

        # Yaml entries are (value, expected-result) pairs; collect the
        # expected results to decide whether this case should pass or fail.
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        # Convert any test yaml string to bytes
        if isinstance(name[0], str):
            name[0] = name[0].encode("utf-8")
        if isinstance(value[0], str):
            value[0] = value[0].encode("utf-8")

        attr_dict = {name[0]: value[0]}

        # The case fails overall if any parameter expects failure.
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            self.container.container.set_attr(data=attr_dict)
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid)
            self.verify_list_attr(attr_dict, data['response'])

            # Request something that doesn't exist
            if name[0] is not None and b"Negative" in name[0]:
                name[0] = b"rubbish"
            attr_value_dict = self.container.container.get_attr([name[0]])

            # Raise an exception if the attr value is empty
            # This is expected to happen on Negative test cases
            if not attr_value_dict[name[0]]:
                raise DaosApiError("Attr value is empty. "
                                   "Did you set the value?")
            self.verify_get_attr(attr_dict, attr_value_dict)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except (DaosApiError, DaosTestError) as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
    def test_container_attribute_async(self):
        """
        Test basic container attribute tests.

        :avocado: tags=all,small,full_regression
        :avocado: tags=container,attribute
        :avocado: tags=async_conattribute
        :avocado: tags=container_attribute
        """
        # Async completion is signalled through the module-level
        # GLOB_SIGNAL/GLOB_RC pair set by cb_func.
        global GLOB_SIGNAL
        global GLOB_RC

        self.add_pool()
        self.add_container(self.pool)
        self.container.open()
        self.daos_cmd = DaosCommand(self.bin)

        # Yaml entries are (value, expected-result) pairs.
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        # Convert any test yaml string to bytes
        if isinstance(name[0], str):
            name[0] = name[0].encode("utf-8")
        if isinstance(value[0], str):
            value[0] = value[0].encode("utf-8")

        attr_dict = {name[0]: value[0]}

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            # Fresh event per async call; cb_func sets it on completion.
            GLOB_SIGNAL = threading.Event()
            self.container.container.set_attr(data=attr_dict, cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after set_attr First {0}"
                          .format(GLOB_RC))

            # Workaround
            # Due to DAOS-7093 skip the usage of pydaos cont list attr
            # GLOB_SIGNAL = threading.Event()
            #
            # size, buf = self.container.container.list_attr(cb_func=cb_func)
            #
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid)
            # GLOB_SIGNAL.wait()
            # if GLOB_RC != 0 and expected_result in ['PASS']:
            #     self.fail("RC not as expected after list_attr First {0}"
            #               .format(GLOB_RC))
            if expected_result in ['PASS']:
                # Workaround: async mode is not used for list_attr
                self.verify_list_attr(attr_dict, data['response'])

            # Request something that doesn't exist
            if name[0] is not None and b"Negative" in name[0]:
                name[0] = b"rubbish"

            GLOB_SIGNAL = threading.Event()
            self.container.container.get_attr([name[0]],
                                              cb_func=cb_func)
            GLOB_SIGNAL.wait()

            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after get_attr {0}"
                          .format(GLOB_RC))

            # not verifying the get_attr since its not available asynchronously
            # Therefore we want to avoid passing negative test
            # e.g. rubbish getting assigned.
            if value[0] is not None:
                if GLOB_RC == 0 and expected_result in ['FAIL']:
                    if name[0] != b"rubbish":
                        self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
import traceback
import threading
import random
import base64
from apricot import TestWithServers
from general_utils import DaosTestError, get_random_bytes
from pydaos.raw import DaosApiError
from daos_utils import DaosCommand
GLOB_SIGNAL = None
GLOB_RC = -99000000
def cb_func(event):
global GLOB_SIGNAL
global GLOB_RC
GLOB_RC = event.event.ev_error
GLOB_SIGNAL.set()
class ContainerAttributeTest(TestWithServers):
    """Tests setting, listing and getting DAOS container attributes.

    Covers large attribute sets, single attributes driven by test yaml
    parameters, and the asynchronous attribute API (via cb_func above).
    """

    def __init__(self, *args, **kwargs):
        """Initialize the test case and its bookkeeping attributes."""
        super().__init__(*args, **kwargs)
        # UUID of the container expected by verification steps (set by tests).
        self.expected_cont_uuid = None
        # DaosCommand wrapper for the `daos` CLI; created per test method.
        self.daos_cmd = None

    @staticmethod
    def create_data_set():
        """Build a large random attribute dictionary.

        Returns:
            dict: 1024 entries; key is the stringified index encoded as
            bytes, value is 1-100 random bytes.
        """
        data_set = {}
        for index in range(1024):
            size = random.randint(1, 100)
            key = str(index).encode("utf-8")
            data_set[key] = get_random_bytes(size)
        return data_set

    def verify_list_attr(self, indata, attributes_list, mode="sync"):
        """Verify the output of list attr command.

        Compares the total byte length of the attribute names that were set
        (indata keys) against the total length of the names returned by
        list-attr, then checks every set name appears in the listing.

        Args:
            indata (dict): attributes that were set (bytes keys).
            attributes_list (list): names returned by the list-attr call.
            mode (str): "sync" or "async"; async adds one byte per name
                to the expected length (extra terminator in async output --
                presumably a trailing NUL; verify against the API).
        """
        length = 0
        for key in indata.keys():
            length += len(key)
            if mode == "async":
                length += 1
        size = 0
        for attr in attributes_list:
            size += len(attr)
        self.log.info("Verifying list_attr output:")
        self.log.info("  set_attr names: %s", list(indata.keys()))
        self.log.info("  set_attr size: %s", length)
        self.log.info("  list_attr names: %s", attributes_list)
        self.log.info("  list_attr size: %s", size)
        if length != size:
            self.fail(
                "FAIL: Size is not matching for Names in list attr, Expected "
                "len={} and received len={}".format(length, size))
        # Verify the name of each attribute that was set appears in the list.
        for key in list(indata.keys()):
            found = False
            for attr in attributes_list:
                if key.decode() == attr:
                    found = True
                    break
            if not found:
                self.fail(
                    "FAIL: Name does not match after list attr, Expected "
                    "buf={} and received buf={}".format(key, attributes_list))

    def verify_get_attr(self, indata, outdata):
        """Verify that get-attr returned the values that were set.

        Args:
            indata (dict): attributes that were set (bytes keys/values).
            outdata (dict): attributes returned by get-attr; values may be
                raw bytes or base64-encoded strings (the CLI path returns
                base64 -- decoded here before comparing).
        """
        decoded = {}
        for key, val in outdata.items():
            if isinstance(val, bytes):
                decoded[key.decode()] = val
            else:
                decoded[key] = base64.b64decode(val)
        self.log.info("Verifying get_attr output:")
        self.log.info("  get_attr data: %s", indata)
        self.log.info("  set_attr data: %s", decoded)
        for attr, value in indata.items():
            if value != decoded.get(attr.decode(), None):
                self.fail(
                    "FAIL: Value does not match after get({}), Expected "
                    "val={} and received val={}".format(attr, value,
                                                       decoded.get(attr.decode(), None)))

    def test_container_large_attributes(self):
        """Set 1024 random attributes, then verify list-attr and get-attr."""
        self.add_pool()
        self.add_container(self.pool)
        self.container.open()
        self.daos_cmd = DaosCommand(self.bin)
        attr_dict = self.create_data_set()
        try:
            self.container.container.set_attr(data=attr_dict)
            # verbose=False returns only attribute names.
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid,
                verbose=False)
            self.verify_list_attr(attr_dict, data['response'])
            # verbose=True returns name/value pairs.
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid,
                verbose=True)
            self.verify_get_attr(attr_dict, data['response'])
        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")

    def test_container_attribute(self):
        """Set/list/get one attribute taken from the test yaml parameters.

        Each yaml parameter is a (value, expected_result) pair; the test
        passes when the actual outcome matches the combined expectation.
        """
        self.add_pool()
        self.add_container(self.pool)
        self.container.open()
        self.daos_cmd = DaosCommand(self.bin)
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])
        # Convert any test yaml string to bytes (the raw API expects bytes).
        if isinstance(name[0], str):
            name[0] = name[0].encode("utf-8")
        if isinstance(value[0], str):
            value[0] = value[0].encode("utf-8")
        attr_dict = {name[0]: value[0]}
        # The test is expected to fail if any parameter is marked FAIL.
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            self.container.container.set_attr(data=attr_dict)
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid)
            self.verify_list_attr(attr_dict, data['response'])
            # For negative cases, request a name that was never set.
            if name[0] is not None and b"Negative" in name[0]:
                name[0] = b"rubbish"
            attr_value_dict = self.container.container.get_attr([name[0]])
            # Raise an exception if the attr value is empty
            # This is expected to happen on Negative test cases
            if not attr_value_dict[name[0]]:
                raise DaosApiError("Attr value is empty. "
                                   "Did you set the value?")
            self.verify_get_attr(attr_dict, attr_value_dict)
            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")
        except (DaosApiError, DaosTestError) as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

    def test_container_attribute_async(self):
        """Same as test_container_attribute but through the async API.

        Uses the module-level GLOB_SIGNAL/GLOB_RC pair plus cb_func to wait
        for each asynchronous call and inspect its return code.
        """
        global GLOB_SIGNAL
        global GLOB_RC
        self.add_pool()
        self.add_container(self.pool)
        self.container.open()
        self.daos_cmd = DaosCommand(self.bin)
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])
        # Convert any test yaml string to bytes
        if isinstance(name[0], str):
            name[0] = name[0].encode("utf-8")
        if isinstance(value[0], str):
            value[0] = value[0].encode("utf-8")
        attr_dict = {name[0]: value[0]}
        # The test is expected to fail if any parameter is marked FAIL.
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            # Async set_attr: wait on the event, then check the return code.
            GLOB_SIGNAL = threading.Event()
            self.container.container.set_attr(data=attr_dict, cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after set_attr First {0}"
                          .format(GLOB_RC))
            # Workaround
            # Due to DAOS-7093 skip the usage of pydaos cont list attr
            # GLOB_SIGNAL = threading.Event()
            #
            # size, buf = self.container.container.list_attr(cb_func=cb_func)
            #
            data = self.daos_cmd.container_list_attrs(
                pool=self.pool.uuid,
                cont=self.container.uuid)
            # GLOB_SIGNAL.wait()
            # if GLOB_RC != 0 and expected_result in ['PASS']:
            #     self.fail("RC not as expected after list_attr First {0}"
            #               .format(GLOB_RC))
            if expected_result in ['PASS']:
                # Workaround: async mode is not used for list_attr
                self.verify_list_attr(attr_dict, data['response'])
            # Request something that doesn't exist
            if name[0] is not None and b"Negative" in name[0]:
                name[0] = b"rubbish"
            # Async get_attr, using the same event/return-code handshake.
            GLOB_SIGNAL = threading.Event()
            self.container.container.get_attr([name[0]],
                                              cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after get_attr {0}"
                          .format(GLOB_RC))
            if value[0] is not None:
                if GLOB_RC == 0 and expected_result in ['FAIL']:
                    if name[0] != b"rubbish":
                        self.fail("Test was expected to fail but it passed.\n")
        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
| true | true |
f73b61abc09d5ccb96ab0ad2f1cdea16eb5f4891 | 5,889 | py | Python | train.py | Vaden4d/logo-classifier | 18c397e52352da8e79868158123c13bf0417130f | [
"MIT"
] | null | null | null | train.py | Vaden4d/logo-classifier | 18c397e52352da8e79868158123c13bf0417130f | [
"MIT"
] | null | null | null | train.py | Vaden4d/logo-classifier | 18c397e52352da8e79868158123c13bf0417130f | [
"MIT"
] | null | null | null | import os
# Training entry point: supervised or semi-supervised (MixMatch) binary
# logo classifier built on EfficientNet + PyTorch Lightning.
import argparse
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from utils.models import EfficientNetModel, EfficientNetSSL
from utils.transforms import get_transforms
from utils.loaders import get_loaders, get_loader
from utils.losses import LabelSmoothingLoss
from utils.misc import seed_everything, predict_on_loader
from utils.visualization import display_metrics
from utils.dataset import ImageDataset
from mixmatch_pytorch import MixMatchLoader, get_mixmatch_loss
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix, precision_score, recall_score
# ---- Command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch Lightning Training')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
                    help='Number of total training epochs')
parser.add_argument('--batch_size', default=32, type=int, metavar='N',
                    help='Train and test batch size')
parser.add_argument('--gpu', default=1, type=int,
                    help='0 if CPU mode, 1 if GPU')
parser.add_argument("--ssl", action="store_true",
                    help="Use semi-supervised pipeline")
parser.add_argument('--csv', default='dataset_with_weak_labels.csv', type=str,
                    help='Training .csv file with target column')
parser.add_argument('--target_column', default='weak_label', type=str,
                    help='Name of target column of the .csv file'
                    )
parser.add_argument('--validate', action="store_true",
                    help="Validate model on labeled dataset"
                    )
parser.add_argument('--validation_csv', default="labeled_part.csv",
                    help="Validation .csv file with labeled target"
                    )
parser.add_argument('--target_validation', default="label",
                    help="Name of target column in validation dataset"
                    )
parser.add_argument('--test_size', default=0.2, type=float,
                    help='Test dataset size'
                    )
parser.add_argument('--image_size', default=224, type=int,
                    help='Desired image size'
                    )
parser.add_argument('--num_workers', default=2, type=int,
                    help='Number of processes for PyTorch data loaders'
                    )
parser.add_argument('--random_state', default=42, type=int,
                    help='Random seed for all random operations'
                    )
args = parser.parse_args()
# ---- Environment setup: device selection and reproducible seeding ----------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Force CPU mode for the trainer when CUDA is unavailable.
args.gpu = args.gpu if torch.cuda.is_available() else 0
seed_everything(args.random_state)
# ---- Data loading ----------------------------------------------------------
# target_column has unique values in set -1, 0, 1
# -1 corresponds to the unlabeled data
df = pd.read_csv(args.csv)
labeled = df[df[args.target_column] > -1]
if args.ssl:
    print("Semi-supervised learning model is on...")
    unlabeled = df[df[args.target_column] == -1]
# weights to initialize bias of FC layer of classifier
# (log of the empirical class frequencies)
weight = labeled.groupby(args.target_column).count()["path"] / labeled.shape[0]
weight = torch.Tensor(weight.values).log()
# Stratified split keeps the class balance in both folds.
train_labeled, test_labeled = train_test_split(labeled, test_size=args.test_size, stratify=labeled[args.target_column], random_state=args.random_state)
train_transform, valid_transform = get_transforms(img_size=args.image_size)
train_labeled_loader, valid_labeled_loader = get_loaders(
    train_labeled,
    test_labeled,
    train_transform,
    valid_transform,
    target_column=args.target_column,
    batch_size=args.batch_size,
    num_workers=args.num_workers,
    shuffle=True
)
if args.ssl:
    # Unlabeled images share the train-time augmentations; no target column.
    dataset_unlabeled = ImageDataset(unlabeled, train_transform, target_column=None)
# ---- Loss, model and checkpointing -----------------------------------------
loss = LabelSmoothingLoss(num_classes=2, smoothing=0.2, weight=None)
if args.ssl:
    print("Semi-supervised learning model is configured...")
    model = EfficientNetSSL(loss=loss, num_classes=2, weight=weight)
else:
    model = EfficientNetModel(loss=loss, num_classes=2, weight=weight)
# Keep the checkpoint with the best validation F1 score.
model_checkpoint = ModelCheckpoint(monitor="val_acc_f1",
                                   verbose=True,
                                   dirpath="models/",
                                   mode="max",
                                   filename="{epoch}_{val_acc_f1:.4f}")
if args.ssl:
    # SSL approach changes only train dataloader and model class
    train_loader = MixMatchLoader(
        train_labeled_loader,
        dataset_unlabeled,
        model,
        output_transform=nn.Softmax(dim=-1),
        K=2,
        T=0.5,
        alpha=0.75
    )
else:
    train_loader = train_labeled_loader
# ---- Training --------------------------------------------------------------
trainer = pl.Trainer(gpus=args.gpu,
                     max_epochs=args.epochs,
                     precision=16,
                     auto_lr_find=True,
                     callbacks=[model_checkpoint])
trainer.fit(model, train_loader, valid_labeled_loader)
# ---- Evaluation on the held-out weakly-labeled split -----------------------
test_labeled["pred"] = predict_on_loader(valid_labeled_loader, model, device)
test_labeled.to_csv("test_labeled_with_preds.csv", index=False)
# TODO
# threshold tuning
print("Metrics results on the test sample with weak labels:")
display_metrics(test_labeled[args.target_column], test_labeled["pred"], threshold=0.5)
# ---- Optional evaluation on a strongly-labeled hold-out set ----------------
if args.validate:
    validation = pd.read_csv(args.validation_csv)
    # Map the string label to a binary target: "logo" -> 1, anything else -> 0.
    validation[args.target_validation] = validation[args.target_validation].apply(lambda x: 1 if x == "logo" else 0)
    labeled_loader = get_loader(
        validation,
        "label",
        valid_transform,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=False
    )
    validation["pred"] = predict_on_loader(labeled_loader, model, device)
    validation.to_csv("labeled_with_preds.csv", index=False)
    print("Metrics results on the labeled sample with strong labels:")
    display_metrics(validation["label"], validation["pred"], threshold=0.5)
| 37.75 | 151 | 0.689421 | import os
import argparse
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from utils.models import EfficientNetModel, EfficientNetSSL
from utils.transforms import get_transforms
from utils.loaders import get_loaders, get_loader
from utils.losses import LabelSmoothingLoss
from utils.misc import seed_everything, predict_on_loader
from utils.visualization import display_metrics
from utils.dataset import ImageDataset
from mixmatch_pytorch import MixMatchLoader, get_mixmatch_loss
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix, precision_score, recall_score
parser = argparse.ArgumentParser(description='PyTorch Lightning Training')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
help='Number of total training epochs')
parser.add_argument('--batch_size', default=32, type=int, metavar='N',
help='Train and test batch size')
parser.add_argument('--gpu', default=1, type=int,
help='0 if CPU mode, 1 if GPU')
parser.add_argument("--ssl", action="store_true",
help="Use semi-supervised pipeline")
parser.add_argument('--csv', default='dataset_with_weak_labels.csv', type=str,
help='Training .csv file with target column')
parser.add_argument('--target_column', default='weak_label', type=str,
help='Name of target column of the .csv file'
)
parser.add_argument('--validate', action="store_true",
help="Validate model on labeled dataset"
)
parser.add_argument('--validation_csv', default="labeled_part.csv",
help="Validation .csv file with labeled target"
)
parser.add_argument('--target_validation', default="label",
help="Name of target column in validation dataset"
)
parser.add_argument('--test_size', default=0.2, type=float,
help='Test dataset size'
)
parser.add_argument('--image_size', default=224, type=int,
help='Desired image size'
)
parser.add_argument('--num_workers', default=2, type=int,
help='Number of processes for PyTorch data loaders'
)
parser.add_argument('--random_state', default=42, type=int,
help='Random seed for all random operations'
)
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args.gpu = args.gpu if torch.cuda.is_available() else 0
seed_everything(args.random_state)
df = pd.read_csv(args.csv)
labeled = df[df[args.target_column] > -1]
if args.ssl:
print("Semi-supervised learning model is on...")
unlabeled = df[df[args.target_column] == -1]
weight = labeled.groupby(args.target_column).count()["path"] / labeled.shape[0]
weight = torch.Tensor(weight.values).log()
train_labeled, test_labeled = train_test_split(labeled, test_size=args.test_size, stratify=labeled[args.target_column], random_state=args.random_state)
train_transform, valid_transform = get_transforms(img_size=args.image_size)
train_labeled_loader, valid_labeled_loader = get_loaders(
train_labeled,
test_labeled,
train_transform,
valid_transform,
target_column=args.target_column,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=True
)
if args.ssl:
dataset_unlabeled = ImageDataset(unlabeled, train_transform, target_column=None)
loss = LabelSmoothingLoss(num_classes=2, smoothing=0.2, weight=None)
if args.ssl:
print("Semi-supervised learning model is configured...")
model = EfficientNetSSL(loss=loss, num_classes=2, weight=weight)
else:
model = EfficientNetModel(loss=loss, num_classes=2, weight=weight)
model_checkpoint = ModelCheckpoint(monitor="val_acc_f1",
verbose=True,
dirpath="models/",
mode="max",
filename="{epoch}_{val_acc_f1:.4f}")
if args.ssl:
train_loader = MixMatchLoader(
train_labeled_loader,
dataset_unlabeled,
model,
output_transform=nn.Softmax(dim=-1),
K=2,
T=0.5,
alpha=0.75
)
else:
train_loader = train_labeled_loader
trainer = pl.Trainer(gpus=args.gpu,
max_epochs=args.epochs,
precision=16,
auto_lr_find=True,
callbacks=[model_checkpoint])
trainer.fit(model, train_loader, valid_labeled_loader)
test_labeled["pred"] = predict_on_loader(valid_labeled_loader, model, device)
test_labeled.to_csv("test_labeled_with_preds.csv", index=False)
print("Metrics results on the test sample with weak labels:")
display_metrics(test_labeled[args.target_column], test_labeled["pred"], threshold=0.5)
if args.validate:
validation = pd.read_csv(args.validation_csv)
validation[args.target_validation] = validation[args.target_validation].apply(lambda x: 1 if x == "logo" else 0)
labeled_loader = get_loader(
validation,
"label",
valid_transform,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=False
)
validation["pred"] = predict_on_loader(labeled_loader, model, device)
validation.to_csv("labeled_with_preds.csv", index=False)
print("Metrics results on the labeled sample with strong labels:")
display_metrics(validation["label"], validation["pred"], threshold=0.5)
| true | true |
f73b61d5003c633db0b53a0a5061de0573d4d808 | 491 | py | Python | efi_monitor/_cli.py | ambauma/efi-monitor | 2519c19a0b3cd27c38014a36066978cd888adc18 | [
"MIT"
] | null | null | null | efi_monitor/_cli.py | ambauma/efi-monitor | 2519c19a0b3cd27c38014a36066978cd888adc18 | [
"MIT"
] | null | null | null | efi_monitor/_cli.py | ambauma/efi-monitor | 2519c19a0b3cd27c38014a36066978cd888adc18 | [
"MIT"
] | null | null | null | """Define the command line iterface."""
import os
import glob
def _file_path():
"""Determine the file path."""
return os.environ.get("EFI_MONITOR_FILE_PATH", "/sys/firmware/efi/efivars/dump*")
def _files():
"""Find the dump_files."""
return glob.glob(_file_path())
def check():
"""Check for efi dump files."""
for a_file in _files():
print(a_file)
def clear():
"""Clear out efi dump files."""
for a_file in _files():
os.unlink(a_file)
| 18.884615 | 85 | 0.627291 | import os
import glob
def _file_path():
return os.environ.get("EFI_MONITOR_FILE_PATH", "/sys/firmware/efi/efivars/dump*")
def _files():
return glob.glob(_file_path())
def check():
for a_file in _files():
print(a_file)
def clear():
for a_file in _files():
os.unlink(a_file)
| true | true |
f73b61e350cc5a30b9026232f71c75e484784fb4 | 4,535 | py | Python | direct/src/particles/SpriteParticleRendererExt.py | cmarshall108/panda3d-python3 | 8bea2c0c120b03ec1c9fd179701fdeb7510bb97b | [
"PHP-3.0",
"PHP-3.01"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | direct/src/particles/SpriteParticleRendererExt.py | cmarshall108/panda3d-python3 | 8bea2c0c120b03ec1c9fd179701fdeb7510bb97b | [
"PHP-3.0",
"PHP-3.01"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/direct/particles/SpriteParticleRendererExt.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 1 | 2020-03-11T17:38:45.000Z | 2020-03-11T17:38:45.000Z | from panda3d.physics import SpriteParticleRenderer
class SpriteParticleRendererExt(SpriteParticleRenderer):
    """
    Contains methods to extend functionality
    of the SpriteParticleRenderer class
    """
    # Class-level defaults for the texture/model/node sources. They start as
    # None and are lazily initialized from the Config (or a hardcoded default)
    # the first time the corresponding getter is called. Setters write an
    # instance attribute, shadowing the class-wide value for that instance.
    sourceTextureName = None
    sourceFileName = None
    sourceNodeName = None

    def getSourceTextureName(self):
        """Return the texture filename, lazily loading the configured default."""
        if self.sourceTextureName is None:
            # `base` is the global ShowBase instance -- assumed to exist here.
            SpriteParticleRendererExt.sourceTextureName = base.config.GetString(
                'particle-sprite-texture', 'maps/lightbulb.rgb')
        # Return instance copy of class variable
        return self.sourceTextureName

    def setSourceTextureName(self, name):
        # Set instance copy of class variable
        self.sourceTextureName = name

    def setTextureFromFile(self, fileName=None):
        """Replace the renderer's texture with one loaded from fileName.

        Returns True on success, False if the texture could not be loaded.
        """
        if fileName is None:
            fileName = self.getSourceTextureName()
        t = loader.loadTexture(fileName)
        if t is not None:
            self.setTexture(t, t.getYSize())
            self.setSourceTextureName(fileName)
            return True
        else:
            print("Couldn't find rendererSpriteTexture file: %s" % fileName)
            return False

    def addTextureFromFile(self, fileName=None):
        """Append a texture loaded from fileName to the animation list.

        Falls back to setTextureFromFile when no animations exist yet.
        Returns True on success, False if the texture could not be loaded.
        """
        if self.getNumAnims() == 0:
            return self.setTextureFromFile(fileName)
        if fileName is None:
            fileName = self.getSourceTextureName()
        t = loader.loadTexture(fileName)
        if t is not None:
            self.addTexture(t, t.getYSize())
            return True
        else:
            print("Couldn't find rendererSpriteTexture file: %s" % fileName)
            return False

    def getSourceFileName(self):
        """Return the model filename, lazily loading the configured default."""
        if self.sourceFileName is None:
            SpriteParticleRendererExt.sourceFileName = base.config.GetString(
                'particle-sprite-model', 'models/misc/smiley')
        # Return instance copy of class variable
        return self.sourceFileName

    def setSourceFileName(self, name):
        # Set instance copy of class variable
        self.sourceFileName = name

    def getSourceNodeName(self):
        """Return the node search path, lazily loading the configured default."""
        if self.sourceNodeName is None:
            SpriteParticleRendererExt.sourceNodeName = base.config.GetString(
                'particle-sprite-node', '**/*')
        # Return instance copy of class variable
        return self.sourceNodeName

    def setSourceNodeName(self, name):
        # Set instance copy of class variable
        self.sourceNodeName = name

    def setTextureFromNode(self, modelName=None, nodeName=None, sizeFromTexels=False):
        """Set the renderer's texture from a node found inside a model.

        Returns True on success, False if the model or node is missing.
        """
        if modelName is None:
            modelName = self.getSourceFileName()
        if nodeName is None:
            nodeName = self.getSourceNodeName()
        # Load model and get texture
        m = loader.loadModel(modelName)
        if m is None:
            print("SpriteParticleRendererExt: Couldn't find model: %s!" % modelName)
            return False
        np = m.find(nodeName)
        if np.isEmpty():
            print("SpriteParticleRendererExt: Couldn't find node: %s!" % nodeName)
            m.removeNode()
            return False
        self.setFromNode(np, modelName, nodeName, sizeFromTexels)
        self.setSourceFileName(modelName)
        self.setSourceNodeName(nodeName)
        m.removeNode()
        return True

    def addTextureFromNode(self, modelName=None, nodeName=None, sizeFromTexels=False):
        """Append a texture taken from a node inside a model.

        Falls back to setTextureFromNode when no animations exist yet.
        Returns True on success, False if the model or node is missing.
        """
        if self.getNumAnims() == 0:
            return self.setTextureFromNode(modelName, nodeName, sizeFromTexels)
        if modelName is None:
            modelName = self.getSourceFileName()
        if nodeName is None:
            nodeName = self.getSourceNodeName()
        # Load model and get texture
        m = loader.loadModel(modelName)
        if m is None:
            print("SpriteParticleRendererExt: Couldn't find model: %s!" % modelName)
            return False
        np = m.find(nodeName)
        if np.isEmpty():
            print("SpriteParticleRendererExt: Couldn't find node: %s!" % nodeName)
            m.removeNode()
            return False
        self.addFromNode(np, modelName, nodeName, sizeFromTexels)
        m.removeNode()
        return True
| 35.155039 | 92 | 0.629548 | from panda3d.physics import SpriteParticleRenderer
class SpriteParticleRendererExt(SpriteParticleRenderer):
sourceTextureName = None
sourceFileName = None
sourceNodeName = None
def getSourceTextureName(self):
if self.sourceTextureName == None:
SpriteParticleRendererExt.sourceTextureName = base.config.GetString(
'particle-sprite-texture', 'maps/lightbulb.rgb')
return self.sourceTextureName
def setSourceTextureName(self, name):
self.sourceTextureName = name
def setTextureFromFile(self, fileName = None):
if fileName == None:
fileName = self.getSourceTextureName()
t = loader.loadTexture(fileName)
if (t != None):
self.setTexture(t, t.getYSize())
self.setSourceTextureName(fileName)
return True
else:
print("Couldn't find rendererSpriteTexture file: %s" % fileName)
return False
def addTextureFromFile(self, fileName = None):
if(self.getNumAnims() == 0):
return self.setTextureFromFile(fileName)
if fileName == None:
fileName = self.getSourceTextureName()
t = loader.loadTexture(fileName)
if (t != None):
self.addTexture(t, t.getYSize())
return True
else:
print("Couldn't find rendererSpriteTexture file: %s" % fileName)
return False
def getSourceFileName(self):
if self.sourceFileName == None:
SpriteParticleRendererExt.sourceFileName = base.config.GetString(
'particle-sprite-model', 'models/misc/smiley')
return self.sourceFileName
def setSourceFileName(self, name):
self.sourceFileName = name
def getSourceNodeName(self):
if self.sourceNodeName == None:
SpriteParticleRendererExt.sourceNodeName = base.config.GetString(
'particle-sprite-node', '**/*')
return self.sourceNodeName
def setSourceNodeName(self, name):
self.sourceNodeName = name
def setTextureFromNode(self, modelName = None, nodeName = None, sizeFromTexels = False):
if modelName == None:
modelName = self.getSourceFileName()
if nodeName == None:
nodeName = self.getSourceNodeName()
m = loader.loadModel(modelName)
if (m == None):
print("SpriteParticleRendererExt: Couldn't find model: %s!" % modelName)
return False
np = m.find(nodeName)
if np.isEmpty():
print("SpriteParticleRendererExt: Couldn't find node: %s!" % nodeName)
m.removeNode()
return False
self.setFromNode(np, modelName, nodeName, sizeFromTexels)
self.setSourceFileName(modelName)
self.setSourceNodeName(nodeName)
m.removeNode()
return True
def addTextureFromNode(self, modelName = None, nodeName = None, sizeFromTexels = False):
if(self.getNumAnims() == 0):
return self.setTextureFromNode(modelName, nodeName, sizeFromTexels)
if modelName == None:
modelName = self.getSourceFileName()
if nodeName == None:
nodeName = self.getSourceNodeName()
m = loader.loadModel(modelName)
if (m == None):
print("SpriteParticleRendererExt: Couldn't find model: %s!" % modelName)
return False
np = m.find(nodeName)
if np.isEmpty():
print("SpriteParticleRendererExt: Couldn't find node: %s!" % nodeName)
m.removeNode()
return False
self.addFromNode(np, modelName, nodeName, sizeFromTexels)
m.removeNode()
return True
| true | true |
f73b632634309b095bbcf2d01716af21add4e5a6 | 516 | py | Python | adapters/zemismart/__init__.py | cocooma/domoticz-zigbee2mqtt-plugin | 97b025beaff6b68f4f92dd434c6dda63f53efdd8 | [
"MIT"
] | null | null | null | adapters/zemismart/__init__.py | cocooma/domoticz-zigbee2mqtt-plugin | 97b025beaff6b68f4f92dd434c6dda63f53efdd8 | [
"MIT"
] | null | null | null | adapters/zemismart/__init__.py | cocooma/domoticz-zigbee2mqtt-plugin | 97b025beaff6b68f4f92dd434c6dda63f53efdd8 | [
"MIT"
] | null | null | null | from adapters.rgbw_adapter import RGBWAdapter
from adapters.generic.blind_adapter import BlindAdapter
from adapters.zemismart.ZMCSW002D import ZMCSW002D
from adapters.zemismart.ZML03EZ import ZML03EZ
zemismart_adapters = {
'LXZB-12A': RGBWAdapter, # Zemismart RGB LED downlight
'ZM-CSW002-D': ZMCSW002D, # Zemismart ZM-CSW002-D 2 gang switch
'ZM-CSW032-D': BlindAdapter,# Zemismart Curtain/roller blind switch
'ZM-L03E-Z': ZML03EZ, # Zemismart ZM-L03E-Z 3 gang with neutral wire switch
}
| 43 | 85 | 0.763566 | from adapters.rgbw_adapter import RGBWAdapter
from adapters.generic.blind_adapter import BlindAdapter
from adapters.zemismart.ZMCSW002D import ZMCSW002D
from adapters.zemismart.ZML03EZ import ZML03EZ
zemismart_adapters = {
'LXZB-12A': RGBWAdapter,
'ZM-CSW002-D': ZMCSW002D,
'ZM-CSW032-D': BlindAdapter,
'ZM-L03E-Z': ZML03EZ,
}
| true | true |
f73b64f5c28bcd29dc0aad247be19c4dc6b5659b | 1,078 | py | Python | basic_SPN_tests.py | hkscy/Basic-SPN-cryptanalysis | c93cc88785d4ac7d3e9f9ae8f3c926a52a051d82 | [
"MIT"
] | 6 | 2019-03-06T01:05:27.000Z | 2022-01-11T13:23:51.000Z | basic_SPN_tests.py | hicksc/Basic-SPN-cryptanalysis | c93cc88785d4ac7d3e9f9ae8f3c926a52a051d82 | [
"MIT"
] | 1 | 2018-09-20T10:07:53.000Z | 2019-08-13T13:28:12.000Z | basic_SPN_tests.py | hicksc/Basic-SPN-cryptanalysis | c93cc88785d4ac7d3e9f9ae8f3c926a52a051d82 | [
"MIT"
] | 4 | 2018-04-18T17:58:26.000Z | 2021-09-27T19:41:21.000Z | import basic_SPN as cipher
pbox = {0:0, 1:4, 2:8, 3:12, 4:1, 5:5, 6:9, 7:13, 8:2, 9:6, 10:10, 11:14, 12:3, 13:7, 14:11, 15:15}
# test pbox functionality/symmetry
def testPBox(statem: list, pbox: dict):
staten = [0]*len(pbox)
for tpi, tp in enumerate(statem):
staten[pbox[tpi]] = tp
#print (staten)
return staten
# Self-test: this pbox is an involution (rows/columns of a 4x4 grid swapped,
# e.g. 1<->4, 2<->8), so applying it twice must restore the original state.
testpBoxm = ['a','b','c','d', 'e','f','g','h', 'i','j','k','l', 'm','n','o','p']
testpBoxn = testPBox(testpBoxm, pbox)
testpBoxo = testPBox(testpBoxn, pbox)  # permute a second time
if testpBoxm != testpBoxo:
    print('FAIL: pbox inverse failed')
else:
    print('PASS: pbox inverse functional')
# test that encryption and decryption are symmetric operations
def testEncDecSymmetry(n):
k = cipher.keyGeneration()
ct = [cipher.encrypt(pt, k) for pt in range(0, n)]
for pt,ct in enumerate(ct):
if pt != cipher.decrypt(ct, k):
print('FAIL: cipher encrypt-decrypt failed for {:04x}:{:04x}:{:04x}'.format(pt,ct, cipher.decrypt(ct, k)))
print('PASS: cipher encrypt-decrypt symmetry')
testEncDecSymmetry(100)
| 34.774194 | 118 | 0.620594 | import basic_SPN as cipher
pbox = {0:0, 1:4, 2:8, 3:12, 4:1, 5:5, 6:9, 7:13, 8:2, 9:6, 10:10, 11:14, 12:3, 13:7, 14:11, 15:15}
def testPBox(statem: list, pbox: dict):
staten = [0]*len(pbox)
for tpi, tp in enumerate(statem):
staten[pbox[tpi]] = tp
return staten
testpBoxm = ['a','b','c','d', 'e','f','g','h', 'i','j','k','l', 'm','n','o','p']
testpBoxn = testPBox(testpBoxm, pbox)
testpBoxo = testPBox(testpBoxn, pbox)
if testpBoxm != testpBoxo:
print('FAIL: pbox inverse failed')
else:
print('PASS: pbox inverse functional')
def testEncDecSymmetry(n):
k = cipher.keyGeneration()
ct = [cipher.encrypt(pt, k) for pt in range(0, n)]
for pt,ct in enumerate(ct):
if pt != cipher.decrypt(ct, k):
print('FAIL: cipher encrypt-decrypt failed for {:04x}:{:04x}:{:04x}'.format(pt,ct, cipher.decrypt(ct, k)))
print('PASS: cipher encrypt-decrypt symmetry')
testEncDecSymmetry(100)
| true | true |
f73b650199f4c433026c13de7b49a04d23556ac4 | 8,951 | py | Python | stellar_sdk/sep/federation.py | kaotisk-hund/py-stellar-base | 30dbe1139d8f0c03c4c20ea3c9a45a19285bedb8 | [
"Apache-2.0"
] | 341 | 2015-10-06T20:56:19.000Z | 2022-03-23T15:58:54.000Z | stellar_sdk/sep/federation.py | kaotisk-hund/py-stellar-base | 30dbe1139d8f0c03c4c20ea3c9a45a19285bedb8 | [
"Apache-2.0"
] | 479 | 2015-11-09T18:39:40.000Z | 2022-03-16T06:46:58.000Z | stellar_sdk/sep/federation.py | kaotisk-hund/py-stellar-base | 30dbe1139d8f0c03c4c20ea3c9a45a19285bedb8 | [
"Apache-2.0"
] | 181 | 2015-10-01T23:00:59.000Z | 2022-03-05T13:42:19.000Z | """
SEP: 0002
Title: Federation protocol
Author: stellar.org
Status: Final
Created: 2017-10-30
Updated: 2019-10-10
Version 1.1.0
"""
from typing import Any, Coroutine, Dict, Optional, Union
from ..client.base_async_client import BaseAsyncClient
from ..client.base_sync_client import BaseSyncClient
from ..client.requests_client import RequestsClient
from ..client.response import Response
from ..exceptions import ValueError
from .exceptions import (
BadFederationResponseError,
FederationServerNotFoundError,
InvalidFederationAddress,
)
from .stellar_toml import fetch_stellar_toml
SEPARATOR = "*"
FEDERATION_SERVER_KEY = "FEDERATION_SERVER"
class FederationRecord:
    """A single record resolved through a SEP-0002 federation server."""

    def __init__(self, account_id, stellar_address, memo_type, memo):
        """Build a federation record.

        :param account_id: Stellar public key / account ID.
        :param stellar_address: Stellar address (``name*domain``).
        :param memo_type: Memo type to attach to the transaction, one of
            *text*, *id* or *hash*.
        :param memo: Memo value to attach to the transaction; base64-encoded
            for *hash*. Always a string (even for *id* memos) so that parsers
            without big-number support can handle the value.
        """
        self.account_id: str = account_id
        self.stellar_address: str = stellar_address
        self.memo_type: Optional[str] = memo_type
        self.memo: Optional[str] = memo

    def __str__(self):
        return (
            "<FederationRecord [account_id={}, stellar_address={}, "
            "memo_type={}, memo={}]>".format(
                self.account_id, self.stellar_address, self.memo_type, self.memo
            )
        )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return NotImplemented  # pragma: no cover
        return (
            self.account_id,
            self.stellar_address,
            self.memo_type,
            self.memo,
        ) == (
            other.account_id,
            other.stellar_address,
            other.memo_type,
            other.memo,
        )
def resolve_stellar_address(
    stellar_address: str,
    client: Union[BaseAsyncClient, BaseSyncClient] = None,
    federation_url: str = None,
    use_http: bool = False,
) -> Union[Coroutine[Any, Any, FederationRecord], FederationRecord]:
    """Look up the federation record for a Stellar address.

    :param stellar_address: Stellar address (ex. ``bob*stellar.org``).
    :param client: HTTP client used to send the request; a
        :class:`RequestsClient` is created when omitted.
    :param federation_url: Federation server URL
        (ex. ``https://stellar.org/federation``); derived from the
        address's domain when not provided.
    :param use_http: Allow plain HTTP instead of HTTPS.
        Note it is recommend that you *always* use HTTPS.
    :return: The federation record, or a coroutine resolving to it when
        an async client is supplied.
    """
    if not client:
        client = RequestsClient()
    if isinstance(client, BaseAsyncClient):
        return __resolve_stellar_address_async(
            stellar_address, client, federation_url, use_http
        )
    if isinstance(client, BaseSyncClient):
        return __resolve_stellar_address_sync(
            stellar_address, client, federation_url, use_http
        )
    raise TypeError(
        "This `client` class should be an instance "
        "of `stellar_sdk.client.base_async_client.BaseAsyncClient` "
        "or `stellar_sdk.client.base_sync_client.BaseSyncClient`."
    )
def resolve_account_id(
    account_id: str,
    domain: str = None,
    federation_url: str = None,
    client: Union[BaseAsyncClient, BaseSyncClient] = None,
    use_http: bool = False,
) -> Union[Coroutine[Any, Any, FederationRecord], FederationRecord]:
    """Look up the federation record for an account ID.

    :param account_id: Account ID
        (ex. GBYNR2QJXLBCBTRN44MRORCMI4YO7FZPFBCNOKTOBCAAFC7KC3LNPRYS).
    :param domain: Domain whose stellar.toml advertises the federation
        server; not needed when ``federation_url`` is given.
    :param federation_url: Federation server URL
        (ex. https://stellar.org/federation).
    :param client: HTTP client used to send the request; a
        :class:`RequestsClient` is created when omitted.
    :param use_http: Allow plain HTTP instead of HTTPS.
        Note it is recommend that you *always* use HTTPS.
    :return: The federation record, or a coroutine resolving to it when
        an async client is supplied.
    :raises ValueError: when neither ``domain`` nor ``federation_url``
        is provided.
    """
    if domain is None and federation_url is None:
        raise ValueError("You should provide either `domain` or `federation_url`.")
    if not client:
        client = RequestsClient()
    if isinstance(client, BaseAsyncClient):
        return __resolve_account_id_async(
            account_id, domain, federation_url, client, use_http
        )
    if isinstance(client, BaseSyncClient):
        return __resolve_account_id_sync(
            account_id, domain, federation_url, client, use_http
        )
    raise TypeError(
        "This `client` class should be an instance "
        "of `stellar_sdk.client.base_async_client.BaseAsyncClient` "
        "or `stellar_sdk.client.base_sync_client.BaseSyncClient`."
    )
def __resolve_stellar_address_sync(
    stellar_address: str,
    client: BaseSyncClient,
    federation_url: str = None,
    use_http: bool = False,
) -> FederationRecord:
    """Sync worker behind `resolve_stellar_address`: discover the federation
    server from the domain's stellar.toml (unless `federation_url` is given)
    and perform a "name" lookup for the address.
    """
    parts = split_stellar_address(stellar_address)
    domain = parts["domain"]
    if federation_url is None:
        # NOTE(review): `client` is not forwarded to fetch_stellar_toml here,
        # unlike __resolve_account_id_sync — confirm this is intentional.
        federation_url = fetch_stellar_toml(domain, use_http=use_http).get(  # type: ignore[union-attr]
            FEDERATION_SERVER_KEY
        )
        if federation_url is None:
            raise FederationServerNotFoundError(
                f"Unable to find federation server at {domain}."
            )
    # Federation "name" query, e.g. q=bob*stellar.org
    raw_resp = client.get(federation_url, {"type": "name", "q": stellar_address})
    return __handle_raw_response(raw_resp, stellar_address=stellar_address)
async def __resolve_stellar_address_async(
    stellar_address: str,
    client: BaseAsyncClient,
    federation_url: str = None,
    use_http: bool = False,
) -> FederationRecord:
    """Async worker behind `resolve_stellar_address`: discover the federation
    server from the domain's stellar.toml (unless `federation_url` is given)
    and perform a "name" lookup for the address.
    """
    parts = split_stellar_address(stellar_address)
    domain = parts["domain"]
    if federation_url is None:
        federation_url = (
            await fetch_stellar_toml(domain, client=client, use_http=use_http)  # type: ignore[misc]
        ).get(FEDERATION_SERVER_KEY)
        if federation_url is None:
            raise FederationServerNotFoundError(
                f"Unable to find federation server at {domain}."
            )
    # Federation "name" query, e.g. q=bob*stellar.org
    raw_resp = await client.get(federation_url, {"type": "name", "q": stellar_address})
    return __handle_raw_response(raw_resp, stellar_address=stellar_address)
def __resolve_account_id_sync(
    account_id: str,
    domain: str = None,
    federation_url: str = None,
    client=None,
    use_http: bool = False,
) -> FederationRecord:
    """Sync worker behind `resolve_account_id`: when `domain` is given, the
    federation server URL is (re)discovered from that domain's stellar.toml,
    overriding any `federation_url` passed in; then an "id" lookup is made.
    """
    if domain is not None:
        stellar_toml = fetch_stellar_toml(domain, client, use_http)
        federation_url = stellar_toml.get(  # type: ignore[union-attr]
            FEDERATION_SERVER_KEY
        )
        if federation_url is None:
            raise FederationServerNotFoundError(
                f"Unable to find federation server at {domain}."
            )
    # Reverse federation lookup: account ID -> record.
    query = {"type": "id", "q": account_id}
    raw_resp = client.get(federation_url, query)
    return __handle_raw_response(raw_resp, account_id=account_id)
async def __resolve_account_id_async(
    account_id: str,
    domain: str = None,
    federation_url: str = None,
    client=None,
    use_http: bool = False,
) -> FederationRecord:
    """Async worker behind `resolve_account_id`: when `domain` is given, the
    federation server URL is (re)discovered from that domain's stellar.toml,
    overriding any `federation_url` passed in; then an "id" lookup is made.
    """
    if domain is not None:
        federation_url = (await fetch_stellar_toml(domain, client, use_http)).get(  # type: ignore[misc]
            FEDERATION_SERVER_KEY
        )
        if federation_url is None:
            raise FederationServerNotFoundError(
                f"Unable to find federation server at {domain}."
            )
    # Reverse federation lookup: account ID -> record.
    raw_resp = await client.get(federation_url, {"type": "id", "q": account_id})
    return __handle_raw_response(raw_resp, account_id=account_id)
def __handle_raw_response(
    raw_resp: Response, stellar_address=None, account_id=None
) -> FederationRecord:
    """Validate a federation server response and build a FederationRecord.

    Explicitly passed `stellar_address` / `account_id` take precedence over
    the corresponding fields in the response body.

    :raises BadFederationResponseError: when the HTTP status is not 2xx.
    """
    status = raw_resp.status_code
    if status < 200 or status >= 300:
        raise BadFederationResponseError(raw_resp)
    payload = raw_resp.json()
    return FederationRecord(
        account_id=account_id or payload.get("account_id"),
        stellar_address=stellar_address or payload.get("stellar_address"),
        memo_type=payload.get("memo_type"),
        memo=payload.get("memo"),
    )
def split_stellar_address(address: str) -> Dict[str, str]:
    """Split a Stellar address of the form ``name*domain`` into its parts.

    :raises InvalidFederationAddress: when the address does not contain
        exactly one separator.
    """
    pieces = address.split(SEPARATOR)
    if len(pieces) == 2:
        name, domain = pieces
        return {"name": name, "domain": domain}
    raise InvalidFederationAddress(
        "Address should be a valid address, such as `bob*stellar.org`"
    )
| 37.767932 | 121 | 0.683611 | from typing import Any, Coroutine, Dict, Optional, Union
from ..client.base_async_client import BaseAsyncClient
from ..client.base_sync_client import BaseSyncClient
from ..client.requests_client import RequestsClient
from ..client.response import Response
from ..exceptions import ValueError
from .exceptions import (
BadFederationResponseError,
FederationServerNotFoundError,
InvalidFederationAddress,
)
from .stellar_toml import fetch_stellar_toml
SEPARATOR = "*"
FEDERATION_SERVER_KEY = "FEDERATION_SERVER"
class FederationRecord:
    """A single result returned by a federation server lookup."""

    def __init__(self, account_id, stellar_address, memo_type, memo):
        # Stellar account ID resolved for the address.
        self.account_id: str = account_id
        # Address in ``name*domain`` form.
        self.stellar_address: str = stellar_address
        # Optional memo type accompanying the record; semantics are defined
        # by the federation protocol, not enforced here.
        self.memo_type: Optional[str] = memo_type
        # Optional memo value paired with ``memo_type``.
        self.memo: Optional[str] = memo

    def __str__(self):
        """Debug-friendly representation listing all four fields."""
        return (
            f"<FederationRecord [account_id={self.account_id}, stellar_address={self.stellar_address}, "
            f"memo_type={self.memo_type}, memo={self.memo}]>"
        )

    def __eq__(self, other: object) -> bool:
        """Field-wise equality; other types yield NotImplemented."""
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.account_id == other.account_id
            and self.stellar_address == other.stellar_address
            and self.memo_type == other.memo_type
            and self.memo == other.memo
        )
def resolve_stellar_address(
    stellar_address: str,
    client: Union[BaseAsyncClient, BaseSyncClient] = None,
    federation_url: str = None,
    use_http: bool = False,
) -> Union[Coroutine[Any, Any, FederationRecord], FederationRecord]:
    """Get the federation record for a Stellar address (``name*domain``).

    Dispatches to a sync or async worker depending on the client type; with
    an async client the return value is a coroutine.

    :param stellar_address: The address to look up, e.g. ``bob*stellar.org``.
    :param client: Http Client used to send the request; defaults to
        ``RequestsClient`` when omitted.
    :param federation_url: The federation server URL; discovered from the
        domain's stellar.toml when not given.
    :param use_http: Specifies whether the request should go over plain HTTP
        vs HTTPS. Note it is recommend that you *always* use HTTPS.
    :return: Federation record.
    """
    if not client:
        client = RequestsClient()
    if isinstance(client, BaseAsyncClient):
        return __resolve_stellar_address_async(
            stellar_address, client, federation_url, use_http
        )
    elif isinstance(client, BaseSyncClient):
        return __resolve_stellar_address_sync(
            stellar_address, client, federation_url, use_http
        )
    else:
        raise TypeError(
            "This `client` class should be an instance "
            "of `stellar_sdk.client.base_async_client.BaseAsyncClient` "
            "or `stellar_sdk.client.base_sync_client.BaseSyncClient`."
        )
def resolve_account_id(
account_id: str,
domain: str = None,
federation_url: str = None,
client: Union[BaseAsyncClient, BaseSyncClient] = None,
use_http: bool = False,
) -> Union[Coroutine[Any, Any, FederationRecord], FederationRecord]:
if domain is None and federation_url is None:
raise ValueError("You should provide either `domain` or `federation_url`.")
if not client:
client = RequestsClient()
if isinstance(client, BaseAsyncClient):
return __resolve_account_id_async(
account_id, domain, federation_url, client, use_http
)
elif isinstance(client, BaseSyncClient):
return __resolve_account_id_sync(
account_id, domain, federation_url, client, use_http
)
else:
raise TypeError(
"This `client` class should be an instance "
"of `stellar_sdk.client.base_async_client.BaseAsyncClient` "
"or `stellar_sdk.client.base_sync_client.BaseSyncClient`."
)
def __resolve_stellar_address_sync(
stellar_address: str,
client: BaseSyncClient,
federation_url: str = None,
use_http: bool = False,
) -> FederationRecord:
parts = split_stellar_address(stellar_address)
domain = parts["domain"]
if federation_url is None:
federation_url = fetch_stellar_toml(domain, use_http=use_http).get(
FEDERATION_SERVER_KEY
)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
raw_resp = client.get(federation_url, {"type": "name", "q": stellar_address})
return __handle_raw_response(raw_resp, stellar_address=stellar_address)
async def __resolve_stellar_address_async(
stellar_address: str,
client: BaseAsyncClient,
federation_url: str = None,
use_http: bool = False,
) -> FederationRecord:
parts = split_stellar_address(stellar_address)
domain = parts["domain"]
if federation_url is None:
federation_url = (
await fetch_stellar_toml(domain, client=client, use_http=use_http)
).get(FEDERATION_SERVER_KEY)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
raw_resp = await client.get(federation_url, {"type": "name", "q": stellar_address})
return __handle_raw_response(raw_resp, stellar_address=stellar_address)
def __resolve_account_id_sync(
account_id: str,
domain: str = None,
federation_url: str = None,
client=None,
use_http: bool = False,
) -> FederationRecord:
if domain is not None:
federation_url = fetch_stellar_toml(domain, client, use_http).get(
FEDERATION_SERVER_KEY
)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
raw_resp = client.get(federation_url, {"type": "id", "q": account_id})
return __handle_raw_response(raw_resp, account_id=account_id)
async def __resolve_account_id_async(
account_id: str,
domain: str = None,
federation_url: str = None,
client=None,
use_http: bool = False,
) -> FederationRecord:
if domain is not None:
federation_url = (await fetch_stellar_toml(domain, client, use_http)).get(
FEDERATION_SERVER_KEY
)
if federation_url is None:
raise FederationServerNotFoundError(
f"Unable to find federation server at {domain}."
)
raw_resp = await client.get(federation_url, {"type": "id", "q": account_id})
return __handle_raw_response(raw_resp, account_id=account_id)
def __handle_raw_response(
raw_resp: Response, stellar_address=None, account_id=None
) -> FederationRecord:
if not 200 <= raw_resp.status_code < 300:
raise BadFederationResponseError(raw_resp)
data = raw_resp.json()
account_id = account_id or data.get("account_id")
stellar_address = stellar_address or data.get("stellar_address")
memo_type = data.get("memo_type")
memo = data.get("memo")
return FederationRecord(
account_id=account_id,
stellar_address=stellar_address,
memo_type=memo_type,
memo=memo,
)
def split_stellar_address(address: str) -> Dict[str, str]:
parts = address.split(SEPARATOR)
if len(parts) != 2:
raise InvalidFederationAddress(
"Address should be a valid address, such as `bob*stellar.org`"
)
name, domain = parts
return {"name": name, "domain": domain}
| true | true |
f73b65df5a71053517f328f5398688d89886cefe | 5,652 | py | Python | tests/unit/types/document/test_converters.py | rudranshsharma123/jina | cdc66eb44fe1ae5c84ba6ddfe0a6173476f773bb | [
"Apache-2.0"
] | null | null | null | tests/unit/types/document/test_converters.py | rudranshsharma123/jina | cdc66eb44fe1ae5c84ba6ddfe0a6173476f773bb | [
"Apache-2.0"
] | null | null | null | tests/unit/types/document/test_converters.py | rudranshsharma123/jina | cdc66eb44fe1ae5c84ba6ddfe0a6173476f773bb | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import pytest
from jina import Document, __windows__
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_uri_to_blob():
doc = Document(uri=os.path.join(cur_dir, 'test.png'))
doc.convert_image_uri_to_blob()
assert isinstance(doc.blob, np.ndarray)
assert doc.mime_type == 'image/png'
assert doc.blob.shape == (85, 152, 3) # h,w,c
def test_datauri_to_blob():
doc = Document(uri=os.path.join(cur_dir, 'test.png'))
doc.convert_uri_to_datauri()
doc.convert_image_datauri_to_blob()
assert isinstance(doc.blob, np.ndarray)
assert doc.mime_type == 'image/png'
assert doc.blob.shape == (85, 152, 3) # h,w,c
def test_buffer_to_blob():
doc = Document(uri=os.path.join(cur_dir, 'test.png'))
doc.convert_uri_to_buffer()
doc.convert_image_buffer_to_blob()
assert isinstance(doc.blob, np.ndarray)
assert doc.mime_type == 'image/png'
assert doc.blob.shape == (85, 152, 3) # h,w,c
def test_convert_buffer_to_blob():
    # Round-trip: raw bytes -> blob must reproduce the original array, and
    # the content type must switch from 'buffer' to 'blob'.
    rand_state = np.random.RandomState(0)
    array = rand_state.random([10, 10])
    doc = Document(content=array.tobytes())
    assert doc.content_type == 'buffer'
    intialiazed_buffer = doc.buffer
    doc.convert_buffer_to_blob()
    assert doc.content_type == 'blob'
    converted_buffer_in_one_of = doc.buffer
    # The stored buffer representation changes after conversion.
    assert intialiazed_buffer != converted_buffer_in_one_of
    np.testing.assert_almost_equal(doc.content.reshape([10, 10]), array)
@pytest.mark.parametrize('resize_method', ['BILINEAR', 'NEAREST', 'BICUBIC', 'LANCZOS'])
@pytest.mark.parametrize(
'arr_size, channel_axis, height, width',
[
((32 * 28), -1, None, None), # single line
([32, 28], -1, None, None), # without channel info
([32, 28, 3], -1, None, None), # h, w, c (rgb)
([3, 32, 28], 0, None, None), # c, h, w (rgb)
([1, 32, 28], 0, None, None), # c, h, w, (greyscale)
([32, 28, 1], -1, None, None), # h, w, c, (greyscale)
((32 * 28), -1, 896, 1), # single line
([32, 28], -1, 32, 28), # without channel info
([32, 28, 3], -1, 32, 28), # h, w, c (rgb)
([3, 32, 28], 0, 32, 28), # c, h, w (rgb)
([1, 32, 28], 0, 32, 28), # c, h, w, (greyscale)
([32, 28, 1], -1, 32, 28), # h, w, c, (greyscale)
],
)
def test_convert_image_blob_to_uri(arr_size, channel_axis, width, height, resize_method):
doc = Document(content=np.random.randint(0, 255, arr_size))
assert doc.blob.any()
assert not doc.uri
doc.convert_image_blob_to_uri(
channel_axis=channel_axis, width=width, height=height, resize_method=resize_method
)
assert doc.uri.startswith('data:image/png;base64,')
assert doc.mime_type == 'image/png'
@pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
)
@pytest.mark.parametrize(
'uri, mimetype',
[
(__file__, 'text/x-python'),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_buffer(uri, mimetype):
d = Document(uri=uri)
assert not d.buffer
d.convert_uri_to_buffer()
assert d.buffer
assert d.mime_type == mimetype
@pytest.mark.parametrize(
'converter', ['convert_buffer_to_uri', 'convert_content_to_uri']
)
def test_convert_buffer_to_uri(converter):
d = Document(content=open(__file__).read().encode(), mime_type='text/x-python')
assert d.buffer
getattr(d, converter)()
assert d.uri.startswith('data:text/x-python;')
@pytest.mark.parametrize('converter', ['convert_text_to_uri', 'convert_content_to_uri'])
def test_convert_text_to_uri(converter):
d = Document(content=open(__file__).read(), mime_type='text/x-python')
assert d.text
getattr(d, converter)()
assert d.uri.startswith('data:text/x-python;')
@pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
)
@pytest.mark.parametrize(
'uri, mimetype',
[
pytest.param(
__file__,
'text/x-python',
marks=pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
),
),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_text(uri, mimetype):
doc = Document(uri=uri, mime_type=mimetype)
doc.convert_uri_to_text()
if mimetype == 'text/html':
assert '<!doctype html>' in doc.text
elif mimetype == 'text/x-python':
text_from_file = open(__file__).read()
assert doc.text == text_from_file
def test_convert_text_to_uri_and_back():
text_from_file = open(__file__).read()
doc = Document(content=text_from_file, mime_type='text/x-python')
assert doc.text
assert doc.mime_type == 'text/x-python'
doc.convert_text_to_uri()
doc.convert_uri_to_text()
assert doc.mime_type == 'text/plain'
assert doc.text == text_from_file
def test_convert_content_to_uri():
d = Document(content=np.random.random([10, 10]))
with pytest.raises(NotImplementedError):
d.convert_content_to_uri()
@pytest.mark.parametrize(
'uri, mimetype',
[
(__file__, 'text/x-python'),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_data_uri(uri, mimetype):
doc = Document(uri=uri, mime_type=mimetype)
doc.convert_uri_to_datauri()
assert doc.uri.startswith(f'data:{mimetype}')
assert doc.mime_type == mimetype
| 32.482759 | 90 | 0.646851 | import os
import numpy as np
import pytest
from jina import Document, __windows__
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_uri_to_blob():
doc = Document(uri=os.path.join(cur_dir, 'test.png'))
doc.convert_image_uri_to_blob()
assert isinstance(doc.blob, np.ndarray)
assert doc.mime_type == 'image/png'
assert doc.blob.shape == (85, 152, 3)
def test_datauri_to_blob():
doc = Document(uri=os.path.join(cur_dir, 'test.png'))
doc.convert_uri_to_datauri()
doc.convert_image_datauri_to_blob()
assert isinstance(doc.blob, np.ndarray)
assert doc.mime_type == 'image/png'
assert doc.blob.shape == (85, 152, 3)
def test_buffer_to_blob():
doc = Document(uri=os.path.join(cur_dir, 'test.png'))
doc.convert_uri_to_buffer()
doc.convert_image_buffer_to_blob()
assert isinstance(doc.blob, np.ndarray)
assert doc.mime_type == 'image/png'
assert doc.blob.shape == (85, 152, 3)
def test_convert_buffer_to_blob():
rand_state = np.random.RandomState(0)
array = rand_state.random([10, 10])
doc = Document(content=array.tobytes())
assert doc.content_type == 'buffer'
intialiazed_buffer = doc.buffer
doc.convert_buffer_to_blob()
assert doc.content_type == 'blob'
converted_buffer_in_one_of = doc.buffer
assert intialiazed_buffer != converted_buffer_in_one_of
np.testing.assert_almost_equal(doc.content.reshape([10, 10]), array)
@pytest.mark.parametrize('resize_method', ['BILINEAR', 'NEAREST', 'BICUBIC', 'LANCZOS'])
@pytest.mark.parametrize(
'arr_size, channel_axis, height, width',
[
((32 * 28), -1, None, None),
([32, 28], -1, None, None),
([32, 28, 3], -1, None, None),
([3, 32, 28], 0, None, None),
([1, 32, 28], 0, None, None),
([32, 28, 1], -1, None, None),
((32 * 28), -1, 896, 1),
([32, 28], -1, 32, 28),
([32, 28, 3], -1, 32, 28),
([3, 32, 28], 0, 32, 28),
([1, 32, 28], 0, 32, 28),
([32, 28, 1], -1, 32, 28),
],
)
def test_convert_image_blob_to_uri(arr_size, channel_axis, width, height, resize_method):
doc = Document(content=np.random.randint(0, 255, arr_size))
assert doc.blob.any()
assert not doc.uri
doc.convert_image_blob_to_uri(
channel_axis=channel_axis, width=width, height=height, resize_method=resize_method
)
assert doc.uri.startswith('data:image/png;base64,')
assert doc.mime_type == 'image/png'
@pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
)
@pytest.mark.parametrize(
'uri, mimetype',
[
(__file__, 'text/x-python'),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_buffer(uri, mimetype):
d = Document(uri=uri)
assert not d.buffer
d.convert_uri_to_buffer()
assert d.buffer
assert d.mime_type == mimetype
@pytest.mark.parametrize(
'converter', ['convert_buffer_to_uri', 'convert_content_to_uri']
)
def test_convert_buffer_to_uri(converter):
d = Document(content=open(__file__).read().encode(), mime_type='text/x-python')
assert d.buffer
getattr(d, converter)()
assert d.uri.startswith('data:text/x-python;')
@pytest.mark.parametrize('converter', ['convert_text_to_uri', 'convert_content_to_uri'])
def test_convert_text_to_uri(converter):
d = Document(content=open(__file__).read(), mime_type='text/x-python')
assert d.text
getattr(d, converter)()
assert d.uri.startswith('data:text/x-python;')
@pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
)
@pytest.mark.parametrize(
'uri, mimetype',
[
pytest.param(
__file__,
'text/x-python',
marks=pytest.mark.xfail(
condition=__windows__, reason='x-python is not detected on windows CI'
),
),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_text(uri, mimetype):
doc = Document(uri=uri, mime_type=mimetype)
doc.convert_uri_to_text()
if mimetype == 'text/html':
assert '<!doctype html>' in doc.text
elif mimetype == 'text/x-python':
text_from_file = open(__file__).read()
assert doc.text == text_from_file
def test_convert_text_to_uri_and_back():
text_from_file = open(__file__).read()
doc = Document(content=text_from_file, mime_type='text/x-python')
assert doc.text
assert doc.mime_type == 'text/x-python'
doc.convert_text_to_uri()
doc.convert_uri_to_text()
assert doc.mime_type == 'text/plain'
assert doc.text == text_from_file
def test_convert_content_to_uri():
d = Document(content=np.random.random([10, 10]))
with pytest.raises(NotImplementedError):
d.convert_content_to_uri()
@pytest.mark.parametrize(
'uri, mimetype',
[
(__file__, 'text/x-python'),
('http://google.com/index.html', 'text/html'),
('https://google.com/index.html', 'text/html'),
],
)
def test_convert_uri_to_data_uri(uri, mimetype):
doc = Document(uri=uri, mime_type=mimetype)
doc.convert_uri_to_datauri()
assert doc.uri.startswith(f'data:{mimetype}')
assert doc.mime_type == mimetype
| true | true |
f73b66c0fc50e179b49e0fa95dfebcc24277c7b8 | 997 | py | Python | mmseg/models/scalar_schedulers/step.py | evgeniya-egupova/mmsegmentation | 3857f19321ad6af41c8a6af364898ee050225f4c | [
"Apache-2.0"
] | null | null | null | mmseg/models/scalar_schedulers/step.py | evgeniya-egupova/mmsegmentation | 3857f19321ad6af41c8a6af364898ee050225f4c | [
"Apache-2.0"
] | null | null | null | mmseg/models/scalar_schedulers/step.py | evgeniya-egupova/mmsegmentation | 3857f19321ad6af41c8a6af364898ee050225f4c | [
"Apache-2.0"
] | null | null | null | import numpy as np
from ..builder import SCALAR_SCHEDULERS
from .base import BaseScalarScheduler
@SCALAR_SCHEDULERS.register_module()
class StepScalarScheduler(BaseScalarScheduler):
def __init__(self, scales, num_iters, by_epoch=False):
super(StepScalarScheduler, self).__init__()
self.by_epoch = by_epoch
assert len(scales) == len(num_iters) + 1
assert len(scales) > 0
self._scales = list(scales)
self._iter_ranges = list(num_iters) + [np.iinfo(np.int32).max]
def _get_value(self, step, epoch_size):
if step is None:
return float(self._scales[-1])
out_scale_idx = 0
for iter_range in self._iter_ranges:
if self.by_epoch:
iter_threshold = epoch_size * iter_range
else:
iter_threshold = iter_range
if step < iter_threshold:
break
out_scale_idx += 1
return float(self._scales[out_scale_idx])
| 26.945946 | 70 | 0.62989 | import numpy as np
from ..builder import SCALAR_SCHEDULERS
from .base import BaseScalarScheduler
@SCALAR_SCHEDULERS.register_module()
class StepScalarScheduler(BaseScalarScheduler):
def __init__(self, scales, num_iters, by_epoch=False):
super(StepScalarScheduler, self).__init__()
self.by_epoch = by_epoch
assert len(scales) == len(num_iters) + 1
assert len(scales) > 0
self._scales = list(scales)
self._iter_ranges = list(num_iters) + [np.iinfo(np.int32).max]
def _get_value(self, step, epoch_size):
if step is None:
return float(self._scales[-1])
out_scale_idx = 0
for iter_range in self._iter_ranges:
if self.by_epoch:
iter_threshold = epoch_size * iter_range
else:
iter_threshold = iter_range
if step < iter_threshold:
break
out_scale_idx += 1
return float(self._scales[out_scale_idx])
| true | true |
f73b672bbd29fa50d1c7e8e9e447edad247f7fbd | 12,768 | py | Python | ArbitraryCS_main.py | Raeyi/multipooling-AdaPECT | 9632b98ff1612344de798321298f6488f1c303b0 | [
"Apache-2.0"
] | null | null | null | ArbitraryCS_main.py | Raeyi/multipooling-AdaPECT | 9632b98ff1612344de798321298f6488f1c303b0 | [
"Apache-2.0"
] | null | null | null | ArbitraryCS_main.py | Raeyi/multipooling-AdaPECT | 9632b98ff1612344de798321298f6488f1c303b0 | [
"Apache-2.0"
] | null | null | null | # coding:utf8
import torch as t
import torchvision as tv
import torchnet as tnt
from torch.utils import data
from transformer_net import TransformerNet
import utils
from PackedVGG import Vgg16
from torch.nn import functional as F
import tqdm
import os
import ipdb
# from WCT2_train import WCT2
# import model
from LapSobGaus_train import Lap_Sob_Gaus
import net
import Ovodus_Laplace_model
import utils_
from WCT2_train import train_transform
from tensorboardX import SummaryWriter
from pathlib import Path
from torchvision.utils import save_image
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
class Config(object):
    """Hyper-parameter / path container; attributes can be overridden via
    keyword arguments passed to `train` / `stylize`."""

    # General Args
    use_gpu = True
    model_path = None  # pretrained model path (for resuming training or for test)

    # Train Args
    image_size = 448  # image crop size for training
    batch_size = 2
    data_root = r'F:\DataSets\train2017'  # dataset root: $data_root/<class>/a.jpg
    num_workers = 4  # dataloader num of workers
    lr = 1e-4
    epoches = 20  # total epochs to train
    content_weight = 1e10  # weight of content_loss
    style_weight = 1e2  # weight of style_loss
    style_path = 'style_input'  # style image directory
    env = 'onlyencodercontent_58_Laps_test_nores_noDynamic_10_2'  # visdom env
    plot_every = 1  # visualize in visdom every `plot_every` batches
    debug_file = '/tmp/debugnn'  # touch $debug_file to interrupt and enter ipdb

    # Test Args
    content_path = 'input.png'  # input file to do style transfer [for test]
    result_path = 'output.png'  # style transfer result [for test]
    option_unpool = 'sum'
    cpu = False  # force CPU even if CUDA is available
    # Feature levels at which style transfer is applied.
    transfer_at_encoder = True
    transfer_at_decoder = True
    transfer_at_skip = True
    verbose = True
    save_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_experiments_10_2'
    log_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_logs_10_2'
    lr_decay = 5e-5  # inverse-time lr decay coefficient
def adjust_learning_rate(lr, optimizer, iteration_count, lr_decay):
    """Apply inverse-time decay to every param group of `optimizer`.

    The new rate is ``lr / (1 + lr_decay * iteration_count)``.

    :param lr: base learning rate.
    :param optimizer: torch-style optimizer exposing ``param_groups``.
    :param iteration_count: current iteration index.
    :param lr_decay: decay coefficient per iteration.
    """
    # Bind a fresh name instead of rebinding the `lr` parameter, which the
    # original did (shadowing made the function harder to read).
    new_lr = lr / (1.0 + lr_decay * iteration_count)
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
def train(**kwargs):
    """Train the Lap_Sob_Gaus encoder/decoder style-transfer network.

    Keyword arguments override the matching attributes on `Config`
    (e.g. ``train(lr=1e-5, batch_size=4)``). For every content batch the
    inner loop iterates over all style images, optimizing encoder and
    decoder with separate Adam optimizers; losses and images are plotted to
    visdom, TensorBoard logs go to ``opt.log_dir``, and checkpoints are
    written under ``opt.save_dir`` and ``checkpoints/``.
    """
    opt = Config()
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    # Fall back to CPU when CUDA is unavailable, regardless of opt.use_gpu.
    device = 'cpu' if opt.cpu or not t.cuda.is_available() else 'cuda:0'
    device = t.device(device)
    vis = utils_.Visualizer(opt.env)
    save_dir = Path(opt.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    log_dir = Path(opt.log_dir)
    log_dir.mkdir(exist_ok=True, parents=True)
    writer = SummaryWriter(log_dir=str(log_dir))
    # Data loading; images stay in [0, 1] (no ImageNet normalization).
    transfroms = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
    ])
    dataset = tv.datasets.ImageFolder(opt.data_root, transfroms)
    dataloader = data.DataLoader(dataset, opt.batch_size)
    print('come!')
    # Feature levels at which style transfer is applied.
    transfer_at = set()
    if opt.transfer_at_encoder:
        transfer_at.add('encoder')
    if opt.transfer_at_decoder:
        transfer_at.add('decoder')
    if opt.transfer_at_skip:
        transfer_at.add('skip')
    # NOTE(review): `wct2` is created but never used below — confirm it can
    # be removed.
    wct2 = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool=opt.option_unpool, device=device,
                        verbose=False)
    encoder = Ovodus_Laplace_model.Lap_Sob_GausEncoder(opt.option_unpool).to(device)
    decoder = Ovodus_Laplace_model.Lap_Sob_GausDecoder(opt.option_unpool).to(device)
    laps = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool='sum', device=device)
    network = net.Net(encoder, decoder)
    network.train()
    network.to(device)
    transformer = network
    if opt.model_path:
        # Resume from a pretrained checkpoint (loaded onto CPU first).
        transformer.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
        transformer.to(device)
    # Separate optimizers for encoder and decoder.
    enoptimizer = t.optim.Adam(network.encoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))
    deoptimizer = t.optim.Adam(network.decoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))
    # Running averages of the two loss terms for visualization.
    style_meter = tnt.meter.AverageValueMeter()
    content_meter = tnt.meter.AverageValueMeter()
    for epoch in range(opt.epoches):
        content_meter.reset()
        style_meter.reset()
        for ii, (x, _) in tqdm.tqdm(enumerate(dataloader)):
            if epoch == 0:
                # Inverse-time learning-rate decay, applied only during the
                # first epoch.
                adjust_learning_rate(opt.lr, enoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)
                adjust_learning_rate(opt.lr, deoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)
                print(opt.lr)
            # Re-created for every content batch; iterates all style images.
            style_dataloader = utils_.get_style_data(opt.style_path, opt.batch_size)
            for jj, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):
                style = style.expand(opt.batch_size, 3, 256, 256)
                vis.img('style', (style.data[0]).clamp(min=0, max=1))
                style = style.to(device)
                # One optimization step on (content x, current style).
                enoptimizer.zero_grad()
                deoptimizer.zero_grad()
                x = x.to(device)
                y, content_feats, content_loss, style_loss = network(x, style, Laps=laps)
                content_loss *= opt.content_weight
                style_loss *= opt.style_weight
                total_loss = content_loss + style_loss
                total_loss.backward()
                enoptimizer.step()
                deoptimizer.step()
                # Loss smoothing for visualization.
                content_meter.add(content_loss.item())
                style_meter.add(style_loss.item())
                if ii % 50 == 1:
                    print('\n')
                    print('iters:', ii, 'total_loss:', total_loss, 'loss_c:', content_loss, 'loss_s: ', style_loss)
                if (ii + 1) % opt.plot_every == 0:
                    if os.path.exists(opt.debug_file):
                        ipdb.set_trace()
                    # Plot losses and show current input/output plus the four
                    # decoder-level feature maps.
                    vis.plot('content_loss', content_meter.value()[0])
                    vis.plot('style_loss', style_meter.value()[0])
                    vis.img('output1', (y.data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('input1', (x.data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_1', (content_feats['decoder'][0][0].data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_2', (content_feats['decoder'][1][0].data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_3', (content_feats['decoder'][2][0].data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_4', (content_feats['decoder'][3][0].data.cpu()[0]).clamp(min=0, max=1))
                if (ii) % 1000 == 0:
                    # Periodic mid-epoch checkpoints, saved as CPU tensors.
                    if not os.path.exists(save_dir / 'epoch_{:d}'.format(epoch)):
                        os.makedirs(save_dir / 'epoch_{:d}'.format(epoch))
                    de_state_dict = network.decoder.state_dict()
                    en_state_dict = network.encoder.state_dict()
                    for key in de_state_dict.keys():
                        de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))
                    t.save(de_state_dict, save_dir / 'epoch_{:d}'.format(epoch) /
                           'decoder_iter_{:d}.pth.tar'.format(ii + 1))
                    for key in en_state_dict.keys():
                        en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))
                    t.save(en_state_dict, save_dir / 'epoch_{:d}'.format(epoch) /
                           'encoder_iter_{:d}.pth.tar'.format(ii + 1))
        # End-of-epoch encoder/decoder checkpoints (CPU tensors).
        de_state_dict = network.decoder.state_dict()
        en_state_dict = network.encoder.state_dict()
        for key in de_state_dict.keys():
            de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))
        t.save(de_state_dict, save_dir /
               'epoch_decoder_iter_{:d}.pth.tar'.format(epoch + 1))
        for key in en_state_dict.keys():
            en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))
        t.save(en_state_dict, save_dir /
               'epoch_encoder_iter_{:d}.pth.tar'.format(epoch + 1))
        # Save visdom state and a full-network checkpoint.
        vis.save([opt.env])
        t.save(network.state_dict(), 'checkpoints/epoch_%s_style.pth' % epoch)
    writer.close()
@t.no_grad()
def stylize(**kwargs):
    """Perform style transfer on a single image.

    Loads ``opt.content_path``, runs it through a pretrained TransformerNet
    checkpoint at ``opt.model_path``, and writes the result to
    ``opt.result_path``. Keyword arguments override `Config` attributes.
    """
    opt = Config()
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')
    # Input image preprocess: to tensor, scaled to [0, 255].
    content_image = tv.datasets.folder.default_loader(opt.content_path)
    content_transform = tv.transforms.Compose([
        tv.transforms.ToTensor(),
        tv.transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device).detach()
    # Model setup (checkpoint loaded onto CPU first, then moved to device).
    style_model = TransformerNet().eval()
    style_model.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
    style_model.to(device)
    # Style transfer; rescale back to [0, 1] before saving.
    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image(((output_data / 255)).clamp(min=0, max=1), opt.result_path)
if __name__ == '__main__':
    import fire
    # fire.Fire() exposes every module-level callable (train, stylize, ...)
    # as a CLI sub-command. The original additionally called train()
    # unconditionally afterwards, which re-ran training even when a
    # different command (e.g. stylize) had been requested; that stray
    # call is removed.
    fire.Fire()
import torch as t
import torchvision as tv
import torchnet as tnt
from torch.utils import data
from transformer_net import TransformerNet
import utils
from PackedVGG import Vgg16
from torch.nn import functional as F
import tqdm
import os
import ipdb
from LapSobGaus_train import Lap_Sob_Gaus
import net
import Ovodus_Laplace_model
import utils_
from WCT2_train import train_transform
from tensorboardX import SummaryWriter
from pathlib import Path
from torchvision.utils import save_image
# ImageNet channel statistics; NOTE(review): these four constants are not
# referenced in the visible code -- presumably kept for normalization
# elsewhere. Confirm before removing.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
class Config(object):
    """Default hyper-parameters and paths; train()/stylize() kwargs override these."""
    # Hardware selection (stylize uses use_gpu, train uses cpu).
    use_gpu = True
    model_path = None  # optional checkpoint to resume from
    # Data pipeline.
    image_size = 448
    batch_size = 2
    data_root = r'F:\DataSets\train2017'
    num_workers = 4
    # Optimisation.
    lr = 1e-4
    epoches = 20
    content_weight = 1e10
    style_weight = 1e2
    style_path = 'style_input'
    # Visdom environment name and plotting cadence.
    env = 'onlyencodercontent_58_Laps_test_nores_noDynamic_10_2'
    plot_every = 1
    # Touch this file to drop into ipdb during training.
    debug_file = '/tmp/debugnn'
    content_path = 'input.png'
    result_path = 'output.png'
    option_unpool = 'sum'
    cpu = False
    # Which stages of the network participate in feature transfer.
    transfer_at_encoder = True
    transfer_at_decoder = True
    transfer_at_skip = True
    verbose = True
    # Checkpoint and TensorBoard output directories.
    save_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_experiments_10_2'
    log_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_logs_10_2'
    lr_decay = 5e-5
def adjust_learning_rate(lr, optimizer, iteration_count, lr_decay):
    """Apply inverse-time learning-rate decay to every parameter group.

    The effective rate ``lr / (1 + lr_decay * iteration_count)`` is written
    into each ``param_group`` of *optimizer* in place.
    """
    decayed = lr / (1.0 + lr_decay * iteration_count)
    for group in optimizer.param_groups:
        group['lr'] = decayed
def train(**kwargs):
    """Train the Lap/Sob/Gaus encoder-decoder style-transfer network.

    Keyword arguments override the defaults declared on ``Config``.

    NOTE(review): three lines of this function were corrupted in the
    source (they lost their beginnings). The lines marked RECONSTRUCTED
    below were rebuilt from the surviving references to ``transfer_at``,
    ``epoch``/``content_meter``/``style_meter`` and ``style_dataloader``;
    confirm them against the upstream repository.
    """
    opt = Config()
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    device = 'cpu' if opt.cpu or not t.cuda.is_available() else 'cuda:0'
    device = t.device(device)
    vis = utils_.Visualizer(opt.env)

    # Create checkpoint and TensorBoard directories up front.
    save_dir = Path(opt.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    log_dir = Path(opt.log_dir)
    log_dir.mkdir(exist_ok=True, parents=True)
    writer = SummaryWriter(log_dir=str(log_dir))

    # Content pipeline: resize, centre-crop and convert to tensors.
    transforms = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
    ])
    dataset = tv.datasets.ImageFolder(opt.data_root, transforms)
    dataloader = data.DataLoader(dataset, opt.batch_size)
    print('come!')

    # RECONSTRUCTED (corrupted line read "at_encoder:"): collect the set
    # of stages at which feature transfer happens.
    transfer_at = set()
    if opt.transfer_at_encoder:
        transfer_at.add('encoder')
    if opt.transfer_at_decoder:
        transfer_at.add('decoder')
    if opt.transfer_at_skip:
        transfer_at.add('skip')
    wct2 = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool=opt.option_unpool,
                        device=device, verbose=False)
    encoder = Ovodus_Laplace_model.Lap_Sob_GausEncoder(opt.option_unpool).to(device)
    decoder = Ovodus_Laplace_model.Lap_Sob_GausDecoder(opt.option_unpool).to(device)
    laps = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool='sum', device=device)
    network = net.Net(encoder, decoder)
    network.train()
    network.to(device)
    transformer = network
    if opt.model_path:
        # Resume from an existing checkpoint.
        transformer.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
        transformer.to(device)
    enoptimizer = t.optim.Adam(network.encoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))
    deoptimizer = t.optim.Adam(network.decoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))

    # RECONSTRUCTED (corrupted line read "range(opt.epoches):"): running
    # loss meters plus the epoch loop header.
    content_meter = tnt.meter.AverageValueMeter()
    style_meter = tnt.meter.AverageValueMeter()
    for epoch in range(opt.epoches):
        content_meter.reset()
        style_meter.reset()
        for ii, (x, _) in tqdm.tqdm(enumerate(dataloader)):
            if epoch == 0:
                # Warm up inverse-time LR decay during the first epoch only.
                adjust_learning_rate(opt.lr, enoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)
                adjust_learning_rate(opt.lr, deoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)
                print(opt.lr)
            # RECONSTRUCTED (corrupted line read "taloader = ..."):
            style_dataloader = utils_.get_style_data(opt.style_path, opt.batch_size)
            for jj, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):
                style = style.expand(opt.batch_size, 3, 256, 256)
                vis.img('style', (style.data[0]).clamp(min=0, max=1))
                style = style.to(device)
                enoptimizer.zero_grad()
                deoptimizer.zero_grad()
                x = x.to(device)
                y, content_feats, content_loss, style_loss = network(x, style, Laps=laps)
                content_loss *= opt.content_weight
                style_loss *= opt.style_weight
                total_loss = content_loss + style_loss
                total_loss.backward()
                enoptimizer.step()
                deoptimizer.step()
                content_meter.add(content_loss.item())
                style_meter.add(style_loss.item())
                if ii % 50 == 1:
                    print('\n')
                    print('iters:', ii, 'total_loss:', total_loss, 'loss_c:', content_loss, 'loss_s: ', style_loss)
                if (ii + 1) % opt.plot_every == 0:
                    # Drop into the debugger when the sentinel file exists.
                    if os.path.exists(opt.debug_file):
                        ipdb.set_trace()
                    # Visdom: losses plus de-normalized input/output previews.
                    vis.plot('content_loss', content_meter.value()[0])
                    vis.plot('style_loss', style_meter.value()[0])
                    vis.img('output1', (y.data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('input1', (x.data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_1', (content_feats['decoder'][0][0].data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_2', (content_feats['decoder'][1][0].data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_3', (content_feats['decoder'][2][0].data.cpu()[0]).clamp(min=0, max=1))
                    vis.img('decoder_4', (content_feats['decoder'][3][0].data.cpu()[0]).clamp(min=0, max=1))
                if ii % 1000 == 0:
                    # Periodic snapshots of both halves, moved to CPU first.
                    if not os.path.exists(save_dir / 'epoch_{:d}'.format(epoch)):
                        os.makedirs(save_dir / 'epoch_{:d}'.format(epoch))
                    de_state_dict = network.decoder.state_dict()
                    en_state_dict = network.encoder.state_dict()
                    for key in de_state_dict.keys():
                        de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))
                    t.save(de_state_dict, save_dir / 'epoch_{:d}'.format(epoch) /
                           'decoder_iter_{:d}.pth.tar'.format(ii + 1))
                    for key in en_state_dict.keys():
                        en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))
                    t.save(en_state_dict, save_dir / 'epoch_{:d}'.format(epoch) /
                           'encoder_iter_{:d}.pth.tar'.format(ii + 1))
        # End-of-epoch snapshots of encoder and decoder.
        de_state_dict = network.decoder.state_dict()
        en_state_dict = network.encoder.state_dict()
        for key in de_state_dict.keys():
            de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))
        t.save(de_state_dict, save_dir /
               'epoch_decoder_iter_{:d}.pth.tar'.format(epoch + 1))
        for key in en_state_dict.keys():
            en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))
        t.save(en_state_dict, save_dir /
               'epoch_encoder_iter_{:d}.pth.tar'.format(epoch + 1))
        # Persist the visdom environment and the full network checkpoint.
        vis.save([opt.env])
        t.save(network.state_dict(), 'checkpoints/epoch_%s_style.pth' % epoch)
    writer.close()
@t.no_grad()
def stylize(**kwargs):
    """Apply a trained TransformerNet to a single content image.

    Keyword arguments override the defaults on ``Config`` (notably
    ``content_path``, ``model_path``, ``result_path`` and ``use_gpu``);
    the result is written to ``opt.result_path``.
    """
    opt = Config()
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')
    # Load the content image and rescale pixel values to [0, 255].
    content_image = tv.datasets.folder.default_loader(opt.content_path)
    content_transform = tv.transforms.Compose([
        tv.transforms.ToTensor(),
        tv.transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device).detach()
    # Restore the trained transformer network in inference mode.
    style_model = TransformerNet().eval()
    style_model.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
    style_model.to(device)
    # Forward pass, then map back to [0, 1] and save the result.
    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image(((output_data / 255)).clamp(min=0, max=1), opt.result_path)
if __name__ == '__main__':
    import fire
    # fire.Fire() exposes every module-level callable (train, stylize, ...)
    # as a CLI sub-command. The original additionally called train()
    # unconditionally afterwards, re-running training even when a
    # different command had been requested; that stray call is removed.
    fire.Fire()
f73b67a9de1ad56358596d1aeb0c035c9bcfe833 | 5,933 | py | Python | pyisis/tests/test_invtech.py | rodsenra/pyisis | f5815fd096a463902893f87f309f8117b5705621 | [
"MIT"
] | null | null | null | pyisis/tests/test_invtech.py | rodsenra/pyisis | f5815fd096a463902893f87f309f8117b5705621 | [
"MIT"
] | null | null | null | pyisis/tests/test_invtech.py | rodsenra/pyisis | f5815fd096a463902893f87f309f8117b5705621 | [
"MIT"
] | 2 | 2019-11-08T20:51:54.000Z | 2021-08-17T23:49:48.000Z | # -*- coding: utf-8 -*-
"""
File to test Indexing Techniques
"""
__created__ = "2009-09-14"
__updated__ = "2009-09-14"
__author__ = "João Chaves <joaochaves@gpr.com.br>"
from os.path import join, exists
from os import remove, getcwd
from sys import platform, path
from pyisis.files import MasterFile
from pyisis.config import config as base_config
from pyisis.engine import Engine
import gettext
path.append('../../')
path.append('../')
FIXTURES_DIR = 'fixtures'
def initialize():
    """Prepare test environment"""
    # Install translations first, then boot the engine with the shared config.
    set_i18n(base_config)
    Engine.setup(base_config)
    return Engine.config
def set_i18n(config):
    """Activate i18n in the test environment"""
    # Translation catalogs live in the repository-level locale/ directory.
    localedir = join('..','locale')
    gettext.install('pyisis',localedir=localedir)
    lang1 = gettext.translation('pyisis', languages=[config.LANGUAGE],localedir=localedir)
    lang1.install(unicode=1)
# global setup
config = initialize()
def invcb(total,current):
    # Progress callback passed to MasterFile.invertdb(); intentionally a no-op.
    pass
def test_inv_it_0_4():
    """Check indexing techniques 0-4 against the cds fixture database."""
    db = join(FIXTURES_DIR, 'cds.mst')
    mf = MasterFile(db)
    # Remove stale index files so the inversion below starts clean.
    # FIX: the extensions already carry a leading dot, so the old
    # 'cds.%s' pattern produced names like 'cds..idx' and never matched.
    for ext in ('.idx', '.idx.index', '.idx.lock', '.idx.old', '.idx.tmp'):
        fname = join(FIXTURES_DIR, 'cds%s' % ext)
        if exists(fname):
            remove(fname)
    mf.invertdb(fst=join(FIXTURES_DIR, 'it_0-4.fst'), callback=invcb)
    # FIX: next(gen) instead of gen.next() -- works on Python 2.6+ and
    # Python 3 (generator.next() was removed in Python 3).
    post = next(mf.search('ABEYWICKRAMA, B.A.'))
    assert post.mfn == 60, 'Failed IT 0'
    post = next(mf.search('BOSIAN, G.'))
    assert post.mfn == 2, 'Failed IT 0'
    post = next(mf.search('CENTER FOR NORTHERN EDUCATIONAL RESEARCH, UNIVERSITY OF ALAS'))
    assert post.mfn == 89, 'Failed IT 1'
    post = next(mf.search('HUMANITIES PRESS'))
    assert post.mfn == 143, 'Failed IT 1'
    post = next(mf.search('AGRICULTURE'))
    assert post.mfn == 17, 'Failed IT 2/3'
    post = next(mf.search("CHILDREN'S BOOKS"))
    assert post.mfn == 114, 'Failed IT 2/3'
    post = next(mf.search('EXPERIMENTAL'))
    assert post.mfn == 130, 'Failed IT 4'
    post = next(mf.search("MONOLITHS"))
    assert post.mfn == 20, 'Failed IT 4'
def test_inv_it_5_8():
    """Check indexing techniques 5-8 (prefixed terms) against the cds fixture."""
    db = join(FIXTURES_DIR, 'cds.mst')
    mf = MasterFile(db)
    # Remove stale index files; FIX: extensions carry the dot already, the
    # old 'cds.%s' pattern built 'cds..idx' and never matched anything.
    for ext in ('.idx', '.idx.index', '.idx.lock', '.idx.old', '.idx.tmp'):
        fname = join(FIXTURES_DIR, 'cds%s' % ext)
        if exists(fname):
            remove(fname)
    mf.invertdb(fst=join(FIXTURES_DIR, 'it_5-8.fst'), callback=invcb)
    # FIX: next(gen) replaces the Python-2-only gen.next().
    post = next(mf.search('AU_CHAPMAN, VALENTINE J.'))
    assert post.mfn == 62, 'Failed IT 0 (Prefix)'
    post = next(mf.search('AU_HOLLERWOGER, F.'))
    assert post.mfn == 47, 'Failed IT 0 (Prefix)'
    post = next(mf.search('TECHNIQUE TEST 5: CENTER FOR THE ADVANCED STUDY OF EDUCATION'))
    assert post.mfn == 113, 'Failed IT 5'
    post = next(mf.search('TECHNIQUE TEST 5: LANSING, MICH.'))
    assert post.mfn == 138, 'Failed IT 5'
    post = next(mf.search('TECHNIQUE TEST 6: BRACKISH WATER'))
    assert post.mfn == 48, 'Failed IT 6/7'
    post = next(mf.search('TECHNIQUE TEST 6: INFORMATION/LIBRARY FINANCING'))
    assert post.mfn == 136, 'Failed IT 6/7'
    post = next(mf.search('TECHNIQUE TEST 8: DOCUMENTATION'))
    assert post.mfn == 105, 'Failed IT 8'
    post = next(mf.search('TECHNIQUE TEST 8: YUGOSLAVIA'))
    assert post.mfn == 90, 'Failed IT 8'
def test_inv_it_1000_1008():
    """Check indexing techniques 1000-1008 against the cds fixture."""
    db = join(FIXTURES_DIR, 'cds.mst')
    mf = MasterFile(db)
    # Remove stale index files; FIX: extensions carry the dot already, the
    # old 'cds.%s' pattern built 'cds..idx' and never matched anything.
    for ext in ('.idx', '.idx.index', '.idx.lock', '.idx.old', '.idx.tmp'):
        fname = join(FIXTURES_DIR, 'cds%s' % ext)
        if exists(fname):
            remove(fname)
    mf.invertdb(fst=join(FIXTURES_DIR, 'it_1000-1008.fst'), callback=invcb)
    # FIX: next(gen) replaces the Python-2-only gen.next().
    post = next(mf.search('MUELLER-DOMBOIS, D.'))
    assert post.mfn == 12, 'Failed IT 1000'
    post = next(mf.search('OHYA, MASAHIKO'))
    assert post.mfn == 111, 'Failed IT 1000'
    post = next(mf.search('SIMMONS, MELVIN K.'))
    assert post.mfn == 16, 'Failed IT 1000'
    post = next(mf.search('A. PEDONE'))
    assert post.mfn == 8, 'Failed IT 1001'
    post = next(mf.search('BRUXELLES'))
    assert post.mfn == 36, 'Failed IT 1001'
    post = next(mf.search('UNIVERSITY OF ZAMBIA'))
    assert post.mfn == 29, 'Failed IT 1001'
    post = next(mf.search('DEMOCRATIC KAMPUCHEA'))
    assert post.mfn == 111, 'Failed IT 1002'
    post = next(mf.search('TAIWAN'))
    assert post.mfn == 111, 'Failed IT 1002'
    post = next(mf.search('ZAMBIA'))
    assert post.mfn == 29, 'Failed IT 1002'
    post = next(mf.search('ARMAMENTS'))
    assert post.mfn == 8, 'Failed IT 1004'
    post = next(mf.search('BEHAVIOUR'))
    assert post.mfn == 20, 'Failed IT 1004'
    post = next(mf.search('COMPARATIVE'))
    assert post.mfn == 111, 'Failed IT 1004'
    post = next(mf.search('TECHNIQUE TEST 5: 25 JUNE 1976'))
    assert post.mfn == 28, 'Failed IT 1005'
    post = next(mf.search('TECHNIQUE TEST 5: CENTER FOR RESEARCH ON UTILIZATION OF SCIE'))
    assert post.mfn == 16, 'Failed IT 1005'
    post = next(mf.search('TECHNIQUE TEST 5: GENEVE'))
    assert post.mfn == 11, 'Failed IT 1005'
    post = next(mf.search('TECHNIQUE TEST 6: DEMOCRATIC KAMPUCHEA'))
    assert post.mfn == 111, 'Failed IT 1006'
    post = next(mf.search('TECHNIQUE TEST 6: ECONOMY'))
    assert post.mfn == 22, 'Failed IT 1006'
    post = next(mf.search('TECHNIQUE TEST 6: EDUCATIONAL HISTORY'))
    assert post.mfn == 11, 'Failed IT 1006'
    post = next(mf.search('TECHNIQUE TEST 8: ALLUVIONS'))
    assert post.mfn == 111, 'Failed IT 1008'
    post = next(mf.search('TECHNIQUE TEST 8: ANNOTATED'))
    assert post.mfn == 17, 'Failed IT 1008'
    post = next(mf.search('TECHNIQUE TEST 8: BROADCASTING'))
    assert post.mfn == 29, 'Failed IT 1008'
if __name__ == '__main__':
    # NOTE(review): only the IT 0-4 suite runs when executed directly;
    # the 5-8 and 1000-1008 suites are presumably exercised via the test
    # runner -- confirm this is intentional.
    test_inv_it_0_4()
| 34.294798 | 91 | 0.63644 |
__created__ = "2009-09-14"
__updated__ = "2009-09-14"
__author__ = "João Chaves <joaochaves@gpr.com.br>"
from os.path import join, exists
from os import remove, getcwd
from sys import platform, path
from pyisis.files import MasterFile
from pyisis.config import config as base_config
from pyisis.engine import Engine
import gettext
path.append('../../')
path.append('../')
FIXTURES_DIR = 'fixtures'
def initialize():
    # Install translations first, then boot the engine with the shared config.
    set_i18n(base_config)
    Engine.setup(base_config)
    return Engine.config
def set_i18n(config):
    # Translation catalogs live in the repository-level locale/ directory.
    localedir = join('..','locale')
    gettext.install('pyisis',localedir=localedir)
    lang1 = gettext.translation('pyisis', languages=[config.LANGUAGE],localedir=localedir)
    lang1.install(unicode=1)
# Module-level fixture: engine configuration shared by every test below.
config = initialize()
def invcb(total,current):
    # Progress callback passed to MasterFile.invertdb(); intentionally a no-op.
    pass
def test_inv_it_0_4():
    """Check indexing techniques 0-4 against the cds fixture database."""
    db = join(FIXTURES_DIR, 'cds.mst')
    mf = MasterFile(db)
    # Remove stale index files so the inversion below starts clean.
    # FIX: the extensions already carry a leading dot, so the old
    # 'cds.%s' pattern produced names like 'cds..idx' and never matched.
    for ext in ('.idx', '.idx.index', '.idx.lock', '.idx.old', '.idx.tmp'):
        fname = join(FIXTURES_DIR, 'cds%s' % ext)
        if exists(fname):
            remove(fname)
    mf.invertdb(fst=join(FIXTURES_DIR, 'it_0-4.fst'), callback=invcb)
    # FIX: next(gen) replaces the Python-2-only gen.next().
    post = next(mf.search('ABEYWICKRAMA, B.A.'))
    assert post.mfn == 60, 'Failed IT 0'
    post = next(mf.search('BOSIAN, G.'))
    assert post.mfn == 2, 'Failed IT 0'
    post = next(mf.search('CENTER FOR NORTHERN EDUCATIONAL RESEARCH, UNIVERSITY OF ALAS'))
    assert post.mfn == 89, 'Failed IT 1'
    post = next(mf.search('HUMANITIES PRESS'))
    assert post.mfn == 143, 'Failed IT 1'
    post = next(mf.search('AGRICULTURE'))
    assert post.mfn == 17, 'Failed IT 2/3'
    post = next(mf.search("CHILDREN'S BOOKS"))
    assert post.mfn == 114, 'Failed IT 2/3'
    post = next(mf.search('EXPERIMENTAL'))
    assert post.mfn == 130, 'Failed IT 4'
    post = next(mf.search("MONOLITHS"))
    assert post.mfn == 20, 'Failed IT 4'
def test_inv_it_5_8():
    """Check indexing techniques 5-8 (prefixed terms) against the cds fixture."""
    db = join(FIXTURES_DIR, 'cds.mst')
    mf = MasterFile(db)
    # Remove stale index files; FIX: extensions carry the dot already, the
    # old 'cds.%s' pattern built 'cds..idx' and never matched anything.
    for ext in ('.idx', '.idx.index', '.idx.lock', '.idx.old', '.idx.tmp'):
        fname = join(FIXTURES_DIR, 'cds%s' % ext)
        if exists(fname):
            remove(fname)
    mf.invertdb(fst=join(FIXTURES_DIR, 'it_5-8.fst'), callback=invcb)
    # FIX: next(gen) replaces the Python-2-only gen.next().
    post = next(mf.search('AU_CHAPMAN, VALENTINE J.'))
    assert post.mfn == 62, 'Failed IT 0 (Prefix)'
    post = next(mf.search('AU_HOLLERWOGER, F.'))
    assert post.mfn == 47, 'Failed IT 0 (Prefix)'
    post = next(mf.search('TECHNIQUE TEST 5: CENTER FOR THE ADVANCED STUDY OF EDUCATION'))
    assert post.mfn == 113, 'Failed IT 5'
    post = next(mf.search('TECHNIQUE TEST 5: LANSING, MICH.'))
    assert post.mfn == 138, 'Failed IT 5'
    post = next(mf.search('TECHNIQUE TEST 6: BRACKISH WATER'))
    assert post.mfn == 48, 'Failed IT 6/7'
    post = next(mf.search('TECHNIQUE TEST 6: INFORMATION/LIBRARY FINANCING'))
    assert post.mfn == 136, 'Failed IT 6/7'
    post = next(mf.search('TECHNIQUE TEST 8: DOCUMENTATION'))
    assert post.mfn == 105, 'Failed IT 8'
    post = next(mf.search('TECHNIQUE TEST 8: YUGOSLAVIA'))
    assert post.mfn == 90, 'Failed IT 8'
def test_inv_it_1000_1008():
    """Check indexing techniques 1000-1008 against the cds fixture."""
    db = join(FIXTURES_DIR, 'cds.mst')
    mf = MasterFile(db)
    # Remove stale index files; FIX: extensions carry the dot already, the
    # old 'cds.%s' pattern built 'cds..idx' and never matched anything.
    for ext in ('.idx', '.idx.index', '.idx.lock', '.idx.old', '.idx.tmp'):
        fname = join(FIXTURES_DIR, 'cds%s' % ext)
        if exists(fname):
            remove(fname)
    mf.invertdb(fst=join(FIXTURES_DIR, 'it_1000-1008.fst'), callback=invcb)
    # FIX: next(gen) replaces the Python-2-only gen.next().
    post = next(mf.search('MUELLER-DOMBOIS, D.'))
    assert post.mfn == 12, 'Failed IT 1000'
    post = next(mf.search('OHYA, MASAHIKO'))
    assert post.mfn == 111, 'Failed IT 1000'
    post = next(mf.search('SIMMONS, MELVIN K.'))
    assert post.mfn == 16, 'Failed IT 1000'
    post = next(mf.search('A. PEDONE'))
    assert post.mfn == 8, 'Failed IT 1001'
    post = next(mf.search('BRUXELLES'))
    assert post.mfn == 36, 'Failed IT 1001'
    post = next(mf.search('UNIVERSITY OF ZAMBIA'))
    assert post.mfn == 29, 'Failed IT 1001'
    post = next(mf.search('DEMOCRATIC KAMPUCHEA'))
    assert post.mfn == 111, 'Failed IT 1002'
    post = next(mf.search('TAIWAN'))
    assert post.mfn == 111, 'Failed IT 1002'
    post = next(mf.search('ZAMBIA'))
    assert post.mfn == 29, 'Failed IT 1002'
    post = next(mf.search('ARMAMENTS'))
    assert post.mfn == 8, 'Failed IT 1004'
    post = next(mf.search('BEHAVIOUR'))
    assert post.mfn == 20, 'Failed IT 1004'
    post = next(mf.search('COMPARATIVE'))
    assert post.mfn == 111, 'Failed IT 1004'
    post = next(mf.search('TECHNIQUE TEST 5: 25 JUNE 1976'))
    assert post.mfn == 28, 'Failed IT 1005'
    post = next(mf.search('TECHNIQUE TEST 5: CENTER FOR RESEARCH ON UTILIZATION OF SCIE'))
    assert post.mfn == 16, 'Failed IT 1005'
    post = next(mf.search('TECHNIQUE TEST 5: GENEVE'))
    assert post.mfn == 11, 'Failed IT 1005'
    post = next(mf.search('TECHNIQUE TEST 6: DEMOCRATIC KAMPUCHEA'))
    assert post.mfn == 111, 'Failed IT 1006'
    post = next(mf.search('TECHNIQUE TEST 6: ECONOMY'))
    assert post.mfn == 22, 'Failed IT 1006'
    post = next(mf.search('TECHNIQUE TEST 6: EDUCATIONAL HISTORY'))
    assert post.mfn == 11, 'Failed IT 1006'
    post = next(mf.search('TECHNIQUE TEST 8: ALLUVIONS'))
    assert post.mfn == 111, 'Failed IT 1008'
    post = next(mf.search('TECHNIQUE TEST 8: ANNOTATED'))
    assert post.mfn == 17, 'Failed IT 1008'
    post = next(mf.search('TECHNIQUE TEST 8: BROADCASTING'))
    assert post.mfn == 29, 'Failed IT 1008'
if __name__ == '__main__':
    # NOTE(review): only the IT 0-4 suite runs when executed directly;
    # the other suites are presumably exercised via the test runner.
    test_inv_it_0_4()
| true | true |
f73b682591833686efd6e92758e2b9d79fddde20 | 1,119 | py | Python | setup.py | gralin/python-qnapstats | e9ab3b8e91e9db13db9625f7812810ed19cab70c | [
"MIT"
] | null | null | null | setup.py | gralin/python-qnapstats | e9ab3b8e91e9db13db9625f7812810ed19cab70c | [
"MIT"
] | null | null | null | setup.py | gralin/python-qnapstats | e9ab3b8e91e9db13db9625f7812810ed19cab70c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import io
from setuptools import setup
# Read the long description up front with a context manager so the file
# handle is closed deterministically; the original io.open(...).read()
# inline in the setup() call leaked the handle (ResourceWarning on Py3).
with io.open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='qnapstats',
    description='Python API for obtaining QNAP NAS system stats',
    long_description=long_description,
    version='0.3.1',
    license='MIT',
    author='Colin O\'Dell',
    author_email='colinodell@gmail.com',
    url='https://github.com/colinodell/python-qnapstats',
    packages=['qnapstats'],
    keywords=['qnap'],
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Home Automation',
        'Topic :: System :: Monitoring'
    ],
    install_requires=['requests>=1.0.0', 'xmltodict>=0.10.0']
)
| 31.971429 | 68 | 0.607685 |
import io
from setuptools import setup
# Read the long description with a context manager so the file handle is
# closed deterministically; the original io.open(...).read() inline in
# the setup() call leaked the handle (ResourceWarning on Py3).
with io.open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='qnapstats',
    description='Python API for obtaining QNAP NAS system stats',
    long_description=long_description,
    version='0.3.1',
    license='MIT',
    author='Colin O\'Dell',
    author_email='colinodell@gmail.com',
    url='https://github.com/colinodell/python-qnapstats',
    packages=['qnapstats'],
    keywords=['qnap'],
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Home Automation',
        'Topic :: System :: Monitoring'
    ],
    install_requires=['requests>=1.0.0', 'xmltodict>=0.10.0']
)
| true | true |
f73b68d789b0facbcae3900bed3fa18434dbb6e1 | 897 | py | Python | src/ciscosupportsdk/api/asd.py | supermanny81/ciscosupportsdk | dd2ff2714b4bac6fe33576f1ab2c7999a80e0dd3 | [
"MIT"
] | null | null | null | src/ciscosupportsdk/api/asd.py | supermanny81/ciscosupportsdk | dd2ff2714b4bac6fe33576f1ab2c7999a80e0dd3 | [
"MIT"
] | null | null | null | src/ciscosupportsdk/api/asd.py | supermanny81/ciscosupportsdk | dd2ff2714b4bac6fe33576f1ab2c7999a80e0dd3 | [
"MIT"
] | null | null | null | from ciscosupportsdk.apisession import ApiSession
SERVICE_BASE_URL = "/software/v4.0"
class AutomatedSoftwareDistributionApi(object):
"""
Cisco Automated Software Distribution service provides software
information and download URLs to assist you in upgrading your
device/application to the latest version.
"""
def __init__(self, session: ApiSession) -> None:
self._session = session
def get_bug_details(self, bug_ids: list[str]) -> None:
"""
Returns detailed information for the specified bug ID or IDs.
:param: bug_ids: list[str]: Identifier of the bug or bugs for which
to return detailed information. A maximum of five (5) bug IDs can
be submitted separated by a comma.
:rtype: Bug
"""
path = f"{SERVICE_BASE_URL}/bug_ids/" f"{','.join(bug_ids)}"
print(path)
pass
| 32.035714 | 77 | 0.664437 | from ciscosupportsdk.apisession import ApiSession
# Base path shared by every endpoint of this service.
SERVICE_BASE_URL = "/software/v4.0"


class AutomatedSoftwareDistributionApi(object):
    """Client stub for the Cisco Automated Software Distribution service."""

    def __init__(self, session: ApiSession) -> None:
        # Authenticated session used to issue requests against the service.
        self._session = session

    def get_bug_details(self, bug_ids: list[str]) -> None:
        """Return detailed information for up to five bug IDs.

        NOTE(review): still a stub -- builds the request path but never
        calls the service via ``self._session``; the leftover debug
        ``print(path)`` was removed so library code does not write to
        stdout.
        """
        path = f"{SERVICE_BASE_URL}/bug_ids/" f"{','.join(bug_ids)}"
        # TODO: issue the GET via self._session using `path` and return
        # the parsed result.
        pass
| true | true |
f73b69cd2099483b6123684818356c6537070845 | 406 | py | Python | scripts/serializers.py | AdmiralGT/botc-scripts | e1b1805e030bc9af7f02c04cfa0a514c476f6bdb | [
"MIT"
] | 2 | 2022-02-28T17:05:03.000Z | 2022-02-28T22:30:03.000Z | scripts/serializers.py | AdmiralGT/botc-scripts | e1b1805e030bc9af7f02c04cfa0a514c476f6bdb | [
"MIT"
] | 50 | 2021-07-20T22:17:45.000Z | 2022-03-31T22:19:00.000Z | scripts/serializers.py | AdmiralGT/botc-scripts | e1b1805e030bc9af7f02c04cfa0a514c476f6bdb | [
"MIT"
] | null | null | null | from rest_framework import serializers
from scripts.models import ScriptVersion
# Serializers define the API representation.
class ScriptSerializer(serializers.ModelSerializer):
    # Human-readable script name, read from the related Script row.
    name = serializers.CharField(source="script.name")
    # Vote tally; read-only because it is computed from the `votes` relation.
    score = serializers.ReadOnlyField(source="votes.count")
    class Meta:
        model = ScriptVersion
        fields = ["pk", "name", "version", "content", "score"]
from scripts.models import ScriptVersion
class ScriptSerializer(serializers.ModelSerializer):
name = serializers.CharField(source="script.name")
score = serializers.ReadOnlyField(source="votes.count")
class Meta:
model = ScriptVersion
fields = ["pk", "name", "version", "content", "score"]
| true | true |
f73b6a7b07cf7322c5d2c422b130434fb0d97ed6 | 1,315 | py | Python | routing.py | kingfozhou/tornado-middleware | 0429942c9aead1a20b142030d2e987d40356e7bf | [
"Unlicense"
] | 2 | 2019-02-10T20:52:26.000Z | 2021-02-22T14:51:38.000Z | routing.py | kingfozhou/tornado-middleware | 0429942c9aead1a20b142030d2e987d40356e7bf | [
"Unlicense"
] | 1 | 2019-02-08T12:15:37.000Z | 2019-02-08T12:15:37.000Z | routing.py | kingfozhou/tornado-middleware | 0429942c9aead1a20b142030d2e987d40356e7bf | [
"Unlicense"
] | 2 | 2021-01-13T03:13:50.000Z | 2021-02-22T14:56:24.000Z | import functools
from tornado.routing import PathMatches
from core import PipelineDelegate
class MethodMatches(PathMatches):
    """Route matcher that requires both the path pattern and the HTTP method."""

    def __init__(self, path_pattern, method: str):
        super().__init__(path_pattern)
        self.method = method.upper()

    def match(self, request):
        # Let the path matcher run first; reject on method mismatch.
        params = super().match(request)
        if params is None or request.method.upper() != self.method:
            return None
        return params
class Router:
    """Decorator-based registry of URL routes for the Tornado application."""

    # Class-level (shared) registry of (matcher, handler, kwargs, name) tuples;
    # the module singleton below is the intended entry point.
    _routes = []

    def __call__(self, path: str, name=None, method: str = None):
        def wrapper(func):
            matcher = path if method is None else MethodMatches(path, method)
            self._routes.append((matcher, PipelineDelegate, {'delegate': func}, name))
            return func
        return wrapper

    # HTTP-verb shorthands: route.get(p) == route(p, method='GET').
    get = functools.partialmethod(__call__, method='GET')
    post = functools.partialmethod(__call__, method='POST')
    put = functools.partialmethod(__call__, method='PUT')
    patch = functools.partialmethod(__call__, method='PATCH')
    delete = functools.partialmethod(__call__, method='DELETE')

    def get_routes(self):
        return tuple(self._routes)


# Module-level singleton used by application code: @route('/path').
route = Router()
| 27.395833 | 110 | 0.641065 | import functools
from tornado.routing import PathMatches
from core import PipelineDelegate
class MethodMatches(PathMatches):
    """Route matcher that requires both the path pattern and the HTTP method."""

    def __init__(self, path_pattern, method: str):
        super().__init__(path_pattern)
        self.method = method.upper()

    def match(self, request):
        # Let the path matcher run first; reject on method mismatch.
        params = super().match(request)
        if params is None or request.method.upper() != self.method:
            return None
        return params
class Router:
    """Decorator-based registry of URL routes for the Tornado application."""

    # Class-level (shared) registry of (matcher, handler, kwargs, name) tuples.
    _routes = []

    def __call__(self, path: str, name=None, method: str = None):
        def wrapper(func):
            matcher = path if method is None else MethodMatches(path, method)
            self._routes.append((matcher, PipelineDelegate, {'delegate': func}, name))
            return func
        return wrapper

    # HTTP-verb shorthands: route.get(p) == route(p, method='GET').
    get = functools.partialmethod(__call__, method='GET')
    post = functools.partialmethod(__call__, method='POST')
    put = functools.partialmethod(__call__, method='PUT')
    patch = functools.partialmethod(__call__, method='PATCH')
    delete = functools.partialmethod(__call__, method='DELETE')

    def get_routes(self):
        return tuple(self._routes)


# Module-level singleton used by application code: @route('/path').
route = Router()
| true | true |
f73b6a83340aaf0846a9e08c3fb0253c5b22a9a0 | 4,599 | py | Python | keras/backend/common.py | ypxie/keras-1 | f1ed8d63faa26ce6180faa685839aa32217211c6 | [
"MIT"
] | 126 | 2017-09-08T15:21:10.000Z | 2022-01-10T00:57:22.000Z | keras/backend/common.py | ypxie/keras-1 | f1ed8d63faa26ce6180faa685839aa32217211c6 | [
"MIT"
] | 12 | 2018-04-03T03:45:24.000Z | 2020-07-28T14:42:08.000Z | keras/backend/common.py | ypxie/keras-1 | f1ed8d63faa26ce6180faa685839aa32217211c6 | [
"MIT"
] | 52 | 2017-12-26T13:19:49.000Z | 2022-03-17T06:14:19.000Z | import numpy as np
from collections import defaultdict
# the type of float to use throughout the session.
_FLOATX = 'float32'
# Fuzz factor returned by epsilon(); mutable via set_epsilon().
_EPSILON = 10e-8
# Per-prefix counters backing get_uid(); cleared by reset_uids().
_UID_PREFIXES = defaultdict(int)
# Default image dimension ordering: 'tf' (channels last) or 'th' (channels first).
_IMAGE_DIM_ORDERING = 'tf'
# Legacy weight-ordering flag toggled by set_legacy_weight_ordering().
_LEGACY_WEIGHT_ORDERING = False
def epsilon():
    '''Returns the numeric fuzz factor used in numeric expressions.

    # Returns
        A float, the current epsilon value.

    # Example
    ```python
        >>> keras.backend.epsilon()
        1e-08
    ```
    '''
    return _EPSILON


def set_epsilon(e):
    '''Sets the numeric fuzz factor used in numeric expressions.

    # Arguments
        e: float. New value of epsilon.

    # Example
    ```python
        >>> from keras import backend as K
        >>> K.set_epsilon(1e-05)
        >>> K.epsilon()
        1e-05
    ```
    '''
    global _EPSILON
    _EPSILON = e
def floatx():
    '''Returns the default float type name.

    # Returns
        String, one of 'float16', 'float32' or 'float64'.
    '''
    return _FLOATX


def set_floatx(floatx):
    '''Sets the default float type.

    # Arguments
        floatx: 'float16', 'float32', or 'float64'.

    # Raises
        ValueError: for any other value.
    '''
    global _FLOATX
    if floatx not in {'float16', 'float32', 'float64'}:
        raise ValueError('Unknown floatx type: ' + str(floatx))
    _FLOATX = str(floatx)
def cast_to_floatx(x):
    '''Cast a Numpy array to the session's default Keras float type.

    # Arguments
        x: Numpy array (or anything `np.asarray` accepts).

    # Returns
        A Numpy array whose dtype equals `floatx()`.

    # Example
    ```python
        >>> from keras import backend as K
        >>> arr = numpy.array([1.0, 2.0], dtype='float64')
        >>> K.cast_to_floatx(arr).dtype
        dtype('float32')
    ```
    '''
    return np.asarray(x, dtype=_FLOATX)
def image_dim_ordering():
    '''Returns the default image dimension ordering convention.

    # Returns
        A string, either `'th'` (channels first) or `'tf'` (channels last).
    '''
    return _IMAGE_DIM_ORDERING


def set_image_dim_ordering(dim_ordering):
    '''Sets the default image dimension ordering convention.

    # Arguments
        dim_ordering: string, `'th'` or `'tf'`.

    # Raises
        ValueError: if `dim_ordering` is neither `'th'` nor `'tf'`.
    '''
    global _IMAGE_DIM_ORDERING
    if dim_ordering not in {'tf', 'th'}:
        raise ValueError('Unknown dim_ordering:', dim_ordering)
    _IMAGE_DIM_ORDERING = str(dim_ordering)
def get_uid(prefix=''):
    '''Provides a unique integer UID for a string prefix.

    Successive calls with the same prefix return 1, 2, 3, ...

    # Arguments
        prefix: string namespace for the counter.

    # Returns
        An integer, the next UID for `prefix`.
    '''
    _UID_PREFIXES[prefix] += 1
    return _UID_PREFIXES[prefix]


def reset_uids():
    '''Resets every per-prefix UID counter back to zero.'''
    global _UID_PREFIXES
    _UID_PREFIXES = defaultdict(int)
def is_keras_tensor(x):
    '''Returns whether `x` is a Keras tensor.

    # Arguments
        x: a potential tensor.

    # Returns
        A boolean: whether the argument is a Keras tensor.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> np_var = numpy.array([1, 2])
        >>> K.is_keras_tensor(np_var)
        False
        >>> keras_var = K.variable(np_var)
        >>> K.is_keras_tensor(keras_var)  # A variable is not a Tensor.
        False
        >>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
        >>> K.is_keras_tensor(keras_placeholder)  # A placeholder is a Tensor.
        True
    ```
    '''
    # Keras tags its own tensors with a `_keras_shape` attribute, so the
    # presence of that attribute is the whole test -- return it directly
    # instead of the verbose if/else around the same condition.
    return hasattr(x, '_keras_shape')
def set_legacy_weight_ordering(value):
    '''Enables or disables the legacy weight ordering.

    # Arguments
        value: boolean (strictly True or False).
    '''
    global _LEGACY_WEIGHT_ORDERING
    assert value in {True, False}
    _LEGACY_WEIGHT_ORDERING = value


def legacy_weight_ordering():
    '''Returns whether legacy weight ordering is active (a boolean).'''
    return _LEGACY_WEIGHT_ORDERING
| 21.09633 | 78 | 0.572081 | import numpy as np
from collections import defaultdict
# Default float type used throughout the session; mutable via set_floatx().
_FLOATX = 'float32'
# Fuzz factor returned by epsilon(); mutable via set_epsilon().
_EPSILON = 10e-8
# Per-prefix counters backing get_uid(); cleared by reset_uids().
_UID_PREFIXES = defaultdict(int)
# Default image dimension ordering: 'tf' (channels last) or 'th' (channels first).
_IMAGE_DIM_ORDERING = 'tf'
# Legacy weight-ordering flag toggled by set_legacy_weight_ordering().
_LEGACY_WEIGHT_ORDERING = False
def epsilon():
    '''Returns the numeric fuzz factor used in numeric expressions (a float).'''
    return _EPSILON


def set_epsilon(e):
    '''Sets the numeric fuzz factor.

    # Arguments
        e: float. New value of epsilon.
    '''
    global _EPSILON
    _EPSILON = e
def floatx():
    '''Returns the default float type name ('float16', 'float32' or 'float64').'''
    return _FLOATX


def set_floatx(floatx):
    '''Sets the default float type.

    # Arguments
        floatx: 'float16', 'float32', or 'float64'.

    # Raises
        ValueError: for any other value.
    '''
    global _FLOATX
    if floatx not in {'float16', 'float32', 'float64'}:
        raise ValueError('Unknown floatx type: ' + str(floatx))
    _FLOATX = str(floatx)
def cast_to_floatx(x):
    '''Cast a Numpy array to the session's default Keras float type.

    # Arguments
        x: Numpy array (or anything `np.asarray` accepts).

    # Returns
        A Numpy array whose dtype equals `floatx()`.
    '''
    return np.asarray(x, dtype=_FLOATX)
def image_dim_ordering():
    '''Returns the default image dimension ordering, `'th'` or `'tf'`.'''
    return _IMAGE_DIM_ORDERING


def set_image_dim_ordering(dim_ordering):
    '''Sets the default image dimension ordering convention.

    # Arguments
        dim_ordering: string, `'th'` (channels first) or `'tf'` (channels last).

    # Raises
        ValueError: if `dim_ordering` is neither `'th'` nor `'tf'`.
    '''
    global _IMAGE_DIM_ORDERING
    if dim_ordering not in {'tf', 'th'}:
        raise ValueError('Unknown dim_ordering:', dim_ordering)
    _IMAGE_DIM_ORDERING = str(dim_ordering)
def get_uid(prefix=''):
    '''Provides a unique integer UID for a string prefix (1, 2, 3, ... per prefix).'''
    _UID_PREFIXES[prefix] += 1
    return _UID_PREFIXES[prefix]


def reset_uids():
    '''Resets every per-prefix UID counter back to zero.'''
    global _UID_PREFIXES
    _UID_PREFIXES = defaultdict(int)
def is_keras_tensor(x):
    '''Returns whether `x` is a Keras tensor (a boolean).

    Keras tags its own tensors with a `_keras_shape` attribute, so the
    presence of that attribute is the whole test -- return it directly
    instead of the verbose if/else around the same condition.
    '''
    return hasattr(x, '_keras_shape')
def set_legacy_weight_ordering(value):
global _LEGACY_WEIGHT_ORDERING
assert value in {True, False}
_LEGACY_WEIGHT_ORDERING = value
def legacy_weight_ordering():
return _LEGACY_WEIGHT_ORDERING
| true | true |
f73b6ad30fd0ae3e66665286385f5fc82a71dc17 | 68 | py | Python | budget/management/commands/__init__.py | NTUSA/fudez-app | 91c8d85238fb642488ef5616be441e4417f21d0a | [
"MIT"
] | 2 | 2017-04-13T08:52:19.000Z | 2018-05-07T12:14:34.000Z | budget/management/commands/__init__.py | NTUSA/fudez-app | 91c8d85238fb642488ef5616be441e4417f21d0a | [
"MIT"
] | null | null | null | budget/management/commands/__init__.py | NTUSA/fudez-app | 91c8d85238fb642488ef5616be441e4417f21d0a | [
"MIT"
] | 1 | 2018-07-28T16:09:59.000Z | 2018-07-28T16:09:59.000Z | from .setup_budget import Command
from .clear_budget import Command
| 22.666667 | 33 | 0.852941 | from .setup_budget import Command
from .clear_budget import Command
| true | true |
f73b6af957400f898d06f92e8e267411e1aeaa6c | 668 | py | Python | backend/Backendapi/library/migrations/0007_auto_20170613_0605.py | f0rdream/SkyRead | 798b4dd35b7e6be41e5fed4537d3f6034d20494e | [
"MIT"
] | null | null | null | backend/Backendapi/library/migrations/0007_auto_20170613_0605.py | f0rdream/SkyRead | 798b4dd35b7e6be41e5fed4537d3f6034d20494e | [
"MIT"
] | null | null | null | backend/Backendapi/library/migrations/0007_auto_20170613_0605.py | f0rdream/SkyRead | 798b4dd35b7e6be41e5fed4537d3f6034d20494e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rework BorrowItem identifiers: rename ``borrow_find_id`` to
    ``book_id``, drop ``library_name``, and add a ``find_id`` text field."""

    dependencies = [
        ('library', '0006_auto_20170516_0903'),
    ]
    operations = [
        # Pure column rename -- stored values are preserved.
        migrations.RenameField(
            model_name='borrowitem',
            old_name='borrow_find_id',
            new_name='book_id',
        ),
        migrations.RemoveField(
            model_name='borrowitem',
            name='library_name',
        ),
        # NOTE(review): default=None on a (non-null) TextField will fail for
        # existing rows unless the table is empty -- confirm intent.
        migrations.AddField(
            model_name='borrowitem',
            name='find_id',
            field=models.TextField(default=None),
        ),
    ]
| 23.034483 | 49 | 0.568862 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('library', '0006_auto_20170516_0903'),
]
operations = [
migrations.RenameField(
model_name='borrowitem',
old_name='borrow_find_id',
new_name='book_id',
),
migrations.RemoveField(
model_name='borrowitem',
name='library_name',
),
migrations.AddField(
model_name='borrowitem',
name='find_id',
field=models.TextField(default=None),
),
]
| true | true |
f73b6ed86ce45be7b09155b8bc2c2938bbacb8ab | 13,346 | py | Python | test/functional/test_framework/sha256.py | tradecraftio/tradecraft | a014fea4d4656df67aef19e379f10322386cf6f8 | [
"MIT"
] | 10 | 2019-03-08T04:10:37.000Z | 2021-08-20T11:55:14.000Z | test/functional/test_framework/sha256.py | tradecraftio/tradecraft | a014fea4d4656df67aef19e379f10322386cf6f8 | [
"MIT"
] | 69 | 2018-11-09T20:29:29.000Z | 2021-10-05T00:08:36.000Z | test/functional/test_framework/sha256.py | tradecraftio/tradecraft | a014fea4d4656df67aef19e379f10322386cf6f8 | [
"MIT"
] | 7 | 2019-01-21T06:00:18.000Z | 2021-12-19T16:18:00.000Z | #!/usr/bin/env python
#
# Copyright (c) 2012 Dave Pifke.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""SHA256 (FIPS 180-3) implementation for experimentation."""
import binascii
import codecs
import collections
import struct
import sys
if sys.version > '3':
long = int
class SHA256(object):
    """
    SHA256 (FIPS 180-3) implementation for experimentation.
    This is an implementation of the hash function designed not for
    efficiency, but for clarity and ability to experiment. The details
    of the algorithm are abstracted out with subclassing in mind.
    """
    # Container for the state registers between rounds:
    State = collections.namedtuple('State', 'a b c d e f g h')
    # From FIPS 180-3 section 5.3.3 (page 15):
    INITIAL_STATE = State(
        0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
        0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
    )
    # From FIPS 180-3 section 4.2.2 (page 11):
    K = (
        0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
        0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
        0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
        0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
        0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
        0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
        0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
        0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
        0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
        0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
        0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
        0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
        0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
        0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
        0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
        0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
    )
    # Abstract bitwise operations, which can be overridden to provide tracing
    # or alternate implementations:
    @staticmethod
    def _sum_mod32(*args):
        # Addition modulo 2**32 (the SHA-256 word size).
        return sum(args) & 0xffffffff
    @classmethod
    def _xor(cls, *args):
        # Variadic XOR, folded right-recursively over the arguments.
        if len(args) == 2:
            return args[0] ^ args[1]
        else:
            return args[0] ^ cls._xor(*args[1:])
    _and = staticmethod(lambda x, y: x & y)
    _invert = staticmethod(lambda x: ~x)
    # Operations defined by FIPS 180-3 section 3.2 (page 8):
    # _rrot: 32-bit rotate right; _shr: 32-bit logical shift right.
    _rrot = staticmethod(lambda x, n: ((x & 0xffffffff) >> n) | (x << (32 - n)) & 0xffffffff)
    _shr = staticmethod(lambda x, n: (x & 0xffffffff) >> n)
    # Operations defined by FIPS 180-3 section 4.1.2 (page 10):
    # _ch = "choose", _maj = "majority", _S* = big sigma, _s* = small sigma.
    _ch = classmethod(lambda cls, x, y, z: cls._xor(cls._and(x, y), cls._and(cls._invert(x), z)))
    _maj = classmethod(lambda cls, x, y, z: cls._xor(cls._and(x, y), cls._and(x, z), cls._and(y, z)))
    _S0 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 2), cls._rrot(x, 13), cls._rrot(x, 22)))
    _S1 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 6), cls._rrot(x, 11), cls._rrot(x, 25)))
    _s0 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 7), cls._rrot(x, 18), cls._shr(x, 3)))
    _s1 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 17), cls._rrot(x, 19), cls._shr(x, 10)))
    # Operations defined by FIPS 180-3 section 6.2.2 (page 22):
    _T1 = classmethod(lambda cls, prev, w, k: cls._sum_mod32(cls._S1(prev.e), cls._ch(prev.e, prev.f, prev.g), prev.h, w, k))
    _T2 = classmethod(lambda cls, prev: cls._sum_mod32(cls._S0(prev.a), cls._maj(prev.a, prev.b, prev.c)))
    @classmethod
    def _round(cls, number, w, prev=INITIAL_STATE):
        """
        Performs one round of SHA256 message transformation, returning the new
        message state.  See FIPS 180-3 section 6.2.2 step 3 (pages 21-22).
        :param number:
            The round number.
        :param w:
            The expanded word of the input for this round.
        :param prev:
            Named tuple containing the working state from the previous round.
        """
        # number may count across blocks (see round_offset); the round
        # constant table repeats every 64 rounds, hence the modulo.
        t1 = cls._T1(prev, w, cls.K[number % 64])
        return cls.State(
            a=cls._sum_mod32(t1, cls._T2(prev)),
            b=prev.a,
            c=prev.b,
            d=prev.c,
            e=cls._sum_mod32(prev.d, t1),
            f=prev.e,
            g=prev.f,
            h=prev.g
        )
    @classmethod
    def _finalize(cls, state, initial_state=INITIAL_STATE):
        """
        Returns the intermediate state after the final round for a given block
        is complete.  See FIPS 180-3 section 6.2.2 step 4 (page 22).
        :param state:
            The digest state after the final round.
        :param initial_state:
            The digest state from before the first round.
        """
        return cls.State(
            a=cls._sum_mod32(state.a, initial_state.a),
            b=cls._sum_mod32(state.b, initial_state.b),
            c=cls._sum_mod32(state.c, initial_state.c),
            d=cls._sum_mod32(state.d, initial_state.d),
            e=cls._sum_mod32(state.e, initial_state.e),
            f=cls._sum_mod32(state.f, initial_state.f),
            g=cls._sum_mod32(state.g, initial_state.g),
            h=cls._sum_mod32(state.h, initial_state.h)
        )
    @classmethod
    def _expand_message(cls, message):
        """
        Returns a list of 64 32-bit words based upon 16 32-bit words from the
        message block being hashed.  See FIPS 180-3 section 6.2.2 step 1
        (page 21).
        :param message:
            Array of 16 32-bit values (512 bits total).
        """
        assert len(message) == 16, '_expand_message() got %d words, expected 16' % len(message)
        w = list(message)
        for i in range(16, 64):
            w.append(cls._sum_mod32(w[i - 16], cls._s0(w[i - 15]), w[i - 7], cls._s1(w[i - 2])))
        return w
    @classmethod
    def _process_block(cls, message, state=INITIAL_STATE, round_offset=0):
        """
        Processes a block of message data, returning the new digest state
        (the intermediate hash value).  See FIPS 180-3 section 6.2.2 (pages
        21 and 22).
        :param message:
            Byte string of length 64 containing the block data to hash.
        :param state:
            The digest state from the previous block.
        :param round_offset:
            The _round() method can be overridden to report intermediate hash
            values, in which case it's useful to know how many rounds came
            before.  This argument allows the caller to specify as much.
        """
        assert len(message) == 64, '_process_block() got %d bytes, expected 64' % len(message)
        assert not round_offset % 64, 'round_offset should be a multiple of 64'
        # '>L' = big-endian 32-bit words, per the spec's byte ordering.
        w = cls._expand_message(struct.unpack('>LLLLLLLLLLLLLLLL', message))
        midstate = state
        for i in range(64):
            midstate = cls._round(round_offset + i, w[i], midstate)
        return cls._finalize(midstate, state)
    @classmethod
    def _pad_message(cls, message, length):
        """
        Returns a list containing the final 1 or 2 message blocks, which
        include the message padding per FIPS 180-3 section 5.1.1 (page 13).
        :param message:
            Byte string containing the final block data to hash.  Should be
            less than a full block's worth (63 bytes or less).
        :param length:
            Length of the message, in bits.
        """
        assert len(message) < 64, 'Input to _pad_message() must be less than 512 bits'
        if len(message) <= 55:
            # 55 data bytes + 1 marker byte + 8 length bytes = one 64-byte block.
            # Append trailing 1 bit, then padding, then length
            return [b''.join((
                message,
                b'\x80',
                b'\x00' * (55 - len(message)),
                struct.pack('>LL', length >> 32, length & 0xffffffff),
            ))]
        else:
            # Not enough room to append length, return two blocks:
            return [
                # First is trailing 1 bit, then padding
                b''.join((
                    message,
                    b'\x80',
                    b'\x00' * (63 - len(message)),
                )),
                # Next is more padding, then length
                b''.join((
                    b'\x00' * 56,
                    struct.pack('>LL', length >> 32, length & 0xffffffff),
                )),
            ]
    def __init__(self, message=b'', round_offset=0):
        """
        Constructor.
        :param message:
            Initial data to pass to update().
        :param round_offset:
            The _round() method can be overridden to report intermediate hash
            values, in which case it's useful to know how many rounds came
            before.  For applications that perform double-hashing, you can
            specify the number of rounds from the previous hash instance
            using this parameter.
        """
        self.state = self.INITIAL_STATE    # current intermediate hash value
        self.length = long(0)              # total message length in bits
        self.buffer = b''                  # bytes awaiting a full 64-byte block
        self.round_offset = round_offset
        self.update(message)
    def update(self, message):
        """
        Updates the hash with the contents of *message*.
        Hashing uses 512-bit blocks, so the message is buffered until there's
        enough data to process a complete block.  When digest() is called,
        any remaining data in the buffer will be padded and digested.
        :param message:
            A byte string to digest.
        """
        message = bytes(message)
        self.length += len(message) * 8
        self.buffer = b''.join((self.buffer, message))
        while len(self.buffer) >= 64:
            self.state = self._process_block(self.buffer[:64], self.state, self.round_offset)
            self.buffer = self.buffer[64:]
            self.round_offset += 64
        return self
    def midstate(self):
        """
        Returns a (state, offset, buffer) triple describing the current
        intermediate hash: the packed big-endian 256-bit register state,
        the total number of bytes consumed so far, and any buffered bytes
        still awaiting a complete block.
        """
        return ( struct.pack(">LLLLLLLL", *self.state)
               , self.round_offset + len(self.buffer)
               , self.buffer )
    def digest(self):
        """
        Returns the SHA256 digest of the message.
        The hash is based on all data passed thus far via the constructor and
        update().  Any buffered data will be processed (along with the
        terminating length), however the internal state is not modified.  This
        means that update() can safely be used again after digest().
        """
        final_state = self.state
        for block in self._pad_message(self.buffer, self.length):
            final_state = self._process_block(block, final_state, self.round_offset)
        return struct.pack('>LLLLLLLL', *final_state)
    def hexdigest(self):
        """Like digest(), but returns a hexadecimal string."""
        return binascii.hexlify(self.digest())
if __name__ == '__main__':
    # Test routine.  Compares our output to that of the stdlib.  We also
    # print some timings, although keep in mind we're not built for speed so
    # the performance comparison is of dubious utility.
    import hashlib
    import os
    import sys
    import time
    try:
        # Optional first CLI argument: number of hashes to compute.
        count = int(sys.argv[1])
    except (ValueError, IndexError):
        count = 1000 # default
    mine = []
    stdlib = []
    message = os.urandom(count)
    try:
        consumed = 0
        start = time.time()
        # Hash every prefix of the random message with this implementation.
        for i in range(count):
            mine.append(SHA256(message[:i]).hexdigest())
            consumed += i
    finally:
        elapsed = time.time() - start
        print ('Mine: %d hashes (%d bytes) in %0.2f secs (%0.2f H/s %d B/s)' % (i+1, consumed, elapsed, float(i+1) / elapsed, float(consumed) / elapsed))
    try:
        consumed = 0
        start = time.time()
        # Same prefixes through hashlib for a correctness cross-check.
        for i in range(count):
            # The encoding before is to make sure we're comparing the same
            # types (bytes to bytes).  This is a Python 3 issue.
            stdlib.append(codecs.latin_1_encode(hashlib.sha256(message[:i]).hexdigest())[0])
            consumed += i
    finally:
        elapsed = time.time() - start
        print ('stdlib: %d hashes (%d bytes) in %0.2f secs (%0.2f H/s %d B/s)' % (i+1, consumed, elapsed, float(i+1) / elapsed, float(consumed) / elapsed))
    # Any mismatch against hashlib is an implementation bug.
    for a, b, i in zip(mine, stdlib, range(count)):
        assert a == b, '%r (mine) != %r (stdlib) calculating SHA256(%r) of length %d' % (a, b, message[:i], i)
# eof
| 35.589333 | 155 | 0.608947 |
import binascii
import codecs
import collections
import struct
import sys
if sys.version > '3':
long = int
class SHA256(object):
State = collections.namedtuple('State', 'a b c d e f g h')
INITIAL_STATE = State(
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
)
K = (
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
)
@staticmethod
def _sum_mod32(*args):
return sum(args) & 0xffffffff
@classmethod
def _xor(cls, *args):
if len(args) == 2:
return args[0] ^ args[1]
else:
return args[0] ^ cls._xor(*args[1:])
_and = staticmethod(lambda x, y: x & y)
_invert = staticmethod(lambda x: ~x)
_rrot = staticmethod(lambda x, n: ((x & 0xffffffff) >> n) | (x << (32 - n)) & 0xffffffff)
_shr = staticmethod(lambda x, n: (x & 0xffffffff) >> n)
_ch = classmethod(lambda cls, x, y, z: cls._xor(cls._and(x, y), cls._and(cls._invert(x), z)))
_maj = classmethod(lambda cls, x, y, z: cls._xor(cls._and(x, y), cls._and(x, z), cls._and(y, z)))
_S0 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 2), cls._rrot(x, 13), cls._rrot(x, 22)))
_S1 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 6), cls._rrot(x, 11), cls._rrot(x, 25)))
_s0 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 7), cls._rrot(x, 18), cls._shr(x, 3)))
_s1 = classmethod(lambda cls, x: cls._xor(cls._rrot(x, 17), cls._rrot(x, 19), cls._shr(x, 10)))
_T1 = classmethod(lambda cls, prev, w, k: cls._sum_mod32(cls._S1(prev.e), cls._ch(prev.e, prev.f, prev.g), prev.h, w, k))
_T2 = classmethod(lambda cls, prev: cls._sum_mod32(cls._S0(prev.a), cls._maj(prev.a, prev.b, prev.c)))
@classmethod
def _round(cls, number, w, prev=INITIAL_STATE):
t1 = cls._T1(prev, w, cls.K[number % 64])
return cls.State(
a=cls._sum_mod32(t1, cls._T2(prev)),
b=prev.a,
c=prev.b,
d=prev.c,
e=cls._sum_mod32(prev.d, t1),
f=prev.e,
g=prev.f,
h=prev.g
)
@classmethod
def _finalize(cls, state, initial_state=INITIAL_STATE):
return cls.State(
a=cls._sum_mod32(state.a, initial_state.a),
b=cls._sum_mod32(state.b, initial_state.b),
c=cls._sum_mod32(state.c, initial_state.c),
d=cls._sum_mod32(state.d, initial_state.d),
e=cls._sum_mod32(state.e, initial_state.e),
f=cls._sum_mod32(state.f, initial_state.f),
g=cls._sum_mod32(state.g, initial_state.g),
h=cls._sum_mod32(state.h, initial_state.h)
)
@classmethod
def _expand_message(cls, message):
assert len(message) == 16, '_expand_message() got %d words, expected 16' % len(message)
w = list(message)
for i in range(16, 64):
w.append(cls._sum_mod32(w[i - 16], cls._s0(w[i - 15]), w[i - 7], cls._s1(w[i - 2])))
return w
@classmethod
def _process_block(cls, message, state=INITIAL_STATE, round_offset=0):
assert len(message) == 64, '_process_block() got %d bytes, expected 64' % len(message)
assert not round_offset % 64, 'round_offset should be a multiple of 64'
w = cls._expand_message(struct.unpack('>LLLLLLLLLLLLLLLL', message))
midstate = state
for i in range(64):
midstate = cls._round(round_offset + i, w[i], midstate)
return cls._finalize(midstate, state)
@classmethod
def _pad_message(cls, message, length):
assert len(message) < 64, 'Input to _pad_message() must be less than 512 bits'
if len(message) <= 55:
return [b''.join((
message,
b'\x80',
b'\x00' * (55 - len(message)),
struct.pack('>LL', length >> 32, length & 0xffffffff),
))]
else:
return [
b''.join((
message,
b'\x80',
b'\x00' * (63 - len(message)),
)),
b''.join((
b'\x00' * 56,
struct.pack('>LL', length >> 32, length & 0xffffffff),
)),
]
def __init__(self, message=b'', round_offset=0):
self.state = self.INITIAL_STATE
self.length = long(0)
self.buffer = b''
self.round_offset = round_offset
self.update(message)
def update(self, message):
message = bytes(message)
self.length += len(message) * 8
self.buffer = b''.join((self.buffer, message))
while len(self.buffer) >= 64:
self.state = self._process_block(self.buffer[:64], self.state, self.round_offset)
self.buffer = self.buffer[64:]
self.round_offset += 64
return self
def midstate(self):
return ( struct.pack(">LLLLLLLL", *self.state)
, self.round_offset + len(self.buffer)
, self.buffer )
def digest(self):
final_state = self.state
for block in self._pad_message(self.buffer, self.length):
final_state = self._process_block(block, final_state, self.round_offset)
return struct.pack('>LLLLLLLL', *final_state)
def hexdigest(self):
return binascii.hexlify(self.digest())
if __name__ == '__main__':
# the performance comparison is of dubious utility.
import hashlib
import os
import sys
import time
try:
count = int(sys.argv[1])
except (ValueError, IndexError):
count = 1000 # default
mine = []
stdlib = []
message = os.urandom(count)
try:
consumed = 0
start = time.time()
for i in range(count):
mine.append(SHA256(message[:i]).hexdigest())
consumed += i
finally:
elapsed = time.time() - start
print ('Mine: %d hashes (%d bytes) in %0.2f secs (%0.2f H/s %d B/s)' % (i+1, consumed, elapsed, float(i+1) / elapsed, float(consumed) / elapsed))
try:
consumed = 0
start = time.time()
for i in range(count):
# The encoding before is to make sure we're comparing the same
stdlib.append(codecs.latin_1_encode(hashlib.sha256(message[:i]).hexdigest())[0])
consumed += i
finally:
elapsed = time.time() - start
print ('stdlib: %d hashes (%d bytes) in %0.2f secs (%0.2f H/s %d B/s)' % (i+1, consumed, elapsed, float(i+1) / elapsed, float(consumed) / elapsed))
for a, b, i in zip(mine, stdlib, range(count)):
assert a == b, '%r (mine) != %r (stdlib) calculating SHA256(%r) of length %d' % (a, b, message[:i], i)
| true | true |
f73b6f6235e4a70614c6d8a32122b633eaefd46e | 4,169 | py | Python | rh_project/rk4_two_body.py | hrichstein/phys_50733 | a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b | [
"MIT"
] | null | null | null | rh_project/rk4_two_body.py | hrichstein/phys_50733 | a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b | [
"MIT"
] | null | null | null | rh_project/rk4_two_body.py | hrichstein/phys_50733 | a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# from scipy.constants import G
# Setting plotting parameters
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
def find_vel_init(M1, M2, A):
    """Circular-orbit speed at separation A via Kepler's third law.

    M1, M2: masses in solar masses; A: separation in AU.  With the
    module-level G in AU^3 yr^-2 Msun^-1, the period comes out in years
    and the speed in AU/yr (the original comments said days, which did
    not match these units).
    """
    period = np.sqrt(4 * np.pi**2 * A**3 / G / (M1 + M2)) # period in years
    v = 2 * np.pi * A / period # AU/yr
    return v
def accel(Mass, pos1, pos2):
    """Gravitational acceleration of the body at pos1 due to a body at pos2.

    Mass: float
        mass of the attracting body (solar masses)
    pos1: array-like
        [x, y] position of first star (AU)
    pos2: array-like
        [x, y] position of second star (AU)

    Returns np.array([a_x, a_y]) in AU/yr^2 (uses module-level G).

    Bug fixes vs. the original:
    * the separation mixed each body's own coordinates -- it computed
      sqrt((x1 - y1)**2 + (x2 - y2)**2) instead of the Euclidean
      distance |pos1 - pos2|;
    * abs() on the displacement discarded the direction of the force,
      so the pull never changed sign as the bodies orbited.
    """
    dx = pos1[0] - pos2[0]
    dy = pos1[1] - pos2[1]
    r_sep = np.sqrt(dx**2 + dy**2)
    # a = -G * M * (r1 - r2) / |r1 - r2|^3, componentwise (signed).
    a_x = -G * Mass / r_sep**3 * dx
    a_y = -G * Mass / r_sep**3 * dy
    accel_arr = np.array([a_x, a_y])
    return accel_arr
# Gm(x1-x2)/r^3; r is the distance separating everything (both the stars)
# initial velocity only in one direction
def rk4(r1, r2, v1, v2, h):
    """Advance both stars one fourth-order Runge-Kutta step of size h.

    r1, r2: array-like
        [x, y] positions of star 1 and star 2
    v1, v2: array-like
        [vx, vy] velocities of star 1 and star 2
    h: float-like
        time step

    Returns np.array([r1_new, r2_new, v1_new, v2_new]).

    Bug fix: the fourth acceleration evaluation used a hard-coded mass of
    1 (``accel(1, ...)``); it now uses the module-level ``Mass`` like the
    other three stages.

    NOTE(review): every stage applies the *same* acceleration vector d_v
    to both stars; for a two-body problem star 2's acceleration should be
    the negative of star 1's -- confirm intent.
    """
    x0_s1 = r1[0]
    y0_s1 = r1[1]
    x0_s2 = r2[0]
    y0_s2 = r2[1]
    d_x0_s1 = v1[0]
    d_y0_s1 = v1[1]
    d_x0_s2 = v2[0]
    d_y0_s2 = v2[1]
    d_v0 = accel(Mass, r1, r2) # Same for both stars (velocity is what differs)
    # First set of RK4: half-step using the initial derivatives.
    x1_s1 = x0_s1 + 0.5*(d_x0_s1)*h
    y1_s1 = y0_s1 + 0.5*(d_y0_s1)*h
    x1_s2 = x0_s2 + 0.5*(d_x0_s2)*h
    y1_s2 = y0_s2 + 0.5*(d_y0_s2)*h
    d_x1_s1 = d_x0_s1 + 0.5*(d_v0[0])*h
    d_y1_s1 = d_y0_s1 + 0.5*(d_v0[1])*h
    d_x1_s2 = d_x0_s2 + 0.5*(d_v0[0])*h
    d_y1_s2 = d_y0_s2 + 0.5*(d_v0[1])*h
    r1_new = np.array([x1_s1,y1_s1])
    r2_new = np.array([x1_s2,y1_s2])
    d_v1 = accel(Mass, r1_new, r2_new)
    # Second: half-step using the first-stage derivatives.
    x2_s1 = x0_s1 + 0.5*(d_x1_s1)*h
    y2_s1 = y0_s1 + 0.5*(d_y1_s1)*h
    x2_s2 = x0_s2 + 0.5*(d_x1_s2)*h
    y2_s2 = y0_s2 + 0.5*(d_y1_s2)*h
    d_x2_s1 = d_x0_s1 + 0.5*(d_v1[0])*h
    d_y2_s1 = d_y0_s1 + 0.5*(d_v1[1])*h
    d_x2_s2 = d_x0_s2 + 0.5*(d_v1[0])*h
    d_y2_s2 = d_y0_s2 + 0.5*(d_v1[1])*h
    r1_new = np.array([x2_s1,y2_s1])
    r2_new = np.array([x2_s2,y2_s2])
    d_v2 = accel(Mass, r1_new, r2_new)
    # Third: full step using the second-stage derivatives.
    x3_s1 = x0_s1 + (d_x2_s1)*h
    y3_s1 = y0_s1 + (d_y2_s1)*h
    x3_s2 = x0_s2 + (d_x2_s2)*h
    y3_s2 = y0_s2 + (d_y2_s2)*h
    d_x3_s1 = d_x0_s1 + (d_v2[0])*h
    d_y3_s1 = d_y0_s1 + (d_v2[1])*h
    d_x3_s2 = d_x0_s2 + (d_v2[0])*h
    d_y3_s2 = d_y0_s2 + (d_v2[1])*h
    r1_new = np.array([x3_s1,y3_s1])
    r2_new = np.array([x3_s2,y3_s2])
    d_v3 = accel(Mass, r1_new, r2_new)  # bug fix: was accel(1, ...)
    # Combining: classic RK4 weighted average (k1 + 2*k2 + 2*k3 + k4)/6.
    xf_s1 = x0_s1 + h*(d_x0_s1 + 2*d_x1_s1 + 2*d_x2_s1 + d_x3_s1)/6
    yf_s1 = y0_s1 + h*(d_y0_s1 + 2*d_y1_s1 + 2*d_y2_s1 + d_y3_s1)/6
    rf_s1 = np.array([xf_s1,yf_s1])
    xf_s2 = x0_s2 + h*(d_x0_s2 + 2*d_x1_s2 + 2*d_x2_s2 + d_x3_s2)/6
    yf_s2 = y0_s2 + h*(d_y0_s2 + 2*d_y1_s2 + 2*d_y2_s2 + d_y3_s2)/6
    rf_s2 = np.array([xf_s2,yf_s2])
    d_xf_s1 = d_x0_s1 + h*(d_v0[0] + 2*d_v1[0] + 2*d_v2[0] + d_v3[0])/6
    d_yf_s1 = d_y0_s1 + h*(d_v0[1] + 2*d_v1[1] + 2*d_v2[1] + d_v3[1])/6
    vf_s1 = np.array([d_xf_s1,d_yf_s1])
    d_xf_s2 = d_x0_s2 + h*(d_v0[0] + 2*d_v1[0] + 2*d_v2[0] + d_v3[0])/6
    d_yf_s2 = d_y0_s2 + h*(d_v0[1] + 2*d_v1[1] + 2*d_v2[1] + d_v3[1])/6
    vf_s2 = np.array([d_xf_s2,d_yf_s2])
    results_arr = np.array([rf_s1, rf_s2, vf_s1, vf_s2])
    return results_arr
G = 4 * np.pi**2 # AU^3 yr^-2 M_sun^-1
Mass = 2 # Solar masses
A = 0.2 # AU (sep dist)
a = 0
b = 0.06 # years
N = 100000
h = (b-a) / N
tpoints = np.arange(a,b,h)
# Setting up arrays (one position sample per time step, per star)
xpts_s1 = [[] for xx in range(len(tpoints))]
ypts_s1 = [[] for xx in range(len(tpoints))]
xpts_s2 = [[] for xx in range(len(tpoints))]
ypts_s2 = [[] for xx in range(len(tpoints))]
# Initial conditions: stars at +/-0.1 AU on the y-axis with opposite
# tangential velocities sized for a circular orbit of two 1-Msun stars.
r0_s1 = np.array([0,0.1])
r0_s2 = np.array([0,-0.1])
vx0 = find_vel_init(1, 1, A)
vy0 = 0
v0_s1 = np.array([vx0,0])
v0_s2 = np.array([-vx0,0])
param_arr = np.array([r0_s1, r0_s2, v0_s1, v0_s2])
# March the system forward, recording both positions at every step.
for tt in range(len(tpoints)):
    xpts_s1[tt] = param_arr[0][0]
    ypts_s1[tt] = param_arr[0][1]
    xpts_s2[tt] = param_arr[1][0]
    ypts_s2[tt] = param_arr[1][1]
    param_arr = rk4(param_arr[0], param_arr[1], param_arr[2], param_arr[3], h)
# Trace the two orbits.
plt.plot(xpts_s1, ypts_s1)
plt.plot(xpts_s2, ypts_s2)
plt.show()
| 21.713542 | 77 | 0.623171 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc,rcParams
rc('text', usetex=True)
rc('axes', linewidth=2)
rc('font', weight='bold')
rc('font', **{'family': 'serif', 'serif':['Computer Modern']})
def find_vel_init(M1, M2, A):
period = np.sqrt(4 * np.pi**2 * A**3 / G / (M1 + M2))
v = 2 * np.pi * A / period
return v
def accel(Mass, pos1, pos2):
r_sep = np.sqrt((pos1[0] - pos1[1])**2 + (pos2[0] - pos2[1])**2)
a_x = -G * Mass / r_sep**3 * abs(pos1[0] - pos2[0])
a_y = -G * Mass / r_sep**3 * abs(pos1[1] - pos2[1])
accel_arr = np.array([a_x, a_y])
return accel_arr
def rk4(r1, r2, v1, v2, h):
x0_s1 = r1[0]
y0_s1 = r1[1]
x0_s2 = r2[0]
y0_s2 = r2[1]
d_x0_s1 = v1[0]
d_y0_s1 = v1[1]
d_x0_s2 = v2[0]
d_y0_s2 = v2[1]
d_v0 = accel(Mass, r1, r2)
x1_s1 = x0_s1 + 0.5*(d_x0_s1)*h
y1_s1 = y0_s1 + 0.5*(d_y0_s1)*h
x1_s2 = x0_s2 + 0.5*(d_x0_s2)*h
y1_s2 = y0_s2 + 0.5*(d_y0_s2)*h
d_x1_s1 = d_x0_s1 + 0.5*(d_v0[0])*h
d_y1_s1 = d_y0_s1 + 0.5*(d_v0[1])*h
d_x1_s2 = d_x0_s2 + 0.5*(d_v0[0])*h
d_y1_s2 = d_y0_s2 + 0.5*(d_v0[1])*h
r1_new = np.array([x1_s1,y1_s1])
r2_new = np.array([x1_s2,y1_s2])
d_v1 = accel(Mass, r1_new, r2_new)
x2_s1 = x0_s1 + 0.5*(d_x1_s1)*h
y2_s1 = y0_s1 + 0.5*(d_y1_s1)*h
x2_s2 = x0_s2 + 0.5*(d_x1_s2)*h
y2_s2 = y0_s2 + 0.5*(d_y1_s2)*h
d_x2_s1 = d_x0_s1 + 0.5*(d_v1[0])*h
d_y2_s1 = d_y0_s1 + 0.5*(d_v1[1])*h
d_x2_s2 = d_x0_s2 + 0.5*(d_v1[0])*h
d_y2_s2 = d_y0_s2 + 0.5*(d_v1[1])*h
r1_new = np.array([x2_s1,y2_s1])
r2_new = np.array([x2_s2,y2_s2])
d_v2 = accel(Mass, r1_new, r2_new)
x3_s1 = x0_s1 + (d_x2_s1)*h
y3_s1 = y0_s1 + (d_y2_s1)*h
x3_s2 = x0_s2 + (d_x2_s2)*h
y3_s2 = y0_s2 + (d_y2_s2)*h
d_x3_s1 = d_x0_s1 + (d_v2[0])*h
d_y3_s1 = d_y0_s1 + (d_v2[1])*h
d_x3_s2 = d_x0_s2 + (d_v2[0])*h
d_y3_s2 = d_y0_s2 + (d_v2[1])*h
r1_new = np.array([x3_s1,y3_s1])
r2_new = np.array([x3_s2,y3_s2])
d_v3 = accel(1, r1_new, r2_new)
xf_s1 = x0_s1 + h*(d_x0_s1 + 2*d_x1_s1 + 2*d_x2_s1 + d_x3_s1)/6
yf_s1 = y0_s1 + h*(d_y0_s1 + 2*d_y1_s1 + 2*d_y2_s1 + d_y3_s1)/6
rf_s1 = np.array([xf_s1,yf_s1])
xf_s2 = x0_s2 + h*(d_x0_s2 + 2*d_x1_s2 + 2*d_x2_s2 + d_x3_s2)/6
yf_s2 = y0_s2 + h*(d_y0_s2 + 2*d_y1_s2 + 2*d_y2_s2 + d_y3_s2)/6
rf_s2 = np.array([xf_s2,yf_s2])
d_xf_s1 = d_x0_s1 + h*(d_v0[0] + 2*d_v1[0] + 2*d_v2[0] + d_v3[0])/6
d_yf_s1 = d_y0_s1 + h*(d_v0[1] + 2*d_v1[1] + 2*d_v2[1] + d_v3[1])/6
vf_s1 = np.array([d_xf_s1,d_yf_s1])
d_xf_s2 = d_x0_s2 + h*(d_v0[0] + 2*d_v1[0] + 2*d_v2[0] + d_v3[0])/6
d_yf_s2 = d_y0_s2 + h*(d_v0[1] + 2*d_v1[1] + 2*d_v2[1] + d_v3[1])/6
vf_s2 = np.array([d_xf_s2,d_yf_s2])
results_arr = np.array([rf_s1, rf_s2, vf_s1, vf_s2])
return results_arr
G = 4 * np.pi**2
Mass = 2
A = 0.2
a = 0
b = 0.06
N = 100000
h = (b-a) / N
tpoints = np.arange(a,b,h)
xpts_s1 = [[] for xx in range(len(tpoints))]
ypts_s1 = [[] for xx in range(len(tpoints))]
xpts_s2 = [[] for xx in range(len(tpoints))]
ypts_s2 = [[] for xx in range(len(tpoints))]
r0_s1 = np.array([0,0.1])
r0_s2 = np.array([0,-0.1])
vx0 = find_vel_init(1, 1, A)
vy0 = 0
v0_s1 = np.array([vx0,0])
v0_s2 = np.array([-vx0,0])
param_arr = np.array([r0_s1, r0_s2, v0_s1, v0_s2])
for tt in range(len(tpoints)):
xpts_s1[tt] = param_arr[0][0]
ypts_s1[tt] = param_arr[0][1]
xpts_s2[tt] = param_arr[1][0]
ypts_s2[tt] = param_arr[1][1]
param_arr = rk4(param_arr[0], param_arr[1], param_arr[2], param_arr[3], h)
plt.plot(xpts_s1, ypts_s1)
plt.plot(xpts_s2, ypts_s2)
plt.show()
| true | true |
f73b6f6d7d6955528eef744b500e27d5eca877ef | 1,000 | py | Python | vyperlogix/django/forms/fields.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | 1 | 2020-09-29T01:36:33.000Z | 2020-09-29T01:36:33.000Z | vyperlogix/django/forms/fields.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | vyperlogix/django/forms/fields.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | [
"CC0-1.0"
] | null | null | null | import sys
from vyperlogix.misc import _utils
from django.utils.datastructures import SortedDict as SortedDictFromList
from vyperlogix.classes.SmartObject import SmartObject
def fields_for_model(model, formfield_callback=lambda f: f.formfield()):
    """
    Returns a SortedDict mapping field name to form field for the given
    Django model class, preserving the model's field declaration order.
    Provide ``formfield_callback`` if you want to define different logic for
    determining the formfield for a given database field. It's a callable that
    takes a database Field instance and returns a form Field instance.
    """
    field_list = []
    try:
        opts = model._meta
        for f in opts.fields + opts.many_to_many:
            # Non-editable fields never appear on a form.
            if not f.editable:
                continue
            formfield = formfield_callback(f)
            if formfield:
                field_list.append((f.name, formfield))
    except Exception as details:
        # Best-effort: log and fall through with whatever was collected.
        print >>sys.stderr, _utils.formattedException(details=details)
    # Bug fix: the previous dict(field_list) round-trip went through an
    # unordered dict, destroying the very ordering SortedDict preserves.
    return SortedDictFromList(field_list)
| 33.333333 | 78 | 0.698 | import sys
from vyperlogix.misc import _utils
from django.utils.datastructures import SortedDict as SortedDictFromList
from vyperlogix.classes.SmartObject import SmartObject
def fields_for_model(model, formfield_callback=lambda f: f.formfield()):
field_list = []
try:
opts = model._meta
for f in opts.fields + opts.many_to_many:
if not f.editable:
continue
formfield = formfield_callback(f)
if formfield:
field_list.append((f.name, formfield))
except Exception as details:
print >>sys.stderr, _utils.formattedException(details=details)
return SortedDictFromList(dict(field_list))
| true | true |
f73b70081d30dca56b8865df6df3912715334caf | 51,771 | py | Python | dianhua/worker/crawler/china_mobile/jiangsu/main.py | Svolcano/python_exercise | a50e05891cc7f1fbb40ebcae324b09b6a14473d2 | [
"MIT"
] | 6 | 2015-07-09T08:47:08.000Z | 2020-05-16T10:47:31.000Z | dianhua/worker/crawler/china_mobile/jiangsu/main.py | Svolcano/python_exercise | a50e05891cc7f1fbb40ebcae324b09b6a14473d2 | [
"MIT"
] | 7 | 2019-03-27T04:13:12.000Z | 2022-03-02T14:54:56.000Z | dianhua/worker/crawler/china_mobile/jiangsu/main.py | Svolcano/python_exercise | a50e05891cc7f1fbb40ebcae324b09b6a14473d2 | [
"MIT"
] | 2 | 2019-06-21T06:46:28.000Z | 2019-12-23T09:31:09.000Z | # -*- coding: utf-8 -*-
import base64
import datetime
import json
import sys
import time
import random
import traceback
from datetime import date
from calendar import monthrange
import hashlib
import re
import execjs
from dateutil.relativedelta import relativedelta
from requests.utils import add_dict_to_cookiejar
reload(sys)
sys.setdefaultencoding('utf8')
if __name__ == '__main__':
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from crawler.base_crawler import BaseCrawler
from tool import parse_call_record, parse_call_record_short
else:
from worker.crawler.base_crawler import BaseCrawler
from worker.crawler.china_mobile.jiangsu.tool import parse_call_record, parse_call_record_short
class Crawler(BaseCrawler):
    def __init__(self, **kwargs):
        """Initialise the crawler and the SMS-throttling state."""
        super(Crawler, self).__init__(**kwargs)
        # to keep time > 30 seconds between sending two sms verifications
        self.sms_send_time = None
        self.start_url = ""
    def need_parameters(self, **kwargs):
        """Credentials required up front: only the service password ('pin_pwd')."""
        return ['pin_pwd']
    def get_login_verify_type(self, **kwargs):
        """This carrier's login verification is done via an SMS code."""
        return 'SMS'
    def enPwd(self, pwd):
        """DES-encrypt the login password exactly as the site's own JS does.

        Runs the site's vendored CryptoJS DES bundle through execjs and
        calls its ``encryptByDES`` helper: ECB mode, PKCS7 padding, fixed
        key ``"1234567890"``. Returns the Base64 ciphertext string that the
        login form expects in its ``password`` field.

        NOTE: the JS blob below is copied verbatim from the carrier site;
        do not reformat it — the login only works if the ciphertext matches
        what the site's front end would produce.
        """
        js_source = """
var CryptoJS=CryptoJS||function(u,l){var d={},n=d.lib={},p=function(){},s=n.Base={extend:function(a){p.prototype=this;var c=new p;a&&c.mixIn(a);c.hasOwnProperty("init")||(c.init=function(){c.$super.init.apply(this,arguments)});c.init.prototype=c;c.$super=this;return c},create:function(){var a=this.extend();a.init.apply(a,arguments);return a},init:function(){},mixIn:function(a){for(var c in a)a.hasOwnProperty(c)&&(this[c]=a[c]);a.hasOwnProperty("toString")&&(this.toString=a.toString)},clone:function(){return this.init.prototype.extend(this)}},
q=n.WordArray=s.extend({init:function(a,c){a=this.words=a||[];this.sigBytes=c!=l?c:4*a.length},toString:function(a){return(a||v).stringify(this)},concat:function(a){var c=this.words,m=a.words,f=this.sigBytes;a=a.sigBytes;this.clamp();if(f%4)for(var t=0;t<a;t++)c[f+t>>>2]|=(m[t>>>2]>>>24-8*(t%4)&255)<<24-8*((f+t)%4);else if(65535<m.length)for(t=0;t<a;t+=4)c[f+t>>>2]=m[t>>>2];else c.push.apply(c,m);this.sigBytes+=a;return this},clamp:function(){var a=this.words,c=this.sigBytes;a[c>>>2]&=4294967295<<
32-8*(c%4);a.length=u.ceil(c/4)},clone:function(){var a=s.clone.call(this);a.words=this.words.slice(0);return a},random:function(a){for(var c=[],m=0;m<a;m+=4)c.push(4294967296*u.random()|0);return new q.init(c,a)}}),w=d.enc={},v=w.Hex={stringify:function(a){var c=a.words;a=a.sigBytes;for(var m=[],f=0;f<a;f++){var t=c[f>>>2]>>>24-8*(f%4)&255;m.push((t>>>4).toString(16));m.push((t&15).toString(16))}return m.join("")},parse:function(a){for(var c=a.length,m=[],f=0;f<c;f+=2)m[f>>>3]|=parseInt(a.substr(f,
2),16)<<24-4*(f%8);return new q.init(m,c/2)}},b=w.Latin1={stringify:function(a){var c=a.words;a=a.sigBytes;for(var m=[],f=0;f<a;f++)m.push(String.fromCharCode(c[f>>>2]>>>24-8*(f%4)&255));return m.join("")},parse:function(a){for(var c=a.length,m=[],f=0;f<c;f++)m[f>>>2]|=(a.charCodeAt(f)&255)<<24-8*(f%4);return new q.init(m,c)}},x=w.Utf8={stringify:function(a){try{return decodeURIComponent(escape(b.stringify(a)))}catch(c){throw Error("Malformed UTF-8 data");}},parse:function(a){return b.parse(unescape(encodeURIComponent(a)))}},
r=n.BufferedBlockAlgorithm=s.extend({reset:function(){this._data=new q.init;this._nDataBytes=0},_append:function(a){"string"==typeof a&&(a=x.parse(a));this._data.concat(a);this._nDataBytes+=a.sigBytes},_process:function(a){var c=this._data,m=c.words,f=c.sigBytes,t=this.blockSize,b=f/(4*t),b=a?u.ceil(b):u.max((b|0)-this._minBufferSize,0);a=b*t;f=u.min(4*a,f);if(a){for(var e=0;e<a;e+=t)this._doProcessBlock(m,e);e=m.splice(0,a);c.sigBytes-=f}return new q.init(e,f)},clone:function(){var a=s.clone.call(this);
a._data=this._data.clone();return a},_minBufferSize:0});n.Hasher=r.extend({cfg:s.extend(),init:function(a){this.cfg=this.cfg.extend(a);this.reset()},reset:function(){r.reset.call(this);this._doReset()},update:function(a){this._append(a);this._process();return this},finalize:function(a){a&&this._append(a);return this._doFinalize()},blockSize:16,_createHelper:function(a){return function(c,m){return(new a.init(m)).finalize(c)}},_createHmacHelper:function(a){return function(c,m){return(new e.HMAC.init(a,
m)).finalize(c)}}});var e=d.algo={};return d}(Math);
(function(){var u=CryptoJS,l=u.lib.WordArray;u.enc.Base64={stringify:function(d){var n=d.words,l=d.sigBytes,s=this._map;d.clamp();d=[];for(var q=0;q<l;q+=3)for(var w=(n[q>>>2]>>>24-8*(q%4)&255)<<16|(n[q+1>>>2]>>>24-8*((q+1)%4)&255)<<8|n[q+2>>>2]>>>24-8*((q+2)%4)&255,v=0;4>v&&q+0.75*v<l;v++)d.push(s.charAt(w>>>6*(3-v)&63));if(n=s.charAt(64))for(;d.length%4;)d.push(n);return d.join("")},parse:function(d){var n=d.length,p=this._map,s=p.charAt(64);s&&(s=d.indexOf(s),-1!=s&&(n=s));for(var s=[],q=0,w=0;w<
n;w++)if(w%4){var v=p.indexOf(d.charAt(w-1))<<2*(w%4),b=p.indexOf(d.charAt(w))>>>6-2*(w%4);s[q>>>2]|=(v|b)<<24-8*(q%4);q++}return l.create(s,q)},_map:"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="}})();
(function(u){function l(b,e,a,c,m,f,t){b=b+(e&a|~e&c)+m+t;return(b<<f|b>>>32-f)+e}function d(b,e,a,c,m,f,t){b=b+(e&c|a&~c)+m+t;return(b<<f|b>>>32-f)+e}function n(b,e,a,c,m,f,t){b=b+(e^a^c)+m+t;return(b<<f|b>>>32-f)+e}function p(b,e,a,c,m,f,t){b=b+(a^(e|~c))+m+t;return(b<<f|b>>>32-f)+e}for(var s=CryptoJS,q=s.lib,w=q.WordArray,v=q.Hasher,q=s.algo,b=[],x=0;64>x;x++)b[x]=4294967296*u.abs(u.sin(x+1))|0;q=q.MD5=v.extend({_doReset:function(){this._hash=new w.init([1732584193,4023233417,2562383102,271733878])},
_doProcessBlock:function(r,e){for(var a=0;16>a;a++){var c=e+a,m=r[c];r[c]=(m<<8|m>>>24)&16711935|(m<<24|m>>>8)&4278255360}var a=this._hash.words,c=r[e+0],m=r[e+1],f=r[e+2],t=r[e+3],y=r[e+4],q=r[e+5],s=r[e+6],w=r[e+7],v=r[e+8],u=r[e+9],x=r[e+10],z=r[e+11],A=r[e+12],B=r[e+13],C=r[e+14],D=r[e+15],g=a[0],h=a[1],j=a[2],k=a[3],g=l(g,h,j,k,c,7,b[0]),k=l(k,g,h,j,m,12,b[1]),j=l(j,k,g,h,f,17,b[2]),h=l(h,j,k,g,t,22,b[3]),g=l(g,h,j,k,y,7,b[4]),k=l(k,g,h,j,q,12,b[5]),j=l(j,k,g,h,s,17,b[6]),h=l(h,j,k,g,w,22,b[7]),
g=l(g,h,j,k,v,7,b[8]),k=l(k,g,h,j,u,12,b[9]),j=l(j,k,g,h,x,17,b[10]),h=l(h,j,k,g,z,22,b[11]),g=l(g,h,j,k,A,7,b[12]),k=l(k,g,h,j,B,12,b[13]),j=l(j,k,g,h,C,17,b[14]),h=l(h,j,k,g,D,22,b[15]),g=d(g,h,j,k,m,5,b[16]),k=d(k,g,h,j,s,9,b[17]),j=d(j,k,g,h,z,14,b[18]),h=d(h,j,k,g,c,20,b[19]),g=d(g,h,j,k,q,5,b[20]),k=d(k,g,h,j,x,9,b[21]),j=d(j,k,g,h,D,14,b[22]),h=d(h,j,k,g,y,20,b[23]),g=d(g,h,j,k,u,5,b[24]),k=d(k,g,h,j,C,9,b[25]),j=d(j,k,g,h,t,14,b[26]),h=d(h,j,k,g,v,20,b[27]),g=d(g,h,j,k,B,5,b[28]),k=d(k,g,
h,j,f,9,b[29]),j=d(j,k,g,h,w,14,b[30]),h=d(h,j,k,g,A,20,b[31]),g=n(g,h,j,k,q,4,b[32]),k=n(k,g,h,j,v,11,b[33]),j=n(j,k,g,h,z,16,b[34]),h=n(h,j,k,g,C,23,b[35]),g=n(g,h,j,k,m,4,b[36]),k=n(k,g,h,j,y,11,b[37]),j=n(j,k,g,h,w,16,b[38]),h=n(h,j,k,g,x,23,b[39]),g=n(g,h,j,k,B,4,b[40]),k=n(k,g,h,j,c,11,b[41]),j=n(j,k,g,h,t,16,b[42]),h=n(h,j,k,g,s,23,b[43]),g=n(g,h,j,k,u,4,b[44]),k=n(k,g,h,j,A,11,b[45]),j=n(j,k,g,h,D,16,b[46]),h=n(h,j,k,g,f,23,b[47]),g=p(g,h,j,k,c,6,b[48]),k=p(k,g,h,j,w,10,b[49]),j=p(j,k,g,h,
C,15,b[50]),h=p(h,j,k,g,q,21,b[51]),g=p(g,h,j,k,A,6,b[52]),k=p(k,g,h,j,t,10,b[53]),j=p(j,k,g,h,x,15,b[54]),h=p(h,j,k,g,m,21,b[55]),g=p(g,h,j,k,v,6,b[56]),k=p(k,g,h,j,D,10,b[57]),j=p(j,k,g,h,s,15,b[58]),h=p(h,j,k,g,B,21,b[59]),g=p(g,h,j,k,y,6,b[60]),k=p(k,g,h,j,z,10,b[61]),j=p(j,k,g,h,f,15,b[62]),h=p(h,j,k,g,u,21,b[63]);a[0]=a[0]+g|0;a[1]=a[1]+h|0;a[2]=a[2]+j|0;a[3]=a[3]+k|0},_doFinalize:function(){var b=this._data,e=b.words,a=8*this._nDataBytes,c=8*b.sigBytes;e[c>>>5]|=128<<24-c%32;var m=u.floor(a/
4294967296);e[(c+64>>>9<<4)+15]=(m<<8|m>>>24)&16711935|(m<<24|m>>>8)&4278255360;e[(c+64>>>9<<4)+14]=(a<<8|a>>>24)&16711935|(a<<24|a>>>8)&4278255360;b.sigBytes=4*(e.length+1);this._process();b=this._hash;e=b.words;for(a=0;4>a;a++)c=e[a],e[a]=(c<<8|c>>>24)&16711935|(c<<24|c>>>8)&4278255360;return b},clone:function(){var b=v.clone.call(this);b._hash=this._hash.clone();return b}});s.MD5=v._createHelper(q);s.HmacMD5=v._createHmacHelper(q)})(Math);
(function(){var u=CryptoJS,l=u.lib,d=l.Base,n=l.WordArray,l=u.algo,p=l.EvpKDF=d.extend({cfg:d.extend({keySize:4,hasher:l.MD5,iterations:1}),init:function(d){this.cfg=this.cfg.extend(d)},compute:function(d,l){for(var p=this.cfg,v=p.hasher.create(),b=n.create(),u=b.words,r=p.keySize,p=p.iterations;u.length<r;){e&&v.update(e);var e=v.update(d).finalize(l);v.reset();for(var a=1;a<p;a++)e=v.finalize(e),v.reset();b.concat(e)}b.sigBytes=4*r;return b}});u.EvpKDF=function(d,l,n){return p.create(n).compute(d,
l)}})();
CryptoJS.lib.Cipher||function(u){var l=CryptoJS,d=l.lib,n=d.Base,p=d.WordArray,s=d.BufferedBlockAlgorithm,q=l.enc.Base64,w=l.algo.EvpKDF,v=d.Cipher=s.extend({cfg:n.extend(),createEncryptor:function(m,a){return this.create(this._ENC_XFORM_MODE,m,a)},createDecryptor:function(m,a){return this.create(this._DEC_XFORM_MODE,m,a)},init:function(m,a,b){this.cfg=this.cfg.extend(b);this._xformMode=m;this._key=a;this.reset()},reset:function(){s.reset.call(this);this._doReset()},process:function(a){this._append(a);return this._process()},
finalize:function(a){a&&this._append(a);return this._doFinalize()},keySize:4,ivSize:4,_ENC_XFORM_MODE:1,_DEC_XFORM_MODE:2,_createHelper:function(m){return{encrypt:function(f,b,e){return("string"==typeof b?c:a).encrypt(m,f,b,e)},decrypt:function(f,b,e){return("string"==typeof b?c:a).decrypt(m,f,b,e)}}}});d.StreamCipher=v.extend({_doFinalize:function(){return this._process(!0)},blockSize:1});var b=l.mode={},x=function(a,f,b){var c=this._iv;c?this._iv=u:c=this._prevBlock;for(var e=0;e<b;e++)a[f+e]^=
c[e]},r=(d.BlockCipherMode=n.extend({createEncryptor:function(a,f){return this.Encryptor.create(a,f)},createDecryptor:function(a,f){return this.Decryptor.create(a,f)},init:function(a,f){this._cipher=a;this._iv=f}})).extend();r.Encryptor=r.extend({processBlock:function(a,f){var b=this._cipher,c=b.blockSize;x.call(this,a,f,c);b.encryptBlock(a,f);this._prevBlock=a.slice(f,f+c)}});r.Decryptor=r.extend({processBlock:function(a,b){var c=this._cipher,e=c.blockSize,d=a.slice(b,b+e);c.decryptBlock(a,b);x.call(this,
a,b,e);this._prevBlock=d}});b=b.CBC=r;r=(l.pad={}).Pkcs7={pad:function(a,b){for(var c=4*b,c=c-a.sigBytes%c,e=c<<24|c<<16|c<<8|c,d=[],l=0;l<c;l+=4)d.push(e);c=p.create(d,c);a.concat(c)},unpad:function(a){a.sigBytes-=a.words[a.sigBytes-1>>>2]&255}};d.BlockCipher=v.extend({cfg:v.cfg.extend({mode:b,padding:r}),reset:function(){v.reset.call(this);var a=this.cfg,c=a.iv,a=a.mode;if(this._xformMode==this._ENC_XFORM_MODE)var b=a.createEncryptor;else b=a.createDecryptor,this._minBufferSize=1;this._mode=b.call(a,
this,c&&c.words)},_doProcessBlock:function(a,c){this._mode.processBlock(a,c)},_doFinalize:function(){var a=this.cfg.padding;if(this._xformMode==this._ENC_XFORM_MODE){a.pad(this._data,this.blockSize);var c=this._process(!0)}else c=this._process(!0),a.unpad(c);return c},blockSize:4});var e=d.CipherParams=n.extend({init:function(a){this.mixIn(a)},toString:function(a){return(a||this.formatter).stringify(this)}}),b=(l.format={}).OpenSSL={stringify:function(a){var c=a.ciphertext;a=a.salt;return(a?p.create([1398893684,
1701076831]).concat(a).concat(c):c).toString(q)},parse:function(a){a=q.parse(a);var c=a.words;if(1398893684==c[0]&&1701076831==c[1]){var b=p.create(c.slice(2,4));c.splice(0,4);a.sigBytes-=16}return e.create({ciphertext:a,salt:b})}},a=d.SerializableCipher=n.extend({cfg:n.extend({format:b}),encrypt:function(a,c,b,d){d=this.cfg.extend(d);var l=a.createEncryptor(b,d);c=l.finalize(c);l=l.cfg;return e.create({ciphertext:c,key:b,iv:l.iv,algorithm:a,mode:l.mode,padding:l.padding,blockSize:a.blockSize,formatter:d.format})},
decrypt:function(a,c,b,e){e=this.cfg.extend(e);c=this._parse(c,e.format);return a.createDecryptor(b,e).finalize(c.ciphertext)},_parse:function(a,c){return"string"==typeof a?c.parse(a,this):a}}),l=(l.kdf={}).OpenSSL={execute:function(a,c,b,d){d||(d=p.random(8));a=w.create({keySize:c+b}).compute(a,d);b=p.create(a.words.slice(c),4*b);a.sigBytes=4*c;return e.create({key:a,iv:b,salt:d})}},c=d.PasswordBasedCipher=a.extend({cfg:a.cfg.extend({kdf:l}),encrypt:function(c,b,e,d){d=this.cfg.extend(d);e=d.kdf.execute(e,
c.keySize,c.ivSize);d.iv=e.iv;c=a.encrypt.call(this,c,b,e.key,d);c.mixIn(e);return c},decrypt:function(c,b,e,d){d=this.cfg.extend(d);b=this._parse(b,d.format);e=d.kdf.execute(e,c.keySize,c.ivSize,b.salt);d.iv=e.iv;return a.decrypt.call(this,c,b,e.key,d)}})}();
(function(){function u(b,a){var c=(this._lBlock>>>b^this._rBlock)&a;this._rBlock^=c;this._lBlock^=c<<b}function l(b,a){var c=(this._rBlock>>>b^this._lBlock)&a;this._lBlock^=c;this._rBlock^=c<<b}var d=CryptoJS,n=d.lib,p=n.WordArray,n=n.BlockCipher,s=d.algo,q=[57,49,41,33,25,17,9,1,58,50,42,34,26,18,10,2,59,51,43,35,27,19,11,3,60,52,44,36,63,55,47,39,31,23,15,7,62,54,46,38,30,22,14,6,61,53,45,37,29,21,13,5,28,20,12,4],w=[14,17,11,24,1,5,3,28,15,6,21,10,23,19,12,4,26,8,16,7,27,20,13,2,41,52,31,37,47,
55,30,40,51,45,33,48,44,49,39,56,34,53,46,42,50,36,29,32],v=[1,2,4,6,8,10,12,14,15,17,19,21,23,25,27,28],b=[{"0":8421888,268435456:32768,536870912:8421378,805306368:2,1073741824:512,1342177280:8421890,1610612736:8389122,1879048192:8388608,2147483648:514,2415919104:8389120,2684354560:33280,2952790016:8421376,3221225472:32770,3489660928:8388610,3758096384:0,4026531840:33282,134217728:0,402653184:8421890,671088640:33282,939524096:32768,1207959552:8421888,1476395008:512,1744830464:8421378,2013265920:2,
2281701376:8389120,2550136832:33280,2818572288:8421376,3087007744:8389122,3355443200:8388610,3623878656:32770,3892314112:514,4160749568:8388608,1:32768,268435457:2,536870913:8421888,805306369:8388608,1073741825:8421378,1342177281:33280,1610612737:512,1879048193:8389122,2147483649:8421890,2415919105:8421376,2684354561:8388610,2952790017:33282,3221225473:514,3489660929:8389120,3758096385:32770,4026531841:0,134217729:8421890,402653185:8421376,671088641:8388608,939524097:512,1207959553:32768,1476395009:8388610,
1744830465:2,2013265921:33282,2281701377:32770,2550136833:8389122,2818572289:514,3087007745:8421888,3355443201:8389120,3623878657:0,3892314113:33280,4160749569:8421378},{"0":1074282512,16777216:16384,33554432:524288,50331648:1074266128,67108864:1073741840,83886080:1074282496,100663296:1073758208,117440512:16,134217728:540672,150994944:1073758224,167772160:1073741824,184549376:540688,201326592:524304,218103808:0,234881024:16400,251658240:1074266112,8388608:1073758208,25165824:540688,41943040:16,58720256:1073758224,
75497472:1074282512,92274688:1073741824,109051904:524288,125829120:1074266128,142606336:524304,159383552:0,176160768:16384,192937984:1074266112,209715200:1073741840,226492416:540672,243269632:1074282496,260046848:16400,268435456:0,285212672:1074266128,301989888:1073758224,318767104:1074282496,335544320:1074266112,352321536:16,369098752:540688,385875968:16384,402653184:16400,419430400:524288,436207616:524304,452984832:1073741840,469762048:540672,486539264:1073758208,503316480:1073741824,520093696:1074282512,
276824064:540688,293601280:524288,310378496:1074266112,327155712:16384,343932928:1073758208,360710144:1074282512,377487360:16,394264576:1073741824,411041792:1074282496,427819008:1073741840,444596224:1073758224,461373440:524304,478150656:0,494927872:16400,511705088:1074266128,528482304:540672},{"0":260,1048576:0,2097152:67109120,3145728:65796,4194304:65540,5242880:67108868,6291456:67174660,7340032:67174400,8388608:67108864,9437184:67174656,10485760:65792,11534336:67174404,12582912:67109124,13631488:65536,
14680064:4,15728640:256,524288:67174656,1572864:67174404,2621440:0,3670016:67109120,4718592:67108868,5767168:65536,6815744:65540,7864320:260,8912896:4,9961472:256,11010048:67174400,12058624:65796,13107200:65792,14155776:67109124,15204352:67174660,16252928:67108864,16777216:67174656,17825792:65540,18874368:65536,19922944:67109120,20971520:256,22020096:67174660,23068672:67108868,24117248:0,25165824:67109124,26214400:67108864,27262976:4,28311552:65792,29360128:67174400,30408704:260,31457280:65796,32505856:67174404,
17301504:67108864,18350080:260,19398656:67174656,20447232:0,21495808:65540,22544384:67109120,23592960:256,24641536:67174404,25690112:65536,26738688:67174660,27787264:65796,28835840:67108868,29884416:67109124,30932992:67174400,31981568:4,33030144:65792},{"0":2151682048,65536:2147487808,131072:4198464,196608:2151677952,262144:0,327680:4198400,393216:2147483712,458752:4194368,524288:2147483648,589824:4194304,655360:64,720896:2147487744,786432:2151678016,851968:4160,917504:4096,983040:2151682112,32768:2147487808,
98304:64,163840:2151678016,229376:2147487744,294912:4198400,360448:2151682112,425984:0,491520:2151677952,557056:4096,622592:2151682048,688128:4194304,753664:4160,819200:2147483648,884736:4194368,950272:4198464,1015808:2147483712,1048576:4194368,1114112:4198400,1179648:2147483712,1245184:0,1310720:4160,1376256:2151678016,1441792:2151682048,1507328:2147487808,1572864:2151682112,1638400:2147483648,1703936:2151677952,1769472:4198464,1835008:2147487744,1900544:4194304,1966080:64,2031616:4096,1081344:2151677952,
1146880:2151682112,1212416:0,1277952:4198400,1343488:4194368,1409024:2147483648,1474560:2147487808,1540096:64,1605632:2147483712,1671168:4096,1736704:2147487744,1802240:2151678016,1867776:4160,1933312:2151682048,1998848:4194304,2064384:4198464},{"0":128,4096:17039360,8192:262144,12288:536870912,16384:537133184,20480:16777344,24576:553648256,28672:262272,32768:16777216,36864:537133056,40960:536871040,45056:553910400,49152:553910272,53248:0,57344:17039488,61440:553648128,2048:17039488,6144:553648256,
10240:128,14336:17039360,18432:262144,22528:537133184,26624:553910272,30720:536870912,34816:537133056,38912:0,43008:553910400,47104:16777344,51200:536871040,55296:553648128,59392:16777216,63488:262272,65536:262144,69632:128,73728:536870912,77824:553648256,81920:16777344,86016:553910272,90112:537133184,94208:16777216,98304:553910400,102400:553648128,106496:17039360,110592:537133056,114688:262272,118784:536871040,122880:0,126976:17039488,67584:553648256,71680:16777216,75776:17039360,79872:537133184,
83968:536870912,88064:17039488,92160:128,96256:553910272,100352:262272,104448:553910400,108544:0,112640:553648128,116736:16777344,120832:262144,124928:537133056,129024:536871040},{"0":268435464,256:8192,512:270532608,768:270540808,1024:268443648,1280:2097152,1536:2097160,1792:268435456,2048:0,2304:268443656,2560:2105344,2816:8,3072:270532616,3328:2105352,3584:8200,3840:270540800,128:270532608,384:270540808,640:8,896:2097152,1152:2105352,1408:268435464,1664:268443648,1920:8200,2176:2097160,2432:8192,
2688:268443656,2944:270532616,3200:0,3456:270540800,3712:2105344,3968:268435456,4096:268443648,4352:270532616,4608:270540808,4864:8200,5120:2097152,5376:268435456,5632:268435464,5888:2105344,6144:2105352,6400:0,6656:8,6912:270532608,7168:8192,7424:268443656,7680:270540800,7936:2097160,4224:8,4480:2105344,4736:2097152,4992:268435464,5248:268443648,5504:8200,5760:270540808,6016:270532608,6272:270540800,6528:270532616,6784:8192,7040:2105352,7296:2097160,7552:0,7808:268435456,8064:268443656},{"0":1048576,
16:33555457,32:1024,48:1049601,64:34604033,80:0,96:1,112:34603009,128:33555456,144:1048577,160:33554433,176:34604032,192:34603008,208:1025,224:1049600,240:33554432,8:34603009,24:0,40:33555457,56:34604032,72:1048576,88:33554433,104:33554432,120:1025,136:1049601,152:33555456,168:34603008,184:1048577,200:1024,216:34604033,232:1,248:1049600,256:33554432,272:1048576,288:33555457,304:34603009,320:1048577,336:33555456,352:34604032,368:1049601,384:1025,400:34604033,416:1049600,432:1,448:0,464:34603008,480:33554433,
496:1024,264:1049600,280:33555457,296:34603009,312:1,328:33554432,344:1048576,360:1025,376:34604032,392:33554433,408:34603008,424:0,440:34604033,456:1049601,472:1024,488:33555456,504:1048577},{"0":134219808,1:131072,2:134217728,3:32,4:131104,5:134350880,6:134350848,7:2048,8:134348800,9:134219776,10:133120,11:134348832,12:2080,13:0,14:134217760,15:133152,2147483648:2048,2147483649:134350880,2147483650:134219808,2147483651:134217728,2147483652:134348800,2147483653:133120,2147483654:133152,2147483655:32,
2147483656:134217760,2147483657:2080,2147483658:131104,2147483659:134350848,2147483660:0,2147483661:134348832,2147483662:134219776,2147483663:131072,16:133152,17:134350848,18:32,19:2048,20:134219776,21:134217760,22:134348832,23:131072,24:0,25:131104,26:134348800,27:134219808,28:134350880,29:133120,30:2080,31:134217728,2147483664:131072,2147483665:2048,2147483666:134348832,2147483667:133152,2147483668:32,2147483669:134348800,2147483670:134217728,2147483671:134219808,2147483672:134350880,2147483673:134217760,
2147483674:134219776,2147483675:0,2147483676:133120,2147483677:2080,2147483678:131104,2147483679:134350848}],x=[4160749569,528482304,33030144,2064384,129024,8064,504,2147483679],r=s.DES=n.extend({_doReset:function(){for(var b=this._key.words,a=[],c=0;56>c;c++){var d=q[c]-1;a[c]=b[d>>>5]>>>31-d%32&1}b=this._subKeys=[];for(d=0;16>d;d++){for(var f=b[d]=[],l=v[d],c=0;24>c;c++)f[c/6|0]|=a[(w[c]-1+l)%28]<<31-c%6,f[4+(c/6|0)]|=a[28+(w[c+24]-1+l)%28]<<31-c%6;f[0]=f[0]<<1|f[0]>>>31;for(c=1;7>c;c++)f[c]>>>=
4*(c-1)+3;f[7]=f[7]<<5|f[7]>>>27}a=this._invSubKeys=[];for(c=0;16>c;c++)a[c]=b[15-c]},encryptBlock:function(b,a){this._doCryptBlock(b,a,this._subKeys)},decryptBlock:function(b,a){this._doCryptBlock(b,a,this._invSubKeys)},_doCryptBlock:function(e,a,c){this._lBlock=e[a];this._rBlock=e[a+1];u.call(this,4,252645135);u.call(this,16,65535);l.call(this,2,858993459);l.call(this,8,16711935);u.call(this,1,1431655765);for(var d=0;16>d;d++){for(var f=c[d],n=this._lBlock,p=this._rBlock,q=0,r=0;8>r;r++)q|=b[r][((p^
f[r])&x[r])>>>0];this._lBlock=p;this._rBlock=n^q}c=this._lBlock;this._lBlock=this._rBlock;this._rBlock=c;u.call(this,1,1431655765);l.call(this,8,16711935);l.call(this,2,858993459);u.call(this,16,65535);u.call(this,4,252645135);e[a]=this._lBlock;e[a+1]=this._rBlock},keySize:2,ivSize:2,blockSize:2});d.DES=n._createHelper(r);s=s.TripleDES=n.extend({_doReset:function(){var b=this._key.words;this._des1=r.createEncryptor(p.create(b.slice(0,2)));this._des2=r.createEncryptor(p.create(b.slice(2,4)));this._des3=
r.createEncryptor(p.create(b.slice(4,6)))},encryptBlock:function(b,a){this._des1.encryptBlock(b,a);this._des2.decryptBlock(b,a);this._des3.encryptBlock(b,a)},decryptBlock:function(b,a){this._des3.decryptBlock(b,a);this._des2.encryptBlock(b,a);this._des1.decryptBlock(b,a)},keySize:6,ivSize:2,blockSize:2});d.TripleDES=n._createHelper(s)})();
CryptoJS.mode.ECB = (function () {
    var ECB = CryptoJS.lib.BlockCipherMode.extend();
    ECB.Encryptor = ECB.extend({
        processBlock: function (words, offset) {
            this._cipher.encryptBlock(words, offset);
        }
    });
    ECB.Decryptor = ECB.extend({
        processBlock: function (words, offset) {
            this._cipher.decryptBlock(words, offset);
        }
    });
    return ECB;
}());
function encryptByDES(message, key) {
    var keyHex = CryptoJS.enc.Utf8.parse(key);
    var encrypted = CryptoJS.DES.encrypt(message, keyHex, {
        mode: CryptoJS.mode.ECB,
        padding: CryptoJS.pad.Pkcs7
    });
    return encrypted.toString();
}
        """
        # Evaluate the vendored bundle and invoke its encryptByDES helper
        # with the site's hard-coded DES key.
        return execjs.compile(js_source).call("encryptByDES", pwd, "1234567890")
def send_login_verify_request(self, **kwargs):
#初始请求
start_time = str(int(time.time() * 1000))
start_url = 'http://service.js.10086.cn/my/MY_QDCX.html?t={}'.format(start_time)
self.start_url = 'my/MY_QDCX.html?t={}'.format(start_time)
code, key, resp = self.get(start_url)
if code != 0:
return code, key, ""
#登录首次请求
login_url = "https://service.js.10086.cn/actionDispatcher.do"
headers = {
'Referer': 'http://service.js.10086.cn/login.html?url=my/MY_QDCX.html',
}
try:
data = {
'userLoginTransferProtocol': 'https',
'redirectUrl': self.start_url + "#home",
'reqUrl': 'login',
'busiNum': 'LOGIN',
'operType': '0',
'passwordType': '1',
'isSavePasswordVal': '0',
'isSavePasswordVal_N': '1',
'currentD': '1',
'loginFormTab': '#home',
'loginType': '1',
'smsFlag': '1',
'smsCode3': '',
'mobile': kwargs['tel'],
'city': 'NJDQ', #
'password': self.enPwd(kwargs['pin_pwd']),
'password3': '',
'verifyCode': '请输入验证码',
}
except:
error = traceback.format_exc()
self.log("crawler", "param_error pin_pwd:{} error:{}".format(kwargs['pin_pwd'], error), "")
return 9, "param_error"
code, key, resp = self.post(login_url, data=data, headers=headers)
if code != 0:
return code, key
if "resultCode=-BSP10001" not in resp.text:
self.log("crawler", "unknown_error", resp)
return 9, "unknown_error", ""
# 发短信
send_sms_url = "http://service.js.10086.cn/actionDispatcher.do"
data = {
'reqUrl': 'login',
'busiNum': 'LOGIN',
'fucName': 'sendLoginMsg',
'flag': '1',
}
code, key, resp = self.post(send_sms_url, data=data)
if code != 0:
return code, key
if '"resultCode":"0"' in resp.text and '"success":true' in resp.text:
return 0, "success", ""
else:
self.log("crawler", "登录短信发送未知错误", resp)
return 9, "send_sms_error", ""
# code, key, resp = self.get("http://service.js.10086.cn/login.html?url=my/MY_QDCX.html")
# if code != 0:
# if isinstance(resp, str):
# pass
# elif resp.status_code == 404:
# return 9, 'website_busy_error', ""
# return code, key, ""
# code, key, resp = self.get("http://service.js.10086.cn/imageVerifyCode?t=new&r=0.611200696976353")
# if code != 0:
# return code, key, ""
# code, key, resp = self.get("http://service.js.10086.cn/imageVerifyCode?t=new&r=0.2331200696976353")
# if code != 0:
# if isinstance(resp, str):
# pass
# elif resp.status_code == 404:
# return 9, 'website_busy_error', ""
# return code, key, ""
# return 0, 'success', ""
# return 0, 'success', resp
def get_call_log_by_month(self, year, month):
data = {}
data['reqUrl'] = 'MY_QDCXQueryNew'
data['busiNum'] = 'QDCX'
data['queryMonth'] = "%d%02d" % (year, month)
data['queryItem'] = 1
data['qryPages'] = ''
data['qryNo'] = '1'
data['operType'] = '3'
data['queryBeginTime'] = "%s-%02d-01" % (year, month)
data['queryEndTime'] = "%s-%02d-31" % (year, month)
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
}
bill_detail_url = "http://service.js.10086.cn/my/actionDispatcher.do"
code, key, resp = self.post(bill_detail_url, data=data, headers=headers)
if code != 0:
return code, key, "", resp
return 0, "success", "", resp
def login(self, **kwargs):
# get cookies
# login_url = "http://service.js.10086.cn/login.html"
# headers = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
# "Accept-Encoding": "gzip, deflate",
# "Accept-Language": "zh-CN,zh;q=0.9"
# }
# def rand_browser_finger():
# return hashlib.md5(str(random.randint(1000000000, 9999999999))).hexdigest()
# add_dict_to_cookiejar(self.session.cookies,
# {
# "CmProvid": "js",
# "mywaytoopen": rand_browser_finger(),
# "login_error_number_https": "15094393043",
# "login_error_loginType_https": "1"
# }
# )
# headers = {
# "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
# "Referer": "http://service.js.10086.cn/login.html",
# "Accept-Encoding": "gzip, deflate",
# "Accept-Language": "zh-CN,zh;q=0.9"
# }
# url = "http://service.js.10086.cn/actionDispatcher.do"
# headers = {
# "X-Requested-With": "XMLHttpRequest",
# "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
# "Referer": "http://service.js.10086.cn/login.html",
# "Accept-Encoding": "gzip, deflate",
# "Accept-Language": "zh-CN,zh;q=0.9"
# }
# data = {
# "reqUrl": "MY_WDXX",
# "methodNew": "getMsgCount"
# }
# code, key, resp = self.post(url, headers=headers, data=data)
# if code != 0:
# return code, key
# temp_time = int(time.time() * 1000)
# try:
# pwd = self.enPwd(kwargs['pin_pwd'])
# except:
# error = traceback.format_exc()
# self.log("crawler", "记录错误{} {}".format(kwargs['pin_pwd'], error), "")
# return 9, "website_busy_error"
# LOGIN_URL = 'https://service.js.10086.cn/actionDispatcher.do'
# data = {
# "userLoginTransferProtocol": "https",
# "redirectUrl": "index.html",
# "reqUrl": "login",
# "busiNum": "LOGIN",
# "operType": "0",
# "passwordType": "1",
# "isSavePasswordVal": "0",
# "isSavePasswordVal_N": "1",
# "currentD": "1",
# "loginFormTab": "http",
# "loginType": "1",
# "smsFlag": "1",
# "smsCode3": "",
# "mobile": kwargs['tel'],
# "city": "NJDQ",
# "password": pwd,
# "password3": "",
# "verifyCode": "%E8%AF%B7%E8%BE%93%E5%85%A5%E9%AA%8C%E8%AF%81%E7%A0%81",
# }
#
# headers = {
# "Referer": "http://service.js.10086.cn/login.html",
# "Content-Type": "application/x-www-form-urlencoded",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
# "Accept-Encoding": "gzip, deflate, br",
# "Accept-Language": "zh-CN,zh;q=0.9"
# }
# code, key, resp = self.post(LOGIN_URL, headers=headers, data=data)
# if code != 0:
# return code, key
# if u'login' in resp.url:
# self.log("user", "login_param_error", resp)
# return 1, 'login_param_error'
# elif u'resultCode=-300' in resp.text:
# self.log("user", "pin_pwd_error", resp)
# return 1, 'pin_pwd_error'
# elif u'resultCode=-400' in resp.text or u'resultCode=-9000' in resp.text:
# self.log("user", "verify_error", resp)
# return 9, "website_busy_error"
# elif u'system maintenance' in resp.text:
# self.log("website", "系统升级", resp)
# return 9, "website_maintaining_error"
# for i in range(self.max_retry):
# code, key, resp = self.send_login_verify_request()
# if code != 0:
# continue
# # 云打码
# codetype = 3004
# key, result, cid = self._dama(resp.content, codetype)
#
# if key == "success" and result != "":
# captcha_code = str(result)
# else:
# self.log("website", "website_busy_error: 云打码失败{}".format(result), '')
# code, key = 9, "auto_captcha_code_error"
# continue
# LOGIN_URL = 'http://service.js.10086.cn/actionDispatcher.do'
# data = {}
# data['userLoginTransferProtocol'] = 'https'
# data['redirectUrl'] = 'my/MY_QDCX.html#home'
# data['reqUrl'] = 'login'
# data['busiNum'] = 'LOGIN'
# data['operType'] = '0'
# data['passwordType'] = '1'
# data['isSavePasswordVal'] = '0'
# data['isSavePasswordVal_N'] = '1'
# data['currentD'] = '1'
# data['loginFormTab'] = '#home'
# data['loginType'] = '1'
# data['smsFlag'] = '1'
# data['phone-login'] = 'on'
# data['mobile'] = kwargs['tel']
# data['city'] = 'NJDQ'
# data['password'] = kwargs['pin_pwd']
# data['verifyCode'] = ""
# headers = {"Upgrade-Insecure-Requests": "1", "Referer": "http://service.js.10086.cn/login.html?url=my/MY_QDCX.html?t=1481789973300"}
# code, key, resp = self.post(LOGIN_URL, headers=headers, data=data)
# if code != 0:
# return code, key
# if u'login' in resp.url:
# self.log("user", "login_param_error", resp)
# return 1, 'login_param_error'
# elif u'resultCode=-300' in resp.text:
# self.log("user", "pin_pwd_error", resp)
# return 1, 'pin_pwd_error'
# elif u'resultCode=-400' in resp.text or u'resultCode=-9000' in resp.text:
# self.log("user", "verify_error", resp)
# code, key = 9, "auto_captcha_code_error"
# self._dama_report(cid)
# continue
# else:
# return code, key
# go to bill page and next will need to do sms verification
# 验证短信
verify_login_sms_url = 'http://service.js.10086.cn/actionDispatcher.do'
data = {
'reqUrl': 'login',
'busiNum': 'LOGIN',
'smsLoginCode': kwargs['sms_code'],
'fucName': 'verySmsCode',
'flag': '1',
}
code, key, resp = self.post(verify_login_sms_url, data=data)
if code != 0:
return code, key,
if '"resultCode":"1"' in resp.text and '"success":false' in resp.text:
self.log("user", "短信验证码错误", resp)
return 9, "verify_error"
if '密码错误' in resp.text and 'logicCode":"-3002' in resp.text:
self.log("user", "pin_pwd_error", resp)
return 1, "pin_pwd_error"
if 'resultCode":"0"' not in resp.text and 'success":true' not in resp.text:
self.log("crawler", "unknown_error", resp)
return 9, "unknown_error", ""
query_date = date.today()
level, key, message, r = self.get_call_log_by_month(query_date.year, query_date.month)
if level != 0:
return level, key
bill_panel_url = 'http://service.js.10086.cn/my/MY_QDCX.html'
code, key, resp = self.get(bill_panel_url)
if code != 0:
return code, key
return 0, 'success'
def get_verify_type(self, **kwargs):
return 'SMS'
# return ""
def verify(self, **kwargs):
today = date.today()
data = {}
data['reqUrl'] = 'MY_QDCXQueryNew'
data['busiNum'] = 'QDCX'
data['queryMonth'] = "%d%02d" % (today.year, today.month)
data['queryItem'] = 1
data['qryPages'] = ''
# 1:1002:-1 example for pagination
data['qryNo'] = '1'
data['operType'] = '3'
data['queryBeginTime'] = "%s-%02d-01" % (today.year, today.month)
data['queryEndTime'] = "%s-%02d-31" % (today.year, today.month)
data['confirmFlg'] = '1'
data['smsNum'] = kwargs['sms_code']
url = "http://service.js.10086.cn/my/actionDispatcher.do"
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
}
code, key, resp = self.post(url, data=data, headers=headers)
if code != 0:
return code, key
try:
if "<title> system maintenance" in resp.text:
self.log("crawler", u"系统维护", resp)
return 9, "website_maintaining_error"
obj = json.loads(resp.text)
if obj['resultCode'] == "0":
status_key, level, message = 'success', 0, ''
else:
self.log("crawler", "验证短信失败", resp)
if obj['systemCode'] == "-200009":
return 2, 'verify_error'
elif obj['systemCode'] == "-200002":
return 9, "website_busy_error"
status_key, level, message = 'verify_error', 2, obj['resultMsg']
self.log("user", "verify_error", resp)
return level, status_key
except:
error = traceback.format_exc()
self.log("crawler", 'unknown_error: ' + error, resp)
return 9, "unknown_error"
def send_verify_request(self, **kwargs):
# see if we need to sleep for a while to send sms verification successfully
if self.sms_send_time != None:
sleep_time = 30 - int(time.time() - self.sms_send_time)
if sleep_time > 0:
time.sleep(sleep_time)
sms_post_url = "http://service.js.10086.cn/my/sms.do"
sms_req_payload = {'busiNum': 'QDCX'}
headers = {"Origin": "http://service.js.10086.cn", 'X-Requested-With': 'XMLHttpRequest',
"Referer": 'http://service.js.10086.cn/'+self.start_url}
# "Referer": "http://service.js.10086.cn/my/MY_QDCX.html?t=1481789973300"
code, key, resp = self.post(sms_post_url, headers=headers, data=sms_req_payload)
# code, key, resp = self.post(sms_post_url, data=sms_req_payload)
if code != 0:
return code, key, ""
if resp.text.strip() == "":
self.log("website", u"官网返回数据为空", resp)
return 9, "website_busy_error", ""
if "<html><head><title> system" in resp.text:
self.log("website", u"系统繁忙或者是升级", resp)
return 9, "website_busy_error", ""
try:
ret = resp.json()
error_code = ret['resultCode']
except:
error = traceback.format_exc()
self.log("crawler", "request_error: " + error, resp)
return 9, 'request_error', ""
if error_code != '0':
self.log("crawler", u"发送短信失败", resp)
return 9, 'send_sms_error', ''
# keep the latest sms sending timestamp
self.sms_send_time = time.time()
return 0, 'success', ''
def crawl_call_log_short_num(self, **kwargs):
miss_list = []
pos_miss_list = []
records = []
message_list = []
today = date.today()
delta_months = [i for i in range(0, -6, -1)]
for delta_month in delta_months:
query_date = today + relativedelta(months=delta_month)
end_date = monthrange(query_date.year, query_date.month)[1]
query_month = "%s%02d" % (query_date.year, query_date.month)
st = "%s-%02d-01" % (query_date.year, query_date.month)
et = "%s-%02d-%d" % (query_date.year, query_date.month, end_date)
data = {
"reqUrl": "MY_QDCXQueryNew",
"busiNum": "QDCX",
"queryMonth": "201709",
"queryItem": "8",
"qryPages": "8:1005:-1",
"qryNo": "1",
"operType": "3",
"queryBeginTime": st,
"queryEndTime": et
}
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
}
url = "http://service.js.10086.cn/my/actionDispatcher.do"
message = ""
for i in range(self.max_retry):
code, key, resp = self.post(url, data=data, headers=headers)
if code != 0:
message = "network_request_error"
continue
level, key, message, ret = parse_call_record_short(resp.text, query_month, self_obj=self)
if level != 0:
continue
records.extend(ret)
break
else:
if key == 'no_data':
self.log("crawler", "短号码{}{}".format(key, message), resp)
pos_miss_list.append(query_month)
else:
if message != "network_request_error":
self.log("crawler", "短号码{}{}".format(key, message), resp)
miss_list.append(query_month)
message_list.append(key)
return 0, "success", records, miss_list, pos_miss_list
    def crawl_call_log(self, **kwargs):
        """Crawl full call records for the last six months.

        Builds one QDCX query per month, then drains a retry queue:
        failed requests are re-queued while retries remain (or, after
        retries are spent, while still inside a 15-second grace window).
        Short-number records are appended via crawl_call_log_short_num.

        :param kwargs: must contain 'sms_code' (the verified SMS code).
        :return: (level, key, records, miss_list, pos_miss_list).
        """
        miss_list = []
        pos_miss_list = []
        today = date.today()
        records = []
        message_list = []
        # current month and the five before it
        delta_months = [i for i in range(0, -6, -1)]
        page_and_retry = []
        bill_detail_url = "http://service.js.10086.cn/my/actionDispatcher.do"
        headers = {
            "X-Requested-With": "XMLHttpRequest",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
        }
        # Phase 1: build one request descriptor per month.
        for delta_month in delta_months:
            query_date = today + relativedelta(months=delta_month)
            data = {}
            query_month = "%d%02d" % (query_date.year, query_date.month)
            data['reqUrl'] = 'MY_QDCXQueryNew'
            data['busiNum'] = 'QDCX'
            data['queryMonth'] = query_month
            data['queryItem'] = 1
            data['qryPages'] = ''
            # 1:1002:-1 example for pagination
            data['qryNo'] = '1'
            data['operType'] = '3'
            data['queryBeginTime'] = "%s-%02d-01" % (query_date.year, query_date.month)
            # day 31 is sent even for shorter months; server tolerates it
            data['queryEndTime'] = "%s-%02d-31" % (query_date.year, query_date.month)
            data['confirmFlg'] = '1'
            data['smsNum'] = kwargs['sms_code']
            page_and_retry.append((data, query_month, self.max_retry))
        # Phase 2: drain the queue; (month, retries-left) pairs are logged.
        log_for_retry_request = []
        st_time = time.time()
        et_time = st_time + 15  # grace window for extra retries after the budget
        while page_and_retry:
            # for i in range(self.max_retry):
            data, m_query_month, m_retry_times = page_and_retry.pop(0)
            log_for_retry_request.append((m_query_month, m_retry_times))
            m_retry_times -= 1
            code, key, resp = self.post(bill_detail_url, data=data, headers=headers)
            if code == 0:
                level, key, message, ret = parse_call_record(resp.text, m_query_month, self_obj=self)
                # NOTE(review): on parse failure (level != 0) the month is
                # dropped without re-queuing or a miss_list entry — confirm
                # this is intended.
                if level != 0:
                    continue
                records.extend(ret)
                # break
            else:
                # network failure: re-queue while retries remain, or while
                # still inside the 15s window (with a small sleep).
                new_time = time.time()
                if m_retry_times > 0:
                    page_and_retry.append((data, m_query_month, m_retry_times))
                elif new_time < et_time:
                    page_and_retry.append((data, m_query_month, m_retry_times))
                    time.sleep(random.randint(3, 5))
                else:
                    message = "network_request_error"
                continue
            # Only reached on a successful parse; classify the parser's key.
            if key == 'no_data':
                self.log("crawler", "{}{}".format(key, message), resp)
                pos_miss_list.append(m_query_month)
            elif key == "success":
                pass
            else:
                if message != "network_request_error":
                    self.log("crawler", "{}{}".format(key, message), resp)
                miss_list.append(m_query_month)
                message_list.append(key)
        short_code, short_key, short_result, short_miss_list, short_pos_miss_list = self.crawl_call_log_short_num(
            **kwargs)
        records.extend(short_result)
        self.log("crawler", "重试记录:{}".format(log_for_retry_request), "")
        # print len(records),miss_list,pos_miss_list
        # If every month is missing, decide between "site busy" and a real
        # crawl error based on the recorded failure keys.
        if len(miss_list + pos_miss_list) == 6:
            # NOTE(review): Python 2 semantics — map() returns a list here;
            # under Python 3 .count would fail.
            temp_list = map(
                lambda x: x.count('request_error') or x.count('website_busy_error') or x.count('success') or 0,
                message_list)
            if temp_list.count(0) == 0:
                return 9, 'website_busy_error', [], miss_list, pos_miss_list
            else:
                return 9, 'crawl_error', [], miss_list, pos_miss_list
        return 0, 'success', records, miss_list, pos_miss_list
def time_transform(self, time_str, bm='utf-8', str_format="%Y%m%d%H%M%S"):
try:
time_type = time.strptime(time_str.encode(bm), str_format)
except:
error = traceback.format_exc()
return 9, 'unknown_error', u"time_transform failed: %s %s" % (error, time_str)
return 0, 'success', str(int(time.mktime(time_type)))
def crawl_info(self, **kwargs):
result = {}
url = "http://service.js.10086.cn/my/MY_GRZLGL.html#home"
code, key, resp = self.get(url)
if code != 0:
return code, key, {}
obj_str = ''
sections = resp.text.split('window.top.BmonPage.commonBusiCallBack(')
if len(sections) > 1:
obj_str = sections[1].split(", 'MY_GRZLGL')")[0]
if obj_str == '':
self.log("crawler", 'expected_key_error', resp)
return 9, "expected_key_error", {}
try:
obj = json.loads(obj_str)
result['is_realname_register'] = True
result['full_name'] = obj['resultObj']['kehuName']
result['id_card'] = kwargs['id_card']
result['open_date'] = obj['resultObj']['ruwangAt']
except:
error = traceback.format_exc()
self.log("crawler", 'unknown_error: ' + error, resp)
return 9, "unknown_error", {}
level, key, open_date = self.time_transform(obj['resultObj']['ruwangAt'])
if level != 0:
self.log("crawler", '转换时间失败{}{}'.format(key, open_date), resp)
return level, key, {}
result['open_date'] = open_date
result['address'] = ''
return 0, 'success', result
def crawl_phone_bill(self, **kwargs):
miss_list = []
phone_bill = list()
params = {'tel': kwargs['tel']}
message_list = []
for month in self.__monthly_period(6, '%Y%m'):
params['month'] = month
level, key, message, result, miss = self.crawl_month_bill(**params)
if level != 0:
message_list.append(key)
miss_list.append(miss)
if result.get('bill_amount', '') == '':
continue
if result:
phone_bill.append(result)
now_month = datetime.datetime.now().strftime("%Y%m")
now_month in miss_list and miss_list.remove(now_month)
if len(miss_list) == 5:
temp_list = map(lambda x: x.count('request_error') or x.count('website_busy_error') or 0, message_list)
if temp_list.count(0) == 0:
return 9, 'website_busy_error', [], miss_list
return 9, "crawl_error", [], miss_list
return 0, 'success', phone_bill, miss_list
def crawl_month_bill(self, **kwargs):
month_bill_url = 'http://service.js.10086.cn/my/actionDispatcher.do'
data = {
'reqUrl': 'MY_GRZDQuery',
'busiNum': 'ZDCX',
'methodName': 'getMobileHistoryBill',
'beginDate': kwargs['month']
}
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_ZDCX.html"
}
error = ""
for i in range(self.max_retry):
code, key, resp = self.post(month_bill_url, data=data, headers=headers)
if code != 0:
error = "network_request_error"
continue
resp.encoding = 'utf=8'
month_bill = {
'bill_month': kwargs['month'],
'bill_amount': '',
'bill_package': '',
'bill_ext_calls': '',
'bill_ext_data': '',
'bill_ext_sms': '',
'bill_zengzhifei': '',
'bill_daishoufei': '',
'bill_qita': ''
}
try:
result = json.loads(resp.text)
if 'billBean' in result['resultObj']:
bill = result['resultObj']['billBean']['billRet']
# 出账期
if u'移动话费出账期' in result['resultMsg']:
return 9, 'success', '', month_bill, kwargs['month']
if bill == None:
# u'账单查询出错 %d,%s' % (resp.status_code, resp.text)
# return 'json_error', 9, "", [], True
error = "账单查询出错"
continue
month_bill['bill_amount'] = '%.2f' % (float(bill['totalFee']) / 100)
for x in bill['feeDetailList']:
if 1 == x['level']:
if u'套餐及固定费' == x['feeName']:
month_bill['bill_package'] = '%.2f' % (float(x['fee']) / 100)
elif u'套餐外语音通信费' == x['feeName']:
month_bill['bill_ext_calls'] = '%.2f' % (float(x['fee']) / 100)
elif u'套餐外短信、彩信费' == x['feeName']:
month_bill['bill_ext_sms'] = '%.2f' % (float(x['fee']) / 100)
break
except:
error = traceback.format_exc()
# return "json_error", 9, error, resp, True
continue
else:
if error != "network_request_error":
self.log("crawler", error, resp)
return 9, "html_error", error, {}, kwargs['month']
return 0, 'success', '', month_bill, ""
def __monthly_period(self, length=6, strf='%Y%m'):
current_time = datetime.datetime.now()
for month_offset in range(0, length):
yield (current_time - relativedelta(months=month_offset)).strftime(strf)
if __name__ == '__main__':
    # Ad-hoc smoke test against a sample account.
    crawler = Crawler()
    credentials = {
        'pin_pwd': '340393',
        'tel': '15094393043',
    }
    crawler.self_test(**credentials)
import base64
import datetime
import json
import sys
import time
import random
import traceback
from datetime import date
from calendar import monthrange
import hashlib
import re
import execjs
from dateutil.relativedelta import relativedelta
from requests.utils import add_dict_to_cookiejar
# Python 2 only: reload re-exposes setdefaultencoding (removed by site.py)
# so the process-wide default codec can be forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
# Resolve project imports both ways: when run directly as a script, add the
# package roots to sys.path and import locally; when imported by the worker,
# use the fully-qualified package paths.
if __name__ == '__main__':
    sys.path.append('../..')
    sys.path.append('../../..')
    sys.path.append('../../../..')
    from crawler.base_crawler import BaseCrawler
    from tool import parse_call_record, parse_call_record_short
else:
    from worker.crawler.base_crawler import BaseCrawler
    from worker.crawler.china_mobile.jiangsu.tool import parse_call_record, parse_call_record_short
class Crawler(BaseCrawler):
def __init__(self, **kwargs):
super(Crawler, self).__init__(**kwargs)
self.sms_send_time = None
self.start_url = ""
def need_parameters(self, **kwargs):
return ['pin_pwd']
def get_login_verify_type(self, **kwargs):
return 'SMS'
    def enPwd(self, pwd):
        """Encrypt the service password the way the login page does.

        Executes the site's own minified CryptoJS bundle (DES in ECB mode
        with PKCS7 padding) via execjs and returns the Base64 ciphertext of
        `pwd` encrypted with the site's hard-coded key "1234567890".

        :param pwd: plain-text service password / PIN.
        :return: Base64-encoded DES ciphertext string.
        """
        # Minified CryptoJS (core, Base64, MD5, EvpKDF, cipher core, DES,
        # ECB mode) copied verbatim from the site — do not reformat; any
        # byte change alters the encryption result.
        js_source = """
        var CryptoJS=CryptoJS||function(u,l){var d={},n=d.lib={},p=function(){},s=n.Base={extend:function(a){p.prototype=this;var c=new p;a&&c.mixIn(a);c.hasOwnProperty("init")||(c.init=function(){c.$super.init.apply(this,arguments)});c.init.prototype=c;c.$super=this;return c},create:function(){var a=this.extend();a.init.apply(a,arguments);return a},init:function(){},mixIn:function(a){for(var c in a)a.hasOwnProperty(c)&&(this[c]=a[c]);a.hasOwnProperty("toString")&&(this.toString=a.toString)},clone:function(){return this.init.prototype.extend(this)}},
        q=n.WordArray=s.extend({init:function(a,c){a=this.words=a||[];this.sigBytes=c!=l?c:4*a.length},toString:function(a){return(a||v).stringify(this)},concat:function(a){var c=this.words,m=a.words,f=this.sigBytes;a=a.sigBytes;this.clamp();if(f%4)for(var t=0;t<a;t++)c[f+t>>>2]|=(m[t>>>2]>>>24-8*(t%4)&255)<<24-8*((f+t)%4);else if(65535<m.length)for(t=0;t<a;t+=4)c[f+t>>>2]=m[t>>>2];else c.push.apply(c,m);this.sigBytes+=a;return this},clamp:function(){var a=this.words,c=this.sigBytes;a[c>>>2]&=4294967295<<
        32-8*(c%4);a.length=u.ceil(c/4)},clone:function(){var a=s.clone.call(this);a.words=this.words.slice(0);return a},random:function(a){for(var c=[],m=0;m<a;m+=4)c.push(4294967296*u.random()|0);return new q.init(c,a)}}),w=d.enc={},v=w.Hex={stringify:function(a){var c=a.words;a=a.sigBytes;for(var m=[],f=0;f<a;f++){var t=c[f>>>2]>>>24-8*(f%4)&255;m.push((t>>>4).toString(16));m.push((t&15).toString(16))}return m.join("")},parse:function(a){for(var c=a.length,m=[],f=0;f<c;f+=2)m[f>>>3]|=parseInt(a.substr(f,
        2),16)<<24-4*(f%8);return new q.init(m,c/2)}},b=w.Latin1={stringify:function(a){var c=a.words;a=a.sigBytes;for(var m=[],f=0;f<a;f++)m.push(String.fromCharCode(c[f>>>2]>>>24-8*(f%4)&255));return m.join("")},parse:function(a){for(var c=a.length,m=[],f=0;f<c;f++)m[f>>>2]|=(a.charCodeAt(f)&255)<<24-8*(f%4);return new q.init(m,c)}},x=w.Utf8={stringify:function(a){try{return decodeURIComponent(escape(b.stringify(a)))}catch(c){throw Error("Malformed UTF-8 data");}},parse:function(a){return b.parse(unescape(encodeURIComponent(a)))}},
        r=n.BufferedBlockAlgorithm=s.extend({reset:function(){this._data=new q.init;this._nDataBytes=0},_append:function(a){"string"==typeof a&&(a=x.parse(a));this._data.concat(a);this._nDataBytes+=a.sigBytes},_process:function(a){var c=this._data,m=c.words,f=c.sigBytes,t=this.blockSize,b=f/(4*t),b=a?u.ceil(b):u.max((b|0)-this._minBufferSize,0);a=b*t;f=u.min(4*a,f);if(a){for(var e=0;e<a;e+=t)this._doProcessBlock(m,e);e=m.splice(0,a);c.sigBytes-=f}return new q.init(e,f)},clone:function(){var a=s.clone.call(this);
        a._data=this._data.clone();return a},_minBufferSize:0});n.Hasher=r.extend({cfg:s.extend(),init:function(a){this.cfg=this.cfg.extend(a);this.reset()},reset:function(){r.reset.call(this);this._doReset()},update:function(a){this._append(a);this._process();return this},finalize:function(a){a&&this._append(a);return this._doFinalize()},blockSize:16,_createHelper:function(a){return function(c,m){return(new a.init(m)).finalize(c)}},_createHmacHelper:function(a){return function(c,m){return(new e.HMAC.init(a,
        m)).finalize(c)}}});var e=d.algo={};return d}(Math);
        (function(){var u=CryptoJS,l=u.lib.WordArray;u.enc.Base64={stringify:function(d){var n=d.words,l=d.sigBytes,s=this._map;d.clamp();d=[];for(var q=0;q<l;q+=3)for(var w=(n[q>>>2]>>>24-8*(q%4)&255)<<16|(n[q+1>>>2]>>>24-8*((q+1)%4)&255)<<8|n[q+2>>>2]>>>24-8*((q+2)%4)&255,v=0;4>v&&q+0.75*v<l;v++)d.push(s.charAt(w>>>6*(3-v)&63));if(n=s.charAt(64))for(;d.length%4;)d.push(n);return d.join("")},parse:function(d){var n=d.length,p=this._map,s=p.charAt(64);s&&(s=d.indexOf(s),-1!=s&&(n=s));for(var s=[],q=0,w=0;w<
        n;w++)if(w%4){var v=p.indexOf(d.charAt(w-1))<<2*(w%4),b=p.indexOf(d.charAt(w))>>>6-2*(w%4);s[q>>>2]|=(v|b)<<24-8*(q%4);q++}return l.create(s,q)},_map:"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="}})();
        (function(u){function l(b,e,a,c,m,f,t){b=b+(e&a|~e&c)+m+t;return(b<<f|b>>>32-f)+e}function d(b,e,a,c,m,f,t){b=b+(e&c|a&~c)+m+t;return(b<<f|b>>>32-f)+e}function n(b,e,a,c,m,f,t){b=b+(e^a^c)+m+t;return(b<<f|b>>>32-f)+e}function p(b,e,a,c,m,f,t){b=b+(a^(e|~c))+m+t;return(b<<f|b>>>32-f)+e}for(var s=CryptoJS,q=s.lib,w=q.WordArray,v=q.Hasher,q=s.algo,b=[],x=0;64>x;x++)b[x]=4294967296*u.abs(u.sin(x+1))|0;q=q.MD5=v.extend({_doReset:function(){this._hash=new w.init([1732584193,4023233417,2562383102,271733878])},
        _doProcessBlock:function(r,e){for(var a=0;16>a;a++){var c=e+a,m=r[c];r[c]=(m<<8|m>>>24)&16711935|(m<<24|m>>>8)&4278255360}var a=this._hash.words,c=r[e+0],m=r[e+1],f=r[e+2],t=r[e+3],y=r[e+4],q=r[e+5],s=r[e+6],w=r[e+7],v=r[e+8],u=r[e+9],x=r[e+10],z=r[e+11],A=r[e+12],B=r[e+13],C=r[e+14],D=r[e+15],g=a[0],h=a[1],j=a[2],k=a[3],g=l(g,h,j,k,c,7,b[0]),k=l(k,g,h,j,m,12,b[1]),j=l(j,k,g,h,f,17,b[2]),h=l(h,j,k,g,t,22,b[3]),g=l(g,h,j,k,y,7,b[4]),k=l(k,g,h,j,q,12,b[5]),j=l(j,k,g,h,s,17,b[6]),h=l(h,j,k,g,w,22,b[7]),
        g=l(g,h,j,k,v,7,b[8]),k=l(k,g,h,j,u,12,b[9]),j=l(j,k,g,h,x,17,b[10]),h=l(h,j,k,g,z,22,b[11]),g=l(g,h,j,k,A,7,b[12]),k=l(k,g,h,j,B,12,b[13]),j=l(j,k,g,h,C,17,b[14]),h=l(h,j,k,g,D,22,b[15]),g=d(g,h,j,k,m,5,b[16]),k=d(k,g,h,j,s,9,b[17]),j=d(j,k,g,h,z,14,b[18]),h=d(h,j,k,g,c,20,b[19]),g=d(g,h,j,k,q,5,b[20]),k=d(k,g,h,j,x,9,b[21]),j=d(j,k,g,h,D,14,b[22]),h=d(h,j,k,g,y,20,b[23]),g=d(g,h,j,k,u,5,b[24]),k=d(k,g,h,j,C,9,b[25]),j=d(j,k,g,h,t,14,b[26]),h=d(h,j,k,g,v,20,b[27]),g=d(g,h,j,k,B,5,b[28]),k=d(k,g,
        h,j,f,9,b[29]),j=d(j,k,g,h,w,14,b[30]),h=d(h,j,k,g,A,20,b[31]),g=n(g,h,j,k,q,4,b[32]),k=n(k,g,h,j,v,11,b[33]),j=n(j,k,g,h,z,16,b[34]),h=n(h,j,k,g,C,23,b[35]),g=n(g,h,j,k,m,4,b[36]),k=n(k,g,h,j,y,11,b[37]),j=n(j,k,g,h,w,16,b[38]),h=n(h,j,k,g,x,23,b[39]),g=n(g,h,j,k,B,4,b[40]),k=n(k,g,h,j,c,11,b[41]),j=n(j,k,g,h,t,16,b[42]),h=n(h,j,k,g,s,23,b[43]),g=n(g,h,j,k,u,4,b[44]),k=n(k,g,h,j,A,11,b[45]),j=n(j,k,g,h,D,16,b[46]),h=n(h,j,k,g,f,23,b[47]),g=p(g,h,j,k,c,6,b[48]),k=p(k,g,h,j,w,10,b[49]),j=p(j,k,g,h,
        C,15,b[50]),h=p(h,j,k,g,q,21,b[51]),g=p(g,h,j,k,A,6,b[52]),k=p(k,g,h,j,t,10,b[53]),j=p(j,k,g,h,x,15,b[54]),h=p(h,j,k,g,m,21,b[55]),g=p(g,h,j,k,v,6,b[56]),k=p(k,g,h,j,D,10,b[57]),j=p(j,k,g,h,s,15,b[58]),h=p(h,j,k,g,B,21,b[59]),g=p(g,h,j,k,y,6,b[60]),k=p(k,g,h,j,z,10,b[61]),j=p(j,k,g,h,f,15,b[62]),h=p(h,j,k,g,u,21,b[63]);a[0]=a[0]+g|0;a[1]=a[1]+h|0;a[2]=a[2]+j|0;a[3]=a[3]+k|0},_doFinalize:function(){var b=this._data,e=b.words,a=8*this._nDataBytes,c=8*b.sigBytes;e[c>>>5]|=128<<24-c%32;var m=u.floor(a/
        4294967296);e[(c+64>>>9<<4)+15]=(m<<8|m>>>24)&16711935|(m<<24|m>>>8)&4278255360;e[(c+64>>>9<<4)+14]=(a<<8|a>>>24)&16711935|(a<<24|a>>>8)&4278255360;b.sigBytes=4*(e.length+1);this._process();b=this._hash;e=b.words;for(a=0;4>a;a++)c=e[a],e[a]=(c<<8|c>>>24)&16711935|(c<<24|c>>>8)&4278255360;return b},clone:function(){var b=v.clone.call(this);b._hash=this._hash.clone();return b}});s.MD5=v._createHelper(q);s.HmacMD5=v._createHmacHelper(q)})(Math);
        (function(){var u=CryptoJS,l=u.lib,d=l.Base,n=l.WordArray,l=u.algo,p=l.EvpKDF=d.extend({cfg:d.extend({keySize:4,hasher:l.MD5,iterations:1}),init:function(d){this.cfg=this.cfg.extend(d)},compute:function(d,l){for(var p=this.cfg,v=p.hasher.create(),b=n.create(),u=b.words,r=p.keySize,p=p.iterations;u.length<r;){e&&v.update(e);var e=v.update(d).finalize(l);v.reset();for(var a=1;a<p;a++)e=v.finalize(e),v.reset();b.concat(e)}b.sigBytes=4*r;return b}});u.EvpKDF=function(d,l,n){return p.create(n).compute(d,
        l)}})();
        CryptoJS.lib.Cipher||function(u){var l=CryptoJS,d=l.lib,n=d.Base,p=d.WordArray,s=d.BufferedBlockAlgorithm,q=l.enc.Base64,w=l.algo.EvpKDF,v=d.Cipher=s.extend({cfg:n.extend(),createEncryptor:function(m,a){return this.create(this._ENC_XFORM_MODE,m,a)},createDecryptor:function(m,a){return this.create(this._DEC_XFORM_MODE,m,a)},init:function(m,a,b){this.cfg=this.cfg.extend(b);this._xformMode=m;this._key=a;this.reset()},reset:function(){s.reset.call(this);this._doReset()},process:function(a){this._append(a);return this._process()},
        finalize:function(a){a&&this._append(a);return this._doFinalize()},keySize:4,ivSize:4,_ENC_XFORM_MODE:1,_DEC_XFORM_MODE:2,_createHelper:function(m){return{encrypt:function(f,b,e){return("string"==typeof b?c:a).encrypt(m,f,b,e)},decrypt:function(f,b,e){return("string"==typeof b?c:a).decrypt(m,f,b,e)}}}});d.StreamCipher=v.extend({_doFinalize:function(){return this._process(!0)},blockSize:1});var b=l.mode={},x=function(a,f,b){var c=this._iv;c?this._iv=u:c=this._prevBlock;for(var e=0;e<b;e++)a[f+e]^=
        c[e]},r=(d.BlockCipherMode=n.extend({createEncryptor:function(a,f){return this.Encryptor.create(a,f)},createDecryptor:function(a,f){return this.Decryptor.create(a,f)},init:function(a,f){this._cipher=a;this._iv=f}})).extend();r.Encryptor=r.extend({processBlock:function(a,f){var b=this._cipher,c=b.blockSize;x.call(this,a,f,c);b.encryptBlock(a,f);this._prevBlock=a.slice(f,f+c)}});r.Decryptor=r.extend({processBlock:function(a,b){var c=this._cipher,e=c.blockSize,d=a.slice(b,b+e);c.decryptBlock(a,b);x.call(this,
        a,b,e);this._prevBlock=d}});b=b.CBC=r;r=(l.pad={}).Pkcs7={pad:function(a,b){for(var c=4*b,c=c-a.sigBytes%c,e=c<<24|c<<16|c<<8|c,d=[],l=0;l<c;l+=4)d.push(e);c=p.create(d,c);a.concat(c)},unpad:function(a){a.sigBytes-=a.words[a.sigBytes-1>>>2]&255}};d.BlockCipher=v.extend({cfg:v.cfg.extend({mode:b,padding:r}),reset:function(){v.reset.call(this);var a=this.cfg,c=a.iv,a=a.mode;if(this._xformMode==this._ENC_XFORM_MODE)var b=a.createEncryptor;else b=a.createDecryptor,this._minBufferSize=1;this._mode=b.call(a,
        this,c&&c.words)},_doProcessBlock:function(a,c){this._mode.processBlock(a,c)},_doFinalize:function(){var a=this.cfg.padding;if(this._xformMode==this._ENC_XFORM_MODE){a.pad(this._data,this.blockSize);var c=this._process(!0)}else c=this._process(!0),a.unpad(c);return c},blockSize:4});var e=d.CipherParams=n.extend({init:function(a){this.mixIn(a)},toString:function(a){return(a||this.formatter).stringify(this)}}),b=(l.format={}).OpenSSL={stringify:function(a){var c=a.ciphertext;a=a.salt;return(a?p.create([1398893684,
        1701076831]).concat(a).concat(c):c).toString(q)},parse:function(a){a=q.parse(a);var c=a.words;if(1398893684==c[0]&&1701076831==c[1]){var b=p.create(c.slice(2,4));c.splice(0,4);a.sigBytes-=16}return e.create({ciphertext:a,salt:b})}},a=d.SerializableCipher=n.extend({cfg:n.extend({format:b}),encrypt:function(a,c,b,d){d=this.cfg.extend(d);var l=a.createEncryptor(b,d);c=l.finalize(c);l=l.cfg;return e.create({ciphertext:c,key:b,iv:l.iv,algorithm:a,mode:l.mode,padding:l.padding,blockSize:a.blockSize,formatter:d.format})},
        decrypt:function(a,c,b,e){e=this.cfg.extend(e);c=this._parse(c,e.format);return a.createDecryptor(b,e).finalize(c.ciphertext)},_parse:function(a,c){return"string"==typeof a?c.parse(a,this):a}}),l=(l.kdf={}).OpenSSL={execute:function(a,c,b,d){d||(d=p.random(8));a=w.create({keySize:c+b}).compute(a,d);b=p.create(a.words.slice(c),4*b);a.sigBytes=4*c;return e.create({key:a,iv:b,salt:d})}},c=d.PasswordBasedCipher=a.extend({cfg:a.cfg.extend({kdf:l}),encrypt:function(c,b,e,d){d=this.cfg.extend(d);e=d.kdf.execute(e,
        c.keySize,c.ivSize);d.iv=e.iv;c=a.encrypt.call(this,c,b,e.key,d);c.mixIn(e);return c},decrypt:function(c,b,e,d){d=this.cfg.extend(d);b=this._parse(b,d.format);e=d.kdf.execute(e,c.keySize,c.ivSize,b.salt);d.iv=e.iv;return a.decrypt.call(this,c,b,e.key,d)}})}();
        (function(){function u(b,a){var c=(this._lBlock>>>b^this._rBlock)&a;this._rBlock^=c;this._lBlock^=c<<b}function l(b,a){var c=(this._rBlock>>>b^this._lBlock)&a;this._lBlock^=c;this._rBlock^=c<<b}var d=CryptoJS,n=d.lib,p=n.WordArray,n=n.BlockCipher,s=d.algo,q=[57,49,41,33,25,17,9,1,58,50,42,34,26,18,10,2,59,51,43,35,27,19,11,3,60,52,44,36,63,55,47,39,31,23,15,7,62,54,46,38,30,22,14,6,61,53,45,37,29,21,13,5,28,20,12,4],w=[14,17,11,24,1,5,3,28,15,6,21,10,23,19,12,4,26,8,16,7,27,20,13,2,41,52,31,37,47,
        55,30,40,51,45,33,48,44,49,39,56,34,53,46,42,50,36,29,32],v=[1,2,4,6,8,10,12,14,15,17,19,21,23,25,27,28],b=[{"0":8421888,268435456:32768,536870912:8421378,805306368:2,1073741824:512,1342177280:8421890,1610612736:8389122,1879048192:8388608,2147483648:514,2415919104:8389120,2684354560:33280,2952790016:8421376,3221225472:32770,3489660928:8388610,3758096384:0,4026531840:33282,134217728:0,402653184:8421890,671088640:33282,939524096:32768,1207959552:8421888,1476395008:512,1744830464:8421378,2013265920:2,
        2281701376:8389120,2550136832:33280,2818572288:8421376,3087007744:8389122,3355443200:8388610,3623878656:32770,3892314112:514,4160749568:8388608,1:32768,268435457:2,536870913:8421888,805306369:8388608,1073741825:8421378,1342177281:33280,1610612737:512,1879048193:8389122,2147483649:8421890,2415919105:8421376,2684354561:8388610,2952790017:33282,3221225473:514,3489660929:8389120,3758096385:32770,4026531841:0,134217729:8421890,402653185:8421376,671088641:8388608,939524097:512,1207959553:32768,1476395009:8388610,
        1744830465:2,2013265921:33282,2281701377:32770,2550136833:8389122,2818572289:514,3087007745:8421888,3355443201:8389120,3623878657:0,3892314113:33280,4160749569:8421378},{"0":1074282512,16777216:16384,33554432:524288,50331648:1074266128,67108864:1073741840,83886080:1074282496,100663296:1073758208,117440512:16,134217728:540672,150994944:1073758224,167772160:1073741824,184549376:540688,201326592:524304,218103808:0,234881024:16400,251658240:1074266112,8388608:1073758208,25165824:540688,41943040:16,58720256:1073758224,
        75497472:1074282512,92274688:1073741824,109051904:524288,125829120:1074266128,142606336:524304,159383552:0,176160768:16384,192937984:1074266112,209715200:1073741840,226492416:540672,243269632:1074282496,260046848:16400,268435456:0,285212672:1074266128,301989888:1073758224,318767104:1074282496,335544320:1074266112,352321536:16,369098752:540688,385875968:16384,402653184:16400,419430400:524288,436207616:524304,452984832:1073741840,469762048:540672,486539264:1073758208,503316480:1073741824,520093696:1074282512,
        276824064:540688,293601280:524288,310378496:1074266112,327155712:16384,343932928:1073758208,360710144:1074282512,377487360:16,394264576:1073741824,411041792:1074282496,427819008:1073741840,444596224:1073758224,461373440:524304,478150656:0,494927872:16400,511705088:1074266128,528482304:540672},{"0":260,1048576:0,2097152:67109120,3145728:65796,4194304:65540,5242880:67108868,6291456:67174660,7340032:67174400,8388608:67108864,9437184:67174656,10485760:65792,11534336:67174404,12582912:67109124,13631488:65536,
        14680064:4,15728640:256,524288:67174656,1572864:67174404,2621440:0,3670016:67109120,4718592:67108868,5767168:65536,6815744:65540,7864320:260,8912896:4,9961472:256,11010048:67174400,12058624:65796,13107200:65792,14155776:67109124,15204352:67174660,16252928:67108864,16777216:67174656,17825792:65540,18874368:65536,19922944:67109120,20971520:256,22020096:67174660,23068672:67108868,24117248:0,25165824:67109124,26214400:67108864,27262976:4,28311552:65792,29360128:67174400,30408704:260,31457280:65796,32505856:67174404,
        17301504:67108864,18350080:260,19398656:67174656,20447232:0,21495808:65540,22544384:67109120,23592960:256,24641536:67174404,25690112:65536,26738688:67174660,27787264:65796,28835840:67108868,29884416:67109124,30932992:67174400,31981568:4,33030144:65792},{"0":2151682048,65536:2147487808,131072:4198464,196608:2151677952,262144:0,327680:4198400,393216:2147483712,458752:4194368,524288:2147483648,589824:4194304,655360:64,720896:2147487744,786432:2151678016,851968:4160,917504:4096,983040:2151682112,32768:2147487808,
        98304:64,163840:2151678016,229376:2147487744,294912:4198400,360448:2151682112,425984:0,491520:2151677952,557056:4096,622592:2151682048,688128:4194304,753664:4160,819200:2147483648,884736:4194368,950272:4198464,1015808:2147483712,1048576:4194368,1114112:4198400,1179648:2147483712,1245184:0,1310720:4160,1376256:2151678016,1441792:2151682048,1507328:2147487808,1572864:2151682112,1638400:2147483648,1703936:2151677952,1769472:4198464,1835008:2147487744,1900544:4194304,1966080:64,2031616:4096,1081344:2151677952,
        1146880:2151682112,1212416:0,1277952:4198400,1343488:4194368,1409024:2147483648,1474560:2147487808,1540096:64,1605632:2147483712,1671168:4096,1736704:2147487744,1802240:2151678016,1867776:4160,1933312:2151682048,1998848:4194304,2064384:4198464},{"0":128,4096:17039360,8192:262144,12288:536870912,16384:537133184,20480:16777344,24576:553648256,28672:262272,32768:16777216,36864:537133056,40960:536871040,45056:553910400,49152:553910272,53248:0,57344:17039488,61440:553648128,2048:17039488,6144:553648256,
        10240:128,14336:17039360,18432:262144,22528:537133184,26624:553910272,30720:536870912,34816:537133056,38912:0,43008:553910400,47104:16777344,51200:536871040,55296:553648128,59392:16777216,63488:262272,65536:262144,69632:128,73728:536870912,77824:553648256,81920:16777344,86016:553910272,90112:537133184,94208:16777216,98304:553910400,102400:553648128,106496:17039360,110592:537133056,114688:262272,118784:536871040,122880:0,126976:17039488,67584:553648256,71680:16777216,75776:17039360,79872:537133184,
        83968:536870912,88064:17039488,92160:128,96256:553910272,100352:262272,104448:553910400,108544:0,112640:553648128,116736:16777344,120832:262144,124928:537133056,129024:536871040},{"0":268435464,256:8192,512:270532608,768:270540808,1024:268443648,1280:2097152,1536:2097160,1792:268435456,2048:0,2304:268443656,2560:2105344,2816:8,3072:270532616,3328:2105352,3584:8200,3840:270540800,128:270532608,384:270540808,640:8,896:2097152,1152:2105352,1408:268435464,1664:268443648,1920:8200,2176:2097160,2432:8192,
        2688:268443656,2944:270532616,3200:0,3456:270540800,3712:2105344,3968:268435456,4096:268443648,4352:270532616,4608:270540808,4864:8200,5120:2097152,5376:268435456,5632:268435464,5888:2105344,6144:2105352,6400:0,6656:8,6912:270532608,7168:8192,7424:268443656,7680:270540800,7936:2097160,4224:8,4480:2105344,4736:2097152,4992:268435464,5248:268443648,5504:8200,5760:270540808,6016:270532608,6272:270540800,6528:270532616,6784:8192,7040:2105352,7296:2097160,7552:0,7808:268435456,8064:268443656},{"0":1048576,
        16:33555457,32:1024,48:1049601,64:34604033,80:0,96:1,112:34603009,128:33555456,144:1048577,160:33554433,176:34604032,192:34603008,208:1025,224:1049600,240:33554432,8:34603009,24:0,40:33555457,56:34604032,72:1048576,88:33554433,104:33554432,120:1025,136:1049601,152:33555456,168:34603008,184:1048577,200:1024,216:34604033,232:1,248:1049600,256:33554432,272:1048576,288:33555457,304:34603009,320:1048577,336:33555456,352:34604032,368:1049601,384:1025,400:34604033,416:1049600,432:1,448:0,464:34603008,480:33554433,
        496:1024,264:1049600,280:33555457,296:34603009,312:1,328:33554432,344:1048576,360:1025,376:34604032,392:33554433,408:34603008,424:0,440:34604033,456:1049601,472:1024,488:33555456,504:1048577},{"0":134219808,1:131072,2:134217728,3:32,4:131104,5:134350880,6:134350848,7:2048,8:134348800,9:134219776,10:133120,11:134348832,12:2080,13:0,14:134217760,15:133152,2147483648:2048,2147483649:134350880,2147483650:134219808,2147483651:134217728,2147483652:134348800,2147483653:133120,2147483654:133152,2147483655:32,
        2147483656:134217760,2147483657:2080,2147483658:131104,2147483659:134350848,2147483660:0,2147483661:134348832,2147483662:134219776,2147483663:131072,16:133152,17:134350848,18:32,19:2048,20:134219776,21:134217760,22:134348832,23:131072,24:0,25:131104,26:134348800,27:134219808,28:134350880,29:133120,30:2080,31:134217728,2147483664:131072,2147483665:2048,2147483666:134348832,2147483667:133152,2147483668:32,2147483669:134348800,2147483670:134217728,2147483671:134219808,2147483672:134350880,2147483673:134217760,
        2147483674:134219776,2147483675:0,2147483676:133120,2147483677:2080,2147483678:131104,2147483679:134350848}],x=[4160749569,528482304,33030144,2064384,129024,8064,504,2147483679],r=s.DES=n.extend({_doReset:function(){for(var b=this._key.words,a=[],c=0;56>c;c++){var d=q[c]-1;a[c]=b[d>>>5]>>>31-d%32&1}b=this._subKeys=[];for(d=0;16>d;d++){for(var f=b[d]=[],l=v[d],c=0;24>c;c++)f[c/6|0]|=a[(w[c]-1+l)%28]<<31-c%6,f[4+(c/6|0)]|=a[28+(w[c+24]-1+l)%28]<<31-c%6;f[0]=f[0]<<1|f[0]>>>31;for(c=1;7>c;c++)f[c]>>>=
        4*(c-1)+3;f[7]=f[7]<<5|f[7]>>>27}a=this._invSubKeys=[];for(c=0;16>c;c++)a[c]=b[15-c]},encryptBlock:function(b,a){this._doCryptBlock(b,a,this._subKeys)},decryptBlock:function(b,a){this._doCryptBlock(b,a,this._invSubKeys)},_doCryptBlock:function(e,a,c){this._lBlock=e[a];this._rBlock=e[a+1];u.call(this,4,252645135);u.call(this,16,65535);l.call(this,2,858993459);l.call(this,8,16711935);u.call(this,1,1431655765);for(var d=0;16>d;d++){for(var f=c[d],n=this._lBlock,p=this._rBlock,q=0,r=0;8>r;r++)q|=b[r][((p^
        f[r])&x[r])>>>0];this._lBlock=p;this._rBlock=n^q}c=this._lBlock;this._lBlock=this._rBlock;this._rBlock=c;u.call(this,1,1431655765);l.call(this,8,16711935);l.call(this,2,858993459);u.call(this,16,65535);u.call(this,4,252645135);e[a]=this._lBlock;e[a+1]=this._rBlock},keySize:2,ivSize:2,blockSize:2});d.DES=n._createHelper(r);s=s.TripleDES=n.extend({_doReset:function(){var b=this._key.words;this._des1=r.createEncryptor(p.create(b.slice(0,2)));this._des2=r.createEncryptor(p.create(b.slice(2,4)));this._des3=
        r.createEncryptor(p.create(b.slice(4,6)))},encryptBlock:function(b,a){this._des1.encryptBlock(b,a);this._des2.decryptBlock(b,a);this._des3.encryptBlock(b,a)},decryptBlock:function(b,a){this._des3.decryptBlock(b,a);this._des2.encryptBlock(b,a);this._des1.decryptBlock(b,a)},keySize:6,ivSize:2,blockSize:2});d.TripleDES=n._createHelper(s)})();
        CryptoJS.mode.ECB = (function () {
            var ECB = CryptoJS.lib.BlockCipherMode.extend();
            ECB.Encryptor = ECB.extend({
                processBlock: function (words, offset) {
                    this._cipher.encryptBlock(words, offset);
                }
            });
            ECB.Decryptor = ECB.extend({
                processBlock: function (words, offset) {
                    this._cipher.decryptBlock(words, offset);
                }
            });
            return ECB;
        }());
        function encryptByDES(message, key) {
            var keyHex = CryptoJS.enc.Utf8.parse(key);
            var encrypted = CryptoJS.DES.encrypt(message, keyHex, {
                mode: CryptoJS.mode.ECB,
                padding: CryptoJS.pad.Pkcs7
            });
            return encrypted.toString();
        }
        """
        # execjs compiles the bundle with the system JS runtime and invokes
        # encryptByDES(message, key) defined at the end of the bundle.
        return execjs.compile(js_source).call("encryptByDES", pwd, "1234567890")
def send_login_verify_request(self, **kwargs):
"""Open the call-log page, submit the login form and trigger the login SMS.

kwargs must contain 'tel' (account phone number) and 'pin_pwd' (service
password, DES-encrypted via self.enPwd before posting).
Returns (code, key, message); code 0 means the login SMS was requested.
NOTE(review): two early returns yield 2-tuples instead of 3-tuples.
"""
# Timestamped landing page; the relative URL is stored for later Referers.
start_time = str(int(time.time() * 1000))
start_url = 'http://service.js.10086.cn/my/MY_QDCX.html?t={}'.format(start_time)
self.start_url = 'my/MY_QDCX.html?t={}'.format(start_time)
code, key, resp = self.get(start_url)
if code != 0:
return code, key, ""
login_url = "https://service.js.10086.cn/actionDispatcher.do"
headers = {
'Referer': 'http://service.js.10086.cn/login.html?url=my/MY_QDCX.html',
}
# Building the form can raise (missing kwargs key, DES encryption failure).
try:
data = {
'userLoginTransferProtocol': 'https',
'redirectUrl': self.start_url + "#home",
'reqUrl': 'login',
'busiNum': 'LOGIN',
'operType': '0',
'passwordType': '1',
'isSavePasswordVal': '0',
'isSavePasswordVal_N': '1',
'currentD': '1',
'loginFormTab': '#home',
'loginType': '1',
'smsFlag': '1',
'smsCode3': '',
'mobile': kwargs['tel'],
'city': 'NJDQ',
'password': self.enPwd(kwargs['pin_pwd']),
'password3': '',
'verifyCode': '请输入验证码',
}
except:
error = traceback.format_exc()
self.log("crawler", "param_error pin_pwd:{} error:{}".format(kwargs['pin_pwd'], error), "")
return 9, "param_error"
code, key, resp = self.post(login_url, data=data, headers=headers)
if code != 0:
return code, key
# resultCode=-BSP10001 marks "SMS code required", i.e. password accepted.
if "resultCode=-BSP10001" not in resp.text:
self.log("crawler", "unknown_error", resp)
return 9, "unknown_error", ""
# Ask the portal to actually send the login SMS.
send_sms_url = "http://service.js.10086.cn/actionDispatcher.do"
data = {
'reqUrl': 'login',
'busiNum': 'LOGIN',
'fucName': 'sendLoginMsg',
'flag': '1',
}
code, key, resp = self.post(send_sms_url, data=data)
if code != 0:
return code, key
if '"resultCode":"0"' in resp.text and '"success":true' in resp.text:
return 0, "success", ""
else:
self.log("crawler", "登录短信发送未知错误", resp)
return 9, "send_sms_error", ""
def get_call_log_by_month(self, year, month):
    """Request one month of call records from the carrier portal.

    Returns (code, key, message, response); code 0 means the HTTP
    round-trip succeeded.  Parsing the payload is left to the caller.
    """
    payload = {
        'reqUrl': 'MY_QDCXQueryNew',
        'busiNum': 'QDCX',
        'queryMonth': "%d%02d" % (year, month),
        'queryItem': 1,
        'qryPages': '',
        'qryNo': '1',
        'operType': '3',
        # Day 31 is accepted by the backend even for shorter months.
        'queryBeginTime': "%s-%02d-01" % (year, month),
        'queryEndTime': "%s-%02d-31" % (year, month),
    }
    request_headers = {
        "X-Requested-With": "XMLHttpRequest",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
    }
    bill_detail_url = "http://service.js.10086.cn/my/actionDispatcher.do"
    code, key, resp = self.post(bill_detail_url, data=payload, headers=request_headers)
    if code != 0:
        return code, key, "", resp
    return 0, "success", "", resp
def login(self, **kwargs):
"""Complete login by verifying the SMS code, then warm up the session.

kwargs must contain 'sms_code'.  Returns (code, key); 0/'success' on
success, 1 for wrong service password, 9 for other failures.
NOTE(review): one branch returns a 3-tuple ("unknown_error", "").
"""
verify_login_sms_url = 'http://service.js.10086.cn/actionDispatcher.do'
data = {
'reqUrl': 'login',
'busiNum': 'LOGIN',
'smsLoginCode': kwargs['sms_code'],
'fucName': 'verySmsCode',
'flag': '1',
}
code, key, resp = self.post(verify_login_sms_url, data=data)
if code != 0:
return code, key,
# Wrong SMS code entered by the user.
if '"resultCode":"1"' in resp.text and '"success":false' in resp.text:
self.log("user", "短信验证码错误", resp)
return 9, "verify_error"
# Wrong service password (logicCode -3002).
if '密码错误' in resp.text and 'logicCode":"-3002' in resp.text:
self.log("user", "pin_pwd_error", resp)
return 1, "pin_pwd_error"
if 'resultCode":"0"' not in resp.text and 'success":true' not in resp.text:
self.log("crawler", "unknown_error", resp)
return 9, "unknown_error", ""
# Touch the current month's call log and the panel page so the
# session is fully established before crawling starts.
query_date = date.today()
level, key, message, r = self.get_call_log_by_month(query_date.year, query_date.month)
if level != 0:
return level, key
bill_panel_url = 'http://service.js.10086.cn/my/MY_QDCX.html'
code, key, resp = self.get(bill_panel_url)
if code != 0:
return code, key
return 0, 'success'
def get_verify_type(self, **kwargs):
    """Report the second-factor mechanism: this carrier always uses SMS."""
    verify_type = 'SMS'
    return verify_type
def verify(self, **kwargs):
"""Confirm the crawl-authorization SMS code for the call-log query.

kwargs must contain 'sms_code'.  Returns (level, key): 0 on success,
2 when the user typed a wrong code, 9 for site/system problems.
"""
today = date.today()
data = {}
data['reqUrl'] = 'MY_QDCXQueryNew'
data['busiNum'] = 'QDCX'
data['queryMonth'] = "%d%02d" % (today.year, today.month)
data['queryItem'] = 1
data['qryPages'] = ''
data['qryNo'] = '1'
data['operType'] = '3'
data['queryBeginTime'] = "%s-%02d-01" % (today.year, today.month)
data['queryEndTime'] = "%s-%02d-31" % (today.year, today.month)
# confirmFlg=1 + smsNum submits the verification code with the query.
data['confirmFlg'] = '1'
data['smsNum'] = kwargs['sms_code']
url = "http://service.js.10086.cn/my/actionDispatcher.do"
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
}
code, key, resp = self.post(url, data=data, headers=headers)
if code != 0:
return code, key
try:
if "<title> system maintenance" in resp.text:
self.log("crawler", u"系统维护", resp)
return 9, "website_maintaining_error"
obj = json.loads(resp.text)
if obj['resultCode'] == "0":
status_key, level, message = 'success', 0, ''
else:
self.log("crawler", "验证短信失败", resp)
# systemCode -200009: wrong code; -200002: site busy.
if obj['systemCode'] == "-200009":
return 2, 'verify_error'
elif obj['systemCode'] == "-200002":
return 9, "website_busy_error"
status_key, level, message = 'verify_error', 2, obj['resultMsg']
self.log("user", "verify_error", resp)
return level, status_key
except:
error = traceback.format_exc()
self.log("crawler", 'unknown_error: ' + error, resp)
return 9, "unknown_error"
def send_verify_request(self, **kwargs):
"""Request the crawl-authorization SMS, rate-limited to one per 30 s.

Returns (code, key, message); 0/'success' when the SMS was sent.
"""
# Throttle: the portal rejects SMS requests sent within ~30 seconds.
if self.sms_send_time != None:
sleep_time = 30 - int(time.time() - self.sms_send_time)
if sleep_time > 0:
time.sleep(sleep_time)
sms_post_url = "http://service.js.10086.cn/my/sms.do"
sms_req_payload = {'busiNum': 'QDCX'}
headers = {"Origin": "http://service.js.10086.cn", 'X-Requested-With': 'XMLHttpRequest',
"Referer": 'http://service.js.10086.cn/'+self.start_url}
code, key, resp = self.post(sms_post_url, headers=headers, data=sms_req_payload)
if code != 0:
return code, key, ""
# Empty body or a maintenance page both count as "site busy".
if resp.text.strip() == "":
self.log("website", u"官网返回数据为空", resp)
return 9, "website_busy_error", ""
if "<html><head><title> system" in resp.text:
self.log("website", u"系统繁忙或者是升级", resp)
return 9, "website_busy_error", ""
try:
ret = resp.json()
error_code = ret['resultCode']
except:
error = traceback.format_exc()
self.log("crawler", "request_error: " + error, resp)
return 9, 'request_error', ""
if error_code != '0':
self.log("crawler", u"发送短信失败", resp)
return 9, 'send_sms_error', ''
# Remember when we sent it so the next call can throttle.
self.sms_send_time = time.time()
return 0, 'success', ''
def crawl_call_log_short_num(self, **kwargs):
"""Crawl the last six months of short-number (queryItem=8) call records.

Returns (0, "success", records, miss_list, pos_miss_list); months that
failed go to miss_list, months with no data to pos_miss_list.
"""
miss_list = []
pos_miss_list = []
records = []
message_list = []
today = date.today()
# Offsets 0..-5: current month and the five previous months.
delta_months = [i for i in range(0, -6, -1)]
for delta_month in delta_months:
query_date = today + relativedelta(months=delta_month)
end_date = monthrange(query_date.year, query_date.month)[1]
query_month = "%s%02d" % (query_date.year, query_date.month)
st = "%s-%02d-01" % (query_date.year, query_date.month)
et = "%s-%02d-%d" % (query_date.year, query_date.month, end_date)
data = {
"reqUrl": "MY_QDCXQueryNew",
"busiNum": "QDCX",
# NOTE(review): queryMonth is hard-coded to "201709" while the
# begin/end times vary per month — looks suspicious, verify.
"queryMonth": "201709",
"queryItem": "8",
"qryPages": "8:1005:-1",
"qryNo": "1",
"operType": "3",
"queryBeginTime": st,
"queryEndTime": et
}
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
}
url = "http://service.js.10086.cn/my/actionDispatcher.do"
message = ""
# Retry loop; the for/else runs only when no attempt broke out.
for i in range(self.max_retry):
code, key, resp = self.post(url, data=data, headers=headers)
if code != 0:
message = "network_request_error"
continue
level, key, message, ret = parse_call_record_short(resp.text, query_month, self_obj=self)
if level != 0:
continue
records.extend(ret)
break
else:
if key == 'no_data':
self.log("crawler", "短号码{}{}".format(key, message), resp)
pos_miss_list.append(query_month)
else:
if message != "network_request_error":
self.log("crawler", "短号码{}{}".format(key, message), resp)
miss_list.append(query_month)
message_list.append(key)
return 0, "success", records, miss_list, pos_miss_list
def crawl_call_log(self, **kwargs):
"""Crawl six months of call records with a retrying work queue.

Months are queued as (form-data, month, retries-left); network failures
are re-queued until retries run out and a 15 s grace window expires.
Short-number records are appended at the end.  Returns
(level, key, records, miss_list, pos_miss_list).
"""
miss_list = []
pos_miss_list = []
today = date.today()
records = []
message_list = []
delta_months = [i for i in range(0, -6, -1)]
page_and_retry = []
bill_detail_url = "http://service.js.10086.cn/my/actionDispatcher.do"
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_QDCX.html"
}
# Build one request description per month.
for delta_month in delta_months:
query_date = today + relativedelta(months=delta_month)
data = {}
query_month = "%d%02d" % (query_date.year, query_date.month)
data['reqUrl'] = 'MY_QDCXQueryNew'
data['busiNum'] = 'QDCX'
data['queryMonth'] = query_month
data['queryItem'] = 1
data['qryPages'] = ''
data['qryNo'] = '1'
data['operType'] = '3'
data['queryBeginTime'] = "%s-%02d-01" % (query_date.year, query_date.month)
data['queryEndTime'] = "%s-%02d-31" % (query_date.year, query_date.month)
data['confirmFlg'] = '1'
data['smsNum'] = kwargs['sms_code']
page_and_retry.append((data, query_month, self.max_retry))
log_for_retry_request = []
st_time = time.time()
# Extra 15 s budget for re-queuing after retries are exhausted.
et_time = st_time + 15
while page_and_retry:
data, m_query_month, m_retry_times = page_and_retry.pop(0)
log_for_retry_request.append((m_query_month, m_retry_times))
m_retry_times -= 1
code, key, resp = self.post(bill_detail_url, data=data, headers=headers)
if code == 0:
level, key, message, ret = parse_call_record(resp.text, m_query_month, self_obj=self)
if level != 0:
continue
records.extend(ret)
else:
# Network failure: re-queue while retries or the grace window last.
new_time = time.time()
if m_retry_times > 0:
page_and_retry.append((data, m_query_month, m_retry_times))
elif new_time < et_time:
page_and_retry.append((data, m_query_month, m_retry_times))
time.sleep(random.randint(3, 5))
else:
message = "network_request_error"
continue
if key == 'no_data':
self.log("crawler", "{}{}".format(key, message), resp)
pos_miss_list.append(m_query_month)
elif key == "success":
pass
else:
if message != "network_request_error":
self.log("crawler", "{}{}".format(key, message), resp)
miss_list.append(m_query_month)
message_list.append(key)
short_code, short_key, short_result, short_miss_list, short_pos_miss_list = self.crawl_call_log_short_num(
**kwargs)
records.extend(short_result)
self.log("crawler", "重试记录:{}".format(log_for_retry_request), "")
# All six months missing: decide between "site busy" and generic failure.
# NOTE(review): map() has no .count in Python 3 — this file targets py2.
if len(miss_list + pos_miss_list) == 6:
temp_list = map(
lambda x: x.count('request_error') or x.count('website_busy_error') or x.count('success') or 0,
message_list)
if temp_list.count(0) == 0:
return 9, 'website_busy_error', [], miss_list, pos_miss_list
else:
return 9, 'crawl_error', [], miss_list, pos_miss_list
return 0, 'success', records, miss_list, pos_miss_list
def time_transform(self, time_str, bm='utf-8', str_format="%Y%m%d%H%M%S"):
    """Convert a timestamp string into a Unix-epoch-seconds string.

    Args:
        time_str: timestamp text (str, or bytes to be decoded with *bm*).
        bm: encoding used to decode a bytes *time_str*.
        str_format: time.strptime format of *time_str*.

    Returns:
        (0, 'success', epoch_seconds_str) on success, or
        (9, 'unknown_error', diagnostic) on failure.
    """
    try:
        # Fix: the old code called time_str.encode(bm) and fed bytes to
        # strptime, which breaks on Python 3; decode bytes, pass str.
        if isinstance(time_str, bytes):
            time_str = time_str.decode(bm)
        time_type = time.strptime(time_str, str_format)
    except Exception:
        error = traceback.format_exc()
        return 9, 'unknown_error', u"time_transform failed: %s %s" % (error, time_str)
    return 0, 'success', str(int(time.mktime(time_type)))
def crawl_info(self, **kwargs):
"""Scrape the profile page for name and account-opening date.

kwargs must contain 'id_card' (echoed into the result).  Returns
(level, key, result_dict); the open date is converted to epoch seconds.
"""
result = {}
url = "http://service.js.10086.cn/my/MY_GRZLGL.html#home"
code, key, resp = self.get(url)
if code != 0:
return code, key, {}
# The profile JSON is embedded in a JS callback; cut it out of the page.
obj_str = ''
sections = resp.text.split('window.top.BmonPage.commonBusiCallBack(')
if len(sections) > 1:
obj_str = sections[1].split(", 'MY_GRZLGL')")[0]
if obj_str == '':
self.log("crawler", 'expected_key_error', resp)
return 9, "expected_key_error", {}
try:
obj = json.loads(obj_str)
result['is_realname_register'] = True
result['full_name'] = obj['resultObj']['kehuName']
result['id_card'] = kwargs['id_card']
result['open_date'] = obj['resultObj']['ruwangAt']
except:
error = traceback.format_exc()
self.log("crawler", 'unknown_error: ' + error, resp)
return 9, "unknown_error", {}
# Replace the raw ruwangAt string with epoch seconds.
level, key, open_date = self.time_transform(obj['resultObj']['ruwangAt'])
if level != 0:
self.log("crawler", '转换时间失败{}{}'.format(key, open_date), resp)
return level, key, {}
result['open_date'] = open_date
result['address'] = ''
return 0, 'success', result
def crawl_phone_bill(self, **kwargs):
"""Crawl the last six monthly bills; returns (level, key, bills, miss_list).

The current month is excused from miss_list; if all five past months
failed, the failure kind is derived from the collected error keys.
"""
miss_list = []
phone_bill = list()
params = {'tel': kwargs['tel']}
message_list = []
for month in self.__monthly_period(6, '%Y%m'):
params['month'] = month
level, key, message, result, miss = self.crawl_month_bill(**params)
if level != 0:
message_list.append(key)
miss_list.append(miss)
# Skip months whose bill amount could not be parsed.
if result.get('bill_amount', '') == '':
continue
if result:
phone_bill.append(result)
# The current month may legitimately have no bill yet.
now_month = datetime.datetime.now().strftime("%Y%m")
now_month in miss_list and miss_list.remove(now_month)
# NOTE(review): map() has no .count in Python 3 — this file targets py2.
if len(miss_list) == 5:
temp_list = map(lambda x: x.count('request_error') or x.count('website_busy_error') or 0, message_list)
if temp_list.count(0) == 0:
return 9, 'website_busy_error', [], miss_list
return 9, "crawl_error", [], miss_list
return 0, 'success', phone_bill, miss_list
def crawl_month_bill(self, **kwargs):
"""Fetch and parse one month's bill; retried up to self.max_retry times.

kwargs must contain 'month' ("YYYYMM").  Returns
(level, key, message, month_bill_dict, missed_month).
"""
month_bill_url = 'http://service.js.10086.cn/my/actionDispatcher.do'
data = {
'reqUrl': 'MY_GRZDQuery',
'busiNum': 'ZDCX',
'methodName': 'getMobileHistoryBill',
'beginDate': kwargs['month']
}
headers = {
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Referer": "http://service.js.10086.cn/my/MY_ZDCX.html"
}
error = ""
for i in range(self.max_retry):
code, key, resp = self.post(month_bill_url, data=data, headers=headers)
if code != 0:
error = "network_request_error"
continue
# NOTE(review): 'utf=8' is almost certainly a typo for 'utf-8';
# requests treats the unknown codec as a fallback — confirm intent.
resp.encoding = 'utf=8'
month_bill = {
'bill_month': kwargs['month'],
'bill_amount': '',
'bill_package': '',
'bill_ext_calls': '',
'bill_ext_data': '',
'bill_ext_sms': '',
'bill_zengzhifei': '',
'bill_daishoufei': '',
'bill_qita': ''
}
try:
result = json.loads(resp.text)
if 'billBean' in result['resultObj']:
bill = result['resultObj']['billBean']['billRet']
# NOTE(review): level 9 paired with key 'success' looks odd —
# presumably "bill not issued yet" is meant; verify with caller.
if u'移动话费出账期' in result['resultMsg']:
return 9, 'success', '', month_bill, kwargs['month']
if bill == None:
error = "账单查询出错"
continue
# Fees come back in cents; convert to yuan strings.
month_bill['bill_amount'] = '%.2f' % (float(bill['totalFee']) / 100)
for x in bill['feeDetailList']:
if 1 == x['level']:
if u'套餐及固定费' == x['feeName']:
month_bill['bill_package'] = '%.2f' % (float(x['fee']) / 100)
elif u'套餐外语音通信费' == x['feeName']:
month_bill['bill_ext_calls'] = '%.2f' % (float(x['fee']) / 100)
elif u'套餐外短信、彩信费' == x['feeName']:
month_bill['bill_ext_sms'] = '%.2f' % (float(x['fee']) / 100)
break
except:
error = traceback.format_exc()
continue
else:
# All retries failed.
if error != "network_request_error":
self.log("crawler", error, resp)
return 9, "html_error", error, {}, kwargs['month']
return 0, 'success', '', month_bill, ""
def __monthly_period(self, length=6, strf='%Y%m'):
    """Yield month strings for the current month and the previous length-1
    months, newest first, formatted with *strf*."""
    now = datetime.datetime.now()
    offset = 0
    while offset < length:
        yield (now - relativedelta(months=offset)).strftime(strf)
        offset += 1
# Manual smoke test: run the crawler end to end with a sample account.
if __name__ == '__main__':
c = Crawler()
mock = {}
mock['pin_pwd'] = '340393'
mock['tel'] = '15094393043'
c.self_test(**mock)
| true | true |
f73b718d2cae9741980debef98a010af966684c5 | 7,308 | py | Python | cloudmerge-hpc/mpi_merge.py | xsun28/CloudMerge | c4211bac841b103c77d6f9c4af633102742298ac | [
"Apache-2.0"
] | null | null | null | cloudmerge-hpc/mpi_merge.py | xsun28/CloudMerge | c4211bac841b103c77d6f9c4af633102742298ac | [
"Apache-2.0"
] | 1 | 2020-03-27T18:29:00.000Z | 2020-03-27T18:29:00.000Z | cloudmerge-hpc/mpi_merge.py | xsun28/CloudMerge | c4211bac841b103c77d6f9c4af633102742298ac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 27 16:54:42 2017
@author: Xiaobo
"""
import numpy as np
from mpi4py import MPI
import commands
import os
import sys
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(path)
#sys.path.append('/Users/Xiaobo/git/CloudMerge/CloudMerge/cloudmerge-hpc')
#sys.path.append('/home/ubuntu/cloudmerge/cloudmerge-hpc/')
import multiway_merge as mm
import argparse
###############################################################################
def get_source(rank, size, rounds):
    """Return the ranks this process receives merged files from in a round.

    In round r the ranks are viewed on a grid with spacing 2**r.  Even
    virtual ranks receive from their right neighbour (and from the
    neighbour-but-one when that one would otherwise be left unpaired);
    odd virtual ranks and the last unpaired rank receive nothing.
    """
    step = np.power(2, rounds)
    virt = int(rank / step)
    virt_size = int(size / step)
    if virt % 2 != 0 or virt + 1 >= virt_size:
        return []
    # Absorb the trailing odd rank when it has no partner of its own.
    if virt + 3 >= virt_size and virt + 2 < virt_size:
        return [step * (virt + 1), step * (virt + 2)]
    return [step * (virt + 1)]
#------------------------------------------------------------------------------
def get_dest(rank, size, rounds):
    """Return the rank this process sends its merged file to in a round,
    or -1 (SELF) when it keeps receiving instead of sending."""
    step = np.power(2, rounds)
    virt = int(rank / step)
    virt_size = int(size / step)
    if virt % 2 != 0:
        # Odd virtual ranks send to their left neighbour.
        return step * (virt - 1)
    if virt + 1 >= virt_size:
        # Last unpaired even rank sends two virtual slots left (min rank 0).
        target = step * (virt - 2)
        return target if target >= 0 else 0
    return -1
#------------------------------------------------------------------------------
def splits(filenum, size):
    """Partition *filenum* items as evenly as possible across *size* ranks.

    Returns (sendcounts, displacements) as tuples suitable for Scatterv;
    earlier ranks get the smaller shares when the division is uneven.
    """
    counts = np.zeros(size)
    offsets = np.zeros(size)
    assigned_so_far = 0
    for idx in range(size):
        share = int((filenum - assigned_so_far) / (size - idx))
        offsets[idx] = assigned_so_far
        counts[idx] = share
        assigned_so_far += share
    return tuple(counts), tuple(offsets)
#------------------------------------------------------------------------------
def get_output_name(localname, rcvname):
    """Combine merged-file range names like '1_4' + ['5_8'] into '1_8'.

    Returns *localname* unchanged when nothing was received (rcvname None).
    """
    if rcvname is None:
        return localname
    first_index = localname.split('_')[0]
    last_index = rcvname[-1].split('_')[-1]
    return '_'.join((first_index, last_index))
###############################################################################
# --- Driver: scatter input files across MPI ranks, merge locally, then
# --- combine pairwise in log2(size) tree rounds toward rank 0.
parser = argparse.ArgumentParser(description='cloudmerge-hpc')
parser.add_argument('-i',required=True,help='input file directory path',dest='input',metavar='/home/ubuntu/cloudmerge/input/')
parser.add_argument('-o',required=True,help='output file directory path',dest='output',metavar='/home/ubuntu/cloudmerge/output/')
parser.add_argument('-n',required=True,help='input file number',dest='filenum',metavar='10',type=int)
parser.add_argument('-l',required=False,default='1',help='lower boundary of chrmosomes',dest='lower_chr',metavar='1')
parser.add_argument('-u',required=False,default='M',help='upper boundary of chromosomes',dest='upper_chr',metavar='M')
parser.add_argument('-g',required=False,default=9,help='genotype column number',dest='gtype_col',metavar='9',type=int)
parser.add_argument('-f',required=False,default='PASS',help='filter value',dest='filter',metavar='PASS')
args = parser.parse_args()
#args = parser.parse_args('-i abc -o def -n 10 -l 1 -u 26 -g 9 -f PASS'.split())
#input_path = '/home/ubuntu/cloudmerge/input/'
#output_path = '/home/ubuntu/cloudmerge/output/'
#input_path = '/Users/Xiaobo/Desktop/input/'
#output_path = '/Users/Xiaobo/Desktop/output/'
input_path = args.input
output_path = args.output
filenum = args.filenum
lower_chr = args.lower_chr
upper_chr = args.upper_chr
qfilter = args.filter
genotype_col = args.gtype_col
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
host = commands.getoutput("hostname")
rounds = 0
# Rank 0 scatters file indices 1..filenum; each rank gets its slice.
sendcounts,disp = splits(filenum,size)
if rank == 0:
sendbuff = np.linspace(1,filenum,filenum)
else:
sendbuff = None
rcvbuff = np.zeros(int(sendcounts[rank]))
comm.Scatterv([sendbuff,sendcounts,disp,MPI.DOUBLE],rcvbuff,root=0)
#local_input_files = map(lambda x: input_path+str(int(x))+'.bz2',rcvbuff)
#for file in local_input_files:
# print('unzipping files %s in rank %d' % (str(local_input_files),rank))
# os.system('bunzip2 '+file)
# NOTE(review): on Python 3 this map object is lazy; the file targets py2
# (shebang python2, `commands` module).
local_input_files = map(lambda x: input_path+str(int(x))+'.bz2',rcvbuff)
#local_merged_files = "_".join(map(lambda x: str(int(x)),rcvbuff))
# Local merge of this rank's assigned VCFs; name encodes the index range.
local_merged_files = str(int(rcvbuff[0]))+'_'+str(int(rcvbuff[-1]))
merger = mm.multiway_merger(local_input_files, output_path+local_merged_files,lower_chr,upper_chr,qfilter,genotype_col,merge_type='vcf')
merger.start()
print('merged_files %s'%local_merged_files)
# Tree-reduction: each round, senders ship their file name to a receiver,
# receivers merge their own file with the received ones.
while True:
src = get_source(rank,size,rounds)
# if len(src) == 0: #only when no source, we need a destination
dest = get_dest(rank,size,rounds)
rounds = rounds+1
if len(src) == 0:
if rank > 0:
comm.send(local_merged_files,dest=dest,tag=0)
print('i am rank %d, host is %s, sent merged files is %s, source is %s, dest is %d' %(rank,host,local_merged_files,str(src),dest))
break ## send the filename to dest process and quit
elif len(src) == 1:
local_files = [output_path+local_merged_files]
rcv_merged_file = comm.recv(source=src[0],tag=0)
local_files.extend([output_path+rcv_merged_file])
# local_merged_files = '_'.join([local_merged_files,rcv_merged_file])
local_merged_files = get_output_name(local_merged_files,[rcv_merged_file])
print('i am rank %d, host is %s, local merged file is %s, src is %s, dest is %d' %(rank,host,local_merged_files,str(src),dest))
else:
local_files = [output_path+local_merged_files]
rcv_merged_files = []
for i,s in enumerate(src):
print('i am rank %d, host is %s, src is %s, dest is %d' %(rank,host,s,dest))
rcv_file = comm.recv(source=s,tag=0)
local_files.extend([output_path+rcv_file])
rcv_merged_files.extend([rcv_file])
# local_merged_files = '_'.join([local_merged_files]+rcv_merged_files)
local_merged_files = get_output_name(local_merged_files,rcv_merged_files)
if rank == 0:
src = get_source(rank,size,rounds)
if len(src) == 0: #### the last merging step
merger = mm.multiway_merger(local_files,output_path+local_merged_files,lower_chr,upper_chr,qfilter,genotype_col,False,merge_type='tped')
merger.start()
break;
merger = mm.multiway_merger(local_files,output_path+local_merged_files,lower_chr,upper_chr,qfilter,genotype_col,merge_type='tped')
merger.start()
################################################################################
# if rank >0:
# comm.send(local_merged_files,dest=dest,tag=0)
# print('i am rank %d, host is %s, send local merged files is %s, source is %s, dest is %d' %(rank,host,local_merged_files,str(src),dest))
#print('rank is %d, host is %s, data is %s' %(rank,host,str(rcvbuff)))
# create numpy arrays to reduce
#src = (np.arange(8) + rank*8).reshape(4,2)
#dst = np.zeros_like(src)
#
#def myadd(xmem, ymem, dt):
# x = np.frombuffer(xmem, dtype=src.dtype)
# y = np.frombuffer(ymem, dtype=src.dtype)
#
# z = x + y
#
# print("Rank %d on host %s reducing %s (%s) and %s (%s), yielding %s" % (rank, host, x, type(x), y, type(y), z))
#
# y[:] = z
#
#op = MPI.Op.Create(myadd)
#
#MPI.COMM_WORLD.Reduce(src, dst, op)
#
#if MPI.COMM_WORLD.rank == 0:
# print("ANSWER: %s" % dst) | 38.666667 | 146 | 0.632868 |
import numpy as np
from mpi4py import MPI
import commands
import os
import sys
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(path)
import multiway_merge as mm
import argparse
| true | true |
f73b7194cf9635a1ee1a086d7b28c9a5c9ce3dbc | 215 | py | Python | superlists/lists/urls.py | Rishant96/Test-Driven-Development | e60f24e1554acf933878c000b64c865902de53db | [
"BSD-3-Clause"
] | null | null | null | superlists/lists/urls.py | Rishant96/Test-Driven-Development | e60f24e1554acf933878c000b64c865902de53db | [
"BSD-3-Clause"
] | null | null | null | superlists/lists/urls.py | Rishant96/Test-Driven-Development | e60f24e1554acf933878c000b64c865902de53db | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from . import views
# URL namespace for {% url 'lists:...' %} reverses.
app_name = "lists"
# Route table: '' -> home page, hard-coded single-list page -> view_list.
urlpatterns = [
path('', views.home_page, name="home"),
path('the-only-list-in-the-world/', views.view_list,
name='view_list')
]
| 21.5 | 56 | 0.655814 | from django.urls import path
from . import views
app_name = "lists"
urlpatterns = [
path('', views.home_page, name="home"),
path('the-only-list-in-the-world/', views.view_list,
name='view_list')
]
| true | true |
f73b738d4173688dd65e3dc89f6ba73e754fedaa | 624 | py | Python | indico/modules/news/views.py | uxmaster/indico | ecd19f17ef6fdc9f5584f59c87ec647319ce5d31 | [
"MIT"
] | 1 | 2019-11-03T11:34:16.000Z | 2019-11-03T11:34:16.000Z | indico/modules/news/views.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | [
"MIT"
] | null | null | null | indico/modules/news/views.py | NP-compete/indico | 80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.admin.views import WPAdmin
from indico.util.i18n import _
from indico.web.views import WPDecorated, WPJinjaMixin
class WPNews(WPJinjaMixin, WPDecorated):
"""Jinja-rendered public page for the news module."""
# Templates are looked up under the news/ template directory.
template_prefix = 'news/'
title = _('News')
def _get_body(self, params):
# Delegate body rendering to the Jinja mixin's template machinery.
return self._get_page_content(params)
class WPManageNews(WPAdmin):
"""Admin-area page for managing news; shares the news template prefix."""
template_prefix = 'news/'
| 24.96 | 57 | 0.745192 |
from __future__ import unicode_literals
from indico.modules.admin.views import WPAdmin
from indico.util.i18n import _
from indico.web.views import WPDecorated, WPJinjaMixin
class WPNews(WPJinjaMixin, WPDecorated):
template_prefix = 'news/'
title = _('News')
def _get_body(self, params):
return self._get_page_content(params)
class WPManageNews(WPAdmin):
template_prefix = 'news/'
| true | true |
f73b7408bc16ab24872927eba07c71af7a4367d2 | 802 | py | Python | lib/django-1.2/django/core/cache/backends/dummy.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.2/django/core/cache/backends/dummy.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.2/django/core/cache/backends/dummy.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | "Dummy cache backend"
from django.core.cache.backends.base import BaseCache
class CacheClass(BaseCache):
    """Cache backend that stores nothing: writes are discarded, reads miss.

    Keys are still validated so code exercising the dummy backend catches
    invalid-key bugs that would surface with a real backend.
    """

    def __init__(self, *args, **kwargs):
        # Nothing to connect to or configure.
        pass

    def add(self, key, *args, **kwargs):
        # Pretend the value was stored.
        self.validate_key(key)
        return True

    def get(self, key, default=None):
        # Every lookup is a miss.
        self.validate_key(key)
        return default

    def set(self, key, *args, **kwargs):
        self.validate_key(key)

    def delete(self, key, *args, **kwargs):
        self.validate_key(key)

    def get_many(self, *args, **kwargs):
        return {}

    def has_key(self, key, *args, **kwargs):
        self.validate_key(key)
        return False

    def set_many(self, *args, **kwargs):
        pass

    def delete_many(self, *args, **kwargs):
        pass

    def clear(self):
        pass
| 21.105263 | 53 | 0.593516 |
from django.core.cache.backends.base import BaseCache
class CacheClass(BaseCache):
def __init__(self, *args, **kwargs):
pass
def add(self, key, *args, **kwargs):
self.validate_key(key)
return True
def get(self, key, default=None):
self.validate_key(key)
return default
def set(self, key, *args, **kwargs):
self.validate_key(key)
def delete(self, key, *args, **kwargs):
self.validate_key(key)
def get_many(self, *args, **kwargs):
return {}
def has_key(self, key, *args, **kwargs):
self.validate_key(key)
return False
def set_many(self, *args, **kwargs):
pass
def delete_many(self, *args, **kwargs):
pass
def clear(self):
pass
| true | true |
f73b7592057f2df4bc67cde0a7d7949e4e675d90 | 2,265 | py | Python | simulation-code/old_functions/average_rotated_trials.py | young24/LFP-simulation-in-turtle-brain | cd801dc02804d027b7c245b0f0ca9c8b00f8d450 | [
"MIT"
] | null | null | null | simulation-code/old_functions/average_rotated_trials.py | young24/LFP-simulation-in-turtle-brain | cd801dc02804d027b7c245b0f0ca9c8b00f8d450 | [
"MIT"
] | null | null | null | simulation-code/old_functions/average_rotated_trials.py | young24/LFP-simulation-in-turtle-brain | cd801dc02804d027b7c245b0f0ca9c8b00f8d450 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 12 09:12:00 2016
Average the spatial influence of the morphology
@author: young
"""
import os
from os.path import join
import pylab as plt
import numpy as np
def average_data_in_dif_folder(dirName, dataName, z_pos, numRotation, idxSection):
    """Average one trace over all rotation folders.

    Loads dirName_<i>/<dataName><z_pos>.npy for i in 0..numRotation-1,
    takes row *idxSection* of each array and returns their element-wise
    mean.
    """
    total = None
    for rotation in range(numRotation):
        path = dirName + '_' + str(rotation) + '/' + dataName + str(z_pos) + '.npy'
        trace = np.load(path)[idxSection]
        total = trace if total is None else total + trace
    return total / numRotation
# Driver: average potentials / membrane voltages / return currents over
# 8 rotated trials per depth, plot them, and save the averaged arrays.
# NOTE(review): np.save targets 'averaged_result_for_real_neuron', which
# presumably must already exist — verify before running.
if __name__ == '__main__':
numRotation = 8
dirName = 'sim_results'
xfileName = dirName+'_0/'+'tvec.npy'
x=np.load(xfileName)
fig = plt.figure(figsize=[10, 10])
# Extracellular_potential
ax1 = plt.subplot(311, ylabel='$\mu$V',
title='Extracellular\npotential')
# share x only
ax2 = plt.subplot(312, sharex=ax1, ylabel='mV',
title='Membrane potential')
ax3 = plt.subplot(313, sharex=ax1, xlabel='ms', ylabel='nA',
title='Return currents')
legendList = []
# One curve per electrode depth, 20..300 um in 10 um steps.
for z_pos in range(20,301,10):
legendList.append('z:'+str(z_pos)+'$\mu$m')
dataName='phi_z'
y1 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=2) # centre electrode
ax1.plot(x, y1)
dataName='phi_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y1)
dataName='vmem_z'
y2 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=0) # soma index = 0
ax2.plot(x, y2)
dataName='vmem_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y2)
dataName='imem_z'
y3 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=0)
ax3.plot(x, y3)
dataName='imem_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y3)
plt.legend(legendList, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('averaged_z_profile', bbox_inches='tight')
| 33.80597 | 107 | 0.628256 |
import os
from os.path import join
import pylab as plt
import numpy as np
def average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection):
yfileName = dirName+'_0/'+dataName+str(z_pos)+'.npy'
y=np.load(yfileName)
y=y[idxSection]
for i in range(1,numRotation,1):
yfileName = dirName+'_'+str(i)+'/'+dataName+str(z_pos)+'.npy'
temp_y = np.load(yfileName)
temp_y = temp_y[idxSection]
y = y + temp_y
y = y/numRotation
return y
if __name__ == '__main__':
numRotation = 8
dirName = 'sim_results'
xfileName = dirName+'_0/'+'tvec.npy'
x=np.load(xfileName)
fig = plt.figure(figsize=[10, 10])
ax1 = plt.subplot(311, ylabel='$\mu$V',
title='Extracellular\npotential')
ax2 = plt.subplot(312, sharex=ax1, ylabel='mV',
title='Membrane potential')
ax3 = plt.subplot(313, sharex=ax1, xlabel='ms', ylabel='nA',
title='Return currents')
legendList = []
for z_pos in range(20,301,10):
legendList.append('z:'+str(z_pos)+'$\mu$m')
dataName='phi_z'
y1 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=2)
ax1.plot(x, y1)
dataName='phi_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y1)
dataName='vmem_z'
y2 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=0)
ax2.plot(x, y2)
dataName='vmem_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y2)
dataName='imem_z'
y3 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=0)
ax3.plot(x, y3)
dataName='imem_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y3)
plt.legend(legendList, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('averaged_z_profile', bbox_inches='tight')
| true | true |
f73b7616494b07699e5229ce405a2904632aaff8 | 7,493 | py | Python | Rasp-main/PruebasDB.py | ascuadrado/ISC | 0d3f623dabf40dc0057e8da0d3c75c8b2dd339e9 | [
"MIT"
] | 1 | 2020-11-18T12:22:09.000Z | 2020-11-18T12:22:09.000Z | Rasp-main/PruebasDB.py | ascuadrado/ISC | 0d3f623dabf40dc0057e8da0d3c75c8b2dd339e9 | [
"MIT"
] | null | null | null | Rasp-main/PruebasDB.py | ascuadrado/ISC | 0d3f623dabf40dc0057e8da0d3c75c8b2dd339e9 | [
"MIT"
] | null | null | null | import sqlite3
import time
import random
from tqdm import tqdm
# Setup
# Path to the SQLite database file exercised by these tests.
dbFile = 'ISC/Rasp-main/Database.db'
DBDelay = 2 # seconds
# Data
# Module-level telemetry state, populated by init_dict() and mutated by
# generate_random_values(); bms aliases the three BMS board dicts.
general = dict()
charger = dict()
sevcon = dict()
bms1 = dict()
bms2 = dict()
bms3 = dict()
bms = [bms1, bms2, bms3]
def init_dict():
'''
Initializes internal dictionaries (general, sevcon, charger, bms1, bms2, bms3)
Args:
d: value to default all dictionaries
'''
d = -1
v = 4.2
t = 30
bms1['voltages'] = [v, v, v, v, v, v, v, v, v, v, v, v]
bms2['voltages'] = [v, v, v, v, v, v, v, v, v, v, v, v]
bms3['voltages'] = [v, v, v, v, v, v, v, v, v, v, v, v]
bms1['temperatures'] = [t, t]
bms2['temperatures'] = [t, t]
bms3['temperatures'] = [t, t]
charger['voltage'] = d
charger['current'] = d
charger['flags'] = [d, d, d, d, d]
sevcon['target_id'] = d
sevcon['id'] = d
sevcon['target_iq'] = d
sevcon['iq'] = d
sevcon['battery_voltage'] = d
sevcon['battery_current'] = d
sevcon['line_contactor'] = d
sevcon['capacitor_voltage'] = d
sevcon['throttle_value'] = d
sevcon['target_torque'] = d
sevcon['torque'] = d
sevcon['heatsink_temp'] = d
sevcon['maximum_motor_speed'] = d
sevcon['velocity'] = d
general['allOK'] = d
general['stateOfCharge'] = 1
general['opMode'] = "Testing"
general['sevconConnected'] = 0
general['chargerConnected'] = 0
general['bms1Connected'] = 0
general['bms2Connected'] = 0
general['bms3Connected'] = 0
def generate_random_values():
arr = bms1['voltages']
res = [0.0001 * random.randrange(0, 100, 1) for i in range(12)]
new_list = [x1 - x2 for (x1, x2) in zip(arr, res)]
bms1['voltages'] = new_list
arr = bms2['voltages']
res = [0.0001 * random.randrange(0, 100, 1) for i in range(12)]
new_list = [x1 - x2 for (x1, x2) in zip(arr, res)]
bms2['voltages'] = new_list
arr = bms3['voltages']
res = [0.0001 * random.randrange(0, 100, 1) for i in range(12)]
new_list = [x1 - x2 for (x1, x2) in zip(arr, res)]
bms3['voltages'] = new_list
arr = bms1['temperatures']
res = [0.01 * random.randrange(-50, 100, 1) for i in range(2)]
new_list = [x1 + x2 for (x1, x2) in zip(arr, res)]
bms1['temperatures'] = new_list
arr = bms2['temperatures']
res = [0.01 * random.randrange(-50, 100, 1) for i in range(2)]
new_list = [x1 + x2 for (x1, x2) in zip(arr, res)]
bms2['temperatures'] = new_list
arr = bms3['temperatures']
res = [0.01 * random.randrange(-50, 100, 1) for i in range(2)]
new_list = [x1 + x2 for (x1, x2) in zip(arr, res)]
bms3['temperatures'] = new_list
sevcon['throttle_value'] = random.randrange(-6, 6, 1)
sevcon['velocity'] = random.randrange(20, 160, 1)
def save_to_db(dataBaseName='ISC/Rasp-main/Database.db'):
'''
Saves the data collected to database
Args:
dataBaseName: Name of the file to sqlite3 database
'''
conn = sqlite3.connect(dataBaseName)
c = conn.cursor()
general['timestamp'] = time.time()
general['date'] = time.strftime("%Y-%m-%d")
general['time'] = time.strftime("%H:%M:%S")
c.execute("INSERT INTO general (timestamp, date, time, allOK, stateOfCharge,"
"sevconConnected, chargerConnected, bms1Connected,"
"bms2Connected, bms3Connected) VALUES (?,?,?,?,?,?,?,?,?,?)",
(general['timestamp'], general['date'], general['time'],
general['allOK'], general['stateOfCharge'], general['sevconConnected'],
general['chargerConnected'], general['bms1Connected'],
general['bms2Connected'], general['bms3Connected']))
c.execute("INSERT INTO charger (timestamp, date, time, voltage, current,"
" flag0, flag1, flag2, flag3, flag4) VALUES (?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
charger['voltage'], charger['current'],
charger['flags'][0], charger['flags'][1], charger['flags'][2],
charger['flags'][3], charger['flags'][4]))
c.execute("INSERT INTO sevcon (timestamp, date, time, target_id, id, "
"target_iq, iq, battery_voltage, battery_current, line_contactor, "
"capacitor_voltage, throttle_value, target_torque, torque, "
"heatsink_temp, maximum_motor_speed, velocity "
") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
(general['timestamp'], general['date'], general['time'],
sevcon['target_id'], sevcon['id'], sevcon['target_iq'],
sevcon['iq'], sevcon['battery_voltage'], sevcon['battery_current'],
sevcon['line_contactor'], sevcon['capacitor_voltage'],
sevcon['throttle_value'], sevcon['target_torque'],
sevcon['torque'], sevcon['heatsink_temp'],
sevcon['maximum_motor_speed'], sevcon['velocity']))
c.execute("INSERT INTO bms1 (timestamp, date, time, voltage1, voltage2, "
"voltage3, voltage4, voltage5, voltage6, voltage7, voltage8, "
"voltage9, voltage10, voltage11, voltage12, temperature1, "
"temperature2) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
bms1['voltages'][0], bms1['voltages'][1], bms1['voltages'][2],
bms1['voltages'][3], bms1['voltages'][4], bms1['voltages'][5],
bms1['voltages'][6], bms1['voltages'][7], bms1['voltages'][8],
bms1['voltages'][9], bms1['voltages'][10], bms1['voltages'][11],
bms1['temperatures'][0], bms1['temperatures'][1]))
c.execute("INSERT INTO bms2 (timestamp, date, time, voltage1, voltage2, "
"voltage3, voltage4, voltage5, voltage6, voltage7, voltage8, "
"voltage9, voltage10, voltage11, voltage12, temperature1, "
"temperature2) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
bms2['voltages'][0], bms2['voltages'][1], bms2['voltages'][2],
bms2['voltages'][3], bms2['voltages'][4], bms2['voltages'][5],
bms2['voltages'][6], bms2['voltages'][7], bms2['voltages'][8],
bms2['voltages'][9], bms2['voltages'][10], bms2['voltages'][11],
bms2['temperatures'][0], bms2['temperatures'][1]))
c.execute("INSERT INTO bms3 (timestamp, date, time, voltage1, voltage2, "
"voltage3, voltage4, voltage5, voltage6, voltage7, voltage8, "
"voltage9, voltage10, voltage11, voltage12, temperature1, "
"temperature2) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
bms3['voltages'][0], bms3['voltages'][1], bms3['voltages'][2],
bms3['voltages'][3], bms3['voltages'][4], bms3['voltages'][5],
bms3['voltages'][6], bms3['voltages'][7], bms3['voltages'][8],
bms3['voltages'][9], bms3['voltages'][10], bms3['voltages'][11],
bms3['temperatures'][0], bms3['temperatures'][1]))
conn.commit()
conn.close()
if __name__ == '__main__':
init_dict()
for i in tqdm(range(300)):
time.sleep(1)
generate_random_values()
save_to_db()
| 38.425641 | 86 | 0.558521 | import sqlite3
import time
import random
from tqdm import tqdm
dbFile = 'ISC/Rasp-main/Database.db'
DBDelay = 2
general = dict()
charger = dict()
sevcon = dict()
bms1 = dict()
bms2 = dict()
bms3 = dict()
bms = [bms1, bms2, bms3]
def init_dict():
d = -1
v = 4.2
t = 30
bms1['voltages'] = [v, v, v, v, v, v, v, v, v, v, v, v]
bms2['voltages'] = [v, v, v, v, v, v, v, v, v, v, v, v]
bms3['voltages'] = [v, v, v, v, v, v, v, v, v, v, v, v]
bms1['temperatures'] = [t, t]
bms2['temperatures'] = [t, t]
bms3['temperatures'] = [t, t]
charger['voltage'] = d
charger['current'] = d
charger['flags'] = [d, d, d, d, d]
sevcon['target_id'] = d
sevcon['id'] = d
sevcon['target_iq'] = d
sevcon['iq'] = d
sevcon['battery_voltage'] = d
sevcon['battery_current'] = d
sevcon['line_contactor'] = d
sevcon['capacitor_voltage'] = d
sevcon['throttle_value'] = d
sevcon['target_torque'] = d
sevcon['torque'] = d
sevcon['heatsink_temp'] = d
sevcon['maximum_motor_speed'] = d
sevcon['velocity'] = d
general['allOK'] = d
general['stateOfCharge'] = 1
general['opMode'] = "Testing"
general['sevconConnected'] = 0
general['chargerConnected'] = 0
general['bms1Connected'] = 0
general['bms2Connected'] = 0
general['bms3Connected'] = 0
def generate_random_values():
arr = bms1['voltages']
res = [0.0001 * random.randrange(0, 100, 1) for i in range(12)]
new_list = [x1 - x2 for (x1, x2) in zip(arr, res)]
bms1['voltages'] = new_list
arr = bms2['voltages']
res = [0.0001 * random.randrange(0, 100, 1) for i in range(12)]
new_list = [x1 - x2 for (x1, x2) in zip(arr, res)]
bms2['voltages'] = new_list
arr = bms3['voltages']
res = [0.0001 * random.randrange(0, 100, 1) for i in range(12)]
new_list = [x1 - x2 for (x1, x2) in zip(arr, res)]
bms3['voltages'] = new_list
arr = bms1['temperatures']
res = [0.01 * random.randrange(-50, 100, 1) for i in range(2)]
new_list = [x1 + x2 for (x1, x2) in zip(arr, res)]
bms1['temperatures'] = new_list
arr = bms2['temperatures']
res = [0.01 * random.randrange(-50, 100, 1) for i in range(2)]
new_list = [x1 + x2 for (x1, x2) in zip(arr, res)]
bms2['temperatures'] = new_list
arr = bms3['temperatures']
res = [0.01 * random.randrange(-50, 100, 1) for i in range(2)]
new_list = [x1 + x2 for (x1, x2) in zip(arr, res)]
bms3['temperatures'] = new_list
sevcon['throttle_value'] = random.randrange(-6, 6, 1)
sevcon['velocity'] = random.randrange(20, 160, 1)
def save_to_db(dataBaseName='ISC/Rasp-main/Database.db'):
conn = sqlite3.connect(dataBaseName)
c = conn.cursor()
general['timestamp'] = time.time()
general['date'] = time.strftime("%Y-%m-%d")
general['time'] = time.strftime("%H:%M:%S")
c.execute("INSERT INTO general (timestamp, date, time, allOK, stateOfCharge,"
"sevconConnected, chargerConnected, bms1Connected,"
"bms2Connected, bms3Connected) VALUES (?,?,?,?,?,?,?,?,?,?)",
(general['timestamp'], general['date'], general['time'],
general['allOK'], general['stateOfCharge'], general['sevconConnected'],
general['chargerConnected'], general['bms1Connected'],
general['bms2Connected'], general['bms3Connected']))
c.execute("INSERT INTO charger (timestamp, date, time, voltage, current,"
" flag0, flag1, flag2, flag3, flag4) VALUES (?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
charger['voltage'], charger['current'],
charger['flags'][0], charger['flags'][1], charger['flags'][2],
charger['flags'][3], charger['flags'][4]))
c.execute("INSERT INTO sevcon (timestamp, date, time, target_id, id, "
"target_iq, iq, battery_voltage, battery_current, line_contactor, "
"capacitor_voltage, throttle_value, target_torque, torque, "
"heatsink_temp, maximum_motor_speed, velocity "
") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
(general['timestamp'], general['date'], general['time'],
sevcon['target_id'], sevcon['id'], sevcon['target_iq'],
sevcon['iq'], sevcon['battery_voltage'], sevcon['battery_current'],
sevcon['line_contactor'], sevcon['capacitor_voltage'],
sevcon['throttle_value'], sevcon['target_torque'],
sevcon['torque'], sevcon['heatsink_temp'],
sevcon['maximum_motor_speed'], sevcon['velocity']))
c.execute("INSERT INTO bms1 (timestamp, date, time, voltage1, voltage2, "
"voltage3, voltage4, voltage5, voltage6, voltage7, voltage8, "
"voltage9, voltage10, voltage11, voltage12, temperature1, "
"temperature2) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
bms1['voltages'][0], bms1['voltages'][1], bms1['voltages'][2],
bms1['voltages'][3], bms1['voltages'][4], bms1['voltages'][5],
bms1['voltages'][6], bms1['voltages'][7], bms1['voltages'][8],
bms1['voltages'][9], bms1['voltages'][10], bms1['voltages'][11],
bms1['temperatures'][0], bms1['temperatures'][1]))
c.execute("INSERT INTO bms2 (timestamp, date, time, voltage1, voltage2, "
"voltage3, voltage4, voltage5, voltage6, voltage7, voltage8, "
"voltage9, voltage10, voltage11, voltage12, temperature1, "
"temperature2) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
bms2['voltages'][0], bms2['voltages'][1], bms2['voltages'][2],
bms2['voltages'][3], bms2['voltages'][4], bms2['voltages'][5],
bms2['voltages'][6], bms2['voltages'][7], bms2['voltages'][8],
bms2['voltages'][9], bms2['voltages'][10], bms2['voltages'][11],
bms2['temperatures'][0], bms2['temperatures'][1]))
c.execute("INSERT INTO bms3 (timestamp, date, time, voltage1, voltage2, "
"voltage3, voltage4, voltage5, voltage6, voltage7, voltage8, "
"voltage9, voltage10, voltage11, voltage12, temperature1, "
"temperature2) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", (
general['timestamp'], general['date'], general['time'],
bms3['voltages'][0], bms3['voltages'][1], bms3['voltages'][2],
bms3['voltages'][3], bms3['voltages'][4], bms3['voltages'][5],
bms3['voltages'][6], bms3['voltages'][7], bms3['voltages'][8],
bms3['voltages'][9], bms3['voltages'][10], bms3['voltages'][11],
bms3['temperatures'][0], bms3['temperatures'][1]))
conn.commit()
conn.close()
if __name__ == '__main__':
init_dict()
for i in tqdm(range(300)):
time.sleep(1)
generate_random_values()
save_to_db()
| true | true |
f73b782d9623bde7cf1340322e29f574a1f998f3 | 10,738 | py | Python | userbot/plugins/chatinfo.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | userbot/plugins/chatinfo.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | userbot/plugins/chatinfo.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Credits to Hitalo-Sama and FTG Modules
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetHistoryRequest, CheckChatInviteRequest, GetFullChatRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (ChannelInvalidError, ChannelPrivateError, ChannelPublicGroupNaError, InviteHashEmptyError, InviteHashExpiredError, InviteHashInvalidError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("**🔬Analizando il gruppo/canale...**")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`C'è stato un errore inaspettato.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Gruppo/Canale non valido.`")
return None
except ChannelPrivateError:
await event.edit("`Questo è un canale/gruppo privato o sono Bannato da esso.`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Canale o SuperGruppo non esistente.`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(chat_obj_info, "broadcast") else False
chat_type = "Canale" if broadcast else "Gruppo"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[0].first_name is not None else "Account Eliminato"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and type(msg_info.messages[0].action) is MessageActionChannelMigrateFrom and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Sconosciuto"
location = str(e)
#this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Si</b>" if hasattr(chat_obj_info, "megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Si</b>" if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Si</b>" if hasattr(chat_obj_info, "restricted") and chat_obj_info.restricted else "No"
verified = "<b>Si</b>" if hasattr(chat_obj_info, "verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(creator_username) if creator_username else None
#end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None, works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b></b>\n"
caption += f"<b>🖊️Info della Chat</b>\n• 🆔: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"<b>• 📰Titolo del {chat_type} :</b> {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"<b>• 🥇Titolo Originario:</b> {former_title}\n"
if username is not None:
caption += f"<b>• 🏷Tipo di {chat_type}:</b> Publico\n"
caption += f"<b>• 🖇Link:<b> {username}\n"
else:
caption += f"<b>• 🏷Tipo di {chat_type} :</b> Privato\n"
if creator_username is not None:
caption += f"<b>• 👑Creatore:</b> {creator_username}\n"
elif creator_valid:
caption += f"<b>• 👑Creatore:</b> <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"<b>• 🕐Creato:</b> <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"<b>• 🕐Creato:</b> <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"<b>• 📡Data Center ID:</b> {dc_id}\n"
if exp_count is not None:
chat_level = int((1+sqrt(1+7*exp_count/14))/2)
caption += f"<b>• 🏁Livello del {chat_type}:</b> <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"<b>• ✉️Messaggi Visibili:</b> <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"<b>• 📨Messaggi inviati:</b> <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"<b>• 📨Messaggi Inviati:</b> <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"<b>• 👥Membri:</b> <code>{members}</code>\n"
if admins is not None:
caption += f"<b>• ⚜Amministratori:</b> <code>{admins}</code>\n"
if bots_list:
caption += f"<b>• 🤖Bot</b>: <code>{bots}</code>\n"
if members_online:
caption += f"<b>• 👥💡Membri Online al Momento:</b> <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"<b>• 👥🚨Utenti Limitati:</b> <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"<b>• 👥🚷Utenti Bannati:</b> <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"<b>• 🎨Sticker del {chat_type}:</b> <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"<b>• 🐌Modalità Lenta:</b> {slowmode}"
if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"<b>🏆SuperGruppo:<b> {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"<b>🚨Limitato:<b> {restricted}\n"
if chat_obj_info.restricted:
caption += f"<b>> 💻Piattaforma:<b> {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"<b>> 📝Motivo:<b> {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"<b>> 📖Testo:<b> {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "<b>⚠️Scam:<b> <b>Si</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"✅<b>Verificato da Telegram:<b> {verified}\n\n"
if description:
caption += f"<b>💬Descrizione:<b> \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
})
| 52.637255 | 206 | 0.668653 |
from datetime import datetime
from emoji import emojize
from math import sqrt
from telethon.tl.functions.channels import GetFullChannelRequest, GetParticipantsRequest
from telethon.tl.functions.messages import GetHistoryRequest, CheckChatInviteRequest, GetFullChatRequest
from telethon.tl.types import MessageActionChannelMigrateFrom, ChannelParticipantsAdmins
from telethon.errors import (ChannelInvalidError, ChannelPrivateError, ChannelPublicGroupNaError, InviteHashEmptyError, InviteHashExpiredError, InviteHashInvalidError)
from telethon.utils import get_input_location
from userbot import CMD_HELP
from userbot.events import register
@register(pattern=".chatinfo(?: |$)(.*)", outgoing=True)
async def info(event):
await event.edit("**🔬Analizando il gruppo/canale...**")
chat = await get_chatinfo(event)
caption = await fetch_info(chat, event)
try:
await event.edit(caption, parse_mode="html")
except Exception as e:
print("Exception:", e)
await event.edit("`C'è stato un errore inaspettato.`")
return
async def get_chatinfo(event):
chat = event.pattern_match.group(1)
chat_info = None
if chat:
try:
chat = int(chat)
except ValueError:
pass
if not chat:
if event.reply_to_msg_id:
replied_msg = await event.get_reply_message()
if replied_msg.fwd_from and replied_msg.fwd_from.channel_id is not None:
chat = replied_msg.fwd_from.channel_id
else:
chat = event.chat_id
try:
chat_info = await event.client(GetFullChatRequest(chat))
except:
try:
chat_info = await event.client(GetFullChannelRequest(chat))
except ChannelInvalidError:
await event.edit("`Gruppo/Canale non valido.`")
return None
except ChannelPrivateError:
await event.edit("`Questo è un canale/gruppo privato o sono Bannato da esso.`")
return None
except ChannelPublicGroupNaError:
await event.edit("`Canale o SuperGruppo non esistente.`")
return None
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return chat_info
async def fetch_info(chat, event):
# chat.chats is a list so we use get_entity() to avoid IndexError
chat_obj_info = await event.client.get_entity(chat.full_chat.id)
broadcast = chat_obj_info.broadcast if hasattr(chat_obj_info, "broadcast") else False
chat_type = "Canale" if broadcast else "Gruppo"
chat_title = chat_obj_info.title
warn_emoji = emojize(":warning:")
try:
msg_info = await event.client(GetHistoryRequest(peer=chat_obj_info.id, offset_id=0, offset_date=datetime(2010, 1, 1),
add_offset=-1, limit=1, max_id=0, min_id=0, hash=0))
except Exception as e:
msg_info = None
print("Exception:", e)
# No chance for IndexError as it checks for msg_info.messages first
first_msg_valid = True if msg_info and msg_info.messages and msg_info.messages[0].id == 1 else False
# Same for msg_info.users
creator_valid = True if first_msg_valid and msg_info.users else False
creator_id = msg_info.users[0].id if creator_valid else None
creator_firstname = msg_info.users[0].first_name if creator_valid and msg_info.users[0].first_name is not None else "Account Eliminato"
creator_username = msg_info.users[0].username if creator_valid and msg_info.users[0].username is not None else None
created = msg_info.messages[0].date if first_msg_valid else None
former_title = msg_info.messages[0].action.title if first_msg_valid and type(msg_info.messages[0].action) is MessageActionChannelMigrateFrom and msg_info.messages[0].action.title != chat_title else None
try:
dc_id, location = get_input_location(chat.full_chat.chat_photo)
except Exception as e:
dc_id = "Sconosciuto"
location = str(e)
#this is some spaghetti I need to change
description = chat.full_chat.about
members = chat.full_chat.participants_count if hasattr(chat.full_chat, "participants_count") else chat_obj_info.participants_count
admins = chat.full_chat.admins_count if hasattr(chat.full_chat, "admins_count") else None
banned_users = chat.full_chat.kicked_count if hasattr(chat.full_chat, "kicked_count") else None
restrcited_users = chat.full_chat.banned_count if hasattr(chat.full_chat, "banned_count") else None
members_online = chat.full_chat.online_count if hasattr(chat.full_chat, "online_count") else 0
group_stickers = chat.full_chat.stickerset.title if hasattr(chat.full_chat, "stickerset") and chat.full_chat.stickerset else None
messages_viewable = msg_info.count if msg_info else None
messages_sent = chat.full_chat.read_inbox_max_id if hasattr(chat.full_chat, "read_inbox_max_id") else None
messages_sent_alt = chat.full_chat.read_outbox_max_id if hasattr(chat.full_chat, "read_outbox_max_id") else None
exp_count = chat.full_chat.pts if hasattr(chat.full_chat, "pts") else None
username = chat_obj_info.username if hasattr(chat_obj_info, "username") else None
bots_list = chat.full_chat.bot_info # this is a list
bots = 0
supergroup = "<b>Si</b>" if hasattr(chat_obj_info, "megagroup") and chat_obj_info.megagroup else "No"
slowmode = "<b>Si</b>" if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else "No"
slowmode_time = chat.full_chat.slowmode_seconds if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled else None
restricted = "<b>Si</b>" if hasattr(chat_obj_info, "restricted") and chat_obj_info.restricted else "No"
verified = "<b>Si</b>" if hasattr(chat_obj_info, "verified") and chat_obj_info.verified else "No"
username = "@{}".format(username) if username else None
creator_username = "@{}".format(creator_username) if creator_username else None
#end of spaghetti block
if admins is None:
# use this alternative way if chat.full_chat.admins_count is None, works even without being an admin
try:
participants_admins = await event.client(GetParticipantsRequest(channel=chat.full_chat.id, filter=ChannelParticipantsAdmins(),
offset=0, limit=0, hash=0))
admins = participants_admins.count if participants_admins else None
except Exception as e:
print("Exception:", e)
if bots_list:
for bot in bots_list:
bots += 1
caption = "<b></b>\n"
caption += f"<b>🖊️Info della Chat</b>\n• 🆔: <code>{chat_obj_info.id}</code>\n"
if chat_title is not None:
caption += f"<b>• 📰Titolo del {chat_type} :</b> {chat_title}\n"
if former_title is not None: # Meant is the very first title
caption += f"<b>• 🥇Titolo Originario:</b> {former_title}\n"
if username is not None:
caption += f"<b>• 🏷Tipo di {chat_type}:</b> Publico\n"
caption += f"<b>• 🖇Link:<b> {username}\n"
else:
caption += f"<b>• 🏷Tipo di {chat_type} :</b> Privato\n"
if creator_username is not None:
caption += f"<b>• 👑Creatore:</b> {creator_username}\n"
elif creator_valid:
caption += f"<b>• 👑Creatore:</b> <a href=\"tg://user?id={creator_id}\">{creator_firstname}</a>\n"
if created is not None:
caption += f"<b>• 🕐Creato:</b> <code>{created.date().strftime('%b %d, %Y')} - {created.time()}</code>\n"
else:
caption += f"<b>• 🕐Creato:</b> <code>{chat_obj_info.date.date().strftime('%b %d, %Y')} - {chat_obj_info.date.time()}</code> {warn_emoji}\n"
caption += f"<b>• 📡Data Center ID:</b> {dc_id}\n"
if exp_count is not None:
chat_level = int((1+sqrt(1+7*exp_count/14))/2)
caption += f"<b>• 🏁Livello del {chat_type}:</b> <code>{chat_level}</code>\n"
if messages_viewable is not None:
caption += f"<b>• ✉️Messaggi Visibili:</b> <code>{messages_viewable}</code>\n"
if messages_sent:
caption += f"<b>• 📨Messaggi inviati:</b> <code>{messages_sent}</code>\n"
elif messages_sent_alt:
caption += f"<b>• 📨Messaggi Inviati:</b> <code>{messages_sent_alt}</code> {warn_emoji}\n"
if members is not None:
caption += f"<b>• 👥Membri:</b> <code>{members}</code>\n"
if admins is not None:
caption += f"<b>• ⚜Amministratori:</b> <code>{admins}</code>\n"
if bots_list:
caption += f"<b>• 🤖Bot</b>: <code>{bots}</code>\n"
if members_online:
caption += f"<b>• 👥💡Membri Online al Momento:</b> <code>{members_online}</code>\n"
if restrcited_users is not None:
caption += f"<b>• 👥🚨Utenti Limitati:</b> <code>{restrcited_users}</code>\n"
if banned_users is not None:
caption += f"<b>• 👥🚷Utenti Bannati:</b> <code>{banned_users}</code>\n"
if group_stickers is not None:
caption += f"<b>• 🎨Sticker del {chat_type}:</b> <a href=\"t.me/addstickers/{chat.full_chat.stickerset.short_name}\">{group_stickers}</a>\n"
caption += "\n"
if not broadcast:
caption += f"<b>• 🐌Modalità Lenta:</b> {slowmode}"
if hasattr(chat_obj_info, "slowmode_enabled") and chat_obj_info.slowmode_enabled:
caption += f", <code>{slowmode_time}s</code>\n\n"
else:
caption += "\n\n"
if not broadcast:
caption += f"<b>🏆SuperGruppo:<b> {supergroup}\n\n"
if hasattr(chat_obj_info, "restricted"):
caption += f"<b>🚨Limitato:<b> {restricted}\n"
if chat_obj_info.restricted:
caption += f"<b>> 💻Piattaforma:<b> {chat_obj_info.restriction_reason[0].platform}\n"
caption += f"<b>> 📝Motivo:<b> {chat_obj_info.restriction_reason[0].reason}\n"
caption += f"<b>> 📖Testo:<b> {chat_obj_info.restriction_reason[0].text}\n\n"
else:
caption += "\n"
if hasattr(chat_obj_info, "scam") and chat_obj_info.scam:
caption += "<b>⚠️Scam:<b> <b>Si</b>\n\n"
if hasattr(chat_obj_info, "verified"):
caption += f"✅<b>Verificato da Telegram:<b> {verified}\n\n"
if description:
caption += f"<b>💬Descrizione:<b> \n<code>{description}</code>\n"
return caption
CMD_HELP.update({
"chatinfo":
".chatinfo [optional: <reply/tag/chat id/invite link>]\
\nUsage: Gets info of a chat. Some info might be limited due to missing permissions."
})
| true | true |
f73b7842ef9014e2395b7d6bb610683e124791d0 | 49,387 | py | Python | pandas/tests/internals/test_internals.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | 1 | 2019-05-04T03:42:25.000Z | 2019-05-04T03:42:25.000Z | pandas/tests/internals/test_internals.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/internals/test_internals.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
from datetime import date, datetime
from distutils.version import LooseVersion
import itertools
import operator
import re
import sys
import numpy as np
import pytest
from pandas._libs.internals import BlockPlacement
from pandas.compat import lrange
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,
SparseArray)
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.internals import BlockManager, SingleBlockManager, make_block
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal, randn)
# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
# PY361 gates assertions whose slice canonicalization differs on older Pythons.
PY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1')
@pytest.fixture
def mgr():
    """BlockManager fixture spanning many dtypes (see ``create_mgr`` syntax)."""
    return create_mgr(
        'a: f8; b: object; c: f8; d: object; e: f8;'
        'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
        'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
def assert_block_equal(left, right):
    """Assert two internal Blocks match in values, dtype and manager locations."""
    tm.assert_numpy_array_equal(left.values, right.values)
    assert left.dtype == right.dtype
    assert isinstance(left.mgr_locs, BlockPlacement)
    assert isinstance(right.mgr_locs, BlockPlacement)
    tm.assert_numpy_array_equal(left.mgr_locs.as_array,
                                right.mgr_locs.as_array)
def get_numeric_mat(shape):
    """Return an integer array of the given shape whose entries equal their
    axis-0 index; all trailing axes simply repeat that value."""
    base = np.arange(shape[0])
    column = base.reshape((shape[0],) + (1,) * (len(shape) - 1))
    return np.broadcast_to(column, shape).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
    """
    Build a single internal Block of the requested dtype for testing.

    Supported typestr:

        * float, f8, f4, f2
        * int, i8, i4, i2, i1
        * uint, u8, u4, u2, u1
        * complex, c16, c8
        * bool
        * object, string, O
        * datetime, dt, M8[ns], M8[ns, tz]
        * timedelta, td, m8[ns]
        * sparse (SparseArray with fill_value=0.0)
        * sparse_na (SparseArray with fill_value=np.nan)
        * category, category2

    Parameters
    ----------
    typestr : str
        One of the dtype identifiers above.
    placement : slice or array-like
        Manager locations the block occupies; wrapped in BlockPlacement.
    item_shape : tuple, optional
        Shape of each item; defaults to (N,).
    num_offset : int, default 0
        Offset added to the generated numeric values so sibling blocks differ.
    """
    placement = BlockPlacement(placement)
    num_items = len(placement)
    if item_shape is None:
        item_shape = (N, )
    shape = (num_items, ) + item_shape
    mat = get_numeric_mat(shape)
    if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',
                   'uint', 'u8', 'u4', 'u2', 'u1'):
        values = mat.astype(typestr) + num_offset
    elif typestr in ('complex', 'c16', 'c8'):
        values = 1.j * (mat.astype(typestr) + num_offset)
    elif typestr in ('object', 'string', 'O'):
        values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
                            shape)
    elif typestr in ('b', 'bool', ):
        values = np.ones(shape, dtype=np.bool_)
    elif typestr in ('datetime', 'dt', 'M8[ns]'):
        values = (mat * 1e9).astype('M8[ns]')
    elif typestr.startswith('M8[ns'):
        # datetime with tz; the tz name is embedded in the typestr itself
        m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
        assert m is not None, "incompatible typestr -> {0}".format(typestr)
        tz = m.groups()[0]
        assert num_items == 1, "must have only 1 num items for a tz-aware"
        values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
    elif typestr in ('timedelta', 'td', 'm8[ns]'):
        values = (mat * 1).astype('m8[ns]')
    elif typestr in ('category', ):
        values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
    elif typestr in ('category2', ):
        values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'
                              ])
    elif typestr in ('sparse', 'sparse_na'):
        # FIXME: doesn't support num_rows != 10
        assert shape[-1] == 10
        assert all(s == 1 for s in shape[:-1])
        if typestr.endswith('_na'):
            fill_value = np.nan
        else:
            fill_value = 0.0
        values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
                              4, 5, fill_value, 6], fill_value=fill_value)
        # shift the dense values in place so blocks with different
        # num_offset are distinguishable
        arr = values.sp_values.view()
        arr += (num_offset - 1)
    else:
        raise ValueError('Unsupported typestr: "%s"' % typestr)
    return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
    """Construct a SingleBlockManager (Series internals) of the given dtype."""
    if num_rows is None:
        num_rows = N
    return SingleBlockManager(
        create_block(typestr, placement=slice(0, num_rows), item_shape=()),
        np.arange(num_rows))
def create_mgr(descr, item_shape=None):
    """
    Construct BlockManager from string description.

    String description syntax looks similar to np.matrix initializer.  It looks
    like this::

        a,b,c: f8; d,e,f: i8

    Rules are rather simple:

    * see list of supported datatypes in `create_block` method
    * components are semicolon-separated
    * each component is `NAME,NAME,NAME: DTYPE_ID`
    * whitespace around colons & semicolons are removed
    * components with same DTYPE_ID are combined into single block
    * to force multiple blocks with same dtype, use '-SUFFIX'::

        'a:f8-1; b:f8-2; c:f8-foobar'
    """
    if item_shape is None:
        item_shape = (N, )
    offset = 0
    mgr_items = []
    # maps dtype string -> manager locations; insertion order preserved so
    # num_offset increases deterministically per block
    block_placements = OrderedDict()
    for d in descr.split(';'):
        d = d.strip()
        if not len(d):
            continue
        names, blockstr = d.partition(':')[::2]
        blockstr = blockstr.strip()
        names = names.strip().split(',')
        mgr_items.extend(names)
        placement = list(np.arange(len(names)) + offset)
        try:
            block_placements[blockstr].extend(placement)
        except KeyError:
            block_placements[blockstr] = placement
        offset += len(names)
    mgr_items = Index(mgr_items)
    blocks = []
    num_offset = 0
    for blockstr, placement in block_placements.items():
        typestr = blockstr.split('-')[0]
        blocks.append(create_block(typestr,
                                   placement,
                                   item_shape=item_shape,
                                   num_offset=num_offset, ))
        num_offset += len(placement)
    return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
                        [mgr_items] + [np.arange(n) for n in item_shape])
class TestBlock:
    """Unit tests for individual internal Block objects."""
    def setup_method(self, method):
        # self.fblock = get_float_ex()  # a,c,e
        # self.cblock = get_complex_ex() #
        # self.oblock = get_obj_ex()
        # self.bool_block = get_bool_ex()
        # self.int_block = get_int_ex()
        self.fblock = create_block('float', [0, 2, 4])
        self.cblock = create_block('complex', [7])
        self.oblock = create_block('object', [1, 3])
        self.bool_block = create_block('bool', [5])
        self.int_block = create_block('int', [6])
    def test_constructor(self):
        int32block = create_block('i4', [0])
        assert int32block.dtype == np.int32
    def test_pickle(self):
        def _check(blk):
            assert_block_equal(tm.round_trip_pickle(blk), blk)
        _check(self.fblock)
        _check(self.cblock)
        _check(self.oblock)
        _check(self.bool_block)
    def test_mgr_locs(self):
        assert isinstance(self.fblock.mgr_locs, BlockPlacement)
        tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
                                    np.array([0, 2, 4], dtype=np.int64))
    def test_attrs(self):
        assert self.fblock.shape == self.fblock.values.shape
        assert self.fblock.dtype == self.fblock.values.dtype
        assert len(self.fblock) == len(self.fblock.values)
    def test_merge(self):
        avals = randn(2, 10)
        bvals = randn(2, 10)
        ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
        ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
        bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))
        merged = ablock.merge(bblock)
        tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
                                    np.array([0, 1, 2, 3], dtype=np.int64))
        tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
        tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
        # TODO: merge with mixed type?
    def test_copy(self):
        cop = self.fblock.copy()
        assert cop is not self.fblock
        assert_block_equal(self.fblock, cop)
    def test_reindex_index(self):
        pass
    def test_reindex_cast(self):
        pass
    def test_insert(self):
        pass
    def test_delete(self):
        newb = self.fblock.copy()
        newb.delete(0)
        assert isinstance(newb.mgr_locs, BlockPlacement)
        tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
                                    np.array([2, 4], dtype=np.int64))
        assert (newb.values[0] == 1).all()
        newb = self.fblock.copy()
        newb.delete(1)
        assert isinstance(newb.mgr_locs, BlockPlacement)
        tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
                                    np.array([0, 4], dtype=np.int64))
        assert (newb.values[1] == 2).all()
        newb = self.fblock.copy()
        newb.delete(2)
        tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
                                    np.array([0, 2], dtype=np.int64))
        assert (newb.values[1] == 1).all()
        newb = self.fblock.copy()
        with pytest.raises(Exception):
            newb.delete(3)
    def test_make_block_same_class(self):
        # issue 19431
        block = create_block('M8[ns, US/Eastern]', [3])
        with tm.assert_produces_warning(DeprecationWarning,
                                        check_stacklevel=False):
            block.make_block_same_class(block.values,
                                        dtype=block.values.dtype)
class TestDatetimeBlock:
    """Coercion behavior of datetime64 Blocks."""
    def test_try_coerce_arg(self):
        block = create_block('datetime', [0])
        # coerce None
        none_coerced = block._try_coerce_args(block.values, None)[1]
        assert pd.Timestamp(none_coerced) is pd.NaT
        # coerce different types of date objects
        vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
                date(2010, 10, 10))
        for val in vals:
            coerced = block._try_coerce_args(block.values, val)[1]
            assert np.int64 == type(coerced)
            assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
class TestBlockManager:
    """Behavioral tests for BlockManager: construction, get/set, dtype
    coercion, consolidation, pickling, reindexing and equality."""
    def test_constructor_corner(self):
        pass
    def test_attrs(self):
        mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
        assert mgr.nblocks == 2
        assert len(mgr) == 6
    def test_is_mixed_dtype(self):
        assert not create_mgr('a,b:f8').is_mixed_type
        assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type
        assert create_mgr('a,b:f8; c,d: f4').is_mixed_type
        assert create_mgr('a,b:f8; c,d: object').is_mixed_type
    def test_duplicate_ref_loc_failure(self):
        tmp_mgr = create_mgr('a:bool; a: f8')
        axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
        blocks[0].mgr_locs = np.array([0])
        blocks[1].mgr_locs = np.array([0])
        # test trying to create block manager with overlapping ref locs
        with pytest.raises(AssertionError):
            BlockManager(blocks, axes)
        blocks[0].mgr_locs = np.array([0])
        blocks[1].mgr_locs = np.array([1])
        mgr = BlockManager(blocks, axes)
        mgr.iget(1)
    def test_contains(self, mgr):
        assert 'a' in mgr
        assert 'baz' not in mgr
    def test_pickle(self, mgr):
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
        # share ref_items
        # assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items
        # GH2431
        assert hasattr(mgr2, "_is_consolidated")
        assert hasattr(mgr2, "_known_consolidated")
        # reset to False on load
        assert not mgr2._is_consolidated
        assert not mgr2._known_consolidated
    def test_non_unique_pickle(self):
        mgr = create_mgr('a,a,a:f8')
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
        mgr = create_mgr('a: f8; a: i8')
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
    def test_categorical_block_pickle(self):
        mgr = create_mgr('a: category')
        mgr2 = tm.round_trip_pickle(mgr)
        assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
        smgr = create_single_mgr('category')
        smgr2 = tm.round_trip_pickle(smgr)
        assert_series_equal(Series(smgr), Series(smgr2))
    def test_get(self):
        cols = Index(list('abc'))
        values = np.random.rand(3, 3)
        block = make_block(values=values.copy(), placement=np.arange(3))
        mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
        assert_almost_equal(mgr.get('a', fastpath=False), values[0])
        assert_almost_equal(mgr.get('b', fastpath=False), values[1])
        assert_almost_equal(mgr.get('c', fastpath=False), values[2])
        assert_almost_equal(mgr.get('a').internal_values(), values[0])
        assert_almost_equal(mgr.get('b').internal_values(), values[1])
        assert_almost_equal(mgr.get('c').internal_values(), values[2])
    def test_set(self):
        mgr = create_mgr('a,b,c: int', item_shape=(3, ))
        mgr.set('d', np.array(['foo'] * 3))
        mgr.set('b', np.array(['bar'] * 3))
        tm.assert_numpy_array_equal(mgr.get('a').internal_values(),
                                    np.array([0] * 3))
        tm.assert_numpy_array_equal(mgr.get('b').internal_values(),
                                    np.array(['bar'] * 3, dtype=np.object_))
        tm.assert_numpy_array_equal(mgr.get('c').internal_values(),
                                    np.array([2] * 3))
        tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
                                    np.array(['foo'] * 3, dtype=np.object_))
    def test_set_change_dtype(self, mgr):
        mgr.set('baz', np.zeros(N, dtype=bool))
        mgr.set('baz', np.repeat('foo', N))
        assert mgr.get('baz').dtype == np.object_
        mgr2 = mgr.consolidate()
        mgr2.set('baz', np.repeat('foo', N))
        assert mgr2.get('baz').dtype == np.object_
        mgr2.set('quux', randn(N).astype(int))
        assert mgr2.get('quux').dtype == np.int_
        mgr2.set('quux', randn(N))
        assert mgr2.get('quux').dtype == np.float_
    def test_set_change_dtype_slice(self):  # GH8850
        cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
                                       ])
        df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
        df['2nd'] = df['2nd'] * 2.0
        blocks = df._to_dict_of_blocks()
        assert sorted(blocks.keys()) == ['float64', 'int64']
        assert_frame_equal(blocks['float64'], DataFrame(
            [[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
        assert_frame_equal(blocks['int64'], DataFrame(
            [[3], [6]], columns=cols[2:]))
    def test_copy(self, mgr):
        cp = mgr.copy(deep=False)
        for blk, cp_blk in zip(mgr.blocks, cp.blocks):
            # view assertion
            assert cp_blk.equals(blk)
            if isinstance(blk.values, np.ndarray):
                assert cp_blk.values.base is blk.values.base
            else:
                # DatetimeTZBlock has DatetimeIndex values
                assert cp_blk.values._data.base is blk.values._data.base
        cp = mgr.copy(deep=True)
        for blk, cp_blk in zip(mgr.blocks, cp.blocks):
            # copy assertion we either have a None for a base or in case of
            # some blocks it is an array (e.g. datetimetz), but was copied
            assert cp_blk.equals(blk)
            if not isinstance(cp_blk.values, np.ndarray):
                assert cp_blk.values._data.base is not blk.values._data.base
            else:
                assert cp_blk.values.base is None and blk.values.base is None
    def test_sparse(self):
        mgr = create_mgr('a: sparse-1; b: sparse-2')
        # what to test here?
        assert mgr.as_array().dtype == np.float64
    def test_sparse_mixed(self):
        mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
        assert len(mgr.blocks) == 3
        assert isinstance(mgr, BlockManager)
        # what to test here?
    def test_as_array_float(self):
        mgr = create_mgr('c: f4; d: f2; e: f8')
        assert mgr.as_array().dtype == np.float64
        mgr = create_mgr('c: f4; d: f2')
        assert mgr.as_array().dtype == np.float32
    def test_as_array_int_bool(self):
        mgr = create_mgr('a: bool-1; b: bool-2')
        assert mgr.as_array().dtype == np.bool_
        mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
        assert mgr.as_array().dtype == np.int64
        mgr = create_mgr('c: i4; d: i2; e: u1')
        assert mgr.as_array().dtype == np.int32
    def test_as_array_datetime(self):
        mgr = create_mgr('h: datetime-1; g: datetime-2')
        assert mgr.as_array().dtype == 'M8[ns]'
    def test_as_array_datetime_tz(self):
        mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
        assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'
        assert mgr.get('g').dtype == 'datetime64[ns, CET]'
        assert mgr.as_array().dtype == 'object'
    def test_astype(self):
        # coerce all
        mgr = create_mgr('c: f4; d: f2; e: f8')
        for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
            t = np.dtype(t)
            tmgr = mgr.astype(t)
            assert tmgr.get('c').dtype.type == t
            assert tmgr.get('d').dtype.type == t
            assert tmgr.get('e').dtype.type == t
        # mixed
        mgr = create_mgr('a,b: object; c: bool; d: datetime;'
                         'e: f4; f: f2; g: f8')
        for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
            t = np.dtype(t)
            tmgr = mgr.astype(t, errors='ignore')
            assert tmgr.get('c').dtype.type == t
            assert tmgr.get('e').dtype.type == t
            assert tmgr.get('f').dtype.type == t
            assert tmgr.get('g').dtype.type == t
            assert tmgr.get('a').dtype.type == np.object_
            assert tmgr.get('b').dtype.type == np.object_
            if t != np.int64:
                assert tmgr.get('d').dtype.type == np.datetime64
            else:
                assert tmgr.get('d').dtype.type == t
    def test_convert(self):
        def _compare(old_mgr, new_mgr):
            """ compare the blocks, numeric compare ==, object don't """
            old_blocks = set(old_mgr.blocks)
            new_blocks = set(new_mgr.blocks)
            assert len(old_blocks) == len(new_blocks)
            # compare non-numeric
            for b in old_blocks:
                found = False
                for nb in new_blocks:
                    if (b.values == nb.values).all():
                        found = True
                        break
                assert found
            for b in new_blocks:
                found = False
                for ob in old_blocks:
                    if (b.values == ob.values).all():
                        found = True
                        break
                assert found
        # noops
        mgr = create_mgr('f: i8; g: f8')
        new_mgr = mgr.convert()
        _compare(mgr, new_mgr)
        mgr = create_mgr('a, b: object; f: i8; g: f8')
        new_mgr = mgr.convert()
        _compare(mgr, new_mgr)
        # convert
        mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
        mgr.set('a', np.array(['1'] * N, dtype=np.object_))
        mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
        mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
        new_mgr = mgr.convert(numeric=True)
        assert new_mgr.get('a').dtype == np.int64
        assert new_mgr.get('b').dtype == np.float64
        assert new_mgr.get('foo').dtype == np.object_
        assert new_mgr.get('f').dtype == np.int64
        assert new_mgr.get('g').dtype == np.float64
        mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
                         'i: i8; g: f8; h: f2')
        mgr.set('a', np.array(['1'] * N, dtype=np.object_))
        mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
        mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
        new_mgr = mgr.convert(numeric=True)
        assert new_mgr.get('a').dtype == np.int64
        assert new_mgr.get('b').dtype == np.float64
        assert new_mgr.get('foo').dtype == np.object_
        assert new_mgr.get('f').dtype == np.int32
        assert new_mgr.get('bool').dtype == np.bool_
        assert new_mgr.get('dt').dtype.type, np.datetime64
        assert new_mgr.get('i').dtype == np.int64
        assert new_mgr.get('g').dtype == np.float64
        assert new_mgr.get('h').dtype == np.float16
    def test_interleave(self):
        # self
        for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
                      'm8[ns]']:
            mgr = create_mgr('a: {0}'.format(dtype))
            assert mgr.as_array().dtype == dtype
            mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
            assert mgr.as_array().dtype == dtype
        # will be converted according the actual dtype of the underlying
        mgr = create_mgr('a: category')
        assert mgr.as_array().dtype == 'i8'
        mgr = create_mgr('a: category; b: category')
        assert mgr.as_array().dtype == 'i8'
        mgr = create_mgr('a: category; b: category2')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: category2')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: category2; b: category2')
        assert mgr.as_array().dtype == 'object'
        # combinations
        mgr = create_mgr('a: f8')
        assert mgr.as_array().dtype == 'f8'
        mgr = create_mgr('a: f8; b: i8')
        assert mgr.as_array().dtype == 'f8'
        mgr = create_mgr('a: f4; b: i8')
        assert mgr.as_array().dtype == 'f8'
        mgr = create_mgr('a: f4; b: i8; d: object')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: bool; b: i8')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: complex')
        assert mgr.as_array().dtype == 'complex'
        mgr = create_mgr('a: f8; b: category')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: category')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: bool')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: i8')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: m8[ns]; b: bool')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: m8[ns]; b: i8')
        assert mgr.as_array().dtype == 'object'
        mgr = create_mgr('a: M8[ns]; b: m8[ns]')
        assert mgr.as_array().dtype == 'object'
    def test_interleave_non_unique_cols(self):
        df = DataFrame([
            [pd.Timestamp('20130101'), 3.5],
            [pd.Timestamp('20130102'), 4.5]],
            columns=['x', 'x'],
            index=[1, 2])
        df_unique = df.copy()
        df_unique.columns = ['x', 'y']
        assert df_unique.values.shape == df.values.shape
        tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
        tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
    def test_consolidate(self):
        pass
    def test_consolidate_ordering_issues(self, mgr):
        mgr.set('f', randn(N))
        mgr.set('d', randn(N))
        mgr.set('b', randn(N))
        mgr.set('g', randn(N))
        mgr.set('h', randn(N))
        # we have datetime/tz blocks in mgr
        cons = mgr.consolidate()
        assert cons.nblocks == 4
        cons = mgr.consolidate().get_numeric_data()
        assert cons.nblocks == 1
        assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)
        tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
                                    np.arange(len(cons.items), dtype=np.int64))
    def test_reindex_index(self):
        pass
    def test_reindex_items(self):
        # mgr is not consolidated, f8 & f8-2 blocks
        mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
                         'f: bool; g: f8-2')
        reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
        assert reindexed.nblocks == 2
        tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
        assert_almost_equal(
            mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
        assert_almost_equal(
            mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))
        assert_almost_equal(
            mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))
        assert_almost_equal(
            mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))
        assert_almost_equal(
            mgr.get('g').internal_values(),
            reindexed.get('g').internal_values())
        assert_almost_equal(
            mgr.get('c').internal_values(),
            reindexed.get('c').internal_values())
        assert_almost_equal(
            mgr.get('a').internal_values(),
            reindexed.get('a').internal_values())
        assert_almost_equal(
            mgr.get('d').internal_values(),
            reindexed.get('d').internal_values())
    def test_multiindex_xs(self):
        mgr = create_mgr('a,b,c: f8; d,e,f: i8')
        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
                                                                  'three']],
                           codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                  [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                           names=['first', 'second'])
        mgr.set_axis(1, index)
        result = mgr.xs('bar', axis=1)
        assert result.shape == (6, 2)
        assert result.axes[1][0] == ('bar', 'one')
        assert result.axes[1][1] == ('bar', 'two')
    def test_get_numeric_data(self):
        mgr = create_mgr('int: int; float: float; complex: complex;'
                         'str: object; bool: bool; obj: object; dt: datetime',
                         item_shape=(3, ))
        mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))
        numeric = mgr.get_numeric_data()
        tm.assert_index_equal(numeric.items,
                              pd.Index(['int', 'float', 'complex', 'bool']))
        assert_almost_equal(
            mgr.get('float', fastpath=False), numeric.get('float',
                                                          fastpath=False))
        assert_almost_equal(
            mgr.get('float').internal_values(),
            numeric.get('float').internal_values())
        # Check sharing
        numeric.set('float', np.array([100., 200., 300.]))
        assert_almost_equal(
            mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
        assert_almost_equal(
            mgr.get('float').internal_values(), np.array([100., 200., 300.]))
        numeric2 = mgr.get_numeric_data(copy=True)
        tm.assert_index_equal(numeric.items,
                              pd.Index(['int', 'float', 'complex', 'bool']))
        numeric2.set('float', np.array([1000., 2000., 3000.]))
        assert_almost_equal(
            mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
        assert_almost_equal(
            mgr.get('float').internal_values(), np.array([100., 200., 300.]))
    def test_get_bool_data(self):
        mgr = create_mgr('int: int; float: float; complex: complex;'
                         'str: object; bool: bool; obj: object; dt: datetime',
                         item_shape=(3, ))
        mgr.set('obj', np.array([True, False, True], dtype=np.object_))
        bools = mgr.get_bool_data()
        tm.assert_index_equal(bools.items, pd.Index(['bool']))
        assert_almost_equal(mgr.get('bool', fastpath=False),
                            bools.get('bool', fastpath=False))
        assert_almost_equal(
            mgr.get('bool').internal_values(),
            bools.get('bool').internal_values())
        bools.set('bool', np.array([True, False, True]))
        tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
                                    np.array([True, False, True]))
        tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
                                    np.array([True, False, True]))
        # Check sharing
        bools2 = mgr.get_bool_data(copy=True)
        bools2.set('bool', np.array([False, True, False]))
        tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
                                    np.array([True, False, True]))
        tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
                                    np.array([True, False, True]))
    def test_unicode_repr_doesnt_raise(self):
        repr(create_mgr('b,\u05d0: object'))
    def test_missing_unicode_key(self):
        df = DataFrame({"a": [1]})
        try:
            df.loc[:, "\u05d0"]  # should not raise UnicodeEncodeError
        except KeyError:
            pass  # this is the expected exception
    def test_equals(self):
        # unique items
        bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
        bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
        assert bm1.equals(bm2)
        bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
        bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
        assert bm1.equals(bm2)
    def test_equals_block_order_different_dtypes(self):
        # GH 9330
        mgr_strings = [
            "a:i8;b:f8",  # basic case
            "a:i8;b:f8;c:c8;d:b",  # many types
            "a:i8;e:dt;f:td;g:string",  # more types
            "a:i8;b:category;c:category2;d:category2",  # categories
            "c:sparse;d:sparse_na;b:f8",  # sparse
        ]
        for mgr_string in mgr_strings:
            bm = create_mgr(mgr_string)
            block_perms = itertools.permutations(bm.blocks)
            for bm_perm in block_perms:
                bm_this = BlockManager(bm_perm, bm.axes)
                assert bm.equals(bm_this)
                assert bm_this.equals(bm)
    def test_single_mgr_ctor(self):
        mgr = create_single_mgr('f8', num_rows=5)
        assert mgr.as_array().tolist() == [0., 1., 2., 3., 4.]
    def test_validate_bool_args(self):
        invalid_values = [1, "True", [1, 2, 3], 5.0]
        bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
        for value in invalid_values:
            with pytest.raises(ValueError):
                bm1.replace_list([1], [2], inplace=value)
class TestIndexing:
    """Data-driven tests comparing BlockManager indexing to np.ndarray."""
    # Nosetests-style data-driven tests.
    #
    # This test applies different indexing routines to block managers and
    # compares the outcome to the result of same operations on np.ndarray.
    #
    # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
    # and are disabled.
    MANAGERS = [
        create_single_mgr('f8', N),
        create_single_mgr('i8', N),
        # 2-dim
        create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
        create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
        create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
        create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
        # 3-dim
        create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
        create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
        create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
        create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
    ]
    # MANAGERS = [MANAGERS[6]]
    def test_get_slice(self):
        def assert_slice_ok(mgr, axis, slobj):
            mat = mgr.as_array()
            # we maybe using an ndarray to test slicing and
            # might not be the full length of the axis
            if isinstance(slobj, np.ndarray):
                ax = mgr.axes[axis]
                if len(ax) and len(slobj) and len(slobj) != len(ax):
                    slobj = np.concatenate([slobj, np.zeros(
                        len(ax) - len(slobj), dtype=bool)])
            sliced = mgr.get_slice(slobj, axis=axis)
            mat_slobj = (slice(None), ) * axis + (slobj, )
            tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_array(),
                                        check_dtype=False)
            tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                # slice
                assert_slice_ok(mgr, ax, slice(None))
                assert_slice_ok(mgr, ax, slice(3))
                assert_slice_ok(mgr, ax, slice(100))
                assert_slice_ok(mgr, ax, slice(1, 4))
                assert_slice_ok(mgr, ax, slice(3, 0, -2))
                # boolean mask
                assert_slice_ok(
                    mgr, ax, np.array([], dtype=np.bool_))
                assert_slice_ok(
                    mgr, ax,
                    np.ones(mgr.shape[ax], dtype=np.bool_))
                assert_slice_ok(
                    mgr, ax,
                    np.zeros(mgr.shape[ax], dtype=np.bool_))
                if mgr.shape[ax] >= 3:
                    assert_slice_ok(
                        mgr, ax,
                        np.arange(mgr.shape[ax]) % 3 == 0)
                    assert_slice_ok(
                        mgr, ax, np.array(
                            [True, True, False], dtype=np.bool_))
                # fancy indexer
                assert_slice_ok(mgr, ax, [])
                assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))
                if mgr.shape[ax] >= 3:
                    assert_slice_ok(mgr, ax, [0, 1, 2])
                    assert_slice_ok(mgr, ax, [-1, -2, -3])
    def test_take(self):
        def assert_take_ok(mgr, axis, indexer):
            mat = mgr.as_array()
            taken = mgr.take(indexer, axis)
            tm.assert_numpy_array_equal(np.take(mat, indexer, axis),
                                        taken.as_array(), check_dtype=False)
            tm.assert_index_equal(mgr.axes[axis].take(indexer),
                                  taken.axes[axis])
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                # take/fancy indexer
                assert_take_ok(mgr, ax, [])
                assert_take_ok(mgr, ax, [0, 0, 0])
                assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))
                if mgr.shape[ax] >= 3:
                    assert_take_ok(mgr, ax, [0, 1, 2])
                    assert_take_ok(mgr, ax, [-1, -2, -3])
    def test_reindex_axis(self):
        def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
            mat = mgr.as_array()
            indexer = mgr.axes[axis].get_indexer_for(new_labels)
            reindexed = mgr.reindex_axis(new_labels, axis,
                                         fill_value=fill_value)
            tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,
                                                      fill_value=fill_value),
                                        reindexed.as_array(),
                                        check_dtype=False)
            tm.assert_index_equal(reindexed.axes[axis], new_labels)
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                for fill_value in (None, np.nan, 100.):
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index([]), fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax, mgr.axes[ax],
                        fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        mgr.axes[ax][[0, 0, 0]], fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']), fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index(['foo', mgr.axes[ax][0], 'baz']),
                        fill_value)
                    if mgr.shape[ax] >= 3:
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][:-3], fill_value)
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][-3::-1], fill_value)
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
    def test_reindex_indexer(self):
        def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
                                         fill_value):
            mat = mgr.as_array()
            reindexed_mat = algos.take_nd(mat, indexer, axis,
                                          fill_value=fill_value)
            reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
                                            fill_value=fill_value)
            tm.assert_numpy_array_equal(reindexed_mat,
                                        reindexed.as_array(),
                                        check_dtype=False)
            tm.assert_index_equal(reindexed.axes[axis], new_labels)
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                for fill_value in (None, np.nan, 100.):
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index([]), [], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo'] * mgr.shape[ax]),
                        np.arange(mgr.shape[ax]), fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
                        fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax, mgr.axes[ax],
                        np.arange(mgr.shape[ax])[::-1], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']),
                        [0, 0, 0], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']),
                        [-1, 0, -1], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', mgr.axes[ax][0], 'baz']),
                        [-1, -1, -1], fill_value)
                    if mgr.shape[ax] >= 3:
                        assert_reindex_indexer_is_ok(
                            mgr, ax,
                            pd.Index(['foo', 'bar', 'baz']),
                            [0, 1, 2], fill_value)
    # test_get_slice(slice_like, axis)
    # take(indexer, axis)
    # reindex_axis(new_labels, axis)
    # reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement:
    def test_slice_len(self):
        # BlockPlacement length is the number of positions the slice covers.
        assert len(BlockPlacement(slice(0, 4))) == 4
        assert len(BlockPlacement(slice(0, 4, 2))) == 2
        assert len(BlockPlacement(slice(0, 3, 2))) == 2
        assert len(BlockPlacement(slice(0, 1, 2))) == 1
        assert len(BlockPlacement(slice(1, 0, -1))) == 1
    def test_zero_step_raises(self):
        # a slice step of zero is never a valid placement
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 1, 0))
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 2, 0))
    def test_unbounded_slice_raises(self):
        # slices whose extent depends on the container length are rejected
        def assert_unbounded_slice_error(slc):
            with pytest.raises(ValueError, match="unbounded slice"):
                BlockPlacement(slc)
        assert_unbounded_slice_error(slice(None, None))
        assert_unbounded_slice_error(slice(10, None))
        assert_unbounded_slice_error(slice(None, None, -1))
        assert_unbounded_slice_error(slice(None, 10, -1))
        # These are "unbounded" because negative index will change depending on
        # container shape.
        assert_unbounded_slice_error(slice(-1, None))
        assert_unbounded_slice_error(slice(None, -1))
        assert_unbounded_slice_error(slice(-1, -1))
        assert_unbounded_slice_error(slice(-1, None, -1))
        assert_unbounded_slice_error(slice(None, -1, -1))
        assert_unbounded_slice_error(slice(-1, -1, -1))
    def test_not_slice_like_slices(self):
        # empty or negative-extent slices are not considered slice-like
        def assert_not_slice_like(slc):
            assert not BlockPlacement(slc).is_slice_like
        assert_not_slice_like(slice(0, 0))
        assert_not_slice_like(slice(100, 0))
        assert_not_slice_like(slice(100, 100, -1))
        assert_not_slice_like(slice(0, 100, -1))
        assert not BlockPlacement(slice(0, 0)).is_slice_like
        assert not BlockPlacement(slice(100, 100)).is_slice_like
    def test_array_to_slice_conversion(self):
        # arithmetic-progression arrays collapse back into an equivalent slice
        def assert_as_slice_equals(arr, slc):
            assert BlockPlacement(arr).as_slice == slc
        assert_as_slice_equals([0], slice(0, 1, 1))
        assert_as_slice_equals([100], slice(100, 101, 1))
        assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
        assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
        assert_as_slice_equals([0, 100], slice(0, 200, 100))
        assert_as_slice_equals([2, 1], slice(2, 0, -1))
        if not PY361:
            assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
            assert_as_slice_equals([100, 0], slice(100, None, -100))
    def test_not_slice_like_arrays(self):
        # arrays containing negatives or non-monotonic runs stay arrays
        def assert_not_slice_like(arr):
            assert not BlockPlacement(arr).is_slice_like
        assert_not_slice_like([])
        assert_not_slice_like([-1])
        assert_not_slice_like([-1, -2, -3])
        assert_not_slice_like([-10])
        assert_not_slice_like([-1])
        assert_not_slice_like([-1, 0, 1, 2])
        assert_not_slice_like([-2, 0, 2, 4])
        assert_not_slice_like([1, 0, -1])
        assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]
assert list(BlockPlacement(slice(0, 0))) == []
assert list(BlockPlacement(slice(3, 0))) == []
if not PY361:
assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array,
np.asarray(asarray, dtype=np.int64))
assert_as_array_equals(slice(0, 3), [0, 1, 2])
assert_as_array_equals(slice(0, 0), [])
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
if not PY361:
assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
assert bpl.add(1).as_slice == slice(1, 6, 1)
assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
assert list(BlockPlacement(val).add(inc)) == result
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
with pytest.raises(ValueError):
BlockPlacement(slice(1, 4)).add(-10)
with pytest.raises(ValueError):
BlockPlacement([1, 2, 4]).add(-10)
if not PY361:
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
with pytest.raises(ValueError):
BlockPlacement(slice(2, None, -1)).add(-1)
class DummyElement:
    """Minimal numpy-interoperable scalar stand-in used by the binop tests.

    Wraps a raw value plus a dtype and mimics just enough of the ndarray
    surface (``__array__``, ``astype``, ``view``, ``any``) for pandas to
    coerce it.
    """

    def __init__(self, value, dtype):
        self.value = value
        self.dtype = np.dtype(dtype)

    def __array__(self):
        # Lets np.asarray()/pandas coerce the element to an ndarray scalar.
        return np.array(self.value, dtype=self.dtype)

    def __str__(self):
        return "DummyElement({}, {})".format(self.value, self.dtype)

    def __repr__(self):
        return self.__str__()

    def astype(self, dtype, copy=False):
        # Mutates in place and returns self; the ``copy`` flag is accepted
        # for signature compatibility but ignored.
        self.dtype = dtype
        return self

    def view(self, dtype):
        return type(self)(self.value.view(dtype), dtype)

    def any(self, axis=None):
        # Truthiness of the wrapped scalar; ``axis`` is ignored.
        return bool(self.value)
class TestCanHoldElement:
    """Exercise DataFrame binops against scalar-like DummyElement values."""

    @pytest.mark.parametrize('value, dtype', [
        (1, 'i8'),
        (1.0, 'f8'),
        (2**63, 'f8'),
        (1j, 'complex128'),
        (2**63, 'complex128'),
        (True, 'bool'),
        (np.timedelta64(20, 'ns'), '<m8[ns]'),
        (np.datetime64(20, 'ns'), '<M8[ns]'),
    ])
    @pytest.mark.parametrize('op', [
        operator.add,
        operator.sub,
        operator.mul,
        operator.truediv,
        operator.mod,
        operator.pow,
    ], ids=lambda x: x.__name__)
    def test_binop_other(self, op, value, dtype):
        # op/dtype pairs with no sensible semantics at all are skipped.
        skip = {(operator.add, 'bool'),
                (operator.sub, 'bool'),
                (operator.mul, 'bool'),
                (operator.truediv, 'bool'),
                (operator.mod, 'i8'),
                (operator.mod, 'complex128'),
                (operator.pow, 'bool')}
        if (op, dtype) in skip:
            pytest.skip("Invalid combination {},{}".format(op, dtype))

        elem = DummyElement(value, dtype)
        df = pd.DataFrame({"A": [elem.value, elem.value]}, dtype=elem.dtype)

        # op/dtype pairs that are well-formed but must raise TypeError.
        invalid = {(operator.pow, '<M8[ns]'),
                   (operator.mod, '<M8[ns]'),
                   (operator.truediv, '<M8[ns]'),
                   (operator.mul, '<M8[ns]'),
                   (operator.add, '<M8[ns]'),
                   (operator.pow, '<m8[ns]'),
                   (operator.mul, '<m8[ns]')}

        if (op, dtype) in invalid:
            with pytest.raises(TypeError):
                op(df, elem.value)
        else:
            # FIXME: Since dispatching to Series, this test no longer
            # asserts anything meaningful
            result = op(df, elem.value).dtypes
            expected = op(df, value).dtypes
            assert_series_equal(result, expected)
@pytest.mark.parametrize('typestr, holder', [
    ('category', Categorical),
    ('M8[ns]', DatetimeArray),
    ('M8[ns, US/Central]', DatetimeArray),
    ('m8[ns]', TimedeltaArray),
    ('sparse', SparseArray),
])
def test_holder(typestr, holder):
    """Each block flavor should expose the matching array type as _holder."""
    assert create_block(typestr, [1])._holder is holder
def test_deprecated_fastpath():
    # GH#19265: passing ``fastpath=`` to make_block is deprecated and
    # must emit a DeprecationWarning.
    with tm.assert_produces_warning(DeprecationWarning,
                                    check_stacklevel=False):
        make_block(np.random.rand(3, 3), placement=np.arange(3),
                   fastpath=True)
def test_validate_ndim():
values = np.array([1.0, 2.0])
placement = slice(2)
msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
with pytest.raises(ValueError, match=msg):
make_block(values, placement, ndim=2)
def test_block_shape():
    """Numeric and categorical series reindexed alike get equal mgr_locs."""
    idx = pd.Index([0, 1, 2, 3, 4])
    numeric = pd.Series([1, 2, 3]).reindex(idx)
    categorical = pd.Series(pd.Categorical([1, 2, 3])).reindex(idx)

    left = numeric._data.blocks[0].mgr_locs.indexer
    right = categorical._data.blocks[0].mgr_locs.indexer
    assert left == right
def test_make_block_no_pandas_array():
    # https://github.com/pandas-dev/pandas/pull/24866
    # A PandasArray input must always produce a plain integer block,
    # never an extension block.
    arr = pd.array([1, 2])

    def check(block):
        assert block.is_integer is True
        assert block.is_extension is False

    # PandasArray, no dtype
    check(make_block(arr, slice(len(arr))))
    # PandasArray, PandasDtype
    check(make_block(arr, slice(len(arr)), dtype=arr.dtype))
    # ndarray, PandasDtype
    check(make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype))
| 37.585236 | 79 | 0.547978 | from collections import OrderedDict
from datetime import date, datetime
from distutils.version import LooseVersion
import itertools
import operator
import re
import sys
import numpy as np
import pytest
from pandas._libs.internals import BlockPlacement
from pandas.compat import lrange
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,
SparseArray)
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.internals import BlockManager, SingleBlockManager, make_block
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal, randn)
PY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1')
@pytest.fixture
def mgr():
return create_mgr(
'a: f8; b: object; c: f8; d: object; e: f8;'
'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
def assert_block_equal(left, right):
tm.assert_numpy_array_equal(left.values, right.values)
assert left.dtype == right.dtype
assert isinstance(left.mgr_locs, BlockPlacement)
assert isinstance(right.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(left.mgr_locs.as_array,
right.mgr_locs.as_array)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(x=arr, shape=shape, strides=(
arr.itemsize, ) + (0, ) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N, )
shape = (num_items, ) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('b', 'bool', ):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr.startswith('M8[ns'):
m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category', ):
values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
elif typestr in ('category2', ):
values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'
])
elif typestr in ('sparse', 'sparse_na'):
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
if item_shape is None:
item_shape = (N, )
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
if not len(d):
continue
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr,
placement,
item_shape=item_shape,
num_offset=num_offset, ))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
class TestBlock:
def setup_method(self, method):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
assert int32block.dtype == np.int32
def test_pickle(self):
def _check(blk):
assert_block_equal(tm.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
assert isinstance(self.fblock.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
np.array([0, 2, 4], dtype=np.int64))
def test_attrs(self):
assert self.fblock.shape == self.fblock.values.shape
assert self.fblock.dtype == self.fblock.values.dtype
assert len(self.fblock) == len(self.fblock.values)
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
np.array([0, 1, 2, 3], dtype=np.int64))
tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
assert cop is not self.fblock
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
assert isinstance(newb.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([2, 4], dtype=np.int64))
assert (newb.values[0] == 1).all()
newb = self.fblock.copy()
newb.delete(1)
assert isinstance(newb.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 4], dtype=np.int64))
assert (newb.values[1] == 2).all()
newb = self.fblock.copy()
newb.delete(2)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 2], dtype=np.int64))
assert (newb.values[1] == 1).all()
newb = self.fblock.copy()
with pytest.raises(Exception):
newb.delete(3)
def test_make_block_same_class(self):
# issue 19431
block = create_block('M8[ns, US/Eastern]', [3])
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
block.make_block_same_class(block.values,
dtype=block.values.dtype)
class TestDatetimeBlock:
def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
none_coerced = block._try_coerce_args(block.values, None)[1]
assert pd.Timestamp(none_coerced) is pd.NaT
# coerce different types of date bojects
vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(block.values, val)[1]
assert np.int64 == type(coerced)
assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
class TestBlockManager:
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
assert mgr.nblocks == 2
assert len(mgr) == 6
def test_is_mixed_dtype(self):
assert not create_mgr('a,b:f8').is_mixed_type
assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type
assert create_mgr('a,b:f8; c,d: f4').is_mixed_type
assert create_mgr('a,b:f8; c,d: object').is_mixed_type
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
with pytest.raises(AssertionError):
BlockManager(blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self, mgr):
assert 'a' in mgr
assert 'baz' not in mgr
def test_pickle(self, mgr):
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
# share ref_items
# assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items
# GH2431
assert hasattr(mgr2, "_is_consolidated")
assert hasattr(mgr2, "_known_consolidated")
# reset to False on load
assert not mgr2._is_consolidated
assert not mgr2._known_consolidated
def test_non_unique_pickle(self):
mgr = create_mgr('a,a,a:f8')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr('a: category')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr('category')
smgr2 = tm.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
def test_get(self):
cols = Index(list('abc'))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(), placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
assert_almost_equal(mgr.get('a').internal_values(), values[0])
assert_almost_equal(mgr.get('b').internal_values(), values[1])
assert_almost_equal(mgr.get('c').internal_values(), values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3, ))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
tm.assert_numpy_array_equal(mgr.get('a').internal_values(),
np.array([0] * 3))
tm.assert_numpy_array_equal(mgr.get('b').internal_values(),
np.array(['bar'] * 3, dtype=np.object_))
tm.assert_numpy_array_equal(mgr.get('c').internal_values(),
np.array([2] * 3))
tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
np.array(['foo'] * 3, dtype=np.object_))
def test_set_change_dtype(self, mgr):
mgr.set('baz', np.zeros(N, dtype=bool))
mgr.set('baz', np.repeat('foo', N))
assert mgr.get('baz').dtype == np.object_
mgr2 = mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
assert mgr2.get('baz').dtype == np.object_
mgr2.set('quux', randn(N).astype(int))
assert mgr2.get('quux').dtype == np.int_
mgr2.set('quux', randn(N))
assert mgr2.get('quux').dtype == np.float_
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
])
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
blocks = df._to_dict_of_blocks()
assert sorted(blocks.keys()) == ['float64', 'int64']
assert_frame_equal(blocks['float64'], DataFrame(
[[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
assert_frame_equal(blocks['int64'], DataFrame(
[[3], [6]], columns=cols[2:]))
def test_copy(self, mgr):
cp = mgr.copy(deep=False)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# view assertion
assert cp_blk.equals(blk)
if isinstance(blk.values, np.ndarray):
assert cp_blk.values.base is blk.values.base
else:
# DatetimeTZBlock has DatetimeIndex values
assert cp_blk.values._data.base is blk.values._data.base
cp = mgr.copy(deep=True)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
assert cp_blk.equals(blk)
if not isinstance(cp_blk.values, np.ndarray):
assert cp_blk.values._data.base is not blk.values._data.base
else:
assert cp_blk.values.base is None and blk.values.base is None
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
assert mgr.as_array().dtype == np.float64
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
assert len(mgr.blocks) == 3
assert isinstance(mgr, BlockManager)
# what to test here?
def test_as_array_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
assert mgr.as_array().dtype == np.float64
mgr = create_mgr('c: f4; d: f2')
assert mgr.as_array().dtype == np.float32
def test_as_array_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
assert mgr.as_array().dtype == np.bool_
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
assert mgr.as_array().dtype == np.int64
mgr = create_mgr('c: i4; d: i2; e: u1')
assert mgr.as_array().dtype == np.int32
def test_as_array_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
assert mgr.as_array().dtype == 'M8[ns]'
def test_as_array_datetime_tz(self):
mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'
assert mgr.get('g').dtype == 'datetime64[ns, CET]'
assert mgr.as_array().dtype == 'object'
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
assert tmgr.get('c').dtype.type == t
assert tmgr.get('d').dtype.type == t
assert tmgr.get('e').dtype.type == t
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
'e: f4; f: f2; g: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, errors='ignore')
assert tmgr.get('c').dtype.type == t
assert tmgr.get('e').dtype.type == t
assert tmgr.get('f').dtype.type == t
assert tmgr.get('g').dtype.type == t
assert tmgr.get('a').dtype.type == np.object_
assert tmgr.get('b').dtype.type == np.object_
if t != np.int64:
assert tmgr.get('d').dtype.type == np.datetime64
else:
assert tmgr.get('d').dtype.type == t
def test_convert(self):
def _compare(old_mgr, new_mgr):
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
assert len(old_blocks) == len(new_blocks)
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
assert found
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
assert found
# noops
mgr = create_mgr('f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
mgr = create_mgr('a, b: object; f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
# convert
mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get('a').dtype == np.int64
assert new_mgr.get('b').dtype == np.float64
assert new_mgr.get('foo').dtype == np.object_
assert new_mgr.get('f').dtype == np.int64
assert new_mgr.get('g').dtype == np.float64
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get('a').dtype == np.int64
assert new_mgr.get('b').dtype == np.float64
assert new_mgr.get('foo').dtype == np.object_
assert new_mgr.get('f').dtype == np.int32
assert new_mgr.get('bool').dtype == np.bool_
assert new_mgr.get('dt').dtype.type, np.datetime64
assert new_mgr.get('i').dtype == np.int64
assert new_mgr.get('g').dtype == np.float64
assert new_mgr.get('h').dtype == np.float16
def test_interleave(self):
# self
for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
'm8[ns]']:
mgr = create_mgr('a: {0}'.format(dtype))
assert mgr.as_array().dtype == dtype
mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
assert mgr.as_array().dtype == dtype
# will be converted according the actual dtype of the underlying
mgr = create_mgr('a: category')
assert mgr.as_array().dtype == 'i8'
mgr = create_mgr('a: category; b: category')
assert mgr.as_array().dtype == 'i8'
mgr = create_mgr('a: category; b: category2')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: category2')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: category2; b: category2')
assert mgr.as_array().dtype == 'object'
# combinations
mgr = create_mgr('a: f8')
assert mgr.as_array().dtype == 'f8'
mgr = create_mgr('a: f8; b: i8')
assert mgr.as_array().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8')
assert mgr.as_array().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8; d: object')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: bool; b: i8')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: complex')
assert mgr.as_array().dtype == 'complex'
mgr = create_mgr('a: f8; b: category')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: category')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: bool')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: i8')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: bool')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: i8')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: m8[ns]')
assert mgr.as_array().dtype == 'object'
def test_interleave_non_unique_cols(self):
df = DataFrame([
[pd.Timestamp('20130101'), 3.5],
[pd.Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
assert df_unique.values.shape == df.values.shape
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self, mgr):
mgr.set('f', randn(N))
mgr.set('d', randn(N))
mgr.set('b', randn(N))
mgr.set('g', randn(N))
mgr.set('h', randn(N))
# we have datetime/tz blocks in mgr
cons = mgr.consolidate()
assert cons.nblocks == 4
cons = mgr.consolidate().get_numeric_data()
assert cons.nblocks == 1
assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
np.arange(len(cons.items), dtype=np.int64))
def test_reindex_index(self):
pass
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
'f: bool; g: f8-2')
reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
assert reindexed.nblocks == 2
tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
assert_almost_equal(
mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
assert_almost_equal(
mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))
assert_almost_equal(
mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))
assert_almost_equal(
mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))
assert_almost_equal(
mgr.get('g').internal_values(),
reindexed.get('g').internal_values())
assert_almost_equal(
mgr.get('c').internal_values(),
reindexed.get('c').internal_values())
assert_almost_equal(
mgr.get('a').internal_values(),
reindexed.get('a').internal_values())
assert_almost_equal(
mgr.get('d').internal_values(),
reindexed.get('d').internal_values())
def test_multiindex_xs(self):
mgr = create_mgr('a,b,c: f8; d,e,f: i8')
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
mgr.set_axis(1, index)
result = mgr.xs('bar', axis=1)
assert result.shape == (6, 2)
assert result.axes[1][0] == ('bar', 'one')
assert result.axes[1][1] == ('bar', 'two')
def test_get_numeric_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
assert_almost_equal(
mgr.get('float', fastpath=False), numeric.get('float',
fastpath=False))
assert_almost_equal(
mgr.get('float').internal_values(),
numeric.get('float').internal_values())
# Check sharing
numeric.set('float', np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
numeric2.set('float', np.array([1000., 2000., 3000.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
def test_get_bool_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
tm.assert_index_equal(bools.items, pd.Index(['bool']))
assert_almost_equal(mgr.get('bool', fastpath=False),
bools.get('bool', fastpath=False))
assert_almost_equal(
mgr.get('bool').internal_values(),
bools.get('bool').internal_values())
bools.set('bool', np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set('bool', np.array([False, True, False]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr('b,\u05d0: object'))
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
def test_equals_block_order_different_dtypes(self):
# GH 9330
mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
assert bm.equals(bm_this)
assert bm_this.equals(bm)
def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
assert mgr.as_array().tolist() == [0., 1., 2., 3., 4.]
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
for value in invalid_values:
with pytest.raises(ValueError):
bm1.replace_list([1], [2], inplace=value)
class TestIndexing:
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
# NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
# and are disabled.
MANAGERS = [
create_single_mgr('f8', N),
create_single_mgr('i8', N),
# 2-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
# 3-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
]
# MANAGERS = [MANAGERS[6]]
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
mat = mgr.as_array()
# we maybe using an ndarray to test slicing and
# might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate([slobj, np.zeros(
len(ax) - len(slobj), dtype=bool)])
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None), ) * axis + (slobj, )
tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_array(),
check_dtype=False)
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
assert_slice_ok(mgr, ax, slice(None))
assert_slice_ok(mgr, ax, slice(3))
assert_slice_ok(mgr, ax, slice(100))
assert_slice_ok(mgr, ax, slice(1, 4))
assert_slice_ok(mgr, ax, slice(3, 0, -2))
# boolean mask
assert_slice_ok(
mgr, ax, np.array([], dtype=np.bool_))
assert_slice_ok(
mgr, ax,
np.ones(mgr.shape[ax], dtype=np.bool_))
assert_slice_ok(
mgr, ax,
np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
assert_slice_ok(
mgr, ax,
np.arange(mgr.shape[ax]) % 3 == 0)
assert_slice_ok(
mgr, ax, np.array(
[True, True, False], dtype=np.bool_))
# fancy indexer
assert_slice_ok(mgr, ax, [])
assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
assert_slice_ok(mgr, ax, [0, 1, 2])
assert_slice_ok(mgr, ax, [-1, -2, -3])
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
mat = mgr.as_array()
taken = mgr.take(indexer, axis)
tm.assert_numpy_array_equal(np.take(mat, indexer, axis),
taken.as_array(), check_dtype=False)
tm.assert_index_equal(mgr.axes[axis].take(indexer),
taken.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
assert_take_ok(mgr, ax, [])
assert_take_ok(mgr, ax, [0, 0, 0])
assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
assert_take_ok(mgr, ax, [0, 1, 2])
assert_take_ok(mgr, ax, [-1, -2, -3])
    def test_reindex_axis(self):
        """reindex_axis must agree with algos.take_nd on the dense array
        (honouring fill_value) and install the new labels on the axis."""
        def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
            mat = mgr.as_array()
            indexer = mgr.axes[axis].get_indexer_for(new_labels)
            reindexed = mgr.reindex_axis(new_labels, axis,
                                         fill_value=fill_value)
            tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,
                                                      fill_value=fill_value),
                                        reindexed.as_array(),
                                        check_dtype=False)
            tm.assert_index_equal(reindexed.axes[axis], new_labels)
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                for fill_value in (None, np.nan, 100.):
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index([]), fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax, mgr.axes[ax],
                        fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        mgr.axes[ax][[0, 0, 0]], fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']), fill_value)
                    assert_reindex_axis_is_ok(
                        mgr, ax,
                        pd.Index(['foo', mgr.axes[ax][0], 'baz']),
                        fill_value)
                    if mgr.shape[ax] >= 3:
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][:-3], fill_value)
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][-3::-1], fill_value)
                        assert_reindex_axis_is_ok(
                            mgr, ax,
                            mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
    def test_reindex_indexer(self):
        """reindex_indexer (explicit labels + positional indexer, -1 marks
        missing) must agree with algos.take_nd on the dense array."""
        def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
                                         fill_value):
            mat = mgr.as_array()
            reindexed_mat = algos.take_nd(mat, indexer, axis,
                                          fill_value=fill_value)
            reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
                                            fill_value=fill_value)
            tm.assert_numpy_array_equal(reindexed_mat,
                                        reindexed.as_array(),
                                        check_dtype=False)
            tm.assert_index_equal(reindexed.axes[axis], new_labels)
        for mgr in self.MANAGERS:
            for ax in range(mgr.ndim):
                for fill_value in (None, np.nan, 100.):
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index([]), [], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo'] * mgr.shape[ax]),
                        np.arange(mgr.shape[ax]), fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
                        fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax, mgr.axes[ax],
                        np.arange(mgr.shape[ax])[::-1], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']),
                        [0, 0, 0], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', 'bar', 'baz']),
                        [-1, 0, -1], fill_value)
                    assert_reindex_indexer_is_ok(
                        mgr, ax,
                        pd.Index(['foo', mgr.axes[ax][0], 'baz']),
                        [-1, -1, -1], fill_value)
                    if mgr.shape[ax] >= 3:
                        assert_reindex_indexer_is_ok(
                            mgr, ax,
                            pd.Index(['foo', 'bar', 'baz']),
                            [0, 1, 2], fill_value)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
# reindex_axis(new_labels, axis)
# reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement:
    """Unit tests for BlockPlacement: length, constructor validation,
    slice/array round-tripping and integer-offset arithmetic (``add``).

    NOTE(review): the ``if not PY361`` guards skip assertions that rely on
    a slice-canonicalization behavior that changed in Python 3.6.1 —
    presumably BlockPlacement normalizes such slices differently there;
    confirm against the PY361 definition at the top of the file.
    """
    def test_slice_len(self):
        assert len(BlockPlacement(slice(0, 4))) == 4
        assert len(BlockPlacement(slice(0, 4, 2))) == 2
        assert len(BlockPlacement(slice(0, 3, 2))) == 2
        assert len(BlockPlacement(slice(0, 1, 2))) == 1
        assert len(BlockPlacement(slice(1, 0, -1))) == 1
    def test_zero_step_raises(self):
        # a zero step can never describe a placement
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 1, 0))
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 2, 0))
    def test_unbounded_slice_raises(self):
        def assert_unbounded_slice_error(slc):
            with pytest.raises(ValueError, match="unbounded slice"):
                BlockPlacement(slc)
        assert_unbounded_slice_error(slice(None, None))
        assert_unbounded_slice_error(slice(10, None))
        assert_unbounded_slice_error(slice(None, None, -1))
        assert_unbounded_slice_error(slice(None, 10, -1))
        # These are "unbounded" because negative index will change depending on
        # container shape.
        assert_unbounded_slice_error(slice(-1, None))
        assert_unbounded_slice_error(slice(None, -1))
        assert_unbounded_slice_error(slice(-1, -1))
        assert_unbounded_slice_error(slice(-1, None, -1))
        assert_unbounded_slice_error(slice(None, -1, -1))
        assert_unbounded_slice_error(slice(-1, -1, -1))
    def test_not_slice_like_slices(self):
        # empty or "backwards" slices are not representable as slices
        def assert_not_slice_like(slc):
            assert not BlockPlacement(slc).is_slice_like
        assert_not_slice_like(slice(0, 0))
        assert_not_slice_like(slice(100, 0))
        assert_not_slice_like(slice(100, 100, -1))
        assert_not_slice_like(slice(0, 100, -1))
        assert not BlockPlacement(slice(0, 0)).is_slice_like
        assert not BlockPlacement(slice(100, 100)).is_slice_like
    def test_array_to_slice_conversion(self):
        # evenly spaced arrays should collapse back to a slice
        def assert_as_slice_equals(arr, slc):
            assert BlockPlacement(arr).as_slice == slc
        assert_as_slice_equals([0], slice(0, 1, 1))
        assert_as_slice_equals([100], slice(100, 101, 1))
        assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
        assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
        assert_as_slice_equals([0, 100], slice(0, 200, 100))
        assert_as_slice_equals([2, 1], slice(2, 0, -1))
        if not PY361:
            assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
            assert_as_slice_equals([100, 0], slice(100, None, -100))
    def test_not_slice_like_arrays(self):
        # negative or unevenly spaced entries cannot collapse to a slice
        def assert_not_slice_like(arr):
            assert not BlockPlacement(arr).is_slice_like
        assert_not_slice_like([])
        assert_not_slice_like([-1])
        assert_not_slice_like([-1, -2, -3])
        assert_not_slice_like([-10])
        assert_not_slice_like([-1])
        assert_not_slice_like([-1, 0, 1, 2])
        assert_not_slice_like([-2, 0, 2, 4])
        assert_not_slice_like([1, 0, -1])
        assert_not_slice_like([1, 1, 1])
    def test_slice_iter(self):
        assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]
        assert list(BlockPlacement(slice(0, 0))) == []
        assert list(BlockPlacement(slice(3, 0))) == []
        if not PY361:
            assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
            assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
    def test_slice_to_array_conversion(self):
        def assert_as_array_equals(slc, asarray):
            tm.assert_numpy_array_equal(
                BlockPlacement(slc).as_array,
                np.asarray(asarray, dtype=np.int64))
        assert_as_array_equals(slice(0, 3), [0, 1, 2])
        assert_as_array_equals(slice(0, 0), [])
        assert_as_array_equals(slice(3, 0), [])
        assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
        if not PY361:
            assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
            assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
    def test_blockplacement_add(self):
        bpl = BlockPlacement(slice(0, 5))
        assert bpl.add(1).as_slice == slice(1, 6, 1)
        assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
        assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
    def test_blockplacement_add_int(self):
        def assert_add_equals(val, inc, result):
            assert list(BlockPlacement(val).add(inc)) == result
        assert_add_equals(slice(0, 0), 0, [])
        assert_add_equals(slice(1, 4), 0, [1, 2, 3])
        assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
        assert_add_equals([1, 2, 4], 0, [1, 2, 4])
        assert_add_equals(slice(0, 0), 10, [])
        assert_add_equals(slice(1, 4), 10, [11, 12, 13])
        assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
        assert_add_equals([1, 2, 4], 10, [11, 12, 14])
        assert_add_equals(slice(0, 0), -1, [])
        assert_add_equals(slice(1, 4), -1, [0, 1, 2])
        assert_add_equals([1, 2, 4], -1, [0, 1, 3])
        # offsets that would push a position below zero must raise
        with pytest.raises(ValueError):
            BlockPlacement(slice(1, 4)).add(-10)
        with pytest.raises(ValueError):
            BlockPlacement([1, 2, 4]).add(-10)
        if not PY361:
            assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
            assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
            assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
            with pytest.raises(ValueError):
                BlockPlacement(slice(2, None, -1)).add(-1)
class DummyElement:
    """Minimal scalar wrapper that reports an explicit numpy dtype.

    Lets TestCanHoldElement feed arithmetic ops a value whose advertised
    dtype can differ from its plain Python type.
    """

    def __init__(self, value, dtype):
        self.value = value
        self.dtype = np.dtype(dtype)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "DummyElement({}, {})".format(self.value, self.dtype)

    def __array__(self):
        # numpy coercion honours the wrapped dtype
        return np.array(self.value, dtype=self.dtype)

    def astype(self, dtype, copy=False):
        # mimics ndarray.astype's signature but mutates in place and
        # returns self (the copy flag is ignored on purpose)
        self.dtype = dtype
        return self

    def view(self, dtype):
        # assumes self.value itself supports .view (e.g. a numpy scalar)
        return type(self)(self.value.view(dtype), dtype)

    def any(self, axis=None):
        return bool(self.value)
class TestCanHoldElement:
    """Check arithmetic between a DataFrame and dtype-tagged scalars."""
    @pytest.mark.parametrize('value, dtype', [
        (1, 'i8'),
        (1.0, 'f8'),
        (2**63, 'f8'),
        (1j, 'complex128'),
        (2**63, 'complex128'),
        (True, 'bool'),
        (np.timedelta64(20, 'ns'), '<m8[ns]'),
        (np.datetime64(20, 'ns'), '<M8[ns]'),
    ])
    @pytest.mark.parametrize('op', [
        operator.add,
        operator.sub,
        operator.mul,
        operator.truediv,
        operator.mod,
        operator.pow,
    ], ids=lambda x: x.__name__)
    def test_binop_other(self, op, value, dtype):
        """op(frame, DummyElement.value) must either raise TypeError for
        the listed invalid datetime/timedelta combinations, or produce
        the same result dtypes as op(frame, raw value)."""
        # combinations numpy itself does not define; skip rather than fail
        skip = {(operator.add, 'bool'),
                (operator.sub, 'bool'),
                (operator.mul, 'bool'),
                (operator.truediv, 'bool'),
                (operator.mod, 'i8'),
                (operator.mod, 'complex128'),
                (operator.pow, 'bool')}
        if (op, dtype) in skip:
            pytest.skip("Invalid combination {},{}".format(op, dtype))
        e = DummyElement(value, dtype)
        s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
        invalid = {(operator.pow, '<M8[ns]'),
                   (operator.mod, '<M8[ns]'),
                   (operator.truediv, '<M8[ns]'),
                   (operator.mul, '<M8[ns]'),
                   (operator.add, '<M8[ns]'),
                   (operator.pow, '<m8[ns]'),
                   (operator.mul, '<m8[ns]')}
        if (op, dtype) in invalid:
            with pytest.raises(TypeError):
                op(s, e.value)
        else:
            # FIXME: Since dispatching to Series, this test no longer
            # asserts anything meaningful
            result = op(s, e.value).dtypes
            expected = op(s, value).dtypes
            assert_series_equal(result, expected)
@pytest.mark.parametrize('typestr, holder', [
    ('category', Categorical),
    ('M8[ns]', DatetimeArray),
    ('M8[ns, US/Central]', DatetimeArray),
    ('m8[ns]', TimedeltaArray),
    ('sparse', SparseArray),
])
def test_holder(typestr, holder):
    """Each extension-dtype block must report the expected array holder."""
    blk = create_block(typestr, [1])
    assert blk._holder is holder
def test_deprecated_fastpath():
    """Passing fastpath=True to make_block emits a DeprecationWarning.

    See GH#19265.
    """
    data = np.random.rand(3, 3)
    with tm.assert_produces_warning(DeprecationWarning,
                                    check_stacklevel=False):
        make_block(data, placement=np.arange(3), fastpath=True)
def test_validate_ndim():
    """make_block must reject values whose ndim disagrees with the
    requested block ndim."""
    arr = np.array([1.0, 2.0])
    msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
    with pytest.raises(ValueError, match=msg):
        make_block(arr, slice(2), ndim=2)
def test_block_shape():
    """Reindexing a numeric and a categorical Series over the same index
    must yield blocks with identical manager locations."""
    idx = pd.Index([0, 1, 2, 3, 4])
    ser_num = pd.Series([1, 2, 3]).reindex(idx)
    ser_cat = pd.Series(pd.Categorical([1, 2, 3])).reindex(idx)
    locs_num = ser_num._data.blocks[0].mgr_locs.indexer
    locs_cat = ser_cat._data.blocks[0].mgr_locs.indexer
    assert locs_num == locs_cat
def test_make_block_no_pandas_array():
    # https://github.com/pandas-dev/pandas/pull/24866
    """make_block should unwrap PandasArray/PandasDtype inputs into plain
    numpy-backed integer blocks rather than extension blocks."""
    arr = pd.array([1, 2])
    # PandasArray, no dtype
    result = make_block(arr, slice(len(arr)))
    assert result.is_integer is True
    assert result.is_extension is False
    # PandasArray, PandasDtype
    result = make_block(arr, slice(len(arr)), dtype=arr.dtype)
    assert result.is_integer is True
    assert result.is_extension is False
    # ndarray, PandasDtype
    result = make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype)
    assert result.is_integer is True
    assert result.is_extension is False
| true | true |
f73b78c1acef35629be71052e1e9fa32f0ad298a | 151 | py | Python | livy/__init__.py | EDS-APHP/pylivy | 0714e4e74e27c1a13b74228700bb3e800e9646fe | [
"MIT"
] | null | null | null | livy/__init__.py | EDS-APHP/pylivy | 0714e4e74e27c1a13b74228700bb3e800e9646fe | [
"MIT"
] | null | null | null | livy/__init__.py | EDS-APHP/pylivy | 0714e4e74e27c1a13b74228700bb3e800e9646fe | [
"MIT"
] | 1 | 2020-02-05T09:31:01.000Z | 2020-02-05T09:31:01.000Z | from livy.session import LivySession # noqa: F401
from livy.models import ( # noqa: F401
SessionKind,
SessionState,
SparkRuntimeError,
)
| 21.571429 | 50 | 0.715232 | from livy.session import LivySession
from livy.models import (
SessionKind,
SessionState,
SparkRuntimeError,
)
| true | true |
f73b78d3a2989df5213f148680faeebb5b1c6a9b | 32,481 | py | Python | ndlib/test/test_ndlib.py | KDDComplexNetworkAnalysis/ndlib | 2d05df8cd67de142ef068cc051969f86e51dfbc6 | [
"BSD-2-Clause"
] | 6 | 2018-12-20T07:33:09.000Z | 2020-05-10T07:33:33.000Z | ndlib/test/test_ndlib.py | KDDComplexNetworkAnalysis/ndlib | 2d05df8cd67de142ef068cc051969f86e51dfbc6 | [
"BSD-2-Clause"
] | null | null | null | ndlib/test/test_ndlib.py | KDDComplexNetworkAnalysis/ndlib | 2d05df8cd67de142ef068cc051969f86e51dfbc6 | [
"BSD-2-Clause"
] | 2 | 2020-02-05T09:11:17.000Z | 2020-02-07T11:12:33.000Z | from __future__ import absolute_import
import unittest
import random
import future.utils
import networkx as nx
import igraph as ig
import numpy as np
import ndlib.models.ModelConfig as mc
import ndlib.models.epidemics as epd
import ndlib.models.opinions as opn
import ndlib.utils as ut
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
def get_graph(er=False):
    """Return a [networkx graph, equivalent igraph graph] pair.

    :param er: when True build an Erdos-Renyi graph G(1000, 0.1),
               otherwise a complete graph on 100 nodes.
    """
    base = nx.erdos_renyi_graph(1000, 0.1) if er else nx.complete_graph(100)
    mirror = ig.Graph(directed=False)
    mirror.add_vertices(list(base.nodes()))
    mirror.add_edges(list(base.edges()))
    return [base, mirror]
def get_directed_graph(er=False):
    """Return a [directed networkx graph, equivalent directed igraph graph]
    pair; same topology choices as :func:`get_graph`."""
    base = nx.erdos_renyi_graph(1000, 0.1) if er else nx.complete_graph(100)
    base = base.to_directed()
    mirror = ig.Graph(directed=True)
    mirror.add_vertices(list(base.nodes()))
    mirror.add_edges(list(base.edges()))
    return [base, mirror]
class NdlibTest(unittest.TestCase):
    def test_utldr(self):
        """Smoke-test UTLDRModel through its full lifecycle: epidemic,
        testing/ICU and social parameters, per-node activity/work/segment
        attributes, then lockdown set/unset cycles on both networkx and
        igraph backends."""
        for g in get_graph():
            model = epd.UTLDRModel(g)
            config = mc.Configuration()
            # Undetected
            config.add_model_parameter("sigma", 0.05)
            config.add_model_parameter("beta", {"M": 0.25, "F": 0})
            config.add_model_parameter("gamma", 0.05)
            config.add_model_parameter("omega", 0.01)
            config.add_model_parameter("p", 0.04)
            config.add_model_parameter("lsize", 0.2)
            # Testing
            config.add_model_parameter("phi_e", 0.03)
            config.add_model_parameter("phi_i", 0.1)
            config.add_model_parameter("kappa_e", 0.03)
            config.add_model_parameter("kappa_i", 0.1)
            config.add_model_parameter("gamma_t", 0.08)
            config.add_model_parameter("gamma_f", 0.1)
            config.add_model_parameter("omega_t", 0.01)
            config.add_model_parameter("omega_f", 0.08)
            config.add_model_parameter("epsilon_e", 1)
            config.add_model_parameter("icu_b", 10)
            config.add_model_parameter("iota", 0.20)
            config.add_model_parameter("z", 0.2)
            config.add_model_parameter("s", 0.05)
            # Lockdown
            config.add_model_parameter("lambda", 0.8)
            config.add_model_parameter("epsilon_l", 5)
            config.add_model_parameter("mu", 0.05)
            config.add_model_parameter("p_l", 0.04)
            # Vaccination
            config.add_model_parameter("v", 0.15)
            config.add_model_parameter("f", 0.02)
            # node activity level
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            ngender = ['M', 'F']
            work = ['school', 'PA', 'hospital', 'none']
            for i in nodes:
                config.add_node_configuration("activity", i, 1)
                config.add_node_configuration("work", i, np.random.choice(work, 2))
                config.add_node_configuration("segment", i, np.random.choice(ngender, 1)[0])
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
            # lockdown restricted to some work categories
            households = {0: [1, 2, 3, 4], 5: [6, 7]}
            model.set_lockdown(households, ['PA', 'school'])
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
            model.unset_lockdown(['PA'])
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
            # full lockdown, then release one category
            model.set_lockdown(households)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
            model.unset_lockdown(['school'])
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
def test_algorithmic_bias_model(self):
for g in get_graph():
model = opn.AlgorithmicBiasModel(g, seed=0)
config = mc.Configuration()
config.add_model_parameter("epsilon", 0.32)
config.add_model_parameter("gamma", 1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_voter_model(self):
for g in get_graph():
model = opn.VoterModel(g, seed=0)
config = mc.Configuration()
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_sznajd_model(self):
for g in get_graph():
model = opn.SznajdModel(g)
config = mc.Configuration()
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
for g in get_directed_graph():
model = opn.SznajdModel(g)
config = mc.Configuration()
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_majorityrule_model(self):
for g in get_graph():
model = opn.MajorityRuleModel(g)
config = mc.Configuration()
config.add_model_parameter("q", 3)
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_qvoter_model(self):
for g in get_graph():
model = opn.QVoterModel(g)
config = mc.Configuration()
config.add_model_parameter("q", 5)
config.add_model_parameter("fraction_infected", 0.6)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
    def test_cognitive_model(self):
        """CognitiveOpDynModel with equal R fractions runs ten iterations."""
        for g in get_graph():
            model = opn.CognitiveOpDynModel(g, seed=0)
            config = mc.Configuration()
            config.add_model_parameter("I", 0.15)
            config.add_model_parameter("B_range_min", 0)
            config.add_model_parameter("B_range_max", 1)
            config.add_model_parameter("T_range_min", 0)
            config.add_model_parameter("T_range_max", 1)
            # the three R fractions must sum to 1
            config.add_model_parameter("R_fraction_negative", 1.0 / 3)
            config.add_model_parameter("R_fraction_neutral", 1.0 / 3)
            config.add_model_parameter("R_fraction_positive", 1.0 / 3)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
def test_si_model(self):
for g in get_graph(True):
model = epd.SIModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_sir_model(self):
for g in get_graph(True):
model = epd.SIRModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('gamma', 0.2)
config.add_model_parameter("percentage_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
    def test_seir_model(self):
        """SEIRModel on undirected and directed ER graphs (the directed
        run uses faster transition rates and skips node status)."""
        for g in get_graph(True):
            model = epd.SEIRModel(g)
            config = mc.Configuration()
            config.add_model_parameter('beta', 0.5)
            config.add_model_parameter('gamma', 0.2)
            config.add_model_parameter('alpha', 0.05)
            config.add_model_parameter("fraction_infected", 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
        for g in get_directed_graph(True):
            model = epd.SEIRModel(g)
            config = mc.Configuration()
            config.add_model_parameter('beta', 0.5)
            config.add_model_parameter('gamma', 0.8)
            config.add_model_parameter('alpha', 0.5)
            config.add_model_parameter("fraction_infected", 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_seirct_model(self):
        """SEIRctModel (continuous-time SEIR) on undirected and directed
        ER graphs."""
        for g in get_graph(True):
            model = epd.SEIRctModel(g)
            config = mc.Configuration()
            config.add_model_parameter('beta', 0.5)
            config.add_model_parameter('gamma', 0.2)
            config.add_model_parameter('alpha', 0.05)
            config.add_model_parameter("fraction_infected", 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
        for g in get_directed_graph(True):
            model = epd.SEIRctModel(g)
            config = mc.Configuration()
            config.add_model_parameter('beta', 0.5)
            config.add_model_parameter('gamma', 0.8)
            config.add_model_parameter('alpha', 0.5)
            config.add_model_parameter("fraction_infected", 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
def test_swir_model(self):
for g in get_graph(True):
model = epd.SWIRModel(g)
config = mc.Configuration()
config.add_model_parameter('kappa', 0.5)
config.add_model_parameter('mu', 0.2)
config.add_model_parameter('nu', 0.05)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
    def test_seis_model(self):
        """SEISModel on undirected and directed ER graphs (the directed
        run uses faster transition rates and skips node status)."""
        for g in get_graph(True):
            model = epd.SEISModel(g)
            config = mc.Configuration()
            config.add_model_parameter('beta', 0.5)
            config.add_model_parameter('lambda', 0.2)
            config.add_model_parameter('alpha', 0.05)
            config.add_model_parameter("fraction_infected", 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
        for g in get_directed_graph(True):
            model = epd.SEISModel(g)
            config = mc.Configuration()
            config.add_model_parameter('beta', 0.5)
            config.add_model_parameter('lambda', 0.8)
            config.add_model_parameter('alpha', 0.5)
            config.add_model_parameter("fraction_infected", 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
def test_seis_model(self):
for g in get_graph(True):
model = epd.SEISctModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.2)
config.add_model_parameter('alpha', 0.05)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
for g in get_directed_graph(True):
model = epd.SEISctModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.8)
config.add_model_parameter('alpha', 0.5)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_sis_model(self):
for g in get_graph(True):
model = epd.SISModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.2)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
    def test_kertesz_model(self):
        """KerteszThresholdModel with a uniform per-node threshold runs
        ten iterations on both networkx and igraph backends."""
        for g in get_graph():
            model = epd.KerteszThresholdModel(g)
            config = mc.Configuration()
            config.add_model_parameter('adopter_rate', 0.4)
            config.add_model_parameter('percentage_blocked', 0.1)
            config.add_model_parameter('fraction_infected', 0.1)
            threshold = 0.2
            # igraph exposes node identifiers via the 'name' attribute
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("threshold", i, threshold)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
def test_multiple_si_model(self):
for g in get_graph(True):
model = epd.SIModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.01)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
executions = ut.multi_runs(model, execution_number=10, iteration_number=50)
self.assertEqual(len(executions), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
    def test_threshold_model(self):
        """ThresholdModel with a uniform per-node threshold runs ten
        iterations."""
        for g in get_graph(True):
            model = epd.ThresholdModel(g)
            config = mc.Configuration()
            config.add_model_parameter('fraction_infected', 0.1)
            threshold = 0.2
            # igraph exposes node identifiers via the 'name' attribute
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("threshold", i, threshold)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_generalisedthreshold_model(self):
        """GeneralisedThresholdModel (tau/mu timing parameters plus a
        uniform per-node threshold) runs fifty iterations."""
        for g in get_graph(True):
            model = epd.GeneralisedThresholdModel(g)
            config = mc.Configuration()
            config.add_model_parameter('fraction_infected', 0.1)
            config.add_model_parameter('tau', 5)
            config.add_model_parameter('mu', 5)
            threshold = 0.2
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("threshold", i, threshold)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(50)
            self.assertEqual(len(iterations), 50)
            iterations = model.iteration_bunch(50, node_status=False)
            self.assertEqual(len(iterations), 50)
    def test_GeneralThresholdModel(self):
        """GeneralThresholdModel with uniform node thresholds and uniform
        edge weights runs ten iterations."""
        for g in get_graph(True):
            model = epd.GeneralThresholdModel(g)
            config = mc.Configuration()
            config.add_model_parameter('fraction_infected', 0.1)
            threshold = 0.2
            weight = 0.2
            # igraph: map edge tuples back to vertex names for configuration
            if isinstance(g, nx.Graph):
                nodes = g.nodes
                edges = g.edges
            else:
                nodes = g.vs['name']
                edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
            for i in nodes:
                config.add_node_configuration("threshold", i, threshold)
            for e in edges:
                config.add_edge_configuration("weight", e, weight)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_profile_threshold_model(self):
        """ProfileThresholdModel with uniform per-node threshold/profile,
        first with default options, then with blocked nodes and a nonzero
        spontaneous adopter rate."""
        for g in get_graph(True):
            model = epd.ProfileThresholdModel(g)
            config = mc.Configuration()
            config.add_model_parameter('percentage_infected', 0.1)
            threshold = 0.2
            profile = 0.1
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("threshold", i, threshold)
                config.add_node_configuration("profile", i, profile)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            # same model with blocked nodes and spontaneous adoption
            model = epd.ProfileThresholdModel(g)
            config = mc.Configuration()
            config.add_model_parameter('percentage_infected', 0.1)
            config.add_model_parameter("blocked", 0.1)
            config.add_model_parameter("adopter_rate", 0.001)
            threshold = 0.2
            profile = 0.1
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("threshold", i, threshold)
                config.add_node_configuration("profile", i, profile)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_profile_model(self):
        """ProfileModel with a uniform per-node profile, first with default
        options, then with blocked nodes and a spontaneous adopter rate."""
        for g in get_graph(True):
            model = epd.ProfileModel(g)
            config = mc.Configuration()
            config.add_model_parameter('fraction_infected', 0.1)
            profile = 0.1
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("profile", i, profile)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            # same model with blocked nodes and spontaneous adoption
            model = epd.ProfileModel(g)
            config = mc.Configuration()
            config.add_model_parameter('fraction_infected', 0.1)
            config.add_model_parameter("blocked", 0.1)
            config.add_model_parameter("adopter_rate", 0.001)
            profile = 0.1
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("profile", i, profile)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_independent_cascade_model(self):
        """IndependentCascadesModel with a uniform per-edge threshold runs
        ten iterations."""
        for g in get_graph(True):
            model = epd.IndependentCascadesModel(g)
            config = mc.Configuration()
            config.add_model_parameter('percentage_infected', 0.1)
            threshold = 0.1
            # igraph: map edge tuples back to vertex names for configuration
            if isinstance(g, nx.Graph):
                for e in g.edges:
                    config.add_edge_configuration("threshold", e, threshold)
            else:
                edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
                for e in edges:
                    config.add_edge_configuration("threshold", e, threshold)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_ICE(self):
        """ICEModel with nodes randomly assigned to one of two communities
        runs ten iterations."""
        for g in get_graph(True):
            model = epd.ICEModel(g)
            config = mc.Configuration()
            config.add_model_parameter('percentage_infected', 0.1)
            # random binary community assignment per node
            if isinstance(g, nx.Graph):
                node_to_com = {n: random.choice([0, 1])for n in g.nodes()}
                for i in g.nodes():
                    config.add_node_configuration("com", i, node_to_com[i])
            else:
                node_to_com = {n: random.choice([0, 1]) for n in g.vs['name']}
                for i in g.vs['name']:
                    config.add_node_configuration("com", i, node_to_com[i])
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_ICP(self):
        """ICPModel with random binary communities, uniform edge thresholds
        and a permeability parameter runs ten iterations."""
        threshold = 0.1
        for g in get_graph(True):
            model = epd.ICPModel(g)
            config = mc.Configuration()
            config.add_model_parameter('percentage_infected', 0.1)
            if isinstance(g, nx.Graph):
                node_to_com = {n: random.choice([0, 1])for n in g.nodes()}
                for i in g.nodes():
                    config.add_node_configuration("com", i, node_to_com[i])
                for e in g.edges:
                    config.add_edge_configuration("threshold", e, threshold)
            else:
                node_to_com = {n: random.choice([0, 1]) for n in g.vs['name']}
                for i in g.vs['name']:
                    config.add_node_configuration("com", i, node_to_com[i])
                # igraph: map edge tuples back to vertex names
                edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
                for e in edges:
                    config.add_edge_configuration("threshold", e, threshold)
            config.add_model_parameter('permeability', 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_ICEP(self):
        """ICEPModel with random binary communities and a permeability
        parameter runs ten iterations."""
        for g in get_graph(True):
            model = epd.ICEPModel(g)
            config = mc.Configuration()
            config.add_model_parameter('percentage_infected', 0.1)
            if isinstance(g, nx.Graph):
                node_to_com = {n: random.choice([0, 1])for n in g.nodes()}
                for i in g.nodes():
                    config.add_node_configuration("com", i, node_to_com[i])
            else:
                node_to_com = {n: random.choice([0, 1]) for n in g.vs['name']}
                for i in g.vs['name']:
                    config.add_node_configuration("com", i, node_to_com[i])
            config.add_model_parameter('permeability', 0.1)
            model.set_initial_status(config)
            iterations = model.iteration_bunch(10)
            self.assertEqual(len(iterations), 10)
            iterations = model.iteration_bunch(10, node_status=False)
            self.assertEqual(len(iterations), 10)
    def test_kertesz_model_predefined_blocked(self):
        """An explicitly configured Blocked set must come back as exactly
        the nodes with status -1 after one iteration."""
        for g in get_graph(True):
            model = epd.KerteszThresholdModel(g)
            config = mc.Configuration()
            config.add_model_parameter('adopter_rate', 0.4)
            predefined_blocked = [0, 1, 2, 3, 4, 5]
            config.add_model_initial_configuration("Blocked", predefined_blocked)
            config.add_model_parameter('percentage_infected', 0.1)
            threshold = 0.2
            if isinstance(g, nx.Graph):
                nodes = g.nodes
            else:
                nodes = g.vs['name']
            for i in nodes:
                config.add_node_configuration("threshold", i, threshold)
            model.set_initial_status(config)
            iteration = model.iteration()
            # status -1 marks blocked nodes in this model
            blocked = [x for x, v in future.utils.iteritems(iteration['status']) if v == -1]
            self.assertEqual(blocked, predefined_blocked)
def test_initial_infected(self):
for g in get_graph(True):
model = epd.SISModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.2)
predefined_infected = [0, 1, 2, 3, 4, 5]
config.add_model_initial_configuration("Infected", predefined_infected)
model.set_initial_status(config)
inft = [k for k, v in future.utils.iteritems(model.status) if v == 1]
self.assertAlmostEqual(inft, predefined_infected)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
def test_optional_parameters(self):
for g in get_graph(True):
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
if isinstance(g, nx.Graph):
config.add_node_set_configuration("test", {n: 1 for n in g.nodes})
config.add_edge_set_configuration("etest", {e: 1 for e in g.edges})
else:
config.add_node_set_configuration("test", {n: 1 for n in g.vs['name']})
edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
config.add_edge_set_configuration("etest", {e: 1 for e in edges})
self.assertEqual(len(iterations), 10)
model = epd.KerteszThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('adopter_rate', 0.4)
predefined_blocked = [0, 1, 2, 3, 4, 5]
config.add_model_initial_configuration("Blocked", predefined_blocked)
config.add_model_parameter('percentage_infected', 0.1)
model.set_initial_status(config)
iteration = model.iteration()
blocked = [x for x, v in future.utils.iteritems(iteration["status"]) if v == -1]
self.assertEqual(blocked, predefined_blocked)
model = epd.IndependentCascadesModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ProfileModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ProfileThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.KerteszThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('adopter_rate', 0.4)
config.add_model_parameter('percentage_blocked', 0.1)
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
def test_config(self):
for g in get_graph(True):
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
config.add_model_initial_configuration("Infected", [1, 2, 3])
config.add_node_set_configuration("partial", {1: 1, 2: 2})
try:
model.set_initial_status(config)
except:
pass
if isinstance(g, nx.Graph):
edges = list(g.edges)
nodes = list(g.nodes)
else:
edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
nodes = g.vs['name']
config.add_edge_set_configuration("partial", {e: 1 for e in edges[:10]})
try:
model.set_initial_status(config)
except:
pass
config.add_node_set_configuration("partial", {n: 1 for n in nodes})
config.add_edge_set_configuration("partial", {e: 1 for e in edges})
model.set_initial_status(config)
for g in get_graph():
model = opn.MajorityRuleModel(g)
config = mc.Configuration()
config.add_model_parameter("percentage_infected", 0.2)
try:
model.set_initial_status(config)
except:
pass
for g in get_graph(True):
model = epd.IndependentCascadesModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
try:
model.set_initial_status(config)
except:
pass
for g in get_graph(True):
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
try:
model.set_initial_status(config)
except:
pass
| 40.299007 | 92 | 0.58859 | from __future__ import absolute_import
import unittest
import random
import future.utils
import networkx as nx
import igraph as ig
import numpy as np
import ndlib.models.ModelConfig as mc
import ndlib.models.epidemics as epd
import ndlib.models.opinions as opn
import ndlib.utils as ut
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
def get_graph(er=False):
if not er:
g = nx.complete_graph(100)
else:
g = nx.erdos_renyi_graph(1000, 0.1)
gi = ig.Graph(directed=False)
gi.add_vertices(list(g.nodes()))
gi.add_edges(list(g.edges()))
gs = [g, gi]
return gs
def get_directed_graph(er=False):
if not er:
g = nx.complete_graph(100)
else:
g = nx.erdos_renyi_graph(1000, 0.1)
g = g.to_directed()
gi = ig.Graph(directed=True)
gi.add_vertices(list(g.nodes()))
gi.add_edges(list(g.edges()))
gs = [g, gi]
return gs
class NdlibTest(unittest.TestCase):
def test_utldr(self):
for g in get_graph():
model = epd.UTLDRModel(g)
config = mc.Configuration()
config.add_model_parameter("sigma", 0.05)
config.add_model_parameter("beta", {"M": 0.25, "F": 0})
config.add_model_parameter("gamma", 0.05)
config.add_model_parameter("omega", 0.01)
config.add_model_parameter("p", 0.04)
config.add_model_parameter("lsize", 0.2)
config.add_model_parameter("phi_e", 0.03)
config.add_model_parameter("phi_i", 0.1)
config.add_model_parameter("kappa_e", 0.03)
config.add_model_parameter("kappa_i", 0.1)
config.add_model_parameter("gamma_t", 0.08)
config.add_model_parameter("gamma_f", 0.1)
config.add_model_parameter("omega_t", 0.01)
config.add_model_parameter("omega_f", 0.08)
config.add_model_parameter("epsilon_e", 1)
config.add_model_parameter("icu_b", 10)
config.add_model_parameter("iota", 0.20)
config.add_model_parameter("z", 0.2)
config.add_model_parameter("s", 0.05)
config.add_model_parameter("lambda", 0.8)
config.add_model_parameter("epsilon_l", 5)
config.add_model_parameter("mu", 0.05)
config.add_model_parameter("p_l", 0.04)
config.add_model_parameter("v", 0.15)
config.add_model_parameter("f", 0.02)
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
ngender = ['M', 'F']
work = ['school', 'PA', 'hospital', 'none']
for i in nodes:
config.add_node_configuration("activity", i, 1)
config.add_node_configuration("work", i, np.random.choice(work, 2))
config.add_node_configuration("segment", i, np.random.choice(ngender, 1)[0])
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
households = {0: [1, 2, 3, 4], 5: [6, 7]}
model.set_lockdown(households, ['PA', 'school'])
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
model.unset_lockdown(['PA'])
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
model.set_lockdown(households)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
model.unset_lockdown(['school'])
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_algorithmic_bias_model(self):
for g in get_graph():
model = opn.AlgorithmicBiasModel(g, seed=0)
config = mc.Configuration()
config.add_model_parameter("epsilon", 0.32)
config.add_model_parameter("gamma", 1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_voter_model(self):
for g in get_graph():
model = opn.VoterModel(g, seed=0)
config = mc.Configuration()
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_sznajd_model(self):
for g in get_graph():
model = opn.SznajdModel(g)
config = mc.Configuration()
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
for g in get_directed_graph():
model = opn.SznajdModel(g)
config = mc.Configuration()
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_majorityrule_model(self):
for g in get_graph():
model = opn.MajorityRuleModel(g)
config = mc.Configuration()
config.add_model_parameter("q", 3)
config.add_model_parameter("fraction_infected", 0.2)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_qvoter_model(self):
for g in get_graph():
model = opn.QVoterModel(g)
config = mc.Configuration()
config.add_model_parameter("q", 5)
config.add_model_parameter("fraction_infected", 0.6)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_cognitive_model(self):
for g in get_graph():
model = opn.CognitiveOpDynModel(g, seed=0)
config = mc.Configuration()
config.add_model_parameter("I", 0.15)
config.add_model_parameter("B_range_min", 0)
config.add_model_parameter("B_range_max", 1)
config.add_model_parameter("T_range_min", 0)
config.add_model_parameter("T_range_max", 1)
config.add_model_parameter("R_fraction_negative", 1.0 / 3)
config.add_model_parameter("R_fraction_neutral", 1.0 / 3)
config.add_model_parameter("R_fraction_positive", 1.0 / 3)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_si_model(self):
for g in get_graph(True):
model = epd.SIModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_sir_model(self):
for g in get_graph(True):
model = epd.SIRModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('gamma', 0.2)
config.add_model_parameter("percentage_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_seir_model(self):
for g in get_graph(True):
model = epd.SEIRModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('gamma', 0.2)
config.add_model_parameter('alpha', 0.05)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
for g in get_directed_graph(True):
model = epd.SEIRModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('gamma', 0.8)
config.add_model_parameter('alpha', 0.5)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_seirct_model(self):
for g in get_graph(True):
model = epd.SEIRctModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('gamma', 0.2)
config.add_model_parameter('alpha', 0.05)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
for g in get_directed_graph(True):
model = epd.SEIRctModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('gamma', 0.8)
config.add_model_parameter('alpha', 0.5)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_swir_model(self):
for g in get_graph(True):
model = epd.SWIRModel(g)
config = mc.Configuration()
config.add_model_parameter('kappa', 0.5)
config.add_model_parameter('mu', 0.2)
config.add_model_parameter('nu', 0.05)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
def test_seis_model(self):
for g in get_graph(True):
model = epd.SEISModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.2)
config.add_model_parameter('alpha', 0.05)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
for g in get_directed_graph(True):
model = epd.SEISModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.8)
config.add_model_parameter('alpha', 0.5)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_seis_model(self):
for g in get_graph(True):
model = epd.SEISctModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.2)
config.add_model_parameter('alpha', 0.05)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
for g in get_directed_graph(True):
model = epd.SEISctModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.8)
config.add_model_parameter('alpha', 0.5)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_sis_model(self):
for g in get_graph(True):
model = epd.SISModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.2)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_kertesz_model(self):
for g in get_graph():
model = epd.KerteszThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('adopter_rate', 0.4)
config.add_model_parameter('percentage_blocked', 0.1)
config.add_model_parameter('fraction_infected', 0.1)
threshold = 0.2
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("threshold", i, threshold)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_multiple_si_model(self):
for g in get_graph(True):
model = epd.SIModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.01)
config.add_model_parameter("fraction_infected", 0.1)
model.set_initial_status(config)
executions = ut.multi_runs(model, execution_number=10, iteration_number=50)
self.assertEqual(len(executions), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_threshold_model(self):
for g in get_graph(True):
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
threshold = 0.2
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("threshold", i, threshold)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_generalisedthreshold_model(self):
for g in get_graph(True):
model = epd.GeneralisedThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
config.add_model_parameter('tau', 5)
config.add_model_parameter('mu', 5)
threshold = 0.2
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("threshold", i, threshold)
model.set_initial_status(config)
iterations = model.iteration_bunch(50)
self.assertEqual(len(iterations), 50)
iterations = model.iteration_bunch(50, node_status=False)
self.assertEqual(len(iterations), 50)
def test_GeneralThresholdModel(self):
for g in get_graph(True):
model = epd.GeneralThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
threshold = 0.2
weight = 0.2
if isinstance(g, nx.Graph):
nodes = g.nodes
edges = g.edges
else:
nodes = g.vs['name']
edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
for i in nodes:
config.add_node_configuration("threshold", i, threshold)
for e in edges:
config.add_edge_configuration("weight", e, weight)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_profile_threshold_model(self):
for g in get_graph(True):
model = epd.ProfileThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
threshold = 0.2
profile = 0.1
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("threshold", i, threshold)
config.add_node_configuration("profile", i, profile)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ProfileThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
config.add_model_parameter("blocked", 0.1)
config.add_model_parameter("adopter_rate", 0.001)
threshold = 0.2
profile = 0.1
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("threshold", i, threshold)
config.add_node_configuration("profile", i, profile)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_profile_model(self):
for g in get_graph(True):
model = epd.ProfileModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
profile = 0.1
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("profile", i, profile)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ProfileModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
config.add_model_parameter("blocked", 0.1)
config.add_model_parameter("adopter_rate", 0.001)
profile = 0.1
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("profile", i, profile)
model.set_initial_status(config)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_independent_cascade_model(self):
for g in get_graph(True):
model = epd.IndependentCascadesModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
threshold = 0.1
if isinstance(g, nx.Graph):
for e in g.edges:
config.add_edge_configuration("threshold", e, threshold)
else:
edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
for e in edges:
config.add_edge_configuration("threshold", e, threshold)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_ICE(self):
for g in get_graph(True):
model = epd.ICEModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
if isinstance(g, nx.Graph):
node_to_com = {n: random.choice([0, 1])for n in g.nodes()}
for i in g.nodes():
config.add_node_configuration("com", i, node_to_com[i])
else:
node_to_com = {n: random.choice([0, 1]) for n in g.vs['name']}
for i in g.vs['name']:
config.add_node_configuration("com", i, node_to_com[i])
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_ICP(self):
threshold = 0.1
for g in get_graph(True):
model = epd.ICPModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
if isinstance(g, nx.Graph):
node_to_com = {n: random.choice([0, 1])for n in g.nodes()}
for i in g.nodes():
config.add_node_configuration("com", i, node_to_com[i])
for e in g.edges:
config.add_edge_configuration("threshold", e, threshold)
else:
node_to_com = {n: random.choice([0, 1]) for n in g.vs['name']}
for i in g.vs['name']:
config.add_node_configuration("com", i, node_to_com[i])
edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
for e in edges:
config.add_edge_configuration("threshold", e, threshold)
config.add_model_parameter('permeability', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_ICEP(self):
for g in get_graph(True):
model = epd.ICEPModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
if isinstance(g, nx.Graph):
node_to_com = {n: random.choice([0, 1])for n in g.nodes()}
for i in g.nodes():
config.add_node_configuration("com", i, node_to_com[i])
else:
node_to_com = {n: random.choice([0, 1]) for n in g.vs['name']}
for i in g.vs['name']:
config.add_node_configuration("com", i, node_to_com[i])
config.add_model_parameter('permeability', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
iterations = model.iteration_bunch(10, node_status=False)
self.assertEqual(len(iterations), 10)
def test_kertesz_model_predefined_blocked(self):
for g in get_graph(True):
model = epd.KerteszThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('adopter_rate', 0.4)
predefined_blocked = [0, 1, 2, 3, 4, 5]
config.add_model_initial_configuration("Blocked", predefined_blocked)
config.add_model_parameter('percentage_infected', 0.1)
threshold = 0.2
if isinstance(g, nx.Graph):
nodes = g.nodes
else:
nodes = g.vs['name']
for i in nodes:
config.add_node_configuration("threshold", i, threshold)
model.set_initial_status(config)
iteration = model.iteration()
blocked = [x for x, v in future.utils.iteritems(iteration['status']) if v == -1]
self.assertEqual(blocked, predefined_blocked)
def test_initial_infected(self):
for g in get_graph(True):
model = epd.SISModel(g)
config = mc.Configuration()
config.add_model_parameter('beta', 0.5)
config.add_model_parameter('lambda', 0.2)
predefined_infected = [0, 1, 2, 3, 4, 5]
config.add_model_initial_configuration("Infected", predefined_infected)
model.set_initial_status(config)
inft = [k for k, v in future.utils.iteritems(model.status) if v == 1]
self.assertAlmostEqual(inft, predefined_infected)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
def test_optional_parameters(self):
for g in get_graph(True):
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
if isinstance(g, nx.Graph):
config.add_node_set_configuration("test", {n: 1 for n in g.nodes})
config.add_edge_set_configuration("etest", {e: 1 for e in g.edges})
else:
config.add_node_set_configuration("test", {n: 1 for n in g.vs['name']})
edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
config.add_edge_set_configuration("etest", {e: 1 for e in edges})
self.assertEqual(len(iterations), 10)
model = epd.KerteszThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('adopter_rate', 0.4)
predefined_blocked = [0, 1, 2, 3, 4, 5]
config.add_model_initial_configuration("Blocked", predefined_blocked)
config.add_model_parameter('percentage_infected', 0.1)
model.set_initial_status(config)
iteration = model.iteration()
blocked = [x for x, v in future.utils.iteritems(iteration["status"]) if v == -1]
self.assertEqual(blocked, predefined_blocked)
model = epd.IndependentCascadesModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ProfileModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ProfileThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
model = epd.KerteszThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('adopter_rate', 0.4)
config.add_model_parameter('percentage_blocked', 0.1)
config.add_model_parameter('fraction_infected', 0.1)
model.set_initial_status(config)
iterations = model.iteration_bunch(10)
self.assertEqual(len(iterations), 10)
def test_config(self):
for g in get_graph(True):
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('fraction_infected', 0.1)
config.add_model_initial_configuration("Infected", [1, 2, 3])
config.add_node_set_configuration("partial", {1: 1, 2: 2})
try:
model.set_initial_status(config)
except:
pass
if isinstance(g, nx.Graph):
edges = list(g.edges)
nodes = list(g.nodes)
else:
edges = [(g.vs[e.tuple[0]]['name'], g.vs[e.tuple[1]]['name']) for e in g.es]
nodes = g.vs['name']
config.add_edge_set_configuration("partial", {e: 1 for e in edges[:10]})
try:
model.set_initial_status(config)
except:
pass
config.add_node_set_configuration("partial", {n: 1 for n in nodes})
config.add_edge_set_configuration("partial", {e: 1 for e in edges})
model.set_initial_status(config)
for g in get_graph():
model = opn.MajorityRuleModel(g)
config = mc.Configuration()
config.add_model_parameter("percentage_infected", 0.2)
try:
model.set_initial_status(config)
except:
pass
for g in get_graph(True):
model = epd.IndependentCascadesModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
try:
model.set_initial_status(config)
except:
pass
for g in get_graph(True):
model = epd.ThresholdModel(g)
config = mc.Configuration()
config.add_model_parameter('percentage_infected', 0.1)
try:
model.set_initial_status(config)
except:
pass
| true | true |
f73b7950730aa42386ba3444f382424ff9044cff | 9,836 | py | Python | 1_AdvancedBasicsPart_I/otus_hw1_log_analyzer/log_analyzer.py | a-agarkov/otus-python-2017-11 | 2f0977f83e0f46f2360a74a7d941199bbe9e9306 | [
"MIT"
] | 2 | 2017-12-14T14:50:52.000Z | 2018-03-08T20:48:44.000Z | 1_AdvancedBasicsPart_I/otus_hw1_log_analyzer/log_analyzer.py | a-agarkov/otus-python-2017-11 | 2f0977f83e0f46f2360a74a7d941199bbe9e9306 | [
"MIT"
] | null | null | null | 1_AdvancedBasicsPart_I/otus_hw1_log_analyzer/log_analyzer.py | a-agarkov/otus-python-2017-11 | 2f0977f83e0f46f2360a74a7d941199bbe9e9306 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gzip
import json
import re
import logging
import sys
import datetime as dt
import os
from operator import itemgetter
from statistics import median, mean
import argparse
from time import time, sleep
from itertools import groupby
from collections import namedtuple
from string import Template
from functools import partial
default_config = {"REPORT_SIZE": 1000,
"REPORT_DIR": "./reports",
"LOG_DIR": "./log"}
def parse_config(default_config: dict = None,
config_path: str = None):
"""
1. Checks whether main config exists at default path.
2. Updates default config keys.
3. Checks, whether any config file was passed in args.
4. Updates config keys, if it was passed.
5. Checks whether all dirs in config exist.
:param default_config: default config file.
:param config_path: main config file path.
:return: log_analyzer config.
"""
if not default_config:
return "No default config provided."
config = {k: v for k, v in default_config.items()}
if os.path.exists(config_path):
with open(config_path, mode='r') as f:
main_config = json.load(f)
else:
return "No config at given path."
config.update(main_config)
if not all((os.path.exists(config[k]) for k in config.keys() if k.endswith('DIR'))):
return "Some config path is broken."
return config
def find_latest_log(log_dir: str):
"""
Finds latest logfile in logs directory.
:param log_dir:
:return: name of the latest log or None if no log found.
"""
def get_log_date(log_name):
log_date = re.search('\d{8}', log_name)
return dt.datetime.strptime(log_date.group(0), "%Y%m%d") if log_date else None
log_name = log_date = None
for item in os.listdir(log_dir):
if 'nginx-access-ui.log' not in item:
continue
date = get_log_date(item)
if (not log_date) or (date > log_date):
log_name, log_date = item, date
return namedtuple('latest_log', ['log_name', 'log_date'])._make((log_name, log_date)
if log_name
else (None, None))
def log_finish_timestamp():
"""
Updates log_analyzer.ts with latest timestamp, if script has terminated successfully.
"""
with open("./monitoring/log_analyzer.ts", mode='w') as f:
f.write(f'{time()}')
sys.exit(0)
def check_if_report_exists(latest_log, report_dir: str):
"""
Checks if report for a certain log file already exists.
:param latest_log: latest log named tuple with log_date;
:param report_dir: path to reports;
:return: True if report already exists, False otherwise.
"""
return os.path.exists(f'{report_dir}/report-{latest_log.log_date.strftime("%Y.%m.%d")}.html')
def parse_log(log_path: str, parser) -> object:
"""
Parses a log file.
:param log_path: path to log file.
:return: log, parsed according to a given format.
"""
open_log = partial(gzip.open, mode='rt', encoding="utf-8") if log_path.endswith(".gz") else partial(open, mode='r')
with open_log(log_path) as f:
parse_results = [parser(line) for line in f]
return parse_results
def parse_line(line: str):
    """Parse a single access-log record.

    Extracts the trailing request time and the request URL. A line whose
    request time cannot be found (or is empty) is considered broken and the
    function returns None; a line with a time but an unparseable request keeps
    the marker URL 'bad_request' so parsing quality can still be assessed
    statistically.

    :param line: UTF-8 decoded string of a log record.
    :return: dict with 'request_time' and 'request' keys, or None.
    """
    request_time_pat = r' \d*[.]?\d*$'
    request_pat = r'"(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH)\s(?P<url>.+?)\sHTTP/.+"\s'
    # Bug fix: re.search returns None for a line with no trailing time; the
    # original indexed the match unconditionally and raised TypeError instead
    # of returning None as documented.
    time_match = re.search(request_time_pat, line)
    if time_match is None:
        return None
    request_time = time_match[0].strip()
    if not request_time:
        return None
    request = re.findall(request_pat, line)
    return {'request_time': request_time,
            'request': request[0][1] if request else 'bad_request'}
def make_report_table(access_logs: object, report_length: int = 1000):
    """Compute per-URL statistics over parsed access-log records.

    For every URL the table holds:
    - count: number of visits;
    - count_perc: share of total visits;
    - time_sum / time_max / time_avg / time_med: response-time aggregates;
    - time_perc: share of the total response time of all URLs.

    (Doc fix: the old docstring documented an ``error_threshold`` parameter
    that never existed.)

    :param access_logs: iterable of records shaped {'request': url,
        'request_time': numeric string}.
    :param report_length: number of top rows (by total response time) to keep.
    :return: list of per-URL statistic dicts for the report template.
    """
    logging.info('Preparing data for statistics calculation...')
    urls = {}
    logging.info('Calculating statistics...')
    # groupby only groups consecutive items, so sort by the same key first.
    key = lambda x: x['request']
    for url, group in groupby(sorted(access_logs, key=key), key=key):
        req_times = [float(record['request_time']) for record in group]
        urls[url] = {'url': url,
                     'count': len(req_times),
                     'time_sum': sum(req_times),
                     'time_max': max(req_times),
                     'time_med': median(req_times),
                     'time_avg': mean(req_times)}
    # Generator expressions: no need to materialize intermediate lists.
    total_time = sum(stats['time_sum'] for stats in urls.values())
    total_records = sum(stats['count'] for stats in urls.values())
    for stats in urls.values():
        stats['time_perc'] = stats['time_sum'] / total_time
        stats['count_perc'] = stats['count'] / total_records
    report_table = sorted(urls.values(), key=lambda s: s['time_sum'], reverse=True)
    return report_table[:report_length]
def render_html_report(table: list,
                       report_path: str,
                       latest_log_date) -> str:
    """Render the HTML report from the 'report.html' template.

    The template is expected to live inside *report_path* and contain a
    ``$table_json`` placeholder which is replaced with the JSON-encoded table.

    :param table: report rows to embed as JSON.
    :param report_path: directory holding the template and the rendered reports.
    :param latest_log_date: datetime of the reported log; names the output file.
    :return: file name of the freshly rendered report.
    """
    # Bug fix: the original only created report_path *after* reading the
    # template from it, so the existence check was dead code (open() had
    # already raised). Ensure the directory exists up front.
    os.makedirs(report_path, exist_ok=True)
    with open(os.path.join(report_path, "report.html"), mode='r') as f:
        template = Template(f.read())
    new_report_name = f"report-{latest_log_date.strftime('%Y.%m.%d')}.html"
    with open(os.path.join(report_path, new_report_name), mode='w') as f:
        f.write(template.safe_substitute(table_json=json.dumps(table)))
    return new_report_name
def main(config: dict = None):
    """
    Main procedure flow:
    1. Looks for latest log;
    2. Checks if report for this log already exists;
    3. Parses the log;
    4. Makes report table;
    5. Renders HTML report.
    Every terminal outcome exits the process (sys.exit or
    log_finish_timestamp); this function never returns normally.
    :param config: Configuration dict.
    """
    # find latest access log
    latest_log = find_latest_log(log_dir=config['LOG_DIR'])
    if not all([latest_log.log_name, latest_log.log_date]):
        logging.info(f"No logs found in LOG_DIR: {config['LOG_DIR']}")
        sys.exit(0)
    logging.info(f"Latest log found: {latest_log.log_name}")
    # check if report has already been created for this access log
    if check_if_report_exists(latest_log=latest_log,
                              report_dir=config["REPORT_DIR"]):
        logging.info(f"Report for latest logfile {latest_log.log_name} already exists.")
        log_finish_timestamp()  # writes the success timestamp and exits 0
    logging.info("No report found for latest_log.")
    # parse log
    logging.info(f"Parsing {latest_log.log_name}...")
    access_logs = parse_log(log_path=os.path.join(config["LOG_DIR"], latest_log.log_name), parser=parse_line)
    if not access_logs:
        logging.info("Log parsing failed.")
        sys.exit(1)
    # make a report
    report_table = make_report_table(access_logs=access_logs,
                                     report_length=config['REPORT_SIZE'])
    if not report_table:
        logging.info("Report table construction failed.")
        sys.exit(1)
    # render html report
    logging.info("Rendering report...")
    render_result = render_html_report(table=report_table,
                                       report_path=config['REPORT_DIR'],
                                       latest_log_date=latest_log.log_date)
    if render_result:
        logging.info(f"New report {render_result} successfully rendered.")
        log_finish_timestamp()
    else:
        logging.error("Report render failed.")
        sys.exit(1)
if __name__ == "__main__":
    # check for config path, passed via --config
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('--config', default='./config/log_analyzer.conf')
    config = parse_config(default_config=default_config,
                          config_path=argument_parser.parse_args().config)
    # parse_config signals failure by returning an error-message string
    # instead of a dict, so a type check distinguishes the two cases.
    if isinstance(config, str):
        logging.error(config)
        sys.exit(1)
    # Log to MONITORING_LOG when configured, otherwise to stderr.
    logging.basicConfig(level=logging.INFO,
                        format='[%(asctime)s] %(levelname).1s %(message)s',
                        datefmt='%Y.%m.%d %H:%M:%S',
                        filename=config.get("MONITORING_LOG", None))
    logging.info("Starting log_analyzer")
    try:
        main(config=config)
    except Exception as e:
        # Last-resort guard so unexpected failures are at least logged.
        logging.error(f'Something is wrong: {e}')
| 32.569536 | 119 | 0.638878 |
import gzip
import json
import re
import logging
import sys
import datetime as dt
import os
from operator import itemgetter
from statistics import median, mean
import argparse
from time import time, sleep
from itertools import groupby
from collections import namedtuple
from string import Template
from functools import partial
# Fallback configuration; values are overridden by the JSON config file
# that parse_config() loads at startup.
default_config = {"REPORT_SIZE": 1000,
                  "REPORT_DIR": "./reports",
                  "LOG_DIR": "./log"}
def parse_config(default_config: dict = None,
                 config_path: str = None):
    """Build the effective configuration.

    Loads the JSON file at *config_path* and overlays it on *default_config*.
    On any problem (missing defaults, missing file, broken *DIR path) an
    error-message string is returned instead of a dict; callers must check
    the return type.

    :param default_config: built-in fallback settings.
    :param config_path: path to the JSON config file.
    :return: merged config dict, or an error-message string.
    """
    if not default_config:
        return "No default config provided."
    # Copy so that the module-level defaults are never mutated.
    config = {k: v for k, v in default_config.items()}
    if os.path.exists(config_path):
        with open(config_path, mode='r') as f:
            main_config = json.load(f)
    else:
        return "No config at given path."
    config.update(main_config)
    # Every value whose key ends in 'DIR' must point at an existing path.
    if not all((os.path.exists(config[k]) for k in config.keys() if k.endswith('DIR'))):
        return "Some config path is broken."
    return config
def find_latest_log(log_dir: str):
    """Return a namedtuple (log_name, log_date) for the newest
    'nginx-access-ui.log*' file in *log_dir*, or (None, None) if none found."""
    def get_log_date(log_name):
        # Extract the first 8-digit run as YYYYMMDD; None when absent.
        log_date = re.search('\d{8}', log_name)
        return dt.datetime.strptime(log_date.group(0), "%Y%m%d") if log_date else None
    log_name = log_date = None
    for item in os.listdir(log_dir):
        if 'nginx-access-ui.log' not in item:
            continue
        date = get_log_date(item)
        # NOTE(review): if `date` is None (matching file without a parseable
        # date) while log_date is already set, `date > log_date` raises
        # TypeError -- confirm input filenames always carry a date, or fix.
        if (not log_date) or (date > log_date):
            log_name, log_date = item, date
    return namedtuple('latest_log', ['log_name', 'log_date'])._make((log_name, log_date)
                                                                    if log_name
                                                                    else (None, None))
def log_finish_timestamp():
    """Write the current Unix time to ./monitoring/log_analyzer.ts and exit
    with status 0 -- callers never return from this function."""
    with open("./monitoring/log_analyzer.ts", mode='w') as f:
        f.write(f'{time()}')
    sys.exit(0)
def check_if_report_exists(latest_log, report_dir: str):
    """Return True if report-<YYYY.MM.DD>.html for *latest_log* already
    exists in *report_dir*, else False."""
    return os.path.exists(f'{report_dir}/report-{latest_log.log_date.strftime("%Y.%m.%d")}.html')
def parse_log(log_path: str, parser) -> object:
    """Apply *parser* to every line of the (possibly gzipped) log at
    *log_path* and return the list of results.

    NOTE(review): results may contain None entries for lines the parser
    rejects; downstream code must tolerate them -- confirm.
    """
    # Choose a text-mode opener depending on whether the file is gzipped.
    open_log = partial(gzip.open, mode='rt', encoding="utf-8") if log_path.endswith(".gz") else partial(open, mode='r')
    with open_log(log_path) as f:
        parse_results = [parser(line) for line in f]
    return parse_results
def parse_line(line: str):
    """Parse one access-log record into {'request_time', 'request'}.

    Returns None when the trailing request time is empty; a line with a time
    but no parseable request keeps the marker URL 'bad_request'.
    """
    log_contents = {}
    request_time_pat = ' \d*[.]?\d*$'
    request_pat = '"(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH)\s(?P<url>.+?)\sHTTP/.+"\s'
    # NOTE(review): re.search returns None for a line with no trailing time,
    # so indexing it here raises TypeError instead of returning None -- fix.
    log_contents['request_time'] = re.search(request_time_pat, line)[0].strip()
    request = re.findall(request_pat, line)
    log_contents['request'] = request[0][1] if request else 'bad_request'
    if log_contents['request_time']:
        return log_contents
    else:
        return None
def make_report_table(access_logs: object, report_length: int = 1000):
    """Aggregate per-URL statistics (count, count_perc, time_sum/max/med/avg,
    time_perc) over parsed access-log records.

    :param access_logs: iterable of {'request': url, 'request_time': str}.
    :param report_length: number of top rows (by total response time) to keep.
    :return: list of per-URL statistic dicts for the report template.
    """
    logging.info('Preparing data for statistics calculation...')
    urls = {}
    logging.info('Calculating statistics...')
    # groupby only groups consecutive items, hence the sort by the same key.
    for url, group in groupby(sorted(access_logs, key=lambda x: x['request']), key=lambda x: x['request']):
        req_times = [float(record['request_time']) for record in group]
        urls[url] = {"url": url,
                     'count': len(req_times),
                     'time_sum': sum(req_times),
                     'time_max': max(req_times),
                     'time_med': median(req_times),
                     'time_avg': mean(req_times)}
    total_time = sum([record['time_sum'] for record in urls.values()])
    total_records = sum([record['count'] for record in urls.values()])
    for url in urls.keys():
        urls[url]['time_perc'] = urls[url]['time_sum'] / total_time
        urls[url]['count_perc'] = urls[url]['count'] / total_records
    report_table = sorted(list(urls.values()), key=lambda k: k['time_sum'], reverse=True)
    return report_table[:report_length]
def render_html_report(table: list,
                       report_path: str,
                       latest_log_date) -> str:
    """Render the HTML report from the 'report.html' template in *report_path*
    by substituting ``$table_json`` with the JSON-encoded *table*.

    :return: file name of the freshly rendered report.
    """
    with open(os.path.join(report_path, "report.html"), mode='r') as f:
        report = f.read()
    new_report_name = f"report-{latest_log_date.strftime('%Y.%m.%d')}.html"
    # NOTE(review): this existence check is dead code -- the template was
    # already opened from report_path above, which raises if it is missing.
    if not os.path.exists(report_path):
        os.makedirs(report_path)
    with open(os.path.join(report_path, new_report_name), mode='w') as f:
        f.write(Template(report).safe_substitute(table_json=json.dumps(table)))
    return new_report_name
def main(config: dict = None):
    """Main flow: find latest log -> skip if report exists -> parse ->
    aggregate -> render HTML report. Every terminal outcome exits the
    process (sys.exit or log_finish_timestamp).

    :param config: configuration dict produced by parse_config().
    """
    # Find the latest access log.
    latest_log = find_latest_log(log_dir=config['LOG_DIR'])
    if not all([latest_log.log_name, latest_log.log_date]):
        logging.info(f"No logs found in LOG_DIR: {config['LOG_DIR']}")
        sys.exit(0)
    logging.info(f"Latest log found: {latest_log.log_name}")
    # Skip work when a report for this log already exists.
    if check_if_report_exists(latest_log=latest_log,
                              report_dir=config["REPORT_DIR"]):
        logging.info(f"Report for latest logfile {latest_log.log_name} already exists.")
        log_finish_timestamp()  # writes the success timestamp and exits 0
    logging.info("No report found for latest_log.")
    # Parse the log.
    logging.info(f"Parsing {latest_log.log_name}...")
    access_logs = parse_log(log_path=os.path.join(config["LOG_DIR"], latest_log.log_name), parser=parse_line)
    if not access_logs:
        logging.info("Log parsing failed.")
        sys.exit(1)
    # Aggregate statistics.
    report_table = make_report_table(access_logs=access_logs,
                                     report_length=config['REPORT_SIZE'])
    if not report_table:
        logging.info("Report table construction failed.")
        sys.exit(1)
    # Render the HTML report.
    logging.info("Rendering report...")
    render_result = render_html_report(table=report_table,
                                       report_path=config['REPORT_DIR'],
                                       latest_log_date=latest_log.log_date)
    if render_result:
        logging.info(f"New report {render_result} successfully rendered.")
        log_finish_timestamp()
    else:
        logging.error("Report render failed.")
        sys.exit(1)
if __name__ == "__main__":
    # Config path may be overridden via --config.
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument('--config', default='./config/log_analyzer.conf')
    config = parse_config(default_config=default_config,
                          config_path=argument_parser.parse_args().config)
    # parse_config signals failure by returning an error-message string.
    if isinstance(config, str):
        logging.error(config)
        sys.exit(1)
    # Log to MONITORING_LOG when configured, otherwise to stderr.
    logging.basicConfig(level=logging.INFO,
                        format='[%(asctime)s] %(levelname).1s %(message)s',
                        datefmt='%Y.%m.%d %H:%M:%S',
                        filename=config.get("MONITORING_LOG", None))
    logging.info("Starting log_analyzer")
    try:
        main(config=config)
    except Exception as e:
        # Last-resort guard so unexpected failures are at least logged.
        logging.error(f'Something is wrong: {e}')
| true | true |
f73b79b13a7fe69d626231f46dd7d033d19ba62c | 1,581 | py | Python | Service/website_service.py | H0R4T1U/SRI | ed0891c595551929ce649b38f722ed2a8b7a696d | [
"Apache-2.0"
] | 1 | 2021-12-04T08:40:30.000Z | 2021-12-04T08:40:30.000Z | Service/website_service.py | H0R4T1U/SRI | ed0891c595551929ce649b38f722ed2a8b7a696d | [
"Apache-2.0"
] | null | null | null | Service/website_service.py | H0R4T1U/SRI | ed0891c595551929ce649b38f722ed2a8b7a696d | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from Domain.website import Website
from selenium.webdriver.firefox.options import Options
from Repository.file_repository import FileRepository
class WebsiteService:
    """Scrapes registered websites with Selenium/BeautifulSoup and dumps the
    extracted fields to one CSV file per site."""

    def __init__(self, website_repository: FileRepository):
        # Repository that persists the Website records.
        self.__website_repository = website_repository

    def get_all(self):
        """Return all registered websites from the repository."""
        return self.__website_repository.get_all()

    def add(self,name, url, container_class, classes):
        """Create a Website record and store it in the repository."""
        website = Website(name,url, container_class, classes)
        self.__website_repository.add(website)

    def get_files_from_file(self):
        """Reload the repository's backing file."""
        self.__website_repository.read_file()

    def scrap(self):
        """Scrape each website: for every container div, extract the text of
        each configured class and append a row; write <name>.csv per site."""
        options = Options()
        options.headless = True
        driver = webdriver.Firefox(options=options)
        websites = self.get_all()
        for website in websites:
            data = {}
            df = pd.DataFrame()
            driver.get(website.url)
            content = driver.page_source
            soup = BeautifulSoup(content, features="html.parser")
            for div in soup.find_all('div', class_=website.container_class):
                for ScrapedClass in website.classes:
                    try:
                        data[f"{ScrapedClass}"] = div.find('div', class_=ScrapedClass).text
                    # NOTE(review): bare except hides real errors; the intent
                    # is to catch AttributeError when div.find returns None.
                    except:
                        data[f"{ScrapedClass}"] = "null"
                # NOTE(review): DataFrame.append is removed in pandas 2.0 --
                # collect rows and build the frame once instead.
                df = df.append(data, ignore_index=True)
            df.to_csv(f'{website.name}.csv', index=False, encoding='utf-8')
driver.quit() | 31.62 | 91 | 0.631246 | from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from Domain.website import Website
from selenium.webdriver.firefox.options import Options
from Repository.file_repository import FileRepository
class WebsiteService:
    """Scrapes registered websites with Selenium/BeautifulSoup and dumps the
    extracted fields to one CSV file per site."""

    def __init__(self, website_repository: FileRepository):
        # Repository that persists the Website records.
        self.__website_repository = website_repository

    def get_all(self):
        """Return all registered websites from the repository."""
        return self.__website_repository.get_all()

    def add(self, name, url, container_class, classes):
        """Create a Website record and store it in the repository."""
        website = Website(name, url, container_class, classes)
        self.__website_repository.add(website)

    def get_files_from_file(self):
        """Reload the repository's backing file."""
        self.__website_repository.read_file()

    def scrap(self):
        """Scrape every registered website.

        For each container div on a site, extract the text of each configured
        class (missing elements become "null") and write the rows to
        <website name>.csv.
        """
        options = Options()
        options.headless = True
        driver = webdriver.Firefox(options=options)
        # Bug fix: the original leaked the browser process if any scrape
        # raised; the driver is now always shut down.
        try:
            for website in self.get_all():
                rows = []
                driver.get(website.url)
                soup = BeautifulSoup(driver.page_source, features="html.parser")
                for container in soup.find_all('div', class_=website.container_class):
                    row = {}
                    for scraped_class in website.classes:
                        # Bug fix: replaced the bare `except:` (which swallowed
                        # every error, including KeyboardInterrupt) with an
                        # explicit None check on the lookup result.
                        element = container.find('div', class_=scraped_class)
                        row[f"{scraped_class}"] = element.text if element is not None else "null"
                    rows.append(row)
                # Bug fix: DataFrame.append is deprecated and removed in
                # pandas 2.0; build the frame from the collected rows once.
                pd.DataFrame(rows).to_csv(f'{website.name}.csv', index=False, encoding='utf-8')
        finally:
            driver.quit()
f73b7ad8716953e988a19b8a3da79faac94ccecb | 492 | py | Python | src/unittest/python/injection_tests.py | DMRSystem/dmr-cli | a884347facb07a4b5c5bd7486d95cc3a05340917 | [
"MIT"
] | null | null | null | src/unittest/python/injection_tests.py | DMRSystem/dmr-cli | a884347facb07a4b5c5bd7486d95cc3a05340917 | [
"MIT"
] | 2 | 2018-05-24T21:20:43.000Z | 2018-05-28T05:35:32.000Z | src/unittest/python/injection_tests.py | DMRSystem/dmr-cli | a884347facb07a4b5c5bd7486d95cc3a05340917 | [
"MIT"
] | null | null | null | import dependency_injector.providers as providers
import dependency_injector.containers as containers
class Engine(object):
    """Minimal dependency used to demonstrate provider-based injection."""

    def go(self):
        """Report that the engine is running."""
        message = "I'm going."
        return message
class Car(object):
    """Consumer whose Engine dependency arrives through the constructor."""

    def __init__(self, engine: Engine):
        self.engine = engine

    def go(self):
        """Drive: ask the injected engine to go and print its status."""
        status = self.engine.go()
        print(status)
# DI container wiring: Engine is produced by a Factory provider, and that
# provider is passed to Car's factory so each Car gets a fresh Engine.
class Injector(containers.DeclarativeContainer):
    engine = providers.Factory(Engine)
    car = providers.Factory(Car, engine)
# Resolve a Car (and, transitively, its Engine) from the container and use it.
my_car: Car = Injector.car()
my_car.go()
| 16.4 | 51 | 0.693089 | import dependency_injector.providers as providers
import dependency_injector.containers as containers
class Engine(object):
    """Minimal dependency used to demonstrate provider-based injection."""
    def go(self):
        # Status string proving the engine was reached.
        return "I'm going."
class Car(object):
    """Consumer whose Engine dependency arrives through the constructor."""
    def __init__(self, engine: Engine):
        self.engine = engine
    def go(self):
        # Delegate to the injected engine and echo its status.
        print(self.engine.go())
# DI container wiring: Engine is produced by a Factory provider, and that
# provider is passed to Car's factory so each Car gets a fresh Engine.
class Injector(containers.DeclarativeContainer):
    engine = providers.Factory(Engine)
    car = providers.Factory(Car, engine)
# Resolve a Car (and, transitively, its Engine) from the container and use it.
my_car: Car = Injector.car()
my_car.go()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.