max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
tools/nntool/expressions/symbolic/q15_quantization/scale_quantized.py | GreenWaves-Technologies/gap_sdk | 118 | 6625451 | <gh_stars>100-1000
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math
import numpy as np
from expressions.symbolic.function import Function
from ..basic import Cast, CompoundFunction, LShift, Mul
from ..symbol import c_headers, nargs
from .clip_norm import Norm
from .q15_scale_q_rec import Q15ScaleQRec
from .quantized_constant import QuantizedConstant
@nargs(2)
@c_headers('"Gap.h"')
class MulRN(Function):
def __init__(self, *args, norm=0, **kwargs):
self._norm = norm
super().__init__(*args, **kwargs)
def _impl(self, *args, **kwargs):
factor = int(math.pow(2, self._norm - 1))
si_args = [arg.astype(np.int16) for arg in args]
return (np.multiply(*si_args, dtype=np.int32) + factor) >> self._norm
def _py_expr(self, *args, **kwargs):
factor = int(math.pow(2, self._norm - 1))
return f'(np.multiply(({args[0]}).astype(np.int16), ({args[1]}).astype(np.int16), dtype=np.int32) + {factor})>>{self._norm}'
def _c_expr(self, *args, **kwargs):
return f"gap8_muluRN(({args[0]}),({args[1]}),{self._norm})"
@nargs(1)
@c_headers('"Gap.h"')
class ScaleQuantized(CompoundFunction):
def __init__(self, *args, from_qrec=None, to_qrec=None, num_bits=15, **kwargs):
self._from_qrec = from_qrec
self._to_qrec = to_qrec
self._qbias, self._qnorm = None, None
self._num_bits = num_bits
super().__init__(*args, qrec=self._to_qrec, **kwargs)
@property
def from_qrec(self):
return self._from_qrec
@property
def to_qrec(self):
return self._to_qrec
@property
def num_bits(self):
return self._num_bits
def _calc_bias(self):
if self._qbias is None or self._qnorm is None:
self._qbias, self._qnorm = Q15ScaleQRec.quantized_scale(
self._from_qrec,
self._to_qrec,
self._num_bits)
return self._qbias, self._qnorm
def _eval(self, *args, **kwargs):
sym = args[0]
# if the argument is another scalequantized do the scaling in one step
# this should be safe as we never go much above Q15 and the scaling step
# is also a Q15
if isinstance(sym, ScaleQuantized):
return ScaleQuantized(*sym.contents, from_qrec=sym.from_qrec, to_qrec=self.to_qrec, num_bits=min(self._num_bits, sym.num_bits))
# Check if we do nothing
if self._from_qrec == self._to_qrec:
return sym
if sym.is_zero:
if self._from_qrec.dtype != self._to_qrec.dtype:
return Cast(sym, dtype=self._to_qrec.dtype)
return sym
qbias, qnorm = self._calc_bias()
# make sure we are in int32 before doing these operations
if self._from_qrec.dtype != np.int32:
sym = Cast(sym, dtype=np.int32)
if qbias == 1:
# its a left shift
if qnorm < 0:
sym = LShift(
sym,
#pylint: disable=invalid-unary-operand-type
QuantizedConstant(-qnorm, dtype=np.int8),
name=self.name,
dtype=self._to_qrec.dtype
)
elif qnorm > 0:
sym = Norm(
sym,
QuantizedConstant(qnorm, dtype=np.int8),
name=self.name,
dtype=self._to_qrec.dtype
)
# if 0 do nothing
elif qnorm < 0:
sym = LShift(
Mul(
sym,
QuantizedConstant(qbias, dtype=np.int32),
dtype=self._to_qrec.dtype
),
#pylint: disable=invalid-unary-operand-type
QuantizedConstant(-qnorm, dtype=np.int8),
name=self.name
)
elif qnorm > 0:
sym = Norm(
Mul(
sym,
QuantizedConstant(qbias, dtype=np.int32),
dtype=self._to_qrec.dtype
),
QuantizedConstant(qnorm, dtype=np.int8),
name=self.name
)
else:
sym = Mul(
sym,
QuantizedConstant(qbias, dtype=np.int32),
name=self.name,
dtype=self._to_qrec.dtype
)
if self._to_qrec.dtype != np.int32:
sym = Cast(sym, dtype=self._to_qrec.dtype)
return sym
def __repr__(self) -> str:
return f"ScaleQuantized({self.contents[0]}, [{self._from_qrec}]->[{self._to_qrec}])"
| # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import math
import numpy as np
from expressions.symbolic.function import Function
from ..basic import Cast, CompoundFunction, LShift, Mul
from ..symbol import c_headers, nargs
from .clip_norm import Norm
from .q15_scale_q_rec import Q15ScaleQRec
from .quantized_constant import QuantizedConstant
@nargs(2)
@c_headers('"Gap.h"')
class MulRN(Function):
def __init__(self, *args, norm=0, **kwargs):
self._norm = norm
super().__init__(*args, **kwargs)
def _impl(self, *args, **kwargs):
factor = int(math.pow(2, self._norm - 1))
si_args = [arg.astype(np.int16) for arg in args]
return (np.multiply(*si_args, dtype=np.int32) + factor) >> self._norm
def _py_expr(self, *args, **kwargs):
factor = int(math.pow(2, self._norm - 1))
return f'(np.multiply(({args[0]}).astype(np.int16), ({args[1]}).astype(np.int16), dtype=np.int32) + {factor})>>{self._norm}'
def _c_expr(self, *args, **kwargs):
return f"gap8_muluRN(({args[0]}),({args[1]}),{self._norm})"
@nargs(1)
@c_headers('"Gap.h"')
class ScaleQuantized(CompoundFunction):
def __init__(self, *args, from_qrec=None, to_qrec=None, num_bits=15, **kwargs):
self._from_qrec = from_qrec
self._to_qrec = to_qrec
self._qbias, self._qnorm = None, None
self._num_bits = num_bits
super().__init__(*args, qrec=self._to_qrec, **kwargs)
@property
def from_qrec(self):
return self._from_qrec
@property
def to_qrec(self):
return self._to_qrec
@property
def num_bits(self):
return self._num_bits
def _calc_bias(self):
if self._qbias is None or self._qnorm is None:
self._qbias, self._qnorm = Q15ScaleQRec.quantized_scale(
self._from_qrec,
self._to_qrec,
self._num_bits)
return self._qbias, self._qnorm
def _eval(self, *args, **kwargs):
sym = args[0]
# if the argument is another scalequantized do the scaling in one step
# this should be safe as we never go much above Q15 and the scaling step
# is also a Q15
if isinstance(sym, ScaleQuantized):
return ScaleQuantized(*sym.contents, from_qrec=sym.from_qrec, to_qrec=self.to_qrec, num_bits=min(self._num_bits, sym.num_bits))
# Check if we do nothing
if self._from_qrec == self._to_qrec:
return sym
if sym.is_zero:
if self._from_qrec.dtype != self._to_qrec.dtype:
return Cast(sym, dtype=self._to_qrec.dtype)
return sym
qbias, qnorm = self._calc_bias()
# make sure we are in int32 before doing these operations
if self._from_qrec.dtype != np.int32:
sym = Cast(sym, dtype=np.int32)
if qbias == 1:
# its a left shift
if qnorm < 0:
sym = LShift(
sym,
#pylint: disable=invalid-unary-operand-type
QuantizedConstant(-qnorm, dtype=np.int8),
name=self.name,
dtype=self._to_qrec.dtype
)
elif qnorm > 0:
sym = Norm(
sym,
QuantizedConstant(qnorm, dtype=np.int8),
name=self.name,
dtype=self._to_qrec.dtype
)
# if 0 do nothing
elif qnorm < 0:
sym = LShift(
Mul(
sym,
QuantizedConstant(qbias, dtype=np.int32),
dtype=self._to_qrec.dtype
),
#pylint: disable=invalid-unary-operand-type
QuantizedConstant(-qnorm, dtype=np.int8),
name=self.name
)
elif qnorm > 0:
sym = Norm(
Mul(
sym,
QuantizedConstant(qbias, dtype=np.int32),
dtype=self._to_qrec.dtype
),
QuantizedConstant(qnorm, dtype=np.int8),
name=self.name
)
else:
sym = Mul(
sym,
QuantizedConstant(qbias, dtype=np.int32),
name=self.name,
dtype=self._to_qrec.dtype
)
if self._to_qrec.dtype != np.int32:
sym = Cast(sym, dtype=self._to_qrec.dtype)
return sym
def __repr__(self) -> str:
return f"ScaleQuantized({self.contents[0]}, [{self._from_qrec}]->[{self._to_qrec}])" | en | 0.873184 | # Copyright (C) 2020 GreenWaves Technologies, SAS # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # if the argument is another scalequantized do the scaling in one step # this should be safe as we never go much above Q15 and the scaling step # is also a Q15 # Check if we do nothing # make sure we are in int32 before doing these operations # its a left shift #pylint: disable=invalid-unary-operand-type # if 0 do nothing #pylint: disable=invalid-unary-operand-type | 1.884991 | 2 |
dephell/commands/self_autocomplete.py | Brishen/dephell | 0 | 6625452 | <filename>dephell/commands/self_autocomplete.py
# built-in
import os
from argparse import ArgumentParser
from pathlib import Path
from platform import platform
# external
from dephell_shells import Shells
# app
from ..actions import make_bash_autocomplete, make_zsh_autocomplete
from ..config import builders, get_data_dir
from .base import BaseCommand
class SelfAutocompleteCommand(BaseCommand):
"""Enable DepHell commands autocomplete for current shell.
"""
@staticmethod
def build_parser(parser) -> ArgumentParser:
builders.build_config(parser)
builders.build_output(parser)
return parser
def __call__(self):
shell = Shells(bin_path=None).shell_name
msg = 'Autocompletion installed. Please, reload your shell'
if shell == 'bash':
self._bash()
self.logger.info(msg)
return True
if shell == 'zsh':
self._zsh()
self.logger.info(msg)
return True
self.logger.error('unsupported shell', extra=dict(shell=shell))
return False
def _bash(self):
script = make_bash_autocomplete()
# Install completions to the correct location for modern bash-completion.
# This will be sourced on-demand by bash-completion as soon as dephell is
# completed for the first time.
# https://github.com/dephell/dephell/pull/132
lazy_paths = (
Path(os.getenv('BASH_COMPLETION_USER_DIR', '')) / 'completions',
Path(os.getenv('XDG_DATA_HOME', '')) / 'bash-completion' / 'completions',
Path.home() / '.local' / 'share' / 'bash-completion' / 'completions',
)
for path in lazy_paths:
if path.exists():
(path / 'dephell').write_text(script)
return
# https://github.com/dephell/dephell/pull/62
if platform().lower() == 'darwin':
# ref. https://itnext.io/programmable-completion-for-bash-on-macos-f81a0103080b
path = Path('/') / 'usr' / 'local' / 'etc' / 'bash_completion.d' / 'dephell.bash-completion'
else:
path = Path.home() / '.local' / 'etc' / 'bash_completion.d' / 'dephell.bash-completion'
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(script)
for rc_name in ('.bashrc', '.profile', '.bash_profile'):
rc_path = Path.home() / rc_name
if not rc_path.exists():
continue
if 'bash_completion.d/dephell.bash-completion' not in rc_path.read_text():
with rc_path.open('a') as stream:
stream.write('\n\nsource "{}"\n'.format(str(path)))
break
def _zsh(self):
script = make_zsh_autocomplete()
path = get_data_dir() / '_dephell_zsh_autocomplete'
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(script)
path.chmod(0o777)
rc_path = Path.home() / '.zshrc'
if str(path) not in rc_path.read_text():
with rc_path.open('a') as stream:
stream.write('\n\nsource "{}"\n'.format(str(path)))
| <filename>dephell/commands/self_autocomplete.py
# built-in
import os
from argparse import ArgumentParser
from pathlib import Path
from platform import platform
# external
from dephell_shells import Shells
# app
from ..actions import make_bash_autocomplete, make_zsh_autocomplete
from ..config import builders, get_data_dir
from .base import BaseCommand
class SelfAutocompleteCommand(BaseCommand):
"""Enable DepHell commands autocomplete for current shell.
"""
@staticmethod
def build_parser(parser) -> ArgumentParser:
builders.build_config(parser)
builders.build_output(parser)
return parser
def __call__(self):
shell = Shells(bin_path=None).shell_name
msg = 'Autocompletion installed. Please, reload your shell'
if shell == 'bash':
self._bash()
self.logger.info(msg)
return True
if shell == 'zsh':
self._zsh()
self.logger.info(msg)
return True
self.logger.error('unsupported shell', extra=dict(shell=shell))
return False
def _bash(self):
script = make_bash_autocomplete()
# Install completions to the correct location for modern bash-completion.
# This will be sourced on-demand by bash-completion as soon as dephell is
# completed for the first time.
# https://github.com/dephell/dephell/pull/132
lazy_paths = (
Path(os.getenv('BASH_COMPLETION_USER_DIR', '')) / 'completions',
Path(os.getenv('XDG_DATA_HOME', '')) / 'bash-completion' / 'completions',
Path.home() / '.local' / 'share' / 'bash-completion' / 'completions',
)
for path in lazy_paths:
if path.exists():
(path / 'dephell').write_text(script)
return
# https://github.com/dephell/dephell/pull/62
if platform().lower() == 'darwin':
# ref. https://itnext.io/programmable-completion-for-bash-on-macos-f81a0103080b
path = Path('/') / 'usr' / 'local' / 'etc' / 'bash_completion.d' / 'dephell.bash-completion'
else:
path = Path.home() / '.local' / 'etc' / 'bash_completion.d' / 'dephell.bash-completion'
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(script)
for rc_name in ('.bashrc', '.profile', '.bash_profile'):
rc_path = Path.home() / rc_name
if not rc_path.exists():
continue
if 'bash_completion.d/dephell.bash-completion' not in rc_path.read_text():
with rc_path.open('a') as stream:
stream.write('\n\nsource "{}"\n'.format(str(path)))
break
def _zsh(self):
script = make_zsh_autocomplete()
path = get_data_dir() / '_dephell_zsh_autocomplete'
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(script)
path.chmod(0o777)
rc_path = Path.home() / '.zshrc'
if str(path) not in rc_path.read_text():
with rc_path.open('a') as stream:
stream.write('\n\nsource "{}"\n'.format(str(path)))
| en | 0.878202 | # built-in # external # app Enable DepHell commands autocomplete for current shell. # Install completions to the correct location for modern bash-completion. # This will be sourced on-demand by bash-completion as soon as dephell is # completed for the first time. # https://github.com/dephell/dephell/pull/132 # https://github.com/dephell/dephell/pull/62 # ref. https://itnext.io/programmable-completion-for-bash-on-macos-f81a0103080b | 2.321889 | 2 |
membership/management/commands/paper_reminders.py | str4nd/sikteeri | 22 | 6625453 | # encoding: UTF-8
import logging
from tempfile import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from membership.models import BillingCycle
logger = logging.getLogger("paper_reminders")
class Command(BaseCommand):
help = 'Create paper reminders pdf'
def add_arguments(self, parser):
parser.add_argument('--member',
dest='member',
default=None,
help='Create pdf-reminder for user',
required=True)
def handle(self, *args, **options):
try:
with NamedTemporaryFile(suffix=".pdf", prefix='sikteeri', delete=False) as target_file:
pdfcontent = BillingCycle.get_pdf_reminders(memberid=options['member'])
if not pdfcontent:
print("No paper reminders to print")
return
target_file.write(pdfcontent)
target_file.close()
pdffile = target_file.name
if pdffile:
print(("pdf file created: %s" % pdffile))
else:
print("Cannot create pdffile")
except RuntimeError as e:
raise CommandError(e)
| # encoding: UTF-8
import logging
from tempfile import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from membership.models import BillingCycle
logger = logging.getLogger("paper_reminders")
class Command(BaseCommand):
help = 'Create paper reminders pdf'
def add_arguments(self, parser):
parser.add_argument('--member',
dest='member',
default=None,
help='Create pdf-reminder for user',
required=True)
def handle(self, *args, **options):
try:
with NamedTemporaryFile(suffix=".pdf", prefix='sikteeri', delete=False) as target_file:
pdfcontent = BillingCycle.get_pdf_reminders(memberid=options['member'])
if not pdfcontent:
print("No paper reminders to print")
return
target_file.write(pdfcontent)
target_file.close()
pdffile = target_file.name
if pdffile:
print(("pdf file created: %s" % pdffile))
else:
print("Cannot create pdffile")
except RuntimeError as e:
raise CommandError(e)
| en | 0.156115 | # encoding: UTF-8 | 2.163673 | 2 |
simtool/encode.py | hubzero/simtool | 0 | 6625454 | # @package hubzero-simtool
# @file encode.py
# @copyright Copyright (c) 2019-2021 The Regents of the University of California.
# @license http://opensource.org/licenses/MIT MIT
# @trademark HUBzero is a registered trademark of The Regents of the University of California.
#
import jsonpickle
# The purpose of this class is to abstract out
# the serialization/deserialization of data so
# that we may change the method in the future.
# abstract class (template)
# (Python doesn't need this, but added anyway
# for clarity.)
class Encoder:
def encode(self, val):
pass
def decode(self, val):
pass
class JsonEncoder(Encoder):
def encode(self, val):
return jsonpickle.dumps(val)
def decode(self, val):
return jsonpickle.loads(val)
| # @package hubzero-simtool
# @file encode.py
# @copyright Copyright (c) 2019-2021 The Regents of the University of California.
# @license http://opensource.org/licenses/MIT MIT
# @trademark HUBzero is a registered trademark of The Regents of the University of California.
#
import jsonpickle
# The purpose of this class is to abstract out
# the serialization/deserialization of data so
# that we may change the method in the future.
# abstract class (template)
# (Python doesn't need this, but added anyway
# for clarity.)
class Encoder:
def encode(self, val):
pass
def decode(self, val):
pass
class JsonEncoder(Encoder):
def encode(self, val):
return jsonpickle.dumps(val)
def decode(self, val):
return jsonpickle.loads(val)
| en | 0.816663 | # @package hubzero-simtool # @file encode.py # @copyright Copyright (c) 2019-2021 The Regents of the University of California. # @license http://opensource.org/licenses/MIT MIT # @trademark HUBzero is a registered trademark of The Regents of the University of California. # # The purpose of this class is to abstract out # the serialization/deserialization of data so # that we may change the method in the future. # abstract class (template) # (Python doesn't need this, but added anyway # for clarity.) | 2.549614 | 3 |
neo/test/iotest/test_tdtio.py | neurodebian/python-neo | 1 | 6625455 | <filename>neo/test/iotest/test_tdtio.py<gh_stars>1-10
# encoding: utf-8
"""
Tests of io.tdtio
"""
from __future__ import absolute_import, division
try:
import unittest2 as unittest
except ImportError:
import unittest
from ...io import TdtIO
import numpy
from .common_io_test import BaseTestIO
class TestTdtIOIO(BaseTestIO, unittest.TestCase, ):
ioclass = TdtIO
files_to_test = [ 'aep_05'
]
files_to_download = [
'aep_05/Block-1/aep_05_Block-1.Tbk',
'aep_05/Block-1/aep_05_Block-1.Tdx',
'aep_05/Block-1/aep_05_Block-1.tev',
'aep_05/Block-1/aep_05_Block-1.tsq',
#~ 'aep_05/Block-2/aep_05_Block-2.Tbk',
#~ 'aep_05/Block-2/aep_05_Block-2.Tdx',
#~ 'aep_05/Block-2/aep_05_Block-2.tev',
#~ 'aep_05/Block-2/aep_05_Block-2.tsq',
#~ 'aep_05/Block-3/aep_05_Block-3.Tbk',
#~ 'aep_05/Block-3/aep_05_Block-3.Tdx',
#~ 'aep_05/Block-3/aep_05_Block-3.tev',
#~ 'aep_05/Block-3/aep_05_Block-3.tsq',
]
if __name__ == "__main__":
unittest.main()
| <filename>neo/test/iotest/test_tdtio.py<gh_stars>1-10
# encoding: utf-8
"""
Tests of io.tdtio
"""
from __future__ import absolute_import, division
try:
import unittest2 as unittest
except ImportError:
import unittest
from ...io import TdtIO
import numpy
from .common_io_test import BaseTestIO
class TestTdtIOIO(BaseTestIO, unittest.TestCase, ):
ioclass = TdtIO
files_to_test = [ 'aep_05'
]
files_to_download = [
'aep_05/Block-1/aep_05_Block-1.Tbk',
'aep_05/Block-1/aep_05_Block-1.Tdx',
'aep_05/Block-1/aep_05_Block-1.tev',
'aep_05/Block-1/aep_05_Block-1.tsq',
#~ 'aep_05/Block-2/aep_05_Block-2.Tbk',
#~ 'aep_05/Block-2/aep_05_Block-2.Tdx',
#~ 'aep_05/Block-2/aep_05_Block-2.tev',
#~ 'aep_05/Block-2/aep_05_Block-2.tsq',
#~ 'aep_05/Block-3/aep_05_Block-3.Tbk',
#~ 'aep_05/Block-3/aep_05_Block-3.Tdx',
#~ 'aep_05/Block-3/aep_05_Block-3.tev',
#~ 'aep_05/Block-3/aep_05_Block-3.tsq',
]
if __name__ == "__main__":
unittest.main()
| ja | 0.240858 | # encoding: utf-8 Tests of io.tdtio #~ 'aep_05/Block-2/aep_05_Block-2.Tbk', #~ 'aep_05/Block-2/aep_05_Block-2.Tdx', #~ 'aep_05/Block-2/aep_05_Block-2.tev', #~ 'aep_05/Block-2/aep_05_Block-2.tsq', #~ 'aep_05/Block-3/aep_05_Block-3.Tbk', #~ 'aep_05/Block-3/aep_05_Block-3.Tdx', #~ 'aep_05/Block-3/aep_05_Block-3.tev', #~ 'aep_05/Block-3/aep_05_Block-3.tsq', | 2.228716 | 2 |
pynucastro/nucdata/AtomicMassEvaluation/extract_mass_excess.py | pyreaclib/pyreaclib | 6 | 6625456 | <reponame>pyreaclib/pyreaclib
import argparse
"""
This module extract the (A,Z, dm) tuples from `nubase_3.mas20.txt`, where:
:var A: is the atomic weight measured in atomic mass units.
:var Z: is the atomic number.
:var dm: is the mass difference A_{nuc}-A.
"""
#os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, help='Name of the input table')
parser.add_argument('-o', '--output', type=str, default='mass_excess2020', help='Name of the formatted mass escess table')
args = parser.parse_args()
finput = open(args.input, 'r')
for _ in range(25):
finput.readline()
fout = open(args.output+'.txt', 'w')
fout.write('# Mass difference evaluation table: {} \n'.format(args.output))
fout.write('# only ground states are tabulated \n')
fout.write('#\n')
fout.write('#\n')
fout.write('==A== {:18s} ==Z== {:10s} ======dm===== \n'.format(' ', ' '))
for line in finput:
isomer_string = line[7]
isomer = int(isomer_string)
if isomer != 0:
continue
A_string = line[0:3].strip()
Z_string = line[4:7].strip()
dm_string = line[18:31].strip().strip('#')
A = int(A_string)
Z = int(Z_string)
#dm is measured in keV, but we want MeV
dm = float(dm_string)/1.0e3
fout.write('{:3d} {:20s} {:3d} {:10s} {:15.6} \n'.format(A, ' ', Z, ' ', dm))
finput.close()
fout.close()
| import argparse
"""
This module extract the (A,Z, dm) tuples from `nubase_3.mas20.txt`, where:
:var A: is the atomic weight measured in atomic mass units.
:var Z: is the atomic number.
:var dm: is the mass difference A_{nuc}-A.
"""
#os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, help='Name of the input table')
parser.add_argument('-o', '--output', type=str, default='mass_excess2020', help='Name of the formatted mass escess table')
args = parser.parse_args()
finput = open(args.input, 'r')
for _ in range(25):
finput.readline()
fout = open(args.output+'.txt', 'w')
fout.write('# Mass difference evaluation table: {} \n'.format(args.output))
fout.write('# only ground states are tabulated \n')
fout.write('#\n')
fout.write('#\n')
fout.write('==A== {:18s} ==Z== {:10s} ======dm===== \n'.format(' ', ' '))
for line in finput:
isomer_string = line[7]
isomer = int(isomer_string)
if isomer != 0:
continue
A_string = line[0:3].strip()
Z_string = line[4:7].strip()
dm_string = line[18:31].strip().strip('#')
A = int(A_string)
Z = int(Z_string)
#dm is measured in keV, but we want MeV
dm = float(dm_string)/1.0e3
fout.write('{:3d} {:20s} {:3d} {:10s} {:15.6} \n'.format(A, ' ', Z, ' ', dm))
finput.close()
fout.close() | en | 0.676626 | This module extract the (A,Z, dm) tuples from `nubase_3.mas20.txt`, where: :var A: is the atomic weight measured in atomic mass units. :var Z: is the atomic number. :var dm: is the mass difference A_{nuc}-A. #os.path.dirname(os.path.realpath(__file__)) #dm is measured in keV, but we want MeV | 3.037997 | 3 |
tobiko/tests/functional/openstack/test_neutron.py | FedericoRessi/tobiko | 1 | 6625457 | <gh_stars>1-10
# Copyright (c) 2019 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import netaddr
import testtools
import tobiko
from tobiko import config
from tobiko.openstack import keystone
from tobiko.openstack import neutron
from tobiko.openstack import nova
from tobiko.openstack import stacks
from tobiko.openstack import tests
CONF = config.CONF
@keystone.skip_unless_has_keystone_credentials()
class NeutronApiTest(testtools.TestCase):
"""Tests network creation"""
#: Stack of resources with a network with a gateway router
stack = tobiko.required_setup_fixture(stacks.NetworkStackFixture)
def test_find_network_with_id(self):
network = neutron.find_network(id=self.stack.network_id)
self.assertEqual(self.stack.network_id, network['id'])
def test_find_floating_network(self):
floating_network = CONF.tobiko.neutron.floating_network
if not floating_network:
tobiko.skip_test('floating_network not configured')
network = neutron.find_network(name=floating_network)
self.assertIn(floating_network, [network['name'], network['id']])
self.assertEqual(self.stack.gateway_network_id, network['id'])
def test_list_networks(self):
networks = neutron.list_networks()
network_ids = {n['id'] for n in networks}
self.assertIn(self.stack.network_id, network_ids)
def test_list_subnets(self):
subnets = neutron.list_subnets()
subnets_ids = {s['id'] for s in subnets}
if self.stack.has_ipv4:
self.assertIn(self.stack.ipv4_subnet_id, subnets_ids)
if self.stack.has_ipv6:
self.assertIn(self.stack.ipv6_subnet_id, subnets_ids)
def test_list_subnet_cidrs(self):
subnets_cidrs = neutron.list_subnet_cidrs()
if self.stack.has_ipv4:
cidr = netaddr.IPNetwork(self.stack.ipv4_subnet_details['cidr'])
self.assertIn(cidr, subnets_cidrs)
if self.stack.has_ipv6:
cidr = netaddr.IPNetwork(self.stack.ipv6_subnet_details['cidr'])
self.assertIn(cidr, subnets_cidrs)
def test_get_network(self):
network = neutron.get_network(self.stack.network_id)
self.assertEqual(self.stack.network_id, network['id'])
self.assertEqual(self.stack.port_security_enabled,
network['port_security_enabled'])
if self.stack.has_ipv4:
self.assertIn(self.stack.ipv4_subnet_id, network['subnets'])
else:
self.assertNotIn(self.stack.ipv4_subnet_id, network['subnets'])
if self.stack.has_ipv6:
self.assertIn(self.stack.ipv6_subnet_id, network['subnets'])
else:
self.assertNotIn(self.stack.ipv6_subnet_id, network['subnets'])
def test_create_network(self):
network = neutron.create_network(name=self.id())
self.addCleanup(neutron.delete_network, network['id'])
self.assertIsInstance(network['id'], str)
self.assertNotEqual('', network['id'])
self.assertEqual(self.id(), network['name'])
observed = neutron.get_network(network['id'])
self.assertEqual(network['id'], observed['id'])
def test_delete_network(self):
network = neutron.create_network(name=self.id())
neutron.delete_network(network['id'])
self.assertRaises(neutron.NoSuchNetwork, neutron.get_network,
network['id'])
def test_get_router(self):
if not self.stack.has_gateway:
tobiko.skip_test(f"Stack {self.stack.stack_name} has no gateway "
"router")
router = neutron.get_router(self.stack.gateway_id)
self.assertEqual(self.stack.gateway_id, router['id'])
def test_get_ipv4_subnet(self):
if not self.stack.has_ipv4:
tobiko.skip_test(
"Stack {self.stack.stack_name} has no IPv4 subnet")
subnet = neutron.get_subnet(self.stack.ipv4_subnet_id)
self.assertEqual(self.stack.ipv4_subnet_id, subnet['id'])
self.assertEqual(self.stack.ipv4_subnet_details, subnet)
def test_get_ipv6_subnet(self):
if not self.stack.has_ipv6:
tobiko.skip_test(
"Stack {self.stack.stack_name} has no IPv6 subnet")
subnet = neutron.get_subnet(self.stack.ipv6_subnet_id)
self.assertEqual(self.stack.ipv6_subnet_id, subnet['id'])
self.assertEqual(self.stack.ipv6_subnet_details, subnet)
def test_find_agents_with_binary(self):
agent = neutron.list_agents().first
agents = neutron.list_agents(binary=agent['binary'])
self.assertIn(agent['id'], {a['id'] for a in agents})
@keystone.skip_unless_has_keystone_credentials()
class PortTest(testtools.TestCase):
#: Stack of resources with a network with a gateway router
stack = tobiko.required_setup_fixture(stacks.CirrosServerStackFixture)
def test_list_port_addresses(self, ip_version=None):
port = neutron.find_port(device_id=self.stack.server_id)
port_addresses = neutron.list_port_ip_addresses(
port=port,
ip_version=ip_version)
server_addresses = nova.list_server_ip_addresses(
server=self.stack.server_details,
ip_version=ip_version,
address_type='fixed')
self.assertEqual(set(server_addresses), set(port_addresses))
if ip_version:
self.assertEqual(
port_addresses.with_attributes(version=ip_version),
port_addresses)
def test_list_port_addresses_with_ipv4(self):
self.test_list_port_addresses(ip_version=4)
def test_list_port_addresses_with_ipv6(self):
self.test_list_port_addresses(ip_version=6)
def test_find_port_address_with_ip_version(self):
port = neutron.find_port(device_id=self.stack.server_id)
server_addresses = nova.list_server_ip_addresses(
server=self.stack.server_details,
address_type='fixed')
for server_address in server_addresses:
port_address = neutron.find_port_ip_address(
port=port,
ip_version=server_address.version,
unique=True)
self.assertEqual(server_address, port_address)
def test_find_port_address_with_subnet_id(self):
port = neutron.find_port(device_id=self.stack.server_id)
for subnet in neutron.list_subnets(network_id=port['network_id']):
port_address = neutron.find_port_ip_address(
port=port, subnet_id=subnet['id'], unique=True)
cidr = netaddr.IPNetwork(subnet['cidr'])
self.assertIn(port_address, cidr)
@keystone.skip_unless_has_keystone_credentials()
class AgentTest(testtools.TestCase):
    """Tests for the skip_if_missing_networking_agents decorator and agent health."""

    def test_skip_if_missing_agents(self, count=1, should_skip=False,
                                    **params):
        """Check the decorator skips exactly when agents are missing.

        When ``should_skip`` is True the decorated function must raise
        skipException with a message naming the missing agent count;
        otherwise the function body runs and its failure propagates.
        """
        # fix: local was misspelled 'expected_exeption'
        if should_skip:
            expected_exception = self.skipException
        else:
            expected_exception = self.failureException

        @neutron.skip_if_missing_networking_agents(count=count, **params)
        def method():
            # self.fail raises failureException itself; the original
            # 'raise self.fail(...)' statement's raise was dead code
            self.fail('Not skipped')

        exception = self.assertRaises(expected_exception, method)
        if should_skip:
            agents = neutron.list_agents(**params)
            message = "missing {!r} agent(s)".format(count - len(agents))
            if params:
                message += " with {!s}".format(
                    ','.join('{!s}={!r}'.format(k, v)
                             for k, v in params.items()))
            self.assertEqual(message, str(exception))
        else:
            self.assertEqual('Not skipped', str(exception))

    def test_skip_if_missing_agents_with_no_agents(self):
        """A binary that never exists must trigger the skip."""
        self.test_skip_if_missing_agents(binary='never-never-land',
                                         should_skip=True)

    def test_skip_if_missing_agents_with_big_count(self):
        """An absurdly high required agent count must trigger the skip."""
        self.test_skip_if_missing_agents(count=1000000,
                                         should_skip=True)

    def test_neutron_agents_are_alive(self):
        """All deployed Neutron agents must report as alive."""
        agents = tests.test_neutron_agents_are_alive()
        # check has agents and they are all alive
        self.assertNotEqual([], agents)
        self.assertNotEqual([], agents.with_items(alive=True))
# Copyright (c) 2019 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import netaddr
import testtools
import tobiko
from tobiko import config
from tobiko.openstack import keystone
from tobiko.openstack import neutron
from tobiko.openstack import nova
from tobiko.openstack import stacks
from tobiko.openstack import tests
CONF = config.CONF
@keystone.skip_unless_has_keystone_credentials()
class NeutronApiTest(testtools.TestCase):
    """Tests network creation"""

    #: Stack of resources with a network with a gateway router
    stack = tobiko.required_setup_fixture(stacks.NetworkStackFixture)

    def test_find_network_with_id(self):
        """find_network by id returns the stack's network."""
        network = neutron.find_network(id=self.stack.network_id)
        self.assertEqual(self.stack.network_id, network['id'])

    def test_find_floating_network(self):
        """The configured floating network is found and is the gateway net."""
        floating_network = CONF.tobiko.neutron.floating_network
        if not floating_network:
            tobiko.skip_test('floating_network not configured')
        network = neutron.find_network(name=floating_network)
        self.assertIn(floating_network, [network['name'], network['id']])
        self.assertEqual(self.stack.gateway_network_id, network['id'])

    def test_list_networks(self):
        """list_networks includes the stack's network."""
        networks = neutron.list_networks()
        network_ids = {n['id'] for n in networks}
        self.assertIn(self.stack.network_id, network_ids)

    def test_list_subnets(self):
        """list_subnets includes the stack's IPv4/IPv6 subnets if present."""
        subnets = neutron.list_subnets()
        subnets_ids = {s['id'] for s in subnets}
        if self.stack.has_ipv4:
            self.assertIn(self.stack.ipv4_subnet_id, subnets_ids)
        if self.stack.has_ipv6:
            self.assertIn(self.stack.ipv6_subnet_id, subnets_ids)

    def test_list_subnet_cidrs(self):
        """list_subnet_cidrs includes the stack's subnet CIDRs."""
        subnets_cidrs = neutron.list_subnet_cidrs()
        if self.stack.has_ipv4:
            cidr = netaddr.IPNetwork(self.stack.ipv4_subnet_details['cidr'])
            self.assertIn(cidr, subnets_cidrs)
        if self.stack.has_ipv6:
            cidr = netaddr.IPNetwork(self.stack.ipv6_subnet_details['cidr'])
            self.assertIn(cidr, subnets_cidrs)

    def test_get_network(self):
        """get_network returns matching id, port security and subnets."""
        network = neutron.get_network(self.stack.network_id)
        self.assertEqual(self.stack.network_id, network['id'])
        self.assertEqual(self.stack.port_security_enabled,
                         network['port_security_enabled'])
        if self.stack.has_ipv4:
            self.assertIn(self.stack.ipv4_subnet_id, network['subnets'])
        else:
            self.assertNotIn(self.stack.ipv4_subnet_id, network['subnets'])
        if self.stack.has_ipv6:
            self.assertIn(self.stack.ipv6_subnet_id, network['subnets'])
        else:
            self.assertNotIn(self.stack.ipv6_subnet_id, network['subnets'])

    def test_create_network(self):
        """A created network can be fetched back with the same id/name."""
        network = neutron.create_network(name=self.id())
        self.addCleanup(neutron.delete_network, network['id'])
        self.assertIsInstance(network['id'], str)
        self.assertNotEqual('', network['id'])
        self.assertEqual(self.id(), network['name'])
        observed = neutron.get_network(network['id'])
        self.assertEqual(network['id'], observed['id'])

    def test_delete_network(self):
        """Fetching a deleted network raises NoSuchNetwork."""
        network = neutron.create_network(name=self.id())
        neutron.delete_network(network['id'])
        self.assertRaises(neutron.NoSuchNetwork, neutron.get_network,
                          network['id'])

    def test_get_router(self):
        """get_router returns the stack's gateway router."""
        if not self.stack.has_gateway:
            tobiko.skip_test(f"Stack {self.stack.stack_name} has no gateway "
                             "router")
        router = neutron.get_router(self.stack.gateway_id)
        self.assertEqual(self.stack.gateway_id, router['id'])

    def test_get_ipv4_subnet(self):
        """get_subnet returns the stack's IPv4 subnet details."""
        if not self.stack.has_ipv4:
            # fix: skip message was a plain string missing the f prefix,
            # so '{self.stack.stack_name}' was shown literally
            tobiko.skip_test(
                f"Stack {self.stack.stack_name} has no IPv4 subnet")
        subnet = neutron.get_subnet(self.stack.ipv4_subnet_id)
        self.assertEqual(self.stack.ipv4_subnet_id, subnet['id'])
        self.assertEqual(self.stack.ipv4_subnet_details, subnet)

    def test_get_ipv6_subnet(self):
        """get_subnet returns the stack's IPv6 subnet details."""
        if not self.stack.has_ipv6:
            # fix: same missing f prefix as in test_get_ipv4_subnet
            tobiko.skip_test(
                f"Stack {self.stack.stack_name} has no IPv6 subnet")
        subnet = neutron.get_subnet(self.stack.ipv6_subnet_id)
        self.assertEqual(self.stack.ipv6_subnet_id, subnet['id'])
        self.assertEqual(self.stack.ipv6_subnet_details, subnet)

    def test_find_agents_with_binary(self):
        """Filtering agents by an existing binary returns that agent."""
        agent = neutron.list_agents().first
        agents = neutron.list_agents(binary=agent['binary'])
        self.assertIn(agent['id'], {a['id'] for a in agents})
@keystone.skip_unless_has_keystone_credentials()
class PortTest(testtools.TestCase):
    """Exercises Neutron port IP-address lookup helpers against a server."""

    #: Stack of resources with a network with a gateway router
    stack = tobiko.required_setup_fixture(stacks.CirrosServerStackFixture)

    def test_list_port_addresses(self, ip_version=None):
        """The port's addresses equal the server's fixed addresses."""
        server_port = neutron.find_port(device_id=self.stack.server_id)
        addresses_from_port = neutron.list_port_ip_addresses(
            port=server_port, ip_version=ip_version)
        addresses_from_server = nova.list_server_ip_addresses(
            server=self.stack.server_details, ip_version=ip_version,
            address_type='fixed')
        self.assertEqual(set(addresses_from_server), set(addresses_from_port))
        if ip_version:
            # with a version filter, every returned address carries it
            self.assertEqual(
                addresses_from_port.with_attributes(version=ip_version),
                addresses_from_port)

    def test_list_port_addresses_with_ipv4(self):
        """IPv4-only variant of test_list_port_addresses."""
        self.test_list_port_addresses(ip_version=4)

    def test_list_port_addresses_with_ipv6(self):
        """IPv6-only variant of test_list_port_addresses."""
        self.test_list_port_addresses(ip_version=6)

    def test_find_port_address_with_ip_version(self):
        """Every fixed server address is found on the port by IP version."""
        server_port = neutron.find_port(device_id=self.stack.server_id)
        fixed_addresses = nova.list_server_ip_addresses(
            server=self.stack.server_details, address_type='fixed')
        for fixed_address in fixed_addresses:
            found_address = neutron.find_port_ip_address(
                port=server_port, ip_version=fixed_address.version,
                unique=True)
            self.assertEqual(fixed_address, found_address)

    def test_find_port_address_with_subnet_id(self):
        """Each subnet of the port's network yields an address in its CIDR."""
        server_port = neutron.find_port(device_id=self.stack.server_id)
        for subnet in neutron.list_subnets(network_id=server_port['network_id']):
            found_address = neutron.find_port_ip_address(
                port=server_port, subnet_id=subnet['id'], unique=True)
            self.assertIn(found_address, netaddr.IPNetwork(subnet['cidr']))
@keystone.skip_unless_has_keystone_credentials()
class AgentTest(testtools.TestCase):
    """Tests for the skip_if_missing_networking_agents decorator and agent health."""

    def test_skip_if_missing_agents(self, count=1, should_skip=False,
                                    **params):
        """Check the decorator skips exactly when agents are missing.

        When ``should_skip`` is True the decorated function must raise
        skipException with a message naming the missing agent count;
        otherwise the function body runs and its failure propagates.
        """
        # fix: local was misspelled 'expected_exeption'
        if should_skip:
            expected_exception = self.skipException
        else:
            expected_exception = self.failureException

        @neutron.skip_if_missing_networking_agents(count=count, **params)
        def method():
            # self.fail raises failureException itself; the original
            # 'raise self.fail(...)' statement's raise was dead code
            self.fail('Not skipped')

        exception = self.assertRaises(expected_exception, method)
        if should_skip:
            agents = neutron.list_agents(**params)
            message = "missing {!r} agent(s)".format(count - len(agents))
            if params:
                message += " with {!s}".format(
                    ','.join('{!s}={!r}'.format(k, v)
                             for k, v in params.items()))
            self.assertEqual(message, str(exception))
        else:
            self.assertEqual('Not skipped', str(exception))

    def test_skip_if_missing_agents_with_no_agents(self):
        """A binary that never exists must trigger the skip."""
        self.test_skip_if_missing_agents(binary='never-never-land',
                                         should_skip=True)

    def test_skip_if_missing_agents_with_big_count(self):
        """An absurdly high required agent count must trigger the skip."""
        self.test_skip_if_missing_agents(count=1000000,
                                         should_skip=True)

    def test_neutron_agents_are_alive(self):
        """All deployed Neutron agents must report as alive."""
        agents = tests.test_neutron_agents_are_alive()
        # check has agents and they are all alive
        self.assertNotEqual([], agents)
        self.assertNotEqual([], agents.with_items(alive=True))
from datetime import datetime
import json
import os
from asgiref.sync import async_to_sync
import channels.layers
from django.contrib import messages
from django.contrib.admin.utils import NestedObjects
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db import DEFAULT_DB_ALIAS, models, transaction
from django.db.models import Q
from django.forms import formset_factory
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect
from django.shortcuts import get_object_or_404, Http404, render
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from guardian.shortcuts import get_objects_for_user
from guardian.utils import get_anonymous_user
from reversion import revisions
from reversion.models import Version
from sendfile import sendfile
from _1327 import settings
from _1327.documents.forms import get_permission_form
from _1327.documents.models import Attachment, Document, TemporaryDocumentText
from _1327.documents.utils import delete_cascade_to_json, delete_old_empty_pages, get_model_function, get_new_autosaved_pages_for_user, \
handle_attachment, handle_autosave, handle_edit, prepare_versions
from _1327.information_pages.models import InformationDocument
from _1327.information_pages.forms import InformationDocumentForm # noqa
from _1327.main.utils import convert_markdown, document_permission_overview
from _1327.minutes.models import MinutesDocument
from _1327.minutes.forms import MinutesDocumentForm # noqa
from _1327.polls.models import Poll
from _1327.polls.forms import PollForm # noqa
from _1327.user_management.shortcuts import check_permissions
def create(request, document_type):
    """Create an empty placeholder document of the given type and open its edit view.

    ``document_type`` is the lowercase model name of a Document subclass.
    Requires the model's "add" permission; raises PermissionDenied otherwise.
    """
    content_type = ContentType.objects.get(model=document_type)
    if request.user.has_perm("{app}.add_{model}".format(app=content_type.app_label, model=content_type.model)):
        model_class = content_type.model_class()
        # presumably removes stale, never-saved placeholder pages — see delete_old_empty_pages
        delete_old_empty_pages()
        title_en, title_de = model_class.generate_new_title()
        # timestamped slug keeps concurrently created placeholders unique
        url_title = "temp_{}_{}".format(datetime.utcnow().strftime("%d%m%Y%H%M%S%f"), model_class.generate_default_slug(title_en))
        kwargs = {
            'url_title': url_title,
            'title_en': title_en,
            'title_de': title_de,
        }
        if hasattr(model_class, 'author'):
            # only some document types (e.g. minutes) track an author
            kwargs['author'] = request.user
        model_class.objects.get_or_create(**kwargs)
        new_autosaved_pages = get_new_autosaved_pages_for_user(request.user, content_type)
        initial = {
            'comment': _("Created document"),
        }
        # delegate rendering/saving of the new page to the regular edit view
        return edit(request, url_title, new_autosaved_pages, initial)
    else:
        raise PermissionDenied
def edit(request, title, new_autosaved_pages=None, initial=None):
    """Render and process the edit form (plus optional formset) for a document.

    Also called directly from create() for brand-new placeholder pages, in
    which case ``new_autosaved_pages``/``initial`` are set and the permission
    check falls back to the "add" permission.
    """
    document = get_object_or_404(Document, url_title=title)
    content_type = ContentType.objects.get_for_model(document)
    if document.has_perms():
        check_permissions(document, request.user, [document.edit_permission_name])
    elif new_autosaved_pages is None and initial is None:
        # page is not new and has no permissions set, it is likely that somebody tries to view an autosaved page
        # users are only allowed to view autosaved pages if they have the "add" permission for documents
        check_permissions(document, request.user, [document.add_permission_name])
    # if the edit form has a formset we will initialize it here
    # NOTE(review): this local shadows the module-level django.forms.formset_factory import
    formset_factory = document.Form.get_formset_factory()
    formset = formset_factory(request.POST or None, instance=document) if formset_factory is not None else None
    if formset is not None:
        # document types with a formset ship their own edit template
        template_name = "{app}_edit.html".format(app=content_type.app_label)
    else:
        template_name = "documents_edit.html"
    try:
        # optional ?group=<id> selects one of the user's groups as creation group
        creation_group = request.user.groups.get(id=request.GET.get('group', False))
    except Group.DoesNotExist:
        creation_group = None
    success, form = handle_edit(request, document, formset, initial, creation_group=creation_group)
    __, attachment_form, __ = handle_attachment(request, document)
    if success:
        messages.success(request, _("Successfully saved changes"))
        return HttpResponseRedirect(reverse(document.get_view_url_name(), args=[document.url_title]))
    else:
        return render(request, template_name, {
            'document': document,
            'form': form,
            'attachment_form': attachment_form,
            'active_page': 'edit',
            'creation': document.is_in_creation,
            'new_autosaved_pages': new_autosaved_pages,
            'permission_overview': document_permission_overview(request.user, document),
            'supported_image_types': settings.SUPPORTED_IMAGE_TYPES,
            'formset': formset,
        })
def autosave(request, title):
    """Persist an autosave snapshot for a document and return its preview URL.

    Anonymous users may not autosave.
    """
    if request.user.is_anonymous or request.user == get_anonymous_user():
        raise PermissionDenied()
    document = None
    try:
        document = get_object_or_404(Document, url_title=title)
        if document.has_perms():
            check_permissions(document, request.user, [document.edit_permission_name])
    except Document.DoesNotExist:
        # NOTE(review): this branch looks dead — get_object_or_404 raises
        # Http404, not Document.DoesNotExist, so a missing document 404s
        # instead of falling through with document=None. Confirm intent;
        # handle_autosave(request, None) below suggests None was expected.
        pass
    handle_autosave(request, document)
    data = {
        'preview_url': request.build_absolute_uri(
            reverse('documents:preview') + '?hash_value=' + document.hash_value
        )
    }
    return HttpResponse(json.dumps(data))
def versions(request, title):
    """Show the revision history of a document (edit permission required)."""
    document = get_object_or_404(Document, url_title=title)
    check_permissions(document, request.user, [document.edit_permission_name])
    version_list = prepare_versions(document)
    revertible = document.can_be_reverted
    if not revertible:
        messages.warning(request, _('This Document can not be reverted!'))
    context = {
        'document': document,
        'versions': version_list,
        'active_page': 'versions',
        'can_be_reverted': revertible,
        'permission_overview': document_permission_overview(request.user, document),
    }
    return render(request, 'documents_versions.html', context)
def view(request, title):
    """Render a document, delegating to a type-specific view if one exists.

    Falls back to the other language's text when the active language has
    no content, showing a warning message.
    """
    document = get_object_or_404(Document, url_title=title)
    content_type = ContentType.objects.get_for_model(document)
    check_permissions(document, request.user, [document.view_permission_name])
    try:
        # document types may provide their own 'view' implementation
        function = get_model_function(content_type, 'view')
        return function(request, title)
    except (ImportError, AttributeError):
        pass
    if document.text == "" and (document.text_en != "" or document.text_de != ""):
        messages.warning(request, _('The requested document is not available in the selected language. It will be shown in the available language instead.'))
        # pick the first non-empty translation (German preferred)
        text, toc = convert_markdown(next((text for text in (document.text_de, document.text_en) if text != ""), ""))
    else:
        text, toc = convert_markdown(document.text)
    return render(request, 'documents_base.html', {
        'document': document,
        'text': text,
        'toc': toc,
        'attachments': document.attachments.filter(no_direct_download=False).order_by('index'),
        'active_page': 'view',
        'view_page': True,
        'permission_overview': document_permission_overview(request.user, document),
    })
def permissions(request, title):
    """Show and process the per-group permission editor for a document."""
    document = get_object_or_404(Document, url_title=title)
    content_type = ContentType.objects.get_for_model(document)
    check_permissions(document, request.user, [document.edit_permission_name])
    if not document.show_permissions_editor():
        raise PermissionDenied()
    PermissionForm = get_permission_form(document)
    # one form per group, no extra blank forms
    PermissionFormset = formset_factory(get_permission_form(document), extra=0)
    initial_data = PermissionForm.prepare_initial_data(Group.objects.all(), content_type, document)
    formset = PermissionFormset(request.POST or None, initial=initial_data)
    if request.POST and formset.is_valid():
        for form in formset:
            form.save(document)
        messages.success(request, _("Permissions have been changed successfully."))
        # the user may have removed their own rights — redirect to the best
        # page they can still reach (permissions > view > index)
        if request.user.has_perm(document.edit_permission_name, document):
            return HttpResponseRedirect(reverse(document.get_permissions_url_name(), args=[document.url_title]))
        if request.user.has_perm(document.view_permission_name, document):
            return HttpResponseRedirect(reverse(document.get_view_url_name(), args=[document.url_title]))
        return HttpResponseRedirect(reverse('index'))
    return render(request, 'documents_permissions.html', {
        'document': document,
        'formset_header': PermissionForm.header(content_type),
        'formset': formset,
        'active_page': 'permissions',
        'permission_overview': document_permission_overview(request.user, document),
    })
def publish(request, title, next_state_id):
    """Advance a document to the given publication state (edit permission required)."""
    document = get_object_or_404(Document, url_title=title)
    check_permissions(document, request.user, [document.edit_permission_name])
    if not document.show_publish_button():
        raise PermissionDenied()
    document.publish(next_state_id)
    messages.success(request, _("Minutes document has been published."))
    return HttpResponseRedirect(reverse(document.get_view_url_name(), args=[document.url_title]))
def attachments(request, title):
    """Show the attachment management page and process attachment uploads."""
    document = get_object_or_404(Document, url_title=title)
    check_permissions(document, request.user, [document.edit_permission_name])
    success, form, __ = handle_attachment(request, document)
    if success:
        messages.success(request, _("File has been uploaded successfully!"))
        # redirect so a page reload does not re-submit the upload
        return HttpResponseRedirect(reverse(document.get_attachments_url_name(), args=[document.url_title]))
    else:
        return render(request, "documents_attachments.html", {
            'document': document,
            'edit_url': reverse(document.get_attachments_url_name(), args=[document.url_title]),
            'form': form,
            'attachments': document.attachments.all().order_by('index'),
            'active_page': 'attachments',
            'permission_overview': document_permission_overview(request.user, document),
        })
def render_text(request, title):
    """Render POSTed markdown to HTML, broadcast it to preview clients, return it.

    The rendered text is pushed over the channels group named after the
    document's hash_value so open preview pages update live.
    """
    if request.method != 'POST':
        raise SuspiciousOperation
    document = get_object_or_404(Document, url_title=title)
    if document.has_perms():
        check_permissions(document, request.user, [document.view_permission_name, document.edit_permission_name])
    text, __ = convert_markdown(request.POST['text'])
    channel_layer = channels.layers.get_channel_layer()
    async_to_sync(channel_layer.group_send)(
        document.hash_value,
        {
            'type': 'update_preview',
            'message': text,
        }
    )
    return HttpResponse(text, content_type='text/plain')
def _find_viewable_by_title(user, model_class, query):
    """Return *model_class* objects whose DE/EN title contains *query* and
    which *user* has the model's view permission for."""
    return get_objects_for_user(
        user,
        model_class.VIEW_PERMISSION_NAME,
        klass=model_class.objects.filter(
            Q(title_de__icontains=query) | Q(title_en__icontains=query)
        )
    )


def search(request):
    """Title search over minutes, information documents and polls.

    Expects GET parameters 'q' (required) and optional 'id_only'; results
    are rendered as JSON via the ajax_search_api.json template and limited
    to objects the requesting user may view.
    """
    if not request.GET:
        raise Http404
    id_only = request.GET.get('id_only', False)
    query = request.GET['q']
    # the same permission-filtered title query, once per document type
    minutes = _find_viewable_by_title(request.user, MinutesDocument, query)
    information_documents = _find_viewable_by_title(request.user, InformationDocument, query)
    polls = _find_viewable_by_title(request.user, Poll, query)
    return render(request, "ajax_search_api.json", {
        'minutes': minutes,
        'information_documents': information_documents,
        'polls': polls,
        'id_only': id_only,
    })
def revert(request):
    """AJAX endpoint reverting a document to an earlier revision.

    Expects POST parameters 'id' (Version pk) and 'url_title'. Recreates the
    document from the version's field dict inside a new revision and returns
    the URL of the versions page.
    """
    if not request.is_ajax() or not request.POST:
        raise Http404
    version_id = request.POST['id']
    document_url_title = request.POST['url_title']
    document = get_object_or_404(Document, url_title=document_url_title)
    check_permissions(document, request.user, [document.edit_permission_name])
    # renamed from 'versions' to avoid shadowing the versions() view
    document_versions = Version.objects.get_for_object(document)
    if not document.can_be_reverted:
        raise SuspiciousOperation('This Document can not be reverted!')
    # find the version we want to revert to
    revert_version = None
    for version in document_versions:
        if version.pk == int(version_id):
            revert_version = version
            break
    if revert_version is None:
        # user supplied version_id that does not exist
        raise SuspiciousOperation('Could not find document')
    revert_version.revision.revert(delete=False)
    fields = revert_version.field_dict
    document_class = ContentType.objects.get_for_id(fields.pop('polymorphic_ctype_id')).model_class()
    # Remove all references to parent objects, rename ForeignKeyFields, extract ManyToManyFields.
    new_fields = fields.copy()
    many_to_many_fields = {}
    for key in fields.keys():
        if "_ptr" in key:
            # pointer to the polymorphic parent row — not a constructor argument
            del new_fields[key]
            continue
        try:
            field = getattr(document_class, key).field
        except AttributeError:
            continue
        if isinstance(field, models.ManyToManyField):
            # m2m values cannot be passed to the constructor; restore after save
            many_to_many_fields[key] = fields[key]
            del new_fields[key]
        else:
            # use the column name (e.g. 'author_id') instead of the relation name
            new_fields[field.attname] = fields[key]
    reverted_document = document_class(**new_fields)
    with transaction.atomic(), revisions.create_revision():
        reverted_document.save()
        # Restore ManyToManyFields
        for key in many_to_many_fields.keys():
            getattr(reverted_document, key).clear()
            getattr(reverted_document, key).add(*many_to_many_fields[key])
        revisions.set_user(request.user)
        # fix: translate first, then format — calling .format() inside _()
        # changed the msgid at runtime so the translation lookup always failed
        revisions.set_comment(
            _('reverted to revision \"{revision_comment}\" (at {date})').format(
                revision_comment=revert_version.revision.get_comment(),
                date=datetime.utcnow().strftime("%Y-%m-%d %H:%M"),
            )
        )
    return HttpResponse(reverse('versions', args=[reverted_document.url_title]))
def create_attachment(request):
    """AJAX endpoint uploading an attachment; returns the attachment's hash value.

    Expects POST parameter 'document' (document id) plus the upload handled
    by handle_attachment. Requires change permission on the document.
    """
    if not request.is_ajax() or not request.method == "POST":
        raise Http404()
    document = get_object_or_404(Document, id=request.POST['document'])
    if not document.can_be_changed_by(request.user):
        raise PermissionDenied
    success, __, attachment = handle_attachment(request, document)
    if success:
        return HttpResponse(attachment.hash_value)
    else:
        raise SuspiciousOperation
def delete_attachment(request):
    """AJAX endpoint deleting an attachment (file and DB row).

    Expects POST parameter 'id'; requires change permission on the
    attachment's document.
    """
    # guard clauses instead of wrapping the whole body in an if
    if not request.is_ajax() or request.method != "POST":
        raise Http404()
    # fix: get_object_or_404 instead of Attachment.objects.get — an unknown
    # id previously raised Attachment.DoesNotExist and produced a 500
    attachment = get_object_or_404(Attachment, id=request.POST['id'])
    # check whether user has permission to change the document the attachment belongs to
    document = attachment.document
    if not document.can_be_changed_by(request.user):
        raise PermissionDenied
    attachment.file.delete()
    attachment.delete()
    messages.success(request, _("Successfully deleted Attachment!"))
    return HttpResponse()
def download_attachment(request):
    """Serve an attachment file, as download or inline embed.

    Identified by GET parameter 'hash_value'; optional 'embed' switches to
    inline delivery. Requires view permission on the owning document.
    """
    if not request.method == "GET":
        return HttpResponseNotAllowed(["GET"])
    attachment = get_object_or_404(Attachment, hash_value=request.GET.get('hash_value', None))
    # check whether user is allowed to see that document and thus download the attachment
    document = attachment.document
    if not request.user.has_perm(document.view_permission_name, document):
        raise PermissionDenied
    filename = os.path.join(settings.MEDIA_ROOT, attachment.file.name)
    extension = os.path.splitext(filename)[1]
    # any truthy 'embed' parameter delivers inline instead of as download
    is_attachment = not request.GET.get('embed', None)
    attachment_filename = attachment.displayname
    if not attachment_filename.endswith(extension):
        # keep the stored file's extension on the user-visible name
        attachment_filename += extension
    return sendfile(request, filename, attachment=is_attachment, attachment_filename=attachment_filename)
def update_attachment_order(request):
    """AJAX endpoint persisting a new attachment ordering.

    POST body maps attachment pk -> new index; permission is checked per
    attachment against its owning document.
    """
    data = request.POST
    # NOTE(review): request.POST is an (possibly empty) QueryDict, never
    # None — the 'data is None' guard looks ineffective; confirm intent
    if data is None or not request.is_ajax():
        raise Http404
    for pk, index in data.items():
        attachment = get_object_or_404(Attachment, pk=pk)
        # check that user is allowed to make changes to attachment
        document = attachment.document
        if not document.can_be_changed_by(request.user):
            raise PermissionDenied
        attachment.index = index
        attachment.save()
    return HttpResponse()
def get_attachments(request, document_id):
    """Return a JSON object mapping hash_value -> displayname for every
    image attachment of the document (by file extension)."""
    if not request.is_ajax():
        raise Http404
    document = get_object_or_404(Document, pk=document_id)
    if not document.can_be_changed_by(request.user):
        raise PermissionDenied
    image_attachments = {
        attachment.hash_value: attachment.displayname
        for attachment in document.attachments.all()
        if attachment.displayname.lower().split('.')[-1] in settings.SUPPORTED_IMAGE_TYPES
    }
    return HttpResponse(json.dumps(image_attachments))
def change_attachment(request):
    """AJAX endpoint updating an attachment's display name and download flag.

    Expects POST parameters: 'id' (required), optional 'no_direct_download'
    (JSON boolean) and 'displayname'. Requires change permission on the
    attachment's document.
    """
    if not request.POST or not request.is_ajax():
        raise Http404
    attachment_id = request.POST.get('id', None)
    if attachment_id is None:
        raise SuspiciousOperation
    # fix: get_object_or_404 instead of Attachment.objects.get — an unknown
    # id previously raised Attachment.DoesNotExist and produced a 500
    attachment = get_object_or_404(Attachment, id=attachment_id)
    if not attachment.document.can_be_changed_by(request.user):
        raise PermissionDenied
    no_direct_download_value = request.POST.get('no_direct_download', None)
    if no_direct_download_value is not None:
        attachment.no_direct_download = json.loads(no_direct_download_value)
    attachment.displayname = request.POST.get('displayname', attachment.displayname)
    attachment.save()
    return HttpResponse()
def delete_document(request, title):
    """Delete a document, with special permission handling for unsaved pages.

    For a page still in creation (no permissions assigned yet) the author of
    its autosave may delete it; otherwise the edit permission is required.
    """
    document = get_object_or_404(Document, url_title=title)
    if document.is_in_creation:
        try:
            # check super user permissions
            check_permissions(document, request.user, [document.edit_permission_name])
        except PermissionDenied:
            # check if an autosave has already been created
            autosaves_for_document = TemporaryDocumentText.objects.filter(document=document)
            if autosaves_for_document.exists():
                # with an unsaved document, only one user can have autosaves
                if autosaves_for_document.first().author != request.user:
                    raise PermissionDenied
            else:
                # no permission check possible if no autosave was saved (current behavior is not ideal)
                raise PermissionDenied
    else:
        check_permissions(document, request.user, [document.edit_permission_name])
    document.delete()
    messages.success(request, _("Successfully deleted document: {}").format(document.title))
    return HttpResponse()
def get_delete_cascade(request, title):
    """Return the document's delete cascade as JSON (edit permission required).

    Uses Django's NestedObjects collector to gather everything that would be
    deleted alongside the document.
    """
    document = get_object_or_404(Document, url_title=title)
    check_permissions(document, request.user, [document.edit_permission_name])
    collector = NestedObjects(using=DEFAULT_DB_ALIAS)
    collector.collect([document])
    delete_cascade = collector.nested()
    # remove all subclasses of current document from the list because that does not add much helpful information
    # (idiom fix: compare types with 'is not' instead of '==')
    simplified_delete_cascade = [
        cascade_item for cascade_item in delete_cascade
        if not (issubclass(type(document), type(cascade_item))
                and type(document) is not type(cascade_item))
    ]
    return HttpResponse(json.dumps(delete_cascade_to_json(simplified_delete_cascade)))
def preview(request):
    """Render the live-preview page for a document identified by GET 'hash_value'.

    NOTE(review): no permission check here — presumably the unguessable
    hash_value acts as the access token; confirm this is intended.
    """
    if not request.GET or request.method != 'GET':
        raise Http404
    hash_value = request.GET['hash_value']
    document = get_object_or_404(Document, hash_value=hash_value)
    text, __ = convert_markdown(document.text)
    return render(
        request,
        'documents_preview.html',
        {
            'document': document,
            'text': text,
            'preview_url': settings.PREVIEW_URL,
            'hash_value': hash_value,
            'view_page': True,
        }
    )
def delete_autosave(request, title):
    """Delete an autosave; for an unsaved document this deletes the document itself.

    Expects POST parameter 'autosave_id'. The autosave's author may always
    delete it; others must hold the document's edit permission.
    """
    if request.method != 'POST':
        raise Http404
    # check that the user may change this document
    document = get_object_or_404(Document, url_title=title)
    # check that the supplied autosave id matches to the document and has been created by the user
    autosave_id = request.POST['autosave_id']
    autosave = get_object_or_404(TemporaryDocumentText, id=autosave_id)
    autosaves_for_object_and_user = TemporaryDocumentText.objects.filter(document=document, author=request.user)
    # a new document does not have permissions, just check if the autosave author is correct
    if autosave.author != request.user:
        # if the autosave author is not correct, only proceed when the user has superuser privileges by checking permissions
        check_permissions(document, request.user, [document.edit_permission_name])
    # NOTE(review): a privileged user deleting someone else's autosave will
    # still hit this SuspiciousOperation, since the autosave is not in their
    # own queryset — confirm whether that is intended
    if autosave not in autosaves_for_object_and_user:
        raise SuspiciousOperation
    if document.is_in_creation:
        # this is a new document that only has this autosave right now and nothing else, we can safely delete this document
        document.delete()
        messages.success(request, _("Successfully deleted document: {}").format(document.title))
        response = HttpResponseRedirect(reverse("index"))
    else:
        # everything seems to be alright, we can delete the autosave and leave the document as such intact
        autosave.delete()
        messages.success(request, _("Successfully deleted autosave"))
        response = HttpResponseRedirect(reverse("edit", args=[document.url_title]))
    return response
from datetime import datetime
import json
import os
from asgiref.sync import async_to_sync
import channels.layers
from django.contrib import messages
from django.contrib.admin.utils import NestedObjects
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db import DEFAULT_DB_ALIAS, models, transaction
from django.db.models import Q
from django.forms import formset_factory
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect
from django.shortcuts import get_object_or_404, Http404, render
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from guardian.shortcuts import get_objects_for_user
from guardian.utils import get_anonymous_user
from reversion import revisions
from reversion.models import Version
from sendfile import sendfile
from _1327 import settings
from _1327.documents.forms import get_permission_form
from _1327.documents.models import Attachment, Document, TemporaryDocumentText
from _1327.documents.utils import delete_cascade_to_json, delete_old_empty_pages, get_model_function, get_new_autosaved_pages_for_user, \
handle_attachment, handle_autosave, handle_edit, prepare_versions
from _1327.information_pages.models import InformationDocument
from _1327.information_pages.forms import InformationDocumentForm # noqa
from _1327.main.utils import convert_markdown, document_permission_overview
from _1327.minutes.models import MinutesDocument
from _1327.minutes.forms import MinutesDocumentForm # noqa
from _1327.polls.models import Poll
from _1327.polls.forms import PollForm # noqa
from _1327.user_management.shortcuts import check_permissions
def create(request, document_type):
    """Create an empty placeholder document of the given type and open its edit view.

    ``document_type`` is the lowercase model name of a Document subclass.
    Requires the model's "add" permission; raises PermissionDenied otherwise.
    """
    content_type = ContentType.objects.get(model=document_type)
    if request.user.has_perm("{app}.add_{model}".format(app=content_type.app_label, model=content_type.model)):
        model_class = content_type.model_class()
        # presumably removes stale, never-saved placeholder pages — see delete_old_empty_pages
        delete_old_empty_pages()
        title_en, title_de = model_class.generate_new_title()
        # timestamped slug keeps concurrently created placeholders unique
        url_title = "temp_{}_{}".format(datetime.utcnow().strftime("%d%m%Y%H%M%S%f"), model_class.generate_default_slug(title_en))
        kwargs = {
            'url_title': url_title,
            'title_en': title_en,
            'title_de': title_de,
        }
        if hasattr(model_class, 'author'):
            # only some document types (e.g. minutes) track an author
            kwargs['author'] = request.user
        model_class.objects.get_or_create(**kwargs)
        new_autosaved_pages = get_new_autosaved_pages_for_user(request.user, content_type)
        initial = {
            'comment': _("Created document"),
        }
        # delegate rendering/saving of the new page to the regular edit view
        return edit(request, url_title, new_autosaved_pages, initial)
    else:
        raise PermissionDenied
def edit(request, title, new_autosaved_pages=None, initial=None):
    """Render and process the edit form (plus optional formset) for a document.

    Also called directly from create() for brand-new placeholder pages, in
    which case ``new_autosaved_pages``/``initial`` are set and the permission
    check falls back to the "add" permission.
    """
    document = get_object_or_404(Document, url_title=title)
    content_type = ContentType.objects.get_for_model(document)
    if document.has_perms():
        check_permissions(document, request.user, [document.edit_permission_name])
    elif new_autosaved_pages is None and initial is None:
        # page is not new and has no permissions set, it is likely that somebody tries to view an autosaved page
        # users are only allowed to view autosaved pages if they have the "add" permission for documents
        check_permissions(document, request.user, [document.add_permission_name])
    # if the edit form has a formset we will initialize it here
    # NOTE(review): this local shadows the module-level django.forms.formset_factory import
    formset_factory = document.Form.get_formset_factory()
    formset = formset_factory(request.POST or None, instance=document) if formset_factory is not None else None
    if formset is not None:
        # document types with a formset ship their own edit template
        template_name = "{app}_edit.html".format(app=content_type.app_label)
    else:
        template_name = "documents_edit.html"
    try:
        # optional ?group=<id> selects one of the user's groups as creation group
        creation_group = request.user.groups.get(id=request.GET.get('group', False))
    except Group.DoesNotExist:
        creation_group = None
    success, form = handle_edit(request, document, formset, initial, creation_group=creation_group)
    __, attachment_form, __ = handle_attachment(request, document)
    if success:
        messages.success(request, _("Successfully saved changes"))
        return HttpResponseRedirect(reverse(document.get_view_url_name(), args=[document.url_title]))
    else:
        return render(request, template_name, {
            'document': document,
            'form': form,
            'attachment_form': attachment_form,
            'active_page': 'edit',
            'creation': document.is_in_creation,
            'new_autosaved_pages': new_autosaved_pages,
            'permission_overview': document_permission_overview(request.user, document),
            'supported_image_types': settings.SUPPORTED_IMAGE_TYPES,
            'formset': formset,
        })
def autosave(request, title):
    """AJAX endpoint storing an autosave of a document's text for the current user.

    Returns a JSON payload containing the absolute preview URL for the document.
    Anonymous users are rejected.
    """
    if request.user.is_anonymous or request.user == get_anonymous_user():
        raise PermissionDenied()
    document = None
    try:
        # NOTE(review): get_object_or_404 raises Http404, never Document.DoesNotExist,
        # so the except-branch below is dead code; a missing document aborts with 404
        # instead of falling through with document=None — confirm intended behavior.
        document = get_object_or_404(Document, url_title=title)
        if document.has_perms():
            check_permissions(document, request.user, [document.edit_permission_name])
    except Document.DoesNotExist:
        pass
    handle_autosave(request, document)
    data = {
        'preview_url': request.build_absolute_uri(
            reverse('documents:preview') + '?hash_value=' + document.hash_value
        )
    }
    return HttpResponse(json.dumps(data))
def versions(request, title):
    """Display the stored revisions of a document."""
    doc = get_object_or_404(Document, url_title=title)
    check_permissions(doc, request.user, [doc.edit_permission_name])
    revision_list = prepare_versions(doc)
    revertible = doc.can_be_reverted
    if not revertible:
        messages.warning(request, _('This Document can not be reverted!'))
    context = {
        'active_page': 'versions',
        'versions': revision_list,
        'document': doc,
        'permission_overview': document_permission_overview(request.user, doc),
        'can_be_reverted': revertible,
    }
    return render(request, 'documents_versions.html', context)
def view(request, title):
    """Display a document, delegating to an app-specific view function if one exists."""
    document = get_object_or_404(Document, url_title=title)
    content_type = ContentType.objects.get_for_model(document)
    check_permissions(document, request.user, [document.view_permission_name])
    try:
        # subclass apps may provide their own 'view' function; prefer it when present
        function = get_model_function(content_type, 'view')
        return function(request, title)
    except (ImportError, AttributeError):
        # no specialized view available -> fall through to the generic rendering below
        pass
    if document.text == "" and (document.text_en != "" or document.text_de != ""):
        # language fallback: the active language has no text, but another one does;
        # German is preferred when both translations exist
        messages.warning(request, _('The requested document is not available in the selected language. It will be shown in the available language instead.'))
        text, toc = convert_markdown(next((text for text in (document.text_de, document.text_en) if text != ""), ""))
    else:
        text, toc = convert_markdown(document.text)
    return render(request, 'documents_base.html', {
        'document': document,
        'text': text,
        'toc': toc,
        # only attachments that may be downloaded directly are listed on the page
        'attachments': document.attachments.filter(no_direct_download=False).order_by('index'),
        'active_page': 'view',
        'view_page': True,
        'permission_overview': document_permission_overview(request.user, document),
    })
def permissions(request, title):
    """Show and process the per-group permission editor for a document."""
    document = get_object_or_404(Document, url_title=title)
    content_type = ContentType.objects.get_for_model(document)
    check_permissions(document, request.user, [document.edit_permission_name])
    # some document types manage permissions implicitly and hide the editor
    if not document.show_permissions_editor():
        raise PermissionDenied()
    PermissionForm = get_permission_form(document)
    # one form per group, each pre-populated with that group's current permissions
    PermissionFormset = formset_factory(get_permission_form(document), extra=0)
    initial_data = PermissionForm.prepare_initial_data(Group.objects.all(), content_type, document)
    formset = PermissionFormset(request.POST or None, initial=initial_data)
    if request.POST and formset.is_valid():
        for form in formset:
            form.save(document)
        messages.success(request, _("Permissions have been changed successfully."))
        # the user may just have removed their own rights; redirect to the
        # most privileged page they are still allowed to see
        if request.user.has_perm(document.edit_permission_name, document):
            return HttpResponseRedirect(reverse(document.get_permissions_url_name(), args=[document.url_title]))
        if request.user.has_perm(document.view_permission_name, document):
            return HttpResponseRedirect(reverse(document.get_view_url_name(), args=[document.url_title]))
        return HttpResponseRedirect(reverse('index'))
    return render(request, 'documents_permissions.html', {
        'document': document,
        'formset_header': PermissionForm.header(content_type),
        'formset': formset,
        'active_page': 'permissions',
        'permission_overview': document_permission_overview(request.user, document),
    })
def publish(request, title, next_state_id):
    """Move a minutes document into the publication state ``next_state_id``."""
    doc = get_object_or_404(Document, url_title=title)
    check_permissions(doc, request.user, [doc.edit_permission_name])
    if not doc.show_publish_button():
        raise PermissionDenied()
    doc.publish(next_state_id)
    messages.success(request, _("Minutes document has been published."))
    target = reverse(doc.get_view_url_name(), args=[doc.url_title])
    return HttpResponseRedirect(target)
def attachments(request, title):
    """List a document's attachments and handle uploads of new ones."""
    doc = get_object_or_404(Document, url_title=title)
    check_permissions(doc, request.user, [doc.edit_permission_name])
    uploaded, form, __ = handle_attachment(request, doc)
    if uploaded:
        messages.success(request, _("File has been uploaded successfully!"))
        return HttpResponseRedirect(reverse(doc.get_attachments_url_name(), args=[doc.url_title]))
    context = {
        'document': doc,
        'edit_url': reverse(doc.get_attachments_url_name(), args=[doc.url_title]),
        'form': form,
        'attachments': doc.attachments.all().order_by('index'),
        'active_page': 'attachments',
        'permission_overview': document_permission_overview(request.user, doc),
    }
    return render(request, "documents_attachments.html", context)
def render_text(request, title):
    """Convert posted markdown to HTML, broadcast it to preview listeners, return it.

    Only POST is allowed. The rendered HTML is pushed over the channel layer to
    every client subscribed to this document's hash group.
    """
    if request.method != 'POST':
        raise SuspiciousOperation
    document = get_object_or_404(Document, url_title=title)
    # brand-new documents have no permissions yet; only check when some are set
    if document.has_perms():
        check_permissions(document, request.user, [document.view_permission_name, document.edit_permission_name])
    text, __ = convert_markdown(request.POST['text'])
    channel_layer = channels.layers.get_channel_layer()
    # group_send is a coroutine; run it synchronously from this sync view
    async_to_sync(channel_layer.group_send)(
        document.hash_value,
        {
            'type': 'update_preview',
            'message': text,
        }
    )
    return HttpResponse(text, content_type='text/plain')
def search(request):
    """Return minutes, information documents and polls whose titles match ?q=."""
    if not request.GET:
        raise Http404
    id_only = request.GET.get('id_only', False)
    query = request.GET['q']

    def visible_matches(model):
        # restrict the title matches to the objects the user may actually view
        matches = model.objects.filter(
            Q(title_de__icontains=query) | Q(title_en__icontains=query)
        )
        return get_objects_for_user(
            request.user, model.VIEW_PERMISSION_NAME, klass=matches)

    return render(request, "ajax_search_api.json", {
        'minutes': visible_matches(MinutesDocument),
        'information_documents': visible_matches(InformationDocument),
        'polls': visible_matches(Poll),
        'id_only': id_only,
    })
def revert(request):
    """AJAX endpoint that reverts a document to a previous revision.

    Expects POST['id'] (the version pk) and POST['url_title']; returns the URL
    of the versions page of the reverted document.
    Raises SuspiciousOperation for non-revertible documents or unknown versions.
    """
    if not request.is_ajax() or not request.POST:
        raise Http404
    version_id = request.POST['id']
    document_url_title = request.POST['url_title']
    document = get_object_or_404(Document, url_title=document_url_title)
    check_permissions(document, request.user, [document.edit_permission_name])
    # renamed from `versions` to avoid shadowing the module-level `versions` view
    version_list = Version.objects.get_for_object(document)
    if not document.can_be_reverted:
        raise SuspiciousOperation('This Document can not be reverted!')
    # find the version we want to revert to
    revert_version = next(
        (version for version in version_list if version.pk == int(version_id)), None)
    if revert_version is None:
        # user supplied version_id that does not exist
        raise SuspiciousOperation('Could not find document')
    revert_version.revision.revert(delete=False)
    fields = revert_version.field_dict
    document_class = ContentType.objects.get_for_id(fields.pop('polymorphic_ctype_id')).model_class()
    # Remove all references to parent objects, rename ForeignKeyFields, extract ManyToManyFields.
    new_fields = fields.copy()
    many_to_many_fields = {}
    for key in fields.keys():
        if "_ptr" in key:
            # pointer to the parent of a multi-table-inheritance model; drop it
            del new_fields[key]
            continue
        try:
            field = getattr(document_class, key).field
        except AttributeError:
            continue
        if isinstance(field, models.ManyToManyField):
            # m2m values cannot be passed to the constructor; restore them after save
            many_to_many_fields[key] = fields[key]
            del new_fields[key]
        else:
            # store FK values under their column name (e.g. "author" -> "author_id")
            new_fields[field.attname] = fields[key]
    reverted_document = document_class(**new_fields)
    with transaction.atomic(), revisions.create_revision():
        reverted_document.save()
        # Restore ManyToManyFields
        for key in many_to_many_fields.keys():
            getattr(reverted_document, key).clear()
            getattr(reverted_document, key).add(*many_to_many_fields[key])
        revisions.set_user(request.user)
        # BUG FIX: format the *translated* string. The original called
        # _('...'.format(...)), formatting the msgid before gettext, so the
        # interpolated string could never match a translation catalog entry.
        revisions.set_comment(
            _('reverted to revision "{revision_comment}" (at {date})').format(
                revision_comment=revert_version.revision.get_comment(),
                date=datetime.utcnow().strftime("%Y-%m-%d %H:%M"),
            )
        )
    return HttpResponse(reverse('versions', args=[reverted_document.url_title]))
def create_attachment(request):
    """AJAX endpoint attaching an uploaded file to a document; returns its hash."""
    if not request.is_ajax() or not request.method == "POST":
        raise Http404()
    doc = get_object_or_404(Document, id=request.POST['document'])
    if not doc.can_be_changed_by(request.user):
        raise PermissionDenied
    stored, __, attachment = handle_attachment(request, doc)
    if not stored:
        raise SuspiciousOperation
    return HttpResponse(attachment.hash_value)
def delete_attachment(request):
    """AJAX endpoint deleting the attachment given by POST['id'] (file and record)."""
    if not request.is_ajax() or request.method != "POST":
        raise Http404()
    attachment = Attachment.objects.get(id=request.POST['id'])
    # check whether user has permission to change the document the attachment belongs to
    owning_document = attachment.document
    if not owning_document.can_be_changed_by(request.user):
        raise PermissionDenied
    attachment.file.delete()
    attachment.delete()
    messages.success(request, _("Successfully deleted Attachment!"))
    return HttpResponse()
def download_attachment(request):
    """Serve an attachment file, inline when ?embed is set, as download otherwise."""
    if request.method != "GET":
        return HttpResponseNotAllowed(["GET"])
    attachment = get_object_or_404(Attachment, hash_value=request.GET.get('hash_value', None))
    # download permission is tied to the view permission of the owning document
    owner = attachment.document
    if not request.user.has_perm(owner.view_permission_name, owner):
        raise PermissionDenied
    full_path = os.path.join(settings.MEDIA_ROOT, attachment.file.name)
    extension = os.path.splitext(full_path)[1]
    as_download = not request.GET.get('embed', None)
    download_name = attachment.displayname
    if not download_name.endswith(extension):
        download_name += extension
    return sendfile(request, full_path, attachment=as_download, attachment_filename=download_name)
def update_attachment_order(request):
    """AJAX endpoint saving a new ordering of a document's attachments.

    The POST body maps attachment pk -> new index value.
    """
    data = request.POST
    # BUG FIX: request.POST is never None (it is an empty QueryDict), so the
    # previous `data is None` test could not reject requests without data;
    # test for emptiness instead so malformed requests consistently get a 404.
    if not data or not request.is_ajax():
        raise Http404
    for pk, index in data.items():
        attachment = get_object_or_404(Attachment, pk=pk)
        # check that user is allowed to make changes to attachment
        document = attachment.document
        if not document.can_be_changed_by(request.user):
            raise PermissionDenied
        attachment.index = index
        attachment.save()
    return HttpResponse()
def get_attachments(request, document_id):
    """AJAX endpoint mapping attachment hashes to display names for supported image types."""
    if not request.is_ajax():
        raise Http404
    document = get_object_or_404(Document, pk=document_id)
    if not document.can_be_changed_by(request.user):
        raise PermissionDenied
    # only attachments whose file extension is a supported image type are listed
    data = {
        attachment.hash_value: attachment.displayname
        for attachment in document.attachments.all()
        if attachment.displayname.lower().split('.')[-1] in settings.SUPPORTED_IMAGE_TYPES
    }
    return HttpResponse(json.dumps(data))
def change_attachment(request):
    """AJAX endpoint updating an attachment's display name and direct-download flag."""
    if not request.POST or not request.is_ajax():
        raise Http404
    attachment_id = request.POST.get('id', None)
    if attachment_id is None:
        raise SuspiciousOperation
    attachment = Attachment.objects.get(id=attachment_id)
    if not attachment.document.can_be_changed_by(request.user):
        raise PermissionDenied
    raw_flag = request.POST.get('no_direct_download', None)
    if raw_flag is not None:
        # the flag arrives as a JSON boolean string ("true"/"false")
        attachment.no_direct_download = json.loads(raw_flag)
    attachment.displayname = request.POST.get('displayname', attachment.displayname)
    attachment.save()
    return HttpResponse()
def delete_document(request, title):
    """Delete a document after verifying that the requesting user may do so.

    Documents still "in creation" have no permissions yet, so ownership is
    inferred from the author of the existing autosave as a fallback.
    """
    document = get_object_or_404(Document, url_title=title)
    if document.is_in_creation:
        try:
            # check super user permissions
            check_permissions(document, request.user, [document.edit_permission_name])
        except PermissionDenied:
            # check if an autosave has already been created
            autosaves_for_document = TemporaryDocumentText.objects.filter(document=document)
            if autosaves_for_document.exists():
                # with an unsaved document, only one user can have autosaves
                if autosaves_for_document.first().author != request.user:
                    raise PermissionDenied
            else:
                # no permission check possible if no autosave was saved (current behavior is not ideal)
                raise PermissionDenied
    else:
        check_permissions(document, request.user, [document.edit_permission_name])
    document.delete()
    messages.success(request, _("Successfully deleted document: {}").format(document.title))
    return HttpResponse()
def get_delete_cascade(request, title):
    """Return, as JSON, the objects that would be deleted together with this document."""
    document = get_object_or_404(Document, url_title=title)
    check_permissions(document, request.user, [document.edit_permission_name])
    collector = NestedObjects(using=DEFAULT_DB_ALIAS)
    collector.collect([document])
    # remove all subclasses of current document from the list because that does not add much helpful information
    simplified_delete_cascade = [
        cascade_item
        for cascade_item in collector.nested()
        if not (issubclass(type(document), type(cascade_item))
                and type(document) != type(cascade_item))
    ]
    return HttpResponse(json.dumps(delete_cascade_to_json(simplified_delete_cascade)))
def preview(request):
    """Render the live-preview page for the document identified by ?hash_value=."""
    if not request.GET or request.method != 'GET':
        raise Http404
    hash_value = request.GET['hash_value']
    document = get_object_or_404(Document, hash_value=hash_value)
    rendered, __ = convert_markdown(document.text)
    context = {
        'document': document,
        'text': rendered,
        'preview_url': settings.PREVIEW_URL,
        'hash_value': hash_value,
        'view_page': True,
    }
    return render(request, 'documents_preview.html', context)
def delete_autosave(request, title):
    """Delete one autosave; if the document exists only as this autosave, delete it entirely."""
    if request.method != 'POST':
        raise Http404
    # check that the user may change this document
    document = get_object_or_404(Document, url_title=title)
    # check that the supplied autosave id matches to the document and has been created by the user
    autosave_id = request.POST['autosave_id']
    autosave = get_object_or_404(TemporaryDocumentText, id=autosave_id)
    autosaves_for_object_and_user = TemporaryDocumentText.objects.filter(document=document, author=request.user)
    # a new document does not have permissions, just check if the autosave author is correct
    if autosave.author != request.user:
        # if the autosave author is not correct, only proceed when the user has superuser privileges by checking permissions
        check_permissions(document, request.user, [document.edit_permission_name])
    # NOTE(review): this membership check also fires for privileged users who
    # passed the check above but do not own the autosave — confirm intended.
    if autosave not in autosaves_for_object_and_user:
        raise SuspiciousOperation
    if document.is_in_creation:
        # this is a new document that only has this autosave right now and nothing else, we can safely delete this document
        document.delete()
        messages.success(request, _("Successfully deleted document: {}").format(document.title))
        response = HttpResponseRedirect(reverse("index"))
    else:
        # everything seems to be alright, we can delete the autosave and leave the document as such intact
        autosave.delete()
        messages.success(request, _("Successfully deleted autosave"))
        response = HttpResponseRedirect(reverse("edit", args=[document.url_title]))
    return response
| en | 0.933632 | # noqa # noqa # noqa # page is not new and has no permissions set, it is likely that somebody tries to view an autosaved page # users are only allowed to view autosaved pages if they have the "add" permission for documents # if the edit form has a formset we will initialize it here # find the we want to revert to # user supplied version_id that does not exist # Remove all references to parent objects, rename ForeignKeyFields, extract ManyToManyFields. # Restore ManyToManyFields # check whether user has permission to change the document the attachment belongs to # check whether user is allowed to see that document and thus download the attachment # check that user is allowed to make changes to attachment # check super user permissions # check if an autosave has already been created # with an unsaved document, only one user can have autosaves # no permission check possible if no autosave was saved (current behavior is not ideal) # remove all subclasses of current document from the list because that does not add much helpful information # check that the user may change this document # check that the supplied autosave id matches to the document and has been created by the user # a new document does not have permissions, just check if the autosave author is correct # if the autosave author is not correct, only proceed when the user has superuser privileges by checking permissions # this is a new document that only has this autosave right now and nothing else, we can safely delete this document # everything seems to be alright, we can delete the autosave and leave the document as such intact | 1.454351 | 1 |
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/eula.py | kjmadscience/integrations-core | 0 | 6625459 | <gh_stars>0
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ...manifest_utils import Manifest
from ...testing import process_checks_option
from ...utils import complete_valid_checks, get_manifest_file
from ..console import CONTEXT_SETTINGS, abort, annotate_error, echo_debug, echo_failure, echo_info, echo_success
@click.command('eula', context_settings=CONTEXT_SETTINGS, short_help='Validate EULA files')
@click.argument('check', shell_complete=complete_valid_checks, required=False)
def eula(check):
    """Validate all EULA definition files.

    If `check` is specified, only the check will be validated, if check value is 'changed' will only apply to changed
    checks, an 'all' or empty `check` value will validate all README files.
    """
    echo_info("Validating all EULA files...")
    failed_checks = 0
    ok_checks = 0

    checks = process_checks_option(check, source='integrations')
    echo_info(f"Validating EULA files for {len(checks)} checks...")

    for check_name in checks:
        manifest = Manifest.load_manifest(check_name)
        if not manifest:
            # BUG FIX: report the check currently being skipped (`check_name`);
            # the old message interpolated the CLI option `check`, which is
            # misleading/empty when validating many checks.
            echo_debug(f"Skipping validation for check: {check_name}; can't process manifest")
            continue

        eula_relative_location, eula_exists = manifest.get_eula_from_manifest()
        manifest_file = get_manifest_file(check_name)

        # the EULA file referenced by the manifest must exist on disk
        if not eula_exists:
            echo_info(f'{check_name}... ', nl=False)
            echo_info(' FAILED')
            message = f'{eula_relative_location} does not exist'
            echo_failure(' ' + message)
            annotate_error(manifest_file, message)
            failed_checks += 1
            continue

        # Check file extension of eula is .pdf
        if not eula_relative_location.endswith(".pdf"):
            echo_info(f'{check_name}... ', nl=False)
            echo_info(' FAILED')
            message = f'{eula_relative_location} is missing the pdf extension'
            echo_failure(' ' + message)
            annotate_error(manifest_file, message)
            # BUG FIX: previously missing, so a bad extension was reported but
            # never counted and could not trigger the non-zero exit via abort()
            failed_checks += 1
            continue

        # Check PDF starts with PDF magic_number: "%PDF"
        with open(eula_relative_location, 'rb') as f:
            magic_number = f.readline()
            if b'%PDF' not in magic_number:
                echo_info(f'{check_name}... ', nl=False)
                echo_info(' FAILED')
                message = f'{eula_relative_location} is not a PDF file'
                echo_failure(' ' + message)
                annotate_error(manifest_file, message)
                failed_checks += 1
                continue

        ok_checks += 1

    if ok_checks:
        echo_success(f"{ok_checks} valid files")
    if failed_checks:
        echo_failure(f"{failed_checks} invalid files")
        abort()
| # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import click
from ...manifest_utils import Manifest
from ...testing import process_checks_option
from ...utils import complete_valid_checks, get_manifest_file
from ..console import CONTEXT_SETTINGS, abort, annotate_error, echo_debug, echo_failure, echo_info, echo_success
@click.command('eula', context_settings=CONTEXT_SETTINGS, short_help='Validate EULA files')
@click.argument('check', shell_complete=complete_valid_checks, required=False)
def eula(check):
    """Validate all EULA definition files.

    If `check` is specified, only the check will be validated, if check value is 'changed' will only apply to changed
    checks, an 'all' or empty `check` value will validate all README files.
    """
    echo_info("Validating all EULA files...")
    failed_checks = 0
    ok_checks = 0

    checks = process_checks_option(check, source='integrations')
    echo_info(f"Validating EULA files for {len(checks)} checks...")

    for check_name in checks:
        manifest = Manifest.load_manifest(check_name)
        if not manifest:
            # BUG FIX: report the check currently being skipped (`check_name`);
            # the old message interpolated the CLI option `check`, which is
            # misleading/empty when validating many checks.
            echo_debug(f"Skipping validation for check: {check_name}; can't process manifest")
            continue

        eula_relative_location, eula_exists = manifest.get_eula_from_manifest()
        manifest_file = get_manifest_file(check_name)

        # the EULA file referenced by the manifest must exist on disk
        if not eula_exists:
            echo_info(f'{check_name}... ', nl=False)
            echo_info(' FAILED')
            message = f'{eula_relative_location} does not exist'
            echo_failure(' ' + message)
            annotate_error(manifest_file, message)
            failed_checks += 1
            continue

        # Check file extension of eula is .pdf
        if not eula_relative_location.endswith(".pdf"):
            echo_info(f'{check_name}... ', nl=False)
            echo_info(' FAILED')
            message = f'{eula_relative_location} is missing the pdf extension'
            echo_failure(' ' + message)
            annotate_error(manifest_file, message)
            # BUG FIX: previously missing, so a bad extension was reported but
            # never counted and could not trigger the non-zero exit via abort()
            failed_checks += 1
            continue

        # Check PDF starts with PDF magic_number: "%PDF"
        with open(eula_relative_location, 'rb') as f:
            magic_number = f.readline()
            if b'%PDF' not in magic_number:
                echo_info(f'{check_name}... ', nl=False)
                echo_info(' FAILED')
                message = f'{eula_relative_location} is not a PDF file'
                echo_failure(' ' + message)
                annotate_error(manifest_file, message)
                failed_checks += 1
                continue

        ok_checks += 1

    if ok_checks:
        echo_success(f"{ok_checks} valid files")
    if failed_checks:
        echo_failure(f"{failed_checks} invalid files")
        abort()
clustering_normalized_cuts/networks.py | shaun95/google-research | 1 | 6625460 | <filename>clustering_normalized_cuts/networks.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains network definitions (for siamese net, and cnc_net)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from tensorflow.compat.v1.keras import backend as K
from tensorflow.compat.v1.keras.layers import Input
from tensorflow.compat.v1.keras.layers import Lambda
from tensorflow.compat.v1.keras.models import Model
from clustering_normalized_cuts import affinities
from clustering_normalized_cuts import train
from clustering_normalized_cuts import util
from clustering_normalized_cuts.layer import stack_layers
class SiameseNet(object):
  """Siamese network that learns an embedding from pairs of samples.

  Two weight-sharing branches embed both elements of a pair; a Euclidean
  distance layer on top is trained with a contrastive loss so that similar
  pairs end up close and dissimilar pairs far apart in embedding space.
  """

  def __init__(self, inputs, arch, siam_reg, main_path, y_true):
    # inputs: dict of Keras Input tensors with keys 'Unlabeled' and 'Labeled'
    # arch: list of layer specs consumed by util.make_layer_list
    # siam_reg: l2 regularization strength for the siamese layers
    # main_path: base directory for saving/loading model files
    # y_true: labels placeholder, only forwarded to train.predict
    self.orig_inputs = inputs
    # set up inputs: branch 'B' mirrors the shape of the unlabeled input
    self.inputs = {
        'A': inputs['Unlabeled'],
        'B': Input(shape=inputs['Unlabeled'].get_shape().as_list()[1:]),
        'Labeled': inputs['Labeled'],
    }
    # NOTE(review): 'siemese' is a typo, but changing it would break loading
    # previously saved models stored under this directory name.
    self.main_path = os.path.join(main_path, 'siemese/')
    self.y_true = y_true

    # generate layers (shared by both branches, i.e. shared weights)
    self.layers = []
    self.layers += util.make_layer_list(arch, 'siamese', siam_reg)

    # create the siamese net
    self.outputs = stack_layers(self.inputs, self.layers)

    # add the distance layer (Euclidean distance between the two embeddings)
    self.distance = Lambda(
        affinities.euclidean_distance,
        output_shape=affinities.eucl_dist_output_shape)(
            [self.outputs['A'], self.outputs['B']])

    # create the distance model for training
    self.net = Model([self.inputs['A'], self.inputs['B']], self.distance)

    # compile the siamese network with a contrastive loss
    self.net.compile(
        loss=affinities.get_contrastive_loss(m_neg=1, m_pos=0.05),
        optimizer='rmsprop')

  def train(self,
            pairs_train,
            dist_train,
            pairs_val,
            dist_val,
            lr,
            drop,
            patience,
            num_epochs,
            batch_size,
            dset,
            load=True):
    """Train the Siamese Network.

    pairs_train/pairs_val: arrays of input pairs with shape (n, 2, ...).
    dist_train/dist_val: contrastive targets for each pair.
    load: if True, skip training entirely and load previously saved weights.
    """
    if load:
      # load weights into model
      output_path = os.path.join(self.main_path, dset)
      load_model(self.net, output_path, '_siamese')
      return

    # create handler for early stopping and learning rate scheduling
    self.lh = util.LearningHandler(
        lr=lr, drop=drop, lr_tensor=self.net.optimizer.lr, patience=patience)

    # initialize the training generator
    train_gen_ = util.train_gen(pairs_train, dist_train, batch_size)

    # format the validation data for keras: one input array per branch
    validation_data = ([pairs_val[:, 0], pairs_val[:, 1]], dist_val)

    # compute the steps per epoch
    steps_per_epoch = int(len(pairs_train) / batch_size)

    # train the network
    self.net.fit_generator(
        train_gen_,
        epochs=num_epochs,
        validation_data=validation_data,
        steps_per_epoch=steps_per_epoch,
        callbacks=[self.lh])

    # persist architecture (JSON) and weights (HDF5)
    model_json = self.net.to_json()
    output_path = os.path.join(self.main_path, dset)
    save_model(self.net, model_json, output_path, '_siamese')

  def predict(self, x, batch_sizes):
    """Return the siamese embeddings of `x` (output of branch A)."""
    # compute the siamese embeddings of the input data
    return train.predict(
        self.outputs['A'],
        x_unlabeled=x,
        inputs=self.orig_inputs,
        y_true=self.y_true,
        batch_sizes=batch_sizes)
class CncNet(object):
  """Clustering-by-Normalized-Cuts (CNC) network.

  Builds an MLP mapping inputs to soft cluster assignments (via
  Gumbel-softmax) and trains it to minimize a differentiable normalized-cut
  objective computed over a per-batch affinity matrix, plus an optional
  cluster-balance regularizer.
  """

  def __init__(self,
               inputs,
               arch,
               cnc_reg,
               y_true,
               y_train_labeled_onehot,
               n_clusters,
               affinity,
               scale_nbr,
               n_nbrs,
               batch_sizes,
               result_path,
               dset,
               siamese_net=None,
               x_train=None,
               lr=0.01,
               temperature=1.0,
               bal_reg=0.0):
    # affinity: one of 'full', 'knn', 'siamese' — selects how the per-batch
    # affinity matrix W is computed. siamese_net/x_train are only required
    # for affinity == 'siamese'.
    self.y_true = y_true
    self.y_train_labeled_onehot = y_train_labeled_onehot
    self.inputs = inputs
    self.batch_sizes = batch_sizes
    self.result_path = result_path
    self.lr = lr
    self.temperature = temperature

    # generate layers; the last arch entry is replaced by the cluster head
    self.layers = util.make_layer_list(arch[:-1], 'cnc', cnc_reg)

    print('Running with CNC loss')
    self.layers += [{
        'type': 'None',
        'size': n_clusters,
        'l2_reg': cnc_reg,
        'name': 'cnc_{}'.format(len(arch))
    }]

    # create CncNet
    self.outputs = stack_layers(self.inputs, self.layers)
    self.net = Model(
        inputs=self.inputs['Unlabeled'], outputs=self.outputs['Unlabeled'])

    # DEFINE LOSS
    # generate affinity matrix W according to params
    if affinity == 'siamese':
      # affinities are computed in the siamese embedding space
      input_affinity = tf.concat(
          [siamese_net.outputs['A'], siamese_net.outputs['Labeled']], axis=0)
      x_affinity = siamese_net.predict(x_train, batch_sizes)
    elif affinity in ['knn', 'full']:
      input_affinity = tf.concat(
          [self.inputs['Unlabeled'], self.inputs['Labeled']], axis=0)
      x_affinity = x_train

    # calculate scale for affinity matrix
    scale = util.get_scale(x_affinity, self.batch_sizes['Unlabeled'], scale_nbr)

    # create affinity matrix
    if affinity == 'full':
      weight_mat = affinities.full_affinity(input_affinity, scale=scale)
    elif affinity in ['knn', 'siamese']:
      weight_mat = affinities.knn_affinity(
          input_affinity, n_nbrs, scale=scale, scale_nbr=scale_nbr)

    # define loss: soft assignments via Gumbel-softmax with temperature tau
    self.tau = tf.Variable(self.temperature, name='temperature')
    self.outputs['Unlabeled'] = util.gumbel_softmax(self.outputs['Unlabeled'],
                                                    self.tau)
    num_nodes = self.batch_sizes['Unlabeled']
    cluster_size = tf.reduce_sum(self.outputs['Unlabeled'], axis=0)
    # balance regularizer: penalize deviation from equally sized clusters
    ground_truth = [num_nodes / float(n_clusters)] * n_clusters
    bal = tf.losses.mean_squared_error(ground_truth, cluster_size)
    # normalized-cut gain: sum over edges crossing cluster boundaries,
    # normalized by the (soft) volume of each cluster
    degree = tf.expand_dims(tf.reduce_sum(weight_mat, axis=1), 0)
    vol = tf.matmul(degree, self.outputs['Unlabeled'], name='vol')
    normalized_prob = tf.divide(
        self.outputs['Unlabeled'], vol[tf.newaxis, :],
        name='normalized_prob')[0]
    gain = tf.matmul(
        normalized_prob,
        tf.transpose(1 - self.outputs['Unlabeled']),
        name='res2')
    self.loss = tf.reduce_sum(gain * weight_mat) + bal_reg * bal

    # create the train step update
    self.learning_rate = tf.Variable(self.lr, name='cnc_learning_rate')
    self.train_step = tf.train.RMSPropOptimizer(
        learning_rate=self.learning_rate).minimize(
            self.loss, var_list=self.net.trainable_weights)

    # initialize cnc_net variables
    K.get_session().run(tf.global_variables_initializer())
    K.get_session().run(tf.variables_initializer(self.net.trainable_weights))

    if affinity == 'siamese':
      # BUG FIX: CncNet has no `main_path` attribute (the old code read
      # `self.main_path` -> AttributeError) and `load_model` expects a Keras
      # model exposing load_weights; resolve the path from the siamese net
      # and pass its inner `.net` model so the pretrained weights load.
      output_path = os.path.join(siamese_net.main_path, dset)
      load_model(siamese_net.net, output_path, '_siamese')

  def train(self,
            x_train_unlabeled,
            x_train_labeled,
            x_val_unlabeled,
            drop,
            patience,
            min_tem,
            num_epochs,
            load=False):
    """Train the CNC network.

    Runs up to num_epochs epochs with early stopping on the validation loss;
    the Gumbel temperature is annealed down to min_tem by the handler.
    load: if True, skip training and load pretrained weights instead.
    """
    file_name = 'cnc_net'
    if load:
      # load weights into model
      print('load pretrain weights of the CNC network.')
      load_model(self.net, self.result_path, file_name)
      return

    # create handler for early stopping, lr scheduling and tau annealing
    self.lh = util.LearningHandler(
        lr=self.lr,
        drop=drop,
        lr_tensor=self.learning_rate,
        patience=patience,
        tau=self.temperature,
        tau_tensor=self.tau,
        min_tem=min_tem,
        gumble=True)

    losses = np.empty((num_epochs,))
    val_losses = np.empty((num_epochs,))

    # begin cnc_net training loop
    self.lh.on_train_begin()
    for i in range(num_epochs):
      # train cnc_net
      losses[i] = train.train_step(
          return_var=[self.loss],
          updates=self.net.updates + [self.train_step],
          x_unlabeled=x_train_unlabeled,
          inputs=self.inputs,
          y_true=self.y_true,
          batch_sizes=self.batch_sizes,
          x_labeled=x_train_labeled,
          y_labeled=self.y_train_labeled_onehot,
          batches_per_epoch=100)[0]

      # get validation loss
      val_losses[i] = train.predict_sum(
          self.loss,
          x_unlabeled=x_val_unlabeled,
          inputs=self.inputs,
          y_true=self.y_true,
          x_labeled=x_train_unlabeled[0:0],
          y_labeled=self.y_train_labeled_onehot,
          batch_sizes=self.batch_sizes)

      # do early stopping if necessary
      if self.lh.on_epoch_end(i, val_losses[i]):
        print('STOPPING EARLY')
        break

      # print training status
      print('Epoch: {}, loss={:2f}, val_loss={:2f}'.format(
          i, losses[i], val_losses[i]))
      with gfile.Open(self.result_path + 'losses', 'a') as f:
        f.write(str(i) + ' ' + str(losses[i]) + ' ' + str(val_losses[i]) + '\n')

    model_json = self.net.to_json()
    save_model(self.net, model_json, self.result_path, file_name)

  def predict(self, x):
    """Return the soft cluster assignments for `x`."""
    # test inputs do not require the 'Labeled' input
    inputs_test = {'Unlabeled': self.inputs['Unlabeled']}
    return train.predict(
        self.outputs['Unlabeled'],
        x_unlabeled=x,
        inputs=inputs_test,
        y_true=self.y_true,
        x_labeled=x[0:0],
        y_labeled=self.y_train_labeled_onehot[0:0],
        batch_sizes=self.batch_sizes)
def save_model(net, model_json, output_path, file_name):
  """Save the model architecture (JSON) and its weights (HDF5).

  Files are written as output_path + file_name + '.json' / '.h5'.
  """
  with gfile.Open(output_path + file_name + '.json', 'w') as json_file:
    json_file.write(model_json)
  # serialize weights to HDF5
  # BUG FIX: os.path.join(output_path, file_name, '.h5') produced
  # "<output_path>/<file_name>/.h5" (a hidden file in a subdirectory); use
  # the same concatenation scheme as the JSON file above.
  weight_path = output_path + file_name + '.h5'
  # Keras can only write to the local filesystem, so write to a unique
  # temporary file first and copy it to the (possibly remote) destination.
  local_filename = weight_path.split('/')[-1]
  tmp_filename = os.path.join(tempfile.gettempdir(),
                              str(int(time.time())) + '_' + local_filename)
  net.save_weights(tmp_filename)
  gfile.Copy(tmp_filename, weight_path, overwrite=True)
  gfile.Remove(tmp_filename)
def load_model(net, output_path, file_name):
  """Load weights from output_path + file_name + '.h5' into `net`.

  The file is copied to a local temporary path first because Keras can only
  read weights from the local filesystem.
  """
  # BUG FIX: os.path.join(output_path, file_name, '.h5') pointed at
  # "<output_path>/<file_name>/.h5"; use the same path scheme as save_model.
  weights_path = output_path + file_name + '.h5'
  local_filename = weights_path.split('/')[-1]
  tmp_filename = os.path.join(tempfile.gettempdir(),
                              str(int(time.time())) + '_' + local_filename)
  gfile.Copy(weights_path, tmp_filename)
  net.load_weights(tmp_filename)
  gfile.Remove(tmp_filename)
| <filename>clustering_normalized_cuts/networks.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains network definitions (for siamese net, and cnc_net)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from tensorflow.compat.v1.keras import backend as K
from tensorflow.compat.v1.keras.layers import Input
from tensorflow.compat.v1.keras.layers import Lambda
from tensorflow.compat.v1.keras.models import Model
from clustering_normalized_cuts import affinities
from clustering_normalized_cuts import train
from clustering_normalized_cuts import util
from clustering_normalized_cuts.layer import stack_layers
class SiameseNet(object):
"""Class for Siamese Network."""
def __init__(self, inputs, arch, siam_reg, main_path, y_true):
self.orig_inputs = inputs
# set up inputs
self.inputs = {
'A': inputs['Unlabeled'],
'B': Input(shape=inputs['Unlabeled'].get_shape().as_list()[1:]),
'Labeled': inputs['Labeled'],
}
self.main_path = os.path.join(main_path, 'siemese/')
self.y_true = y_true
# generate layers
self.layers = []
self.layers += util.make_layer_list(arch, 'siamese', siam_reg)
# create the siamese net
self.outputs = stack_layers(self.inputs, self.layers)
# add the distance layer
self.distance = Lambda(
affinities.euclidean_distance,
output_shape=affinities.eucl_dist_output_shape)(
[self.outputs['A'], self.outputs['B']])
# create the distance model for training
self.net = Model([self.inputs['A'], self.inputs['B']], self.distance)
# compile the siamese network
self.net.compile(
loss=affinities.get_contrastive_loss(m_neg=1, m_pos=0.05),
optimizer='rmsprop')
def train(self,
pairs_train,
dist_train,
pairs_val,
dist_val,
lr,
drop,
patience,
num_epochs,
batch_size,
dset,
load=True):
"""Train the Siamese Network."""
if load:
# load weights into model
output_path = os.path.join(self.main_path, dset)
load_model(self.net, output_path, '_siamese')
return
# create handler for early stopping and learning rate scheduling
self.lh = util.LearningHandler(
lr=lr, drop=drop, lr_tensor=self.net.optimizer.lr, patience=patience)
# initialize the training generator
train_gen_ = util.train_gen(pairs_train, dist_train, batch_size)
# format the validation data for keras
validation_data = ([pairs_val[:, 0], pairs_val[:, 1]], dist_val)
# compute the steps per epoch
steps_per_epoch = int(len(pairs_train) / batch_size)
# train the network
self.net.fit_generator(
train_gen_,
epochs=num_epochs,
validation_data=validation_data,
steps_per_epoch=steps_per_epoch,
callbacks=[self.lh])
model_json = self.net.to_json()
output_path = os.path.join(self.main_path, dset)
save_model(self.net, model_json, output_path, '_siamese')
def predict(self, x, batch_sizes):
# compute the siamese embeddings of the input data
return train.predict(
self.outputs['A'],
x_unlabeled=x,
inputs=self.orig_inputs,
y_true=self.y_true,
batch_sizes=batch_sizes)
class CncNet(object):
"""Class for CNC Network."""
def __init__(self,
inputs,
arch,
cnc_reg,
y_true,
y_train_labeled_onehot,
n_clusters,
affinity,
scale_nbr,
n_nbrs,
batch_sizes,
result_path,
dset,
siamese_net=None,
x_train=None,
lr=0.01,
temperature=1.0,
bal_reg=0.0):
self.y_true = y_true
self.y_train_labeled_onehot = y_train_labeled_onehot
self.inputs = inputs
self.batch_sizes = batch_sizes
self.result_path = result_path
self.lr = lr
self.temperature = temperature
# generate layers
self.layers = util.make_layer_list(arch[:-1], 'cnc', cnc_reg)
print('Runing with CNC loss')
self.layers += [{
'type': 'None',
'size': n_clusters,
'l2_reg': cnc_reg,
'name': 'cnc_{}'.format(len(arch))
}]
# create CncNet
self.outputs = stack_layers(self.inputs, self.layers)
self.net = Model(
inputs=self.inputs['Unlabeled'], outputs=self.outputs['Unlabeled'])
# DEFINE LOSS
# generate affinity matrix W according to params
if affinity == 'siamese':
input_affinity = tf.concat(
[siamese_net.outputs['A'], siamese_net.outputs['Labeled']], axis=0)
x_affinity = siamese_net.predict(x_train, batch_sizes)
elif affinity in ['knn', 'full']:
input_affinity = tf.concat(
[self.inputs['Unlabeled'], self.inputs['Labeled']], axis=0)
x_affinity = x_train
# calculate scale for affinity matrix
scale = util.get_scale(x_affinity, self.batch_sizes['Unlabeled'], scale_nbr)
# create affinity matrix
if affinity == 'full':
weight_mat = affinities.full_affinity(input_affinity, scale=scale)
elif affinity in ['knn', 'siamese']:
weight_mat = affinities.knn_affinity(
input_affinity, n_nbrs, scale=scale, scale_nbr=scale_nbr)
# define loss
self.tau = tf.Variable(self.temperature, name='temperature')
self.outputs['Unlabeled'] = util.gumbel_softmax(self.outputs['Unlabeled'],
self.tau)
num_nodes = self.batch_sizes['Unlabeled']
cluster_size = tf.reduce_sum(self.outputs['Unlabeled'], axis=0)
ground_truth = [num_nodes / float(n_clusters)] * n_clusters
bal = tf.losses.mean_squared_error(ground_truth, cluster_size)
degree = tf.expand_dims(tf.reduce_sum(weight_mat, axis=1), 0)
vol = tf.matmul(degree, self.outputs['Unlabeled'], name='vol')
normalized_prob = tf.divide(
self.outputs['Unlabeled'], vol[tf.newaxis, :],
name='normalized_prob')[0]
gain = tf.matmul(
normalized_prob,
tf.transpose(1 - self.outputs['Unlabeled']),
name='res2')
self.loss = tf.reduce_sum(gain * weight_mat) + bal_reg * bal
# create the train step update
self.learning_rate = tf.Variable(self.lr, name='cnc_learning_rate')
self.train_step = tf.train.RMSPropOptimizer(
learning_rate=self.learning_rate).minimize(
self.loss, var_list=self.net.trainable_weights)
# initialize cnc_net variables
K.get_session().run(tf.global_variables_initializer())
K.get_session().run(tf.variables_initializer(self.net.trainable_weights))
if affinity == 'siamese':
output_path = os.path.join(self.main_path, dset)
load_model(siamese_net, output_path, '_siamese')
def train(self,
x_train_unlabeled,
x_train_labeled,
x_val_unlabeled,
drop,
patience,
min_tem,
num_epochs,
load=False):
"""Train the CNC network."""
file_name = 'cnc_net'
if load:
# load weights into model
print('load pretrain weights of the CNC network.')
load_model(self.net, self.result_path, file_name)
return
# create handler for early stopping and learning rate scheduling
self.lh = util.LearningHandler(
lr=self.lr,
drop=drop,
lr_tensor=self.learning_rate,
patience=patience,
tau=self.temperature,
tau_tensor=self.tau,
min_tem=min_tem,
gumble=True)
losses = np.empty((num_epochs,))
val_losses = np.empty((num_epochs,))
# begin cnc_net training loop
self.lh.on_train_begin()
for i in range(num_epochs):
# train cnc_net
losses[i] = train.train_step(
return_var=[self.loss],
updates=self.net.updates + [self.train_step],
x_unlabeled=x_train_unlabeled,
inputs=self.inputs,
y_true=self.y_true,
batch_sizes=self.batch_sizes,
x_labeled=x_train_labeled,
y_labeled=self.y_train_labeled_onehot,
batches_per_epoch=100)[0]
# get validation loss
val_losses[i] = train.predict_sum(
self.loss,
x_unlabeled=x_val_unlabeled,
inputs=self.inputs,
y_true=self.y_true,
x_labeled=x_train_unlabeled[0:0],
y_labeled=self.y_train_labeled_onehot,
batch_sizes=self.batch_sizes)
# do early stopping if necessary
if self.lh.on_epoch_end(i, val_losses[i]):
print('STOPPING EARLY')
break
# print training status
print('Epoch: {}, loss={:2f}, val_loss={:2f}'.format(
i, losses[i], val_losses[i]))
with gfile.Open(self.result_path + 'losses', 'a') as f:
f.write(str(i) + ' ' + str(losses[i]) + ' ' + str(val_losses[i]) + '\n')
model_json = self.net.to_json()
save_model(self.net, model_json, self.result_path, file_name)
def predict(self, x):
# test inputs do not require the 'Labeled' input
inputs_test = {'Unlabeled': self.inputs['Unlabeled']}
return train.predict(
self.outputs['Unlabeled'],
x_unlabeled=x,
inputs=inputs_test,
y_true=self.y_true,
x_labeled=x[0:0],
y_labeled=self.y_train_labeled_onehot[0:0],
batch_sizes=self.batch_sizes)
def save_model(net, model_json, output_path, file_name):
"""serialize weights to HDF5."""
with gfile.Open(output_path + file_name + '.json', 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
weight_path = os.path.join(output_path, file_name, '.h5')
local_filename = weight_path.split('/')[-1]
tmp_filename = os.path.join(tempfile.gettempdir(),
str(int(time.time())) + '_' + local_filename)
net.save_weights(tmp_filename)
gfile.Copy(tmp_filename, weight_path, overwrite=True)
gfile.Remove(tmp_filename)
def load_model(net, output_path, file_name):
weights_path = os.path.join(output_path, file_name, '.h5')
local_filename = weights_path.split('/')[-1]
tmp_filename = os.path.join(tempfile.gettempdir(),
str(int(time.time())) + '_' + local_filename)
gfile.Copy(weights_path, tmp_filename)
net.load_weights(tmp_filename)
gfile.Remove(tmp_filename)
| en | 0.749142 | # coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Contains network definitions (for siamese net, and cnc_net). Class for Siamese Network. # set up inputs # generate layers # create the siamese net # add the distance layer # create the distance model for training # compile the siamese network Train the Siamese Network. # load weights into model # create handler for early stopping and learning rate scheduling # initialize the training generator # format the validation data for keras # compute the steps per epoch # train the network # compute the siamese embeddings of the input data Class for CNC Network. # generate layers # create CncNet # DEFINE LOSS # generate affinity matrix W according to params # calculate scale for affinity matrix # create affinity matrix # define loss # create the train step update # initialize cnc_net variables Train the CNC network. # load weights into model # create handler for early stopping and learning rate scheduling # begin cnc_net training loop # train cnc_net # get validation loss # do early stopping if necessary # print training status # test inputs do not require the 'Labeled' input serialize weights to HDF5. # serialize weights to HDF5 | 2.034697 | 2 |
src/exabgp/bgp/message/update/attribute/aigp.py | ahmet2mir/exabgp | 27 | 6625461 | <gh_stars>10-100
# encoding: utf-8
"""
aigp.py
Created by <NAME> on 2013-09-24.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from struct import pack
from struct import unpack
from exabgp.bgp.message.update.attribute.attribute import Attribute
# ========================================================================== TLV
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
# ~ ~
# | Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+..........................
# Length: Two octets encoding the length in octets of the TLV,
# including the type and length fields.
class TLV(object):
def __init__(self, what, value):
self.type = what
self.value = value
class TLVS(list):
@staticmethod
def unpack(data):
def loop(data):
while data:
t = data[0]
length = unpack('!H', data[1:3])[0]
v, data = data[3:length], data[length:]
yield TLV(t, v)
return TLVS(list(loop(data)))
def pack(self):
return b''.join([bytes([tlv.type]) + pack('!H', len(tlv.value) + 3) + tlv.value for tlv in self])
# ==================================================================== AIGP (26)
#
@Attribute.register()
class AIGP(Attribute):
ID = Attribute.CODE.AIGP
FLAG = Attribute.Flag.OPTIONAL
CACHING = True
TYPES = [
1,
]
def __init__(self, aigp, packed=None):
self.aigp = aigp
if packed:
self._packed = packed
else:
self._packed = self._attribute(aigp)
def __eq__(self, other):
return self.ID == other.ID and self.FLAG == other.FLAG and self.aigp == other.aigp
def __ne__(self, other):
return not self.__eq__(other)
def pack(self, negotiated):
if negotiated.aigp:
return self._packed
if negotiated.local_as == negotiated.peer_as:
return self._packed
return b''
def __repr__(self):
return '0x' + ''.join('%02x' % _ for _ in self.aigp[-8:])
@classmethod
def unpack(cls, data, negotiated):
if not negotiated.aigp:
# AIGP must only be accepted on configured sessions
return None
return cls(unpack('!Q', data[:8] & 0x000000FFFFFFFFFF), data[:8])
| # encoding: utf-8
"""
aigp.py
Created by <NAME> on 2013-09-24.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from struct import pack
from struct import unpack
from exabgp.bgp.message.update.attribute.attribute import Attribute
# ========================================================================== TLV
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
# ~ ~
# | Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+..........................
# Length: Two octets encoding the length in octets of the TLV,
# including the type and length fields.
class TLV(object):
def __init__(self, what, value):
self.type = what
self.value = value
class TLVS(list):
@staticmethod
def unpack(data):
def loop(data):
while data:
t = data[0]
length = unpack('!H', data[1:3])[0]
v, data = data[3:length], data[length:]
yield TLV(t, v)
return TLVS(list(loop(data)))
def pack(self):
return b''.join([bytes([tlv.type]) + pack('!H', len(tlv.value) + 3) + tlv.value for tlv in self])
# ==================================================================== AIGP (26)
#
@Attribute.register()
class AIGP(Attribute):
ID = Attribute.CODE.AIGP
FLAG = Attribute.Flag.OPTIONAL
CACHING = True
TYPES = [
1,
]
def __init__(self, aigp, packed=None):
self.aigp = aigp
if packed:
self._packed = packed
else:
self._packed = self._attribute(aigp)
def __eq__(self, other):
return self.ID == other.ID and self.FLAG == other.FLAG and self.aigp == other.aigp
def __ne__(self, other):
return not self.__eq__(other)
def pack(self, negotiated):
if negotiated.aigp:
return self._packed
if negotiated.local_as == negotiated.peer_as:
return self._packed
return b''
def __repr__(self):
return '0x' + ''.join('%02x' % _ for _ in self.aigp[-8:])
@classmethod
def unpack(cls, data, negotiated):
if not negotiated.aigp:
# AIGP must only be accepted on configured sessions
return None
return cls(unpack('!Q', data[:8] & 0x000000FFFFFFFFFF), data[:8]) | en | 0.297743 | # encoding: utf-8 aigp.py Created by <NAME> on 2013-09-24. Copyright (c) 2009-2017 Exa Networks. All rights reserved. License: 3-clause BSD. (See the COPYRIGHT file) # ========================================================================== TLV # # 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Type | Length | | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | # ~ ~ # | Value | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+.......................... # Length: Two octets encoding the length in octets of the TLV, # including the type and length fields. # ==================================================================== AIGP (26) # # AIGP must only be accepted on configured sessions | 2.471115 | 2 |
imgtag/main_window.py | pauljxtan/imgtag | 0 | 6625462 | <filename>imgtag/main_window.py<gh_stars>0
"""Provides the top-level window widget."""
from PySide2.QtWidgets import QAction, QApplication, QMainWindow, QTabWidget
from .state import GlobalState
from .tabs import FileTab, GalleryTab
class MainWindow(QMainWindow):
"""The top-level window."""
title = 'ImgTag'
def __init__(self):
super().__init__()
self.global_state = GlobalState()
self.setWindowTitle(self.title)
self._make_menubar()
self.setCentralWidget(self._central_widget())
def _make_menubar(self):
menubar = self.menuBar()
quit_action = QAction('&Quit', self)
quit_action.setShortcut('Ctrl+Q')
quit_action.triggered.connect(QApplication.quit)
menubar.addAction(quit_action)
def _central_widget(self) -> QTabWidget:
tabs = QTabWidget()
self._file_tab = FileTab(self.global_state)
tabs.addTab(self._file_tab, self._file_tab.title)
self._gallery_tab = GalleryTab(self.global_state)
tabs.addTab(self._gallery_tab, self._gallery_tab.title)
return tabs
| <filename>imgtag/main_window.py<gh_stars>0
"""Provides the top-level window widget."""
from PySide2.QtWidgets import QAction, QApplication, QMainWindow, QTabWidget
from .state import GlobalState
from .tabs import FileTab, GalleryTab
class MainWindow(QMainWindow):
"""The top-level window."""
title = 'ImgTag'
def __init__(self):
super().__init__()
self.global_state = GlobalState()
self.setWindowTitle(self.title)
self._make_menubar()
self.setCentralWidget(self._central_widget())
def _make_menubar(self):
menubar = self.menuBar()
quit_action = QAction('&Quit', self)
quit_action.setShortcut('Ctrl+Q')
quit_action.triggered.connect(QApplication.quit)
menubar.addAction(quit_action)
def _central_widget(self) -> QTabWidget:
tabs = QTabWidget()
self._file_tab = FileTab(self.global_state)
tabs.addTab(self._file_tab, self._file_tab.title)
self._gallery_tab = GalleryTab(self.global_state)
tabs.addTab(self._gallery_tab, self._gallery_tab.title)
return tabs
| en | 0.811584 | Provides the top-level window widget. The top-level window. | 2.447633 | 2 |
lena/core/functions.py | ynikitenko/lena | 4 | 6625463 | import sys
def flow_to_iter(flow):
"""Convert *flow* to support both ``__iter__`` and ``next``.
*flow* must be iterable.
If that doesn't support ``next`` (for example, a list),
it will be converted to *iter(flow)*.
Works for Python versions 2 and 3 (where next is different).
"""
if ((sys.version_info.major == 3 and hasattr(flow, "__next__"))
or (sys.version_info.major == 2 and hasattr(flow, "next"))):
return flow
else:
return iter(flow)
| import sys
def flow_to_iter(flow):
"""Convert *flow* to support both ``__iter__`` and ``next``.
*flow* must be iterable.
If that doesn't support ``next`` (for example, a list),
it will be converted to *iter(flow)*.
Works for Python versions 2 and 3 (where next is different).
"""
if ((sys.version_info.major == 3 and hasattr(flow, "__next__"))
or (sys.version_info.major == 2 and hasattr(flow, "next"))):
return flow
else:
return iter(flow)
| en | 0.889569 | Convert *flow* to support both ``__iter__`` and ``next``. *flow* must be iterable. If that doesn't support ``next`` (for example, a list), it will be converted to *iter(flow)*. Works for Python versions 2 and 3 (where next is different). | 3.402405 | 3 |
tests/test_static_content.py | grihabor/catch-hook-telegram-bot | 0 | 6625464 | import pytest
@pytest.mark.parametrize('mapping,json_obj,expected', [
({}, {}, {}),
({'name': 'obj.name'}, {'obj': {'name': 'John'}}, {'name': 'John'}),
({
'user': {
'id': 'project.user_id',
'name': 'project.user_name'
},
'name': 'project.name'
}, {
'project': {
'user_id': 2574,
'user_name': 'Dan',
'name': 'coolbot',
}
}, {
'user': {
'id': 2574,
'name': 'Dan'
},
'name': 'coolbot'
}),
])
def test_static_content(mapping, json_obj, expected):
from catchbot.message.content import get_static_msg_content
assert expected == get_static_msg_content(mapping, json_obj)
| import pytest
@pytest.mark.parametrize('mapping,json_obj,expected', [
({}, {}, {}),
({'name': 'obj.name'}, {'obj': {'name': 'John'}}, {'name': 'John'}),
({
'user': {
'id': 'project.user_id',
'name': 'project.user_name'
},
'name': 'project.name'
}, {
'project': {
'user_id': 2574,
'user_name': 'Dan',
'name': 'coolbot',
}
}, {
'user': {
'id': 2574,
'name': 'Dan'
},
'name': 'coolbot'
}),
])
def test_static_content(mapping, json_obj, expected):
from catchbot.message.content import get_static_msg_content
assert expected == get_static_msg_content(mapping, json_obj)
| none | 1 | 2.379223 | 2 | |
tools/conllu-w2t.py | coastalcph/HIT-SCIR-CoNLL2019 | 5 | 6625465 | #!/usr/bin/env python
import sys
import re
import file_util
from file_util import ID,HEAD,DEPS #column index for the columns we'll need
import argparse
interval_re=re.compile(ur"^([0-9]+)-([0-9]+)$",re.U)
def get_tokens(wtree):
"""
Returns a list of tokens in the tree as integer intervals like so:
[(1,1),(2,3),(4,4),...]
`tree` is a tree (as produced by trees()) in the word-indexed format
"""
tokens=[]
for cols in wtree:
if cols[ID].isdigit():
t_id=int(cols[ID])
#Not covered by the previous interval?
if not (tokens and tokens[-1][0]<=t_id and tokens[-1][1]>=t_id):
tokens.append((t_id,t_id)) #nope - let's make a default interval for it
else:
match=interval_re.match(cols[ID]) #Check the interval against the regex
beg,end=int(match.group(1)),int(match.group(2))
tokens.append((beg,end))
return tokens
def w2t(wtree):
tokens=get_tokens(wtree)
word_ids=[u"0"] #root remains 0
line_idx=0 #index of the line in wtree we are editing
for token_idx,(b,e) in enumerate(tokens): #go over all token ranges and produce new IDs for the words involved
wtree[line_idx][ID]=unicode(token_idx+1) #Renumber the ID field of the token
if b==e: #token==word
word_ids.append("%d"%(token_idx+1))
line_idx+=1
else:
#We have a range, renumber the words
line_idx+=1
for word_idx,_ in enumerate(range(b,e+1)): #consume as many lines as there are words in the token
word_ids.append("%d.%d"%(token_idx+1,word_idx+1))
wtree[line_idx][ID]=word_ids[-1]
line_idx+=1
#word_ids is now a list with 1-based indexing which has the new ID for every single word
#the ID column has been renumbered by now
#now we can renumber all of the HEAD columns
for cols in wtree:
if cols[HEAD]==u"_": #token
continue
cols[HEAD]=word_ids[int(cols[HEAD])]
if cols[DEPS]!=u"_": #need to renumber secondary deps
new_pairs=[]
for head_deprel in cols[DEPS].split(u"|"):
head,deprel=head_deprel.split(u":")
new_pairs.append(word_ids[int(head)]+u":"+deprel)
cols[DEPS]=u"|".join(new_pairs)
if __name__=="__main__":
opt_parser = argparse.ArgumentParser(description='Conversion script from word-based CoNLL-U to token-based CoNLL-U. This script assumes that the input is validated and does no checking on its own.')
opt_parser.add_argument('input', nargs='?', help='Input file name, or "-" or nothing for standard input.')
opt_parser.add_argument('output', nargs='?', help='Output file name, or "-" or nothing for standard output.')
args = opt_parser.parse_args() #Parsed command-line arguments
inp,out=file_util.in_out(args)
for comments,tree in file_util.trees(inp):
w2t(tree)
file_util.print_tree(comments,tree,out)
| #!/usr/bin/env python
import sys
import re
import file_util
from file_util import ID,HEAD,DEPS #column index for the columns we'll need
import argparse
interval_re=re.compile(ur"^([0-9]+)-([0-9]+)$",re.U)
def get_tokens(wtree):
"""
Returns a list of tokens in the tree as integer intervals like so:
[(1,1),(2,3),(4,4),...]
`tree` is a tree (as produced by trees()) in the word-indexed format
"""
tokens=[]
for cols in wtree:
if cols[ID].isdigit():
t_id=int(cols[ID])
#Not covered by the previous interval?
if not (tokens and tokens[-1][0]<=t_id and tokens[-1][1]>=t_id):
tokens.append((t_id,t_id)) #nope - let's make a default interval for it
else:
match=interval_re.match(cols[ID]) #Check the interval against the regex
beg,end=int(match.group(1)),int(match.group(2))
tokens.append((beg,end))
return tokens
def w2t(wtree):
tokens=get_tokens(wtree)
word_ids=[u"0"] #root remains 0
line_idx=0 #index of the line in wtree we are editing
for token_idx,(b,e) in enumerate(tokens): #go over all token ranges and produce new IDs for the words involved
wtree[line_idx][ID]=unicode(token_idx+1) #Renumber the ID field of the token
if b==e: #token==word
word_ids.append("%d"%(token_idx+1))
line_idx+=1
else:
#We have a range, renumber the words
line_idx+=1
for word_idx,_ in enumerate(range(b,e+1)): #consume as many lines as there are words in the token
word_ids.append("%d.%d"%(token_idx+1,word_idx+1))
wtree[line_idx][ID]=word_ids[-1]
line_idx+=1
#word_ids is now a list with 1-based indexing which has the new ID for every single word
#the ID column has been renumbered by now
#now we can renumber all of the HEAD columns
for cols in wtree:
if cols[HEAD]==u"_": #token
continue
cols[HEAD]=word_ids[int(cols[HEAD])]
if cols[DEPS]!=u"_": #need to renumber secondary deps
new_pairs=[]
for head_deprel in cols[DEPS].split(u"|"):
head,deprel=head_deprel.split(u":")
new_pairs.append(word_ids[int(head)]+u":"+deprel)
cols[DEPS]=u"|".join(new_pairs)
if __name__=="__main__":
opt_parser = argparse.ArgumentParser(description='Conversion script from word-based CoNLL-U to token-based CoNLL-U. This script assumes that the input is validated and does no checking on its own.')
opt_parser.add_argument('input', nargs='?', help='Input file name, or "-" or nothing for standard input.')
opt_parser.add_argument('output', nargs='?', help='Output file name, or "-" or nothing for standard output.')
args = opt_parser.parse_args() #Parsed command-line arguments
inp,out=file_util.in_out(args)
for comments,tree in file_util.trees(inp):
w2t(tree)
file_util.print_tree(comments,tree,out)
| en | 0.891056 | #!/usr/bin/env python #column index for the columns we'll need Returns a list of tokens in the tree as integer intervals like so: [(1,1),(2,3),(4,4),...] `tree` is a tree (as produced by trees()) in the word-indexed format #Not covered by the previous interval? #nope - let's make a default interval for it #Check the interval against the regex #root remains 0 #index of the line in wtree we are editing #go over all token ranges and produce new IDs for the words involved #Renumber the ID field of the token #token==word #We have a range, renumber the words #consume as many lines as there are words in the token #word_ids is now a list with 1-based indexing which has the new ID for every single word #the ID column has been renumbered by now #now we can renumber all of the HEAD columns #token #need to renumber secondary deps #Parsed command-line arguments | 3.341522 | 3 |
src/mdp/markov_decision_procedure.py | Gnosling/RLASP | 0 | 6625466 | import os
import clingo
import random
from typing import Set, List
class MarkovDecisionProcedure:
    """A Markov decision process whose dynamics are encoded as ASP (clingo) programs.

    The environment is described by two logic programs: a generic MDP
    interface (`interface_file_name`) and a problem-specific encoding
    (`problem_file_name`).  Each call to `transition()` grounds and solves
    these programs to obtain the successor state, the reward and the set of
    executable actions, while recording the trajectory
    S0, A0, R1, S1, A1, R2, S2, A2, ...
    """

    @staticmethod
    def file_path(file_name):
        """Return the absolute path of `file_name`, resolved relative to this module's directory."""
        return os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)

    def __init__(self, state_initial: Set[str], state_static: Set[str], discount_rate: float,
                 problem_file_name: str):
        """Initialize the MDP in state `state_initial`.

        Args:
            state_initial: Fluents (as strings) describing the initial state S0.
            state_static: Facts that hold in every state of this problem instance.
            discount_rate: Discount factor used by `return_history`.
            problem_file_name: File name of the problem-specific ASP encoding.
        """
        self.state: Set[str] = frozenset(state_initial)
        self.state_static: Set[str] = frozenset(state_static)
        self.discount_rate: float = discount_rate
        # Optional external environment; remains False until set_env() is called.
        self.env = False
        # TODO: Needs to be separated from abstract MDP. -> Do it when introducing a second MDP
        self.interface_file_name: str = 'markov_decision_procedure.lp'
        self.problem_file_name: str = problem_file_name
        # MDP trajectory: S0, A0, R1, S1, A1, R2, S2, A2, ...
        self.state_history: List[Set[str]] = [frozenset(state_initial)]  # S0
        self.action_history: List[str] = []  # A0 is appended once the first action is executed
        self.reward_history: List[float] = [None]  # placeholder for R0, which is undefined
        # Determine which actions are executable in the initial state.
        self.available_actions: Set[str] = set()
        self.update_available_actions()
        self.action = ""  # most recently executed action; consumed by set_transition()

    @property
    def interface_file_path(self):
        """Absolute path of the generic MDP interface logic program."""
        return self.file_path(self.interface_file_name)

    @property
    def problem_file_path(self):
        """Absolute path of the problem-specific logic program."""
        return self.file_path(self.problem_file_name)

    def set_transition(self, model: 'clingo.Model'):
        """clingo `on_model` callback: record the outcome of the last transition.

        Extracts the successor state, the reward and the actions executable in
        the successor state from `model`, then appends A[t], S[t+1] and R[t+1]
        to the trajectory histories.
        """
        next_reward = None
        next_state = set()
        available_actions = set()
        for symbol in model.symbols(shown=True):
            if symbol.name == 'nextState':
                # Atom is of the form `nextState(f(...))` where `f(...)` is an
                # uninterpreted function belonging to the state representation.
                next_state.add(str(symbol.arguments[0]))
            elif symbol.name == 'nextReward':
                # Atom is of the form `nextReward(r)`, and `r` is the reward.
                next_reward = symbol.arguments[0].number
            elif symbol.name == 'nextExecutable':
                # Atom is of the form `nextExecutable(f(...))` where `f(...)`
                # is an uninterpreted function representing an executable action.
                available_actions.add(str(symbol.arguments[0]))
        self.state = frozenset(next_state)
        self.available_actions = available_actions
        # Update trajectory:
        self.action_history.append(self.action)           # A[t]
        self.state_history.append(frozenset(next_state))  # S[t+1]
        self.reward_history.append(next_reward)           # R[t+1]

    def transition(self, action: str):
        """Execute `action` in the current state.

        Grounds and solves the MDP programs; `set_transition` (the `on_model`
        callback) updates the state, the executable actions and the trajectory.

        Returns:
            A tuple `(next_state, reward)` of the successor state and R[t+1].
        """
        ctl = clingo.Control()
        ctl.load(self.file_path(self.interface_file_name))
        ctl.load(self.file_path(self.problem_file_name))
        ctl.add('base', [], ' '.join(f'currentState({s}).' for s in self.state))
        ctl.add('base', [], ' '.join(f'{s}.' for s in self.state_static))
        ctl.add('base', [], f'currentAction({action}).')
        ctl.add('base', [], '#show nextState/1. #show nextReward/1. #show nextExecutable/1.')
        ctl.ground(parts=[('base', [])])
        self.action = action  # consumed by set_transition() during solving
        ctl.solve(on_model=self.set_transition)
        return self.state, self.reward_history[-1]

    @property
    def return_history(self) -> List[float]:
        """Discounted return G[t] for every state in the trajectory.

        G[t] = R[t+1] + discount_rate * G[t+1], with G[T-1] = 0 for the most
        recently visited state.
        """
        T = len(self.state_history)
        G = [0] * T
        for t in reversed(range(T - 1)):
            G[t] = self.reward_history[t + 1] + self.discount_rate * G[t + 1]
        return G

    def set_available_actions(self, model: 'clingo.Model'):
        """clingo `on_model` callback: collect the currently executable actions.

        We expect atoms of the form `currentExecutable(move(X, Y))`, but we are
        only interested in the first argument `move(X, Y)`.
        """
        available_actions = set()
        for symbol in model.symbols(shown=True):
            available_actions.add(str(symbol.arguments[0]))
        self.available_actions = available_actions

    @property
    def _compute_available_actions(self) -> Set[str]:
        """Deprecated property alias for `update_available_actions()`.

        Kept so that existing callers that evaluate this as a bare property
        access (relying on its side effect) continue to work.
        """
        return self.update_available_actions()

    def update_available_actions(self) -> Set[str]:
        """Recompute which actions are executable in the current state.

        Grounds and solves the MDP programs; `set_available_actions` stores the
        result in `self.available_actions`, which is also returned.
        """
        ctl = clingo.Control()
        ctl.load(self.file_path(self.interface_file_name))
        ctl.load(self.file_path(self.problem_file_name))
        ctl.add('base', [], ' '.join(f'currentState({s}).' for s in self.state))
        ctl.add('base', [], ' '.join(f'{s}.' for s in self.state_static))
        ctl.add('base', [], '#show currentExecutable/1.')
        ctl.ground(parts=[('base', [])])
        ctl.solve(on_model=self.set_available_actions)
        return self.available_actions

    def set_env(self, env):
        """Attach an external environment object (must provide `set_level()` for `set_env_level`)."""
        self.env = env

    def set_env_level(self, level_name, is_slippery=False, is_random=False):
        """Forward level configuration to the attached environment."""
        self.env.set_level(level_name, is_slippery, is_random)
| import os
import clingo
import random
from typing import Set, List
class MarkovDecisionProcedure:
@staticmethod
def file_path(file_name):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
    def __init__(self, state_initial: Set[str], state_static: Set[str], discount_rate: float,
                 problem_file_name: str):
        """Initialize the MDP in state `state_initial`.

        Args:
            state_initial: Fluents (as strings) describing the initial state S0.
            state_static: Facts that hold in every state of this problem instance.
            discount_rate: Discount factor used by `return_history`.
            problem_file_name: File name of the problem-specific ASP encoding.
        """
        self.state: Set[str] = frozenset(state_initial)
        self.state_static: Set[str] = frozenset(state_static)
        self.discount_rate: float = discount_rate
        # Optional external environment; remains False until set_env() is called.
        self.env = False
        # TODO: Needs to be separated from abstract MDP. -> Do it when introducing a second MDP
        self.interface_file_name: str = 'markov_decision_procedure.lp'
        self.problem_file_name: str = problem_file_name
        # MDP trajectory: S0, A0, R1, S1, A1, R2, S2, A2, ...
        self.state_history: List[Set[str]] = [frozenset(state_initial)] # S0
        self.action_history: List[str] = [] #A0 will be given later once the first action is executed
        self.reward_history: List[float] = [None] # R0, which is undefined
        # self.available_actions = self._compute_available_actions
        self.available_actions = set()
        # NOTE: bare property access on purpose -- evaluating the
        # `_compute_available_actions` property solves the ASP program and
        # fills `self.available_actions` as a side effect.
        self._compute_available_actions
        self.action = ""  # most recently executed action; consumed by set_transition()
@property
def interface_file_path(self):
return self.file_path(self.interface_file_name)
@property
def problem_file_path(self):
return self.file_path(self.problem_file_name)
def set_transition(self, model: clingo.Model):
next_reward = None
next_state = set()
available_actions = set()
for symbol in model.symbols(shown=True):
if symbol.name == 'nextState':
# ˙Atom is of the form `state(f(...))`
# where`f(...)` is an uninterpreted function belonging to the state representation.
f = symbol.arguments[0]
next_state.add(str(f))
if symbol.name == 'nextReward':
# Atom is of the form `nextReward(r)`, and `r` is the reward.
next_reward = symbol.arguments[0].number
if symbol.name == 'nextExecutable':
# Atom is of the form `nextExecutable(f(...))`
# where`f(...)` is an uninterpreted function representing an executable action.
available_actions.add(str(symbol.arguments[0]))
self.state = frozenset(next_state)
self.available_actions = available_actions
# Update trajectory:
self.action_history.append(self.action) # A[t]
self.state_history.append(frozenset(next_state)) # S[t+1]
self.reward_history.append(next_reward) # R[t+1]
def transition(self, action: str):
ctl = clingo.Control()
ctl.load(self.file_path(self.interface_file_name))
ctl.load(self.file_path(self.problem_file_name))
ctl.add('base', [], ' '.join(f'currentState({s}).' for s in self.state))
ctl.add('base', [], ' '.join(f'{s}.' for s in self.state_static))
ctl.add('base', [], f'currentAction({action}).')
ctl.add('base', [], '#show nextState/1. #show nextReward/1. #show nextExecutable/1.')
ctl.ground(parts=[('base', [])])
self.action = action
ctl.solve(on_model=self.set_transition)
return self.state, self.reward_history[-1]
@property
def return_history(self) -> List[float]:
T = len(self.state_history)
G = [0] * T
for t in reversed(range(T-1)):
G[t] = self.reward_history[t+1] + self.discount_rate * G[t+1]
return G
def set_available_actions(self, model: clingo.Model):
available_actions = set()
for symbol in model.symbols(shown=True):
# We expect atoms of the form `currentExecutable(move(X, Y)`
# but we are only interested in the first argument `move(X, Y)`
available_actions.add(str(symbol.arguments[0]))
self.available_actions = available_actions
@property
def _compute_available_actions(self) -> Set[str]:
ctl = clingo.Control()
ctl.load(self.file_path(self.interface_file_name))
ctl.load(self.file_path(self.problem_file_name))
ctl.add('base', [], ' '.join(f'currentState({s}).' for s in self.state))
ctl.add('base', [], ' '.join(f'{s}.' for s in self.state_static))
ctl.add('base', [], '#show currentExecutable/1.')
ctl.ground(parts=[('base', [])])
ctl.solve(on_model=self.set_available_actions)
return self.available_actions
def update_available_actions(self):
ctl = clingo.Control()
ctl.load(self.file_path(self.interface_file_name))
ctl.load(self.file_path(self.problem_file_name))
ctl.add('base', [], ' '.join(f'currentState({s}).' for s in self.state))
ctl.add('base', [], ' '.join(f'{s}.' for s in self.state_static))
ctl.add('base', [], '#show currentExecutable/1.')
ctl.ground(parts=[('base', [])])
ctl.solve(on_model=self.set_available_actions)
return self.available_actions
def set_env(self, env):
self.env = env
def set_env_level(self, level_name, is_slippery=False, is_random=False):
self.env.set_level(level_name, is_slippery, is_random)
| en | 0.805962 | # TODO: Needs to be separated from abstract MDP. -> Do it when introducing a second MDP # MDP trajectory: S0, A0, R1, S1, A1, R2, S2, A2, ... # S0 #A0 will be given later once the first action is executed # R0, which is undefined # self.available_actions = self._compute_available_actions # ˙Atom is of the form `state(f(...))` # where`f(...)` is an uninterpreted function belonging to the state representation. # Atom is of the form `nextReward(r)`, and `r` is the reward. # Atom is of the form `nextExecutable(f(...))` # where`f(...)` is an uninterpreted function representing an executable action. # Update trajectory: # A[t] # S[t+1] # R[t+1] #show nextReward/1. #show nextExecutable/1.') # We expect atoms of the form `currentExecutable(move(X, Y)` # but we are only interested in the first argument `move(X, Y)` | 2.862174 | 3 |
src/twisted/internet/endpoints.py | muelli/twisted | 0 | 6625467 | <reponame>muelli/twisted
# -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementations of L{IStreamServerEndpoint} and L{IStreamClientEndpoint} that
wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces.
This also implements an extensible mini-language for describing endpoints,
parsed by the L{clientFromString} and L{serverFromString} functions.
@since: 10.1
"""
import os
import re
import socket
from unicodedata import normalize
import warnings
from constantly import NamedConstant, Names
from incremental import Version
from zope.interface import implementer, directlyProvides, provider
from twisted.internet import interfaces, defer, error, fdesc, threads
from twisted.internet.abstract import isIPv6Address, isIPAddress
from twisted.internet.address import (
_ProcessAddress, HostnameAddress, IPv4Address, IPv6Address
)
from twisted.internet.interfaces import (
IStreamServerEndpointStringParser,
IStreamClientEndpointStringParserWithReactor, IResolutionReceiver,
IReactorPluggableNameResolver,
IHostnameResolver,
)
from twisted.internet.protocol import ClientFactory, Factory
from twisted.internet.protocol import ProcessProtocol, Protocol
try:
from twisted.internet.stdio import StandardIO, PipeAddress
except ImportError:
# fallback if pywin32 is not installed
StandardIO = None # type: ignore[assignment,misc]
PipeAddress = None # type: ignore[assignment,misc]
from twisted.internet.task import LoopingCall
from twisted.internet._resolver import HostResolution
from twisted.logger import Logger
from twisted.plugin import IPlugin, getPlugins
from twisted.python import deprecate, log
from twisted.python.compat import nativeString, _matchingString
from twisted.python.components import proxyForInterface
from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
from twisted.python.compat import iterbytes
from twisted.internet.defer import Deferred
from twisted.python.systemd import ListenFDs
from ._idna import _idnaBytes, _idnaText
try:
from twisted.protocols.tls import (
TLSMemoryBIOFactory as _TLSMemoryBIOFactory)
from twisted.internet.ssl import (
optionsForClientTLS, PrivateCertificate, Certificate, KeyPair,
CertificateOptions, trustRootFromCertificates
)
from OpenSSL.SSL import Error as SSLError
except ImportError:
TLSMemoryBIOFactory = None
else:
TLSMemoryBIOFactory = _TLSMemoryBIOFactory
__all__ = ["clientFromString", "serverFromString",
"TCP4ServerEndpoint", "TCP6ServerEndpoint",
"TCP4ClientEndpoint", "TCP6ClientEndpoint",
"UNIXServerEndpoint", "UNIXClientEndpoint",
"SSL4ServerEndpoint", "SSL4ClientEndpoint",
"AdoptedStreamServerEndpoint", "StandardIOEndpoint",
"ProcessEndpoint", "HostnameEndpoint",
"StandardErrorBehavior", "connectProtocol",
"wrapClientTLS"]
class _WrappingProtocol(Protocol):
    """
    A transparent wrapper around another protocol which fires a L{Deferred}
    as soon as the connection is established, and forwards every subsequent
    event to the wrapped protocol.
    """

    def __init__(self, connectedDeferred, wrappedProtocol):
        """
        @param connectedDeferred: The L{Deferred} that will callback
            with the C{wrappedProtocol} when it is connected.

        @param wrappedProtocol: An L{IProtocol} provider that will be
            connected.
        """
        self._connectedDeferred = connectedDeferred
        self._wrappedProtocol = wrappedProtocol

        # Mirror the optional interfaces of the wrapped protocol so that the
        # transport treats this wrapper exactly like the real protocol.
        optionalInterfaces = (
            interfaces.IHalfCloseableProtocol,
            interfaces.IFileDescriptorReceiver,
            interfaces.IHandshakeListener,
        )
        for optionalInterface in optionalInterfaces:
            if optionalInterface.providedBy(wrappedProtocol):
                directlyProvides(self, optionalInterface)

    def logPrefix(self):
        """
        Transparently pass through the wrapped protocol's log prefix.
        """
        wrapped = self._wrappedProtocol
        if interfaces.ILoggingContext.providedBy(wrapped):
            return wrapped.logPrefix()
        return wrapped.__class__.__name__

    def connectionMade(self):
        """
        Hook C{self._wrappedProtocol} up to our transport, then fire
        C{self._connectedDeferred} with the wrapped protocol.
        """
        wrapped = self._wrappedProtocol
        wrapped.makeConnection(self.transport)
        self._connectedDeferred.callback(wrapped)

    def dataReceived(self, data):
        """
        Proxy C{dataReceived} calls to our C{self._wrappedProtocol}
        """
        return self._wrappedProtocol.dataReceived(data)

    def fileDescriptorReceived(self, descriptor):
        """
        Proxy C{fileDescriptorReceived} calls to our C{self._wrappedProtocol}
        """
        return self._wrappedProtocol.fileDescriptorReceived(descriptor)

    def connectionLost(self, reason):
        """
        Proxy C{connectionLost} calls to our C{self._wrappedProtocol}
        """
        return self._wrappedProtocol.connectionLost(reason)

    def readConnectionLost(self):
        """
        Proxy L{IHalfCloseableProtocol.readConnectionLost} to our
        C{self._wrappedProtocol}
        """
        self._wrappedProtocol.readConnectionLost()

    def writeConnectionLost(self):
        """
        Proxy L{IHalfCloseableProtocol.writeConnectionLost} to our
        C{self._wrappedProtocol}
        """
        self._wrappedProtocol.writeConnectionLost()

    def handshakeCompleted(self):
        """
        Proxy L{interfaces.IHandshakeListener} to our
        C{self._wrappedProtocol}.
        """
        self._wrappedProtocol.handshakeCompleted()
class _WrappingFactory(ClientFactory):
    """
    Wrap a factory in order to wrap the protocols it builds.

    @ivar _wrappedFactory: A provider of I{IProtocolFactory} whose buildProtocol
        method will be called and whose resulting protocol will be wrapped.

    @ivar _onConnection: A L{Deferred} that fires when the protocol is
        connected

    @ivar _connector: A L{connector <twisted.internet.interfaces.IConnector>}
        that is managing the current or previous connection attempt.
    """
    # Protocol class instantiated around each protocol the wrapped factory
    # builds; see buildProtocol below.
    protocol = _WrappingProtocol

    def __init__(self, wrappedFactory):
        """
        @param wrappedFactory: A provider of I{IProtocolFactory} whose
            buildProtocol method will be called and whose resulting protocol
            will be wrapped.
        """
        self._wrappedFactory = wrappedFactory
        # Cancelling this Deferred aborts the in-flight connection attempt;
        # see _canceller for why _connector is guaranteed to exist by then.
        self._onConnection = defer.Deferred(canceller=self._canceller)

    def startedConnecting(self, connector):
        """
        A connection attempt was started.  Remember the connector which started
        said attempt, for use later.
        """
        self._connector = connector

    def _canceller(self, deferred):
        """
        The outgoing connection attempt was cancelled.  Fail that L{Deferred}
        with an L{error.ConnectingCancelledError}.

        @param deferred: The L{Deferred <defer.Deferred>} that was cancelled;
            should be the same as C{self._onConnection}.
        @type deferred: L{Deferred <defer.Deferred>}

        @note: This relies on startedConnecting having been called, so it may
            seem as though there's a race condition where C{_connector} may not
            have been set.  However, using public APIs, this condition is
            impossible to catch, because a connection API
            (C{connectTCP}/C{SSL}/C{UNIX}) is always invoked before a
            L{_WrappingFactory}'s L{Deferred <defer.Deferred>} is returned to
            C{connect()}'s caller.

        @return: L{None}
        """
        deferred.errback(
            error.ConnectingCancelledError(
                self._connector.getDestination()))
        self._connector.stopConnecting()

    def doStart(self):
        """
        Start notifications are passed straight through to the wrapped factory.
        """
        self._wrappedFactory.doStart()

    def doStop(self):
        """
        Stop notifications are passed straight through to the wrapped factory.
        """
        self._wrappedFactory.doStop()

    def buildProtocol(self, addr):
        """
        Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback the
        C{self._onConnection} L{Deferred} if the wrapped factory raises an
        exception or returns L{None}.

        @return: An instance of L{_WrappingProtocol} or L{None}
        """
        try:
            proto = self._wrappedFactory.buildProtocol(addr)
            if proto is None:
                raise error.NoProtocol()
        except:
            # Deliberate bare except: errback() with no argument captures
            # whatever exception is currently being handled as a Failure.
            self._onConnection.errback()
        else:
            return self.protocol(self._onConnection, proto)

    def clientConnectionFailed(self, connector, reason):
        """
        Errback the C{self._onConnection} L{Deferred} when the
        client connection fails.
        """
        # _onConnection may already have fired (e.g. via cancellation), in
        # which case a second errback would raise AlreadyCalledError.
        if not self._onConnection.called:
            self._onConnection.errback(reason)
@implementer(interfaces.IStreamServerEndpoint)
class StandardIOEndpoint:
    """
    A server endpoint whose "connection" is this process's own
    stdin/stdout pair.

    @ivar _stdio: a callable, like L{stdio.StandardIO}, which takes an
        L{IProtocol} provider and a C{reactor} keyword argument (interface
        dependent upon your platform).
    """
    _stdio = StandardIO

    def __init__(self, reactor):
        """
        @param reactor: The reactor for the endpoint.
        """
        self._reactor = reactor

    def listen(self, stdioProtocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on stdin/stdout
        """
        protocol = stdioProtocolFactory.buildProtocol(PipeAddress())
        # defer.execute turns a synchronous failure in _stdio into a failed
        # Deferred rather than a raised exception.
        return defer.execute(self._stdio, protocol, reactor=self._reactor)
# Interface-only declaration: it has no body because it exists purely to be
# handed to proxyForInterface (see _ProcessEndpointTransport below).
class _IProcessTransportWithConsumerAndProducer(interfaces.IProcessTransport,
                                                interfaces.IConsumer,
                                                interfaces.IPushProducer):
    """
    An L{_IProcessTransportWithConsumerAndProducer} combines various interfaces
    to work around the issue that L{interfaces.IProcessTransport} is
    incompletely defined and doesn't specify flow-control interfaces, and that
    L{proxyForInterface} doesn't allow for multiple interfaces.
    """
# proxyForInterface generates all method/attribute forwarding automatically:
# every method of the combined interface is delegated to the `_process`
# attribute, so this class needs no body of its own.
class _ProcessEndpointTransport(
        proxyForInterface(_IProcessTransportWithConsumerAndProducer,  # type: ignore[misc] # noqa
                          '_process')):
    """
    An L{ITransport}, L{IProcessTransport}, L{IConsumer}, and L{IPushProducer}
    provider for the L{IProtocol} instance passed to the process endpoint.

    @ivar _process: An active process transport which will be used by write
        methods on this object to write data to a child process.
    @type _process: L{interfaces.IProcessTransport} provider
    """
class _WrapIProtocol(ProcessProtocol):
    """
    An L{IProcessProtocol} provider that wraps an L{IProtocol}.

    @ivar transport: A L{_ProcessEndpointTransport} provider that is hooked to
        the wrapped L{IProtocol} provider.

    @see: L{protocol.ProcessProtocol}
    """

    def __init__(self, proto, executable, errFlag):
        """
        @param proto: An L{IProtocol} provider.
        @param errFlag: A constant belonging to L{StandardErrorBehavior}
            that determines if stderr is logged or dropped.
        @param executable: The file name (full path) to spawn.
        """
        self.protocol = proto
        self.errFlag = errFlag
        self.executable = executable

    def makeConnection(self, process):
        """
        Call L{IProtocol} provider's makeConnection method with an
        L{ITransport} provider.

        @param process: An L{IProcessTransport} provider.
        """
        self.transport = _ProcessEndpointTransport(process)
        return self.protocol.makeConnection(self.transport)

    def childDataReceived(self, childFD, data):
        """
        This is called with data from the process's stdout or stderr pipes.
        It checks the status of the errFlag to determine if stderr should be
        logged (default) or dropped.
        """
        if childFD == 1:
            # stdout is delivered to the wrapped protocol as ordinary data.
            return self.protocol.dataReceived(data)
        elif childFD == 2 and self.errFlag == StandardErrorBehavior.LOG:
            log.msg(
                format="Process %(executable)r wrote stderr unhandled by "
                       "%(protocol)s: %(data)s",
                executable=self.executable, protocol=self.protocol,
                data=data)

    def processEnded(self, reason):
        """
        If the process ends with L{error.ProcessDone}, this method calls the
        L{IProtocol} provider's L{connectionLost} with a
        L{error.ConnectionDone}

        @see: L{ProcessProtocol.processEnded}
        """
        # Failure.check returns the matching exception class or None, so a
        # plain truth test is the correct idiom (the previous
        # `== error.ProcessDone` comparison was redundant).
        if reason.check(error.ProcessDone) and reason.value.status == 0:
            return self.protocol.connectionLost(
                Failure(error.ConnectionDone()))
        else:
            return self.protocol.connectionLost(reason)
class StandardErrorBehavior(Names):
    """
    Constants used in ProcessEndpoint to decide what to do with stderr.

    @cvar LOG: Indicates that stderr is to be logged.
    @cvar DROP: Indicates that stderr is to be dropped (and not logged).

    @since: 13.1
    """
    # constantly.Names turns each NamedConstant into a distinct, comparable
    # singleton attribute of this class.
    LOG = NamedConstant()
    DROP = NamedConstant()
@implementer(interfaces.IStreamClientEndpoint)
class ProcessEndpoint:
    """
    An endpoint for child processes

    @ivar _spawnProcess: A hook used for testing the spawning of child process.

    @since: 13.1
    """

    def __init__(self, reactor, executable, args=(), env={}, path=None,
                 uid=None, gid=None, usePTY=0, childFDs=None,
                 errFlag=StandardErrorBehavior.LOG):
        """
        See L{IReactorProcess.spawnProcess}.

        @param errFlag: Determines if stderr should be logged.
        @type errFlag: L{endpoints.StandardErrorBehavior}
        """
        # NOTE: the mutable default `env={}` is intentional and safe here: it
        # is only stored and passed on, never mutated, and an empty mapping
        # has a different meaning from None for spawnProcess.
        self._reactor = reactor
        self._executable = executable
        self._args = args
        self._env = env
        self._path = path
        self._uid = uid
        self._gid = gid
        self._usePTY = usePTY
        self._childFDs = childFDs
        self._errFlag = errFlag
        self._spawnProcess = self._reactor.spawnProcess

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to launch a child process
        and connect it to a protocol created by C{protocolFactory}.

        @param protocolFactory: A factory for an L{IProtocol} provider which
            will be notified of all events related to the created process.
        """
        proto = protocolFactory.buildProtocol(_ProcessAddress())
        wrapper = _WrapIProtocol(proto, self._executable, self._errFlag)
        try:
            self._spawnProcess(
                wrapper, self._executable, self._args, self._env,
                self._path, self._uid, self._gid, self._usePTY,
                self._childFDs)
        except:
            # Deliberate bare except: convert any synchronous spawn failure
            # (whatever its type) into a failed Deferred.
            return defer.fail()
        return defer.succeed(proto)
@implementer(interfaces.IStreamServerEndpoint)
class _TCPServerEndpoint:
    """
    Common implementation shared by the IPv4 and IPv6 TCP server endpoints.
    """

    def __init__(self, reactor, port, backlog, interface):
        """
        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to
        @type interface: str
        """
        self._reactor = reactor
        self._port = port
        self._backlog = backlog
        self._interface = interface

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on a TCP
        socket
        """
        # defer.execute converts a synchronous listenTCP failure (e.g. port
        # in use) into a failed Deferred.
        return defer.execute(
            self._reactor.listenTCP,
            self._port,
            protocolFactory,
            backlog=self._backlog,
            interface=self._interface,
        )
class TCP4ServerEndpoint(_TCPServerEndpoint):
    """
    A TCP server endpoint for IPv4.
    """

    def __init__(self, reactor, port, backlog=50, interface=''):
        """
        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to, defaults to '' (all)
        @type interface: str
        """
        super().__init__(reactor, port, backlog, interface)
class TCP6ServerEndpoint(_TCPServerEndpoint):
    """
    A TCP server endpoint for IPv6.
    """

    def __init__(self, reactor, port, backlog=50, interface='::'):
        """
        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to, defaults to C{::} (all)
        @type interface: str
        """
        super().__init__(reactor, port, backlog, interface)
@implementer(interfaces.IStreamClientEndpoint)
class TCP4ClientEndpoint:
    """
    TCP client endpoint with an IPv4 configuration.
    """

    def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
        """
        @param reactor: An L{IReactorTCP} provider

        @param host: A hostname, used when connecting
        @type host: str

        @param port: The port number, used when connecting
        @type port: int

        @param timeout: The number of seconds to wait before assuming the
            connection has failed.
        @type timeout: L{float} or L{int}

        @param bindAddress: A (host, port) tuple of local address to bind to,
            or None.
        @type bindAddress: tuple
        """
        self._reactor = reactor
        self._host = host
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via TCP.
        """
        try:
            wrappingFactory = _WrappingFactory(protocolFactory)
            self._reactor.connectTCP(
                self._host,
                self._port,
                wrappingFactory,
                timeout=self._timeout,
                bindAddress=self._bindAddress,
            )
        except:
            # Deliberate bare except: any synchronous connectTCP failure is
            # reported through the returned Deferred.
            return defer.fail()
        return wrappingFactory._onConnection
@implementer(interfaces.IStreamClientEndpoint)
class TCP6ClientEndpoint:
    """
    TCP client endpoint with an IPv6 configuration.

    @ivar _getaddrinfo: A hook used for testing name resolution.

    @ivar _deferToThread: A hook used for testing deferToThread.

    @ivar _GAI_ADDRESS: Index of the address portion in result of
        getaddrinfo to be used.

    @ivar _GAI_ADDRESS_HOST: Index of the actual host-address in the
        5-tuple L{_GAI_ADDRESS}.
    """
    _getaddrinfo = staticmethod(socket.getaddrinfo)
    _deferToThread = staticmethod(threads.deferToThread)
    # getaddrinfo yields (family, type, proto, canonname, sockaddr) 5-tuples;
    # index 4 is sockaddr, whose element 0 is the host address string.
    _GAI_ADDRESS = 4
    _GAI_ADDRESS_HOST = 0

    def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
        """
        @param host: An IPv6 address literal or a hostname with an
            IPv6 address

        @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP}
        """
        self._reactor = reactor
        self._host = host
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via TCP,
        once the hostname resolution is done.
        """
        if isIPv6Address(self._host):
            # Already a literal address: skip the threaded DNS lookup.
            d = self._resolvedHostConnect(self._host, protocolFactory)
        else:
            d = self._nameResolution(self._host)
            # Pick the host string out of the first getaddrinfo result.
            d.addCallback(lambda result: result[0][self._GAI_ADDRESS]
                          [self._GAI_ADDRESS_HOST])
            d.addCallback(self._resolvedHostConnect, protocolFactory)
        return d

    def _nameResolution(self, host):
        """
        Resolve the hostname string into a tuple containing the host
        IPv6 address.
        """
        # Runs the blocking getaddrinfo call in the reactor's thread pool.
        return self._deferToThread(
            self._getaddrinfo, host, 0, socket.AF_INET6)

    def _resolvedHostConnect(self, resolvedHost, protocolFactory):
        """
        Connect to the server using the resolved hostname.
        """
        try:
            wf = _WrappingFactory(protocolFactory)
            self._reactor.connectTCP(resolvedHost, self._port, wf,
                timeout=self._timeout, bindAddress=self._bindAddress)
            return wf._onConnection
        except:
            # Deliberate bare except: any synchronous connectTCP failure is
            # reported through the returned Deferred.
            return defer.fail()
@implementer(IHostnameResolver)
class _SimpleHostnameResolver:
    """
    An L{IHostnameResolver} provider that invokes a provided callable
    to resolve hostnames.

    @ivar _nameResolution: the callable L{resolveHostName} invokes to
        resolve hostnames.
    @type _nameResolution: A L{callable} that accepts two arguments:
        the host to resolve and the port number to include in the
        result.
    """
    _log = Logger()

    def __init__(self, nameResolution):
        """
        Create a L{_SimpleHostnameResolver} instance.

        @param nameResolution: a callable of (host, port) returning a
            L{Deferred} firing with a getaddrinfo-style result list.
        """
        self._nameResolution = nameResolution

    def resolveHostName(self, resolutionReceiver,
                        hostName,
                        portNumber=0,
                        addressTypes=None,
                        transportSemantics='TCP'):
        """
        Initiate a hostname resolution.

        @param resolutionReceiver: an object that will receive each resolved
            address as it arrives.
        @type resolutionReceiver: L{IResolutionReceiver}

        @param hostName: see interface

        @param portNumber: see interface

        @param addressTypes: Ignored in this implementation.

        @param transportSemantics: Ignored in this implementation.

        @return: The resolution in progress.
        @rtype: L{IResolutionReceiver}
        """
        # Receiver contract: resolutionBegan first, then zero or more
        # addressResolved calls, then resolutionComplete -- always, even on
        # lookup failure (the errback below only logs).
        resolutionReceiver.resolutionBegan(HostResolution(hostName))
        d = self._nameResolution(hostName, portNumber)

        def cbDeliver(gairesult):
            # Deliver each getaddrinfo 5-tuple as an IPv4/IPv6 address object;
            # other address families are silently skipped.
            for family, socktype, proto, canonname, sockaddr in gairesult:
                if family == socket.AF_INET6:
                    resolutionReceiver.addressResolved(
                        IPv6Address('TCP', *sockaddr))
                elif family == socket.AF_INET:
                    resolutionReceiver.addressResolved(
                        IPv4Address('TCP', *sockaddr))

        def ebLog(error):
            self._log.failure("while looking up {name} with {callable}",
                              error, name=hostName,
                              callable=self._nameResolution)

        d.addCallback(cbDeliver)
        d.addErrback(ebLog)
        d.addBoth(lambda ignored: resolutionReceiver.resolutionComplete())
        return resolutionReceiver
@implementer(interfaces.IStreamClientEndpoint)
class HostnameEndpoint:
    """
    A name-based endpoint that connects to the fastest amongst the resolved
    host addresses.

    @cvar _DEFAULT_ATTEMPT_DELAY: The default time to use between attempts, in
        seconds, when no C{attemptDelay} is given to
        L{HostnameEndpoint.__init__}.

    @ivar _hostText: the textual representation of the hostname passed to the
        constructor.  Used to pass to the reactor's hostname resolver.
    @type _hostText: L{unicode}

    @ivar _hostBytes: the encoded bytes-representation of the hostname passed
        to the constructor.  Used to construct the L{HostnameAddress}
        associated with this endpoint.
    @type _hostBytes: L{bytes}

    @ivar _hostStr: the native-string representation of the hostname passed to
        the constructor, used for exception construction
    @type _hostStr: native L{str}

    @ivar _badHostname: a flag - hopefully false! - indicating that an invalid
        hostname was passed to the constructor.  This might be a textual
        hostname that isn't valid IDNA, or non-ASCII bytes.
    @type _badHostname: L{bool}
    """
    _getaddrinfo = staticmethod(socket.getaddrinfo)
    _deferToThread = staticmethod(threads.deferToThread)
    _DEFAULT_ATTEMPT_DELAY = 0.3

    def __init__(self, reactor, host, port, timeout=30, bindAddress=None,
                 attemptDelay=None):
        """
        Create a L{HostnameEndpoint}.

        @param reactor: The reactor to use for connections and delayed calls.
        @type reactor: provider of L{IReactorTCP}, L{IReactorTime} and either
            L{IReactorPluggableNameResolver} or L{IReactorPluggableResolver}.

        @param host: A hostname to connect to.
        @type host: L{bytes} or L{unicode}

        @param port: The port number to connect to.
        @type port: L{int}

        @param timeout: For each individual connection attempt, the number of
            seconds to wait before assuming the connection has failed.
        @type timeout: L{float} or L{int}

        @param bindAddress: the local address of the network interface to make
            the connections from.
        @type bindAddress: L{bytes}

        @param attemptDelay: The number of seconds to delay between connection
            attempts.
        @type attemptDelay: L{float}

        @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP}
        """
        self._reactor = reactor
        self._nameResolver = self._getNameResolverAndMaybeWarn(reactor)
        [self._badHostname, self._hostBytes, self._hostText] = (
            self._hostAsBytesAndText(host)
        )
        # NOTE(review): Python 2 remnant -- on Python 3 `bytes is str` is
        # always False, so _hostStr is always the text form.
        self._hostStr = self._hostBytes if bytes is str else self._hostText
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress
        if attemptDelay is None:
            attemptDelay = self._DEFAULT_ATTEMPT_DELAY
        self._attemptDelay = attemptDelay

    def __repr__(self) -> str:
        """
        Produce a string representation of the L{HostnameEndpoint}.

        @return: A L{str}
        """
        if self._badHostname:
            # Use the backslash-encoded version of the string passed to the
            # constructor, which is already a native string.
            host = self._hostStr
        elif isIPv6Address(self._hostStr):
            # Bracket IPv6 literals, matching URI/host:port conventions.
            host = '[{}]'.format(self._hostStr)
        else:
            # Convert the bytes representation to a native string to ensure
            # that we display the punycoded version of the hostname, which is
            # more useful than any IDN version as it can be easily copy-pasted
            # into debugging tools.
            host = nativeString(self._hostBytes)
        return "".join(["<HostnameEndpoint ", host, ":", str(self._port), ">"])

    def _getNameResolverAndMaybeWarn(self, reactor):
        """
        Retrieve a C{nameResolver} callable and warn the caller's
        caller that using a reactor which doesn't provide
        L{IReactorPluggableNameResolver} is deprecated.

        @param reactor: The reactor to check.

        @return: A L{IHostnameResolver} provider.
        """
        if not IReactorPluggableNameResolver.providedBy(reactor):
            warningString = deprecate.getDeprecationWarningString(
                reactor.__class__,
                Version('Twisted', 17, 5, 0),
                format=("Passing HostnameEndpoint a reactor that does not"
                        " provide IReactorPluggableNameResolver (%(fqpn)s)"
                        " was deprecated in %(version)s"),
                replacement=("a reactor that provides"
                             " IReactorPluggableNameResolver"),
            )
            # stacklevel=3 attributes the warning to whoever constructed the
            # endpoint, not to this helper or to __init__.
            warnings.warn(warningString, DeprecationWarning, stacklevel=3)
            return _SimpleHostnameResolver(self._fallbackNameResolution)
        return reactor.nameResolver

    @staticmethod
    def _hostAsBytesAndText(host):
        """
        For various reasons (documented in the C{@ivar}'s in the class
        docstring) we need both a textual and a binary representation of the
        hostname given to the constructor.  For compatibility and convenience,
        we accept both textual and binary representations of the hostname, save
        the form that was passed, and convert into the other form.  This is
        mostly just because L{HostnameAddress} chose somewhat poorly to define
        its attribute as bytes; hopefully we can find a compatible way to clean
        this up in the future and just operate in terms of text internally.

        @param host: A hostname to convert.
        @type host: L{bytes} or C{str}

        @return: a 3-tuple of C{(invalid, bytes, text)} where C{invalid} is a
            boolean indicating the validity of the hostname, C{bytes} is a
            binary representation of C{host}, and C{text} is a textual
            representation of C{host}.
        """
        if isinstance(host, bytes):
            if isIPAddress(host) or isIPv6Address(host):
                return False, host, host.decode("ascii")
            else:
                try:
                    return False, host, _idnaText(host)
                except UnicodeError:
                    # Convert the host to _some_ kind of text, to handle below.
                    host = host.decode("charmap")
        else:
            # NFC normalization so equivalent Unicode spellings compare equal
            # before IDNA encoding.
            host = normalize('NFC', host)
            if isIPAddress(host) or isIPv6Address(host):
                return False, host.encode("ascii"), host
            else:
                try:
                    return False, _idnaBytes(host), host
                except UnicodeError:
                    pass
        # `host` has been converted to text by this point either way; it's
        # invalid as a hostname, and so may contain unprintable characters and
        # such. escape it with backslashes so the user can get _some_ guess as
        # to what went wrong.
        asciibytes = host.encode('ascii', 'backslashreplace')
        return True, asciibytes, asciibytes.decode('ascii')

    def connect(self, protocolFactory):
        """
        Attempts a connection to each resolved address, and returns a
        connection which is established first.

        @param protocolFactory: The protocol factory whose protocol
            will be connected.
        @type protocolFactory:
            L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>}

        @return: A L{Deferred} that fires with the connected protocol
            or fails a connection-related error.
        """
        if self._badHostname:
            return defer.fail(
                ValueError("invalid hostname: {}".format(self._hostStr))
            )

        # `d` fires with the full list of resolved addresses once resolution
        # completes; the receiver below accumulates them as they arrive.
        d = Deferred()
        addresses = []

        @provider(IResolutionReceiver)
        class EndpointReceiver:
            @staticmethod
            def resolutionBegan(resolutionInProgress):
                pass

            @staticmethod
            def addressResolved(address):
                addresses.append(address)

            @staticmethod
            def resolutionComplete():
                d.callback(addresses)

        self._nameResolver.resolveHostName(
            EndpointReceiver, self._hostText, portNumber=self._port
        )

        d.addErrback(lambda ignored: defer.fail(error.DNSLookupError(
            "Couldn't find the hostname '{}'".format(self._hostStr))))

        @d.addCallback
        def resolvedAddressesToEndpoints(addresses):
            # Yield an endpoint for every address resolved from the name.
            for eachAddress in addresses:
                if isinstance(eachAddress, IPv6Address):
                    yield TCP6ClientEndpoint(
                        self._reactor, eachAddress.host, eachAddress.port,
                        self._timeout, self._bindAddress
                    )
                if isinstance(eachAddress, IPv4Address):
                    yield TCP4ClientEndpoint(
                        self._reactor, eachAddress.host, eachAddress.port,
                        self._timeout, self._bindAddress
                    )
        d.addCallback(list)

        def _canceller(d):
            # This canceller must remain defined outside of
            # `startConnectionAttempts`, because Deferred should not
            # participate in cycles with their cancellers; that would create a
            # potentially problematic circular reference and possibly
            # gc.garbage.
            d.errback(error.ConnectingCancelledError(
                HostnameAddress(self._hostBytes, self._port)))

        @d.addCallback
        def startConnectionAttempts(endpoints):
            """
            Given a sequence of endpoints obtained via name resolution, start
            connecting to a new one every C{self._attemptDelay} seconds until
            one of the connections succeeds, all of them fail, or the attempt
            is cancelled.

            @param endpoints: a list of all the endpoints we might try to
                connect to, as determined by name resolution.
            @type endpoints: L{list} of L{IStreamServerEndpoint}

            @return: a Deferred that fires with the result of the
                C{endpoint.connect} method that completes the fastest, or fails
                with the first connection error it encountered if none of them
                succeed.
            @rtype: L{Deferred} failing with L{error.ConnectingCancelledError}
                or firing with L{IProtocol}
            """
            if not endpoints:
                raise error.DNSLookupError(
                    "no results for hostname lookup: {}".format(self._hostStr)
                )
            iterEndpoints = iter(endpoints)
            pending = []
            failures = []
            winner = defer.Deferred(canceller=_canceller)

            def checkDone():
                # Overall failure only when: no attempt is still pending, no
                # winner has fired, and no endpoints remain to be tried.
                if pending or checkDone.completed or checkDone.endpointsLeft:
                    return
                winner.errback(failures.pop())
            # Function attributes used as mutable flags shared with the
            # closures below.
            checkDone.completed = False
            checkDone.endpointsLeft = True

            @LoopingCall
            def iterateEndpoint():
                endpoint = next(iterEndpoints, None)
                if endpoint is None:
                    # The list of endpoints ends.
                    checkDone.endpointsLeft = False
                    checkDone()
                    return

                eachAttempt = endpoint.connect(protocolFactory)
                pending.append(eachAttempt)

                @eachAttempt.addBoth
                def noLongerPending(result):
                    pending.remove(eachAttempt)
                    return result

                @eachAttempt.addCallback
                def succeeded(result):
                    winner.callback(result)

                @eachAttempt.addErrback
                def failed(reason):
                    failures.append(reason)
                    checkDone()

            iterateEndpoint.clock = self._reactor
            iterateEndpoint.start(self._attemptDelay)

            @winner.addBoth
            def cancelRemainingPending(result):
                checkDone.completed = True
                # Iterate a copy: cancel() mutates `pending` via
                # noLongerPending above.
                for remaining in pending[:]:
                    remaining.cancel()
                if iterateEndpoint.running:
                    iterateEndpoint.stop()
                return result
            return winner
        return d

    def _fallbackNameResolution(self, host, port):
        """
        Resolve the hostname string into a tuple containing the host
        address.  This is method is only used when the reactor does
        not provide L{IReactorPluggableNameResolver}.

        @param host: A unicode hostname to resolve.

        @param port: The port to include in the resolution.

        @return: A L{Deferred} that fires with L{_getaddrinfo}'s
            return value.
        """
        return self._deferToThread(self._getaddrinfo, host, port, 0,
                                   socket.SOCK_STREAM)
@implementer(interfaces.IStreamServerEndpoint)
class SSL4ServerEndpoint:
    """
    SSL secured TCP server endpoint with an IPv4 configuration.
    """

    def __init__(self, reactor, port, sslContextFactory,
                 backlog=50, interface=''):
        """
        @param reactor: An L{IReactorSSL} provider.

        @param port: The port number used for listening
        @type port: int

        @param sslContextFactory: An instance of
            L{interfaces.IOpenSSLContextFactory}.

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to, defaults to '' (all)
        @type interface: str
        """
        self._reactor = reactor
        self._port = port
        self._sslContextFactory = sslContextFactory
        self._backlog = backlog
        self._interface = interface

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
        TCP socket.
        """
        # defer.execute turns a synchronous listenSSL failure into a
        # failed Deferred instead of a raised exception.
        return defer.execute(
            self._reactor.listenSSL,
            self._port,
            protocolFactory,
            contextFactory=self._sslContextFactory,
            backlog=self._backlog,
            interface=self._interface,
        )
@implementer(interfaces.IStreamClientEndpoint)
class SSL4ClientEndpoint:
    """
    SSL secured TCP client endpoint with an IPv4 configuration
    """

    def __init__(self, reactor, host, port, sslContextFactory,
                 timeout=30, bindAddress=None):
        """
        @param reactor: An L{IReactorSSL} provider.

        @param host: A hostname, used when connecting
        @type host: str

        @param port: The port number, used when connecting
        @type port: int

        @param sslContextFactory: SSL Configuration information as an instance
            of L{interfaces.IOpenSSLContextFactory}.

        @param timeout: Number of seconds to wait before assuming the
            connection has failed.
        @type timeout: int

        @param bindAddress: A (host, port) tuple of local address to bind to,
            or None.
        @type bindAddress: tuple
        """
        self._reactor = reactor
        self._host = host
        self._port = port
        self._sslContextFactory = sslContextFactory
        self._timeout = timeout
        self._bindAddress = bindAddress

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect with SSL over
        TCP.

        @return: A L{Deferred} which fires with the connected protocol, or
            fails with the connection error.
        """
        try:
            wf = _WrappingFactory(protocolFactory)
            self._reactor.connectSSL(
                self._host, self._port, wf, self._sslContextFactory,
                timeout=self._timeout, bindAddress=self._bindAddress)
            return wf._onConnection
        except Exception:
            # Was a bare ``except:``, which also trapped SystemExit and
            # KeyboardInterrupt; only genuine errors should be turned into
            # a failed Deferred (defer.fail() captures the active
            # exception).
            return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class UNIXServerEndpoint:
    """
    UnixSocket server endpoint.
    """

    def __init__(self, reactor, address, backlog=50, mode=0o666, wantPID=0):
        """
        @param reactor: An L{IReactorUNIX} provider.

        @param address: The path to the Unix socket file, used when listening

        @param backlog: number of connections to allow in backlog.

        @param mode: mode to set on the unix socket.  This parameter is
            deprecated.  Permissions should be set on the directory which
            contains the UNIX socket.

        @param wantPID: If True, create a pidfile for the socket.
        """
        self._reactor = reactor
        self._address = address
        self._backlog = backlog
        self._mode = mode
        self._wantPID = wantPID

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
        """
        # defer.execute converts a synchronous listenUNIX failure into a
        # failed Deferred.
        return defer.execute(
            self._reactor.listenUNIX,
            self._address,
            protocolFactory,
            backlog=self._backlog,
            mode=self._mode,
            wantPID=self._wantPID,
        )
@implementer(interfaces.IStreamClientEndpoint)
class UNIXClientEndpoint:
    """
    UnixSocket client endpoint.
    """

    def __init__(self, reactor, path, timeout=30, checkPID=0):
        """
        @param reactor: An L{IReactorUNIX} provider.

        @param path: The path to the Unix socket file, used when connecting
        @type path: str

        @param timeout: Number of seconds to wait before assuming the
            connection has failed.
        @type timeout: int

        @param checkPID: If True, check for a pid file to verify that a server
            is listening.
        @type checkPID: bool
        """
        self._reactor = reactor
        self._path = path
        self._timeout = timeout
        self._checkPID = checkPID

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via a
        UNIX Socket

        @return: A L{Deferred} which fires with the connected protocol, or
            fails with the connection error.
        """
        try:
            wf = _WrappingFactory(protocolFactory)
            self._reactor.connectUNIX(
                self._path, wf,
                timeout=self._timeout,
                checkPID=self._checkPID)
            return wf._onConnection
        except Exception:
            # Was a bare ``except:``, which also trapped SystemExit and
            # KeyboardInterrupt; report only real errors through the
            # returned Deferred (defer.fail() captures the active
            # exception).
            return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class AdoptedStreamServerEndpoint:
    """
    An endpoint for listening on a file descriptor initialized outside of
    Twisted.

    @ivar _used: A C{bool} indicating whether this endpoint has been used to
        listen with a factory yet.  C{True} if so.
    """
    # Hooks kept as class attributes so tests can substitute them.
    _close = os.close
    _setNonBlocking = staticmethod(fdesc.setNonBlocking)

    def __init__(self, reactor, fileno, addressFamily):
        """
        @param reactor: An L{IReactorSocket} provider.

        @param fileno: An integer file descriptor corresponding to a listening
            I{SOCK_STREAM} socket.

        @param addressFamily: The address family of the socket given by
            C{fileno}.
        """
        self.reactor = reactor
        self.fileno = fileno
        self.addressFamily = addressFamily
        self._used = False

    def listen(self, factory):
        """
        Implement L{IStreamServerEndpoint.listen} to start listening on, and
        then close, C{self.fileno}.

        @return: A L{Deferred} firing with the listening port, or failing
            with the error (including L{error.AlreadyListened} on reuse).
        """
        if self._used:
            return defer.fail(error.AlreadyListened())
        self._used = True
        try:
            self._setNonBlocking(self.fileno)
            port = self.reactor.adoptStreamPort(
                self.fileno, self.addressFamily, factory)
            # adoptStreamPort takes its own reference to the descriptor,
            # so our copy can be closed here -- see IReactorSocket.
            self._close(self.fileno)
        except Exception:
            # Was a bare ``except:``, which also trapped SystemExit and
            # KeyboardInterrupt; only real errors become a failed Deferred.
            return defer.fail()
        return defer.succeed(port)
def _parseTCP(factory, port, interface="", backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a TCP(IPv4) stream endpoint into the structured arguments.
@param factory: the protocol factory being parsed, or L{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or L{None}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory, arguments
to L{TCP4ServerEndpoint}.
"""
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
def _parseUNIX(factory, address, mode='666', backlog=50, lockfile=True):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the
structured arguments.
@param factory: the protocol factory being parsed, or L{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or L{None}
@param address: the pathname of the unix socket
@type address: C{str}
@param backlog: the length of the listen queue
@type backlog: C{str}
@param lockfile: A string '0' or '1', mapping to True and False
respectively. See the C{wantPID} argument to C{listenUNIX}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{twisted.internet.interfaces.IReactorUNIX.listenUNIX} (or,
modulo argument 2, the factory, arguments to L{UNIXServerEndpoint}.
"""
return (
(address, factory),
{'mode': int(mode, 8), 'backlog': int(backlog),
'wantPID': bool(int(lockfile))})
def _parseSSL(factory, port, privateKey="server.pem", certKey=None,
              sslmethod=None, interface='', backlog=50, extraCertChain=None,
              dhParameters=None):
    """
    Internal parser function for L{_parseServer} to convert the string
    arguments for an SSL (over TCP/IPv4) stream endpoint into the structured
    arguments.

    @param factory: the protocol factory being parsed, or L{None}.  (This was
        a leftover argument from when this code was in C{strports}, and is
        now mostly None and unused.)
    @type factory: L{IProtocolFactory} or L{None}

    @param port: the integer port number to bind
    @type port: C{str}

    @param interface: the interface IP to listen on

    @param backlog: the length of the listen queue
    @type backlog: C{str}

    @param privateKey: The file name of a PEM format private key file.
    @type privateKey: C{str}

    @param certKey: The file name of a PEM format certificate file.
    @type certKey: C{str}

    @param sslmethod: The string name of an SSL method, based on the name of a
        constant in C{OpenSSL.SSL}.  Must be one of: "SSLv23_METHOD",
        "SSLv2_METHOD", "SSLv3_METHOD", "TLSv1_METHOD".
    @type sslmethod: C{str}

    @param extraCertChain: The path of a file containing one or more
        certificates in PEM format that establish the chain from a root CA to
        the CA that signed your C{certKey}.
    @type extraCertChain: L{str}

    @param dhParameters: The file name of a file containing parameters that
        are required for Diffie-Hellman key exchange.  If this is not
        specified, the forward secret C{DHE} ciphers aren't available for
        servers.
    @type dhParameters: L{str}

    @return: a 2-tuple of (args, kwargs), describing the parameters to
        L{IReactorSSL.listenSSL} (or, modulo argument 2, the factory,
        arguments to L{SSL4ServerEndpoint}.
    """
    # Imported here (not at module level) so the rest of this module works
    # without pyOpenSSL installed.
    from twisted.internet import ssl
    # With no separate certificate file, the private key file is assumed to
    # contain the certificate as well.
    if certKey is None:
        certKey = privateKey
    kw = {}
    if sslmethod is not None:
        kw['method'] = getattr(ssl.SSL, sslmethod)
    certPEM = FilePath(certKey).getContent()
    keyPEM = FilePath(privateKey).getContent()
    # Concatenate so loadPEM sees both the certificate and the key, in
    # whichever files they came from.
    privateCertificate = ssl.PrivateCertificate.loadPEM(
        certPEM + b'\n' + keyPEM)
    if extraCertChain is not None:
        # Extract every PEM certificate block from the chain file.
        matches = re.findall(
            r'(-----BEGIN CERTIFICATE-----\n.+?\n-----END CERTIFICATE-----)',
            nativeString(FilePath(extraCertChain).getContent()),
            flags=re.DOTALL
        )
        chainCertificates = [ssl.Certificate.loadPEM(chainCertPEM).original
                             for chainCertPEM in matches]
        if not chainCertificates:
            # An explicitly-specified but unusable chain file is an error;
            # silently ignoring it would produce a broken TLS setup.
            raise ValueError(
                "Specified chain file '%s' doesn't contain any valid "
                "certificates in PEM format." % (extraCertChain,)
            )
    else:
        chainCertificates = None
    if dhParameters is not None:
        dhParameters = ssl.DiffieHellmanParameters.fromFile(
            FilePath(dhParameters),
        )
    cf = ssl.CertificateOptions(
        privateKey=privateCertificate.privateKey.original,
        certificate=privateCertificate.original,
        extraCertChain=chainCertificates,
        dhParameters=dhParameters,
        **kw
    )
    return ((int(port), factory, cf),
            {'interface': interface, 'backlog': int(backlog)})
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _StandardIOParser:
    """
    Stream server endpoint string parser for the Standard I/O type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.
    """
    prefix = "stdio"

    def _parseServer(self, reactor):
        """
        Construct a L{StandardIOEndpoint}; the string form takes no
        further arguments.

        @param reactor: Reactor for the endpoint
        """
        return StandardIOEndpoint(reactor)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # Delegating to _parseServer (which has a precise signature) keeps
        # zope.interface's implementation check satisfied while ignoring
        # the extra arguments this endpoint type does not take.
        return self._parseServer(reactor)
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _SystemdParser:
    """
    Stream server endpoint string parser for the I{systemd} endpoint type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.

    @ivar _sddaemon: A L{ListenFDs} instance used to translate an index into
        an actual file descriptor.
    """
    _sddaemon = ListenFDs.fromEnvironment()

    prefix = "systemd"

    def _parseServer(self, reactor, domain, index):
        """
        Convert the string arguments for a systemd server endpoint into an
        L{AdoptedStreamServerEndpoint} wrapping the inherited descriptor.

        @param reactor: An L{IReactorSocket} provider.

        @param domain: The domain (or address family) of the socket inherited
            from systemd.  This is a string like C{"INET"} or C{"UNIX"}, ie
            the name of an address family from the L{socket} module, without
            the C{"AF_"} prefix.
        @type domain: C{str}

        @param index: An offset into the list of file descriptors inherited
            from systemd.
        @type index: C{str}

        @return: An L{AdoptedStreamServerEndpoint} for the chosen inherited
            file descriptor.
        """
        descriptors = self._sddaemon.inheritedDescriptors()
        fileno = descriptors[int(index)]
        addressFamily = getattr(socket, 'AF_' + domain)
        return AdoptedStreamServerEndpoint(reactor, fileno, addressFamily)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # Delegate to a helper with a sane signature; the broad signature
        # here exists only to satisfy zope.interface's check.
        return self._parseServer(reactor, *args, **kwargs)
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _TCP6ServerParser:
    """
    Stream server endpoint string parser for the TCP6ServerEndpoint type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.
    """
    # _parseServer matches this prefix to select the endpoint type.
    prefix = "tcp6"

    def _parseServer(self, reactor, port, backlog=50, interface='::'):
        """
        Convert string arguments into a L{TCP6ServerEndpoint}.

        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to
        @type interface: str
        """
        return TCP6ServerEndpoint(reactor, int(port), int(backlog), interface)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # Delegate to _parseServer; the generic signature here only exists
        # to satisfy zope.interface's implementation check.
        return self._parseServer(reactor, *args, **kwargs)
# Parsers for the built-in server endpoint types, keyed by strports prefix.
_serverParsers = {"tcp": _parseTCP,
                  "unix": _parseUNIX,
                  "ssl": _parseSSL,
                  }
# Token kinds yielded by _tokenize: _OP for ':' / '=', _STRING for text.
_OP, _STRING = range(2)
def _tokenize(description):
    """
    Tokenize a strports string and yield each token.

    @param description: a string as described by L{serverFromString} or
        L{clientFromString}.
    @type description: L{str} or L{bytes}

    @return: an iterable of 2-tuples of (C{_OP} or C{_STRING}, string).
        Tuples starting with C{_OP} will contain a second element of either
        ':' (i.e. 'next parameter') or '=' (i.e. 'assign parameter value').
        For example, the string 'hello:greeting=world' would result in a
        generator yielding these values::

            _STRING, 'hello'
            _OP, ':'
            _STRING, 'greeting'
            _OP, '='
            _STRING, 'world'
    """
    # Build sentinel values of the same type (bytes or str) as the input.
    empty = _matchingString(u'', description)
    colon = _matchingString(u':', description)
    equals = _matchingString(u'=', description)
    backslash = _matchingString(u'\x5c', description)
    current = empty
    ops = colon + equals
    # After a ':' both operators are significant; after an '=' only ':' is,
    # so an argument value may itself contain unescaped '=' characters.
    nextOps = {colon: colon + equals, equals: colon}
    iterdesc = iter(iterbytes(description))
    for n in iterdesc:
        if n in iterbytes(ops):
            yield _STRING, current
            yield _OP, n
            current = empty
            ops = nextOps[n]
        elif n == backslash:
            # Backslash escapes the following character, taking it
            # literally (even ':' or '=').
            current += next(iterdesc)
        else:
            current += n
    yield _STRING, current
def _parse(description):
    """
    Convert a description string into a list of positional and keyword
    parameters, using logic vaguely like what Python does.

    @param description: a string as described by L{serverFromString} or
        L{clientFromString}.

    @return: a 2-tuple of C{(args, kwargs)}, where 'args' is a list of all
        ':'-separated C{str}s not containing an '=' and 'kwargs' is a map of
        all C{str}s which do contain an '='.  For example, the result of
        C{_parse('a:b:d=1:c')} would be C{(['a', 'b', 'c'], {'d': '1'})}.
    """
    args, kw = [], {}
    colon = _matchingString(u':', description)

    def flush(group):
        # One collected token means a positional argument; two mean a
        # "name=value" keyword pair.
        if len(group) == 1:
            args.append(group[0])
        else:
            kw[nativeString(group[0])] = group[1]

    group = ()
    for (tokenKind, value) in _tokenize(description):
        if tokenKind is _STRING:
            group += (value,)
        elif value == colon:
            # ':' terminates the current argument; '=' merely separates a
            # name from its value, so nothing is flushed for it.
            flush(group)
            group = ()
    flush(group)
    return args, kw
# Mappings from description "names" to endpoint constructors.
_endpointServerFactories = {
    'TCP': TCP4ServerEndpoint,
    'SSL': SSL4ServerEndpoint,
    'UNIX': UNIXServerEndpoint,
    }
# Client-side counterparts of the server factories above.
_endpointClientFactories = {
    'TCP': TCP4ClientEndpoint,
    'SSL': SSL4ClientEndpoint,
    'UNIX': UNIXClientEndpoint,
    }
def _parseServer(description, factory):
    """
    Parse a strports description into a 2-tuple of arguments and keyword
    values.

    @param description: A description in the format explained by
        L{serverFromString}.
    @type description: C{str}

    @param factory: A 'factory' argument; this is left-over from
        twisted.application.strports, it's not really used.
    @type factory: L{IProtocolFactory} or L{None}

    @return: a 3-tuple of (plugin or name, arguments, keyword arguments)
    """
    args, kw = _parse(description)
    endpointType = args[0]
    parser = _serverParsers.get(endpointType)
    if parser is not None:
        return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
    # No built-in parser for this prefix; look for a matching plugin.
    plugin = _matchPluginToPrefix(
        getPlugins(IStreamServerEndpointStringParser), endpointType
    )
    return (plugin, args[1:], kw)
def _matchPluginToPrefix(plugins, endpointType):
    """
    Find the plugin in C{plugins} whose C{prefix} matches C{endpointType},
    case-insensitively.

    @raise ValueError: if no plugin matches.
    """
    endpointType = endpointType.lower()
    for plugin in plugins:
        # Coerce the prefix to the same type (bytes/str) as endpointType
        # before comparing.
        prefix = _matchingString(plugin.prefix.lower(), endpointType)
        if prefix == endpointType:
            return plugin
    raise ValueError("Unknown endpoint type: '%s'" % (endpointType,))
def serverFromString(reactor, description):
    """
    Construct a stream server endpoint from an endpoint description string.

    The format for server endpoint descriptions is a simple byte string.  It
    is a prefix naming the type of endpoint, then a colon, then the arguments
    for that endpoint.

    For example, you can call it like this to create an endpoint that will
    listen on TCP port 80::

        serverFromString(reactor, "tcp:80")

    Additional arguments may be specified as keywords, separated with colons.
    For example, you can specify the interface for a TCP server endpoint to
    bind to like this::

        serverFromString(reactor, "tcp:80:interface=127.0.0.1")

    SSL server endpoints may be specified with the 'ssl' prefix, and the
    private key and certificate files may be specified by the C{privateKey}
    and C{certKey} arguments::

        serverFromString(
            reactor, "ssl:443:privateKey=key.pem:certKey=crt.pem")

    If a private key file name (C{privateKey}) isn't provided, a "server.pem"
    file is assumed to exist which contains the private key.  If the
    certificate file name (C{certKey}) isn't provided, the private key file
    is assumed to contain the certificate as well.

    You may escape colons in arguments with a backslash, which you will need
    to use if you want to specify a full pathname argument on Windows::

        serverFromString(reactor,
            "ssl:443:privateKey=C\\:/key.pem:certKey=C\\:/cert.pem")

    Finally, the 'unix' prefix may be used to specify a filesystem UNIX
    socket, optionally with a 'mode' argument to specify the mode of the
    socket file created by C{listen}::

        serverFromString(reactor, "unix:/var/run/finger")
        serverFromString(reactor, "unix:/var/run/finger:mode=660")

    This function is also extensible; new endpoint types may be registered as
    L{IStreamServerEndpointStringParser} plugins.  See that interface for
    more information.

    @param reactor: The server endpoint will be constructed with this
        reactor.

    @param description: The strports description to parse.
    @type description: L{str}

    @return: A new endpoint which can be used to listen with the parameters
        given by C{description}.

    @rtype: L{IStreamServerEndpoint<twisted.internet.interfaces.IStreamServerEndpoint>}

    @raise ValueError: when the 'description' string cannot be parsed.

    @since: 10.2
    """
    nameOrPlugin, args, kw = _parseServer(description, None)
    if isinstance(nameOrPlugin, str):
        # Built-in endpoint type.  The parser inserted the (unused) factory
        # at position 1 of args; chop it out before calling the constructor.
        args = args[:1] + args[2:]
        return _endpointServerFactories[nameOrPlugin](reactor, *args, **kw)
    # Otherwise a parser plugin claimed the description's prefix.
    return nameOrPlugin.parseStreamServer(reactor, *args, **kw)
def quoteStringArgument(argument):
    """
    Quote an argument to L{serverFromString} and L{clientFromString}.  Since
    arguments are separated with colons and colons are escaped with
    backslashes, some care is necessary if, for example, you have a pathname,
    you may be tempted to interpolate into a string like this::

        serverFromString(reactor, "ssl:443:privateKey=%s" % (myPathName,))

    This may appear to work, but will have portability issues (Windows
    pathnames, for example).  Usually you should just construct the
    appropriate endpoint type rather than interpolating strings, which in
    this case would be L{SSL4ServerEndpoint}.  There are some use-cases where
    you may need to generate such a string, though; for example, a tool to
    manipulate a configuration file which has strports descriptions in it.
    To be correct in those cases, do this instead::

        serverFromString(reactor, "ssl:443:privateKey=%s" %
            (quoteStringArgument(myPathName),))

    @param argument: The part of the endpoint description string you want to
        pass through.
    @type argument: C{str}

    @return: The quoted argument.
    @rtype: C{str}
    """
    # Escape backslashes first so the backslashes introduced for colons
    # are not themselves re-escaped.
    for special in ('\\', ':'):
        argument = argument.replace(special, '\\' + special)
    return argument
def _parseClientTCP(*args, **kwargs):
"""
Perform any argument value coercion necessary for TCP client parameters.
Valid positional arguments to this function are host and port.
Valid keyword arguments to this function are all L{IReactorTCP.connectTCP}
arguments.
@return: The coerced values as a C{dict}.
"""
if len(args) == 2:
kwargs['port'] = int(args[1])
kwargs['host'] = args[0]
elif len(args) == 1:
if 'host' in kwargs:
kwargs['port'] = int(args[0])
else:
kwargs['host'] = args[0]
try:
kwargs['port'] = int(kwargs['port'])
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
try:
kwargs['bindAddress'] = (kwargs['bindAddress'], 0)
except KeyError:
pass
return kwargs
def _loadCAsFromDir(directoryPath):
    """
    Load certificate-authority certificate objects in a given directory.

    @param directoryPath: a L{unicode} or L{bytes} pointing at a directory to
        load .pem files from, or L{None}.

    @return: an L{IOpenSSLTrustRoot} provider.
    """
    # Keyed by digest so duplicate certificates collapse to one entry.
    caCerts = {}
    for child in directoryPath.children():
        if child.asTextMode().basename().split(u'.')[-1].lower() != u'pem':
            continue
        try:
            data = child.getContent()
        except IOError:
            # Permission denied, corrupt disk, we don't care.
            continue
        try:
            theCert = Certificate.loadPEM(data)
        except SSLError:
            # Invalid certificate, etc.  We don't care.
            continue
        caCerts[theCert.digest()] = theCert
    return trustRootFromCertificates(caCerts.values())
def _parseTrustRootPath(pathName):
    """
    Parse a string referring to a directory full of certificate authorities
    into a trust root.

    @param pathName: path name
    @type pathName: L{unicode} or L{bytes} or L{None}

    @return: L{None} or L{IOpenSSLTrustRoot}
    """
    if pathName is not None:
        return _loadCAsFromDir(FilePath(pathName))
    return None
def _privateCertFromPaths(certificatePath, keyPath):
    """
    Parse a certificate path and key path, either or both of which might be
    L{None}, into a certificate object.

    @param certificatePath: the certificate path
    @type certificatePath: L{bytes} or L{unicode} or L{None}

    @param keyPath: the private key path
    @type keyPath: L{bytes} or L{unicode} or L{None}

    @return: a L{PrivateCertificate} or L{None}
    """
    if certificatePath is None:
        return None
    certBytes = FilePath(certificatePath).getContent()
    if keyPath is None:
        # No separate key file: the certificate file must hold the key too.
        return PrivateCertificate.loadPEM(certBytes)
    keyBytes = FilePath(keyPath).getContent()
    # The second argument selects the key format; 1 appears to be
    # crypto.FILETYPE_PEM -- NOTE(review): confirm against pyOpenSSL.
    return PrivateCertificate.fromCertificateAndKeyPair(
        Certificate.loadPEM(certBytes),
        KeyPair.load(keyBytes, 1),
    )
def _parseClientSSLOptions(kwargs):
    """
    Parse common arguments for SSL endpoints, creating an
    L{CertificateOptions} instance.

    @param kwargs: A dict of keyword arguments to be parsed, potentially
        containing keys C{certKey}, C{privateKey}, C{caCertsDir}, and
        C{hostname}.  See L{_parseClientSSL}.
    @type kwargs: L{dict}

    @return: The remaining arguments, including a new key
        C{sslContextFactory}.
    """
    # Pop the SSL-specific keys so only connectSSL-compatible keywords
    # remain in kwargs.
    hostname = kwargs.pop('hostname', None)
    certKey = kwargs.pop('certKey', None)
    privateKey = kwargs.pop('privateKey', None)
    clientCertificate = _privateCertFromPaths(certKey, privateKey)
    trustRoot = _parseTrustRootPath(kwargs.pop('caCertsDir', None))

    if hostname is not None:
        configuration = optionsForClientTLS(
            _idnaText(hostname),
            trustRoot=trustRoot,
            clientCertificate=clientCertificate,
        )
    else:
        # _really_ though, you should specify a hostname.
        if clientCertificate is None:
            privateKeyOpenSSL = None
            certificateOpenSSL = None
        else:
            privateKeyOpenSSL = clientCertificate.privateKey.original
            certificateOpenSSL = clientCertificate.original
        configuration = CertificateOptions(
            trustRoot=trustRoot,
            privateKey=privateKeyOpenSSL,
            certificate=certificateOpenSSL,
        )
    kwargs['sslContextFactory'] = configuration
    return kwargs
def _parseClientSSL(*args, **kwargs):
    """
    Perform any argument value coercion necessary for SSL client parameters.

    Valid keyword arguments to this function are all L{IReactorSSL.connectSSL}
    arguments except for C{contextFactory}.  Instead, C{certKey} (the path
    name of the certificate file) C{privateKey} (the path name of the private
    key associated with the certificate) are accepted and used to construct a
    context factory.

    Valid positional arguments to this function are host and port.

    @param caCertsDir: The one parameter which is not part of
        L{IReactorSSL.connectSSL}'s signature, this is a path name used to
        construct a list of certificate authority certificates.  The
        directory will be scanned for files ending in C{.pem}, all of which
        will be considered valid certificate authorities for this connection.
    @type caCertsDir: L{str}

    @param hostname: The hostname to use for validating the server's
        certificate.
    @type hostname: L{unicode}

    @return: The coerced values as a L{dict}.
    """
    # Reuse the TCP coercion (host/port/timeout/bindAddress), then turn
    # the SSL-specific keys into an sslContextFactory.
    coerced = _parseClientTCP(*args, **kwargs)
    return _parseClientSSLOptions(coerced)
def _parseClientUNIX(*args, **kwargs):
"""
Perform any argument value coercion necessary for UNIX client parameters.
Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX}
keyword arguments except for C{checkPID}. Instead, C{lockfile} is accepted
and has the same meaning. Also C{path} is used instead of C{address}.
Valid positional arguments to this function are C{path}.
@return: The coerced values as a C{dict}.
"""
if len(args) == 1:
kwargs['path'] = args[0]
try:
kwargs['checkPID'] = bool(int(kwargs.pop('lockfile')))
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
# Argument-coercion functions for the built-in client endpoint types,
# keyed by the upper-cased strports prefix.
_clientParsers = {
    'TCP': _parseClientTCP,
    'SSL': _parseClientSSL,
    'UNIX': _parseClientUNIX,
    }
def clientFromString(reactor, description):
    """
    Construct a client endpoint from a description string.

    Client description strings are much like server description strings,
    although they take all of their arguments as keywords, aside from host
    and port.

    You can create a TCP client endpoint with the 'host' and 'port'
    arguments, like so::

        clientFromString(reactor, "tcp:host=www.example.com:port=80")

    or, without specifying host and port keywords::

        clientFromString(reactor, "tcp:www.example.com:80")

    Or you can specify only one or the other, as in the following 2
    examples::

        clientFromString(reactor, "tcp:host=www.example.com:80")
        clientFromString(reactor, "tcp:www.example.com:port=80")

    or an SSL client endpoint with those arguments, plus the arguments used
    by the server SSL, for a client certificate::

        clientFromString(reactor, "ssl:web.example.com:443:"
                                  "privateKey=foo.pem:certKey=foo.pem")

    to specify your certificate trust roots, you can identify a directory
    with PEM files in it with the C{caCertsDir} argument::

        clientFromString(reactor, "ssl:host=web.example.com:port=443:"
                                  "caCertsDir=/etc/ssl/certs")

    Both TCP and SSL client endpoint description strings can include a
    'bindAddress' keyword argument, whose value should be a local IPv4
    address.  This fixes the client socket to that IP address::

        clientFromString(reactor, "tcp:www.example.com:80:"
                                  "bindAddress=192.0.2.100")

    NB: Fixed client ports are not currently supported in TCP or SSL
    client endpoints.  The client socket will always use an ephemeral
    port assigned by the operating system

    You can create a UNIX client endpoint with the 'path' argument and
    optional 'lockfile' and 'timeout' arguments::

        clientFromString(
            reactor, b"unix:path=/var/foo/bar:lockfile=1:timeout=9")

    or, with the path as a positional argument with or without optional
    arguments as in the following 2 examples::

        clientFromString(reactor, "unix:/var/foo/bar")
        clientFromString(reactor, "unix:/var/foo/bar:lockfile=1:timeout=9")

    This function is also extensible; new endpoint types may be registered as
    L{IStreamClientEndpointStringParserWithReactor} plugins.  See that
    interface for more information.

    @param reactor: The client endpoint will be constructed with this
        reactor.

    @param description: The strports description to parse.
    @type description: L{str}

    @return: A new endpoint which can be used to connect with the parameters
        given by C{description}.
    @rtype: L{IStreamClientEndpoint<twisted.internet.interfaces.IStreamClientEndpoint>}

    @since: 10.2
    """
    args, kwargs = _parse(description)
    name = args.pop(0).upper()
    if name in _clientParsers:
        # Built-in type: coerce the string arguments, then build the
        # endpoint from keyword arguments only.
        coerced = _clientParsers[name](*args, **kwargs)
        return _endpointClientFactories[name](reactor, **coerced)
    # Unknown prefix: look for a string-parser plugin.
    plugin = _matchPluginToPrefix(
        getPlugins(IStreamClientEndpointStringParserWithReactor), name
    )
    return plugin.parseStreamClient(reactor, *args, **kwargs)
def connectProtocol(endpoint, protocol):
    """
    Connect a protocol instance to an endpoint.

    This allows using a client endpoint without having to create a factory.

    @param endpoint: A client endpoint to connect to.

    @param protocol: A protocol instance.

    @return: The result of calling C{connect} on the endpoint, i.e. a
        L{Deferred} that will fire with the protocol when connected, or an
        appropriate error.

    @since: 13.1
    """
    class _SingleProtocolFactory(Factory):
        # Always hand back the caller's protocol instance, regardless of
        # the peer address.
        def buildProtocol(self, addr):
            return protocol

    return endpoint.connect(_SingleProtocolFactory())
@implementer(interfaces.IStreamClientEndpoint)
class _WrapperEndpoint:
    """
    An endpoint that wraps another endpoint.
    """

    def __init__(self, wrappedEndpoint, wrapperFactory):
        """
        Construct a L{_WrapperEndpoint}.
        """
        self._wrappedEndpoint = wrappedEndpoint
        self._wrapperFactory = wrapperFactory

    def connect(self, protocolFactory):
        """
        Connect the given protocol factory and unwrap its result.
        """
        wrapped = self._wrapperFactory(protocolFactory)
        connecting = self._wrappedEndpoint.connect(wrapped)
        # The wrapper protocol carries the real one as .wrappedProtocol;
        # hand the caller the inner protocol.
        connecting.addCallback(lambda protocol: protocol.wrappedProtocol)
        return connecting
@implementer(interfaces.IStreamServerEndpoint)
class _WrapperServerEndpoint:
    """
    A server endpoint that wraps another server endpoint.
    """

    def __init__(self, wrappedEndpoint, wrapperFactory):
        """
        Construct a L{_WrapperServerEndpoint}.
        """
        self._wrappedEndpoint = wrappedEndpoint
        self._wrapperFactory = wrapperFactory

    def listen(self, protocolFactory):
        """
        Connect the given protocol factory and unwrap its result.
        """
        wrapped = self._wrapperFactory(protocolFactory)
        return self._wrappedEndpoint.listen(wrapped)
def wrapClientTLS(connectionCreator, wrappedEndpoint):
    """
    Wrap an endpoint which upgrades to TLS as soon as the connection is
    established.

    @since: 16.0

    @param connectionCreator: The TLS options to use when connecting; see
        L{twisted.internet.ssl.optionsForClientTLS} for how to construct
        this.
    @type connectionCreator:
        L{twisted.internet.interfaces.IOpenSSLClientConnectionCreator}

    @param wrappedEndpoint: The endpoint to wrap.
    @type wrappedEndpoint: An L{IStreamClientEndpoint} provider.

    @return: an endpoint that provides transport level encryption layered on
        top of C{wrappedEndpoint}
    @rtype: L{twisted.internet.interfaces.IStreamClientEndpoint}

    @raise NotImplementedError: when pyOpenSSL is unavailable.
    """
    if TLSMemoryBIOFactory is None:
        raise NotImplementedError(
            "OpenSSL not available. Try `pip install twisted[tls]`."
        )

    def wrapFactory(protocolFactory):
        # True marks the factory as the client side of the TLS handshake.
        return TLSMemoryBIOFactory(connectionCreator, True, protocolFactory)

    return _WrapperEndpoint(wrappedEndpoint, wrapFactory)
def _parseClientTLS(reactor, host, port, timeout=b'30', bindAddress=None,
                    certificate=None, privateKey=None, trustRoots=None,
                    endpoint=None, **kwargs):
    """
    Internal method to construct an endpoint from string parameters.
    @param reactor: The reactor passed to L{clientFromString}.
    @param host: The hostname to connect to.
    @type host: L{bytes} or L{unicode}
    @param port: The port to connect to.
    @type port: L{bytes} or L{unicode}
    @param timeout: For each individual connection attempt, the number of
        seconds to wait before assuming the connection has failed.
    @type timeout: L{bytes} or L{unicode}
    @param bindAddress: The address to which to bind outgoing connections.
    @type bindAddress: L{bytes} or L{unicode}
    @param certificate: a string representing a filesystem path to a
        PEM-encoded certificate.
    @type certificate: L{bytes} or L{unicode}
    @param privateKey: a string representing a filesystem path to a PEM-encoded
        private key.
    @type privateKey: L{bytes} or L{unicode}
    @param trustRoots: a string representing a filesystem path to a directory
        of trust-root certificates, or L{None} to use the platform default.
    @type trustRoots: L{bytes} or L{unicode}
    @param endpoint: an optional string endpoint description of an endpoint to
        wrap; if this is passed then C{host} is used only for certificate
        verification.
    @type endpoint: L{bytes} or L{unicode}
    @return: a client TLS endpoint
    @rtype: L{IStreamClientEndpoint}
    """
    if kwargs:
        raise TypeError('unrecognized keyword arguments present',
                        list(kwargs.keys()))
    # Endpoint description arguments may arrive as bytes; normalize to text
    # and to integers before constructing the endpoint.
    host = host if isinstance(host, str) else host.decode("utf-8")
    bindAddress = (bindAddress
                   if isinstance(bindAddress, str) or bindAddress is None
                   else bindAddress.decode("utf-8"))
    port = int(port)
    timeout = int(timeout)
    return wrapClientTLS(
        optionsForClientTLS(
            host, trustRoot=_parseTrustRootPath(trustRoots),
            clientCertificate=_privateCertFromPaths(certificate,
                                                    privateKey)),
        clientFromString(reactor, endpoint) if endpoint is not None
        else HostnameEndpoint(reactor, _idnaBytes(host), port, timeout,
                              bindAddress)
    )
@implementer(IPlugin, IStreamClientEndpointStringParserWithReactor)
class _TLSClientEndpointParser:
    """
    Stream client endpoint string parser for L{wrapClientTLS} with
    L{HostnameEndpoint}.
    @ivar prefix: See
        L{IStreamClientEndpointStringParserWithReactor.prefix}.
    """
    # Handles endpoint description strings beginning with "tls:".
    prefix = 'tls'
    @staticmethod
    def parseStreamClient(reactor, *args, **kwargs):
        """
        Redirects to another function L{_parseClientTLS}; tricks zope.interface
        into believing the interface is correctly implemented, since the
        signature is (C{reactor}, C{*args}, C{**kwargs}). See
        L{_parseClientTLS} for the specific signature description for this
        endpoint parser.
        @param reactor: The reactor passed to L{clientFromString}.
        @param args: The positional arguments in the endpoint description.
        @type args: L{tuple}
        @param kwargs: The named arguments in the endpoint description.
        @type kwargs: L{dict}
        @return: a client TLS endpoint
        @rtype: L{IStreamClientEndpoint}
        """
        # NOTE: keep this generic signature; a concrete one would not match
        # the interface declaration that zope.interface verifies.
        return _parseClientTLS(reactor, *args, **kwargs)
# -*- test-case-name: twisted.internet.test.test_endpoints -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementations of L{IStreamServerEndpoint} and L{IStreamClientEndpoint} that
wrap the L{IReactorTCP}, L{IReactorSSL}, and L{IReactorUNIX} interfaces.
This also implements an extensible mini-language for describing endpoints,
parsed by the L{clientFromString} and L{serverFromString} functions.
@since: 10.1
"""
import os
import re
import socket
from unicodedata import normalize
import warnings
from constantly import NamedConstant, Names
from incremental import Version
from zope.interface import implementer, directlyProvides, provider
from twisted.internet import interfaces, defer, error, fdesc, threads
from twisted.internet.abstract import isIPv6Address, isIPAddress
from twisted.internet.address import (
_ProcessAddress, HostnameAddress, IPv4Address, IPv6Address
)
from twisted.internet.interfaces import (
IStreamServerEndpointStringParser,
IStreamClientEndpointStringParserWithReactor, IResolutionReceiver,
IReactorPluggableNameResolver,
IHostnameResolver,
)
from twisted.internet.protocol import ClientFactory, Factory
from twisted.internet.protocol import ProcessProtocol, Protocol
try:
from twisted.internet.stdio import StandardIO, PipeAddress
except ImportError:
# fallback if pywin32 is not installed
StandardIO = None # type: ignore[assignment,misc]
PipeAddress = None # type: ignore[assignment,misc]
from twisted.internet.task import LoopingCall
from twisted.internet._resolver import HostResolution
from twisted.logger import Logger
from twisted.plugin import IPlugin, getPlugins
from twisted.python import deprecate, log
from twisted.python.compat import nativeString, _matchingString
from twisted.python.components import proxyForInterface
from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
from twisted.python.compat import iterbytes
from twisted.internet.defer import Deferred
from twisted.python.systemd import ListenFDs
from ._idna import _idnaBytes, _idnaText
try:
from twisted.protocols.tls import (
TLSMemoryBIOFactory as _TLSMemoryBIOFactory)
from twisted.internet.ssl import (
optionsForClientTLS, PrivateCertificate, Certificate, KeyPair,
CertificateOptions, trustRootFromCertificates
)
from OpenSSL.SSL import Error as SSLError
except ImportError:
TLSMemoryBIOFactory = None
else:
TLSMemoryBIOFactory = _TLSMemoryBIOFactory
# The module's public API; underscore-prefixed names are private
# implementation details.
__all__ = ["clientFromString", "serverFromString",
           "TCP4ServerEndpoint", "TCP6ServerEndpoint",
           "TCP4ClientEndpoint", "TCP6ClientEndpoint",
           "UNIXServerEndpoint", "UNIXClientEndpoint",
           "SSL4ServerEndpoint", "SSL4ClientEndpoint",
           "AdoptedStreamServerEndpoint", "StandardIOEndpoint",
           "ProcessEndpoint", "HostnameEndpoint",
           "StandardErrorBehavior", "connectProtocol",
           "wrapClientTLS"]
class _WrappingProtocol(Protocol):
    """
    A transparent proxy protocol that fires a L{Deferred} once the
    underlying transport is connected, then forwards every protocol event
    to the protocol it wraps.
    """
    def __init__(self, connectedDeferred, wrappedProtocol):
        """
        @param connectedDeferred: The L{Deferred} that will callback
            with the C{wrappedProtocol} when it is connected.
        @param wrappedProtocol: An L{IProtocol} provider that will be
            connected.
        """
        self._connectedDeferred = connectedDeferred
        self._wrappedProtocol = wrappedProtocol
        # Mirror the optional marker interfaces of the wrapped protocol so
        # transports treat this wrapper exactly like the real protocol.
        optionalInterfaces = (interfaces.IHalfCloseableProtocol,
                              interfaces.IFileDescriptorReceiver,
                              interfaces.IHandshakeListener)
        for optional in optionalInterfaces:
            if optional.providedBy(self._wrappedProtocol):
                directlyProvides(self, optional)
    def logPrefix(self):
        """
        Use the wrapped protocol's log prefix when it provides one,
        otherwise fall back to its class name.
        """
        wrapped = self._wrappedProtocol
        if interfaces.ILoggingContext.providedBy(wrapped):
            return wrapped.logPrefix()
        return wrapped.__class__.__name__
    def connectionMade(self):
        """
        Hook the wrapped protocol up to our transport and fire
        C{self._connectedDeferred} with it.
        """
        self._wrappedProtocol.makeConnection(self.transport)
        self._connectedDeferred.callback(self._wrappedProtocol)
    def dataReceived(self, data):
        """
        Forward received bytes to the wrapped protocol.
        """
        return self._wrappedProtocol.dataReceived(data)
    def fileDescriptorReceived(self, descriptor):
        """
        Forward received file descriptors to the wrapped protocol.
        """
        return self._wrappedProtocol.fileDescriptorReceived(descriptor)
    def connectionLost(self, reason):
        """
        Forward connection shutdown notification to the wrapped protocol.
        """
        return self._wrappedProtocol.connectionLost(reason)
    def readConnectionLost(self):
        """
        Forward L{IHalfCloseableProtocol.readConnectionLost} to the wrapped
        protocol.
        """
        self._wrappedProtocol.readConnectionLost()
    def writeConnectionLost(self):
        """
        Forward L{IHalfCloseableProtocol.writeConnectionLost} to the wrapped
        protocol.
        """
        self._wrappedProtocol.writeConnectionLost()
    def handshakeCompleted(self):
        """
        Forward L{interfaces.IHandshakeListener.handshakeCompleted} to the
        wrapped protocol.
        """
        self._wrappedProtocol.handshakeCompleted()
class _WrappingFactory(ClientFactory):
    """
    Wrap a factory in order to wrap the protocols it builds.
    @ivar _wrappedFactory: A provider of I{IProtocolFactory} whose buildProtocol
        method will be called and whose resulting protocol will be wrapped.
    @ivar _onConnection: A L{Deferred} that fires when the protocol is
        connected
    @ivar _connector: A L{connector <twisted.internet.interfaces.IConnector>}
        that is managing the current or previous connection attempt.
    """
    protocol = _WrappingProtocol
    def __init__(self, wrappedFactory):
        """
        @param wrappedFactory: A provider of I{IProtocolFactory} whose
            buildProtocol method will be called and whose resulting protocol
            will be wrapped.
        """
        self._wrappedFactory = wrappedFactory
        self._onConnection = defer.Deferred(canceller=self._canceller)
    def startedConnecting(self, connector):
        """
        A connection attempt was started. Remember the connector which started
        said attempt, for use later.
        """
        self._connector = connector
    def _canceller(self, deferred):
        """
        The outgoing connection attempt was cancelled. Fail that L{Deferred}
        with an L{error.ConnectingCancelledError}.
        @param deferred: The L{Deferred <defer.Deferred>} that was cancelled;
            should be the same as C{self._onConnection}.
        @type deferred: L{Deferred <defer.Deferred>}
        @note: This relies on startedConnecting having been called, so it may
            seem as though there's a race condition where C{_connector} may not
            have been set. However, using public APIs, this condition is
            impossible to catch, because a connection API
            (C{connectTCP}/C{SSL}/C{UNIX}) is always invoked before a
            L{_WrappingFactory}'s L{Deferred <defer.Deferred>} is returned to
            C{connect()}'s caller.
        @return: L{None}
        """
        deferred.errback(
            error.ConnectingCancelledError(
                self._connector.getDestination()))
        self._connector.stopConnecting()
    def doStart(self):
        """
        Start notifications are passed straight through to the wrapped factory.
        """
        self._wrappedFactory.doStart()
    def doStop(self):
        """
        Stop notifications are passed straight through to the wrapped factory.
        """
        self._wrappedFactory.doStop()
    def buildProtocol(self, addr):
        """
        Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback the
        C{self._onConnection} L{Deferred} if the wrapped factory raises an
        exception or returns L{None}.
        @return: An instance of L{_WrappingProtocol} or L{None}
        """
        try:
            proto = self._wrappedFactory.buildProtocol(addr)
            if proto is None:
                raise error.NoProtocol()
        # Deliberately bare: errback() with no argument wraps whatever
        # exception is currently being handled into a Failure, so every
        # possible buildProtocol error reaches the caller's Deferred.
        except:
            self._onConnection.errback()
        else:
            return self.protocol(self._onConnection, proto)
    def clientConnectionFailed(self, connector, reason):
        """
        Errback the C{self._onConnection} L{Deferred} when the
        client connection fails.
        """
        # The Deferred may already have fired (e.g. the attempt was
        # cancelled via _canceller); errbacking it twice would be an error.
        if not self._onConnection.called:
            self._onConnection.errback(reason)
@implementer(interfaces.IStreamServerEndpoint)
class StandardIOEndpoint:
    """
    A server endpoint over this process's standard input and output.
    @ivar _stdio: a callable, like L{stdio.StandardIO}, which takes an
        L{IProtocol} provider and a C{reactor} keyword argument (interface
        dependent upon your platform).
    """
    _stdio = StandardIO
    def __init__(self, reactor):
        """
        @param reactor: The reactor for the endpoint.
        """
        self._reactor = reactor
    def listen(self, stdioProtocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on stdin/stdout
        """
        protocol = stdioProtocolFactory.buildProtocol(PipeAddress())
        # defer.execute converts any synchronous failure into an errback.
        return defer.execute(self._stdio, protocol, reactor=self._reactor)
class _IProcessTransportWithConsumerAndProducer(interfaces.IProcessTransport,
                                                interfaces.IConsumer,
                                                interfaces.IPushProducer):
    """
    An L{_IProcessTransportWithConsumerAndProducer} combines various interfaces
    to work around the issue that L{interfaces.IProcessTransport} is
    incompletely defined and doesn't specify flow-control interfaces, and that
    L{proxyForInterface} doesn't allow for multiple interfaces.
    """
    # Intentionally empty: this interface exists only to be passed to
    # proxyForInterface (see _ProcessEndpointTransport below).
class _ProcessEndpointTransport(
        proxyForInterface(_IProcessTransportWithConsumerAndProducer,  # type: ignore[misc] # noqa
                          '_process')):
    """
    An L{ITransport}, L{IProcessTransport}, L{IConsumer}, and L{IPushProducer}
    provider for the L{IProtocol} instance passed to the process endpoint.
    @ivar _process: An active process transport which will be used by write
        methods on this object to write data to a child process.
    @type _process: L{interfaces.IProcessTransport} provider
    """
    # All behavior is generated by proxyForInterface: every method of the
    # combined interface is forwarded to the wrapped C{_process}.
class _WrapIProtocol(ProcessProtocol):
    """
    An L{IProcessProtocol} provider that wraps an L{IProtocol}.
    @ivar transport: A L{_ProcessEndpointTransport} provider that is hooked to
        the wrapped L{IProtocol} provider.
    @see: L{protocol.ProcessProtocol}
    """
    def __init__(self, proto, executable, errFlag):
        """
        @param proto: An L{IProtocol} provider.
        @param errFlag: A constant belonging to L{StandardErrorBehavior}
            that determines if stderr is logged or dropped.
        @param executable: The file name (full path) to spawn.
        """
        self.protocol = proto
        self.errFlag = errFlag
        self.executable = executable
    def makeConnection(self, process):
        """
        Call L{IProtocol} provider's makeConnection method with an
        L{ITransport} provider.
        @param process: An L{IProcessTransport} provider.
        """
        self.transport = _ProcessEndpointTransport(process)
        return self.protocol.makeConnection(self.transport)
    def childDataReceived(self, childFD, data):
        """
        This is called with data from the process's stdout or stderr pipes.
        It checks the status of the errFlag to determine if stderr should be
        logged (default) or dropped.
        """
        if childFD == 1:
            # stdout is the protocol's data stream.
            return self.protocol.dataReceived(data)
        elif childFD == 2 and self.errFlag == StandardErrorBehavior.LOG:
            log.msg(
                format="Process %(executable)r wrote stderr unhandled by "
                "%(protocol)s: %(data)s",
                executable=self.executable, protocol=self.protocol,
                data=data)
    def processEnded(self, reason):
        """
        If the process ends with L{error.ProcessDone} and a zero exit status,
        call the wrapped L{IProtocol} provider's C{connectionLost} with a
        clean L{error.ConnectionDone}; otherwise pass the failure through
        unchanged.
        @see: L{ProcessProtocol.processEnded}
        """
        # reason.check() returns the matched exception class or None, so a
        # plain truth test replaces the former redundant equality comparison
        # against error.ProcessDone.
        if reason.check(error.ProcessDone) and reason.value.status == 0:
            return self.protocol.connectionLost(
                Failure(error.ConnectionDone()))
        return self.protocol.connectionLost(reason)
class StandardErrorBehavior(Names):
    """
    Constants used in ProcessEndpoint to decide what to do with stderr.
    @cvar LOG: Indicates that stderr is to be logged.
    @cvar DROP: Indicates that stderr is to be dropped (and not logged).
    @since: 13.1
    """
    # Forward child stderr output to the Twisted log (the default).
    LOG = NamedConstant()
    # Silently discard child stderr output.
    DROP = NamedConstant()
@implementer(interfaces.IStreamClientEndpoint)
class ProcessEndpoint:
    """
    An endpoint for child processes
    @ivar _spawnProcess: A hook used for testing the spawning of child process.
    @since: 13.1
    """
    # NOTE(review): env={} is a shared mutable default, but it is only ever
    # read here and passed through to spawnProcess, never mutated, so it is
    # safe. Do not change it to None: per IReactorProcess.spawnProcess, an
    # empty mapping and None have different semantics -- confirm before
    # touching.
    def __init__(self, reactor, executable, args=(), env={}, path=None,
                 uid=None, gid=None, usePTY=0, childFDs=None,
                 errFlag=StandardErrorBehavior.LOG):
        """
        See L{IReactorProcess.spawnProcess}.
        @param errFlag: Determines if stderr should be logged.
        @type errFlag: L{endpoints.StandardErrorBehavior}
        """
        self._reactor = reactor
        self._executable = executable
        self._args = args
        self._env = env
        self._path = path
        self._uid = uid
        self._gid = gid
        self._usePTY = usePTY
        self._childFDs = childFDs
        self._errFlag = errFlag
        # Indirection point so tests can substitute a fake spawnProcess.
        self._spawnProcess = self._reactor.spawnProcess
    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to launch a child process
        and connect it to a protocol created by C{protocolFactory}.
        @param protocolFactory: A factory for an L{IProtocol} provider which
            will be notified of all events related to the created process.
        """
        proto = protocolFactory.buildProtocol(_ProcessAddress())
        try:
            self._spawnProcess(
                _WrapIProtocol(proto, self._executable, self._errFlag),
                self._executable, self._args, self._env, self._path, self._uid,
                self._gid, self._usePTY, self._childFDs)
        # Deliberately bare: defer.fail() with no argument wraps whatever
        # exception is currently being handled.
        except:
            return defer.fail()
        else:
            return defer.succeed(proto)
@implementer(interfaces.IStreamServerEndpoint)
class _TCPServerEndpoint:
    """
    Common implementation shared by the IPv4 and IPv6 TCP server endpoints.
    """
    def __init__(self, reactor, port, backlog, interface):
        """
        @param reactor: An L{IReactorTCP} provider.
        @param port: The port number used for listening
        @type port: int
        @param backlog: Size of the listen queue
        @type backlog: int
        @param interface: The hostname to bind to
        @type interface: str
        """
        self._reactor = reactor
        self._port = port
        self._backlog = backlog
        self._interface = interface
    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on a TCP
        socket
        """
        # defer.execute wraps the synchronous listenTCP call so that any
        # immediate exception becomes a failed Deferred.
        return defer.execute(
            self._reactor.listenTCP,
            self._port,
            protocolFactory,
            backlog=self._backlog,
            interface=self._interface,
        )
class TCP4ServerEndpoint(_TCPServerEndpoint):
    """
    A TCP server endpoint that listens over IPv4.
    """
    def __init__(self, reactor, port, backlog=50, interface=''):
        """
        @param reactor: An L{IReactorTCP} provider.
        @param port: The port number used for listening
        @type port: int
        @param backlog: Size of the listen queue
        @type backlog: int
        @param interface: The hostname to bind to, defaults to '' (all)
        @type interface: str
        """
        super().__init__(reactor, port, backlog, interface)
class TCP6ServerEndpoint(_TCPServerEndpoint):
    """
    A TCP server endpoint that listens over IPv6.
    """
    def __init__(self, reactor, port, backlog=50, interface='::'):
        """
        @param reactor: An L{IReactorTCP} provider.
        @param port: The port number used for listening
        @type port: int
        @param backlog: Size of the listen queue
        @type backlog: int
        @param interface: The hostname to bind to, defaults to C{::} (all)
        @type interface: str
        """
        super().__init__(reactor, port, backlog, interface)
@implementer(interfaces.IStreamClientEndpoint)
class TCP4ClientEndpoint:
    """
    TCP client endpoint with an IPv4 configuration.
    """
    def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
        """
        @param reactor: An L{IReactorTCP} provider
        @param host: A hostname, used when connecting
        @type host: str
        @param port: The port number, used when connecting
        @type port: int
        @param timeout: The number of seconds to wait before assuming the
            connection has failed.
        @type timeout: L{float} or L{int}
        @param bindAddress: A (host, port) tuple of local address to bind to,
            or None.
        @type bindAddress: tuple
        """
        self._reactor = reactor
        self._host = host
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress
    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via TCP.
        """
        try:
            wrappingFactory = _WrappingFactory(protocolFactory)
            self._reactor.connectTCP(
                self._host, self._port, wrappingFactory,
                timeout=self._timeout, bindAddress=self._bindAddress)
        # Deliberately bare: any synchronous failure becomes an errbacked
        # Deferred rather than an exception in the caller.
        except:
            return defer.fail()
        return wrappingFactory._onConnection
@implementer(interfaces.IStreamClientEndpoint)
class TCP6ClientEndpoint:
    """
    TCP client endpoint with an IPv6 configuration.
    @ivar _getaddrinfo: A hook used for testing name resolution.
    @ivar _deferToThread: A hook used for testing deferToThread.
    @ivar _GAI_ADDRESS: Index of the address portion in result of
        getaddrinfo to be used.
    @ivar _GAI_ADDRESS_HOST: Index of the actual host-address in the
        5-tuple L{_GAI_ADDRESS}.
    """
    _getaddrinfo = staticmethod(socket.getaddrinfo)
    _deferToThread = staticmethod(threads.deferToThread)
    # getaddrinfo() yields 5-tuples; element 4 is the sockaddr tuple, whose
    # element 0 is the host-address string.
    _GAI_ADDRESS = 4
    _GAI_ADDRESS_HOST = 0
    def __init__(self, reactor, host, port, timeout=30, bindAddress=None):
        """
        @param host: An IPv6 address literal or a hostname with an
            IPv6 address
        @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP}
        """
        self._reactor = reactor
        self._host = host
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress
    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via TCP,
        once the hostname resolution is done.
        """
        if isIPv6Address(self._host):
            # Already a literal address: skip the resolution step.
            d = self._resolvedHostConnect(self._host, protocolFactory)
        else:
            d = self._nameResolution(self._host)
            # Only the first resolved address is used.
            d.addCallback(lambda result: result[0][self._GAI_ADDRESS]
                          [self._GAI_ADDRESS_HOST])
            d.addCallback(self._resolvedHostConnect, protocolFactory)
        return d
    def _nameResolution(self, host):
        """
        Resolve the hostname string into a tuple containing the host
        IPv6 address.
        """
        # getaddrinfo blocks, so run it in the reactor's thread pool.
        return self._deferToThread(
            self._getaddrinfo, host, 0, socket.AF_INET6)
    def _resolvedHostConnect(self, resolvedHost, protocolFactory):
        """
        Connect to the server using the resolved hostname.
        """
        try:
            wf = _WrappingFactory(protocolFactory)
            self._reactor.connectTCP(resolvedHost, self._port, wf,
                timeout=self._timeout, bindAddress=self._bindAddress)
            return wf._onConnection
        # Deliberately bare: any synchronous failure becomes an errbacked
        # Deferred via defer.fail().
        except:
            return defer.fail()
@implementer(IHostnameResolver)
class _SimpleHostnameResolver:
    """
    An L{IHostnameResolver} provider that invokes a provided callable
    to resolve hostnames.
    @ivar _nameResolution: the callable L{resolveHostName} invokes to
        resolve hostnames.
    @type _nameResolution: A L{callable} that accepts two arguments:
        the host to resolve and the port number to include in the
        result.
    """
    _log = Logger()
    def __init__(self, nameResolution):
        """
        Create a L{_SimpleHostnameResolver} instance.
        """
        self._nameResolution = nameResolution
    def resolveHostName(self, resolutionReceiver,
                        hostName,
                        portNumber=0,
                        addressTypes=None,
                        transportSemantics='TCP'):
        """
        Initiate a hostname resolution.
        @param resolutionReceiver: an object that will receive each resolved
            address as it arrives.
        @type resolutionReceiver: L{IResolutionReceiver}
        @param hostName: see interface
        @param portNumber: see interface
        @param addressTypes: Ignored in this implementation.
        @param transportSemantics: Ignored in this implementation.
        @return: The resolution in progress.
        @rtype: L{IResolutionReceiver}
        """
        resolutionReceiver.resolutionBegan(HostResolution(hostName))
        d = self._nameResolution(hostName, portNumber)
        def cbDeliver(gairesult):
            # Deliver each IPv4/IPv6 result to the receiver; other address
            # families are silently skipped.
            for family, socktype, proto, canonname, sockaddr in gairesult:
                if family == socket.AF_INET6:
                    resolutionReceiver.addressResolved(
                        IPv6Address('TCP', *sockaddr))
                elif family == socket.AF_INET:
                    resolutionReceiver.addressResolved(
                        IPv4Address('TCP', *sockaddr))
        def ebLog(error):
            # Resolution errors are logged, not propagated; the receiver
            # still gets resolutionComplete() via the addBoth below.
            self._log.failure("while looking up {name} with {callable}",
                              error, name=hostName,
                              callable=self._nameResolution)
        d.addCallback(cbDeliver)
        d.addErrback(ebLog)
        d.addBoth(lambda ignored: resolutionReceiver.resolutionComplete())
        return resolutionReceiver
@implementer(interfaces.IStreamClientEndpoint)
class HostnameEndpoint:
    """
    A name-based endpoint that connects to the fastest amongst the resolved
    host addresses.
    @cvar _DEFAULT_ATTEMPT_DELAY: The default time to use between attempts, in
        seconds, when no C{attemptDelay} is given to
        L{HostnameEndpoint.__init__}.
    @ivar _hostText: the textual representation of the hostname passed to the
        constructor. Used to pass to the reactor's hostname resolver.
    @type _hostText: L{unicode}
    @ivar _hostBytes: the encoded bytes-representation of the hostname passed
        to the constructor. Used to construct the L{HostnameAddress}
        associated with this endpoint.
    @type _hostBytes: L{bytes}
    @ivar _hostStr: the native-string representation of the hostname passed to
        the constructor, used for exception construction
    @type _hostStr: native L{str}
    @ivar _badHostname: a flag - hopefully false! - indicating that an invalid
        hostname was passed to the constructor. This might be a textual
        hostname that isn't valid IDNA, or non-ASCII bytes.
    @type _badHostname: L{bool}
    """
    _getaddrinfo = staticmethod(socket.getaddrinfo)
    _deferToThread = staticmethod(threads.deferToThread)
    _DEFAULT_ATTEMPT_DELAY = 0.3
    def __init__(self, reactor, host, port, timeout=30, bindAddress=None,
                 attemptDelay=None):
        """
        Create a L{HostnameEndpoint}.
        @param reactor: The reactor to use for connections and delayed calls.
        @type reactor: provider of L{IReactorTCP}, L{IReactorTime} and either
            L{IReactorPluggableNameResolver} or L{IReactorPluggableResolver}.
        @param host: A hostname to connect to.
        @type host: L{bytes} or L{unicode}
        @param port: The port number to connect to.
        @type port: L{int}
        @param timeout: For each individual connection attempt, the number of
            seconds to wait before assuming the connection has failed.
        @type timeout: L{float} or L{int}
        @param bindAddress: the local address of the network interface to make
            the connections from.
        @type bindAddress: L{bytes}
        @param attemptDelay: The number of seconds to delay between connection
            attempts.
        @type attemptDelay: L{float}
        @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP}
        """
        self._reactor = reactor
        self._nameResolver = self._getNameResolverAndMaybeWarn(reactor)
        [self._badHostname, self._hostBytes, self._hostText] = (
            self._hostAsBytesAndText(host)
        )
        # "bytes is str" is only true on Python 2; on Python 3 the native
        # string form of the hostname is always the text form.
        self._hostStr = self._hostBytes if bytes is str else self._hostText
        self._port = port
        self._timeout = timeout
        self._bindAddress = bindAddress
        if attemptDelay is None:
            attemptDelay = self._DEFAULT_ATTEMPT_DELAY
        self._attemptDelay = attemptDelay
    def __repr__(self) -> str:
        """
        Produce a string representation of the L{HostnameEndpoint}.
        @return: A L{str}
        """
        if self._badHostname:
            # Use the backslash-encoded version of the string passed to the
            # constructor, which is already a native string.
            host = self._hostStr
        elif isIPv6Address(self._hostStr):
            host = '[{}]'.format(self._hostStr)
        else:
            # Convert the bytes representation to a native string to ensure
            # that we display the punycoded version of the hostname, which is
            # more useful than any IDN version as it can be easily copy-pasted
            # into debugging tools.
            host = nativeString(self._hostBytes)
        return "".join(["<HostnameEndpoint ", host, ":", str(self._port), ">"])
    def _getNameResolverAndMaybeWarn(self, reactor):
        """
        Retrieve a C{nameResolver} callable and warn the caller's
        caller that using a reactor which doesn't provide
        L{IReactorPluggableNameResolver} is deprecated.
        @param reactor: The reactor to check.
        @return: A L{IHostnameResolver} provider.
        """
        if not IReactorPluggableNameResolver.providedBy(reactor):
            warningString = deprecate.getDeprecationWarningString(
                reactor.__class__,
                Version('Twisted', 17, 5, 0),
                format=("Passing HostnameEndpoint a reactor that does not"
                        " provide IReactorPluggableNameResolver (%(fqpn)s)"
                        " was deprecated in %(version)s"),
                replacement=("a reactor that provides"
                             " IReactorPluggableNameResolver"),
            )
            # stacklevel=3 so the warning points at the code constructing
            # the endpoint rather than at this helper.
            warnings.warn(warningString, DeprecationWarning, stacklevel=3)
            return _SimpleHostnameResolver(self._fallbackNameResolution)
        return reactor.nameResolver
    @staticmethod
    def _hostAsBytesAndText(host):
        """
        For various reasons (documented in the C{@ivar}'s in the class
        docstring) we need both a textual and a binary representation of the
        hostname given to the constructor. For compatibility and convenience,
        we accept both textual and binary representations of the hostname, save
        the form that was passed, and convert into the other form. This is
        mostly just because L{HostnameAddress} chose somewhat poorly to define
        its attribute as bytes; hopefully we can find a compatible way to clean
        this up in the future and just operate in terms of text internally.
        @param host: A hostname to convert.
        @type host: L{bytes} or C{str}
        @return: a 3-tuple of C{(invalid, bytes, text)} where C{invalid} is a
            boolean that is true when the hostname is invalid, C{bytes} is a
            binary representation of C{host}, and C{text} is a textual
            representation of C{host}.
        """
        if isinstance(host, bytes):
            if isIPAddress(host) or isIPv6Address(host):
                return False, host, host.decode("ascii")
            else:
                try:
                    return False, host, _idnaText(host)
                except UnicodeError:
                    # Convert the host to _some_ kind of text, to handle below.
                    host = host.decode("charmap")
        else:
            host = normalize('NFC', host)
            if isIPAddress(host) or isIPv6Address(host):
                return False, host.encode("ascii"), host
            else:
                try:
                    return False, _idnaBytes(host), host
                except UnicodeError:
                    pass
        # `host` has been converted to text by this point either way; it's
        # invalid as a hostname, and so may contain unprintable characters and
        # such. escape it with backslashes so the user can get _some_ guess as
        # to what went wrong.
        asciibytes = host.encode('ascii', 'backslashreplace')
        return True, asciibytes, asciibytes.decode('ascii')
    def connect(self, protocolFactory):
        """
        Attempts a connection to each resolved address, and returns a
        connection which is established first.
        @param protocolFactory: The protocol factory whose protocol
            will be connected.
        @type protocolFactory:
            L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>}
        @return: A L{Deferred} that fires with the connected protocol
            or fails a connection-related error.
        """
        if self._badHostname:
            return defer.fail(
                ValueError("invalid hostname: {}".format(self._hostStr))
            )
        # d fires with the full list of resolved addresses once resolution
        # finishes; the receiver below accumulates them.
        d = Deferred()
        addresses = []
        @provider(IResolutionReceiver)
        class EndpointReceiver:
            @staticmethod
            def resolutionBegan(resolutionInProgress):
                pass
            @staticmethod
            def addressResolved(address):
                addresses.append(address)
            @staticmethod
            def resolutionComplete():
                d.callback(addresses)
        self._nameResolver.resolveHostName(
            EndpointReceiver, self._hostText, portNumber=self._port
        )
        d.addErrback(lambda ignored: defer.fail(error.DNSLookupError(
            "Couldn't find the hostname '{}'".format(self._hostStr))))
        @d.addCallback
        def resolvedAddressesToEndpoints(addresses):
            # Yield an endpoint for every address resolved from the name.
            for eachAddress in addresses:
                if isinstance(eachAddress, IPv6Address):
                    yield TCP6ClientEndpoint(
                        self._reactor, eachAddress.host, eachAddress.port,
                        self._timeout, self._bindAddress
                    )
                if isinstance(eachAddress, IPv4Address):
                    yield TCP4ClientEndpoint(
                        self._reactor, eachAddress.host, eachAddress.port,
                        self._timeout, self._bindAddress
                    )
        d.addCallback(list)
        def _canceller(d):
            # This canceller must remain defined outside of
            # `startConnectionAttempts`, because Deferred should not
            # participate in cycles with their cancellers; that would create a
            # potentially problematic circular reference and possibly
            # gc.garbage.
            d.errback(error.ConnectingCancelledError(
                HostnameAddress(self._hostBytes, self._port)))
        @d.addCallback
        def startConnectionAttempts(endpoints):
            """
            Given a sequence of endpoints obtained via name resolution, start
            connecting to a new one every C{self._attemptDelay} seconds until
            one of the connections succeeds, all of them fail, or the attempt
            is cancelled.
            @param endpoints: a list of all the endpoints we might try to
                connect to, as determined by name resolution.
            @type endpoints: L{list} of L{IStreamServerEndpoint}
            @return: a Deferred that fires with the result of the
                C{endpoint.connect} method that completes the fastest, or fails
                with the first connection error it encountered if none of them
                succeed.
            @rtype: L{Deferred} failing with L{error.ConnectingCancelledError}
                or firing with L{IProtocol}
            """
            if not endpoints:
                raise error.DNSLookupError(
                    "no results for hostname lookup: {}".format(self._hostStr)
                )
            iterEndpoints = iter(endpoints)
            pending = []
            failures = []
            winner = defer.Deferred(canceller=_canceller)
            # checkDone errbacks the overall result only once every attempt
            # has finished, no attempt succeeded, and no endpoints remain
            # untried.
            def checkDone():
                if pending or checkDone.completed or checkDone.endpointsLeft:
                    return
                winner.errback(failures.pop())
            checkDone.completed = False
            checkDone.endpointsLeft = True
            @LoopingCall
            def iterateEndpoint():
                endpoint = next(iterEndpoints, None)
                if endpoint is None:
                    # The list of endpoints ends.
                    checkDone.endpointsLeft = False
                    checkDone()
                    return
                eachAttempt = endpoint.connect(protocolFactory)
                pending.append(eachAttempt)
                @eachAttempt.addBoth
                def noLongerPending(result):
                    pending.remove(eachAttempt)
                    return result
                @eachAttempt.addCallback
                def succeeded(result):
                    winner.callback(result)
                @eachAttempt.addErrback
                def failed(reason):
                    failures.append(reason)
                    checkDone()
            iterateEndpoint.clock = self._reactor
            iterateEndpoint.start(self._attemptDelay)
            @winner.addBoth
            def cancelRemainingPending(result):
                # The first overall result (success or failure) wins: stop
                # the attempt loop and cancel every other in-flight attempt.
                checkDone.completed = True
                for remaining in pending[:]:
                    remaining.cancel()
                if iterateEndpoint.running:
                    iterateEndpoint.stop()
                return result
            return winner
        return d
    def _fallbackNameResolution(self, host, port):
        """
        Resolve the hostname string into a tuple containing the host
        address. This is method is only used when the reactor does
        not provide L{IReactorPluggableNameResolver}.
        @param host: A unicode hostname to resolve.
        @param port: The port to include in the resolution.
        @return: A L{Deferred} that fires with L{_getaddrinfo}'s
            return value.
        """
        # Blocking getaddrinfo call, pushed to the reactor's thread pool.
        return self._deferToThread(self._getaddrinfo, host, port, 0,
                                   socket.SOCK_STREAM)
@implementer(interfaces.IStreamServerEndpoint)
class SSL4ServerEndpoint:
    """
    A stream server endpoint which listens for TLS/SSL connections on an
    IPv4 TCP socket.
    """

    def __init__(self, reactor, port, sslContextFactory,
                 backlog=50, interface=''):
        """
        @param reactor: An L{IReactorSSL} provider.

        @param port: The TCP port number to listen on.
        @type port: int

        @param sslContextFactory: An instance of
            L{interfaces.IOpenSSLContextFactory} supplying the TLS
            configuration.

        @param backlog: Size of the listen queue.
        @type backlog: int

        @param interface: The hostname to bind to; defaults to '' (all
            interfaces).
        @type interface: str
        """
        self._reactor = reactor
        self._port = port
        self._backlog = backlog
        self._interface = interface
        self._sslContextFactory = sslContextFactory

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen for SSL on a
        TCP socket.
        """
        # defer.execute turns a synchronous exception from listenSSL into a
        # failed Deferred instead of letting it propagate.
        listenArguments = dict(
            contextFactory=self._sslContextFactory,
            backlog=self._backlog,
            interface=self._interface,
        )
        return defer.execute(self._reactor.listenSSL, self._port,
                             protocolFactory, **listenArguments)
@implementer(interfaces.IStreamClientEndpoint)
class SSL4ClientEndpoint:
    """
    SSL secured TCP client endpoint with an IPv4 configuration
    """

    def __init__(self, reactor, host, port, sslContextFactory,
                 timeout=30, bindAddress=None):
        """
        @param reactor: An L{IReactorSSL} provider.

        @param host: A hostname, used when connecting
        @type host: str

        @param port: The port number, used when connecting
        @type port: int

        @param sslContextFactory: SSL Configuration information as an instance
            of L{interfaces.IOpenSSLContextFactory}.

        @param timeout: Number of seconds to wait before assuming the
            connection has failed.
        @type timeout: int

        @param bindAddress: A (host, port) tuple of local address to bind to,
            or None.
        @type bindAddress: tuple
        """
        self._reactor = reactor
        self._host = host
        self._port = port
        self._sslContextFactory = sslContextFactory
        self._timeout = timeout
        self._bindAddress = bindAddress

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect with SSL over
        TCP.

        @return: A L{Deferred} which fires with the connected protocol, or
            fails with the connection error.
        """
        try:
            wf = _WrappingFactory(protocolFactory)
            self._reactor.connectSSL(
                self._host, self._port, wf, self._sslContextFactory,
                timeout=self._timeout, bindAddress=self._bindAddress)
            return wf._onConnection
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate instead of being converted into a failed
            # Deferred.  defer.fail() with no argument wraps the exception
            # currently being handled.
            return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class UNIXServerEndpoint:
    """
    A stream server endpoint which listens on a filesystem UNIX socket.
    """

    def __init__(self, reactor, address, backlog=50, mode=0o666, wantPID=0):
        """
        @param reactor: An L{IReactorUNIX} provider.

        @param address: The path to the Unix socket file, used when listening

        @param backlog: number of connections to allow in backlog.

        @param mode: mode to set on the unix socket.  This parameter is
            deprecated.  Permissions should be set on the directory which
            contains the UNIX socket.

        @param wantPID: If True, create a pidfile for the socket.
        """
        self._reactor = reactor
        self._address = address
        self._wantPID = wantPID
        self._mode = mode
        self._backlog = backlog

    def listen(self, protocolFactory):
        """
        Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket.
        """
        # defer.execute converts a synchronous exception from listenUNIX
        # into a failed Deferred.
        listenArguments = dict(
            backlog=self._backlog,
            mode=self._mode,
            wantPID=self._wantPID,
        )
        return defer.execute(self._reactor.listenUNIX, self._address,
                             protocolFactory, **listenArguments)
@implementer(interfaces.IStreamClientEndpoint)
class UNIXClientEndpoint:
    """
    UnixSocket client endpoint.
    """

    def __init__(self, reactor, path, timeout=30, checkPID=0):
        """
        @param reactor: An L{IReactorUNIX} provider.

        @param path: The path to the Unix socket file, used when connecting
        @type path: str

        @param timeout: Number of seconds to wait before assuming the
            connection has failed.
        @type timeout: int

        @param checkPID: If True, check for a pid file to verify that a server
            is listening.
        @type checkPID: bool
        """
        self._reactor = reactor
        self._path = path
        self._timeout = timeout
        self._checkPID = checkPID

    def connect(self, protocolFactory):
        """
        Implement L{IStreamClientEndpoint.connect} to connect via a
        UNIX Socket

        @return: A L{Deferred} which fires with the connected protocol, or
            fails with the connection error.
        """
        try:
            wf = _WrappingFactory(protocolFactory)
            self._reactor.connectUNIX(
                self._path, wf,
                timeout=self._timeout,
                checkPID=self._checkPID)
            return wf._onConnection
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not swallowed into a failed Deferred.
            return defer.fail()
@implementer(interfaces.IStreamServerEndpoint)
class AdoptedStreamServerEndpoint:
    """
    An endpoint for listening on a file descriptor initialized outside of
    Twisted.

    @ivar _used: A C{bool} indicating whether this endpoint has been used to
        listen with a factory yet.  C{True} if so.
    """
    # Indirections for the descriptor operations, presumably so tests can
    # substitute them -- TODO confirm against the test suite.
    _close = os.close
    _setNonBlocking = staticmethod(fdesc.setNonBlocking)

    def __init__(self, reactor, fileno, addressFamily):
        """
        @param reactor: An L{IReactorSocket} provider.

        @param fileno: An integer file descriptor corresponding to a listening
            I{SOCK_STREAM} socket.

        @param addressFamily: The address family of the socket given by
            C{fileno}.
        """
        self.reactor = reactor
        self.fileno = fileno
        self.addressFamily = addressFamily
        self._used = False

    def listen(self, factory):
        """
        Implement L{IStreamServerEndpoint.listen} to start listening on, and
        then close, C{self.fileno}.

        @return: A L{Deferred} firing with the listening port, or failing
            with L{error.AlreadyListened} if this endpoint was already used.
        """
        if self._used:
            return defer.fail(error.AlreadyListened())
        self._used = True

        try:
            self._setNonBlocking(self.fileno)
            port = self.reactor.adoptStreamPort(
                self.fileno, self.addressFamily, factory)
            # IReactorSocket.adoptStreamPort duplicates the descriptor, so
            # the original one we were given is closed here.
            self._close(self.fileno)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate instead of becoming a failed Deferred.
            return defer.fail()
        return defer.succeed(port)
def _parseTCP(factory, port, interface="", backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a TCP(IPv4) stream endpoint into the structured arguments.
@param factory: the protocol factory being parsed, or L{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or L{None}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory, arguments
to L{TCP4ServerEndpoint}.
"""
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
def _parseUNIX(factory, address, mode='666', backlog=50, lockfile=True):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the
structured arguments.
@param factory: the protocol factory being parsed, or L{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or L{None}
@param address: the pathname of the unix socket
@type address: C{str}
@param backlog: the length of the listen queue
@type backlog: C{str}
@param lockfile: A string '0' or '1', mapping to True and False
respectively. See the C{wantPID} argument to C{listenUNIX}
@return: a 2-tuple of (args, kwargs), describing the parameters to
L{twisted.internet.interfaces.IReactorUNIX.listenUNIX} (or,
modulo argument 2, the factory, arguments to L{UNIXServerEndpoint}.
"""
return (
(address, factory),
{'mode': int(mode, 8), 'backlog': int(backlog),
'wantPID': bool(int(lockfile))})
def _parseSSL(factory, port, privateKey="server.pem", certKey=None,
              sslmethod=None, interface='', backlog=50, extraCertChain=None,
              dhParameters=None):
    """
    Internal parser function for L{_parseServer} to convert the string
    arguments for an SSL (over TCP/IPv4) stream endpoint into the structured
    arguments.
    @param factory: the protocol factory being parsed, or L{None}.  (This was a
        leftover argument from when this code was in C{strports}, and is now
        mostly None and unused.)
    @type factory: L{IProtocolFactory} or L{None}
    @param port: the integer port number to bind
    @type port: C{str}
    @param interface: the interface IP to listen on
    @param backlog: the length of the listen queue
    @type backlog: C{str}
    @param privateKey: The file name of a PEM format private key file.
    @type privateKey: C{str}
    @param certKey: The file name of a PEM format certificate file.
    @type certKey: C{str}
    @param sslmethod: The string name of an SSL method, based on the name of a
        constant in C{OpenSSL.SSL}.  Must be one of: "SSLv23_METHOD",
        "SSLv2_METHOD", "SSLv3_METHOD", "TLSv1_METHOD".
    @type sslmethod: C{str}
    @param extraCertChain: The path of a file containing one or more
        certificates in PEM format that establish the chain from a root CA to
        the CA that signed your C{certKey}.
    @type extraCertChain: L{str}
    @param dhParameters: The file name of a file containing parameters that are
        required for Diffie-Hellman key exchange.  If this is not specified,
        the forward secret C{DHE} ciphers aren't available for servers.
    @type dhParameters: L{str}
    @return: a 2-tuple of (args, kwargs), describing the parameters to
        L{IReactorSSL.listenSSL} (or, modulo argument 2, the factory, arguments
        to L{SSL4ServerEndpoint}.
    """
    # Imported locally, presumably so the module can be imported without
    # OpenSSL installed -- only this parser requires it.
    from twisted.internet import ssl
    # With no separate certificate file, the private key file is expected to
    # contain the certificate as well.
    if certKey is None:
        certKey = privateKey
    kw = {}
    if sslmethod is not None:
        # Resolve e.g. "TLSv1_METHOD" to the corresponding OpenSSL constant.
        kw['method'] = getattr(ssl.SSL, sslmethod)
    # Concatenate certificate and key PEM blobs; loadPEM accepts a combined
    # document containing both.
    certPEM = FilePath(certKey).getContent()
    keyPEM = FilePath(privateKey).getContent()
    privateCertificate = ssl.PrivateCertificate.loadPEM(
        certPEM + b'\n' + keyPEM)
    if extraCertChain is not None:
        # Split the chain file into individual PEM certificate blocks;
        # re.DOTALL lets '.+?' span the multi-line base64 body.
        matches = re.findall(
            r'(-----BEGIN CERTIFICATE-----\n.+?\n-----END CERTIFICATE-----)',
            nativeString(FilePath(extraCertChain).getContent()),
            flags=re.DOTALL
        )
        # .original unwraps to the underlying pyOpenSSL X509 objects that
        # CertificateOptions expects.
        chainCertificates = [ssl.Certificate.loadPEM(chainCertPEM).original
                             for chainCertPEM in matches]
        if not chainCertificates:
            raise ValueError(
                "Specified chain file '%s' doesn't contain any valid "
                "certificates in PEM format." % (extraCertChain,)
            )
    else:
        chainCertificates = None
    if dhParameters is not None:
        # Replace the path string with the parsed parameter object.
        dhParameters = ssl.DiffieHellmanParameters.fromFile(
            FilePath(dhParameters),
        )
    cf = ssl.CertificateOptions(
        privateKey=privateCertificate.privateKey.original,
        certificate=privateCertificate.original,
        extraCertChain=chainCertificates,
        dhParameters=dhParameters,
        **kw
    )
    return ((int(port), factory, cf),
            {'interface': interface, 'backlog': int(backlog)})
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _StandardIOParser:
    """
    Stream server endpoint string parser for the Standard I/O type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.
    """
    prefix = "stdio"

    def _parseServer(self, reactor):
        """
        Construct a L{StandardIOEndpoint}; the description string carries no
        further arguments for this endpoint type.

        @param reactor: Reactor for the endpoint
        """
        return StandardIOEndpoint(reactor)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # Delegate to _parseServer; the generic *args/**kwargs signature here
        # satisfies zope.interface's implementation check.
        return self._parseServer(reactor)
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _SystemdParser:
    """
    Stream server endpoint string parser for the I{systemd} endpoint type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.

    @ivar _sddaemon: A L{ListenFDs} instance used to translate an index into an
        actual file descriptor.
    """
    # Snapshot of the descriptors systemd handed to this process, taken at
    # class-definition time.
    _sddaemon = ListenFDs.fromEnvironment()

    prefix = "systemd"

    def _parseServer(self, reactor, domain, index):
        """
        Internal parser function for L{_parseServer} to convert the string
        arguments for a systemd server endpoint into structured arguments for
        L{AdoptedStreamServerEndpoint}.

        @param reactor: An L{IReactorSocket} provider.

        @param domain: The domain (or address family) of the socket inherited
            from systemd.  This is a string like C{"INET"} or C{"UNIX"}, ie
            the name of an address family from the L{socket} module, without
            the C{"AF_"} prefix.
        @type domain: C{str}

        @param index: An offset into the list of file descriptors inherited
            from systemd.
        @type index: C{str}

        @return: An L{AdoptedStreamServerEndpoint} wrapping the inherited
            descriptor.
        """
        fileno = self._sddaemon.inheritedDescriptors()[int(index)]
        addressFamily = getattr(socket, 'AF_' + domain)
        return AdoptedStreamServerEndpoint(reactor, fileno, addressFamily)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # Delegate to _parseServer; the generic *args/**kwargs signature here
        # satisfies zope.interface's implementation check.
        return self._parseServer(reactor, *args, **kwargs)
@implementer(IPlugin, IStreamServerEndpointStringParser)
class _TCP6ServerParser:
    """
    Stream server endpoint string parser for the TCP6ServerEndpoint type.

    @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}.
    """
    # _parseServer uses this prefix to match the plugin to the endpoint type.
    prefix = "tcp6"

    def _parseServer(self, reactor, port, backlog=50, interface='::'):
        """
        Internal parser function for L{_parseServer} to convert the string
        arguments into structured arguments for the L{TCP6ServerEndpoint}

        @param reactor: An L{IReactorTCP} provider.

        @param port: The port number used for listening
        @type port: int

        @param backlog: Size of the listen queue
        @type backlog: int

        @param interface: The hostname to bind to
        @type interface: str
        """
        return TCP6ServerEndpoint(reactor, int(port), int(backlog), interface)

    def parseStreamServer(self, reactor, *args, **kwargs):
        # Delegate to _parseServer; the generic *args/**kwargs signature here
        # satisfies zope.interface's implementation check.
        return self._parseServer(reactor, *args, **kwargs)
# Registry mapping a built-in server strports prefix (e.g. the "tcp" in
# "tcp:80") to the parser function that converts the description's string
# arguments into structured (args, kwargs).
_serverParsers = {"tcp": _parseTCP,
                  "unix": _parseUNIX,
                  "ssl": _parseSSL,
                  }
# Token type tags yielded by _tokenize: _OP for a ':' or '=' operator,
# _STRING for the text between operators.
_OP, _STRING = range(2)
def _tokenize(description):
    """
    Tokenize a strports string and yield each token.

    @param description: a string as described by L{serverFromString} or
        L{clientFromString}.
    @type description: L{str} or L{bytes}

    @return: an iterable of 2-tuples of (C{_OP} or C{_STRING}, string).  Tuples
        starting with C{_OP} will contain a second element of either ':' (i.e.
        'next parameter') or '=' (i.e. 'assign parameter value').  For example,
        the string 'hello:greeting=world' would result in a generator yielding
        these values::

            _STRING, 'hello'
            _OP, ':'
            _STRING, 'greeting'
            _OP, '='
            _STRING, 'world'
    """
    # _matchingString coerces each literal to the same type (bytes or str)
    # as ``description`` so the comparisons below work for either input type.
    empty = _matchingString(u'', description)
    colon = _matchingString(u':', description)
    equals = _matchingString(u'=', description)
    backslash = _matchingString(u'\x5c', description)  # '\x5c' is a backslash
    current = empty
    # ``ops`` holds the characters currently treated as operators.  After a
    # ':' both ':' and '=' are operators; after an '=' only ':' is, so a
    # parameter value may contain literal '=' characters.
    ops = colon + equals
    nextOps = {colon: colon + equals, equals: colon}
    iterdesc = iter(iterbytes(description))
    for n in iterdesc:
        if n in iterbytes(ops):
            yield _STRING, current
            yield _OP, n
            current = empty
            ops = nextOps[n]
        elif n == backslash:
            # A backslash escapes the following character, operator or not.
            current += next(iterdesc)
        else:
            current += n
    yield _STRING, current
def _parse(description):
    """
    Convert a description string into a list of positional and keyword
    parameters, using logic vaguely like what Python does.

    @param description: a string as described by L{serverFromString} or
        L{clientFromString}.

    @return: a 2-tuple of C{(args, kwargs)}, where 'args' is a list of all
        ':'-separated C{str}s not containing an '=' and 'kwargs' is a map of
        all C{str}s which do contain an '='.  For example, the result of
        C{_parse('a:b:d=1:c')} would be C{(['a', 'b', 'c'], {'d': '1'})}.
    """
    args, kw = [], {}
    colon = _matchingString(u':', description)

    def add(sofar):
        # A single accumulated string is a positional argument; two strings
        # are the key/value pair produced by an '=' token.
        if len(sofar) == 1:
            args.append(sofar[0])
        else:
            kw[nativeString(sofar[0])] = sofar[1]

    # BUG FIX: ``sofar`` must be initialised at function scope before the
    # loop uses ``sofar += (value,)``.  Previously this initialisation sat
    # inside add(), where it only rebound add()'s local parameter and left
    # the loop to fail with NameError on the first string token.
    sofar = ()
    for (type, value) in _tokenize(description):
        if type is _STRING:
            sofar += (value,)
        elif value == colon:
            add(sofar)
            sofar = ()
    add(sofar)
    return args, kw
# Mappings from description "names" to endpoint constructors.  The keys are
# the upper-cased strports prefixes; the values are the endpoint classes the
# parsed arguments are ultimately handed to.
_endpointServerFactories = {
    'TCP': TCP4ServerEndpoint,
    'SSL': SSL4ServerEndpoint,
    'UNIX': UNIXServerEndpoint,
    }
# Client-side counterpart of _endpointServerFactories.
_endpointClientFactories = {
    'TCP': TCP4ClientEndpoint,
    'SSL': SSL4ClientEndpoint,
    'UNIX': UNIXClientEndpoint,
    }
def _parseServer(description, factory):
    """
    Parse a strports description into a 2-tuple of arguments and keyword
    values.

    @param description: A description in the format explained by
        L{serverFromString}.
    @type description: C{str}

    @param factory: A 'factory' argument; this is left-over from
        twisted.application.strports, it's not really used.
    @type factory: L{IProtocolFactory} or L{None}

    @return: a 3-tuple of (plugin or name, arguments, keyword arguments)
    """
    args, kw = _parse(description)
    endpointType = args[0]
    parser = _serverParsers.get(endpointType)
    if parser is not None:
        # Built-in endpoint type: run its argument parser now.
        return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
    # No built-in parser: look for a plugin registered for this prefix and
    # let the caller invoke it.
    plugin = _matchPluginToPrefix(
        getPlugins(IStreamServerEndpointStringParser), endpointType
    )
    return (plugin, args[1:], kw)
def _matchPluginToPrefix(plugins, endpointType):
    """
    Find the plugin whose C{prefix} matches C{endpointType},
    case-insensitively.

    @param plugins: an iterable of endpoint string parser plugins.

    @param endpointType: the prefix from a strports description.

    @raise ValueError: if no plugin's prefix matches.

    @return: the matching plugin.
    """
    wanted = endpointType.lower()
    for candidate in plugins:
        # Coerce the plugin's prefix to the same string type as the
        # description before comparing.
        prefix = _matchingString(candidate.prefix.lower(), wanted)
        if prefix == wanted:
            return candidate
    raise ValueError("Unknown endpoint type: '%s'" % (wanted,))
def serverFromString(reactor, description):
    """
    Construct a stream server endpoint from an endpoint description string.

    The format for server endpoint descriptions is a simple byte string: a
    prefix naming the type of endpoint, then a colon, then the arguments for
    that endpoint.

    For example, to create an endpoint that will listen on TCP port 80::

        serverFromString(reactor, "tcp:80")

    Additional arguments may be specified as keywords, separated with colons.
    For example, to specify the interface for a TCP server endpoint to bind
    to::

        serverFromString(reactor, "tcp:80:interface=127.0.0.1")

    SSL server endpoints may be specified with the 'ssl' prefix, and the
    private key and certificate files may be specified by the C{privateKey}
    and C{certKey} arguments::

        serverFromString(
            reactor, "ssl:443:privateKey=key.pem:certKey=crt.pem")

    If a private key file name (C{privateKey}) isn't provided, a "server.pem"
    file is assumed to exist which contains the private key.  If the
    certificate file name (C{certKey}) isn't provided, the private key file is
    assumed to contain the certificate as well.

    You may escape colons in arguments with a backslash, which you will need
    to use if you want to specify a full pathname argument on Windows::

        serverFromString(reactor,
            "ssl:443:privateKey=C\\:/key.pem:certKey=C\\:/cert.pem")

    Finally, the 'unix' prefix may be used to specify a filesystem UNIX
    socket, optionally with a 'mode' argument to specify the mode of the
    socket file created by C{listen}::

        serverFromString(reactor, "unix:/var/run/finger")
        serverFromString(reactor, "unix:/var/run/finger:mode=660")

    This function is also extensible; new endpoint types may be registered as
    L{IStreamServerEndpointStringParser} plugins.  See that interface for more
    information.

    @param reactor: The server endpoint will be constructed with this reactor.

    @param description: The strports description to parse.
    @type description: L{str}

    @return: A new endpoint which can be used to listen with the parameters
        given by C{description}.

    @rtype: L{IStreamServerEndpoint<twisted.internet.interfaces.IStreamServerEndpoint>}

    @raise ValueError: when the 'description' string cannot be parsed.

    @since: 10.2
    """
    nameOrPlugin, args, kw = _parseServer(description, None)
    if isinstance(nameOrPlugin, str):
        # Built-in endpoint type: drop the unused factory placeholder from
        # the positional arguments and dispatch on the upper-cased name.
        args = args[:1] + args[2:]
        return _endpointServerFactories[nameOrPlugin](reactor, *args, **kw)
    # Otherwise a plugin was matched; let it build the endpoint itself.
    return nameOrPlugin.parseStreamServer(reactor, *args, **kw)
def quoteStringArgument(argument):
    """
    Quote an argument to L{serverFromString} and L{clientFromString}.

    Since arguments are separated with colons and colons are escaped with
    backslashes, some care is necessary if, for example, you have a pathname.
    You may be tempted to interpolate into a string like this::

        serverFromString(reactor, "ssl:443:privateKey=%s" % (myPathName,))

    This may appear to work, but will have portability issues (Windows
    pathnames, for example).  Usually you should just construct the
    appropriate endpoint type rather than interpolating strings, which in
    this case would be L{SSL4ServerEndpoint}.  There are some use-cases where
    you may need to generate such a string, though; for example, a tool to
    manipulate a configuration file which has strports descriptions in it.
    To be correct in those cases, do this instead::

        serverFromString(reactor, "ssl:443:privateKey=%s" %
                         (quoteStringArgument(myPathName),))

    @param argument: The part of the endpoint description string you want to
        pass through.
    @type argument: C{str}

    @return: The quoted argument.
    @rtype: C{str}
    """
    escape = '\\'
    # Escape the escape character first so already-inserted backslashes are
    # not themselves re-escaped by the second pass.
    for special in (escape, ':'):
        argument = argument.replace(special, escape + special)
    return argument
def _parseClientTCP(*args, **kwargs):
"""
Perform any argument value coercion necessary for TCP client parameters.
Valid positional arguments to this function are host and port.
Valid keyword arguments to this function are all L{IReactorTCP.connectTCP}
arguments.
@return: The coerced values as a C{dict}.
"""
if len(args) == 2:
kwargs['port'] = int(args[1])
kwargs['host'] = args[0]
elif len(args) == 1:
if 'host' in kwargs:
kwargs['port'] = int(args[0])
else:
kwargs['host'] = args[0]
try:
kwargs['port'] = int(kwargs['port'])
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
try:
kwargs['bindAddress'] = (kwargs['bindAddress'], 0)
except KeyError:
pass
return kwargs
def _loadCAsFromDir(directoryPath):
    """
    Load certificate-authority certificate objects in a given directory.

    @param directoryPath: a L{unicode} or L{bytes} pointing at a directory to
        load .pem files from, or L{None}.

    @return: an L{IOpenSSLTrustRoot} provider.
    """
    caCerts = {}
    for child in directoryPath.children():
        if child.asTextMode().basename().split(u'.')[-1].lower() != u'pem':
            continue
        try:
            data = child.getContent()
        except IOError:
            # Unreadable file (permission denied, corrupt disk): skip it.
            continue
        try:
            theCert = Certificate.loadPEM(data)
        except SSLError:
            # Invalid certificate, unparseable PEM, etc.: skip it.
            continue
        # Key by digest so duplicate certificates collapse into one entry.
        caCerts[theCert.digest()] = theCert
    return trustRootFromCertificates(caCerts.values())
def _parseTrustRootPath(pathName):
    """
    Parse a string referring to a directory full of certificate authorities
    into a trust root.

    @param pathName: path name
    @type pathName: L{unicode} or L{bytes} or L{None}

    @return: L{None} or L{IOpenSSLTrustRoot}
    """
    if pathName is not None:
        return _loadCAsFromDir(FilePath(pathName))
    return None
def _privateCertFromPaths(certificatePath, keyPath):
    """
    Parse a certificate path and key path, either or both of which might be
    L{None}, into a certificate object.

    @param certificatePath: the certificate path
    @type certificatePath: L{bytes} or L{unicode} or L{None}

    @param keyPath: the private key path
    @type keyPath: L{bytes} or L{unicode} or L{None}

    @return: a L{PrivateCertificate} or L{None}
    """
    if certificatePath is None:
        return None
    certBytes = FilePath(certificatePath).getContent()
    if keyPath is None:
        # No separate key file: the certificate PEM is expected to contain
        # the private key too.
        return PrivateCertificate.loadPEM(certBytes)
    keyBytes = FilePath(keyPath).getContent()
    return PrivateCertificate.fromCertificateAndKeyPair(
        Certificate.loadPEM(certBytes),
        # Format 1 is pyOpenSSL's crypto.FILETYPE_PEM.
        KeyPair.load(keyBytes, 1),
    )
def _parseClientSSLOptions(kwargs):
    """
    Parse common arguments for SSL endpoints, creating an L{CertificateOptions}
    instance.

    @param kwargs: A dict of keyword arguments to be parsed, potentially
        containing keys C{certKey}, C{privateKey}, C{caCertsDir}, and
        C{hostname}.  See L{_parseClientSSL}.
    @type kwargs: L{dict}

    @return: The remaining arguments, including a new key C{sslContextFactory}.
    """
    hostname = kwargs.pop('hostname', None)
    certKey = kwargs.pop('certKey', None)
    privateKey = kwargs.pop('privateKey', None)
    clientCertificate = _privateCertFromPaths(certKey, privateKey)
    trustRoot = _parseTrustRootPath(kwargs.pop('caCertsDir', None))

    if hostname is not None:
        configuration = optionsForClientTLS(
            _idnaText(hostname), trustRoot=trustRoot,
            clientCertificate=clientCertificate
        )
    else:
        # Without a hostname there is nothing to verify the peer against;
        # _really_ though, you should specify a hostname.
        if clientCertificate is None:
            privateKeyOpenSSL = None
            certificateOpenSSL = None
        else:
            privateKeyOpenSSL = clientCertificate.privateKey.original
            certificateOpenSSL = clientCertificate.original
        configuration = CertificateOptions(
            trustRoot=trustRoot,
            privateKey=privateKeyOpenSSL,
            certificate=certificateOpenSSL,
        )

    kwargs['sslContextFactory'] = configuration
    return kwargs
def _parseClientSSL(*args, **kwargs):
    """
    Perform any argument value coercion necessary for SSL client parameters.

    Valid keyword arguments to this function are all L{IReactorSSL.connectSSL}
    arguments except for C{contextFactory}.  Instead, C{certKey} (the path
    name of the certificate file) and C{privateKey} (the path name of the
    private key associated with the certificate) are accepted and used to
    construct a context factory.

    Valid positional arguments to this function are host and port.

    @param caCertsDir: The one parameter which is not part of
        L{IReactorSSL.connectSSL}'s signature, this is a path name used to
        construct a list of certificate authority certificates.  The
        directory will be scanned for files ending in C{.pem}, all of which
        will be considered valid certificate authorities for this connection.
    @type caCertsDir: L{str}

    @param hostname: The hostname to use for validating the server's
        certificate.
    @type hostname: L{unicode}

    @return: The coerced values as a L{dict}.
    """
    # First coerce the TCP-level arguments, then fold the SSL-specific keys
    # into an sslContextFactory entry.
    return _parseClientSSLOptions(_parseClientTCP(*args, **kwargs))
def _parseClientUNIX(*args, **kwargs):
"""
Perform any argument value coercion necessary for UNIX client parameters.
Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX}
keyword arguments except for C{checkPID}. Instead, C{lockfile} is accepted
and has the same meaning. Also C{path} is used instead of C{address}.
Valid positional arguments to this function are C{path}.
@return: The coerced values as a C{dict}.
"""
if len(args) == 1:
kwargs['path'] = args[0]
try:
kwargs['checkPID'] = bool(int(kwargs.pop('lockfile')))
except KeyError:
pass
try:
kwargs['timeout'] = int(kwargs['timeout'])
except KeyError:
pass
return kwargs
# Maps the upper-cased client strports prefix to the coercion function that
# turns the description's string arguments into keyword arguments for the
# matching entry in _endpointClientFactories.
_clientParsers = {
    'TCP': _parseClientTCP,
    'SSL': _parseClientSSL,
    'UNIX': _parseClientUNIX,
    }
def clientFromString(reactor, description):
    """
    Construct a client endpoint from a description string.

    Client description strings are much like server description strings,
    although they take all of their arguments as keywords, aside from host
    and port.

    You can create a TCP client endpoint with the 'host' and 'port'
    arguments, like so::

        clientFromString(reactor, "tcp:host=www.example.com:port=80")

    or, without specifying host and port keywords::

        clientFromString(reactor, "tcp:www.example.com:80")

    Or you can specify only one or the other, as in the following 2 examples::

        clientFromString(reactor, "tcp:host=www.example.com:80")
        clientFromString(reactor, "tcp:www.example.com:port=80")

    or an SSL client endpoint with those arguments, plus the arguments used
    by the server SSL, for a client certificate::

        clientFromString(reactor, "ssl:web.example.com:443:"
                                  "privateKey=foo.pem:certKey=foo.pem")

    to specify your certificate trust roots, you can identify a directory
    with PEM files in it with the C{caCertsDir} argument::

        clientFromString(reactor, "ssl:host=web.example.com:port=443:"
                                  "caCertsDir=/etc/ssl/certs")

    Both TCP and SSL client endpoint description strings can include a
    'bindAddress' keyword argument, whose value should be a local IPv4
    address.  This fixes the client socket to that IP address::

        clientFromString(reactor, "tcp:www.example.com:80:"
                                  "bindAddress=192.0.2.100")

    NB: Fixed client ports are not currently supported in TCP or SSL
    client endpoints.  The client socket will always use an ephemeral
    port assigned by the operating system.

    You can create a UNIX client endpoint with the 'path' argument and
    optional 'lockfile' and 'timeout' arguments::

        clientFromString(
            reactor, b"unix:path=/var/foo/bar:lockfile=1:timeout=9")

    or, with the path as a positional argument with or without optional
    arguments as in the following 2 examples::

        clientFromString(reactor, "unix:/var/foo/bar")
        clientFromString(reactor, "unix:/var/foo/bar:lockfile=1:timeout=9")

    This function is also extensible; new endpoint types may be registered as
    L{IStreamClientEndpointStringParserWithReactor} plugins.  See that
    interface for more information.

    @param reactor: The client endpoint will be constructed with this reactor.

    @param description: The strports description to parse.
    @type description: L{str}

    @return: A new endpoint which can be used to connect with the parameters
        given by C{description}.
    @rtype: L{IStreamClientEndpoint<twisted.internet.interfaces.IStreamClientEndpoint>}

    @since: 10.2
    """
    args, kwargs = _parse(description)
    name = args.pop(0).upper()
    parser = _clientParsers.get(name)
    if parser is None:
        # Unknown built-in prefix: fall back to a registered plugin.
        plugin = _matchPluginToPrefix(
            getPlugins(IStreamClientEndpointStringParserWithReactor), name
        )
        return plugin.parseStreamClient(reactor, *args, **kwargs)
    coerced = parser(*args, **kwargs)
    return _endpointClientFactories[name](reactor, **coerced)
def connectProtocol(endpoint, protocol):
    """
    Connect a protocol instance to an endpoint.

    This allows using a client endpoint without having to create a factory.

    @param endpoint: A client endpoint to connect to.

    @param protocol: A protocol instance.

    @return: The result of calling C{connect} on the endpoint, i.e. a
        L{Deferred} that will fire with the protocol when connected, or an
        appropriate error.

    @since: 13.1
    """
    class _SingleProtocolFactory(Factory):
        # Always hand back the one already-constructed protocol instance.
        def buildProtocol(self, addr):
            return protocol

    return endpoint.connect(_SingleProtocolFactory())
@implementer(interfaces.IStreamClientEndpoint)
class _WrapperEndpoint:
    """
    A client endpoint that applies a wrapping factory around every
    connection attempt made through another endpoint.
    """

    def __init__(self, wrappedEndpoint, wrapperFactory):
        """
        Construct a L{_WrapperEndpoint}.

        @param wrappedEndpoint: the endpoint that actually connects.

        @param wrapperFactory: a callable taking a protocol factory and
            returning the factory to hand to the wrapped endpoint.
        """
        self._wrappedEndpoint = wrappedEndpoint
        self._wrapperFactory = wrapperFactory

    def connect(self, protocolFactory):
        """
        Connect the given protocol factory and unwrap its result.
        """
        wrapped = self._wrapperFactory(protocolFactory)
        connecting = self._wrappedEndpoint.connect(wrapped)
        # The wrapper protocol exposes the user's protocol as
        # ``wrappedProtocol``; return that to the caller.
        return connecting.addCallback(
            lambda wrapperProtocol: wrapperProtocol.wrappedProtocol)
@implementer(interfaces.IStreamServerEndpoint)
class _WrapperServerEndpoint:
    """
    A server endpoint that applies a wrapping factory around every factory
    passed to another server endpoint's C{listen}.
    """

    def __init__(self, wrappedEndpoint, wrapperFactory):
        """
        Construct a L{_WrapperServerEndpoint}.

        @param wrappedEndpoint: the endpoint that actually listens.

        @param wrapperFactory: a callable taking a protocol factory and
            returning the factory to hand to the wrapped endpoint.
        """
        self._wrappedEndpoint = wrappedEndpoint
        self._wrapperFactory = wrapperFactory

    def listen(self, protocolFactory):
        """
        Listen on the wrapped endpoint with the wrapping factory applied to
        the given protocol factory.
        """
        wrapped = self._wrapperFactory(protocolFactory)
        return self._wrappedEndpoint.listen(wrapped)
def wrapClientTLS(connectionCreator, wrappedEndpoint):
    """
    Wrap an endpoint which upgrades to TLS as soon as the connection is
    established.

    @since: 16.0

    @param connectionCreator: The TLS options to use when connecting; see
        L{twisted.internet.ssl.optionsForClientTLS} for how to construct this.
    @type connectionCreator:
        L{twisted.internet.interfaces.IOpenSSLClientConnectionCreator}

    @param wrappedEndpoint: The endpoint to wrap.
    @type wrappedEndpoint: An L{IStreamClientEndpoint} provider.

    @return: an endpoint that provides transport level encryption layered on
        top of C{wrappedEndpoint}
    @rtype: L{twisted.internet.interfaces.IStreamClientEndpoint}

    @raise NotImplementedError: when the TLS support code is unavailable
        because OpenSSL is not installed.
    """
    if TLSMemoryBIOFactory is None:
        raise NotImplementedError(
            "OpenSSL not available. Try `pip install twisted[tls]`."
        )

    def wrapFactory(protocolFactory):
        # True selects the client side of the TLS handshake.
        return TLSMemoryBIOFactory(connectionCreator, True, protocolFactory)

    return _WrapperEndpoint(wrappedEndpoint, wrapFactory)
def _parseClientTLS(reactor, host, port, timeout=b'30', bindAddress=None,
                    certificate=None, privateKey=None, trustRoots=None,
                    endpoint=None, **kwargs):
    """
    Internal method to construct an endpoint from string parameters.

    @param reactor: The reactor passed to L{clientFromString}.

    @param host: The hostname to connect to.
    @type host: L{bytes} or L{unicode}

    @param port: The port to connect to.
    @type port: L{bytes} or L{unicode}

    @param timeout: For each individual connection attempt, the number of
        seconds to wait before assuming the connection has failed.
    @type timeout: L{bytes} or L{unicode}

    @param bindAddress: The address to which to bind outgoing connections.
    @type bindAddress: L{bytes} or L{unicode}

    @param certificate: a string representing a filesystem path to a
        PEM-encoded certificate.
    @type certificate: L{bytes} or L{unicode}

    @param privateKey: a string representing a filesystem path to a
        PEM-encoded certificate.
    @type privateKey: L{bytes} or L{unicode}

    @param endpoint: an optional string endpoint description of an endpoint to
        wrap; if this is passed then C{host} is used only for certificate
        verification.
    @type endpoint: L{bytes} or L{unicode}

    @return: a client TLS endpoint
    @rtype: L{IStreamClientEndpoint}

    @raise TypeError: if unrecognized keyword arguments are present.
    """
    if kwargs:
        raise TypeError('unrecognized keyword arguments present',
                        list(kwargs.keys()))

    def asText(value):
        # String-ish parameters may arrive as str or as UTF-8 bytes.
        return value if isinstance(value, str) else value.decode("utf-8")

    host = asText(host)
    if bindAddress is not None:
        bindAddress = asText(bindAddress)
    port = int(port)
    timeout = int(timeout)

    connectionCreator = optionsForClientTLS(
        host, trustRoot=_parseTrustRootPath(trustRoots),
        clientCertificate=_privateCertFromPaths(certificate, privateKey))
    if endpoint is not None:
        # An explicit inner endpoint was described; host is then used only
        # for certificate verification.
        toWrap = clientFromString(reactor, endpoint)
    else:
        toWrap = HostnameEndpoint(reactor, _idnaBytes(host), port, timeout,
                                  bindAddress)
    return wrapClientTLS(connectionCreator, toWrap)
@implementer(IPlugin, IStreamClientEndpointStringParserWithReactor)
class _TLSClientEndpointParser:
    """
    Stream client endpoint string parser for L{wrapClientTLS} with
    L{HostnameEndpoint}.

    @ivar prefix: See
        L{IStreamClientEndpointStringParserWithReactor.prefix}.
    """
    prefix = 'tls'

    @staticmethod
    def parseStreamClient(reactor, *args, **kwargs):
        """
        Delegate to L{_parseClientTLS}.  The generic (C{reactor}, C{*args},
        C{**kwargs}) signature here satisfies zope.interface's verification
        of L{IStreamClientEndpointStringParserWithReactor}; the actual
        argument handling and the documentation of the accepted arguments
        live in L{_parseClientTLS}.

        @param reactor: The reactor passed to L{clientFromString}.

        @param args: The positional arguments in the endpoint description.
        @type args: L{tuple}

        @param kwargs: The named arguments in the endpoint description.
        @type kwargs: L{dict}

        @return: a client TLS endpoint
        @rtype: L{IStreamClientEndpoint}
        """
        return _parseClientTLS(reactor, *args, **kwargs)
@param wrappedFactory: A provider of I{IProtocolFactory} whose buildProtocol method will be called and whose resulting protocol will be wrapped. A connection attempt was started. Remember the connector which started said attempt, for use later. The outgoing connection attempt was cancelled. Fail that L{Deferred} with an L{error.ConnectingCancelledError}. @param deferred: The L{Deferred <defer.Deferred>} that was cancelled; should be the same as C{self._onConnection}. @type deferred: L{Deferred <defer.Deferred>} @note: This relies on startedConnecting having been called, so it may seem as though there's a race condition where C{_connector} may not have been set. However, using public APIs, this condition is impossible to catch, because a connection API (C{connectTCP}/C{SSL}/C{UNIX}) is always invoked before a L{_WrappingFactory}'s L{Deferred <defer.Deferred>} is returned to C{connect()}'s caller. @return: L{None} Start notifications are passed straight through to the wrapped factory. Stop notifications are passed straight through to the wrapped factory. Proxy C{buildProtocol} to our C{self._wrappedFactory} or errback the C{self._onConnection} L{Deferred} if the wrapped factory raises an exception or returns L{None}. @return: An instance of L{_WrappingProtocol} or L{None} Errback the C{self._onConnection} L{Deferred} when the client connection fails. A Standard Input/Output endpoint @ivar _stdio: a callable, like L{stdio.StandardIO}, which takes an L{IProtocol} provider and a C{reactor} keyword argument (interface dependent upon your platform). @param reactor: The reactor for the endpoint. Implement L{IStreamServerEndpoint.listen} to listen on stdin/stdout An L{_IProcessTransportWithConsumerAndProducer} combines various interfaces to work around the issue that L{interfaces.IProcessTransport} is incompletely defined and doesn't specify flow-control interfaces, and that L{proxyForInterface} doesn't allow for multiple interfaces. 
# type: ignore[misc] # noqa An L{ITransport}, L{IProcessTransport}, L{IConsumer}, and L{IPushProducer} provider for the L{IProtocol} instance passed to the process endpoint. @ivar _process: An active process transport which will be used by write methods on this object to write data to a child process. @type _process: L{interfaces.IProcessTransport} provider An L{IProcessProtocol} provider that wraps an L{IProtocol}. @ivar transport: A L{_ProcessEndpointTransport} provider that is hooked to the wrapped L{IProtocol} provider. @see: L{protocol.ProcessProtocol} @param proto: An L{IProtocol} provider. @param errFlag: A constant belonging to L{StandardErrorBehavior} that determines if stderr is logged or dropped. @param executable: The file name (full path) to spawn. Call L{IProtocol} provider's makeConnection method with an L{ITransport} provider. @param process: An L{IProcessTransport} provider. This is called with data from the process's stdout or stderr pipes. It checks the status of the errFlag to setermine if stderr should be logged (default) or dropped. If the process ends with L{error.ProcessDone}, this method calls the L{IProtocol} provider's L{connectionLost} with a L{error.ConnectionDone} @see: L{ProcessProtocol.processEnded} Constants used in ProcessEndpoint to decide what to do with stderr. @cvar LOG: Indicates that stderr is to be logged. @cvar DROP: Indicates that stderr is to be dropped (and not logged). @since: 13.1 An endpoint for child processes @ivar _spawnProcess: A hook used for testing the spawning of child process. @since: 13.1 See L{IReactorProcess.spawnProcess}. @param errFlag: Determines if stderr should be logged. @type errFlag: L{endpoints.StandardErrorBehavior} Implement L{IStreamClientEndpoint.connect} to launch a child process and connect it to a protocol created by C{protocolFactory}. @param protocolFactory: A factory for an L{IProtocol} provider which will be notified of all events related to the created process. 
A TCP server endpoint interface @param reactor: An L{IReactorTCP} provider. @param port: The port number used for listening @type port: int @param backlog: Size of the listen queue @type backlog: int @param interface: The hostname to bind to @type interface: str Implement L{IStreamServerEndpoint.listen} to listen on a TCP socket Implements TCP server endpoint with an IPv4 configuration @param reactor: An L{IReactorTCP} provider. @param port: The port number used for listening @type port: int @param backlog: Size of the listen queue @type backlog: int @param interface: The hostname to bind to, defaults to '' (all) @type interface: str Implements TCP server endpoint with an IPv6 configuration @param reactor: An L{IReactorTCP} provider. @param port: The port number used for listening @type port: int @param backlog: Size of the listen queue @type backlog: int @param interface: The hostname to bind to, defaults to C{::} (all) @type interface: str TCP client endpoint with an IPv4 configuration. @param reactor: An L{IReactorTCP} provider @param host: A hostname, used when connecting @type host: str @param port: The port number, used when connecting @type port: int @param timeout: The number of seconds to wait before assuming the connection has failed. @type timeout: L{float} or L{int} @param bindAddress: A (host, port) tuple of local address to bind to, or None. @type bindAddress: tuple Implement L{IStreamClientEndpoint.connect} to connect via TCP. TCP client endpoint with an IPv6 configuration. @ivar _getaddrinfo: A hook used for testing name resolution. @ivar _deferToThread: A hook used for testing deferToThread. @ivar _GAI_ADDRESS: Index of the address portion in result of getaddrinfo to be used. @ivar _GAI_ADDRESS_HOST: Index of the actual host-address in the 5-tuple L{_GAI_ADDRESS}. 
@param host: An IPv6 address literal or a hostname with an IPv6 address @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP} Implement L{IStreamClientEndpoint.connect} to connect via TCP, once the hostname resolution is done. Resolve the hostname string into a tuple containing the host IPv6 address. Connect to the server using the resolved hostname. An L{IHostnameResolver} provider that invokes a provided callable to resolve hostnames. @ivar _nameResolution: the callable L{resolveHostName} invokes to resolve hostnames. @type _nameResolution: A L{callable} that accepts two arguments: the host to resolve and the port number to include in the result. Create a L{_SimpleHostnameResolver} instance. Initiate a hostname resolution. @param resolutionReceiver: an object that will receive each resolved address as it arrives. @type resolutionReceiver: L{IResolutionReceiver} @param hostName: see interface @param portNumber: see interface @param addressTypes: Ignored in this implementation. @param transportSemantics: Ignored in this implementation. @return: The resolution in progress. @rtype: L{IResolutionReceiver} A name-based endpoint that connects to the fastest amongst the resolved host addresses. @cvar _DEFAULT_ATTEMPT_DELAY: The default time to use between attempts, in seconds, when no C{attemptDelay} is given to L{HostnameEndpoint.__init__}. @ivar _hostText: the textual representation of the hostname passed to the constructor. Used to pass to the reactor's hostname resolver. @type _hostText: L{unicode} @ivar _hostBytes: the encoded bytes-representation of the hostname passed to the constructor. Used to construct the L{HostnameAddress} associated with this endpoint. @type _hostBytes: L{bytes} @ivar _hostStr: the native-string representation of the hostname passed to the constructor, used for exception construction @type _hostStr: native L{str} @ivar _badHostname: a flag - hopefully false! - indicating that an invalid hostname was passed to the constructor. 
This might be a textual hostname that isn't valid IDNA, or non-ASCII bytes. @type _badHostname: L{bool} Create a L{HostnameEndpoint}. @param reactor: The reactor to use for connections and delayed calls. @type reactor: provider of L{IReactorTCP}, L{IReactorTime} and either L{IReactorPluggableNameResolver} or L{IReactorPluggableResolver}. @param host: A hostname to connect to. @type host: L{bytes} or L{unicode} @param port: The port number to connect to. @type port: L{int} @param timeout: For each individual connection attempt, the number of seconds to wait before assuming the connection has failed. @type timeout: L{float} or L{int} @param bindAddress: the local address of the network interface to make the connections from. @type bindAddress: L{bytes} @param attemptDelay: The number of seconds to delay between connection attempts. @type attemptDelay: L{float} @see: L{twisted.internet.interfaces.IReactorTCP.connectTCP} Produce a string representation of the L{HostnameEndpoint}. @return: A L{str} # Use the backslash-encoded version of the string passed to the # constructor, which is already a native string. # Convert the bytes representation to a native string to ensure # that we display the punycoded version of the hostname, which is # more useful than any IDN version as it can be easily copy-pasted # into debugging tools. Retrieve a C{nameResolver} callable and warn the caller's caller that using a reactor which doesn't provide L{IReactorPluggableNameResolver} is deprecated. @param reactor: The reactor to check. @return: A L{IHostnameResolver} provider. For various reasons (documented in the C{@ivar}'s in the class docstring) we need both a textual and a binary representation of the hostname given to the constructor. For compatibility and convenience, we accept both textual and binary representations of the hostname, save the form that was passed, and convert into the other form. 
This is mostly just because L{HostnameAddress} chose somewhat poorly to define its attribute as bytes; hopefully we can find a compatible way to clean this up in the future and just operate in terms of text internally. @param host: A hostname to convert. @type host: L{bytes} or C{str} @return: a 3-tuple of C{(invalid, bytes, text)} where C{invalid} is a boolean indicating the validity of the hostname, C{bytes} is a binary representation of C{host}, and C{text} is a textual representation of C{host}. # Convert the host to _some_ kind of text, to handle below. # `host` has been converted to text by this point either way; it's # invalid as a hostname, and so may contain unprintable characters and # such. escape it with backslashes so the user can get _some_ guess as # to what went wrong. Attempts a connection to each resolved address, and returns a connection which is established first. @param protocolFactory: The protocol factory whose protocol will be connected. @type protocolFactory: L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>} @return: A L{Deferred} that fires with the connected protocol or fails a connection-related error. # Yield an endpoint for every address resolved from the name. # This canceller must remain defined outside of # `startConnectionAttempts`, because Deferred should not # participate in cycles with their cancellers; that would create a # potentially problematic circular reference and possibly # gc.garbage. Given a sequence of endpoints obtained via name resolution, start connecting to a new one every C{self._attemptDelay} seconds until one of the connections succeeds, all of them fail, or the attempt is cancelled. @param endpoints: a list of all the endpoints we might try to connect to, as determined by name resolution. 
@type endpoints: L{list} of L{IStreamServerEndpoint} @return: a Deferred that fires with the result of the C{endpoint.connect} method that completes the fastest, or fails with the first connection error it encountered if none of them succeed. @rtype: L{Deferred} failing with L{error.ConnectingCancelledError} or firing with L{IProtocol} # The list of endpoints ends. Resolve the hostname string into a tuple containing the host address. This is method is only used when the reactor does not provide L{IReactorPluggableNameResolver}. @param host: A unicode hostname to resolve. @param port: The port to include in the resolution. @return: A L{Deferred} that fires with L{_getaddrinfo}'s return value. SSL secured TCP server endpoint with an IPv4 configuration. @param reactor: An L{IReactorSSL} provider. @param port: The port number used for listening @type port: int @param sslContextFactory: An instance of L{interfaces.IOpenSSLContextFactory}. @param backlog: Size of the listen queue @type backlog: int @param interface: The hostname to bind to, defaults to '' (all) @type interface: str Implement L{IStreamServerEndpoint.listen} to listen for SSL on a TCP socket. SSL secured TCP client endpoint with an IPv4 configuration @param reactor: An L{IReactorSSL} provider. @param host: A hostname, used when connecting @type host: str @param port: The port number, used when connecting @type port: int @param sslContextFactory: SSL Configuration information as an instance of L{interfaces.IOpenSSLContextFactory}. @param timeout: Number of seconds to wait before assuming the connection has failed. @type timeout: int @param bindAddress: A (host, port) tuple of local address to bind to, or None. @type bindAddress: tuple Implement L{IStreamClientEndpoint.connect} to connect with SSL over TCP. UnixSocket server endpoint. @param reactor: An L{IReactorUNIX} provider. @param address: The path to the Unix socket file, used when listening @param backlog: number of connections to allow in backlog. 
@param mode: mode to set on the unix socket. This parameter is deprecated. Permissions should be set on the directory which contains the UNIX socket. @param wantPID: If True, create a pidfile for the socket. Implement L{IStreamServerEndpoint.listen} to listen on a UNIX socket. UnixSocket client endpoint. @param reactor: An L{IReactorUNIX} provider. @param path: The path to the Unix socket file, used when connecting @type path: str @param timeout: Number of seconds to wait before assuming the connection has failed. @type timeout: int @param checkPID: If True, check for a pid file to verify that a server is listening. @type checkPID: bool Implement L{IStreamClientEndpoint.connect} to connect via a UNIX Socket An endpoint for listening on a file descriptor initialized outside of Twisted. @ivar _used: A C{bool} indicating whether this endpoint has been used to listen with a factory yet. C{True} if so. @param reactor: An L{IReactorSocket} provider. @param fileno: An integer file descriptor corresponding to a listening I{SOCK_STREAM} socket. @param addressFamily: The address family of the socket given by C{fileno}. Implement L{IStreamServerEndpoint.listen} to start listening on, and then close, C{self._fileno}. Internal parser function for L{_parseServer} to convert the string arguments for a TCP(IPv4) stream endpoint into the structured arguments. @param factory: the protocol factory being parsed, or L{None}. (This was a leftover argument from when this code was in C{strports}, and is now mostly None and unused.) @type factory: L{IProtocolFactory} or L{None} @param port: the integer port number to bind @type port: C{str} @param interface: the interface IP to listen on @param backlog: the length of the listen queue @type backlog: C{str} @return: a 2-tuple of (args, kwargs), describing the parameters to L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory, arguments to L{TCP4ServerEndpoint}. 
Internal parser function for L{_parseServer} to convert the string arguments for a UNIX (AF_UNIX/SOCK_STREAM) stream endpoint into the structured arguments. @param factory: the protocol factory being parsed, or L{None}. (This was a leftover argument from when this code was in C{strports}, and is now mostly None and unused.) @type factory: L{IProtocolFactory} or L{None} @param address: the pathname of the unix socket @type address: C{str} @param backlog: the length of the listen queue @type backlog: C{str} @param lockfile: A string '0' or '1', mapping to True and False respectively. See the C{wantPID} argument to C{listenUNIX} @return: a 2-tuple of (args, kwargs), describing the parameters to L{twisted.internet.interfaces.IReactorUNIX.listenUNIX} (or, modulo argument 2, the factory, arguments to L{UNIXServerEndpoint}. Internal parser function for L{_parseServer} to convert the string arguments for an SSL (over TCP/IPv4) stream endpoint into the structured arguments. @param factory: the protocol factory being parsed, or L{None}. (This was a leftover argument from when this code was in C{strports}, and is now mostly None and unused.) @type factory: L{IProtocolFactory} or L{None} @param port: the integer port number to bind @type port: C{str} @param interface: the interface IP to listen on @param backlog: the length of the listen queue @type backlog: C{str} @param privateKey: The file name of a PEM format private key file. @type privateKey: C{str} @param certKey: The file name of a PEM format certificate file. @type certKey: C{str} @param sslmethod: The string name of an SSL method, based on the name of a constant in C{OpenSSL.SSL}. Must be one of: "SSLv23_METHOD", "SSLv2_METHOD", "SSLv3_METHOD", "TLSv1_METHOD". @type sslmethod: C{str} @param extraCertChain: The path of a file containing one or more certificates in PEM format that establish the chain from a root CA to the CA that signed your C{certKey}. 
@type extraCertChain: L{str} @param dhParameters: The file name of a file containing parameters that are required for Diffie-Hellman key exchange. If this is not specified, the forward secret C{DHE} ciphers aren't available for servers. @type dhParameters: L{str} @return: a 2-tuple of (args, kwargs), describing the parameters to L{IReactorSSL.listenSSL} (or, modulo argument 2, the factory, arguments to L{SSL4ServerEndpoint}. Stream server endpoint string parser for the Standard I/O type. @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}. Internal parser function for L{_parseServer} to convert the string arguments into structured arguments for the L{StandardIOEndpoint} @param reactor: Reactor for the endpoint # Redirects to another function (self._parseServer), tricks zope.interface # into believing the interface is correctly implemented. Stream server endpoint string parser for the I{systemd} endpoint type. @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}. @ivar _sddaemon: A L{ListenFDs} instance used to translate an index into an actual file descriptor. Internal parser function for L{_parseServer} to convert the string arguments for a systemd server endpoint into structured arguments for L{AdoptedStreamServerEndpoint}. @param reactor: An L{IReactorSocket} provider. @param domain: The domain (or address family) of the socket inherited from systemd. This is a string like C{"INET"} or C{"UNIX"}, ie the name of an address family from the L{socket} module, without the C{"AF_"} prefix. @type domain: C{str} @param index: An offset into the list of file descriptors inherited from systemd. @type index: C{str} @return: A two-tuple of parsed positional arguments and parsed keyword arguments (a tuple and a dictionary). These can be used to construct an L{AdoptedStreamServerEndpoint}. # Delegate to another function with a sane signature. 
This function has # an insane signature to trick zope.interface into believing the # interface is correctly implemented. Stream server endpoint string parser for the TCP6ServerEndpoint type. @ivar prefix: See L{IStreamServerEndpointStringParser.prefix}. # Used in _parseServer to identify the plugin with the endpoint type Internal parser function for L{_parseServer} to convert the string arguments into structured arguments for the L{TCP6ServerEndpoint} @param reactor: An L{IReactorTCP} provider. @param port: The port number used for listening @type port: int @param backlog: Size of the listen queue @type backlog: int @param interface: The hostname to bind to @type interface: str # Redirects to another function (self._parseServer), tricks zope.interface # into believing the interface is correctly implemented. Tokenize a strports string and yield each token. @param description: a string as described by L{serverFromString} or L{clientFromString}. @type description: L{str} or L{bytes} @return: an iterable of 2-tuples of (C{_OP} or C{_STRING}, string). Tuples starting with C{_OP} will contain a second element of either ':' (i.e. 'next parameter') or '=' (i.e. 'assign parameter value'). For example, the string 'hello:greeting=world' would result in a generator yielding these values:: _STRING, 'hello' _OP, ':' _STRING, 'greet=ing' _OP, '=' _STRING, 'world' Convert a description string into a list of positional and keyword parameters, using logic vaguely like what Python does. @param description: a string as described by L{serverFromString} or L{clientFromString}. @return: a 2-tuple of C{(args, kwargs)}, where 'args' is a list of all ':'-separated C{str}s not containing an '=' and 'kwargs' is a map of all C{str}s which do contain an '='. For example, the result of C{_parse('a:b:d=1:c')} would be C{(['a', 'b', 'c'], {'d': '1'})}. # Mappings from description "names" to endpoint constructors. Parse a strports description into a 2-tuple of arguments and keyword values. 
@param description: A description in the format explained by L{serverFromString}. @type description: C{str} @param factory: A 'factory' argument; this is left-over from twisted.application.strports, it's not really used. @type factory: L{IProtocolFactory} or L{None} @return: a 3-tuple of (plugin or name, arguments, keyword arguments) # If the required parser is not found in _server, check if # a plugin exists for the endpointType Match plugin to prefix. Construct a stream server endpoint from an endpoint description string. The format for server endpoint descriptions is a simple byte string. It is a prefix naming the type of endpoint, then a colon, then the arguments for that endpoint. For example, you can call it like this to create an endpoint that will listen on TCP port 80:: serverFromString(reactor, "tcp:80") Additional arguments may be specified as keywords, separated with colons. For example, you can specify the interface for a TCP server endpoint to bind to like this:: serverFromString(reactor, "tcp:80:interface=127.0.0.1") SSL server endpoints may be specified with the 'ssl' prefix, and the private key and certificate files may be specified by the C{privateKey} and C{certKey} arguments:: serverFromString( reactor, "ssl:443:privateKey=key.pem:certKey=crt.pem") If a private key file name (C{privateKey}) isn't provided, a "server.pem" file is assumed to exist which contains the private key. If the certificate file name (C{certKey}) isn't provided, the private key file is assumed to contain the certificate as well. 
You may escape colons in arguments with a backslash, which you will need to use if you want to specify a full pathname argument on Windows:: serverFromString(reactor, "ssl:443:privateKey=C\\:/key.pem:certKey=C\\:/cert.pem") finally, the 'unix' prefix may be used to specify a filesystem UNIX socket, optionally with a 'mode' argument to specify the mode of the socket file created by C{listen}:: serverFromString(reactor, "unix:/var/run/finger") serverFromString(reactor, "unix:/var/run/finger:mode=660") This function is also extensible; new endpoint types may be registered as L{IStreamServerEndpointStringParser} plugins. See that interface for more information. @param reactor: The server endpoint will be constructed with this reactor. @param description: The strports description to parse. @type description: L{str} @return: A new endpoint which can be used to listen with the parameters given by C{description}. @rtype: L{IStreamServerEndpoint<twisted.internet.interfaces.IStreamServerEndpoint>} @raise ValueError: when the 'description' string cannot be parsed. @since: 10.2 # Chop out the factory. Quote an argument to L{serverFromString} and L{clientFromString}. Since arguments are separated with colons and colons are escaped with backslashes, some care is necessary if, for example, you have a pathname, you may be tempted to interpolate into a string like this:: serverFromString(reactor, "ssl:443:privateKey=%s" % (myPathName,)) This may appear to work, but will have portability issues (Windows pathnames, for example). Usually you should just construct the appropriate endpoint type rather than interpolating strings, which in this case would be L{SSL4ServerEndpoint}. There are some use-cases where you may need to generate such a string, though; for example, a tool to manipulate a configuration file which has strports descriptions in it. 
To be correct in those cases, do this instead:: serverFromString(reactor, "ssl:443:privateKey=%s" % (quoteStringArgument(myPathName),)) @param argument: The part of the endpoint description string you want to pass through. @type argument: C{str} @return: The quoted argument. @rtype: C{str} Perform any argument value coercion necessary for TCP client parameters. Valid positional arguments to this function are host and port. Valid keyword arguments to this function are all L{IReactorTCP.connectTCP} arguments. @return: The coerced values as a C{dict}. Load certificate-authority certificate objects in a given directory. @param directoryPath: a L{unicode} or L{bytes} pointing at a directory to load .pem files from, or L{None}. @return: an L{IOpenSSLTrustRoot} provider. # Permission denied, corrupt disk, we don't care. # Duplicate certificate, invalid certificate, etc. We don't care. Parse a string referring to a directory full of certificate authorities into a trust root. @param pathName: path name @type pathName: L{unicode} or L{bytes} or L{None} @return: L{None} or L{IOpenSSLTrustRoot} Parse a certificate path and key path, either or both of which might be L{None}, into a certificate object. @param certificatePath: the certificate path @type certificatePath: L{bytes} or L{unicode} or L{None} @param keyPath: the private key path @type keyPath: L{bytes} or L{unicode} or L{None} @return: a L{PrivateCertificate} or L{None} Parse common arguments for SSL endpoints, creating an L{CertificateOptions} instance. @param kwargs: A dict of keyword arguments to be parsed, potentially containing keys C{certKey}, C{privateKey}, C{caCertsDir}, and C{hostname}. See L{_parseClientSSL}. @type kwargs: L{dict} @return: The remaining arguments, including a new key C{sslContextFactory}. # _really_ though, you should specify a hostname. Perform any argument value coercion necessary for SSL client parameters. 
Valid keyword arguments to this function are all L{IReactorSSL.connectSSL} arguments except for C{contextFactory}. Instead, C{certKey} (the path name of the certificate file) C{privateKey} (the path name of the private key associated with the certificate) are accepted and used to construct a context factory. Valid positional arguments to this function are host and port. @param caCertsDir: The one parameter which is not part of L{IReactorSSL.connectSSL}'s signature, this is a path name used to construct a list of certificate authority certificates. The directory will be scanned for files ending in C{.pem}, all of which will be considered valid certificate authorities for this connection. @type caCertsDir: L{str} @param hostname: The hostname to use for validating the server's certificate. @type hostname: L{unicode} @return: The coerced values as a L{dict}. Perform any argument value coercion necessary for UNIX client parameters. Valid keyword arguments to this function are all L{IReactorUNIX.connectUNIX} keyword arguments except for C{checkPID}. Instead, C{lockfile} is accepted and has the same meaning. Also C{path} is used instead of C{address}. Valid positional arguments to this function are C{path}. @return: The coerced values as a C{dict}. Construct a client endpoint from a description string. Client description strings are much like server description strings, although they take all of their arguments as keywords, aside from host and port. 
You can create a TCP client endpoint with the 'host' and 'port' arguments, like so:: clientFromString(reactor, "tcp:host=www.example.com:port=80") or, without specifying host and port keywords:: clientFromString(reactor, "tcp:www.example.com:80") Or you can specify only one or the other, as in the following 2 examples:: clientFromString(reactor, "tcp:host=www.example.com:80") clientFromString(reactor, "tcp:www.example.com:port=80") or an SSL client endpoint with those arguments, plus the arguments used by the server SSL, for a client certificate:: clientFromString(reactor, "ssl:web.example.com:443:" "privateKey=foo.pem:certKey=foo.pem") to specify your certificate trust roots, you can identify a directory with PEM files in it with the C{caCertsDir} argument:: clientFromString(reactor, "ssl:host=web.example.com:port=443:" "caCertsDir=/etc/ssl/certs") Both TCP and SSL client endpoint description strings can include a 'bindAddress' keyword argument, whose value should be a local IPv4 address. This fixes the client socket to that IP address:: clientFromString(reactor, "tcp:www.example.com:80:" "bindAddress=192.0.2.100") NB: Fixed client ports are not currently supported in TCP or SSL client endpoints. The client socket will always use an ephemeral port assigned by the operating system You can create a UNIX client endpoint with the 'path' argument and optional 'lockfile' and 'timeout' arguments:: clientFromString( reactor, b"unix:path=/var/foo/bar:lockfile=1:timeout=9") or, with the path as a positional argument with or without optional arguments as in the following 2 examples:: clientFromString(reactor, "unix:/var/foo/bar") clientFromString(reactor, "unix:/var/foo/bar:lockfile=1:timeout=9") This function is also extensible; new endpoint types may be registered as L{IStreamClientEndpointStringParserWithReactor} plugins. See that interface for more information. @param reactor: The client endpoint will be constructed with this reactor. 
@param description: The strports description to parse. @type description: L{str} @return: A new endpoint which can be used to connect with the parameters given by C{description}. @rtype: L{IStreamClientEndpoint<twisted.internet.interfaces.IStreamClientEndpoint>} @since: 10.2 Connect a protocol instance to an endpoint. This allows using a client endpoint without having to create a factory. @param endpoint: A client endpoint to connect to. @param protocol: A protocol instance. @return: The result of calling C{connect} on the endpoint, i.e. a L{Deferred} that will fire with the protocol when connected, or an appropriate error. @since: 13.1 An endpoint that wraps another endpoint. Construct a L{_WrapperEndpoint}. Connect the given protocol factory and unwrap its result. A server endpoint that wraps another server endpoint. Construct a L{_WrapperServerEndpoint}. Connect the given protocol factory and unwrap its result. Wrap an endpoint which upgrades to TLS as soon as the connection is established. @since: 16.0 @param connectionCreator: The TLS options to use when connecting; see L{twisted.internet.ssl.optionsForClientTLS} for how to construct this. @type connectionCreator: L{twisted.internet.interfaces.IOpenSSLClientConnectionCreator} @param wrappedEndpoint: The endpoint to wrap. @type wrappedEndpoint: An L{IStreamClientEndpoint} provider. @return: an endpoint that provides transport level encryption layered on top of C{wrappedEndpoint} @rtype: L{twisted.internet.interfaces.IStreamClientEndpoint} Internal method to construct an endpoint from string parameters. @param reactor: The reactor passed to L{clientFromString}. @param host: The hostname to connect to. @type host: L{bytes} or L{unicode} @param port: The port to connect to. @type port: L{bytes} or L{unicode} @param timeout: For each individual connection attempt, the number of seconds to wait before assuming the connection has failed. 
@type timeout: L{bytes} or L{unicode} @param bindAddress: The address to which to bind outgoing connections. @type bindAddress: L{bytes} or L{unicode} @param certificate: a string representing a filesystem path to a PEM-encoded certificate. @type certificate: L{bytes} or L{unicode} @param privateKey: a string representing a filesystem path to a PEM-encoded certificate. @type privateKey: L{bytes} or L{unicode} @param endpoint: an optional string endpoint description of an endpoint to wrap; if this is passed then C{host} is used only for certificate verification. @type endpoint: L{bytes} or L{unicode} @return: a client TLS endpoint @rtype: L{IStreamClientEndpoint} Stream client endpoint string parser for L{wrapClientTLS} with L{HostnameEndpoint}. @ivar prefix: See L{IStreamClientEndpointStringParserWithReactor.prefix}. Redirects to another function L{_parseClientTLS}; tricks zope.interface into believing the interface is correctly implemented, since the signature is (C{reactor}, C{*args}, C{**kwargs}). See L{_parseClientTLS} for the specific signature description for this endpoint parser. @param reactor: The reactor passed to L{clientFromString}. @param args: The positional arguments in the endpoint description. @type args: L{tuple} @param kwargs: The named arguments in the endpoint description. @type kwargs: L{dict} @return: a client TLS endpoint @rtype: L{IStreamClientEndpoint} | 1.851418 | 2 |
color_see.py | Amenoimi/Simple_OCR | 0 | 6625468 |
from PIL import Image
import numpy as np
import cv2
class color_see():
    """Interactive color tracker.

    Click a pixel in the live camera window to pick a BGR color; regions
    within +/-20 of that color are outlined with rotated bounding boxes.
    """

    def pick_color(self, event, x, y, flags, param):
        """Mouse callback: on left click, sample the clicked pixel and
        build a +/-20 per-channel BGR tolerance band around it."""
        if event == cv2.EVENT_LBUTTONDOWN:
            self.pixel = self.frame[y, x]
            # you might want to adjust the ranges (+-10, etc):
            self.upper = np.array([self.pixel[0] + 20, self.pixel[1] + 20, self.pixel[2] + 20])
            self.lower = np.array([self.pixel[0] - 20, self.pixel[1] - 20, self.pixel[2] - 20])
            print(self.pixel, self.lower, self.upper)

    def __init__(self):
        # start with an empty color band (matches nothing)
        self.lower = np.array([0, 0, 0])
        self.upper = np.array([0, 0, 0])
        # open the default camera (index 0)
        cap = cv2.VideoCapture(0)
        # Create the window up front and register the mouse callback
        # once.  The original registered it on every loop iteration,
        # before the window existed on the first pass, which raises an
        # error in some OpenCV builds.
        cv2.namedWindow('frame')
        cv2.setMouseCallback('frame', self.pick_color)
        while True:
            # grab one frame from the camera; stop on capture failure
            ret, self.frame = cap.read()
            if not ret:
                break
            filtered = cv2.inRange(self.frame, self.lower, self.upper)
            blurred = cv2.GaussianBlur(filtered, (25, 15), 0)
            # find contours in the image; [-2] selects the contour list
            # under both the OpenCV 3 (img, contours, hierarchy) and
            # OpenCV 4 (contours, hierarchy) return conventions
            cnts = cv2.findContours(blurred.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
            for cnt in cnts:
                # compute the (rotated) bounding box around the contour
                # and then draw it
                rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
                cv2.drawContours(self.frame, [rect], -1, (0, 255, 0), 2)
            # show the annotated frame
            cv2.imshow('frame', self.frame)
            cv2.waitKey(1)
            if cv2.getWindowProperty('frame', cv2.WND_PROP_AUTOSIZE) == -1:
                break
        # release the camera and close all OpenCV windows
        cap.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # instantiate for its side effects only; no need to bind the result
    color_see()
from PIL import Image
import numpy as np
import cv2
class color_see():
def pick_color(self,event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN:
self.pixel = self.frame[y,x]
#you might want to adjust the ranges(+-10, etc):
self.upper = np.array([ self.pixel[0] + 20, self.pixel[1] + 20, self.pixel[2] + 20])
self.lower = np.array([ self.pixel[0] - 20, self.pixel[1] - 20, self.pixel[2] - 20])
print( self.pixel, self.lower, self.upper)
def __init__(self):
self.lower = np.array([0, 0, 0])
self.upper = np.array([0, 0, 0])
# mouse callback function
# 選擇第二隻攝影機
cap = cv2.VideoCapture(0)
while(True):
# 從攝影機擷取一張影像
ret, self.frame = cap.read()
cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
cv2.setMouseCallback('frame', self.pick_color)
filtered = cv2.inRange(self.frame, self.lower, self.upper)
blurred = cv2.GaussianBlur(filtered, (25, 15), 0)
# find contours in the image
(_, cnts, _) = cv2.findContours(blurred.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) > 0:
for cnt in cnts:
# compute the (rotated) bounding box around then
# contour and then draw it
rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
cv2.drawContours(self.frame, [rect], -1, (0, 255, 0), 2)
# 顯示圖片
cv2.imshow('frame', self.frame)
cv2.waitKey(1)
if cv2.getWindowProperty('frame', cv2.WND_PROP_AUTOSIZE) == -1:
break
# 釋放攝影機
cap.release()
# 關閉所有 OpenCV 視窗
cv2.destroyAllWindows()
if __name__ == '__main__':
p = color_see() | en | 0.42 | #you might want to adjust the ranges(+-10, etc): # mouse callback function # 選擇第二隻攝影機 # 從攝影機擷取一張影像 # find contours in the image # compute the (rotated) bounding box around then # contour and then draw it # 顯示圖片 # 釋放攝影機 # 關閉所有 OpenCV 視窗 | 2.99187 | 3 |
10_days_of_statistics_5_2.py | sercangul/HackerRank | 0 | 6625469 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:19:34 2019
@author: sercangul
"""
# Read two rates from stdin and print the expected costs, each rounded
# to three decimal places (HackerRank "Day 5" Poisson-cost problem).
rate_a, rate_b = (float(token) for token in input().split())
print(round(160 + 40 * (rate_a ** 2 + rate_a), 3))
print(round(128 + 40 * (rate_b ** 2 + rate_b), 3))
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:19:34 2019
@author: sercangul
"""
a, b = map(float, input().split())
print (round(160+40*(a**2 + a),3))
print (round(128+40*(b**2 + b),3)) | en | 0.400077 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon Jun 3 19:19:34 2019 @author: sercangul | 3.193033 | 3 |
start.py | RaghuA06/Platformer-Game-using-PyGame | 0 | 6625470 | <reponame>RaghuA06/Platformer-Game-using-PyGame
def intro_screen():
    """Render the title menu and block in an event loop until the player
    picks an option (Play hands control to ``main1.starteverything()``).

    NOTE(review): relies on module-level globals ``pygame``, ``size``,
    ``screen`` and ``main1`` defined elsewhere in this file — confirm.
    """
    # background image for the menu (the plain Surface created first is
    # immediately replaced by the loaded image)
    background = pygame.Surface(size)
    background = background.convert()
    background = pygame.image.load("startscreen.jpg")
    # palette used for buttons and labels
    gray = (140,140,145)
    light_gray = (190,190,200)
    white = (255,255,255)
    # clickable rectangles for the four menu entries
    button1 = pygame.Rect(100,100,150,25)
    button2 = pygame.Rect(100,200,150,25)
    button3 = pygame.Rect(100,300,150,25)
    button4 = pygame.Rect(100,400,150,25)
    my_font = pygame.font.SysFont("comicsansms", 48)
    my_font2 = pygame.font.SysFont("opensans",30)
    # pre-rendered text surfaces for the title and button labels
    label = my_font.render("Dungeon Dweller", True, white)
    Play = my_font2.render("Play", True, white)
    Instructions = my_font2.render("Instructions", True, white)
    Credits = my_font2.render("Credits", True, white)
    Quit = my_font2.render("Quit", True, white)
    screen.blit(background, (0,0))
    screen.blit(label, (250, 20))
    timer = pygame.time.Clock()
    sets = True
    while sets:
        # cap the menu loop at 60 FPS
        timer.tick(60)
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                pygame.quit()
                quit()
            if e.type == pygame.MOUSEBUTTONDOWN:
                mouse_pos = e.pos
                if button1.collidepoint(mouse_pos):
                    #print("Play")
                    # start the game proper
                    main1.starteverything()
                elif button2.collidepoint(mouse_pos):
                    # TODO: show an instructions screen (currently a stub)
                    print("Instructions")
                elif button3.collidepoint(mouse_pos):
                    # TODO: show a credits screen (currently a stub)
                    print("Credits")
                elif button4.collidepoint(mouse_pos):
                    # NOTE(review): only prints; does not actually quit
                    print("Quit")
        # hover highlighting: each button is drawn brighter while the
        # mouse pointer is inside its rectangle
        must = pygame.mouse.get_pos()
        if 100+150 > must[0] > 100 and 100+25 > must[1] > 100:
            pygame.draw.rect(screen, (255,0,0), button1)
        else:
            pygame.draw.rect(screen, (200,20,20) ,button1)
        if 100+150 > must[0] > 100 and 200+25 > must[1] > 200:
            pygame.draw.rect(screen, light_gray, button2)
        else:
            pygame.draw.rect(screen, gray, button2)
        if 100+150 > must[0] > 100 and 300+25 > must[1] > 300:
            pygame.draw.rect(screen, light_gray, button3)
        else:
            pygame.draw.rect(screen, gray, button3)
        if 100+150 > must[0] > 100 and 400+25 > must[1] > 400:
            pygame.draw.rect(screen, light_gray, button4)
        else:
            pygame.draw.rect(screen, gray, button4)
        # draw labels on top of the buttons and flip the display
        screen.blit(Play, (150,100))
        screen.blit(Instructions, (112,200))
        screen.blit(Credits, (140,300))
        screen.blit(Quit, (150,400))
        pygame.display.update()
| def intro_screen():
background = pygame.Surface(size)
background = background.convert()
background = pygame.image.load("startscreen.jpg")
gray = (140,140,145)
light_gray = (190,190,200)
white = (255,255,255)
button1 = pygame.Rect(100,100,150,25)
button2 = pygame.Rect(100,200,150,25)
button3 = pygame.Rect(100,300,150,25)
button4 = pygame.Rect(100,400,150,25)
my_font = pygame.font.SysFont("comicsansms", 48)
my_font2 = pygame.font.SysFont("opensans",30)
label = my_font.render("Dungeon Dweller", True, white)
Play = my_font2.render("Play", True, white)
Instructions = my_font2.render("Instructions", True, white)
Credits = my_font2.render("Credits", True, white)
Quit = my_font2.render("Quit", True, white)
screen.blit(background, (0,0))
screen.blit(label, (250, 20))
timer = pygame.time.Clock()
sets = True
while sets:
timer.tick(60)
for e in pygame.event.get():
if e.type == pygame.QUIT:
pygame.quit()
quit()
if e.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = e.pos
if button1.collidepoint(mouse_pos):
#print("Play")
main1.starteverything()
elif button2.collidepoint(mouse_pos):
print("Instructions")
elif button3.collidepoint(mouse_pos):
print("Credits")
elif button4.collidepoint(mouse_pos):
print("Quit")
must = pygame.mouse.get_pos()
if 100+150 > must[0] > 100 and 100+25 > must[1] > 100:
pygame.draw.rect(screen, (255,0,0), button1)
else:
pygame.draw.rect(screen, (200,20,20) ,button1)
if 100+150 > must[0] > 100 and 200+25 > must[1] > 200:
pygame.draw.rect(screen, light_gray, button2)
else:
pygame.draw.rect(screen, gray, button2)
if 100+150 > must[0] > 100 and 300+25 > must[1] > 300:
pygame.draw.rect(screen, light_gray, button3)
else:
pygame.draw.rect(screen, gray, button3)
if 100+150 > must[0] > 100 and 400+25 > must[1] > 400:
pygame.draw.rect(screen, light_gray, button4)
else:
pygame.draw.rect(screen, gray, button4)
screen.blit(Play, (150,100))
screen.blit(Instructions, (112,200))
screen.blit(Credits, (140,300))
screen.blit(Quit, (150,400))
pygame.display.update() | ru | 0.417693 | #print("Play") | 3.312525 | 3 |
src/config/common/vnc_cassandra.py | codilime/contrail-controller-arch | 0 | 6625471 | <filename>src/config/common/vnc_cassandra.py
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import pycassa
from pycassa import ColumnFamily
from pycassa.batch import Mutator
from pycassa.system_manager import SystemManager, SIMPLE_STRATEGY
from pycassa.pool import AllServersUnavailable, MaximumRetryException
import gevent
from vnc_api import vnc_api
from exceptions import NoIdError, DatabaseUnavailableError, VncError
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from sandesh_common.vns.constants import API_SERVER_KEYSPACE_NAME, \
CASSANDRA_DEFAULT_GC_GRACE_SECONDS
import time
from cfgm_common import jsonutils as json
import utils
import datetime
import re
from operator import itemgetter
import itertools
import sys
from collections import Mapping
def merge_dict(orig_dict, new_dict):
    """Recursively merge *new_dict* into *orig_dict* in place and return it.

    - Nested mappings are merged recursively.
    - Lists are concatenated (new elements appended to the existing list).
    - Any other value in new_dict overwrites the one in orig_dict.
    """
    # .items() works on both python 2 and 3 (the original used iteritems)
    for key, value in new_dict.items():
        if key not in orig_dict:
            orig_dict[key] = value
        elif isinstance(value, Mapping):
            orig_dict[key] = merge_dict(orig_dict.get(key, {}), value)
        elif isinstance(value, list):
            # Bug fix: the original assigned the result of list.append(),
            # which is always None, destroying the merged entry.  Extend
            # the existing list in place instead.
            orig_dict[key].extend(value)
        else:
            orig_dict[key] = value
    return orig_dict
class VncCassandraClient(object):
    """Cassandra (pycassa/thrift) persistence layer for VNC config objects.

    Object rows are kept in ``obj_uuid_table`` keyed by uuid; the
    fq-name to uuid mapping lives in ``obj_fq_name_table``; shared-object
    entries live in ``obj_shared_table``.
    """
    # Name to ID mapping keyspace + tables
    _UUID_KEYSPACE_NAME = API_SERVER_KEYSPACE_NAME

    # TODO describe layout
    _OBJ_UUID_CF_NAME = 'obj_uuid_table'

    # TODO describe layout
    _OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'

    # key: object type, column ($type:$id, uuid)
    # where type is entity object is being shared with. Project initially
    _OBJ_SHARED_CF_NAME = 'obj_shared_table'

    # per-keyspace column-family definitions and the pycassa
    # ColumnFamily constructor arguments each one needs
    _UUID_KEYSPACE = {
        _UUID_KEYSPACE_NAME: {
            _OBJ_UUID_CF_NAME: {
                'cf_args': {
                    'autopack_names': False,
                    'autopack_values': False,
                },
            },
            _OBJ_FQ_NAME_CF_NAME: {
                'cf_args': {
                    'autopack_values': False,
                },
            },
            _OBJ_SHARED_CF_NAME: {}
        }
    }

    # upper bound on the number of columns fetched in one request
    _MAX_COL = 10000000
@classmethod
def get_db_info(cls):
db_info = [(cls._UUID_KEYSPACE_NAME, [cls._OBJ_UUID_CF_NAME,
cls._OBJ_FQ_NAME_CF_NAME,
cls._OBJ_SHARED_CF_NAME])]
return db_info
# end get_db_info
@staticmethod
def _is_parent(column_name):
return column_name[:7] == 'parent:'
@staticmethod
def _is_prop(column_name):
return column_name[:5] == 'prop:'
@staticmethod
def _is_prop_list(column_name):
return column_name[:6] == 'propl:'
@staticmethod
def _is_prop_map(column_name):
return column_name[:6] == 'propm:'
@staticmethod
def _is_ref(column_name):
return column_name[:4] == 'ref:'
@staticmethod
def _is_backref(column_name):
return column_name[:8] == 'backref:'
@staticmethod
def _is_children(column_name):
return column_name[:9] == 'children:'
    def __init__(self, server_list, db_prefix, rw_keyspaces, ro_keyspaces,
                 logger, generate_url=None, reset_config=False, credential=None):
        """Connect to cassandra and ensure/wait for the required keyspaces.

        :param server_list: list of cassandra server addresses
        :param db_prefix: optional prefix prepended to every keyspace name
        :param rw_keyspaces: keyspaces owned by this process (created here)
        :param ro_keyspaces: keyspaces created elsewhere (waited on here)
        :param logger: callable(msg, level=...) used for sandesh logging
        :param generate_url: optional callable(obj_type, uuid) -> href
        :param reset_config: when True, drop and recreate rw keyspaces
        :param credential: optional cassandra auth credentials
        """
        self._reset_config = reset_config
        self._cache_uuid_to_fq_name = {}
        if db_prefix:
            self._db_prefix = '%s_' %(db_prefix)
        else:
            self._db_prefix = ''
        self._server_list = server_list
        self._num_dbnodes = len(self._server_list)
        self._conn_state = ConnectionStatus.INIT
        self._logger = logger
        self._credential = credential
        # if no generate_url is specified, use a dummy function that always
        # returns an empty string
        self._generate_url = generate_url or (lambda x,y: '')
        self._cf_dict = {}
        self._ro_keyspaces = ro_keyspaces or {}
        self._rw_keyspaces = rw_keyspaces or {}
        # the uuid keyspace is always needed; default it to read-only when
        # the caller did not claim it either way
        if ((self._UUID_KEYSPACE_NAME not in self._ro_keyspaces) and
            (self._UUID_KEYSPACE_NAME not in self._rw_keyspaces)):
            self._ro_keyspaces.update(self._UUID_KEYSPACE)
        self._cassandra_init(server_list)
        self._cache_uuid_to_fq_name = {}
        # convenience handles to the three uuid-keyspace column families
        self._obj_uuid_cf = self._cf_dict[self._OBJ_UUID_CF_NAME]
        self._obj_fq_name_cf = self._cf_dict[self._OBJ_FQ_NAME_CF_NAME]
        self._obj_shared_cf = self._cf_dict[self._OBJ_SHARED_CF_NAME]
    # end __init__
    def get_cf(self, cf_name):
        """Return the ColumnFamily registered as *cf_name*, or None."""
        return self._cf_dict.get(cf_name)
    #end
def add(self, cf_name, key, value):
try:
self.get_cf(cf_name).insert(key, value)
return True
except:
return False
#end
def get(self, cf_name, key, columns=None, start='', finish=''):
result = self.multiget(cf_name,
[key],
columns=columns,
start=start,
finish=finish)
return result.get(key)
def multiget(self, cf_name, keys, columns=None, start='', finish='',
timestamp=False):
_thrift_limit_size = 10000
results = {}
cf = self.get_cf(cf_name)
if not columns or start or finish:
try:
results = cf.multiget(keys,
column_start=start,
column_finish=finish,
include_timestamp=timestamp,
column_count=self._MAX_COL)
except OverflowError:
for key in keys:
rows = dict(cf.xget(key,
column_start=start,
column_finish=finish,
include_timestamp=timestamp))
if rows:
results[key] = rows
if columns:
max_key_range, _ = divmod(_thrift_limit_size, len(columns))
if max_key_range > 1:
for key_chunk in [keys[x:x+max_key_range] for x in
xrange(0, len(keys), max_key_range)]:
rows = cf.multiget(key_chunk,
columns=columns,
include_timestamp=timestamp,
column_count=self._MAX_COL)
merge_dict(results, rows)
elif max_key_range == 0:
for column_chunk in [columns[x:x+(_thrift_limit_size - 1)] for x in
xrange(0, len(columns), _thrift_limit_size - 1)]:
rows = cf.multiget(keys,
columns=column_chunk,
include_timestamp=timestamp,
column_count=self._MAX_COL)
merge_dict(results, rows)
elif max_key_range == 1:
for key in keys:
try:
cols = cf.get(key,
columns=column_chunk,
include_timestamp=timestamp,
column_count=self._MAX_COL)
except pycassa.NotFoundException:
continue
results.setdefault(key, {}).update(cols)
for key in results:
for col, val in results[key].items():
try:
if timestamp:
results[key][col] = (json.loads(val[0]), val[1])
else:
results[key][col] = json.loads(val)
except ValueError as e:
msg = ("Cannot json load the value of cf: %s, key:%s "
"(error: %s). Use it as is: %s" %
(cf_name, key, str(e),
val if not timestamp else val[0]))
self._logger(msg, level=SandeshLevel.SYS_WARN)
results[key][col] = val
return results
def delete(self, cf_name, key):
try:
self.get_cf(cf_name).remove(key)
return True
except:
return False
#end
def get_range(self, cf_name):
try:
return self.get_cf(cf_name).get_range(column_count=100000)
except:
return None
#end
def get_one_col(self, cf_name, key, column):
col = self.multiget(cf_name, [key], columns=[column])
if key not in col:
raise NoIdError(key)
elif len(col[key]) > 1:
raise VncError('Multi match %s for %s' % (column, key))
return col[key][column]
def _create_prop(self, bch, obj_uuid, prop_name, prop_val):
bch.insert(obj_uuid, {'prop:%s' % (prop_name): json.dumps(prop_val)})
# end _create_prop
def _update_prop(self, bch, obj_uuid, prop_name, new_props):
if new_props[prop_name] is None:
bch.remove(obj_uuid, columns=['prop:' + prop_name])
else:
bch.insert(
obj_uuid,
{'prop:' + prop_name: json.dumps(new_props[prop_name])})
# prop has been accounted for, remove so only new ones remain
del new_props[prop_name]
# end _update_prop
def _add_to_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
bch.insert(obj_uuid,
{'propl:%s:%s' %(prop_name, prop_elem_position):
json.dumps(prop_elem_value)})
# end _add_to_prop_list
def _delete_from_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_position):
bch.remove(obj_uuid,
columns=['propl:%s:%s' %(prop_name, prop_elem_position)])
# end _delete_from_prop_list
def _set_in_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
bch.insert(obj_uuid,
{'propm:%s:%s' %(prop_name, prop_elem_position):
json.dumps(prop_elem_value)})
# end _set_in_prop_map
def _delete_from_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_position):
bch.remove(obj_uuid,
columns=['propm:%s:%s' %(prop_name, prop_elem_position)])
# end _delete_from_prop_map
def _create_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): json.dumps(None)}
bch.insert(parent_uuid, child_col)
parent_col = {'parent:%s:%s' %
(parent_type, parent_uuid): json.dumps(None)}
bch.insert(child_uuid, parent_col)
# end _create_child
def _delete_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): json.dumps(None)}
bch.remove(parent_uuid, columns=[
'children:%s:%s' % (child_type, child_uuid)])
# end _delete_child
def _create_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid,
ref_data):
bch.insert(
obj_uuid, {'ref:%s:%s' %
(ref_obj_type, ref_uuid): json.dumps(ref_data)})
if obj_type == ref_obj_type:
bch.insert(
ref_uuid, {'ref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
else:
bch.insert(
ref_uuid, {'backref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
# end _create_ref
    def _update_ref(self, bch, obj_type, obj_uuid, ref_obj_type, old_ref_uuid,
                    new_ref_infos):
        """Reconcile one existing ref against *new_ref_infos*.

        If the update body did not mention *ref_obj_type* at all, nothing
        changes.  If the old target is absent from the new info, the ref
        (and its reverse column) is removed; otherwise the ref is
        re-written with the new attribute data and consumed from
        *new_ref_infos*, leaving only refs still to be created.
        """
        if ref_obj_type not in new_ref_infos:
            # update body didn't touch this type, nop
            return

        if old_ref_uuid not in new_ref_infos[ref_obj_type]:
            # remove old ref
            bch.remove(obj_uuid, columns=[
                'ref:%s:%s' % (ref_obj_type, old_ref_uuid)])
            if obj_type == ref_obj_type:
                bch.remove(old_ref_uuid, columns=[
                    'ref:%s:%s' % (obj_type, obj_uuid)])
            else:
                bch.remove(old_ref_uuid, columns=[
                    'backref:%s:%s' % (obj_type, obj_uuid)])
        else:
            # retain old ref with new ref attr
            new_ref_data = new_ref_infos[ref_obj_type][old_ref_uuid]
            bch.insert(
                obj_uuid,
                {'ref:%s:%s' %
                 (ref_obj_type, old_ref_uuid): json.dumps(new_ref_data)})
            if obj_type == ref_obj_type:
                bch.insert(
                    old_ref_uuid,
                    {'ref:%s:%s' %
                     (obj_type, obj_uuid): json.dumps(new_ref_data)})
            else:
                bch.insert(
                    old_ref_uuid,
                    {'backref:%s:%s' %
                     (obj_type, obj_uuid): json.dumps(new_ref_data)})
            # uuid has been accounted for, remove so only new ones remain
            del new_ref_infos[ref_obj_type][old_ref_uuid]
    # end _update_ref
def _delete_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid):
send = False
if bch is None:
send = True
bch = self._cassandra_db._obj_uuid_cf.batch()
bch.remove(obj_uuid, columns=['ref:%s:%s' % (ref_obj_type, ref_uuid)])
if obj_type == ref_obj_type:
bch.remove(ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)])
else:
bch.remove(ref_uuid, columns=[
'backref:%s:%s' % (obj_type, obj_uuid)])
if send:
bch.send()
# end _delete_ref
def _update_sandesh_status(self, status, msg=''):
ConnectionState.update(conn_type=ConnType.DATABASE,
name='Cassandra', status=status, message=msg,
server_addrs=self._server_list)
    def _handle_exceptions(self, func):
        """Wrap a pycassa call so connection failures update sandesh state
        and surface as DatabaseUnavailableError.

        Also warns when 'get'/'multiget' are called directly from outside
        this class's own multiget (thrift column-count limitations).
        """
        def wrapper(*args, **kwargs):
            # warn on direct use of get/multiget from any caller other
            # than this class's multiget wrapper
            if (sys._getframe(1).f_code.co_name != 'multiget' and
                func.__name__ in ['get', 'multiget']):
                msg = ("It is not recommended to use 'get' or 'multiget' "
                       "pycassa methods. It's better to use 'xget' or "
                       "'get_range' methods due to thrift limitations")
                self._logger(msg, level=SandeshLevel.SYS_WARN)
            try:
                if self._conn_state != ConnectionStatus.UP:
                    # will set conn_state to UP if successful
                    self._cassandra_init_conn_pools()
                return func(*args, **kwargs)
            except (AllServersUnavailable, MaximumRetryException) as e:
                # report DOWN only on the first failure after being UP
                if self._conn_state != ConnectionStatus.DOWN:
                    self._update_sandesh_status(ConnectionStatus.DOWN)
                    msg = 'Cassandra connection down. Exception in %s' %(
                        str(func))
                    self._logger(msg, level=SandeshLevel.SYS_ERR)
                self._conn_state = ConnectionStatus.DOWN
                raise DatabaseUnavailableError(
                    'Error, %s: %s' %(str(e), utils.detailed_traceback()))
        return wrapper
    # end _handle_exceptions
# Helper routines for cassandra
    def _cassandra_init(self, server_list):
        """Bootstrap cassandra access: wrap pycassa entry points with the
        failure handler, create rw keyspaces, wait for ro keyspaces, and
        open the connection pools.

        NOTE(review): the assignments below monkey-patch the pycassa
        classes globally, affecting every user in this process.
        """
        # 1. Ensure keyspace and schema/CFs exist
        # 2. Read in persisted data and publish to ifmap server
        self._update_sandesh_status(ConnectionStatus.INIT)
        ColumnFamily.get = self._handle_exceptions(ColumnFamily.get)
        ColumnFamily.multiget = self._handle_exceptions(ColumnFamily.multiget)
        ColumnFamily.xget = self._handle_exceptions(ColumnFamily.xget)
        ColumnFamily.get_range = self._handle_exceptions(ColumnFamily.get_range)
        ColumnFamily.insert = self._handle_exceptions(ColumnFamily.insert)
        ColumnFamily.remove = self._handle_exceptions(ColumnFamily.remove)
        Mutator.send = self._handle_exceptions(Mutator.send)
        self.sys_mgr = self._cassandra_system_manager()
        self.existing_keyspaces = self.sys_mgr.list_keyspaces()
        # create the keyspaces this process owns...
        for ks,cf_dict in self._rw_keyspaces.items():
            keyspace = '%s%s' %(self._db_prefix, ks)
            self._cassandra_ensure_keyspace(keyspace, cf_dict)
        # ...and wait for the ones another process is responsible for
        for ks,_ in self._ro_keyspaces.items():
            keyspace = '%s%s' %(self._db_prefix, ks)
            self._cassandra_wait_for_keyspace(keyspace)
        self._cassandra_init_conn_pools()
    # end _cassandra_init
    def _cassandra_system_manager(self):
        """Return a pycassa SystemManager, retrying across servers until
        one connects (blocks indefinitely while cassandra is down)."""
        # Retry till cassandra is up
        server_idx = 0
        connected = False
        while not connected:
            try:
                cass_server = self._server_list[server_idx]
                sys_mgr = SystemManager(cass_server,
                                        credentials=self._credential)
                connected = True
            except Exception:
                # TODO do only for
                # thrift.transport.TTransport.TTransportException
                # round-robin to the next server after a short pause
                server_idx = (server_idx + 1) % self._num_dbnodes
                time.sleep(3)
        return sys_mgr
    # end _cassandra_system_manager
def _cassandra_wait_for_keyspace(self, keyspace):
# Wait for it to be created by another process
while keyspace not in self.existing_keyspaces:
gevent.sleep(1)
self._logger("Waiting for keyspace %s to be created" % keyspace,
level=SandeshLevel.SYS_NOTICE)
self.existing_keyspaces = self.sys_mgr.list_keyspaces()
# end _cassandra_wait_for_keyspace
    def _cassandra_ensure_keyspace(self, keyspace_name, cf_dict):
        """Create (or, under reset_config, recreate) *keyspace_name* and
        every column family listed in *cf_dict*, tolerating
        already-exists races with other api-server processes."""
        if self._reset_config and keyspace_name in self.existing_keyspaces:
            try:
                self.sys_mgr.drop_keyspace(keyspace_name)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                # TODO verify only EEXISTS
                self._logger(str(e), level=SandeshLevel.SYS_NOTICE)
        if (self._reset_config or keyspace_name not in self.existing_keyspaces):
            try:
                # replication factor == number of cassandra nodes
                self.sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY,
                        {'replication_factor': str(self._num_dbnodes)})
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                # TODO verify only EEXISTS
                self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
        gc_grace_sec = CASSANDRA_DEFAULT_GC_GRACE_SECONDS
        for cf_name in cf_dict:
            create_cf_kwargs = cf_dict[cf_name].get('create_cf_args', {})
            try:
                self.sys_mgr.create_column_family(
                    keyspace_name, cf_name,
                    gc_grace_seconds=gc_grace_sec,
                    default_validation_class='UTF8Type',
                    **create_cf_kwargs)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                # TODO verify only EEXISTS
                # the CF already exists: align its settings instead
                self._logger("Info! " + str(e), level=SandeshLevel.SYS_INFO)
                self.sys_mgr.alter_column_family(keyspace_name, cf_name,
                    gc_grace_seconds=gc_grace_sec,
                    default_validation_class='UTF8Type',
                    **create_cf_kwargs)
    # end _cassandra_ensure_keyspace
    def _cassandra_init_conn_pools(self):
        """Open a connection pool per keyspace, build the ColumnFamily
        objects into self._cf_dict, and mark the connection UP."""
        for ks,cf_dict in itertools.chain(self._rw_keyspaces.items(),
                                          self._ro_keyspaces.items()):
            keyspace = '%s%s' %(self._db_prefix, ks)
            pool = pycassa.ConnectionPool(
                keyspace, self._server_list, max_overflow=-1, use_threadlocal=True,
                prefill=True, pool_size=20, pool_timeout=120,
                max_retries=30, timeout=5, credentials=self._credential)
            # quorum reads and writes for consistency across replicas
            rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
            wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
            for cf_name in cf_dict:
                cf_kwargs = cf_dict[cf_name].get('cf_args', {})
                self._cf_dict[cf_name] = ColumnFamily(
                    pool, cf_name, read_consistency_level=rd_consistency,
                    write_consistency_level=wr_consistency,
                    dict_class=dict,
                    **cf_kwargs)
        # advertise the connection as established
        ConnectionState.update(conn_type = ConnType.DATABASE,
            name = 'Cassandra', status = ConnectionStatus.UP, message = '',
            server_addrs = self._server_list)
        self._conn_state = ConnectionStatus.UP
        msg = 'Cassandra connection ESTABLISHED'
        self._logger(msg, level=SandeshLevel.SYS_NOTICE)
    # end _cassandra_init_conn_pools
def _get_resource_class(self, obj_type):
if hasattr(self, '_db_client_mgr'):
return self._db_client_mgr.get_resource_class(obj_type)
cls_name = '%s' % (utils.CamelCase(obj_type))
return getattr(vnc_api, cls_name)
# end _get_resource_class
    def _get_xsd_class(self, xsd_type):
        """Resolve an XSD type name to its generated vnc_api class."""
        return getattr(vnc_api, xsd_type)
    # end _get_xsd_class
    def object_create(self, obj_type, obj_id, obj_dict,
                      uuid_batch=None, fqname_batch=None):
        """Persist a new config object: its parent link, properties,
        references, and fq-name index entry.

        :param obj_type: resource object type (e.g. 'virtual_network')
        :param obj_id: uuid of the new object
        :param obj_dict: object body (fq_name, parent_type, props, refs)
        :param uuid_batch: optional caller-owned batch for the uuid CF;
            when absent a private batch is created and sent here
        :param fqname_batch: optional caller-owned batch for the fq-name CF
        :returns: (True, '') on success, (False, (status, msg)) on error
        """
        obj_class = self._get_resource_class(obj_type)
        if uuid_batch:
            bch = uuid_batch
        else:
            # Gather column values for obj and updates to backrefs
            # in a batch and write it at the end
            bch = self._obj_uuid_cf.batch()
        obj_cols = {}
        obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
        obj_cols['type'] = json.dumps(obj_type)
        if 'parent_type' in obj_dict:
            # non config-root child
            parent_type = obj_dict['parent_type']
            if parent_type not in obj_class.parent_types:
                return False, (400, 'Invalid parent type: %s' % parent_type)
            parent_object_type = \
                self._get_resource_class(parent_type).object_type
            parent_fq_name = obj_dict['fq_name'][:-1]
            obj_cols['parent_type'] = json.dumps(parent_type)
            parent_uuid = self.fq_name_to_uuid(parent_object_type,
                                               parent_fq_name)
            self._create_child(bch, parent_object_type, parent_uuid, obj_type,
                               obj_id)
        # Properties
        for prop_field in obj_class.prop_fields:
            field = obj_dict.get(prop_field)
            # Specifically checking for None
            if field is None:
                continue
            if prop_field == 'id_perms':
                # stamp creation/modification times on the id_perms prop
                field['created'] = datetime.datetime.utcnow().isoformat()
                field['last_modified'] = field['created']
            if prop_field in obj_class.prop_list_fields:
                # store list elements in list order
                # iterate on wrapped element or directly or prop field
                if obj_class.prop_list_field_has_wrappers[prop_field]:
                    wrapper_field = field.keys()[0]
                    list_coll = field[wrapper_field]
                else:
                    list_coll = field
                for i in range(len(list_coll)):
                    self._add_to_prop_list(
                        bch, obj_id, prop_field, list_coll[i], str(i))
            elif prop_field in obj_class.prop_map_fields:
                # iterate on wrapped element or directly or prop field
                if obj_class.prop_map_field_has_wrappers[prop_field]:
                    wrapper_field = field.keys()[0]
                    map_coll = field[wrapper_field]
                else:
                    map_coll = field
                map_key_name = obj_class.prop_map_field_key_names[prop_field]
                for map_elem in map_coll:
                    map_key = map_elem[map_key_name]
                    self._set_in_prop_map(
                        bch, obj_id, prop_field, map_elem, map_key)
            else:
                self._create_prop(bch, obj_id, prop_field, field)
        # References
        # e.g. ref_field = 'network_ipam_refs'
        #      ref_res_type = 'network-ipam'
        #      ref_link_type = 'VnSubnetsType'
        #      is_weakref = False
        for ref_field in obj_class.ref_fields:
            ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
            ref_res_type = ref_fld_types_list[0]
            ref_link_type = ref_fld_types_list[1]
            ref_obj_type = self._get_resource_class(ref_res_type).object_type
            refs = obj_dict.get(ref_field, [])
            for ref in refs:
                ref_uuid = self.fq_name_to_uuid(ref_obj_type, ref['to'])
                ref_attr = ref.get('attr')
                ref_data = {'attr': ref_attr, 'is_weakref': False}
                self._create_ref(bch, obj_type, obj_id, ref_obj_type, ref_uuid,
                                 ref_data)
        bch.insert(obj_id, obj_cols)
        if not uuid_batch:
            bch.send()
        # Update fqname table
        fq_name_str = ':'.join(obj_dict['fq_name'])
        fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_id:
                        json.dumps(None)}
        if fqname_batch:
            fqname_batch.insert(obj_type, fq_name_cols)
        else:
            self._obj_fq_name_cf.insert(obj_type, fq_name_cols)
        return (True, '')
    # end object_create
    def object_read(self, obj_type, obj_uuids, field_names=None):
        """Bulk-read objects of obj_type and return (True, [result_dict...]).

        field_names=None means read/return every field.  Raises NoIdError
        when exactly one uuid was requested and no row exists; with several
        uuids, missing rows are silently skipped.

        Column layout decoded below: 'prop:<name>', 'propl:<name>:<pos>',
        'propm:<name>:<key>', 'parent:<type>:<uuid>', 'children:<type>:<uuid>',
        'ref:<type>:<uuid>', 'backref:<type>:<uuid>'.
        """
        if not obj_uuids:
            return (True, [])
        # if field_names=None, all fields will be read/returned
        obj_class = self._get_resource_class(obj_type)
        ref_fields = obj_class.ref_fields
        backref_fields = obj_class.backref_fields
        children_fields = obj_class.children_fields
        list_fields = obj_class.prop_list_fields
        map_fields = obj_class.prop_map_fields
        # simple (scalar) props = all props minus list/map-valued ones
        prop_fields = obj_class.prop_fields - (list_fields | map_fields)
        # optimize for common case of reading non-backref, non-children fields
        # ignoring columns starting from 'b' and 'c' - significant performance
        # impact in scaled setting. e.g. read of project
        obj_rows = {}
        if (field_names is None or
            set(field_names) & (backref_fields | children_fields)):
            # atleast one backref/children field is needed
            obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                     obj_uuids,
                                     timestamp=True)
        elif not set(field_names) & ref_fields:
            # specific props have been asked fetch exactly those
            columns = set(['type', 'fq_name', 'parent_type'])
            for fname in set(field_names) & prop_fields:
                columns.add('prop:' + fname)
            # 'parent:' range additionally fetches the parent link column
            obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                     obj_uuids,
                                     columns=list(columns),
                                     start='parent:',
                                     finish='parent;',
                                     timestamp=True)
            # list/map-valued props live under their own column prefixes;
            # fetch each requested one as a separate range and merge in
            for fname in set(field_names) & list_fields:
                merge_dict(obj_rows,
                           self.multiget(self._OBJ_UUID_CF_NAME,
                                         obj_uuids,
                                         start='propl:%s:' % fname,
                                         finish='propl:%s;' % fname,
                                         timestamp=True))
            for fname in set(field_names) & map_fields:
                merge_dict(obj_rows,
                           self.multiget(self._OBJ_UUID_CF_NAME,
                                         obj_uuids,
                                         start='propm:%s:' % fname,
                                         finish='propm:%s;' % fname,
                                         timestamp=True))
        else:
            # ignore reading backref + children columns
            obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                     obj_uuids,
                                     start='d',
                                     timestamp=True)
        if not obj_rows:
            if len(obj_uuids) == 1:
                raise NoIdError(obj_uuids[0])
            else:
                return (True, [])
        results = []
        for obj_uuid, obj_cols in obj_rows.items():
            # skip rows whose stored type doesn't match the requested type
            if obj_type != obj_cols.pop('type')[0]:
                continue
            result = {}
            result['uuid'] = obj_uuid
            result['fq_name'] = obj_cols.pop('fq_name')[0]
            for col_name in obj_cols.keys():
                if self._is_parent(col_name):
                    # non config-root child
                    (_, _, parent_uuid) = col_name.split(':')
                    parent_res_type = obj_cols['parent_type'][0]
                    result['parent_type'] = parent_res_type
                    try:
                        result['parent_uuid'] = parent_uuid
                        result['parent_href'] = self._generate_url(parent_res_type,
                                                                   parent_uuid)
                    except NoIdError:
                        err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
                        return (False, err_msg)
                    continue
                if self._is_prop(col_name):
                    (_, prop_name) = col_name.split(':')
                    if ((prop_name not in prop_fields) or
                        (field_names and prop_name not in field_names)):
                        continue
                    result[prop_name] = obj_cols[col_name][0]
                    continue
                if self._is_prop_list(col_name):
                    (_, prop_name, prop_elem_position) = col_name.split(':')
                    if field_names and prop_name not in field_names:
                        continue
                    if obj_class.prop_list_field_has_wrappers[prop_name]:
                        # wrapped lists were stored without the wrapper;
                        # regenerate the wrapper dict around the elements
                        prop_field_types = obj_class.prop_field_types[prop_name]
                        wrapper_type = prop_field_types['xsd_type']
                        wrapper_cls = self._get_xsd_class(wrapper_type)
                        wrapper_field = wrapper_cls.attr_fields[0]
                        if prop_name not in result:
                            result[prop_name] = {wrapper_field: []}
                        result[prop_name][wrapper_field].append(
                            (obj_cols[col_name][0], prop_elem_position))
                    else:
                        if prop_name not in result:
                            result[prop_name] = []
                        # keep (value, position); sorted by position below
                        result[prop_name].append((obj_cols[col_name][0],
                                                 prop_elem_position))
                    continue
                if self._is_prop_map(col_name):
                    (_, prop_name, _) = col_name.split(':')
                    if field_names and prop_name not in field_names:
                        continue
                    if obj_class.prop_map_field_has_wrappers[prop_name]:
                        prop_field_types = obj_class.prop_field_types[prop_name]
                        wrapper_type = prop_field_types['xsd_type']
                        wrapper_cls = self._get_xsd_class(wrapper_type)
                        wrapper_field = wrapper_cls.attr_fields[0]
                        if prop_name not in result:
                            result[prop_name] = {wrapper_field: []}
                        result[prop_name][wrapper_field].append(
                            obj_cols[col_name][0])
                    else:
                        if prop_name not in result:
                            result[prop_name] = []
                        result[prop_name].append(obj_cols[col_name][0])
                    continue
                if self._is_children(col_name):
                    (_, child_type, child_uuid) = col_name.split(':')
                    if field_names and '%ss' %(child_type) not in field_names:
                        continue
                    if child_type+'s' not in children_fields:
                        continue
                    child_tstamp = obj_cols[col_name][1]
                    try:
                        self._read_child(result, obj_uuid, child_type,
                                         child_uuid, child_tstamp)
                    except NoIdError:
                        # stale child link; skip it
                        continue
                    continue
                if self._is_ref(col_name):
                    (_, ref_type, ref_uuid) = col_name.split(':')
                    if ((ref_type+'_refs' not in ref_fields) or
                        (field_names and ref_type + '_refs' not in field_names)):
                        continue
                    self._read_ref(result, obj_uuid, ref_type, ref_uuid,
                                   obj_cols[col_name][0])
                    continue
                if self._is_backref(col_name):
                    (_, back_ref_type, back_ref_uuid) = col_name.split(':')
                    if back_ref_type+'_back_refs' not in backref_fields:
                        continue
                    if (field_names and
                        '%s_back_refs' %(back_ref_type) not in field_names):
                        continue
                    try:
                        self._read_back_ref(result, obj_uuid, back_ref_type,
                                            back_ref_uuid, obj_cols[col_name][0])
                    except NoIdError:
                        continue
                    continue
            # for all column names
            # sort children by creation time
            for child_field in obj_class.children_fields:
                if child_field not in result:
                    continue
                sorted_children = sorted(result[child_field],
                    key = itemgetter('tstamp'))
                # re-write result's children without timestamp
                result[child_field] = sorted_children
                [child.pop('tstamp') for child in result[child_field]]
            # for all children
            # Ordering property lists by position attribute
            for prop_name in (obj_class.prop_list_fields & set(result.keys())):
                if isinstance(result[prop_name], list):
                    result[prop_name] = [el[0] for el in
                                         sorted(result[prop_name],
                                                key=itemgetter(1))]
                elif isinstance(result[prop_name], dict):
                    wrapper, unsorted_list = result[prop_name].popitem()
                    result[prop_name][wrapper] = [el[0] for el in
                                                  sorted(unsorted_list,
                                                         key=itemgetter(1))]
            results.append(result)
        # end for all rows
        return (True, results)
    # end object_read
def object_count_children(self, obj_type, obj_uuid, child_type):
if child_type is None:
return (False, '')
obj_class = self._get_resource_class(obj_type)
obj_uuid_cf = self._obj_uuid_cf
if child_type not in obj_class.children_fields:
return (False,
'%s is not a child type of %s' %(child_type, obj_type))
col_start = 'children:'+child_type[:-1]+':'
col_finish = 'children:'+child_type[:-1]+';'
num_children = obj_uuid_cf.get_count(obj_uuid,
column_start=col_start,
column_finish=col_finish)
return (True, num_children)
# end object_count_children
def update_last_modified(self, bch, obj_uuid, id_perms=None):
if id_perms is None:
id_perms = self.get_one_col(self._OBJ_UUID_CF_NAME,
obj_uuid,
'prop:id_perms')
id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
self._update_prop(bch, obj_uuid, 'id_perms', {'id_perms': id_perms})
# end update_last_modified
    def object_update(self, obj_type, obj_uuid, new_obj_dict,
                      uuid_batch=None):
        """Apply new_obj_dict on top of the stored object; return (True, '').

        Walks the existing columns once: refreshes id-perms' last-modified,
        rewrites changed props, clears old list/map property elements, and
        reconciles refs.  Writes go through uuid_batch when supplied,
        otherwise a private batch is created and sent here.
        """
        obj_class = self._get_resource_class(obj_type)
        # Grab ref-uuids and properties in new version
        new_ref_infos = {}
        # Properties
        new_props = {}
        for prop_field in obj_class.prop_fields:
            if prop_field in new_obj_dict:
                new_props[prop_field] = new_obj_dict[prop_field]
        # References
        # e.g. ref_field = 'network_ipam_refs'
        # ref_type = 'network-ipam'
        # ref_link_type = 'VnSubnetsType'
        # is_weakref = False
        for ref_field in obj_class.ref_fields:
            ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
            ref_res_type = ref_fld_types_list[0]
            ref_link_type = ref_fld_types_list[1]
            is_weakref = ref_fld_types_list[2]
            ref_obj_type = self._get_resource_class(ref_res_type).object_type
            if ref_field in new_obj_dict:
                new_refs = new_obj_dict[ref_field]
                new_ref_infos[ref_obj_type] = {}
                for new_ref in new_refs or []:
                    new_ref_uuid = self.fq_name_to_uuid(ref_obj_type,
                                                        new_ref['to'])
                    new_ref_attr = new_ref.get('attr')
                    new_ref_data = {'attr': new_ref_attr,
                                    'is_weakref': is_weakref}
                    new_ref_infos[ref_obj_type][new_ref_uuid] = new_ref_data
        # Gather column values for obj and updates to backrefs
        # in a batch and write it at the end
        obj_uuid_cf = self._obj_uuid_cf
        if uuid_batch:
            bch = uuid_batch
        else:
            bch = obj_uuid_cf.batch()
        for col_name, col_value in obj_uuid_cf.xget(obj_uuid):
            if self._is_prop(col_name):
                (_, prop_name) = col_name.split(':')
                if prop_name == 'id_perms':
                    # id-perms always has to be updated for last-mod timestamp
                    # get it from request dict(or from db if not in request dict)
                    new_id_perms = new_obj_dict.get(
                        prop_name, json.loads(col_value))
                    self.update_last_modified(bch, obj_uuid, new_id_perms)
                elif prop_name in new_obj_dict:
                    # _update_prop also pops prop_name from new_props so only
                    # brand-new props remain for the create loop below
                    self._update_prop(bch, obj_uuid, prop_name, new_props)
            if self._is_prop_list(col_name):
                (_, prop_name, prop_elem_position) = col_name.split(':')
                if prop_name in new_props:
                    # delete all old values of prop list
                    self._delete_from_prop_list(
                        bch, obj_uuid, prop_name, prop_elem_position)
            if self._is_prop_map(col_name):
                (_, prop_name, prop_elem_position) = col_name.split(':')
                if prop_name in new_props:
                    # delete all old values of prop list
                    self._delete_from_prop_map(
                        bch, obj_uuid, prop_name, prop_elem_position)
            if self._is_ref(col_name):
                (_, ref_type, ref_uuid) = col_name.split(':')
                # _update_ref removes/refreshes and consumes handled entries
                # from new_ref_infos
                self._update_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid,
                                 new_ref_infos)
        # for all column names
        # create new refs
        for ref_type in new_ref_infos.keys():
            for ref_uuid in new_ref_infos[ref_type].keys():
                ref_data = new_ref_infos[ref_type][ref_uuid]
                self._create_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid,
                                 ref_data)
        # create new props
        for prop_name in new_props.keys():
            if prop_name in obj_class.prop_list_fields:
                # store list elements in list order
                # iterate on wrapped element or directly on prop field
                # for wrapped lists, store without the wrapper. regenerate
                # wrapper on read
                if (obj_class.prop_list_field_has_wrappers[prop_name] and
                    new_props[prop_name]):
                    wrapper_field = new_props[prop_name].keys()[0]
                    list_coll = new_props[prop_name][wrapper_field]
                else:
                    list_coll = new_props[prop_name]
                for i in range(len(list_coll)):
                    self._add_to_prop_list(bch, obj_uuid,
                                           prop_name, list_coll[i], str(i))
            elif prop_name in obj_class.prop_map_fields:
                # store map elements in key order
                # iterate on wrapped element or directly on prop field
                # for wrapped lists, store without the wrapper. regenerate
                # wrapper on read
                if (obj_class.prop_map_field_has_wrappers[prop_name] and
                    new_props[prop_name]):
                    wrapper_field = new_props[prop_name].keys()[0]
                    map_coll = new_props[prop_name][wrapper_field]
                else:
                    map_coll = new_props[prop_name]
                map_key_name = obj_class.prop_map_field_key_names[prop_name]
                for map_elem in map_coll:
                    map_key = map_elem[map_key_name]
                    self._set_in_prop_map(bch, obj_uuid,
                                          prop_name, map_elem, map_key)
            else:
                self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
        if not uuid_batch:
            bch.send()
        return (True, '')
    # end object_update
    def object_list(self, obj_type, parent_uuids=None, back_ref_uuids=None,
                    obj_uuids=None, count=False, filters=None):
        """List objects of obj_type as (True, [(fq_name, uuid), ...]).

        Anchors narrow the listing: parent_uuids walks 'children:' columns,
        back_ref_uuids walks 'backref:' columns, obj_uuids selects exact
        rows, and with no anchor the fq-name table is scanned.  filters is
        a {prop-name: [allowed-values]} dict applied to 'prop:' columns.
        With count=True only the number of matches is returned.
        """
        obj_class = self._get_resource_class(obj_type)
        children_fq_names_uuids = []
        def filter_rows(coll_infos, filters=None):
            # keep only uuids whose stored props match every filter value
            if not coll_infos or not filters:
                return coll_infos
            filtered_infos = {}
            columns = ['prop:%s' % filter_key for filter_key in filters]
            rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                 coll_infos.keys(),
                                 columns=columns)
            for obj_uuid, properties in rows.items():
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                full_match = True
                for filter_key, filter_values in filters.items():
                    property = 'prop:%s' % filter_key
                    if (property not in properties or
                        properties[property] not in filter_values):
                        full_match=False
                        break
                if full_match:
                    filtered_infos[obj_uuid] = coll_infos[obj_uuid]
            return filtered_infos
        # end filter_rows
        def get_fq_name_uuid_list(obj_uuids):
            # resolve uuids -> (fq_name, uuid); drop stale ids and wrong types
            ret_list = []
            for obj_uuid in obj_uuids:
                try:
                    if obj_type != self.uuid_to_obj_type(obj_uuid):
                        continue
                    obj_fq_name = self.uuid_to_fq_name(obj_uuid)
                    ret_list.append((obj_fq_name, obj_uuid))
                except NoIdError:
                    pass
            return ret_list
        # end get_fq_name_uuid_list
        if parent_uuids:
            # go from parent to child
            obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                     parent_uuids,
                                     start='children:%s:' % (obj_type),
                                     finish='children:%s;' % (obj_type),
                                     timestamp=True)
            def filter_rows_parent_anchor(sort=False):
                # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
                all_cols = [cols for obj_key in obj_rows.keys()
                            for cols in obj_rows[obj_key].items()]
                all_child_infos = {}
                for col_name, col_val_ts in all_cols:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    child_uuid = col_name.split(':')[2]
                    if obj_uuids and child_uuid not in obj_uuids:
                        continue
                    all_child_infos[child_uuid] = {'uuid': child_uuid,
                                                   'tstamp': col_val_ts[1]}
                filt_child_infos = filter_rows(all_child_infos, filters)
                if not sort:
                    ret_child_infos = filt_child_infos.values()
                else:
                    # creation-time order (column write timestamp)
                    ret_child_infos = sorted(filt_child_infos.values(),
                                             key=itemgetter('tstamp'))
                return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
            # end filter_rows_parent_anchor
            children_fq_names_uuids.extend(filter_rows_parent_anchor(sort=True))
        if back_ref_uuids:
            # go from anchor to backrefs
            col_start = 'backref:%s:' %(obj_type)
            col_fin = 'backref:%s;' %(obj_type)
            obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                     back_ref_uuids,
                                     start='backref:%s:' % (obj_type),
                                     finish='backref:%s;' % (obj_type),
                                     timestamp=True)
            def filter_rows_backref_anchor():
                # flatten to [('backref:<obj-type>:<uuid>', (<val>,<ts>), *]
                all_cols = [cols for obj_key in obj_rows.keys()
                            for cols in obj_rows[obj_key].items()]
                all_backref_infos = {}
                for col_name, col_val_ts in all_cols:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    backref_uuid = col_name.split(':')[2]
                    if obj_uuids and backref_uuid not in obj_uuids:
                        continue
                    all_backref_infos[backref_uuid] = \
                        {'uuid': backref_uuid, 'tstamp': col_val_ts[1]}
                filt_backref_infos = filter_rows(all_backref_infos, filters)
                return get_fq_name_uuid_list(r['uuid'] for r in
                                             filt_backref_infos.values())
            # end filter_rows_backref_anchor
            children_fq_names_uuids.extend(filter_rows_backref_anchor())
        if not parent_uuids and not back_ref_uuids:
            if obj_uuids:
                # exact objects specified
                def filter_rows_object_list():
                    all_obj_infos = {}
                    for obj_uuid in obj_uuids:
                        all_obj_infos[obj_uuid] = None
                    filt_obj_infos = filter_rows(all_obj_infos, filters)
                    return get_fq_name_uuid_list(filt_obj_infos.keys())
                # end filter_rows_object_list
                children_fq_names_uuids.extend(filter_rows_object_list())
            else: # grab all resources of this type
                obj_fq_name_cf = self._obj_fq_name_cf
                cols = obj_fq_name_cf.xget('%s' %(obj_type))
                def filter_rows_no_anchor():
                    all_obj_infos = {}
                    for col_name, _ in cols:
                        # give chance for zk heartbeat/ping
                        gevent.sleep(0)
                        # column name is '<fq-name-str>:<uuid>'
                        col_name_arr = utils.decode_string(col_name).split(':')
                        obj_uuid = col_name_arr[-1]
                        all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
                    filt_obj_infos = filter_rows(all_obj_infos, filters)
                    return filt_obj_infos.values()
                # end filter_rows_no_anchor
                children_fq_names_uuids.extend(filter_rows_no_anchor())
        if count:
            return (True, len(children_fq_names_uuids))
        return (True, children_fq_names_uuids)
    # end object_list
    def object_delete(self, obj_type, obj_uuid):
        """Delete the object row and its graph links; return (True, '').

        Unlinks the object from its parent, removes forward refs (and their
        reverse columns), clears relaxed back-ref links, drops the uuid row
        and finally the fq-name index entry.
        """
        obj_class = self._get_resource_class(obj_type)
        obj_uuid_cf = self._obj_uuid_cf
        fq_name = self.get_one_col(self._OBJ_UUID_CF_NAME,
                                   obj_uuid, 'fq_name')
        bch = obj_uuid_cf.batch()
        # unlink from parent
        col_start = 'parent:'
        col_fin = 'parent;'
        col_name_iter = obj_uuid_cf.xget(
            obj_uuid, column_start=col_start, column_finish=col_fin)
        for (col_name, col_val) in col_name_iter:
            (_, parent_type, parent_uuid) = col_name.split(':')
            self._delete_child(
                bch, parent_type, parent_uuid, obj_type, obj_uuid)
        # remove refs
        col_start = 'ref:'
        col_fin = 'ref;'
        col_name_iter = obj_uuid_cf.xget(
            obj_uuid, column_start=col_start, column_finish=col_fin)
        for (col_name, col_val) in col_name_iter:
            (_, ref_type, ref_uuid) = col_name.split(':')
            self._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)
        # remove link from relaxed back refs
        col_start = 'relaxbackref:'
        col_fin = 'relaxbackref;'
        col_name_iter = obj_uuid_cf.xget(
            obj_uuid, column_start=col_start, column_finish=col_fin)
        for (col_name, col_val) in col_name_iter:
            (_, backref_uuid) = col_name.split(':')
            self._delete_ref(bch, None, backref_uuid, obj_type, obj_uuid)
        # drop the whole object row (props, remaining links, everything)
        bch.remove(obj_uuid)
        bch.send()
        # Update fqname table
        fq_name_str = ':'.join(fq_name)
        fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
        self._obj_fq_name_cf.remove(obj_type, columns = [fq_name_col])
        return (True, '')
    # end object_delete
    def prop_collection_read(self, obj_type, obj_uuid, obj_fields, position):
        """Read list/map property collections; return (True, result).

        result always carries 'id_perms'; each requested list ('propl:') or
        map ('propm:') field maps to [(value, position-or-key), ...].  A
        truthy position restricts the read to that single element.
        """
        obj_class = self._get_resource_class(obj_type)
        result = {}
        # always read-in id-perms for upper-layers to do rbac/visibility
        result['id_perms'] = self.get_one_col(self._OBJ_UUID_CF_NAME,
                                              obj_uuid, 'prop:id_perms')
        # read in prop-list or prop-map fields
        for field in obj_fields:
            if field in obj_class.prop_list_fields:
                prop_pfx = 'propl'
            elif field in obj_class.prop_map_fields:
                prop_pfx = 'propm'
            else:
                # not a collection property; ignore silently
                continue
            if position:
                col_start = '%s:%s:%s' %(prop_pfx, field, position)
                col_end = '%s:%s:%s' %(prop_pfx, field, position)
            else:
                col_start = '%s:%s:' %(prop_pfx, field)
                col_end = '%s:%s;' %(prop_pfx, field)
            obj_cols = self._obj_uuid_cf.xget(obj_uuid,
                                              column_start=col_start,
                                              column_finish=col_end)
            result[field] = []
            for name, value in obj_cols:
                # tuple of col_value, position. result is already sorted
                # lexically by position (necessary only for list property)
                result[field].append((json.loads(value), name.split(':')[-1]))
        return (True, result)
    # end prop_collection_read
def cache_uuid_to_fq_name_add(self, id, fq_name, obj_type):
self._cache_uuid_to_fq_name[id] = (fq_name, obj_type)
# end cache_uuid_to_fq_name_add
def cache_uuid_to_fq_name_del(self, id):
try:
del self._cache_uuid_to_fq_name[id]
except KeyError:
pass
# end cache_uuid_to_fq_name_del
def uuid_to_fq_name(self, id):
try:
return self._cache_uuid_to_fq_name[id][0]
except KeyError:
obj = self.get(self._OBJ_UUID_CF_NAME, id,
columns=['fq_name', 'type'])
if not obj:
raise NoIdError(id)
fq_name = obj['fq_name']
obj_type = obj['type']
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return fq_name
# end uuid_to_fq_name
def uuid_to_obj_type(self, id):
try:
return self._cache_uuid_to_fq_name[id][1]
except KeyError:
obj = self.get(self._OBJ_UUID_CF_NAME, id,
columns=['fq_name', 'type'])
if not obj:
raise NoIdError(id)
fq_name = obj['fq_name']
obj_type = obj['type']
self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
return obj_type
# end uuid_to_obj_type
def fq_name_to_uuid(self, obj_type, fq_name):
fq_name_str = utils.encode_string(':'.join(fq_name))
col_infos = self.get(self._OBJ_FQ_NAME_CF_NAME,
obj_type,
start=fq_name_str + ':',
finish=fq_name_str + ';')
if not col_infos:
raise NoIdError('%s %s' % (obj_type, fq_name_str))
if len(col_infos) > 1:
raise VncError('Multi match %s for %s' % (fq_name_str, obj_type))
return col_infos.popitem()[0].split(':')[-1]
# end fq_name_to_uuid
# return all objects shared with a (share_type, share_id)
def get_shared(self, obj_type, share_id = '', share_type = 'global'):
result = []
column = '%s:%s' % (share_type, share_id)
col_infos = self.get(self._OBJ_SHARED_CF_NAME,
obj_type,
start=column + ':',
finish=column + ';')
if not col_infos:
return None
for (col_name, col_val) in col_infos.items():
# ('*:*:f7963198-08a4-4b96-a02e-41cc66593163', u'7')
obj_uuid = col_name.split(':')[-1]
result.append((obj_uuid, col_val))
return result
# share an object 'obj_id' with <share_type:share_id>
# rwx indicate type of access (sharing) allowed
def set_shared(self, obj_type, obj_id, share_id = '', share_type = 'global', rwx = 7):
col_name = '%s:%s:%s' % (share_type, share_id, obj_id)
self._obj_shared_cf.insert(obj_type, {col_name : json.dumps(rwx)})
# delete share of 'obj_id' object with <share_type:share_id>
def del_shared(self, obj_type, obj_id, share_id = '', share_type = 'global'):
col_name = '%s:%s:%s' % (share_type, share_id, obj_id)
self._obj_shared_cf.remove(obj_type, columns=[col_name])
def _read_child(self, result, obj_uuid, child_obj_type, child_uuid,
child_tstamp):
if '%ss' % (child_obj_type) not in result:
result['%ss' % (child_obj_type)] = []
child_res_type = self._get_resource_class(child_obj_type).resource_type
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['href'] = self._generate_url(child_res_type, child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_obj_type)].append(child_info)
# end _read_child
def _read_ref(self, result, obj_uuid, ref_obj_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_obj_type) not in result:
result['%s_refs' % (ref_obj_type)] = []
ref_res_type = self._get_resource_class(ref_obj_type).resource_type
ref_data = ref_data_json
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
ref_info['attr'] = ref_data
ref_info['href'] = self._generate_url(ref_res_type, ref_uuid)
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_obj_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_obj_type, back_ref_uuid,
back_ref_data_json):
if '%s_back_refs' % (back_ref_obj_type) not in result:
result['%s_back_refs' % (back_ref_obj_type)] = []
back_ref_res_type = self._get_resource_class(back_ref_obj_type).resource_type
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = back_ref_data_json
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['href'] = self._generate_url(back_ref_res_type, back_ref_uuid)
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_obj_type)].append(back_ref_info)
# end _read_back_ref
# src/config/common/vnc_cassandra.py
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import pycassa
from pycassa import ColumnFamily
from pycassa.batch import Mutator
from pycassa.system_manager import SystemManager, SIMPLE_STRATEGY
from pycassa.pool import AllServersUnavailable, MaximumRetryException
import gevent
from vnc_api import vnc_api
from exceptions import NoIdError, DatabaseUnavailableError, VncError
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from sandesh_common.vns.constants import API_SERVER_KEYSPACE_NAME, \
CASSANDRA_DEFAULT_GC_GRACE_SECONDS
import time
from cfgm_common import jsonutils as json
import utils
import datetime
import re
from operator import itemgetter
import itertools
import sys
from collections import Mapping
def merge_dict(orig_dict, new_dict):
    """Recursively merge new_dict into orig_dict (in place) and return it.

    Nested mappings are merged key by key, lists are concatenated, and any
    other value in new_dict overwrites the one in orig_dict.
    """
    # .items() works on both py2 and py3 (original used py2-only iteritems)
    for key, value in new_dict.items():
        if key not in orig_dict:
            orig_dict[key] = value
        elif isinstance(value, Mapping):
            orig_dict[key] = merge_dict(orig_dict.get(key, {}), value)
        elif isinstance(value, list):
            # bug fix: 'orig_dict[key] = orig_dict[key].append(value)' stored
            # None (list.append returns None) and nested the whole list as a
            # single element; concatenate the elements instead
            orig_dict[key] += value
        else:
            orig_dict[key] = value
    return orig_dict
class VncCassandraClient(object):
    """Cassandra (pycassa/thrift) backend for the VNC API-server object store."""
    # Name to ID mapping keyspace + tables
    _UUID_KEYSPACE_NAME = API_SERVER_KEYSPACE_NAME
    # TODO describe layout
    # row key = object uuid; columns: type, fq_name, prop:*, propl:*, propm:*,
    # parent:*, children:*, ref:*, backref:* (see object_read)
    _OBJ_UUID_CF_NAME = 'obj_uuid_table'
    # TODO describe layout
    # row key = object type; column name = '<fq-name-str>:<uuid>'
    _OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'
    # key: object type, column ($type:$id, uuid)
    # where type is entity object is being shared with. Project initially
    _OBJ_SHARED_CF_NAME = 'obj_shared_table'
    # keyspace/CF definitions handed to the cassandra init code
    _UUID_KEYSPACE = {
        _UUID_KEYSPACE_NAME: {
            _OBJ_UUID_CF_NAME: {
                'cf_args': {
                    'autopack_names': False,
                    'autopack_values': False,
                },
            },
            _OBJ_FQ_NAME_CF_NAME: {
                'cf_args': {
                    'autopack_values': False,
                },
            },
            _OBJ_SHARED_CF_NAME: {}
        }
    }
    # upper bound on columns fetched in a single get/multiget call
    _MAX_COL = 10000000
@classmethod
def get_db_info(cls):
db_info = [(cls._UUID_KEYSPACE_NAME, [cls._OBJ_UUID_CF_NAME,
cls._OBJ_FQ_NAME_CF_NAME,
cls._OBJ_SHARED_CF_NAME])]
return db_info
# end get_db_info
@staticmethod
def _is_parent(column_name):
return column_name[:7] == 'parent:'
@staticmethod
def _is_prop(column_name):
return column_name[:5] == 'prop:'
@staticmethod
def _is_prop_list(column_name):
return column_name[:6] == 'propl:'
@staticmethod
def _is_prop_map(column_name):
return column_name[:6] == 'propm:'
@staticmethod
def _is_ref(column_name):
return column_name[:4] == 'ref:'
@staticmethod
def _is_backref(column_name):
return column_name[:8] == 'backref:'
@staticmethod
def _is_children(column_name):
return column_name[:9] == 'children:'
    def __init__(self, server_list, db_prefix, rw_keyspaces, ro_keyspaces,
                 logger, generate_url=None, reset_config=False, credential=None):
        """Connect to cassandra and bind the uuid/fq-name/shared CF handles.

        server_list: cassandra node addresses; db_prefix: optional keyspace
        name prefix; rw/ro_keyspaces: keyspace->CF definitions (the uuid
        keyspace is added read-only if absent from both); logger: callable
        (msg, level=...); generate_url: (res_type, uuid) -> href builder.
        """
        self._reset_config = reset_config
        self._cache_uuid_to_fq_name = {}
        if db_prefix:
            self._db_prefix = '%s_' %(db_prefix)
        else:
            self._db_prefix = ''
        self._server_list = server_list
        self._num_dbnodes = len(self._server_list)
        self._conn_state = ConnectionStatus.INIT
        self._logger = logger
        self._credential = credential
        # if no generate_url is specified, use a dummy function that always
        # returns an empty string
        self._generate_url = generate_url or (lambda x,y: '')
        self._cf_dict = {}
        self._ro_keyspaces = ro_keyspaces or {}
        self._rw_keyspaces = rw_keyspaces or {}
        if ((self._UUID_KEYSPACE_NAME not in self._ro_keyspaces) and
            (self._UUID_KEYSPACE_NAME not in self._rw_keyspaces)):
            self._ro_keyspaces.update(self._UUID_KEYSPACE)
        # creates keyspaces/CFs as needed and fills self._cf_dict
        self._cassandra_init(server_list)
        # NOTE(review): redundant re-initialization (already set above)
        self._cache_uuid_to_fq_name = {}
        self._obj_uuid_cf = self._cf_dict[self._OBJ_UUID_CF_NAME]
        self._obj_fq_name_cf = self._cf_dict[self._OBJ_FQ_NAME_CF_NAME]
        self._obj_shared_cf = self._cf_dict[self._OBJ_SHARED_CF_NAME]
    # end __init__
def get_cf(self, cf_name):
return self._cf_dict.get(cf_name)
#end
def add(self, cf_name, key, value):
try:
self.get_cf(cf_name).insert(key, value)
return True
except:
return False
#end
def get(self, cf_name, key, columns=None, start='', finish=''):
result = self.multiget(cf_name,
[key],
columns=columns,
start=start,
finish=finish)
return result.get(key)
def multiget(self, cf_name, keys, columns=None, start='', finish='',
timestamp=False):
_thrift_limit_size = 10000
results = {}
cf = self.get_cf(cf_name)
if not columns or start or finish:
try:
results = cf.multiget(keys,
column_start=start,
column_finish=finish,
include_timestamp=timestamp,
column_count=self._MAX_COL)
except OverflowError:
for key in keys:
rows = dict(cf.xget(key,
column_start=start,
column_finish=finish,
include_timestamp=timestamp))
if rows:
results[key] = rows
if columns:
max_key_range, _ = divmod(_thrift_limit_size, len(columns))
if max_key_range > 1:
for key_chunk in [keys[x:x+max_key_range] for x in
xrange(0, len(keys), max_key_range)]:
rows = cf.multiget(key_chunk,
columns=columns,
include_timestamp=timestamp,
column_count=self._MAX_COL)
merge_dict(results, rows)
elif max_key_range == 0:
for column_chunk in [columns[x:x+(_thrift_limit_size - 1)] for x in
xrange(0, len(columns), _thrift_limit_size - 1)]:
rows = cf.multiget(keys,
columns=column_chunk,
include_timestamp=timestamp,
column_count=self._MAX_COL)
merge_dict(results, rows)
elif max_key_range == 1:
for key in keys:
try:
cols = cf.get(key,
columns=column_chunk,
include_timestamp=timestamp,
column_count=self._MAX_COL)
except pycassa.NotFoundException:
continue
results.setdefault(key, {}).update(cols)
for key in results:
for col, val in results[key].items():
try:
if timestamp:
results[key][col] = (json.loads(val[0]), val[1])
else:
results[key][col] = json.loads(val)
except ValueError as e:
msg = ("Cannot json load the value of cf: %s, key:%s "
"(error: %s). Use it as is: %s" %
(cf_name, key, str(e),
val if not timestamp else val[0]))
self._logger(msg, level=SandeshLevel.SYS_WARN)
results[key][col] = val
return results
def delete(self, cf_name, key):
try:
self.get_cf(cf_name).remove(key)
return True
except:
return False
#end
def get_range(self, cf_name):
try:
return self.get_cf(cf_name).get_range(column_count=100000)
except:
return None
#end
def get_one_col(self, cf_name, key, column):
col = self.multiget(cf_name, [key], columns=[column])
if key not in col:
raise NoIdError(key)
elif len(col[key]) > 1:
raise VncError('Multi match %s for %s' % (column, key))
return col[key][column]
def _create_prop(self, bch, obj_uuid, prop_name, prop_val):
bch.insert(obj_uuid, {'prop:%s' % (prop_name): json.dumps(prop_val)})
# end _create_prop
def _update_prop(self, bch, obj_uuid, prop_name, new_props):
if new_props[prop_name] is None:
bch.remove(obj_uuid, columns=['prop:' + prop_name])
else:
bch.insert(
obj_uuid,
{'prop:' + prop_name: json.dumps(new_props[prop_name])})
# prop has been accounted for, remove so only new ones remain
del new_props[prop_name]
# end _update_prop
def _add_to_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
bch.insert(obj_uuid,
{'propl:%s:%s' %(prop_name, prop_elem_position):
json.dumps(prop_elem_value)})
# end _add_to_prop_list
def _delete_from_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_position):
bch.remove(obj_uuid,
columns=['propl:%s:%s' %(prop_name, prop_elem_position)])
# end _delete_from_prop_list
def _set_in_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
bch.insert(obj_uuid,
{'propm:%s:%s' %(prop_name, prop_elem_position):
json.dumps(prop_elem_value)})
# end _set_in_prop_map
def _delete_from_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_position):
bch.remove(obj_uuid,
columns=['propm:%s:%s' %(prop_name, prop_elem_position)])
# end _delete_from_prop_map
def _create_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): json.dumps(None)}
bch.insert(parent_uuid, child_col)
parent_col = {'parent:%s:%s' %
(parent_type, parent_uuid): json.dumps(None)}
bch.insert(child_uuid, parent_col)
# end _create_child
def _delete_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): json.dumps(None)}
bch.remove(parent_uuid, columns=[
'children:%s:%s' % (child_type, child_uuid)])
# end _delete_child
def _create_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid,
ref_data):
bch.insert(
obj_uuid, {'ref:%s:%s' %
(ref_obj_type, ref_uuid): json.dumps(ref_data)})
if obj_type == ref_obj_type:
bch.insert(
ref_uuid, {'ref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
else:
bch.insert(
ref_uuid, {'backref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
# end _create_ref
    def _update_ref(self, bch, obj_type, obj_uuid, ref_obj_type, old_ref_uuid,
                    new_ref_infos):
        """Reconcile one existing ref column against the requested ref set.

        If the update touched this ref type but no longer lists old_ref_uuid,
        both directions of the old ref are removed; if it is still listed,
        both directions are rewritten with the new attr and the entry is
        consumed from new_ref_infos (whatever remains is created afresh by
        the caller).
        """
        if ref_obj_type not in new_ref_infos:
            # update body didn't touch this type, nop
            return
        if old_ref_uuid not in new_ref_infos[ref_obj_type]:
            # remove old ref
            bch.remove(obj_uuid, columns=[
                'ref:%s:%s' % (ref_obj_type, old_ref_uuid)])
            if obj_type == ref_obj_type:
                # same-type links store 'ref:' on both ends
                bch.remove(old_ref_uuid, columns=[
                    'ref:%s:%s' % (obj_type, obj_uuid)])
            else:
                bch.remove(old_ref_uuid, columns=[
                    'backref:%s:%s' % (obj_type, obj_uuid)])
        else:
            # retain old ref with new ref attr
            new_ref_data = new_ref_infos[ref_obj_type][old_ref_uuid]
            bch.insert(
                obj_uuid,
                {'ref:%s:%s' %
                 (ref_obj_type, old_ref_uuid): json.dumps(new_ref_data)})
            if obj_type == ref_obj_type:
                bch.insert(
                    old_ref_uuid,
                    {'ref:%s:%s' %
                     (obj_type, obj_uuid): json.dumps(new_ref_data)})
            else:
                bch.insert(
                    old_ref_uuid,
                    {'backref:%s:%s' %
                     (obj_type, obj_uuid): json.dumps(new_ref_data)})
            # uuid has been accounted for, remove so only new ones remain
            del new_ref_infos[ref_obj_type][old_ref_uuid]
    # end _update_ref
def _delete_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid):
send = False
if bch is None:
send = True
bch = self._cassandra_db._obj_uuid_cf.batch()
bch.remove(obj_uuid, columns=['ref:%s:%s' % (ref_obj_type, ref_uuid)])
if obj_type == ref_obj_type:
bch.remove(ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)])
else:
bch.remove(ref_uuid, columns=[
'backref:%s:%s' % (obj_type, obj_uuid)])
if send:
bch.send()
# end _delete_ref
def _update_sandesh_status(self, status, msg=''):
    """Publish the Cassandra connection state to the sandesh
    ConnectionState introspect machinery so operators can monitor DB
    connectivity."""
    ConnectionState.update(conn_type=ConnType.DATABASE,
        name='Cassandra', status=status, message=msg,
        server_addrs=self._server_list)
def _handle_exceptions(self, func):
    """Wrap a pycassa callable with connection-state bookkeeping.

    Before each call the connection pools are (re)initialised if the
    state is not UP.  AllServersUnavailable/MaximumRetryException mark
    the state DOWN and are converted to DatabaseUnavailableError.
    """
    def wrapper(*args, **kwargs):
        # Discourage direct get/multiget (thrift limitations) unless the
        # caller one frame up is our own multiget helper.
        if (sys._getframe(1).f_code.co_name != 'multiget' and
            func.__name__ in ['get', 'multiget']):
            msg = ("It is not recommended to use 'get' or 'multiget' "
                   "pycassa methods. It's better to use 'xget' or "
                   "'get_range' methods due to thrift limitations")
            self._logger(msg, level=SandeshLevel.SYS_WARN)
        try:
            if self._conn_state != ConnectionStatus.UP:
                # will set conn_state to UP if successful
                self._cassandra_init_conn_pools()
            return func(*args, **kwargs)
        except (AllServersUnavailable, MaximumRetryException) as e:
            # Log/report the DOWN transition only once per outage.
            if self._conn_state != ConnectionStatus.DOWN:
                self._update_sandesh_status(ConnectionStatus.DOWN)
                msg = 'Cassandra connection down. Exception in %s' %(
                    str(func))
                self._logger(msg, level=SandeshLevel.SYS_ERR)
            self._conn_state = ConnectionStatus.DOWN
            raise DatabaseUnavailableError(
                'Error, %s: %s' %(str(e), utils.detailed_traceback()))
    return wrapper
# end _handle_exceptions
# Helper routines for cassandra
def _cassandra_init(self, server_list):
    """Bootstrap Cassandra access: wrap pycassa entry points with the
    exception handler, ensure keyspaces/CFs exist, open connection pools.

    NOTE(review): *server_list* appears unused here - connections use
    self._server_list; confirm against callers before relying on it.
    """
    # 1. Ensure keyspace and schema/CFs exist
    # 2. Read in persisted data and publish to ifmap server
    self._update_sandesh_status(ConnectionStatus.INIT)
    # Monkey-patch the pycassa classes so every call is routed through
    # _handle_exceptions (connection tracking + error translation).
    ColumnFamily.get = self._handle_exceptions(ColumnFamily.get)
    ColumnFamily.multiget = self._handle_exceptions(ColumnFamily.multiget)
    ColumnFamily.xget = self._handle_exceptions(ColumnFamily.xget)
    ColumnFamily.get_range = self._handle_exceptions(ColumnFamily.get_range)
    ColumnFamily.insert = self._handle_exceptions(ColumnFamily.insert)
    ColumnFamily.remove = self._handle_exceptions(ColumnFamily.remove)
    Mutator.send = self._handle_exceptions(Mutator.send)
    self.sys_mgr = self._cassandra_system_manager()
    self.existing_keyspaces = self.sys_mgr.list_keyspaces()
    # Read-write keyspaces are created/reset by this process ...
    for ks,cf_dict in self._rw_keyspaces.items():
        keyspace = '%s%s' %(self._db_prefix, ks)
        self._cassandra_ensure_keyspace(keyspace, cf_dict)
    # ... read-only keyspaces are owned elsewhere, so just wait for them.
    for ks,_ in self._ro_keyspaces.items():
        keyspace = '%s%s' %(self._db_prefix, ks)
        self._cassandra_wait_for_keyspace(keyspace)
    self._cassandra_init_conn_pools()
# end _cassandra_init
def _cassandra_system_manager(self):
    """Return a pycassa SystemManager, retrying servers round-robin
    (3s between attempts) until one accepts a connection."""
    # Retry till cassandra is up
    server_idx = 0
    connected = False
    while not connected:
        try:
            cass_server = self._server_list[server_idx]
            sys_mgr = SystemManager(cass_server,
                                    credentials=self._credential)
            connected = True
        except Exception:
            # TODO do only for
            # thrift.transport.TTransport.TTransportException
            server_idx = (server_idx + 1) % self._num_dbnodes
            time.sleep(3)
    return sys_mgr
# end _cassandra_system_manager
def _cassandra_wait_for_keyspace(self, keyspace):
    """Block (gevent-cooperatively, 1s poll) until *keyspace* appears.

    Used for read-only keyspaces that a different process is expected
    to create.
    """
    # Wait for it to be created by another process
    while keyspace not in self.existing_keyspaces:
        gevent.sleep(1)
        self._logger("Waiting for keyspace %s to be created" % keyspace,
                     level=SandeshLevel.SYS_NOTICE)
        self.existing_keyspaces = self.sys_mgr.list_keyspaces()
# end _cassandra_wait_for_keyspace
def _cassandra_ensure_keyspace(self, keyspace_name, cf_dict):
    """Create (or reset, if configured) a keyspace and its column
    families; pre-existing CFs are altered so their gc_grace and
    validation settings converge."""
    if self._reset_config and keyspace_name in self.existing_keyspaces:
        try:
            self.sys_mgr.drop_keyspace(keyspace_name)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            # TODO verify only EEXISTS
            self._logger(str(e), level=SandeshLevel.SYS_NOTICE)
    if (self._reset_config or keyspace_name not in self.existing_keyspaces):
        try:
            # replication factor = number of configured DB nodes
            self.sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY,
                {'replication_factor': str(self._num_dbnodes)})
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            # TODO verify only EEXISTS
            self._logger("Warning! " + str(e), level=SandeshLevel.SYS_WARN)
    gc_grace_sec = CASSANDRA_DEFAULT_GC_GRACE_SECONDS
    for cf_name in cf_dict:
        create_cf_kwargs = cf_dict[cf_name].get('create_cf_args', {})
        try:
            self.sys_mgr.create_column_family(
                keyspace_name, cf_name,
                gc_grace_seconds=gc_grace_sec,
                default_validation_class='UTF8Type',
                **create_cf_kwargs)
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            # TODO verify only EEXISTS
            self._logger("Info! " + str(e), level=SandeshLevel.SYS_INFO)
            # CF already exists - align its settings instead of creating
            self.sys_mgr.alter_column_family(keyspace_name, cf_name,
                gc_grace_seconds=gc_grace_sec,
                default_validation_class='UTF8Type',
                **create_cf_kwargs)
# end _cassandra_ensure_keyspace
def _cassandra_init_conn_pools(self):
    """Open one pycassa ConnectionPool per keyspace, wrap every
    configured column family with QUORUM read/write consistency, and
    mark the connection UP."""
    for ks,cf_dict in itertools.chain(self._rw_keyspaces.items(),
                                      self._ro_keyspaces.items()):
        keyspace = '%s%s' %(self._db_prefix, ks)
        pool = pycassa.ConnectionPool(
            keyspace, self._server_list, max_overflow=-1, use_threadlocal=True,
            prefill=True, pool_size=20, pool_timeout=120,
            max_retries=30, timeout=5, credentials=self._credential)
        rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        for cf_name in cf_dict:
            cf_kwargs = cf_dict[cf_name].get('cf_args', {})
            # cache the wrapped CF handle for lookup by name
            self._cf_dict[cf_name] = ColumnFamily(
                pool, cf_name, read_consistency_level=rd_consistency,
                write_consistency_level=wr_consistency,
                dict_class=dict,
                **cf_kwargs)
    ConnectionState.update(conn_type = ConnType.DATABASE,
        name = 'Cassandra', status = ConnectionStatus.UP, message = '',
        server_addrs = self._server_list)
    self._conn_state = ConnectionStatus.UP
    msg = 'Cassandra connection ESTABLISHED'
    self._logger(msg, level=SandeshLevel.SYS_NOTICE)
# end _cassandra_init_conn_pools
def _get_resource_class(self, obj_type):
if hasattr(self, '_db_client_mgr'):
return self._db_client_mgr.get_resource_class(obj_type)
cls_name = '%s' % (utils.CamelCase(obj_type))
return getattr(vnc_api, cls_name)
# end _get_resource_class
def _get_xsd_class(self, xsd_type):
    """Return the generated XSD wrapper class named *xsd_type* from the
    vnc_api module."""
    return getattr(vnc_api, xsd_type)
# end _get_xsd_class
def object_create(self, obj_type, obj_id, obj_dict,
                  uuid_batch=None, fqname_batch=None):
    """Persist a new object into the obj_uuid and obj_fq_name tables.

    :param uuid_batch: optional caller-owned batch for the uuid table;
        when absent a private batch is created and flushed here.
    :param fqname_batch: optional caller-owned batch for the fq_name
        index table.
    :returns: (True, '') on success, or (False, (400, msg)) for an
        invalid parent type.
    """
    obj_class = self._get_resource_class(obj_type)
    if uuid_batch:
        bch = uuid_batch
    else:
        # Gather column values for obj and updates to backrefs
        # in a batch and write it at the end
        bch = self._obj_uuid_cf.batch()
    obj_cols = {}
    obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
    obj_cols['type'] = json.dumps(obj_type)
    if 'parent_type' in obj_dict:
        # non config-root child
        parent_type = obj_dict['parent_type']
        if parent_type not in obj_class.parent_types:
            return False, (400, 'Invalid parent type: %s' % parent_type)
        parent_object_type = \
            self._get_resource_class(parent_type).object_type
        # parent fq_name is the object's fq_name minus its own name
        parent_fq_name = obj_dict['fq_name'][:-1]
        obj_cols['parent_type'] = json.dumps(parent_type)
        parent_uuid = self.fq_name_to_uuid(parent_object_type,
                                           parent_fq_name)
        self._create_child(bch, parent_object_type, parent_uuid, obj_type,
                           obj_id)
    # Properties
    for prop_field in obj_class.prop_fields:
        field = obj_dict.get(prop_field)
        # Specifically checking for None
        if field is None:
            continue
        if prop_field == 'id_perms':
            # creation timestamps are stamped server-side
            field['created'] = datetime.datetime.utcnow().isoformat()
            field['last_modified'] = field['created']
        if prop_field in obj_class.prop_list_fields:
            # store list elements in list order
            # iterate on wrapped element or directly or prop field
            if obj_class.prop_list_field_has_wrappers[prop_field]:
                # NOTE(review): dict.keys()[0] is a Python 2 idiom; it
                # raises TypeError on Python 3.
                wrapper_field = field.keys()[0]
                list_coll = field[wrapper_field]
            else:
                list_coll = field
            for i in range(len(list_coll)):
                self._add_to_prop_list(
                    bch, obj_id, prop_field, list_coll[i], str(i))
        elif prop_field in obj_class.prop_map_fields:
            # iterate on wrapped element or directly or prop field
            if obj_class.prop_map_field_has_wrappers[prop_field]:
                wrapper_field = field.keys()[0]
                map_coll = field[wrapper_field]
            else:
                map_coll = field
            map_key_name = obj_class.prop_map_field_key_names[prop_field]
            for map_elem in map_coll:
                map_key = map_elem[map_key_name]
                self._set_in_prop_map(
                    bch, obj_id, prop_field, map_elem, map_key)
        else:
            self._create_prop(bch, obj_id, prop_field, field)
    # References
    # e.g. ref_field = 'network_ipam_refs'
    #      ref_res_type = 'network-ipam'
    #      ref_link_type = 'VnSubnetsType'
    #      is_weakref = False
    for ref_field in obj_class.ref_fields:
        ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
        ref_res_type = ref_fld_types_list[0]
        ref_link_type = ref_fld_types_list[1]
        ref_obj_type = self._get_resource_class(ref_res_type).object_type
        refs = obj_dict.get(ref_field, [])
        for ref in refs:
            ref_uuid = self.fq_name_to_uuid(ref_obj_type, ref['to'])
            ref_attr = ref.get('attr')
            ref_data = {'attr': ref_attr, 'is_weakref': False}
            self._create_ref(bch, obj_type, obj_id, ref_obj_type, ref_uuid,
                             ref_data)
    bch.insert(obj_id, obj_cols)
    if not uuid_batch:
        bch.send()
    # Update fqname table
    fq_name_str = ':'.join(obj_dict['fq_name'])
    fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_id:
                    json.dumps(None)}
    if fqname_batch:
        fqname_batch.insert(obj_type, fq_name_cols)
    else:
        self._obj_fq_name_cf.insert(obj_type, fq_name_cols)
    return (True, '')
# end object_create
def object_read(self, obj_type, obj_uuids, field_names=None):
    """Bulk-read objects of *obj_type*; returns (True, [obj_dict, ...]).

    When field_names is None every stored field is returned; otherwise
    the column fetches are narrowed so backref/children columns (and
    unneeded props) are never pulled from Cassandra.  Raises NoIdError
    when a single requested uuid does not exist.
    """
    if not obj_uuids:
        return (True, [])
    # if field_names=None, all fields will be read/returned
    obj_class = self._get_resource_class(obj_type)
    ref_fields = obj_class.ref_fields
    backref_fields = obj_class.backref_fields
    children_fields = obj_class.children_fields
    list_fields = obj_class.prop_list_fields
    map_fields = obj_class.prop_map_fields
    prop_fields = obj_class.prop_fields - (list_fields | map_fields)
    # optimize for common case of reading non-backref, non-children fields
    # ignoring columns starting from 'b' and 'c' - significant performance
    # impact in scaled setting. e.g. read of project
    obj_rows = {}
    if (field_names is None or
        set(field_names) & (backref_fields | children_fields)):
        # atleast one backref/children field is needed
        obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                 obj_uuids,
                                 timestamp=True)
    elif not set(field_names) & ref_fields:
        # specific props have been asked fetch exactly those
        # NOTE(review): assumes the multiget helper merges the explicit
        # columns with the parent: range fetch - confirm its semantics.
        columns = set(['type', 'fq_name', 'parent_type'])
        for fname in set(field_names) & prop_fields:
            columns.add('prop:' + fname)
        obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                 obj_uuids,
                                 columns=list(columns),
                                 start='parent:',
                                 finish='parent;',
                                 timestamp=True)
        for fname in set(field_names) & list_fields:
            merge_dict(obj_rows,
                       self.multiget(self._OBJ_UUID_CF_NAME,
                                     obj_uuids,
                                     start='propl:%s:' % fname,
                                     finish='propl:%s;' % fname,
                                     timestamp=True))
        for fname in set(field_names) & map_fields:
            merge_dict(obj_rows,
                       self.multiget(self._OBJ_UUID_CF_NAME,
                                     obj_uuids,
                                     start='propm:%s:' % fname,
                                     finish='propm:%s;' % fname,
                                     timestamp=True))
    else:
        # ignore reading backref + children columns
        # (all remaining column prefixes sort at/after 'd')
        obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                 obj_uuids,
                                 start='d',
                                 timestamp=True)
    if not obj_rows:
        if len(obj_uuids) == 1:
            raise NoIdError(obj_uuids[0])
        else:
            return (True, [])
    results = []
    for obj_uuid, obj_cols in obj_rows.items():
        # skip rows whose stored type does not match the requested type
        if obj_type != obj_cols.pop('type')[0]:
            continue
        result = {}
        result['uuid'] = obj_uuid
        result['fq_name'] = obj_cols.pop('fq_name')[0]
        # Dispatch each remaining column by its name prefix.
        for col_name in obj_cols.keys():
            if self._is_parent(col_name):
                # non config-root child
                (_, _, parent_uuid) = col_name.split(':')
                parent_res_type = obj_cols['parent_type'][0]
                result['parent_type'] = parent_res_type
                try:
                    result['parent_uuid'] = parent_uuid
                    result['parent_href'] = self._generate_url(parent_res_type,
                                                               parent_uuid)
                except NoIdError:
                    err_msg = 'Unknown uuid for parent ' + result['fq_name'][-2]
                    return (False, err_msg)
                continue
            if self._is_prop(col_name):
                (_, prop_name) = col_name.split(':')
                if ((prop_name not in prop_fields) or
                    (field_names and prop_name not in field_names)):
                    continue
                result[prop_name] = obj_cols[col_name][0]
                continue
            if self._is_prop_list(col_name):
                (_, prop_name, prop_elem_position) = col_name.split(':')
                if field_names and prop_name not in field_names:
                    continue
                if obj_class.prop_list_field_has_wrappers[prop_name]:
                    # wrapped lists are stored unwrapped; regenerate the
                    # wrapper element here from the xsd class
                    prop_field_types = obj_class.prop_field_types[prop_name]
                    wrapper_type = prop_field_types['xsd_type']
                    wrapper_cls = self._get_xsd_class(wrapper_type)
                    wrapper_field = wrapper_cls.attr_fields[0]
                    if prop_name not in result:
                        result[prop_name] = {wrapper_field: []}
                    # keep (value, position); sorted by position below
                    result[prop_name][wrapper_field].append(
                        (obj_cols[col_name][0], prop_elem_position))
                else:
                    if prop_name not in result:
                        result[prop_name] = []
                    result[prop_name].append((obj_cols[col_name][0],
                                              prop_elem_position))
                continue
            if self._is_prop_map(col_name):
                (_, prop_name, _) = col_name.split(':')
                if field_names and prop_name not in field_names:
                    continue
                if obj_class.prop_map_field_has_wrappers[prop_name]:
                    prop_field_types = obj_class.prop_field_types[prop_name]
                    wrapper_type = prop_field_types['xsd_type']
                    wrapper_cls = self._get_xsd_class(wrapper_type)
                    wrapper_field = wrapper_cls.attr_fields[0]
                    if prop_name not in result:
                        result[prop_name] = {wrapper_field: []}
                    result[prop_name][wrapper_field].append(
                        obj_cols[col_name][0])
                else:
                    if prop_name not in result:
                        result[prop_name] = []
                    result[prop_name].append(obj_cols[col_name][0])
                continue
            if self._is_children(col_name):
                (_, child_type, child_uuid) = col_name.split(':')
                if field_names and '%ss' %(child_type) not in field_names:
                    continue
                if child_type+'s' not in children_fields:
                    continue
                child_tstamp = obj_cols[col_name][1]
                try:
                    self._read_child(result, obj_uuid, child_type,
                                     child_uuid, child_tstamp)
                except NoIdError:
                    # stale child column; skip it
                    continue
                continue
            if self._is_ref(col_name):
                (_, ref_type, ref_uuid) = col_name.split(':')
                if ((ref_type+'_refs' not in ref_fields) or
                    (field_names and ref_type + '_refs' not in field_names)):
                    continue
                self._read_ref(result, obj_uuid, ref_type, ref_uuid,
                               obj_cols[col_name][0])
                continue
            if self._is_backref(col_name):
                (_, back_ref_type, back_ref_uuid) = col_name.split(':')
                if back_ref_type+'_back_refs' not in backref_fields:
                    continue
                if (field_names and
                    '%s_back_refs' %(back_ref_type) not in field_names):
                    continue
                try:
                    self._read_back_ref(result, obj_uuid, back_ref_type,
                                        back_ref_uuid, obj_cols[col_name][0])
                except NoIdError:
                    continue
                continue
        # for all column names
        # sort children by creation time
        for child_field in obj_class.children_fields:
            if child_field not in result:
                continue
            sorted_children = sorted(result[child_field],
                key = itemgetter('tstamp'))
            # re-write result's children without timestamp
            result[child_field] = sorted_children
            [child.pop('tstamp') for child in result[child_field]]
        # for all children
        # Ordering property lists by position attribute
        for prop_name in (obj_class.prop_list_fields & set(result.keys())):
            if isinstance(result[prop_name], list):
                result[prop_name] = [el[0] for el in
                                     sorted(result[prop_name],
                                            key=itemgetter(1))]
            elif isinstance(result[prop_name], dict):
                # wrapped list: sort the single wrapped element list
                wrapper, unsorted_list = result[prop_name].popitem()
                result[prop_name][wrapper] = [el[0] for el in
                                              sorted(unsorted_list,
                                                     key=itemgetter(1))]
        results.append(result)
    # end for all rows
    return (True, results)
# end object_read
def object_count_children(self, obj_type, obj_uuid, child_type):
    """Count children of *obj_uuid* in the child field *child_type*.

    Returns (True, count) on success, or (False, error-msg) when
    child_type is missing or not a child field of obj_type.
    """
    if child_type is None:
        return (False, '')
    obj_class = self._get_resource_class(obj_type)
    if child_type not in obj_class.children_fields:
        return (False,
                '%s is not a child type of %s' %(child_type, obj_type))
    # children columns are keyed by the singular type name
    prefix = 'children:' + child_type[:-1]
    num_children = self._obj_uuid_cf.get_count(obj_uuid,
                                               column_start=prefix + ':',
                                               column_finish=prefix + ';')
    return (True, num_children)
# end object_count_children
def update_last_modified(self, bch, obj_uuid, id_perms=None):
    """Stamp id_perms.last_modified with the current UTC time.

    Fetches the stored id_perms when the caller does not supply one,
    then queues the updated property on batch *bch*.
    """
    if id_perms is None:
        id_perms = self.get_one_col(
            self._OBJ_UUID_CF_NAME, obj_uuid, 'prop:id_perms')
    now = datetime.datetime.utcnow().isoformat()
    id_perms['last_modified'] = now
    self._update_prop(bch, obj_uuid, 'id_perms', {'id_perms': id_perms})
# end update_last_modified
def object_update(self, obj_type, obj_uuid, new_obj_dict,
                  uuid_batch=None):
    """Apply *new_obj_dict* onto the stored object *obj_uuid*.

    The existing columns are walked once: props present in the update
    are rewritten, old list/map property elements are deleted so they
    can be re-populated, and existing refs are reconciled through
    _update_ref.  Whatever remains in new_ref_infos afterwards is a
    brand new ref and is created.  Returns (True, '').
    """
    obj_class = self._get_resource_class(obj_type)
    # Grab ref-uuids and properties in new version
    new_ref_infos = {}
    # Properties
    new_props = {}
    for prop_field in obj_class.prop_fields:
        if prop_field in new_obj_dict:
            new_props[prop_field] = new_obj_dict[prop_field]
    # References
    # e.g. ref_field = 'network_ipam_refs'
    #      ref_type = 'network-ipam'
    #      ref_link_type = 'VnSubnetsType'
    #      is_weakref = False
    for ref_field in obj_class.ref_fields:
        ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
        ref_res_type = ref_fld_types_list[0]
        ref_link_type = ref_fld_types_list[1]
        is_weakref = ref_fld_types_list[2]
        ref_obj_type = self._get_resource_class(ref_res_type).object_type
        if ref_field in new_obj_dict:
            new_refs = new_obj_dict[ref_field]
            new_ref_infos[ref_obj_type] = {}
            for new_ref in new_refs or []:
                new_ref_uuid = self.fq_name_to_uuid(ref_obj_type,
                                                    new_ref['to'])
                new_ref_attr = new_ref.get('attr')
                new_ref_data = {'attr': new_ref_attr,
                                'is_weakref': is_weakref}
                new_ref_infos[ref_obj_type][new_ref_uuid] = new_ref_data
    # Gather column values for obj and updates to backrefs
    # in a batch and write it at the end
    obj_uuid_cf = self._obj_uuid_cf
    if uuid_batch:
        bch = uuid_batch
    else:
        bch = obj_uuid_cf.batch()
    for col_name, col_value in obj_uuid_cf.xget(obj_uuid):
        if self._is_prop(col_name):
            (_, prop_name) = col_name.split(':')
            if prop_name == 'id_perms':
                # id-perms always has to be updated for last-mod timestamp
                # get it from request dict(or from db if not in request dict)
                new_id_perms = new_obj_dict.get(
                    prop_name, json.loads(col_value))
                self.update_last_modified(bch, obj_uuid, new_id_perms)
            elif prop_name in new_obj_dict:
                self._update_prop(bch, obj_uuid, prop_name, new_props)
        if self._is_prop_list(col_name):
            (_, prop_name, prop_elem_position) = col_name.split(':')
            if prop_name in new_props:
                # delete all old values of prop list
                self._delete_from_prop_list(
                    bch, obj_uuid, prop_name, prop_elem_position)
        if self._is_prop_map(col_name):
            (_, prop_name, prop_elem_position) = col_name.split(':')
            if prop_name in new_props:
                # delete all old values of prop list
                self._delete_from_prop_map(
                    bch, obj_uuid, prop_name, prop_elem_position)
        if self._is_ref(col_name):
            (_, ref_type, ref_uuid) = col_name.split(':')
            self._update_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid,
                             new_ref_infos)
    # for all column names
    # create new refs
    for ref_type in new_ref_infos.keys():
        for ref_uuid in new_ref_infos[ref_type].keys():
            ref_data = new_ref_infos[ref_type][ref_uuid]
            self._create_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid,
                             ref_data)
    # create new props
    for prop_name in new_props.keys():
        if prop_name in obj_class.prop_list_fields:
            # store list elements in list order
            # iterate on wrapped element or directly on prop field
            # for wrapped lists, store without the wrapper. regenerate
            # wrapper on read
            if (obj_class.prop_list_field_has_wrappers[prop_name] and
                new_props[prop_name]):
                # NOTE(review): keys()[0] relies on Python 2 dict.keys()
                # returning a list; it raises TypeError on Python 3.
                wrapper_field = new_props[prop_name].keys()[0]
                list_coll = new_props[prop_name][wrapper_field]
            else:
                list_coll = new_props[prop_name]
            for i in range(len(list_coll)):
                self._add_to_prop_list(bch, obj_uuid,
                                       prop_name, list_coll[i], str(i))
        elif prop_name in obj_class.prop_map_fields:
            # store map elements in key order
            # iterate on wrapped element or directly on prop field
            # for wrapped lists, store without the wrapper. regenerate
            # wrapper on read
            if (obj_class.prop_map_field_has_wrappers[prop_name] and
                new_props[prop_name]):
                wrapper_field = new_props[prop_name].keys()[0]
                map_coll = new_props[prop_name][wrapper_field]
            else:
                map_coll = new_props[prop_name]
            map_key_name = obj_class.prop_map_field_key_names[prop_name]
            for map_elem in map_coll:
                map_key = map_elem[map_key_name]
                self._set_in_prop_map(bch, obj_uuid,
                                      prop_name, map_elem, map_key)
        else:
            self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
    if not uuid_batch:
        bch.send()
    return (True, '')
# end object_update
def object_list(self, obj_type, parent_uuids=None, back_ref_uuids=None,
                obj_uuids=None, count=False, filters=None):
    """List (fq_name, uuid) pairs of *obj_type*, optionally anchored.

    Anchors: parent_uuids (children of), back_ref_uuids (objects that
    the anchors back-reference), obj_uuids (exact uuid set); with no
    anchor the fq_name table for the type is scanned.  *filters* is
    {prop_name: [allowed serialized values]}.  When *count* is True
    only the number of matches is returned.
    """
    obj_class = self._get_resource_class(obj_type)
    children_fq_names_uuids = []
    def filter_rows(coll_infos, filters=None):
        # Keep only the uuids whose stored 'prop:<key>' column value is
        # one of the allowed filter values (every filter must match).
        if not coll_infos or not filters:
            return coll_infos
        filtered_infos = {}
        columns = ['prop:%s' % filter_key for filter_key in filters]
        rows = self.multiget(self._OBJ_UUID_CF_NAME,
                             coll_infos.keys(),
                             columns=columns)
        for obj_uuid, properties in rows.items():
            # give chance for zk heartbeat/ping
            gevent.sleep(0)
            full_match = True
            for filter_key, filter_values in filters.items():
                property = 'prop:%s' % filter_key
                if (property not in properties or
                    properties[property] not in filter_values):
                    full_match=False
                    break
            if full_match:
                filtered_infos[obj_uuid] = coll_infos[obj_uuid]
        return filtered_infos
    # end filter_rows
    def get_fq_name_uuid_list(obj_uuids):
        # Resolve uuids to (fq_name, uuid), silently dropping uuids of
        # a different type or ones that have disappeared.
        ret_list = []
        for obj_uuid in obj_uuids:
            try:
                if obj_type != self.uuid_to_obj_type(obj_uuid):
                    continue
                obj_fq_name = self.uuid_to_fq_name(obj_uuid)
                ret_list.append((obj_fq_name, obj_uuid))
            except NoIdError:
                pass
        return ret_list
    # end get_fq_name_uuid_list
    if parent_uuids:
        # go from parent to child
        obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                 parent_uuids,
                                 start='children:%s:' % (obj_type),
                                 finish='children:%s;' % (obj_type),
                                 timestamp=True)
        def filter_rows_parent_anchor(sort=False):
            # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
            all_cols = [cols for obj_key in obj_rows.keys()
                        for cols in obj_rows[obj_key].items()]
            all_child_infos = {}
            for col_name, col_val_ts in all_cols:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                child_uuid = col_name.split(':')[2]
                if obj_uuids and child_uuid not in obj_uuids:
                    continue
                all_child_infos[child_uuid] = {'uuid': child_uuid,
                                               'tstamp': col_val_ts[1]}
            filt_child_infos = filter_rows(all_child_infos, filters)
            if not sort:
                ret_child_infos = filt_child_infos.values()
            else:
                # sort children by creation timestamp
                ret_child_infos = sorted(filt_child_infos.values(),
                                         key=itemgetter('tstamp'))
            return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
        # end filter_rows_parent_anchor
        children_fq_names_uuids.extend(filter_rows_parent_anchor(sort=True))
    if back_ref_uuids:
        # go from anchor to backrefs
        col_start = 'backref:%s:' %(obj_type)
        col_fin = 'backref:%s;' %(obj_type)
        obj_rows = self.multiget(self._OBJ_UUID_CF_NAME,
                                 back_ref_uuids,
                                 start='backref:%s:' % (obj_type),
                                 finish='backref:%s;' % (obj_type),
                                 timestamp=True)
        def filter_rows_backref_anchor():
            # flatten to [('backref:<obj-type>:<uuid>', (<val>,<ts>), *]
            all_cols = [cols for obj_key in obj_rows.keys()
                        for cols in obj_rows[obj_key].items()]
            all_backref_infos = {}
            for col_name, col_val_ts in all_cols:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                backref_uuid = col_name.split(':')[2]
                if obj_uuids and backref_uuid not in obj_uuids:
                    continue
                all_backref_infos[backref_uuid] = \
                    {'uuid': backref_uuid, 'tstamp': col_val_ts[1]}
            filt_backref_infos = filter_rows(all_backref_infos, filters)
            return get_fq_name_uuid_list(r['uuid'] for r in
                                         filt_backref_infos.values())
        # end filter_rows_backref_anchor
        children_fq_names_uuids.extend(filter_rows_backref_anchor())
    if not parent_uuids and not back_ref_uuids:
        if obj_uuids:
            # exact objects specified
            def filter_rows_object_list():
                all_obj_infos = {}
                for obj_uuid in obj_uuids:
                    all_obj_infos[obj_uuid] = None
                filt_obj_infos = filter_rows(all_obj_infos, filters)
                return get_fq_name_uuid_list(filt_obj_infos.keys())
            # end filter_rows_object_list
            children_fq_names_uuids.extend(filter_rows_object_list())
        else:  # grab all resources of this type
            obj_fq_name_cf = self._obj_fq_name_cf
            cols = obj_fq_name_cf.xget('%s' %(obj_type))
            def filter_rows_no_anchor():
                all_obj_infos = {}
                for col_name, _ in cols:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    # column name is '<encoded-fq-name>:<uuid>'
                    col_name_arr = utils.decode_string(col_name).split(':')
                    obj_uuid = col_name_arr[-1]
                    all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
                filt_obj_infos = filter_rows(all_obj_infos, filters)
                return filt_obj_infos.values()
            # end filter_rows_no_anchor
            children_fq_names_uuids.extend(filter_rows_no_anchor())
    if count:
        return (True, len(children_fq_names_uuids))
    return (True, children_fq_names_uuids)
# end object_list
def object_delete(self, obj_type, obj_uuid):
    """Delete *obj_uuid*: unlink from its parent, drop its refs and
    relaxed back-refs, remove the uuid row, then remove its fq_name
    index column.  Returns (True, '')."""
    obj_class = self._get_resource_class(obj_type)
    obj_uuid_cf = self._obj_uuid_cf
    # fetch fq_name before the row disappears; needed for the index
    fq_name = self.get_one_col(self._OBJ_UUID_CF_NAME,
                               obj_uuid, 'fq_name')
    bch = obj_uuid_cf.batch()
    # unlink from parent
    col_start = 'parent:'
    col_fin = 'parent;'
    col_name_iter = obj_uuid_cf.xget(
        obj_uuid, column_start=col_start, column_finish=col_fin)
    for (col_name, col_val) in col_name_iter:
        (_, parent_type, parent_uuid) = col_name.split(':')
        self._delete_child(
            bch, parent_type, parent_uuid, obj_type, obj_uuid)
    # remove refs
    col_start = 'ref:'
    col_fin = 'ref;'
    col_name_iter = obj_uuid_cf.xget(
        obj_uuid, column_start=col_start, column_finish=col_fin)
    for (col_name, col_val) in col_name_iter:
        (_, ref_type, ref_uuid) = col_name.split(':')
        self._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)
    # remove link from relaxed back refs
    col_start = 'relaxbackref:'
    col_fin = 'relaxbackref;'
    col_name_iter = obj_uuid_cf.xget(
        obj_uuid, column_start=col_start, column_finish=col_fin)
    for (col_name, col_val) in col_name_iter:
        (_, backref_uuid) = col_name.split(':')
        self._delete_ref(bch, None, backref_uuid, obj_type, obj_uuid)
    bch.remove(obj_uuid)
    bch.send()
    # Update fqname table
    fq_name_str = ':'.join(fq_name)
    fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
    self._obj_fq_name_cf.remove(obj_type, columns = [fq_name_col])
    return (True, '')
# end object_delete
def prop_collection_read(self, obj_type, obj_uuid, obj_fields, position):
    """Read list/map property collections of *obj_uuid*.

    :param obj_fields: property field names to fetch; names that are
        neither list nor map properties are skipped.
    :param position: optional element position/key to read a single
        element; falsy reads the whole collection.
    :returns: (True, result) where result maps each field to a list of
        (value, position) tuples, plus id_perms.
    """
    obj_class = self._get_resource_class(obj_type)
    result = {}
    # always read-in id-perms for upper-layers to do rbac/visibility
    result['id_perms'] = self.get_one_col(self._OBJ_UUID_CF_NAME,
                                          obj_uuid, 'prop:id_perms')
    # read in prop-list or prop-map fields
    for field in obj_fields:
        if field in obj_class.prop_list_fields:
            prop_pfx = 'propl'
        elif field in obj_class.prop_map_fields:
            prop_pfx = 'propm'
        else:
            continue
        if position:
            # exact column for a single element
            col_start = '%s:%s:%s' %(prop_pfx, field, position)
            col_end = '%s:%s:%s' %(prop_pfx, field, position)
        else:
            col_start = '%s:%s:' %(prop_pfx, field)
            col_end = '%s:%s;' %(prop_pfx, field)
        obj_cols = self._obj_uuid_cf.xget(obj_uuid,
                                          column_start=col_start,
                                          column_finish=col_end)
        result[field] = []
        for name, value in obj_cols:
            # tuple of col_value, position. result is already sorted
            # lexically by position (necessary only for list property)
            result[field].append((json.loads(value), name.split(':')[-1]))
    return (True, result)
# end prop_collection_read
def cache_uuid_to_fq_name_add(self, id, fq_name, obj_type):
    """Cache uuid -> (fq_name, obj_type) for uuid_to_fq_name /
    uuid_to_obj_type lookups.

    NOTE: parameter name 'id' shadows the builtin; kept for interface
    compatibility with existing keyword callers.
    """
    self._cache_uuid_to_fq_name[id] = (fq_name, obj_type)
# end cache_uuid_to_fq_name_add
def cache_uuid_to_fq_name_del(self, id):
    """Drop uuid *id* from the fq_name cache; no-op if absent."""
    # dict.pop with a default is the idiomatic delete-if-present and
    # replaces the try/except KeyError boilerplate.
    self._cache_uuid_to_fq_name.pop(id, None)
# end cache_uuid_to_fq_name_del
def uuid_to_fq_name(self, id):
    """Return the fq_name list for uuid *id*, consulting the local
    cache first.

    Raises NoIdError when the uuid is unknown to the obj_uuid table.
    """
    if id in self._cache_uuid_to_fq_name:
        return self._cache_uuid_to_fq_name[id][0]
    obj = self.get(self._OBJ_UUID_CF_NAME, id,
                   columns=['fq_name', 'type'])
    if not obj:
        raise NoIdError(id)
    fq_name = obj['fq_name']
    obj_type = obj['type']
    # populate the cache so the next lookup is a hit
    self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
    return fq_name
# end uuid_to_fq_name
def uuid_to_obj_type(self, id):
    """Return the object type string for uuid *id* (cache-first).

    Raises NoIdError when the uuid does not exist.
    """
    if id in self._cache_uuid_to_fq_name:
        return self._cache_uuid_to_fq_name[id][1]
    obj = self.get(self._OBJ_UUID_CF_NAME, id,
                   columns=['fq_name', 'type'])
    if not obj:
        raise NoIdError(id)
    fq_name = obj['fq_name']
    obj_type = obj['type']
    # populate the cache so the next lookup is a hit
    self.cache_uuid_to_fq_name_add(id, fq_name, obj_type)
    return obj_type
# end uuid_to_obj_type
def fq_name_to_uuid(self, obj_type, fq_name):
    """Resolve *fq_name* (a list of name components) to a uuid.

    Raises NoIdError when no entry exists and VncError when the
    fq_name ambiguously matches several uuids.
    """
    encoded = utils.encode_string(':'.join(fq_name))
    matches = self.get(self._OBJ_FQ_NAME_CF_NAME,
                       obj_type,
                       start=encoded + ':',
                       finish=encoded + ';')
    if not matches:
        raise NoIdError('%s %s' % (obj_type, encoded))
    if len(matches) > 1:
        raise VncError('Multi match %s for %s' % (encoded, obj_type))
    # column name is '<encoded-fq-name>:<uuid>'
    return matches.popitem()[0].split(':')[-1]
# end fq_name_to_uuid
# return all objects shared with a (share_type, share_id)
def get_shared(self, obj_type, share_id = '', share_type = 'global'):
    """Return [(uuid, rwx-json), ...] for objects of *obj_type* shared
    with <share_type:share_id>, or None when nothing is shared."""
    column = '%s:%s' % (share_type, share_id)
    col_infos = self.get(self._OBJ_SHARED_CF_NAME,
                         obj_type,
                         start=column + ':',
                         finish=column + ';')
    if not col_infos:
        return None
    # e.g. ('*:*:f7963198-08a4-4b96-a02e-41cc66593163', u'7')
    return [(col_name.split(':')[-1], col_val)
            for col_name, col_val in col_infos.items()]
# share an object 'obj_id' with <share_type:share_id>
# rwx indicate type of access (sharing) allowed
def set_shared(self, obj_type, obj_id, share_id = '', share_type = 'global', rwx = 7):
    """Grant <share_type:share_id> access to *obj_id* with the given
    rwx permission bits."""
    column = '%s:%s:%s' % (share_type, share_id, obj_id)
    self._obj_shared_cf.insert(obj_type, {column: json.dumps(rwx)})
# delete share of 'obj_id' object with <share_type:share_id>
def del_shared(self, obj_type, obj_id, share_id = '', share_type = 'global'):
    """Revoke the <share_type:share_id> share of *obj_id*."""
    column = '%s:%s:%s' % (share_type, share_id, obj_id)
    self._obj_shared_cf.remove(obj_type, columns=[column])
def _read_child(self, result, obj_uuid, child_obj_type, child_uuid,
child_tstamp):
if '%ss' % (child_obj_type) not in result:
result['%ss' % (child_obj_type)] = []
child_res_type = self._get_resource_class(child_obj_type).resource_type
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['href'] = self._generate_url(child_res_type, child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_obj_type)].append(child_info)
# end _read_child
def _read_ref(self, result, obj_uuid, ref_obj_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_obj_type) not in result:
result['%s_refs' % (ref_obj_type)] = []
ref_res_type = self._get_resource_class(ref_obj_type).resource_type
ref_data = ref_data_json
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
ref_info['attr'] = ref_data
ref_info['href'] = self._generate_url(ref_res_type, ref_uuid)
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_obj_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_obj_type, back_ref_uuid,
back_ref_data_json):
if '%s_back_refs' % (back_ref_obj_type) not in result:
result['%s_back_refs' % (back_ref_obj_type)] = []
back_ref_res_type = self._get_resource_class(back_ref_obj_type).resource_type
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = back_ref_data_json
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['href'] = self._generate_url(back_ref_res_type, back_ref_uuid)
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_obj_type)].append(back_ref_info)
# end _read_back_ref
| en | 0.635883 | # # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # # Name to ID mapping keyspace + tables # TODO describe layout # TODO describe layout # key: object type, column ($type:$id, uuid) # where type is entity object is being shared with. Project initially # end get_db_info # if no generate_url is specified, use a dummy function that always # returns an empty string # end __init__ #end #end #end #end # end _create_prop # prop has been accounted for, remove so only new ones remain # end _update_prop # end _add_to_prop_list # end _delete_from_prop_list # end _set_in_prop_map # end _delete_from_prop_map # end _create_child # end _delete_child # end _create_ref # update body didn't touch this type, nop # remove old ref # retain old ref with new ref attr # uuid has been accounted for, remove so only new ones remain # end _update_ref # end _delete_ref # will set conn_state to UP if successful # end _handle_exceptions # Helper routines for cassandra # 1. Ensure keyspace and schema/CFs exist # 2. Read in persisted data and publish to ifmap server # end _cassandra_init # Retry till cassandra is up # TODO do only for # thrift.transport.TTransport.TTransportException # end _cassandra_system_manager # Wait for it to be created by another process # end _cassandra_wait_for_keyspace # TODO verify only EEXISTS # TODO verify only EEXISTS # TODO verify only EEXISTS # end _cassandra_ensure_keyspace # end _cassandra_init_conn_pools # end _get_resource_class # end _get_xsd_class # Gather column values for obj and updates to backrefs # in a batch and write it at the end # non config-root child # Properties # Specifically checking for None # store list elements in list order # iterate on wrapped element or directly or prop field # iterate on wrapped element or directly or prop field # References # e.g. 
ref_field = 'network_ipam_refs' # ref_res_type = 'network-ipam' # ref_link_type = 'VnSubnetsType' # is_weakref = False # Update fqname table # end object_create # if field_names=None, all fields will be read/returned # optimize for common case of reading non-backref, non-children fields # ignoring columns starting from 'b' and 'c' - significant performance # impact in scaled setting. e.g. read of project # atleast one backref/children field is needed # specific props have been asked fetch exactly those # ignore reading backref + children columns # non config-root child # for all column names # sort children by creation time # re-write result's children without timestamp # for all children # Ordering property lists by position attribute # end for all rows # end object_read # end object_count_children # end update_last_modified # Grab ref-uuids and properties in new version # Properties # References # e.g. ref_field = 'network_ipam_refs' # ref_type = 'network-ipam' # ref_link_type = 'VnSubnetsType' # is_weakref = False # Gather column values for obj and updates to backrefs # in a batch and write it at the end # id-perms always has to be updated for last-mod timestamp # get it from request dict(or from db if not in request dict) # delete all old values of prop list # delete all old values of prop list # for all column names # create new refs # create new props # store list elements in list order # iterate on wrapped element or directly on prop field # for wrapped lists, store without the wrapper. regenerate # wrapper on read # store map elements in key order # iterate on wrapped element or directly on prop field # for wrapped lists, store without the wrapper. 
regenerate # wrapper on read # end object_update # give chance for zk heartbeat/ping # end filter_rows # end get_fq_name_uuid_list # go from parent to child # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *] # give chance for zk heartbeat/ping # end filter_rows_parent_anchor # go from anchor to backrefs # flatten to [('backref:<obj-type>:<uuid>', (<val>,<ts>), *] # give chance for zk heartbeat/ping # end filter_rows_backref_anchor # exact objects specified # end filter_rows_object_list # grab all resources of this type # give chance for zk heartbeat/ping # end filter_rows_no_anchor # end object_list # unlink from parent # remove refs # remove link from relaxed back refs # Update fqname table # end object_delete # always read-in id-perms for upper-layers to do rbac/visibility # read in prop-list or prop-map fields # tuple of col_value, position. result is already sorted # lexically by position (necessary only for list property) # end prop_collection_read # end cache_uuid_to_fq_name_add # end cache_uuid_to_fq_name_del # end uuid_to_fq_name # end uuid_to_obj_type # end fq_name_to_uuid # return all objects shared with a (share_type, share_id) # ('*:*:f7963198-08a4-4b96-a02e-41cc66593163', u'7') # share an object 'obj_id' with <share_type:share_id> # rwx indicate type of access (sharing) allowed # delete share of 'obj_id' object with <share_type:share_id> # end _read_child # TODO remove backward compat old format had attr directly # end _read_ref # TODO remove backward compat old format had attr directly # end _read_back_ref | 1.801456 | 2 |
worker.py | chrononyan/ok | 148 | 6625472 | #!/usr/bin/env python3
import os
from flask_rq import get_worker
from raven import Client
from raven.transport.http import HTTPTransport
from rq.contrib.sentry import register_sentry
from server import create_app
if __name__ == '__main__':
    # Entry point: run an RQ background worker inside the Flask
    # application context so jobs can use app extensions (db, config).
    # default to dev config
    env = os.getenv('OK_ENV', 'dev')
    app = create_app(env)
    with app.app_context():
        worker = get_worker()
        sentry_dsn = os.getenv('SENTRY_DSN')
        if sentry_dsn:
            # Client is constructed but currently unused: the
            # register_sentry hookup below is deliberately commented out.
            client = Client(sentry_dsn, transport=HTTPTransport)
            # disable sentry for now (causes worker CrashLoopBackOff in kubernetes)
            # register_sentry(client, worker)
        # Blocks, processing queued jobs until terminated.
        worker.work()
| #!/usr/bin/env python3
import os
from flask_rq import get_worker
from raven import Client
from raven.transport.http import HTTPTransport
from rq.contrib.sentry import register_sentry
from server import create_app
if __name__ == '__main__':
# default to dev config
env = os.getenv('OK_ENV', 'dev')
app = create_app(env)
with app.app_context():
worker = get_worker()
sentry_dsn = os.getenv('SENTRY_DSN')
if sentry_dsn:
client = Client(sentry_dsn, transport=HTTPTransport)
# disable sentry for now (causes worker CrashLoopBackOff in kubernetes)
# register_sentry(client, worker)
worker.work()
| en | 0.590113 | #!/usr/bin/env python3 # default to dev config # disable sentry for now (causes worker CrashLoopBackOff in kubernetes) # register_sentry(client, worker) | 1.888609 | 2 |
front-end/testsuite-python-lib/Python-3.3.0/Lib/site.py | MalloyPower/parsing-python | 1 | 6625473 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
If a file named "pyvenv.cfg" exists one directory above sys.executable,
sys.prefix and sys.exec_prefix are set to that directory and
it is also checked for site-packages and site-python (sys.base_prefix and
sys.base_exec_prefix will always be the "real" prefixes of the Python
installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
the key "include-system-site-packages" set to anything other than "false"
(case-insensitive), the system-level prefixes will still also be
searched for site-packages; otherwise they won't.
All of the resulting site-specific directories, if they exist, are
appended to sys.path, and also inspected for path configuration
files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import re
import builtins
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
    """Join *paths*, absolutize the result, and return the pair
    ``(path, normcased_path)``; the second form is used for
    case-insensitive duplicate detection."""
    joined = os.path.join(*paths)
    try:
        joined = os.path.abspath(joined)
    except OSError:
        # abspath can fail (e.g. unreadable cwd); keep the joined form
        pass
    return joined, os.path.normcase(joined)
def abs_paths():
    """Set all module __file__ and __cached__ attributes to an absolute path"""
    for module in set(sys.modules.values()):
        loader = getattr(module, '__loader__', None)
        if getattr(loader, '__module__', None) != '_frozen_importlib':
            # don't mess with a PEP 302-supplied __file__
            continue
        for attr in ('__file__', '__cached__'):
            try:
                setattr(module, attr, os.path.abspath(getattr(module, attr)))
            except (AttributeError, OSError):
                # attribute missing or path not resolvable; leave as-is
                pass
def removeduppaths():
    """Remove duplicate entries from sys.path, making every entry absolute.

    Returns the set of case-normalized paths that remain.
    """
    # The interpreter's initial sys.path can hold relative entries
    # (e.g. when running from the build directory); normalize them all.
    deduped = []
    known_paths = set()
    for entry in sys.path:
        # Compare the normcased form so that on case-insensitive file
        # systems paths differing only in case collapse to one entry.
        entry, normalized = makepath(entry)
        if normalized not in known_paths:
            deduped.append(entry)
            known_paths.add(normalized)
    sys.path[:] = deduped
    return known_paths
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                _, normalized = makepath(entry)
                existing.add(normalized)
        except TypeError:
            # non-string entries in sys.path are simply skipped
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Process a .pth file within the site-packages directory:
    For each line in the file, either combine it with sitedir to a path
    and add that to known_paths, or execute it if it starts with 'import '.

    Returns the updated known_paths set, or None when a temporary one
    was created internally (or when the file could not be opened).
    """
    if known_paths is None:
        # Fix: use the freshly computed set.  Previously the return
        # value of _init_pathinfo() was discarded, leaving known_paths
        # as None and making the membership test below raise TypeError.
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "r")
    except IOError:
        return
    with f:
        for n, line in enumerate(f):
            if line.startswith("#"):
                # comment line
                continue
            try:
                if line.startswith(("import ", "import\t")):
                    # executable .pth line
                    exec(line)
                    continue
                line = line.rstrip()
                dir, dircase = makepath(sitedir, line)
                if not dircase in known_paths and os.path.exists(dir):
                    sys.path.append(dir)
                    known_paths.add(dircase)
            except Exception:
                # Report the offending line and stop processing the file.
                print("Error processing line {:d} of {}:\n".format(n+1, fullname),
                      file=sys.stderr)
                import traceback
                for record in traceback.format_exception(*sys.exc_info()):
                    for line in record.splitlines():
                        print(' '+line, file=sys.stderr)
                print("\nRemainder of file ignored", file=sys.stderr)
                break
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Append *sitedir* to sys.path (if absent) and process every .pth
    file it contains, in sorted order."""
    reset = known_paths is None
    if reset:
        known_paths = _init_pathinfo()
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        known_paths.add(sitedircase)
        sys.path.append(sitedir)  # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        # directory unreadable or missing -- nothing to process
        return
    for name in sorted(n for n in names if n.endswith(".pth")):
        addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def check_enableusersite():
    """Check if the user site directory is safe for inclusion.

    Returns:
        None  -- disabled for security reasons (real/effective id mismatch)
        False -- disabled by the user (-s flag / PYTHONNOUSERSITE)
        True  -- safe and enabled
    """
    if sys.flags.no_user_site:
        return False
    # Refuse when running with elevated privileges (e.g. setuid/setgid):
    # real and effective ids must match for both uid and gid.
    for real, effective in (("getuid", "geteuid"), ("getgid", "getegid")):
        if hasattr(os, real) and hasattr(os, effective):
            if getattr(os, effective)() != getattr(os, real)():
                return None
    return True
def getuserbase():
    """Return (and lazily initialize) the `user base` directory path.

    The value comes from sysconfig's 'userbase' configuration variable
    and is cached in the module-level ``USER_BASE``.
    """
    global USER_BASE
    if USER_BASE is None:
        from sysconfig import get_config_var
        USER_BASE = get_config_var('userbase')
    return USER_BASE
def getusersitepackages():
    """Returns the user-specific site-packages directory path.

    If the global variable ``USER_SITE`` is not initialized yet, this
    function will also set it (and ``USER_BASE``, via getuserbase()).
    """
    global USER_SITE
    user_base = getuserbase() # this will also set USER_BASE
    if USER_SITE is not None:
        return USER_SITE
    from sysconfig import get_path
    if sys.platform == 'darwin':
        from sysconfig import get_config_var
        if get_config_var('PYTHONFRAMEWORK'):
            # Framework (Apple-style) build uses its own install scheme.
            USER_SITE = get_path('purelib', 'osx_framework_user')
            return USER_SITE
    # Generic scheme, keyed on the OS family (e.g. 'posix_user', 'nt_user').
    USER_SITE = get_path('purelib', '%s_user' % os.name)
    return USER_SITE
def addusersitepackages(known_paths):
    """Append the per-user site-packages directory to sys.path.

    Each user has a private python directory holding a site-packages
    subdirectory; it is only added when user site support is enabled
    and the directory actually exists.
    """
    # This call also initializes USER_BASE and USER_SITE as a side effect.
    user_site = getusersitepackages()
    if ENABLE_USER_SITE and os.path.isdir(user_site):
        addsitedir(user_site, known_paths)
    return known_paths
def getsitepackages(prefixes=None):
    """Return the global site-packages directories as a list of full paths.

    For every entry of *prefixes* (defaulting to the module-level
    ``PREFIXES``), the platform-appropriate ``site-packages`` -- and,
    on unix-like layouts, ``site-python`` -- subdirectories are
    computed.  Empty and duplicate prefixes are skipped.
    """
    if prefixes is None:
        prefixes = PREFIXES
    site_dirs = []
    visited = set()
    for prefix in prefixes:
        if not prefix or prefix in visited:
            continue
        visited.add(prefix)
        if sys.platform in ('os2emx', 'riscos'):
            site_dirs.append(os.path.join(prefix, "Lib", "site-packages"))
        elif os.sep == '/':
            # Standard unix layout: versioned site-packages plus the
            # shared (unversioned) site-python directory.
            site_dirs.append(os.path.join(prefix, "lib",
                                          "python" + sys.version[:3],
                                          "site-packages"))
            site_dirs.append(os.path.join(prefix, "lib", "site-python"))
        else:
            site_dirs.append(prefix)
            site_dirs.append(os.path.join(prefix, "lib", "site-packages"))
        if sys.platform == "darwin":
            # for framework builds *only* we add the standard Apple
            # locations.
            from sysconfig import get_config_var
            framework = get_config_var("PYTHONFRAMEWORK")
            if framework:
                site_dirs.append(
                    os.path.join("/Library", framework,
                                 sys.version[:3], "site-packages"))
    return site_dirs
def addsitepackages(known_paths, prefixes=None):
    """Register every existing global site-packages directory on sys.path.

    Candidate directories come from getsitepackages(); ones that do not
    exist are skipped.  Returns the (possibly updated) known_paths set.
    """
    for candidate in getsitepackages(prefixes):
        if os.path.isdir(candidate):
            addsitedir(candidate, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """The OS/2 EMX port has optional extension modules that do double duty
    as DLLs (and must use the .DLL file extension) for other extensions.
    The library search path needs to be amended so these will be found
    during module import.  Use BEGINLIBPATH so that these are at the start
    of the library search path.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    # NOTE(review): raises KeyError if BEGINLIBPATH is unset; only called
    # on os2emx (see main()), where it is presumably provided -- confirm.
    libpath = os.environ['BEGINLIBPATH'].split(';')
    if libpath[-1]:
        libpath.append(dllpath)
    else:
        # a trailing ';' left an empty final element -- reuse that slot
        libpath[-1] = dllpath
    os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
    """Install the 'quit' and 'exit' builtins.

    Both are callables that raise SystemExit; their repr hints at the
    platform's EOF keystroke as the other way to leave the interpreter.
    """
    if os.sep == ':':
        eof = 'Cmd-Q'
    elif os.sep == '\\':
        eof = 'Ctrl-Z plus Return'
    else:
        eof = 'Ctrl-D (i.e. EOF)'

    class Quitter(object):
        def __init__(self, name):
            self.name = name
        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)
        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when
            # their stdin wrapper is closed.
            try:
                fd = sys.stdin.fileno() if hasattr(sys.stdin, "fileno") else -1
                if fd != 0:
                    # Don't close stdin if it wraps fd 0
                    sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    builtins.quit = Quitter('quit')
    builtins.exit = Quitter('exit')
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    # Page size used by the interactive pager in __call__().
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        # name:  builtin name this object is bound to ('license', ...)
        # data:  fallback text used when no candidate file can be read
        # files/dirs: candidate file names and directories, searched lazily
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None  # filled in on first use by __setup()

    def __setup(self):
        # Lazily load the text: the first readable dir/file combination
        # wins; otherwise fall back to the literal data string.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = open(filename, "r")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            # Too long to show inline -- tell the user to call it instead.
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        # Page through the text MAXLINES at a time, prompting between pages.
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                # Ran past the last line -- done paging.
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Set 'copyright', 'credits' and 'license' in builtins."""
    builtins.copyright = _Printer("copyright", sys.copyright)
    if sys.platform[:4] == 'java':
        builtins.credits = _Printer(
            "credits",
            "Jython is maintained by the Jython developers (www.jython.org).")
    else:
        builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
    here = os.path.dirname(os.__file__)
    # License text is read from disk when available; the short URL string
    # is only the fallback.
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    """Expose pydoc-based interactive help as the 'help' builtin."""
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case.

    No-op on non-Windows platforms.
    """
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        # getdefaultlocale() may return (None, None); guard against a
        # missing encoding before probing it.
        if enc and enc.startswith('cp'):            # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
# Matches "key = value" lines in pyvenv.cfg; keys may contain word
# characters plus '-' and '_'.
CONFIG_LINE = re.compile(r'^(?P<key>(\w|[-_])+)\s*=\s*(?P<value>.*)\s*$')

def venv(known_paths):
    """Detect a PEP 405 virtual environment and adjust the prefixes.

    If a ``pyvenv.cfg`` is found next to the interpreter (or one level
    above), sys.prefix/sys.exec_prefix are pointed at the venv and its
    site-packages are added to sys.path.  Depending on the
    ``include-system-site-packages`` key, the system prefixes are kept
    in or dropped from PREFIXES.  Returns *known_paths*.
    """
    global PREFIXES, ENABLE_USER_SITE
    env = os.environ
    if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
        # OS X framework builds launch through a stub executable; the
        # real interpreter path is passed via this environment variable.
        executable = os.environ['__PYVENV_LAUNCHER__']
    else:
        executable = sys.executable
    executable_dir, executable_name = os.path.split(executable)
    site_prefix = os.path.dirname(executable_dir)
    sys._home = None
    if sys.platform == 'win32':
        executable_name = os.path.splitext(executable_name)[0]
    conf_basename = 'pyvenv.cfg'
    # Look for the config both beside the executable and one level up.
    candidate_confs = [
        conffile for conffile in (
            os.path.join(executable_dir, conf_basename),
            os.path.join(site_prefix, conf_basename)
        )
        if os.path.isfile(conffile)
    ]
    if candidate_confs:
        virtual_conf = candidate_confs[0]
        system_site = "true"  # default: include system site-packages
        with open(virtual_conf) as f:
            for line in f:
                line = line.strip()
                m = CONFIG_LINE.match(line)
                if m:
                    d = m.groupdict()
                    key, value = d['key'].lower(), d['value']
                    if key == 'include-system-site-packages':
                        system_site = value.lower()
                    elif key == 'home':
                        sys._home = value
        sys.prefix = sys.exec_prefix = site_prefix
        # Doing this here ensures venv takes precedence over user-site
        addsitepackages(known_paths, [sys.prefix])
        # addsitepackages will process site_prefix again if its in PREFIXES,
        # but that's ok; known_paths will prevent anything being added twice
        if system_site == "true":
            PREFIXES.insert(0, sys.prefix)
        else:
            PREFIXES = [sys.prefix]
            ENABLE_USER_SITE = False
    return known_paths
def execsitecustomize():
    """Import the optional ``sitecustomize`` hook module, if available.

    A missing module is silently ignored; any other failure is either
    reported through sys.excepthook (PYTHONVERBOSE) or summarized on
    stderr.
    """
    try:
        import sitecustomize
    except ImportError:
        pass
    except Exception as err:
        if os.environ.get("PYTHONVERBOSE"):
            sys.excepthook(*sys.exc_info())
        else:
            message = ("Error in sitecustomize; set PYTHONVERBOSE for traceback:\n"
                       "%s: %s\n" % (err.__class__.__name__, err))
            sys.stderr.write(message)
def execusercustomize():
    """Import the optional ``usercustomize`` hook module, if available.

    A missing module is silently ignored; any other failure is either
    reported through sys.excepthook (PYTHONVERBOSE) or summarized on
    stderr.
    """
    try:
        import usercustomize
    except ImportError:
        pass
    except Exception as err:
        if os.environ.get("PYTHONVERBOSE"):
            sys.excepthook(*sys.exc_info())
        else:
            message = ("Error in usercustomize; set PYTHONVERBOSE for traceback:\n"
                       "%s: %s\n" % (err.__class__.__name__, err))
            sys.stderr.write(message)
def main():
    """Add standard site-specific directories to the module search path.

    This function is called automatically when this module is imported,
    unless the python interpreter was started with the -S flag.
    """
    global ENABLE_USER_SITE
    abs_paths()
    known_paths = removeduppaths()
    # A virtual environment (pyvenv.cfg), if present, may replace the
    # prefixes and disable user site-packages.
    known_paths = venv(known_paths)
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    known_paths = addusersitepackages(known_paths)
    known_paths = addsitepackages(known_paths)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    # Interactive-interpreter conveniences (quit/exit, copyright, help).
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    # Site- and user-level customization hooks run last.
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
# Prevent editing of sys.path when python was started with -S and
# site is imported later.
if not sys.flags.no_site:
    main()
def _script():
    """Command-line interface for "python -m site".

    Without arguments, dump sys.path and the user-site configuration;
    with --user-base/--user-site, print those paths and exit with a
    status code describing whether the user site dir is enabled.
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        # No options: report the effective search-path configuration.
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")
        print("USER_BASE: %r (%s)" % (USER_BASE,
            "exists" if os.path.isdir(USER_BASE) else "doesn't exist"))
        print("USER_SITE: %r (%s)" % (USER_SITE,
            "exists" if os.path.isdir(USER_SITE) else "doesn't exist"))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)

    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)

    if buffer:
        print(os.pathsep.join(buffer))
        # Exit status communicates whether the user site dir is usable.
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)
if __name__ == '__main__':
    # "python -m site" entry point.
    _script()
| """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
If a file named "pyvenv.cfg" exists one directory above sys.executable,
sys.prefix and sys.exec_prefix are set to that directory and
it is also checked for site-packages and site-python (sys.base_prefix and
sys.base_exec_prefix will always be the "real" prefixes of the Python
installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
the key "include-system-site-packages" set to anything other than "false"
(case-insensitive), the system-level prefixes will still also be
searched for site-packages; otherwise they won't.
All of the resulting site-specific directories, if they exist, are
appended to sys.path, and also inspected for path configuration
files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import re
import builtins
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
dir = os.path.join(*paths)
try:
dir = os.path.abspath(dir)
except OSError:
pass
return dir, os.path.normcase(dir)
def abs_paths():
"""Set all module __file__ and __cached__ attributes to an absolute path"""
for m in set(sys.modules.values()):
if (getattr(getattr(m, '__loader__', None), '__module__', None) !=
'_frozen_importlib'):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except (AttributeError, OSError):
pass
try:
m.__cached__ = os.path.abspath(m.__cached__)
except (AttributeError, OSError):
pass
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "r")
except IOError:
return
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
try:
if line.startswith(("import ", "import\t")):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
except Exception:
print("Error processing line {:d} of {}:\n".format(n+1, fullname),
file=sys.stderr)
import traceback
for record in traceback.format_exception(*sys.exc_info()):
for line in record.splitlines():
print(' '+line, file=sys.stderr)
print("\nRemainder of file ignored", file=sys.stderr)
break
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
known_paths.add(sitedircase)
try:
names = os.listdir(sitedir)
except os.error:
return
names = [name for name in names if name.endswith(".pth")]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def getuserbase():
"""Returns the `user base` directory path.
The `user base` directory can be used to store data. If the global
variable ``USER_BASE`` is not initialized yet, this function will also set
it.
"""
global USER_BASE
if USER_BASE is not None:
return USER_BASE
from sysconfig import get_config_var
USER_BASE = get_config_var('userbase')
return USER_BASE
def getusersitepackages():
"""Returns the user-specific site-packages directory path.
If the global variable ``USER_SITE`` is not initialized yet, this
function will also set it.
"""
global USER_SITE
user_base = getuserbase() # this will also set USER_BASE
if USER_SITE is not None:
return USER_SITE
from sysconfig import get_path
if sys.platform == 'darwin':
from sysconfig import get_config_var
if get_config_var('PYTHONFRAMEWORK'):
USER_SITE = get_path('purelib', 'osx_framework_user')
return USER_SITE
USER_SITE = get_path('purelib', '%s_user' % os.name)
return USER_SITE
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def getsitepackages(prefixes=None):
"""Returns a list containing all global site-packages directories
(and possibly site-python).
For each directory present in ``prefixes`` (or the global ``PREFIXES``),
this function will find its `site-packages` subdirectory depending on the
system environment, and will return a list of full paths.
"""
sitepackages = []
seen = set()
if prefixes is None:
prefixes = PREFIXES
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos'):
sitepackages.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitepackages.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitepackages.append(os.path.join(prefix, "lib", "site-python"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
if sys.platform == "darwin":
# for framework builds *only* we add the standard Apple
# locations.
from sysconfig import get_config_var
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
sitepackages.append(
os.path.join("/Library", framework,
sys.version[:3], "site-packages"))
return sitepackages
def addsitepackages(known_paths, prefixes=None):
"""Add site-packages (and possibly site-python) to sys.path"""
for sitedir in getsitepackages(prefixes):
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new builtins 'quit' and 'exit'.
These are objects which make the interpreter exit when called.
The repr of each object contains a hint at how it works.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
fd = -1
if hasattr(sys.stdin, "fileno"):
fd = sys.stdin.fileno()
if fd != 0:
# Don't close stdin if it wraps fd 0
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = open(filename, "r")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in builtins"""
builtins.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
builtins.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
else:
builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
builtins.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
    """Implementation of the builtin 'help'.

    A thin wrapper around pydoc.help; pydoc is imported lazily on first
    use to keep interpreter startup cheap.
    """

    def __repr__(self):
        return ("Type help() for interactive help, "
                "or help(object) for help about object.")

    def __call__(self, *args, **kwds):
        import pydoc
        return pydoc.help(*args, **kwds)
def sethelper():
    """Install the interactive 'help' object in builtins."""
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case.

    No-op on non-Windows platforms.
    """
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        # getdefaultlocale() may return (None, None) when the locale
        # cannot be determined; guard before calling str methods (the
        # previous code raised AttributeError in that case).
        if enc and enc.startswith('cp'):            # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                # Codec is missing: register the "mbcs" alias for it.
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
# Matches "key = value" lines in pyvenv.cfg; keys may contain word
# characters, hyphens and underscores.
CONFIG_LINE = re.compile(r'^(?P<key>(\w|[-_])+)\s*=\s*(?P<value>.*)\s*$')
def venv(known_paths):
    """Detect a virtual environment via pyvenv.cfg and adjust prefixes.

    If a ``pyvenv.cfg`` is found next to the running executable (or one
    directory above it), sys.prefix/sys.exec_prefix are pointed at the
    venv and its site-packages are added to *known_paths*.  The module
    globals PREFIXES and ENABLE_USER_SITE are updated according to the
    ``include-system-site-packages`` setting.  Returns *known_paths*.
    """
    global PREFIXES, ENABLE_USER_SITE
    env = os.environ
    if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
        # On OS X the stub launcher records the real executable here.
        executable = os.environ['__PYVENV_LAUNCHER__']
    else:
        executable = sys.executable
    executable_dir, executable_name = os.path.split(executable)
    site_prefix = os.path.dirname(executable_dir)
    sys._home = None
    if sys.platform == 'win32':
        # Drop the ".exe" suffix so the name is uniform across platforms.
        executable_name = os.path.splitext(executable_name)[0]
    conf_basename = 'pyvenv.cfg'
    # Look for the config next to the executable first, then one level up.
    candidate_confs = [
        conffile for conffile in (
            os.path.join(executable_dir, conf_basename),
            os.path.join(site_prefix, conf_basename)
        )
        if os.path.isfile(conffile)
    ]
    if candidate_confs:
        virtual_conf = candidate_confs[0]
        # Default: the venv sees the system site-packages unless the
        # config explicitly says otherwise.
        system_site = "true"
        with open(virtual_conf) as f:
            for line in f:
                line = line.strip()
                m = CONFIG_LINE.match(line)
                if m:
                    d = m.groupdict()
                    key, value = d['key'].lower(), d['value']
                    if key == 'include-system-site-packages':
                        system_site = value.lower()
                    elif key == 'home':
                        sys._home = value
        sys.prefix = sys.exec_prefix = site_prefix
        # Doing this here ensures venv takes precedence over user-site
        addsitepackages(known_paths, [sys.prefix])
        # addsitepackages will process site_prefix again if its in PREFIXES,
        # but that's ok; known_paths will prevent anything being added twice
        if system_site == "true":
            PREFIXES.insert(0, sys.prefix)
        else:
            PREFIXES = [sys.prefix]
            ENABLE_USER_SITE = False
    return known_paths
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        import sitecustomize  # noqa: F401 -- imported for side effects
    except ImportError:
        # No sitecustomize module installed: nothing to do.
        pass
    except Exception as err:
        if os.environ.get("PYTHONVERBOSE"):
            sys.excepthook(*sys.exc_info())
        else:
            message = (
                "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n"
                "%s: %s\n" % (type(err).__name__, err))
            sys.stderr.write(message)
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        import usercustomize  # noqa: F401 -- imported for side effects
    except ImportError:
        # No usercustomize module installed: nothing to do.
        pass
    except Exception as err:
        if os.environ.get("PYTHONVERBOSE"):
            sys.excepthook(*sys.exc_info())
        else:
            message = (
                "Error in usercustomize; set PYTHONVERBOSE for traceback:\n"
                "%s: %s\n" % (type(err).__name__, err))
            sys.stderr.write(message)
def main():
    """Add standard site-specific directories to the module search path.
    This function is called automatically when this module is imported,
    unless the python interpreter was started with the -S flag.
    """
    global ENABLE_USER_SITE
    abs_paths()
    known_paths = removeduppaths()
    # Virtual-environment detection runs before user/site prefixes are
    # processed so that a venv takes precedence over them.
    known_paths = venv(known_paths)
    if ENABLE_USER_SITE is None:
        # venv() may have forced this to False; only auto-detect if unset.
        ENABLE_USER_SITE = check_enableusersite()
    known_paths = addusersitepackages(known_paths)
    known_paths = addsitepackages(known_paths)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    # Install the interactive conveniences: quit/exit, copyright/credits/
    # license, help, and (on Windows) the "mbcs" codec aliases.
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
# Prevent editing of sys.path when python was started with -S and
# site is imported later.
if not sys.flags.no_site:
    main()
def _script():
    # Command-line interface: "python -m site [--user-base] [--user-site]".
    # The help text below is dedented before printing, so its leading
    # indentation must stay uniform.
    help = """\
    %s [--user-base] [--user-site]
    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.
    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - uses site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        # No arguments: dump sys.path and the user-site configuration.
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")
        print("USER_BASE: %r (%s)" % (USER_BASE,
            "exists" if os.path.isdir(USER_BASE) else "doesn't exist"))
        print("USER_SITE: %r (%s)" % (USER_SITE,
            "exists" if os.path.isdir(USER_SITE) else "doesn't exist"))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)
    # Collect the requested values in flag order.
    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)
    if buffer:
        print(os.pathsep.join(buffer))
        # Exit code reflects the user-site state (see help text above).
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        # Unrecognized arguments: show usage and exit with an error code.
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)
# Allow running this module directly as a diagnostic script.
if __name__ == '__main__':
    _script()
| en | 0.855353 | Append module search paths for third-party packages to sys.path. **************************************************************** * This module is automatically imported during initialization. * **************************************************************** This will append site-specific paths to the module search path. On Unix (including Mac OSX), it starts with sys.prefix and sys.exec_prefix (if different) and appends lib/python<version>/site-packages as well as lib/site-python. On other platforms (such as Windows), it tries each of the prefixes directly, as well as with lib/site-packages appended. The resulting directories, if they exist, are appended to sys.path, and also inspected for path configuration files. If a file named "pyvenv.cfg" exists one directory above sys.executable, sys.prefix and sys.exec_prefix are set to that directory and it is also checked for site-packages and site-python (sys.base_prefix and sys.base_exec_prefix will always be the "real" prefixes of the Python installation). If "pyvenv.cfg" (a bootstrap configuration file) contains the key "include-system-site-packages" set to anything other than "false" (case-insensitive), the system-level prefixes will still also be searched for site-packages; otherwise they won't. All of the resulting site-specific directories, if they exist, are appended to sys.path, and also inspected for path configuration files. A path configuration file is a file whose name has the form <package>.pth; its contents are additional directories (one per line) to be added to sys.path. Non-existing directories (or non-directories) are never added to sys.path; no directory is added to sys.path more than once. Blank lines and lines beginning with '#' are skipped. Lines starting with 'import' are executed. 
For example, suppose sys.prefix and sys.exec_prefix are set to /usr/local and there is a directory /usr/local/lib/python2.5/site-packages with three subdirectories, foo, bar and spam, and two path configuration files, foo.pth and bar.pth. Assume foo.pth contains the following: # foo package configuration foo bar bletch and bar.pth contains: # bar package configuration bar Then the following directories are added to sys.path, in this order: /usr/local/lib/python2.5/site-packages/bar /usr/local/lib/python2.5/site-packages/foo Note that bletch is omitted because it doesn't exist; bar precedes foo because bar.pth comes alphabetically before foo.pth; and spam is omitted because it is not mentioned in either path configuration file. After these path manipulations, an attempt is made to import a module named sitecustomize, which can perform arbitrary additional site-specific customizations. If this import fails with an ImportError exception, it is silently ignored. # Prefixes for site-packages; add additional prefixes like /usr/local here # Enable per user site-packages directory # set it to False to disable the feature or True to force the feature # for distutils.commands.install # These values are initialized by the getuserbase() and getusersitepackages() # functions, through the main() function when Python starts. Set all module __file__ and __cached__ attributes to an absolute path # don't mess with a PEP 302-supplied __file__ Remove duplicate entries from sys.path along with making them absolute # This ensures that the initial path provided by the interpreter contains # only absolute pathnames, even if we're running from the build directory. # Filter out duplicate paths (on case-insensitive file systems also # if they only differ in case); turn relative paths into absolute # paths. 
Return a set containing all existing directory entries from sys.path Process a .pth file within the site-packages directory: For each line in the file, either combine it with sitedir to a path and add that to known_paths, or execute it if it starts with 'import '. Add 'sitedir' argument to sys.path if missing and handle .pth files in 'sitedir' # Add path component Check if user site directory is safe for inclusion The function tests for the command line flag (including environment var), process uid/gid equal to effective uid/gid. None: Disabled for security reasons False: Disabled by user (command line option) True: Safe and enabled # check process uid == effective uid # check process gid == effective gid Returns the `user base` directory path. The `user base` directory can be used to store data. If the global variable ``USER_BASE`` is not initialized yet, this function will also set it. Returns the user-specific site-packages directory path. If the global variable ``USER_SITE`` is not initialized yet, this function will also set it. # this will also set USER_BASE Add a per user site-package to sys.path Each user has its own python directory with site-packages in the home directory. # get the per user site-package path # this call will also make sure USER_BASE and USER_SITE are set Returns a list containing all global site-packages directories (and possibly site-python). For each directory present in ``prefixes`` (or the global ``PREFIXES``), this function will find its `site-packages` subdirectory depending on the system environment, and will return a list of full paths. # for framework builds *only* we add the standard Apple # locations. Add site-packages (and possibly site-python) to sys.path The OS/2 EMX port has optional extension modules that do double duty as DLLs (and must use the .DLL file extension) for other extensions. The library search path needs to be amended so these will be found during module import. 
Use BEGINLIBPATH so that these are at the start of the library search path. Define new builtins 'quit' and 'exit'. These are objects which make the interpreter exit when called. The repr of each object contains a hint at how it works. # Shells like IDLE catch the SystemExit, but listen when their # stdin wrapper is closed. # Don't close stdin if it wraps fd 0 interactive prompt objects for printing the license text, a list of contributors and the copyright notice. Set 'copyright' and 'credits' in builtins \ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands for supporting Python development. See www.python.org for more information. Define the builtin 'help'. This is a wrapper around pydoc.help (with a twist). On Windows, some default encodings are not provided by Python, while they are always available as "mbcs" in each locale. Make them usable by aliasing to "mbcs" in such a case. # "cp***" ? # Doing this here ensures venv takes precedence over user-site # addsitepackages will process site_prefix again if its in PREFIXES, # but that's ok; known_paths will prevent anything being added twice Run custom site specific code, if available. Run custom user specific code, if available. Add standard site-specific directories to the module search path. This function is called automatically when this module is imported, unless the python interpreter was started with the -S flag. # Prevent edition of sys.path when python was started with -S and # site is imported later. \ %s [--user-base] [--user-site] Without arguments print some useful information With arguments print the value of USER_BASE and/or USER_SITE separated by '%s'. Exit codes with --user-base or --user-site: 0 - user site directory is enabled 1 - user site directory is disabled by user 2 - uses site directory is disabled by super user or for security reasons >2 - unknown error | 2.42229 | 2 |
texar/data/data/dataset_utils_test.py | Holmeswww/Text_Infilling | 87 | 6625474 | # -*- coding: utf-8 -*-
#
"""
Unit tests for data utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from texar.data.data import dataset_utils as dsutils
# pylint: disable=invalid-name
class TransformationTest(tf.test.TestCase):
"""Tests various transformation utilities.
"""
def test_make_chained_transformation(self):
"""Tests :func:`texar.data.make_chained_transformation`
"""
original_data = np.arange(0, 10)
dataset = tf.data.Dataset.from_tensor_slices(original_data)
def _tran_a(data):
return data + 100
def _tran_b(data):
return data + 1000
def _tran_c(data):
return data + 10000
chained_tran = dsutils.make_chained_transformation(
[_tran_a, _tran_b, _tran_c])
dataset = dataset.map(chained_tran)
iterator = dataset.make_one_shot_iterator()
elem = iterator.get_next()
with self.test_session() as sess:
data_ = []
while True:
try:
data_.append(sess.run(elem))
except tf.errors.OutOfRangeError:
break
self.assertEqual(len(data_), len(original_data))
data_ = [elem_ - 11100 for elem_ in data_]
self.assertEqual(data_, original_data.tolist())
if __name__ == "__main__":
tf.test.main()
| # -*- coding: utf-8 -*-
#
"""
Unit tests for data utils.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import tensorflow as tf
from texar.data.data import dataset_utils as dsutils
# pylint: disable=invalid-name
class TransformationTest(tf.test.TestCase):
"""Tests various transformation utilities.
"""
def test_make_chained_transformation(self):
"""Tests :func:`texar.data.make_chained_transformation`
"""
original_data = np.arange(0, 10)
dataset = tf.data.Dataset.from_tensor_slices(original_data)
def _tran_a(data):
return data + 100
def _tran_b(data):
return data + 1000
def _tran_c(data):
return data + 10000
chained_tran = dsutils.make_chained_transformation(
[_tran_a, _tran_b, _tran_c])
dataset = dataset.map(chained_tran)
iterator = dataset.make_one_shot_iterator()
elem = iterator.get_next()
with self.test_session() as sess:
data_ = []
while True:
try:
data_.append(sess.run(elem))
except tf.errors.OutOfRangeError:
break
self.assertEqual(len(data_), len(original_data))
data_ = [elem_ - 11100 for elem_ in data_]
self.assertEqual(data_, original_data.tolist())
if __name__ == "__main__":
tf.test.main()
| en | 0.560208 | # -*- coding: utf-8 -*- # Unit tests for data utils. # pylint: disable=invalid-name Tests various transformation utilities. Tests :func:`texar.data.make_chained_transformation` | 2.611157 | 3 |
igtodiscordhook/imaging.py | hrmorley34/igtodiscordhook | 0 | 6625475 | <filename>igtodiscordhook/imaging.py
import math
from os import fdopen
from pathlib import Path
from PIL import Image
from tempfile import mkstemp
from typing import Iterable, List
EXPORT_SUFFIX = ".png"
def load(path: Path) -> Image.Image:
return Image.open(path)
def save(dir: Path, im: Image.Image) -> Path:
fd, fname = mkstemp(dir=dir, suffix=EXPORT_SUFFIX)
try:
im.save(fname)
finally:
fdopen(fd).close()
return Path(fname)
def combine_images_row(
imgs: List[Image.Image], width: int, pad: int
) -> Iterable[Image.Image]:
for i in range(0, math.ceil(len(imgs) / width)):
yield combine_images(imgs[i * width : (i + 1) * width], width=width, pad=pad)
def combine_images(imgs: List[Image.Image], width: int, pad: int) -> Image.Image:
if len(imgs) < 1:
raise ValueError
elif len(imgs) == 1:
return imgs[0]
size = imgs[0].size
if len(imgs) < width:
countwidth = len(imgs)
countheight = 1
else:
countwidth = width
countheight = math.ceil(len(imgs) / width)
final_size = (
countwidth * (size[0] + pad) - pad,
countheight * (size[1] + pad) - pad,
)
final_image = Image.new("RGBA", final_size, (0, 0, 0, 0))
for index, im in enumerate(imgs):
xindex, yindex = index % width, index // width
x = xindex * (size[0] + pad)
y = yindex * (size[1] + pad)
if im.size == size:
resized = im
# elif abs(im.size[1] / im.size[0] - size[1] / size[0]) < 0.001:
# # same ratio, so just resize
# resized = im.resize(size, Image.BICUBIC)
else:
resized = im.copy()
resized.thumbnail(size, Image.BICUBIC)
print(resized.size, resized.size == size)
x += (size[0] - resized.width) // 2
y += (size[1] - resized.height) // 2
final_image.paste(resized, (x, y))
return final_image
| <filename>igtodiscordhook/imaging.py
import math
from os import fdopen
from pathlib import Path
from PIL import Image
from tempfile import mkstemp
from typing import Iterable, List
EXPORT_SUFFIX = ".png"
def load(path: Path) -> Image.Image:
return Image.open(path)
def save(dir: Path, im: Image.Image) -> Path:
fd, fname = mkstemp(dir=dir, suffix=EXPORT_SUFFIX)
try:
im.save(fname)
finally:
fdopen(fd).close()
return Path(fname)
def combine_images_row(
imgs: List[Image.Image], width: int, pad: int
) -> Iterable[Image.Image]:
for i in range(0, math.ceil(len(imgs) / width)):
yield combine_images(imgs[i * width : (i + 1) * width], width=width, pad=pad)
def combine_images(imgs: List[Image.Image], width: int, pad: int) -> Image.Image:
if len(imgs) < 1:
raise ValueError
elif len(imgs) == 1:
return imgs[0]
size = imgs[0].size
if len(imgs) < width:
countwidth = len(imgs)
countheight = 1
else:
countwidth = width
countheight = math.ceil(len(imgs) / width)
final_size = (
countwidth * (size[0] + pad) - pad,
countheight * (size[1] + pad) - pad,
)
final_image = Image.new("RGBA", final_size, (0, 0, 0, 0))
for index, im in enumerate(imgs):
xindex, yindex = index % width, index // width
x = xindex * (size[0] + pad)
y = yindex * (size[1] + pad)
if im.size == size:
resized = im
# elif abs(im.size[1] / im.size[0] - size[1] / size[0]) < 0.001:
# # same ratio, so just resize
# resized = im.resize(size, Image.BICUBIC)
else:
resized = im.copy()
resized.thumbnail(size, Image.BICUBIC)
print(resized.size, resized.size == size)
x += (size[0] - resized.width) // 2
y += (size[1] - resized.height) // 2
final_image.paste(resized, (x, y))
return final_image
| en | 0.324093 | # elif abs(im.size[1] / im.size[0] - size[1] / size[0]) < 0.001: # # same ratio, so just resize # resized = im.resize(size, Image.BICUBIC) | 2.856111 | 3 |
solutions/101.symmetric-tree.240649552.ac.py | satu0king/Leetcode-Solutions | 78 | 6625476 | <filename>solutions/101.symmetric-tree.240649552.ac.py<gh_stars>10-100
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
def f(left, right):
if left is None and right is None:
return True
if left is None or right is None:
return False
if left.val!= right.val:
return False
return f(left.right, right.left) and f(left.left, right.right)
if root is None:
return True
return f(root.left, root.right)
| <filename>solutions/101.symmetric-tree.240649552.ac.py<gh_stars>10-100
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
def f(left, right):
if left is None and right is None:
return True
if left is None or right is None:
return False
if left.val!= right.val:
return False
return f(left.right, right.left) and f(left.left, right.right)
if root is None:
return True
return f(root.left, root.right)
| en | 0.60307 | # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None | 3.836682 | 4 |
coca/utils/optim.py | CISiPLab/cisip-GreenCap | 12 | 6625477 | # -*- coding: utf-8 -*-
"""
Created on 16 Sep 2020 15:01:35
@author: jiahuei
"""
import logging
import math
import torch
from torch import optim
logger = logging.getLogger(__name__)
# noinspection PyAttributeOutsideInit
class RateOpt:
"""Optim wrapper that implements rate."""
def step(self, step=None, epoch=None):
"""Update parameters and rate"""
self._step += 1
self._epoch = epoch
rate = self.rate()
for p in self.optimizer.param_groups:
if "pruning_mask" in p:
logger.debug("Pruning masks encountered. Skip LR setting.")
continue
p["lr"] = rate
self._rate = rate
self.optimizer.step()
def __getattr__(self, name):
return getattr(self.optimizer, name)
class NoamOpt(RateOpt):
"""Optim wrapper that implements rate."""
def __init__(self, optimizer, model_size, factor, warmup):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def rate(self):
"""Implement `lrate` above"""
step = self._step
return self.factor * (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5)))
class StepLROpt(RateOpt):
"""Optim wrapper that implements rate."""
def __init__(
self,
optimizer,
learning_rate_init,
learning_rate_decay_start,
learning_rate_decay_every,
learning_rate_decay_rate,
):
if learning_rate_decay_start >= 0:
assert (
learning_rate_decay_every > 0
), f"`learning_rate_decay_every` must be > 0, saw {learning_rate_decay_every}"
assert (
0 < learning_rate_decay_rate < 1
), f"`learning_rate_decay_rate` must be > 0 and < 1, saw {learning_rate_decay_rate}"
self.optimizer = optimizer
self.learning_rate_init = learning_rate_init
self.learning_rate_decay_start = learning_rate_decay_start
self.learning_rate_decay_every = learning_rate_decay_every
self.learning_rate_decay_rate = learning_rate_decay_rate
self._rate = 0
self._step = 0
self._epoch = 0
def rate(self):
"""Implement `lrate` above"""
# Assign the learning rate
if self._epoch > self.learning_rate_decay_start >= 0:
frac = (self._epoch - self.learning_rate_decay_start) // self.learning_rate_decay_every
decay_factor = self.learning_rate_decay_rate ** frac
current_lr = self.learning_rate_init * decay_factor
else:
current_lr = self.learning_rate_init
return current_lr
class CosineOpt(RateOpt):
"""Optim wrapper that implements rate."""
def __init__(self, optimizer, max_train_step, learning_rate_init, learning_rate_min):
self.optimizer = optimizer
# self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
# optimizer, T_max=max_train_step, eta_min=learning_rate_min, last_epoch=-1
# )
self._step = 0
self._rate = 0
self.max_train_step = max_train_step
self.learning_rate_min = learning_rate_min
self.learning_rate_init = learning_rate_init
def rate(self):
"""Implement `lrate` above"""
step = self._step / self.max_train_step
step = 1.0 + math.cos(min(1.0, step) * math.pi)
lr = (self.learning_rate_init - self.learning_rate_min) * (step / 2) + self.learning_rate_min
return lr
ALL_SCHEDULERS = ("noam", "step", "cosine")
def get_optim(parameters, config):
scheduler_name = config.lr_scheduler.lower()
if scheduler_name == "noam":
if config.optim.lower() != "adam":
logger.warning(f"Noam scheduler should be used with ADAM. Ignoring optim choice: {config.optim}")
return NoamOpt(
torch.optim.Adam(parameters, lr=0, betas=(0.9, 0.98), eps=1e-9),
model_size=config.d_model,
factor=config.noamopt_factor,
warmup=config.noamopt_warmup,
)
elif scheduler_name == "step":
return StepLROpt(
build_optimizer(parameters, config),
config.learning_rate,
config.learning_rate_decay_start,
config.learning_rate_decay_every,
config.learning_rate_decay_rate,
)
elif scheduler_name == "cosine":
return CosineOpt(
build_optimizer(parameters, config),
config.max_train_step,
config.learning_rate,
config.learning_rate_min,
)
else:
raise Exception(f"Bad option `config.lr_scheduler`: {config.lr_scheduler}")
ALL_OPTIMIZERS = ("rmsprop", "adagrad", "sgd", "sgdm", "sgdmom", "adam")
def build_optimizer(params, config):
optimizer_name = config.optim.lower()
if optimizer_name == "rmsprop":
return optim.RMSprop(
params, config.learning_rate, config.optim_alpha, config.optim_epsilon, weight_decay=config.weight_decay
)
elif optimizer_name == "adagrad":
return optim.Adagrad(params, config.learning_rate, weight_decay=config.weight_decay)
elif optimizer_name == "sgd":
return optim.SGD(params, config.learning_rate, weight_decay=config.weight_decay)
elif optimizer_name == "sgdm":
return optim.SGD(params, config.learning_rate, config.optim_alpha, weight_decay=config.weight_decay)
elif optimizer_name == "sgdmom":
return optim.SGD(
params, config.learning_rate, config.optim_alpha, weight_decay=config.weight_decay, nesterov=True
)
elif optimizer_name == "adam":
return optim.Adam(
params,
config.learning_rate,
(config.optim_alpha, config.optim_beta),
config.optim_epsilon,
weight_decay=config.weight_decay,
)
else:
raise Exception(f"Bad option `config.optim`: {config.optim}")
# def set_lr(optimizer, lr):
# for group in optimizer.param_groups:
# group["lr"] = lr
#
#
# def get_lr(optimizer):
# for group in optimizer.param_groups:
# return group["lr"]
def clip_gradient(optimizer, grad_clip):
for group in optimizer.param_groups:
# for param in group["params"]:
# param.grad.data.clamp_(-grad_clip, grad_clip)
torch.nn.utils.clip_grad_value_(group["params"], grad_clip)
| # -*- coding: utf-8 -*-
"""
Created on 16 Sep 2020 15:01:35
@author: jiahuei
"""
import logging
import math
import torch
from torch import optim
logger = logging.getLogger(__name__)
# noinspection PyAttributeOutsideInit
class RateOpt:
"""Optim wrapper that implements rate."""
def step(self, step=None, epoch=None):
"""Update parameters and rate"""
self._step += 1
self._epoch = epoch
rate = self.rate()
for p in self.optimizer.param_groups:
if "pruning_mask" in p:
logger.debug("Pruning masks encountered. Skip LR setting.")
continue
p["lr"] = rate
self._rate = rate
self.optimizer.step()
def __getattr__(self, name):
return getattr(self.optimizer, name)
class NoamOpt(RateOpt):
"""Optim wrapper that implements rate."""
def __init__(self, optimizer, model_size, factor, warmup):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def rate(self):
"""Implement `lrate` above"""
step = self._step
return self.factor * (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5)))
class StepLROpt(RateOpt):
"""Optim wrapper that implements rate."""
def __init__(
self,
optimizer,
learning_rate_init,
learning_rate_decay_start,
learning_rate_decay_every,
learning_rate_decay_rate,
):
if learning_rate_decay_start >= 0:
assert (
learning_rate_decay_every > 0
), f"`learning_rate_decay_every` must be > 0, saw {learning_rate_decay_every}"
assert (
0 < learning_rate_decay_rate < 1
), f"`learning_rate_decay_rate` must be > 0 and < 1, saw {learning_rate_decay_rate}"
self.optimizer = optimizer
self.learning_rate_init = learning_rate_init
self.learning_rate_decay_start = learning_rate_decay_start
self.learning_rate_decay_every = learning_rate_decay_every
self.learning_rate_decay_rate = learning_rate_decay_rate
self._rate = 0
self._step = 0
self._epoch = 0
def rate(self):
"""Implement `lrate` above"""
# Assign the learning rate
if self._epoch > self.learning_rate_decay_start >= 0:
frac = (self._epoch - self.learning_rate_decay_start) // self.learning_rate_decay_every
decay_factor = self.learning_rate_decay_rate ** frac
current_lr = self.learning_rate_init * decay_factor
else:
current_lr = self.learning_rate_init
return current_lr
class CosineOpt(RateOpt):
"""Optim wrapper that implements rate."""
def __init__(self, optimizer, max_train_step, learning_rate_init, learning_rate_min):
self.optimizer = optimizer
# self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
# optimizer, T_max=max_train_step, eta_min=learning_rate_min, last_epoch=-1
# )
self._step = 0
self._rate = 0
self.max_train_step = max_train_step
self.learning_rate_min = learning_rate_min
self.learning_rate_init = learning_rate_init
def rate(self):
"""Implement `lrate` above"""
step = self._step / self.max_train_step
step = 1.0 + math.cos(min(1.0, step) * math.pi)
lr = (self.learning_rate_init - self.learning_rate_min) * (step / 2) + self.learning_rate_min
return lr
ALL_SCHEDULERS = ("noam", "step", "cosine")
def get_optim(parameters, config):
scheduler_name = config.lr_scheduler.lower()
if scheduler_name == "noam":
if config.optim.lower() != "adam":
logger.warning(f"Noam scheduler should be used with ADAM. Ignoring optim choice: {config.optim}")
return NoamOpt(
torch.optim.Adam(parameters, lr=0, betas=(0.9, 0.98), eps=1e-9),
model_size=config.d_model,
factor=config.noamopt_factor,
warmup=config.noamopt_warmup,
)
elif scheduler_name == "step":
return StepLROpt(
build_optimizer(parameters, config),
config.learning_rate,
config.learning_rate_decay_start,
config.learning_rate_decay_every,
config.learning_rate_decay_rate,
)
elif scheduler_name == "cosine":
return CosineOpt(
build_optimizer(parameters, config),
config.max_train_step,
config.learning_rate,
config.learning_rate_min,
)
else:
raise Exception(f"Bad option `config.lr_scheduler`: {config.lr_scheduler}")
ALL_OPTIMIZERS = ("rmsprop", "adagrad", "sgd", "sgdm", "sgdmom", "adam")
def build_optimizer(params, config):
optimizer_name = config.optim.lower()
if optimizer_name == "rmsprop":
return optim.RMSprop(
params, config.learning_rate, config.optim_alpha, config.optim_epsilon, weight_decay=config.weight_decay
)
elif optimizer_name == "adagrad":
return optim.Adagrad(params, config.learning_rate, weight_decay=config.weight_decay)
elif optimizer_name == "sgd":
return optim.SGD(params, config.learning_rate, weight_decay=config.weight_decay)
elif optimizer_name == "sgdm":
return optim.SGD(params, config.learning_rate, config.optim_alpha, weight_decay=config.weight_decay)
elif optimizer_name == "sgdmom":
return optim.SGD(
params, config.learning_rate, config.optim_alpha, weight_decay=config.weight_decay, nesterov=True
)
elif optimizer_name == "adam":
return optim.Adam(
params,
config.learning_rate,
(config.optim_alpha, config.optim_beta),
config.optim_epsilon,
weight_decay=config.weight_decay,
)
else:
raise Exception(f"Bad option `config.optim`: {config.optim}")
# def set_lr(optimizer, lr):
# for group in optimizer.param_groups:
# group["lr"] = lr
#
#
# def get_lr(optimizer):
# for group in optimizer.param_groups:
# return group["lr"]
def clip_gradient(optimizer, grad_clip):
for group in optimizer.param_groups:
# for param in group["params"]:
# param.grad.data.clamp_(-grad_clip, grad_clip)
torch.nn.utils.clip_grad_value_(group["params"], grad_clip)
| en | 0.368122 | # -*- coding: utf-8 -*- Created on 16 Sep 2020 15:01:35 @author: jiahuei # noinspection PyAttributeOutsideInit Optim wrapper that implements rate. Update parameters and rate Optim wrapper that implements rate. Implement `lrate` above Optim wrapper that implements rate. Implement `lrate` above # Assign the learning rate Optim wrapper that implements rate. # self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( # optimizer, T_max=max_train_step, eta_min=learning_rate_min, last_epoch=-1 # ) Implement `lrate` above # def set_lr(optimizer, lr): # for group in optimizer.param_groups: # group["lr"] = lr # # # def get_lr(optimizer): # for group in optimizer.param_groups: # return group["lr"] # for param in group["params"]: # param.grad.data.clamp_(-grad_clip, grad_clip) | 2.417036 | 2 |
home/urls.py | ASAM-DevProject/p24 | 0 | 6625478 | from django.urls import path
from home.views import home, error_access_permission_dr, error_access_permission_sick
app_name = 'home'
urlpatterns = [
path('', home, name='home'),
path('permission/sick', error_access_permission_sick, name='permission_sick'),
path('permission/dr', error_access_permission_dr, name='permission_dr'),
] | from django.urls import path
from home.views import home, error_access_permission_dr, error_access_permission_sick
app_name = 'home'
urlpatterns = [
path('', home, name='home'),
path('permission/sick', error_access_permission_sick, name='permission_sick'),
path('permission/dr', error_access_permission_dr, name='permission_dr'),
] | none | 1 | 1.663661 | 2 | |
selection/dbms/hana_dbms.py | penggan666/index_selection_evaluation | 37 | 6625479 | import json
import logging
import re
import subprocess
import time
import pyhdb
from ..database_connector import DatabaseConnector
class HanaDatabaseConnector(DatabaseConnector):
def __init__(self, db_name, autocommit=False):
DatabaseConnector.__init__(self, db_name, autocommit=autocommit)
self.db_system = "hana"
self._connection = None
# `db_name` is the schema name
if not self.db_name:
self.db_name = "SYSTEM"
logging.getLogger(name="pyhdb").setLevel(logging.ERROR)
self.read_connection_file()
self.create_connection()
self._alter_configuration()
logging.debug("HANA connector created: {}".format(db_name))
def read_connection_file(self):
with open("database_connection.json", "r") as file:
connection_data = json.load(file)
self.host = connection_data["host"]
self.port = connection_data["port"]
self.db_user = connection_data["db_user"]
self.db_user_password = connection_data["db_user_password"]
self.import_directory = connection_data["import_directory"]
self.ssh_user = connection_data["ssh_user"]
def _alter_configuration(self):
logging.info("Setting HANA variables")
variables = [
(
"indexserver.ini",
"SYSTEM",
"datastatistics",
"dev_force_use_non_runtime_datastatistics",
"true",
),
(
"global.ini",
"SYSTEM",
"datastatistics",
"dev_force_use_non_runtime_datastatistics",
"true",
),
(
"indexserver.ini",
"database",
"import_export",
"enable_csv_import_path_filter",
"false",
),
]
string = (
"alter system alter configuration ('{}', '{}') "
"set ('{}','{}')='{}' WITH RECONFIGURE"
)
for database_variable in variables:
execute_string = string.format(*database_variable)
logging.debug(execute_string)
self.exec_only(execute_string)
def create_connection(self):
if self._connection:
self.close()
self._connection = pyhdb.connect(
host=self.host,
port=self.port,
user=self.db_user,
password=self.db_user_password,
)
self._connection.autocommit = self.autocommit
self._cursor = self._connection.cursor()
self.exec_only("set schema {}".format(self.db_name))
def database_names(self):
result = self.exec_fetch("select schema_name from schemas", False)
return [x[0].lower() for x in result]
def enable_simulation(self):
create_schema = f"create schema {self.db_name}_empty"
self.exec_only(create_schema)
self.exec_only(f"set schema {self.db_name}_empty")
self.create_tables()
def update_query_text(self, text):
# TODO 'tpch' / 'tpcds' custom rules
text = text.replace(";\nlimit ", " limit ").replace("limit -1", "")
text = self._replace_interval_by_function(text, "day")
text = self._replace_interval_by_function(text, "month")
text = self._replace_interval_by_function(text, "year")
text = self._change_substring_syntax(text)
return text
def _replace_interval_by_function(self, text, token):
text = re.sub(
rf"date '(.+)' (.) interval '(.*)' {token}",
rf"add_{token}s(to_date('\1','YYYY-MM-DD'),\2\3)",
text,
)
return text
def _change_substring_syntax(self, text):
text = re.sub(
r"substring\((.+) from (.+) for (.+)\)", r"substring(\1, \2, \3)", text
)
return text
def create_database(self, database_name):
self.exec_only("Create schema {}".format(database_name))
logging.info("Database (schema) {} created".format(database_name))
def import_data(self, table, path):
scp_target = f"{self.ssh_user}@{self.host}:{self.import_directory}"
# TODO pass scp output to logger
subprocess.run(["scp", path, scp_target])
csv_file = self.import_directory + "/" + path.split("/")[-1]
import_statement = (
f"import from csv file '{csv_file}' "
f"into {table} with record delimited by '\\n' "
"field delimited by '|'"
)
logging.debug("Import csv statement {}".format(table))
self.exec_only(import_statement)
def get_plan(self, query):
query_text = self._prepare_query(query)
statement_name = f"{self.db_name}_q{query.nr}"
statement = (
f"explain plan set " f"statement_name='{statement_name}' for " f"{query_text}"
)
try:
self.exec_only(statement)
except Exception as e:
# pdb returns this even if the explain statement worked
if str(e) != "Invalid or unsupported function code received: 7":
raise e
# TODO store result in dictionary-like format
result = self.exec_fetch(
"select operator_name, operator_details, "
"output_size, subtree_cost, execution_engine "
"from explain_plan_table "
f"where statement_name='{statement_name}'",
one=False,
)
self.exec_only(
"delete from explain_plan_table where " f"statement_name='{statement_name}'"
)
self._cleanup_query(query)
return result
def _cleanup_query(self, query):
for query_statement in query.text.split(";"):
if "drop view" in query_statement:
self.exec_only(query_statement)
def get_cost(self, query):
# TODO how to get cost when simulating indexes
query_plan = self.get_plan(query)
total_cost = query_plan[0][3]
return total_cost
def exec_query(self, query, timeout=None):
query_text = self._prepare_query(query)
start_time = time.time()
self._cursor.execute(query_text)
execution_time = time.time() - start_time
self._cleanup_query(query)
return execution_time, {}
def drop_indexes(self):
logging.info("Dropping indexes")
statement = "select index_name from indexes where schema_name="
statement += f"'{self.db_name.upper()}'"
indexes = self.exec_fetch(statement, one=False)
for index in indexes:
index_name = index[0]
drop_stmt = "drop index {}".format(index_name)
logging.debug("Dropping index {}".format(index_name))
self.exec_only(drop_stmt)
def create_statistics(self):
logging.info("HANA")
def indexes_size(self):
# TODO implement
return 0
def create_index(self, index):
table_name = index.table()
statement = (
f"create index {index.index_idx()} "
f"on {table_name} ({index.joined_column_names()})"
)
self.exec_only(statement)
# TODO update index.estimated_size
| import json
import logging
import re
import subprocess
import time
import pyhdb
from ..database_connector import DatabaseConnector
class HanaDatabaseConnector(DatabaseConnector):
def __init__(self, db_name, autocommit=False):
DatabaseConnector.__init__(self, db_name, autocommit=autocommit)
self.db_system = "hana"
self._connection = None
# `db_name` is the schema name
if not self.db_name:
self.db_name = "SYSTEM"
logging.getLogger(name="pyhdb").setLevel(logging.ERROR)
self.read_connection_file()
self.create_connection()
self._alter_configuration()
logging.debug("HANA connector created: {}".format(db_name))
def read_connection_file(self):
with open("database_connection.json", "r") as file:
connection_data = json.load(file)
self.host = connection_data["host"]
self.port = connection_data["port"]
self.db_user = connection_data["db_user"]
self.db_user_password = connection_data["db_user_password"]
self.import_directory = connection_data["import_directory"]
self.ssh_user = connection_data["ssh_user"]
def _alter_configuration(self):
logging.info("Setting HANA variables")
variables = [
(
"indexserver.ini",
"SYSTEM",
"datastatistics",
"dev_force_use_non_runtime_datastatistics",
"true",
),
(
"global.ini",
"SYSTEM",
"datastatistics",
"dev_force_use_non_runtime_datastatistics",
"true",
),
(
"indexserver.ini",
"database",
"import_export",
"enable_csv_import_path_filter",
"false",
),
]
string = (
"alter system alter configuration ('{}', '{}') "
"set ('{}','{}')='{}' WITH RECONFIGURE"
)
for database_variable in variables:
execute_string = string.format(*database_variable)
logging.debug(execute_string)
self.exec_only(execute_string)
def create_connection(self):
if self._connection:
self.close()
self._connection = pyhdb.connect(
host=self.host,
port=self.port,
user=self.db_user,
password=self.db_user_password,
)
self._connection.autocommit = self.autocommit
self._cursor = self._connection.cursor()
self.exec_only("set schema {}".format(self.db_name))
def database_names(self):
result = self.exec_fetch("select schema_name from schemas", False)
return [x[0].lower() for x in result]
def enable_simulation(self):
create_schema = f"create schema {self.db_name}_empty"
self.exec_only(create_schema)
self.exec_only(f"set schema {self.db_name}_empty")
self.create_tables()
def update_query_text(self, text):
# TODO 'tpch' / 'tpcds' custom rules
text = text.replace(";\nlimit ", " limit ").replace("limit -1", "")
text = self._replace_interval_by_function(text, "day")
text = self._replace_interval_by_function(text, "month")
text = self._replace_interval_by_function(text, "year")
text = self._change_substring_syntax(text)
return text
def _replace_interval_by_function(self, text, token):
text = re.sub(
rf"date '(.+)' (.) interval '(.*)' {token}",
rf"add_{token}s(to_date('\1','YYYY-MM-DD'),\2\3)",
text,
)
return text
def _change_substring_syntax(self, text):
text = re.sub(
r"substring\((.+) from (.+) for (.+)\)", r"substring(\1, \2, \3)", text
)
return text
def create_database(self, database_name):
self.exec_only("Create schema {}".format(database_name))
logging.info("Database (schema) {} created".format(database_name))
def import_data(self, table, path):
scp_target = f"{self.ssh_user}@{self.host}:{self.import_directory}"
# TODO pass scp output to logger
subprocess.run(["scp", path, scp_target])
csv_file = self.import_directory + "/" + path.split("/")[-1]
import_statement = (
f"import from csv file '{csv_file}' "
f"into {table} with record delimited by '\\n' "
"field delimited by '|'"
)
logging.debug("Import csv statement {}".format(table))
self.exec_only(import_statement)
def get_plan(self, query):
query_text = self._prepare_query(query)
statement_name = f"{self.db_name}_q{query.nr}"
statement = (
f"explain plan set " f"statement_name='{statement_name}' for " f"{query_text}"
)
try:
self.exec_only(statement)
except Exception as e:
# pdb returns this even if the explain statement worked
if str(e) != "Invalid or unsupported function code received: 7":
raise e
# TODO store result in dictionary-like format
result = self.exec_fetch(
"select operator_name, operator_details, "
"output_size, subtree_cost, execution_engine "
"from explain_plan_table "
f"where statement_name='{statement_name}'",
one=False,
)
self.exec_only(
"delete from explain_plan_table where " f"statement_name='{statement_name}'"
)
self._cleanup_query(query)
return result
def _cleanup_query(self, query):
for query_statement in query.text.split(";"):
if "drop view" in query_statement:
self.exec_only(query_statement)
def get_cost(self, query):
# TODO how to get cost when simulating indexes
query_plan = self.get_plan(query)
total_cost = query_plan[0][3]
return total_cost
def exec_query(self, query, timeout=None):
query_text = self._prepare_query(query)
start_time = time.time()
self._cursor.execute(query_text)
execution_time = time.time() - start_time
self._cleanup_query(query)
return execution_time, {}
def drop_indexes(self):
logging.info("Dropping indexes")
statement = "select index_name from indexes where schema_name="
statement += f"'{self.db_name.upper()}'"
indexes = self.exec_fetch(statement, one=False)
for index in indexes:
index_name = index[0]
drop_stmt = "drop index {}".format(index_name)
logging.debug("Dropping index {}".format(index_name))
self.exec_only(drop_stmt)
def create_statistics(self):
logging.info("HANA")
def indexes_size(self):
# TODO implement
return 0
def create_index(self, index):
table_name = index.table()
statement = (
f"create index {index.index_idx()} "
f"on {table_name} ({index.joined_column_names()})"
)
self.exec_only(statement)
# TODO update index.estimated_size
| en | 0.60727 | # `db_name` is the schema name # TODO 'tpch' / 'tpcds' custom rules # TODO pass scp output to logger # pdb returns this even if the explain statement worked # TODO store result in dictionary-like format # TODO how to get cost when simulating indexes # TODO implement # TODO update index.estimated_size | 2.176155 | 2 |
api/draft_registrations/views.py | tsukaeru/RDM-osf.io | 11 | 6625480 | from rest_framework import permissions as drf_permissions, exceptions
from framework.auth.oauth_scopes import CoreScopes
from api.base import permissions as base_permissions
from api.base.pagination import DraftRegistrationContributorPagination
from api.draft_registrations.permissions import (
DraftContributorDetailPermissions,
IsContributorOrAdminContributor,
IsAdminContributor,
)
from api.draft_registrations.serializers import (
DraftRegistrationSerializer,
DraftRegistrationDetailSerializer,
DraftRegistrationContributorsSerializer,
DraftRegistrationContributorDetailSerializer,
DraftRegistrationContributorsCreateSerializer,
)
from api.nodes.views import (
NodeDraftRegistrationsList,
NodeDraftRegistrationDetail,
NodeInstitutionsList,
NodeInstitutionsRelationship,
NodeContributorsList,
NodeContributorDetail,
DraftMixin,
)
from api.nodes.permissions import ContributorOrPublic, AdminDeletePermissions
from api.subjects.views import SubjectRelationshipBaseView, BaseResourceSubjectsList
from osf.models import DraftRegistrationContributor
class DraftRegistrationMixin(DraftMixin):
"""
Old DraftMixin was built under the assumption that a node was provided from the start.
All permission checking went through the node, not the draft.
New draft registration endpoints do permission checking on the draft registration.
"""
# Overrides DraftMixin
def check_branched_from(self, draft):
# We do not have to check the branched_from relationship. node_id is not a kwarg
return
# Overrides DraftMixin
def check_resource_permissions(self, resource):
# Checks permissions on draft_registration, regardless of whether or not
# draft_registration is branched off of a node
return self.check_object_permissions(self.request, resource)
class DraftRegistrationList(NodeDraftRegistrationsList):
permission_classes = (
IsContributorOrAdminContributor,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-list'
# overrides NodeDraftRegistrationList
def get_serializer_class(self):
return DraftRegistrationSerializer
# overrides NodeDraftRegistrationList
def get_queryset(self):
user = self.request.user
if user.is_anonymous:
raise exceptions.NotAuthenticated()
# Returns DraftRegistrations for which a user is a contributor
return user.draft_registrations_active
class DraftRegistrationDetail(NodeDraftRegistrationDetail, DraftRegistrationMixin):
permission_classes = (
IsContributorOrAdminContributor,
AdminDeletePermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-detail'
# overrides NodeDraftRegistrationDetail
def get_serializer_class(self):
return DraftRegistrationDetailSerializer
class DraftInstitutionsList(NodeInstitutionsList, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.INSTITUTION_READ, CoreScopes.DRAFT_REGISTRATIONS_READ]
view_category = 'draft_registrations'
view_name = 'draft-registration-institutions'
# Overrides NodeInstitutionsList
def get_resource(self):
return self.get_draft()
class DraftInstitutionsRelationship(NodeInstitutionsRelationship, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-relationships-institutions'
# Overrides NodeInstitutionsRelationship
def get_resource(self):
return self.get_draft(check_object_permissions=False)
class DraftSubjectsList(BaseResourceSubjectsList, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.DRAFT_REGISTRATIONS_READ]
view_category = 'draft_registrations'
view_name = 'draft-registration-subjects'
def get_resource(self):
# Overrides BaseResourceSubjectsList
return self.get_draft()
class DraftSubjectsRelationship(SubjectRelationshipBaseView, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.DRAFT_REGISTRATIONS_WRITE]
view_category = 'draft_registrations'
view_name = 'draft-registration-relationships-subjects'
ordering = ('-id',)
def get_resource(self, check_object_permissions=True):
# Overrides SubjectRelationshipBaseView
return self.get_draft(check_object_permissions=check_object_permissions)
class DraftContributorsList(NodeContributorsList, DraftRegistrationMixin):
permission_classes = (
IsAdminContributor,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
pagination_class = DraftRegistrationContributorPagination
required_read_scopes = [CoreScopes.DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.DRAFT_REGISTRATIONS_WRITE]
view_category = 'draft_registrations'
view_name = 'draft-registration-contributors'
serializer_class = DraftRegistrationContributorsSerializer
def get_default_queryset(self):
# Overrides NodeContributorsList
draft = self.get_draft()
return draft.draftregistrationcontributor_set.all().include('user__guids')
# overrides NodeContributorsList
def get_serializer_class(self):
if self.request.method in ('PUT', 'PATCH', 'DELETE'):
return DraftRegistrationContributorDetailSerializer
elif self.request.method == 'POST':
return DraftRegistrationContributorsCreateSerializer
else:
return DraftRegistrationContributorsSerializer
def get_resource(self):
return self.get_draft()
# Overrides NodeContributorsList
def get_serializer_context(self):
context = super().get_serializer_context()
context['resource'] = self.get_resource()
context['default_email'] = 'draft_registration'
return context
class DraftContributorDetail(NodeContributorDetail, DraftRegistrationMixin):
permission_classes = (
DraftContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-contributor-detail'
serializer_class = DraftRegistrationContributorDetailSerializer
required_read_scopes = [CoreScopes.DRAFT_CONTRIBUTORS_READ]
required_write_scopes = [CoreScopes.DRAFT_CONTRIBUTORS_WRITE]
def get_resource(self):
return self.get_draft()
# overrides RetrieveAPIView
def get_object(self):
draft_registration = self.get_draft()
user = self.get_user()
# May raise a permission denied
self.check_object_permissions(self.request, user)
try:
return draft_registration.draftregistrationcontributor_set.get(user=user)
except DraftRegistrationContributor.DoesNotExist:
raise exceptions.NotFound('{} cannot be found in the list of contributors.'.format(user))
def get_serializer_context(self):
context = super().get_serializer_context()
context['resource'] = self.get_draft()
context['default_email'] = 'draft'
return context
| from rest_framework import permissions as drf_permissions, exceptions
from framework.auth.oauth_scopes import CoreScopes
from api.base import permissions as base_permissions
from api.base.pagination import DraftRegistrationContributorPagination
from api.draft_registrations.permissions import (
DraftContributorDetailPermissions,
IsContributorOrAdminContributor,
IsAdminContributor,
)
from api.draft_registrations.serializers import (
DraftRegistrationSerializer,
DraftRegistrationDetailSerializer,
DraftRegistrationContributorsSerializer,
DraftRegistrationContributorDetailSerializer,
DraftRegistrationContributorsCreateSerializer,
)
from api.nodes.views import (
NodeDraftRegistrationsList,
NodeDraftRegistrationDetail,
NodeInstitutionsList,
NodeInstitutionsRelationship,
NodeContributorsList,
NodeContributorDetail,
DraftMixin,
)
from api.nodes.permissions import ContributorOrPublic, AdminDeletePermissions
from api.subjects.views import SubjectRelationshipBaseView, BaseResourceSubjectsList
from osf.models import DraftRegistrationContributor
class DraftRegistrationMixin(DraftMixin):
"""
Old DraftMixin was built under the assumption that a node was provided from the start.
All permission checking went through the node, not the draft.
New draft registration endpoints do permission checking on the draft registration.
"""
# Overrides DraftMixin
def check_branched_from(self, draft):
# We do not have to check the branched_from relationship. node_id is not a kwarg
return
# Overrides DraftMixin
def check_resource_permissions(self, resource):
# Checks permissions on draft_registration, regardless of whether or not
# draft_registration is branched off of a node
return self.check_object_permissions(self.request, resource)
class DraftRegistrationList(NodeDraftRegistrationsList):
permission_classes = (
IsContributorOrAdminContributor,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-list'
# overrides NodeDraftRegistrationList
def get_serializer_class(self):
return DraftRegistrationSerializer
# overrides NodeDraftRegistrationList
def get_queryset(self):
user = self.request.user
if user.is_anonymous:
raise exceptions.NotAuthenticated()
# Returns DraftRegistrations for which a user is a contributor
return user.draft_registrations_active
class DraftRegistrationDetail(NodeDraftRegistrationDetail, DraftRegistrationMixin):
permission_classes = (
IsContributorOrAdminContributor,
AdminDeletePermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-detail'
# overrides NodeDraftRegistrationDetail
def get_serializer_class(self):
return DraftRegistrationDetailSerializer
class DraftInstitutionsList(NodeInstitutionsList, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.INSTITUTION_READ, CoreScopes.DRAFT_REGISTRATIONS_READ]
view_category = 'draft_registrations'
view_name = 'draft-registration-institutions'
# Overrides NodeInstitutionsList
def get_resource(self):
return self.get_draft()
class DraftInstitutionsRelationship(NodeInstitutionsRelationship, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-relationships-institutions'
# Overrides NodeInstitutionsRelationship
def get_resource(self):
return self.get_draft(check_object_permissions=False)
class DraftSubjectsList(BaseResourceSubjectsList, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.DRAFT_REGISTRATIONS_READ]
view_category = 'draft_registrations'
view_name = 'draft-registration-subjects'
def get_resource(self):
# Overrides BaseResourceSubjectsList
return self.get_draft()
class DraftSubjectsRelationship(SubjectRelationshipBaseView, DraftRegistrationMixin):
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.DRAFT_REGISTRATIONS_WRITE]
view_category = 'draft_registrations'
view_name = 'draft-registration-relationships-subjects'
ordering = ('-id',)
def get_resource(self, check_object_permissions=True):
# Overrides SubjectRelationshipBaseView
return self.get_draft(check_object_permissions=check_object_permissions)
class DraftContributorsList(NodeContributorsList, DraftRegistrationMixin):
permission_classes = (
IsAdminContributor,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
pagination_class = DraftRegistrationContributorPagination
required_read_scopes = [CoreScopes.DRAFT_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.DRAFT_REGISTRATIONS_WRITE]
view_category = 'draft_registrations'
view_name = 'draft-registration-contributors'
serializer_class = DraftRegistrationContributorsSerializer
def get_default_queryset(self):
# Overrides NodeContributorsList
draft = self.get_draft()
return draft.draftregistrationcontributor_set.all().include('user__guids')
# overrides NodeContributorsList
def get_serializer_class(self):
if self.request.method in ('PUT', 'PATCH', 'DELETE'):
return DraftRegistrationContributorDetailSerializer
elif self.request.method == 'POST':
return DraftRegistrationContributorsCreateSerializer
else:
return DraftRegistrationContributorsSerializer
def get_resource(self):
return self.get_draft()
# Overrides NodeContributorsList
def get_serializer_context(self):
context = super().get_serializer_context()
context['resource'] = self.get_resource()
context['default_email'] = 'draft_registration'
return context
class DraftContributorDetail(NodeContributorDetail, DraftRegistrationMixin):
permission_classes = (
DraftContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
view_category = 'draft_registrations'
view_name = 'draft-registration-contributor-detail'
serializer_class = DraftRegistrationContributorDetailSerializer
required_read_scopes = [CoreScopes.DRAFT_CONTRIBUTORS_READ]
required_write_scopes = [CoreScopes.DRAFT_CONTRIBUTORS_WRITE]
def get_resource(self):
return self.get_draft()
# overrides RetrieveAPIView
def get_object(self):
draft_registration = self.get_draft()
user = self.get_user()
# May raise a permission denied
self.check_object_permissions(self.request, user)
try:
return draft_registration.draftregistrationcontributor_set.get(user=user)
except DraftRegistrationContributor.DoesNotExist:
raise exceptions.NotFound('{} cannot be found in the list of contributors.'.format(user))
def get_serializer_context(self):
context = super().get_serializer_context()
context['resource'] = self.get_draft()
context['default_email'] = 'draft'
return context
| en | 0.773921 | Old DraftMixin was built under the assumption that a node was provided from the start. All permission checking went through the node, not the draft. New draft registration endpoints do permission checking on the draft registration. # Overrides DraftMixin # We do not have to check the branched_from relationship. node_id is not a kwarg # Overrides DraftMixin # Checks permissions on draft_registration, regardless of whether or not # draft_registration is branched off of a node # overrides NodeDraftRegistrationList # overrides NodeDraftRegistrationList # Returns DraftRegistrations for which a user is a contributor # overrides NodeDraftRegistrationDetail # Overrides NodeInstitutionsList # Overrides NodeInstitutionsRelationship # Overrides BaseResourceSubjectsList # Overrides SubjectRelationshipBaseView # Overrides NodeContributorsList # overrides NodeContributorsList # Overrides NodeContributorsList # overrides RetrieveAPIView # May raise a permission denied | 1.933683 | 2 |
parse_lnk.py | cccs-rs/assemblyline-service-characterize | 1 | 6625481 | import struct
from assemblyline.common.str_utils import safe_str
LinkFlags_def = ['HasLinkTargetIDList',
'HasLinkInfo',
'HasName',
'HasRelativePath',
'HasWorkingDir',
'HasArguments',
'HasIconLocation',
'IsUnicode',
'ForceNoLinkInfo',
'HasExpString',
'RunInSeparateProcess',
'Unused1',
'HasDarwinID',
'RunAsUser',
'HasExpIcon',
'NoPidlAlias',
'Unused2',
'RunWithShimLayer',
'ForceNoLinkTrack',
'EnableTargetMetadata',
'DisableLinkPathTracking',
'DisableKnownFolderTracking',
'DisableKnownFolderAlias',
'AllowLinkToLink',
'UnaliasOnSave',
'PreferEnvironmentPath',
'KeepLocalIDListForUNCTarget']
FileAttributes_def = ['FILE_ATTRIBUTE_READONLY',
'FILE_ATTRIBUTE_HIDDEN',
'FILE_ATTRIBUTE_SYSTEM',
'Reserved1',
'FILE_ATTRIBUTE_DIRECTORY',
'FILE_ATTRIBUTE_ARCHIVE',
'Reserved2',
'FILE_ATTRIBUTE_NORMAL',
'FILE_ATTRIBUTE_TEMPORARY',
'FILE_ATTRIBUTE_SPARSE_FILE',
'FILE_ATTRIBUTE_REPARSE_POINT',
'FILE_ATTRIBUTE_COMPRESSED',
'FILE_ATTRIBUTE_OFFLINE',
'FILE_ATTRIBUTE_NOT_CONTENT_INDEXED',
'FILE_ATTRIBUTE_ENCRYPTED']
LinkInfoFlags_def = ['VolumeIDAndLocalBasePath',
'CNRLAndPathSuffix']
CNRLFlags_def = ['ValidDevice',
'ValidNetType']
NetworkProviderType_enum = {
0x001A0000: 'WNNC_NET_AVID',
0x001B0000: 'WNNC_NET_DOCUSPACE',
0x001C0000: 'WNNC_NET_MANGOSOFT',
0x001D0000: 'WNNC_NET_SERNET',
0X001E0000: 'WNNC_NET_RIVERFRONT1',
0x001F0000: 'WNNC_NET_RIVERFRONT2',
0x00200000: 'WNNC_NET_DECORB',
0x00210000: 'WNNC_NET_PROTSTOR',
0x00220000: 'WNNC_NET_FJ_REDIR',
0x00230000: 'WNNC_NET_DISTINCT',
0x00240000: 'WNNC_NET_TWINS',
0x00250000: 'WNNC_NET_RDR2SAMPLE',
0x00260000: 'WNNC_NET_CSC',
0x00270000: 'WNNC_NET_3IN1',
0x00290000: 'WNNC_NET_EXTENDNET',
0x002A0000: 'WNNC_NET_STAC',
0x002B0000: 'WNNC_NET_FOXBAT',
0x002C0000: 'WNNC_NET_YAHOO',
0x002D0000: 'WNNC_NET_EXIFS',
0x002E0000: 'WNNC_NET_DAV',
0x002F0000: 'WNNC_NET_KNOWARE',
0x00300000: 'WNNC_NET_OBJECT_DIRE',
0x00310000: 'WNNC_NET_MASFAX',
0x00320000: 'WNNC_NET_HOB_NFS',
0x00330000: 'WNNC_NET_SHIVA',
0x00340000: 'WNNC_NET_IBMAL',
0x00350000: 'WNNC_NET_LOCK',
0x00360000: 'WNNC_NET_TERMSRV',
0x00370000: 'WNNC_NET_SRT',
0x00380000: 'WNNC_NET_QUINCY',
0x00390000: 'WNNC_NET_OPENAFS',
0X003A0000: 'WNNC_NET_AVID1',
0x003B0000: 'WNNC_NET_DFS',
0x003C0000: 'WNNC_NET_KWNP',
0x003D0000: 'WNNC_NET_ZENWORKS',
0x003E0000: 'WNNC_NET_DRIVEONWEB',
0x003F0000: 'WNNC_NET_VMWARE',
0x00400000: 'WNNC_NET_RSFX',
0x00410000: 'WNNC_NET_MFILES',
0x00420000: 'WNNC_NET_MS_NFS',
0x00430000: 'WNNC_NET_GOOGLE',
None: 'INVALID'
}
showCommand_enum = {
0x1: 'SW_SHOWNORMAL',
0x3: 'SW_SHOWMAXIMIZED',
0x7: 'SW_SHOWMINNOACTIVE',
None: 'SW_SHOWNORMAL'
}
def parse_bitmask(mask_def, mask):
i = 0
out = []
while mask != 0:
if mask & 1:
try:
out.append(mask_def[i])
except IndexError:
pass
mask >>= 1
i += 1
return out
def parse_enumeration(enum_def, val):
if val not in enum_def:
return enum_def[None]
else:
return enum_def[val]
def parse_pstr(data, is_utf16):
n_len, = struct.unpack('<H', data[:2])
if is_utf16:
n_len *= 2
out_str = data[2: 2 + n_len]
if is_utf16:
out_str = out_str.decode('utf-16')
data = data[2 + n_len:]
return data, out_str
def extract_value(data, offset, end=b'\x00', is_utf16=True):
value = data[offset:].split(end, 1)[0]
if is_utf16:
return value.decode("utf-16", errors='ignore')
else:
return safe_str(value)
def decode_lnk(lnk, parse_tidlist=False):
""" See MS-SHLLINK """
try:
metadata = {}
headersize, linkclsid, link_flags, file_atributes, ctime, atime, mtime, \
fsize, icon_index, show_command, hot_key, \
r1, r2, r3 = struct.unpack('<I16sIIQQQIIIHHII', lnk[:76])
if headersize != 76 or linkclsid != b'\x01\x14\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00F':
return None
show_command = parse_enumeration(showCommand_enum, show_command)
link_flags = parse_bitmask(LinkFlags_def, link_flags)
file_atributes = parse_bitmask(FileAttributes_def, file_atributes)
metadata['showCommand'] = show_command
metadata['linkFlags'] = link_flags
metadata['fileAtributes'] = file_atributes
lnk = lnk[76:]
is_utf16 = 'IsUnicode' in link_flags
if 'HasLinkTargetIDList' in link_flags:
ltid_len, = struct.unpack('<H', lnk[:2])
link_target_id_list = lnk[2:ltid_len+2]
lnk = lnk[ltid_len+2:]
if parse_tidlist:
# The spec doesn't give a clear indication of why this is needed.
# So I've made it optional and disabled by default.
id_list = [[]]
while link_target_id_list:
if link_target_id_list[0:2] == b'\x00\x00':
id_list.append([])
link_target_id_list = link_target_id_list[2:]
else:
itm_size, = struct.unpack('<H', link_target_id_list[0:2])
id_list[-1].append(link_target_id_list[2:itm_size])
link_target_id_list = link_target_id_list[itm_size:]
id_list.pop(-1)
metadata['IDList'] = id_list
if 'HasLinkInfo' in link_flags:
link_info_size, link_info_header_size, link_info_flags, volume_id_offset, local_base_path_offset, \
cnrl_offset, common_path_suffix_offset = struct.unpack('<IIIIIII', lnk[:28])
link_info = lnk[:link_info_size]
lnk = lnk[link_info_size:]
link_info_flags = parse_bitmask(LinkInfoFlags_def, link_info_flags)
if 'VolumeIDAndLocalBasePath' in link_info_flags:
vid = {}
volume_id_size, drive_type, drive_serial_number, volume_label_offset, volume_label_offset_unicode = \
struct.unpack('<IIIII', link_info[volume_id_offset:volume_id_offset+20])
vid['DriveType'] = ['DRIVE_UNKNOWN', 'DRIVE_NO_ROOT_DIR', 'DRIVE_REMOVABLE', 'DRIVE_FIXED',
'DRIVE_REMOTE', 'DRIVE_CDROM', 'DRIVE_RAMDISK'][drive_type]
vid['DriveSerialNumber'] = drive_serial_number
vid['VolumeLabel'] = extract_value(link_info, volume_id_offset + volume_label_offset, is_utf16=is_utf16)
if volume_label_offset == 0x14:
vid['VolumeLabelUnicode'] = extract_value(link_info,
volume_id_offset + volume_label_offset_unicode,
end=b'\x00\x00', is_utf16=is_utf16)
metadata['BasePath'] = extract_value(link_info, local_base_path_offset, is_utf16=False)
metadata['VolumeID'] = vid
if 'CNRLAndPathSuffix' in link_info_flags:
cnrlo = {}
cnrl_size, cnrl_flags, net_name_offset, device_name_offset, \
network_provider_type = struct.unpack("<IIIII", link_info[cnrl_offset:cnrl_offset+20])
cnrl_flags = parse_bitmask(CNRLFlags_def, cnrl_flags)
metadata['NetName'] = extract_value(link_info, cnrl_offset + net_name_offset, is_utf16=is_utf16)
if 'ValidDevice' in cnrl_flags:
cnrlo['DeviceName'] = extract_value(link_info, cnrl_offset + device_name_offset, is_utf16=is_utf16)
if 'ValidNetType' in cnrl_flags:
cnrlo['NetworkProviderType'] = parse_enumeration(NetworkProviderType_enum, network_provider_type)
if cnrl_size > 0x14:
net_name_offset_unicode, device_name_offset_unicode = \
struct.unpack("<II", link_info[cnrl_offset + 20:cnrl_offset + 28])
cnrlo['NetNameUnicode'] = extract_value(link_info, cnrl_offset + net_name_offset_unicode,
end=b'\x00\x00', is_utf16=is_utf16)
cnrlo['DeviceNameUnicode'] = extract_value(link_info, cnrl_offset + device_name_offset_unicode,
end=b'\x00\x00', is_utf16=is_utf16)
metadata['CommonNetworkRelativeLink'] = cnrlo
# String data
if 'HasName' in link_flags:
lnk, metadata['NAME_STRING'] = parse_pstr(lnk, is_utf16)
if 'HasRelativePath' in link_flags:
lnk, metadata['RELATIVE_PATH'] = parse_pstr(lnk, is_utf16)
if 'HasWorkingDir' in link_flags:
lnk, metadata['WORKING_DIR'] = parse_pstr(lnk, is_utf16)
if 'HasArguments' in link_flags:
lnk, metadata['COMMAND_LINE_ARGUMENTS'] = parse_pstr(lnk, is_utf16)
if 'HasIconLocation' in link_flags:
lnk, metadata['ICON_LOCATION'] = parse_pstr(lnk, is_utf16)
# Note: there is technically an "ExtraData" block after the strings.
# But I couldn't find anything in them that was worth parsing out.
return metadata
except struct.error:
# Not enough bytes in the file
return None
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'rb') as fh:
print(decode_lnk(fh.read()))
| import struct
from assemblyline.common.str_utils import safe_str
LinkFlags_def = ['HasLinkTargetIDList',
'HasLinkInfo',
'HasName',
'HasRelativePath',
'HasWorkingDir',
'HasArguments',
'HasIconLocation',
'IsUnicode',
'ForceNoLinkInfo',
'HasExpString',
'RunInSeparateProcess',
'Unused1',
'HasDarwinID',
'RunAsUser',
'HasExpIcon',
'NoPidlAlias',
'Unused2',
'RunWithShimLayer',
'ForceNoLinkTrack',
'EnableTargetMetadata',
'DisableLinkPathTracking',
'DisableKnownFolderTracking',
'DisableKnownFolderAlias',
'AllowLinkToLink',
'UnaliasOnSave',
'PreferEnvironmentPath',
'KeepLocalIDListForUNCTarget']
FileAttributes_def = ['FILE_ATTRIBUTE_READONLY',
'FILE_ATTRIBUTE_HIDDEN',
'FILE_ATTRIBUTE_SYSTEM',
'Reserved1',
'FILE_ATTRIBUTE_DIRECTORY',
'FILE_ATTRIBUTE_ARCHIVE',
'Reserved2',
'FILE_ATTRIBUTE_NORMAL',
'FILE_ATTRIBUTE_TEMPORARY',
'FILE_ATTRIBUTE_SPARSE_FILE',
'FILE_ATTRIBUTE_REPARSE_POINT',
'FILE_ATTRIBUTE_COMPRESSED',
'FILE_ATTRIBUTE_OFFLINE',
'FILE_ATTRIBUTE_NOT_CONTENT_INDEXED',
'FILE_ATTRIBUTE_ENCRYPTED']
LinkInfoFlags_def = ['VolumeIDAndLocalBasePath',
'CNRLAndPathSuffix']
CNRLFlags_def = ['ValidDevice',
'ValidNetType']
NetworkProviderType_enum = {
0x001A0000: 'WNNC_NET_AVID',
0x001B0000: 'WNNC_NET_DOCUSPACE',
0x001C0000: 'WNNC_NET_MANGOSOFT',
0x001D0000: 'WNNC_NET_SERNET',
0X001E0000: 'WNNC_NET_RIVERFRONT1',
0x001F0000: 'WNNC_NET_RIVERFRONT2',
0x00200000: 'WNNC_NET_DECORB',
0x00210000: 'WNNC_NET_PROTSTOR',
0x00220000: 'WNNC_NET_FJ_REDIR',
0x00230000: 'WNNC_NET_DISTINCT',
0x00240000: 'WNNC_NET_TWINS',
0x00250000: 'WNNC_NET_RDR2SAMPLE',
0x00260000: 'WNNC_NET_CSC',
0x00270000: 'WNNC_NET_3IN1',
0x00290000: 'WNNC_NET_EXTENDNET',
0x002A0000: 'WNNC_NET_STAC',
0x002B0000: 'WNNC_NET_FOXBAT',
0x002C0000: 'WNNC_NET_YAHOO',
0x002D0000: 'WNNC_NET_EXIFS',
0x002E0000: 'WNNC_NET_DAV',
0x002F0000: 'WNNC_NET_KNOWARE',
0x00300000: 'WNNC_NET_OBJECT_DIRE',
0x00310000: 'WNNC_NET_MASFAX',
0x00320000: 'WNNC_NET_HOB_NFS',
0x00330000: 'WNNC_NET_SHIVA',
0x00340000: 'WNNC_NET_IBMAL',
0x00350000: 'WNNC_NET_LOCK',
0x00360000: 'WNNC_NET_TERMSRV',
0x00370000: 'WNNC_NET_SRT',
0x00380000: 'WNNC_NET_QUINCY',
0x00390000: 'WNNC_NET_OPENAFS',
0X003A0000: 'WNNC_NET_AVID1',
0x003B0000: 'WNNC_NET_DFS',
0x003C0000: 'WNNC_NET_KWNP',
0x003D0000: 'WNNC_NET_ZENWORKS',
0x003E0000: 'WNNC_NET_DRIVEONWEB',
0x003F0000: 'WNNC_NET_VMWARE',
0x00400000: 'WNNC_NET_RSFX',
0x00410000: 'WNNC_NET_MFILES',
0x00420000: 'WNNC_NET_MS_NFS',
0x00430000: 'WNNC_NET_GOOGLE',
None: 'INVALID'
}
showCommand_enum = {
0x1: 'SW_SHOWNORMAL',
0x3: 'SW_SHOWMAXIMIZED',
0x7: 'SW_SHOWMINNOACTIVE',
None: 'SW_SHOWNORMAL'
}
def parse_bitmask(mask_def, mask):
i = 0
out = []
while mask != 0:
if mask & 1:
try:
out.append(mask_def[i])
except IndexError:
pass
mask >>= 1
i += 1
return out
def parse_enumeration(enum_def, val):
if val not in enum_def:
return enum_def[None]
else:
return enum_def[val]
def parse_pstr(data, is_utf16):
n_len, = struct.unpack('<H', data[:2])
if is_utf16:
n_len *= 2
out_str = data[2: 2 + n_len]
if is_utf16:
out_str = out_str.decode('utf-16')
data = data[2 + n_len:]
return data, out_str
def extract_value(data, offset, end=b'\x00', is_utf16=True):
value = data[offset:].split(end, 1)[0]
if is_utf16:
return value.decode("utf-16", errors='ignore')
else:
return safe_str(value)
def decode_lnk(lnk, parse_tidlist=False):
""" See MS-SHLLINK """
try:
metadata = {}
headersize, linkclsid, link_flags, file_atributes, ctime, atime, mtime, \
fsize, icon_index, show_command, hot_key, \
r1, r2, r3 = struct.unpack('<I16sIIQQQIIIHHII', lnk[:76])
if headersize != 76 or linkclsid != b'\x01\x14\x02\x00\x00\x00\x00\x00\xc0\x00\x00\x00\x00\x00\x00F':
return None
show_command = parse_enumeration(showCommand_enum, show_command)
link_flags = parse_bitmask(LinkFlags_def, link_flags)
file_atributes = parse_bitmask(FileAttributes_def, file_atributes)
metadata['showCommand'] = show_command
metadata['linkFlags'] = link_flags
metadata['fileAtributes'] = file_atributes
lnk = lnk[76:]
is_utf16 = 'IsUnicode' in link_flags
if 'HasLinkTargetIDList' in link_flags:
ltid_len, = struct.unpack('<H', lnk[:2])
link_target_id_list = lnk[2:ltid_len+2]
lnk = lnk[ltid_len+2:]
if parse_tidlist:
# The spec doesn't give a clear indication of why this is needed.
# So I've made it optional and disabled by default.
id_list = [[]]
while link_target_id_list:
if link_target_id_list[0:2] == b'\x00\x00':
id_list.append([])
link_target_id_list = link_target_id_list[2:]
else:
itm_size, = struct.unpack('<H', link_target_id_list[0:2])
id_list[-1].append(link_target_id_list[2:itm_size])
link_target_id_list = link_target_id_list[itm_size:]
id_list.pop(-1)
metadata['IDList'] = id_list
if 'HasLinkInfo' in link_flags:
link_info_size, link_info_header_size, link_info_flags, volume_id_offset, local_base_path_offset, \
cnrl_offset, common_path_suffix_offset = struct.unpack('<IIIIIII', lnk[:28])
link_info = lnk[:link_info_size]
lnk = lnk[link_info_size:]
link_info_flags = parse_bitmask(LinkInfoFlags_def, link_info_flags)
if 'VolumeIDAndLocalBasePath' in link_info_flags:
vid = {}
volume_id_size, drive_type, drive_serial_number, volume_label_offset, volume_label_offset_unicode = \
struct.unpack('<IIIII', link_info[volume_id_offset:volume_id_offset+20])
vid['DriveType'] = ['DRIVE_UNKNOWN', 'DRIVE_NO_ROOT_DIR', 'DRIVE_REMOVABLE', 'DRIVE_FIXED',
'DRIVE_REMOTE', 'DRIVE_CDROM', 'DRIVE_RAMDISK'][drive_type]
vid['DriveSerialNumber'] = drive_serial_number
vid['VolumeLabel'] = extract_value(link_info, volume_id_offset + volume_label_offset, is_utf16=is_utf16)
if volume_label_offset == 0x14:
vid['VolumeLabelUnicode'] = extract_value(link_info,
volume_id_offset + volume_label_offset_unicode,
end=b'\x00\x00', is_utf16=is_utf16)
metadata['BasePath'] = extract_value(link_info, local_base_path_offset, is_utf16=False)
metadata['VolumeID'] = vid
if 'CNRLAndPathSuffix' in link_info_flags:
cnrlo = {}
cnrl_size, cnrl_flags, net_name_offset, device_name_offset, \
network_provider_type = struct.unpack("<IIIII", link_info[cnrl_offset:cnrl_offset+20])
cnrl_flags = parse_bitmask(CNRLFlags_def, cnrl_flags)
metadata['NetName'] = extract_value(link_info, cnrl_offset + net_name_offset, is_utf16=is_utf16)
if 'ValidDevice' in cnrl_flags:
cnrlo['DeviceName'] = extract_value(link_info, cnrl_offset + device_name_offset, is_utf16=is_utf16)
if 'ValidNetType' in cnrl_flags:
cnrlo['NetworkProviderType'] = parse_enumeration(NetworkProviderType_enum, network_provider_type)
if cnrl_size > 0x14:
net_name_offset_unicode, device_name_offset_unicode = \
struct.unpack("<II", link_info[cnrl_offset + 20:cnrl_offset + 28])
cnrlo['NetNameUnicode'] = extract_value(link_info, cnrl_offset + net_name_offset_unicode,
end=b'\x00\x00', is_utf16=is_utf16)
cnrlo['DeviceNameUnicode'] = extract_value(link_info, cnrl_offset + device_name_offset_unicode,
end=b'\x00\x00', is_utf16=is_utf16)
metadata['CommonNetworkRelativeLink'] = cnrlo
# String data
if 'HasName' in link_flags:
lnk, metadata['NAME_STRING'] = parse_pstr(lnk, is_utf16)
if 'HasRelativePath' in link_flags:
lnk, metadata['RELATIVE_PATH'] = parse_pstr(lnk, is_utf16)
if 'HasWorkingDir' in link_flags:
lnk, metadata['WORKING_DIR'] = parse_pstr(lnk, is_utf16)
if 'HasArguments' in link_flags:
lnk, metadata['COMMAND_LINE_ARGUMENTS'] = parse_pstr(lnk, is_utf16)
if 'HasIconLocation' in link_flags:
lnk, metadata['ICON_LOCATION'] = parse_pstr(lnk, is_utf16)
# Note: there is technically an "ExtraData" block after the strings.
# But I couldn't find anything in them that was worth parsing out.
return metadata
except struct.error:
# Not enough bytes in the file
return None
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'rb') as fh:
print(decode_lnk(fh.read()))
| en | 0.972905 | See MS-SHLLINK # The spec doesn't give a clear indication of why this is needed. # So I've made it optional and disabled by default. # String data # Note: there is technically an "ExtraData" block after the strings. # But I couldn't find anything in them that was worth parsing out. # Not enough bytes in the file | 1.437974 | 1 |
src/harness/reference_models/tools/examples/fss_pointing_test.py | NSF-Swift/Spectrum-Access-System | 58 | 6625482 | # Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import fss_pointing as fp
# Tests based on https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf
# which uses a slightly different SPHERICAL_GSO_CST than FCC 05-56
fp._GSO_SPHERICAL_CST = 0.1508 # instead of 0.1512
class TestAntenna(unittest.TestCase):
def test_gso_elevation(self):
# From https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf
# Table 1
self.assertAlmostEqual(fp.GsoElevation(0, 50, 50), 90)
self.assertAlmostEqual(fp.GsoElevation(5, 50, 50), 84.1139, 3)
self.assertAlmostEqual(fp.GsoElevation(25, 50, 50), 60.7782, 3)
self.assertAlmostEqual(fp.GsoElevation(55, 50, 50), 27.2990, 3)
self.assertAlmostEqual(fp.GsoElevation(80, 50, 50), 1.3291, 3)
# Table 2
self.assertAlmostEqual(fp.GsoElevation(45, 0, 30), 30.2785, 3)
self.assertAlmostEqual(fp.GsoElevation(45, 0, 75), 1.8768, 3)
self.assertAlmostEqual(fp.GsoElevation(45, 0, 77.6865), 0, 3)
self.assertAlmostEqual(fp.GsoElevation(45, 0, -60), 12.2299, 3)
def test_gso_azimuth(self):
# From: https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf
# Table 2
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 0), 180)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 10), 165.9981, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 30), 140.7685, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 75), 100.7286, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 77.6865), 98.7743, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, -60), 247.7923, 4)
def test_gso_pointings(self):
# From above, shall be +-30degree visibility
pointings = fp.GsoPossiblePointings(45, 0,
west_elevation_limit=30.2785,
east_elevation_limit=30.2785)
sat_lon_slots = range(-29, 30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 0, lon)
azi = fp.GsoAzimuth(45, 0, lon)
self.assertIn((azi, elev), pointings)
# Same with further limitation in sat_orbit
pointings = fp.GsoPossiblePointings(45, 0,
west_elevation_limit=30.2785,
east_elevation_limit=30.2785,
west_sat_lon=-21,
east_sat_lon=40)
sat_lon_slots = range(-21, 30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 0, lon)
azi = fp.GsoAzimuth(45, 0, lon)
self.assertIn((azi, elev), pointings)
# Same with further limitation in sat_orbit and azimuth
pointings = fp.GsoPossiblePointings(45, 0,
east_elevation_limit=30.2785,
west_sat_lon=-21,
east_sat_lon=40,
west_azimuth_limit=166,
east_azimuth_limit=140.7685)
sat_lon_slots = range(11, 30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 0, lon)
azi = fp.GsoAzimuth(45, 0, lon)
self.assertIn((azi, elev), pointings)
# Check the 180 degree crossover
pointings = fp.GsoPossiblePointings(45, 180,
west_sat_lon=150,
east_sat_lon=-150)
sat_lon_slots = range(180-29, 180+30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 180, lon)
azi = fp.GsoAzimuth(45, 180, lon)
self.assertIn((azi, elev), pointings)
if __name__ == '__main__':
unittest.main()
| # Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import fss_pointing as fp
# Tests based on https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf
# which uses a slightly different SPHERICAL_GSO_CST than FCC 05-56
fp._GSO_SPHERICAL_CST = 0.1508 # instead of 0.1512
class TestAntenna(unittest.TestCase):
def test_gso_elevation(self):
# From https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf
# Table 1
self.assertAlmostEqual(fp.GsoElevation(0, 50, 50), 90)
self.assertAlmostEqual(fp.GsoElevation(5, 50, 50), 84.1139, 3)
self.assertAlmostEqual(fp.GsoElevation(25, 50, 50), 60.7782, 3)
self.assertAlmostEqual(fp.GsoElevation(55, 50, 50), 27.2990, 3)
self.assertAlmostEqual(fp.GsoElevation(80, 50, 50), 1.3291, 3)
# Table 2
self.assertAlmostEqual(fp.GsoElevation(45, 0, 30), 30.2785, 3)
self.assertAlmostEqual(fp.GsoElevation(45, 0, 75), 1.8768, 3)
self.assertAlmostEqual(fp.GsoElevation(45, 0, 77.6865), 0, 3)
self.assertAlmostEqual(fp.GsoElevation(45, 0, -60), 12.2299, 3)
def test_gso_azimuth(self):
# From: https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf
# Table 2
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 0), 180)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 10), 165.9981, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 30), 140.7685, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 75), 100.7286, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, 77.6865), 98.7743, 4)
self.assertAlmostEqual(fp.GsoAzimuth(45, 0, -60), 247.7923, 4)
def test_gso_pointings(self):
# From above, shall be +-30degree visibility
pointings = fp.GsoPossiblePointings(45, 0,
west_elevation_limit=30.2785,
east_elevation_limit=30.2785)
sat_lon_slots = range(-29, 30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 0, lon)
azi = fp.GsoAzimuth(45, 0, lon)
self.assertIn((azi, elev), pointings)
# Same with further limitation in sat_orbit
pointings = fp.GsoPossiblePointings(45, 0,
west_elevation_limit=30.2785,
east_elevation_limit=30.2785,
west_sat_lon=-21,
east_sat_lon=40)
sat_lon_slots = range(-21, 30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 0, lon)
azi = fp.GsoAzimuth(45, 0, lon)
self.assertIn((azi, elev), pointings)
# Same with further limitation in sat_orbit and azimuth
pointings = fp.GsoPossiblePointings(45, 0,
east_elevation_limit=30.2785,
west_sat_lon=-21,
east_sat_lon=40,
west_azimuth_limit=166,
east_azimuth_limit=140.7685)
sat_lon_slots = range(11, 30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 0, lon)
azi = fp.GsoAzimuth(45, 0, lon)
self.assertIn((azi, elev), pointings)
# Check the 180 degree crossover
pointings = fp.GsoPossiblePointings(45, 180,
west_sat_lon=150,
east_sat_lon=-150)
sat_lon_slots = range(180-29, 180+30, 2)
self.assertEqual(len(pointings), len(sat_lon_slots))
for lon in sat_lon_slots:
elev = fp.GsoElevation(45, 180, lon)
azi = fp.GsoAzimuth(45, 180, lon)
self.assertIn((azi, elev), pointings)
if __name__ == '__main__':
unittest.main()
| en | 0.810415 | # Copyright 2017 SAS Project Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Tests based on https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf # which uses a slightly different SPHERICAL_GSO_CST than FCC 05-56 # instead of 0.1512 # From https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf # Table 1 # Table 2 # From: https://www.ngs.noaa.gov/CORS/Articles/SolerEisemannJSE.pdf # Table 2 # From above, shall be +-30degree visibility # Same with further limitation in sat_orbit # Same with further limitation in sat_orbit and azimuth # Check the 180 degree crossover | 1.611472 | 2 |
tests/small/test_forge.py | durandj/mymcadmin | 0 | 6625483 | """
Tests for the Forge related functions
"""
import os
import os.path
import unittest
import unittest.mock
import nose
import requests
from mymcadmin.errors import ForgeError
from mymcadmin.forge import (
get_forge_for_mc_version,
get_forge_mc_versions,
get_forge_version,
)
class TestForge(unittest.TestCase):
    """
    Tests for the Forge related functions.

    All network traffic is mocked out: ``requests.get`` is patched to
    return canned HTML pages, and ``mymcadmin.utils.download_file`` is
    patched so nothing is ever written to disk.
    """
    @unittest.mock.patch('requests.get')
    def test_get_forge_mc_versions(self, requests_get):
        """
        Tests that we can retrieve a list of all the MC versions Forge supports
        """
        # Fake a successful response carrying a stripped-down copy of the
        # version list markup from the Forge front page.
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = True
        mock_response.content = """
        <html>
            <body>
                <div class="versions">
                    <ul class="links">
                        <li class="li-version-list">
                            <span>1.8</span>
                            <div class="versions-info">
                                <ul class="text">
                                    <li class="li-version-list-current">
                                        1.8.9
                                    </li>
                                    <li>
                                        <a href="http://example.com/1.8.8">
                                            1.8.8
                                        </a>
                                    </li>
                                </ul>
                            </div>
                        </li>
                        <li class="li-version-list">
                            <span>1.7</span>
                            <div class="versions-info">
                                <ul class="text">
                                    <li>
                                        <a href="http://example.com/1.7.10">
                                            1.7.10
                                        </a>
                                    </li>
                                    <li>
                                        <a href="http://example.com/1.7.2">
                                            1.7.2
                                        </a>
                                    </li>
                                </ul>
                            </div>
                        </li>
                    </ul>
                </ul>
            </body>
        </html>
        """
        requests_get.return_value = mock_response
        versions = get_forge_mc_versions()
        requests_get.assert_called_with(
            'http://files.minecraftforge.net/',
        )
        # Expect every version flattened in page order, including the
        # "current" entry, which is not wrapped in an <a> tag.
        self.assertListEqual(
            [
                '1.8.9',
                '1.8.8',
                '1.7.10',
                '1.7.2',
            ],
            versions,
            'Version list did not match expected',
        )
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    def test_forge_mc_versions_network(self, requests_get):
        """
        Tests that we handle when we can't get to the Forge site:
        a non-OK HTTP response must raise a ForgeError.
        """
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = False
        requests_get.return_value = mock_response
        get_forge_mc_versions()
    # pylint: enable=no-self-use
    def test_get_forge_version(self):
        """
        Tests that we can get the correct Forge jar by version
        (download into the current working directory).
        """
        self._do_forge_version()
    def test_get_forge_version_path(self):
        """
        Tests that we can get the right Forge version and put it at the path
        """
        self._do_forge_version('home')
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_bad_mc(self, versions):
        """
        Tests that we handle when the given Minecraft version is bad
        """
        # With no supported Minecraft versions at all, '1.8.9' is invalid.
        versions.return_value = []
        get_forge_version('1.8.9', '10.10.10.10')
    # pylint: enable=no-self-use
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_bad_forge(self, versions, requests_get):
        """
        Tests that we handle when the given Forge version is bad
        """
        # patch decorators apply bottom-up: `versions` is the
        # get_forge_mc_versions mock, `requests_get` is requests.get.
        versions.return_value = ['1.8.9']
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = True
        # The sample page only lists Forge build 10.10.10.10, so asking
        # for 20.20.20.20 below must fail.
        mock_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')
        requests_get.return_value = mock_response
        get_forge_version('1.8.9', '20.20.20.20')
    # pylint: enable=no-self-use
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_network1(self, versions, requests_get):
        """
        Tests that we handle when there's a network problem getting the list page
        """
        versions.return_value = ['1.8.9']
        # The very first HTTP request (the version index page) fails.
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = False
        requests_get.return_value = mock_response
        get_forge_version('1.8.9', '20.20.20.20')
    # pylint: enable=no-self-use
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_network2(self, versions, requests_get):
        """
        Tests that we handle when there's a network problem getting the jar
        """
        versions.return_value = ['1.8.9']
        mock_list_response = unittest.mock.Mock(spec = requests.Response)
        mock_list_response.ok = True
        mock_list_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')
        mock_jar_response = unittest.mock.Mock(spec = requests.Response)
        mock_jar_response.ok = False
        # First request (version page) succeeds, second (the jar) fails.
        requests_get.side_effect = [
            mock_list_response,
            mock_jar_response,
        ]
        get_forge_version('1.8.9', '10.10.10.10')
    # pylint: enable=no-self-use
    def test_get_forge_for_mc_latest(self):
        """
        Tests that we can get the latest Forge jar by Minecraft version
        """
        self._do_forge_for_mc('LATEST')
    def test_get_forge_for_mc_recommend(self):
        """
        Tests that we can get the recommended Forge jar by Minecraft version
        """
        self._do_forge_for_mc('RECOMMENDED')
    def test_get_forge_for_mc_path(self):
        """
        Tests that we can get the correct Forge version and put it at the path
        """
        self._do_forge_for_mc('LATEST', 'home')
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    def test_get_forge_for_mc_network1(self, requests_get):
        """
        Tests that we handle when there's a networking problem getting the list
        """
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = False
        requests_get.return_value = mock_response
        get_forge_for_mc_version('1.8.9')
    # pylint: enable=no-self-use
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_for_mc_network2(self, versions, requests_get):
        """
        Tests that we handle when there's a networking problem getting the jar
        """
        versions.return_value = ['1.8.9']
        mock_list_response = unittest.mock.Mock(spec = requests.Response)
        mock_list_response.ok = True
        mock_list_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')
        mock_jar_response = unittest.mock.Mock(spec = requests.Response)
        mock_jar_response.ok = False
        # First request (version page) succeeds, second (the jar) fails.
        requests_get.side_effect = [
            mock_list_response,
            mock_jar_response,
        ]
        get_forge_for_mc_version('1.8.9')
    # pylint: enable=no-self-use
    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_for_mc_bad_ver(self, forge_versions):
        """
        Tests that we handle when an unsupported MC version is given
        """
        forge_versions.return_value = []
        get_forge_for_mc_version('1.8.9')
    # pylint: enable=no-self-use
    # pylint: disable=no-self-use
    def _do_forge_for_mc(self, release, path = None):
        """
        Shared driver for the get_forge_for_mc_version() tests.

        ``release`` is the promo tag ('LATEST' or 'RECOMMENDED')
        substituted into the sample downloads page; ``path``, when given,
        is the directory the jars should land in (defaults to the current
        working directory).
        """
        root = path if path is not None else os.getcwd()
        version_id = '1.8.9'
        with unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions') as forge_versions, \
                unittest.mock.patch('requests.get') as requests_get, \
                unittest.mock.patch('mymcadmin.utils.download_file') as download_file:
            forge_versions.return_value = ['1.8.9']
            mock_version_response = unittest.mock.Mock(spec = requests.Response)
            mock_version_response.ok = True
            mock_version_response.content = SAMPLE_DOWNLOADS_PAGE.format(release)
            mock_inst_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_inst_jar_response.ok = True
            mock_uni_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_uni_jar_response.ok = True
            # requests.get is expected to be hit three times, in this
            # order: the version index page, the installer jar, then the
            # universal jar.
            requests_get.side_effect = [
                mock_version_response,
                mock_inst_jar_response,
                mock_uni_jar_response,
            ]
            inst_jar_path, uni_jar_path = get_forge_for_mc_version(
                version_id,
                path = path,
            )
            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-installer.jar'),
                inst_jar_path,
                'Installer path did not match expected',
            )
            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-universal.jar'),
                uni_jar_path,
                'Jar path did not match expected',
            )
            # pylint: disable=line-too-long
            requests_get.assert_has_calls(
                [
                    unittest.mock.call(
                        'http://files.minecraftforge.net/maven/net/minecraftforge/forge/index_1.8.9.html',
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar',
                        stream = True,
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar',
                        stream = True,
                    ),
                ]
            )
            # pylint: enable=line-too-long
            # The SHA1 values parsed from the sample page must be handed
            # to download_file for checksum verification.
            download_file.assert_has_calls(
                [
                    unittest.mock.call(
                        mock_inst_jar_response,
                        inst_jar_path,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    ),
                    unittest.mock.call(
                        mock_uni_jar_response,
                        uni_jar_path,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    )
                ]
            )
    def _do_forge_version(self, path = None):
        """
        Shared driver for the get_forge_version() tests.

        Requests Forge build 10.10.10.10 for Minecraft 1.8.9; ``path``,
        when given, is the directory the jars should land in (defaults to
        the current working directory).
        """
        root = path if path is not None else os.getcwd()
        version_id = '1.8.9'
        with unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions') as forge_versions, \
                unittest.mock.patch('requests.get') as requests_get, \
                unittest.mock.patch('mymcadmin.utils.download_file') as download_file:
            forge_versions.return_value = ['1.8.9']
            mock_version_response = unittest.mock.Mock(spec = requests.Response)
            mock_version_response.ok = True
            mock_version_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')
            mock_inst_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_inst_jar_response.ok = True
            mock_uni_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_uni_jar_response.ok = True
            # requests.get is expected to be hit three times, in this
            # order: the version index page, the installer jar, then the
            # universal jar.
            requests_get.side_effect = [
                mock_version_response,
                mock_inst_jar_response,
                mock_uni_jar_response,
            ]
            inst_jar, uni_jar = get_forge_version(
                version_id,
                '10.10.10.10',
                path = path,
            )
            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-installer.jar'),
                inst_jar,
                'Installer path did not match expected',
            )
            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-universal.jar'),
                uni_jar,
                'Jar path did not match expected',
            )
            # pylint: disable=line-too-long
            requests_get.assert_has_calls(
                [
                    unittest.mock.call(
                        'http://files.minecraftforge.net/maven/net/minecraftforge/forge/index_1.8.9.html',
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar',
                        stream = True,
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar',
                        stream = True,
                    ),
                ]
            )
            # pylint: enable=line-too-long
            # The SHA1 values parsed from the sample page must be handed
            # to download_file for checksum verification.
            download_file.assert_has_calls(
                [
                    unittest.mock.call(
                        mock_inst_jar_response,
                        inst_jar,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    ),
                    unittest.mock.call(
                        mock_uni_jar_response,
                        uni_jar,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    ),
                ]
            )
SAMPLE_DOWNLOADS_PAGE = """
<html>
<body>
<table class="downloadsTable">
<tbody>
<tr>
<th>Version</th>
<th>Time</th>
<th>Downloads</th>
</tr>
<tr>
<td>
<ul>
<li>
10.10.10.10
<a class="info-link tooltipstered" data-toggle="popup" style="cursor:default;">
<i class="fa fa-start promo-{}"></i>
</a>
</li>
</ul>
</td>
<td>01/01/2016 00:00:00 AM</td>
<td>
<ul>
<li>
Changelog
</li>
<li>
<a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar">
<i class="fa fa-save classifier-installer"></i>
Installer
</a>
<div class="info">
<strong>MD5:</strong>
deadbeef
<strong>SHA1:</strong>
943a702d06f34599aee1f8da8ef9f7296031d699
<br>
<a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar">
(Direct Download)
</a>
</div>
</li>
<li>
Installer-win
</li>
<li>
MDK
</li>
<li>
<a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar">
<i class="fa fa-save classifier-universal"></i>
Universal
</a>
<div class="info">
<strong>MD5:</strong>
1b0aed33d51dbcacbe6440fa8998f9e6<br>
<strong>SHA1:</strong>
943a702d06f34599aee1f8da8ef9f7296031d699
<br>
<a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar">
(Direct Download)
</a>
</div>
</li>
</ul>
</td>
</tr>
</tbody>
</table>
</body>
</html>
"""
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| """
Tests for the Forge related functions
"""
import os
import os.path
import unittest
import unittest.mock
import nose
import requests
from mymcadmin.errors import ForgeError
from mymcadmin.forge import (
get_forge_for_mc_version,
get_forge_mc_versions,
get_forge_version,
)
class TestForge(unittest.TestCase):
    """
    Tests for the Forge related functions

    Network access is always mocked: ``requests.get`` is patched with canned
    ``requests.Response`` mocks, and jar downloads go through a patched
    ``mymcadmin.utils.download_file``.
    """
    @unittest.mock.patch('requests.get')
    def test_get_forge_mc_versions(self, requests_get):
        """
        Tests that we can retrieve a list of all the MC versions Forge supports
        """
        # Canned HTML mimicking the Forge front page version sidebar.  The
        # currently selected version (1.8.9) is a plain <li>; the others are
        # links.  The scraper must return all of them, newest first.
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = True
        mock_response.content = """
        <html>
            <body>
                <div class="versions">
                    <ul class="links">
                        <li class="li-version-list">
                            <span>1.8</span>
                            <div class="versions-info">
                                <ul class="text">
                                    <li class="li-version-list-current">
                                        1.8.9
                                    </li>
                                    <li>
                                        <a href="http://example.com/1.8.8">
                                            1.8.8
                                        </a>
                                    </li>
                                </ul>
                            </div>
                        </li>
                        <li class="li-version-list">
                            <span>1.7</span>
                            <div class="versions-info">
                                <ul class="text">
                                    <li>
                                        <a href="http://example.com/1.7.10">
                                            1.7.10
                                        </a>
                                    </li>
                                    <li>
                                        <a href="http://example.com/1.7.2">
                                            1.7.2
                                        </a>
                                    </li>
                                </ul>
                            </div>
                        </li>
                    </ul>
                </div>
            </body>
        </html>
        """

        requests_get.return_value = mock_response

        versions = get_forge_mc_versions()

        requests_get.assert_called_with(
            'http://files.minecraftforge.net/',
        )

        self.assertListEqual(
            [
                '1.8.9',
                '1.8.8',
                '1.7.10',
                '1.7.2',
            ],
            versions,
            'Version list did not match expected',
        )

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    def test_forge_mc_versions_network(self, requests_get):
        """
        Tests that we handle when we can't get to the Forge site
        """
        # A non-ok response must surface as a ForgeError (checked by the
        # nose.tools.raises decorator).
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = False

        requests_get.return_value = mock_response

        get_forge_mc_versions()
    # pylint: enable=no-self-use

    def test_get_forge_version(self):
        """
        Tests that we can get the correct Forge jar by version
        """
        self._do_forge_version()

    def test_get_forge_version_path(self):
        """
        Tests that we can get the right Forge version and put it at the path
        """
        self._do_forge_version('home')

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_bad_mc(self, versions):
        """
        Tests that we handle when the given Minecraft version is bad
        """
        # No supported MC versions at all -> requested one cannot match.
        versions.return_value = []

        get_forge_version('1.8.9', '10.10.10.10')
    # pylint: enable=no-self-use

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_bad_forge(self, versions, requests_get):
        """
        Tests that we handle when the given Forge version is bad
        """
        # The downloads page only lists Forge 10.10.10.10, so requesting
        # 20.20.20.20 must raise.
        versions.return_value = ['1.8.9']

        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = True
        mock_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')

        requests_get.return_value = mock_response

        get_forge_version('1.8.9', '20.20.20.20')
    # pylint: enable=no-self-use

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_network1(self, versions, requests_get):
        """
        Tests that we handle when there's a network problem getting the list page
        """
        versions.return_value = ['1.8.9']

        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = False

        requests_get.return_value = mock_response

        get_forge_version('1.8.9', '20.20.20.20')
    # pylint: enable=no-self-use

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_ver_network2(self, versions, requests_get):
        """
        Tests that we handle when there's a network problem getting the jar
        """
        versions.return_value = ['1.8.9']

        # First request (listing page) succeeds, second (jar) fails.
        mock_list_response = unittest.mock.Mock(spec = requests.Response)
        mock_list_response.ok = True
        mock_list_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')

        mock_jar_response = unittest.mock.Mock(spec = requests.Response)
        mock_jar_response.ok = False

        requests_get.side_effect = [
            mock_list_response,
            mock_jar_response,
        ]

        get_forge_version('1.8.9', '10.10.10.10')
    # pylint: enable=no-self-use

    def test_get_forge_for_mc_latest(self):
        """
        Tests that we can get the latest Forge jar by Minecraft version
        """
        self._do_forge_for_mc('LATEST')

    def test_get_forge_for_mc_recommend(self):
        """
        Tests that we can get the recommended Forge jar by Minecraft version
        """
        self._do_forge_for_mc('RECOMMENDED')

    def test_get_forge_for_mc_path(self):
        """
        Tests that we can get the correct Forge version and put it at the path
        """
        self._do_forge_for_mc('LATEST', 'home')

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    def test_get_forge_for_mc_network1(self, requests_get):
        """
        Tests that we handle when there's a networking problem getting the list
        """
        mock_response = unittest.mock.Mock(spec = requests.Response)
        mock_response.ok = False

        requests_get.return_value = mock_response

        get_forge_for_mc_version('1.8.9')
    # pylint: enable=no-self-use

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('requests.get')
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_for_mc_network2(self, versions, requests_get):
        """
        Tests that we handle when there's a networking problem getting the jar
        """
        versions.return_value = ['1.8.9']

        mock_list_response = unittest.mock.Mock(spec = requests.Response)
        mock_list_response.ok = True
        mock_list_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')

        mock_jar_response = unittest.mock.Mock(spec = requests.Response)
        mock_jar_response.ok = False

        requests_get.side_effect = [
            mock_list_response,
            mock_jar_response,
        ]

        get_forge_for_mc_version('1.8.9')
    # pylint: enable=no-self-use

    # pylint: disable=no-self-use
    @nose.tools.raises(ForgeError)
    @unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions')
    def test_get_forge_for_mc_bad_ver(self, forge_versions):
        """
        Tests that we handle when an unsupported MC version is given
        """
        forge_versions.return_value = []

        get_forge_for_mc_version('1.8.9')
    # pylint: enable=no-self-use

    def _do_forge_for_mc(self, release, path = None):
        # Shared driver: run get_forge_for_mc_version() end to end with
        # mocked network/downloads for the given promo type ('LATEST' or
        # 'RECOMMENDED'), then verify the requested URLs, returned jar paths
        # and the SHA1 passed to download_file.
        root = path if path is not None else os.getcwd()
        version_id = '1.8.9'

        with unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions') as forge_versions, \
                unittest.mock.patch('requests.get') as requests_get, \
                unittest.mock.patch('mymcadmin.utils.download_file') as download_file:
            forge_versions.return_value = ['1.8.9']

            mock_version_response = unittest.mock.Mock(spec = requests.Response)
            mock_version_response.ok = True
            mock_version_response.content = SAMPLE_DOWNLOADS_PAGE.format(release)

            mock_inst_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_inst_jar_response.ok = True

            mock_uni_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_uni_jar_response.ok = True

            # Responses are consumed in request order: listing page, then
            # installer jar, then universal jar.
            requests_get.side_effect = [
                mock_version_response,
                mock_inst_jar_response,
                mock_uni_jar_response,
            ]

            inst_jar_path, uni_jar_path = get_forge_for_mc_version(
                version_id,
                path = path,
            )

            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-installer.jar'),
                inst_jar_path,
                'Installer path did not match expected',
            )

            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-universal.jar'),
                uni_jar_path,
                'Jar path did not match expected',
            )

            # pylint: disable=line-too-long
            requests_get.assert_has_calls(
                [
                    unittest.mock.call(
                        'http://files.minecraftforge.net/maven/net/minecraftforge/forge/index_1.8.9.html',
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar',
                        stream = True,
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar',
                        stream = True,
                    ),
                ]
            )
            # pylint: enable=line-too-long

            download_file.assert_has_calls(
                [
                    unittest.mock.call(
                        mock_inst_jar_response,
                        inst_jar_path,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    ),
                    unittest.mock.call(
                        mock_uni_jar_response,
                        uni_jar_path,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    )
                ]
            )

    def _do_forge_version(self, path = None):
        # Shared driver for get_forge_version(): same flow as
        # _do_forge_for_mc() but requesting an explicit Forge build number.
        root = path if path is not None else os.getcwd()
        version_id = '1.8.9'

        with unittest.mock.patch('mymcadmin.forge.get_forge_mc_versions') as forge_versions, \
                unittest.mock.patch('requests.get') as requests_get, \
                unittest.mock.patch('mymcadmin.utils.download_file') as download_file:
            forge_versions.return_value = ['1.8.9']

            mock_version_response = unittest.mock.Mock(spec = requests.Response)
            mock_version_response.ok = True
            mock_version_response.content = SAMPLE_DOWNLOADS_PAGE.format('LATEST')

            mock_inst_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_inst_jar_response.ok = True

            mock_uni_jar_response = unittest.mock.Mock(spec = requests.Response)
            mock_uni_jar_response.ok = True

            requests_get.side_effect = [
                mock_version_response,
                mock_inst_jar_response,
                mock_uni_jar_response,
            ]

            inst_jar, uni_jar = get_forge_version(
                version_id,
                '10.10.10.10',
                path = path,
            )

            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-installer.jar'),
                inst_jar,
                'Installer path did not match expected',
            )

            self.assertEqual(
                os.path.join(root, 'forge-1.8.9-10.10.10.10-universal.jar'),
                uni_jar,
                'Jar path did not match expected',
            )

            # pylint: disable=line-too-long
            requests_get.assert_has_calls(
                [
                    unittest.mock.call(
                        'http://files.minecraftforge.net/maven/net/minecraftforge/forge/index_1.8.9.html',
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar',
                        stream = True,
                    ),
                    unittest.mock.call(
                        'http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar',
                        stream = True,
                    ),
                ]
            )
            # pylint: enable=line-too-long

            download_file.assert_has_calls(
                [
                    unittest.mock.call(
                        mock_inst_jar_response,
                        inst_jar,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    ),
                    unittest.mock.call(
                        mock_uni_jar_response,
                        uni_jar,
                        '943a702d06f34599aee1f8da8ef9f7296031d699',
                    ),
                ]
            )
# Canned copy of a Forge downloads page used as the mocked HTTP response in
# the tests above.  The ``{}`` placeholder in ``promo-{}`` is filled via
# ``str.format`` with the promo type ('LATEST' or 'RECOMMENDED') so tests can
# exercise both markers.  Both jars share the SHA1 that the tests assert on.
SAMPLE_DOWNLOADS_PAGE = """
<html>
    <body>
        <table class="downloadsTable">
            <tbody>
                <tr>
                    <th>Version</th>
                    <th>Time</th>
                    <th>Downloads</th>
                </tr>
                <tr>
                    <td>
                        <ul>
                            <li>
                                10.10.10.10
                                <a class="info-link tooltipstered" data-toggle="popup" style="cursor:default;">
                                    <i class="fa fa-start promo-{}"></i>
                                </a>
                            </li>
                        </ul>
                    </td>
                    <td>01/01/2016 00:00:00 AM</td>
                    <td>
                        <ul>
                            <li>
                                Changelog
                            </li>
                            <li>
                                <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar">
                                    <i class="fa fa-save classifier-installer"></i>
                                    Installer
                                </a>
                                <div class="info">
                                    <strong>MD5:</strong>
                                    deadbeef
                                    <strong>SHA1:</strong>
                                    943a702d06f34599aee1f8da8ef9f7296031d699
                                    <br>
                                    <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar">
                                        (Direct Download)
                                    </a>
                                </div>
                            </li>
                            <li>
                                Installer-win
                            </li>
                            <li>
                                MDK
                            </li>
                            <li>
                                <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar">
                                    <i class="fa fa-save classifier-universal"></i>
                                    Universal
                                </a>
                                <div class="info">
                                    <strong>MD5:</strong>
                                    1b0aed33d51dbcacbe6440fa8998f9e6<br>
                                    <strong>SHA1:</strong>
                                    943a702d06f34599aee1f8da8ef9f7296031d699
                                    <br>
                                    <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar">
                                        (Direct Download)
                                    </a>
                                </div>
                            </li>
                        </ul>
                    </td>
                </tr>
            </tbody>
        </table>
    </body>
</html>
"""

if __name__ == '__main__':
    unittest.main()
| en | 0.487434 | Tests for the Forge related functions Tests for the Forge related functions Tests that we can retrieve a list of all the MC versions Forge supports <html> <body> <div class="versions"> <ul class="links"> <li class="li-version-list"> <span>1.8</span> <div class="versions-info"> <ul class="text"> <li class="li-version-list-current"> 1.8.9 </li> <li> <a href="http://example.com/1.8.8"> 1.8.8 </a> </li> </ul> </div> </li> <li class="li-version-list"> <span>1.7</span> <div class="versions-info"> <ul class="text"> <li> <a href="http://example.com/1.7.10"> 1.7.10 </a> </li> <li> <a href="http://example.com/1.7.2"> 1.7.2 </a> </li> </ul> </div> </li> </ul> </div> </body> </html> # pylint: disable=no-self-use Tests that we handle when we can't get to the Forge site # pylint: enable=no-self-use Tests that we can get the correct Forge jar by version Tests that we can get the right Forge version and put it at the path # pylint: disable=no-self-use Tests that we handle when the given Minecraft version is bad # pylint: enable=no-self-use # pylint: disable=no-self-use Tests that we handle when the given Forge version is bad # pylint: enable=no-self-use # pylint: disable=no-self-use Tests that we handle when theres a network problem getting the list page # pylint: enable=no-self-use # pylint: disable=no-self-use Tests that we handle when theres a network problem getting the jar # pylint: enable=no-self-use Tests that we can get the latest Forge jar by Minecraft version Tests that we can get the recommended Forge jar by Minecraft version Tests that we can get the correct Forge version and put it at the path # pylint: disable=no-self-use Tests that we handle when there's a networking problem getting the list # pylint: enable=no-self-use # pylint: disable=no-self-use Tests that we handle when there's a networking problem getting the jar # pylint: enable=no-self-use # pylint: disable=no-self-use Tests that we handle when an unsupported MC version is given # pylint: 
disable=no-self-use # pylint: disable=line-too-long # pylint: enable=line-too-long # pylint: disable=line-too-long # pylint: enable=line-too-long <html> <body> <table class="downloadsTable"> <tbody> <tr> <th>Version</th> <th>Time</th> <th>Downloads</th> </tr> <tr> <td> <ul> <li> 10.10.10.10 <a class="info-link tooltipstered" data-toggle="popup" style="cursor:default;"> <i class="fa fa-start promo-{}"></i> </a> </li> </ul> </td> <td>01/01/2016 00:00:00 AM</td> <td> <ul> <li> Changelog </li> <li> <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar"> <i class="fa fa-save classifier-installer"></i> Installer </a> <div class="info"> <strong>MD5:</strong> deadbeef <strong>SHA1:</strong> 943a702d06f34599aee1f8da8ef9f7296031d699 <br> <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-installer.jar"> (Direct Download) </a> </div> </li> <li> Installer-win </li> <li> MDK </li> <li> <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar"> <i class="fa fa-save classifier-universal"></i> Universal </a> <div class="info"> <strong>MD5:</strong> 1b0aed33d51dbcacbe6440fa8998f9e6<br> <strong>SHA1:</strong> 943a702d06f34599aee1f8da8ef9f7296031d699 <br> <a href="http://example.com/10.10.10.10/forge-1.8.9-10.10.10.10-universal.jar"> (Direct Download) </a> </div> </li> </ul> </td> </tr> </tbody> </table> </body> </html> | 2.536054 | 3 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 15:09:26 2019

@author: asabater

Builds YOLO-style annotation files from the Pascal VOC 2007/2012 XML
annotations.  Each output line is:

    <relative/image/path> xmin,ymin,xmax,ymax,class_idx [xmin,ymin,...] ...

The dataset is shuffled and split 80/20 into train/val lists, and the class
name list is written alongside them.
"""

import os
from tqdm import tqdm
import xml.etree.ElementTree
import random

classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
           "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
           "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# O(1) class-name lookup instead of calling list.index() per object (O(n)).
class_to_idx = {name: idx for idx, name in enumerate(classes)}

# %%

annotations = []
for voc_set in ['2007', '2012']:
    base_path = 'VOCdevkit/VOC{}/JPEGImages/'.format(voc_set)
    annotations_path = '/mnt/hdd/datasets/VOC/VOCdevkit/VOC{}/Annotations/'.format(voc_set)
    frames = [annotations_path + f for f in os.listdir(annotations_path)]

    for fr in tqdm(frames, total=len(frames)):
        root = xml.etree.ElementTree.parse(fr).getroot()
        fr_name = base_path + root.find('filename').text

        objs = root.findall('object')
        if len(objs) == 0:
            # Image with no labelled objects: skip it entirely.
            print('Skipping {}: no annotated objects'.format(fr))
            continue

        boxes = []
        for obj in objs:
            obj_name = obj.find('name').text
            bbx = obj.find('bndbox')
            # Coordinates may be stored as floats in the XML; truncate to int.
            xmin = int(float(bbx.find('xmin').text))
            ymin = int(float(bbx.find('ymin').text))
            xmax = int(float(bbx.find('xmax').text))
            ymax = int(float(bbx.find('ymax').text))
            boxes.append('{},{},{},{},{}'.format(xmin, ymin, xmax, ymax, class_to_idx[obj_name]))

        annotations.append(fr_name + ' ' + ' '.join(boxes))

# %%

# Random 80/20 train/val split over the shuffled annotation list.
random.shuffle(annotations)
val_perc = 0.2
annotations_train = annotations[int(len(annotations)*val_perc):]
annotations_val = annotations[:int(len(annotations)*val_perc)]

with open('annotations_voc_train.txt', 'w') as f:
    for l in annotations_train:
        f.write(l + '\n')

with open('annotations_voc_val.txt', 'w') as f:
    for l in annotations_val:
        f.write(l + '\n')

with open('voc_classes.txt', 'w') as f:
    for l in classes:
        f.write(l + '\n')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 15:09:26 2019

@author: asabater
"""

import os
from tqdm import tqdm
import xml.etree.ElementTree
import random

# The 20 Pascal VOC classes; the position of a name is the class index
# written into the annotation files.
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
           "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
           "pottedplant", "sheep", "sofa", "train", "tvmonitor"]

# %%

#annotations_train = []
#annotations_val = []
annotations = []
for voc_set in ['2007', '2012']:
    # with open('/mnt/hdd/datasets/VOC/{}_train.txt'.format(voc_set)) as f:
    #     frames_train = [ '/'.join(l.split('/')[-4:]) for l in f.read().splitlines() ]
    # with open('/mnt/hdd/datasets/VOC/{}_val.txt'.format(voc_set)) as f:
    #     frames_val = [ '/'.join(l.split('/')[-4:]) for l in f.read().splitlines() ]

    # Relative image path written to the annotation file vs. absolute path
    # where the XML annotations actually live on disk.
    base_path = 'VOCdevkit/VOC{}/JPEGImages/'.format(voc_set)
    annotations_path = '/mnt/hdd/datasets/VOC/VOCdevkit/VOC{}/Annotations/'.format(voc_set)
    frames = [ annotations_path + f for f in os.listdir(annotations_path) ]

    for fr in tqdm(frames, total=len(frames)):
        root = xml.etree.ElementTree.parse(fr).getroot()
        fr_name = base_path + root.find('filename').text

        objs = root.findall('object')
        if len(objs) == 0:
            # Image with no labelled objects: skip it.
            print('len==0')
            continue

        boxes = []
        for obj in objs:
            obj_name = obj.find('name').text
            bbx = obj.find('bndbox')
            # Coordinates may be stored as floats in the XML; truncate to int.
            xmin = int(float(bbx.find('xmin').text))
            ymin = int(float(bbx.find('ymin').text))
            xmax = int(float(bbx.find('xmax').text))
            ymax = int(float(bbx.find('ymax').text))
            boxes.append('{},{},{},{},{}'.format(xmin, ymin, xmax, ymax, classes.index(obj_name)))

        # if fr_name in frames_train:
        #     annotations_train.append(fr_name + ' ' + ' '.join(boxes))
        # elif fr_name in frames_val:
        #     annotations_train.append(fr_name + ' ' + ' '.join(boxes))
        # else:
        #     raise ValueError(fr_name)
        annotations.append(fr_name + ' ' + ' '.join(boxes))

# %%

# Random 80/20 train/val split over the shuffled annotation list.
random.shuffle(annotations)
val_perc = 0.2
annotations_train = annotations[int(len(annotations)*val_perc):]
annotations_val = annotations[:int(len(annotations)*val_perc)]

with open('annotations_voc_train.txt', 'w') as f:
    for l in annotations_train:
        f.write(l + '\n')

with open('annotations_voc_val.txt', 'w') as f:
    for l in annotations_val:
        f.write(l + '\n')

with open('voc_classes.txt', 'w') as f:
    for l in classes:
        f.write(l + '\n')
# slotting/forms.py
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
import numpy as np
from django.utils.safestring import mark_safe
import certifi
import urllib3
from botocore.client import Config
import boto3
from django.conf import settings
def read_array(urlname, dim):
    """Download a CSV file and parse it into a numpy float array.

    Args:
        urlname: URL of the uploaded CSV file (fetched over verified HTTPS).
        dim: expected dimensionality of the data, 1 or 2.

    Returns:
        numpy.ndarray of dtype float.  For ``dim == 1`` the values may be
        either one per line or comma-separated on a single line; for
        ``dim == 2`` each line is a comma-separated row.

    Raises:
        ValueError: if ``dim`` is not 1 or 2, or the data is not numeric.
    """
    http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where())
    r = http.request('GET', urlname)
    csvfile = r.data.decode('utf-8')
    if dim == 1:
        rel = csvfile.splitlines()
        # A single line means the values are comma-separated on one row.
        if len(rel) == 1:
            rel = csvfile.split(',')
    elif dim == 2:
        rel = [line.split(',') for line in csvfile.splitlines()]
    else:
        # Previously this fell through to an obscure NameError on ``rel``.
        raise ValueError('dim must be 1 or 2, got {!r}'.format(dim))
    # np.float was deprecated in numpy 1.20 and removed in 1.24; the builtin
    # float is the documented replacement (same 64-bit dtype).
    rel = np.array(rel, dtype=float)
    return rel
def delete_file(urlname):
    """Best-effort removal of an uploaded file from the S3 bucket.

    The S3 object key is taken from the last path component of ``urlname``.
    Any failure is reported and swallowed: cleaning up a temporary upload
    must never break form validation.
    """
    try:
        s3 = boto3.client('s3', 'us-east-2', config=Config(signature_version='s3v4'))
        S3_BUCKET = settings.AWS_STORAGE_BUCKET_NAME
        s3.delete_object(Bucket=S3_BUCKET, Key=urlname.split('/')[-1])
    # Narrowed from a bare ``except:`` which would also trap SystemExit and
    # KeyboardInterrupt; the best-effort intent only covers real errors.
    except Exception:
        print("Boto3 connection failing")
class SlotProfileDataForm(forms.Form):
    """Input form for the slot-profile sizing tool.

    Collects the scalar parameters plus two CSV uploads (pallet heights and
    inventory levels).  The files themselves are staged on S3 by the front
    end; the hidden ``hsurl``/``invsurl`` fields carry their URLs, and the
    ``clean_*`` methods download and validate them as numpy arrays.
    """
    # Number of distinct slot heights the optimizer may use.
    L = forms.IntegerField(min_value=2, max_value=4, label='Number of slot types', initial=3 )
    nskus = forms.IntegerField(min_value=10, max_value=1000, label='Number of skus', initial=100)
    # Entered as a percentage; converted to a fraction in clean_alpha().
    alpha = forms.DecimalField(min_value=50, max_value=99.99999, label=mark_safe(" Desired Storage Service Level (%) " +
        "<i class ='fa fa-question-circle' aria-hidden='true' title= 'Probability that in one day (or period of time) the storage area " +
        "can stow all pallets received.'"), initial=97.5)
    # Vertical clearance (inches) between pallet top and the beam above.
    b = forms.DecimalField(min_value=0, label= mark_safe("Vertical clearance within slot (inches) " +
        "<i class ='fa fa-question-circle' aria-hidden='true' title=" +
        "'Required space between the top of the pallet and the beam of the slot above'"), initial=4)
    M = forms.IntegerField(min_value=1, label='Pallet positions per slot', initial=2)
    # CSV upload: one pallet height per sku, same row order as ``invs``.
    hs = forms.FileField(label=mark_safe("Pallet height of each sku (inches) <i class='fa fa-question-circle' aria-hidden='true' title='Upload a csv file with one column and as many rows as skus, " +
        "such that the pallet height for SKU 1 is the cell on the first row of the column, the pallet height for SKU 2 is the cell on the second row of the column. " +
        "Do not include labels. Rows must be in the same order than in the file of inventory levels'></i>"),
        help_text=mark_safe("Download an <a href='/static/files/hs.csv'> example </a> with 100 skus"),
        widget=forms.FileInput(attrs={'accept': ".csv"}), required=False) #validators = [validators.validate_hs])
    # CSV upload: sku x time-period matrix of inventory levels (pallets).
    invs = forms.FileField(label=mark_safe("Inventory level of each sku <i class='fa fa-question-circle' aria-hidden='true' title='Upload a csv file with as" +
        " many rows as skus and as many columns as time-periods, such that the number of pallets of SKU 3 at period 5 is the cell on the third row and fifth column. " +
        " Do not include labels. Rows must be in the same order than in the file of pallet heights.'></i>"),
        help_text= mark_safe("Download an <a href='/static/files/invs.csv'> example </a> with 100 skus"),
        widget=forms.FileInput(attrs={'accept': ".csv"}), required=False)
    # Hidden fields populated by the client with the S3 URLs of the uploads.
    hsurl = forms.CharField(widget=forms.HiddenInput(), required=False)
    invsurl = forms.CharField(widget=forms.HiddenInput(), required=False)

    def clean_L(self):
        # Normalize to a plain int for downstream numeric code.
        return int(self.cleaned_data.get("L"))

    def clean_nskus(self):
        return int(self.cleaned_data.get("nskus"))

    def clean_alpha(self):
        # Convert the user-facing percentage to a probability in (0, 1).
        return float(self.cleaned_data.get("alpha")) / 100

    def clean_b(self):
        return float(self.cleaned_data.get("b"))

    def clean_M(self):
        return int(self.cleaned_data.get("M"))

    def clean_hsurl(self):
        """Download and validate the pallet-heights CSV; return a 1D array."""
        urlname = self.cleaned_data.get("hsurl")
        if urlname:
            try:
                hs = read_array(urlname, 1)
            except:
                raise ValidationError(
                    _(' The pallet heights file could not be read as an array of numbers'),
                )
            #delete_file(urlname)
            nskus = int(self.cleaned_data.get("nskus"))
            # Shape, sign and numeric checks against the declared sku count.
            if len(hs.shape) > 1:
                raise ValidationError(
                    _('The pallet heights file must be a one-dimensional array'),
                )
            elif hs.shape[0] != nskus:
                raise ValidationError(
                    _('There are {} pallet height, but {} skus'.format(str(hs.shape[0]), str(nskus))),
                )
            if np.min(hs) < 0:
                raise ValidationError(
                    _('There are negative pallet heights'),
                )
            if np.isnan(np.sum(hs)):
                raise ValidationError(
                    _('The pallet heights file have non-numeric characters'),
                )
        else:
            raise ValidationError(
                _(' Upload pallet heights file'),
            )
        return hs

    def clean_invsurl(self):
        """Download and validate the inventory-levels CSV; return a 2D array."""
        urlname = self.cleaned_data.get("invsurl")
        if urlname:
            try:
                invs = read_array(urlname, 2)
            except:
                raise ValidationError(
                    _('The inventory levels file could not be read as an 2D array of numbers'),
                )
            #delete_file(urlname)
            nskus = int(self.cleaned_data.get("nskus"))
            if len(invs.shape) != 2:
                raise ValidationError(
                    _('The inventory levels file must be a 2D array'),
                )
            elif invs.shape[0] != nskus:
                raise ValidationError(
                    _('There are {} rows of inventory levels, but {} skus'.format(str(invs.shape[0]), str(nskus))),
                )
            if np.min(invs) < 0:
                raise ValidationError(
                    _('There are negative inventory levels'),
                )
            if np.isnan(np.sum(invs)):
                raise ValidationError(
                    _('The inventory levels file have non-numeric characters'),
                )
        else:
            raise ValidationError(
                _(' Upload inventory levels file'),
            )
        return invs
| from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
import numpy as np
from django.utils.safestring import mark_safe
import certifi
import urllib3
from botocore.client import Config
import boto3
from django.conf import settings
def read_array(urlname, dim):
    """Download a CSV file and parse it into a numpy float array.

    Args:
        urlname: URL of the uploaded CSV file (fetched over verified HTTPS).
        dim: expected dimensionality of the data, 1 or 2.

    Returns:
        numpy.ndarray of dtype float.  For ``dim == 1`` the values may be
        either one per line or comma-separated on a single line; for
        ``dim == 2`` each line is a comma-separated row.

    Raises:
        ValueError: if ``dim`` is not 1 or 2, or the data is not numeric.
    """
    http = urllib3.PoolManager(
        cert_reqs='CERT_REQUIRED',
        ca_certs=certifi.where())
    r = http.request('GET', urlname)
    csvfile = r.data.decode('utf-8')
    if dim == 1:
        rel = csvfile.splitlines()
        # A single line means the values are comma-separated on one row.
        if len(rel) == 1:
            rel = csvfile.split(',')
    elif dim == 2:
        rel = [line.split(',') for line in csvfile.splitlines()]
    else:
        # Previously this fell through to an obscure NameError on ``rel``.
        raise ValueError('dim must be 1 or 2, got {!r}'.format(dim))
    # np.float was deprecated in numpy 1.20 and removed in 1.24; the builtin
    # float is the documented replacement (same 64-bit dtype).
    rel = np.array(rel, dtype=float)
    return rel
def delete_file(urlname):
    """Best-effort removal of an uploaded file from the S3 bucket.

    The S3 object key is taken from the last path component of ``urlname``.
    Any failure is reported and swallowed: cleaning up a temporary upload
    must never break form validation.
    """
    try:
        s3 = boto3.client('s3', 'us-east-2', config=Config(signature_version='s3v4'))
        S3_BUCKET = settings.AWS_STORAGE_BUCKET_NAME
        s3.delete_object(Bucket=S3_BUCKET, Key=urlname.split('/')[-1])
    # Narrowed from a bare ``except:`` which would also trap SystemExit and
    # KeyboardInterrupt; the best-effort intent only covers real errors.
    except Exception:
        print("Boto3 connection failing")
class SlotProfileDataForm(forms.Form):
    """Input form for the slot-profile sizing tool.

    Collects the scalar parameters plus two CSV uploads (pallet heights and
    inventory levels).  The files themselves are staged on S3 by the front
    end; the hidden ``hsurl``/``invsurl`` fields carry their URLs, and the
    ``clean_*`` methods download and validate them as numpy arrays.
    """
    # Number of distinct slot heights the optimizer may use.
    L = forms.IntegerField(min_value=2, max_value=4, label='Number of slot types', initial=3 )
    nskus = forms.IntegerField(min_value=10, max_value=1000, label='Number of skus', initial=100)
    # Entered as a percentage; converted to a fraction in clean_alpha().
    alpha = forms.DecimalField(min_value=50, max_value=99.99999, label=mark_safe(" Desired Storage Service Level (%) " +
        "<i class ='fa fa-question-circle' aria-hidden='true' title= 'Probability that in one day (or period of time) the storage area " +
        "can stow all pallets received.'"), initial=97.5)
    # Vertical clearance (inches) between pallet top and the beam above.
    b = forms.DecimalField(min_value=0, label= mark_safe("Vertical clearance within slot (inches) " +
        "<i class ='fa fa-question-circle' aria-hidden='true' title=" +
        "'Required space between the top of the pallet and the beam of the slot above'"), initial=4)
    M = forms.IntegerField(min_value=1, label='Pallet positions per slot', initial=2)
    # CSV upload: one pallet height per sku, same row order as ``invs``.
    hs = forms.FileField(label=mark_safe("Pallet height of each sku (inches) <i class='fa fa-question-circle' aria-hidden='true' title='Upload a csv file with one column and as many rows as skus, " +
        "such that the pallet height for SKU 1 is the cell on the first row of the column, the pallet height for SKU 2 is the cell on the second row of the column. " +
        "Do not include labels. Rows must be in the same order than in the file of inventory levels'></i>"),
        help_text=mark_safe("Download an <a href='/static/files/hs.csv'> example </a> with 100 skus"),
        widget=forms.FileInput(attrs={'accept': ".csv"}), required=False) #validators = [validators.validate_hs])
    # CSV upload: sku x time-period matrix of inventory levels (pallets).
    invs = forms.FileField(label=mark_safe("Inventory level of each sku <i class='fa fa-question-circle' aria-hidden='true' title='Upload a csv file with as" +
        " many rows as skus and as many columns as time-periods, such that the number of pallets of SKU 3 at period 5 is the cell on the third row and fifth column. " +
        " Do not include labels. Rows must be in the same order than in the file of pallet heights.'></i>"),
        help_text= mark_safe("Download an <a href='/static/files/invs.csv'> example </a> with 100 skus"),
        widget=forms.FileInput(attrs={'accept': ".csv"}), required=False)
    # Hidden fields populated by the client with the S3 URLs of the uploads.
    hsurl = forms.CharField(widget=forms.HiddenInput(), required=False)
    invsurl = forms.CharField(widget=forms.HiddenInput(), required=False)

    def clean_L(self):
        # Normalize to a plain int for downstream numeric code.
        return int(self.cleaned_data.get("L"))

    def clean_nskus(self):
        return int(self.cleaned_data.get("nskus"))

    def clean_alpha(self):
        # Convert the user-facing percentage to a probability in (0, 1).
        return float(self.cleaned_data.get("alpha")) / 100

    def clean_b(self):
        return float(self.cleaned_data.get("b"))

    def clean_M(self):
        return int(self.cleaned_data.get("M"))

    def clean_hsurl(self):
        """Download and validate the pallet-heights CSV; return a 1D array."""
        urlname = self.cleaned_data.get("hsurl")
        if urlname:
            try:
                hs = read_array(urlname, 1)
            except:
                raise ValidationError(
                    _(' The pallet heights file could not be read as an array of numbers'),
                )
            #delete_file(urlname)
            nskus = int(self.cleaned_data.get("nskus"))
            # Shape, sign and numeric checks against the declared sku count.
            if len(hs.shape) > 1:
                raise ValidationError(
                    _('The pallet heights file must be a one-dimensional array'),
                )
            elif hs.shape[0] != nskus:
                raise ValidationError(
                    _('There are {} pallet height, but {} skus'.format(str(hs.shape[0]), str(nskus))),
                )
            if np.min(hs) < 0:
                raise ValidationError(
                    _('There are negative pallet heights'),
                )
            if np.isnan(np.sum(hs)):
                raise ValidationError(
                    _('The pallet heights file have non-numeric characters'),
                )
        else:
            raise ValidationError(
                _(' Upload pallet heights file'),
            )
        return hs

    def clean_invsurl(self):
        """Download and validate the inventory-levels CSV; return a 2D array."""
        urlname = self.cleaned_data.get("invsurl")
        if urlname:
            try:
                invs = read_array(urlname, 2)
            except:
                raise ValidationError(
                    _('The inventory levels file could not be read as an 2D array of numbers'),
                )
            #delete_file(urlname)
            nskus = int(self.cleaned_data.get("nskus"))
            if len(invs.shape) != 2:
                raise ValidationError(
                    _('The inventory levels file must be a 2D array'),
                )
            elif invs.shape[0] != nskus:
                raise ValidationError(
                    _('There are {} rows of inventory levels, but {} skus'.format(str(invs.shape[0]), str(nskus))),
                )
            if np.min(invs) < 0:
                raise ValidationError(
                    _('There are negative inventory levels'),
                )
            if np.isnan(np.sum(invs)):
                raise ValidationError(
                    _('The inventory levels file have non-numeric characters'),
                )
        else:
            raise ValidationError(
                _(' Upload inventory levels file'),
            )
        return invs
# pages/intentpreview_test.py
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testing_config # Must be imported before the module under test.
from unittest import mock
import flask
import werkzeug
from pages import intentpreview
from internals import models
test_app = flask.Flask(__name__)
class IntentEmailPreviewHandlerTest(testing_config.CustomTestCase):
def setUp(self):
self.feature_1 = models.Feature(
name='feature one', summary='sum', category=1, visibility=1,
standardization=1, web_dev_views=1, impl_status_chrome=1,
intent_stage=models.INTENT_IMPLEMENT)
self.feature_1.put()
self.request_path = '/admin/features/launch/%d/%d?intent' % (
models.INTENT_SHIP, self.feature_1.key.integer_id())
self.handler = intentpreview.IntentEmailPreviewHandler()
def tearDown(self):
self.feature_1.key.delete()
def test_get__anon(self):
"""Anon cannot view this preview features, gets redirected to login."""
testing_config.sign_out()
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_response = self.handler.get_template_data(feature_id=feature_id)
self.assertEqual('302 FOUND', actual_response.status)
def test_get__no_existing(self):
"""Trying to view a feature that does not exist gives a 404."""
testing_config.sign_in('user<EMAIL>', 123567890)
bad_feature_id = self.feature_1.key.integer_id() + 1
with test_app.test_request_context(self.request_path):
with self.assertRaises(werkzeug.exceptions.NotFound):
self.handler.get_template_data(feature_id=bad_feature_id)
def test_get__no_stage_specified(self):
"""Allowed user can preview intent email for a feature using an old URL."""
request_path = (
'/admin/features/launch/%d?intent' % self.feature_1.key.integer_id())
testing_config.sign_in('<EMAIL>', 123567890)
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_data = self.handler.get_template_data(feature_id=feature_id)
self.assertIn('feature', actual_data)
self.assertEqual('feature one', actual_data['feature']['name'])
def test_get__normal(self):
"""Allowed user can preview intent email for a feature."""
testing_config.sign_in('<EMAIL>', 123567890)
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_data = self.handler.get_template_data(feature_id=feature_id)
self.assertIn('feature', actual_data)
self.assertEqual('feature one', actual_data['feature']['name'])
def test_get_page_data(self):
"""page_data has correct values."""
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
page_data = self.handler.get_page_data(
feature_id, self.feature_1, models.INTENT_IMPLEMENT)
self.assertEqual(
'http://localhost/feature/%d' % feature_id,
page_data['default_url'])
self.assertEqual(
['motivation'],
page_data['sections_to_show'])
self.assertEqual(
'Intent to Prototype',
page_data['subject_prefix'])
def test_compute_subject_prefix__incubate_new_feature(self):
"""We offer users the correct subject line for each intent stage."""
self.assertEqual(
'Intent stage "None"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_NONE))
self.assertEqual(
'Intent stage "Start incubating"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_INCUBATE))
self.assertEqual(
'Intent to Prototype',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_IMPLEMENT))
self.assertEqual(
'Ready for Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXPERIMENT))
self.assertEqual(
'Intent stage "Evaluate readiness to ship"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_IMPLEMENT_SHIP))
self.assertEqual(
'Intent to Experiment',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXTEND_TRIAL))
self.assertEqual(
'Intent to Ship',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_SHIP))
self.assertEqual(
'Intent to Extend Deprecation Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_REMOVED))
self.assertEqual(
'Intent stage "Shipped"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_SHIPPED))
self.assertEqual(
'Intent stage "Parked"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_PARKED))
def test_compute_subject_prefix__deprecate_feature(self):
"""We offer users the correct subject line for each intent stage."""
self.feature_1.feature_type = models.FEATURE_TYPE_DEPRECATION_ID
self.assertEqual(
'Intent stage "None"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_NONE))
self.assertEqual(
'Intent to Deprecate and Remove',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_INCUBATE))
self.assertEqual(
'Request for Deprecation Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXTEND_TRIAL))
| # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testing_config # Must be imported before the module under test.
from unittest import mock
import flask
import werkzeug
from pages import intentpreview
from internals import models
test_app = flask.Flask(__name__)
class IntentEmailPreviewHandlerTest(testing_config.CustomTestCase):
def setUp(self):
self.feature_1 = models.Feature(
name='feature one', summary='sum', category=1, visibility=1,
standardization=1, web_dev_views=1, impl_status_chrome=1,
intent_stage=models.INTENT_IMPLEMENT)
self.feature_1.put()
self.request_path = '/admin/features/launch/%d/%d?intent' % (
models.INTENT_SHIP, self.feature_1.key.integer_id())
self.handler = intentpreview.IntentEmailPreviewHandler()
def tearDown(self):
self.feature_1.key.delete()
def test_get__anon(self):
"""Anon cannot view this preview features, gets redirected to login."""
testing_config.sign_out()
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_response = self.handler.get_template_data(feature_id=feature_id)
self.assertEqual('302 FOUND', actual_response.status)
def test_get__no_existing(self):
"""Trying to view a feature that does not exist gives a 404."""
testing_config.sign_in('user<EMAIL>', 123567890)
bad_feature_id = self.feature_1.key.integer_id() + 1
with test_app.test_request_context(self.request_path):
with self.assertRaises(werkzeug.exceptions.NotFound):
self.handler.get_template_data(feature_id=bad_feature_id)
def test_get__no_stage_specified(self):
"""Allowed user can preview intent email for a feature using an old URL."""
request_path = (
'/admin/features/launch/%d?intent' % self.feature_1.key.integer_id())
testing_config.sign_in('<EMAIL>', 123567890)
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_data = self.handler.get_template_data(feature_id=feature_id)
self.assertIn('feature', actual_data)
self.assertEqual('feature one', actual_data['feature']['name'])
def test_get__normal(self):
"""Allowed user can preview intent email for a feature."""
testing_config.sign_in('<EMAIL>', 123567890)
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_data = self.handler.get_template_data(feature_id=feature_id)
self.assertIn('feature', actual_data)
self.assertEqual('feature one', actual_data['feature']['name'])
def test_get_page_data(self):
"""page_data has correct values."""
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
page_data = self.handler.get_page_data(
feature_id, self.feature_1, models.INTENT_IMPLEMENT)
self.assertEqual(
'http://localhost/feature/%d' % feature_id,
page_data['default_url'])
self.assertEqual(
['motivation'],
page_data['sections_to_show'])
self.assertEqual(
'Intent to Prototype',
page_data['subject_prefix'])
def test_compute_subject_prefix__incubate_new_feature(self):
"""We offer users the correct subject line for each intent stage."""
self.assertEqual(
'Intent stage "None"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_NONE))
self.assertEqual(
'Intent stage "Start incubating"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_INCUBATE))
self.assertEqual(
'Intent to Prototype',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_IMPLEMENT))
self.assertEqual(
'Ready for Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXPERIMENT))
self.assertEqual(
'Intent stage "Evaluate readiness to ship"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_IMPLEMENT_SHIP))
self.assertEqual(
'Intent to Experiment',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXTEND_TRIAL))
self.assertEqual(
'Intent to Ship',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_SHIP))
self.assertEqual(
'Intent to Extend Deprecation Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_REMOVED))
self.assertEqual(
'Intent stage "Shipped"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_SHIPPED))
self.assertEqual(
'Intent stage "Parked"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_PARKED))
def test_compute_subject_prefix__deprecate_feature(self):
"""We offer users the correct subject line for each intent stage."""
self.feature_1.feature_type = models.FEATURE_TYPE_DEPRECATION_ID
self.assertEqual(
'Intent stage "None"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_NONE))
self.assertEqual(
'Intent to Deprecate and Remove',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_INCUBATE))
self.assertEqual(
'Request for Deprecation Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXTEND_TRIAL)) | en | 0.842338 | # Copyright 2020 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Must be imported before the module under test. Anon cannot view this preview features, gets redirected to login. Trying to view a feature that does not exist gives a 404. Allowed user can preview intent email for a feature using an old URL. Allowed user can preview intent email for a feature. page_data has correct values. We offer users the correct subject line for each intent stage. We offer users the correct subject line for each intent stage. | 2.026979 | 2 |
saleor/graphql/package/schema.py | nlkhagva/saleor | 0 | 6625487 | import graphene
from ..core.fields import FilterInputConnectionField
from .bulk_mutations import GaduurBulkDelete, PackageBulkDelete, PackageBulkUstatus, PackageLineBulkDelete
from .mutations import GaduurCreate, GaduurDelete, GaduurUpdate, PackageCreate, PackageDelete, PackageUpdate, PackageLineDelete
from .resolvers import resolve_gaduur, resolve_gaduurs, resolve_package, resolve_packages, resolve_packageLines
from .sorters import GaduurSortingInput, PackageSortingInput, PackageLineSortingInput
from .types import Gaduur, Package, PackageLine
from .filters import GaduurFilterInput, PackageFilterInput, PackageLineFilterInput
class GaduurQueries(graphene.ObjectType):
#####################
#### gaduur dagavar
gaduur = graphene.Field(
Gaduur,
id=graphene.Argument(graphene.ID),
description="Lookup a page by ID.",
)
gaduurs = FilterInputConnectionField(
Gaduur,
sort_by=GaduurSortingInput(description="Sort pages."),
filter=GaduurFilterInput(description="Filtering options for pages."),
description="List of the gaduur's.",
)
def resolve_gaduur(self, info, id=None):
return resolve_gaduur(info, id)
def resolve_gaduurs(self, info, query=None, **_kwargs):
return resolve_gaduurs(info, query=query)
class GaduurMutations(graphene.ObjectType):
gaduur_create = GaduurCreate.Field()
gaduur_delete = GaduurDelete.Field()
gaduur_bulk_delete = GaduurBulkDelete.Field()
gaduur_update = GaduurUpdate.Field()
###################################
# PACKAGES
# class FlineCustom(CountableDjangoObjectType):
# order_id = graphene.Int()
# checked = graphene.Boolean()
# class Meta:
# description = "Represents line of the fulfillment."
# interfaces = [graphene.relay.Node]
# model = FulfillmentLineModel
# only_fields = ["id", "quantity", "ustatus", "changed_date", "soon_date"]
# @staticmethod
# def resolve_order_id(root: FulfillmentLineModel, _info):
# return root.order_line.order_id
# def resoleve_checked(root: FulfillmentLineModel, _info):
# return False
# class FlinesByAddress(graphene.ObjectType):
# address = graphene.Field(
# Address,
# description="Хүлээн авах хаяг"
# )
# lines = graphene.List(
# FlineCustom,
# description="custom fulfillmentline"
# )
class PackageQueries(graphene.ObjectType):
package = graphene.Field(
Package,
id=graphene.Argument(graphene.ID),
description="Look up a page by ID"
)
packages = FilterInputConnectionField(
Package,
sort_by=PackageSortingInput(description="sort packages"),
filter=PackageFilterInput(description="filtering options for package"),
description="List of the package"
)
packageLines = FilterInputConnectionField(
PackageLine,
sort_by=PackageLineSortingInput(description="filtering packageline"),
filter=PackageLineFilterInput(description="sort packageline"),
description="list of packageLine"
)
# flines_by_address = graphene.Field(
# FlinesByAddress,
# description="flines"
# )
# graphene.Node.to_global_id("ProductVariant", variant.id) for variant in variants
def resolve_package(self, info, id=None):
return resolve_package(info, id)
def resolve_packages(self, info, query=None, **_kwargs):
return resolve_packages(info, query=query)
def resolve_pacageLines(self, info, query=None, **_kwargs):
return resolve_packageLines(info, query=query)
# def resolve_flines_by_address(self, info, ordernumber=None, **_kwargs):
# return resolve_flines_by_address(info, ordernumber)
class PackageMutations(graphene.ObjectType):
package_create = PackageCreate.Field()
package_delete = PackageDelete.Field()
package_bulk_delete = PackageBulkDelete.Field()
package_update = PackageUpdate.Field()
package_bulk_ustatus = PackageBulkUstatus.Field()
package_line_delete = PackageLineDelete.Field()
package_line_bulk_delete = PackageLineBulkDelete.Field()
| import graphene
from ..core.fields import FilterInputConnectionField
from .bulk_mutations import GaduurBulkDelete, PackageBulkDelete, PackageBulkUstatus, PackageLineBulkDelete
from .mutations import GaduurCreate, GaduurDelete, GaduurUpdate, PackageCreate, PackageDelete, PackageUpdate, PackageLineDelete
from .resolvers import resolve_gaduur, resolve_gaduurs, resolve_package, resolve_packages, resolve_packageLines
from .sorters import GaduurSortingInput, PackageSortingInput, PackageLineSortingInput
from .types import Gaduur, Package, PackageLine
from .filters import GaduurFilterInput, PackageFilterInput, PackageLineFilterInput
class GaduurQueries(graphene.ObjectType):
#####################
#### gaduur dagavar
gaduur = graphene.Field(
Gaduur,
id=graphene.Argument(graphene.ID),
description="Lookup a page by ID.",
)
gaduurs = FilterInputConnectionField(
Gaduur,
sort_by=GaduurSortingInput(description="Sort pages."),
filter=GaduurFilterInput(description="Filtering options for pages."),
description="List of the gaduur's.",
)
def resolve_gaduur(self, info, id=None):
return resolve_gaduur(info, id)
def resolve_gaduurs(self, info, query=None, **_kwargs):
return resolve_gaduurs(info, query=query)
class GaduurMutations(graphene.ObjectType):
gaduur_create = GaduurCreate.Field()
gaduur_delete = GaduurDelete.Field()
gaduur_bulk_delete = GaduurBulkDelete.Field()
gaduur_update = GaduurUpdate.Field()
###################################
# PACKAGES
# class FlineCustom(CountableDjangoObjectType):
# order_id = graphene.Int()
# checked = graphene.Boolean()
# class Meta:
# description = "Represents line of the fulfillment."
# interfaces = [graphene.relay.Node]
# model = FulfillmentLineModel
# only_fields = ["id", "quantity", "ustatus", "changed_date", "soon_date"]
# @staticmethod
# def resolve_order_id(root: FulfillmentLineModel, _info):
# return root.order_line.order_id
# def resoleve_checked(root: FulfillmentLineModel, _info):
# return False
# class FlinesByAddress(graphene.ObjectType):
# address = graphene.Field(
# Address,
# description="Хүлээн авах хаяг"
# )
# lines = graphene.List(
# FlineCustom,
# description="custom fulfillmentline"
# )
class PackageQueries(graphene.ObjectType):
package = graphene.Field(
Package,
id=graphene.Argument(graphene.ID),
description="Look up a page by ID"
)
packages = FilterInputConnectionField(
Package,
sort_by=PackageSortingInput(description="sort packages"),
filter=PackageFilterInput(description="filtering options for package"),
description="List of the package"
)
packageLines = FilterInputConnectionField(
PackageLine,
sort_by=PackageLineSortingInput(description="filtering packageline"),
filter=PackageLineFilterInput(description="sort packageline"),
description="list of packageLine"
)
# flines_by_address = graphene.Field(
# FlinesByAddress,
# description="flines"
# )
# graphene.Node.to_global_id("ProductVariant", variant.id) for variant in variants
def resolve_package(self, info, id=None):
return resolve_package(info, id)
def resolve_packages(self, info, query=None, **_kwargs):
return resolve_packages(info, query=query)
def resolve_pacageLines(self, info, query=None, **_kwargs):
return resolve_packageLines(info, query=query)
# def resolve_flines_by_address(self, info, ordernumber=None, **_kwargs):
# return resolve_flines_by_address(info, ordernumber)
class PackageMutations(graphene.ObjectType):
package_create = PackageCreate.Field()
package_delete = PackageDelete.Field()
package_bulk_delete = PackageBulkDelete.Field()
package_update = PackageUpdate.Field()
package_bulk_ustatus = PackageBulkUstatus.Field()
package_line_delete = PackageLineDelete.Field()
package_line_bulk_delete = PackageLineBulkDelete.Field()
| en | 0.493878 | ##################### #### gaduur dagavar ################################### # PACKAGES # class FlineCustom(CountableDjangoObjectType): # order_id = graphene.Int() # checked = graphene.Boolean() # class Meta: # description = "Represents line of the fulfillment." # interfaces = [graphene.relay.Node] # model = FulfillmentLineModel # only_fields = ["id", "quantity", "ustatus", "changed_date", "soon_date"] # @staticmethod # def resolve_order_id(root: FulfillmentLineModel, _info): # return root.order_line.order_id # def resoleve_checked(root: FulfillmentLineModel, _info): # return False # class FlinesByAddress(graphene.ObjectType): # address = graphene.Field( # Address, # description="Хүлээн авах хаяг" # ) # lines = graphene.List( # FlineCustom, # description="custom fulfillmentline" # ) # flines_by_address = graphene.Field( # FlinesByAddress, # description="flines" # ) # graphene.Node.to_global_id("ProductVariant", variant.id) for variant in variants # def resolve_flines_by_address(self, info, ordernumber=None, **_kwargs): # return resolve_flines_by_address(info, ordernumber) | 2.211454 | 2 |
mpds_aiida/common.py | mpds-io/mpds-aiida | 2 | 6625488 |
import os
import json
from collections import namedtuple
import yaml
from ase.data import chemical_symbols
from aiida_crystal_dft.io.d12 import D12
from aiida_crystal_dft.io.basis import BasisFile # NB only used to determine ecp
from mpds_client import APIError
from mpds_aiida import TEMPLATE_DIR
verbatim_basis = namedtuple("basis", field_names="content, all_electron")
def guess_metal(ase_obj):
"""
Make an educated guess of the metallic compound character,
returns bool
"""
non_metallic_atoms = {
'H', 'He',
'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
'Si', 'P', 'S', 'Cl', 'Ar',
'Ge', 'As', 'Se', 'Br', 'Kr',
'Sb', 'Te', 'I', 'Xe',
'Po', 'At', 'Rn',
'Og'
}
return not any([el for el in set(ase_obj.get_chemical_symbols()) if el in non_metallic_atoms])
def get_basis_sets(repo_dir):
"""
Keeps all available BS in a dict for convenience
NB. we assume BS repo_dir = AiiDA's *basis_family*
"""
assert os.path.exists(repo_dir), "No folder %s with the basis sets found" % repo_dir
bs_repo = {}
for filename in os.listdir(repo_dir):
if not filename.endswith('.basis'):
continue
el = filename.split('.')[0]
assert el in chemical_symbols, "Unexpected basis set file %s" % filename
with open(repo_dir + os.sep + filename, 'r') as f:
bs_str = f.read().strip()
bs_parsed = BasisFile().parse(bs_str)
bs_repo[el] = verbatim_basis(content=bs_str, all_electron=('ecp' not in bs_parsed))
return bs_repo
def get_template(template='minimal.yml'):
"""
Templates present the permanent calc setup
"""
template_loc = os.path.join(TEMPLATE_DIR, template)
if not os.path.exists(template_loc):
template_loc = template
assert os.path.exists(template_loc)
with open(template_loc) as f:
calc = yaml.load(f.read(), Loader=yaml.SafeLoader)
# assert 'parameters' in calc and 'crystal' in calc['parameters'] and 'basis_family' in calc
return calc
def get_input(calc_params_crystal, elements, bs_src, label):
"""
Generates a program input
"""
calc_params_crystal['title'] = label
if isinstance(bs_src, dict):
return D12(parameters=calc_params_crystal, basis=[bs_src[el] for el in elements])
elif isinstance(bs_src, str):
return D12(parameters=calc_params_crystal, basis=bs_src)
raise RuntimeError('Unknown basis set source format!')
supported_arities = {1: 'unary', 2: 'binary', 3: 'ternary', 4: 'quaternary', 5: 'quinary'}
def get_mpds_structures(mpds_api, elements, more_query_args=None):
"""
Given some arbitrary chemical elements,
get their possible crystalline structures
Returns: list (NB dups)
"""
assert sorted(list(set(elements))) == sorted(elements) and \
len(elements) <= len(supported_arities)
structures = []
query = {
"props": "atomic structure",
"elements": '-'.join(elements),
"classes": supported_arities[len(elements)] + ", non-disordered"
}
if more_query_args and type(more_query_args) == dict:
query.update(more_query_args)
try:
for item in mpds_api.get_data(
query,
fields={'S': [
'phase',
'occs_noneq', # non-disordered phases may still have != 1
'cell_abc',
'sg_n',
'basis_noneq',
'els_noneq'
]}
):
if item and any([occ != 1 for occ in item[1]]):
continue
ase_obj = mpds_api.compile_crystal(item, flavor='ase')
if not ase_obj:
continue
ase_obj.info['phase'] = item[0]
structures.append(ase_obj)
except APIError as ex:
if ex.code == 204:
print("No results!")
return []
else: raise
return structures
def get_mpds_phases(mpds_api, elements, more_query_args=None):
"""
Given some arbitrary chemical elements,
get their possible distinct phases,
having at least one supported crystalline structure known
Returns: set
"""
assert sorted(list(set(elements))) == sorted(elements) and \
len(elements) <= len(supported_arities)
phases = set()
query = {
"props": "atomic structure",
"elements": '-'.join(elements),
"classes": supported_arities[len(elements)] + ", non-disordered"
}
if more_query_args and type(more_query_args) == dict:
query.update(more_query_args)
try:
for item in mpds_api.get_data(
query,
fields={'S': [
'phase',
'occs_noneq', # non-disordered phases may still have != 1
'els_noneq'
]}
):
if not item or not item[-1]:
continue
if any([occ != 1 for occ in item[1]]):
continue
phases.add(item[0])
except APIError as ex:
if ex.code == 204:
print("No results!")
return []
else: raise
return phases
def get_aiida_cnf():
cnf_path = os.path.expanduser('~/.aiida/config.json')
assert os.path.exists(cnf_path)
with open(cnf_path) as f:
contents = json.loads(f.read())
return contents['profiles'][contents['default_profile']]
def get_aiida_uuid(path_string):
parts = path_string.split('/')
for n in range(len(parts) - 1):
if len(parts[n]) == 2 and len(parts[n + 1]) == 2:
return parts[n] + parts[n + 1] + parts[n + 2]
return False
def formula_to_latex(given_string):
sub, output = False, ''
for token in given_string:
if token.isdigit() or token == '.':
if not sub:
output += '_{'
sub = True
else:
if sub:
output += '}'
sub = False
output += token
if sub:
output += '}'
return '$' + output + '$'
def fix_label_names(labels):
count = 1
for n in range(len(labels)):
if ',' in labels[n]:
labels[n] = '$A_{%s}$' % count
count += 1
return labels
ARCHIVE_README = "\r\n".join("""In-house MPDS / PAULING FILE ab initio calculations data
(c) by Sobolev, Civalleri, Maschio, Erba, Dovesi, <NAME>
Please, cite as:
Sobolev, Civalleri, Maschio, Erba, Dovesi, <NAME>,
https://mpds.io/phase/{phase}
https://mpds.io/calculations/{aname}.7z
These data are licensed under a Creative Commons Attribution 4.0 International License.
http://creativecommons.org/licenses/by/4.0
The calculations are done using the CRYSTAL code:
Dovesi, Erba, Orlando, Zicovich-Wilson, Civalleri, Maschio, Rerat, Casassa,
Baima, Salustro, Kirtman. WIREs Comput Mol Sci. (2018),
https://doi.org/10.1002/wcms.1360
Dovesi, Saunders, Roetti, Orlando, Zicovich-Wilson, Pascale, Civalleri, Doll,
<NAME>, Llunell, Causa, Noel, Maschio, Erba, <NAME>.
CRYSTAL17 User Manual (University of Turin, 2017), http://www.crystal.unito.it
The automation is done using the AiiDA code:
Pizzi, Cepellotti, Sabatini, Marzari, Kozinsky. Comp Mat Sci (2016),
https://doi.org/10.1016/j.commatsci.2015.09.013""".splitlines()) |
import os
import json
from collections import namedtuple
import yaml
from ase.data import chemical_symbols
from aiida_crystal_dft.io.d12 import D12
from aiida_crystal_dft.io.basis import BasisFile # NB only used to determine ecp
from mpds_client import APIError
from mpds_aiida import TEMPLATE_DIR
verbatim_basis = namedtuple("basis", field_names="content, all_electron")
def guess_metal(ase_obj):
"""
Make an educated guess of the metallic compound character,
returns bool
"""
non_metallic_atoms = {
'H', 'He',
'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
'Si', 'P', 'S', 'Cl', 'Ar',
'Ge', 'As', 'Se', 'Br', 'Kr',
'Sb', 'Te', 'I', 'Xe',
'Po', 'At', 'Rn',
'Og'
}
return not any([el for el in set(ase_obj.get_chemical_symbols()) if el in non_metallic_atoms])
def get_basis_sets(repo_dir):
"""
Keeps all available BS in a dict for convenience
NB. we assume BS repo_dir = AiiDA's *basis_family*
"""
assert os.path.exists(repo_dir), "No folder %s with the basis sets found" % repo_dir
bs_repo = {}
for filename in os.listdir(repo_dir):
if not filename.endswith('.basis'):
continue
el = filename.split('.')[0]
assert el in chemical_symbols, "Unexpected basis set file %s" % filename
with open(repo_dir + os.sep + filename, 'r') as f:
bs_str = f.read().strip()
bs_parsed = BasisFile().parse(bs_str)
bs_repo[el] = verbatim_basis(content=bs_str, all_electron=('ecp' not in bs_parsed))
return bs_repo
def get_template(template='minimal.yml'):
"""
Templates present the permanent calc setup
"""
template_loc = os.path.join(TEMPLATE_DIR, template)
if not os.path.exists(template_loc):
template_loc = template
assert os.path.exists(template_loc)
with open(template_loc) as f:
calc = yaml.load(f.read(), Loader=yaml.SafeLoader)
# assert 'parameters' in calc and 'crystal' in calc['parameters'] and 'basis_family' in calc
return calc
def get_input(calc_params_crystal, elements, bs_src, label):
"""
Generates a program input
"""
calc_params_crystal['title'] = label
if isinstance(bs_src, dict):
return D12(parameters=calc_params_crystal, basis=[bs_src[el] for el in elements])
elif isinstance(bs_src, str):
return D12(parameters=calc_params_crystal, basis=bs_src)
raise RuntimeError('Unknown basis set source format!')
supported_arities = {1: 'unary', 2: 'binary', 3: 'ternary', 4: 'quaternary', 5: 'quinary'}
def get_mpds_structures(mpds_api, elements, more_query_args=None):
"""
Given some arbitrary chemical elements,
get their possible crystalline structures
Returns: list (NB dups)
"""
assert sorted(list(set(elements))) == sorted(elements) and \
len(elements) <= len(supported_arities)
structures = []
query = {
"props": "atomic structure",
"elements": '-'.join(elements),
"classes": supported_arities[len(elements)] + ", non-disordered"
}
if more_query_args and type(more_query_args) == dict:
query.update(more_query_args)
try:
for item in mpds_api.get_data(
query,
fields={'S': [
'phase',
'occs_noneq', # non-disordered phases may still have != 1
'cell_abc',
'sg_n',
'basis_noneq',
'els_noneq'
]}
):
if item and any([occ != 1 for occ in item[1]]):
continue
ase_obj = mpds_api.compile_crystal(item, flavor='ase')
if not ase_obj:
continue
ase_obj.info['phase'] = item[0]
structures.append(ase_obj)
except APIError as ex:
if ex.code == 204:
print("No results!")
return []
else: raise
return structures
def get_mpds_phases(mpds_api, elements, more_query_args=None):
"""
Given some arbitrary chemical elements,
get their possible distinct phases,
having at least one supported crystalline structure known
Returns: set
"""
assert sorted(list(set(elements))) == sorted(elements) and \
len(elements) <= len(supported_arities)
phases = set()
query = {
"props": "atomic structure",
"elements": '-'.join(elements),
"classes": supported_arities[len(elements)] + ", non-disordered"
}
if more_query_args and type(more_query_args) == dict:
query.update(more_query_args)
try:
for item in mpds_api.get_data(
query,
fields={'S': [
'phase',
'occs_noneq', # non-disordered phases may still have != 1
'els_noneq'
]}
):
if not item or not item[-1]:
continue
if any([occ != 1 for occ in item[1]]):
continue
phases.add(item[0])
except APIError as ex:
if ex.code == 204:
print("No results!")
return []
else: raise
return phases
def get_aiida_cnf():
cnf_path = os.path.expanduser('~/.aiida/config.json')
assert os.path.exists(cnf_path)
with open(cnf_path) as f:
contents = json.loads(f.read())
return contents['profiles'][contents['default_profile']]
def get_aiida_uuid(path_string):
parts = path_string.split('/')
for n in range(len(parts) - 1):
if len(parts[n]) == 2 and len(parts[n + 1]) == 2:
return parts[n] + parts[n + 1] + parts[n + 2]
return False
def formula_to_latex(given_string):
sub, output = False, ''
for token in given_string:
if token.isdigit() or token == '.':
if not sub:
output += '_{'
sub = True
else:
if sub:
output += '}'
sub = False
output += token
if sub:
output += '}'
return '$' + output + '$'
def fix_label_names(labels):
    """Replace comma-containing labels with sequential $A_{i}$ placeholders.

    Mutates the list in place and returns it.
    """
    placeholder_index = 1
    for position, label in enumerate(labels):
        if ',' in label:
            labels[position] = '$A_{%s}$' % placeholder_index
            placeholder_index += 1
    return labels
ARCHIVE_README = "\r\n".join("""In-house MPDS / PAULING FILE ab initio calculations data
(c) by Sobolev, Civalleri, Maschio, Erba, Dovesi, <NAME>
Please, cite as:
Sobolev, Civalleri, Maschio, Erba, Dovesi, <NAME>,
https://mpds.io/phase/{phase}
https://mpds.io/calculations/{aname}.7z
These data are licensed under a Creative Commons Attribution 4.0 International License.
http://creativecommons.org/licenses/by/4.0
The calculations are done using the CRYSTAL code:
Dovesi, Erba, Orlando, Zicovich-Wilson, Civalleri, Maschio, Rerat, Casassa,
Baima, Salustro, Kirtman. WIREs Comput Mol Sci. (2018),
https://doi.org/10.1002/wcms.1360
Dovesi, Saunders, Roetti, Orlando, Zicovich-Wilson, Pascale, Civalleri, Doll,
<NAME>, Llunell, Causa, Noel, Maschio, Erba, <NAME>.
CRYSTAL17 User Manual (University of Turin, 2017), http://www.crystal.unito.it
The automation is done using the AiiDA code:
Pizzi, Cepellotti, Sabatini, Marzari, Kozinsky. Comp Mat Sci (2016),
https://doi.org/10.1016/j.commatsci.2015.09.013""".splitlines()) | en | 0.6785 | # NB only used to determine ecp Make an educated guess of the metallic compound character, returns bool Keeps all available BS in a dict for convenience NB. we assume BS repo_dir = AiiDA's *basis_family* Templates present the permanent calc setup # assert 'parameters' in calc and 'crystal' in calc['parameters'] and 'basis_family' in calc Generates a program input Given some arbitrary chemical elements, get their possible crystalline structures Returns: list (NB dups) # non-disordered phases may still have != 1 Given some arbitrary chemical elements, get their possible distinct phases, having at least one supported crystalline structure known Returns: set # non-disordered phases may still have != 1 In-house MPDS / PAULING FILE ab initio calculations data (c) by Sobolev, Civalleri, Maschio, Erba, Dovesi, <NAME> Please, cite as: Sobolev, Civalleri, Maschio, Erba, Dovesi, <NAME>, https://mpds.io/phase/{phase} https://mpds.io/calculations/{aname}.7z These data are licensed under a Creative Commons Attribution 4.0 International License. http://creativecommons.org/licenses/by/4.0 The calculations are done using the CRYSTAL code: Dovesi, Erba, Orlando, Zicovich-Wilson, Civalleri, Maschio, Rerat, Casassa, Baima, Salustro, Kirtman. WIREs Comput Mol Sci. (2018), https://doi.org/10.1002/wcms.1360 Dovesi, Saunders, Roetti, Orlando, Zicovich-Wilson, Pascale, Civalleri, Doll, <NAME>, Llunell, Causa, Noel, Maschio, Erba, <NAME>. CRYSTAL17 User Manual (University of Turin, 2017), http://www.crystal.unito.it The automation is done using the AiiDA code: Pizzi, Cepellotti, Sabatini, Marzari, Kozinsky. Comp Mat Sci (2016), https://doi.org/10.1016/j.commatsci.2015.09.013 | 2.24616 | 2 |
language-modeling/fast_transformers/__init__.py | minhtannguyen/transformer-mgk | 5 | 6625489 | <reponame>minhtannguyen/transformer-mgk<filename>language-modeling/fast_transformers/__init__.py<gh_stars>1-10
"""Provide a library with fast transformer implementations."""
__author__ = ""
__copyright__ = ""
__license__ = "MIT"
__maintainer__ = ""
__email__ = ""
__url__ = "https://github.com/idiap/fast-transformers"
__version__ = "0.4.0"
| """Provide a library with fast transformer implementations."""
__author__ = ""
__copyright__ = ""
__license__ = "MIT"
__maintainer__ = ""
__email__ = ""
__url__ = "https://github.com/idiap/fast-transformers"
__version__ = "0.4.0" | en | 0.81628 | Provide a library with fast transformer implementations. | 1.022413 | 1 |
bundle_cache/app_store/tk-flame-export/v1.9.1/python/dialogs/summary_dialog.py | ColinKennedy/tk-config-default2-respawn | 4 | 6625490 | <filename>bundle_cache/app_store/tk-flame-export/v1.9.1/python/dialogs/summary_dialog.py
# Copyright (c) 2014 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.submission_complete_dialog import Ui_SubmissionCompleteDialog
from .ui.submission_failed_dialog import Ui_SubmissionFailedDialog
class _SubmissionDialogBase(QtGui.QWidget):
    """
    Shared behaviour of the export summary dialogs: loads the designer UI,
    wires up the 'submit' button, and exposes the modal exit code the way
    the Toolkit (sgtk) dialog machinery expects.
    """

    # Designer-generated UI class; set by each concrete subclass.
    _UI_CLASS = None

    def __init__(self):
        """
        Constructor
        """
        # first, call the base class and let it do its thing.
        QtGui.QWidget.__init__(self)
        # now load in the UI that was created in the UI designer
        self.ui = self._UI_CLASS()
        self.ui.setupUi(self)
        # with the tk dialogs, we need to hook up our modal
        # dialog signals in a special way
        self.__exit_code = QtGui.QDialog.Rejected
        self.ui.submit.clicked.connect(self._on_submit_clicked)

    @property
    def exit_code(self):
        """
        Used to pass exit code back though sgtk dialog
        :returns: The dialog exit code
        """
        return self.__exit_code

    @property
    def hide_tk_title_bar(self):
        """
        Tell the system to not show the std toolbar.
        """
        return True

    def _on_submit_clicked(self):
        """
        Called when the 'submit' button is clicked.
        """
        self.__exit_code = QtGui.QDialog.Accepted
        self.close()


class SubmissionCompleteDialog(_SubmissionDialogBase):
    """
    Summary dialog popping up after a Shot export has completed.
    """

    _UI_CLASS = Ui_SubmissionCompleteDialog

    def __init__(self, message):
        """
        :param message: Status text to display in the dialog body.
        """
        _SubmissionDialogBase.__init__(self)
        self.ui.status.setText(message)


class SubmissionFailedDialog(_SubmissionDialogBase):
    """
    Summary dialog popping up when a Shot export fails.
    """

    _UI_CLASS = Ui_SubmissionFailedDialog
| <filename>bundle_cache/app_store/tk-flame-export/v1.9.1/python/dialogs/summary_dialog.py
# Copyright (c) 2014 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore, QtGui
from .ui.submission_complete_dialog import Ui_SubmissionCompleteDialog
from .ui.submission_failed_dialog import Ui_SubmissionFailedDialog
class SubmissionCompleteDialog(QtGui.QWidget):
"""
Summary dialog popping up after a Shot export has completed.
"""
def __init__(self, message):
"""
Constructor
"""
# first, call the base class and let it do its thing.
QtGui.QWidget.__init__(self)
# now load in the UI that was created in the UI designer
self.ui = Ui_SubmissionCompleteDialog()
self.ui.setupUi(self)
self.ui.status.setText(message)
# with the tk dialogs, we need to hook up our modal
# dialog signals in a special way
self.__exit_code = QtGui.QDialog.Rejected
self.ui.submit.clicked.connect(self._on_submit_clicked)
@property
def exit_code(self):
"""
Used to pass exit code back though sgtk dialog
:returns: The dialog exit code
"""
return self.__exit_code
@property
def hide_tk_title_bar(self):
"""
Tell the system to not show the std toolbar.
"""
return True
def _on_submit_clicked(self):
"""
Called when the 'submit' button is clicked.
"""
self.__exit_code = QtGui.QDialog.Accepted
self.close()
class SubmissionFailedDialog(QtGui.QWidget):
"""
Summary dialog popping up when a Shot export fails.
"""
def __init__(self):
"""
Constructor
"""
# first, call the base class and let it do its thing.
QtGui.QWidget.__init__(self)
# now load in the UI that was created in the UI designer
self.ui = Ui_SubmissionFailedDialog()
self.ui.setupUi(self)
# with the tk dialogs, we need to hook up our modal
# dialog signals in a special way
self.__exit_code = QtGui.QDialog.Rejected
self.ui.submit.clicked.connect(self._on_submit_clicked)
@property
def exit_code(self):
"""
Used to pass exit code back though sgtk dialog
:returns: The dialog exit code
"""
return self.__exit_code
@property
def hide_tk_title_bar(self):
"""
Tell the system to not show the std toolbar.
"""
return True
def _on_submit_clicked(self):
"""
Called when the 'submit' button is clicked.
"""
self.__exit_code = QtGui.QDialog.Accepted
self.close()
| en | 0.895716 | # Copyright (c) 2014 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. Summary dialog popping up after a Shot export has completed. Constructor # first, call the base class and let it do its thing. # now load in the UI that was created in the UI designer # with the tk dialogs, we need to hook up our modal # dialog signals in a special way Used to pass exit code back though sgtk dialog :returns: The dialog exit code Tell the system to not show the std toolbar. Called when the 'submit' button is clicked. Summary dialog popping up when a Shot export fails. Constructor # first, call the base class and let it do its thing. # now load in the UI that was created in the UI designer # with the tk dialogs, we need to hook up our modal # dialog signals in a special way Used to pass exit code back though sgtk dialog :returns: The dialog exit code Tell the system to not show the std toolbar. Called when the 'submit' button is clicked. | 2.003612 | 2 |
abc192/d.py | nishio/atcoder | 1 | 6625491 | <filename>abc192/d.py
# included from snippets/main.py
def debug(*values, msg=""):
    """Write msg followed by the values to stderr, keeping stdout clean."""
    import sys
    print(msg, *values, file=sys.stderr)
def lessEqual(s, base, limit):
    """Return True iff the digit string s, read in the given base, is <= limit.

    Bails out as soon as the partial value exceeds limit, so very long digit
    strings never build an astronomically large integer.
    """
    value = 0
    for digit in s:
        value = value * base + int(digit)
        if value > limit:
            return False
    return True
def solve(X, M):
    """Count the bases b in which the digit string of X, read in base b, is <= M.

    Valid bases start at d + 1 where d is the largest digit of X.  For a
    multi-digit X the value is strictly increasing in the base, so the largest
    valid base is found by binary search using lessEqual().
    """
    sX = str(X)
    if len(sX) == 1:
        # A single digit has the same value in every base; the answer is
        # reported as 1 iff X <= M, else 0.
        if X <= M:
            return 1
        else:
            return 0
    d = max(int(c)for c in str(X))
    v = int(sX, d + 1)  # value of X in the smallest legal base
    if M < v:
        return 0
    # Invariant: lessEqual(sX, left, M) holds; lessEqual(sX, right, M) fails.
    left = d + 1
    start = left
    # In base M+1 a multi-digit X is worth at least (M+1)^(len-1) > M,
    # so M+1 is a safe "invalid" upper bound.
    right = M + 1
    while left < right - 1:
        x = (left + right) // 2
        if lessEqual(sX, x, M):
            left = x
        else:
            right = x
    # Valid bases are exactly start..left == start..right-1.
    return right - start
def anotherWay(X, M):
    """Compare an analytic estimate of the answer with solve(X, M).

    NOTE(review): this looks like a leftover debugging/cross-check helper --
    exp(log(M / first_digit) / (len - 1)) approximates the largest feasible
    base, so the returned difference should be near zero.  It does not appear
    on the solution path; confirm before relying on it.
    """
    from math import log, exp
    sX = str(X)
    return exp(log(M / int(sX[0])) / (len(sX) - 1)) - solve(X, M)
def main():
    """Read X and M from stdin and print the number of valid bases."""
    number = int(input())
    limit = int(input())
    print(solve(number, limit))
# tests
T1 = """
22
10
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
2
"""
T2 = """
999
1500
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
3
"""
T3 = """
100000000000000000000000000000000000000000000000000000000000
1000000000000000000
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
1
"""
T4 = """
2
1
"""
TEST_T4 = """
>>> as_input(T4)
>>> main()
0
"""
T5 = """
1
2
"""
TEST_T5 = """
>>> as_input(T5)
>>> main()
1
"""
T6 = """
10
1000
"""
TEST_T6 = """
>>> as_input(T6)
>>> main()
999
"""
def _test():
    """Run module-level doctests, then every TEST_* example bundle in name order."""
    import doctest
    doctest.testmod()
    g = globals()
    for k in sorted(g):
        if k.startswith("TEST_"):
            print(k)  # announce which bundle is about to run
            doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
    """Use in tests: serve the given string as this module's input stream.

    Rebinds the module-level ``input``/``read`` helpers so they read from an
    in-memory buffer and return ASCII bytes, mimicking sys.stdin.buffer.
    """
    import io
    buffer = io.StringIO(s.strip())
    module_scope = globals()
    module_scope["input"] = lambda: bytes(buffer.readline(), "ascii")
    module_scope["read"] = lambda: bytes(buffer.read(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
| <filename>abc192/d.py
# included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def lessEqual(s, base, limit):
ret = 0
for c in s:
ret *= base
ret += int(c)
if limit < ret:
return False
return True
def solve(X, M):
sX = str(X)
if len(sX) == 1:
if X <= M:
return 1
else:
return 0
d = max(int(c)for c in str(X))
v = int(sX, d + 1)
if M < v:
return 0
left = d + 1
start = left
right = M + 1 # (1)
while left < right - 1:
x = (left + right) // 2
if lessEqual(sX, x, M):
left = x
else:
right = x
return right - start
def anotherWay(X, M):
from math import log, exp
sX = str(X)
return exp(log(M / int(sX[0])) / (len(sX) - 1)) - solve(X, M)
def main():
X = int(input())
M = int(input())
print(solve(X, M))
# tests
T1 = """
22
10
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
2
"""
T2 = """
999
1500
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
3
"""
T3 = """
100000000000000000000000000000000000000000000000000000000000
1000000000000000000
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
1
"""
T4 = """
2
1
"""
TEST_T4 = """
>>> as_input(T4)
>>> main()
0
"""
T5 = """
1
2
"""
TEST_T5 = """
>>> as_input(T5)
>>> main()
1
"""
T6 = """
10
1000
"""
TEST_T6 = """
>>> as_input(T6)
>>> main()
999
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
| en | 0.530997 | # included from snippets/main.py # (1) # tests 22 10 >>> as_input(T1) >>> main() 2 999 1500 >>> as_input(T2) >>> main() 3 100000000000000000000000000000000000000000000000000000000000 1000000000000000000 >>> as_input(T3) >>> main() 1 2 1 >>> as_input(T4) >>> main() 0 1 2 >>> as_input(T5) >>> main() 1 10 1000 >>> as_input(T6) >>> main() 999 # end of snippets/main.py | 3.23576 | 3 |
WeBlog/posts/views.py | Harshad347/WeBlog | 0 | 6625492 | from django.shortcuts import render, redirect, get_object_or_404
from .models import Post
from comments.models import Comment
# from accounts.models import Profile
from .forms import PostForm
from comments.forms import CommentForm
from django.contrib import messages
from django.views.generic import FormView, UpdateView, TemplateView, CreateView, ListView, DetailView, DeleteView
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
@login_required
def post_list(request):
    """Render the home page listing every post (login required)."""
    posts = Post.objects.all()
    # profile = Profile.objects.get(user=request.user)
    context = {
        'posts': posts,
        # 'profile': profile,
    }
    return render(request, 'posts/home.html', context)
@login_required
def post_detail(request, pk):
    """Render one post with its comments, newest comment first.

    Raises Http404 (via get_object_or_404) when the post does not exist.
    """
    post = get_object_or_404(Post, pk=pk)
    comments = Comment.objects.filter(post=post,).order_by('-commented_on')
    # profile = Profile.objects.get(user=request.user)
    context = {
        'post': post,
        'comments': comments,
        # 'profile': profile,
    }
    return render(request, 'posts/post_detail.html', context)
@login_required
def post_create(request):
    """Display the creation form; on a valid POST, save a new post
    authored by the current user and redirect to its detail page."""
    if request.method != 'POST':
        return render(request, 'posts/post_form.html', {'form': PostForm()})
    form = PostForm(request.POST, request.FILES)
    if not form.is_valid():
        # Re-render with the bound form so field errors are shown.
        return render(request, 'posts/post_form.html', {'form': form})
    new_post = form.save(commit=False)
    new_post.author = request.user
    new_post.save()
    return redirect('post-detail', pk=new_post.pk)
@login_required
def post_update(request, pk):
    """Edit an existing post; re-render the form with errors when invalid.

    NOTE(review): any authenticated user can edit any post -- there is no
    author/permission check; confirm whether that is intended.
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES, instance=post)
        if form.is_valid():
            # Save through the form so ModelForm handles file fields and
            # many-to-many data, instead of calling post.save() directly.
            post = form.save()
            return redirect('post-detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'posts/post_update.html', {'form': form})
@login_required
def post_delete(request, pk):
    """Delete the given post and return to the home page.

    NOTE(review): this deletes on any request method, including GET, and
    performs no author/permission check -- presumably it should be POST-only
    (CSRF-protected) and restricted to the post's author; confirm.
    """
    post = get_object_or_404(Post, pk=pk)
    post.delete()
    return redirect('/')
| from django.shortcuts import render, redirect, get_object_or_404
from .models import Post
from comments.models import Comment
# from accounts.models import Profile
from .forms import PostForm
from comments.forms import CommentForm
from django.contrib import messages
from django.views.generic import FormView, UpdateView, TemplateView, CreateView, ListView, DetailView, DeleteView
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
@login_required
def post_list(request):
posts = Post.objects.all()
# profile = Profile.objects.get(user=request.user)
context = {
'posts': posts,
# 'profile': profile,
}
return render(request, 'posts/home.html', context)
@login_required
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
comments = Comment.objects.filter(post=post,).order_by('-commented_on')
# profile = Profile.objects.get(user=request.user)
context = {
'post': post,
'comments': comments,
# 'profile': profile,
}
return render(request, 'posts/post_detail.html', context)
@login_required
def post_create(request):
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('post-detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'posts/post_form.html', {'form': form})
@login_required
def post_update(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == 'POST':
form = PostForm(request.POST, request.FILES, instance=post)
if form.is_valid():
post.save()
return redirect('post-detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'posts/post_update.html', {'form': form})
@login_required
def post_delete(request, pk):
post = get_object_or_404(Post, pk=pk)
post.delete()
return redirect('/')
| en | 0.404713 | # from accounts.models import Profile # profile = Profile.objects.get(user=request.user) # 'profile': profile, # profile = Profile.objects.get(user=request.user) # 'profile': profile, | 2.150557 | 2 |
dreamhostapi/module.py | mcgid/python-dreamhostapi | 8 | 6625493 | <gh_stars>1-10
from dreamhostapi.exceptions import APIError
class Module(object):
    """Proxy for one DreamHost API module.

    Attribute access (e.g. ``module.list_records``) lazily builds and caches a
    callable that issues the ``"<module>-<command>"`` API call through the
    supplied call function.
    """

    def __init__(self, name, call_function):
        self._name = name
        self._no_such_commands = []
        self._call = call_function

    def __getattr__(self, method_name):
        # Leave dunder lookups (copy/pickle/repr machinery) untouched.
        if method_name.startswith('__'):
            raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, method_name))
        # Commands the API already rejected keep raising without a round trip.
        if method_name in self._no_such_commands:
            raise AttributeError("API module '{}' has no command '{}'".format(self._name, method_name))

        def method(*args, **params):
            if args:
                raise TypeError('Parameters must be specified as keyword arguments')
            response = self._call(self._name + '-' + method_name, params)
            if response['result'] == 'success':
                return response['data']
            if response['data'] == 'no_such_cmd':
                # Remember the bad command and drop the cached attribute so
                # subsequent access raises AttributeError immediately.
                self._no_such_commands.append(method_name)
                delattr(self, method_name)
                raise AttributeError("API module '{}' has no command '{}'".format(self._name, method_name))
            raise APIError(response['data'])

        # Cache the bound command so later access skips __getattr__.
        setattr(self, method_name, method)
        return method
return method
| from dreamhostapi.exceptions import APIError
class Module(object):
def __init__(self, name, call_function):
self._name = name
self._no_such_commands = []
self._call = call_function
def __getattr__(self, method_name):
if method_name.startswith('__'):
raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, method_name))
if method_name in self._no_such_commands:
raise AttributeError("API module '{}' has no command '{}'".format(self._name, method_name))
def method(*args, **params):
if args:
raise TypeError('Parameters must be specified as keyword arguments')
response = self._call(self._name + '-' + method_name, params)
if response['result'] != 'success':
if response['data'] == 'no_such_cmd':
self._no_such_commands.append(method_name)
delattr(self, method_name)
raise AttributeError("API module '{}' has no command '{}'".format(self._name, method_name))
else:
raise APIError(response['data'])
return response['data']
setattr(self, method_name, method)
return method | none | 1 | 2.56306 | 3 | |
pink/constants.py | Fogapod/pink | 0 | 6625494 | import os
from dotenv import load_dotenv
load_dotenv()
PREFIX = os.environ["BOT_PREFIX"]
| import os
from dotenv import load_dotenv
load_dotenv()
PREFIX = os.environ["BOT_PREFIX"]
| none | 1 | 1.518213 | 2 | |
frozen_dir.py | oneincloud/xyft_strategy2_py3.5.3 | 0 | 6625495 | <reponame>oneincloud/xyft_strategy2_py3.5.3<gh_stars>0
import sys
import os
def app_path():
    '''
    Return the base application path.

    :return: the directory of the bundled executable when running frozen
        (e.g. under PyInstaller), otherwise the directory of this file.
    '''
    # getattr keeps this consistent with resource_path(); PyInstaller sets
    # sys.frozen on the bundled interpreter.
    if getattr(sys, 'frozen', False):
        # Handle PyInstaller
        return os.path.dirname(sys.executable)
    return os.path.dirname(__file__)
# Build the base path used to access bundled resource files.
def resource_path():
    """Return the directory resource paths should be resolved against.

    Under a PyInstaller one-file bundle this is the extraction directory
    (sys._MEIPASS); otherwise it is the current working directory.
    """
    if getattr(sys, 'frozen', False):  # running from a bundled executable?
        base_path = sys._MEIPASS
    else:
        base_path = os.path.abspath(".")
    return base_path
# return os.path.join(base_path, relative_path) | import sys
import os
def app_path():
'''
Return the base application path.
:return:
'''
if hasattr(sys,'frozen'):
# Handle PyInstaller
return os.path.dirname(sys.executable)
return os.path.dirname(__file__)
#生成资源文件目录访问路径
def resource_path():
if getattr(sys, 'frozen', False): #是否Bundle Resource
base_path = sys._MEIPASS
else:
base_path = os.path.abspath(".")
return base_path
# return os.path.join(base_path, relative_path) | en | 0.288853 | Return the base application path. :return: # Handle PyInstaller #生成资源文件目录访问路径 #是否Bundle Resource # return os.path.join(base_path, relative_path) | 2.483233 | 2 |
code/import_classes_example.py | gatoravi/python_chennai_jul2016 | 0 | 6625496 | from classes import *
def main():
    """Demo: construct a Shape and two Triangles and exercise their methods."""
    base_shape = Shape("red")  # kept although unused -- construction may have side effects
    blue_triangle = Triangle("blue")
    blue_triangle.color_function()
    green_triangle = Triangle("green")
    green_triangle.color_function()
    green_triangle.myshape()
main()
| from classes import *
def main():
s1 = Shape("red")
t1 = Triangle("blue")
t1.color_function()
t2 = Triangle("green")
t2.color_function()
t2.myshape()
main()
| none | 1 | 2.823967 | 3 | |
12_Nguyen_Lam_Manh_Tuyen/1.6.py | lpython2006e/exercies | 0 | 6625497 | #Write a guessing game where the user has to guess a secret number.
# After every guess the program tells the user whether their number was too large or too small.
# At the end the number of tries needed should be printed.
# It counts only as one try if they input the same number multiple times consecutively.
import random
lov=[]
secretnum=random.randrange(1,100)
print(secretnum)
guess=()
print("Please input your guess")
while guess!=secretnum:
guess = input()
while guess.isdigit() == False:
print("Your input is not a valid number, please try again")
guess = input()
if int(guess)<secretnum:
print("Your input number is lower than the secret number, try higher")
print("Please input your guess again")
lov.append(guess)
if int(guess)>secretnum:
print("Your input number is higher than the secret number, try lower")
print("Please input your guess again")
lov.append(guess)
if int(guess)==secretnum:
#count times user have tried to input
lov=list(set(lov))
count=len(lov)+1
print("Bingo, You've guessed it correcly in {} times".format(count))
| #Write a guessing game where the user has to guess a secret number.
# After every guess the program tells the user whether their number was too large or too small.
# At the end the number of tries needed should be printed.
# It counts only as one try if they input the same number multiple times consecutively.
import random
lov=[]
secretnum=random.randrange(1,100)
print(secretnum)
guess=()
print("Please input your guess")
while guess!=secretnum:
guess = input()
while guess.isdigit() == False:
print("Your input is not a valid number, please try again")
guess = input()
if int(guess)<secretnum:
print("Your input number is lower than the secret number, try higher")
print("Please input your guess again")
lov.append(guess)
if int(guess)>secretnum:
print("Your input number is higher than the secret number, try lower")
print("Please input your guess again")
lov.append(guess)
if int(guess)==secretnum:
#count times user have tried to input
lov=list(set(lov))
count=len(lov)+1
print("Bingo, You've guessed it correcly in {} times".format(count))
| en | 0.96838 | #Write a guessing game where the user has to guess a secret number. # After every guess the program tells the user whether their number was too large or too small. # At the end the number of tries needed should be printed. # It counts only as one try if they input the same number multiple times consecutively. #count times user have tried to input | 4.108098 | 4 |
certbot/tests/helpful_test.py | vivithemage/certbot | 16,789 | 6625498 | <reponame>vivithemage/certbot<gh_stars>1000+
"""Tests for certbot.helpful_parser"""
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
from certbot import errors
from certbot._internal.cli import HelpfulArgumentParser
from certbot._internal.cli import _DomainsAction
from certbot._internal import constants
class TestScanningFlags(unittest.TestCase):
'''Test the prescan_for_flag method of HelpfulArgumentParser'''
def test_prescan_no_help_flag(self):
arg_parser = HelpfulArgumentParser(['run'], {})
detected_flag = arg_parser.prescan_for_flag('--help',
['all', 'certonly'])
self.assertIs(detected_flag, False)
detected_flag = arg_parser.prescan_for_flag('-h',
['all, certonly'])
self.assertIs(detected_flag, False)
def test_prescan_unvalid_topic(self):
arg_parser = HelpfulArgumentParser(['--help', 'all'], {})
detected_flag = arg_parser.prescan_for_flag('--help',
['potato'])
self.assertIs(detected_flag, True)
detected_flag = arg_parser.prescan_for_flag('-h',
arg_parser.help_topics)
self.assertIs(detected_flag, False)
def test_prescan_valid_topic(self):
arg_parser = HelpfulArgumentParser(['-h', 'all'], {})
detected_flag = arg_parser.prescan_for_flag('-h',
arg_parser.help_topics)
self.assertEqual(detected_flag, 'all')
detected_flag = arg_parser.prescan_for_flag('--help',
arg_parser.help_topics)
self.assertIs(detected_flag, False)
class TestDetermineVerbs(unittest.TestCase):
'''Tests for determine_verb methods of HelpfulArgumentParser'''
def test_determine_verb_wrong_verb(self):
arg_parser = HelpfulArgumentParser(['potato'], {})
self.assertEqual(arg_parser.verb, "run")
self.assertEqual(arg_parser.args, ["potato"])
def test_determine_verb_help(self):
arg_parser = HelpfulArgumentParser(['--help', 'everything'], {})
self.assertEqual(arg_parser.verb, "help")
self.assertEqual(arg_parser.args, ["--help", "everything"])
arg_parser = HelpfulArgumentParser(['-d', 'some_domain', '--help',
'all'], {})
self.assertEqual(arg_parser.verb, "help")
self.assertEqual(arg_parser.args, ['-d', 'some_domain', '--help',
'all'])
def test_determine_verb(self):
arg_parser = HelpfulArgumentParser(['certonly'], {})
self.assertEqual(arg_parser.verb, 'certonly')
self.assertEqual(arg_parser.args, [])
arg_parser = HelpfulArgumentParser(['auth'], {})
self.assertEqual(arg_parser.verb, 'certonly')
self.assertEqual(arg_parser.args, [])
arg_parser = HelpfulArgumentParser(['everything'], {})
self.assertEqual(arg_parser.verb, 'run')
self.assertEqual(arg_parser.args, [])
class TestAdd(unittest.TestCase):
'''Tests for add method in HelpfulArgumentParser'''
def test_add_trivial_argument(self):
arg_parser = HelpfulArgumentParser(['run'], {})
arg_parser.add(None, "--hello-world")
parsed_args = arg_parser.parser.parse_args(['--hello-world',
'Hello World!'])
self.assertIs(parsed_args.hello_world, 'Hello World!')
self.assertFalse(hasattr(parsed_args, 'potato'))
def test_add_expected_argument(self):
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid", action="store",
metavar="EAB_KID",
help="Key Identifier for External Account Binding")
parsed_args = arg_parser.parser.parse_args(["--eab-kid", None])
self.assertIsNone(parsed_args.eab_kid)
self.assertTrue(hasattr(parsed_args, 'eab_kid'))
class TestAddGroup(unittest.TestCase):
'''Test add_group method of HelpfulArgumentParser'''
def test_add_group_no_input(self):
arg_parser = HelpfulArgumentParser(['run'], {})
self.assertRaises(TypeError, arg_parser.add_group)
def test_add_group_topic_not_visible(self):
# The user request help on run. A topic that given somewhere in the
# args won't be added to the groups in the parser.
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add_group("auth",
description="description of auth")
self.assertEqual(arg_parser.groups, {})
def test_add_group_topic_requested_help(self):
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add_group("run",
description="description of run")
self.assertTrue(arg_parser.groups["run"])
arg_parser.add_group("certonly", description="description of certonly")
with self.assertRaises(KeyError):
self.assertIs(arg_parser.groups["certonly"], False)
class TestParseArgsErrors(unittest.TestCase):
    '''Tests for errors that should be met for some cases in parse_args method
    in HelpfulArgumentParser'''

    def test_parse_args_renew_force_interactive(self):
        # --force-interactive combined with the 'renew' verb must raise.
        arg_parser = HelpfulArgumentParser(['renew', '--force-interactive'],
                                           {})
        arg_parser.add(
            None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
        with self.assertRaises(errors.Error):
            arg_parser.parse_args()

    def test_parse_args_non_interactive_and_force_interactive(self):
        # The two interactivity flags contradict each other and must raise.
        arg_parser = HelpfulArgumentParser(['--force-interactive',
                                            '--non-interactive'], {})
        arg_parser.add(
            None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
        arg_parser.add(
            None, "--non-interactive", dest="noninteractive_mode",
            action="store_true"
        )
        with self.assertRaises(errors.Error):
            arg_parser.parse_args()

    def test_parse_args_subset_names_wildcard_domain(self):
        # NOTE(review): the actual assertion at the bottom of this test is
        # commented out, so this test currently verifies nothing beyond
        # argument registration -- confirm whether a wildcard domain combined
        # with --allow-subset-of-names should still raise, and re-enable it.
        arg_parser = HelpfulArgumentParser(['--domain',
                                            '*.example.com,potato.example.com',
                                            '--allow-subset-of-names'], {})

        # The following arguments are added because they have to be defined
        # in order for arg_parser to run completely. They are not used for the
        # test.
        arg_parser.add(
            None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
        arg_parser.add(
            None, "--non-interactive", dest="noninteractive_mode",
            action="store_true")
        arg_parser.add(
            None, "--staging"
        )
        arg_parser.add(None, "--dry-run")
        arg_parser.add(None, "--csr")
        arg_parser.add(None, "--must-staple")
        arg_parser.add(None, "--validate-hooks")
        arg_parser.add(None, "-d", "--domain", dest="domains",
                       metavar="DOMAIN", action=_DomainsAction)
        arg_parser.add(None, "--allow-subset-of-names")
        # with self.assertRaises(errors.Error):
        #     arg_parser.parse_args()

    def test_parse_args_hosts_and_auto_hosts(self):
        # --hsts and --auto-hsts together must raise.
        arg_parser = HelpfulArgumentParser(['--hsts', '--auto-hsts'], {})
        arg_parser.add(
            None, "--hsts", action="store_true", dest="hsts")
        arg_parser.add(
            None, "--auto-hsts", action="store_true", dest="auto_hsts")
        # The following arguments are added because they have to be defined
        # in order for arg_parser to run completely. They are not used for the
        # test.
        arg_parser.add(
            None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
        arg_parser.add(
            None, "--non-interactive", dest="noninteractive_mode",
            action="store_true")
        arg_parser.add(None, "--staging")
        arg_parser.add(None, "--dry-run")
        arg_parser.add(None, "--csr")
        arg_parser.add(None, "--must-staple")
        arg_parser.add(None, "--validate-hooks")
        arg_parser.add(None, "--allow-subset-of-names")
        with self.assertRaises(errors.Error):
            arg_parser.parse_args()
class TestAddDeprecatedArgument(unittest.TestCase):
    """Tests for add_deprecated_argument method of HelpfulArgumentParser"""

    @mock.patch.object(HelpfulArgumentParser, "modify_kwargs_for_default_detection")
    def test_no_default_detection_modifications(self, mock_modify):
        # Deprecated arguments must bypass default-detection rewriting even
        # when the parser was created with detect_defaults=True.
        arg_parser = HelpfulArgumentParser(["run"], {}, detect_defaults=True)
        arg_parser.add_deprecated_argument("--foo", 0)
        arg_parser.parse_args()
        mock_modify.assert_not_called()
if __name__ == '__main__':
unittest.main() # pragma: no cover
| """Tests for certbot.helpful_parser"""
import unittest
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
from certbot import errors
from certbot._internal.cli import HelpfulArgumentParser
from certbot._internal.cli import _DomainsAction
from certbot._internal import constants
class TestScanningFlags(unittest.TestCase):
'''Test the prescan_for_flag method of HelpfulArgumentParser'''
def test_prescan_no_help_flag(self):
arg_parser = HelpfulArgumentParser(['run'], {})
detected_flag = arg_parser.prescan_for_flag('--help',
['all', 'certonly'])
self.assertIs(detected_flag, False)
detected_flag = arg_parser.prescan_for_flag('-h',
['all, certonly'])
self.assertIs(detected_flag, False)
def test_prescan_unvalid_topic(self):
arg_parser = HelpfulArgumentParser(['--help', 'all'], {})
detected_flag = arg_parser.prescan_for_flag('--help',
['potato'])
self.assertIs(detected_flag, True)
detected_flag = arg_parser.prescan_for_flag('-h',
arg_parser.help_topics)
self.assertIs(detected_flag, False)
def test_prescan_valid_topic(self):
arg_parser = HelpfulArgumentParser(['-h', 'all'], {})
detected_flag = arg_parser.prescan_for_flag('-h',
arg_parser.help_topics)
self.assertEqual(detected_flag, 'all')
detected_flag = arg_parser.prescan_for_flag('--help',
arg_parser.help_topics)
self.assertIs(detected_flag, False)
class TestDetermineVerbs(unittest.TestCase):
'''Tests for determine_verb methods of HelpfulArgumentParser'''
def test_determine_verb_wrong_verb(self):
arg_parser = HelpfulArgumentParser(['potato'], {})
self.assertEqual(arg_parser.verb, "run")
self.assertEqual(arg_parser.args, ["potato"])
def test_determine_verb_help(self):
arg_parser = HelpfulArgumentParser(['--help', 'everything'], {})
self.assertEqual(arg_parser.verb, "help")
self.assertEqual(arg_parser.args, ["--help", "everything"])
arg_parser = HelpfulArgumentParser(['-d', 'some_domain', '--help',
'all'], {})
self.assertEqual(arg_parser.verb, "help")
self.assertEqual(arg_parser.args, ['-d', 'some_domain', '--help',
'all'])
def test_determine_verb(self):
arg_parser = HelpfulArgumentParser(['certonly'], {})
self.assertEqual(arg_parser.verb, 'certonly')
self.assertEqual(arg_parser.args, [])
arg_parser = HelpfulArgumentParser(['auth'], {})
self.assertEqual(arg_parser.verb, 'certonly')
self.assertEqual(arg_parser.args, [])
arg_parser = HelpfulArgumentParser(['everything'], {})
self.assertEqual(arg_parser.verb, 'run')
self.assertEqual(arg_parser.args, [])
class TestAdd(unittest.TestCase):
'''Tests for add method in HelpfulArgumentParser'''
def test_add_trivial_argument(self):
arg_parser = HelpfulArgumentParser(['run'], {})
arg_parser.add(None, "--hello-world")
parsed_args = arg_parser.parser.parse_args(['--hello-world',
'Hello World!'])
self.assertIs(parsed_args.hello_world, 'Hello World!')
self.assertFalse(hasattr(parsed_args, 'potato'))
def test_add_expected_argument(self):
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid", action="store",
metavar="EAB_KID",
help="Key Identifier for External Account Binding")
parsed_args = arg_parser.parser.parse_args(["--eab-kid", None])
self.assertIsNone(parsed_args.eab_kid)
self.assertTrue(hasattr(parsed_args, 'eab_kid'))
class TestAddGroup(unittest.TestCase):
'''Test add_group method of HelpfulArgumentParser'''
def test_add_group_no_input(self):
arg_parser = HelpfulArgumentParser(['run'], {})
self.assertRaises(TypeError, arg_parser.add_group)
def test_add_group_topic_not_visible(self):
# The user request help on run. A topic that given somewhere in the
# args won't be added to the groups in the parser.
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add_group("auth",
description="description of auth")
self.assertEqual(arg_parser.groups, {})
def test_add_group_topic_requested_help(self):
arg_parser = HelpfulArgumentParser(['--help', 'run'], {})
arg_parser.add_group("run",
description="description of run")
self.assertTrue(arg_parser.groups["run"])
arg_parser.add_group("certonly", description="description of certonly")
with self.assertRaises(KeyError):
self.assertIs(arg_parser.groups["certonly"], False)
class TestParseArgsErrors(unittest.TestCase):
'''Tests for errors that should be met for some cases in parse_args method
in HelpfulArgumentParser'''
def test_parse_args_renew_force_interactive(self):
arg_parser = HelpfulArgumentParser(['renew', '--force-interactive'],
{})
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
with self.assertRaises(errors.Error):
arg_parser.parse_args()
def test_parse_args_non_interactive_and_force_interactive(self):
arg_parser = HelpfulArgumentParser(['--force-interactive',
'--non-interactive'], {})
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
arg_parser.add(
None, "--non-interactive", dest="noninteractive_mode",
action="store_true"
)
with self.assertRaises(errors.Error):
arg_parser.parse_args()
def test_parse_args_subset_names_wildcard_domain(self):
arg_parser = HelpfulArgumentParser(['--domain',
'*.example.com,potato.example.com',
'--allow-subset-of-names'], {})
# The following arguments are added because they have to be defined
# in order for arg_parser to run completely. They are not used for the
# test.
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
arg_parser.add(
None, "--non-interactive", dest="noninteractive_mode",
action="store_true")
arg_parser.add(
None, "--staging"
)
arg_parser.add(None, "--dry-run")
arg_parser.add(None, "--csr")
arg_parser.add(None, "--must-staple")
arg_parser.add(None, "--validate-hooks")
arg_parser.add(None, "-d", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction)
arg_parser.add(None, "--allow-subset-of-names")
# with self.assertRaises(errors.Error):
# arg_parser.parse_args()
def test_parse_args_hosts_and_auto_hosts(self):
arg_parser = HelpfulArgumentParser(['--hsts', '--auto-hsts'], {})
arg_parser.add(
None, "--hsts", action="store_true", dest="hsts")
arg_parser.add(
None, "--auto-hsts", action="store_true", dest="auto_hsts")
# The following arguments are added because they have to be defined
# in order for arg_parser to run completely. They are not used for the
# test.
arg_parser.add(
None, constants.FORCE_INTERACTIVE_FLAG, action="store_true")
arg_parser.add(
None, "--non-interactive", dest="noninteractive_mode",
action="store_true")
arg_parser.add(None, "--staging")
arg_parser.add(None, "--dry-run")
arg_parser.add(None, "--csr")
arg_parser.add(None, "--must-staple")
arg_parser.add(None, "--validate-hooks")
arg_parser.add(None, "--allow-subset-of-names")
with self.assertRaises(errors.Error):
arg_parser.parse_args()
class TestAddDeprecatedArgument(unittest.TestCase):
"""Tests for add_deprecated_argument method of HelpfulArgumentParser"""
@mock.patch.object(HelpfulArgumentParser, "modify_kwargs_for_default_detection")
def test_no_default_detection_modifications(self, mock_modify):
arg_parser = HelpfulArgumentParser(["run"], {}, detect_defaults=True)
arg_parser.add_deprecated_argument("--foo", 0)
arg_parser.parse_args()
mock_modify.assert_not_called()
if __name__ == '__main__':
unittest.main() # pragma: no cover | en | 0.799785 | Tests for certbot.helpful_parser # pragma: no cover Test the prescan_for_flag method of HelpfulArgumentParser Tests for determine_verb methods of HelpfulArgumentParser Tests for add method in HelpfulArgumentParser Test add_group method of HelpfulArgumentParser # The user request help on run. A topic that given somewhere in the # args won't be added to the groups in the parser. Tests for errors that should be met for some cases in parse_args method in HelpfulArgumentParser # The following arguments are added because they have to be defined # in order for arg_parser to run completely. They are not used for the # test. # with self.assertRaises(errors.Error): # arg_parser.parse_args() # The following arguments are added because they have to be defined # in order for arg_parser to run completely. They are not used for the # test. Tests for add_deprecated_argument method of HelpfulArgumentParser # pragma: no cover | 2.730441 | 3 |
abeja/training/api/client.py | abeja-inc/abeja-platform-sdk | 2 | 6625499 | import json
import tempfile
import zipfile
from io import BytesIO
from pathlib import Path
from typing import AnyStr, IO, Optional, List, Dict, Any
from abeja.exceptions import BadRequest
from abeja.common.api_client import BaseAPIClient
from abeja.common.file_helpers import convert_to_zipfile_object
from abeja.common.utils import get_filter_archived_applied_params
from abeja.common.instance_type import InstanceType
class APIClient(BaseAPIClient):
"""A Low-Level client for Training API
.. code-block:: python
from abeja.training import APIClient
api_client = APIClient()
"""
def create_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""create a training job definition
API reference: POST /organizations/<organization_id>/training/definitions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
response = api_client.create_training_job_definition(organization_id,
job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_id": "1443334816413",
"versions": [],
"organization_id": "1200123565071",
"modified_at": "2018-05-17T02:13:35.726812Z",
"created_at": "2018-05-17T02:13:35.726691Z",
"version_count": 0,
"name": "test"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
data = {'name': job_definition_name}
path = '/organizations/{}/training/definitions/'.format(
organization_id)
return self._connection.api_request(
method='POST', path=path, json=data)
def archive_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""archive a training job definition
API reference: POST /organizations/<organization_id>/training/definitions/{name}/archive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
response = api_client.archive_training_job_definition(organization_id,
job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/archive'.format(
organization_id, job_definition_name)
return self._connection.api_request(method='POST', path=path, json={})
def unarchive_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""unarchive a training job definition
API reference: POST /organizations/<organization_id>/training/definitions/{name}/unarchive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
response = api_client.unarchive_training_job_definition(organization_id,
job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/unarchive'.format(
organization_id, job_definition_name)
return self._connection.api_request(method='POST', path=path, json={})
def get_training_job_definitions(self, organization_id: str,
filter_archived: Optional[bool] = None,
offset: Optional[int] = None,
limit: Optional[int] = None) -> dict:
"""get training job definitions
API reference: GET /organizations/<organization_id>/training/definitions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
response = api_client.get_training_job_definitions(organization_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``)
- **offset** (int): **[optional]** paging offset.
- **limit** (int): **[optional]** paging limit.
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"entries": [
{
"version_count": 1,
"created_at": "2018-03-08T00:46:50.791787Z",
"organization_id": "1200123565071",
"versions": [
{
"job_definition_version": 1,
"user_parameters": {},
"handler": "train:handler",
"image": "abeja-inc/all-gpu:19.04",
"modified_at": "2018-03-08T00:48:12.207883Z",
"datasets": {
"train": "1376063797251"
},
"created_at": "2018-03-08T00:48:12.132471Z",
"job_definition_id": "1381349997580"
}
],
"name": "test",
"archived": false,
"modified_at": "2018-03-08T00:46:50.791946Z",
"job_definition_id": "1381349997580"
}
]
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {} # type: Dict[str, Any]
if filter_archived is not None:
params = get_filter_archived_applied_params(
params, filter_archived)
if offset is not None:
params['offset'] = offset
if limit is not None:
params['limit'] = limit
path = '/organizations/{}/training/definitions/'.format(
organization_id)
return self._connection.api_request(
method='GET', path=path, params=params if params else None)
def get_training_job_definition(
self,
organization_id: str,
job_definition_name: str,
include_jobs: Optional[bool] = None) -> dict:
"""get a training job definition.
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = 'test'
response = api_client.get_training_job_definition(organization_id, job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **include_jobs** (bool): If ``True``, also returns training jobs in response. By historical reason,
the default value is **True**, but you should specify False because it degrades
API performance if you have a massive amount of jobs in the target training
job definition.
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"modified_at": "2018-05-17T02:13:35.726812Z",
"organization_id": "1200123565071",
"created_at": "2018-05-17T02:13:35.726691Z",
"job_definition_id": "1443334816413",
"name": "test",
"archived": false,
"versions": [],
"version_count": 0
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}'.format(
organization_id, job_definition_name)
# parameters
params = {}
if include_jobs is None:
pass
elif include_jobs:
params['include_jobs'] = 'true'
else:
params['include_jobs'] = 'false'
return self._connection.api_request(
method='GET', path=path, params=(
None if len(params) == 0 else params))
def delete_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""delete a training job definition.
API reference: DELETE /organizations/<organization_id>/training/definitions/<job_definition_name>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = 'test'
response = api_client.delete_training_job_definition(organization_id, job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "test deleted"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}'.format(
organization_id, job_definition_name)
return self._connection.api_request(method='DELETE', path=path)
def create_training_job_definition_version_native_api(
self, organization_id: str, job_definition_name: str,
source_code: IO[AnyStr], parameters: dict) -> dict:
"""create a training job definition version.
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
source_code = open("./train.zip", "rb")
handler = "train:handler"
image = "abeja-inc/all-gpu:19.04"
environment = {"key": "value"}
description = "description"
response = api_client.create_training_job_definition_version_native_api(
organization_id, job_definition_name, source_code,
parameters={"handler": handler, "image": image, "environment": environment, "description": description})
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **source_code** (IO): zip or tar.gz archived file-like object to run training job
- **parameters** (dict): parameters excluding source code to run training job
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_version": 1,
"user_parameters": {},
"environment": {},
"description": "description",
"datasets": {
"mnist": "1111111111111"
},
"modified_at": "2018-05-17T12:34:46.344076Z",
"job_definition_id": "1443714239154",
"handler": "train:handler",
"created_at": "2018-05-17T12:34:46.296488Z",
"image": "abeja-inc/all-gpu:19.04"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/versions'.format(
organization_id, job_definition_name)
files = {
'source_code': (
'source_code.zip',
source_code,
'application/zip'),
'parameters': (
'params.json',
BytesIO(
json.dumps(parameters).encode()),
'application/json'),
}
return self._connection.api_request(
method='POST', path=path, files=files)
def create_training_job_definition_version(
self, organization_id: str, job_definition_name: str,
filepaths: List[str], handler: str,
image: Optional[str] = None, environment: Optional[Dict[str, Any]] = None,
description: Optional[str] = None) -> dict:
"""create a training job definition version.
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
filepaths = ["./requirements.txt", "./train.py"]
handler = "train:handler"
image = "abeja-inc/all-gpu:19.04"
environment = {"key": "value"}
description = "description"
response = api_client.create_training_job_definition_version(
organization_id, job_definition_name, filepaths, handler,
image=image, environment=environment, description=description)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **filepaths** (list): file list to run training job
- **handler** (str): path to handler (ex. train:handler )
- **image** (Optional[str]): runtime environment
- **environment** (Optional[dict]): user defined parameters set as environment variables
- **description** (Optional[str]): description
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_version": 1,
"user_parameters": {},
"environment": {},
"description": "description",
"datasets": {
"mnist": "1111111111111"
},
"modified_at": "2018-05-17T12:34:46.344076Z",
"job_definition_id": "1443714239154",
"handler": "train:handler",
"created_at": "2018-05-17T12:34:46.296488Z",
"image": "abeja-inc/all-gpu:19.04"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
try:
source_code = tempfile.NamedTemporaryFile(suffix='.zip')
with zipfile.ZipFile(source_code.name, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
for filepath in filepaths:
path_obj = Path(filepath)
new_zip.write(filepath, path_obj.name)
source_code.seek(0)
parameters = {'handler': handler} # type: Dict[str, Any]
if image:
parameters['image'] = image
if environment:
parameters['environment'] = environment
if description:
parameters['description'] = description
return self.create_training_job_definition_version_native_api(
organization_id, job_definition_name, source_code, parameters)
finally:
if source_code:
source_code.close()
def get_training_job_definition_versions(
self, organization_id: str, job_definition_name: str,
filter_archived: Optional[bool] = None) -> dict:
"""get training job definition versions.
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/versions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = 'test_job_definition'
response = api_client.get_training_job_definition_versions(organization_id, job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``)
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"entries": [
{
"job_definition_version": 1,
"user_parameters": {},
"datasets": {
"mnist": "1111111111111"
},
"modified_at": "2018-05-17T12:34:46.344076Z",
"job_definition_id": "1443714239154",
"handler": "train:handler",
"created_at": "2018-05-17T12:34:46.296488Z",
"image": "abeja-inc/all-gpu:19.04",
"archived": false
}
]
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
params = None if filter_archived is None else get_filter_archived_applied_params(
{}, filter_archived)
path = '/organizations/{}/training/definitions/{}/versions'.format(
organization_id, job_definition_name)
return self._connection.api_request(
method='GET', path=path, params=params)
def get_training_job_definition_version(
self,
organization_id: str,
job_definition_name: str,
version_id: int) -> dict:
"""get a training job definition version
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
version_id = 1
response = api_client.get_training_job_definition_version(organization_id, job_definition_name, version_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **version_id** (int): training job version
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_version": 1,
"user_parameters": {},
"datasets": {
"mnist": "1111111111111"
},
"modified_at": "2018-05-17T12:34:46.344076Z",
"job_definition_id": "1443714239154",
"handler": "train:handler",
"created_at": "2018-05-17T12:34:46.296488Z",
"image": "abeja-inc/all-gpu:19.04",
"archived": false
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/versions/{}'.format(
organization_id, job_definition_name, version_id)
return self._connection.api_request(method='GET', path=path)
def patch_training_job_definition_version(
self,
organization_id: str,
job_definition_name: str,
version_id: int,
description: str) -> dict:
"""Update a training job definition version
API reference: PATCH /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
version_id = 1
response = api_client.patch_training_job_definition_version(organization_id, job_definition_name, version_id, description='new version')
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **version_id** (int): training job version
- **description** (str): description
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_version": 1,
"user_parameters": {},
"datasets": {
"mnist": "1111111111111"
},
"modified_at": "2018-05-17T12:34:46.344076Z",
"job_definition_id": "1443714239154",
"handler": "train:handler",
"created_at": "2018-05-17T12:34:46.296488Z",
"image": "abeja-inc/all-gpu:19.04",
"archived": false
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/versions/{}'.format(
organization_id, job_definition_name, version_id)
params = {'description': description}
return self._connection.api_request(
method='PATCH', path=path, json=params)
def archive_training_job_definition_version(
self,
organization_id: str,
job_definition_name: str,
version_id: int) -> dict:
"""archive a training job definition version
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/archive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
version_id = 1
response = api_client.archive_training_job_definition_version(organization_id, job_definition_name, version_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **version_id** (int): training job version
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "archived"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/versions/{}/archive'.format(
organization_id, job_definition_name, version_id)
return self._connection.api_request(method='POST', path=path)
def unarchive_training_job_definition_version(
self,
organization_id: str,
job_definition_name: str,
version_id: int) -> dict:
"""unarchive a training job definition version
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/unarchive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
version_id = 1
response = api_client.unarchive_training_job_definition_version(organization_id, job_definition_name, version_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **version_id** (int): training job version
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "unarchived"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/versions/{}/unarchive'.format(
organization_id, job_definition_name, version_id)
return self._connection.api_request(method='POST', path=path)
def delete_training_job_definition_version(
self,
organization_id: str,
job_definition_name: str,
version_id: int) -> dict:
"""delete a training job definition version
API reference: DELETE /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
version_id = 1
response = api_client.delete_training_job_definition_version(organization_id, job_definition_name, version_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **version_id** (int): training job version
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "deleted"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/versions/{}'.format(
organization_id, job_definition_name, version_id)
return self._connection.api_request(method='DELETE', path=path)
def create_training_job(
self,
organization_id: str,
job_definition_name: str,
version_id: int,
user_parameters: Optional[dict] = None,
datasets: Optional[dict] = None,
instance_type: Optional[str] = None,
environment: Optional[dict] = None,
description: Optional[str] = None,
export_log: Optional[bool] = None) -> dict:
"""create a training job
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/jobs
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
version_id = 1
user_parameters = {
'BATCH_SIZE': 50
}
datasets = {
"mnist": "1111111111111"
}
response = api_client.create_training_job(
organization_id, job_definition_name, version_id, user_parameters, datasets)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **version_id** (int): training job version
- **user_parameters** (dict): (**deprecated!!**) user defined parameters set as environment variables. use ``environment`` instead.
- **datasets** (dict): **[optional]** datasets, combination of alias and dataset_id
- **instance_type** (str): **[optional]** instance type of running environment
- **environment** (dict): **[optional]** user defined parameters set as environment variables
- **description** (str): **[optional]** description of this job
- **export_log** (bool): **[optional]** If ``true``, include the log in the model.
This feature is only available with 19.04 or later images. (default: ``false``)
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_id": "1443714239154",
"user_parameters": {},
"start_time": null,
"created_at": "2018-05-17T12:43:59.322367Z",
"job_definition_version": 1,
"completion_time": null,
"status": "Pending",
"instance_type": "cpu-1",
"modified_at": "2018-05-17T12:43:59.322673Z",
"training_job_id": "1443722127663",
"creator": {
"email": "<EMAIL>",
"is_registered": true,
"created_at": "2017-05-26T01:38:46Z",
"id": "1128347408389",
"display_name": null,
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin"
},
"description": null,
"statistics": null
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
data = {} # type: Dict[str, Any]
if environment is not None:
data['environment'] = environment
elif user_parameters is not None:
data['environment'] = user_parameters
if datasets is not None:
data['datasets'] = datasets
if instance_type is not None:
# validation
try:
InstanceType.parse(instance_type)
data['instance_type'] = instance_type
except ValueError:
error_message = "'{}' is an invalid instance_type".format(
instance_type)
raise BadRequest(
error=error_message,
error_description=error_message,
status_code=400)
if description is not None:
data['description'] = description
if export_log is not None:
data['export_log'] = export_log
path = '/organizations/{}/training/definitions/{}/versions/{}/jobs'.format(
organization_id, job_definition_name, version_id)
return self._connection.api_request(
method='POST', path=path, json=data)
def get_training_jobs(
self, organization_id: str, job_definition_name: str,
limit: Optional[int]=None, offset: Optional[int]=None,
filter_archived: Optional[bool] = None) -> dict:
"""get training jobs
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
response = api_client.get_training_jobs(organization_id, job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **limit** (int): **[optional]** max number of jobs to be returned (default: 10)
- **offset** (int): **[optional]** offset of jobs ( which starts from 0 )
- **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``)
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"entries": [
{
"user_parameters": {},
"start_time": null,
"training_job_id": "1443722127663",
"created_at": "2018-05-17T12:43:59.322367Z",
"completion_time": null,
"id": "1443722127663",
"job_definition_version": 1,
"description": null,
"statistics": null,
"job_definition_id": "1443714239154",
"modified_at": "2018-05-17T12:43:59.322673Z",
"status": "Pending",
"archived": false,
"creator": {
"email": "<EMAIL>",
"created_at": "2017-05-26T01:38:46Z",
"id": "1128347408389",
"role": "admin",
"display_name": null,
"updated_at": "2018-01-04T03:02:12Z",
"is_registered": true
}
}
],
"limit": 10,
"offset": 0,
"total": 1
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {} if filter_archived is None else get_filter_archived_applied_params(
{}, filter_archived)
if limit is not None:
params['limit'] = limit
if offset is not None:
params['offset'] = offset
path = '/organizations/{}/training/definitions/{}/jobs'.format(
organization_id, job_definition_name)
return self._connection.api_request(
method='GET', path=path, params=params)
def get_training_job(
self,
organization_id: str,
job_definition_name: str,
training_job_id: str) -> dict:
"""get a training job
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
training_job_id = "1443722127663"
response = api_client.get_training_job(organization_id, job_definition_name, training_job_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **training_job_id** (str): TRAINING_JOB_ID
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_id": "1443714239154",
"user_parameters": {},
"start_time": null,
"created_at": "2018-05-17T12:43:59.322367Z",
"job_definition_version": 1,
"completion_time": null,
"status": "Pending",
"modified_at": "2018-05-17T12:43:59.322673Z",
"training_job_id": "1443722127663",
"archived": false,
"creator": {
"email": "<EMAIL>",
"is_registered": true,
"created_at": "2017-05-26T01:38:46Z",
"id": "1128347408389",
"display_name": null,
"updated_at": "2018-01-04T03:02:12Z",
"role": "admin"
},
"description": null,
"statistics": null
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/jobs/{}'.format(
organization_id, job_definition_name, training_job_id)
return self._connection.api_request(method='GET', path=path)
def stop_training_job(
self,
organization_id: str,
job_definition_name: str,
training_job_id: str) -> dict:
"""stop a training job
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/stop
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
training_job_id = "1443722127663"
response = api_client.stop_training_job(organization_id, job_definition_name, training_job_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **training_job_id** (str): TRAINING_JOB_ID
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "test_job_definition:1443722127663 stopped"
}
Raises:
- Unauthorized: Authentication failed
- Forbidden:
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/jobs/{}/stop'.format(
organization_id, job_definition_name, training_job_id)
return self._connection.api_request(method='POST', path=path)
def archive_training_job(
self,
organization_id: str,
job_definition_name: str,
training_job_id: str) -> dict:
"""Archive a training job.
API reference: POST /organizations/<organization_id>/training/definitions/{job_definition_name}/jobs/{training_job_id}/archive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
training_job_id = "1234567890123"
response = api_client.archive_training_job(organization_id,
job_definition_name,
training_job_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **training_job_id** (str): TRAINING_JOB_ID
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/jobs/{}/archive'.format(
organization_id, job_definition_name, training_job_id)
return self._connection.api_request(method='POST', path=path, json={})
def unarchive_training_job(
self,
organization_id: str,
job_definition_name: str,
training_job_id: str) -> dict:
"""Archive a training job.
API reference: POST /organizations/<organization_id>/training/definitions/{job_definition_name}/jobs/{training_job_id}/unarchive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
training_job_id = "1234567890123"
response = api_client.unarchive_training_job(organization_id,
job_definition_name,
training_job_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **training_job_id** (str): TRAINING_JOB_ID
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/jobs/{}/unarchive'.format(
organization_id, job_definition_name, training_job_id)
return self._connection.api_request(method='POST', path=path, json={})
def get_training_result(
self,
organization_id: str,
job_definition_name: str,
training_job_id: str) -> dict:
"""get a training job result
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/result
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
training_job_id = "1443722127663"
response = api_client.get_training_result(organization_id, job_definition_name,
training_job_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **training_job_id** (str): TRAINING_JOB_ID
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"artifacts": {
"complete": {
"uri": "dummy_url",
}
}
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/jobs/{}/result'.format(
organization_id, job_definition_name, training_job_id)
return self._connection.api_request(method='GET', path=path)
def update_statistics(
self,
organization_id: str,
job_definition_name: str,
training_job_id: str,
statistics: dict) -> dict:
"""update a training job statistics
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/statistics
Request Syntax:
.. code-block:: python
from abeja.training.statistics import Statistics
statistics = Statistics(progress_percentage=0.5, epoch=1, num_epochs=5, key1='value1')
statistics.add_stage(name=Statistics.STAGE_TRAIN, accuracy=0.9, loss=0.05)
statistics.add_stage(name=Statistics.STAGE_VALIDATION, accuracy=0.8, loss=0.1, key2=2)
response = api_client.update_statistics(statistics.get_statistics())
Params:
- **statistics** (str): statistics needs to be saved and updated
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"statistics": {
"num_epochs": 5,
"epoch": 1,
"progress_percentage": 0.5,
"stages": {
"train": {
"accuracy": 0.9,
"loss": 0.05
},
"validation": {
"accuracy": 0.8,
"loss": 0.1,
"key2": 2
}
},
"key1": "value1"
}
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
data = {
'statistics': statistics
}
path = '/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format(
organization_id, job_definition_name, training_job_id)
return self._connection.api_request(
method='POST', path=path, json=data)
# Training model
def get_training_models(
self, organization_id: str, job_definition_name: str,
filter_archived: Optional[bool] = None) -> dict:
"""Get models entries
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models
Request syntax:
.. code-block:: python
response = api_client.list_models(organization_id='1102940376065')
Params:
- **organization_id** (str): organization_id
- **job_definition_name** (str): training job definition name
- **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``)
Return type:
dict
Returns:
Response syntax:
.. code-block:: json
{
"entries": [
{
"training_model_id": "1111111111111",
"job_definition_id": "1111111111111",
"training_job_id": "1111111111111",
"user_parameters": {},
"description": "this is description of the model",
"archived": false,
"exec_env": "cloud",
"archived": false,
"created_at": "2018-01-01T00:00:00.00000Z",
"modified_at": "2018-01-01T00:00:00.00000Z"
}
]
}
Response Structure:
- **entries** (list)
- (dict)
- **training_model_id** (str) : training model id
- **job_definition_id** (str) : job definition id
- **training_job_id** (str) : training job id
- **user_parameters** (dict): user defined parameters.
- **description** (str) : model description.
- **archived** (bool) : archived or not.
- **exec_env** (enum) : Executed environment. One of [cloud, local, none].
Raises:
- Unauthorized: Authentication failed
- InternalServerError
"""
params = None if filter_archived is None else get_filter_archived_applied_params(
{}, filter_archived)
path = '/organizations/{}/training/definitions/{}/models'.format(
organization_id, job_definition_name)
return self._connection.api_request(
method='GET', path=path, params=params)
def create_training_model(
self, organization_id: str, job_definition_name: str,
model_data: IO, parameters: Optional[dict] = None) -> dict:
"""create a training model.
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = 'test_job_definition'
model_data = '....'
parameters = {
"description": "description",
"user_parameters": {}
}
response = api_client.create_training_model(
organization_id, job_definition_name, model_data, parameters)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **model_data** (IO): model data
- **parameters** (dict): parameters for creating training model
- **training_job_id** (str): The ID of a corresponding training job.
- **description** (str): Description
- **user_parameters** (dict): user defined parameters.
- **metrics** (dict): user defined metrics.
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"training_model_id": "1111111111111",
"job_definition_id": "1111111111111",
"training_job_id": "1111111111111",
"user_parameters": {},
"description": "this is description of the model",
"archived": false,
"exec_env": "cloud",
"created_at": "2018-01-01T00:00:00.00000Z",
"modified_at": "2018-01-01T00:00:00.00000Z"
}
Raises:
- InvalidDataFormat
- Unauthorized: Authentication failed
- InternalServerError
"""
if model_data is None:
error_message = "model_data is necessary"
raise BadRequest(
error=error_message,
error_description=error_message,
status_code=400)
if parameters is None:
parameters = {}
model_data = convert_to_zipfile_object(model_data)
files = {
'model_data': (
'model_data.zip',
model_data,
'application/zip'),
'parameters': (
'params.json',
BytesIO(
json.dumps(parameters).encode()),
'application/json')}
path = '/organizations/{}/training/definitions/{}/models'.format(
organization_id, job_definition_name)
return self._connection.api_request(
method='POST', path=path, files=files)
def get_training_model(
self,
organization_id: str,
job_definition_name: str,
model_id: str) -> dict:
"""get a training model
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>
Request Syntax:
.. code-block:: python
response = api_client.get_training_model(
organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111')
Params:
- **organization_id** (str): organization_id
- **job_definition_name** (str): training job definition name
- **model_id** (str): model_id of the requested model
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"training_model_id": "1111111111111",
"job_definition_id": "1111111111111",
"training_job_id": "1111111111111",
"user_parameters": {},
"description": "this is description of the model",
"archived": false,
"exec_env": "cloud",
"archived": false,
"created_at": "2018-01-01T00:00:00.00000Z",
"modified_at": "2018-01-01T00:00:00.00000Z"
}
Response Structure:
- **training_model_id** (str) : training model id
- **job_definition_id** (str) : job definition id
- **training_job_id** (str) : training job id
- **user_parameters** (dict): user defined parameters.
- **description** (str) : model description.
- **archived** (bool) : archived or not.
- **exec_env** (enum) : Executed environment. One of [cloud, local, none].
Raises:
- NotFound: model not found
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/models/{}'.format(
organization_id, job_definition_name, model_id)
return self._connection.api_request(method='GET', path=path)
def patch_training_model(
self,
organization_id: str,
job_definition_name: str,
model_id: str,
description: str) -> dict:
"""patch a training model
API reference: PATCH /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>
Request Syntax:
.. code-block:: python
response = api_client.patch_training_model(
organization_id='1111111111111', job_definition_name='1111111111111',
model_id='1111111111111', description='new description')
Params:
- **organization_id** (str): organization_id
- **job_definition_name** (str): training job definition name
- **model_id** (str): model_id of the requested model
- **description** (str): description
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"training_model_id": "1111111111111",
"job_definition_id": "1111111111111",
"training_job_id": "1111111111111",
"user_parameters": {},
"description": "this is description of the model",
"archived": false,
"exec_env": "cloud",
"created_at": "2018-01-01T00:00:00.00000Z",
"modified_at": "2018-01-01T00:00:00.00000Z"
}
Response Structure:
- **training_model_id** (str) : training model id
- **job_definition_id** (str) : job definition id
- **training_job_id** (str) : training job id
- **user_parameters** (dict): user defined parameters.
- **description** (str) : model description.
- **archived** (bool) : archived or not.
- **exec_env** (enum) : Executed environment. One of [cloud, local, none].
Raises:
- NotFound: model not found
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {
'description': description
}
path = '/organizations/{}/training/definitions/{}/models/{}'.format(
organization_id, job_definition_name, model_id)
return self._connection.api_request(
method='PATCH', path=path, json=params)
def download_training_model(
self,
organization_id: str,
job_definition_name: str,
model_id: str) -> dict:
"""download a training model
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/download
Request Syntax:
.. code-block:: python
response = api_client.download_training_model(
organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111')
Params:
- **organization_id** (str): organization_id
- **job_definition_name** (str): training job definition name
- **model_id** (str): model_id of the requested model
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"download_uri": "https://..."
}
Response Structure:
- **download_uri** (str) : presigned download link of the training model
Raises:
- NotFound: model not found
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/models/{}/download'.format(
organization_id, job_definition_name, model_id)
return self._connection.api_request(method='GET', path=path)
def archive_training_model(
self,
organization_id: str,
job_definition_name: str,
model_id: str) -> dict:
"""archive a training model
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/archive
Request Syntax:
.. code-block:: python
response = api_client.archive_training_model(
organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111')
Params:
- **organization_id** (str): organization_id
- **job_definition_name** (str): training job definition name
- **model_id** (str): model_id of the requested model
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "{job_definition_name}:{model_id} archived"
}
Response Structure:
- **message** (str) : message
Raises:
- NotFound: model not found
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/models/{}/archive'.format(
organization_id, job_definition_name, model_id)
return self._connection.api_request(method='POST', path=path)
def unarchive_training_model(
self,
organization_id: str,
job_definition_name: str,
model_id: str) -> dict:
"""unarchive a training model
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/unarchive
Request Syntax:
.. code-block:: python
response = api_client.unarchive_training_model(
organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111')
Params:
- **organization_id** (str): organization_id
- **job_definition_name** (str): training job definition name
- **model_id** (str): model_id of the requested model
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "{job_definition_name}:{model_id} unarchived"
}
Response Structure:
- **message** (str) : message
Raises:
- NotFound: model not found
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/models/{}/unarchive'.format(
organization_id, job_definition_name, model_id)
return self._connection.api_request(method='POST', path=path)
import json
import tempfile
import zipfile
from io import BytesIO
from pathlib import Path
from typing import AnyStr, IO, Optional, List, Dict, Any
from abeja.exceptions import BadRequest
from abeja.common.api_client import BaseAPIClient
from abeja.common.file_helpers import convert_to_zipfile_object
from abeja.common.utils import get_filter_archived_applied_params
from abeja.common.instance_type import InstanceType
class APIClient(BaseAPIClient):
"""A Low-Level client for Training API
.. code-block:: python
from abeja.training import APIClient
api_client = APIClient()
"""
def create_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""create a training job definition
API reference: POST /organizations/<organization_id>/training/definitions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
response = api_client.create_training_job_definition(organization_id,
job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_id": "1443334816413",
"versions": [],
"organization_id": "1200123565071",
"modified_at": "2018-05-17T02:13:35.726812Z",
"created_at": "2018-05-17T02:13:35.726691Z",
"version_count": 0,
"name": "test"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
data = {'name': job_definition_name}
path = '/organizations/{}/training/definitions/'.format(
organization_id)
return self._connection.api_request(
method='POST', path=path, json=data)
def archive_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""archive a training job definition
API reference: POST /organizations/<organization_id>/training/definitions/{name}/archive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
response = api_client.archive_training_job_definition(organization_id,
job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/archive'.format(
organization_id, job_definition_name)
return self._connection.api_request(method='POST', path=path, json={})
def unarchive_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""unarchive a training job definition
API reference: POST /organizations/<organization_id>/training/definitions/{name}/unarchive
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test"
response = api_client.unarchive_training_job_definition(organization_id,
job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/unarchive'.format(
organization_id, job_definition_name)
return self._connection.api_request(method='POST', path=path, json={})
def get_training_job_definitions(self, organization_id: str,
filter_archived: Optional[bool] = None,
offset: Optional[int] = None,
limit: Optional[int] = None) -> dict:
"""get training job definitions
API reference: GET /organizations/<organization_id>/training/definitions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
response = api_client.get_training_job_definitions(organization_id)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``)
- **offset** (int): **[optional]** paging offset.
- **limit** (int): **[optional]** paging limit.
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"entries": [
{
"version_count": 1,
"created_at": "2018-03-08T00:46:50.791787Z",
"organization_id": "1200123565071",
"versions": [
{
"job_definition_version": 1,
"user_parameters": {},
"handler": "train:handler",
"image": "abeja-inc/all-gpu:19.04",
"modified_at": "2018-03-08T00:48:12.207883Z",
"datasets": {
"train": "1376063797251"
},
"created_at": "2018-03-08T00:48:12.132471Z",
"job_definition_id": "1381349997580"
}
],
"name": "test",
"archived": false,
"modified_at": "2018-03-08T00:46:50.791946Z",
"job_definition_id": "1381349997580"
}
]
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
params = {} # type: Dict[str, Any]
if filter_archived is not None:
params = get_filter_archived_applied_params(
params, filter_archived)
if offset is not None:
params['offset'] = offset
if limit is not None:
params['limit'] = limit
path = '/organizations/{}/training/definitions/'.format(
organization_id)
return self._connection.api_request(
method='GET', path=path, params=params if params else None)
def get_training_job_definition(
self,
organization_id: str,
job_definition_name: str,
include_jobs: Optional[bool] = None) -> dict:
"""get a training job definition.
API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = 'test'
response = api_client.get_training_job_definition(organization_id, job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **include_jobs** (bool): If ``True``, also returns training jobs in response. By historical reason,
the default value is **True**, but you should specify False because it degrades
API performance if you have a massive amount of jobs in the target training
job definition.
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"modified_at": "2018-05-17T02:13:35.726812Z",
"organization_id": "1200123565071",
"created_at": "2018-05-17T02:13:35.726691Z",
"job_definition_id": "1443334816413",
"name": "test",
"archived": false,
"versions": [],
"version_count": 0
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}'.format(
organization_id, job_definition_name)
# parameters
params = {}
if include_jobs is None:
pass
elif include_jobs:
params['include_jobs'] = 'true'
else:
params['include_jobs'] = 'false'
return self._connection.api_request(
method='GET', path=path, params=(
None if len(params) == 0 else params))
def delete_training_job_definition(
self,
organization_id: str,
job_definition_name: str) -> dict:
"""delete a training job definition.
API reference: DELETE /organizations/<organization_id>/training/definitions/<job_definition_name>
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = 'test'
response = api_client.delete_training_job_definition(organization_id, job_definition_name)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"message": "test deleted"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}'.format(
organization_id, job_definition_name)
return self._connection.api_request(method='DELETE', path=path)
def create_training_job_definition_version_native_api(
self, organization_id: str, job_definition_name: str,
source_code: IO[AnyStr], parameters: dict) -> dict:
"""create a training job definition version.
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
source_code = open("./train.zip", "rb")
handler = "train:handler"
image = "abeja-inc/all-gpu:19.04"
environment = {"key": "value"}
description = "description"
response = api_client.create_training_job_definition_version_native_api(
organization_id, job_definition_name, source_code,
parameters={"handler": handler, "image": image, "environment": environment, "description": description})
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **source_code** (IO): zip or tar.gz archived file-like object to run training job
- **parameters** (dict): parameters excluding source code to run training job
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_version": 1,
"user_parameters": {},
"environment": {},
"description": "description",
"datasets": {
"mnist": "1111111111111"
},
"modified_at": "2018-05-17T12:34:46.344076Z",
"job_definition_id": "1443714239154",
"handler": "train:handler",
"created_at": "2018-05-17T12:34:46.296488Z",
"image": "abeja-inc/all-gpu:19.04"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
path = '/organizations/{}/training/definitions/{}/versions'.format(
organization_id, job_definition_name)
files = {
'source_code': (
'source_code.zip',
source_code,
'application/zip'),
'parameters': (
'params.json',
BytesIO(
json.dumps(parameters).encode()),
'application/json'),
}
return self._connection.api_request(
method='POST', path=path, files=files)
def create_training_job_definition_version(
self, organization_id: str, job_definition_name: str,
filepaths: List[str], handler: str,
image: Optional[str] = None, environment: Optional[Dict[str, Any]] = None,
description: Optional[str] = None) -> dict:
"""create a training job definition version.
API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions
Request Syntax:
.. code-block:: python
organization_id = "1102940376065"
job_definition_name = "test_job_definition"
filepaths = ["./requirements.txt", "./train.py"]
handler = "train:handler"
image = "abeja-inc/all-gpu:19.04"
environment = {"key": "value"}
description = "description"
response = api_client.create_training_job_definition_version(
organization_id, job_definition_name, filepaths, handler,
image=image, environment=environment, description=description)
Params:
- **organization_id** (str): ORGANIZATION_ID
- **job_definition_name** (str): training job definition name
- **filepaths** (list): file list to run training job
- **handler** (str): path to handler (ex. train:handler )
- **image** (Optional[str]): runtime environment
- **environment** (Optional[dict]): user defined parameters set as environment variables
- **description** (Optional[str]): description
Return type:
dict
Returns:
Response Syntax:
.. code-block:: json
{
"job_definition_version": 1,
"user_parameters": {},
"environment": {},
"description": "description",
"datasets": {
"mnist": "1111111111111"
},
"modified_at": "2018-05-17T12:34:46.344076Z",
"job_definition_id": "1443714239154",
"handler": "train:handler",
"created_at": "2018-05-17T12:34:46.296488Z",
"image": "abeja-inc/all-gpu:19.04"
}
Raises:
- BadRequest
- Unauthorized: Authentication failed
- InternalServerError
"""
try:
source_code = tempfile.NamedTemporaryFile(suffix='.zip')
with zipfile.ZipFile(source_code.name, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
for filepath in filepaths:
path_obj = Path(filepath)
new_zip.write(filepath, path_obj.name)
source_code.seek(0)
parameters = {'handler': handler} # type: Dict[str, Any]
if image:
parameters['image'] = image
if environment:
parameters['environment'] = environment
if description:
parameters['description'] = description
return self.create_training_job_definition_version_native_api(
organization_id, job_definition_name, source_code, parameters)
finally:
if source_code:
source_code.close()
def get_training_job_definition_versions(
        self, organization_id: str, job_definition_name: str,
        filter_archived: Optional[bool] = None) -> dict:
    """List every version of a training job definition.

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/versions

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **filter_archived** (bool): **[optional]** If ``true``, include archived
          jobs, otherwise exclude archived jobs. (default: ``false``)

    Return type:
        dict

    Returns:
        A dict holding an ``entries`` list; each entry describes one job
        definition version (``job_definition_version``, ``handler``,
        ``image``, ``datasets``, timestamps, ``archived`` flag, ...).

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    # None means "let the server apply its default"; only build an explicit
    # query when the caller requested archive filtering.
    if filter_archived is None:
        query = None
    else:
        query = get_filter_archived_applied_params({}, filter_archived)
    path = '/organizations/{org}/training/definitions/{name}/versions'.format(
        org=organization_id, name=job_definition_name)
    return self._connection.api_request(
        method='GET', path=path, params=query)
def get_training_job_definition_version(
        self,
        organization_id: str,
        job_definition_name: str,
        version_id: int) -> dict:
    """Fetch one specific version of a training job definition.

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **version_id** (int): training job version

    Return type:
        dict

    Returns:
        The version record: ``job_definition_version``, ``handler``,
        ``image``, ``datasets``, timestamps, ``archived`` flag, etc.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/versions/{version}'.format(
        org=organization_id, name=job_definition_name, version=version_id)
    return self._connection.api_request(method='GET', path=path)
def patch_training_job_definition_version(
        self,
        organization_id: str,
        job_definition_name: str,
        version_id: int,
        description: str) -> dict:
    """Update the description of a training job definition version.

    API reference: PATCH /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **version_id** (int): training job version
        - **description** (str): new description text

    Return type:
        dict

    Returns:
        The updated version record.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/versions/{version}'.format(
        org=organization_id, name=job_definition_name, version=version_id)
    payload = {'description': description}
    return self._connection.api_request(
        method='PATCH', path=path, json=payload)
def archive_training_job_definition_version(
        self,
        organization_id: str,
        job_definition_name: str,
        version_id: int) -> dict:
    """Archive a training job definition version.

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/archive

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **version_id** (int): training job version

    Return type:
        dict

    Returns:
        ``{"message": "archived"}`` on success.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/versions/{version}/archive'.format(
        org=organization_id, name=job_definition_name, version=version_id)
    return self._connection.api_request(method='POST', path=path)
def unarchive_training_job_definition_version(
        self,
        organization_id: str,
        job_definition_name: str,
        version_id: int) -> dict:
    """Restore a previously archived training job definition version.

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/unarchive

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **version_id** (int): training job version

    Return type:
        dict

    Returns:
        ``{"message": "unarchived"}`` on success.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/versions/{version}/unarchive'.format(
        org=organization_id, name=job_definition_name, version=version_id)
    return self._connection.api_request(method='POST', path=path)
def delete_training_job_definition_version(
        self,
        organization_id: str,
        job_definition_name: str,
        version_id: int) -> dict:
    """Permanently delete a training job definition version.

    API reference: DELETE /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **version_id** (int): training job version

    Return type:
        dict

    Returns:
        ``{"message": "deleted"}`` on success.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/versions/{version}'.format(
        org=organization_id, name=job_definition_name, version=version_id)
    return self._connection.api_request(method='DELETE', path=path)
def create_training_job(
        self,
        organization_id: str,
        job_definition_name: str,
        version_id: int,
        user_parameters: Optional[dict] = None,
        datasets: Optional[dict] = None,
        instance_type: Optional[str] = None,
        environment: Optional[dict] = None,
        description: Optional[str] = None,
        export_log: Optional[bool] = None) -> dict:
    """create a training job

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/jobs

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **version_id** (int): training job version
        - **user_parameters** (dict): (**deprecated!!**) user defined parameters
          set as environment variables. use ``environment`` instead.
        - **datasets** (dict): **[optional]** datasets, combination of alias and dataset_id
        - **instance_type** (str): **[optional]** instance type of running environment
        - **environment** (dict): **[optional]** user defined parameters set as
          environment variables
        - **description** (str): **[optional]** description of this job
        - **export_log** (bool): **[optional]** If ``true``, include the log in the
          model. This feature is only available with 19.04 or later images.
          (default: ``false``)

    Return type:
        dict

    Returns:
        The created job record (``training_job_id``, ``status``,
        ``instance_type``, ``creator``, timestamps, ...).

    Raises:
        - BadRequest: invalid ``instance_type`` or server-side validation error
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    data = {}  # type: Dict[str, Any]
    # ``environment`` supersedes the deprecated ``user_parameters``; both
    # feed the same request field, with ``environment`` taking precedence.
    if environment is not None:
        data['environment'] = environment
    elif user_parameters is not None:
        data['environment'] = user_parameters
    if datasets is not None:
        data['datasets'] = datasets
    if instance_type is not None:
        # Validate client-side before hitting the API. Keep the try body
        # minimal: only the parse can raise ValueError.
        try:
            InstanceType.parse(instance_type)
        except ValueError:
            error_message = "'{}' is an invalid instance_type".format(
                instance_type)
            raise BadRequest(
                error=error_message,
                error_description=error_message,
                status_code=400)
        data['instance_type'] = instance_type
    if description is not None:
        data['description'] = description
    if export_log is not None:
        data['export_log'] = export_log
    path = '/organizations/{}/training/definitions/{}/versions/{}/jobs'.format(
        organization_id, job_definition_name, version_id)
    return self._connection.api_request(
        method='POST', path=path, json=data)
def get_training_jobs(
        self, organization_id: str, job_definition_name: str,
        limit: Optional[int] = None, offset: Optional[int] = None,
        filter_archived: Optional[bool] = None) -> dict:
    """get training jobs

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **limit** (int): **[optional]** max number of jobs to be returned
          (default: 10)
        - **offset** (int): **[optional]** offset of jobs ( which starts from 0 )
        - **filter_archived** (bool): **[optional]** If ``true``, include archived
          jobs, otherwise exclude archived jobs. (default: ``false``)

    Return type:
        dict

    Returns:
        A dict with ``entries`` (list of job records), ``limit``,
        ``offset`` and ``total``.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    # Unlike the list endpoints that send ``params=None``, this one always
    # sends a dict because limit/offset may still need to be attached.
    params = {} if filter_archived is None else get_filter_archived_applied_params(
        {}, filter_archived)
    if limit is not None:
        params['limit'] = limit
    if offset is not None:
        params['offset'] = offset
    path = '/organizations/{}/training/definitions/{}/jobs'.format(
        organization_id, job_definition_name)
    return self._connection.api_request(
        method='GET', path=path, params=params)
def get_training_job(
        self,
        organization_id: str,
        job_definition_name: str,
        training_job_id: str) -> dict:
    """Fetch a single training job.

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **training_job_id** (str): TRAINING_JOB_ID

    Return type:
        dict

    Returns:
        The job record: ``training_job_id``, ``status``, ``creator``,
        ``statistics``, timestamps, ``archived`` flag, etc.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/jobs/{job}'.format(
        org=organization_id, name=job_definition_name, job=training_job_id)
    return self._connection.api_request(method='GET', path=path)
def stop_training_job(
        self,
        organization_id: str,
        job_definition_name: str,
        training_job_id: str) -> dict:
    """Request that a running training job be stopped.

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/stop

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **training_job_id** (str): TRAINING_JOB_ID

    Return type:
        dict

    Returns:
        A confirmation message, e.g.
        ``{"message": "test_job_definition:1443722127663 stopped"}``.

    Raises:
        - Unauthorized: Authentication failed
        - Forbidden:
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/jobs/{job}/stop'.format(
        org=organization_id, name=job_definition_name, job=training_job_id)
    return self._connection.api_request(method='POST', path=path)
def archive_training_job(
        self,
        organization_id: str,
        job_definition_name: str,
        training_job_id: str) -> dict:
    """Archive a training job.

    API reference: POST /organizations/<organization_id>/training/definitions/{job_definition_name}/jobs/{training_job_id}/archive

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **training_job_id** (str): TRAINING_JOB_ID

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/jobs/{job}/archive'.format(
        org=organization_id, name=job_definition_name, job=training_job_id)
    # The endpoint expects a POST with an (empty) JSON body.
    return self._connection.api_request(method='POST', path=path, json={})
def unarchive_training_job(
        self,
        organization_id: str,
        job_definition_name: str,
        training_job_id: str) -> dict:
    """Unarchive a training job.

    API reference: POST /organizations/<organization_id>/training/definitions/{job_definition_name}/jobs/{training_job_id}/unarchive

    Request Syntax:
        .. code-block:: python

            organization_id = "1102940376065"
            job_definition_name = "test"
            training_job_id = "1234567890123"
            response = api_client.unarchive_training_job(organization_id,
                                                         job_definition_name,
                                                         training_job_id)

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **training_job_id** (str): TRAINING_JOB_ID

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{}/training/definitions/{}/jobs/{}/unarchive'.format(
        organization_id, job_definition_name, training_job_id)
    return self._connection.api_request(method='POST', path=path, json={})
def get_training_result(
        self,
        organization_id: str,
        job_definition_name: str,
        training_job_id: str) -> dict:
    """Fetch the artifacts produced by a training job.

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/result

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **training_job_id** (str): TRAINING_JOB_ID

    Return type:
        dict

    Returns:
        A dict whose ``artifacts`` entry maps artifact names to download
        info, e.g. ``{"artifacts": {"complete": {"uri": "..."}}}``.

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/jobs/{job}/result'.format(
        org=organization_id, name=job_definition_name, job=training_job_id)
    return self._connection.api_request(method='GET', path=path)
def update_statistics(
        self,
        organization_id: str,
        job_definition_name: str,
        training_job_id: str,
        statistics: dict) -> dict:
    """update a training job statistics

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/statistics

    Request Syntax:
        .. code-block:: python

            from abeja.training.statistics import Statistics
            statistics = Statistics(progress_percentage=0.5, epoch=1, num_epochs=5, key1='value1')
            statistics.add_stage(name=Statistics.STAGE_TRAIN, accuracy=0.9, loss=0.05)
            statistics.add_stage(name=Statistics.STAGE_VALIDATION, accuracy=0.8, loss=0.1, key2=2)
            response = api_client.update_statistics(
                organization_id, job_definition_name, training_job_id,
                statistics.get_statistics())

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **training_job_id** (str): TRAINING_JOB_ID
        - **statistics** (dict): statistics needs to be saved and updated

    Return type:
        dict

    Returns:
        Response Syntax:
            .. code-block:: json

                {
                    "statistics": {
                        "num_epochs": 5,
                        "epoch": 1,
                        "progress_percentage": 0.5,
                        "stages": {
                            "train": {
                                "accuracy": 0.9,
                                "loss": 0.05
                            },
                            "validation": {
                                "accuracy": 0.8,
                                "loss": 0.1,
                                "key2": 2
                            }
                        },
                        "key1": "value1"
                    }
                }

    Raises:
        - BadRequest
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    # Wrap the caller-supplied dict in the envelope the API expects.
    data = {
        'statistics': statistics
    }
    path = '/organizations/{}/training/definitions/{}/jobs/{}/statistics'.format(
        organization_id, job_definition_name, training_job_id)
    return self._connection.api_request(
        method='POST', path=path, json=data)
# Training model
def get_training_models(
        self, organization_id: str, job_definition_name: str,
        filter_archived: Optional[bool] = None) -> dict:
    """Get models entries

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models

    Request syntax:
        .. code-block:: python

            response = api_client.get_training_models(
                organization_id='1102940376065',
                job_definition_name='test_job_definition')

    Params:
        - **organization_id** (str): organization_id
        - **job_definition_name** (str): training job definition name
        - **filter_archived** (bool): **[optional]** If ``true``, include archived
          jobs, otherwise exclude archived jobs. (default: ``false``)

    Return type:
        dict

    Returns:
        Response syntax:
            .. code-block:: json

                {
                    "entries": [
                        {
                            "training_model_id": "1111111111111",
                            "job_definition_id": "1111111111111",
                            "training_job_id": "1111111111111",
                            "user_parameters": {},
                            "description": "this is description of the model",
                            "archived": false,
                            "exec_env": "cloud",
                            "created_at": "2018-01-01T00:00:00.00000Z",
                            "modified_at": "2018-01-01T00:00:00.00000Z"
                        }
                    ]
                }

        Response Structure:
            - **entries** (list)
                - (dict)
                    - **training_model_id** (str) : training model id
                    - **job_definition_id** (str) : job definition id
                    - **training_job_id** (str) : training job id
                    - **user_parameters** (dict): user defined parameters.
                    - **description** (str) : model description.
                    - **archived** (bool) : archived or not.
                    - **exec_env** (enum) : Executed environment. One of [cloud, local, none].

    Raises:
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    params = None if filter_archived is None else get_filter_archived_applied_params(
        {}, filter_archived)
    path = '/organizations/{}/training/definitions/{}/models'.format(
        organization_id, job_definition_name)
    return self._connection.api_request(
        method='GET', path=path, params=params)
def create_training_model(
        self, organization_id: str, job_definition_name: str,
        model_data: IO, parameters: Optional[dict] = None) -> dict:
    """Upload a new training model as a multipart request.

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models

    Params:
        - **organization_id** (str): ORGANIZATION_ID
        - **job_definition_name** (str): training job definition name
        - **model_data** (IO): model data
        - **parameters** (dict): parameters for creating training model
            - **training_job_id** (str): The ID of a corresponding training job.
            - **description** (str): Description
            - **user_parameters** (dict): user defined parameters.
            - **metrics** (dict): user defined metrics.

    Return type:
        dict

    Returns:
        The created model record (``training_model_id``,
        ``job_definition_id``, ``training_job_id``, ``description``,
        ``archived``, ``exec_env``, timestamps, ...).

    Raises:
        - InvalidDataFormat
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    # Guard clause: the API cannot create a model without a payload.
    if model_data is None:
        message = "model_data is necessary"
        raise BadRequest(
            error=message,
            error_description=message,
            status_code=400)
    if parameters is None:
        parameters = {}
    archive = convert_to_zipfile_object(model_data)
    files = {
        'model_data': ('model_data.zip', archive, 'application/zip'),
        'parameters': (
            'params.json',
            BytesIO(json.dumps(parameters).encode()),
            'application/json'),
    }
    path = '/organizations/{org}/training/definitions/{name}/models'.format(
        org=organization_id, name=job_definition_name)
    return self._connection.api_request(
        method='POST', path=path, files=files)
def get_training_model(
        self,
        organization_id: str,
        job_definition_name: str,
        model_id: str) -> dict:
    """get a training model

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>

    Request Syntax:
        .. code-block:: python

            response = api_client.get_training_model(
                organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111')

    Params:
        - **organization_id** (str): organization_id
        - **job_definition_name** (str): training job definition name
        - **model_id** (str): model_id of the requested model

    Return type:
        dict

    Returns:
        Response Syntax:
            .. code-block:: json

                {
                    "training_model_id": "1111111111111",
                    "job_definition_id": "1111111111111",
                    "training_job_id": "1111111111111",
                    "user_parameters": {},
                    "description": "this is description of the model",
                    "archived": false,
                    "exec_env": "cloud",
                    "created_at": "2018-01-01T00:00:00.00000Z",
                    "modified_at": "2018-01-01T00:00:00.00000Z"
                }

        Response Structure:
            - **training_model_id** (str) : training model id
            - **job_definition_id** (str) : job definition id
            - **training_job_id** (str) : training job id
            - **user_parameters** (dict): user defined parameters.
            - **description** (str) : model description.
            - **archived** (bool) : archived or not.
            - **exec_env** (enum) : Executed environment. One of [cloud, local, none].

    Raises:
        - NotFound: model not found
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{}/training/definitions/{}/models/{}'.format(
        organization_id, job_definition_name, model_id)
    return self._connection.api_request(method='GET', path=path)
def patch_training_model(
        self,
        organization_id: str,
        job_definition_name: str,
        model_id: str,
        description: str) -> dict:
    """Update the description of a training model.

    API reference: PATCH /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>

    Params:
        - **organization_id** (str): organization_id
        - **job_definition_name** (str): training job definition name
        - **model_id** (str): model_id of the requested model
        - **description** (str): new description text

    Return type:
        dict

    Returns:
        The updated model record (``training_model_id``,
        ``job_definition_id``, ``training_job_id``, ``description``,
        ``archived``, ``exec_env``, timestamps, ...).

    Raises:
        - NotFound: model not found
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/models/{model}'.format(
        org=organization_id, name=job_definition_name, model=model_id)
    payload = {'description': description}
    return self._connection.api_request(
        method='PATCH', path=path, json=payload)
def download_training_model(
        self,
        organization_id: str,
        job_definition_name: str,
        model_id: str) -> dict:
    """Request a presigned download link for a training model.

    API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/download

    Params:
        - **organization_id** (str): organization_id
        - **job_definition_name** (str): training job definition name
        - **model_id** (str): model_id of the requested model

    Return type:
        dict

    Returns:
        ``{"download_uri": "https://..."}`` — a presigned download link
        for the model archive.

    Raises:
        - NotFound: model not found
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/models/{model}/download'.format(
        org=organization_id, name=job_definition_name, model=model_id)
    return self._connection.api_request(method='GET', path=path)
def archive_training_model(
        self,
        organization_id: str,
        job_definition_name: str,
        model_id: str) -> dict:
    """Archive a training model.

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/archive

    Params:
        - **organization_id** (str): organization_id
        - **job_definition_name** (str): training job definition name
        - **model_id** (str): model_id of the requested model

    Return type:
        dict

    Returns:
        A confirmation message, e.g.
        ``{"message": "{job_definition_name}:{model_id} archived"}``.

    Raises:
        - NotFound: model not found
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/models/{model}/archive'.format(
        org=organization_id, name=job_definition_name, model=model_id)
    return self._connection.api_request(method='POST', path=path)
def unarchive_training_model(
        self,
        organization_id: str,
        job_definition_name: str,
        model_id: str) -> dict:
    """Restore a previously archived training model.

    API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/unarchive

    Params:
        - **organization_id** (str): organization_id
        - **job_definition_name** (str): training job definition name
        - **model_id** (str): model_id of the requested model

    Return type:
        dict

    Returns:
        A confirmation message, e.g.
        ``{"message": "{job_definition_name}:{model_id} unarchived"}``.

    Raises:
        - NotFound: model not found
        - Unauthorized: Authentication failed
        - InternalServerError
    """
    path = '/organizations/{org}/training/definitions/{name}/models/{model}/unarchive'.format(
        org=organization_id, name=job_definition_name, model=model_id)
    return self._connection.api_request(method='POST', path=path)
| en | 0.545723 | A Low-Level client for Training API .. code-block:: python from abeja.training import APIClient api_client = APIClient() create a training job definition API reference: POST /organizations/<organization_id>/training/definitions Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test" response = api_client.create_training_job_definition(organization_id, job_definition_name) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name Return type: dict Returns: Response Syntax: .. code-block:: json { "job_definition_id": "1443334816413", "versions": [], "organization_id": "1200123565071", "modified_at": "2018-05-17T02:13:35.726812Z", "created_at": "2018-05-17T02:13:35.726691Z", "version_count": 0, "name": "test" } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError archive a training job definition API reference: POST /organizations/<organization_id>/training/definitions/{name}/archive Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test" response = api_client.archive_training_job_definition(organization_id, job_definition_name) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError unarchive a training job definition API reference: POST /organizations/<organization_id>/training/definitions/{name}/unarchive Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test" response = api_client.unarchive_training_job_definition(organization_id, job_definition_name) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError get training job definitions API reference: GET /organizations/<organization_id>/training/definitions Request Syntax: .. code-block:: python organization_id = "1102940376065" response = api_client.get_training_job_definitions(organization_id) Params: - **organization_id** (str): ORGANIZATION_ID - **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``) - **offset** (int): **[optional]** paging offset. - **limit** (int): **[optional]** paging limit. Return type: dict Returns: Response Syntax: .. code-block:: json { "entries": [ { "version_count": 1, "created_at": "2018-03-08T00:46:50.791787Z", "organization_id": "1200123565071", "versions": [ { "job_definition_version": 1, "user_parameters": {}, "handler": "train:handler", "image": "abeja-inc/all-gpu:19.04", "modified_at": "2018-03-08T00:48:12.207883Z", "datasets": { "train": "1376063797251" }, "created_at": "2018-03-08T00:48:12.132471Z", "job_definition_id": "1381349997580" } ], "name": "test", "archived": false, "modified_at": "2018-03-08T00:46:50.791946Z", "job_definition_id": "1381349997580" } ] } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError # type: Dict[str, Any] get a training job definition. API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name> Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = 'test' response = api_client.get_training_job_definition(organization_id, job_definition_name) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **include_jobs** (bool): If ``True``, also returns training jobs in response. By historical reason, the default value is **True**, but you should specify False because it degrades API performance if you have a massive amount of jobs in the target training job definition. Return type: dict Returns: Response Syntax: .. code-block:: json { "modified_at": "2018-05-17T02:13:35.726812Z", "organization_id": "1200123565071", "created_at": "2018-05-17T02:13:35.726691Z", "job_definition_id": "1443334816413", "name": "test", "archived": false, "versions": [], "version_count": 0 } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError # parameters delete a training job definition. API reference: DELETE /organizations/<organization_id>/training/definitions/<job_definition_name> Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = 'test' response = api_client.delete_training_job_definition(organization_id, job_definition_name) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name Return type: dict Returns: Response Syntax: .. code-block:: json { "message": "test deleted" } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError create a training job definition version. API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" source_code = open("./train.zip", "rb") handler = "train:handler" image = "abeja-inc/all-gpu:19.04" environment = {"key": "value"} description = "description" response = api_client.create_training_job_definition_version_native_api( organization_id, job_definition_name, source_code, parameters={"handler": handler, "image": image, "environment": environment, "description": description}) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **source_code** (IO): zip or tar.gz archived file-like object to run training job - **parameters** (dict): parameters excluding source code to run training job Return type: dict Returns: Response Syntax: .. code-block:: json { "job_definition_version": 1, "user_parameters": {}, "environment": {}, "description": "description", "datasets": { "mnist": "1111111111111" }, "modified_at": "2018-05-17T12:34:46.344076Z", "job_definition_id": "1443714239154", "handler": "train:handler", "created_at": "2018-05-17T12:34:46.296488Z", "image": "abeja-inc/all-gpu:19.04" } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError create a training job definition version. API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" filepaths = ["./requirements.txt", "./train.py"] handler = "train:handler" image = "abeja-inc/all-gpu:19.04" environment = {"key": "value"} description = "description" response = api_client.create_training_job_definition_version( organization_id, job_definition_name, filepaths, handler, image=image, environment=environment, description=description) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **filepaths** (list): file list to run training job - **handler** (str): path to handler (ex. train:handler ) - **image** (Optional[str]): runtime environment - **environment** (Optional[dict]): user defined parameters set as environment variables - **description** (Optional[str]): description Return type: dict Returns: Response Syntax: .. code-block:: json { "job_definition_version": 1, "user_parameters": {}, "environment": {}, "description": "description", "datasets": { "mnist": "1111111111111" }, "modified_at": "2018-05-17T12:34:46.344076Z", "job_definition_id": "1443714239154", "handler": "train:handler", "created_at": "2018-05-17T12:34:46.296488Z", "image": "abeja-inc/all-gpu:19.04" } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError # type: Dict[str, Any] get training job definition versions. API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/versions Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = 'test_job_definition' response = api_client.get_training_job_definition_versions(organization_id, job_definition_name) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. 
(default: ``false``) Return type: dict Returns: Response Syntax: .. code-block:: json { "entries": [ { "job_definition_version": 1, "user_parameters": {}, "datasets": { "mnist": "1111111111111" }, "modified_at": "2018-05-17T12:34:46.344076Z", "job_definition_id": "1443714239154", "handler": "train:handler", "created_at": "2018-05-17T12:34:46.296488Z", "image": "abeja-inc/all-gpu:19.04", "archived": false } ] } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError get a training job definition version API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id> Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" version_id = 1 response = api_client.get_training_job_definition_version(organization_id, job_definition_name, version_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **version_id** (int): training job version Return type: dict Returns: Response Syntax: .. code-block:: json { "job_definition_version": 1, "user_parameters": {}, "datasets": { "mnist": "1111111111111" }, "modified_at": "2018-05-17T12:34:46.344076Z", "job_definition_id": "1443714239154", "handler": "train:handler", "created_at": "2018-05-17T12:34:46.296488Z", "image": "abeja-inc/all-gpu:19.04", "archived": false } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError Update a training job definition version API reference: PATCH /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id> Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" version_id = 1 response = api_client.patch_training_job_definition_version(organization_id, job_definition_name, version_id, description='new version') Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **version_id** (int): training job version - **description** (str): description Return type: dict Returns: Response Syntax: .. code-block:: json { "job_definition_version": 1, "user_parameters": {}, "datasets": { "mnist": "1111111111111" }, "modified_at": "2018-05-17T12:34:46.344076Z", "job_definition_id": "1443714239154", "handler": "train:handler", "created_at": "2018-05-17T12:34:46.296488Z", "image": "abeja-inc/all-gpu:19.04", "archived": false } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError archive a training job definition version API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/archive Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" version_id = 1 response = api_client.archive_training_job_definition_version(organization_id, job_definition_name, version_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **version_id** (int): training job version Return type: dict Returns: Response Syntax: .. code-block:: json { "message": "archived" } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError unarchive a training job definition version API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/unarchive Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" version_id = 1 response = api_client.unarchive_training_job_definition_version(organization_id, job_definition_name, version_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **version_id** (int): training job version Return type: dict Returns: Response Syntax: .. code-block:: json { "message": "unarchived" } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError delete a training job definition version API reference: DELETE /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id> Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" version_id = 1 response = api_client.delete_training_job_definition_version(organization_id, job_definition_name, version_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **version_id** (int): training job version Return type: dict Returns: Response Syntax: .. code-block:: json { "message": "deleted" } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError create a training job API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/versions/<version_id>/jobs Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" version_id = 1 user_parameters = { 'BATCH_SIZE': 50 } datasets = { "mnist": "1111111111111" } response = api_client.create_training_job( organization_id, job_definition_name, version_id, user_parameters, datasets) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **version_id** (int): training job version - **user_parameters** (dict): (**deprecated!!**) user defined parameters set as environment variables. use ``environment`` instead. - **datasets** (dict): **[optional]** datasets, combination of alias and dataset_id - **instance_type** (str): **[optional]** instance type of running environment - **environment** (dict): **[optional]** user defined parameters set as environment variables - **description** (str): **[optional]** description of this job - **export_log** (bool): **[optional]** If ``true``, include the log in the model. This feature is only available with 19.04 or later images. (default: ``false``) Return type: dict Returns: Response Syntax: .. code-block:: json { "job_definition_id": "1443714239154", "user_parameters": {}, "start_time": null, "created_at": "2018-05-17T12:43:59.322367Z", "job_definition_version": 1, "completion_time": null, "status": "Pending", "instance_type": "cpu-1", "modified_at": "2018-05-17T12:43:59.322673Z", "training_job_id": "1443722127663", "creator": { "email": "<EMAIL>", "is_registered": true, "created_at": "2017-05-26T01:38:46Z", "id": "1128347408389", "display_name": null, "updated_at": "2018-01-04T03:02:12Z", "role": "admin" }, "description": null, "statistics": null } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError # type: Dict[str, Any] # validation get training jobs API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" response = api_client.get_training_jobs(organization_id, job_definition_name) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **limit** (int): **[optional]** max number of jobs to be returned (default: 10) - **offset** (int): **[optional]** offset of jobs ( which starts from 0 ) - **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``) Return type: dict Returns: Response Syntax: .. code-block:: json { "entries": [ { "user_parameters": {}, "start_time": null, "training_job_id": "1443722127663", "created_at": "2018-05-17T12:43:59.322367Z", "completion_time": null, "id": "1443722127663", "job_definition_version": 1, "description": null, "statistics": null, "job_definition_id": "1443714239154", "modified_at": "2018-05-17T12:43:59.322673Z", "status": "Pending", "archived": false, "creator": { "email": "<EMAIL>", "created_at": "2017-05-26T01:38:46Z", "id": "1128347408389", "role": "admin", "display_name": null, "updated_at": "2018-01-04T03:02:12Z", "is_registered": true } } ], "limit": 10, "offset": 0, "total": 1 } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError get a training job API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id> Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" training_job_id = "1443722127663" response = api_client.get_training_job(organization_id, job_definition_name, training_job_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **training_job_id** (str): TRAINING_JOB_ID Return type: dict Returns: Response Syntax: .. 
code-block:: json { "job_definition_id": "1443714239154", "user_parameters": {}, "start_time": null, "created_at": "2018-05-17T12:43:59.322367Z", "job_definition_version": 1, "completion_time": null, "status": "Pending", "modified_at": "2018-05-17T12:43:59.322673Z", "training_job_id": "1443722127663", "archived": false, "creator": { "email": "<EMAIL>", "is_registered": true, "created_at": "2017-05-26T01:38:46Z", "id": "1128347408389", "display_name": null, "updated_at": "2018-01-04T03:02:12Z", "role": "admin" }, "description": null, "statistics": null } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError stop a training job API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/stop Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" training_job_id = "1443722127663" response = api_client.stop_training_job(organization_id, job_definition_name, training_job_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **training_job_id** (str): TRAINING_JOB_ID Return type: dict Returns: Response Syntax: .. code-block:: json { "message": "test_job_definition:1443722127663 stopped" } Raises: - Unauthorized: Authentication failed - Forbidden: - InternalServerError Archive a training job. API reference: POST /organizations/<organization_id>/training/definitions/{job_definition_name}/jobs/{training_job_id}/archive Request Syntax: .. 
code-block:: python organization_id = "1102940376065" job_definition_name = "test" training_job_id = "1234567890123" response = api_client.archive_training_job(organization_id, job_definition_name, training_job_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **training_job_id** (str): TRAINING_JOB_ID Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError Archive a training job. API reference: POST /organizations/<organization_id>/training/definitions/{job_definition_name}/jobs/{training_job_id}/unarchive Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test" training_job_id = "1234567890123" response = api_client.unarchive_training_job(organization_id, job_definition_name, training_job_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **training_job_id** (str): TRAINING_JOB_ID Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError get a training job result API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/result Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = "test_job_definition" training_job_id = "1443722127663" response = api_client.get_training_result(organization_id, job_definition_name, training_job_id) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **training_job_id** (str): TRAINING_JOB_ID Return type: dict Returns: Response Syntax: .. 
code-block:: json { "artifacts": { "complete": { "uri": "dummy_url", } } } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError update a training job statistics API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/jobs/<training_job_id>/statistics Request Syntax: .. code-block:: python from abeja.training.statistics import Statistics statistics = Statistics(progress_percentage=0.5, epoch=1, num_epochs=5, key1='value1') statistics.add_stage(name=Statistics.STAGE_TRAIN, accuracy=0.9, loss=0.05) statistics.add_stage(name=Statistics.STAGE_VALIDATION, accuracy=0.8, loss=0.1, key2=2) response = api_client.update_statistics(statistics.get_statistics()) Params: - **statistics** (str): statistics needs to be saved and updated Return type: dict Returns: Response Syntax: .. code-block:: json { "statistics": { "num_epochs": 5, "epoch": 1, "progress_percentage": 0.5, "stages": { "train": { "accuracy": 0.9, "loss": 0.05 }, "validation": { "accuracy": 0.8, "loss": 0.1, "key2": 2 } }, "key1": "value1" } } Raises: - BadRequest - Unauthorized: Authentication failed - InternalServerError # Training model Get models entries API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models Request syntax: .. code-block:: python response = api_client.list_models(organization_id='1102940376065') Params: - **organization_id** (str): organization_id - **job_definition_name** (str): training job definition name - **filter_archived** (bool): **[optional]** If ``true``, include archived jobs, otherwise exclude archived jobs. (default: ``false``) Return type: dict Returns: Response syntax: .. 
code-block:: json { "entries": [ { "training_model_id": "1111111111111", "job_definition_id": "1111111111111", "training_job_id": "1111111111111", "user_parameters": {}, "description": "this is description of the model", "archived": false, "exec_env": "cloud", "archived": false, "created_at": "2018-01-01T00:00:00.00000Z", "modified_at": "2018-01-01T00:00:00.00000Z" } ] } Response Structure: - **entries** (list) - (dict) - **training_model_id** (str) : training model id - **job_definition_id** (str) : job definition id - **training_job_id** (str) : training job id - **user_parameters** (dict): user defined parameters. - **description** (str) : model description. - **archived** (bool) : archived or not. - **exec_env** (enum) : Executed environment. One of [cloud, local, none]. Raises: - Unauthorized: Authentication failed - InternalServerError create a training model. API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models Request Syntax: .. code-block:: python organization_id = "1102940376065" job_definition_name = 'test_job_definition' model_data = '....' parameters = { "description": "description", "user_parameters": {} } response = api_client.create_training_model( organization_id, job_definition_name, model_data, parameters) Params: - **organization_id** (str): ORGANIZATION_ID - **job_definition_name** (str): training job definition name - **model_data** (IO): model data - **parameters** (dict): parameters for creating training model - **training_job_id** (str): The ID of a corresponding training job. - **description** (str): Description - **user_parameters** (dict): user defined parameters. - **metrics** (dict): user defined metrics. Return type: dict Returns: Response Syntax: .. 
code-block:: json { "training_model_id": "1111111111111", "job_definition_id": "1111111111111", "training_job_id": "1111111111111", "user_parameters": {}, "description": "this is description of the model", "archived": false, "exec_env": "cloud", "created_at": "2018-01-01T00:00:00.00000Z", "modified_at": "2018-01-01T00:00:00.00000Z" } Raises: - InvalidDataFormat - Unauthorized: Authentication failed - InternalServerError get a training model API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id> Request Syntax: .. code-block:: python response = api_client.get_training_model( organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111') Params: - **organization_id** (str): organization_id - **job_definition_name** (str): training job definition name - **model_id** (str): model_id of the requested model Return type: dict Returns: Response Syntax: .. code-block:: json { "training_model_id": "1111111111111", "job_definition_id": "1111111111111", "training_job_id": "1111111111111", "user_parameters": {}, "description": "this is description of the model", "archived": false, "exec_env": "cloud", "archived": false, "created_at": "2018-01-01T00:00:00.00000Z", "modified_at": "2018-01-01T00:00:00.00000Z" } Response Structure: - **training_model_id** (str) : training model id - **job_definition_id** (str) : job definition id - **training_job_id** (str) : training job id - **user_parameters** (dict): user defined parameters. - **description** (str) : model description. - **archived** (bool) : archived or not. - **exec_env** (enum) : Executed environment. One of [cloud, local, none]. Raises: - NotFound: model not found - Unauthorized: Authentication failed - InternalServerError patch a training model API reference: PATCH /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id> Request Syntax: .. 
code-block:: python response = api_client.patch_training_model( organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111', description='new description') Params: - **organization_id** (str): organization_id - **job_definition_name** (str): training job definition name - **model_id** (str): model_id of the requested model - **description** (str): description Return type: dict Returns: Response Syntax: .. code-block:: json { "training_model_id": "1111111111111", "job_definition_id": "1111111111111", "training_job_id": "1111111111111", "user_parameters": {}, "description": "this is description of the model", "archived": false, "exec_env": "cloud", "created_at": "2018-01-01T00:00:00.00000Z", "modified_at": "2018-01-01T00:00:00.00000Z" } Response Structure: - **training_model_id** (str) : training model id - **job_definition_id** (str) : job definition id - **training_job_id** (str) : training job id - **user_parameters** (dict): user defined parameters. - **description** (str) : model description. - **archived** (bool) : archived or not. - **exec_env** (enum) : Executed environment. One of [cloud, local, none]. Raises: - NotFound: model not found - Unauthorized: Authentication failed - InternalServerError download a training model API reference: GET /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/download Request Syntax: .. code-block:: python response = api_client.download_training_model( organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111') Params: - **organization_id** (str): organization_id - **job_definition_name** (str): training job definition name - **model_id** (str): model_id of the requested model Return type: dict Returns: Response Syntax: .. code-block:: json { "download_uri": "https://..." 
} Response Structure: - **download_uri** (str) : presigned download link of the training model Raises: - NotFound: model not found - Unauthorized: Authentication failed - InternalServerError archive a training model API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/archive Request Syntax: .. code-block:: python response = api_client.archive_training_model( organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111') Params: - **organization_id** (str): organization_id - **job_definition_name** (str): training job definition name - **model_id** (str): model_id of the requested model Return type: dict Returns: Response Syntax: .. code-block:: json { "message": "{job_definition_name}:{model_id} archived" } Response Structure: - **message** (str) : message Raises: - NotFound: model not found - Unauthorized: Authentication failed - InternalServerError unarchive a training model API reference: POST /organizations/<organization_id>/training/definitions/<job_definition_name>/models/<model_id>/unarchive Request Syntax: .. code-block:: python response = api_client.unarchive_training_model( organization_id='1111111111111', job_definition_name='1111111111111', model_id='1111111111111') Params: - **organization_id** (str): organization_id - **job_definition_name** (str): training job definition name - **model_id** (str): model_id of the requested model Return type: dict Returns: Response Syntax: .. code-block:: json { "message": "{job_definition_name}:{model_id} unarchived" } Response Structure: - **message** (str) : message Raises: - NotFound: model not found - Unauthorized: Authentication failed - InternalServerError | 2.074778 | 2 |
cloudify_azure/resources/network/networkinterfacecard.py | cloudify-cosmo/cloudify-azure-plugin | 2 | 6625500 | # #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
resources.network.NetworkInterfaceCard
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Microsoft Azure Network Interface Card interface
"""
from uuid import uuid4
from msrestazure.azure_exceptions import CloudError
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from cloudify_azure import (constants, decorators, utils)
from cloudify_azure.resources.network.ipconfiguration \
import get_ip_configurations
from cloudify_azure.resources.network.publicipaddress import PUBLIC_IP_PROPERTY
from azure_sdk.resources.network.network_interface_card \
import NetworkInterfaceCard
from azure_sdk.resources.network.public_ip_address \
import PublicIPAddress
def get_unique_ip_conf_name(nic, resource_group_name,
                            nic_name, name):
    """Return an IP configuration name that is unique within the NIC.

    If ``name`` is a non-empty string it is returned unchanged (the
    caller supplied an explicit name).  Otherwise a random UUID4 string
    is generated and checked against the NIC's existing IP
    configurations, retrying up to 15 times on a collision.

    :param nic: NetworkInterfaceCard client exposing ``get()``
    :param resource_group_name: Azure resource group name
    :param nic_name: network interface card name
    :param name: user supplied IP configuration name (may be falsy)
    :return: a usable, non-empty IP configuration name string
    """
    if name:
        return name
    candidate = None
    for _ in range(15):
        candidate = "{0}".format(uuid4())
        try:
            result = nic.get(resource_group_name, nic_name)
        except CloudError:
            # The NIC does not exist yet, so there is nothing to
            # collide with and any generated name is unique.
            return candidate
        # Guard with "or ()" so a missing/None ip_configurations entry
        # does not raise TypeError while iterating.
        taken = [ip_conf.get("name")
                 for ip_conf in result.get("ip_configurations") or ()]
        if candidate not in taken:
            return candidate
    # All 15 attempts collided -- practically impossible with UUID4.
    # Return the last candidate anyway so callers always get a string
    # instead of an implicit None.
    return candidate
def get_connected_nsg(ctx):
    """Look up a Network Security Group connected via relationship.

    Scans the node instance's relationships for the NSG relationship
    type (``constants.REL_NIC_CONNECTED_TO_NSG``, which may be a single
    type string or a tuple of type strings).  The last matching target
    wins, mirroring a simple linear scan.

    :return: ``{'id': <nsg resource id>}`` if a match is found,
        otherwise ``None``
    """
    rel_type = constants.REL_NIC_CONNECTED_TO_NSG
    nsg = None
    for rel in ctx.instance.relationships:
        hierarchy = rel.type_hierarchy
        if isinstance(rel_type, tuple):
            matched = any(candidate in hierarchy for candidate in rel_type)
        else:
            matched = rel_type in hierarchy
        if matched:
            nsg = rel.target
    if not nsg:
        return None
    return {
        'id': nsg.instance.runtime_properties.get("resource_id", "")
    }
@operation(resumable=True)
@decorators.with_generate_name(NetworkInterfaceCard)
def create(ctx, **_):
    """Uses an existing, or creates a new, Network Interface Card.

    The actual Azure API call happens in ``configure``; this step only
    records the generated resource name (via the decorator) and the
    resource group the NIC belongs to.
    """
    nic_name = utils.get_resource_name(ctx)
    group_name = utils.get_resource_group(ctx)
    ctx.logger.info(
        "Created NIC with name {0} inside ResourceGroup {1}".format(
            nic_name, group_name))
    ctx.instance.runtime_properties['resource_group'] = group_name
@operation(resumable=True)
@decorators.with_azure_resource(NetworkInterfaceCard)
def configure(ctx, **_):
    """
    Uses an existing, or creates a new, Network Interface Card

    .. warning::
        The "configure" operation is actually the second half of
        the "create" operation. This is necessary since IP
        Configuration nodes are treated as separate, stand-alone
        types and must be "connected" to the NIC before
        it's actually created. The actual "create" operation
        simply assigns a UUID for the node and the "configure"
        operation creates the object

    :raises cloudify.exceptions.NonRecoverableError: if the Azure
        create_or_update call fails with a CloudError
    """
    # Create a resource (if necessary)
    azure_config = utils.get_client_config(ctx.node.properties)
    name = ctx.instance.runtime_properties.get('name')
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    network_interface_card = NetworkInterfaceCard(azure_config, ctx.logger,
                                                  api_version)
    nic_params = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'primary': ctx.node.properties.get('primary'),
    }
    nic_params = \
        utils.handle_resource_config_params(nic_params,
                                            ctx.node.properties.get(
                                                'resource_config', {}))
    # Special Case network_security_group instead of networkSecurityGroups
    nic_params['network_security_group'] = \
        nic_params.pop('network_security_groups', None)
    # Merge in the NSG discovered via relationship and the attached IP
    # configurations; dict_update gives these precedence over anything
    # coming from resource_config.
    nic_params = utils.dict_update(
        nic_params, {
            'network_security_group': get_connected_nsg(ctx),
            'ip_configurations': get_ip_configurations(ctx)
        }
    )
    # Clean empty values from params once, after all merging is done
    # (cleaning before the merge as well would be redundant: the merge
    # only overwrites the two keys above and empties are stripped here).
    nic_params = \
        utils.cleanup_empty_params(nic_params)
    try:
        result = \
            network_interface_card.create_or_update(
                resource_group_name,
                name,
                nic_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "configure nic '{0}' "
            "failed with this error : {1}".format(name,
                                                  cr.message)
        )
    utils.save_common_info_in_runtime_properties(
        resource_group_name=resource_group_name,
        resource_name=name,
        resource_get_create_result=result)
@operation(resumable=True)
def start(ctx, **_):
    """
    Stores NIC IPs in runtime properties.

    For each IP configuration on the NIC: records the private address
    under ``ip`` (the last configuration wins), resolves a public
    address (falling back to a lookup of the attached public IP
    resource, or to the private address when none is attached) and
    records it under ``public_ip`` and the shared plugin property, and
    accumulates all distinct public addresses in
    ``public_ip_addresses``.
    """
    azure_config = utils.get_client_config(ctx.node.properties)
    name = ctx.instance.runtime_properties.get('name')
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    nic_client = NetworkInterfaceCard(azure_config, ctx.logger, api_version)
    nic_data = nic_client.get(resource_group_name, name)
    for ip_cfg in nic_data.get('ip_configurations', list()):
        # Private IP Address endpoint.
        private_ip = ip_cfg.get('private_ip_address')
        ctx.instance.runtime_properties['ip'] = private_ip
        public_ip = \
            ip_cfg.get('public_ip_address', {}).get('ip_address', None)
        if not public_ip:
            pip_client = PublicIPAddress(azure_config, ctx.logger)
            try:
                # AttributeError here means no public IP resource id.
                pip_name = ip_cfg.get(
                    'public_ip_address', {}).get('id').rsplit('/', 1)[1]
            except AttributeError:
                # No public IP attached -- fall back to the private one.
                public_ip = private_ip
            else:
                pip_data = pip_client.get(resource_group_name, pip_name)
                public_ip = pip_data.get("ip_address")
        # Public IP Address endpoint.
        ctx.instance.runtime_properties['public_ip'] = public_ip
        # For consistency with other plugins.
        ctx.instance.runtime_properties[PUBLIC_IP_PROPERTY] = public_ip
        # There may be multiple public IP addresses; keep them all,
        # without duplicates.
        collected = \
            ctx.instance.runtime_properties.get('public_ip_addresses', [])
        if public_ip not in collected:
            collected.append(public_ip)
        ctx.instance.runtime_properties['public_ip_addresses'] = collected
@operation(resumable=True)
def delete(ctx, **_):
    """Deletes a Network Interface Card.

    No-op when the node uses an externally managed resource
    ('use_external_resource'), since this plugin does not own it.
    """
    # Delete the resource
    if ctx.node.properties.get('use_external_resource', False):
        return
    azure_config = utils.get_client_config(ctx.node.properties)
    resource_group_name = utils.get_resource_group(ctx)
    # the actual resource name was generated/stored during create
    name = ctx.instance.runtime_properties.get('name')
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    network_interface_card = NetworkInterfaceCard(azure_config, ctx.logger,
                                                  api_version)
    # handle_delete performs the delete plus the common error handling
    utils.handle_delete(
        ctx, network_interface_card, resource_group_name, name)
@operation(resumable=True)
def attach_ip_configuration(ctx, **_):
    """Generates a usable UUID for the NIC's IP Configuration.

    Relationship operation: ctx.source is the NIC node, ctx.target the IP
    Configuration node.  The chosen (or generated) unique name is stored in
    the target's runtime properties for later use by configure().
    """
    # Generate the IPConfiguration's name
    azure_config = utils.get_client_config(ctx.source.node.properties)
    resource_group_name = utils.get_resource_group(ctx.source)
    nic_name = ctx.source.instance.runtime_properties.get('name')
    # NOTE(review): unlike the other operations no api_version is passed
    # here -- confirm the default API version is acceptable for this lookup.
    network_interface_card = NetworkInterfaceCard(azure_config, ctx.logger)
    ip_configuration_name = ctx.target.node.properties.get('name')
    ip_configuration_name = \
        get_unique_ip_conf_name(network_interface_card, resource_group_name,
                                nic_name, ip_configuration_name)
    ctx.target.instance.runtime_properties['name'] = ip_configuration_name
| # #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
resources.network.NetworkInterfaceCard
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Microsoft Azure Network Interface Card interface
"""
from uuid import uuid4
from msrestazure.azure_exceptions import CloudError
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from cloudify_azure import (constants, decorators, utils)
from cloudify_azure.resources.network.ipconfiguration \
import get_ip_configurations
from cloudify_azure.resources.network.publicipaddress import PUBLIC_IP_PROPERTY
from azure_sdk.resources.network.network_interface_card \
import NetworkInterfaceCard
from azure_sdk.resources.network.public_ip_address \
import PublicIPAddress
def get_unique_ip_conf_name(nic, resource_group_name,
                            nic_name, name):
    """Return an IP Configuration name that is unique on the given NIC.

    If *name* is provided it is returned unchanged (the caller asked for an
    explicit name).  Otherwise up to 15 random UUIDs are tried and the first
    one that does not collide with an existing IP configuration name on the
    NIC is returned.  If the NIC does not exist yet (the lookup raises
    CloudError) any generated name is unique by definition.

    :param nic: NetworkInterfaceCard API wrapper (must expose ``get``)
    :param resource_group_name: name of the resource group holding the NIC
    :param nic_name: name of the NIC whose configurations are inspected
    :param name: explicitly requested name, or a falsy value to auto-generate
    :returns: a unique name, or None in the (practically impossible) case
        that 15 consecutive random UUIDs all collide
    """
    if name:
        return name
    for _ in range(15):
        name = str(uuid4())
        try:
            result = nic.get(resource_group_name, nic_name)
        except CloudError:  # nic_name is not there yet: nothing can clash
            return name
        # The API may omit ip_configurations or return an explicit None;
        # treat both as "no existing configurations" instead of crashing.
        existing = result.get("ip_configurations") or []
        if not any(ip_conf.get("name") == name for ip_conf in existing):
            return name
def get_connected_nsg(ctx):
    """Finds a connected Network Security Group"""
    # Normalise the configured relationship type(s) to a tuple once, so a
    # single membership test covers both the scalar and the tuple case.
    wanted = constants.REL_NIC_CONNECTED_TO_NSG
    if not isinstance(wanted, tuple):
        wanted = (wanted,)
    nsg = None
    for rel in ctx.instance.relationships:
        if any(rel_type in rel.type_hierarchy for rel_type in wanted):
            # keep scanning: the last matching relationship wins, exactly
            # as with the original per-relationship assignment
            nsg = rel.target
    if nsg is None:
        return None
    return {'id': nsg.instance.runtime_properties.get("resource_id", "")}
@operation(resumable=True)
@decorators.with_generate_name(NetworkInterfaceCard)
def create(ctx, **_):
    """Uses an existing, or creates a new, Network Interface Card"""
    nic_name = utils.get_resource_name(ctx)
    group_name = utils.get_resource_group(ctx)
    message = "Created NIC with name {0} inside ResourceGroup {1}"
    ctx.logger.info(message.format(nic_name, group_name))
    # remember the group so later operations can locate the resource again
    ctx.instance.runtime_properties['resource_group'] = group_name
@operation(resumable=True)
@decorators.with_azure_resource(NetworkInterfaceCard)
def configure(ctx, **_):
    """
    Uses an existing, or creates a new, Network Interface Card
    .. warning::
        The "configure" operation is actually the second half of
        the "create" operation. This is necessary since IP
        Configuration nodes are treated as separate, stand-alone
        types and must be "connected" to the NIC before
        it's actually created. The actual "create" operation
        simply assigns a UUID for the node and the "configure"
        operation creates the object
    """
    # Create a resource (if necessary)
    azure_config = utils.get_client_config(ctx.node.properties)
    # the resource name was generated/stored by the create operation
    name = ctx.instance.runtime_properties.get('name')
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    network_interface_card = NetworkInterfaceCard(azure_config, ctx.logger,
                                                  api_version)
    # base parameters straight from the node properties ...
    nic_params = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
        'primary': ctx.node.properties.get('primary'),
    }
    # ... merged with whatever the blueprint supplied in resource_config
    nic_params = \
        utils.handle_resource_config_params(nic_params,
                                            ctx.node.properties.get(
                                                'resource_config', {}))
    # Special Case network_security_group instead of networkSecurityGroups
    nic_params['network_security_group'] = \
        nic_params.pop('network_security_groups', None)
    # clean empty values from params
    nic_params = \
        utils.cleanup_empty_params(nic_params)
    # relationship-derived values are layered on top of blueprint values.
    # NOTE(review): if get_connected_nsg() returns None this may clobber a
    # network_security_group taken from resource_config, depending on the
    # semantics of utils.dict_update -- confirm.
    nic_params = utils.dict_update(
        nic_params, {
            'network_security_group': get_connected_nsg(ctx),
            'ip_configurations': get_ip_configurations(ctx)
        }
    )
    # clean empty values from params (again: the update above may have
    # introduced new empty entries)
    nic_params = \
        utils.cleanup_empty_params(nic_params)
    try:
        result = \
            network_interface_card.create_or_update(
                resource_group_name,
                name,
                nic_params)
    except CloudError as cr:
        # surface Azure failures as non-recoverable workflow errors
        raise cfy_exc.NonRecoverableError(
            "configure nic '{0}' "
            "failed with this error : {1}".format(name,
                                                  cr.message)
        )
    # persist resource group / name / API result for later operations
    utils.save_common_info_in_runtime_properties(
        resource_group_name=resource_group_name,
        resource_name=name,
        resource_get_create_result=result)
@operation(resumable=True)
def start(ctx, **_):
    """
    Stores NIC IPs in runtime properties.

    For every IP configuration on the NIC this records:
    * 'ip' - the private IP address (last configuration wins, preserving
      the pre-existing behavior),
    * 'public_ip' and PUBLIC_IP_PROPERTY - the public IP address, falling
      back to the private address when no public IP is attached,
    * 'public_ip_addresses' - the accumulated list of distinct public IPs.
    """
    azure_config = utils.get_client_config(ctx.node.properties)
    name = ctx.instance.runtime_properties.get('name')
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    network_interface_card = NetworkInterfaceCard(azure_config, ctx.logger,
                                                  api_version)
    nic_data = network_interface_card.get(resource_group_name, name)
    for ip_cfg in nic_data.get('ip_configurations', list()):
        # Get the Private IP Address endpoint
        ctx.instance.runtime_properties['ip'] = \
            ip_cfg.get('private_ip_address')
        # 'public_ip_address' may be absent OR present with an explicit
        # None value; "or {}" covers both so the chained .get() below can
        # never raise AttributeError.
        public_ip_cfg = ip_cfg.get('public_ip_address') or {}
        public_ip = public_ip_cfg.get('ip_address', None)
        if not public_ip:
            pip = PublicIPAddress(azure_config, ctx.logger)
            try:
                # derive the public IP resource name from the last segment
                # of its Azure resource id
                pip_name = public_ip_cfg.get('id').rsplit('/', 1)[1]
            except AttributeError:
                # no public IP id at all: fall back to the private address
                # so consumers always find *some* value in 'public_ip'
                public_ip = ctx.instance.runtime_properties['ip']
            else:
                public_ip_data = pip.get(resource_group_name, pip_name)
                public_ip = public_ip_data.get("ip_address")
        # Get the Public IP Address endpoint
        ctx.instance.runtime_properties['public_ip'] = \
            public_ip
        # For consistency with other plugins.
        ctx.instance.runtime_properties[PUBLIC_IP_PROPERTY] = \
            public_ip
        # We should also consider that maybe there will be many
        # public ip addresses.
        public_ip_addresses = \
            ctx.instance.runtime_properties.get('public_ip_addresses', [])
        if public_ip not in public_ip_addresses:
            public_ip_addresses.append(public_ip)
        ctx.instance.runtime_properties['public_ip_addresses'] = \
            public_ip_addresses
@operation(resumable=True)
def delete(ctx, **_):
    """Deletes a Network Interface Card"""
    # Externally managed resources are never deleted by this plugin.
    if ctx.node.properties.get('use_external_resource', False):
        return
    client_config = utils.get_client_config(ctx.node.properties)
    group_name = utils.get_resource_group(ctx)
    nic_name = ctx.instance.runtime_properties.get('name')
    version = ctx.node.properties.get('api_version',
                                      constants.API_VER_NETWORK)
    nic_api = NetworkInterfaceCard(client_config, ctx.logger, version)
    # handle_delete performs the delete plus the common error handling
    utils.handle_delete(ctx, nic_api, group_name, nic_name)
@operation(resumable=True)
def attach_ip_configuration(ctx, **_):
    """Generates a usable UUID for the NIC's IP Configuration"""
    # Relationship operation: source is the NIC, target the IPConfiguration.
    client_config = utils.get_client_config(ctx.source.node.properties)
    group_name = utils.get_resource_group(ctx.source)
    card_name = ctx.source.instance.runtime_properties.get('name')
    nic_api = NetworkInterfaceCard(client_config, ctx.logger)
    requested_name = ctx.target.node.properties.get('name')
    # De-duplicate the requested (or auto-generated) name against the NIC's
    # existing IP configurations before recording it on the target node.
    unique_name = get_unique_ip_conf_name(
        nic_api, group_name, card_name, requested_name)
    ctx.target.instance.runtime_properties['name'] = unique_name
| en | 0.815068 | # ####### # Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. resources.network.NetworkInterfaceCard ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Microsoft Azure Network Interface Card interface # found ipc with same name # if exception that nic_name is not there yet Finds a connected Network Security Group Uses an existing, or creates a new, Network Interface Card Uses an existing, or creates a new, Network Interface Card .. warning:: The "configure" operation is actually the second half of the "create" operation. This is necessary since IP Configuration nodes are treated as separate, stand-alone types and must be "connected" to the NIC before it's actually created. The actual "create" operation simply assigns a UUID for the node and the "configure" operation creates the object # Create a resource (if necessary) # Special Case network_security_group instead of networkSecurityGroups # clean empty values from params # clean empty values from params Stores NIC IPs in runtime properties. # Get the Private IP Address endpoint # Get the Public IP Address endpoint # For consistency with other plugins. # We should also consider that maybe there will be many # public ip addresses. Deletes a Network Interface Card # Delete the resource Generates a usable UUID for the NIC's IP Configuration # Generate the IPConfiguration's name | 1.96718 | 2 |
aceapi/cloudphish/test.py | ace-ecosystem/ACE | 24 | 6625501 | <reponame>ace-ecosystem/ACE<filename>aceapi/cloudphish/test.py
# vim: sw=4:ts=4:et
import hashlib
import logging
import os, os.path
import threading
import time
import tarfile
from subprocess import Popen, PIPE
from unittest import TestCase
import saq
from aceapi.test import APIBasicTestCase
from saq.analysis import RootAnalysis
from saq.brocess import query_brocess_by_fqdn
from saq.constants import *
from saq.cloudphish import *
from saq.database import use_db, get_db_connection, initialize_node
from saq.test import *
from saq.util import *
import requests
from flask import url_for
# part of our sample set of data
TEST_URL = 'http://localhost:8088/Payment_Advice.pdf'
class CloudphishTestCase(TestCase):
    """Base test case that provides a throw-away local HTTP server.

    The server (python3 -m http.server on port 8088) serves the sample PDF
    used by the cloudphish tests, and the cloudphish_analysis_results table
    is emptied before every test so results from earlier runs cannot leak in.
    """

    # how long to wait (seconds) for the local HTTP server to come up
    HTTP_SERVER_STARTUP_TIMEOUT = 30

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # subprocess for http server
        self.http_server = None
    def setUp(self, *args, **kwargs):
        super().setUp(*args, **kwargs)
        # start every test from a clean cloudphish state
        with get_db_connection() as db:
            c = db.cursor()
            c.execute("DELETE FROM cloudphish_analysis_results")
            db.commit()
        self.start_http_server()
    def start_http_server(self):
        """Starts the sample-content HTTP server and waits until it answers."""
        logging.debug("starting http server")
        self.http_server = Popen(['python3', '-m', 'http.server', '8088'],
                                 cwd=os.path.join(saq.SAQ_HOME, 'test_data', 'pdf'), stdout=PIPE, stderr=PIPE)
        def _reader(p):
            # drain the child's output so its pipe buffers never fill up
            for line in p:
                logging.info("[http_server] - {}".format(line.strip()))
        threading.Thread(target=_reader, args=(self.http_server.stdout,), daemon=True).start()
        threading.Thread(target=_reader, args=(self.http_server.stderr,), daemon=True).start()
        time.sleep(0.1)
        # wait for it to start, but never forever: a server that cannot
        # start would otherwise hang the entire test suite
        deadline = time.time() + self.HTTP_SERVER_STARTUP_TIMEOUT
        while True:
            try:
                r = requests.get(TEST_URL)
                logging.debug("http server started!: {}".format(r))
                break
            except Exception as e:
                if time.time() >= deadline:
                    self.stop_http_server()
                    raise RuntimeError(
                        "http server failed to start within {} seconds: {}".format(
                            self.HTTP_SERVER_STARTUP_TIMEOUT, e))
                logging.debug("waiting for http server to start... ({})".format(e))
                time.sleep(0.25)
    def stop_http_server(self):
        """Terminates the HTTP server subprocess, if one is running."""
        if self.http_server:
            logging.debug("stopping http server")
            self.http_server.terminate()
            self.http_server.wait()
            self.http_server = None
    def tearDown(self, *args, **kwargs):
        super().tearDown(*args, **kwargs)
        self.stop_http_server()
class CloudphishAPITestCase(CloudphishTestCase, ACEEngineTestCase):
    """End-to-end tests for the cloudphish HTTP API.

    These tests drive the submit / download / clear_alert endpoints against
    the local sample HTTP server (from CloudphishTestCase) and the test
    database, running the analysis engine in between to exercise the full
    request -> analysis -> alert life cycle.

    NOTE(review): assertEquals() is a deprecated alias of assertEqual();
    left as-is here to keep the code unchanged.
    """
    #def setUp(self, *args, **kwargs):
        #super().setUp(*args, **kwargs)
    def test_http_server(self):
        """Sanity check: the local sample HTTP server answers TEST_URL."""
        # make sure our http server is working
        r = requests.get(TEST_URL)
        self.assertEquals(r.status_code, 200)
    @use_db
    def test_submit_valid_url(self, db, c):
        """Full life cycle of a valid URL submission.

        Phases: initial submit (NEW/UNKNOWN), engine run producing an
        alert, cached re-submit (ANALYZED/ALERT), content download by
        sha256, alert tarball download, and clearing the alert.
        """
        result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
        result = result.get_json()
        self.assertIsNotNone(result)
        # first check the result
        for key in [ KEY_RESULT, KEY_DETAILS, KEY_STATUS, KEY_ANALYSIS_RESULT, KEY_HTTP_RESULT,
                     KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
            self.assertTrue(key in result)
        self.assertEquals(result[KEY_RESULT], RESULT_OK)
        self.assertEquals(result[KEY_STATUS], STATUS_NEW)
        self.assertEquals(result[KEY_ANALYSIS_RESULT], SCAN_RESULT_UNKNOWN)
        self.assertIsNotNone(result[KEY_DETAILS])
        # everything else should be None
        for key in [ KEY_HTTP_RESULT, KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
            self.assertIsNone(result[key])
        # we should have a single entry in the cloudphish_analysis_results table
        c.execute("""SELECT sha256_url, http_result_code, sha256_content, result, insert_date, uuid, status
                     FROM cloudphish_analysis_results""")
        result = c.fetchall()
        self.assertEquals(len(result), 1)
        sha256_url, http_result_code, sha256_content, result, insert_date, _uuid, status = result[0]
        self.assertIsNotNone(sha256_url)
        self.assertIsNone(http_result_code)
        self.assertIsNone(sha256_content)
        self.assertEquals(result, SCAN_RESULT_UNKNOWN)
        self.assertIsNotNone(insert_date)
        self.assertIsNotNone(_uuid)
        self.assertEquals(status, STATUS_NEW)
        # we should have a matching entry in the workload for this uuid
        c.execute("""SELECT id, uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir
                     FROM workload""")
        result = c.fetchall()
        self.assertEquals(len(result), 1)
        _id, workload_uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir = result[0]
        self.assertIsNotNone(_id)
        self.assertEquals(workload_uuid, _uuid)
        self.assertEquals(node_id, saq.SAQ_NODE_ID)
        self.assertEquals(analysis_mode, ANALYSIS_MODE_CLOUDPHISH)
        self.assertIsNotNone(insert_date)
        self.assertEquals(company_id, saq.COMPANY_ID)
        self.assertIsNone(exclusive_uuid)
        self.assertIsNotNone(storage_dir)
        # and then make sure we can load the analysis
        root = RootAnalysis(storage_dir=storage_dir)
        root.load()
        self.assertTrue(isinstance(root.details, dict))
        for key in [ KEY_DETAILS_URL, KEY_DETAILS_SHA256_URL, KEY_DETAILS_CONTEXT ]:
            self.assertTrue(key in root.details)
        # now we start an engine to work on cloudphish analysis
        engine = TestEngine(analysis_pools={ANALYSIS_MODE_CLOUDPHISH: 1}, local_analysis_modes=[ANALYSIS_MODE_CLOUDPHISH])
        engine.enable_alerting()
        engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
        # force this analysis to become an alert
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_CLOUDPHISH)
        #engine.enable_module('analysis_module_detection', ANALYSIS_MODE_CLOUDPHISH)
        #engine.enable_module('analysis_module_alert', ANALYSIS_MODE_CLOUDPHISH)
        engine.controlled_stop()
        engine.start()
        engine.wait()
        # we should still have a single entry in the cloudphish_analysis_results table
        # but it should be updated with the analysis results
        db.commit()
        c.execute("""SELECT HEX(sha256_url), http_result_code, http_message, HEX(sha256_content), result, insert_date, uuid, status
                     FROM cloudphish_analysis_results""")
        result = c.fetchall()
        self.assertEquals(len(result), 1)
        sha256_url, http_result_code, http_message, sha256_content, result, insert_date, _uuid, status = result[0]
        self.assertIsNotNone(sha256_url)
        self.assertEquals(http_result_code, 200)
        self.assertEquals(http_message, 'OK')
        self.assertIsNotNone(sha256_content)
        self.assertEquals(result, SCAN_RESULT_ALERT)
        self.assertIsNotNone(insert_date)
        self.assertIsNotNone(_uuid)
        self.assertEquals(status, STATUS_ANALYZED)
        # and we should have an entry in the cloudphish_content_metadata table
        c.execute("""SELECT node, name FROM cloudphish_content_metadata WHERE sha256_content = UNHEX(%s)""", sha256_content)
        result = c.fetchall()
        self.assertEquals(len(result), 1)
        node, file_name = result[0]
        self.assertEquals(node, saq.SAQ_NODE)
        # file names are stored UTF-16LE encoded in the database
        file_name = file_name.decode('utf-16le')
        self.assertEquals(file_name, 'Payment_Advice.pdf')
        # we should have seen the analysis mode change
        wait_for_log_count('changed from cloudphish to correlation', 1, 5)
        # should also have an entry to work the new alert
        old_storage_dir = storage_dir
        c.execute("""SELECT id, uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir
                     FROM workload""")
        result = c.fetchall()
        self.assertEquals(len(result), 1)
        _id, workload_uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir = result[0]
        self.assertIsNotNone(_id)
        self.assertEquals(workload_uuid, _uuid)
        self.assertEquals(node_id, saq.SAQ_NODE_ID)
        self.assertEquals(analysis_mode, ANALYSIS_MODE_CORRELATION)
        self.assertIsNotNone(insert_date)
        self.assertEquals(company_id, saq.COMPANY_ID)
        self.assertIsNone(exclusive_uuid)
        self.assertEquals(storage_dir, storage_dir_from_uuid(workload_uuid))
        # now we make a second api call to the same url
        result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
        result = result.get_json()
        self.assertIsNotNone(result)
        # first check the result
        for key in [ KEY_RESULT, KEY_DETAILS, KEY_STATUS, KEY_ANALYSIS_RESULT, KEY_HTTP_RESULT,
                     KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
            self.assertTrue(key in result)
        self.assertEquals(result[KEY_RESULT], RESULT_OK)
        self.assertEquals(result[KEY_STATUS], STATUS_ANALYZED)
        self.assertEquals(result[KEY_ANALYSIS_RESULT], SCAN_RESULT_ALERT)
        # this time the cached HTTP/content details should all be populated
        self.assertEquals(result[KEY_HTTP_RESULT], 200)
        self.assertEquals(result[KEY_HTTP_MESSAGE], 'OK')
        self.assertEquals(result[KEY_SHA256_CONTENT], sha256_content)
        self.assertEquals(result[KEY_LOCATION], saq.SAQ_NODE)
        self.assertEquals(result[KEY_FILE_NAME], 'Payment_Advice.pdf')
        # now attempt to download the binary by sha256
        result = self.client.get(url_for('cloudphish.download', s=sha256_url))
        # make sure we got the actual file
        m = hashlib.sha256()
        m.update(result.data)
        sha256_result = m.hexdigest()
        self.assertEquals(sha256_result.lower(), sha256_content.lower())
        # and make sure we got the file name
        filename_ok = False
        for header in result.headers:
            header_name, header_value = header
            if header_name == 'Content-Disposition':
                self.assertTrue('Payment_Advice.pdf' in header_value)
                filename_ok = True
        self.assertTrue(filename_ok)
        # now attempt to download the alert itself
        result = self.client.get(url_for('engine.download', uuid=_uuid))
        # we should get back a tar file
        tar_path = os.path.join(saq.TEMP_DIR, 'download.tar')
        output_dir = os.path.join(saq.TEMP_DIR, 'download')
        try:
            with open(tar_path, 'wb') as fp:
                for chunk in result.response:
                    fp.write(chunk)
            with tarfile.open(name=tar_path, mode='r|') as tar:
                tar.extractall(path=output_dir)
            downloaded_root = RootAnalysis(storage_dir=output_dir)
            downloaded_root.load()
            self.assertTrue(isinstance(root.details, dict))
            for key in [ KEY_DETAILS_URL, KEY_DETAILS_SHA256_URL, KEY_DETAILS_CONTEXT ]:
                self.assertTrue(key in root.details)
        finally:
            # best-effort cleanup of the temporary download artifacts
            try:
                os.remove(tar_path)
            except:
                pass
            # NOTE(review): shutil is not imported by name in this module;
            # presumably provided via a star import -- verify.
            try:
                shutil.rmtree(output_dir)
            except:
                pass
        # and then finally make sure we can clear the alert
        result = self.client.get(url_for('cloudphish.clear_alert', url=TEST_URL))
        self.assertEquals(result.status_code, 200)
        db.commit()
        c.execute("SELECT result FROM cloudphish_analysis_results WHERE sha256_url = UNHEX(%s)", (sha256_url,))
        row = c.fetchone()
        self.assertEquals(row[0], SCAN_RESULT_CLEAR)
        # we should have a brocess entry for this http request
        self.assertEquals(query_brocess_by_fqdn('localhost'), 1)
    @use_db
    def test_submit_invalid_url(self, db, c):
        """Submitting bytes that are clearly not a URL returns HTTP 500."""
        # try submitting something that is clearly not a URL
        result = self.client.get(url_for('cloudphish.submit', url=b'\xFF\x80\x34\x01\x45', ignore_filters='1'))
        self.assertEquals(result.status_code, 500)
    def test_submit_ignore_filters(self):
        """ignore_filters=1 bypasses the blacklist so analysis still starts."""
        # we add a url for something that should be blacklisted but we ignore the filters
        with open(self.blacklist_path, 'w') as fp:
            fp.write('localhost\n')
        result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
        result = result.get_json()
        self.assertIsNotNone(result)
        # first check the result
        for key in [ KEY_RESULT, KEY_DETAILS, KEY_STATUS, KEY_ANALYSIS_RESULT, KEY_HTTP_RESULT,
                     KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
            self.assertTrue(key in result)
        self.assertEquals(result[KEY_RESULT], RESULT_OK)
        self.assertEquals(result[KEY_STATUS], STATUS_NEW)
        self.assertEquals(result[KEY_ANALYSIS_RESULT], SCAN_RESULT_UNKNOWN)
        self.assertIsNotNone(result[KEY_DETAILS])
        # everything else should be None
        for key in [ KEY_HTTP_RESULT, KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
            self.assertIsNone(result[key])
    def test_download_redirect(self):
        """A download served from a different node returns a 302 redirect."""
        # create a request to download the pdf
        result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
        # have the engine process it
        engine = TestEngine(analysis_pools={ANALYSIS_MODE_CLOUDPHISH: 1}, local_analysis_modes=[ANALYSIS_MODE_CLOUDPHISH])
        engine.enable_alerting()
        engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)
        engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
        # force this analysis to become an alert
        engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_CLOUDPHISH)
        #engine.enable_module('analysis_module_detection', ANALYSIS_MODE_CLOUDPHISH)
        #engine.enable_module('analysis_module_alert', ANALYSIS_MODE_CLOUDPHISH)
        engine.controlled_stop()
        engine.start()
        engine.wait()
        # get the sha256_content
        submission_result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
        submission_result = submission_result.get_json()
        self.assertIsNotNone(submission_result[KEY_SHA256_URL])
        # change what node we are
        saq.SAQ_NODE = 'second_host'
        initialize_node()
        self.initialize_test_client()
        # we should get a redirect back to the other node
        result = self.client.get(url_for('cloudphish.download', s=submission_result[KEY_SHA256_URL]))
        self.assertEquals(result.status_code, 302)
| # vim: sw=4:ts=4:et
import hashlib
import logging
import os, os.path
import threading
import time
import tarfile
from subprocess import Popen, PIPE
from unittest import TestCase
import saq
from aceapi.test import APIBasicTestCase
from saq.analysis import RootAnalysis
from saq.brocess import query_brocess_by_fqdn
from saq.constants import *
from saq.cloudphish import *
from saq.database import use_db, get_db_connection, initialize_node
from saq.test import *
from saq.util import *
import requests
from flask import url_for
# part of our sample set of data
TEST_URL = 'http://localhost:8088/Payment_Advice.pdf'
class CloudphishTestCase(TestCase):
    """Base test case that provides a throw-away local HTTP server.

    Empties the cloudphish_analysis_results table before every test and
    runs python3 -m http.server on port 8088 to serve the sample PDF.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # subprocess for http server
        self.http_server = None
    def setUp(self, *args, **kwargs):
        super().setUp(*args, **kwargs)
        # start every test from a clean cloudphish state
        with get_db_connection() as db:
            c = db.cursor()
            c.execute("DELETE FROM cloudphish_analysis_results")
            db.commit()
        self.start_http_server()
    def start_http_server(self):
        """Starts the sample-content HTTP server and waits until it answers."""
        logging.debug("starting http server")
        self.http_server = Popen(['python3', '-m', 'http.server', '8088'],
                                 cwd=os.path.join(saq.SAQ_HOME, 'test_data', 'pdf'), stdout=PIPE, stderr=PIPE)
        def _reader(p):
            # drain the child's output so its pipe buffers never fill up
            for line in p:
                logging.info("[http_server] - {}".format(line.strip()))
        threading.Thread(target=_reader, args=(self.http_server.stdout,), daemon=True).start()
        threading.Thread(target=_reader, args=(self.http_server.stderr,), daemon=True).start()
        time.sleep(0.1)
        # wait for it to start...
        # NOTE(review): this loop has no timeout; a server that never starts
        # hangs the whole test suite -- consider adding a deadline.
        while True:
            try:
                r = requests.get(TEST_URL)
                logging.debug("http server started!: {}".format(r))
                break
            except Exception as e:
                logging.debug("waiting for http server to start... ({})".format(e))
                time.sleep(0.25)
    def stop_http_server(self):
        """Terminates the HTTP server subprocess, if one is running."""
        if self.http_server:
            logging.debug("stopping http server")
            self.http_server.terminate()
            self.http_server.wait()
            self.http_server = None
    def tearDown(self, *args, **kwargs):
        super().tearDown(*args, **kwargs)
        self.stop_http_server()
class CloudphishAPITestCase(CloudphishTestCase, ACEEngineTestCase):
#def setUp(self, *args, **kwargs):
#super().setUp(*args, **kwargs)
def test_http_server(self):
# make sure our http server is working
r = requests.get(TEST_URL)
self.assertEquals(r.status_code, 200)
@use_db
def test_submit_valid_url(self, db, c):
result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
result = result.get_json()
self.assertIsNotNone(result)
# first check the result
for key in [ KEY_RESULT, KEY_DETAILS, KEY_STATUS, KEY_ANALYSIS_RESULT, KEY_HTTP_RESULT,
KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
self.assertTrue(key in result)
self.assertEquals(result[KEY_RESULT], RESULT_OK)
self.assertEquals(result[KEY_STATUS], STATUS_NEW)
self.assertEquals(result[KEY_ANALYSIS_RESULT], SCAN_RESULT_UNKNOWN)
self.assertIsNotNone(result[KEY_DETAILS])
# everything else should be None
for key in [ KEY_HTTP_RESULT, KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
self.assertIsNone(result[key])
# we should have a single entry in the cloudphish_analysis_results table
c.execute("""SELECT sha256_url, http_result_code, sha256_content, result, insert_date, uuid, status
FROM cloudphish_analysis_results""")
result = c.fetchall()
self.assertEquals(len(result), 1)
sha256_url, http_result_code, sha256_content, result, insert_date, _uuid, status = result[0]
self.assertIsNotNone(sha256_url)
self.assertIsNone(http_result_code)
self.assertIsNone(sha256_content)
self.assertEquals(result, SCAN_RESULT_UNKNOWN)
self.assertIsNotNone(insert_date)
self.assertIsNotNone(_uuid)
self.assertEquals(status, STATUS_NEW)
# we should have a matching entry in the workload for this uuid
c.execute("""SELECT id, uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir
FROM workload""")
result = c.fetchall()
self.assertEquals(len(result), 1)
_id, workload_uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir = result[0]
self.assertIsNotNone(_id)
self.assertEquals(workload_uuid, _uuid)
self.assertEquals(node_id, saq.SAQ_NODE_ID)
self.assertEquals(analysis_mode, ANALYSIS_MODE_CLOUDPHISH)
self.assertIsNotNone(insert_date)
self.assertEquals(company_id, saq.COMPANY_ID)
self.assertIsNone(exclusive_uuid)
self.assertIsNotNone(storage_dir)
# and then make sure we can load the analysis
root = RootAnalysis(storage_dir=storage_dir)
root.load()
self.assertTrue(isinstance(root.details, dict))
for key in [ KEY_DETAILS_URL, KEY_DETAILS_SHA256_URL, KEY_DETAILS_CONTEXT ]:
self.assertTrue(key in root.details)
# now we start an engine to work on cloudphish analysis
engine = TestEngine(analysis_pools={ANALYSIS_MODE_CLOUDPHISH: 1}, local_analysis_modes=[ANALYSIS_MODE_CLOUDPHISH])
engine.enable_alerting()
engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)
engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
# force this analysis to become an alert
engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_CLOUDPHISH)
#engine.enable_module('analysis_module_detection', ANALYSIS_MODE_CLOUDPHISH)
#engine.enable_module('analysis_module_alert', ANALYSIS_MODE_CLOUDPHISH)
engine.controlled_stop()
engine.start()
engine.wait()
# we should still have a single entry in the cloudphish_analysis_results table
# but it should be updated with the analysis results
db.commit()
c.execute("""SELECT HEX(sha256_url), http_result_code, http_message, HEX(sha256_content), result, insert_date, uuid, status
FROM cloudphish_analysis_results""")
result = c.fetchall()
self.assertEquals(len(result), 1)
sha256_url, http_result_code, http_message, sha256_content, result, insert_date, _uuid, status = result[0]
self.assertIsNotNone(sha256_url)
self.assertEquals(http_result_code, 200)
self.assertEquals(http_message, 'OK')
self.assertIsNotNone(sha256_content)
self.assertEquals(result, SCAN_RESULT_ALERT)
self.assertIsNotNone(insert_date)
self.assertIsNotNone(_uuid)
self.assertEquals(status, STATUS_ANALYZED)
# and we should have an entry in the cloudphish_content_metadata table
c.execute("""SELECT node, name FROM cloudphish_content_metadata WHERE sha256_content = UNHEX(%s)""", sha256_content)
result = c.fetchall()
self.assertEquals(len(result), 1)
node, file_name = result[0]
self.assertEquals(node, saq.SAQ_NODE)
file_name = file_name.decode('utf-16le')
self.assertEquals(file_name, 'Payment_Advice.pdf')
# we should have seen the analysis mode change
wait_for_log_count('changed from cloudphish to correlation', 1, 5)
# should also have an entry to work the new alert
old_storage_dir = storage_dir
c.execute("""SELECT id, uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir
FROM workload""")
result = c.fetchall()
self.assertEquals(len(result), 1)
_id, workload_uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir = result[0]
self.assertIsNotNone(_id)
self.assertEquals(workload_uuid, _uuid)
self.assertEquals(node_id, saq.SAQ_NODE_ID)
self.assertEquals(analysis_mode, ANALYSIS_MODE_CORRELATION)
self.assertIsNotNone(insert_date)
self.assertEquals(company_id, saq.COMPANY_ID)
self.assertIsNone(exclusive_uuid)
self.assertEquals(storage_dir, storage_dir_from_uuid(workload_uuid))
# now we make a second api call to the same url
result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
result = result.get_json()
self.assertIsNotNone(result)
# first check the result
for key in [ KEY_RESULT, KEY_DETAILS, KEY_STATUS, KEY_ANALYSIS_RESULT, KEY_HTTP_RESULT,
KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
self.assertTrue(key in result)
self.assertEquals(result[KEY_RESULT], RESULT_OK)
self.assertEquals(result[KEY_STATUS], STATUS_ANALYZED)
self.assertEquals(result[KEY_ANALYSIS_RESULT], SCAN_RESULT_ALERT)
# everything else should be None
self.assertEquals(result[KEY_HTTP_RESULT], 200)
self.assertEquals(result[KEY_HTTP_MESSAGE], 'OK')
self.assertEquals(result[KEY_SHA256_CONTENT], sha256_content)
self.assertEquals(result[KEY_LOCATION], saq.SAQ_NODE)
self.assertEquals(result[KEY_FILE_NAME], 'Payment_Advice.pdf')
# now attempt to download the binary by sha256
result = self.client.get(url_for('cloudphish.download', s=sha256_url))
# make sure we got the actual file
m = hashlib.sha256()
m.update(result.data)
sha256_result = m.hexdigest()
self.assertEquals(sha256_result.lower(), sha256_content.lower())
# and make sure we got the file name
filename_ok = False
for header in result.headers:
header_name, header_value = header
if header_name == 'Content-Disposition':
self.assertTrue('Payment_Advice.pdf' in header_value)
filename_ok = True
self.assertTrue(filename_ok)
# now attempt to download the alert itself
result = self.client.get(url_for('engine.download', uuid=_uuid))
# we should get back a tar file
tar_path = os.path.join(saq.TEMP_DIR, 'download.tar')
output_dir = os.path.join(saq.TEMP_DIR, 'download')
try:
with open(tar_path, 'wb') as fp:
for chunk in result.response:
fp.write(chunk)
with tarfile.open(name=tar_path, mode='r|') as tar:
tar.extractall(path=output_dir)
downloaded_root = RootAnalysis(storage_dir=output_dir)
downloaded_root.load()
self.assertTrue(isinstance(root.details, dict))
for key in [ KEY_DETAILS_URL, KEY_DETAILS_SHA256_URL, KEY_DETAILS_CONTEXT ]:
self.assertTrue(key in root.details)
finally:
try:
os.remove(tar_path)
except:
pass
try:
shutil.rmtree(output_dir)
except:
pass
# and then finally make sure we can clear the alert
result = self.client.get(url_for('cloudphish.clear_alert', url=TEST_URL))
self.assertEquals(result.status_code, 200)
db.commit()
c.execute("SELECT result FROM cloudphish_analysis_results WHERE sha256_url = UNHEX(%s)", (sha256_url,))
row = c.fetchone()
self.assertEquals(row[0], SCAN_RESULT_CLEAR)
# we should have a brocess entry for this http request
self.assertEquals(query_brocess_by_fqdn('localhost'), 1)
@use_db
def test_submit_invalid_url(self, db, c):
# try submitting something that is clearly not a URL
result = self.client.get(url_for('cloudphish.submit', url=b'\xFF\x80\x34\x01\x45', ignore_filters='1'))
self.assertEquals(result.status_code, 500)
def test_submit_ignore_filters(self):
# we add a url for something that should be blacklisted but we ignore the filters
with open(self.blacklist_path, 'w') as fp:
fp.write('localhost\n')
result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
result = result.get_json()
self.assertIsNotNone(result)
# first check the result
for key in [ KEY_RESULT, KEY_DETAILS, KEY_STATUS, KEY_ANALYSIS_RESULT, KEY_HTTP_RESULT,
KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
self.assertTrue(key in result)
self.assertEquals(result[KEY_RESULT], RESULT_OK)
self.assertEquals(result[KEY_STATUS], STATUS_NEW)
self.assertEquals(result[KEY_ANALYSIS_RESULT], SCAN_RESULT_UNKNOWN)
self.assertIsNotNone(result[KEY_DETAILS])
# everything else should be None
for key in [ KEY_HTTP_RESULT, KEY_HTTP_MESSAGE, KEY_SHA256_CONTENT, KEY_LOCATION, KEY_FILE_NAME ]:
self.assertIsNone(result[key])
def test_download_redirect(self):
# create a request to download the pdf
result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
# have the engine process it
engine = TestEngine(analysis_pools={ANALYSIS_MODE_CLOUDPHISH: 1}, local_analysis_modes=[ANALYSIS_MODE_CLOUDPHISH])
engine.enable_alerting()
engine.enable_module('analysis_module_crawlphish', ANALYSIS_MODE_CLOUDPHISH)
engine.enable_module('analysis_module_cloudphish_request_analyzer', ANALYSIS_MODE_CLOUDPHISH)
# force this analysis to become an alert
engine.enable_module('analysis_module_forced_detection', ANALYSIS_MODE_CLOUDPHISH)
#engine.enable_module('analysis_module_detection', ANALYSIS_MODE_CLOUDPHISH)
#engine.enable_module('analysis_module_alert', ANALYSIS_MODE_CLOUDPHISH)
engine.controlled_stop()
engine.start()
engine.wait()
# get the sha256_content
submission_result = self.client.get(url_for('cloudphish.submit', url=TEST_URL, ignore_filters='1'))
submission_result = submission_result.get_json()
self.assertIsNotNone(submission_result[KEY_SHA256_URL])
# change what node we are
saq.SAQ_NODE = 'second_host'
initialize_node()
self.initialize_test_client()
# we should get a redirect back to the other node
result = self.client.get(url_for('cloudphish.download', s=submission_result[KEY_SHA256_URL]))
self.assertEquals(result.status_code, 302) | en | 0.742011 | # vim: sw=4:ts=4:et # part of our sample set of data # subprocess for http server # wait for it to start... #def setUp(self, *args, **kwargs): #super().setUp(*args, **kwargs) # make sure our http server is working # first check the result # everything else should be None # we should have a single entry in the cloudphish_analysis_results table SELECT sha256_url, http_result_code, sha256_content, result, insert_date, uuid, status FROM cloudphish_analysis_results # we should have a matching entry in the workload for this uuid SELECT id, uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir FROM workload # and then make sure we can load the analysis # now we start an engine to work on cloudphish analysis # force this analysis to become an alert #engine.enable_module('analysis_module_detection', ANALYSIS_MODE_CLOUDPHISH) #engine.enable_module('analysis_module_alert', ANALYSIS_MODE_CLOUDPHISH) # we should still have a single entry in the cloudphish_analysis_results table # but it should be updated with the analysis results SELECT HEX(sha256_url), http_result_code, http_message, HEX(sha256_content), result, insert_date, uuid, status FROM cloudphish_analysis_results # and we should have an entry in the cloudphish_content_metadata table SELECT node, name FROM cloudphish_content_metadata WHERE sha256_content = UNHEX(%s) # we should have seen the analysis mode change # should also have an entry to work the new alert SELECT id, uuid, node_id, analysis_mode, insert_date, company_id, exclusive_uuid, storage_dir FROM workload # now we make a second api call to the same url # first check the result # everything else should be None # now attempt to download the binary by sha256 # make sure we got the actual file # and make sure we got the file name # now attempt to download the alert itself # we should get back a tar file # and then finally make sure we can clear the alert # we should have a 
brocess entry for this http request # try submitting something that is clearly not a URL # we add a url for something that should be blacklisted but we ignore the filters # first check the result # everything else should be None # create a request to download the pdf # have the engine process it # force this analysis to become an alert #engine.enable_module('analysis_module_detection', ANALYSIS_MODE_CLOUDPHISH) #engine.enable_module('analysis_module_alert', ANALYSIS_MODE_CLOUDPHISH) # get the sha256_content # change what node we are # we should get a redirect back to the other node | 2.019849 | 2 |
virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2_elb_facts.py | lakhlaifi/RedHat-Ansible | 1 | 6625502 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
- Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author:
- "<NAME> (@mjschultz)"
- "<NAME> (@nand0p)"
options:
names:
description:
- List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters
# Gather facts about all ELBs
- action:
module: ec2_elb_facts
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
loop: "{{ elb_facts.elbs }}"
# Gather facts about a particular ELB
- action:
module: ec2_elb_facts
names: frontend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ elb_facts.elbs.0.dns_name }}"
# Gather facts about a set of ELBs
- action:
module: ec2_elb_facts
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
loop: "{{ elb_facts.elbs }}"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
AWSRetry,
connect_to_aws,
ec2_argument_spec,
get_aws_connection_info,
)
try:
import boto.ec2.elb
from boto.ec2.tag import Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbInformation(object):
    """Collects facts about classic Elastic Load Balancers via boto.

    Wraps a boto ELB connection for one region and exposes :meth:`list_elbs`,
    which returns a list of fact dictionaries for every load balancer
    (optionally restricted to the names passed at construction).
    """
    def __init__(self,
                 module,
                 names,
                 region,
                 **aws_connect_params):
        # module: the AnsibleModule instance (kept for error reporting by callers)
        # names: list of ELB names to restrict the facts to; [] means all ELBs
        # region: AWS region to connect to
        # aws_connect_params: credential/profile kwargs forwarded to boto
        self.module = module
        self.names = names
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.connection = self._get_elb_connection()
    def _get_tags(self, elbname):
        """Return the ELB's tags as a ``{key: value}`` dict."""
        params = {'LoadBalancerNames.member.1': elbname}
        elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
        # some returned members may not carry a Key attribute; skip those
        return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
    @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
    def _get_elb_connection(self):
        """Open a boto ELB connection, retrying on transient AWS errors."""
        return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
    def _get_elb_listeners(self, listeners):
        """Convert boto listener tuples into a list of dicts.

        Each boto listener is a tuple of
        (lb_port, instance_port, protocol, instance_protocol[, ssl_certificate_id]).
        """
        listener_list = []
        for listener in listeners:
            listener_dict = {
                'load_balancer_port': listener[0],
                'instance_port': listener[1],
                'protocol': listener[2],
                'instance_protocol': listener[3]
            }
            # the SSL certificate id is only present for HTTPS/SSL listeners
            try:
                ssl_certificate_id = listener[4]
            except IndexError:
                pass
            else:
                if ssl_certificate_id:
                    listener_dict['ssl_certificate_id'] = ssl_certificate_id
            listener_list.append(listener_dict)
        return listener_list
    def _get_health_check(self, health_check):
        """Split a boto health-check target ('PROTO:PORT[/PATH]') into a dict."""
        protocol, port_path = health_check.target.split(':')
        try:
            port, path = port_path.split('/', 1)
            path = '/{0}'.format(path)
        except ValueError:
            # no '/' in the target: TCP/SSL checks have no ping path
            port = port_path
            path = None
        health_check_dict = {
            'ping_protocol': protocol.lower(),
            'ping_port': int(port),
            'response_timeout': health_check.timeout,
            'interval': health_check.interval,
            'unhealthy_threshold': health_check.unhealthy_threshold,
            'healthy_threshold': health_check.healthy_threshold,
        }
        if path:
            health_check_dict['ping_path'] = path
        return health_check_dict
    @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
    def _get_elb_info(self, elb):
        """Build the facts dict for a single boto ELB object."""
        elb_info = {
            'name': elb.name,
            'zones': elb.availability_zones,
            'dns_name': elb.dns_name,
            'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
            'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
            # duplicated under shorter keys for convenience
            'hosted_zone_name': elb.canonical_hosted_zone_name,
            'hosted_zone_id': elb.canonical_hosted_zone_name_id,
            'instances': [instance.id for instance in elb.instances],
            'listeners': self._get_elb_listeners(elb.listeners),
            'scheme': elb.scheme,
            'security_groups': elb.security_groups,
            'health_check': self._get_health_check(elb.health_check),
            'subnets': elb.subnets,
            'instances_inservice': [],
            'instances_inservice_count': 0,
            'instances_outofservice': [],
            'instances_outofservice_count': 0,
            'instances_inservice_percent': 0.0,
            'tags': self._get_tags(elb.name)
        }
        if elb.vpc_id:
            elb_info['vpc_id'] = elb.vpc_id
        if elb.instances:
            # partition registered instances by their current health state
            instance_health = self.connection.describe_instance_health(elb.name)
            elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
            elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
            elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
            elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
            try:
                elb_info['instances_inservice_percent'] = (
                    float(elb_info['instances_inservice_count']) /
                    float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
                ) * 100.
            except ZeroDivisionError:
                # no instance reported in either state
                elb_info['instances_inservice_percent'] = 0.
        return elb_info
    def list_elbs(self):
        """Return fact dicts for all (or the requested) ELBs, paginating as needed."""
        elb_array, token = [], None
        get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
        while True:
            all_elbs = get_elb_with_backoff(marker=token)
            token = all_elbs.next_marker
            if all_elbs:
                if self.names:
                    # keep only the explicitly requested load balancers
                    for existing_lb in all_elbs:
                        if existing_lb.name in self.names:
                            elb_array.append(existing_lb)
                else:
                    elb_array.extend(all_elbs)
            else:
                break
            if token is None:
                break
        return list(map(self._get_elb_info, elb_array))
def main():
    """Module entry point: parse arguments, gather ELB facts, exit with JSON."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        names={'default': [], 'type': 'list'}
    )
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    try:
        # ec2_url is part of the returned tuple but unused by this module
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        if not region:
            module.fail_json(msg="region must be specified")
        names = module.params['names']
        elb_information = ElbInformation(
            module, names, region, **aws_connect_params)
        ec2_facts_result = dict(changed=False,
                                elbs=elb_information.list_elbs())
    except BotoServerError as err:
        # surface the AWS error code/message to the user
        module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
                         exception=traceback.format_exc())
    module.exit_json(**ec2_facts_result)
if __name__ == '__main__':
    main()
| #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
- Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author:
- "<NAME> (@mjschultz)"
- "<NAME> (@nand0p)"
options:
names:
description:
- List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters
# Gather facts about all ELBs
- action:
module: ec2_elb_facts
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
loop: "{{ elb_facts.elbs }}"
# Gather facts about a particular ELB
- action:
module: ec2_elb_facts
names: frontend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ elb_facts.elbs.0.dns_name }}"
# Gather facts about a set of ELBs
- action:
module: ec2_elb_facts
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
loop: "{{ elb_facts.elbs }}"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
AWSRetry,
connect_to_aws,
ec2_argument_spec,
get_aws_connection_info,
)
try:
import boto.ec2.elb
from boto.ec2.tag import Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbInformation(object):
"""Handles ELB information."""
def __init__(self,
module,
names,
region,
**aws_connect_params):
self.module = module
self.names = names
self.region = region
self.aws_connect_params = aws_connect_params
self.connection = self._get_elb_connection()
def _get_tags(self, elbname):
params = {'LoadBalancerNames.member.1': elbname}
elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_connection(self):
return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
def _get_elb_listeners(self, listeners):
listener_list = []
for listener in listeners:
listener_dict = {
'load_balancer_port': listener[0],
'instance_port': listener[1],
'protocol': listener[2],
'instance_protocol': listener[3]
}
try:
ssl_certificate_id = listener[4]
except IndexError:
pass
else:
if ssl_certificate_id:
listener_dict['ssl_certificate_id'] = ssl_certificate_id
listener_list.append(listener_dict)
return listener_list
def _get_health_check(self, health_check):
protocol, port_path = health_check.target.split(':')
try:
port, path = port_path.split('/', 1)
path = '/{0}'.format(path)
except ValueError:
port = port_path
path = None
health_check_dict = {
'ping_protocol': protocol.lower(),
'ping_port': int(port),
'response_timeout': health_check.timeout,
'interval': health_check.interval,
'unhealthy_threshold': health_check.unhealthy_threshold,
'healthy_threshold': health_check.healthy_threshold,
}
if path:
health_check_dict['ping_path'] = path
return health_check_dict
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def _get_elb_info(self, elb):
elb_info = {
'name': elb.name,
'zones': elb.availability_zones,
'dns_name': elb.dns_name,
'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
'hosted_zone_name': elb.canonical_hosted_zone_name,
'hosted_zone_id': elb.canonical_hosted_zone_name_id,
'instances': [instance.id for instance in elb.instances],
'listeners': self._get_elb_listeners(elb.listeners),
'scheme': elb.scheme,
'security_groups': elb.security_groups,
'health_check': self._get_health_check(elb.health_check),
'subnets': elb.subnets,
'instances_inservice': [],
'instances_inservice_count': 0,
'instances_outofservice': [],
'instances_outofservice_count': 0,
'instances_inservice_percent': 0.0,
'tags': self._get_tags(elb.name)
}
if elb.vpc_id:
elb_info['vpc_id'] = elb.vpc_id
if elb.instances:
instance_health = self.connection.describe_instance_health(elb.name)
elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
try:
elb_info['instances_inservice_percent'] = (
float(elb_info['instances_inservice_count']) /
float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
) * 100.
except ZeroDivisionError:
elb_info['instances_inservice_percent'] = 0.
return elb_info
def list_elbs(self):
elb_array, token = [], None
get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
while True:
all_elbs = get_elb_with_backoff(marker=token)
token = all_elbs.next_marker
if all_elbs:
if self.names:
for existing_lb in all_elbs:
if existing_lb.name in self.names:
elb_array.append(existing_lb)
else:
elb_array.extend(all_elbs)
else:
break
if token is None:
break
return list(map(self._get_elb_info, elb_array))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
names={'default': [], 'type': 'list'}
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region must be specified")
names = module.params['names']
elb_information = ElbInformation(
module, names, region, **aws_connect_params)
ec2_facts_result = dict(changed=False,
elbs=elb_information.list_elbs())
except BotoServerError as err:
module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
exception=traceback.format_exc())
module.exit_json(**ec2_facts_result)
if __name__ == '__main__':
main()
| en | 0.685583 | #!/usr/bin/python # # This is a free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This Ansible library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this library. If not, see <http://www.gnu.org/licenses/>. --- module: ec2_elb_facts short_description: Gather facts about EC2 Elastic Load Balancers in AWS description: - Gather facts about EC2 Elastic Load Balancers in AWS version_added: "2.0" author: - "<NAME> (@mjschultz)" - "<NAME> (@nand0p)" options: names: description: - List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned. aliases: ['elb_ids', 'ec2_elbs'] extends_documentation_fragment: - aws - ec2 # Note: These examples do not set authentication details, see the AWS Guide for details. # Output format tries to match ec2_elb_lb module input parameters # Gather facts about all ELBs - action: module: ec2_elb_facts register: elb_facts - action: module: debug msg: "{{ item.dns_name }}" loop: "{{ elb_facts.elbs }}" # Gather facts about a particular ELB - action: module: ec2_elb_facts names: frontend-prod-elb register: elb_facts - action: module: debug msg: "{{ elb_facts.elbs.0.dns_name }}" # Gather facts about a set of ELBs - action: module: ec2_elb_facts names: - frontend-prod-elb - backend-prod-elb register: elb_facts - action: module: debug msg: "{{ item.dns_name }}" loop: "{{ elb_facts.elbs }}" Handles ELB information. | 1.723997 | 2 |
problem.py | FlorianEisenbarth/DataCamp_DeputiesWatcher | 1 | 6625503 | import pandas as pd
import json
import numpy as np
from dataclasses import dataclass
import os
from os.path import join, splitext
import unidecode
import pickle as pkl
import sys
from sklearn.model_selection import KFold
import functools
import rampwf
from sklearn.base import is_classifier
from sklearn.metrics import f1_score
from rampwf.prediction_types.base import BasePrediction
from rampwf.score_types import BaseScoreType
from rampwf.workflows import SKLearnPipeline
import warnings
PARTIES_SIGLES = [
"SOC",
"FI",
"Dem",
"LT",
"GDR",
"LaREM",
"Agir ens",
"UDI-I",
"LR",
"NI",
]
RANDOM_STATE = 777
DATA_HOME = "data"
if not sys.warnoptions:
warnings.simplefilter("ignore")
@dataclass
class Vote:
    """A single parliamentary vote: metadata plus per-party ballot counts."""
    id: str
    code_type_vote: str
    libelle_type_vote: str
    demandeur: str
    libelle: str
    nb_votants: int
    date: str  # stored as a string; turning it into a datetime would be nice -- to revisit
    vote_counts: pd.DataFrame
    @classmethod
    def load_from_files(cls, id, data_home=DATA_HOME, train_or_test="train"):
        """Build a Vote from the <id>.json metadata file and <id>.csv count file."""
        f_name = join(data_home, train_or_test, id)
        with open(f_name + ".json", "r") as f:
            metadata = json.load(f)
        # the first (unnamed) csv column holds the party names; use it as index
        counts = pd.read_csv(f_name + ".csv", sep=",")
        counts = counts.rename(columns={"Unnamed: 0": "party"}).set_index("party")
        return cls(
            id=id,
            code_type_vote=metadata["code_type_vote"],
            libelle_type_vote=metadata["libelle_type_vote"],
            demandeur=metadata["demandeur"],
            libelle=metadata["libelle"],
            nb_votants=metadata["nb_votants"],
            date=metadata["date"],
            vote_counts=counts,
        )
    def to_X_y(self):
        """Turn this vote into a feature dict X and a per-party label dict y.

        y[party] is 1.0 when the party's majority position is "pours",
        0.0 otherwise.
        """
        presence = {
            party: sum(self.vote_counts.loc[party])
            for party in self.vote_counts.index
        }
        X = {
            "code_type_vote": self.code_type_vote,
            "libelle_type_vote": self.libelle_type_vote,
            "demandeur": self.demandeur,
            "libelle": self.libelle,
            "nb_votants": self.nb_votants,
            "date": self.date,
            "presence_per_party": presence,
        }
        positions = self.vote_counts.columns
        y = {
            party: 1.0 * (positions[np.argmax(self.vote_counts.loc[party])] == "pours")
            for party in self.vote_counts.index
        }
        return X, y
# ----------
# score type
# ----------
class CustomF1Score(BaseScoreType):
    """Weighted mean of per-party (per-column) F1 scores."""
    def __init__(
        self,
        weights_type="log",
        precision=3,
    ):
        """Custom weighted F1 score. Weights depend on each group's amount of deputies.
        Args:
            weights_type (str, optional): 'log' or 'linear'. Defaults to 'log'.
            precision (int, optional): decimals considered. Defaults to 3.
        """
        self.name = f"Weighted F1-score ({weights_type})"
        self.set_weights(path=".", type=weights_type)
        self.precision = precision
    def __call__(self, y_true, y_pred) -> float:
        """Return the weighted average of the column-wise F1 scores."""
        # one F1 per output column (party); the previous enumerate() loop left
        # the weight variable unused, so iterate over indices directly
        scores = np.array(
            [f1_score(y_true[:, i], y_pred[:, i]) for i in range(len(self.weights_))]
        )
        return scores @ self.weights_
    def set_weights(self, path, type="linear"):
        """Compute and store in ``self.weights_`` the weights associated to each
        party. The default weight for a party (type='linear') is the mere
        proportion of deputies in the party among all the deputies. If
        type='log', the count is passed through the natural logarithm first.
        """
        # NOTE: parameter name 'type' shadows the builtin; kept for interface
        # compatibility with existing callers.
        file_name = join(path, "data/dpt_data", "liste_deputes_excel.csv")
        dpt_data = pd.read_csv(file_name, sep=";")
        # last column holds the parliamentary group of each deputy
        groups_column_name = dpt_data.columns[-1]
        counts = (
            dpt_data.groupby(groups_column_name)
            .nunique()["identifiant"]
            .to_dict()
        )
        if type == "linear":
            list_count = np.array([counts[key] for key in PARTIES_SIGLES])
        elif type == "log":
            list_count = np.log(
                np.array([counts[key] for key in PARTIES_SIGLES])
            )
        else:
            raise ValueError("Unknown value for argument 'type' :", type)
        # normalize so the weights sum to 1
        self.weights_ = list_count / np.sum(list_count)
# -----------------------
# A little bit of reading
# -----------------------
def _read_data(path, train_or_test="train", save=True):
    """Return the features dataset X and the labels dataset y for either the train or the test.

    Reads every <vote>.json/<vote>.csv pair under data/<train_or_test>/,
    builds one row per vote, and (when ``save`` is True) pickles (X, y)
    next to the raw files so later calls can use the cache.
    """
    directory = join(path, DATA_HOME, train_or_test)
    votes_names = os.listdir(directory)
    # keep only the file stems (vote ids) of the json metadata files
    votes_names = [
        splitext(vote)[0] for vote in votes_names if vote.endswith(".json")
    ]
    # numeric sort; assumes ids share a fixed 10-character prefix -- TODO confirm
    # (the inner splitext is redundant here: names are already extension-free)
    votes_names.sort(key=lambda name: int(splitext(name)[0][10:]))
    for i, f_name in enumerate(votes_names):
        vote = Vote.load_from_files(f_name, train_or_test=train_or_test)
        features, label = vote.to_X_y()
        if i == 0:
            # create the frames lazily so their columns match the first vote's keys
            X = pd.DataFrame(columns=[key for key in features.keys()])
            y = pd.DataFrame(columns=[key for key in label.keys()])
        X.loc[f_name] = features
        y.loc[f_name] = label
    # Add a column equal to the index
    X["vote_uid"] = X.index
    y = y.to_numpy()
    if save:
        file_name = join(
            path, DATA_HOME, train_or_test, train_or_test + "_data.pkl"
        )
        with open(file_name, "wb") as f:
            pkl.dump((X, y), f)
    return X, y
def _read_info_actors():
    """Load the deputies' synthesis csv and return it with normalized column names."""
    filename = "data/dpt_data/nosdeputes.fr_synthese_2020-11-21.csv"
    df = pd.read_csv(filename, sep=";")
    # original csv column -> normalized name used throughout the pipeline
    column_mapping = {
        "id": "custom_id",
        "nom": "membre_fullname",
        "prenom": "membre_prenom",
        "nom_de_famille": "membre_nom",
        "date_naissance": "membre_birthDate",
        "sexe": "membre_sex",
        "parti_ratt_financier": "membre_parti",
    }
    df = df.rename(column_mapping, axis=1)
    # keep only the renamed columns, in mapping order
    return df[list(column_mapping.values())]
def _read_actor(filename):
    """Read one actor csv export; return id/civility/first/last-name columns."""
    raw = pd.read_csv(filename, sep=";")
    # the export uses XPath-like column names; flatten them to plain names
    return pd.DataFrame(
        {
            "membre_acteurRef": raw["uid[1]"],
            "membre_civ": raw["etatCivil[1]/ident[1]/civ[1]"],
            "membre_prenom": raw["etatCivil[1]/ident[1]/prenom[1]"],
            "membre_nom": raw["etatCivil[1]/ident[1]/nom[1]"],
        }
    )
def _read_all_actors():
    """Read and concatenate every per-actor csv found in data/acteur.

    Returns an empty DataFrame when the directory contains no files.
    """
    all_acteur_filenames = os.listdir("data/acteur")
    # DataFrame.append was deprecated and removed in pandas 2.0;
    # collect the frames and concatenate once instead
    frames = [
        _read_actor("data/acteur/" + filename)
        for filename in all_acteur_filenames
    ]
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames)
def get_actor_party_data():
    """
    Returns general information about deputies and parties.
    To be used for creating features.
    Returns:
        actors: pd.DataFrame with info about actors.
    """
    try:
        actors = pd.read_csv("data/acteurs.csv")
    except Exception:
        # bare 'except:' replaced: never swallow SystemExit/KeyboardInterrupt.
        # Cache miss or unreadable cache: rebuild from the raw per-actor files.
        actors = _read_all_actors()
        actors.to_csv("data/acteurs.csv")
    actors_info = _read_info_actors()
    # build a slug (accent-free, lowercase full name) on both sides to join on
    actors["membre_fullname"] = actors.apply(
        lambda x: x["membre_prenom"] + " " + x["membre_nom"], axis=1
    )
    actors["slug"] = actors["membre_fullname"].apply(_normalize_txt)
    actors.drop(["membre_fullname"], axis=1, inplace=True)
    actors_info.drop(["membre_prenom", "membre_nom"], axis=1, inplace=True)
    actors_info["slug"] = actors_info["membre_fullname"].apply(_normalize_txt)
    actors_merge = pd.merge(actors, actors_info, on="slug")
    return actors_merge
def _normalize_txt(txt: str) -> str:
    """Remove accents and lowercase text; non-string values are returned unchanged."""
    # isinstance (not type(...) == str) also accepts str subclasses
    if isinstance(txt, str):
        return unidecode.unidecode(txt).lower()
    return txt
# -----------------------
# Ramp problem definition
# -----------------------
class _MultiOutputClassification(BasePrediction):
    """ramp Predictions type holding one 0/1 column per party."""
    def __init__(self, n_columns, y_pred=None, y_true=None, n_samples=None):
        # n_columns: number of output columns (0 means a flat 1-D prediction)
        self.n_columns = n_columns
        if y_pred is not None:
            self.y_pred = np.array(y_pred)
        elif y_true is not None:
            # ground truth doubles as the prediction array when provided
            self.y_pred = np.array(y_true)
        elif n_samples is not None:
            if self.n_columns == 0:
                shape = n_samples
            else:
                shape = (n_samples, self.n_columns)
            # allocate a placeholder prediction array filled with NaNs
            self.y_pred = np.empty(shape, dtype=float)
            self.y_pred.fill(np.nan)
        else:
            raise ValueError(
                "Missing init argument: y_pred, y_true, or n_samples"
            )
        self.check_y_pred_dimensions()
    @classmethod
    def combine(cls, predictions_list, index_list=None):
        """Inherits from the base class where the scores are averaged.
        Here, averaged predictions < 0.5 will be set to 0.0 and averaged
        predictions >= 0.5 will be set to 1.0 so that `y_pred` will consist
        only of 0.0s and 1.0s.
        """
        # call the combine from the BasePrediction
        combined_predictions = super(_MultiOutputClassification, cls).combine(
            predictions_list=predictions_list, index_list=index_list
        )
        with warnings.catch_warnings():
            # comparisons against NaN entries raise RuntimeWarnings; silence them
            warnings.simplefilter("ignore", category=RuntimeWarning)
            combined_predictions.y_pred[
                combined_predictions.y_pred < 0.5
            ] = 0.0
            combined_predictions.y_pred[
                combined_predictions.y_pred >= 0.5
            ] = 1.0
        return combined_predictions
# Workflow for the classification problem which uses predict instead of
# predict_proba
class EstimatorVotes(SKLearnPipeline):
    """Choose predict method.
    Parameters
    ----------
    predict_method : {'auto', 'predict', 'predict_proba',
            'decision_function'}, default='auto'
        Prediction method to use. If 'auto', uses 'predict_proba' when
        estimator is a classifier and 'predict' otherwise.
    """
    def __init__(self, predict_method="auto"):
        super().__init__()
        self.predict_method = predict_method
    def test_submission(self, estimator_fitted, X):
        """Predict using a fitted estimator.
        Parameters
        ----------
        estimator_fitted : Estimator object
            A fitted scikit-learn estimator.
        X : {array-like, sparse matrix, dataframe}
            The test data set.
        Returns
        -------
        pred
        """
        methods = ("auto", "predict", "predict_proba", "decision_function")
        X = X.reset_index(drop=True)  # make sure the indices are ordered
        if self.predict_method not in methods:
            raise NotImplementedError(
                f"'method' should be one of: {methods} "
                f"Got: {self.predict_method}"
            )
        if self.predict_method == "auto":
            # threshold probabilities at 0.5 to obtain hard True/False labels
            y_pred = estimator_fitted.predict_proba(X)
            y_pred = y_pred >= 0.5
        elif hasattr(estimator_fitted, self.predict_method):
            # call estimator with the `predict_method`
            est_predict = getattr(estimator_fitted, self.predict_method)
            y_pred = est_predict(X)
        else:
            raise NotImplementedError(
                "Estimator does not support method: " f"{self.predict_method}."
            )
        if np.any(np.isnan(y_pred)):
            raise ValueError("NaNs found in the predictions.")
        return y_pred
def make_workflow():
    """Build the ramp workflow; predictions are thresholded hard labels."""
    # defines new workflow, where predict instead of predict_proba is called
    return EstimatorVotes(predict_method="auto")
def partial_multioutput(cls=_MultiOutputClassification, **kwds):
    """Return a subclass of *cls* whose __init__ has **kwds pre-bound."""
    # this class partially initializes _MultiOutputClassification with given
    # keywords
    class _PartialMultiOutputClassification(_MultiOutputClassification):
        __init__ = functools.partialmethod(cls.__init__, **kwds)
    return _PartialMultiOutputClassification
def make_multioutput(n_columns):
    """Predictions class for a fixed number of output columns (one per party)."""
    return partial_multioutput(n_columns=n_columns)
problem_title = "Deputy Watchers"
Predictions = make_multioutput(n_columns=len(PARTIES_SIGLES))
workflow = make_workflow()
score_types = [CustomF1Score()]
def get_cv(X, y):
    """Yield 5-fold (train, test) index splits for ramp."""
    # NOTE(review): no shuffling, so folds follow file order; RANDOM_STATE is
    # defined module-wide but unused here -- confirm this is intended.
    cv = KFold(n_splits=5)
    return cv.split(X, y)
def get_train_data(path="."):
    """Return training features X and labels y, using the pickle cache if present."""
    cache = join(path, DATA_HOME, "train", "train_data.pkl")
    if os.path.isfile(cache):
        # cached (X, y) tuple written by a previous _read_data(save=True) call
        with open(cache, "rb") as f:
            return pkl.load(f)
    try:
        return _read_data(path=path, train_or_test="train", save=True)
    except FileNotFoundError:
        print("Data files not created yet. Run 'create_files.py' first.")
        sys.exit(0)
def get_test_data(path="."):
    """Return the test features X (DataFrame) and labels y (ndarray).

    Uses the pickled cache when present; otherwise rebuilds from the raw
    vote files (and exits if those have not been generated yet).
    """
    file_name = join(path, DATA_HOME, "test", "test_data.pkl")
    if os.path.isfile(file_name):
        # cached (X, y) tuple written by a previous _read_data(save=True) call
        with open(file_name, "rb") as f:
            X, y = pkl.load(f)
        return X, y
    try:
        X, y = _read_data(path=path, train_or_test="test", save=True)
    except FileNotFoundError:
        print("Data files not created yet. Run 'create_files.py' first.")
        # NOTE(review): exits with status 0 on a failure path -- confirm intended
        sys.exit(0)
return X, y | import pandas as pd
import json
import numpy as np
from dataclasses import dataclass
import os
from os.path import join, splitext
import unidecode
import pickle as pkl
import sys
from sklearn.model_selection import KFold
import functools
import rampwf
from sklearn.base import is_classifier
from sklearn.metrics import f1_score
from rampwf.prediction_types.base import BasePrediction
from rampwf.score_types import BaseScoreType
from rampwf.workflows import SKLearnPipeline
import warnings
PARTIES_SIGLES = [
"SOC",
"FI",
"Dem",
"LT",
"GDR",
"LaREM",
"Agir ens",
"UDI-I",
"LR",
"NI",
]
RANDOM_STATE = 777
DATA_HOME = "data"
if not sys.warnoptions:
warnings.simplefilter("ignore")
@dataclass
class Vote:
    """Base class containing all relevant basis information of the dataset"""

    # Metadata of one parliamentary vote, as stored in the per-vote JSON file.
    id: str
    code_type_vote: str
    libelle_type_vote: str
    demandeur: str
    libelle: str
    nb_votants: int
    date: str  # turning this into a datetime would be nice; to revisit
    # Per-party ballot counts, one row per party.
    vote_counts: pd.DataFrame

    @classmethod
    def load_from_files(cls, id, data_home=DATA_HOME, train_or_test="train"):
        """Build a Vote from ``<data_home>/<train_or_test>/<id>.json``
        (metadata) and ``<id>.csv`` (per-party ballot counts).
        """
        f_name = join(data_home, train_or_test, id)
        with open(f_name + ".json", "r") as f:
            vote_metadata = json.load(f)
        vote_counts = (
            pd.read_csv(f_name + ".csv", sep=",")
            .rename(columns={"Unnamed: 0": "party"})
            .
            # rename the first (unnamed) column to "party" and index by it
            set_index("party")
        )
        vote = cls(
            id=id,
            code_type_vote=vote_metadata["code_type_vote"],
            libelle_type_vote=vote_metadata["libelle_type_vote"],
            demandeur=vote_metadata["demandeur"],
            libelle=vote_metadata["libelle"],
            nb_votants=vote_metadata["nb_votants"],
            date=vote_metadata["date"],
            vote_counts=vote_counts,
        )
        return vote

    def to_X_y(self):
        """Transform a Vote object into an observation X of features (dictionnary)
        and a label y
        """
        # Deputies present per party = row sum of the ballot counts.
        number_of_dpt_per_party = {
            party: sum(self.vote_counts.loc[party])
            for party in self.vote_counts.index
        }
        X = {
            "code_type_vote": self.code_type_vote,
            "libelle_type_vote": self.libelle_type_vote,
            "demandeur": self.demandeur,
            "libelle": self.libelle,
            "nb_votants": self.nb_votants,
            "date": self.date,
            "presence_per_party": number_of_dpt_per_party,
        }
        vote_columns = self.vote_counts.columns
        y = {}
        for party in self.vote_counts.index:
            # Label is 1.0 when the party's most frequent ballot is "pours".
            major_position = vote_columns[
                np.argmax(self.vote_counts.loc[party])
            ]
            y[party] = 1.0 * (major_position == "pours")
        return X, y
# ----------
# score type
# ----------
class CustomF1Score(BaseScoreType):
    """Weighted mean of per-party binary F1 scores.

    The weight of each party depends on its number of deputies (see
    ``set_weights``).
    """

    def __init__(
        self,
        weights_type="log",
        precision=3,
    ):
        """Custom weighted F1 score. Weights depends on group's amount of deputies.
        Args:
            weights_type (str, optional): 'log' or 'linear'. Defaults to 'log'.
            precision (int, optional): decimals considered. Defaults to 3.
        """
        self.name = f"Weighted F1-score ({weights_type})"
        self.set_weights(path=".", type=weights_type)
        self.precision = precision

    def __call__(self, y_true, y_pred) -> float:
        """Return the weighted F1 score over all party columns.

        ``y_true``/``y_pred`` are 2-D 0/1 arrays with one column per party,
        in the same column order as the weights.
        """
        # One binary F1 per party column, combined with the per-party weights
        # (the old loop carried an unused enumerate value).
        scores = np.array([
            f1_score(y_true[:, i], y_pred[:, i])
            for i in range(len(self.weights_))
        ])
        return scores @ self.weights_

    def set_weights(self, path, type="linear"):
        """Return the weights associated to each party. The default weight for a party
        (type='linear') is the mere proportion of deputies in the party among all the
        deputies. if type='log', the weight is passed through natural logartihm.
        """
        # NOTE: the parameter name `type` shadows the builtin; kept for
        # backward compatibility with existing keyword callers.
        file_name = join(path, "data/dpt_data", "liste_deputes_excel.csv")
        dpt_data = pd.read_csv(file_name, sep=";")
        groups_column_name = dpt_data.columns[-1]
        counts = (
            dpt_data.groupby(groups_column_name)
            .nunique()["identifiant"]
            .to_dict()
        )
        if type == "linear":
            list_count = np.array([counts[key] for key in PARTIES_SIGLES])
        elif type == "log":
            list_count = np.log(
                np.array([counts[key] for key in PARTIES_SIGLES])
            )
        else:
            raise ValueError("Unknown value for argument 'type' :", type)
        # Normalize so the weights sum to 1.
        self.weights_ = list_count / np.sum(list_count)
# -----------------------
# A little bit of reading
# -----------------------
def _read_data(path, train_or_test="train", save=True):
    """Return the features dataset X and the labels dataset y for either the train or the test"""
    directory = join(path, DATA_HOME, train_or_test)
    votes_names = os.listdir(directory)
    votes_names = [
        splitext(vote)[0] for vote in votes_names if vote.endswith(".json")
    ]
    # Sort numerically on the trailing number of the vote id (assumes the
    # ids share a fixed 10-character prefix -- TODO confirm on the data).
    votes_names.sort(key=lambda name: int(splitext(name)[0][10:]))
    # Collect one features/labels dict per vote, then build each DataFrame
    # once: growing a DataFrame row-by-row with .loc is quadratic, and the
    # old code raised UnboundLocalError when the directory was empty.
    features_rows = {}
    label_rows = {}
    for f_name in votes_names:
        vote = Vote.load_from_files(f_name, train_or_test=train_or_test)
        features, label = vote.to_X_y()
        features_rows[f_name] = features
        label_rows[f_name] = label
    X = pd.DataFrame.from_dict(features_rows, orient="index")
    y = pd.DataFrame.from_dict(label_rows, orient="index")
    # Add a column equal to the index
    X["vote_uid"] = X.index
    y = y.to_numpy()
    if save:
        file_name = join(
            path, DATA_HOME, train_or_test, train_or_test + "_data.pkl"
        )
        with open(file_name, "wb") as f:
            pkl.dump((X, y), f)
    return X, y
def _read_info_actors():
    """Load the nosdeputes.fr synthesis CSV and keep a renamed column subset."""
    source_path = "data/dpt_data/nosdeputes.fr_synthese_2020-11-21.csv"
    # Mapping from the raw CSV headers to the canonical `membre_*` names.
    column_mapping = {
        "id": "custom_id",
        "nom": "membre_fullname",
        "prenom": "membre_prenom",
        "nom_de_famille": "membre_nom",
        "date_naissance": "membre_birthDate",
        "sexe": "membre_sex",
        "parti_ratt_financier": "membre_parti",
    }
    raw = pd.read_csv(source_path, sep=";")
    renamed = raw.rename(columns=column_mapping)
    # Keep only the renamed columns, in mapping order.
    return renamed[list(column_mapping.values())]
def _read_actor(filename):
acteur = pd.read_csv(filename, sep=";")
id = acteur["uid[1]"]
civ = acteur["etatCivil[1]/ident[1]/civ[1]"]
prenom = acteur["etatCivil[1]/ident[1]/prenom[1]"]
nom = acteur["etatCivil[1]/ident[1]/nom[1]"]
output = pd.DataFrame(
{
"membre_acteurRef": id,
"membre_civ": civ,
"membre_prenom": prenom,
"membre_nom": nom,
}
)
return output
def _read_all_actors():
    """Concatenate every per-actor CSV under data/acteur into one DataFrame."""
    all_acteur_filenames = os.listdir("data/acteur")
    frames = [
        _read_actor("data/acteur/" + filename)
        for filename in all_acteur_filenames
    ]
    # DataFrame.append was removed in pandas 2.0 and was quadratic when used
    # inside a loop; concatenate all the frames at once instead.
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames)
def get_actor_party_data():
    """
    Returns general information about deputies and parties.
    To be used for creating features.
    Returns:
        actors: pd.DataFrame with info about actors.
    """
    try:
        actors = pd.read_csv("data/acteurs.csv")
    except FileNotFoundError:
        # Cache miss: rebuild the actors table from the per-actor CSVs and
        # save it for next time.  (The old bare `except:` also hid real
        # errors such as a malformed cache file.)
        actors = _read_all_actors()
        actors.to_csv("data/acteurs.csv")
    actors_info = _read_info_actors()
    # Full name -> accent-free slug used as the join key between the sources.
    actors["membre_fullname"] = actors.apply(
        lambda x: x["membre_prenom"] + " " + x["membre_nom"], axis=1
    )
    actors["slug"] = actors["membre_fullname"].apply(_normalize_txt)
    actors.drop(["membre_fullname"], axis=1, inplace=True)
    actors_info.drop(["membre_prenom", "membre_nom"], axis=1, inplace=True)
    actors_info["slug"] = actors_info["membre_fullname"].apply(_normalize_txt)
    actors_merge = pd.merge(actors, actors_info, on="slug")
    return actors_merge
def _normalize_txt(txt: str) -> str:
"""Remove accents and lowercase text."""
if type(txt) == str:
return unidecode.unidecode(txt).lower()
else:
return txt
# -----------------------
# Ramp problem definition
# -----------------------
class _MultiOutputClassification(BasePrediction):
    """Multi-column 0/1 prediction type for ramp (one column per party)."""

    def __init__(self, n_columns, y_pred=None, y_true=None, n_samples=None):
        """Wrap an existing prediction/truth array, or allocate an empty one.

        Exactly one of ``y_pred``, ``y_true`` or ``n_samples`` must be given.
        """
        self.n_columns = n_columns
        if y_pred is not None:
            self.y_pred = np.array(y_pred)
        elif y_true is not None:
            self.y_pred = np.array(y_true)
        elif n_samples is not None:
            if self.n_columns == 0:
                shape = n_samples
            else:
                shape = (n_samples, self.n_columns)
            # Allocate an all-NaN array: NaN marks "not predicted yet".
            self.y_pred = np.empty(shape, dtype=float)
            self.y_pred.fill(np.nan)
        else:
            raise ValueError(
                "Missing init argument: y_pred, y_true, or n_samples"
            )
        self.check_y_pred_dimensions()

    @classmethod
    def combine(cls, predictions_list, index_list=None):
        """Inherits from the base class where the scores are averaged.
        Here, averaged predictions < 0.5 will be set to 0.0 and averaged
        predictions >= 0.5 will be set to 1.0 so that `y_pred` will consist
        only of 0.0s and 1.0s.
        """
        # call the combine from the BasePrediction
        combined_predictions = super(_MultiOutputClassification, cls).combine(
            predictions_list=predictions_list, index_list=index_list
        )
        # Threshold the averaged predictions back to hard 0/1 labels.
        # NOTE(review): the RuntimeWarning filter presumably silences
        # NaN-comparison warnings from partially-filled arrays -- confirm.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            combined_predictions.y_pred[
                combined_predictions.y_pred < 0.5
            ] = 0.0
            combined_predictions.y_pred[
                combined_predictions.y_pred >= 0.5
            ] = 1.0
        return combined_predictions
# Workflow for the classification problem which uses predict instead of
# predict_proba
class EstimatorVotes(SKLearnPipeline):
    """Choose predict method.

    Parameters
    ----------
    predict_method : {'auto', 'predict', 'predict_proba',
            'decision_function'}, default='auto'
        Prediction method to use. If 'auto', uses 'predict_proba' when
        estimator is a classifier and 'predict' otherwise.
    """

    def __init__(self, predict_method="auto"):
        super().__init__()
        self.predict_method = predict_method

    def test_submission(self, estimator_fitted, X):
        """Predict using a fitted estimator.

        Parameters
        ----------
        estimator_fitted : Estimator object
            A fitted scikit-learn estimator.
        X : {array-like, sparse matrix, dataframe}
            The test data set.

        Returns
        -------
        pred : array of predictions (boolean 0/1 columns in 'auto' mode).

        Raises
        ------
        NotImplementedError
            For an unknown or unsupported ``predict_method``.
        ValueError
            When the predictions contain NaNs.
        """
        methods = ("auto", "predict", "predict_proba", "decision_function")
        X = X.reset_index(drop=True)  # make sure the indices are ordered
        if self.predict_method not in methods:
            raise NotImplementedError(
                f"'method' should be one of: {methods} "
                f"Got: {self.predict_method}"
            )
        if self.predict_method == "auto":
            # NOTE(review): contrary to the class docstring, 'auto' always
            # calls predict_proba and thresholds at 0.5 -- confirm intent.
            y_pred = estimator_fitted.predict_proba(X)
            y_pred = y_pred >= 0.5
        elif hasattr(estimator_fitted, self.predict_method):
            # call estimator with the `predict_method`
            est_predict = getattr(estimator_fitted, self.predict_method)
            y_pred = est_predict(X)
        else:
            raise NotImplementedError(
                "Estimator does not support method: " f"{self.predict_method}."
            )
        if np.any(np.isnan(y_pred)):
            raise ValueError("NaNs found in the predictions.")
        return y_pred
def make_workflow():
    """Build the ramp workflow: an EstimatorVotes in 'auto' predict mode."""
    # defines new workflow, where predict instead of predict_proba is called
    return EstimatorVotes(predict_method="auto")
def partial_multioutput(cls=_MultiOutputClassification, **kwds):
    """Return a subclass of ``cls`` whose ``__init__`` is pre-filled with ``kwds``.

    Fix: the generated class now derives from the ``cls`` argument instead of
    always deriving from ``_MultiOutputClassification``, so a custom prediction
    class passed as ``cls`` is honored consistently (the default is unchanged).
    """
    class _PartialMultiOutputClassification(cls):
        __init__ = functools.partialmethod(cls.__init__, **kwds)
    return _PartialMultiOutputClassification
def make_multioutput(n_columns):
    """Prediction class with ``n_columns`` output columns (one per party)."""
    return partial_multioutput(n_columns=n_columns)
# Ramp problem entry points: title, prediction type, workflow and scoring.
problem_title = "Deputy Watchers"
# One output column per political party.
Predictions = make_multioutput(n_columns=len(PARTIES_SIGLES))
workflow = make_workflow()
score_types = [CustomF1Score()]
def get_cv(X, y):
    """Yield 5-fold cross-validation train/test index splits (no shuffling)."""
    cv = KFold(n_splits=5)
    return cv.split(X, y)
def get_train_data(path="."):
    """Load (X, y) for the train split, preferring the pickled cache.

    Falls back to rebuilding from the raw files via ``_read_data``; exits the
    process when the raw data files are missing.
    """
    file_name = join(path, DATA_HOME, "train", "train_data.pkl")
    if os.path.isfile(file_name):
        with open(file_name, "rb") as f:
            X, y = pkl.load(f)
        return X, y
    try:
        X, y = _read_data(path=path, train_or_test="train", save=True)
    except FileNotFoundError:
        print("Data files not created yet. Run 'create_files.py' first.")
        # NOTE(review): exiting with status 0 on a missing-data error hides
        # the failure from calling scripts -- consider a non-zero code.
        sys.exit(0)
    return X, y
def get_test_data(path="."):
    """Load (X, y) for the test split, preferring the pickled cache.

    Falls back to rebuilding from the raw files via ``_read_data``; exits the
    process when the raw data files are missing.
    """
    file_name = join(path, DATA_HOME, "test", "test_data.pkl")
    if os.path.isfile(file_name):
        with open(file_name, "rb") as f:
            X, y = pkl.load(f)
        return X, y
    try:
        X, y = _read_data(path=path, train_or_test="test", save=True)
    except FileNotFoundError:
        print("Data files not created yet. Run 'create_files.py' first.")
        # NOTE(review): exiting with status 0 on a missing-data error hides
        # the failure from calling scripts -- consider a non-zero code.
        sys.exit(0)
return X, y | en | 0.624958 | Base class containing all relevant basis information of the dataset # en faire un datetime ce serait bien ; à regarder # renommer la première colonne (partis) Transform a Vote object into an observation X of features (dictionnary) and a label y # ---------- # score type # ---------- Custom weighted F1 score. Weights depends on group's amount of deputies. Args: weights_type (str, optional): 'log' or 'linear'. Defaults to 'log'. precision (int, optional): decimals considered. Defaults to 3. Return the weights associated to each party. The default weight for a party (type='linear') is the mere proportion of deputies in the party among all the deputies. if type='log', the weight is passed through natural logartihm. # ----------------------- # A little bit of reading # ----------------------- Return the features dataset X and the labels dataset y for either the train or the test # Add a column equal to the index # Update Returns general information about deputies and parties. To be used for creating features. Returns: actors: pd.DataFrame with info about actors. Remove accents and lowercase text. # ----------------------- # Ramp problem definition # ----------------------- Inherits from the base class where the scores are averaged. Here, averaged predictions < 0.5 will be set to 0.0 and averaged predictions >= 0.5 will be set to 1.0 so that `y_pred` will consist only of 0.0s and 1.0s. # call the combine from the BasePrediction # Workflow for the classification problem which uses predict instead of # predict_proba Choose predict method. Parameters ---------- predict_method : {'auto', 'predict', 'predict_proba', 'decision_function'}, default='auto' Prediction method to use. If 'auto', uses 'predict_proba' when estimator is a classifier and 'predict' otherwise. Predict using a fitted estimator. Parameters ---------- estimator_fitted : Estimator object A fitted scikit-learn estimator. 
X : {array-like, sparse matrix, dataframe} The test data set. Returns ------- pred # make sure the indices are ordered # call estimator with the `predict_method` # defines new workflow, where predict instead of predict_proba is called # this class partially inititates _MultiOutputClassification with given # keywords | 2.209157 | 2 |
scraper/storage_spiders/techlandcomvn.py | chongiadung/choinho | 0 | 6625504 | <gh_stars>0
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor

# XPath selectors used to extract product fields from a scraped page.
XPATH = {
    'name' : "//div[@id='pro_name_head']/h1",
    'price' : "//div[@class='pro_detail_price']/b",
    'category' : "//div[@class='categoryPath']/div/a",
    'description' : "//div[@class='pro_detail_sum']",
    'images' : "//table//tr/td[@id='productImageBox']/a/img/@src | //div[@id='proImageThum']/ul/li/a[@class='lightbox']/img/@src",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
name = 'techland.com.vn'
allowed_domains = ['techland.com.vn']
start_urls = ['http://www.techland.com.vn/?from=welcome']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Raw strings for the regexes: '\d' in a plain string literal is an invalid
# escape (DeprecationWarning); the matched text is byte-identical.
rules = [
    Rule(LinkExtractor(allow=[r'/sp+\d+/']), 'parse_item'),
    Rule(LinkExtractor(allow=[r'/c+\d+/']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@id='pro_name_head']/h1",
'price' : "//div[@class='pro_detail_price']/b",
'category' : "//div[@class='categoryPath']/div/a",
'description' : "//div[@class='pro_detail_sum']",
'images' : "//table//tr/td[@id='productImageBox']/a/img/@src | //div[@id='proImageThum']/ul/li/a[@class='lightbox']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'techland.com.vn'
allowed_domains = ['techland.com.vn']
start_urls = ['http://www.techland.com.vn/?from=welcome']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/sp+\d+/']), 'parse_item'),
Rule(LinkExtractor(allow=['/c+\d+/']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
] | en | 0.729121 | # Auto generated by generator.py. Delete this line if you make modification. #Rule(LinkExtractor(), 'parse_item_and_links'), | 2.076571 | 2 |
hykufe-client/main.py | sortteam/HyKuFe | 3 | 6625505 | <filename>hykufe-client/main.py
import hykufe


def main():
    """Build a HyKuFe job via the builder API and submit it."""
    # NOTE(review): 'access_key'/'secret_key' are placeholders -- load real
    # credentials from the environment instead of hard-coding secrets.
    hykufe.HyKuFeBuilder().build('access_key', 'secret_key').createJOB()


# Guard the submission so importing this module does not create a job.
if __name__ == '__main__':
    main()
| <filename>hykufe-client/main.py
import hykufe


def main():
    """Build a HyKuFe job via the builder API and submit it."""
    # NOTE(review): 'access_key'/'secret_key' are placeholders -- load real
    # credentials from the environment instead of hard-coding secrets.
    hykufe.HyKuFeBuilder().build('access_key', 'secret_key').createJOB()


# Guard the submission so importing this module does not create a job.
if __name__ == '__main__':
    main()
| en | 0.103137 | # hykufe.HyKuFeBuilder()\ # .setName("test1").setImage("test2")\ # .setCPU("test3").setMemory("test4")\ # .setGPU("test5").setReplica("test6")\ # .build('access_key', 'secret_key').writeYamlFile("test.yaml") | 1.444063 | 1 |
Coronavirus Statictics India/graph/urls.py | ShrayankM/Covid-19-India-Analysis | 1 | 6625506 | from django.contrib import admin
from django.urls import path, include
from graph import views
app_name = 'graph'
urlpatterns = [
path('pie/', views.pie, name = 'pie'),
path('area/', views.area, name = 'area'),
]
from django.contrib import admin
from django.urls import path, include
from graph import views

# URL namespace used when reversing, e.g. {% url 'graph:pie' %}.
app_name = 'graph'

# NOTE(review): `admin` and `include` are imported but unused in this file
# -- confirm before removing.
urlpatterns = [
    path('pie/', views.pie, name = 'pie'),
    path('area/', views.area, name = 'area'),
]
| none | 1 | 1.590852 | 2 | |
scoring/dictionary/YSQ93.py | majazeh/risloo-samples | 0 | 6625507 | <filename>scoring/dictionary/YSQ93.py
# Short codes for the 18 YSQ (Young Schema Questionnaire) factors.
f1 = 'ed'
f2 = 'ab'
f3 = 'ma'
f4 = 'si'
f5 = 'ds'
f6 = 'fa'
f7 = 'ai'
f8 = 'vu'
f9 = 'eu'
f10 = 'sb'
f11 = 'ss'
f12 = 'ei'
f13 = 'us'
f14 = 'et'
f15 = 'is'
f16 = 'as'
f17 = 'np'
f18 = 'pu'

factors_names = (f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13,
                 f14, f15, f16, f17, f18)

# Questions 1..90 cycle through the 18 factors in order (question 1 -> f1,
# question 18 -> f18, question 19 -> f1 again, ...), so the mapping can be
# generated instead of listing all 90 pairs by hand.
factors = {
    question: factors_names[(question - 1) % len(factors_names)]
    for question in range(1, 91)
}
} | <filename>scoring/dictionary/YSQ93.py
# Short codes for the 18 YSQ (Young Schema Questionnaire) factors.
f1 = 'ed'
f2 = 'ab'
f3 = 'ma'
f4 = 'si'
f5 = 'ds'
f6 = 'fa'
f7 = 'ai'
f8 = 'vu'
f9 = 'eu'
f10 = 'sb'
f11 = 'ss'
f12 = 'ei'
f13 = 'us'
f14 = 'et'
f15 = 'is'
f16 = 'as'
f17 = 'np'
f18 = 'pu'

factors_names = (f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13,
                 f14, f15, f16, f17, f18)

# Questions 1..90 cycle through the 18 factors in order (question 1 -> f1,
# question 18 -> f18, question 19 -> f1 again, ...), so the mapping can be
# generated instead of listing all 90 pairs by hand.
factors = {
    question: factors_names[(question - 1) % len(factors_names)]
    for question in range(1, 91)
}
} | none | 1 | 1.998469 | 2 | |
ex074.py | felipesch92/PythonExercicios | 0 | 6625508 | # Crie um programa que vai gerar cinco números aleatórios e
# (Exercise, translated from Portuguese) Generate five random numbers and put
# them in a tuple; then show the drawn numbers and report the smallest and
# largest value in the tuple.
from random import randint
numeros = (randint(1, 10), randint(1, 10), randint(1, 10),
           randint(1, 10), randint(1, 10))
print(f'Os valores sorteados foram: ', end='')
# Track min/max manually; the final prints also show the built-in
# max()/min() results next to them for comparison.
maior = menor = numeros[0]
for n in numeros:
    print(f'{n }', end=' ')
    if n > maior:
        maior = n
    if n < menor:
        menor = n
print(f'\nO maior número da tupla é: {maior} {max(numeros)}')
print(f'O menor número da tupla é: {menor} {min(numeros)}')
| # Crie um programa que vai gerar cinco números aleatórios e
# (Exercise, translated from Portuguese) Generate five random numbers and put
# them in a tuple; then show the drawn numbers and report the smallest and
# largest value in the tuple.
from random import randint
numeros = (randint(1, 10), randint(1, 10), randint(1, 10),
           randint(1, 10), randint(1, 10))
print(f'Os valores sorteados foram: ', end='')
# Track min/max manually; the final prints also show the built-in
# max()/min() results next to them for comparison.
maior = menor = numeros[0]
for n in numeros:
    print(f'{n }', end=' ')
    if n > maior:
        maior = n
    if n < menor:
        menor = n
print(f'\nO maior número da tupla é: {maior} {max(numeros)}')
print(f'O menor número da tupla é: {menor} {min(numeros)}')
| pt | 0.997626 | # Crie um programa que vai gerar cinco números aleatórios e # colocar em uma tupla. Depois disso, mostre a listagem de números # gerados e também indique o menor e o maior valor que estão na tupla. | 4.197444 | 4 |
dephell/converters/pip.py | eli-schwartz/dephell | 0 | 6625509 | <filename>dephell/converters/pip.py<gh_stars>0
# built-in
from pathlib import Path
from types import SimpleNamespace
from typing import Optional
from urllib.parse import urlparse
# external
from dephell_links import DirLink
from pip._internal.download import PipSession
from pip._internal.index import PackageFinder
from pip._internal.req import parse_requirements
# app
from ..config import config
from ..controllers import DependencyMaker
from ..models import RootDependency
from ..repositories import WareHouseRepo
from .base import BaseConverter
class PIPConverter(BaseConverter):
sep = ' \\\n '
def can_parse(self, path: Path, content: Optional[str] = None) -> bool:
if isinstance(path, str):
path = Path(path)
if path.name == 'requirements.txt':
if path.with_name('requirements.in').exists():
return (self.lock is True)
if path.with_name('requirements.lock').exists():
return (self.lock is False)
return True
if self.lock:
return (path.name == 'requirements.lock')
else:
return (path.name == 'requirements.in')
def __init__(self, lock):
self.lock = lock
def load(self, path) -> RootDependency:
deps = []
root = RootDependency()
warehouse_url = urlparse(config['warehouse']).hostname
if warehouse_url in ('pypi.org', 'pypi.python.org'):
warehouse_url += '/simple'
finder = PackageFinder(
find_links=[],
index_urls=[warehouse_url],
session=PipSession(),
)
# https://github.com/pypa/pip/blob/master/src/pip/_internal/req/constructors.py
reqs = parse_requirements(
filename=str(path),
session=PipSession(),
finder=finder,
)
for req in reqs:
# https://github.com/pypa/pip/blob/master/src/pip/_internal/req/req_install.py
if req.req is None:
req.req = SimpleNamespace(
name=req.link.url.split('/')[-1],
specifier='*',
marker=None,
extras=None,
)
deps.extend(DependencyMaker.from_requirement(
source=root,
req=req.req,
url=req.link and req.link.url,
editable=req.editable,
))
# update repository
if finder.index_urls:
finded_host = urlparse(finder.index_urls[0]).hostname
if finded_host != urlparse(warehouse_url).hostname:
repo = WareHouseRepo(url=finder.index_urls[0])
for dep in deps:
if isinstance(dep.repo, WareHouseRepo):
dep.repo = repo
root.attach_dependencies(deps)
return root
def dumps(self, reqs, project: Optional[RootDependency] = None,
content: Optional[str] = None) -> str:
lines = []
# get repos urls
urls = dict()
for req in reqs:
if isinstance(req.dep.repo, WareHouseRepo):
urls[req.dep.repo.name] = req.dep.repo.pretty_url
# dump repos urls
# pip._internal.build_env
if len(urls) == 1:
_name, url = urls.popitem()
elif 'pypi' in urls:
url = urls.pop('pypi')
else:
url = None
if url:
lines.append('-i ' + url)
for url in urls.values():
lines.append('--extra-index-url ' + url)
# disable hashes when dir-based deps are presented
# https://github.com/dephell/dephell/issues/41
with_hashes = not any(isinstance(req.dep.link, DirLink) for req in reqs)
for req in reqs:
lines.append(self._format_req(req=req, with_hashes=with_hashes))
return '\n'.join(lines) + '\n'
# https://github.com/pypa/packaging/blob/master/packaging/requirements.py
# https://github.com/jazzband/pip-tools/blob/master/piptools/utils.py
def _format_req(self, req, *, with_hashes: bool = True) -> str:
line = ''
if req.editable:
line += '-e '
if req.link is not None:
req.link.name = req.name # patch `#=egg` by right name
line += req.link.long
else:
line += req.raw_name
if req.extras:
line += '[{extras}]'.format(extras=','.join(req.extras))
if req.version:
line += req.version
if req.markers:
line += '; ' + req.markers
if with_hashes and req.hashes:
for digest in req.hashes:
# https://github.com/jazzband/pip-tools/blob/master/piptools/writer.py
line += '{sep}--hash {hash}'.format(
sep=self.sep,
hash=digest,
)
if self.lock and req.sources:
line += '{sep}# ^ from {sources}'.format(
sep=self.sep,
sources=', '.join(req.sources),
)
return line
| <filename>dephell/converters/pip.py<gh_stars>0
# built-in
from pathlib import Path
from types import SimpleNamespace
from typing import Optional
from urllib.parse import urlparse
# external
from dephell_links import DirLink
from pip._internal.download import PipSession
from pip._internal.index import PackageFinder
from pip._internal.req import parse_requirements
# app
from ..config import config
from ..controllers import DependencyMaker
from ..models import RootDependency
from ..repositories import WareHouseRepo
from .base import BaseConverter
class PIPConverter(BaseConverter):
    """Converter between dephell's dependency model and pip requirements files.

    ``lock=False`` targets ``requirements.in`` (abstract deps), ``lock=True``
    targets ``requirements.lock`` (pinned deps); a plain ``requirements.txt``
    plays either role depending on which sibling file exists.
    """

    # Separator used to continue a single requirement over several lines.
    sep = ' \\\n '

    def can_parse(self, path: Path, content: Optional[str] = None) -> bool:
        """Return True when `path` matches the requirements flavour
        (locked or not) handled by this converter instance."""
        if isinstance(path, str):
            path = Path(path)
        if path.name == 'requirements.txt':
            if path.with_name('requirements.in').exists():
                return (self.lock is True)
            if path.with_name('requirements.lock').exists():
                return (self.lock is False)
            return True
        if self.lock:
            return (path.name == 'requirements.lock')
        else:
            return (path.name == 'requirements.in')

    def __init__(self, lock):
        # True -> read/write pinned lock files, False -> abstract "in" files.
        self.lock = lock

    def load(self, path) -> RootDependency:
        """Parse a requirements file into a RootDependency tree."""
        deps = []
        root = RootDependency()

        warehouse_url = urlparse(config['warehouse']).hostname
        if warehouse_url in ('pypi.org', 'pypi.python.org'):
            warehouse_url += '/simple'
        finder = PackageFinder(
            find_links=[],
            index_urls=[warehouse_url],
            session=PipSession(),
        )
        # https://github.com/pypa/pip/blob/master/src/pip/_internal/req/constructors.py
        reqs = parse_requirements(
            filename=str(path),
            session=PipSession(),
            finder=finder,
        )
        for req in reqs:
            # https://github.com/pypa/pip/blob/master/src/pip/_internal/req/req_install.py
            if req.req is None:
                # Direct URL without a parsed requirement: fake a minimal one
                # named after the last component of the link.
                req.req = SimpleNamespace(
                    name=req.link.url.split('/')[-1],
                    specifier='*',
                    marker=None,
                    extras=None,
                )
            deps.extend(DependencyMaker.from_requirement(
                source=root,
                req=req.req,
                url=req.link and req.link.url,
                editable=req.editable,
            ))

        # update repository
        if finder.index_urls:
            finded_host = urlparse(finder.index_urls[0]).hostname
            if finded_host != urlparse(warehouse_url).hostname:
                # The file declared a custom index (-i): point warehouse deps
                # at it instead of the default repository.
                repo = WareHouseRepo(url=finder.index_urls[0])
                for dep in deps:
                    if isinstance(dep.repo, WareHouseRepo):
                        dep.repo = repo

        root.attach_dependencies(deps)
        return root

    def dumps(self, reqs, project: Optional[RootDependency] = None,
              content: Optional[str] = None) -> str:
        """Serialize requirements back into requirements-file text."""
        lines = []

        # get repos urls
        urls = dict()
        for req in reqs:
            if isinstance(req.dep.repo, WareHouseRepo):
                urls[req.dep.repo.name] = req.dep.repo.pretty_url

        # dump repos urls
        # pip._internal.build_env
        if len(urls) == 1:
            _name, url = urls.popitem()
        elif 'pypi' in urls:
            url = urls.pop('pypi')
        else:
            url = None
        if url:
            lines.append('-i ' + url)
        for url in urls.values():
            lines.append('--extra-index-url ' + url)

        # disable hashes when dir-based deps are presented
        # https://github.com/dephell/dephell/issues/41
        with_hashes = not any(isinstance(req.dep.link, DirLink) for req in reqs)
        for req in reqs:
            lines.append(self._format_req(req=req, with_hashes=with_hashes))
        return '\n'.join(lines) + '\n'

    # https://github.com/pypa/packaging/blob/master/packaging/requirements.py
    # https://github.com/jazzband/pip-tools/blob/master/piptools/utils.py
    def _format_req(self, req, *, with_hashes: bool = True) -> str:
        """Render one requirement as a (possibly multi-line) file entry."""
        line = ''
        if req.editable:
            line += '-e '
        if req.link is not None:
            req.link.name = req.name  # patch `#=egg` by right name
            line += req.link.long
        else:
            line += req.raw_name
        if req.extras:
            line += '[{extras}]'.format(extras=','.join(req.extras))
        if req.version:
            line += req.version
        if req.markers:
            line += '; ' + req.markers
        if with_hashes and req.hashes:
            for digest in req.hashes:
                # https://github.com/jazzband/pip-tools/blob/master/piptools/writer.py
                line += '{sep}--hash {hash}'.format(
                    sep=self.sep,
                    hash=digest,
                )
        if self.lock and req.sources:
            # Lock files also record where each requirement came from.
            line += '{sep}# ^ from {sources}'.format(
                sep=self.sep,
                sources=', '.join(req.sources),
            )
        return line
| en | 0.593944 | # built-in # external # app # https://github.com/pypa/pip/blob/master/src/pip/_internal/req/constructors.py # https://github.com/pypa/pip/blob/master/src/pip/_internal/req/req_install.py # update repository # get repos urls # dump repos urls # pip._internal.build_env # disable hashes when dir-based deps are presented # https://github.com/dephell/dephell/issues/41 # https://github.com/pypa/packaging/blob/master/packaging/requirements.py # https://github.com/jazzband/pip-tools/blob/master/piptools/utils.py # patch `#=egg` by right name # https://github.com/jazzband/pip-tools/blob/master/piptools/writer.py # ^ from {sources}'.format( | 2.154964 | 2 |
user/views.py | Carlosmax1/user-current-track | 0 | 6625510 | from django.shortcuts import render
from django.http import HttpResponse, response
from . import ctrack
def user(request):
    """Render the user page with current-track info fetched via ctrack."""
    # NOTE(review): credentials are hard-coded ('<KEY> <KEY>' looks like a
    # redacted secret) -- move them to settings/environment variables.
    tracker = ctrack.Track('carloosxdd', '<KEY> <KEY>', 'a', 'a')
    # Renamed the local (was `user`): it shadowed this view function's name.
    context = tracker.user()
    return render(request, 'user.html', context)
| from django.shortcuts import render
from django.http import HttpResponse, response
from . import ctrack
def user(request):
    """Render the user page with current-track info fetched via ctrack."""
    # NOTE(review): credentials are hard-coded ('<KEY> <KEY>' looks like a
    # redacted secret) -- move them to settings/environment variables.
    tracker = ctrack.Track('carloosxdd', '<KEY> <KEY>', 'a', 'a')
    # Renamed the local (was `user`): it shadowed this view function's name.
    context = tracker.user()
    return render(request, 'user.html', context)
| none | 1 | 1.857057 | 2 | |
src/tools/docmaker/utils.py | maxon887/freetype | 9 | 6625511 | <gh_stars>1-10
#
# utils.py
#
# Auxiliary functions for the `docmaker' tool (library file).
#
# Copyright 2002-2017 by
# <NAME>.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
import string, sys, os, glob, itertools
# current output directory
#
output_dir = None
# A function that generates a sorting key. We want lexicographical order
# (primary key) except that capital letters are sorted before lowercase
# ones (secondary key).
#
# The primary key is implemented by lowercasing the input. The secondary
# key is simply the original data appended, character by character. For
# example, the sort key for `FT_x' is `fFtT__xx', while the sort key for
# `ft_X' is `fftt__xX'. Since ASCII codes of uppercase letters are
# numerically smaller than the codes of lowercase letters, `fFtT__xx' gets
# sorted before `fftt__xX'.
#
def index_key( s ):
    """Sort key: case-insensitive primary order, capitals-first secondary order.

    Interleaves the lowercased string with the original, character by
    character, and joins with Python 2 `string.join` (default space
    separator).
    """
    return string.join( itertools.chain( *zip( s.lower(), s ) ) )
# Sort `input_list', placing the elements of `order_list' in front.
#
def sort_order_list( input_list, order_list ):
    """Return `order_list` followed by the elements of `input_list` that
    are not already in it (original order preserved)."""
    new_list = order_list[:]
    for element in input_list:
        # `element not in ...` idiom; the old loop variable `id` also
        # shadowed the builtin.
        if element not in order_list:
            new_list.append( element )
    return new_list
# Divert standard output to a given project documentation file. Use
# `output_dir' to determine the filename location if necessary and save the
# old stdout handle in a tuple that is returned by this function.
#
def open_output( filename ):
    """Redirect sys.stdout to `filename` (placed under `output_dir` when set).

    Returns a ( new_file, old_stdout ) tuple for a later `close_output`.
    """
    global output_dir
    if output_dir and output_dir != "":
        filename = output_dir + os.sep + filename
    # Save the current stdout so close_output() can restore it.
    old_stdout = sys.stdout
    new_file = open( filename, "w" )
    sys.stdout = new_file
    return ( new_file, old_stdout )
# Close the output that was returned by `open_output'.
#
def close_output( output ):
    """Undo `open_output`: close the diverted file and restore stdout."""
    new_file, old_stdout = output
    new_file.close()
    sys.stdout = old_stdout
# Check output directory.
#
def check_output():
    """Abort with exit code 2 when `output_dir` is set but not a directory."""
    global output_dir
    if output_dir:
        # A truthy string can never equal "", so the old inner
        # `if output_dir != ""` test (and its unreachable `else` branch that
        # reset output_dir to None) has been dropped.
        if not os.path.isdir( output_dir ):
            sys.stderr.write( "argument"
                              + " '" + output_dir + "' "
                              + "is not a valid directory\n" )
            sys.exit( 2 )
def file_exists( pathname ):
"""Check that a given file exists."""
result = 1
try:
file = open( pathname, "r" )
file.close()
except:
result = None
sys.stderr.write( pathname + " couldn't be accessed\n" )
return result
def make_file_list( args = None ):
"""Build a list of input files from command-line arguments."""
file_list = []
# sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )
if not args:
args = sys.argv[1:]
for pathname in args:
if string.find( pathname, '*' ) >= 0:
newpath = glob.glob( pathname )
newpath.sort() # sort files -- this is important because
# of the order of files
else:
newpath = [pathname]
file_list.extend( newpath )
if len( file_list ) == 0:
file_list = None
else:
# now filter the file list to remove non-existing ones
file_list = filter( file_exists, file_list )
return file_list
# eof
| #
# utils.py
#
# Auxiliary functions for the `docmaker' tool (library file).
#
# Copyright 2002-2017 by
# <NAME>.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
import string, sys, os, glob, itertools
# current output directory
#
output_dir = None
# A function that generates a sorting key. We want lexicographical order
# (primary key) except that capital letters are sorted before lowercase
# ones (secondary key).
#
# The primary key is implemented by lowercasing the input. The secondary
# key is simply the original data appended, character by character. For
# example, the sort key for `FT_x' is `fFtT__xx', while the sort key for
# `ft_X' is `fftt__xX'. Since ASCII codes of uppercase letters are
# numerically smaller than the codes of lowercase letters, `fFtT__xx' gets
# sorted before `fftt__xX'.
#
def index_key(s):
    """Return a sort key for identifier `s`.

    Interleaves the lowercased string with the original so that the
    primary order is case-insensitive and, as a tie-break, uppercase
    letters sort before lowercase ones: the key for `FT_x` is
    `fFtT__xx`, the key for `ft_X` is `fftt__xX`.

    The original used Python-2-only `string.join`, whose default
    separator is a space and thus did not even produce the documented
    key; `"".join` works on Python 2 and 3 and matches the docs.
    """
    return "".join(itertools.chain(*zip(s.lower(), s)))
# Sort `input_list', placing the elements of `order_list' in front.
#
def sort_order_list(input_list, order_list):
    """Return `order_list` followed by the elements of `input_list`
    that are not already in `order_list` (duplicates in `input_list`
    are preserved, original order kept)."""
    result = list(order_list)
    result.extend(item for item in input_list if item not in order_list)
    return result
# Divert standard output to a given project documentation file. Use
# `output_dir' to determine the filename location if necessary and save the
# old stdout handle in a tuple that is returned by this function.
#
def open_output(filename):
    """Divert stdout to the documentation file `filename`.

    Prefixes the module-level `output_dir` when one is set and returns
    a `(new_file, old_stdout)` tuple for `close_output` to restore.
    """
    global output_dir
    if output_dir and output_dir != "":
        filename = output_dir + os.sep + filename
    previous_stdout = sys.stdout
    redirected = open(filename, "w")
    sys.stdout = redirected
    return (redirected, previous_stdout)
# Close the output that was returned by `open_output'.
#
def close_output(output):
    """Close the stream opened by `open_output` and restore the saved
    stdout handle from the `(new_file, old_stdout)` tuple."""
    redirected, previous_stdout = output
    redirected.close()
    sys.stdout = previous_stdout
# Check output directory.
#
def check_output():
    """Validate the module-level `output_dir`.

    Exits with status 2 when the configured directory does not exist;
    leaves `output_dir` untouched when it is unset.
    """
    global output_dir
    if not output_dir:
        return
    if output_dir == "":
        # Unreachable in practice ("" is falsy), kept for parity with
        # the original control flow.
        output_dir = None
        return
    if not os.path.isdir(output_dir):
        sys.stderr.write("argument"
                         + " '" + output_dir + "' "
                         + "is not a valid directory\n")
        sys.exit(2)
def file_exists(pathname):
    """Check that a given file exists and is readable.

    Returns 1 on success and None otherwise (callers use this as a
    `filter` predicate, so the 1/None convention is preserved).

    The original used a bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit; only I/O errors are caught now, and
    the context manager guarantees the handle is closed.
    """
    try:
        with open(pathname, "r"):
            pass
        return 1
    except (IOError, OSError):
        sys.stderr.write(pathname + " couldn't be accessed\n")
        return None
def make_file_list(args=None):
    """Build a list of input files from command-line arguments.

    Glob patterns are expanded and sorted (processing order matters for
    the documentation build).  Returns None when no file was named,
    otherwise the list of existing files.

    Fixes: `string.find` is Python-2-only (`'*' in pathname` is the
    exact equivalent), and `filter()` returns a lazy iterator on
    Python 3 where callers expect a list, hence the `list(...)` wrap.
    """
    file_list = []
    if not args:
        args = sys.argv[1:]
    for pathname in args:
        if '*' in pathname:
            newpath = glob.glob(pathname)
            newpath.sort()  # sort files -- this is important because
                            # of the order of files
        else:
            newpath = [pathname]
        file_list.extend(newpath)
    if len(file_list) == 0:
        return None
    # filter the file list to remove non-existing ones
    return list(filter(file_exists, file_list))
# eof | en | 0.804087 | # # utils.py # # Auxiliary functions for the `docmaker' tool (library file). # # Copyright 2002-2017 by # <NAME>. # # This file is part of the FreeType project, and may only be used, # modified, and distributed under the terms of the FreeType project # license, LICENSE.TXT. By continuing to use, modify, or distribute # this file you indicate that you have read the license and # understand and accept it fully. # current output directory # # A function that generates a sorting key. We want lexicographical order # (primary key) except that capital letters are sorted before lowercase # ones (secondary key). # # The primary key is implemented by lowercasing the input. The secondary # key is simply the original data appended, character by character. For # example, the sort key for `FT_x' is `fFtT__xx', while the sort key for # `ft_X' is `fftt__xX'. Since ASCII codes of uppercase letters are # numerically smaller than the codes of lowercase letters, `fFtT__xx' gets # sorted before `fftt__xX'. # # Sort `input_list', placing the elements of `order_list' in front. # # Divert standard output to a given project documentation file. Use # `output_dir' to determine the filename location if necessary and save the # old stdout handle in a tuple that is returned by this function. # # Close the output that was returned by `open_output'. # # Check output directory. # Check that a given file exists. Build a list of input files from command-line arguments. # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' ) # sort files -- this is important because # of the order of files # now filter the file list to remove non-existing ones # eof | 2.870995 | 3 |
src/tests/part2/q142_test_linked_list_cycle_ii.py | hychrisli/PyAlgorithms | 0 | 6625512 | from src.base.test_cases import TestCases
from src.mappers.list2linkedlist import to_linkedlist
class LinkedListCycleIiTestCases(TestCases):
def __init__(self):
super(LinkedListCycleIiTestCases, self).__init__()
head, begin = self.gen_list1()
self.__add_test_case__('Test 1', head, begin)
head, begin = self.gen_list2()
self.__add_test_case__('Test 2', head, begin)
@staticmethod
def gen_list1():
head = to_linkedlist([1, 2, 3, 4, 5, 6, 7])
cur = head
while cur.next:
cur = cur.next
cur.next = head.next.next
return head, cur.next
@staticmethod
def gen_list2():
head = to_linkedlist([1, 2, 3, 4, 5, 6])
cur = head
while cur.next:
cur = cur.next
cur.next = head.next
return head, cur.next
| from src.base.test_cases import TestCases
from src.mappers.list2linkedlist import to_linkedlist
class LinkedListCycleIiTestCases(TestCases):
    """Fixtures for "Linked List Cycle II".

    Each case supplies a linked list whose tail loops back into the
    list, together with the node where the cycle begins.
    """

    def __init__(self):
        super(LinkedListCycleIiTestCases, self).__init__()
        cases = [('Test 1', self.gen_list1()),
                 ('Test 2', self.gen_list2())]
        for label, (head, begin) in cases:
            self.__add_test_case__(label, head, begin)

    @staticmethod
    def gen_list1():
        """7-node list whose tail points back to the third node."""
        head = to_linkedlist([1, 2, 3, 4, 5, 6, 7])
        tail = head
        while tail.next:
            tail = tail.next
        tail.next = head.next.next
        return head, tail.next

    @staticmethod
    def gen_list2():
        """6-node list whose tail points back to the second node."""
        head = to_linkedlist([1, 2, 3, 4, 5, 6])
        tail = head
        while tail.next:
            tail = tail.next
        tail.next = head.next
        return head, tail.next
| none | 1 | 3.04913 | 3 | |
DSK1/code.py | devashri12/greyatom-python-for-data-science | 0 | 6625513 | # --------------
#Code starts here
def read_file(path):
#Function to read file
file=open(path,mode='r')
#Opening of the file located in the path in 'read' mode
sentence=file.read()
#Reading of the first line of the file and storing it in a variable
file.close()
#Closing of the file
return(sentence)
#Returning the first line of the file
sample_message=read_file(file_path)
print(sample_message)
message_1=read_file(file_path_1)
message_2=read_file(file_path_2)
#Calling the function to read file
print(message_1)
print(message_2)
#Printing the line of the file
#Function to fuse message
def fuse_msg(message_a,message_b):
a=int(message_a)
b=int(message_b)
quotient=b//a
#Integer division of two numbers
return((quotient))
#Returning the quotient in string format
secret_msg_1=fuse_msg(message_1,message_2)
#Calling the function to read file
#Calling the function 'fuse_msg'
print(secret_msg_1)
#Printing the secret message
message_3=read_file(file_path_3)
print(message_3)
#Function to substitute the message
def substitute_msg(message_c):
if (message_c=='Red'):
sub='Army General'
elif (message_c=='Green'):
sub='Data Scientist'
else:
sub='Marine Biologist'
return(sub)
#If-else to compare the contents of the file
#Returning the substitute of the message
secret_msg_2=substitute_msg(message_3)
#Calling the function to read file
#Calling the function 'substitute_msg'
print(secret_msg_2)
#Printing the secret message
message_4=read_file(file_path_4)
message_5=read_file(file_path_5)
print(message_4)
print(message_5)
#Function to compare message
def compare_msg(message_d,message_e):
a_list=[]
b_list=[]
c_list=[]
a_list=message_d.split()
print(a_list)
#Splitting the message into a list
b_list=message_e.split()
print(b_list)
#Splitting the message into a list
#c_list=set(a_list)-set(b_list)
for a in b_list:
for b in a_list:
if a == b :
a_list.remove(b)
#print("Final List Size: ", len(a_list))
print(a_list)
# print(c_list)
#Comparing the elements from both the lists
final_msg=" ".join(a_list)
#Combining the words of a list back to a single string sentence
return(final_msg)
#Returning the sentence
#Calling the function to read file
#Calling the function to read file
secret_msg_3= compare_msg(message_4,message_5)
#Calling the function 'compare messages'
print(secret_msg_3)
#Printing the secret message
message_6=read_file(file_path_6)
print(message_6)
#Function to filter message
def extract_msg(message_f):
a_list=message_f.split()
print(a_list)
#Splitting the message into a list
even_word=lambda x: len(x)%2==0
#Creating the lambda function to identify even length words
b_list=list(filter(even_word,a_list))
#Splitting the message into a list
final_msg=" ".join(b_list)
#Combining the words of a list back to a single string sentence
return(final_msg)
#Returning the sentence
secret_msg_4=extract_msg(message_6)
print(secret_msg_4)
#Calling the function to read file
#Calling the function 'filter_msg'
#Printing the secret message
#Secret message parts in the correct order
print(secret_msg_3)
print(secret_msg_1)
print(secret_msg_4)
print(secret_msg_2)
message_parts=[secret_msg_3,secret_msg_1,secret_msg_4,secret_msg_2]
#secret_message=str(message_parts)
secret_msg =' '.join(map(str, message_parts))
# define the path where you
final_path= user_data_dir + '/secret_message.txt'
#Combine the secret message parts into a single complete secret message
#Function to write inside a file
def write_file(secret_msg,final_path):
f=open(final_path,'a+')
#Opening a file named 'secret_message' in 'write' mode
for i in secret_msg:
f.write('%s'%i)
#Writing to the file
f.close()
#Closing the file
a=write_file(secret_msg,final_path)
print(secret_msg)
#Calling the function to w
#my_lst = ['you are now', 1, 'step closer to become', 'Data Scientist']
##my_lst_str = ' '.join(map(str, my_lst))
#print(my_lst_str)
#Printing the entire secret message
#Code ends here
| # --------------
#Code starts here
def read_file(path):
    """Return the entire contents of the text file at `path`.

    Uses a context manager so the handle is closed even when reading
    raises (the original closed it manually and would leak on error).
    """
    with open(path, mode='r') as handle:
        return handle.read()
sample_message=read_file(file_path)
print(sample_message)
message_1=read_file(file_path_1)
message_2=read_file(file_path_2)
#Calling the function to read file
print(message_1)
print(message_2)
#Printing the line of the file
#Function to fuse message
def fuse_msg(message_a, message_b):
    """Integer-divide the second message by the first.

    Both arguments are coerced to int; returns int(b) // int(a).
    """
    divisor = int(message_a)
    dividend = int(message_b)
    return dividend // divisor
secret_msg_1=fuse_msg(message_1,message_2)
#Calling the function to read file
#Calling the function 'fuse_msg'
print(secret_msg_1)
#Printing the secret message
message_3=read_file(file_path_3)
print(message_3)
#Function to substitute the message
def substitute_msg(message_c):
    """Map a colour code word to its job title.

    'Red' -> 'Army General', 'Green' -> 'Data Scientist', anything
    else -> 'Marine Biologist'.
    """
    if message_c == 'Red':
        return 'Army General'
    if message_c == 'Green':
        return 'Data Scientist'
    return 'Marine Biologist'
secret_msg_2=substitute_msg(message_3)
#Calling the function to read file
#Calling the function 'substitute_msg'
print(secret_msg_2)
#Printing the secret message
message_4=read_file(file_path_4)
message_5=read_file(file_path_5)
print(message_4)
print(message_5)
#Function to compare message
def compare_msg(message_d, message_e):
    """Return the words of `message_d` that do not occur in `message_e`,
    joined into a sentence (order of `message_d` preserved).

    The original removed elements from `a_list` while iterating over
    it, which skips the element following every removal (e.g. adjacent
    duplicates survived).  Building a new list avoids the bug.
    """
    words_d = message_d.split()
    print(words_d)
    words_e = message_e.split()
    print(words_e)
    to_remove = set(words_e)  # O(1) membership tests
    kept = [word for word in words_d if word not in to_remove]
    print(kept)
    return " ".join(kept)
#Calling the function to read file
#Calling the function to read file
secret_msg_3= compare_msg(message_4,message_5)
#Calling the function 'compare messages'
print(secret_msg_3)
#Printing the secret message
message_6=read_file(file_path_6)
print(message_6)
#Function to filter message
def extract_msg(message_f):
    """Return only the even-length words of `message_f`, joined with
    spaces."""
    words = message_f.split()
    print(words)
    even_words = [word for word in words if len(word) % 2 == 0]
    return " ".join(even_words)
secret_msg_4=extract_msg(message_6)
print(secret_msg_4)
#Calling the function to read file
#Calling the function 'filter_msg'
#Printing the secret message
#Secret message parts in the correct order
print(secret_msg_3)
print(secret_msg_1)
print(secret_msg_4)
print(secret_msg_2)
message_parts=[secret_msg_3,secret_msg_1,secret_msg_4,secret_msg_2]
#secret_message=str(message_parts)
secret_msg =' '.join(map(str, message_parts))
# define the path where you
final_path= user_data_dir + '/secret_message.txt'
#Combine the secret message parts into a single complete secret message
#Function to write inside a file
def write_file(secret_msg, final_path):
    """Append `secret_msg` to the file at `final_path`.

    NOTE(review): opened in 'a+' (append) mode to match the original
    behaviour, even though the surrounding comments say 'write' mode.
    Writing the string in one call replaces the original per-character
    loop; the context manager closes the handle on error.
    """
    with open(final_path, 'a+') as handle:
        handle.write(secret_msg)
a=write_file(secret_msg,final_path)
print(secret_msg)
#Calling the function to w
#my_lst = ['you are now', 1, 'step closer to become', 'Data Scientist']
##my_lst_str = ' '.join(map(str, my_lst))
#print(my_lst_str)
#Printing the entire secret message
#Code ends here
| en | 0.710921 | # -------------- #Code starts here #Function to read file #Opening of the file located in the path in 'read' mode #Reading of the first line of the file and storing it in a variable #Closing of the file #Returning the first line of the file #Calling the function to read file #Printing the line of the file #Function to fuse message #Integer division of two numbers #Returning the quotient in string format #Calling the function to read file #Calling the function 'fuse_msg' #Printing the secret message #Function to substitute the message #If-else to compare the contents of the file #Returning the substitute of the message #Calling the function to read file #Calling the function 'substitute_msg' #Printing the secret message #Function to compare message #Splitting the message into a list #Splitting the message into a list #c_list=set(a_list)-set(b_list) #print("Final List Size: ", len(a_list)) # print(c_list) #Comparing the elements from both the lists #Combining the words of a list back to a single string sentence #Returning the sentence #Calling the function to read file #Calling the function to read file #Calling the function 'compare messages' #Printing the secret message #Function to filter message #Splitting the message into a list #Creating the lambda function to identify even length words #Splitting the message into a list #Combining the words of a list back to a single string sentence #Returning the sentence #Calling the function to read file #Calling the function 'filter_msg' #Printing the secret message #Secret message parts in the correct order #secret_message=str(message_parts) # define the path where you #Combine the secret message parts into a single complete secret message #Function to write inside a file #Opening a file named 'secret_message' in 'write' mode #Writing to the file #Closing the file #Calling the function to w #my_lst = ['you are now', 1, 'step closer to become', 'Data Scientist'] ##my_lst_str = ' '.join(map(str, my_lst)) 
#print(my_lst_str) #Printing the entire secret message #Code ends here | 4.140283 | 4 |
test.py | zawlinnnaing/my-wiki-crawler | 0 | 6625514 | <gh_stars>0
import multiprocessing as mp

# Report how many CPUs this machine exposes.
cpu_total = mp.cpu_count()
print("CPU count", cpu_total)
| import multiprocessing as mp
print("CPU count", mp.cpu_count()) | none | 1 | 1.959622 | 2 | |
pythonProject1/venv/Lib/site-packages/tkinterx/graph/shape.py | mjtomlinson/CNE330_Python_1_Final_Project | 0 | 6625515 | #from functools import lru_cache
class Rectangle:
    """Axis-aligned rectangle described by its bounding box (x0, y0, x1, y1).

    `bunch` maps the eight handle positions (four corners and four edge
    midpoints) to their coordinates.
    """

    def __init__(self, bbox):
        self.x0, self.y0, self.x1, self.y1 = bbox
        cx, cy = self.center
        self.bunch = {
            'left_top_corner': (self.x0, self.y0),
            'top_middle': (cx, self.y0),
            'right_top_corner': (self.x1, self.y0),
            'right_middle': (self.x1, cy),
            'right_bottom_corner': (self.x1, self.y1),
            'bottom_middle': (cx, self.y1),
            'left_bottom_corner': (self.x0, self.y1),
            'left_middle': (self.x0, cy),
        }

    @property
    def grad_x(self):
        # Signed horizontal extent.
        return self.x1 - self.x0

    @property
    def grad_y(self):
        # Signed vertical extent.
        return self.y1 - self.y0

    @property
    def width(self):
        return abs(self.grad_x)

    @property
    def height(self):
        return abs(self.grad_y)

    @property
    def center(self):
        x = (self.x0 + self.x1) / 2
        y = (self.y0 + self.y1) / 2
        return x, y

    def __contains__(self, point):
        # Half-open test [x0, x1) x [y0, y1).  The previous
        # `x in range(self.x0, self.x1)` only matched *integer*
        # coordinates; the comparisons are identical for ints and also
        # work for floats.
        x, y = point
        return self.x0 <= x < self.x1 and self.y0 <= y < self.y1

    def __lt__(self, other):
        '''self < other'''
        return self.width < other.width or self.height < other.height

    def __le__(self, other):
        '''self <= other'''
        return self.width <= other.width or self.height <= other.height
| #from functools import lru_cache
class Rectangle:
def __init__(self, bbox):
self.x0, self.y0, self.x1, self.y1 = bbox
self.bunch = {
'left_top_corner': (self.x0, self.y0),
'top_middle': (self.center[0], self.y0),
'right_top_corner': (self.x1, self.y0),
'right_middle': (self.x1, self.center[1]),
'right_bottom_corner': (self.x1, self.y1),
'bottom_middle': (self.center[0], self.y1),
'left_bottom_corner': (self.x0, self.y1),
'left_middle': (self.x0, self.center[1])
}
@property
def grad_x(self):
return self.x1 - self.x0
@property
def grad_y(self):
return self.y1 - self.y0
@property
def width(self):
return abs(self.grad_x)
@property
def height(self):
return abs(self.grad_y)
@property
def center(self):
x = (self.x0 + self.x1)/2
y = (self.y0 + self.y1)/2
return x, y
def __contains__(self, point):
x, y = point
x_cond = x in range(self.x0, self.x1)
y_cond = y in range(self.y0, self.y1)
return x_cond and y_cond
def __lt__(self, other):
'''self < other'''
return self.width < other.width or self.height < other.height
def __le__(self, other):
'''self < other'''
return self.width <= other.width or self.height <= other.height
| en | 0.509984 | #from functools import lru_cache self < other self < other | 3.330178 | 3 |
Text/vectorizer.py | sergeiGKS/AI-Frameworks | 29 | 6625516 | <gh_stars>10-100
import collections
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import FeatureHasher
class Vectorizer:
    """Turn a text column of a DataFrame into a sparse feature matrix.

    Supports plain counts or tf-idf weighting, optionally preceded by
    feature hashing to bound the vocabulary size.
    """

    def __init__(self, vectorizer_type, nb_hash=None):
        # "tfidf" selects tf-idf weighting; anything else means counts.
        self.vectorizer_type = vectorizer_type
        # Number of hash buckets; None disables the hashing stage.
        self.nb_hash = nb_hash

    def vectorizer_train(self, df, columns='Description', nb_gram=1, binary=False):
        """Fit on `df[columns]`; return (vec, feathash, data_vec)."""
        texts = [text for text in df[columns].values]
        if self.nb_hash is None:
            feathash = None
            if self.vectorizer_type == "tfidf":
                vec = TfidfVectorizer(ngram_range=(1, nb_gram))
            else:
                vec = CountVectorizer(binary=binary)
            data_vec = vec.fit_transform(texts)
        else:
            token_counts = [collections.Counter(text.split(" ")) for text in texts]
            feathash = FeatureHasher(self.nb_hash)
            data_hash = feathash.fit_transform(token_counts)
            if self.vectorizer_type == "tfidf":
                vec = TfidfTransformer()
                data_vec = vec.fit_transform(data_hash)
            else:
                vec = None  # hashed counts need no second stage
                data_vec = data_hash
        return vec, feathash, data_vec

    @staticmethod
    def apply_vectorizer(df, vec, feathash, columns='Description'):
        """Transform `df[columns]` with previously fitted vectorizers."""
        texts = [text for text in df[columns].values]
        if feathash is None:
            data_hash = texts
        else:
            token_counts = [collections.Counter(text.split(" ")) for text in texts]
            data_hash = feathash.transform(token_counts)
        if vec is None:
            return data_hash
        return vec.transform(data_hash)

    def save_dataframe(self, data, name=""):
        """Persist a sparse matrix under a name encoding the settings."""
        target = "data/vec_%s_nb_hash_%s_vectorizer_%s" % (
            name, str(self.nb_hash), str(self.vectorizer_type))
        sparse.save_npz(target, data)

    def load_dataframe(self, name=""):
        """Load a sparse matrix saved by `save_dataframe`."""
        source = "data/vec_%s_nb_hash_%s_vectorizer_%s.npz" % (
            name, str(self.nb_hash), str(self.vectorizer_type))
        return sparse.load_npz(source)
| import collections
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import FeatureHasher
class Vectorizer:
def __init__(self, vectorizer_type, nb_hash=None):
self.vectorizer_type = vectorizer_type
self.nb_hash = nb_hash
def vectorizer_train(self, df, columns='Description', nb_gram=1, binary=False):
data_array = [line for line in df[columns].values]
# Hashage
if self.nb_hash is None:
feathash = None
if self.vectorizer_type == "tfidf":
vec = TfidfVectorizer(ngram_range=(1, nb_gram))
data_vec = vec.fit_transform(data_array)
else:
vec = CountVectorizer(binary=binary)
data_vec = vec.fit_transform(data_array)
else:
data_dic_array = [collections.Counter(line.split(" ")) for line in data_array]
feathash = FeatureHasher(self.nb_hash)
data_hash = feathash.fit_transform(data_dic_array)
if self.vectorizer_type == "tfidf":
vec = TfidfTransformer()
data_vec = vec.fit_transform(data_hash)
else:
vec = None
data_vec = data_hash
return vec, feathash, data_vec
@staticmethod
def apply_vectorizer(df, vec, feathash, columns='Description'):
data_array = [line for line in df[columns].values]
# Hashage
if feathash is None:
data_hash = data_array
else:
data_dic_array = [collections.Counter(line.split(" ")) for line in data_array]
data_hash = feathash.transform(data_dic_array)
if vec is None:
data_vec = data_hash
else:
data_vec = vec.transform(data_hash)
return data_vec
def save_dataframe(self, data, name=""):
sparse.save_npz("data/vec_%s_nb_hash_%s_vectorizer_%s" % (name, str(self.nb_hash), str(self.vectorizer_type)),
data)
def load_dataframe(self, name=""):
return sparse.load_npz(
"data/vec_%s_nb_hash_%s_vectorizer_%s.npz" % (name, str(self.nb_hash), str(self.vectorizer_type))) | en | 0.686987 | # Hashage # Hashage | 2.83694 | 3 |
experiments/geometric_objects/train.py | brambozz/pl-3D-U-Net | 0 | 6625517 | import pytorch_lightning as pl
import pl3dunet.unet as unet
import generate_dataloader
# Define train dataloader
def train_dataloader():
    """Return the training dataloader for the geometric-objects run.

    Delegates to the project-level factory; presumably yields a torch
    DataLoader over the synthetic dataset -- TODO confirm.
    """
    return generate_dataloader.get_dataloader()

# Initialize network
# 3D U-Net: single-channel volumes in, 5-class segmentation out.
model = unet.UNet(in_channels=1, out_channels=5)
# NOTE(review): the dataloader hook is monkey-patched onto the model
# instance instead of being overridden on a LightningModule subclass;
# PyTorch Lightning calls `model.train_dataloader()` during `fit`.
model.train_dataloader = train_dataloader
trainer = pl.Trainer()
trainer.fit(model)
| import pytorch_lightning as pl
import pl3dunet.unet as unet
import generate_dataloader
# Define train dataloader
def train_dataloader():
return generate_dataloader.get_dataloader()
# Initialize network
model = unet.UNet(in_channels=1, out_channels=5)
model.train_dataloader = train_dataloader
trainer = pl.Trainer()
trainer.fit(model)
| en | 0.533313 | # Define train dataloader # Initialize network | 2.364131 | 2 |
starfish/core/spots/FindSpots/_base.py | haoxusci/starfish | 0 | 6625518 | from abc import abstractmethod
from typing import Callable, Optional
import numpy as np
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.pipeline.algorithmbase import AlgorithmBase
from starfish.core.types import Number, SpotFindingResults
class FindSpotsAlgorithm(metaclass=AlgorithmBase):
    """Base class for starfish spot finders.

    Spot finders detect bright spots against dark backgrounds, and each
    detector has different strengths and weaknesses.

    **Fixed-position spot finders**

    The finders below operate in one of two modes:

    1. Coded experiments, where genes are identified by spot patterns
       over all rounds and channels: spots are found once in a single
       reference image (a dots auxiliary image, or a maximum intensity
       projection of the primary images), then their positions are
       measured in every other image and the intensities across the
       whole experiment are stored in an :ref:`IntensityTable`.
    2. Single-round assays (single molecule FISH, RNAscope): all spots
       are found per image and concatenated into a long-form
       IntensityTable; positions for other :code:`(round, channel)`
       pairs are filled with :code:`np.nan`.

    1. The :py:class:`~starfish.spots._find_spots.blob.BlobDetector`
       pre-filters with a Laplacian-of-Gaussians or
       Difference-of-Gaussians (a fast LoG approximation) over a
       user-specified range of Gaussian kernel sizes, automatically
       selecting the best fit, so blobs of various sizes are detected.
    """

    @abstractmethod
    def run(self, image_stack: ImageStack,
            reference_image: Optional[ImageStack] = None, *args) -> SpotFindingResults:
        """Find and measure spots across rounds and channels in the provided ImageStack."""
        raise NotImplementedError()

    @staticmethod
    def _get_measurement_function(
            measurement_type: str
    ) -> Callable[[np.ndarray], Number]:
        """Resolve a numpy reduce function (e.g. "max", "mean") by name."""
        measurement_function = getattr(np, measurement_type, None)
        if measurement_function is None:
            raise ValueError(
                f'measurement_type must be a numpy reduce function such as "max" or "mean". '
                f'{measurement_type} not found.')
        return measurement_function
| from abc import abstractmethod
from typing import Callable, Optional
import numpy as np
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.pipeline.algorithmbase import AlgorithmBase
from starfish.core.types import Number, SpotFindingResults
class FindSpotsAlgorithm(metaclass=AlgorithmBase):
"""
Starfish spot finders use a variety of means to detect bright spots against
dark backgrounds. Starfish's spot detectors each have different strengths and weaknesses.
**Fixed-position spot finders**
The following spot finders have two modes of operation.
The first mode is suitable for coded
experiments where genes are identified by patterns of spots over all rounds and channels of the
experiment. In this mode, the spot finders identify spots in a single reference image,
which can be either a dots auxiliary image, or a maximum intensity projection of the primary
images. The positions of the maxima are then measured in all other images, and the intensities
across the complete experiment are stored in an :ref:`IntensityTable`
The second mode is suitable for assays that detect spots in a single round, such as single
molecule FISH and RNAscope. This mode simply finds all the spots and concatenates them into a
long-form IntensityTable. In this mode, the spots are not measured in images that correspond to
other :code:`(round, channel)` pairs; those positions of the IntensityTable are filled with
:code:`np.nan`.
1. The :py:class:`~starfish.spots._find_spots.blob.BlobDetector` allows the user to pre-filter
an image using either a Laplacian-of-Gaussians or
Difference-of-Gaussians (fast approximation to Laplacian-of-Gaussians). These filters are
applied at with a user-specified variety of Gaussian kernel sizes, and the best-fitting size is
automatically selected. This allows this filter to detect Gaussian shaped blobs of various
sizes.
"""
@abstractmethod
def run(self, image_stack: ImageStack,
reference_image: Optional[ImageStack] = None, *args) -> SpotFindingResults:
"""Find and measure spots across rounds and channels in the provided ImageStack."""
raise NotImplementedError()
@staticmethod
def _get_measurement_function(
measurement_type: str
) -> Callable[[np.ndarray], Number]:
try:
measurement_function = getattr(np, measurement_type)
except AttributeError:
raise ValueError(
f'measurement_type must be a numpy reduce function such as "max" or "mean". '
f'{measurement_type} not found.')
return measurement_function
| en | 0.89896 | Starfish spot finders use a variety of means to detect bright spots against dark backgrounds. Starfish's spot detectors each have different strengths and weaknesses. **Fixed-position spot finders** The following spot finders have two modes of operation. The first mode is suitable for coded experiments where genes are identified by patterns of spots over all rounds and channels of the experiment. In this mode, the spot finders identify spots in a single reference image, which can be either a dots auxiliary image, or a maximum intensity projection of the primary images. The positions of the maxima are then measured in all other images, and the intensities across the complete experiment are stored in an :ref:`IntensityTable` The second mode is suitable for assays that detect spots in a single round, such as single molecule FISH and RNAscope. This mode simply finds all the spots and concatenates them into a long-form IntensityTable. In this mode, the spots are not measured in images that correspond to other :code:`(round, channel)` pairs; those positions of the IntensityTable are filled with :code:`np.nan`. 1. The :py:class:`~starfish.spots._find_spots.blob.BlobDetector` allows the user to pre-filter an image using either a Laplacian-of-Gaussians or Difference-of-Gaussians (fast approximation to Laplacian-of-Gaussians). These filters are applied at with a user-specified variety of Gaussian kernel sizes, and the best-fitting size is automatically selected. This allows this filter to detect Gaussian shaped blobs of various sizes. Find and measure spots across rounds and channels in the provided ImageStack. | 2.431979 | 2 |
code/main.py | qatoqat/osu-background-remover | 1 | 6625519 | <filename>code/main.py<gh_stars>1-10
# from os import listdir
from os.path import isfile, join
from os import path as fpath
from os import walk, makedirs
from PIL import Image
import errno
import shutil
mypath = "E:\-Ext-\osu\Songs"
bakpath = mypath + "\imgbak"
chosencolor = (10, 10, 10)
# onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
i = 0
j = 0
k = 0
if not fpath.isdir(bakpath): # create folder for backup
try:
makedirs(bakpath)
except OSError as e:
if e.errno != errno.EEXIST:
print("backup folder exists!")
raise # raises the error again
for path, subdirs, files in walk(mypath):
if not path.startswith(bakpath): # loop directory
for name in files:
sname = name.lower()
if sname.endswith(('.png', '.jpg', '.bmp', '.jpeg')): # check if file is an image
fullname = join(path, name)
im = Image.open(fullname)
if not im.size[0] < 640:
print(fullname)
print(' width: %d - height: %d' % im.size)
i += 1
dstdir = join(bakpath, fpath.relpath(path, mypath))
try:
makedirs(dstdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise # raises the error again
if not fpath.exists(dstdir + "\\" +name) or im.size != (555, 555):
shutil.copy(fullname, dstdir)
print("Copied")
img = Image.new('RGB', (555, 555), chosencolor)
ext = "JPEG"
if sname.endswith('.png'):
ext = "PNG"
elif sname.endswith('.bmp'):
ext = "BMP"
if img.save(fullname, ext):
print("Replaced")
else:
print("Cant replace")
elif im.size == (555, 555):
getcolor = im.getpixel((0,0))
print(getcolor)
print(chosencolor)
if getcolor != chosencolor:
img = Image.new('RGB', (555, 555), chosencolor)
ext = "JPEG"
if sname.endswith('.png'):
ext = "PNG"
elif sname.endswith('.bmp'):
ext = "BMP"
img.save(fullname, ext)
print("Replaced")
k += 1
else:
print("Blanket")
j += 1
print("Total new: " + str(i))
print("Total blank: " + str(j))
print("Total replaced: " + str(k))
| <filename>code/main.py<gh_stars>1-10
# from os import listdir
from os.path import isfile, join
from os import path as fpath
from os import walk, makedirs
from PIL import Image
import errno
import shutil
mypath = "E:\-Ext-\osu\Songs"
bakpath = mypath + "\imgbak"
chosencolor = (10, 10, 10)
# onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
i = 0
j = 0
k = 0
if not fpath.isdir(bakpath): # create folder for backup
try:
makedirs(bakpath)
except OSError as e:
if e.errno != errno.EEXIST:
print("backup folder exists!")
raise # raises the error again
for path, subdirs, files in walk(mypath):
if not path.startswith(bakpath): # loop directory
for name in files:
sname = name.lower()
if sname.endswith(('.png', '.jpg', '.bmp', '.jpeg')): # check if file is an image
fullname = join(path, name)
im = Image.open(fullname)
if not im.size[0] < 640:
print(fullname)
print(' width: %d - height: %d' % im.size)
i += 1
dstdir = join(bakpath, fpath.relpath(path, mypath))
try:
makedirs(dstdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise # raises the error again
if not fpath.exists(dstdir + "\\" +name) or im.size != (555, 555):
shutil.copy(fullname, dstdir)
print("Copied")
img = Image.new('RGB', (555, 555), chosencolor)
ext = "JPEG"
if sname.endswith('.png'):
ext = "PNG"
elif sname.endswith('.bmp'):
ext = "BMP"
if img.save(fullname, ext):
print("Replaced")
else:
print("Cant replace")
elif im.size == (555, 555):
getcolor = im.getpixel((0,0))
print(getcolor)
print(chosencolor)
if getcolor != chosencolor:
img = Image.new('RGB', (555, 555), chosencolor)
ext = "JPEG"
if sname.endswith('.png'):
ext = "PNG"
elif sname.endswith('.bmp'):
ext = "BMP"
img.save(fullname, ext)
print("Replaced")
k += 1
else:
print("Blanket")
j += 1
print("Total new: " + str(i))
print("Total blank: " + str(j))
print("Total replaced: " + str(k))
| en | 0.672255 | # from os import listdir # onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))] # create folder for backup # raises the error again # loop directory # check if file is an image # raises the error again | 3.048923 | 3 |
source/story.py | noltron000-coursework/adventure | 1 | 6625520 | from event import *
class Story:
'''
A story points a reader to the first event.
It also holds metadata, such as its title.
Finally, it holds the function of impetus -- self.write().
This thing sets off all the recursion within the Event.
'''
def __init__(self):
# The content is long-form, unformatted text.
# It is essentially the meta-data of the story.
self.title = ''
self.subtitle = ''
self.synopsis = ''
# The first event of the story.
self.root = None
def __repr__(self):
'''
Represents what is seen by the user.
Specifically, this outputs a string containing:
- the title of the story
- the subtitle of the story
- a synopsis of the story
- the number of possible endings
'''
return (
f'{self.title.upper()}\n'
f'{self.subtitle}\n'
f'{len(self.subtitle) * "-"}\n\n'
f'{self.synopsis}\n'
)
def write(self):
'''
Write will ask the user for some basic metadata.
Then, it will start the process of asking for all the
other data and storylines that a good adventure needs.
'''
input(
f'{"=" * 47}\n'
' Welcome to Adventure Creator!\n'
'This CLI tool will help you get started on your\n'
'very own choose-your-own-adventure style story.\n'
' ~~PRESS ENTER TO CONTINUE~~\n'
f'{"=" * 47}\n'
)
self.title = input(
'Please input the story\'s title:\t'
)
self.subtitle = input(
'Please input the story\'s subtitle:\t'
)
self.synopsis = input(
'Please input the story\'s synopsis:\t'
)
self.root = Event()
self.root.add_content()
self.root.add_choices()
| from event import *
class Story:
'''
A story points a reader to the first event.
It also holds metadata, such as its title.
Finally, it holds the function of impetus -- self.write().
This thing sets off all the recursion within the Event.
'''
def __init__(self):
# The content is long-form, unformatted text.
# It is essentially the meta-data of the story.
self.title = ''
self.subtitle = ''
self.synopsis = ''
# The first event of the story.
self.root = None
def __repr__(self):
'''
Represents what is seen by the user.
Specifically, this outputs a string containing:
- the title of the story
- the subtitle of the story
- a synopsis of the story
- the number of possible endings
'''
return (
f'{self.title.upper()}\n'
f'{self.subtitle}\n'
f'{len(self.subtitle) * "-"}\n\n'
f'{self.synopsis}\n'
)
def write(self):
'''
Write will ask the user for some basic metadata.
Then, it will start the process of asking for all the
other data and storylines that a good adventure needs.
'''
input(
f'{"=" * 47}\n'
' Welcome to Adventure Creator!\n'
'This CLI tool will help you get started on your\n'
'very own choose-your-own-adventure style story.\n'
' ~~PRESS ENTER TO CONTINUE~~\n'
f'{"=" * 47}\n'
)
self.title = input(
'Please input the story\'s title:\t'
)
self.subtitle = input(
'Please input the story\'s subtitle:\t'
)
self.synopsis = input(
'Please input the story\'s synopsis:\t'
)
self.root = Event()
self.root.add_content()
self.root.add_choices()
| en | 0.906007 | A story points a reader to the first event. It also holds metadata, such as its title. Finally, it holds the function of impetus -- self.write(). This thing sets off all the recursion within the Event. # The content is long-form, unformatted text. # It is essentially the meta-data of the story. # The first event of the story. Represents what is seen by the user. Specifically, this outputs a string containing: - the title of the story - the subtitle of the story - a synopsis of the story - the number of possible endings Write will ask the user for some basic metadata. Then, it will start the process of asking for all the other data and storylines that a good adventure needs. | 3.833477 | 4 |
Scarky2/account/views.py | kopringo/Scarky2 | 0 | 6625521 |
def signup(request):
pass
def signup_confirm(request):
pass |
def signup(request):
pass
def signup_confirm(request):
pass | none | 1 | 0.919145 | 1 | |
cdfs_rows.py | abeagomez/nonograms_solver | 0 | 6625522 | <gh_stars>0
from itertools import combinations_with_replacement
from cdfs_box import Stack, problem, build_board
from pprint import pprint
def gen_lines(width, pattern):
"""
This yields a tuple for each possible layout of
pattern inside the row. The tuple elements are the
gaps before each block in pattern.
The tuple doesn't include the last gap, since that's
just: width - sum(sol) - sum(pattern)
"""
spaces = width - (sum(pattern) + len(pattern) - 1)
for sol in combinations_with_replacement(range(spaces + 1), len(pattern)):
sol = sol[0:1] + tuple((sol[i] - sol[i - 1] + 1) for i in range(1, len(sol)))
yield sol
def expand_solution(solution, width, pattern):
"""
expands a solution to a tuple of 1 (ON) and 0 (OFF)
"""
r = []
for s, p in zip(solution, pattern):
r.extend([False] * s)
r.extend([True] * p)
r.extend([False] * (width - sum(solution) - sum(pattern)))
return r
def cdfs(width: int, col_rest: list, row_rest: list):
board = build_board(width)
p = problem(col_rest, row_rest, width, board)
stack = Stack()
# Each state is the current status of the problem and the index of the row being analyzed.
stack.push((p, 0))
while not stack.isEmpty():
p, row = stack.pop()
assert isinstance(p, problem)
if row >= width:
# Found (only reaches this point if all rows were correct)
return p.board
np = problem(col_rest, row_rest, width, p.copy_board())
for sol in gen_lines(width, row_rest[row]):
sol = expand_solution(sol, width, row_rest[row])
np.board[row] = sol
res = True
for i in range(width):
idx = row * width + i
if sol[i] is True:
res &= np.check_column_when_set_true(np.current_column(i), idx)
else:
res &= np.check_column_when_set_false(np.current_column(i), idx)
if not res:
break
if res:
nnp = problem(col_rest, row_rest, width, np.copy_board())
stack.push((nnp, row + 1))
return None
if __name__ == '__main__':
from case_generator import generate_boards
import time
t = time.time()
pprint(cdfs(15, [[2, 1], [2, 1, 1], [1, 1, 1, 1], [2, 1], [1, 1, 1], [1, 1, 1, 1], [3], [3, 2, 1], [1, 1, 3, 1],
[1, 1, 1, 1],
[1, 2, 1, 1, 1], [1, 1, 1, 1], [1, 2, 1], [1], [1, 4, 1]],
[[1, 2, 1], [1, 3, 1], [1, 1, 3], [1, 1, 1], [1, 1], [1, 1], [1, 2, 1, 1, 1], [2, 3, 3, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1], [1, 2], [1, 1], [1, 1, 1]]
))
print(time.time() - t)
# for a in generate_boards(1, 15, 0.3):
# pprint(cdfs(a[0], a[1][0], a[1][1]))
# print(a[2])
# print("""XX.X
# XXX.
# XXX.
# X.X.""")
# print('---------------')
# print('[[[4], [3], [3], [1]], [[2, 1], [3], [3], [1, 1]]]')
# print('---------------')
# pprint(cdfs(a[0], [[4], [3], [3], [1]], [[2, 1], [3], [3], [1, 1]]))
| from itertools import combinations_with_replacement
from cdfs_box import Stack, problem, build_board
from pprint import pprint
def gen_lines(width, pattern):
"""
This yields a tuple for each possible layout of
pattern inside the row. The tuple elements are the
gaps before each block in pattern.
The tuple doesn't include the last gap, since that's
just: width - sum(sol) - sum(pattern)
"""
spaces = width - (sum(pattern) + len(pattern) - 1)
for sol in combinations_with_replacement(range(spaces + 1), len(pattern)):
sol = sol[0:1] + tuple((sol[i] - sol[i - 1] + 1) for i in range(1, len(sol)))
yield sol
def expand_solution(solution, width, pattern):
"""
expands a solution to a tuple of 1 (ON) and 0 (OFF)
"""
r = []
for s, p in zip(solution, pattern):
r.extend([False] * s)
r.extend([True] * p)
r.extend([False] * (width - sum(solution) - sum(pattern)))
return r
def cdfs(width: int, col_rest: list, row_rest: list):
board = build_board(width)
p = problem(col_rest, row_rest, width, board)
stack = Stack()
# Each state is the current status of the problem and the index of the row being analyzed.
stack.push((p, 0))
while not stack.isEmpty():
p, row = stack.pop()
assert isinstance(p, problem)
if row >= width:
# Found (only reaches this point if all rows were correct)
return p.board
np = problem(col_rest, row_rest, width, p.copy_board())
for sol in gen_lines(width, row_rest[row]):
sol = expand_solution(sol, width, row_rest[row])
np.board[row] = sol
res = True
for i in range(width):
idx = row * width + i
if sol[i] is True:
res &= np.check_column_when_set_true(np.current_column(i), idx)
else:
res &= np.check_column_when_set_false(np.current_column(i), idx)
if not res:
break
if res:
nnp = problem(col_rest, row_rest, width, np.copy_board())
stack.push((nnp, row + 1))
return None
if __name__ == '__main__':
from case_generator import generate_boards
import time
t = time.time()
pprint(cdfs(15, [[2, 1], [2, 1, 1], [1, 1, 1, 1], [2, 1], [1, 1, 1], [1, 1, 1, 1], [3], [3, 2, 1], [1, 1, 3, 1],
[1, 1, 1, 1],
[1, 2, 1, 1, 1], [1, 1, 1, 1], [1, 2, 1], [1], [1, 4, 1]],
[[1, 2, 1], [1, 3, 1], [1, 1, 3], [1, 1, 1], [1, 1], [1, 1], [1, 2, 1, 1, 1], [2, 3, 3, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1], [1, 1, 1, 1, 1, 1], [1], [1, 2], [1, 1], [1, 1, 1]]
))
print(time.time() - t)
# for a in generate_boards(1, 15, 0.3):
# pprint(cdfs(a[0], a[1][0], a[1][1]))
# print(a[2])
# print("""XX.X
# XXX.
# XXX.
# X.X.""")
# print('---------------')
# print('[[[4], [3], [3], [1]], [[2, 1], [3], [3], [1, 1]]]')
# print('---------------')
# pprint(cdfs(a[0], [[4], [3], [3], [1]], [[2, 1], [3], [3], [1, 1]])) | en | 0.797376 | This yields a tuple for each possible layout of pattern inside the row. The tuple elements are the gaps before each block in pattern. The tuple doesn't include the last gap, since that's just: width - sum(sol) - sum(pattern) expands a solution to a tuple of 1 (ON) and 0 (OFF) # Each state is the current status of the problem and the index of the row being analyzed. # Found (only reaches this point if all rows were correct) # for a in generate_boards(1, 15, 0.3): # pprint(cdfs(a[0], a[1][0], a[1][1])) # print(a[2]) # print("""XX.X # XXX. # XXX. # X.X.""") # print('---------------') # print('[[[4], [3], [3], [1]], [[2, 1], [3], [3], [1, 1]]]') # print('---------------') # pprint(cdfs(a[0], [[4], [3], [3], [1]], [[2, 1], [3], [3], [1, 1]])) | 3.142236 | 3 |
tests/exam/while2.py | Mieschendahl/assignment-final-stub | 0 | 6625523 | <filename>tests/exam/while2.py
#in=
#golden=12345
i = 1
while i < 6:
print(i)
i = i + 1
| <filename>tests/exam/while2.py
#in=
#golden=12345
i = 1
while i < 6:
print(i)
i = i + 1
| zh | 0.507815 | #in= #golden=12345 | 2.760688 | 3 |
mobula/__init__.py | wkcn/mobula | 47 | 6625524 | from .Net import *
from .wrapper import *
| from .Net import *
from .wrapper import *
| none | 1 | 1.062995 | 1 | |
classification/dataloder.py | utsabbuet17/DSPProject84 | 0 | 6625525 | <gh_stars>0
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
def TrainLoader(batchSize, imgDir, trainTransform) :
dataloader = DataLoader(ImageFolder(imgDir, trainTransform), batch_size=batchSize, shuffle=True)# transform is for image to tensor making
return dataloader
def ValLoader(batchSize, imgDir, valTransform) :
dataloader = DataLoader(ImageFolder(imgDir, valTransform), batch_size=batchSize, shuffle=False)
return dataloader
| from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
def TrainLoader(batchSize, imgDir, trainTransform) :
dataloader = DataLoader(ImageFolder(imgDir, trainTransform), batch_size=batchSize, shuffle=True)# transform is for image to tensor making
return dataloader
def ValLoader(batchSize, imgDir, valTransform) :
dataloader = DataLoader(ImageFolder(imgDir, valTransform), batch_size=batchSize, shuffle=False)
return dataloader | en | 0.961287 | # transform is for image to tensor making | 2.984951 | 3 |
website/urls.py | Arman19891006/Mysite | 0 | 6625526 | <gh_stars>0
from django.urls import path
from website.views import *
app_name = 'website'
urlpatterns = [
path('' , index_view , name = 'index'),
path('about' , about_view,name = 'about'),
path('contact' , contact_view,name = 'contact'),
path('test' , test_view,name = 'test'),
] | from django.urls import path
from website.views import *
app_name = 'website'
urlpatterns = [
path('' , index_view , name = 'index'),
path('about' , about_view,name = 'about'),
path('contact' , contact_view,name = 'contact'),
path('test' , test_view,name = 'test'),
] | none | 1 | 1.894136 | 2 | |
rc3_m3u.py | KOLANICH-tools/rc3_ical_fahrplan.py | 0 | 6625527 | <gh_stars>0
#!/usr/bin/env python3
import sys
from datetime import datetime, timedelta
from pathlib import Path
try:
import ujson as json
except ImportError:
import json
rooms = {
"cbase": "c-base",
"cwtv": "Chaos-West TV",
"r3s": "Remote Rhein Ruhr Stage",
"csh": "ChaosStudio Hamburg",
"chaoszone": "ChaosZone TV",
"fem": "FeM",
"franconiannet": "franconian.net",
"aboutfuture": "about:future",
"sendezentrum": "Sendezentrum",
"haecksen": "Haecksen",
"gehacktes": "Gehacktes from Hell / Bierscheune",
"xhain": "xHain Lichtung",
"infobeamer": "Infobeamer"
}
BASE = "https://live.dus.c3voc.de/"
resolutions = {
"hd": "hd",
"sd": "sd",
"audio": "segment"
}
formats = {
"HLS": ("hls", "m3u8"),
"WebM": ("webm", "webm")
}
translations = {
"native": "Native",
"translated": "Translated",
"translated-2": "Translated 2"
}
def main() -> None:
curDir = Path(".").absolute()
for formatName, formatDescriptor in formats.items():
formatDir, ext = formatDescriptor
for resolution in resolutions:
formatResFile = curDir / ("rc3_" + formatDir + "_" + resolution + ".m3u")
with formatResFile.open("wt") as f:
for roomSlug, roomName in rooms.items():
prefix = BASE + formatDir + "/" + roomSlug + "/"
for translSlug, translName in translations.items():
resUri = prefix + translSlug + "_" + resolution + "." + ext
print(file=f)
print("#EXTINF:-1, " + roomName + " " + translName, file=f)
print(resUri, file=f)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import sys
from datetime import datetime, timedelta
from pathlib import Path
try:
import ujson as json
except ImportError:
import json
rooms = {
"cbase": "c-base",
"cwtv": "Chaos-West TV",
"r3s": "Remote Rhein Ruhr Stage",
"csh": "ChaosStudio Hamburg",
"chaoszone": "ChaosZone TV",
"fem": "FeM",
"franconiannet": "franconian.net",
"aboutfuture": "about:future",
"sendezentrum": "Sendezentrum",
"haecksen": "Haecksen",
"gehacktes": "Gehacktes from Hell / Bierscheune",
"xhain": "xHain Lichtung",
"infobeamer": "Infobeamer"
}
BASE = "https://live.dus.c3voc.de/"
resolutions = {
"hd": "hd",
"sd": "sd",
"audio": "segment"
}
formats = {
"HLS": ("hls", "m3u8"),
"WebM": ("webm", "webm")
}
translations = {
"native": "Native",
"translated": "Translated",
"translated-2": "Translated 2"
}
def main() -> None:
curDir = Path(".").absolute()
for formatName, formatDescriptor in formats.items():
formatDir, ext = formatDescriptor
for resolution in resolutions:
formatResFile = curDir / ("rc3_" + formatDir + "_" + resolution + ".m3u")
with formatResFile.open("wt") as f:
for roomSlug, roomName in rooms.items():
prefix = BASE + formatDir + "/" + roomSlug + "/"
for translSlug, translName in translations.items():
resUri = prefix + translSlug + "_" + resolution + "." + ext
print(file=f)
print("#EXTINF:-1, " + roomName + " " + translName, file=f)
print(resUri, file=f)
if __name__ == "__main__":
main() | fr | 0.221828 | #!/usr/bin/env python3 | 2.307096 | 2 |
python/bifrost/views/basic_views.py | Radio-Camera-Initiative/bifrost | 0 | 6625528 |
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from bifrost.pipeline import block_view
from bifrost.DataType import DataType
from bifrost.units import convert_units
from numpy import isclose
from copy import deepcopy
def custom(block, hdr_transform):
    """Apply an arbitrary header transform to *block*.

    This is simply an alias for `bifrost.pipeline.block_view`.
    """
    view = block_view(block, hdr_transform)
    return view
def rename_axis(block, old, new):
    """Return a view of *block* with the axis labelled *old* renamed to *new*."""
    def header_transform(hdr, old=old, new=new):
        labels = hdr['_tensor']['labels']
        labels[labels.index(old)] = new
        return hdr
    return block_view(block, header_transform)
def reinterpret_axis(block, axis, label, scale=None, units=None):
    """Manually reinterpret the label, scale and/or units of an axis.

    Args:
        block: Input block whose header should be reinterpreted.
        axis (int or str): Index or current label of the axis to modify.
        label (str or None): New label for the axis (unchanged if None).
        scale (list or None): New [offset, step] scale (unchanged if None).
        units (str or None): New units string (unchanged if None).
    """
    def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            axis = tensor['labels'].index(axis)
        if label is not None:
            tensor['labels'][axis] = label
        if scale is not None:
            tensor['scales'][axis] = scale
        if units is not None:
            tensor['units'][axis] = units
        return hdr
    return block_view(block, header_transform)
def reverse_scale(block, axis):
    """Manually negate the scale step of a given axis.

    Args:
        block: Input block.
        axis (int or str): Index or label of the axis whose scale step
            should be reversed in sign.
    """
    def header_transform(hdr, axis=axis):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            axis = tensor['labels'].index(axis)
        tensor['scales'][axis][1] *= -1
        return hdr
    return block_view(block, header_transform)
def add_axis(block, axis, label=None, scale=None, units=None):
    """Add an extra dimension to the frame at position 'axis'

    E.g., if the shape is [-1, 3, 2], then
    selecting axis=1 would change the shape to be
    [-1, 1, 3, 2].

    Axis may be negative, or a string corresponding to an existing axis label,
    in which case the new axis is inserted after the referenced axis.
    """
    def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            # Insert *after* the referenced (labelled) axis
            axis = tensor['labels'].index(axis) + 1
        if axis < 0:
            # Valid insertion points run 0..len(shape) inclusive, hence +1
            axis += len(tensor['shape']) + 1
        tensor['shape'].insert(axis, 1)
        if 'labels' in tensor:
            tensor['labels'].insert(axis, label)
        if 'scales' in tensor:
            tensor['scales'].insert(axis, scale)
        if 'units' in tensor:
            tensor['units'].insert(axis, units)
        return hdr
    return block_view(block, header_transform)
def delete_axis(block, axis):
    """Remove a unitary dimension from the frame

    E.g., if the shape is [-1, 1, 3, 2], then
    selecting axis=1 would change the shape to be
    [-1, 3, 2].

    Axis may be negative, or a string corresponding to an existing axis label.

    Raises:
        ValueError: If the referenced axis does not have length 1.
    """
    def header_transform(hdr, axis=axis):
        tensor = hdr['_tensor']
        specified_axis = axis  # keep the caller's spec for error messages
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            specified_axis = "'%s'" % specified_axis
            axis = tensor['labels'].index(axis)
        if axis < 0:
            # Bug fix: valid deletion indices run 0..len(shape)-1, so
            # negative indices wrap by len(shape). The original added
            # len(shape)+1 (copied from add_axis, where insertion points
            # go one past the end), which raised IndexError for axis=-1.
            axis += len(tensor['shape'])
        if tensor['shape'][axis] != 1:
            raise ValueError("Cannot delete non-unitary axis %s with shape %i"
                             % (specified_axis, tensor['shape'][axis]))
        del tensor['shape'][axis]
        if 'labels' in tensor:
            del tensor['labels'][axis]
        if 'scales' in tensor:
            del tensor['scales'][axis]
        if 'units' in tensor:
            del tensor['units'][axis]
        return hdr
    return block_view(block, header_transform)
def astype(block, dtype):
    """Return a view of *block* reinterpreted as data type *dtype*.

    The last (fastest) axis is rescaled so that the total byte size of a
    frame is preserved.

    Raises:
        ValueError: If the last-axis byte size is not a multiple of the
            new type's itemsize.
    """
    def header_transform(hdr, new_dtype=dtype):
        tensor = hdr['_tensor']
        old_dtype = tensor['dtype']
        old_itemsize = DataType(old_dtype).itemsize
        new_itemsize = DataType(new_dtype).itemsize
        old_axissize = old_itemsize * tensor['shape'][-1]
        if old_axissize % new_itemsize:
            raise ValueError("New type not compatible with data shape")
        tensor['shape'][-1] = old_axissize // new_itemsize
        # Consistency fix: use the bound parameter rather than the
        # closed-over `dtype`, so the transform is self-contained.
        tensor['dtype'] = new_dtype
        return hdr
    return block_view(block, header_transform)
def split_axis(block, axis, n, label=None):
    """Split an axis into (size // n, n) pieces.

    The referenced axis is divided by *n* and a new axis of length *n*
    is inserted directly after it.

    Args:
        block: Input block.
        axis (int or str): Axis to split (index or label).
        n (int): Size of the new inner axis.
        label (str or None): Label for the new axis; defaults to
            "<old label>_split".

    Raises:
        ValueError: If *n* does not evenly divide a non-frame axis.
    """
    # Set function attributes to enable capture in nested function (closure)
    def header_transform(hdr, axis=axis, n=n, label=label):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            axis = tensor['labels'].index(axis)
        shape = tensor['shape']
        if shape[axis] == -1:
            # Axis is frame axis
            # TODO: Should assert even division here instead?
            # ***TODO: Why does pipeline deadlock when this doesn't divide?
            # Py3 fix: floor division; `/` would make gulp_nframe a float
            hdr['gulp_nframe'] = (hdr['gulp_nframe'] - 1) // n + 1
        else:
            # Axis is not frame axis
            if shape[axis] % n:
                raise ValueError("Split does not evenly divide axis (%i // %i)" %
                                 (tensor['shape'][axis], n))
            shape[axis] //= n
        shape.insert(axis + 1, n)
        if 'units' in tensor:
            tensor['units'].insert(axis + 1, tensor['units'][axis])
        if 'labels' in tensor:
            if label is None:
                label = tensor['labels'][axis] + "_split"
            tensor['labels'].insert(axis + 1, label)
        if 'scales' in tensor:
            # Inner axis inherits the fine step; outer step grows by n
            tensor['scales'].insert(axis + 1, [0, tensor['scales'][axis][1]])
            tensor['scales'][axis][1] *= n
        return hdr
    return block_view(block, header_transform)
def merge_axes(block, axis1, axis2, label=None):
    """Merge two adjacent axes into one.

    Args:
        block: Input block.
        axis1 (int or str): First axis (index or label).
        axis2 (int or str): Second axis (index or label); must be adjacent
            to axis1.
        label (str or None): Optional label for the merged axis.

    Raises:
        ValueError: If the axes are not adjacent, if the second (faster)
            axis is the frame axis, or if the axis scales are inconsistent.
    """
    def header_transform(hdr, axis1=axis1, axis2=axis2, label=label):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis1, str):
            axis1 = tensor['labels'].index(axis1)
        if isinstance(axis2, str):
            axis2 = tensor['labels'].index(axis2)
        axis1, axis2 = sorted([axis1, axis2])
        if axis2 != axis1 + 1:
            raise ValueError("Merge axes must be adjacent")
        n = tensor['shape'][axis2]
        if n == -1:
            # Axis2 is frame axis
            raise ValueError("Second merge axis cannot be frame axis")
        elif tensor['shape'][axis1] == -1:
            # Axis1 is frame axis: fold the factor into the gulp size
            hdr['gulp_nframe'] *= n
        else:
            # Neither axis is frame axis
            tensor['shape'][axis1] *= n
        del tensor['shape'][axis2]
        if 'scales' in tensor and 'units' in tensor:
            scale1 = tensor['scales'][axis1][1]
            scale2 = tensor['scales'][axis2][1]
            units1 = tensor['units'][axis1]
            units2 = tensor['units'][axis2]
            scale2 = convert_units(scale2, units2, units1)
            # The outer step must equal n inner steps for the merged axis
            # to remain uniformly sampled
            if not isclose(scale1, n * scale2):
                raise ValueError("Scales of merge axes do not line up: "
                                 "%f != %f" % (scale1, n * scale2))
            tensor['scales'][axis1][1] = scale2
            del tensor['scales'][axis2]
            del tensor['units'][axis2]
        if 'labels' in tensor:
            if label is not None:
                tensor['labels'][axis1] = label
            del tensor['labels'][axis2]
        return hdr
    return block_view(block, header_transform)
|
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from bifrost.pipeline import block_view
from bifrost.DataType import DataType
from bifrost.units import convert_units
from numpy import isclose
from copy import deepcopy
def custom(block, hdr_transform):
    """Apply an arbitrary header transform to *block*.

    This is simply an alias for `bifrost.pipeline.block_view`.
    """
    view = block_view(block, hdr_transform)
    return view
def rename_axis(block, old, new):
    """Return a view of *block* with the axis labelled *old* renamed to *new*."""
    def header_transform(hdr, old=old, new=new):
        labels = hdr['_tensor']['labels']
        labels[labels.index(old)] = new
        return hdr
    return block_view(block, header_transform)
def reinterpret_axis(block, axis, label, scale=None, units=None):
    """Manually reinterpret the label, scale and/or units of an axis.

    Args:
        block: Input block whose header should be reinterpreted.
        axis (int or str): Index or current label of the axis to modify.
        label (str or None): New label for the axis (unchanged if None).
        scale (list or None): New [offset, step] scale (unchanged if None).
        units (str or None): New units string (unchanged if None).
    """
    def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            axis = tensor['labels'].index(axis)
        if label is not None:
            tensor['labels'][axis] = label
        if scale is not None:
            tensor['scales'][axis] = scale
        if units is not None:
            tensor['units'][axis] = units
        return hdr
    return block_view(block, header_transform)
def reverse_scale(block, axis):
    """Manually negate the scale step of a given axis.

    Args:
        block: Input block.
        axis (int or str): Index or label of the axis whose scale step
            should be reversed in sign.
    """
    def header_transform(hdr, axis=axis):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            axis = tensor['labels'].index(axis)
        tensor['scales'][axis][1] *= -1
        return hdr
    return block_view(block, header_transform)
def add_axis(block, axis, label=None, scale=None, units=None):
    """Add an extra dimension to the frame at position 'axis'

    E.g., if the shape is [-1, 3, 2], then
    selecting axis=1 would change the shape to be
    [-1, 1, 3, 2].

    Axis may be negative, or a string corresponding to an existing axis label,
    in which case the new axis is inserted after the referenced axis.
    """
    def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            # Insert *after* the referenced (labelled) axis
            axis = tensor['labels'].index(axis) + 1
        if axis < 0:
            # Valid insertion points run 0..len(shape) inclusive, hence +1
            axis += len(tensor['shape']) + 1
        tensor['shape'].insert(axis, 1)
        if 'labels' in tensor:
            tensor['labels'].insert(axis, label)
        if 'scales' in tensor:
            tensor['scales'].insert(axis, scale)
        if 'units' in tensor:
            tensor['units'].insert(axis, units)
        return hdr
    return block_view(block, header_transform)
def delete_axis(block, axis):
    """Remove a unitary dimension from the frame

    E.g., if the shape is [-1, 1, 3, 2], then
    selecting axis=1 would change the shape to be
    [-1, 3, 2].

    Axis may be negative, or a string corresponding to an existing axis label.

    Raises:
        ValueError: If the referenced axis does not have length 1.
    """
    def header_transform(hdr, axis=axis):
        tensor = hdr['_tensor']
        specified_axis = axis  # keep the caller's spec for error messages
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            specified_axis = "'%s'" % specified_axis
            axis = tensor['labels'].index(axis)
        if axis < 0:
            # Bug fix: valid deletion indices run 0..len(shape)-1, so
            # negative indices wrap by len(shape). The original added
            # len(shape)+1 (copied from add_axis, where insertion points
            # go one past the end), which raised IndexError for axis=-1.
            axis += len(tensor['shape'])
        if tensor['shape'][axis] != 1:
            raise ValueError("Cannot delete non-unitary axis %s with shape %i"
                             % (specified_axis, tensor['shape'][axis]))
        del tensor['shape'][axis]
        if 'labels' in tensor:
            del tensor['labels'][axis]
        if 'scales' in tensor:
            del tensor['scales'][axis]
        if 'units' in tensor:
            del tensor['units'][axis]
        return hdr
    return block_view(block, header_transform)
def astype(block, dtype):
    """Return a view of *block* reinterpreted as data type *dtype*.

    The last (fastest) axis is rescaled so that the total byte size of a
    frame is preserved.

    Raises:
        ValueError: If the last-axis byte size is not a multiple of the
            new type's itemsize.
    """
    def header_transform(hdr, new_dtype=dtype):
        tensor = hdr['_tensor']
        old_dtype = tensor['dtype']
        old_itemsize = DataType(old_dtype).itemsize
        new_itemsize = DataType(new_dtype).itemsize
        old_axissize = old_itemsize * tensor['shape'][-1]
        if old_axissize % new_itemsize:
            raise ValueError("New type not compatible with data shape")
        tensor['shape'][-1] = old_axissize // new_itemsize
        # Consistency fix: use the bound parameter rather than the
        # closed-over `dtype`, so the transform is self-contained.
        tensor['dtype'] = new_dtype
        return hdr
    return block_view(block, header_transform)
def split_axis(block, axis, n, label=None):
    """Split an axis into (size // n, n) pieces.

    The referenced axis is divided by *n* and a new axis of length *n*
    is inserted directly after it.

    Args:
        block: Input block.
        axis (int or str): Axis to split (index or label).
        n (int): Size of the new inner axis.
        label (str or None): Label for the new axis; defaults to
            "<old label>_split".

    Raises:
        ValueError: If *n* does not evenly divide a non-frame axis.
    """
    # Set function attributes to enable capture in nested function (closure)
    def header_transform(hdr, axis=axis, n=n, label=label):
        tensor = hdr['_tensor']
        # Py3 fix: `basestring` no longer exists; string labels are `str`
        if isinstance(axis, str):
            axis = tensor['labels'].index(axis)
        shape = tensor['shape']
        if shape[axis] == -1:
            # Axis is frame axis
            # TODO: Should assert even division here instead?
            # ***TODO: Why does pipeline deadlock when this doesn't divide?
            # Py3 fix: floor division; `/` would make gulp_nframe a float
            hdr['gulp_nframe'] = (hdr['gulp_nframe'] - 1) // n + 1
        else:
            # Axis is not frame axis
            if shape[axis] % n:
                raise ValueError("Split does not evenly divide axis (%i // %i)" %
                                 (tensor['shape'][axis], n))
            shape[axis] //= n
        shape.insert(axis + 1, n)
        if 'units' in tensor:
            tensor['units'].insert(axis + 1, tensor['units'][axis])
        if 'labels' in tensor:
            if label is None:
                label = tensor['labels'][axis] + "_split"
            tensor['labels'].insert(axis + 1, label)
        if 'scales' in tensor:
            # Inner axis inherits the fine step; outer step grows by n
            tensor['scales'].insert(axis + 1, [0, tensor['scales'][axis][1]])
            tensor['scales'][axis][1] *= n
        return hdr
    return block_view(block, header_transform)
def merge_axes(block, axis1, axis2, label=None):
def header_transform(hdr, axis1=axis1, axis2=axis2, label=label):
tensor = hdr['_tensor']
if isinstance(axis1, basestring):
axis1 = tensor['labels'].index(axis1)
if isinstance(axis2, basestring):
axis2 = tensor['labels'].index(axis2)
axis1, axis2 = sorted([axis1, axis2])
if axis2 != axis1 + 1:
raise ValueError("Merge axes must be adjacent")
n = tensor['shape'][axis2]
if n == -1:
# Axis2 is frame axis
raise ValueError("Second merge axis cannot be frame axis")
elif tensor['shape'][axis1] == -1:
# Axis1 is frame axis
hdr['gulp_nframe'] *= n
else:
# Neither axis is frame axis
tensor['shape'][axis1] *= n
del tensor['shape'][axis2]
if 'scales' in tensor and 'units' in tensor:
scale1 = tensor['scales'][axis1][1]
scale2 = tensor['scales'][axis2][1]
units1 = tensor['units'][axis1]
units2 = tensor['units'][axis2]
scale2 = convert_units(scale2, units2, units1)
if not isclose(scale1, n * scale2):
raise ValueError("Scales of merge axes do not line up: "
"%f != %f" % (scale1, n * scale2))
tensor['scales'][axis1][1] = scale2
del tensor['scales'][axis2]
del tensor['units'][axis2]
if 'labels' in tensor:
if label is not None:
tensor['labels'][axis1] = label
del tensor['labels'][axis2]
return hdr
return block_view(block, header_transform)
| en | 0.76255 | # Copyright (c) 2016, The Bifrost Authors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Bifrost Authors nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. An alias to `bifrost.pipeline.block_view` Manually reinterpret the scale and/or units on an axis Manually reverse the scale factor on a given axis Add an extra dimension to the frame at position 'axis' E.g., if the shape is [-1, 3, 2], then selecting axis=1 would change the shape to be [-1, 1, 3, 2]. Axis may be negative, or a string corresponding to an existing axis label, in which case the new axis is inserted after the referenced axis. 
Remove a unitary dimension from the frame E.g., if the shape is [-1, 1, 3, 2], then selecting axis=1 would change the shape to be [-1, 3, 2]. Axis may be negative, or a string corresponding to an existing axis label. # Set function attributes to enable capture in nested function (closure) # Axis is frame axis # TODO: Should assert even division here instead? # ***TODO: Why does pipeline deadlock when this doesn't divide? # Axis is not frame axis # Axis2 is frame axis # Axis1 is frame axis # Neither axis is frame axis | 1.488988 | 1 |
botnet.py | PlannedTube9/ZeusbotPort | 0 | 6625529 | <filename>botnet.py<gh_stars>0
#!/usr/bin/python2.7
# -*- coding: utf-8
from utils import Utils
import json
import logging
import random
logger = logging.getLogger(__name__)
class Botnet:
ut = Utils()
def __init__(self, player):
self.username = player.username
self.password = <PASSWORD>
self.uhash = player.uhash
self.botNetServers = 3
self.botnet = []
self.p = player
self.ofwhat = ["fw", "av", "smash", "mwk"]
self.energy = 0
self._initbot()
def _initbot(self):
"""
Grab the amount of bots in the botnet
and populate and array of Bot class
:return: none
"""
data = self._botnetInfo()
bots = json.loads(data)
self.botnet = []
if int(bots['count']) > 0:
for i in bots['data']:
bot = Bot(i['running'], self.ofwhat[random.randint(0,3)], self.energy, i['hostname'], self.username, self.password, self.uhash)
self.botnet.append(bot)
def printbots(self):
"""
Print a list of player PCs in the botnet
:return: None
"""
for bot in self.botnet:
logger.info(bot)
def getbotnetdata(self):
"""
Return an array of bot class.
Contains all the bots in the botnet.
:return: list of bot class
"""
return self.botnet
def getInfo(self):
"""
Get info about the entire botnet.
Including if you can attack bot net servers etc.
Also botnet PC info.
:return: list of vHack serves that can be hacked.
['1','2','1']. '1' = can be hacked, '2' time not elapsed.
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_botnetInfo.php")
response = json.loads(response)
return response
def attack(self):
"""
Check if vHack server botnet is attackable,
then attack if can.
:return: none
"""
self._initbot()
logger.info("Trying Bot Net")
cinfo = self.getInfo()
for i in range(1, self.botNetServers + 1):
if cinfo[i - 1] == '1':
logger.debug('I am attacking #{}'.format(i))
if i == 1:
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_attackCompany.php", company=str(i))
else:
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_attackCompany" + str(i) + ".php", company=str(i))
logger.debug('I attacked #{} with response {}'.format(i, response))
if response == '0':
logger.info('#{} Netcoins gained'.format(i))
else:
logger.info('#{} Failed! No netcoins...'.format(i))
else:
logger.info("Botnet #{} not hackable yet".format(i))
def upgradebotnet(self, hostname, running, count):
"""
Check if there is enough money to upgrade a botnet PC.
Cycle through and upgrade until no money.
:return: None
"""
ofwhat = self.ofwhat[random.randint(0,3)]
logger.info("Prepare attempting to upgrade botnet PC "+ hostname + " [upgrading: " + ofwhat + "]")
get_infobot = self.getInfo()
if (int(get_infobot['data'][count]['strength']) == 3000):
logger.info("The bot '"+hostname+"' is on max strength [max strength 3000] ")
return True
if (int(get_infobot['data'][count]['running']) == 0):
new_bal = self.upgradesinglebot(hostname, ofwhat)
if new_bal:
logger.info("Waiting! Doing updates for bot '" + hostname + "' ..")
return True
else:
logger.info("You don't have enough energy to upgrade '" + hostname + "'! :(")
return False
else:
logger.info("Waiting! Doing updates for bot '" + hostname + "' ..")
return False
logger.error("The bot '{}' is not upgradeable".format(hostname))
return False
def _botnetInfo(self):
"""
Get the botnet information including vHack servers and PC data.
:return: string
'{"count":"14",
"data":[{"bID":"1","bLVL":"100","bSTR":"100","bPRICE":"10000000"},
{"bID":"2","bLVL":"100","bSTR":"100","bPRICE":"10000000"}],
"strength":23,"resethours1":"","resetminutes1":"14","resethours2":"4","resetminutes2":"15",
"resethours3":"3","resetminutes3":"15",
"canAtt1":"2","canAtt2":"2","canAtt3":"2"}'
"""
temp = self.ut.requestString(self.username, self.password, self.uhash, "vh_botnetInfo.php")
return temp
def upgradesinglebot(self, hostname, ofwhat):
"""
Pass in bot class object and call upgrade function based on bot ID.
details :
{u'strength': u'22', u'old': u'30', u'mm': u'68359859',
u'money': u'66259859', u'costs': u'2100000',
u'lvl': u'21', u'new': u'22'}
current lvl, bot number, x, x, upgrade cost, lvl, next lvl
:return: None
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_upgradePC.php", hostname=hostname, ofwhat=ofwhat, inst="0", much="1")
jsons = json.loads(response)
if int(jsons['result']) == 0:
return True
else:
logger.error("Upgrades on " + hostname + " Failed !")
return False
def __repr__(self):
return "Botnet details: vHackServers: {0}, Bot Net PC's: {1}".format(self.botNetServers, self.botnet)
class Bot:
ut = Utils()
def __init__(self, running, ofwhat, energy, hostname, username, password, uhash):
self.username = username
self.uhash = uhash
self.password = password
self.running = int(running)
self.ofwhat = ofwhat
self.energy = energy
self.hostname = hostname
def botupgradable(self, running):
"""
Determine if botnet PC is at max level or not.
:return: Bool
"""
if running == 0:
return True
else:
return False
def nextlevelcostenergy(self):
"""
Return the cost of upgrading bot to the next level
:return:int
"""
return self.energy
def parse_json_stream(self, stream):
decoder = json.JSONDecoder()
while stream:
obj, idx = decoder.raw_decode(stream)
yield obj
stream = stream[idx:].lstrip()
def upgradesinglebot(self, hostname, ofwhat):
"""
Pass in bot class object and call upgrade function based on bot ID.
details :
{u'strength': u'22', u'old': u'30', u'mm': u'68359859',
u'money': u'66259859', u'costs': u'2100000',
u'lvl': u'21', u'new': u'22'}
current lvl, bot number, x, x, upgrade cost, lvl, next lvl
:return: None
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_upgradePC.php", hostname=hostname, ofwhat=ofwhat)
#response = response.split('}{')[0] + '}'
#jsons = json.loads(response)
#logger.info(jsons)
return True
def __repr__(self):
return "Bot details: running: {0}, energy: {1}, upgrade: {2}, botname: {3}".format(self.running, self.energy, self.ofwhat, self.hostname)
| <filename>botnet.py<gh_stars>0
#!/usr/bin/python2.7
# -*- coding: utf-8
from utils import Utils
import json
import logging
import random
logger = logging.getLogger(__name__)
class Botnet:
ut = Utils()
def __init__(self, player):
self.username = player.username
self.password = <PASSWORD>
self.uhash = player.uhash
self.botNetServers = 3
self.botnet = []
self.p = player
self.ofwhat = ["fw", "av", "smash", "mwk"]
self.energy = 0
self._initbot()
def _initbot(self):
"""
Grab the amount of bots in the botnet
and populate and array of Bot class
:return: none
"""
data = self._botnetInfo()
bots = json.loads(data)
self.botnet = []
if int(bots['count']) > 0:
for i in bots['data']:
bot = Bot(i['running'], self.ofwhat[random.randint(0,3)], self.energy, i['hostname'], self.username, self.password, self.uhash)
self.botnet.append(bot)
def printbots(self):
"""
Print a list of player PCs in the botnet
:return: None
"""
for bot in self.botnet:
logger.info(bot)
def getbotnetdata(self):
"""
Return an array of bot class.
Contains all the bots in the botnet.
:return: list of bot class
"""
return self.botnet
def getInfo(self):
"""
Get info about the entire botnet.
Including if you can attack bot net servers etc.
Also botnet PC info.
:return: list of vHack serves that can be hacked.
['1','2','1']. '1' = can be hacked, '2' time not elapsed.
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_botnetInfo.php")
response = json.loads(response)
return response
def attack(self):
"""
Check if vHack server botnet is attackable,
then attack if can.
:return: none
"""
self._initbot()
logger.info("Trying Bot Net")
cinfo = self.getInfo()
for i in range(1, self.botNetServers + 1):
if cinfo[i - 1] == '1':
logger.debug('I am attacking #{}'.format(i))
if i == 1:
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_attackCompany.php", company=str(i))
else:
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_attackCompany" + str(i) + ".php", company=str(i))
logger.debug('I attacked #{} with response {}'.format(i, response))
if response == '0':
logger.info('#{} Netcoins gained'.format(i))
else:
logger.info('#{} Failed! No netcoins...'.format(i))
else:
logger.info("Botnet #{} not hackable yet".format(i))
def upgradebotnet(self, hostname, running, count):
"""
Check if there is enough money to upgrade a botnet PC.
Cycle through and upgrade until no money.
:return: None
"""
ofwhat = self.ofwhat[random.randint(0,3)]
logger.info("Prepare attempting to upgrade botnet PC "+ hostname + " [upgrading: " + ofwhat + "]")
get_infobot = self.getInfo()
if (int(get_infobot['data'][count]['strength']) == 3000):
logger.info("The bot '"+hostname+"' is on max strength [max strength 3000] ")
return True
if (int(get_infobot['data'][count]['running']) == 0):
new_bal = self.upgradesinglebot(hostname, ofwhat)
if new_bal:
logger.info("Waiting! Doing updates for bot '" + hostname + "' ..")
return True
else:
logger.info("You don't have enough energy to upgrade '" + hostname + "'! :(")
return False
else:
logger.info("Waiting! Doing updates for bot '" + hostname + "' ..")
return False
logger.error("The bot '{}' is not upgradeable".format(hostname))
return False
def _botnetInfo(self):
"""
Get the botnet information including vHack servers and PC data.
:return: string
'{"count":"14",
"data":[{"bID":"1","bLVL":"100","bSTR":"100","bPRICE":"10000000"},
{"bID":"2","bLVL":"100","bSTR":"100","bPRICE":"10000000"}],
"strength":23,"resethours1":"","resetminutes1":"14","resethours2":"4","resetminutes2":"15",
"resethours3":"3","resetminutes3":"15",
"canAtt1":"2","canAtt2":"2","canAtt3":"2"}'
"""
temp = self.ut.requestString(self.username, self.password, self.uhash, "vh_botnetInfo.php")
return temp
def upgradesinglebot(self, hostname, ofwhat):
"""
Pass in bot class object and call upgrade function based on bot ID.
details :
{u'strength': u'22', u'old': u'30', u'mm': u'68359859',
u'money': u'66259859', u'costs': u'2100000',
u'lvl': u'21', u'new': u'22'}
current lvl, bot number, x, x, upgrade cost, lvl, next lvl
:return: None
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_upgradePC.php", hostname=hostname, ofwhat=ofwhat, inst="0", much="1")
jsons = json.loads(response)
if int(jsons['result']) == 0:
return True
else:
logger.error("Upgrades on " + hostname + " Failed !")
return False
def __repr__(self):
return "Botnet details: vHackServers: {0}, Bot Net PC's: {1}".format(self.botNetServers, self.botnet)
class Bot:
ut = Utils()
def __init__(self, running, ofwhat, energy, hostname, username, password, uhash):
self.username = username
self.uhash = uhash
self.password = password
self.running = int(running)
self.ofwhat = ofwhat
self.energy = energy
self.hostname = hostname
def botupgradable(self, running):
"""
Determine if botnet PC is at max level or not.
:return: Bool
"""
if running == 0:
return True
else:
return False
def nextlevelcostenergy(self):
"""
Return the cost of upgrading bot to the next level
:return:int
"""
return self.energy
def parse_json_stream(self, stream):
decoder = json.JSONDecoder()
while stream:
obj, idx = decoder.raw_decode(stream)
yield obj
stream = stream[idx:].lstrip()
def upgradesinglebot(self, hostname, ofwhat):
"""
Pass in bot class object and call upgrade function based on bot ID.
details :
{u'strength': u'22', u'old': u'30', u'mm': u'68359859',
u'money': u'66259859', u'costs': u'2100000',
u'lvl': u'21', u'new': u'22'}
current lvl, bot number, x, x, upgrade cost, lvl, next lvl
:return: None
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_upgradePC.php", hostname=hostname, ofwhat=ofwhat)
#response = response.split('}{')[0] + '}'
#jsons = json.loads(response)
#logger.info(jsons)
return True
def __repr__(self):
return "Bot details: running: {0}, energy: {1}, upgrade: {2}, botname: {3}".format(self.running, self.energy, self.ofwhat, self.hostname)
| en | 0.640965 | #!/usr/bin/python2.7 # -*- coding: utf-8 Grab the amount of bots in the botnet and populate and array of Bot class :return: none Print a list of player PCs in the botnet :return: None Return an array of bot class. Contains all the bots in the botnet. :return: list of bot class Get info about the entire botnet. Including if you can attack bot net servers etc. Also botnet PC info. :return: list of vHack serves that can be hacked. ['1','2','1']. '1' = can be hacked, '2' time not elapsed. Check if vHack server botnet is attackable, then attack if can. :return: none #{}'.format(i)) #{} with response {}'.format(i, response)) #{} not hackable yet".format(i)) Check if there is enough money to upgrade a botnet PC. Cycle through and upgrade until no money. :return: None Get the botnet information including vHack servers and PC data. :return: string '{"count":"14", "data":[{"bID":"1","bLVL":"100","bSTR":"100","bPRICE":"10000000"}, {"bID":"2","bLVL":"100","bSTR":"100","bPRICE":"10000000"}], "strength":23,"resethours1":"","resetminutes1":"14","resethours2":"4","resetminutes2":"15", "resethours3":"3","resetminutes3":"15", "canAtt1":"2","canAtt2":"2","canAtt3":"2"}' Pass in bot class object and call upgrade function based on bot ID. details : {u'strength': u'22', u'old': u'30', u'mm': u'68359859', u'money': u'66259859', u'costs': u'2100000', u'lvl': u'21', u'new': u'22'} current lvl, bot number, x, x, upgrade cost, lvl, next lvl :return: None Determine if botnet PC is at max level or not. :return: Bool Return the cost of upgrading bot to the next level :return:int Pass in bot class object and call upgrade function based on bot ID. details : {u'strength': u'22', u'old': u'30', u'mm': u'68359859', u'money': u'66259859', u'costs': u'2100000', u'lvl': u'21', u'new': u'22'} current lvl, bot number, x, x, upgrade cost, lvl, next lvl :return: None #response = response.split('}{')[0] + '}' #jsons = json.loads(response) #logger.info(jsons) | 3.122351 | 3 |
tests/models/xDeepFM_test.py | HazzaCheng/DeepCTR | 2 | 6625530 | <gh_stars>1-10
import pytest
import tensorflow as tf
from deepctr.estimator import xDeepFMEstimator
from deepctr.models import xDeepFM
from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \
Estimator_TEST_TF1
@pytest.mark.parametrize(
'dnn_hidden_units,cin_layer_size,cin_split_half,cin_activation,sparse_feature_num,dense_feature_dim',
[ # ((), (), True, 'linear', 1, 2),
((8,), (), True, 'linear', 1, 1),
((), (8,), True, 'linear', 2, 2),
((8,), (8,), False, 'relu', 1, 0)
]
)
def test_xDeepFM(dnn_hidden_units, cin_layer_size, cin_split_half, cin_activation, sparse_feature_num,
dense_feature_dim):
model_name = "xDeepFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = xDeepFM(feature_columns, feature_columns, dnn_hidden_units=dnn_hidden_units, cin_layer_size=cin_layer_size,
cin_split_half=cin_split_half, cin_activation=cin_activation, dnn_dropout=0.5)
check_model(model, model_name, x, y)
# @pytest.mark.parametrize(
# 'hidden_size,cin_layer_size,',
# [((8,), (3, 8)),
# ]
# )
# def test_xDeepFM_invalid(hidden_size, cin_layer_size):
# feature_dim_dict = {'sparse': {'sparse_1': 2, 'sparse_2': 5,
# 'sparse_3': 10}, 'dense': ['dense_1', 'dense_2', 'dense_3']}
# with pytest.raises(ValueError):
# _ = xDeepFM(feature_dim_dict, None, dnn_hidden_units=hidden_size, cin_layer_size=cin_layer_size)
@pytest.mark.parametrize(
'dnn_hidden_units,cin_layer_size,cin_split_half,cin_activation,sparse_feature_num,dense_feature_dim',
[ # ((), (), True, 'linear', 1, 2),
((8,), (8,), False, 'relu', 2, 1)
]
)
def test_xDeepFMEstimator(dnn_hidden_units, cin_layer_size, cin_split_half, cin_activation, sparse_feature_num,
dense_feature_dim):
if not Estimator_TEST_TF1 and tf.__version__ < "2.2.0":
return
model_name = "xDeepFM"
sample_size = SAMPLE_SIZE
linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size,
sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = xDeepFMEstimator(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=dnn_hidden_units,
cin_layer_size=cin_layer_size,
cin_split_half=cin_split_half, cin_activation=cin_activation, dnn_dropout=0.5)
check_estimator(model, input_fn)
if __name__ == "__main__":
pass
| import pytest
import tensorflow as tf
from deepctr.estimator import xDeepFMEstimator
from deepctr.models import xDeepFM
from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \
Estimator_TEST_TF1
@pytest.mark.parametrize(
'dnn_hidden_units,cin_layer_size,cin_split_half,cin_activation,sparse_feature_num,dense_feature_dim',
[ # ((), (), True, 'linear', 1, 2),
((8,), (), True, 'linear', 1, 1),
((), (8,), True, 'linear', 2, 2),
((8,), (8,), False, 'relu', 1, 0)
]
)
def test_xDeepFM(dnn_hidden_units, cin_layer_size, cin_split_half, cin_activation, sparse_feature_num,
dense_feature_dim):
model_name = "xDeepFM"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = xDeepFM(feature_columns, feature_columns, dnn_hidden_units=dnn_hidden_units, cin_layer_size=cin_layer_size,
cin_split_half=cin_split_half, cin_activation=cin_activation, dnn_dropout=0.5)
check_model(model, model_name, x, y)
# @pytest.mark.parametrize(
# 'hidden_size,cin_layer_size,',
# [((8,), (3, 8)),
# ]
# )
# def test_xDeepFM_invalid(hidden_size, cin_layer_size):
# feature_dim_dict = {'sparse': {'sparse_1': 2, 'sparse_2': 5,
# 'sparse_3': 10}, 'dense': ['dense_1', 'dense_2', 'dense_3']}
# with pytest.raises(ValueError):
# _ = xDeepFM(feature_dim_dict, None, dnn_hidden_units=hidden_size, cin_layer_size=cin_layer_size)
@pytest.mark.parametrize(
'dnn_hidden_units,cin_layer_size,cin_split_half,cin_activation,sparse_feature_num,dense_feature_dim',
[ # ((), (), True, 'linear', 1, 2),
((8,), (8,), False, 'relu', 2, 1)
]
)
def test_xDeepFMEstimator(dnn_hidden_units, cin_layer_size, cin_split_half, cin_activation, sparse_feature_num,
dense_feature_dim):
if not Estimator_TEST_TF1 and tf.__version__ < "2.2.0":
return
model_name = "xDeepFM"
sample_size = SAMPLE_SIZE
linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size,
sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = xDeepFMEstimator(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=dnn_hidden_units,
cin_layer_size=cin_layer_size,
cin_split_half=cin_split_half, cin_activation=cin_activation, dnn_dropout=0.5)
check_estimator(model, input_fn)
if __name__ == "__main__":
pass | en | 0.27497 | # ((), (), True, 'linear', 1, 2), # @pytest.mark.parametrize( # 'hidden_size,cin_layer_size,', # [((8,), (3, 8)), # ] # ) # def test_xDeepFM_invalid(hidden_size, cin_layer_size): # feature_dim_dict = {'sparse': {'sparse_1': 2, 'sparse_2': 5, # 'sparse_3': 10}, 'dense': ['dense_1', 'dense_2', 'dense_3']} # with pytest.raises(ValueError): # _ = xDeepFM(feature_dim_dict, None, dnn_hidden_units=hidden_size, cin_layer_size=cin_layer_size) # ((), (), True, 'linear', 1, 2), | 2.334799 | 2 |
test/functional/feature_blockfilterindex_prune.py | picacoin/picacoin | 1 | 6625531 | <filename>test/functional/feature_blockfilterindex_prune.py
#!/usr/bin/env python3
# Copyright (c) 2020 The Picacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test blockfilterindex in conjunction with prune."""
from test_framework.test_framework import PicacoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
class FeatureBlockfilterindexPruneTest(PicacoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-fastprune", "-prune=1", "-blockfilterindex=1"]]
def sync_index(self, height):
expected = {'basic block filter index': {'synced': True, 'best_block_height': height}}
self.wait_until(lambda: self.nodes[0].getindexinfo() == expected)
def run_test(self):
self.log.info("check if we can access a blockfilter when pruning is enabled but no blocks are actually pruned")
self.sync_index(height=200)
assert_greater_than(len(self.nodes[0].getblockfilter(self.nodes[0].getbestblockhash())['filter']), 0)
# Mine two batches of blocks to avoid hitting NODE_NETWORK_LIMITED_MIN_BLOCKS disconnection
self.nodes[0].generate(250)
self.sync_all()
self.nodes[0].generate(250)
self.sync_all()
self.sync_index(height=700)
self.log.info("prune some blocks")
pruneheight = self.nodes[0].pruneblockchain(400)
assert_equal(pruneheight, 248)
self.log.info("check if we can access the tips blockfilter when we have pruned some blocks")
assert_greater_than(len(self.nodes[0].getblockfilter(self.nodes[0].getbestblockhash())['filter']), 0)
self.log.info("check if we can access the blockfilter of a pruned block")
assert_greater_than(len(self.nodes[0].getblockfilter(self.nodes[0].getblockhash(2))['filter']), 0)
self.log.info("start node without blockfilterindex")
self.restart_node(0, extra_args=["-fastprune", "-prune=1"])
self.log.info("make sure accessing the blockfilters throws an error")
assert_raises_rpc_error(-1, "Index is not enabled for filtertype basic", self.nodes[0].getblockfilter, self.nodes[0].getblockhash(2))
self.nodes[0].generate(1000)
self.log.info("prune below the blockfilterindexes best block while blockfilters are disabled")
pruneheight_new = self.nodes[0].pruneblockchain(1000)
assert_greater_than(pruneheight_new, pruneheight)
self.stop_node(0)
self.log.info("make sure we get an init error when starting the node again with block filters")
with self.nodes[0].assert_debug_log(["basic block filter index best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"]):
self.nodes[0].assert_start_raises_init_error(extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"])
self.log.info("make sure the node starts again with the -reindex arg")
self.start_node(0, extra_args = ["-fastprune", "-prune=1", "-blockfilterindex", "-reindex"])
if __name__ == '__main__':
FeatureBlockfilterindexPruneTest().main()
| <filename>test/functional/feature_blockfilterindex_prune.py
#!/usr/bin/env python3
# Copyright (c) 2020 The Picacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test blockfilterindex in conjunction with prune."""
from test_framework.test_framework import PicacoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
class FeatureBlockfilterindexPruneTest(PicacoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-fastprune", "-prune=1", "-blockfilterindex=1"]]
def sync_index(self, height):
expected = {'basic block filter index': {'synced': True, 'best_block_height': height}}
self.wait_until(lambda: self.nodes[0].getindexinfo() == expected)
def run_test(self):
self.log.info("check if we can access a blockfilter when pruning is enabled but no blocks are actually pruned")
self.sync_index(height=200)
assert_greater_than(len(self.nodes[0].getblockfilter(self.nodes[0].getbestblockhash())['filter']), 0)
# Mine two batches of blocks to avoid hitting NODE_NETWORK_LIMITED_MIN_BLOCKS disconnection
self.nodes[0].generate(250)
self.sync_all()
self.nodes[0].generate(250)
self.sync_all()
self.sync_index(height=700)
self.log.info("prune some blocks")
pruneheight = self.nodes[0].pruneblockchain(400)
assert_equal(pruneheight, 248)
self.log.info("check if we can access the tips blockfilter when we have pruned some blocks")
assert_greater_than(len(self.nodes[0].getblockfilter(self.nodes[0].getbestblockhash())['filter']), 0)
self.log.info("check if we can access the blockfilter of a pruned block")
assert_greater_than(len(self.nodes[0].getblockfilter(self.nodes[0].getblockhash(2))['filter']), 0)
self.log.info("start node without blockfilterindex")
self.restart_node(0, extra_args=["-fastprune", "-prune=1"])
self.log.info("make sure accessing the blockfilters throws an error")
assert_raises_rpc_error(-1, "Index is not enabled for filtertype basic", self.nodes[0].getblockfilter, self.nodes[0].getblockhash(2))
self.nodes[0].generate(1000)
self.log.info("prune below the blockfilterindexes best block while blockfilters are disabled")
pruneheight_new = self.nodes[0].pruneblockchain(1000)
assert_greater_than(pruneheight_new, pruneheight)
self.stop_node(0)
self.log.info("make sure we get an init error when starting the node again with block filters")
with self.nodes[0].assert_debug_log(["basic block filter index best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"]):
self.nodes[0].assert_start_raises_init_error(extra_args=["-fastprune", "-prune=1", "-blockfilterindex=1"])
self.log.info("make sure the node starts again with the -reindex arg")
self.start_node(0, extra_args = ["-fastprune", "-prune=1", "-blockfilterindex", "-reindex"])
if __name__ == '__main__':
FeatureBlockfilterindexPruneTest().main()
| en | 0.645886 | #!/usr/bin/env python3 # Copyright (c) 2020 The Picacoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. Test blockfilterindex in conjunction with prune. # Mine two batches of blocks to avoid hitting NODE_NETWORK_LIMITED_MIN_BLOCKS disconnection | 2.164608 | 2 |
yt/testing.py | tukss/yt | 0 | 6625532 | <reponame>tukss/yt
import functools
import hashlib
import importlib
import itertools as it
import os
import pickle
import shutil
import tempfile
import unittest
import matplotlib
import numpy as np
from numpy.random import RandomState
from unyt.exceptions import UnitOperationError
from yt.config import ytcfg
from yt.funcs import iterable
from yt.loaders import load
from yt.units.yt_array import YTArray, YTQuantity
# we import this in a weird way from numpy.testing to avoid triggering
# flake8 errors from the unused imports. These test functions are imported
# elsewhere in yt from here so we want them to be imported here.
from numpy.testing import assert_array_equal, assert_almost_equal # NOQA isort:skip
from numpy.testing import assert_equal, assert_array_less # NOQA isort:skip
from numpy.testing import assert_string_equal # NOQA isort:skip
from numpy.testing import assert_array_almost_equal_nulp # NOQA isort:skip
from numpy.testing import assert_allclose, assert_raises # NOQA isort:skip
from numpy.testing import assert_approx_equal # NOQA isort:skip
from numpy.testing import assert_array_almost_equal # NOQA isort:skip
ANSWER_TEST_TAG = "answer_test"
# Expose assert_true and assert_less_equal from unittest.TestCase
# this is adopted from nose. Doing this here allows us to avoid importing
# nose at the top level.
class _Dummy(unittest.TestCase):
def nop():
pass
_t = _Dummy("nop")
assert_true = getattr(_t, "assertTrue") # noqa: B009
assert_less_equal = getattr(_t, "assertLessEqual") # noqa: B009
def assert_rel_equal(a1, a2, decimals, err_msg="", verbose=True):
    """Assert that ``a1`` and ``a2`` agree to ``decimals`` decimal places
    in a *relative* sense (their ratio is compared against 1).

    NaNs are tolerated as long as they occur at matching positions, and
    (near-)zero entries are masked out so the ratio is well defined.

    Parameters
    ----------
    a1, a2 : scalar or ndarray
        The values to compare.  Arrays must have the same size.
    decimals : int
        Number of decimal places the ratio must match 1 to.
    err_msg : str, optional
        Message forwarded to :func:`numpy.testing.assert_almost_equal`.
    verbose : bool, optional
        Forwarded to :func:`numpy.testing.assert_almost_equal`.
    """
    # We have nan checks in here because occasionally we have fields that get
    # weighted without non-zero weights. I'm looking at you, particle fields!
    if isinstance(a1, np.ndarray):
        assert a1.size == a2.size
        # Work on copies so the NaN/zero masking below does not silently
        # mutate the caller's arrays (the previous implementation did).
        a1 = np.array(a1, copy=True)
        a2 = np.array(a2, copy=True)
        # Mask out NaNs: they must occur at the same positions in both arrays.
        assert (np.isnan(a1) == np.isnan(a2)).all()
        a1[np.isnan(a1)] = 1.0
        a2[np.isnan(a2)] = 1.0
        # Mask out (near-)zero entries so the ratio below is well defined;
        # both arrays must be zero at the same positions.
        ind1 = np.array(np.abs(a1) < np.finfo(a1.dtype).eps)
        ind2 = np.array(np.abs(a2) < np.finfo(a2.dtype).eps)
        assert (ind1 == ind2).all()
        a1[ind1] = 1.0
        a2[ind2] = 1.0
    elif np.any(np.isnan(a1)) and np.any(np.isnan(a2)):
        # Scalar NaN on both sides counts as equal.
        return True
    if not isinstance(a1, np.ndarray) and a1 == a2 == 0.0:
        # Both scalars are exactly zero; compare 1/1 instead of 0/0.
        a1 = a2 = 1.0
    return assert_almost_equal(
        np.array(a1) / np.array(a2), 1.0, decimals, err_msg=err_msg, verbose=verbose
    )
def amrspace(extent, levels=7, cells=8):
    """Creates two numpy arrays representing the left and right bounds of
    an AMR grid as well as an array for the AMR level of each cell.

    Parameters
    ----------
    extent : array-like
        This a sequence of length 2*ndims that is the bounds of each dimension.
        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
    levels : int or sequence of ints, optional
        This is the number of AMR refinement levels. If given as a sequence (of
        length ndims), then each dimension will be refined down to this level.
        All values in this array must be the same or zero. A zero valued dimension
        indicates that this dim should not be refined. Taking the 3D cylindrical
        example above if we don't want refine theta but want r and z at 5 we would
        set levels=(5, 5, 0).
    cells : int, optional
        This is the number of cells per refinement level.

    Returns
    -------
    left : float ndarray, shape=(npoints, ndims)
        The left AMR grid points.
    right : float ndarray, shape=(npoints, ndims)
        The right AMR grid points.
    level : int ndarray, shape=(npoints,)
        The AMR level for each point.

    Examples
    --------
    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
    >>> print l
    [[ 0.     1.     0.   ]
     [ 0.25   1.     0.   ]
     [ 0.     1.125  0.   ]
     [ 0.25   1.125  0.   ]
     [ 0.5    1.     0.   ]
     [ 0.     1.25   0.   ]
     [ 0.5    1.25   0.   ]
     [ 1.     1.     0.   ]
     [ 0.     1.5    0.   ]
     [ 1.     1.5    0.   ]]

    """
    extent = np.asarray(extent, dtype="f8")
    # Per-dimension physical widths of the domain.
    dextent = extent[1::2] - extent[::2]
    ndims = len(dextent)

    # Normalize ``levels`` to a per-dimension int32 array and validate that
    # every refined dimension uses the same maximum level.
    if isinstance(levels, int):
        minlvl = maxlvl = levels
        levels = np.array([levels] * ndims, dtype="int32")
    else:
        levels = np.asarray(levels, dtype="int32")
        minlvl = levels.min()
        maxlvl = levels.max()
        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
            raise ValueError("all levels must have the same value or zero.")
    dims_zero = levels == 0  # dimensions that are never refined
    dims_nonzero = ~dims_zero
    ndims_nonzero = dims_nonzero.sum()

    # Total number of patches: (cells**d - 1) per level plus the single
    # innermost patch.
    npoints = (cells ** ndims_nonzero - 1) * maxlvl + 1
    left = np.empty((npoints, ndims), dtype="float64")
    right = np.empty((npoints, ndims), dtype="float64")
    level = np.empty(npoints, dtype="int32")

    # fill zero dims
    left[:, dims_zero] = extent[::2][dims_zero]
    right[:, dims_zero] = extent[1::2][dims_zero]

    # fill non-zero dims
    dcell = 1.0 / cells
    left_slice = tuple(
        [
            slice(extent[2 * n], extent[2 * n + 1], extent[2 * n + 1])
            if dims_zero[n]
            else slice(0.0, 1.0, dcell)
            for n in range(ndims)
        ]
    )
    right_slice = tuple(
        [
            slice(extent[2 * n + 1], extent[2 * n], -extent[2 * n + 1])
            if dims_zero[n]
            else slice(dcell, 1.0 + dcell, dcell)
            for n in range(ndims)
        ]
    )
    # Normalized ([0, 1]) cell-edge grids; ``flat[ndims:]`` drops the first
    # grid point, which is handled separately as entry 0 below.
    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
    lng_zero = left_norm_grid[:, dims_zero]
    lng_nonzero = left_norm_grid[:, dims_nonzero]

    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
    rng_zero = right_norm_grid[:, dims_zero]
    rng_nonzero = right_norm_grid[:, dims_nonzero]

    # Entry 0 is the most-refined patch anchored at the domain's left edge.
    level[0] = maxlvl
    left[0, :] = extent[::2]
    right[0, dims_zero] = extent[1::2][dims_zero]
    right[0, dims_nonzero] = (dcell ** maxlvl) * dextent[dims_nonzero] + extent[::2][
        dims_nonzero
    ]

    # Walk from the finest level down to level 1, rescaling the normalized
    # edge grids into physical coordinates for each level's patches.
    for i, lvl in enumerate(range(maxlvl, 0, -1)):
        start = (cells ** ndims_nonzero - 1) * i + 1
        stop = (cells ** ndims_nonzero - 1) * (i + 1) + 1
        dsize = dcell ** (lvl - 1) * dextent[dims_nonzero]
        level[start:stop] = lvl
        left[start:stop, dims_zero] = lng_zero
        left[start:stop, dims_nonzero] = lng_nonzero * dsize + extent[::2][dims_nonzero]
        right[start:stop, dims_zero] = rng_zero
        right[start:stop, dims_nonzero] = (
            rng_nonzero * dsize + extent[::2][dims_nonzero]
        )

    return left, right, level
def fake_random_ds(
    ndims,
    peak_value=1.0,
    fields=("density", "velocity_x", "velocity_y", "velocity_z"),
    units=("g/cm**3", "cm/s", "cm/s", "cm/s"),
    particle_fields=None,
    particle_field_units=None,
    negative=False,
    nprocs=1,
    particles=0,
    length_unit=1.0,
    unit_system="cgs",
    bbox=None,
):
    """Build an in-memory uniform-grid dataset filled with random field data.

    ``ndims`` may be a single int (cubic grid) or a length-3 sequence.
    ``negative`` may be a single bool or one bool per field; a negative
    field is offset so its values span +/- 0.5 * peak_value instead of
    [0, peak_value).  If ``particles`` is non-zero, random particle fields
    are added as well.  All randomness uses a fixed seed, so repeated calls
    produce identical datasets.
    """
    from yt.loaders import load_uniform_grid

    # Fixed seed: repeated calls yield identical "random" data.
    prng = RandomState(0x4D3D3D3)
    if not iterable(ndims):
        ndims = [ndims, ndims, ndims]
    else:
        assert len(ndims) == 3
    if not iterable(negative):
        negative = [negative for f in fields]
    assert len(fields) == len(negative)
    # Per-field offset: 0.5 shifts samples from [0, 1) to [-0.5, 0.5).
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = {}
    for field, offset, u in zip(fields, offsets, units):
        v = (prng.random_sample(ndims) - offset) * peak_value
        if field[0] == "all":
            # Tuple field names of the form ("all", ...) are flattened --
            # presumably particle-union fields are expected to be 1D;
            # confirm against callers.
            v = v.ravel()
        data[field] = (v, u)
    if particles:
        if particle_fields is not None:
            # Caller-specified particle fields; position/velocity fields are
            # vectors (N, 3), everything else is a scalar array of length N.
            for field, unit in zip(particle_fields, particle_field_units):
                if field in ("particle_position", "particle_velocity"):
                    data["io", field] = (prng.random_sample((int(particles), 3)), unit)
                else:
                    data["io", field] = (prng.random_sample(size=int(particles)), unit)
        else:
            # Default particle fields: positions in [0, 1) code lengths,
            # velocities in [-0.5, 0.5) cm/s, masses in [0, 1) g.
            for f in (f"particle_position_{ax}" for ax in "xyz"):
                data["io", f] = (prng.random_sample(size=particles), "code_length")
            for f in (f"particle_velocity_{ax}" for ax in "xyz"):
                data["io", f] = (prng.random_sample(size=particles) - 0.5, "cm/s")
            data["io", "particle_mass"] = (prng.random_sample(particles), "g")
    ug = load_uniform_grid(
        data,
        ndims,
        length_unit=length_unit,
        nprocs=nprocs,
        unit_system=unit_system,
        bbox=bbox,
    )
    return ug
# Domain (left_edge, right_edge) bounds used by fake_amr_ds for each
# supported geometry; the axis ordering follows the trailing comment on
# each entry.
_geom_transforms = {
    # These are the bounds we want. Cartesian we just assume goes 0 .. 1.
    "cartesian": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
    "spherical": ((0.0, 0.0, 0.0), (1.0, np.pi, 2 * np.pi)),
    "cylindrical": ((0.0, 0.0, 0.0), (1.0, 1.0, 2.0 * np.pi)),  # rzt
    "polar": ((0.0, 0.0, 0.0), (1.0, 2.0 * np.pi, 1.0)),  # rtz
    "geographic": ((-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0)),  # latlonalt
    "internal_geographic": ((-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0)),  # latlondep
}
def fake_amr_ds(
    fields=("Density",), geometry="cartesian", particles=0, length_unit=None
):
    """Build an in-memory AMR dataset from the canned ``_amr_grid_index``
    grid layout, with random data in each requested field.

    ``geometry`` selects the domain bounds from ``_geom_transforms``.
    If ``particles`` is non-zero, per-grid random particle fields are added.
    """
    from yt.loaders import load_amr_grids

    # Fixed seed: repeated calls yield identical datasets.
    prng = RandomState(0x4D3D3D3)
    LE, RE = _geom_transforms[geometry]
    LE = np.array(LE)
    RE = np.array(RE)
    data = []
    for gspec in _amr_grid_index:
        level, left_edge, right_edge, dims = gspec
        # Rescale the normalized [0, 1] grid edges into this geometry's bounds.
        left_edge = left_edge * (RE - LE) + LE
        right_edge = right_edge * (RE - LE) + LE
        gdata = dict(
            level=level, left_edge=left_edge, right_edge=right_edge, dimensions=dims
        )
        for f in fields:
            gdata[f] = prng.random_sample(dims)
        if particles:
            for i, f in enumerate(f"particle_position_{ax}" for ax in "xyz"):
                pdata = prng.random_sample(particles)
                # NOTE(review): dividing by the edge span looks suspicious --
                # mapping [0, 1) samples into [left, right) would normally
                # *multiply* by (right - left).  For non-unit spans this can
                # place particles outside the grid; confirm before relying
                # on particle positions here.
                pdata /= right_edge[i] - left_edge[i]
                pdata += left_edge[i]
                gdata["io", f] = (pdata, "code_length")
            for f in (f"particle_velocity_{ax}" for ax in "xyz"):
                gdata["io", f] = (prng.random_sample(particles) - 0.5, "cm/s")
            gdata["io", "particle_mass"] = (prng.random_sample(particles), "g")
        data.append(gdata)
    bbox = np.array([LE, RE]).T
    return load_amr_grids(
        data, [32, 32, 32], geometry=geometry, bbox=bbox, length_unit=length_unit
    )
def fake_particle_ds(
    fields=(
        "particle_position_x",
        "particle_position_y",
        "particle_position_z",
        "particle_mass",
        "particle_velocity_x",
        "particle_velocity_y",
        "particle_velocity_z",
    ),
    units=("cm", "cm", "cm", "g", "cm/s", "cm/s", "cm/s"),
    negative=(False, False, False, False, True, True, True),
    npart=16 ** 3,
    length_unit=1.0,
    data=None,
):
    """Build an in-memory particle-only dataset with random particle fields.

    Position fields are drawn from a normal distribution centered on 0.5
    and clipped to [0, 1]; every other field is uniform in [0, 1) shifted
    by 0.5 when the corresponding ``negative`` entry is True.  Entries
    already present in ``data`` are used as-is.
    """
    from yt.loaders import load_particles

    # Fixed seed: repeated calls yield identical datasets.
    prng = RandomState(0x4D3D3D3)
    if not iterable(negative):
        negative = [negative for f in fields]
    assert len(fields) == len(negative)
    # Per-field offset: 0.5 shifts uniform samples to [-0.5, 0.5).
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = data if data else {}
    for field, offset, u in zip(fields, offsets, units):
        if field in data:
            # Caller supplied this field; leave it untouched.
            continue
        if "position" in field:
            # Positions cluster around the domain center and are clipped
            # to the unit box.  (Previously this draw was accidentally
            # overwritten by the uniform sample below.)
            v = prng.normal(loc=0.5, scale=0.25, size=npart)
            np.clip(v, 0.0, 1.0, v)
        else:
            v = prng.random_sample(npart) - offset
        data[field] = (v, u)
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    ds = load_particles(data, 1.0, bbox=bbox)
    return ds
def fake_tetrahedral_ds():
    """Load an in-memory unstructured tetrahedral-mesh dataset carrying one
    node-centered and one element-centered test field."""
    from yt.frontends.stream.sample_data.tetrahedral_mesh import (
        _connectivity,
        _coordinates,
    )
    from yt.loaders import load_unstructured_mesh

    prng = RandomState(0x4D3D3D3)

    # Node-centered field: squared distance of every vertex from the origin,
    # gathered onto the connectivity array.
    node_data = {("connect1", "test"): np.sum(_coordinates ** 2, 1)[_connectivity]}

    # Element-centered field: one random value per element.
    elem_data = {("connect1", "elem"): prng.rand(_connectivity.shape[0])}

    return load_unstructured_mesh(
        _connectivity, _coordinates, node_data=node_data, elem_data=elem_data
    )
def fake_hexahedral_ds():
    """Load an in-memory unstructured hexahedral-mesh dataset carrying one
    node-centered and one element-centered test field."""
    from yt.frontends.stream.sample_data.hexahedral_mesh import (
        _connectivity,
        _coordinates,
    )
    from yt.loaders import load_unstructured_mesh

    prng = RandomState(0x4D3D3D3)

    # Node-centered field: squared vertex distance from the origin.  The
    # sample connectivity is 1-indexed, hence the "- 1" offsets.
    node_data = {("connect1", "test"): np.sum(_coordinates ** 2, 1)[_connectivity - 1]}

    # Element-centered field: one random value per element.
    elem_data = {("connect1", "elem"): prng.rand(_connectivity.shape[0])}

    return load_unstructured_mesh(
        _connectivity - 1, _coordinates, node_data=node_data, elem_data=elem_data
    )
def small_fake_hexahedral_ds():
    """Load a minimal in-memory unstructured mesh: a single hexahedral
    element with one node-centered test field."""
    from yt.loaders import load_unstructured_mesh

    _coordinates = np.array(
        [
            [-1.0, -1.0, -1.0],
            [0.0, -1.0, -1.0],
            [-0.0, 0.0, -1.0],
            [-1.0, -0.0, -1.0],
            [-1.0, -1.0, 0.0],
            [-0.0, -1.0, 0.0],
            [-0.0, 0.0, -0.0],
            [-1.0, 0.0, -0.0],
        ]
    )
    # One element using all eight vertices, written 1-indexed.
    _connectivity = np.array([[1, 2, 3, 4, 5, 6, 7, 8]])

    # Node-centered field: squared vertex distance from the origin, gathered
    # through the (0-indexed) connectivity.
    node_data = {("connect1", "test"): np.sum(_coordinates ** 2, 1)[_connectivity - 1]}

    return load_unstructured_mesh(_connectivity - 1, _coordinates, node_data=node_data)
def fake_vr_orientation_test_ds(N=96, scale=1):
    """
    create a toy dataset that puts a sphere at (0,0,0), a single cube
    on +x, two cubes on +y, and three cubes on +z in a domain from
    [-1*scale,1*scale]**3. The lower planes
    (x = -1*scale, y = -1*scale, z = -1*scale) are also given non-zero
    values.

    This dataset allows you to easily explore orientations and
    handiness in VR and other renderings

    Parameters
    ----------
    N : integer
        The number of cells along each direction

    scale : float
        A spatial scale, the domain boundaries will be multiplied by scale to
        test datasets that have spatial different scales (e.g. data in CGS units)

    Returns
    -------
    An in-memory uniform-grid dataset with a single "density" field that
    encodes the shapes described above.
    """
    from yt.loaders import load_uniform_grid

    xmin = ymin = zmin = -1.0 * scale
    xmax = ymax = zmax = 1.0 * scale
    dcoord = (xmax - xmin) / N  # cell width

    # Background: small but non-zero everywhere.
    arr = np.zeros((N, N, N), dtype=np.float64)
    arr[:, :, :] = 1.0e-4
    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
    # coordinates -- in the notation data[i, j, k]
    x = (np.arange(N) + 0.5) * dcoord + xmin
    y = (np.arange(N) + 0.5) * dcoord + ymin
    z = (np.arange(N) + 0.5) * dcoord + zmin
    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
    # sphere at the origin
    c = np.array([0.5 * (xmin + xmax), 0.5 * (ymin + ymax), 0.5 * (zmin + zmax)])
    r = np.sqrt((x3d - c[0]) ** 2 + (y3d - c[1]) ** 2 + (z3d - c[2]) ** 2)
    arr[r < 0.05] = 1.0
    # Mark the three lower boundary planes (two cells thick) so the domain
    # orientation is visible in renderings.
    arr[abs(x3d - xmin) < 2 * dcoord] = 0.3
    arr[abs(y3d - ymin) < 2 * dcoord] = 0.3
    arr[abs(z3d - zmin) < 2 * dcoord] = 0.3
    # single cube on +x
    xc = 0.75 * scale
    dx = 0.05 * scale
    idx = np.logical_and(
        np.logical_and(x3d > xc - dx, x3d < xc + dx),
        np.logical_and(
            np.logical_and(y3d > -dx, y3d < dx), np.logical_and(z3d > -dx, z3d < dx)
        ),
    )
    arr[idx] = 1.0
    # two cubes on +y
    dy = 0.05 * scale
    for yc in [0.65 * scale, 0.85 * scale]:
        idx = np.logical_and(
            np.logical_and(y3d > yc - dy, y3d < yc + dy),
            np.logical_and(
                np.logical_and(x3d > -dy, x3d < dy), np.logical_and(z3d > -dy, z3d < dy)
            ),
        )
        arr[idx] = 0.8
    # three cubes on +z
    dz = 0.05 * scale
    for zc in [0.5 * scale, 0.7 * scale, 0.9 * scale]:
        idx = np.logical_and(
            np.logical_and(z3d > zc - dz, z3d < zc + dz),
            np.logical_and(
                np.logical_and(x3d > -dz, x3d < dz), np.logical_and(y3d > -dz, y3d < dz)
            ),
        )
        arr[idx] = 0.6
    data = dict(density=(arr, "g/cm**3"))
    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
    return ds
def fake_sph_orientation_ds():
    """Return an in-memory SPH dataset useful for testing orientations.

    Seven particles: one at the origin, one along +x, two along +y and
    three along +z.  Smoothing regions (radius 0.25) do not overlap,
    masses and densities are 1, and all velocities are zero.
    """
    from yt import load_particles

    npart = 7

    # Rows are the x, y, z coordinates of the seven particles.
    pos = np.array(
        [
            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 2.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0],
        ]
    )

    data = {
        "particle_position_x": (pos[0], "cm"),
        "particle_position_y": (pos[1], "cm"),
        "particle_position_z": (pos[2], "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (0.25 * np.ones(npart), "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }

    bbox = np.array([[-4, 4], [-4, 4], [-4, 4]])
    return load_particles(data=data, length_unit=1.0, bbox=bbox)
def fake_sph_grid_ds(hsml_factor=1.0):
    """Return an in-memory SPH dataset laid out on a regular lattice.

    27 particles sit on a uniform 3x3x3 grid with centers running from
    (0.5, 0.5, 0.5) to (2.5, 2.5, 2.5).  Smoothing lengths are
    0.05 * hsml_factor (non-overlapping for the default factor), masses
    and densities are 1, and all velocities are zero.
    """
    from yt import load_particles

    npart = 27

    # Flat lattice index -> (i, j, k) with i varying slowest; this matches
    # a triple nested loop over range(3) in i, j, k order.
    idx = np.arange(npart)
    x = idx // 9 + 0.5
    y = (idx // 3) % 3 + 0.5
    z = idx % 3 + 0.5

    data = {
        "particle_position_x": (x, "cm"),
        "particle_position_y": (y, "cm"),
        "particle_position_z": (z, "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (0.05 * np.ones(npart) * hsml_factor, "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }

    bbox = np.array([[0, 3], [0, 3], [0, 3]])
    return load_particles(data=data, length_unit=1.0, bbox=bbox)
def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None):  # noqa B008
    """Recursively build a boolean octree refinement mask.

    Implementation adapted from:
    http://docs.hyperion-rt.org/en/stable/advanced/indepth_oct.html
    """
    # A fresh call (or refined=True) starts with a refined root cell.
    if refined in (None, True):
        refined = [True]
    # A falsy value means "do not refine": record a single leaf and stop.
    if not refined:
        refined = [False]
        return refined

    # Visit the 8 children of the current cell.
    for _ in range(8):
        # Randomly decide whether this child is itself refined; the 0.12
        # threshold just demonstrates sub-division.
        subdivide = prng.random_sample() < 0.12
        refined.append(subdivide)
        if subdivide:
            # Recurse into the refined child, extending the shared list.
            construct_octree_mask(prng, refined)

    return refined
def fake_octree_ds(
    prng=RandomState(0x4D3D3D3),  # noqa B008
    refined=None,
    quantities=None,
    bbox=None,
    sim_time=0.0,
    length_unit=None,
    mass_unit=None,
    time_unit=None,
    velocity_unit=None,
    magnetic_unit=None,
    periodicity=(True, True, True),
    over_refine_factor=1,
    partial_coverage=1,
    unit_system="cgs",
):
    """Build an in-memory octree dataset with a randomly refined mask and,
    unless ``quantities`` is supplied, random gas density/velocity fields.
    All remaining keyword arguments are forwarded to ``load_octree``.
    """
    from yt.loaders import load_octree

    # Boolean refinement mask flattened to uint8 (1 = refined, 0 = leaf).
    octree_mask = np.asarray(
        construct_octree_mask(prng=prng, refined=refined), dtype=np.uint8
    )
    # NOTE(review): np.invert on uint8 is *bitwise* NOT (0 -> 255, 1 -> 254),
    # so this sum is not a count of unrefined (leaf) cells; np.logical_not
    # would count leaves.  Confirm the intended array sizing before changing.
    particles = np.sum(np.invert(octree_mask))
    if quantities is None:
        quantities = {}
        quantities[("gas", "density")] = prng.random_sample((particles, 1))
        quantities[("gas", "velocity_x")] = prng.random_sample((particles, 1))
        quantities[("gas", "velocity_y")] = prng.random_sample((particles, 1))
        quantities[("gas", "velocity_z")] = prng.random_sample((particles, 1))

    ds = load_octree(
        octree_mask=octree_mask,
        data=quantities,
        bbox=bbox,
        sim_time=sim_time,
        length_unit=length_unit,
        mass_unit=mass_unit,
        time_unit=time_unit,
        velocity_unit=velocity_unit,
        magnetic_unit=magnetic_unit,
        periodicity=periodicity,
        partial_coverage=partial_coverage,
        over_refine_factor=over_refine_factor,
        unit_system=unit_system,
    )
    return ds
def add_noise_fields(ds):
    """Add 4 classes of noise fields to a dataset.

    The fields are registered as cell-sampled "noise0" .. "noise3":

    * noise0 -- random binary (0.0/1.0) data
    * noise1 -- random strictly positive data
    * noise2 -- random non-positive data
    * noise3 -- random data with mixed signs, uniform over [-1, 1)
    """
    prng = RandomState(0x4D3D3D3)

    def _binary_noise(field, data):
        """random binary data"""
        # randint(0, 2) draws from {0, 1}; it is the documented replacement
        # for the deprecated RandomState.random_integers(0, 1) and yields
        # the identical random stream.
        return prng.randint(0, 2, data.size).astype("float64")

    def _positive_noise(field, data):
        """random strictly positive data"""
        # The tiny offset keeps values strictly above zero.
        return prng.random_sample(data.size) + 1e-16

    def _negative_noise(field, data):
        """random negative data"""
        return -prng.random_sample(data.size)

    def _even_noise(field, data):
        """random data with mixed signs"""
        return 2 * prng.random_sample(data.size) - 1

    ds.add_field("noise0", _binary_noise, sampling_type="cell")
    ds.add_field("noise1", _positive_noise, sampling_type="cell")
    ds.add_field("noise2", _negative_noise, sampling_type="cell")
    ds.add_field("noise3", _even_noise, sampling_type="cell")
def expand_keywords(keywords, full=False):
    """
    expand_keywords is a means for testing all possible keyword
    arguments in the nosetests.  Simply pass it a dictionary of all the
    keyword arguments and all of the values for these arguments that you
    want to test.

    It will return a list of kwargs dicts containing combinations of
    the various kwarg values you passed it.  These can then be passed
    to the appropriate function in nosetests.

    If full=True, then every possible combination of keywords is produced,
    otherwise, every keyword option is included at least once in the output
    list.  Be careful, by using full=True, you may be in for an exponentially
    larger number of tests!

    Parameters
    ----------
    keywords : dict
        a dictionary where the keys are the keywords for the function,
        and the values of each key are the possible values that this key
        can take in the function
    full : bool
        if set to True, every possible combination of given keywords is
        returned

    Returns
    -------
    array of dicts
        An array of dictionaries to be individually passed to the appropriate
        function matching these kwargs.

    Examples
    --------
    >>> keywords = {}
    >>> keywords['dpi'] = (50, 100, 200)
    >>> keywords['cmap'] = ('arbre', 'kelp')
    >>> list_of_kwargs = expand_keywords(keywords)
    >>> print(list_of_kwargs)

    array([{'cmap': 'arbre', 'dpi': 50},
           {'cmap': 'kelp', 'dpi': 100},
           {'cmap': 'arbre', 'dpi': 200}], dtype=object)

    >>> list_of_kwargs = expand_keywords(keywords, full=True)
    >>> print(list_of_kwargs)

    array([{'cmap': 'arbre', 'dpi': 50},
           {'cmap': 'arbre', 'dpi': 100},
           {'cmap': 'arbre', 'dpi': 200},
           {'cmap': 'kelp', 'dpi': 50},
           {'cmap': 'kelp', 'dpi': 100},
           {'cmap': 'kelp', 'dpi': 200}], dtype=object)

    >>> for kwargs in list_of_kwargs:
    ...     write_projection(*args, **kwargs)
    """
    # if we want every possible combination of keywords, use iter magic
    if full:
        keys = sorted(keywords)
        list_of_kwarg_dicts = np.array(
            [
                dict(zip(keys, prod))
                for prod in it.product(*(keywords[key] for key in keys))
            ]
        )

    # if we just want to probe each keyword, but not necessarily every
    # combination
    else:
        # Determine the maximum number of values any of the keywords has
        num_lists = 0
        for val in keywords.values():
            if isinstance(val, str):
                # A bare string counts as a single option.  Use the int 1
                # here (not 1.0): num_lists feeds range() below, which
                # rejects floats.
                num_lists = max(1, num_lists)
            else:
                num_lists = max(len(val), num_lists)

        # Construct array of kwargs dicts, each element of the list is a different
        # **kwargs dict.  each kwargs dict gives a different combination of
        # the possible values of the kwargs

        # initialize array
        list_of_kwarg_dicts = np.array([dict() for x in range(num_lists)])

        # fill in array
        for i in np.arange(num_lists):
            list_of_kwarg_dicts[i] = {}
            for key in keywords.keys():
                # if it's a string, use it (there's only one)
                if isinstance(keywords[key], str):
                    list_of_kwarg_dicts[i][key] = keywords[key]
                # if there are more options, use the i'th val
                elif i < len(keywords[key]):
                    list_of_kwarg_dicts[i][key] = keywords[key][i]
                # if there are not more options, use the 0'th val
                else:
                    list_of_kwarg_dicts[i][key] = keywords[key][0]

    return list_of_kwarg_dicts
def requires_module(module):
    """
    Decorator that takes a module name as an argument and tries to import it.
    If the module imports without issue, the function is returned, but if not,
    a null function is returned. This is so tests that depend on certain modules
    being imported will not fail if the module is not installed on the testing
    platform.
    """
    from nose import SkipTest

    def _skipper(func):
        # Replacement decorator: the wrapped test is skipped at call time.
        @functools.wraps(func)
        def skip_wrapper(*args, **kwargs):
            raise SkipTest

        return skip_wrapper

    def _passthrough(func):
        @functools.wraps(func)
        def call_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return call_wrapper

    try:
        importlib.import_module(module)
    except ImportError:
        return _skipper
    return _passthrough
def requires_file(req_file):
    """Decorator factory: skip (or fail, under strict mode) the wrapped test
    when ``req_file`` is not present either locally or in the configured
    test data directory."""
    from nose import SkipTest

    data_dir = ytcfg.get("yt", "test_data_dir")

    def ffalse(func):
        @functools.wraps(func)
        def false_wrapper(*args, **kwargs):
            # Strict mode turns a missing data file into a hard failure.
            if ytcfg.getboolean("yt", "__strict_requires"):
                raise FileNotFoundError(req_file)
            raise SkipTest

        return false_wrapper

    def ftrue(func):
        @functools.wraps(func)
        def true_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return true_wrapper

    if os.path.exists(req_file) or os.path.exists(os.path.join(data_dir, req_file)):
        return ftrue
    return ffalse
def disable_dataset_cache(func):
    """Decorator that disables yt's dataset cache for the duration of the
    wrapped call and restores the previous setting afterwards.

    Fixes two defects in the previous version: ``restore_cfg_state`` was
    never set to True (so the config was left permanently altered), and an
    exception in ``func`` skipped the restore entirely.
    """

    @functools.wraps(func)
    def newfunc(*args, **kwargs):
        restore_cfg_state = False
        if ytcfg.get("yt", "skip_dataset_cache") == "False":
            ytcfg["yt", "skip_dataset_cache"] = "True"
            restore_cfg_state = True
        try:
            return func(*args, **kwargs)
        finally:
            # Restore even if func raises.
            if restore_cfg_state:
                ytcfg["yt", "skip_dataset_cache"] = "False"

    return newfunc
@disable_dataset_cache
def units_override_check(fn):
    """Load ``fn`` twice -- once plainly and once with ``units_override``
    built from the first load's unit attributes -- and assert the resulting
    unit attributes are identical."""
    unit_names = ["length", "time", "mass", "velocity", "magnetic", "temperature"]
    ds1 = load(fn)
    units_override = {}
    attrs1 = []
    for name in unit_names:
        attr = getattr(ds1, f"{name}_unit", None)
        if attr is None:
            continue
        attrs1.append(attr)
        units_override[f"{name}_unit"] = (attr.v, attr.units)
    del ds1
    ds2 = load(fn, units_override=units_override)
    assert len(ds2.units_override) > 0
    attrs2 = [
        attr
        for attr in (getattr(ds2, f"{name}_unit", None) for name in unit_names)
        if attr is not None
    ]
    assert_equal(attrs1, attrs2)
# This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or
# lower. It's just designed to give a sample AMR index to deal with.
# Each entry is [level, left_edge, right_edge, dimensions], with edges given
# in normalized [0, 1] coordinates (see fake_amr_ds, which unpacks and
# rescales these values).
_amr_grid_index = [
    [0, [0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [32, 32, 32]],
    [1, [0.25, 0.21875, 0.25], [0.5, 0.5, 0.5], [16, 18, 16]],
    [1, [0.5, 0.21875, 0.25], [0.75, 0.5, 0.5], [16, 18, 16]],
    [1, [0.21875, 0.5, 0.25], [0.5, 0.75, 0.5], [18, 16, 16]],
    [1, [0.5, 0.5, 0.25], [0.75, 0.75, 0.5], [16, 16, 16]],
    [1, [0.25, 0.25, 0.5], [0.5, 0.5, 0.75], [16, 16, 16]],
    [1, [0.5, 0.25, 0.5], [0.75, 0.5, 0.75], [16, 16, 16]],
    [1, [0.25, 0.5, 0.5], [0.5, 0.75, 0.75], [16, 16, 16]],
    [1, [0.5, 0.5, 0.5], [0.75, 0.75, 0.75], [16, 16, 16]],
    [2, [0.5, 0.5, 0.5], [0.71875, 0.71875, 0.71875], [28, 28, 28]],
    [3, [0.5, 0.5, 0.5], [0.6640625, 0.65625, 0.6796875], [42, 40, 46]],
    [4, [0.5, 0.5, 0.5], [0.59765625, 0.6015625, 0.6015625], [50, 52, 52]],
    [2, [0.28125, 0.5, 0.5], [0.5, 0.734375, 0.71875], [28, 30, 28]],
    [3, [0.3359375, 0.5, 0.5], [0.5, 0.671875, 0.6640625], [42, 44, 42]],
    [4, [0.40625, 0.5, 0.5], [0.5, 0.59765625, 0.59765625], [48, 50, 50]],
    [2, [0.5, 0.28125, 0.5], [0.71875, 0.5, 0.71875], [28, 28, 28]],
    [3, [0.5, 0.3359375, 0.5], [0.671875, 0.5, 0.6640625], [44, 42, 42]],
    [4, [0.5, 0.40625, 0.5], [0.6015625, 0.5, 0.59765625], [52, 48, 50]],
    [2, [0.28125, 0.28125, 0.5], [0.5, 0.5, 0.71875], [28, 28, 28]],
    [3, [0.3359375, 0.3359375, 0.5], [0.5, 0.5, 0.671875], [42, 42, 44]],
    [
        4,
        [0.46484375, 0.37890625, 0.50390625],
        [0.4765625, 0.390625, 0.515625],
        [6, 6, 6],
    ],
    [4, [0.40625, 0.40625, 0.5], [0.5, 0.5, 0.59765625], [48, 48, 50]],
    [2, [0.5, 0.5, 0.28125], [0.71875, 0.71875, 0.5], [28, 28, 28]],
    [3, [0.5, 0.5, 0.3359375], [0.6796875, 0.6953125, 0.5], [46, 50, 42]],
    [4, [0.5, 0.5, 0.40234375], [0.59375, 0.6015625, 0.5], [48, 52, 50]],
    [2, [0.265625, 0.5, 0.28125], [0.5, 0.71875, 0.5], [30, 28, 28]],
    [3, [0.3359375, 0.5, 0.328125], [0.5, 0.65625, 0.5], [42, 40, 44]],
    [4, [0.40234375, 0.5, 0.40625], [0.5, 0.60546875, 0.5], [50, 54, 48]],
    [2, [0.5, 0.265625, 0.28125], [0.71875, 0.5, 0.5], [28, 30, 28]],
    [3, [0.5, 0.3203125, 0.328125], [0.6640625, 0.5, 0.5], [42, 46, 44]],
    [4, [0.5, 0.3984375, 0.40625], [0.546875, 0.5, 0.5], [24, 52, 48]],
    [4, [0.546875, 0.41796875, 0.4453125], [0.5625, 0.4375, 0.5], [8, 10, 28]],
    [4, [0.546875, 0.453125, 0.41796875], [0.5546875, 0.48046875, 0.4375], [4, 14, 10]],
    [4, [0.546875, 0.4375, 0.4375], [0.609375, 0.5, 0.5], [32, 32, 32]],
    [4, [0.546875, 0.4921875, 0.41796875], [0.56640625, 0.5, 0.4375], [10, 4, 10]],
    [
        4,
        [0.546875, 0.48046875, 0.41796875],
        [0.5703125, 0.4921875, 0.4375],
        [12, 6, 10],
    ],
    [4, [0.55859375, 0.46875, 0.43359375], [0.5703125, 0.48046875, 0.4375], [6, 6, 2]],
    [2, [0.265625, 0.28125, 0.28125], [0.5, 0.5, 0.5], [30, 28, 28]],
    [3, [0.328125, 0.3359375, 0.328125], [0.5, 0.5, 0.5], [44, 42, 44]],
    [4, [0.4140625, 0.40625, 0.40625], [0.5, 0.5, 0.5], [44, 48, 48]],
]
def check_results(func):
    r"""This is a decorator for a function to verify that the (numpy ndarray)
    result of a function is what it should be.

    This function is designed to be used for very light answer testing.
    Essentially, it wraps around a larger function that returns a numpy array,
    and that has results that should not change.  It is not necessarily used
    inside the testing scripts themselves, but inside testing scripts written
    by developers during the testing of pull requests and new functionality.
    If a hash is specified, it "wins" and the others are ignored.  Otherwise,
    tolerance is 1e-8 (just above single precision.)

    The correct results will be stored if the command line contains
    --answer-reference , and otherwise it will compare against the results on
    disk.  The filename will be func_results_ref_FUNCNAME.cpkl where FUNCNAME
    is the name of the function being tested.

    If you would like more control over the name of the pickle file the results
    are stored in, you can pass the result_basename keyword argument to the
    function you are testing.  The check_results decorator will use the value
    of the keyword to construct the filename of the results data file.  If
    result_basename is not specified, the name of the testing function is used.

    This will raise an exception if the results are not correct.

    Examples
    --------

    >>> @check_results
    ... def my_func(ds):
    ...     return ds.domain_width

    >>> my_func(ds)

    >>> @check_results
    ... def field_checker(dd, field_name):
    ...     return dd[field_name]

    >>> field_checker(ds.all_data(), 'density', result_basename='density')

    """

    def compute_results(func):
        # Reference-creation mode: run the function and pickle summary
        # statistics of its result to disk.
        @functools.wraps(func)
        def _func(*args, **kwargs):
            name = kwargs.pop("result_basename", func.__name__)
            rv = func(*args, **kwargs)
            if hasattr(rv, "convert_to_base"):
                rv.convert_to_base()
                _rv = rv.ndarray_view()
            else:
                _rv = rv
            mi = _rv.min()
            ma = _rv.max()
            st = _rv.std(dtype="float64")
            su = _rv.sum(dtype="float64")
            si = _rv.size
            # tobytes() replaces the deprecated (and removed in modern
            # NumPy) tostring(); the two are exact aliases.
            ha = hashlib.md5(_rv.tobytes()).hexdigest()
            fn = f"func_results_ref_{name}.cpkl"
            with open(fn, "wb") as f:
                pickle.dump((mi, ma, st, su, si, ha), f)
            return rv

        return _func

    from yt.mods import unparsed_args

    if "--answer-reference" in unparsed_args:
        return compute_results(func)

    def compare_results(func):
        # Comparison mode: recompute the summary statistics and check them
        # against the pickled reference.
        @functools.wraps(func)
        def _func(*args, **kwargs):
            name = kwargs.pop("result_basename", func.__name__)
            rv = func(*args, **kwargs)
            if hasattr(rv, "convert_to_base"):
                rv.convert_to_base()
                _rv = rv.ndarray_view()
            else:
                _rv = rv
            vals = (
                _rv.min(),
                _rv.max(),
                _rv.std(dtype="float64"),
                _rv.sum(dtype="float64"),
                _rv.size,
                hashlib.md5(_rv.tobytes()).hexdigest(),
            )
            fn = f"func_results_ref_{name}.cpkl"
            if not os.path.exists(fn):
                print("Answers need to be created with --answer-reference .")
                return False
            with open(fn, "rb") as f:
                ref = pickle.load(f)
            print(f"Sizes: {vals[4] == ref[4]} ({vals[4]}, {ref[4]})")
            assert_allclose(vals[0], ref[0], 1e-8, err_msg="min")
            assert_allclose(vals[1], ref[1], 1e-8, err_msg="max")
            assert_allclose(vals[2], ref[2], 1e-8, err_msg="std")
            assert_allclose(vals[3], ref[3], 1e-8, err_msg="sum")
            assert_equal(vals[4], ref[4])
            print("Hashes equal: %s" % (vals[-1] == ref[-1]))
            return rv

        return _func

    return compare_results(func)
def periodicity_cases(ds):
    """Yield a set of centers useful for periodicity checks.

    The first center is the domain midpoint; the remaining eight sit one
    cell in from each corner of the domain.
    """
    yield (ds.domain_left_edge + ds.domain_right_edge) / 2.0
    dx = ds.domain_width / ds.domain_dimensions
    ni, nj, nk = ds.domain_dimensions
    # One cell in from the lower edge, one cell in from the upper edge.
    for i in (1, ni - 2):
        for j in (1, nj - 2):
            for k in (1, nk - 2):
                yield dx * np.array([i, j, k]) + ds.domain_left_edge
def run_nose(
    verbose=False,
    run_answer_tests=False,
    answer_big_data=False,
    call_pdb=False,
    module=None,
):
    """Run yt's nose test suite programmatically.

    Parameters
    ----------
    verbose : bool
        Pass ``-v`` to nose.
    run_answer_tests : bool
        Enable the answer-testing plugin.
    answer_big_data : bool
        Also run big-data answer tests (implies answer testing).
    call_pdb : bool
        Drop into pdb on errors/failures.
    module : str, optional
        Restrict the run to a single module.
    """
    import sys

    from yt.utilities.logger import ytLogger as mylog
    from yt.utilities.on_demand_imports import _nose

    orig_level = mylog.getEffectiveLevel()
    mylog.setLevel(50)
    # Build nose's argv on a *copy*: the previous "nose_argv = sys.argv"
    # aliased the list, so the += below mutated sys.argv for the rest of
    # the process.
    nose_argv = list(sys.argv)
    nose_argv += ["--exclude=answer_testing", "--detailed-errors", "--exe"]
    if call_pdb:
        nose_argv += ["--pdb", "--pdb-failures"]
    if verbose:
        nose_argv.append("-v")
    if run_answer_tests:
        nose_argv.append("--with-answer-testing")
    if answer_big_data:
        nose_argv.append("--answer-big-data")
    if module:
        nose_argv.append(module)
    initial_dir = os.getcwd()
    yt_file = os.path.abspath(__file__)
    yt_dir = os.path.dirname(yt_file)
    if os.path.samefile(os.path.dirname(yt_dir), initial_dir):
        # Provide a nice error message to work around nose bug
        # see https://github.com/nose-devs/nose/issues/701
        raise RuntimeError(
            """
    The yt.run_nose function does not work correctly when invoked in
    the same directory as the installed yt package. Try starting
    a python session in a different directory before invoking yt.run_nose
    again. Alternatively, you can also run the "nosetests" executable in
    the current directory like so:

        $ nosetests
            """
        )
    os.chdir(yt_dir)
    try:
        _nose.run(argv=nose_argv)
    finally:
        # Always return to the caller's directory and restore log verbosity.
        os.chdir(initial_dir)
        mylog.setLevel(orig_level)
def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs):
    """Raise an error if two objects are not equal up to desired tolerance

    This is a wrapper for :func:`numpy.testing.assert_allclose` that also
    verifies unit consistency

    Parameters
    ----------
    actual : array-like
        Array obtained (possibly with attached units)
    desired : array-like
        Array to compare with (possibly with attached units)
    rtol : float, optional
        Relative tolerance, defaults to 1e-7
    atol : float or quantity, optional
        Absolute tolerance. If units are attached, they must be consistent
        with the units of ``actual`` and ``desired``. If no units are attached,
        assumes the same units as ``desired``. Defaults to zero.

    Notes
    -----
    Also accepts additional keyword arguments accepted by
    :func:`numpy.testing.assert_allclose`, see the documentation of that
    function for details.
    """
    # Create a copy to ensure this function does not alter input arrays
    act = YTArray(actual)
    des = YTArray(desired)

    try:
        des = des.in_units(act.units)
    except UnitOperationError as e:
        raise AssertionError(
            "Units of actual (%s) and desired (%s) do not have "
            "equivalent dimensions" % (act.units, des.units)
        ) from e

    rt = YTArray(rtol)
    if not rt.units.is_dimensionless:
        raise AssertionError(f"Units of rtol ({rt.units}) are not dimensionless")

    if not isinstance(atol, YTArray):
        at = YTQuantity(atol, des.units)
    else:
        # Previously ``at`` was left unbound in this case, producing a
        # NameError whenever a unitful atol was passed.
        at = atol

    try:
        at = at.in_units(act.units)
    except UnitOperationError as e:
        raise AssertionError(
            "Units of atol (%s) and actual (%s) do not have "
            "equivalent dimensions" % (at.units, act.units)
        ) from e

    # units have been validated, so we strip units before calling numpy
    # to avoid spurious errors
    act = act.value
    des = des.value
    rt = rt.value
    at = at.value

    return assert_allclose(act, des, rt, at, **kwargs)
def assert_fname(fname):
    """Assert that *fname*'s on-disk signature matches its file extension.

    Recognizes PNG, JPEG, EPS/PS and PDF by their magic bytes; ``None`` is
    accepted and ignored.  Raises ``AssertionError`` on a mismatch.
    """
    if fname is None:
        return
    with open(fname, "rb") as stream:
        header = stream.read()
    # see http://www.w3.org/TR/PNG/#5PNG-file-signature
    if header.startswith(b"\211PNG\r\n\032\n"):
        detected = ".png"
    # see http://www.mathguide.de/info/tools/media-types/image/jpeg
    elif header.startswith(b"\377\330"):
        detected = ".jpeg"
    elif header.startswith(b"%!PS-Adobe"):
        # PostScript family: the first line distinguishes EPS from plain PS.
        text = header.decode("utf-8", "ignore")
        detected = ".eps" if "EPSF" in text[: text.index("\n")] else ".ps"
    elif header.startswith(b"%PDF"):
        detected = ".pdf"
    else:
        detected = ""
    extension = os.path.splitext(fname)[1]
    assert detected == extension, (
        "Expected an image of type '%s' but '%s' is an image of type '%s'"
        % (extension, fname, detected)
    )
def requires_backend(backend):
    """ Decorator to check for a specified matplotlib backend.

    When *backend* matches ``matplotlib.get_backend()`` (case-insensitively)
    the decorated function is returned unchanged; otherwise a stand-in is
    returned so the test does not run.

    Parameters
    ----------
    backend : String
        The value which is compared with the current matplotlib backend in use.

    Returns
    -------
    Decorated function or null function
    """
    import pytest

    def ffalse(func):
        # Under pytest an explicit skip is emitted instead of returning a
        # None-returning stub, because the stub would be reported as a pass
        # without having run.  test_requires_backend checks for this skip
        # exception in its xfail branch, so keep raising it via pytest.skip.
        def skip(*args, **kwargs):
            msg = f"`{backend}` backend not found, skipping: `{func.__name__}`"
            print(msg)
            pytest.skip(msg)

        if ytcfg.getboolean("yt", "__withinpytest"):
            return skip
        # Outside pytest, silently replace the test with a do-nothing stub.
        return lambda: None

    if backend.lower() == matplotlib.get_backend().lower():
        return lambda func: func
    return ffalse
class TempDirTest(unittest.TestCase):
    """Test case that executes inside a throwaway scratch directory.

    ``setUp`` moves the process into a freshly created temporary directory
    and ``tearDown`` returns to the original working directory before
    deleting the scratch area, so tests may write files freely without
    polluting the source tree.
    """

    def setUp(self):
        # Remember the starting directory so tearDown can restore it.
        self.curdir = os.getcwd()
        self.tmpdir = tempfile.mkdtemp()
        os.chdir(self.tmpdir)

    def tearDown(self):
        # Step out of the scratch directory before removing it; rmtree on
        # the current working directory is not portable.
        os.chdir(self.curdir)
        shutil.rmtree(self.tmpdir)
class ParticleSelectionComparison:
    """
    This is a test helper class that takes a particle dataset, caches the
    particles it has on disk (manually reading them using lower-level IO
    routines) and then receives a data object that it compares against manually
    running the data object's selection routines. All supplied data objects
    must be created from the input dataset.
    """
    def __init__(self, ds):
        # Cache all particle positions (and smoothing lengths for SPH particle
        # types) by reading them straight from the dataset's IO handler,
        # bypassing the normal selection machinery entirely.
        self.ds = ds
        # Construct an index so that we get all the data_files
        ds.index
        particles = {}
        # hsml is the smoothing length we use for radial selection
        hsml = {}
        for data_file in ds.index.data_files:
            for ptype, pos_arr in ds.index.io._yield_coordinates(data_file):
                particles.setdefault(ptype, []).append(pos_arr)
                if ptype in getattr(ds, "_sph_ptypes", ()):
                    hsml.setdefault(ptype, []).append(
                        ds.index.io._get_smoothing_length(
                            data_file, pos_arr.dtype, pos_arr.shape
                        )
                    )
        # Collapse the per-file chunks into a single array per particle type.
        for ptype in particles:
            particles[ptype] = np.concatenate(particles[ptype])
            if ptype in hsml:
                hsml[ptype] = np.concatenate(hsml[ptype])
        self.particles = particles
        self.hsml = hsml
    def compare_dobj_selection(self, dobj):
        # For each particle type, run the data object's selector directly on
        # the cached positions and compare against the positions the data
        # object itself reads back chunk by chunk.  ``dobj`` must come from
        # the same dataset this instance was built from.
        for ptype in sorted(self.particles):
            x, y, z = self.particles[ptype].T
            # Set our radii to zero for now, I guess?
            # (0.0 is used when no smoothing lengths were cached for ptype.)
            radii = self.hsml.get(ptype, 0.0)
            sel_index = dobj.selector.select_points(x, y, z, radii)
            if sel_index is None:
                # Selector found nothing; compare against an empty (0, 3) array.
                sel_pos = np.empty((0, 3))
            else:
                sel_pos = self.particles[ptype][sel_index, :]
            obj_results = []
            for chunk in dobj.chunks([], "io"):
                obj_results.append(chunk[ptype, "particle_position"])
            if any(_.size > 0 for _ in obj_results):
                obj_results = np.concatenate(obj_results, axis=0)
            else:
                obj_results = np.empty((0, 3))
            # Sometimes we get unitary scaling or other floating point noise. 5
            # NULP should be OK. This is mostly for stuff like Rockstar, where
            # the f32->f64 casting happens at different places depending on
            # which code path we use.
            assert_array_almost_equal_nulp(sel_pos, obj_results, 5)
    def run_defaults(self):
        """
        This runs lots of samples that touch different types of wraparounds.
        Specifically, it does:
        * sphere in center with radius 0.1 unitary
        * sphere in center with radius 0.2 unitary
        * sphere in each of the eight corners of the domain with radius 0.1 unitary
        * sphere in center with radius 0.5 unitary
        * box that covers 0.1 .. 0.9
        * box from 0.8 .. 0.85
        * box from 0.3..0.6, 0.2..0.8, 0.0..0.1
        """
        sp1 = self.ds.sphere("c", (0.1, "unitary"))
        self.compare_dobj_selection(sp1)
        sp2 = self.ds.sphere("c", (0.2, "unitary"))
        self.compare_dobj_selection(sp2)
        # Sphere centers near each face/corner exercise periodic wraparound.
        centers = [
            [0.04, 0.5, 0.5],
            [0.5, 0.04, 0.5],
            [0.5, 0.5, 0.04],
            [0.04, 0.04, 0.04],
            [0.96, 0.5, 0.5],
            [0.5, 0.96, 0.5],
            [0.5, 0.5, 0.96],
            [0.96, 0.96, 0.96],
        ]
        r = self.ds.quan(0.1, "unitary")
        for center in centers:
            c = self.ds.arr(center, "unitary") + self.ds.domain_left_edge.in_units(
                "unitary"
            )
            if not all(self.ds.periodicity):
                # filter out the periodic bits for non-periodic datasets
                if any(c - r < self.ds.domain_left_edge) or any(
                    c + r > self.ds.domain_right_edge
                ):
                    continue
            sp = self.ds.sphere(c, (0.1, "unitary"))
            self.compare_dobj_selection(sp)
        sp = self.ds.sphere("c", (0.5, "unitary"))
        self.compare_dobj_selection(sp)
        dd = self.ds.all_data()
        self.compare_dobj_selection(dd)
        # This is in raw numbers, so we can offset for the left edge
        LE = self.ds.domain_left_edge.in_units("unitary").d
        reg1 = self.ds.r[
            (0.1 + LE[0], "unitary") : (0.9 + LE[0], "unitary"),
            (0.1 + LE[1], "unitary") : (0.9 + LE[1], "unitary"),
            (0.1 + LE[2], "unitary") : (0.9 + LE[2], "unitary"),
        ]
        self.compare_dobj_selection(reg1)
        reg2 = self.ds.r[
            (0.8 + LE[0], "unitary") : (0.85 + LE[0], "unitary"),
            (0.8 + LE[1], "unitary") : (0.85 + LE[1], "unitary"),
            (0.8 + LE[2], "unitary") : (0.85 + LE[2], "unitary"),
        ]
        self.compare_dobj_selection(reg2)
        reg3 = self.ds.r[
            (0.3 + LE[0], "unitary") : (0.6 + LE[0], "unitary"),
            (0.2 + LE[1], "unitary") : (0.8 + LE[1], "unitary"),
            (0.0 + LE[2], "unitary") : (0.1 + LE[2], "unitary"),
        ]
        self.compare_dobj_selection(reg3)
| import functools
import hashlib
import importlib
import itertools as it
import os
import pickle
import shutil
import tempfile
import unittest
import matplotlib
import numpy as np
from numpy.random import RandomState
from unyt.exceptions import UnitOperationError
from yt.config import ytcfg
from yt.funcs import iterable
from yt.loaders import load
from yt.units.yt_array import YTArray, YTQuantity
# we import this in a weird way from numpy.testing to avoid triggering
# flake8 errors from the unused imports. These test functions are imported
# elsewhere in yt from here so we want them to be imported here.
from numpy.testing import assert_array_equal, assert_almost_equal # NOQA isort:skip
from numpy.testing import assert_equal, assert_array_less # NOQA isort:skip
from numpy.testing import assert_string_equal # NOQA isort:skip
from numpy.testing import assert_array_almost_equal_nulp # NOQA isort:skip
from numpy.testing import assert_allclose, assert_raises # NOQA isort:skip
from numpy.testing import assert_approx_equal # NOQA isort:skip
from numpy.testing import assert_array_almost_equal # NOQA isort:skip
# Tag attached to answer tests so they can be (de)selected en masse.
ANSWER_TEST_TAG = "answer_test"
# Expose assert_true and assert_less_equal from unittest.TestCase
# this is adopted from nose. Doing this here allows us to avoid importing
# nose at the top level.
class _Dummy(unittest.TestCase):
    # Intentionally a no-op (and intentionally missing ``self``): it exists
    # only so a TestCase instance can be constructed below; it is never run.
    def nop():
        pass
# Bound assertion methods borrowed from a throwaway TestCase instance.
_t = _Dummy("nop")
assert_true = getattr(_t, "assertTrue")  # noqa: B009
assert_less_equal = getattr(_t, "assertLessEqual")  # noqa: B009
def assert_rel_equal(a1, a2, decimals, err_msg="", verbose=True):
    """Assert that two values/arrays agree to ``decimals`` relative digits.

    NaNs must occur at the same positions in both inputs and are treated as
    equal; exact zeros (below machine epsilon) must likewise match
    positionally.  The inputs are copied before the NaN/zero masking, so the
    caller's arrays are left untouched (previously they were modified in
    place).
    """
    # We have nan checks in here because occasionally we have fields that get
    # weighted without non-zero weights. I'm looking at you, particle fields!
    if isinstance(a1, np.ndarray):
        assert a1.size == a2.size
        # BUG FIX: operate on copies -- the masking below assigns into the
        # arrays, and an assertion helper must not mutate its inputs.
        a1 = a1.copy()
        a2 = a2.copy()
        # Mask out NaNs
        assert (np.isnan(a1) == np.isnan(a2)).all()
        a1[np.isnan(a1)] = 1.0
        a2[np.isnan(a2)] = 1.0
        # Mask out 0
        ind1 = np.array(np.abs(a1) < np.finfo(a1.dtype).eps)
        ind2 = np.array(np.abs(a2) < np.finfo(a2.dtype).eps)
        assert (ind1 == ind2).all()
        a1[ind1] = 1.0
        a2[ind2] = 1.0
    elif np.any(np.isnan(a1)) and np.any(np.isnan(a2)):
        # Scalar NaN on both sides counts as equal.
        return True
    if not isinstance(a1, np.ndarray) and a1 == a2 == 0.0:
        # NANS!
        a1 = a2 = 1.0
    return assert_almost_equal(
        np.array(a1) / np.array(a2), 1.0, decimals, err_msg=err_msg, verbose=verbose
    )
def amrspace(extent, levels=7, cells=8):
    """Creates two numpy arrays representing the left and right bounds of
    an AMR grid as well as an array for the AMR level of each cell.
    Parameters
    ----------
    extent : array-like
        This a sequence of length 2*ndims that is the bounds of each dimension.
        For example, the 2D unit square would be given by [0.0, 1.0, 0.0, 1.0].
        A 3D cylindrical grid may look like [0.0, 2.0, -1.0, 1.0, 0.0, 2*np.pi].
    levels : int or sequence of ints, optional
        This is the number of AMR refinement levels. If given as a sequence (of
        length ndims), then each dimension will be refined down to this level.
        All values in this array must be the same or zero. A zero valued dimension
        indicates that this dim should not be refined. Taking the 3D cylindrical
        example above if we don't want refine theta but want r and z at 5 we would
        set levels=(5, 5, 0).
    cells : int, optional
        This is the number of cells per refinement level.
    Returns
    -------
    left : float ndarray, shape=(npoints, ndims)
        The left AMR grid points.
    right : float ndarray, shape=(npoints, ndims)
        The right AMR grid points.
    level : int ndarray, shape=(npoints,)
        The AMR level for each point.
    Examples
    --------
    >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2)
    >>> print(l)
    [[ 0.     1.     0.   ]
     [ 0.25   1.     0.   ]
     [ 0.     1.125  0.   ]
     [ 0.25   1.125  0.   ]
     [ 0.5    1.     0.   ]
     [ 0.     1.25   0.   ]
     [ 0.5    1.25   0.   ]
     [ 1.     1.     0.   ]
     [ 0.     1.5    0.   ]
     [ 1.     1.5    0.   ]]
    """
    extent = np.asarray(extent, dtype="f8")
    # Width of each dimension (extent is [lo0, hi0, lo1, hi1, ...]).
    dextent = extent[1::2] - extent[::2]
    ndims = len(dextent)
    if isinstance(levels, int):
        minlvl = maxlvl = levels
        levels = np.array([levels] * ndims, dtype="int32")
    else:
        levels = np.asarray(levels, dtype="int32")
        minlvl = levels.min()
        maxlvl = levels.max()
        # All refined dims must share the same level; zeros mark unrefined dims.
        if minlvl != maxlvl and (minlvl != 0 or set([minlvl, maxlvl]) != set(levels)):
            raise ValueError("all levels must have the same value or zero.")
    dims_zero = levels == 0
    dims_nonzero = ~dims_zero
    ndims_nonzero = dims_nonzero.sum()
    # One point for the deepest cell plus (cells^d - 1) siblings per level.
    npoints = (cells ** ndims_nonzero - 1) * maxlvl + 1
    left = np.empty((npoints, ndims), dtype="float64")
    right = np.empty((npoints, ndims), dtype="float64")
    level = np.empty(npoints, dtype="int32")
    # fill zero dims
    left[:, dims_zero] = extent[::2][dims_zero]
    right[:, dims_zero] = extent[1::2][dims_zero]
    # fill non-zero dims
    dcell = 1.0 / cells
    # NOTE(review): for unrefined dims the slice step equals the upper bound,
    # which makes np.mgrid emit a single point at the lower edge -- confirm
    # this mgrid trick is intended before altering these slices.
    left_slice = tuple(
        [
            slice(extent[2 * n], extent[2 * n + 1], extent[2 * n + 1])
            if dims_zero[n]
            else slice(0.0, 1.0, dcell)
            for n in range(ndims)
        ]
    )
    right_slice = tuple(
        [
            slice(extent[2 * n + 1], extent[2 * n], -extent[2 * n + 1])
            if dims_zero[n]
            else slice(dcell, 1.0 + dcell, dcell)
            for n in range(ndims)
        ]
    )
    # Normalized (unit-cube) grids of cell corners; the first ndims entries of
    # the flattened mgrid are dropped to skip the origin cell.
    left_norm_grid = np.reshape(np.mgrid[left_slice].T.flat[ndims:], (-1, ndims))
    lng_zero = left_norm_grid[:, dims_zero]
    lng_nonzero = left_norm_grid[:, dims_nonzero]
    right_norm_grid = np.reshape(np.mgrid[right_slice].T.flat[ndims:], (-1, ndims))
    rng_zero = right_norm_grid[:, dims_zero]
    rng_nonzero = right_norm_grid[:, dims_nonzero]
    # The first point is the single deepest cell at the domain's lower corner.
    level[0] = maxlvl
    left[0, :] = extent[::2]
    right[0, dims_zero] = extent[1::2][dims_zero]
    right[0, dims_nonzero] = (dcell ** maxlvl) * dextent[dims_nonzero] + extent[::2][
        dims_nonzero
    ]
    # Remaining points: one band of sibling cells per level, deepest first.
    for i, lvl in enumerate(range(maxlvl, 0, -1)):
        start = (cells ** ndims_nonzero - 1) * i + 1
        stop = (cells ** ndims_nonzero - 1) * (i + 1) + 1
        dsize = dcell ** (lvl - 1) * dextent[dims_nonzero]
        level[start:stop] = lvl
        left[start:stop, dims_zero] = lng_zero
        left[start:stop, dims_nonzero] = lng_nonzero * dsize + extent[::2][dims_nonzero]
        right[start:stop, dims_zero] = rng_zero
        right[start:stop, dims_nonzero] = (
            rng_nonzero * dsize + extent[::2][dims_nonzero]
        )
    return left, right, level
def fake_random_ds(
    ndims,
    peak_value=1.0,
    fields=("density", "velocity_x", "velocity_y", "velocity_z"),
    units=("g/cm**3", "cm/s", "cm/s", "cm/s"),
    particle_fields=None,
    particle_field_units=None,
    negative=False,
    nprocs=1,
    particles=0,
    length_unit=1.0,
    unit_system="cgs",
    bbox=None,
):
    """Build an in-memory uniform-grid dataset filled with seeded random data.

    Each entry of *fields* gets uniform random samples scaled by *peak_value*;
    fields flagged in *negative* are shifted down by half so roughly half the
    values are below zero.  Optionally *particles* random particles are added.
    """
    from yt.loaders import load_uniform_grid

    prng = RandomState(0x4D3D3D3)
    if not iterable(ndims):
        ndims = [ndims] * 3
    else:
        assert len(ndims) == 3
    if not iterable(negative):
        negative = [negative] * len(fields)
    assert len(fields) == len(negative)
    # 0.5 offset centers the samples on zero for "negative" fields.
    offsets = [0.5 if neg else 0.0 for neg in negative]
    data = {}
    for field, offset, u in zip(fields, offsets, units):
        v = (prng.random_sample(ndims) - offset) * peak_value
        if field[0] == "all":
            # ("all", ...) tuple fields are stored flattened like particles.
            v = v.ravel()
        data[field] = (v, u)
    if particles:
        if particle_fields is not None:
            # Caller-specified particle fields (positions/velocities are 3D).
            for field, unit in zip(particle_fields, particle_field_units):
                if field in ("particle_position", "particle_velocity"):
                    data["io", field] = (prng.random_sample((int(particles), 3)), unit)
                else:
                    data["io", field] = (prng.random_sample(size=int(particles)), unit)
        else:
            # Default particle set: positions, velocities, and masses.
            for f in (f"particle_position_{ax}" for ax in "xyz"):
                data["io", f] = (prng.random_sample(size=particles), "code_length")
            for f in (f"particle_velocity_{ax}" for ax in "xyz"):
                data["io", f] = (prng.random_sample(size=particles) - 0.5, "cm/s")
            data["io", "particle_mass"] = (prng.random_sample(particles), "g")
    return load_uniform_grid(
        data,
        ndims,
        length_unit=length_unit,
        nprocs=nprocs,
        unit_system=unit_system,
        bbox=bbox,
    )
# Domain bounds (left corner, right corner) per supported geometry, used by
# fake_amr_ds to rescale the canned unit-cube grid edges.
_geom_transforms = {
    # These are the bounds we want. Cartesian we just assume goes 0 .. 1.
    "cartesian": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
    "spherical": ((0.0, 0.0, 0.0), (1.0, np.pi, 2 * np.pi)),
    "cylindrical": ((0.0, 0.0, 0.0), (1.0, 1.0, 2.0 * np.pi)),  # rzt
    "polar": ((0.0, 0.0, 0.0), (1.0, 2.0 * np.pi, 1.0)),  # rtz
    "geographic": ((-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0)),  # latlonalt
    "internal_geographic": ((-90.0, -180.0, 0.0), (90.0, 180.0, 1000.0)),  # latlondep
}
def fake_amr_ds(
    fields=("Density",), geometry="cartesian", particles=0, length_unit=None
):
    """Build a small in-memory AMR dataset from the canned ``_amr_grid_index``.

    Parameters
    ----------
    fields : iterable of str
        Field names; each grid receives uniform random samples per field.
    geometry : str
        A key of ``_geom_transforms``; selects the domain bounds.
    particles : int
        If nonzero, add this many randomly placed particles per grid.
    length_unit : optional
        Forwarded to ``load_amr_grids``.
    """
    from yt.loaders import load_amr_grids

    prng = RandomState(0x4D3D3D3)
    LE, RE = _geom_transforms[geometry]
    LE = np.array(LE)
    RE = np.array(RE)
    data = []
    for gspec in _amr_grid_index:
        level, left_edge, right_edge, dims = gspec
        # Grid edges in _amr_grid_index live in [0, 1]; rescale to the domain.
        left_edge = left_edge * (RE - LE) + LE
        right_edge = right_edge * (RE - LE) + LE
        gdata = dict(
            level=level, left_edge=left_edge, right_edge=right_edge, dimensions=dims
        )
        for f in fields:
            gdata[f] = prng.random_sample(dims)
        if particles:
            for i, f in enumerate(f"particle_position_{ax}" for ax in "xyz"):
                pdata = prng.random_sample(particles)
                # BUG FIX: map the unit-interval samples onto the grid by
                # *multiplying* by the grid width; the previous ``pdata /= ...``
                # squeezed positions into a sliver of the grid (or pushed them
                # outside it for axes narrower than one code unit).
                pdata *= right_edge[i] - left_edge[i]
                pdata += left_edge[i]
                gdata["io", f] = (pdata, "code_length")
            for f in (f"particle_velocity_{ax}" for ax in "xyz"):
                gdata["io", f] = (prng.random_sample(particles) - 0.5, "cm/s")
            gdata["io", "particle_mass"] = (prng.random_sample(particles), "g")
        data.append(gdata)
    bbox = np.array([LE, RE]).T
    return load_amr_grids(
        data, [32, 32, 32], geometry=geometry, bbox=bbox, length_unit=length_unit
    )
def fake_particle_ds(
    fields=(
        "particle_position_x",
        "particle_position_y",
        "particle_position_z",
        "particle_mass",
        "particle_velocity_x",
        "particle_velocity_y",
        "particle_velocity_z",
    ),
    units=("cm", "cm", "cm", "g", "cm/s", "cm/s", "cm/s"),
    negative=(False, False, False, False, True, True, True),
    npart=16 ** 3,
    length_unit=1.0,
    data=None,
):
    """Build an in-memory particle-only dataset with seeded random fields.

    Fields already present in *data* are kept verbatim; every other requested
    field is drawn uniformly in [0, 1) and shifted down by 0.5 where the
    matching *negative* flag is set.
    """
    from yt.loaders import load_particles
    prng = RandomState(0x4D3D3D3)
    if not iterable(negative):
        negative = [negative for f in fields]
    assert len(fields) == len(negative)
    # 0.5 offset centers samples on zero for "negative" fields.
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = data if data else {}
    for field, offset, u in zip(fields, offsets, units):
        if field in data:
            # Caller supplied this field; keep it untouched.
            v = data[field]
            continue
        if "position" in field:
            # NOTE(review): these clipped normal deviates are discarded -- the
            # uniform draw below unconditionally overwrites ``v``.  The draw
            # still advances the RNG stream, so removing it (or adding an
            # else-branch) would change every subsequently generated field;
            # confirm intent before "fixing" this.
            v = prng.normal(loc=0.5, scale=0.25, size=npart)
            np.clip(v, 0.0, 1.0, v)
        v = prng.random_sample(npart) - offset
        data[field] = (v, u)
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    ds = load_particles(data, 1.0, bbox=bbox)
    return ds
def fake_tetrahedral_ds():
    """Load the sample tetrahedral mesh with deterministic test fields.

    Node data is the squared distance of each node from the origin; element
    data is one seeded random number per element.
    """
    from yt.frontends.stream.sample_data.tetrahedral_mesh import (
        _connectivity,
        _coordinates,
    )
    from yt.loaders import load_unstructured_mesh

    prng = RandomState(0x4D3D3D3)
    # squared distance of every node from the origin
    dist = np.sum(_coordinates ** 2, 1)
    node_data = {("connect1", "test"): dist[_connectivity]}
    # one random value per element
    elem_data = {("connect1", "elem"): prng.rand(_connectivity.shape[0])}
    return load_unstructured_mesh(
        _connectivity, _coordinates, node_data=node_data, elem_data=elem_data
    )
def fake_hexahedral_ds():
    """Load the sample hexahedral mesh with deterministic test fields.

    The sample connectivity is 1-based, hence the ``- 1`` shifts when indexing
    and when handing the mesh to the loader.
    """
    from yt.frontends.stream.sample_data.hexahedral_mesh import (
        _connectivity,
        _coordinates,
    )
    from yt.loaders import load_unstructured_mesh

    prng = RandomState(0x4D3D3D3)
    # squared distance of every node from the origin
    dist = np.sum(_coordinates ** 2, 1)
    node_data = {("connect1", "test"): dist[_connectivity - 1]}
    # one random value per element
    elem_data = {("connect1", "elem"): prng.rand(_connectivity.shape[0])}
    return load_unstructured_mesh(
        _connectivity - 1, _coordinates, node_data=node_data, elem_data=elem_data
    )
def small_fake_hexahedral_ds():
    """Build a one-element hexahedral mesh spanning [-1, 0]^3."""
    from yt.loaders import load_unstructured_mesh

    _coordinates = np.array(
        [
            [-1.0, -1.0, -1.0],
            [0.0, -1.0, -1.0],
            [-0.0, 0.0, -1.0],
            [-1.0, -0.0, -1.0],
            [-1.0, -1.0, 0.0],
            [-0.0, -1.0, 0.0],
            [-0.0, 0.0, -0.0],
            [-1.0, 0.0, -0.0],
        ]
    )
    # A single hexahedral element; node ids are 1-based in this table.
    _connectivity = np.array([[1, 2, 3, 4, 5, 6, 7, 8]])
    # Node field: squared distance of each node from the origin.
    dist = np.sum(_coordinates ** 2, 1)
    node_data = {("connect1", "test"): dist[_connectivity - 1]}
    return load_unstructured_mesh(_connectivity - 1, _coordinates, node_data=node_data)
def fake_vr_orientation_test_ds(N=96, scale=1):
    """
    create a toy dataset that puts a sphere at (0,0,0), a single cube
    on +x, two cubes on +y, and three cubes on +z in a domain from
    [-1*scale,1*scale]**3. The lower planes
    (x = -1*scale, y = -1*scale, z = -1*scale) are also given non-zero
    values.
    This dataset allows you to easily explore orientations and
    handiness in VR and other renderings
    Parameters
    ----------
    N : integer
        The number of cells along each direction
    scale : float
        A spatial scale, the domain boundaries will be multiplied by scale to
        test datasets that have spatial different scales (e.g. data in CGS units)
    """
    from yt.loaders import load_uniform_grid
    xmin = ymin = zmin = -1.0 * scale
    xmax = ymax = zmax = 1.0 * scale
    # cell width
    dcoord = (xmax - xmin) / N
    # background value everywhere; the shapes below are painted on top
    arr = np.zeros((N, N, N), dtype=np.float64)
    arr[:, :, :] = 1.0e-4
    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
    # coordinates -- in the notation data[i, j, k]
    x = (np.arange(N) + 0.5) * dcoord + xmin
    y = (np.arange(N) + 0.5) * dcoord + ymin
    z = (np.arange(N) + 0.5) * dcoord + zmin
    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
    # sphere at the origin
    c = np.array([0.5 * (xmin + xmax), 0.5 * (ymin + ymax), 0.5 * (zmin + zmax)])
    r = np.sqrt((x3d - c[0]) ** 2 + (y3d - c[1]) ** 2 + (z3d - c[2]) ** 2)
    arr[r < 0.05] = 1.0
    # mark the three lower domain faces so handedness is visible
    arr[abs(x3d - xmin) < 2 * dcoord] = 0.3
    arr[abs(y3d - ymin) < 2 * dcoord] = 0.3
    arr[abs(z3d - zmin) < 2 * dcoord] = 0.3
    # single cube on +x
    xc = 0.75 * scale
    dx = 0.05 * scale
    idx = np.logical_and(
        np.logical_and(x3d > xc - dx, x3d < xc + dx),
        np.logical_and(
            np.logical_and(y3d > -dx, y3d < dx), np.logical_and(z3d > -dx, z3d < dx)
        ),
    )
    arr[idx] = 1.0
    # two cubes on +y
    dy = 0.05 * scale
    for yc in [0.65 * scale, 0.85 * scale]:
        idx = np.logical_and(
            np.logical_and(y3d > yc - dy, y3d < yc + dy),
            np.logical_and(
                np.logical_and(x3d > -dy, x3d < dy), np.logical_and(z3d > -dy, z3d < dy)
            ),
        )
        arr[idx] = 0.8
    # three cubes on +z
    dz = 0.05 * scale
    for zc in [0.5 * scale, 0.7 * scale, 0.9 * scale]:
        idx = np.logical_and(
            np.logical_and(z3d > zc - dz, z3d < zc + dz),
            np.logical_and(
                np.logical_and(x3d > -dz, x3d < dz), np.logical_and(y3d > -dz, y3d < dz)
            ),
        )
        arr[idx] = 0.6
    data = dict(density=(arr, "g/cm**3"))
    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
    return ds
def fake_sph_orientation_ds():
    """Returns an in-memory SPH dataset useful for testing

    This dataset should have one particle at the origin, one more particle
    along the x axis, two along y, and three along z. All particles will
    have non-overlapping smoothing regions with a radius of 0.25, masses of 1,
    and densities of 1, and zero velocity.
    """
    from yt import load_particles

    npart = 7
    # particle layout: origin, 1 on +x, 2 on +y, 3 on +z
    xs = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    ys = np.array([0.0, 0.0, 1.0, 2.0, 0.0, 0.0, 0.0])
    zs = np.array([0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0])
    data = {
        "particle_position_x": (xs, "cm"),
        "particle_position_y": (ys, "cm"),
        "particle_position_z": (zs, "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (0.25 * np.ones(npart), "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }
    # generous bounding box so all smoothing regions fit comfortably
    bbox = np.array([[-4, 4], [-4, 4], [-4, 4]])
    return load_particles(data=data, length_unit=1.0, bbox=bbox)
def fake_sph_grid_ds(hsml_factor=1.0):
    """Returns an in-memory SPH dataset useful for testing

    This dataset should have 27 particles with the particles arranged uniformly
    on a 3D grid. The bottom left corner is (0.5,0.5,0.5) and the top right
    corner is (2.5,2.5,2.5). All particles will have non-overlapping smoothing
    regions with a radius of 0.05, masses of 1, and densities of 1, and zero
    velocity.
    """
    from yt import load_particles

    npart = 27
    # Cell centers of a 3x3x3 lattice: coordinates i + 0.5 for i in {0, 1, 2},
    # enumerated with the x index varying slowest (n -> (n//9, (n//3)%3, n%3)).
    idx = np.arange(npart)
    x = idx // 9 + 0.5
    y = idx // 3 % 3 + 0.5
    z = idx % 3 + 0.5
    data = {
        "particle_position_x": (x, "cm"),
        "particle_position_y": (y, "cm"),
        "particle_position_z": (z, "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (0.05 * np.ones(npart) * hsml_factor, "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }
    bbox = np.array([[0, 3], [0, 3], [0, 3]])
    return load_particles(data=data, length_unit=1.0, bbox=bbox)
def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None):  # noqa B008
    """Recursively build a flat refinement mask for a random octree.

    The result is a depth-first list of booleans: ``True`` marks a cell that
    is refined into 8 subcells (which follow recursively), ``False`` marks a
    leaf.  Implementation adapted from:
    http://docs.hyperion-rt.org/en/stable/advanced/indepth_oct.html
    """
    if refined in (None, True):
        # Fresh call: start from a refined root cell.
        refined = [True]
    if not refined:
        # Explicitly-unrefined root: a single leaf.
        return [False]
    # Loop over subcells
    for _ in range(8):
        # Randomly decide whether this subcell is refined further (the 0.12
        # threshold just keeps the tree small on average).
        should_divide = prng.random_sample() < 0.12
        refined.append(should_divide)
        # Recurse into subdivided cells so their children follow depth-first.
        if should_divide:
            construct_octree_mask(prng, refined)
    return refined
def fake_octree_ds(
    prng=RandomState(0x4D3D3D3),  # noqa B008
    refined=None,
    quantities=None,
    bbox=None,
    sim_time=0.0,
    length_unit=None,
    mass_unit=None,
    time_unit=None,
    velocity_unit=None,
    magnetic_unit=None,
    periodicity=(True, True, True),
    over_refine_factor=1,
    partial_coverage=1,
    unit_system="cgs",
):
    """Build a small random octree dataset via ``load_octree``.

    The refinement mask comes from ``construct_octree_mask``; when
    *quantities* is None, random gas density/velocity fields with one value
    per leaf cell are generated.  All remaining keyword arguments are
    forwarded to ``load_octree``.
    """
    from yt.loaders import load_octree

    octree_mask = np.asarray(
        construct_octree_mask(prng=prng, refined=refined), dtype=np.uint8
    )
    # Count leaf (unrefined) cells.  BUG FIX: ``np.invert`` on a uint8 0/1
    # mask is a *bitwise* NOT (1 -> 254, 0 -> 255), which wildly inflated
    # this count; invert the boolean view so each leaf contributes exactly 1.
    particles = int(np.sum(np.invert(octree_mask.astype(bool))))
    if quantities is None:
        quantities = {}
        quantities[("gas", "density")] = prng.random_sample((particles, 1))
        quantities[("gas", "velocity_x")] = prng.random_sample((particles, 1))
        quantities[("gas", "velocity_y")] = prng.random_sample((particles, 1))
        quantities[("gas", "velocity_z")] = prng.random_sample((particles, 1))
    ds = load_octree(
        octree_mask=octree_mask,
        data=quantities,
        bbox=bbox,
        sim_time=sim_time,
        length_unit=length_unit,
        mass_unit=mass_unit,
        time_unit=time_unit,
        velocity_unit=velocity_unit,
        magnetic_unit=magnetic_unit,
        periodicity=periodicity,
        partial_coverage=partial_coverage,
        over_refine_factor=over_refine_factor,
        unit_system=unit_system,
    )
    return ds
def add_noise_fields(ds):
    """Add 4 classes of noise fields to a dataset

    All fields are cell-sampled and deterministic (fixed RNG seed):

    * ``noise0`` -- random 0/1 values
    * ``noise1`` -- strictly positive random values
    * ``noise2`` -- strictly negative random values
    * ``noise3`` -- random values with mixed signs in (-1, 1)
    """
    prng = RandomState(0x4D3D3D3)

    def _binary_noise(field, data):
        """random binary data"""
        # randint(0, 2) draws the exact same stream random_integers(0, 1)
        # did (random_integers is a deprecated alias for randint(low, high+1)),
        # without emitting a DeprecationWarning.
        return prng.randint(0, 2, data.size).astype("float64")

    def _positive_noise(field, data):
        """random strictly positive data"""
        # epsilon shift keeps values away from exactly zero
        return prng.random_sample(data.size) + 1e-16

    def _negative_noise(field, data):
        """random negative data"""
        return -prng.random_sample(data.size)

    def _even_noise(field, data):
        """random data with mixed signs"""
        return 2 * prng.random_sample(data.size) - 1

    ds.add_field("noise0", _binary_noise, sampling_type="cell")
    ds.add_field("noise1", _positive_noise, sampling_type="cell")
    ds.add_field("noise2", _negative_noise, sampling_type="cell")
    ds.add_field("noise3", _even_noise, sampling_type="cell")
def expand_keywords(keywords, full=False):
    """
    expand_keywords is a means for testing all possible keyword
    arguments in the nosetests. Simply pass it a dictionary of all the
    keyword arguments and all of the values for these arguments that you
    want to test.
    It will return a list of kwargs dicts containing combinations of
    the various kwarg values you passed it. These can then be passed
    to the appropriate function in nosetests.
    If full=True, then every possible combination of keywords is produced,
    otherwise, every keyword option is included at least once in the output
    list. Be careful, by using full=True, you may be in for an exponentially
    larger number of tests!

    Parameters
    ----------
    keywords : dict
        a dictionary where the keys are the keywords for the function,
        and the values of each key are the possible values that this key
        can take in the function
    full : bool
        if set to True, every possible combination of given keywords is
        returned

    Returns
    -------
    array of dicts
        An array of dictionaries to be individually passed to the appropriate
        function matching these kwargs.

    Examples
    --------
    >>> keywords = {}
    >>> keywords['dpi'] = (50, 100, 200)
    >>> keywords['cmap'] = ('arbre', 'kelp')
    >>> list_of_kwargs = expand_keywords(keywords)
    >>> print(list_of_kwargs)
    array([{'cmap': 'arbre', 'dpi': 50},
           {'cmap': 'kelp', 'dpi': 100},
           {'cmap': 'arbre', 'dpi': 200}], dtype=object)
    >>> list_of_kwargs = expand_keywords(keywords, full=True)
    >>> print(list_of_kwargs)
    array([{'cmap': 'arbre', 'dpi': 50},
           {'cmap': 'arbre', 'dpi': 100},
           {'cmap': 'arbre', 'dpi': 200},
           {'cmap': 'kelp', 'dpi': 50},
           {'cmap': 'kelp', 'dpi': 100},
           {'cmap': 'kelp', 'dpi': 200}], dtype=object)
    >>> for kwargs in list_of_kwargs:
    ...     write_projection(*args, **kwargs)
    """
    # if we want every possible combination of keywords, use iter magic
    if full:
        keys = sorted(keywords)
        list_of_kwarg_dicts = np.array(
            [
                dict(zip(keys, prod))
                for prod in it.product(*(keywords[key] for key in keys))
            ]
        )
    # if we just want to probe each keyword, but not necessarily every
    # combination
    else:
        # Determine the maximum number of values any of the keywords has.
        # BUG FIX: use the integer 1 (not the float 1.0) -- a float here made
        # ``range(num_lists)`` below raise TypeError whenever every keyword
        # value was a plain string.
        num_lists = 0
        for val in keywords.values():
            if isinstance(val, str):
                num_lists = max(1, num_lists)
            else:
                num_lists = max(len(val), num_lists)
        # Construct array of kwargs dicts, each element of the list is a
        # different **kwargs dict giving one combination of the possible
        # values of the kwargs.
        list_of_kwarg_dicts = np.array([dict() for _ in range(num_lists)])
        for i in range(num_lists):
            kwargs_i = {}
            for key, vals in keywords.items():
                # if it's a string, use it (there's only one)
                if isinstance(vals, str):
                    kwargs_i[key] = vals
                # if there are more options, use the i'th val
                elif i < len(vals):
                    kwargs_i[key] = vals[i]
                # if there are not more options, use the 0'th val
                else:
                    kwargs_i[key] = vals[0]
            list_of_kwarg_dicts[i] = kwargs_i
    return list_of_kwarg_dicts
def requires_module(module):
    """
    Decorator that takes a module name as an argument and tries to import it.
    If the module imports without issue, the function is returned, but if not,
    a null function is returned. This is so tests that depend on certain modules
    being imported will not fail if the module is not installed on the testing
    platform.
    """
    # unittest.SkipTest is the very class nose re-exports, and both nose and
    # pytest honor it -- importing it from the stdlib removes the runtime
    # dependency on the unmaintained nose package.
    from unittest import SkipTest

    def ffalse(func):
        @functools.wraps(func)
        def false_wrapper(*args, **kwargs):
            raise SkipTest

        return false_wrapper

    def ftrue(func):
        @functools.wraps(func)
        def true_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return true_wrapper

    try:
        importlib.import_module(module)
    except ImportError:
        return ffalse
    else:
        return ftrue
def requires_file(req_file):
    """Decorator: skip the wrapped test when *req_file* cannot be found,
    either as given or inside the configured yt test data directory.

    Under strict mode (``yt.__strict_requires``) a missing file raises
    ``FileNotFoundError`` instead of skipping.
    """
    # unittest.SkipTest is the class nose re-exports and pytest also honors;
    # using the stdlib avoids a runtime dependency on unmaintained nose.
    from unittest import SkipTest

    path = ytcfg.get("yt", "test_data_dir")

    def ffalse(func):
        @functools.wraps(func)
        def false_wrapper(*args, **kwargs):
            if ytcfg.getboolean("yt", "__strict_requires"):
                raise FileNotFoundError(req_file)
            raise SkipTest

        return false_wrapper

    def ftrue(func):
        @functools.wraps(func)
        def true_wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return true_wrapper

    # Accept the file either as given or relative to the test data directory.
    if os.path.exists(req_file) or os.path.exists(os.path.join(path, req_file)):
        return ftrue
    return ffalse
def disable_dataset_cache(func):
    """Decorator that disables the dataset cache while *func* runs, restoring
    the previous configuration afterwards (even if *func* raises)."""

    @functools.wraps(func)
    def newfunc(*args, **kwargs):
        restore_cfg_state = False
        if ytcfg.get("yt", "skip_dataset_cache") == "False":
            ytcfg["yt", "skip_dataset_cache"] = "True"
            # BUG FIX: this flag was never set to True, so the configuration
            # was never restored after the wrapped call returned.
            restore_cfg_state = True
        try:
            rv = func(*args, **kwargs)
        finally:
            # Restore even when func raises, so a failing test does not leak
            # the modified setting into subsequent tests.
            if restore_cfg_state:
                ytcfg["yt", "skip_dataset_cache"] = "False"
        return rv

    return newfunc
@disable_dataset_cache
def units_override_check(fn):
    """Verify that reloading *fn* with ``units_override`` reproduces the
    original dataset's unit attributes exactly."""
    units_list = ["length", "time", "mass", "velocity", "magnetic", "temperature"]
    # First pass: record the unit attributes and build the override mapping.
    ds1 = load(fn)
    units_override = {}
    attrs1 = []
    for u in units_list:
        unit_attr = getattr(ds1, f"{u}_unit", None)
        if unit_attr is not None:
            attrs1.append(unit_attr)
            units_override[f"{u}_unit"] = (unit_attr.v, unit_attr.units)
    # Drop the first dataset before reloading with the overrides applied.
    del ds1
    ds2 = load(fn, units_override=units_override)
    assert len(ds2.units_override) > 0
    # Second pass: the reloaded dataset must expose identical unit attributes.
    attrs2 = [
        unit_attr
        for unit_attr in (getattr(ds2, f"{u}_unit", None) for u in units_list)
        if unit_attr is not None
    ]
    assert_equal(attrs1, attrs2)
# This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or
# lower. It's just designed to give a sample AMR index to deal with.
# Each entry is [level, left_edge, right_edge, dimensions] with edges given
# as fractions of the unit cube (fake_amr_ds rescales them to the domain).
_amr_grid_index = [
    [0, [0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [32, 32, 32]],
    [1, [0.25, 0.21875, 0.25], [0.5, 0.5, 0.5], [16, 18, 16]],
    [1, [0.5, 0.21875, 0.25], [0.75, 0.5, 0.5], [16, 18, 16]],
    [1, [0.21875, 0.5, 0.25], [0.5, 0.75, 0.5], [18, 16, 16]],
    [1, [0.5, 0.5, 0.25], [0.75, 0.75, 0.5], [16, 16, 16]],
    [1, [0.25, 0.25, 0.5], [0.5, 0.5, 0.75], [16, 16, 16]],
    [1, [0.5, 0.25, 0.5], [0.75, 0.5, 0.75], [16, 16, 16]],
    [1, [0.25, 0.5, 0.5], [0.5, 0.75, 0.75], [16, 16, 16]],
    [1, [0.5, 0.5, 0.5], [0.75, 0.75, 0.75], [16, 16, 16]],
    [2, [0.5, 0.5, 0.5], [0.71875, 0.71875, 0.71875], [28, 28, 28]],
    [3, [0.5, 0.5, 0.5], [0.6640625, 0.65625, 0.6796875], [42, 40, 46]],
    [4, [0.5, 0.5, 0.5], [0.59765625, 0.6015625, 0.6015625], [50, 52, 52]],
    [2, [0.28125, 0.5, 0.5], [0.5, 0.734375, 0.71875], [28, 30, 28]],
    [3, [0.3359375, 0.5, 0.5], [0.5, 0.671875, 0.6640625], [42, 44, 42]],
    [4, [0.40625, 0.5, 0.5], [0.5, 0.59765625, 0.59765625], [48, 50, 50]],
    [2, [0.5, 0.28125, 0.5], [0.71875, 0.5, 0.71875], [28, 28, 28]],
    [3, [0.5, 0.3359375, 0.5], [0.671875, 0.5, 0.6640625], [44, 42, 42]],
    [4, [0.5, 0.40625, 0.5], [0.6015625, 0.5, 0.59765625], [52, 48, 50]],
    [2, [0.28125, 0.28125, 0.5], [0.5, 0.5, 0.71875], [28, 28, 28]],
    [3, [0.3359375, 0.3359375, 0.5], [0.5, 0.5, 0.671875], [42, 42, 44]],
    [
        4,
        [0.46484375, 0.37890625, 0.50390625],
        [0.4765625, 0.390625, 0.515625],
        [6, 6, 6],
    ],
    [4, [0.40625, 0.40625, 0.5], [0.5, 0.5, 0.59765625], [48, 48, 50]],
    [2, [0.5, 0.5, 0.28125], [0.71875, 0.71875, 0.5], [28, 28, 28]],
    [3, [0.5, 0.5, 0.3359375], [0.6796875, 0.6953125, 0.5], [46, 50, 42]],
    [4, [0.5, 0.5, 0.40234375], [0.59375, 0.6015625, 0.5], [48, 52, 50]],
    [2, [0.265625, 0.5, 0.28125], [0.5, 0.71875, 0.5], [30, 28, 28]],
    [3, [0.3359375, 0.5, 0.328125], [0.5, 0.65625, 0.5], [42, 40, 44]],
    [4, [0.40234375, 0.5, 0.40625], [0.5, 0.60546875, 0.5], [50, 54, 48]],
    [2, [0.5, 0.265625, 0.28125], [0.71875, 0.5, 0.5], [28, 30, 28]],
    [3, [0.5, 0.3203125, 0.328125], [0.6640625, 0.5, 0.5], [42, 46, 44]],
    [4, [0.5, 0.3984375, 0.40625], [0.546875, 0.5, 0.5], [24, 52, 48]],
    [4, [0.546875, 0.41796875, 0.4453125], [0.5625, 0.4375, 0.5], [8, 10, 28]],
    [4, [0.546875, 0.453125, 0.41796875], [0.5546875, 0.48046875, 0.4375], [4, 14, 10]],
    [4, [0.546875, 0.4375, 0.4375], [0.609375, 0.5, 0.5], [32, 32, 32]],
    [4, [0.546875, 0.4921875, 0.41796875], [0.56640625, 0.5, 0.4375], [10, 4, 10]],
    [
        4,
        [0.546875, 0.48046875, 0.41796875],
        [0.5703125, 0.4921875, 0.4375],
        [12, 6, 10],
    ],
    [4, [0.55859375, 0.46875, 0.43359375], [0.5703125, 0.48046875, 0.4375], [6, 6, 2]],
    [2, [0.265625, 0.28125, 0.28125], [0.5, 0.5, 0.5], [30, 28, 28]],
    [3, [0.328125, 0.3359375, 0.328125], [0.5, 0.5, 0.5], [44, 42, 44]],
    [4, [0.4140625, 0.40625, 0.40625], [0.5, 0.5, 0.5], [44, 48, 48]],
]
def check_results(func):
    r"""This is a decorator for a function to verify that the (numpy ndarray)
    result of a function is what it should be.

    This function is designed to be used for very light answer testing.
    Essentially, it wraps around a larger function that returns a numpy array,
    and that has results that should not change.  It is not necessarily used
    inside the testing scripts themselves, but inside testing scripts written
    by developers during the testing of pull requests and new functionality.
    If a hash is specified, it "wins" and the others are ignored.  Otherwise,
    tolerance is 1e-8 (just above single precision.)

    The correct results will be stored if the command line contains
    --answer-reference , and otherwise it will compare against the results on
    disk.  The filename will be func_results_ref_FUNCNAME.cpkl where FUNCNAME
    is the name of the function being tested.

    If you would like more control over the name of the pickle file the results
    are stored in, you can pass the result_basename keyword argument to the
    function you are testing.  The check_results decorator will use the value
    of the keyword to construct the filename of the results data file.  If
    result_basename is not specified, the name of the testing function is used.

    This will raise an exception if the results are not correct.

    Examples
    --------

    >>> @check_results
    ... def my_func(ds):
    ...     return ds.domain_width

    >>> my_func(ds)

    >>> @check_results
    ... def field_checker(dd, field_name):
    ...     return dd[field_name]

    >>> field_checker(ds.all_data(), 'density', result_basename='density')

    """

    def _as_ndarray(rv):
        # Strip units (converting to base units first) so the summary below
        # is computed on a plain ndarray; plain arrays pass through unchanged.
        if hasattr(rv, "convert_to_base"):
            rv.convert_to_base()
            return rv.ndarray_view()
        return rv

    def _summarize(arr):
        # Compact fingerprint of an array: scalar statistics plus an md5 of
        # the raw bytes.  NOTE: ndarray.tobytes() replaces the old
        # .tostring() alias, which was removed in NumPy 2.0.
        return (
            arr.min(),
            arr.max(),
            arr.std(dtype="float64"),
            arr.sum(dtype="float64"),
            arr.size,
            hashlib.md5(arr.tobytes()).hexdigest(),
        )

    def compute_results(func):
        # Wrapper used in --answer-reference mode: record the summary of the
        # decorated function's result to disk for later comparison.
        @functools.wraps(func)
        def _func(*args, **kwargs):
            name = kwargs.pop("result_basename", func.__name__)
            rv = func(*args, **kwargs)
            vals = _summarize(_as_ndarray(rv))
            fn = f"func_results_ref_{name}.cpkl"
            with open(fn, "wb") as f:
                pickle.dump(vals, f)
            return rv

        return _func

    from yt.mods import unparsed_args

    if "--answer-reference" in unparsed_args:
        return compute_results(func)

    def compare_results(func):
        # Wrapper used in comparison mode: recompute the summary and check it
        # element by element against the stored reference file.
        @functools.wraps(func)
        def _func(*args, **kwargs):
            name = kwargs.pop("result_basename", func.__name__)
            rv = func(*args, **kwargs)
            vals = _summarize(_as_ndarray(rv))
            fn = f"func_results_ref_{name}.cpkl"
            if not os.path.exists(fn):
                print("Answers need to be created with --answer-reference .")
                return False
            with open(fn, "rb") as f:
                ref = pickle.load(f)
            print(f"Sizes: {vals[4] == ref[4]} ({vals[4]}, {ref[4]})")
            assert_allclose(vals[0], ref[0], 1e-8, err_msg="min")
            assert_allclose(vals[1], ref[1], 1e-8, err_msg="max")
            assert_allclose(vals[2], ref[2], 1e-8, err_msg="std")
            assert_allclose(vals[3], ref[3], 1e-8, err_msg="sum")
            assert_equal(vals[4], ref[4])
            print("Hashes equal: %s" % (vals[-1] == ref[-1]))
            return rv

        return _func

    return compare_results(func)
def periodicity_cases(ds):
    """Yield sample points useful for exercising periodic wraparound.

    The first point is the exact domain midpoint; the remaining eight sit one
    cell in from each corner of the domain (one per octant).
    """
    # Domain midpoint first.
    yield (ds.domain_left_edge + ds.domain_right_edge) / 2.0
    cell_width = ds.domain_width / ds.domain_dimensions
    dims = ds.domain_dimensions
    # One cell in from every corner: indices 1 and dim-2 along each axis.
    for ii in (1, dims[0] - 2):
        for jj in (1, dims[1] - 2):
            for kk in (1, dims[2] - 2):
                yield cell_width * np.array([ii, jj, kk]) + ds.domain_left_edge
def run_nose(
    verbose=False,
    run_answer_tests=False,
    answer_big_data=False,
    call_pdb=False,
    module=None,
):
    """Run yt's test suite under nose.

    Parameters map to nose switches: ``verbose`` adds ``-v``,
    ``run_answer_tests`` enables the answer-testing plugin,
    ``answer_big_data`` additionally enables big-data answers, ``call_pdb``
    drops into the debugger on failures, and ``module`` restricts the run to
    a single module.
    """
    import sys

    from yt.utilities.logger import ytLogger as mylog
    from yt.utilities.on_demand_imports import _nose

    # Quiet the logger for the duration of the run; restored in the finally.
    orig_level = mylog.getEffectiveLevel()
    mylog.setLevel(50)

    # NOTE(review): ``nose_argv`` aliases ``sys.argv`` and ``+=`` extends it
    # in place, so sys.argv itself grows — preserved from the original, as
    # nose reads its arguments from there.
    nose_argv = sys.argv
    extra_args = ["--exclude=answer_testing", "--detailed-errors", "--exe"]
    if call_pdb:
        extra_args += ["--pdb", "--pdb-failures"]
    if verbose:
        extra_args.append("-v")
    if run_answer_tests:
        extra_args.append("--with-answer-testing")
    if answer_big_data:
        extra_args.append("--answer-big-data")
    if module:
        extra_args.append(module)
    nose_argv += extra_args

    initial_dir = os.getcwd()
    yt_file = os.path.abspath(__file__)
    yt_dir = os.path.dirname(yt_file)
    if os.path.samefile(os.path.dirname(yt_dir), initial_dir):
        # Provide a nice error message to work around nose bug
        # see https://github.com/nose-devs/nose/issues/701
        raise RuntimeError(
            """
    The yt.run_nose function does not work correctly when invoked in
    the same directory as the installed yt package. Try starting
    a python session in a different directory before invoking yt.run_nose
    again. Alternatively, you can also run the "nosetests" executable in
    the current directory like so:

        $ nosetests
            """
        )
    os.chdir(yt_dir)
    try:
        _nose.run(argv=nose_argv)
    finally:
        os.chdir(initial_dir)
        mylog.setLevel(orig_level)
def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs):
    """Raise an error if two objects are not equal up to desired tolerance

    This is a wrapper for :func:`numpy.testing.assert_allclose` that also
    verifies unit consistency

    Parameters
    ----------
    actual : array-like
        Array obtained (possibly with attached units)
    desired : array-like
        Array to compare with (possibly with attached units)
    rtol : float, optional
        Relative tolerance, defaults to 1e-7
    atol : float or quantity, optional
        Absolute tolerance. If units are attached, they must be consistent
        with the units of ``actual`` and ``desired``. If no units are attached,
        assumes the same units as ``desired``. Defaults to zero.

    Notes
    -----
    Also accepts additional keyword arguments accepted by
    :func:`numpy.testing.assert_allclose`, see the documentation of that
    function for details.
    """
    # Create a copy to ensure this function does not alter input arrays
    act = YTArray(actual)
    des = YTArray(desired)

    try:
        des = des.in_units(act.units)
    except UnitOperationError as e:
        raise AssertionError(
            "Units of actual (%s) and desired (%s) do not have "
            "equivalent dimensions" % (act.units, des.units)
        ) from e

    rt = YTArray(rtol)
    if not rt.units.is_dimensionless:
        raise AssertionError(f"Units of rtol ({rt.units}) are not dimensionless")

    if not isinstance(atol, YTArray):
        at = YTQuantity(atol, des.units)
    else:
        # BUG FIX: a unit-attached atol was previously never bound to ``at``,
        # causing a NameError below instead of a proper unit check.
        at = atol

    try:
        at = at.in_units(act.units)
    except UnitOperationError as e:
        raise AssertionError(
            "Units of atol (%s) and actual (%s) do not have "
            "equivalent dimensions" % (at.units, act.units)
        ) from e

    # units have been validated, so we strip units before calling numpy
    # to avoid spurious errors
    act = act.value
    des = des.value
    rt = rt.value
    at = at.value

    return assert_allclose(act, des, rt, at, **kwargs)
def assert_fname(fname):
    """Assert that *fname*'s content (magic bytes) matches its file extension.

    ``None`` is accepted and ignored.  Recognizes PNG, JPEG, EPS/PS, and PDF;
    anything else yields an empty detected type and fails the assertion.
    """
    if fname is None:
        return

    with open(fname, "rb") as fimg:
        data = fimg.read()

    # Fixed-prefix signatures; EPS/PS needs extra inspection and is handled
    # separately below.
    signatures = (
        # see http://www.w3.org/TR/PNG/#5PNG-file-signature
        (b"\211PNG\r\n\032\n", ".png"),
        # see http://www.mathguide.de/info/tools/media-types/image/jpeg
        (b"\377\330", ".jpeg"),
        (b"%PDF", ".pdf"),
    )
    image_type = ""
    for magic, ext in signatures:
        if data.startswith(magic):
            image_type = ext
            break
    else:
        if data.startswith(b"%!PS-Adobe"):
            # PostScript: EPS if the first line advertises EPSF, plain PS otherwise.
            data_str = data.decode("utf-8", "ignore")
            first_line = data_str[: data_str.index("\n")]
            image_type = ".eps" if "EPSF" in first_line else ".ps"

    extension = os.path.splitext(fname)[1]
    assert image_type == extension, (
        "Expected an image of type '%s' but '%s' is an image of type '%s'"
        % (extension, fname, image_type)
    )
def requires_backend(backend):
    """Decorator factory that gates a test on the active matplotlib backend.

    If ``backend`` matches ``matplotlib.get_backend()`` (case-insensitively)
    the decorated function runs unchanged; otherwise it is replaced by a
    skipping function (under pytest) or a no-op.

    Parameters
    ----------
    backend : String
        The value which is compared with the current matplotlib backend in use.

    Returns
    -------
    Decorated function or null function
    """
    import pytest

    def ftrue(func):
        # Backend matches: pass the test through untouched.
        return func

    def ffalse(func):
        # returning a lambda : None causes an error when using pytest. Having
        # a function (skip) that returns None does work, but pytest marks the
        # test as having passed, which seems bad, since it wasn't actually run.
        # Using pytest.skip() means that a change to test_requires_backend was
        # needed since None is no longer returned, so we check for the skip
        # exception in the xfail case for that test
        def skip(*args, **kwargs):
            msg = f"`{backend}` backend not found, skipping: `{func.__name__}`"
            print(msg)
            pytest.skip(msg)

        if ytcfg.getboolean("yt", "__withinpytest"):
            return skip
        return lambda: None

    if backend.lower() == matplotlib.get_backend().lower():
        return ftrue
    return ffalse
class TempDirTest(unittest.TestCase):
    """Base class for tests that should execute inside a scratch directory.

    ``setUp`` moves the process into a fresh temporary directory and
    ``tearDown`` moves back out and deletes it, so tests may write files
    freely without polluting the working tree.
    """

    def setUp(self):
        # Remember where we started so tearDown can restore it.
        self.curdir = os.getcwd()
        self.tmpdir = tempfile.mkdtemp()
        os.chdir(self.tmpdir)

    def tearDown(self):
        # Leave the scratch directory before removing it.
        os.chdir(self.curdir)
        shutil.rmtree(self.tmpdir)
class ParticleSelectionComparison:
"""
This is a test helper class that takes a particle dataset, caches the
particles it has on disk (manually reading them using lower-level IO
routines) and then received a data object that it compares against manually
running the data object's selection routines. All supplied data objects
must be created from the input dataset.
"""
    def __init__(self, ds):
        """Cache every particle position (and SPH smoothing length) in *ds*.

        Builds ``self.particles[ptype]`` as one concatenated position array
        per particle type by reading each data file through the dataset
        index's IO handler, and ``self.hsml[ptype]`` for SPH particle types.
        """
        self.ds = ds
        # Construct an index so that we get all the data_files
        ds.index
        particles = {}
        # hsml is the smoothing length we use for radial selection
        hsml = {}
        for data_file in ds.index.data_files:
            for ptype, pos_arr in ds.index.io._yield_coordinates(data_file):
                particles.setdefault(ptype, []).append(pos_arr)
                # Only SPH particle types carry a smoothing length.
                if ptype in getattr(ds, "_sph_ptypes", ()):
                    hsml.setdefault(ptype, []).append(
                        ds.index.io._get_smoothing_length(
                            data_file, pos_arr.dtype, pos_arr.shape
                        )
                    )
        # Merge the per-file chunks into one contiguous array per type.
        for ptype in particles:
            particles[ptype] = np.concatenate(particles[ptype])
            if ptype in hsml:
                hsml[ptype] = np.concatenate(hsml[ptype])
        self.particles = particles
        self.hsml = hsml
    def compare_dobj_selection(self, dobj):
        """Check *dobj*'s selector against the cached particle positions.

        For each particle type, runs the data object's point selector over
        the cached positions and asserts the selected positions match what
        the data object itself reads chunk-by-chunk.
        """
        for ptype in sorted(self.particles):
            x, y, z = self.particles[ptype].T
            # Set our radii to zero for now, I guess?
            radii = self.hsml.get(ptype, 0.0)
            sel_index = dobj.selector.select_points(x, y, z, radii)
            if sel_index is None:
                # Selector matched nothing; compare against an empty (0, 3).
                sel_pos = np.empty((0, 3))
            else:
                sel_pos = self.particles[ptype][sel_index, :]
            # Gather the same positions via the data object's own IO path.
            obj_results = []
            for chunk in dobj.chunks([], "io"):
                obj_results.append(chunk[ptype, "particle_position"])
            if any(_.size > 0 for _ in obj_results):
                obj_results = np.concatenate(obj_results, axis=0)
            else:
                obj_results = np.empty((0, 3))
            # Sometimes we get unitary scaling or other floating point noise. 5
            # NULP should be OK. This is mostly for stuff like Rockstar, where
            # the f32->f64 casting happens at different places depending on
            # which code path we use.
            assert_array_almost_equal_nulp(sel_pos, obj_results, 5)
    def run_defaults(self):
        """
        This runs lots of samples that touch different types of wraparounds.

        Specifically, it does:

        * sphere in center with radius 0.1 unitary
        * sphere in center with radius 0.2 unitary
        * sphere in each of the eight corners of the domain with radius 0.1 unitary
        * sphere in center with radius 0.5 unitary
        * box that covers 0.1 .. 0.9
        * box from 0.8 .. 0.85
        * box from 0.3..0.6, 0.2..0.8, 0.0..0.1
        """
        sp1 = self.ds.sphere("c", (0.1, "unitary"))
        self.compare_dobj_selection(sp1)

        sp2 = self.ds.sphere("c", (0.2, "unitary"))
        self.compare_dobj_selection(sp2)

        # Sphere centers near each of the eight corners (in unitary offsets
        # from the left edge), where periodic wraparound matters most.
        centers = [
            [0.04, 0.5, 0.5],
            [0.5, 0.04, 0.5],
            [0.5, 0.5, 0.04],
            [0.04, 0.04, 0.04],
            [0.96, 0.5, 0.5],
            [0.5, 0.96, 0.5],
            [0.5, 0.5, 0.96],
            [0.96, 0.96, 0.96],
        ]
        r = self.ds.quan(0.1, "unitary")
        for center in centers:
            c = self.ds.arr(center, "unitary") + self.ds.domain_left_edge.in_units(
                "unitary"
            )
            if not all(self.ds.periodicity):
                # filter out the periodic bits for non-periodic datasets
                if any(c - r < self.ds.domain_left_edge) or any(
                    c + r > self.ds.domain_right_edge
                ):
                    continue
            sp = self.ds.sphere(c, (0.1, "unitary"))
            self.compare_dobj_selection(sp)

        # A sphere large enough to wrap around in a unit domain.
        sp = self.ds.sphere("c", (0.5, "unitary"))
        self.compare_dobj_selection(sp)

        dd = self.ds.all_data()
        self.compare_dobj_selection(dd)

        # This is in raw numbers, so we can offset for the left edge
        LE = self.ds.domain_left_edge.in_units("unitary").d
        reg1 = self.ds.r[
            (0.1 + LE[0], "unitary") : (0.9 + LE[0], "unitary"),
            (0.1 + LE[1], "unitary") : (0.9 + LE[1], "unitary"),
            (0.1 + LE[2], "unitary") : (0.9 + LE[2], "unitary"),
        ]
        self.compare_dobj_selection(reg1)

        reg2 = self.ds.r[
            (0.8 + LE[0], "unitary") : (0.85 + LE[0], "unitary"),
            (0.8 + LE[1], "unitary") : (0.85 + LE[1], "unitary"),
            (0.8 + LE[2], "unitary") : (0.85 + LE[2], "unitary"),
        ]
        self.compare_dobj_selection(reg2)

        # Anisotropic box exercising each axis differently.
        reg3 = self.ds.r[
            (0.3 + LE[0], "unitary") : (0.6 + LE[0], "unitary"),
            (0.2 + LE[1], "unitary") : (0.8 + LE[1], "unitary"),
            (0.0 + LE[2], "unitary") : (0.1 + LE[2], "unitary"),
        ]
        self.compare_dobj_selection(reg3)
Examples -------- >>> l, r, lvl = amrspace([0.0, 2.0, 1.0, 2.0, 0.0, 3.14], levels=(3,3,0), cells=2) >>> print l [[ 0. 1. 0. ] [ 0.25 1. 0. ] [ 0. 1.125 0. ] [ 0.25 1.125 0. ] [ 0.5 1. 0. ] [ 0. 1.25 0. ] [ 0.5 1.25 0. ] [ 1. 1. 0. ] [ 0. 1.5 0. ] [ 1. 1.5 0. ]] # fill zero dims # fill non-zero dims # These are the bounds we want. Cartesian we just assume goes 0 .. 1. # rzt # rtz # latlonalt # latlondep # the distance from the origin # each element gets a random number # the distance from the origin # each element gets a random number # the distance from the origin create a toy dataset that puts a sphere at (0,0,0), a single cube on +x, two cubes on +y, and three cubes on +z in a domain from [-1*scale,1*scale]**3. The lower planes (x = -1*scale, y = -1*scale, z = -1*scale) are also given non-zero values. This dataset allows you to easily explore orientations and handiness in VR and other renderings Parameters ---------- N : integer The number of cells along each direction scale : float A spatial scale, the domain boundaries will be multiplied by scale to test datasets that have spatial different scales (e.g. data in CGS units) # coordinates -- in the notation data[i, j, k] # sphere at the origin # single cube on +x # two cubes on +y # three cubes on +z Returns an in-memory SPH dataset useful for testing This dataset should have one particle at the origin, one more particle along the x axis, two along y, and three along z. All particles will have non-overlapping smoothing regions with a radius of 0.25, masses of 1, and densities of 1, and zero velocity. # one particle at the origin, one particle along x-axis, two along y, # three along z Returns an in-memory SPH dataset useful for testing This dataset should have 27 particles with the particles arranged uniformly on a 3D grid. The bottom left corner is (0.5,0.5,0.5) and the top right corner is (2.5,2.5,2.5). 
All particles will have non-overlapping smoothing regions with a radius of 0.05, masses of 1, and densities of 1, and zero velocity. # noqa B008 # Implementation taken from url: # http://docs.hyperion-rt.org/en/stable/advanced/indepth_oct.html # Loop over subcells # Insert criterion for whether cell should be sub-divided. Here we # just use a random number to demonstrate. # Append boolean to overall list # If the cell is sub-divided, recursively divide it further # noqa B008 Add 4 classes of noise fields to a dataset random binary data random strictly positive data random negative data random data with mixed signs expand_keywords is a means for testing all possible keyword arguments in the nosetests. Simply pass it a dictionary of all the keyword arguments and all of the values for these arguments that you want to test. It will return a list of kwargs dicts containing combinations of the various kwarg values you passed it. These can then be passed to the appropriate function in nosetests. If full=True, then every possible combination of keywords is produced, otherwise, every keyword option is included at least once in the output list. Be careful, by using full=True, you may be in for an exponentially larger number of tests! Parameters ---------- keywords : dict a dictionary where the keys are the keywords for the function, and the values of each key are the possible values that this key can take in the function full : bool if set to True, every possible combination of given keywords is returned Returns ------- array of dicts An array of dictionaries to be individually passed to the appropriate function matching these kwargs. 
Examples -------- >>> keywords = {} >>> keywords['dpi'] = (50, 100, 200) >>> keywords['cmap'] = ('arbre', 'kelp') >>> list_of_kwargs = expand_keywords(keywords) >>> print(list_of_kwargs) array([{'cmap': 'arbre', 'dpi': 50}, {'cmap': 'kelp', 'dpi': 100}, {'cmap': 'arbre', 'dpi': 200}], dtype=object) >>> list_of_kwargs = expand_keywords(keywords, full=True) >>> print(list_of_kwargs) array([{'cmap': 'arbre', 'dpi': 50}, {'cmap': 'arbre', 'dpi': 100}, {'cmap': 'arbre', 'dpi': 200}, {'cmap': 'kelp', 'dpi': 50}, {'cmap': 'kelp', 'dpi': 100}, {'cmap': 'kelp', 'dpi': 200}], dtype=object) >>> for kwargs in list_of_kwargs: ... write_projection(*args, **kwargs) # if we want every possible combination of keywords, use iter magic # if we just want to probe each keyword, but not necessarily every # combination # Determine the maximum number of values any of the keywords has # Construct array of kwargs dicts, each element of the list is a different # **kwargs dict. each kwargs dict gives a different combination of # the possible values of the kwargs # initialize array # fill in array # if it's a string, use it (there's only one) # if there are more options, use the i'th val # if there are not more options, use the 0'th val Decorator that takes a module name as an argument and tries to import it. If the module imports without issue, the function is returned, but if not, a null function is returned. This is so tests that depend on certain modules being imported will not fail if the module is not installed on the testing platform. # This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or # lower. It's just designed to give a sample AMR index to deal with. This is a decorator for a function to verify that the (numpy ndarray) result of a function is what it should be. This function is designed to be used for very light answer testing. Essentially, it wraps around a larger function that returns a numpy array, and that has results that should not change. 
It is not necessarily used inside the testing scripts themselves, but inside testing scripts written by developers during the testing of pull requests and new functionality. If a hash is specified, it "wins" and the others are ignored. Otherwise, tolerance is 1e-8 (just above single precision.) The correct results will be stored if the command line contains --answer-reference , and otherwise it will compare against the results on disk. The filename will be func_results_ref_FUNCNAME.cpkl where FUNCNAME is the name of the function being tested. If you would like more control over the name of the pickle file the results are stored in, you can pass the result_basename keyword argument to the function you are testing. The check_results decorator will use the value of the keyword to construct the filename of the results data file. If result_basename is not specified, the name of the testing function is used. This will raise an exception if the results are not correct. Examples -------- >>> @check_results ... def my_func(ds): ... return ds.domain_width >>> my_func(ds) >>> @check_results ... def field_checker(dd, field_name): ... return dd[field_name] >>> field_checker(ds.all_data(), 'density', result_basename='density') # This is a generator that yields things near the corners. It's good for # getting different places to check periodicity. # We start one dx in, and only go to one in as well. # Provide a nice error message to work around nose bug # see https://github.com/nose-devs/nose/issues/701 The yt.run_nose function does not work correctly when invoked in the same directory as the installed yt package. Try starting a python session in a different directory before invoking yt.run_nose again. 
Alternatively, you can also run the "nosetests" executable in the current directory like so: $ nosetests Raise an error if two objects are not equal up to desired tolerance This is a wrapper for :func:`numpy.testing.assert_allclose` that also verifies unit consistency Parameters ---------- actual : array-like Array obtained (possibly with attached units) desired : array-like Array to compare with (possibly with attached units) rtol : float, optional Relative tolerance, defaults to 1e-7 atol : float or quantity, optional Absolute tolerance. If units are attached, they must be consistent with the units of ``actual`` and ``desired``. If no units are attached, assumes the same units as ``desired``. Defaults to zero. Notes ----- Also accepts additional keyword arguments accepted by :func:`numpy.testing.assert_allclose`, see the documentation of that function for details. # Create a copy to ensure this function does not alter input arrays # units have been validated, so we strip units before calling numpy # to avoid spurious errors Function that checks file type using libmagic # see http://www.w3.org/TR/PNG/#5PNG-file-signature # see http://www.mathguide.de/info/tools/media-types/image/jpeg Decorator to check for a specified matplotlib backend. This decorator returns the decorated function if the specified `backend` is same as of `matplotlib.get_backend()`, otherwise returns null function. It could be used to execute function only when a particular `backend` of matplotlib is being used. Parameters ---------- backend : String The value which is compared with the current matplotlib backend in use. Returns ------- Decorated function or null function # returning a lambda : None causes an error when using pytest. Having # a function (skip) that returns None does work, but pytest marks the # test as having passed, which seems bad, since it wasn't actually run. 
# Using pytest.skip() means that a change to test_requires_backend was # needed since None is no longer returned, so we check for the skip # exception in the xfail case for that test A test class that runs in a temporary directory and removes it afterward. This is a test helper class that takes a particle dataset, caches the particles it has on disk (manually reading them using lower-level IO routines) and then received a data object that it compares against manually running the data object's selection routines. All supplied data objects must be created from the input dataset. # Construct an index so that we get all the data_files # hsml is the smoothing length we use for radial selection # Set our radii to zero for now, I guess? # Sometimes we get unitary scaling or other floating point noise. 5 # NULP should be OK. This is mostly for stuff like Rockstar, where # the f32->f64 casting happens at different places depending on # which code path we use. This runs lots of samples that touch different types of wraparounds. Specifically, it does: * sphere in center with radius 0.1 unitary * sphere in center with radius 0.2 unitary * sphere in each of the eight corners of the domain with radius 0.1 unitary * sphere in center with radius 0.5 unitary * box that covers 0.1 .. 0.9 * box from 0.8 .. 0.85 * box from 0.3..0.6, 0.2..0.8, 0.0..0.1 # filter out the periodic bits for non-periodic datasets # This is in raw numbers, so we can offset for the left edge | 2.279377 | 2 |
fastestimator/architecture/retinanet.py | fastestimator-util/test_nightly | 0 | 6625533 | <filename>fastestimator/architecture/retinanet.py
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import layers, models
def classification_sub_net(num_classes, num_anchor=9):
"""Creates an object classification sub-network for the RetinaNet.
Args:
num_classes (int): number of classes.
num_anchor (int, optional): number of anchor boxes. Defaults to 9.
Returns:
'Model' object: classification sub-network.
"""
model = models.Sequential()
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(num_classes * num_anchor,
kernel_size=3,
strides=1,
padding='same',
activation='sigmoid',
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
bias_initializer=tf.initializers.constant(np.log(1 / 99))))
model.add(layers.Reshape((-1, num_classes))) # the output dimension is [batch, #anchor, #classes]
return model
def regression_sub_net(num_anchor=9):
"""Creates a regression sub-network for the RetinaNet.
Args:
num_anchor (int, optional): number of anchor boxes. Defaults to 9.
Returns:
'Model' object: regression sub-network.
"""
model = models.Sequential()
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(256,
kernel_size=3,
strides=1,
padding='same',
activation='relu',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(
layers.Conv2D(4 * num_anchor,
kernel_size=3,
strides=1,
padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
model.add(layers.Reshape((-1, 4))) # the output dimension is [batch, #anchor, 4]
return model
def RetinaNet(input_shape, num_classes, num_anchor=9):
"""Creates the RetinaNet. RetinaNet is composed of an FPN, a classification sub-network and a localization
regression sub-network.
Args:
input_shape (tuple): shape of input image.
num_classes (int): number of classes.
num_anchor (int, optional): number of anchor boxes. Defaults to 9.
Returns:
'Model' object: RetinaNet.
"""
inputs = tf.keras.Input(shape=input_shape)
# FPN
resnet50 = tf.keras.applications.ResNet50(weights="imagenet", include_top=False, input_tensor=inputs, pooling=None)
assert resnet50.layers[80].name == "conv3_block4_out"
C3 = resnet50.layers[80].output
assert resnet50.layers[142].name == "conv4_block6_out"
C4 = resnet50.layers[142].output
assert resnet50.layers[-1].name == "conv5_block3_out"
C5 = resnet50.layers[-1].output
P5 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C5)
P5_upsampling = layers.UpSampling2D()(P5)
P4 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C4)
P4 = layers.Add()([P5_upsampling, P4])
P4_upsampling = layers.UpSampling2D()(P4)
P3 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C3)
P3 = layers.Add()([P4_upsampling, P3])
P6 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P6")(C5)
P7 = layers.Activation('relu')(P6)
P7 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P7")(P7)
P5 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P5")(P5)
P4 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P4")(P4)
P3 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P3")(P3)
# classification subnet
cls_subnet = classification_sub_net(num_classes=num_classes, num_anchor=num_anchor)
P3_cls = cls_subnet(P3)
P4_cls = cls_subnet(P4)
P5_cls = cls_subnet(P5)
P6_cls = cls_subnet(P6)
P7_cls = cls_subnet(P7)
cls_output = layers.Concatenate(axis=-2)([P3_cls, P4_cls, P5_cls, P6_cls, P7_cls])
# localization subnet
loc_subnet = regression_sub_net(num_anchor=num_anchor)
P3_loc = loc_subnet(P3)
P4_loc = loc_subnet(P4)
P5_loc = loc_subnet(P5)
P6_loc = loc_subnet(P6)
P7_loc = loc_subnet(P7)
loc_output = layers.Concatenate(axis=-2)([P3_loc, P4_loc, P5_loc, P6_loc, P7_loc])
return tf.keras.Model(inputs=inputs, outputs=[cls_output, loc_output])
def get_fpn_anchor_box(input_shape):
"""Returns the anchor boxes of the Feature Pyramid Net.
Args:
input_shape (tuple): shape of input image.
Returns:
array: numpy array with all anchor boxes.
"""
assert len(input_shape) == 3
h, w, _ = input_shape
assert h % 32 == 0 and w % 32 == 0
shapes = [(int(h / 8), int(w / 8))] # P3
num_pixel = np.prod(shapes)
for _ in range(4): # P4 through P7
shapes.append((int(np.ceil(shapes[-1][0] / 2)), int(np.ceil(shapes[-1][1] / 2))))
num_pixel += np.prod(shapes[-1])
anchorbox = np.zeros((9 * num_pixel, 4))
base_multipliers = [2**(0.0), 2**(1 / 3), 2**(2 / 3)]
aspect_ratio_multiplier = [(1.0, 1.0), (2.0, 1.0), (1.0, 2.0)]
anchor_idx = 0
for shape in shapes:
p_h, p_w = shape
base_y = 1 / p_h
base_x = 1 / p_w
for i in range(p_h):
for j in range(p_w):
for base_multiplier in base_multipliers:
for aspect_x, aspect_y in aspect_ratio_multiplier:
center_y = (i + 1 / 2) * base_y
center_x = (j + 1 / 2) * base_x
anchorbox[anchor_idx, 0] = max(center_x - base_x * base_multiplier * aspect_x, 0.0) # x1
anchorbox[anchor_idx, 1] = max(center_y - base_y * base_multiplier * aspect_y, 0.0) # y1
anchorbox[anchor_idx, 2] = min(center_x + base_x * base_multiplier * aspect_x, 1.0) # x2
anchorbox[anchor_idx, 3] = min(center_y + base_y * base_multiplier * aspect_y, 1.0) # y2
anchor_idx += 1
if p_h == 1 and p_w == 1: # the next level of 1x1 feature map is still 1x1, therefore ignore
break
return np.float32(anchorbox)
def get_target(anchorbox, label, x1, y1, x2, y2, num_classes=10):
    """Generates classification and localization ground-truths.
    Args:
        anchorbox (array): anchor boxes, shape (num_anchor, 4) as (x1, y1, x2, y2).
        label (array): class label of each ground-truth box.
        x1 (array): x-coordinate of top left point of the box.
        y1 (array): y-coordinate of top left point of the box.
        x2 (array): x-coordinate of bottom right point of the box.
        y2 (array): y-coordinate of bottom right point of the box.
        num_classes (int, optional): number of classes (currently unused). Defaults to 10.
    Returns:
        array: classification groundtruths per anchor (-1 background, -2 ignored,
            otherwise the matched label).
        array: localization groundtruths (corner offsets) per anchor.
    """
    num_anchor = anchorbox.shape[0]
    target_cls = np.zeros(shape=(num_anchor), dtype=np.int64)
    target_loc = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
    for _label, _x1, _y1, _x2, _y2 in zip(label, x1, y1, x2, y2):
        best_iou = 0.0
        for anchor_idx in range(num_anchor):
            iou = get_iou((_x1, _y1, _x2, _y2), anchorbox[anchor_idx])
            if iou > best_iou:
                best_iou = iou
                best_anchor_idx = anchor_idx
            # NOTE(review): every ground-truth box rewrites every anchor's class, so a
            # later low-IoU box can overwrite an earlier box's positive/ignore
            # assignment for the same anchor -- confirm this order dependence is intended.
            if iou > 0.5:
                target_cls[anchor_idx] = _label
                target_loc[anchor_idx] = get_loc_offset((_x1, _y1, _x2, _y2), anchorbox[anchor_idx])
            elif iou > 0.4:
                target_cls[anchor_idx] = -2  # ignore this example
            else:
                target_cls[anchor_idx] = -1  # background class
        # Fallback so every ground-truth box claims at least its best anchor.
        if best_iou > 0 and best_iou < 0.5:  # if gt has no >0.5 iou with any anchor
            target_cls[best_anchor_idx] = _label
            target_loc[best_anchor_idx] = get_loc_offset((_x1, _y1, _x2, _y2), anchorbox[best_anchor_idx])
    return target_cls, target_loc
def get_loc_offset(box_gt, box_anchor):
    """Compute normalized corner offsets between a ground-truth box and an anchor box.

    Args:
        box_gt (array): ground-truth box as (x1, y1, x2, y2).
        box_anchor (array): anchor box as (x1, y1, x2, y2).

    Returns:
        tuple: (dx1, dy1, dx2, dy2), each corner delta normalized by the
        anchor's width (x corners) or height (y corners).
    """
    gt = tuple(box_gt)
    anchor = tuple(box_anchor)
    width = anchor[2] - anchor[0]
    height = anchor[3] - anchor[1]
    # Normalize each corner delta by the matching anchor dimension.
    scales = (width, height, width, height)
    dx1, dy1, dx2, dy2 = ((g - a) / s for g, a, s in zip(gt, anchor, scales))
    return dx1, dy1, dx2, dy2
def get_iou(box1, box2):
    """Compute the intersection-over-union (IoU) of two axis-aligned boxes.

    Args:
        box1 (array): first box as (x1, y1, x2, y2).
        box2 (array): second box as (x1, y1, x2, y2).

    Returns:
        float: IoU value; 0 when the boxes do not overlap.
    """
    a_x1, a_y1, a_x2, a_y2 = tuple(box1)
    b_x1, b_y1, b_x2, b_y2 = tuple(box2)
    # Overlap extent along each axis, clamped at zero when the boxes are disjoint.
    overlap_w = max(0, min(a_x2, b_x2) - max(a_x1, b_x1))
    overlap_h = max(0, min(a_y2, b_y2) - max(a_y1, b_y1))
    inter = overlap_w * overlap_h
    if inter == 0:
        return 0
    area_a = (a_x2 - a_x1) * (a_y2 - a_y1)
    area_b = (b_x2 - b_x1) * (b_y2 - b_y1)
    return inter / (area_a + area_b - inter)
| <filename>fastestimator/architecture/retinanet.py
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import layers, models
def classification_sub_net(num_classes, num_anchor=9):
    """Creates an object classification sub-network for the RetinaNet.

    Args:
        num_classes (int): number of classes.
        num_anchor (int, optional): number of anchor boxes. Defaults to 9.

    Returns:
        'Model' object: classification sub-network.
    """
    model = models.Sequential()
    # Four identical 3x3 conv blocks with 256 filters and ReLU activation.
    for _ in range(4):
        model.add(
            layers.Conv2D(256,
                          kernel_size=3,
                          strides=1,
                          padding='same',
                          activation='relu',
                          kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
    # Prediction head: one sigmoid output per (anchor, class) pair. The bias is
    # initialized to log(1/99) so the initial foreground probability is ~0.01.
    model.add(
        layers.Conv2D(num_classes * num_anchor,
                      kernel_size=3,
                      strides=1,
                      padding='same',
                      activation='sigmoid',
                      kernel_initializer=tf.random_normal_initializer(stddev=0.01),
                      bias_initializer=tf.initializers.constant(np.log(1 / 99))))
    model.add(layers.Reshape((-1, num_classes)))  # the output dimension is [batch, #anchor, #classes]
    return model
def regression_sub_net(num_anchor=9):
    """Creates a regression sub-network for the RetinaNet.

    Args:
        num_anchor (int, optional): number of anchor boxes. Defaults to 9.

    Returns:
        'Model' object: regression sub-network.
    """
    model = models.Sequential()
    # Four identical 3x3 conv blocks with 256 filters and ReLU activation.
    for _ in range(4):
        model.add(
            layers.Conv2D(256,
                          kernel_size=3,
                          strides=1,
                          padding='same',
                          activation='relu',
                          kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
    # Prediction head: 4 box-offset values per anchor, linear (no activation).
    model.add(
        layers.Conv2D(4 * num_anchor,
                      kernel_size=3,
                      strides=1,
                      padding='same',
                      kernel_initializer=tf.random_normal_initializer(stddev=0.01)))
    model.add(layers.Reshape((-1, 4)))  # the output dimension is [batch, #anchor, 4]
    return model
def RetinaNet(input_shape, num_classes, num_anchor=9):
    """Creates the RetinaNet. RetinaNet is composed of an FPN, a classification sub-network and a localization
    regression sub-network.
    Args:
        input_shape (tuple): shape of input image.
        num_classes (int): number of classes.
        num_anchor (int, optional): number of anchor boxes. Defaults to 9.
    Returns:
        'Model' object: RetinaNet.
    """
    inputs = tf.keras.Input(shape=input_shape)
    # FPN
    # ResNet50 backbone; the asserts pin the layer indices of the C3/C4/C5 stage
    # outputs, which depend on the exact Keras ResNet50 implementation version.
    resnet50 = tf.keras.applications.ResNet50(weights="imagenet", include_top=False, input_tensor=inputs, pooling=None)
    assert resnet50.layers[80].name == "conv3_block4_out"
    C3 = resnet50.layers[80].output
    assert resnet50.layers[142].name == "conv4_block6_out"
    C4 = resnet50.layers[142].output
    assert resnet50.layers[-1].name == "conv5_block3_out"
    C5 = resnet50.layers[-1].output
    # Top-down pathway with lateral 1x1 connections: P5 -> P4 -> P3.
    P5 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C5)
    P5_upsampling = layers.UpSampling2D()(P5)
    P4 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C4)
    P4 = layers.Add()([P5_upsampling, P4])
    P4_upsampling = layers.UpSampling2D()(P4)
    P3 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C3)
    P3 = layers.Add()([P4_upsampling, P3])
    # Extra coarse levels: P6 is a stride-2 conv on C5, P7 a stride-2 conv on relu(P6).
    P6 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P6")(C5)
    P7 = layers.Activation('relu')(P6)
    P7 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P7")(P7)
    # 3x3 smoothing convolutions on the merged maps.
    P5 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P5")(P5)
    P4 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P4")(P4)
    P3 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P3")(P3)
    # classification subnet -- one instance applied to all levels, so weights are shared
    cls_subnet = classification_sub_net(num_classes=num_classes, num_anchor=num_anchor)
    P3_cls = cls_subnet(P3)
    P4_cls = cls_subnet(P4)
    P5_cls = cls_subnet(P5)
    P6_cls = cls_subnet(P6)
    P7_cls = cls_subnet(P7)
    cls_output = layers.Concatenate(axis=-2)([P3_cls, P4_cls, P5_cls, P6_cls, P7_cls])
    # localization subnet -- likewise shared across levels
    loc_subnet = regression_sub_net(num_anchor=num_anchor)
    P3_loc = loc_subnet(P3)
    P4_loc = loc_subnet(P4)
    P5_loc = loc_subnet(P5)
    P6_loc = loc_subnet(P6)
    P7_loc = loc_subnet(P7)
    loc_output = layers.Concatenate(axis=-2)([P3_loc, P4_loc, P5_loc, P6_loc, P7_loc])
    return tf.keras.Model(inputs=inputs, outputs=[cls_output, loc_output])
def get_fpn_anchor_box(input_shape):
    """Returns the anchor boxes of the Feature Pyramid Net.
    Args:
        input_shape (tuple): shape of input image as (height, width, channels);
            height and width must be multiples of 32.
    Returns:
        array: float32 array of shape (9 * num_pixel, 4), one (x1, y1, x2, y2)
            row per (feature-map cell, scale, aspect-ratio) combination, with
            coordinates normalized to [0, 1] and clipped to the image.
    """
    assert len(input_shape) == 3
    h, w, _ = input_shape
    assert h % 32 == 0 and w % 32 == 0
    shapes = [(int(h / 8), int(w / 8))]  # P3: feature map is 1/8 of the input
    num_pixel = np.prod(shapes)
    for _ in range(4):  # P4 through P7: each level halves the previous (rounded up)
        shapes.append((int(np.ceil(shapes[-1][0] / 2)), int(np.ceil(shapes[-1][1] / 2))))
        num_pixel += np.prod(shapes[-1])
    anchorbox = np.zeros((9 * num_pixel, 4))
    # 3 scales x 3 aspect ratios = 9 anchors per feature-map cell.
    base_multipliers = [2**(0.0), 2**(1 / 3), 2**(2 / 3)]
    aspect_ratio_multiplier = [(1.0, 1.0), (2.0, 1.0), (1.0, 2.0)]
    anchor_idx = 0
    for shape in shapes:
        p_h, p_w = shape
        base_y = 1 / p_h
        base_x = 1 / p_w
        for i in range(p_h):
            for j in range(p_w):
                for base_multiplier in base_multipliers:
                    for aspect_x, aspect_y in aspect_ratio_multiplier:
                        # Anchor centered on the cell; corners clipped to [0, 1].
                        center_y = (i + 1 / 2) * base_y
                        center_x = (j + 1 / 2) * base_x
                        anchorbox[anchor_idx, 0] = max(center_x - base_x * base_multiplier * aspect_x, 0.0)  # x1
                        anchorbox[anchor_idx, 1] = max(center_y - base_y * base_multiplier * aspect_y, 0.0)  # y1
                        anchorbox[anchor_idx, 2] = min(center_x + base_x * base_multiplier * aspect_x, 1.0)  # x2
                        anchorbox[anchor_idx, 3] = min(center_y + base_y * base_multiplier * aspect_y, 1.0)  # y2
                        anchor_idx += 1
        if p_h == 1 and p_w == 1:  # the next level of 1x1 feature map is still 1x1, therefore ignore
            # NOTE(review): when this break fires, anchorbox keeps trailing all-zero
            # rows because num_pixel counted the skipped levels -- confirm callers
            # either never hit this case or tolerate the zero rows.
            break
    return np.float32(anchorbox)
def get_target(anchorbox, label, x1, y1, x2, y2, num_classes=10):
    """Generates classification and localization ground-truths.
    Args:
        anchorbox (array): anchor boxes, shape (num_anchor, 4) as (x1, y1, x2, y2).
        label (array): class label of each ground-truth box.
        x1 (array): x-coordinate of top left point of the box.
        y1 (array): y-coordinate of top left point of the box.
        x2 (array): x-coordinate of bottom right point of the box.
        y2 (array): y-coordinate of bottom right point of the box.
        num_classes (int, optional): number of classes (currently unused). Defaults to 10.
    Returns:
        array: classification groundtruths per anchor (-1 background, -2 ignored,
            otherwise the matched label).
        array: localization groundtruths (corner offsets) per anchor.
    """
    num_anchor = anchorbox.shape[0]
    target_cls = np.zeros(shape=(num_anchor), dtype=np.int64)
    target_loc = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
    for _label, _x1, _y1, _x2, _y2 in zip(label, x1, y1, x2, y2):
        best_iou = 0.0
        for anchor_idx in range(num_anchor):
            iou = get_iou((_x1, _y1, _x2, _y2), anchorbox[anchor_idx])
            if iou > best_iou:
                best_iou = iou
                best_anchor_idx = anchor_idx
            # NOTE(review): every ground-truth box rewrites every anchor's class, so a
            # later low-IoU box can overwrite an earlier box's positive/ignore
            # assignment for the same anchor -- confirm this order dependence is intended.
            if iou > 0.5:
                target_cls[anchor_idx] = _label
                target_loc[anchor_idx] = get_loc_offset((_x1, _y1, _x2, _y2), anchorbox[anchor_idx])
            elif iou > 0.4:
                target_cls[anchor_idx] = -2  # ignore this example
            else:
                target_cls[anchor_idx] = -1  # background class
        # Fallback so every ground-truth box claims at least its best anchor.
        if best_iou > 0 and best_iou < 0.5:  # if gt has no >0.5 iou with any anchor
            target_cls[best_anchor_idx] = _label
            target_loc[best_anchor_idx] = get_loc_offset((_x1, _y1, _x2, _y2), anchorbox[best_anchor_idx])
    return target_cls, target_loc
def get_loc_offset(box_gt, box_anchor):
    """Compute normalized corner offsets between a ground-truth box and an anchor box.

    Args:
        box_gt (array): ground-truth box as (x1, y1, x2, y2).
        box_anchor (array): anchor box as (x1, y1, x2, y2).

    Returns:
        tuple: (dx1, dy1, dx2, dy2), each corner delta normalized by the
        anchor's width (x corners) or height (y corners).
    """
    gt = tuple(box_gt)
    anchor = tuple(box_anchor)
    width = anchor[2] - anchor[0]
    height = anchor[3] - anchor[1]
    # Normalize each corner delta by the matching anchor dimension.
    scales = (width, height, width, height)
    dx1, dy1, dx2, dy2 = ((g - a) / s for g, a, s in zip(gt, anchor, scales))
    return dx1, dy1, dx2, dy2
def get_iou(box1, box2):
    """Compute the intersection-over-union (IoU) of two axis-aligned boxes.

    Args:
        box1 (array): first box as (x1, y1, x2, y2).
        box2 (array): second box as (x1, y1, x2, y2).

    Returns:
        float: IoU value; 0 when the boxes do not overlap.
    """
    a_x1, a_y1, a_x2, a_y2 = tuple(box1)
    b_x1, b_y1, b_x2, b_y2 = tuple(box2)
    # Overlap extent along each axis, clamped at zero when the boxes are disjoint.
    overlap_w = max(0, min(a_x2, b_x2) - max(a_x1, b_x1))
    overlap_h = max(0, min(a_y2, b_y2) - max(a_y1, b_y1))
    inter = overlap_w * overlap_h
    if inter == 0:
        return 0
    area_a = (a_x2 - a_x1) * (a_y2 - a_y1)
    area_b = (b_x2 - b_x1) * (b_y2 - b_y1)
    return inter / (area_a + area_b - inter)
| en | 0.695196 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Creates an object classification sub-network for the RetinaNet. Args: num_classes (int): number of classes. num_anchor (int, optional): number of anchor boxes. Defaults to 9. Returns: 'Model' object: classification sub-network. # the output dimension is [batch, #anchor, #classes] Creates a regression sub-network for the RetinaNet. Args: num_anchor (int, optional): number of anchor boxes. Defaults to 9. Returns: 'Model' object: regression sub-network. # the output dimension is [batch, #anchor, 4] Creates the RetinaNet. RetinaNet is composed of an FPN, a classification sub-network and a localization regression sub-network. Args: input_shape (tuple): shape of input image. num_classes (int): number of classes. num_anchor (int, optional): number of anchor boxes. Defaults to 9. Returns: 'Model' object: RetinaNet. # FPN # classification subnet # localization subnet Returns the anchor boxes of the Feature Pyramid Net. Args: input_shape (tuple): shape of input image. Returns: array: numpy array with all anchor boxes. # P3 # P4 through P7 # x1 # y1 # x2 # y2 # the next level of 1x1 feature map is still 1x1, therefore ignore Generates classification and localization ground-truths. Args: anchorbox (array): anchor boxes label (array): labels for each anchor box. 
x1 (array): x-coordinate of top left point of the box. y1 (array): y-coordinate of top left point of the box. x2 (array): x-coordinate of bottom right point of the box. y2 (array): x-coordinate of bottom right point of the box. num_classes (int, optional): number of classes. Defaults to 10. Returns: array: classification groundtruths for each anchor box. array: localization groundtruths for each anchor box. # ignore this example # background class # if gt has no >0.5 iou with any anchor Computes the offset of a groundtruth box and an anchor box. Args: box_gt (array): groundtruth box. box_anchor (array): anchor box. Returns: float: offset between x1 coordinate of the two boxes. float: offset between y1 coordinate of the two boxes. float: offset between x2 coordinate of the two boxes. float: offset between y2 coordinate of the two boxes. Computes the value of intersection over union (IoU) of two boxes. Args: box1 (array): first box box2 (array): second box Returns: float: IoU value | 2.529352 | 3 |
easygraph/functions/centrality/clossness.py | easy-graph/Easy-Graph | 41 | 6625534 | from easygraph.functions.path import *
__all__ = [
'closeness_centrality',
]
def closeness_centrality(G, weight=None):
    r'''Compute closeness centrality for nodes.

    Uses the improved (Wasserman-Faust) formula, which scales by the fraction
    of nodes actually reachable from `u`:

    .. math::

        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

    where `n` is the number of nodes reachable from `u` and `N` is the
    number of nodes in the graph.

    Notice that the closeness distance function computes the
    outcoming distance to `u` for directed graphs. To use
    incoming distance, act on `G.reverse()`.

    Parameters
    ----------
    G : graph
        A easygraph graph

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with closeness centrality as the value.
    '''
    # NOTE: the docstring above is a raw string; previously "\frac" and "\sum"
    # were interpreted as escape sequences ("\f" is a form feed), corrupting it.
    import functools
    result_dict = dict()
    nodes = G.nodes
    length = len(nodes)
    # Weighted graphs need Dijkstra; unweighted graphs can use plain BFS.
    if weight is not None:
        path_length = functools.partial(single_source_dijkstra, weight=weight)
    else:
        path_length = functools.partial(single_source_bfs)
    for node in nodes:
        x = path_length(G, node)  # {reachable node: distance from `node`}
        dist = sum(x.values())
        cnt = len(x)
        if dist == 0:
            # Node only reaches itself: centrality is defined as 0.
            result_dict[node] = 0
        else:
            # (n-1)^2 / (sum_of_distances * (N-1)) -- the formula above.
            result_dict[node] = (cnt - 1) * (cnt - 1) / (dist * (length - 1))
    return result_dict
| from easygraph.functions.path import *
__all__ = [
'closeness_centrality',
]
def closeness_centrality(G, weight=None):
    r'''Compute closeness centrality for nodes.

    Uses the improved (Wasserman-Faust) formula, which scales by the fraction
    of nodes actually reachable from `u`:

    .. math::

        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},

    where `n` is the number of nodes reachable from `u` and `N` is the
    number of nodes in the graph.

    Notice that the closeness distance function computes the
    outcoming distance to `u` for directed graphs. To use
    incoming distance, act on `G.reverse()`.

    Parameters
    ----------
    G : graph
        A easygraph graph

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with closeness centrality as the value.
    '''
    # NOTE: the docstring above is a raw string; previously "\frac" and "\sum"
    # were interpreted as escape sequences ("\f" is a form feed), corrupting it.
    import functools
    result_dict = dict()
    nodes = G.nodes
    length = len(nodes)
    # Weighted graphs need Dijkstra; unweighted graphs can use plain BFS.
    if weight is not None:
        path_length = functools.partial(single_source_dijkstra, weight=weight)
    else:
        path_length = functools.partial(single_source_bfs)
    for node in nodes:
        x = path_length(G, node)  # {reachable node: distance from `node`}
        dist = sum(x.values())
        cnt = len(x)
        if dist == 0:
            # Node only reaches itself: centrality is defined as 0.
            result_dict[node] = 0
        else:
            # (n-1)^2 / (sum_of_distances * (N-1)) -- the formula above.
            result_dict[node] = (cnt - 1) * (cnt - 1) / (dist * (length - 1))
    return result_dict
| en | 0.71934 | Compute closeness centrality for nodes. .. math:: C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, Notice that the closeness distance function computes the outcoming distance to `u` for directed graphs. To use incoming distance, act on `G.reverse()`. Parameters ---------- G : graph A easygraph graph weight : None or string, optional (default=None) If None, all edge weights are considered equal. Otherwise holds the name of the edge attribute used as weight. Returns ------- nodes : dictionary Dictionary of nodes with closeness centrality as the value. | 3.454573 | 3 |
settingsUI.py | 5parkp1ug/ytDLDR | 1 | 6625535 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'settings1.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) settings form: a theme selector inside a
    'Display' group box plus an empty 'Configuration' group box."""

    def setupSettingsUi(self, Form):
        """Build the widget hierarchy of the settings form onto `Form`."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(400, 300)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.groupBox)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.themeComboBox = QtGui.QComboBox(self.groupBox)
        self.themeComboBox.setObjectName(_fromUtf8("themeComboBox"))
        self.horizontalLayout.addWidget(self.themeComboBox)
        self.gridLayout_2.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        self.verticalLayout.addWidget(self.groupBox)
        self.groupBox_2 = QtGui.QGroupBox(Form)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.verticalLayout.addWidget(self.groupBox_2)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign all user-visible strings (kept separate so translations can be reapplied)."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "Display", None))
        self.label.setText(_translate("Form", "Select Theme", None))
        # Fixed typo in user-visible text: "Cofiguration" -> "Configuration".
        self.groupBox_2.setTitle(_translate("Form", "Configuration", None))
| # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'settings1.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) settings form: a theme selector inside a
    'Display' group box plus an empty 'Configuration' group box."""

    def setupSettingsUi(self, Form):
        """Build the widget hierarchy of the settings form onto `Form`."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(400, 300)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.groupBox)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.themeComboBox = QtGui.QComboBox(self.groupBox)
        self.themeComboBox.setObjectName(_fromUtf8("themeComboBox"))
        self.horizontalLayout.addWidget(self.themeComboBox)
        self.gridLayout_2.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        self.verticalLayout.addWidget(self.groupBox)
        self.groupBox_2 = QtGui.QGroupBox(Form)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.verticalLayout.addWidget(self.groupBox_2)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign all user-visible strings (kept separate so translations can be reapplied)."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "Display", None))
        self.label.setText(_translate("Form", "Select Theme", None))
        # Fixed typo in user-visible text: "Cofiguration" -> "Configuration".
        self.groupBox_2.setTitle(_translate("Form", "Configuration", None))
| en | 0.769136 | # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'settings1.ui' # # Created by: PyQt4 UI code generator 4.11.4 # # WARNING! All changes made in this file will be lost! | 1.707077 | 2 |
lio/losses/classification.py | YivanZhang/lio | 8 | 6625536 | from typing import Callable, List
import torch
import torch.nn.functional as F
from torch.distributions import Categorical
def direct_observation_loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Plain cross-entropy for the fully-supervised case: labels `y` are observed directly."""
    return F.cross_entropy(t, y)
def indirect_observation_loss(transition_matrix: torch.Tensor, activation: Callable = None) -> Callable:
    """Build a loss for labels observed through a known noise transition matrix.

    The clean-class posterior (from the logits) is pushed through
    `transition_matrix` to obtain the observed-label distribution, and the
    negative log-likelihood of the observed label is returned.
    """
    if activation is None:
        def activation(logits):
            return F.softmax(logits, dim=1)

    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        clean_posterior = activation(t)
        observed_posterior = clean_posterior @ transition_matrix.to(y.device)
        # Small epsilon keeps the log finite when a probability underflows to 0.
        return F.nll_loss(torch.log(observed_posterior + 1e-32), y)

    return loss
def pairwise_similarity_loss(activation: Callable = None) -> Callable:
    """Build a loss for pairwise "same class (y=1) / different class (y=0)" supervision.

    The probability of the pair being similar is the inner product of the two
    examples' class posteriors; dissimilar is its complement.
    """
    if activation is None:
        def activation(logits):
            return F.softmax(logits, dim=1)

    def loss(ts: List[torch.Tensor], y: torch.Tensor) -> torch.Tensor:
        logits_a, logits_b = ts
        posterior_a = activation(logits_a)
        posterior_b = activation(logits_b)
        same = (posterior_a * posterior_b).sum(dim=1)  # p(same class)
        p_y = torch.stack((1. - same, same), dim=1)
        # Small epsilon keeps the log finite when a probability underflows to 0.
        return F.nll_loss(torch.log(p_y + 1e-32), y)

    return loss
def triplet_comparison_loss(activation: Callable = None) -> Callable:
    """Build a loss for triplet supervision over (anchor, candidate1, candidate2).

    Outcome y=1: the anchor matches candidate1 only; y=2: candidate2 only;
    y=0: the remaining cases (both or neither match).
    """
    if activation is None:
        def activation(logits):
            return F.softmax(logits, dim=1)

    def loss(ts: List[torch.Tensor], y: torch.Tensor) -> torch.Tensor:
        anchor, first, second = (activation(t) for t in ts)
        match_first = (anchor * first).sum(dim=1)
        match_second = (anchor * second).sum(dim=1)
        p_first_only = match_first * (1. - match_second)
        p_second_only = (1. - match_first) * match_second
        p_rest = 1. - p_first_only - p_second_only
        p_y = torch.stack((p_rest, p_first_only, p_second_only), dim=1)
        # Small epsilon keeps the log finite when a probability underflows to 0.
        return F.nll_loss(torch.log(p_y + 1e-32), y)

    return loss
# ----------------------------------------------------------------------------------------------------------------------
def soft_bootstrapping_loss(beta: float) -> Callable:
    # https://arxiv.org/abs/1412.6596
    """Cross-entropy blended with a negative-entropy (soft bootstrapping) regularizer."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        data_term = F.cross_entropy(t, y)
        # Negative predictive entropy: rewards confident (low-entropy) outputs.
        entropy_term = -Categorical(probs=F.softmax(t, dim=1)).entropy().mean()
        return beta * data_term + (1. - beta) * entropy_term
    return loss
def hard_bootstrapping_loss(beta: float) -> Callable:
    # https://arxiv.org/abs/1412.6596
    """Cross-entropy blended with a self-labeling (hard bootstrapping) regularizer."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        data_term = F.cross_entropy(t, y)
        # Cross-entropy against the model's own hard (argmax) predictions.
        self_term = F.cross_entropy(t, t.argmax(dim=1))
        return beta * data_term + (1. - beta) * self_term
    return loss
def focal_loss(gamma: float) -> Callable:
    # https://arxiv.org/abs/1708.02002
    """Cross-entropy down-weighted on easy examples by (1 - p_true)^gamma."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        p_true = F.softmax(t, dim=1)[range(len(y)), y]
        per_example = F.cross_entropy(t, y, reduction='none')
        return ((1. - p_true) ** gamma * per_example).mean()
    return loss
def generalized_cross_entropy_loss(q: float) -> Callable:
    # https://arxiv.org/abs/1805.07836
    """Generalized cross-entropy (1 - p_true^q) / q, averaged over the batch."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        p_true = F.softmax(t, dim=1)[range(len(y)), y]
        return (1. - p_true ** q).mean() / q
    return loss
| from typing import Callable, List
import torch
import torch.nn.functional as F
from torch.distributions import Categorical
def direct_observation_loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Plain cross-entropy for the fully-supervised case: labels `y` are observed directly."""
    return F.cross_entropy(t, y)
def indirect_observation_loss(transition_matrix: torch.Tensor, activation: Callable = None) -> Callable:
    """Build a loss for labels observed through a known noise transition matrix.

    The clean-class posterior (from the logits) is pushed through
    `transition_matrix` to obtain the observed-label distribution, and the
    negative log-likelihood of the observed label is returned.
    """
    if activation is None:
        def activation(logits):
            return F.softmax(logits, dim=1)

    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        clean_posterior = activation(t)
        observed_posterior = clean_posterior @ transition_matrix.to(y.device)
        # Small epsilon keeps the log finite when a probability underflows to 0.
        return F.nll_loss(torch.log(observed_posterior + 1e-32), y)

    return loss
def pairwise_similarity_loss(activation: Callable = None) -> Callable:
    """Build a loss for pairwise "same class (y=1) / different class (y=0)" supervision.

    The probability of the pair being similar is the inner product of the two
    examples' class posteriors; dissimilar is its complement.
    """
    if activation is None:
        def activation(logits):
            return F.softmax(logits, dim=1)

    def loss(ts: List[torch.Tensor], y: torch.Tensor) -> torch.Tensor:
        logits_a, logits_b = ts
        posterior_a = activation(logits_a)
        posterior_b = activation(logits_b)
        same = (posterior_a * posterior_b).sum(dim=1)  # p(same class)
        p_y = torch.stack((1. - same, same), dim=1)
        # Small epsilon keeps the log finite when a probability underflows to 0.
        return F.nll_loss(torch.log(p_y + 1e-32), y)

    return loss
def triplet_comparison_loss(activation: Callable = None) -> Callable:
    """Build a loss for triplet supervision over (anchor, candidate1, candidate2).

    Outcome y=1: the anchor matches candidate1 only; y=2: candidate2 only;
    y=0: the remaining cases (both or neither match).
    """
    if activation is None:
        def activation(logits):
            return F.softmax(logits, dim=1)

    def loss(ts: List[torch.Tensor], y: torch.Tensor) -> torch.Tensor:
        anchor, first, second = (activation(t) for t in ts)
        match_first = (anchor * first).sum(dim=1)
        match_second = (anchor * second).sum(dim=1)
        p_first_only = match_first * (1. - match_second)
        p_second_only = (1. - match_first) * match_second
        p_rest = 1. - p_first_only - p_second_only
        p_y = torch.stack((p_rest, p_first_only, p_second_only), dim=1)
        # Small epsilon keeps the log finite when a probability underflows to 0.
        return F.nll_loss(torch.log(p_y + 1e-32), y)

    return loss
# ----------------------------------------------------------------------------------------------------------------------
def soft_bootstrapping_loss(beta: float) -> Callable:
    # https://arxiv.org/abs/1412.6596
    """Cross-entropy blended with a negative-entropy (soft bootstrapping) regularizer."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        data_term = F.cross_entropy(t, y)
        # Negative predictive entropy: rewards confident (low-entropy) outputs.
        entropy_term = -Categorical(probs=F.softmax(t, dim=1)).entropy().mean()
        return beta * data_term + (1. - beta) * entropy_term
    return loss
def hard_bootstrapping_loss(beta: float) -> Callable:
    # https://arxiv.org/abs/1412.6596
    """Cross-entropy blended with a self-labeling (hard bootstrapping) regularizer."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        data_term = F.cross_entropy(t, y)
        # Cross-entropy against the model's own hard (argmax) predictions.
        self_term = F.cross_entropy(t, t.argmax(dim=1))
        return beta * data_term + (1. - beta) * self_term
    return loss
def focal_loss(gamma: float) -> Callable:
    # https://arxiv.org/abs/1708.02002
    """Cross-entropy down-weighted on easy examples by (1 - p_true)^gamma."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        p_true = F.softmax(t, dim=1)[range(len(y)), y]
        per_example = F.cross_entropy(t, y, reduction='none')
        return ((1. - p_true) ** gamma * per_example).mean()
    return loss
def generalized_cross_entropy_loss(q: float) -> Callable:
    # https://arxiv.org/abs/1805.07836
    """Generalized cross-entropy (1 - p_true^q) / q, averaged over the batch."""
    def loss(t: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        p_true = F.softmax(t, dim=1)[range(len(y)), y]
        return (1. - p_true ** q).mean() / q
    return loss
| en | 0.401144 | # ---------------------------------------------------------------------------------------------------------------------- # https://arxiv.org/abs/1412.6596 # entropy regularization # https://arxiv.org/abs/1412.6596 # log-likelihood regularization # https://arxiv.org/abs/1708.02002 # https://arxiv.org/abs/1805.07836 | 2.219853 | 2 |
tools/CountColor.py | WanderMax/notepad2 | 1 | 6625537 | #!/usr/bin/env python3
#-*- coding: UTF-8 -*-
import sys
import os.path
import operator
import re
kReColorHex = re.compile(r'#[0-9A-Fa-f]{6}')
def parse_key_value(line):
    """Parse an INI-style ``key=value`` line.

    Returns ``[key, value]`` with surrounding whitespace stripped, or None for
    blank lines, comments (';' or '#'), section headers ('['), and lines that
    are not a non-empty key/value pair.
    """
    line = line.strip()
    if not line or line[0] in ';#[':
        return None
    # Split on the FIRST '=' only so values containing '=' stay intact.
    # (The previous maxsplit of 2 wrongly rejected lines such as "key=a=b".)
    items = line.split('=', 1)
    if len(items) != 2:
        return None
    items[0] = items[0].strip()
    items[1] = items[1].strip()
    if not items[0] or not items[1]:
        return None
    return items
def find_color_in_file(path, color_map):
    """Scan an INI-style file and accumulate '#RRGGBB' color usage into color_map.

    color_map maps an upper-cased color to
    ``{'total_count': int, 'usage': {key: count}}`` and is updated in place.
    """
    # Context manager closes the handle deterministically (the previous
    # open(path).readlines() leaked it), and iterating the file object lazily
    # avoids materializing every line.
    with open(path) as f:
        for line in f:
            items = parse_key_value(line)
            if not items:
                continue
            colors = kReColorHex.findall(items[1])
            if not colors:
                continue
            key = items[0]
            for color in colors:
                color = color.upper()
                if color in color_map:
                    color_stat = color_map[color]
                    color_stat['total_count'] += 1
                    # dict.get collapses the "new key vs existing key" branches.
                    color_stat['usage'][key] = color_stat['usage'].get(key, 0) + 1
                else:
                    color_map[color] = {
                        'total_count': 1,
                        'usage': {key: 1},
                    }
def print_color_count(color_map):
    """Print each color with its total count, then its per-key counts indented."""
    for color, color_stat in color_map.items():
        print('%s\t%d' % (color, color_stat['total_count']))
        for key, count in color_stat['usage'].items():
            print('\t%d\t%s' % (count, key))
def count_color(path):
    """Print color usage statistics for the file at `path`.

    Colors are ordered by total count (descending), ties broken by color value;
    each color's key usage is ordered by count (descending), ties by key.
    """
    # { color : { total_count: total_count, usage: { key: count}}}
    color_map = {}
    find_color_in_file(path, color_map)
    # Sort by key first so the second (stable) sort breaks count ties by key.
    ordered = sorted(color_map.items(), key=operator.itemgetter(0))
    ordered.sort(key=lambda item: item[1]['total_count'], reverse=True)
    color_map = dict(ordered)
    for color_stat in color_map.values():
        pairs = sorted(color_stat['usage'].items(), key=operator.itemgetter(0))
        pairs.sort(key=operator.itemgetter(1), reverse=True)
        color_stat['usage'] = dict(pairs)
    print_color_count(color_map)
if __name__ == '__main__':
    # CLI entry point: expects the path of the file to analyse as argv[1].
    if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
        count_color(sys.argv[1])
    else:
        print("""Usage: %s path""" % sys.argv[0])
| #!/usr/bin/env python3
#-*- coding: UTF-8 -*-
import sys
import os.path
import operator
import re
kReColorHex = re.compile(r'#[0-9A-Fa-f]{6}')
def parse_key_value(line):
    """Parse an INI-style ``key=value`` line.

    Returns ``[key, value]`` with surrounding whitespace stripped, or None for
    blank lines, comments (';' or '#'), section headers ('['), and lines that
    are not a non-empty key/value pair.
    """
    line = line.strip()
    if not line or line[0] in ';#[':
        return None
    # Split on the FIRST '=' only so values containing '=' stay intact.
    # (The previous maxsplit of 2 wrongly rejected lines such as "key=a=b".)
    items = line.split('=', 1)
    if len(items) != 2:
        return None
    items[0] = items[0].strip()
    items[1] = items[1].strip()
    if not items[0] or not items[1]:
        return None
    return items
def find_color_in_file(path, color_map):
    """Scan an INI-style file and accumulate '#RRGGBB' color usage into color_map.

    color_map maps an upper-cased color to
    ``{'total_count': int, 'usage': {key: count}}`` and is updated in place.
    """
    # Context manager closes the handle deterministically (the previous
    # open(path).readlines() leaked it), and iterating the file object lazily
    # avoids materializing every line.
    with open(path) as f:
        for line in f:
            items = parse_key_value(line)
            if not items:
                continue
            colors = kReColorHex.findall(items[1])
            if not colors:
                continue
            key = items[0]
            for color in colors:
                color = color.upper()
                if color in color_map:
                    color_stat = color_map[color]
                    color_stat['total_count'] += 1
                    # dict.get collapses the "new key vs existing key" branches.
                    color_stat['usage'][key] = color_stat['usage'].get(key, 0) + 1
                else:
                    color_map[color] = {
                        'total_count': 1,
                        'usage': {key: 1},
                    }
def print_color_count(color_map):
for color, color_stat in color_map.items():
print('%s\t%d' % (color, color_stat['total_count']))
usage = color_stat['usage']
for key, count in usage.items():
print('\t%d\t%s' % (count, key))
def count_color(path):
# { color : { total_count: total_count, usage: { key: count}}}
color_map = {}
find_color_in_file(path, color_map)
colors = sorted(color_map.items(), key=operator.itemgetter(0))
colors = sorted(colors, key=lambda m: m[1]['total_count'], reverse=True)
color_map = dict(colors)
for color_stat in color_map.values():
usage = color_stat['usage']
usage = sorted(usage.items(), key=operator.itemgetter(0))
usage = sorted(usage, key=operator.itemgetter(1), reverse=True)
color_stat['usage'] = dict(usage)
print_color_count(color_map)
if __name__ == '__main__':
if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
count_color(sys.argv[1])
else:
print("""Usage: %s path""" % sys.argv[0])
| en | 0.352775 | #!/usr/bin/env python3 #-*- coding: UTF-8 -*- #[': # { color : { total_count: total_count, usage: { key: count}}} Usage: %s path | 3.206065 | 3 |
Rent 4.0/__dependency__.py | girisakar365/Project-Rent | 2 | 6625538 | <gh_stars>1-10
from tkinter import *
from tkinter import messagebox
from db import db
bg = db.cache(0,'get')
bg_dict = {
'1': '#fdfddb',
'2': '#cecece',
'3': '#f2d000',
'4':'#ff0000',
'5':'#2bc760',
'6':'#143d8c',
'7':'#8abadb',
'8':'#936cca'
}
apply_bg = bg_dict['{}'.format(bg)]
#\defaultfunction\
def default():
print('No function inserted!')
class Dp:
def text_label(win,text='',bg=apply_bg,fontSize=14,fontStyle='Bookman Old Style',fontType='normal',x=0,y=0):
label = Label(win,text=text,bg=bg,font=(fontStyle,fontSize,fontType))
label.place(x=x,y=y)
return label
def image_label(win,image='',x=0,y=0,bg=apply_bg):
imglabel = Label(win,image=image,bg=bg)
imglabel.image = image
imglabel.place(x=x,y=y)
return imglabel
def text_button(win,text='',bg=apply_bg,fontSize=10,fontStyle='Bookman Old Style',fontType='normal',x=0,y=0,func=default):
button = Button(win,text=text,bg=bg,font=(fontStyle,fontSize,fontType),command=func)
button.place(x=x,y=y)
return button
def image_button(win,image='',x=0,y=0,bg=apply_bg,func=default):
button = Button(win,image=image,bg=bg,borderwidth=0,command=func,activebackground=bg)
button.image = image
button.place(x=x,y=y)
return button
def entry(win,width=20,bd=1.5,x=0,y=0):
entry = Entry(win,width=width,borderwidth=bd)
entry.place(x=x,y=y)
return entry
def image_loader(imagename=''):
from PIL import Image,ImageTk
img = ImageTk.PhotoImage(file=imagename)
return img
class Include:
def digital_clock(win,vartext='Admin',x=0,y=0):
from time import strftime
def clock():
hour = strftime("%I")
minute = strftime("%M")
second = strftime("%S")
am_pm = strftime('%p')
day = strftime('%d-%B-%Y %A')
digidate.config(text=day)
digiclock.config(text=hour+':'+minute+':'+second+' '+am_pm)
digiclock.after(1000,clock)
digiclock = Dp.text_label(win,x=x,y=y)
#Include.tip(win,digiclock,'Time')
welcome_user = Dp.text_label(win,x=2,y=650,text='User: {}'.format(vartext))
digidate = Dp.text_label(win,x=x+700,y=y)
#Include.tip(win,digidate,'Date')
clock()
return digiclock,digidate,welcome_user
def conform(func,title,msg):
get = messagebox.askyesno(title,msg)
if get == True:
func()
return True
else:
pass
def combobox(win,x=0,y=0,z=0,p=0):
s_00 = ['Month',
'January','February','March','April','May','June',
'July','August','September','October','November','December']
s_01 = ['Year']
start = 0
for i in range(33):
i = i+1
start = 2019 + i
s_01.append(start)
#selectmonth
month = ttk.Combobox(win,value=s_00,width=10)
month.current(0)
month.bind('<<ComboboxSelected>>')
month.config(state='readonly')
Tip(month,'Select Month')
month.place(x=x,y=y)
#selectyear
year = ttk.Combobox(win,value=s_01,width=10)
year.current(0)
year.config(state='readonly')
year.bind('<<ComboboxSelected>>')
Tip(year,'Select Year')
year.place(x=z,y=p)
return month,year,s_00,s_01
def eye(win,widget,x=0,y=0,bg=apply_bg):
not_shown = Dp.image_loader('__img__\\nshow.png')
shown = Dp.image_loader('__img__\\show.png')
def unshow():
s0 = Dp.image_button(win,not_shown,x=x,y=y,bg=bg)
s0['command']=lambda:[show()]
s0['activebackground']=bg
widget['show'] = '●'
#Include.tip(win,widget,'Show')
s0.place(x=x,y=y)
def show():
s1 = Dp.image_button(win,shown,x=x,y=y,bg=bg)
s1['command']=lambda:[unshow()]
s1['activebackground']=bg
widget['show'] = ''
replace = widget.get()
widget.delete(0,END)
widget.insert(0,replace)
#Include.tip(win,widget,'Unshow')
s1.place(x=x,y=y)
unshow()
class Tip(object):
def __init__(self, widget, text='widget info'):
self.waittime = 1000 #miliseconds
self.wraplength = 180 #pixels
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<ButtonPress>", self.leave)
self.id = None
self.tw = None
def enter(self, event=None):
self.unschedule()
self.schedule()
def leave(self, event=None):
self.unschedule()
self.hidetip()
def schedule(self):
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
id = self.id
self.id = None
if id:
self.widget.after_cancel(id)
def showtip(self, event=None):
x = y = 0
x += self.widget.winfo_rootx() + 20
y += self.widget.winfo_rooty() + 25
# creates a toplevel window
self.tw = Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = Label(self.tw, text=self.text, justify='left',
background="#eeeac4", relief='raised', borderwidth=1,
wraplength = self.wraplength)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tw
self.tw= None
if tw:
tw.destroy() | from tkinter import *
from tkinter import messagebox
from db import db
bg = db.cache(0,'get')
bg_dict = {
'1': '#fdfddb',
'2': '#cecece',
'3': '#f2d000',
'4':'#ff0000',
'5':'#2bc760',
'6':'#143d8c',
'7':'#8abadb',
'8':'#936cca'
}
apply_bg = bg_dict['{}'.format(bg)]
#\defaultfunction\
def default():
print('No function inserted!')
class Dp:
def text_label(win,text='',bg=apply_bg,fontSize=14,fontStyle='Bookman Old Style',fontType='normal',x=0,y=0):
label = Label(win,text=text,bg=bg,font=(fontStyle,fontSize,fontType))
label.place(x=x,y=y)
return label
def image_label(win,image='',x=0,y=0,bg=apply_bg):
imglabel = Label(win,image=image,bg=bg)
imglabel.image = image
imglabel.place(x=x,y=y)
return imglabel
def text_button(win,text='',bg=apply_bg,fontSize=10,fontStyle='Bookman Old Style',fontType='normal',x=0,y=0,func=default):
button = Button(win,text=text,bg=bg,font=(fontStyle,fontSize,fontType),command=func)
button.place(x=x,y=y)
return button
def image_button(win,image='',x=0,y=0,bg=apply_bg,func=default):
button = Button(win,image=image,bg=bg,borderwidth=0,command=func,activebackground=bg)
button.image = image
button.place(x=x,y=y)
return button
def entry(win,width=20,bd=1.5,x=0,y=0):
entry = Entry(win,width=width,borderwidth=bd)
entry.place(x=x,y=y)
return entry
def image_loader(imagename=''):
from PIL import Image,ImageTk
img = ImageTk.PhotoImage(file=imagename)
return img
class Include:
def digital_clock(win,vartext='Admin',x=0,y=0):
from time import strftime
def clock():
hour = strftime("%I")
minute = strftime("%M")
second = strftime("%S")
am_pm = strftime('%p')
day = strftime('%d-%B-%Y %A')
digidate.config(text=day)
digiclock.config(text=hour+':'+minute+':'+second+' '+am_pm)
digiclock.after(1000,clock)
digiclock = Dp.text_label(win,x=x,y=y)
#Include.tip(win,digiclock,'Time')
welcome_user = Dp.text_label(win,x=2,y=650,text='User: {}'.format(vartext))
digidate = Dp.text_label(win,x=x+700,y=y)
#Include.tip(win,digidate,'Date')
clock()
return digiclock,digidate,welcome_user
def conform(func,title,msg):
get = messagebox.askyesno(title,msg)
if get == True:
func()
return True
else:
pass
def combobox(win,x=0,y=0,z=0,p=0):
s_00 = ['Month',
'January','February','March','April','May','June',
'July','August','September','October','November','December']
s_01 = ['Year']
start = 0
for i in range(33):
i = i+1
start = 2019 + i
s_01.append(start)
#selectmonth
month = ttk.Combobox(win,value=s_00,width=10)
month.current(0)
month.bind('<<ComboboxSelected>>')
month.config(state='readonly')
Tip(month,'Select Month')
month.place(x=x,y=y)
#selectyear
year = ttk.Combobox(win,value=s_01,width=10)
year.current(0)
year.config(state='readonly')
year.bind('<<ComboboxSelected>>')
Tip(year,'Select Year')
year.place(x=z,y=p)
return month,year,s_00,s_01
def eye(win,widget,x=0,y=0,bg=apply_bg):
not_shown = Dp.image_loader('__img__\\nshow.png')
shown = Dp.image_loader('__img__\\show.png')
def unshow():
s0 = Dp.image_button(win,not_shown,x=x,y=y,bg=bg)
s0['command']=lambda:[show()]
s0['activebackground']=bg
widget['show'] = '●'
#Include.tip(win,widget,'Show')
s0.place(x=x,y=y)
def show():
s1 = Dp.image_button(win,shown,x=x,y=y,bg=bg)
s1['command']=lambda:[unshow()]
s1['activebackground']=bg
widget['show'] = ''
replace = widget.get()
widget.delete(0,END)
widget.insert(0,replace)
#Include.tip(win,widget,'Unshow')
s1.place(x=x,y=y)
unshow()
class Tip(object):
def __init__(self, widget, text='widget info'):
self.waittime = 1000 #miliseconds
self.wraplength = 180 #pixels
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<ButtonPress>", self.leave)
self.id = None
self.tw = None
def enter(self, event=None):
self.unschedule()
self.schedule()
def leave(self, event=None):
self.unschedule()
self.hidetip()
def schedule(self):
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
id = self.id
self.id = None
if id:
self.widget.after_cancel(id)
def showtip(self, event=None):
x = y = 0
x += self.widget.winfo_rootx() + 20
y += self.widget.winfo_rooty() + 25
# creates a toplevel window
self.tw = Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = Label(self.tw, text=self.text, justify='left',
background="#eeeac4", relief='raised', borderwidth=1,
wraplength = self.wraplength)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tw
self.tw= None
if tw:
tw.destroy() | en | 0.557155 | #\defaultfunction\ #Include.tip(win,digiclock,'Time') #Include.tip(win,digidate,'Date') #selectmonth #selectyear #Include.tip(win,widget,'Show') #Include.tip(win,widget,'Unshow') #miliseconds #pixels # creates a toplevel window # Leaves only the label and removes the app window | 3.024162 | 3 |
code/analysis/old/calc_pairwise_KL_divergence.py | tkc-morita/variational_inference_DP_mix_HDP_topic_ngram | 4 | 6625539 | # coding: utf-8
import pandas as pd
import numpy as np
import sys, os.path, itertools
def get_pairwise_KL_divergence(df_post_ngram, context_frequency):
# print np.max(np.abs(df_post_ngram.groupby(['sublex_id','context']).prob.sum() - 1))
df_post_ngram = df_post_ngram.sort_values(['sublex_id','context','value'])
df_post_ngram['log_prob'] = np.log(df_post_ngram.prob)
df_post_ngram['neg_entropy'] = df_post_ngram.prob * df_post_ngram.log_prob
df_results = pd.DataFrame(columns=['context','context_in_data','sublex_A','sublex_B','kl_divergence_AB','kl_divergence_BA'])
sublex_list = sorted(df_post_ngram.sublex_id.drop_duplicates().tolist())
for sublex_A in sublex_list[:-1]:
df_sublex_A = df_post_ngram[df_post_ngram.sublex_id == sublex_A].reset_index(drop=True)
# print 'A'
# print df_sublex_A
neg_entropy_A = df_sublex_A.groupby('context').neg_entropy.sum()
for sublex_B in sublex_list[sublex_A+1:]:
df_sublex_B = df_post_ngram[df_post_ngram.sublex_id == sublex_B].reset_index(drop=True)
# print 'B'
# print df_sublex_B
df_sublex_B['kl_divergence_AB'] = df_sublex_A.neg_entropy - (df_sublex_A.prob * df_sublex_B.log_prob)
df_sublex_B['kl_divergence_BA'] = df_sublex_B.neg_entropy - (df_sublex_B.prob * df_sublex_A.log_prob)
df_results_sub = df_sublex_B.groupby('context')['kl_divergence_AB','kl_divergence_BA'].sum()
df_results_sub['context'] = df_results_sub.index
df_results_sub = df_results_sub.sort_values('context')
df_results_sub['sublex_A'] = sublex_A
df_results_sub['sublex_B'] = sublex_B
df_results_sub['context_frequency_A'] = df_results_sub.context.map(lambda context: context_frequency[context][sublex_A])
df_results_sub['context_frequency_B'] = df_results_sub.context.map(lambda context: context_frequency[context][sublex_B])
df_results_sub['context_frequency_all'] = df_results_sub.context.map(lambda context: np.sum(context_frequency[context]))
df_results_sub['context_in_data'] = df_sublex_A[df_sublex_A.value==0].sort_values('context').context_in_data.tolist()
df_results = df_results.append(df_results_sub, ignore_index=True)
df_results['kl_divergence_avg'] = (df_results.kl_divergence_AB + df_results.kl_divergence_BA) / 2.0
return df_results
def code_data(csv_data_list, symbol2code):
return [map(lambda key: symbol2code[key], string.split(',')) for string in csv_data_list]
def get_context_frequency(df_sublex_assignment, coded_data, start_code, n):
df_sublex_assignment = df_sublex_assignment.loc[:,df_sublex_assignment.columns.str.startswith('sublex_')]
num_sublex = df_sublex_assignment.shape[1]
inventory = list(set(itertools.chain.from_iterable(coded_data)))
inventory.append(len(inventory))
context_frequency = {'_'.join(map(str, context_list)):np.zeros(num_sublex)
for context_list
in itertools.product(inventory, repeat=n-1)
}
mat_assignments = df_sublex_assignment.rename(
columns={
col_name:int(col_name.split('_')[1])
for col_name in df_sublex_assignment.columns.tolist()
}
).ix[:,range(num_sublex)].values
for coded_string,sublex_assignment in zip(coded_data, mat_assignments):
for ngram_window in zip(*[([start_code]*(n-1)+coded_string)[i:] for i in range(n)]):
context = '_'.join(map(str, ngram_window[:-1]))
context_frequency[context] += sublex_assignment
return context_frequency
if __name__ == '__main__':
path = sys.argv[1]
result_dir,filename = os.path.split(path)
n = int(list(filename.split('gram')[0])[-1])
df_post_ngram = pd.read_csv(path)
df_sublex_assignment = pd.read_csv(os.path.join(result_dir, 'SubLexica_assignment.csv'))
df_data = pd.read_csv('../data/BCCWJ_frequencylist_suw_ver1_0_core-nouns.tsv', sep='\t', encoding='utf-8')
df_code = pd.read_csv(os.path.join(result_dir, 'symbol_coding.csv'), encoding='utf-8')
df_code.set_index('symbol', inplace=True)
coded_data = code_data(df_data.IPA_csv.tolist(), df_code.to_dict()['code'])
start_code = df_code.to_dict()['code']['START']
context_frequency = get_context_frequency(df_sublex_assignment, coded_data, start_code, n)
df_kl_div = get_pairwise_KL_divergence(
df_post_ngram,
context_frequency
).sort_values('kl_divergence_avg', ascending = False)
df_kl_div.to_csv(os.path.join(result_dir, 'kl-divergence_bw_sublex.csv'), index=False)
| # coding: utf-8
import pandas as pd
import numpy as np
import sys, os.path, itertools
def get_pairwise_KL_divergence(df_post_ngram, context_frequency):
# print np.max(np.abs(df_post_ngram.groupby(['sublex_id','context']).prob.sum() - 1))
df_post_ngram = df_post_ngram.sort_values(['sublex_id','context','value'])
df_post_ngram['log_prob'] = np.log(df_post_ngram.prob)
df_post_ngram['neg_entropy'] = df_post_ngram.prob * df_post_ngram.log_prob
df_results = pd.DataFrame(columns=['context','context_in_data','sublex_A','sublex_B','kl_divergence_AB','kl_divergence_BA'])
sublex_list = sorted(df_post_ngram.sublex_id.drop_duplicates().tolist())
for sublex_A in sublex_list[:-1]:
df_sublex_A = df_post_ngram[df_post_ngram.sublex_id == sublex_A].reset_index(drop=True)
# print 'A'
# print df_sublex_A
neg_entropy_A = df_sublex_A.groupby('context').neg_entropy.sum()
for sublex_B in sublex_list[sublex_A+1:]:
df_sublex_B = df_post_ngram[df_post_ngram.sublex_id == sublex_B].reset_index(drop=True)
# print 'B'
# print df_sublex_B
df_sublex_B['kl_divergence_AB'] = df_sublex_A.neg_entropy - (df_sublex_A.prob * df_sublex_B.log_prob)
df_sublex_B['kl_divergence_BA'] = df_sublex_B.neg_entropy - (df_sublex_B.prob * df_sublex_A.log_prob)
df_results_sub = df_sublex_B.groupby('context')['kl_divergence_AB','kl_divergence_BA'].sum()
df_results_sub['context'] = df_results_sub.index
df_results_sub = df_results_sub.sort_values('context')
df_results_sub['sublex_A'] = sublex_A
df_results_sub['sublex_B'] = sublex_B
df_results_sub['context_frequency_A'] = df_results_sub.context.map(lambda context: context_frequency[context][sublex_A])
df_results_sub['context_frequency_B'] = df_results_sub.context.map(lambda context: context_frequency[context][sublex_B])
df_results_sub['context_frequency_all'] = df_results_sub.context.map(lambda context: np.sum(context_frequency[context]))
df_results_sub['context_in_data'] = df_sublex_A[df_sublex_A.value==0].sort_values('context').context_in_data.tolist()
df_results = df_results.append(df_results_sub, ignore_index=True)
df_results['kl_divergence_avg'] = (df_results.kl_divergence_AB + df_results.kl_divergence_BA) / 2.0
return df_results
def code_data(csv_data_list, symbol2code):
return [map(lambda key: symbol2code[key], string.split(',')) for string in csv_data_list]
def get_context_frequency(df_sublex_assignment, coded_data, start_code, n):
df_sublex_assignment = df_sublex_assignment.loc[:,df_sublex_assignment.columns.str.startswith('sublex_')]
num_sublex = df_sublex_assignment.shape[1]
inventory = list(set(itertools.chain.from_iterable(coded_data)))
inventory.append(len(inventory))
context_frequency = {'_'.join(map(str, context_list)):np.zeros(num_sublex)
for context_list
in itertools.product(inventory, repeat=n-1)
}
mat_assignments = df_sublex_assignment.rename(
columns={
col_name:int(col_name.split('_')[1])
for col_name in df_sublex_assignment.columns.tolist()
}
).ix[:,range(num_sublex)].values
for coded_string,sublex_assignment in zip(coded_data, mat_assignments):
for ngram_window in zip(*[([start_code]*(n-1)+coded_string)[i:] for i in range(n)]):
context = '_'.join(map(str, ngram_window[:-1]))
context_frequency[context] += sublex_assignment
return context_frequency
if __name__ == '__main__':
path = sys.argv[1]
result_dir,filename = os.path.split(path)
n = int(list(filename.split('gram')[0])[-1])
df_post_ngram = pd.read_csv(path)
df_sublex_assignment = pd.read_csv(os.path.join(result_dir, 'SubLexica_assignment.csv'))
df_data = pd.read_csv('../data/BCCWJ_frequencylist_suw_ver1_0_core-nouns.tsv', sep='\t', encoding='utf-8')
df_code = pd.read_csv(os.path.join(result_dir, 'symbol_coding.csv'), encoding='utf-8')
df_code.set_index('symbol', inplace=True)
coded_data = code_data(df_data.IPA_csv.tolist(), df_code.to_dict()['code'])
start_code = df_code.to_dict()['code']['START']
context_frequency = get_context_frequency(df_sublex_assignment, coded_data, start_code, n)
df_kl_div = get_pairwise_KL_divergence(
df_post_ngram,
context_frequency
).sort_values('kl_divergence_avg', ascending = False)
df_kl_div.to_csv(os.path.join(result_dir, 'kl-divergence_bw_sublex.csv'), index=False)
| en | 0.366787 | # coding: utf-8 # print np.max(np.abs(df_post_ngram.groupby(['sublex_id','context']).prob.sum() - 1)) # print 'A' # print df_sublex_A # print 'B' # print df_sublex_B | 2.535523 | 3 |
utils/src/ave/__init__.py | yiu31802/ave | 17 | 6625540 | <gh_stars>10-100
# Copyright (C) 2013 Sony Mobile Communications AB.
# All rights, including trade secret rights, reserved.
import pkg_resources
import modulefinder
# the ave Python package is implemented in several Debian packages (git trees
# really). this causes problems when importing different ave modules from
# different source paths. consider the following template which is used in many
# unit test jobs for various ave modules:
#
# path = os.path.dirname(os.path.dirname(__file__))
# path = os.path.join(path, 'src')
# sys.path.insert(0, path)
# import runners
# runnsers.all_git()
#
# after "sys.path.insert(0, path)", the interpreter won't be able to find any
# ave modules which are not implemented in the current tree. the following two
# lines work around that by adding the tree-local modules to another name space
# with the same name as the system-installed modules.
pkg_resources.declare_namespace(__name__)
for p in __path__:
modulefinder.AddPackagePath(__name__, p)
# make sure that this __init__.py is NOT INSTALLED TO THE SYSTEM! the "common"
# package owns that file. | # Copyright (C) 2013 Sony Mobile Communications AB.
# All rights, including trade secret rights, reserved.
import pkg_resources
import modulefinder
# the ave Python package is implemented in several Debian packages (git trees
# really). this causes problems when importing different ave modules from
# different source paths. consider the following template which is used in many
# unit test jobs for various ave modules:
#
# path = os.path.dirname(os.path.dirname(__file__))
# path = os.path.join(path, 'src')
# sys.path.insert(0, path)
# import runners
# runnsers.all_git()
#
# after "sys.path.insert(0, path)", the interpreter won't be able to find any
# ave modules which are not implemented in the current tree. the following two
# lines work around that by adding the tree-local modules to another name space
# with the same name as the system-installed modules.
pkg_resources.declare_namespace(__name__)
for p in __path__:
modulefinder.AddPackagePath(__name__, p)
# make sure that this __init__.py is NOT INSTALLED TO THE SYSTEM! the "common"
# package owns that file. | en | 0.839371 | # Copyright (C) 2013 Sony Mobile Communications AB. # All rights, including trade secret rights, reserved. # the ave Python package is implemented in several Debian packages (git trees # really). this causes problems when importing different ave modules from # different source paths. consider the following template which is used in many # unit test jobs for various ave modules: # # path = os.path.dirname(os.path.dirname(__file__)) # path = os.path.join(path, 'src') # sys.path.insert(0, path) # import runners # runnsers.all_git() # # after "sys.path.insert(0, path)", the interpreter won't be able to find any # ave modules which are not implemented in the current tree. the following two # lines work around that by adding the tree-local modules to another name space # with the same name as the system-installed modules. # make sure that this __init__.py is NOT INSTALLED TO THE SYSTEM! the "common" # package owns that file. | 1.939206 | 2 |
Examples/DiskMargin.py | UASLab/OpenFlightAnalysis | 7 | 6625541 | #%%
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
import FreqTrans
#%%
pCrit = -1+0j
T = np.array([-0.5 - 0.5j])
TUnc = np.array([0.5 + 0.25j])
rCritNom, rCritUnc, rCrit, pCont = FreqTrans.DistCritEllipse(T, TUnc, pCrit = pCrit)
#rCritNomCirc, rCritUncCirc, rCritCirc = FreqTrans.DistCritCirc(T, TUnc, pCrit = pCrit, typeNorm = 'RMS')
rCirc = np.sqrt(0.5) * np.abs(TUnc) # RMS
#rCirc = np.max([TUnc.real, TUnc.imag]) # Max
#rCirc = np.mean([TUnc.real, TUnc.imag]) # Mean
#rCirc = np.abs(TUnc) # RSS
TUncCirc = np.array([rCirc+1j*rCirc])
rCritNomCirc, rCritUncCirc, rCritCirc, pContCirc = FreqTrans.DistCritEllipse(T, TUncCirc, pCrit = pCrit)
#%
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(T.real, T.imag, 'b*-')
ax.plot([pCrit.real, T.real], [pCrit.imag, T.imag], 'r*:')
ellipse = matplotlib.patches.Ellipse(xy = [T.real, T.imag], width=2*TUnc.real, height=2*TUnc.imag, color='b', alpha = 0.5)
ax.add_patch(ellipse)
ax.plot([pCrit.real, pCont.real], [pCrit.imag, pCont.imag], 'b*--')
circ = matplotlib.patches.Ellipse(xy = [T.real, T.imag], width=2*TUncCirc.real, height=2*TUncCirc.imag, color='g', alpha = 0.5)
ax.add_patch(circ)
ax.plot([pCrit.real, pContCirc.real], [pCrit.imag, pContCirc.imag], 'g*--')
ax.axis('equal')
fig.suptitle(['Nom: ' + str(rCritNom[0]) + ' Ellipse: ' + str(rCrit[0]) + ', Circle: ' + str(rCritCirc[0])])
| #%%
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
import FreqTrans
#%%
pCrit = -1+0j
T = np.array([-0.5 - 0.5j])
TUnc = np.array([0.5 + 0.25j])
rCritNom, rCritUnc, rCrit, pCont = FreqTrans.DistCritEllipse(T, TUnc, pCrit = pCrit)
#rCritNomCirc, rCritUncCirc, rCritCirc = FreqTrans.DistCritCirc(T, TUnc, pCrit = pCrit, typeNorm = 'RMS')
rCirc = np.sqrt(0.5) * np.abs(TUnc) # RMS
#rCirc = np.max([TUnc.real, TUnc.imag]) # Max
#rCirc = np.mean([TUnc.real, TUnc.imag]) # Mean
#rCirc = np.abs(TUnc) # RSS
TUncCirc = np.array([rCirc+1j*rCirc])
rCritNomCirc, rCritUncCirc, rCritCirc, pContCirc = FreqTrans.DistCritEllipse(T, TUncCirc, pCrit = pCrit)
#%
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(T.real, T.imag, 'b*-')
ax.plot([pCrit.real, T.real], [pCrit.imag, T.imag], 'r*:')
ellipse = matplotlib.patches.Ellipse(xy = [T.real, T.imag], width=2*TUnc.real, height=2*TUnc.imag, color='b', alpha = 0.5)
ax.add_patch(ellipse)
ax.plot([pCrit.real, pCont.real], [pCrit.imag, pCont.imag], 'b*--')
circ = matplotlib.patches.Ellipse(xy = [T.real, T.imag], width=2*TUncCirc.real, height=2*TUncCirc.imag, color='g', alpha = 0.5)
ax.add_patch(circ)
ax.plot([pCrit.real, pContCirc.real], [pCrit.imag, pContCirc.imag], 'g*--')
ax.axis('equal')
fig.suptitle(['Nom: ' + str(rCritNom[0]) + ' Ellipse: ' + str(rCrit[0]) + ', Circle: ' + str(rCritCirc[0])])
| en | 0.225316 | #%% # Hack to allow loading the Core package #%% #rCritNomCirc, rCritUncCirc, rCritCirc = FreqTrans.DistCritCirc(T, TUnc, pCrit = pCrit, typeNorm = 'RMS') # RMS #rCirc = np.max([TUnc.real, TUnc.imag]) # Max #rCirc = np.mean([TUnc.real, TUnc.imag]) # Mean #rCirc = np.abs(TUnc) # RSS #% | 2.012405 | 2 |
corehq/apps/export/migrations/0005_datafile_blobmeta.py | dannyroberts/commcare-hq | 0 | 6625542 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-03 17:32
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.blobs import CODES
from corehq.sql_db.util import get_db_alias_for_partitioned_doc
def move_datafile_to_blobmeta(apps, schema_editor):
DataFile = apps.get_model('export', 'DataFile')
BlobMeta = apps.get_model('blobs', 'BlobMeta')
# At time of writing there are only 91 DataFile rows on prod, 1 on icds-new
# this may need to be changed if envs exist having many many more
#
# '_default' is the bucket name from the old blob db API.
for datafile in DataFile.objects.all():
db = get_db_alias_for_partitioned_doc(datafile.domain)
BlobMeta(
domain=datafile.domain,
parent_id=datafile.domain,
type_code=CODES.data_file,
key="_default/" + datafile.blob_id,
properties={"description": datafile.description},
content_type=datafile.content_type,
content_length=datafile.content_length,
expires_on=datafile.delete_after,
).save(using=db)
class Migration(migrations.Migration):
dependencies = [
('export', '0004_datafile_delete_after'),
]
operations = [
migrations.RunPython(move_datafile_to_blobmeta),
migrations.DeleteModel(name='DataFile'),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-03 17:32
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.blobs import CODES
from corehq.sql_db.util import get_db_alias_for_partitioned_doc
def move_datafile_to_blobmeta(apps, schema_editor):
DataFile = apps.get_model('export', 'DataFile')
BlobMeta = apps.get_model('blobs', 'BlobMeta')
# At time of writing there are only 91 DataFile rows on prod, 1 on icds-new
# this may need to be changed if envs exist having many many more
#
# '_default' is the bucket name from the old blob db API.
for datafile in DataFile.objects.all():
db = get_db_alias_for_partitioned_doc(datafile.domain)
BlobMeta(
domain=datafile.domain,
parent_id=datafile.domain,
type_code=CODES.data_file,
key="_default/" + datafile.blob_id,
properties={"description": datafile.description},
content_type=datafile.content_type,
content_length=datafile.content_length,
expires_on=datafile.delete_after,
).save(using=db)
class Migration(migrations.Migration):
dependencies = [
('export', '0004_datafile_delete_after'),
]
operations = [
migrations.RunPython(move_datafile_to_blobmeta),
migrations.DeleteModel(name='DataFile'),
]
| en | 0.887535 | # -*- coding: utf-8 -*- # Generated by Django 1.11.14 on 2018-08-03 17:32 # At time of writing there are only 91 DataFile rows on prod, 1 on icds-new # this may need to be changed if envs exist having many many more # # '_default' is the bucket name from the old blob db API. | 1.922354 | 2 |
train.py | ruthcrasto/meta-optim-public | 38 | 6625543 | <reponame>ruthcrasto/meta-optim-public
# =============================================================================
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
"""Training utilities.
Author: <NAME> (<EMAIL>)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import os
import six
import tensorflow as tf
from collections import namedtuple
from tqdm import tqdm
from get_dataset import get_dataset
from logger import get as get_logger
from models import get_mnist_mlp_config, get_mnist_mlp_model
log = get_logger()
# Training curves.
Results = namedtuple('Results', [
'step', 'train_xent', 'train_acc', 'test_xent', 'test_acc', 'lr', 'decay'
])
def save_results(fname, results):
"""Saves training results."""
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
np.save(fname, np.array(results._asdict(), dtype=object))
def train_steps(sess,
m,
data_list,
init_lr=0.1,
decay_const=0.0,
time_const=5000.0):
"""Train an MLP for MNIST for certain amount of steps.
Args:
sess: TensorFlow session object.
m: Model object.
data_list: List of tuples of x and y's.
init_lr: Float. Initial learning rate.
decay: Float. Decay constant.
Returns:
cost: Final cost by the end of the training.
"""
for ii, (xd, yd) in enumerate(data_list):
if decay_const > 0.0:
lr_ = init_lr / ((1.0 + ii / time_const)**decay_const)
m.optimizer.assign_hyperparam(sess, 'lr', lr_)
if lr_ > 1e-6:
cost_, _ = sess.run(
[m.cost, m.train_op], feed_dict={
m.x: xd,
m.y: yd
})
final_cost = 0.0
for ii, (xd, yd) in enumerate(data_list[:600]):
final_cost += sess.run(m.cost, feed_dict={m.x: xd, m.y: yd}) / 600.0
return cost_, final_cost
def train_mnist_mlp_with_test(init_lr=0.1,
                              momentum=0.9,
                              num_steps=50000,
                              middle_decay=False,
                              inverse_decay=False,
                              decay_const=0.0,
                              time_const=5000.0,
                              steps_per_eval=100,
                              batch_size=100,
                              pretrain_ckpt=None,
                              save_ckpt=None,
                              print_step=False,
                              data_list=None,
                              data_list_eval=None,
                              data_list_test=None):
    """Train an MLP for MNIST, evaluating periodically on train and test.

    Args:
        init_lr: Float. Initial learning rate.
        momentum: Float. Momentum hyperparameter of the optimizer.
        num_steps: Int. Total number of training steps.
        middle_decay: Bool. Exponentially decay the LR after the midpoint.
        inverse_decay: Bool. Apply inverse time decay from the start.
        decay_const: Float. Decay constant of the inverse-decay schedule.
        time_const: Float. Time constant of the inverse-decay schedule.
        steps_per_eval: Int. Evaluate every this many steps.
        batch_size: Int. Mini-batch size.
        pretrain_ckpt: Optional checkpoint path to warm-start the weights.
        save_ckpt: Optional checkpoint path for saving the final model.
        print_step: Bool. Log metrics instead of showing a progress bar.
        data_list: Optional pre-generated list of training batches.
        data_list_eval: Optional pre-generated list of train-eval batches.
        data_list_test: Optional pre-generated list of test batches.
    Returns:
        results: Results tuple object.
    """
    # Fall back to streaming datasets when explicit batch lists are absent.
    if data_list is None:
        dataset = get_dataset('mnist')
    if data_list_eval is None:
        dataset_train = get_dataset('mnist')
    if data_list_test is None:
        dataset_test = get_dataset('mnist', test=True)
    x = tf.placeholder(tf.float32, [None, 28, 28, 1], name="x")
    y = tf.placeholder(tf.int64, [None], name="y")
    config = get_mnist_mlp_config(init_lr, momentum)
    # Train and eval graphs share weights via the reused 'Model' scope.
    with tf.name_scope('Train'):
        with tf.variable_scope('Model'):
            m = get_mnist_mlp_model(config, x, y, training=True)
    with tf.name_scope('Test'):
        with tf.variable_scope('Model', reuse=True):
            mtest = get_mnist_mlp_model(config, x, y, training=False)
    final_lr = 1e-4
    midpoint = num_steps // 2
    if True:
        # MNIST split sizes.
        num_train = 60000
        num_test = 10000
    lr_ = init_lr
    bsize = batch_size
    steps_per_epoch = num_train // bsize
    steps_test_per_epoch = num_test // bsize
    # Time constant chosen so the LR decays from init_lr to final_lr
    # between the midpoint and the last step (used by middle_decay).
    tau = (num_steps - midpoint) / np.log(init_lr / final_lr)
    train_xent_list = []
    train_cost_list = []
    train_acc_list = []
    test_xent_list = []
    test_cost_list = []
    test_acc_list = []
    lr_list = []
    step_list = []
    # When warm-starting, restore only the model weights; optimizer slots
    # and hyperparameter variables are (re-)initialized below.
    var_to_restore = list(
        filter(lambda x: 'momentum' not in x.name.lower(),
               tf.global_variables()))
    var_to_restore = list(
        filter(lambda x: 'global_step' not in x.name.lower(), var_to_restore))
    var_to_restore = list(
        filter(lambda x: 'lr' not in x.name.lower(), var_to_restore))
    var_to_restore = list(
        filter(lambda x: 'mom' not in x.name.lower(), var_to_restore))
    var_to_restore = list(
        filter(lambda x: 'decay' not in x.name.lower(), var_to_restore))
    var_to_init = list(
        filter(lambda x: x not in var_to_restore, tf.global_variables()))
    restorer = tf.train.Saver(var_to_restore)
    if inverse_decay:
        log.info(
            'Applying inverse decay with time constant = {:.3e} and decay constant = {:.3e}'.
            format(time_const, decay_const))
    if middle_decay:
        log.info(
            'Applying decay at midpoint with final learning rate = {:.3e}'.
            format(final_lr))
    assert not (
        inverse_decay and middle_decay
    ), 'Inverse decay and middle decay cannot be applied at the same time.'
    with tf.Session() as sess:
        if pretrain_ckpt is None:
            sess.run(tf.global_variables_initializer())
        else:
            sess.run(tf.variables_initializer(var_to_init))
            restorer.restore(sess, pretrain_ckpt)
        # Assign initial learning rate.
        m.optimizer.assign_hyperparam(sess, 'lr', lr_)
        train_iter = six.moves.xrange(num_steps)
        if not print_step:
            train_iter = tqdm(train_iter, ncols=0)
        for ii in train_iter:
            if data_list is None:
                xd, yd = dataset.next_batch(bsize)
            else:
                xd, yd = data_list[ii]
            # Skip the update once the LR is effectively zero.
            if lr_ > 1e-6:
                cost_, _ = sess.run(
                    [m.cost, m.train_op], feed_dict={
                        x: xd,
                        y: yd
                    })
            test_acc = 0.0
            test_xent = 0.0
            train_acc = 0.0
            train_xent = 0.0
            epoch = ii // steps_per_epoch
            if inverse_decay:
                lr_ = init_lr / ((1.0 + ii / time_const)**decay_const)
            if middle_decay and ii > midpoint:
                lr_ = np.exp(-(ii - midpoint) / tau) * init_lr
            m.optimizer.assign_hyperparam(sess, 'lr', lr_)
            # Evaluate every certain number of steps.
            if ii == 0 or (ii + 1) % steps_per_eval == 0:
                # One full pass over the training set (training-mode graph).
                for jj in six.moves.xrange(steps_per_epoch):
                    if data_list_eval is None:
                        xd, yd = dataset_train.next_batch(bsize)
                    else:
                        xd, yd = data_list_eval[jj]
                    xent_, acc_ = sess.run(
                        [m.cost, m.acc], feed_dict={
                            x: xd,
                            y: yd
                        })
                    train_xent += xent_ / float(steps_per_epoch)
                    train_acc += acc_ / float(steps_per_epoch)
                step_list.append(ii + 1)
                train_xent_list.append(train_xent)
                train_acc_list.append(train_acc)
                if data_list_eval is None:
                    dataset_train.reset()
                # One full pass over the test set (eval-mode graph).
                for jj in six.moves.xrange(steps_test_per_epoch):
                    if data_list_test is None:
                        xd, yd = dataset_test.next_batch(bsize)
                    else:
                        xd, yd = data_list_test[jj]
                    xent_, acc_ = sess.run(
                        [mtest.cost, mtest.acc], feed_dict={
                            x: xd,
                            y: yd
                        })
                    test_xent += xent_ / float(steps_test_per_epoch)
                    test_acc += acc_ / float(steps_test_per_epoch)
                test_xent_list.append(test_xent)
                test_acc_list.append(test_acc)
                if data_list_test is None:
                    dataset_test.reset()
                lr_list.append(lr_)
                if print_step:
                    log.info((
                        'Steps {:d} T Xent {:.3e} T Acc {:.3f} V Xent {:.3e} V Acc {:.3f} '
                        'LR {:.3e}').format(ii + 1, train_xent,
                                            train_acc * 100.0, test_xent,
                                            test_acc * 100.0, lr_))
        if save_ckpt is not None:
            saver = tf.train.Saver()
            saver.save(sess, save_ckpt)
    return Results(
        step=np.array(step_list),
        train_xent=np.array(train_xent_list),
        train_acc=np.array(train_acc_list),
        test_xent=np.array(test_xent_list),
        test_acc=np.array(test_acc_list),
        lr=np.array(lr_list),
        decay=decay_const)
def meta_step(sess, model, data_list, look_ahead_ops, hp_grads_op,
              hp_grads_plh, meta_train_op, eval_data_list):
    """Run a meta step.
    Args:
        sess: TensorFlow session object.
        model: Model object.
        data_list: List of tuples of inputs and labels.
        look_ahead_ops: TensorFlow ops that accumulates hyperparameter gradients.
        hp_grads_op: TensorFlow ops to calculate the final hyperparameter gradients.
        hp_grads_plh: Placeholders for hyperparameter gradients.
        meta_train_op: TensorFlow ops that updates the hyperparameters.
        eval_data_list: List of (x, y) batches used to evaluate the look-ahead.
    Returns:
        cost: Loss of the network at the end of the look ahead steps.
        hp: A dictionary maps from hyperparameter names to their current values.
    """
    assert len(data_list) > 1, 'We need to look ahead more than 1 step.'
    # Look-ahead phase: accumulate hyper-gradient statistics, then take a
    # regular training step, for each batch.
    # NOTE(review): the original comment said "run till the second last
    # item", but this loop visits every batch in data_list -- confirm intent.
    for ii, (xd, yd) in enumerate(data_list):
        fdict = {model.x: xd, model.y: yd}
        sess.run(look_ahead_ops, feed_dict=fdict)
        sess.run(model.train_op, feed_dict=fdict)
    # Average the cost and the hyperparameter gradients over eval batches.
    cost = 0.0
    grads = [0.0] * len(hp_grads_plh)
    neval = len(eval_data_list)
    for ii, (xd, yd) in enumerate(eval_data_list):
        fdict = {model.x: xd, model.y: yd}
        results = sess.run([model.cost] + hp_grads_op, feed_dict=fdict)
        cost += results[0] / float(neval)
        for jj, rr in enumerate(results[1:]):
            grads[jj] += rr / float(neval)
    # Feed the averaged gradients back in to update the hyperparameters.
    hp_fict = dict(zip(hp_grads_plh, grads))
    sess.run(meta_train_op, feed_dict=hp_fict)
    hp = sess.run(model.optimizer.hyperparams)
    return cost, hp
| # =============================================================================
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
"""Training utilities.
Author: <NAME> (<EMAIL>)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import os
import six
import tensorflow as tf
from collections import namedtuple
from tqdm import tqdm
from get_dataset import get_dataset
from logger import get as get_logger
from models import get_mnist_mlp_config, get_mnist_mlp_model
log = get_logger()
# Training curves.
# One evaluation trace per field: step indices, train/test cross-entropy
# and accuracy arrays, the learning-rate schedule, and the decay constant
# used for the run (produced by train_mnist_mlp_with_test).
Results = namedtuple('Results', [
    'step', 'train_xent', 'train_acc', 'test_xent', 'test_acc', 'lr', 'decay'
])
def save_results(fname, results):
    """Saves training results to an ``.npy`` file.

    Args:
        fname: Output file path; parent directories are created on demand.
        results: A ``Results`` namedtuple (any namedtuple works).
    """
    # os.path.dirname returns '' for a bare filename; the original called
    # os.makedirs('') in that case, which raises. Only create a directory
    # when one is actually part of the path.
    dirname = os.path.dirname(fname)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    # Wrap the namedtuple's OrderedDict in a 0-d object array so that
    # np.save can serialize (pickle) it.
    np.save(fname, np.array(results._asdict(), dtype=object))
def train_steps(sess,
                m,
                data_list,
                init_lr=0.1,
                decay_const=0.0,
                time_const=5000.0):
    """Train an MLP for MNIST on a fixed list of mini-batches.

    Args:
        sess: TensorFlow session object.
        m: Model object exposing ``cost``, ``train_op`` and ``optimizer``.
        data_list: List of tuples of x and y's.
        init_lr: Float. Initial learning rate.
        decay_const: Float. Decay constant; 0.0 disables the schedule.
        time_const: Float. Time constant of the inverse-decay schedule.

    Returns:
        cost_: Cost of the last executed training step (None if none ran).
        final_cost: Mean cost over the first 600 mini-batches after training.
    """
    # Initialize up front: in the original, lr_ and cost_ were only bound
    # inside conditional branches, raising NameError when decay_const == 0
    # or when no update step ever executed.
    lr_ = init_lr
    cost_ = None
    for ii, (xd, yd) in enumerate(data_list):
        if decay_const > 0.0:
            # Inverse time decay: lr = lr0 / (1 + t / tau)^c.
            lr_ = init_lr / ((1.0 + ii / time_const)**decay_const)
            m.optimizer.assign_hyperparam(sess, 'lr', lr_)
        # Skip the update once the LR is effectively zero.
        if lr_ > 1e-6:
            cost_, _ = sess.run(
                [m.cost, m.train_op], feed_dict={
                    m.x: xd,
                    m.y: yd
                })
    # Final training cost, averaged over (up to) the first 600 batches.
    final_cost = 0.0
    for ii, (xd, yd) in enumerate(data_list[:600]):
        final_cost += sess.run(m.cost, feed_dict={m.x: xd, m.y: yd}) / 600.0
    return cost_, final_cost
def train_mnist_mlp_with_test(init_lr=0.1,
                              momentum=0.9,
                              num_steps=50000,
                              middle_decay=False,
                              inverse_decay=False,
                              decay_const=0.0,
                              time_const=5000.0,
                              steps_per_eval=100,
                              batch_size=100,
                              pretrain_ckpt=None,
                              save_ckpt=None,
                              print_step=False,
                              data_list=None,
                              data_list_eval=None,
                              data_list_test=None):
    """Train an MLP for MNIST, evaluating periodically on train and test.

    Args:
        init_lr: Float. Initial learning rate.
        momentum: Float. Momentum hyperparameter of the optimizer.
        num_steps: Int. Total number of training steps.
        middle_decay: Bool. Exponentially decay the LR after the midpoint.
        inverse_decay: Bool. Apply inverse time decay from the start.
        decay_const: Float. Decay constant of the inverse-decay schedule.
        time_const: Float. Time constant of the inverse-decay schedule.
        steps_per_eval: Int. Evaluate every this many steps.
        batch_size: Int. Mini-batch size.
        pretrain_ckpt: Optional checkpoint path to warm-start the weights.
        save_ckpt: Optional checkpoint path for saving the final model.
        print_step: Bool. Log metrics instead of showing a progress bar.
        data_list: Optional pre-generated list of training batches.
        data_list_eval: Optional pre-generated list of train-eval batches.
        data_list_test: Optional pre-generated list of test batches.
    Returns:
        results: Results tuple object.
    """
    # Fall back to streaming datasets when explicit batch lists are absent.
    if data_list is None:
        dataset = get_dataset('mnist')
    if data_list_eval is None:
        dataset_train = get_dataset('mnist')
    if data_list_test is None:
        dataset_test = get_dataset('mnist', test=True)
    x = tf.placeholder(tf.float32, [None, 28, 28, 1], name="x")
    y = tf.placeholder(tf.int64, [None], name="y")
    config = get_mnist_mlp_config(init_lr, momentum)
    # Train and eval graphs share weights via the reused 'Model' scope.
    with tf.name_scope('Train'):
        with tf.variable_scope('Model'):
            m = get_mnist_mlp_model(config, x, y, training=True)
    with tf.name_scope('Test'):
        with tf.variable_scope('Model', reuse=True):
            mtest = get_mnist_mlp_model(config, x, y, training=False)
    final_lr = 1e-4
    midpoint = num_steps // 2
    if True:
        # MNIST split sizes.
        num_train = 60000
        num_test = 10000
    lr_ = init_lr
    bsize = batch_size
    steps_per_epoch = num_train // bsize
    steps_test_per_epoch = num_test // bsize
    # Time constant chosen so the LR decays from init_lr to final_lr
    # between the midpoint and the last step (used by middle_decay).
    tau = (num_steps - midpoint) / np.log(init_lr / final_lr)
    train_xent_list = []
    train_cost_list = []
    train_acc_list = []
    test_xent_list = []
    test_cost_list = []
    test_acc_list = []
    lr_list = []
    step_list = []
    # When warm-starting, restore only the model weights; optimizer slots
    # and hyperparameter variables are (re-)initialized below.
    var_to_restore = list(
        filter(lambda x: 'momentum' not in x.name.lower(),
               tf.global_variables()))
    var_to_restore = list(
        filter(lambda x: 'global_step' not in x.name.lower(), var_to_restore))
    var_to_restore = list(
        filter(lambda x: 'lr' not in x.name.lower(), var_to_restore))
    var_to_restore = list(
        filter(lambda x: 'mom' not in x.name.lower(), var_to_restore))
    var_to_restore = list(
        filter(lambda x: 'decay' not in x.name.lower(), var_to_restore))
    var_to_init = list(
        filter(lambda x: x not in var_to_restore, tf.global_variables()))
    restorer = tf.train.Saver(var_to_restore)
    if inverse_decay:
        log.info(
            'Applying inverse decay with time constant = {:.3e} and decay constant = {:.3e}'.
            format(time_const, decay_const))
    if middle_decay:
        log.info(
            'Applying decay at midpoint with final learning rate = {:.3e}'.
            format(final_lr))
    assert not (
        inverse_decay and middle_decay
    ), 'Inverse decay and middle decay cannot be applied at the same time.'
    with tf.Session() as sess:
        if pretrain_ckpt is None:
            sess.run(tf.global_variables_initializer())
        else:
            sess.run(tf.variables_initializer(var_to_init))
            restorer.restore(sess, pretrain_ckpt)
        # Assign initial learning rate.
        m.optimizer.assign_hyperparam(sess, 'lr', lr_)
        train_iter = six.moves.xrange(num_steps)
        if not print_step:
            train_iter = tqdm(train_iter, ncols=0)
        for ii in train_iter:
            if data_list is None:
                xd, yd = dataset.next_batch(bsize)
            else:
                xd, yd = data_list[ii]
            # Skip the update once the LR is effectively zero.
            if lr_ > 1e-6:
                cost_, _ = sess.run(
                    [m.cost, m.train_op], feed_dict={
                        x: xd,
                        y: yd
                    })
            test_acc = 0.0
            test_xent = 0.0
            train_acc = 0.0
            train_xent = 0.0
            epoch = ii // steps_per_epoch
            if inverse_decay:
                lr_ = init_lr / ((1.0 + ii / time_const)**decay_const)
            if middle_decay and ii > midpoint:
                lr_ = np.exp(-(ii - midpoint) / tau) * init_lr
            m.optimizer.assign_hyperparam(sess, 'lr', lr_)
            # Evaluate every certain number of steps.
            if ii == 0 or (ii + 1) % steps_per_eval == 0:
                # One full pass over the training set (training-mode graph).
                for jj in six.moves.xrange(steps_per_epoch):
                    if data_list_eval is None:
                        xd, yd = dataset_train.next_batch(bsize)
                    else:
                        xd, yd = data_list_eval[jj]
                    xent_, acc_ = sess.run(
                        [m.cost, m.acc], feed_dict={
                            x: xd,
                            y: yd
                        })
                    train_xent += xent_ / float(steps_per_epoch)
                    train_acc += acc_ / float(steps_per_epoch)
                step_list.append(ii + 1)
                train_xent_list.append(train_xent)
                train_acc_list.append(train_acc)
                if data_list_eval is None:
                    dataset_train.reset()
                # One full pass over the test set (eval-mode graph).
                for jj in six.moves.xrange(steps_test_per_epoch):
                    if data_list_test is None:
                        xd, yd = dataset_test.next_batch(bsize)
                    else:
                        xd, yd = data_list_test[jj]
                    xent_, acc_ = sess.run(
                        [mtest.cost, mtest.acc], feed_dict={
                            x: xd,
                            y: yd
                        })
                    test_xent += xent_ / float(steps_test_per_epoch)
                    test_acc += acc_ / float(steps_test_per_epoch)
                test_xent_list.append(test_xent)
                test_acc_list.append(test_acc)
                if data_list_test is None:
                    dataset_test.reset()
                lr_list.append(lr_)
                if print_step:
                    log.info((
                        'Steps {:d} T Xent {:.3e} T Acc {:.3f} V Xent {:.3e} V Acc {:.3f} '
                        'LR {:.3e}').format(ii + 1, train_xent,
                                            train_acc * 100.0, test_xent,
                                            test_acc * 100.0, lr_))
        if save_ckpt is not None:
            saver = tf.train.Saver()
            saver.save(sess, save_ckpt)
    return Results(
        step=np.array(step_list),
        train_xent=np.array(train_xent_list),
        train_acc=np.array(train_acc_list),
        test_xent=np.array(test_xent_list),
        test_acc=np.array(test_acc_list),
        lr=np.array(lr_list),
        decay=decay_const)
def meta_step(sess, model, data_list, look_ahead_ops, hp_grads_op,
hp_grads_plh, meta_train_op, eval_data_list):
"""Run a meta step.
Args:
model: Model
data_list: List of tuples of inputs and labels.
look_ahead_ops: TensorFlow ops that accumulates hyperparameter gradients.
hp_grads_op: TensorFlow ops to calculate the final hyperparameter gradients.
hp_grads_plh: Placeholders for hyperparameter gradients.
meta_train_op: TensorFlow ops that updates the hyperparameters.
Returns:
cost: Loss of the network at the end of the look ahead steps.
hp: A dictionary maps from hyperparameter names to their current values.
"""
assert len(data_list) > 1, 'We need to look ahead more than 1 step.'
# Run till the second last item.
for ii, (xd, yd) in enumerate(data_list):
fdict = {model.x: xd, model.y: yd}
sess.run(look_ahead_ops, feed_dict=fdict)
sess.run(model.train_op, feed_dict=fdict)
cost = 0.0
grads = [0.0] * len(hp_grads_plh)
neval = len(eval_data_list)
for ii, (xd, yd) in enumerate(eval_data_list):
fdict = {model.x: xd, model.y: yd}
results = sess.run([model.cost] + hp_grads_op, feed_dict=fdict)
cost += results[0] / float(neval)
for jj, rr in enumerate(results[1:]):
grads[jj] += rr / float(neval)
hp_fict = dict(zip(hp_grads_plh, grads))
sess.run(meta_train_op, feed_dict=hp_fict)
hp = sess.run(model.optimizer.hyperparams)
return cost, hp | en | 0.721122 | # ============================================================================= # Copyright (c) 2018 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ============================================================================= Training utilities. Author: <NAME> (<EMAIL>) # Training curves. Saves training results. Train an MLP for MNIST for certain amount of steps. Args: sess: TensorFlow session object. m: Model object. data_list: List of tuples of x and y's. init_lr: Float. Initial learning rate. decay: Float. Decay constant. Returns: cost: Final cost by the end of the training. Train an MLP for MNIST. Args: init_lr: momentum: num_steps: middle_decay: pretrain_ckpt: Returns: results: Results tuple object. # Assign initial learning rate. # Evaluate every certain number of steps. Run a meta step. Args: model: Model data_list: List of tuples of inputs and labels. look_ahead_ops: TensorFlow ops that accumulates hyperparameter gradients. 
hp_grads_op: TensorFlow ops to calculate the final hyperparameter gradients. hp_grads_plh: Placeholders for hyperparameter gradients. meta_train_op: TensorFlow ops that updates the hyperparameters. Returns: cost: Loss of the network at the end of the look ahead steps. hp: A dictionary maps from hyperparameter names to their current values. # Run till the second last item. | 1.462948 | 1 |
enigmatoolbox/vtk_interface/io_support/freesurfer_support.py | saratheriver/ENIGMA | 0 | 6625544 | <gh_stars>0
"""VTK read/write filters for FreeSurfer geometry files."""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import re
import numpy as np
from vtk import vtkPolyData
from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase
from ..checks import has_only_triangle
from ..decorators import wrap_input
from ...mesh.mesh_creation import build_polydata
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
def _fread3(fobj):
"""Read a 3-byte int from an open binary file object
Parameters
----------
fobj : file
File descriptor
Returns
-------
n : int
A 3 byte int
"""
b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
return (b1 << 16) + (b2 << 8) + b3
def _fread3_many(fobj, n):
"""Read 3-byte ints from an open binary file object.
Parameters
----------
fobj : file
File descriptor
Returns
-------
out : 1D array
An array of 3 byte int
"""
b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1, 3).astype(np.int).T
return (b1 << 16) + (b2 << 8) + b3
def _read_geometry_fs(ipth, is_ascii=False):
    """Read a FreeSurfer surface geometry file (adapted from nibabel).

    Parameters
    ----------
    ipth : str
        Path to the input surface file.
    is_ascii : bool, optional
        If True, parse the ASCII variant instead of the binary one.

    Returns
    -------
    vtkPolyData
        Surface holding the points and triangle cells from the file.
    """
    if is_ascii:
        with open(ipth) as fh:
            # Header line: '#!ascii version <original filename>'.
            re_header = re.compile(r'^#!ascii version (.*)$')
            fname_header = re_header.match(fh.readline()).group(1)
            # Second line: '<n_points> <n_cells>'. Raw strings fix the
            # invalid '\s'/'\d' escapes of the original literals (a
            # SyntaxWarning on modern Python); the regex is unchanged.
            re_npoints_cells = re.compile(r'[\s]*(\d+)[\s]*(\d+)[\s]*$')
            re_n = re_npoints_cells.match(fh.readline())
            n_points, n_cells = int(re_n.group(1)), int(re_n.group(2))
            x_points = np.zeros((n_points, 3))
            for i in range(n_points):
                x_points[i, :] = [float(v) for v in fh.readline().split()[:3]]
            x_cells = np.zeros((n_cells, 3), dtype=np.uintp)
            for i in range(n_cells):
                x_cells[i] = [np.uintp(v) for v in fh.readline().split()[:3]]
    else:
        with open(ipth, 'rb') as fh:
            # The first 3 bytes identify the FreeSurfer file flavour.
            magic = _fread3(fh)
            if magic not in [TRIANGLE_MAGIC, QUAD_MAGIC, NEW_QUAD_MAGIC]:
                raise IOError('File does not appear to be a '
                              'FreeSurfer surface.')
            if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):  # Quad file
                n_points, n_quad = _fread3(fh), _fread3(fh)
                # Legacy quad files store int16 coordinates scaled by 100.
                (fmt, div) = ('>i2', 100) if magic == QUAD_MAGIC else ('>f4', 1)
                x_points = np.fromfile(fh, fmt, n_points * 3).astype(np.float64)
                x_points /= div
                x_points = x_points.reshape(-1, 3)
                quads = _fread3_many(fh, n_quad * 4)
                quads = quads.reshape(n_quad, 4)
                # Each quad is split into two triangles.
                n_cells = 2 * n_quad
                x_cells = np.zeros((n_cells, 3), dtype=np.uintp)
                # Face splitting follows (Remove loop in nib) -> Not tested!
                m0 = (quads[:, 0] % 2) == 0
                m0d = np.repeat(m0, 2)
                x_cells[m0d].flat[:] = quads[m0][:, [0, 1, 3, 2, 3, 1]]
                x_cells[~m0d].flat[:] = quads[~m0][:, [0, 1, 2, 0, 2, 3]]
            elif magic == TRIANGLE_MAGIC:  # Triangle file
                # Skip the creation-stamp line and the following blank line.
                fh.readline()
                fh.readline()
                n_points, n_cells = np.fromfile(fh, '>i4', 2)
                x_points = np.fromfile(fh, '>f4', n_points * 3)
                x_points = x_points.reshape(n_points, 3).astype(np.float64)
                x_cells = np.zeros((n_cells, 3), dtype=np.uintp)
                x_cells.flat[:] = np.fromfile(fh, '>i4', n_cells * 3)
    return build_polydata(x_points, cells=x_cells).VTKObject
@wrap_input(0)
def _write_geometry_fs(pd, opth, fname_header=None, is_ascii=False):
    """Write a FreeSurfer surface geometry file (adapted from nibabel).

    Parameters
    ----------
    pd : BSPolyData
        Surface to write; must contain only triangle cells.
    opth : str
        Path to the output file.
    fname_header : str, optional
        Name embedded in the header / creation stamp.
    is_ascii : bool, optional
        If True, write the ASCII variant instead of the binary one.
    """
    if not has_only_triangle(pd):
        raise ValueError('FreeSurfer writer only accepts triangles.')
    n_points, n_cells = pd.GetNumberOfPoints(), pd.GetNumberOfCells()
    # Fourth column is the per-row value expected by the ASCII format.
    x_points = np.zeros((n_points, 4), dtype=np.float32)
    x_points[:, :3] = pd.GetPoints()
    x_cells = np.zeros((n_cells, 4), dtype=np.uintp)
    # VTK cell arrays are laid out as [npts, i0, i1, i2]; drop the count.
    x_cells[:, :3] = pd.GetPolygons().reshape(-1, 4)[:, 1:]
    if is_ascii:
        header = '#!ascii version of {fname}\n'.\
            format(fname='...' if fname_header is None else fname_header)
        npoints_cells = '{npoints} {ncells}\n'.\
            format(npoints=n_points, ncells=n_cells)
        with open(opth, 'w') as fh:
            fh.write(header)
            fh.write(npoints_cells)
            np.savetxt(fh, x_points, fmt=['%.6f', '%.6f', '%.6f', '%d'],
                       delimiter=' ')
            np.savetxt(fh, x_cells, fmt='%d', delimiter=' ')
    else:
        # TRIANGLE_MAGIC written as three raw bytes.
        magic_bytes = np.array([255, 255, 254], dtype=np.uint8)
        create_stamp = 'created by {0}'.\
            format('...' if fname_header is None else fname_header)
        with open(opth, 'wb') as fobj:
            magic_bytes.tofile(fobj)
            # BUG FIX: the original wrote '{0}%s\n\n', leaving a literal
            # '%s' in the creation stamp (leftover from %-formatting).
            fobj.write('{0}\n\n'.format(create_stamp).encode('utf-8'))
            np.array([n_points, n_cells], dtype='>i4').tofile(fobj)
            # Coerce types, just to be safe
            x_points[:, :3].astype('>f4').reshape(-1).tofile(fobj)
            x_cells[:, :3].astype('>i4').reshape(-1).tofile(fobj)
###############################################################################
# VTK Reader and Writer for FreeSurfer surfaces
###############################################################################
class vtkFSReader(VTKPythonAlgorithmBase):
    """VTK-like reader for FreeSurfer surface geometry.

    Handles both the binary and the ASCII variants of the format; binary
    is the default. Files whose extension is ``asc`` are always parsed
    as ASCII regardless of the flag.
    """

    def __init__(self):
        super().__init__(nInputPorts=0, nOutputPorts=1,
                         outputType='vtkPolyData')
        self.__fname = ''
        self.__ascii = False

    def RequestData(self, request, inInfo, outInfo):
        out = vtkPolyData.GetData(outInfo, 0)
        # Honor the explicit ASCII flag, else fall back to the extension.
        use_ascii = self.__ascii or self.__fname.split('.')[-1] == 'asc'
        surf = _read_geometry_fs(self.__fname, is_ascii=use_ascii)
        out.ShallowCopy(surf)
        return 1

    def SetFileTypeToBinary(self):
        if self.__ascii:
            self.__ascii = False
            self.Modified()

    def SetFileTypeToASCII(self):
        if not self.__ascii:
            self.__ascii = True
            self.Modified()

    def SetFileName(self, fname):
        if fname != self.__fname:
            self.__fname = fname
            self.Modified()

    def GetFileName(self):
        return self.__fname

    def GetOutput(self, p_int=0):
        return self.GetOutputDataObject(p_int)
class vtkFSWriter(VTKPythonAlgorithmBase):
    """VTK-like writer for FreeSurfer surface geometry.

    Persists only the geometry/topology (points and cells) of the input
    polydata. Both binary and ASCII outputs are supported; binary is the
    default.
    """

    def __init__(self):
        super().__init__(nInputPorts=1, inputType='vtkPolyData', nOutputPorts=0)
        self.__fname = ''
        self.__ascii = False

    def RequestData(self, request, inInfo, outInfo):
        surf = vtkPolyData.GetData(inInfo[0], 0)
        _write_geometry_fs(surf, self.__fname, fname_header=None,
                           is_ascii=self.__ascii)
        return 1

    def SetFileName(self, fname):
        if fname != self.__fname:
            self.__fname = fname
            self.Modified()

    def GetFileName(self):
        return self.__fname

    def SetFileTypeToBinary(self):
        if self.__ascii:
            self.__ascii = False
            self.Modified()

    def SetFileTypeToASCII(self):
        if not self.__ascii:
            self.__ascii = True
            self.Modified()

    def Write(self):
        # Trigger the pipeline, which ends up in RequestData above.
        self.Update()

    def SetInputData(self, *args):
        # Mirrors VTK's C++ overloads: SetInputData(dataobj) or
        # SetInputData(port, dataobj); the port argument is optional.
        self.SetInputDataObject(*args)
| """VTK read/write filters for FreeSurfer geometry files."""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import re
import numpy as np
from vtk import vtkPolyData
from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase
from ..checks import has_only_triangle
from ..decorators import wrap_input
from ...mesh.mesh_creation import build_polydata
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
def _fread3(fobj):
"""Read a 3-byte int from an open binary file object
Parameters
----------
fobj : file
File descriptor
Returns
-------
n : int
A 3 byte int
"""
b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
return (b1 << 16) + (b2 << 8) + b3
def _fread3_many(fobj, n):
"""Read 3-byte ints from an open binary file object.
Parameters
----------
fobj : file
File descriptor
Returns
-------
out : 1D array
An array of 3 byte int
"""
b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1, 3).astype(np.int).T
return (b1 << 16) + (b2 << 8) + b3
def _read_geometry_fs(ipth, is_ascii=False):
    """Read a FreeSurfer surface geometry file (adapted from nibabel).

    Parameters
    ----------
    ipth : str
        Path to the input surface file.
    is_ascii : bool, optional
        If True, parse the ASCII variant instead of the binary one.

    Returns
    -------
    vtkPolyData
        Surface holding the points and triangle cells from the file.
    """
    if is_ascii:
        with open(ipth) as fh:
            # Header line: '#!ascii version <original filename>'.
            re_header = re.compile(r'^#!ascii version (.*)$')
            fname_header = re_header.match(fh.readline()).group(1)
            # Second line: '<n_points> <n_cells>'. Raw strings fix the
            # invalid '\s'/'\d' escapes of the original literals (a
            # SyntaxWarning on modern Python); the regex is unchanged.
            re_npoints_cells = re.compile(r'[\s]*(\d+)[\s]*(\d+)[\s]*$')
            re_n = re_npoints_cells.match(fh.readline())
            n_points, n_cells = int(re_n.group(1)), int(re_n.group(2))
            x_points = np.zeros((n_points, 3))
            for i in range(n_points):
                x_points[i, :] = [float(v) for v in fh.readline().split()[:3]]
            x_cells = np.zeros((n_cells, 3), dtype=np.uintp)
            for i in range(n_cells):
                x_cells[i] = [np.uintp(v) for v in fh.readline().split()[:3]]
    else:
        with open(ipth, 'rb') as fh:
            # The first 3 bytes identify the FreeSurfer file flavour.
            magic = _fread3(fh)
            if magic not in [TRIANGLE_MAGIC, QUAD_MAGIC, NEW_QUAD_MAGIC]:
                raise IOError('File does not appear to be a '
                              'FreeSurfer surface.')
            if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):  # Quad file
                n_points, n_quad = _fread3(fh), _fread3(fh)
                # Legacy quad files store int16 coordinates scaled by 100.
                (fmt, div) = ('>i2', 100) if magic == QUAD_MAGIC else ('>f4', 1)
                x_points = np.fromfile(fh, fmt, n_points * 3).astype(np.float64)
                x_points /= div
                x_points = x_points.reshape(-1, 3)
                quads = _fread3_many(fh, n_quad * 4)
                quads = quads.reshape(n_quad, 4)
                # Each quad is split into two triangles.
                n_cells = 2 * n_quad
                x_cells = np.zeros((n_cells, 3), dtype=np.uintp)
                # Face splitting follows (Remove loop in nib) -> Not tested!
                m0 = (quads[:, 0] % 2) == 0
                m0d = np.repeat(m0, 2)
                x_cells[m0d].flat[:] = quads[m0][:, [0, 1, 3, 2, 3, 1]]
                x_cells[~m0d].flat[:] = quads[~m0][:, [0, 1, 2, 0, 2, 3]]
            elif magic == TRIANGLE_MAGIC:  # Triangle file
                # Skip the creation-stamp line and the following blank line.
                fh.readline()
                fh.readline()
                n_points, n_cells = np.fromfile(fh, '>i4', 2)
                x_points = np.fromfile(fh, '>f4', n_points * 3)
                x_points = x_points.reshape(n_points, 3).astype(np.float64)
                x_cells = np.zeros((n_cells, 3), dtype=np.uintp)
                x_cells.flat[:] = np.fromfile(fh, '>i4', n_cells * 3)
    return build_polydata(x_points, cells=x_cells).VTKObject
@wrap_input(0)
def _write_geometry_fs(pd, opth, fname_header=None, is_ascii=False):
    """Write a FreeSurfer surface geometry file (adapted from nibabel).

    Parameters
    ----------
    pd : BSPolyData
        Surface to write; must contain only triangle cells.
    opth : str
        Path to the output file.
    fname_header : str, optional
        Name embedded in the header / creation stamp.
    is_ascii : bool, optional
        If True, write the ASCII variant instead of the binary one.
    """
    if not has_only_triangle(pd):
        raise ValueError('FreeSurfer writer only accepts triangles.')
    n_points, n_cells = pd.GetNumberOfPoints(), pd.GetNumberOfCells()
    # Fourth column is the per-row value expected by the ASCII format.
    x_points = np.zeros((n_points, 4), dtype=np.float32)
    x_points[:, :3] = pd.GetPoints()
    x_cells = np.zeros((n_cells, 4), dtype=np.uintp)
    # VTK cell arrays are laid out as [npts, i0, i1, i2]; drop the count.
    x_cells[:, :3] = pd.GetPolygons().reshape(-1, 4)[:, 1:]
    if is_ascii:
        header = '#!ascii version of {fname}\n'.\
            format(fname='...' if fname_header is None else fname_header)
        npoints_cells = '{npoints} {ncells}\n'.\
            format(npoints=n_points, ncells=n_cells)
        with open(opth, 'w') as fh:
            fh.write(header)
            fh.write(npoints_cells)
            np.savetxt(fh, x_points, fmt=['%.6f', '%.6f', '%.6f', '%d'],
                       delimiter=' ')
            np.savetxt(fh, x_cells, fmt='%d', delimiter=' ')
    else:
        # TRIANGLE_MAGIC written as three raw bytes.
        magic_bytes = np.array([255, 255, 254], dtype=np.uint8)
        create_stamp = 'created by {0}'.\
            format('...' if fname_header is None else fname_header)
        with open(opth, 'wb') as fobj:
            magic_bytes.tofile(fobj)
            # BUG FIX: the original wrote '{0}%s\n\n', leaving a literal
            # '%s' in the creation stamp (leftover from %-formatting).
            fobj.write('{0}\n\n'.format(create_stamp).encode('utf-8'))
            np.array([n_points, n_cells], dtype='>i4').tofile(fobj)
            # Coerce types, just to be safe
            x_points[:, :3].astype('>f4').reshape(-1).tofile(fobj)
            x_cells[:, :3].astype('>i4').reshape(-1).tofile(fobj)
###############################################################################
# VTK Reader and Writer for FreeSurfer surfaces
###############################################################################
class vtkFSReader(VTKPythonAlgorithmBase):
    """VTK-like reader for FreeSurfer surface geometry.

    Handles both the binary and the ASCII variants of the format; binary
    is the default. Files whose extension is ``asc`` are always parsed
    as ASCII regardless of the flag.
    """

    def __init__(self):
        super().__init__(nInputPorts=0, nOutputPorts=1,
                         outputType='vtkPolyData')
        self.__fname = ''
        self.__ascii = False

    def RequestData(self, request, inInfo, outInfo):
        out = vtkPolyData.GetData(outInfo, 0)
        # Honor the explicit ASCII flag, else fall back to the extension.
        use_ascii = self.__ascii or self.__fname.split('.')[-1] == 'asc'
        surf = _read_geometry_fs(self.__fname, is_ascii=use_ascii)
        out.ShallowCopy(surf)
        return 1

    def SetFileTypeToBinary(self):
        if self.__ascii:
            self.__ascii = False
            self.Modified()

    def SetFileTypeToASCII(self):
        if not self.__ascii:
            self.__ascii = True
            self.Modified()

    def SetFileName(self, fname):
        if fname != self.__fname:
            self.__fname = fname
            self.Modified()

    def GetFileName(self):
        return self.__fname

    def GetOutput(self, p_int=0):
        return self.GetOutputDataObject(p_int)
class vtkFSWriter(VTKPythonAlgorithmBase):
"""VTK-like FreeSurfer surface geometry writer.
Only writes surface geometry/topology (points and cells).
Supports both binary and ASCII files. Default is binary.
"""
def __init__(self):
super().__init__(nInputPorts=1, inputType='vtkPolyData', nOutputPorts=0)
self.__FileName = ''
self.__is_ascii = False
def RequestData(self, request, inInfo, outInfo):
_write_geometry_fs(vtkPolyData.GetData(inInfo[0], 0),
self.__FileName, fname_header=None,
is_ascii=self.__is_ascii)
return 1
def SetFileName(self, fname):
if fname != self.__FileName:
self.__FileName = fname
self.Modified()
def GetFileName(self):
return self.__FileName
def SetFileTypeToBinary(self):
if self.__is_ascii:
self.__is_ascii = False
self.Modified()
def SetFileTypeToASCII(self):
if not self.__is_ascii:
self.__is_ascii = True
self.Modified()
def Write(self):
self.Update()
def SetInputData(self, *args):
# Signature is SetInputData(self, port, vtkDataObject) or simply
# SetInputData(self, vtkDataObject)
# A way to manage overloading in C++, because port is optional
self.SetInputDataObject(*args) | en | 0.533094 | VTK read/write filters for FreeSurfer geometry files. # Author: <NAME> <<EMAIL>> # License: BSD 3 clause Read a 3-byte int from an open binary file object Parameters ---------- fobj : file File descriptor Returns ------- n : int A 3 byte int Read 3-byte ints from an open binary file object. Parameters ---------- fobj : file File descriptor Returns ------- out : 1D array An array of 3 byte int Adapted from nibabel. Add ascii support. #!ascii version (.*)$') # Quad file # Face splitting follows (Remove loop in nib) -> Not tested! # Triangle file # create_stamp = fh.readline().rstrip(b'\n').decode('utf-8') Adapted from nibabel. Add ascii support. # Coerce types, just to be safe ############################################################################### # VTK Reader and Writer for FreeSurfer surfaces ############################################################################### VTK-like FreeSurfer surface geometry reader. Supports both binary and ASCII files. Default is binary. VTK-like FreeSurfer surface geometry writer. Only writes surface geometry/topology (points and cells). Supports both binary and ASCII files. Default is binary. # Signature is SetInputData(self, port, vtkDataObject) or simply # SetInputData(self, vtkDataObject) # A way to manage overloading in C++, because port is optional | 2.38857 | 2 |
statistic_analysis/result_analysis_hist_Impact_K_OE.py | proroklab/magat_pathplanning | 40 | 6625545 | <reponame>proroklab/magat_pathplanning
from scipy.io import loadmat
import numpy as np
import os
import csv
import matplotlib.pyplot as plt
import matplotlib.font_manager
# NOTE(review): _rebuild() is a private matplotlib API that was removed in
# matplotlib >= 3.6 -- confirm the pinned matplotlib version before upgrading.
matplotlib.font_manager._rebuild()
plt.rcParams['font.family'] = "serif"
import matplotlib.ticker as ticker
plt.rcParams.update({'font.size': 22})
import pandas as pd
import matplotlib
# Embed fonts as TrueType (Type 42) so PDF/PS figures stay editable.
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
class StatisticAnalysis:
def __init__(self, data_root, SAVEDATA_FOLDER, exp_setup, trained_num_agent, list_testing_num_agent):
self.DATA_FOLDER = data_root
self.SAVEDATA_FOLDER = SAVEDATA_FOLDER
self.exp_setup = exp_setup
self.trained_num_agent = trained_num_agent
self.list_testing_num_agent = list_testing_num_agent
self.load_data()
def load_data(self):
data = {
'dcp': {},
'dcpOE': {},
'rdcp': {},
'rdcpOE': {},
}
data_list = []
for data_type in data.keys():
for subdir, dirs, files in os.walk(os.path.join(self.DATA_FOLDER, data_type)):
for file in files:
# print os.path.join(subdir, file)
filepath = subdir + os.sep + file
if filepath.endswith(".mat"):
# print(subdir, file)
mat_data = loadmat(filepath)
rate_ReachGoal = mat_data['rate_ReachGoal'][0][0]
mean_deltaFT = mat_data['mean_deltaFT'][0][0]
mean_deltaMP = mat_data['mean_deltaMP'][0][0]
hidden_state = mat_data['hidden_state'][0][0]
num_agents_trained = mat_data['num_agents_trained'][0][0]
num_agents_testing = mat_data['num_agents_testing'][0][0]
K = mat_data['K'][0][0]
cleaned_data = {
'filename': file,
'type': data_type,
'exp_stamps': mat_data['exp_stamps'][0],
'map_size_trained': mat_data['map_size_trained'][0],
'map_density_trained': mat_data['map_density_trained'][0][0],
'num_agents_trained': mat_data['num_agents_trained'][0][0],
'map_size_testing': mat_data['map_size_testing'][0],
'map_density_testing': mat_data['map_density_testing'][0][0],
'num_agents_testing': mat_data['num_agents_testing'][0][0],
'K': K,
'hidden_state': hidden_state,
'rate_ReachGoal': rate_ReachGoal,
'mean_deltaFT': mean_deltaFT,
'std_deltaMP': mat_data['std_deltaMP'][0][0],
'mean_deltaMP': mean_deltaMP,
'std_deltaFT': mat_data['std_deltaFT'][0][0],
'list_numAgentReachGoal': mat_data['list_numAgentReachGoal'][0],
'hist_numAgentReachGoal': mat_data['hist_numAgentReachGoal'][0],
}
data_list.append(cleaned_data)
data[data_type].setdefault(num_agents_trained, {}).setdefault(num_agents_testing, []).append(
cleaned_data)
self.data_list = data_list
self.data = data
# print(len(data_list))
# return data
def plot_hist_data(self, title_setup, text_legend):
for index, testing_num_agent in enumerate(self.list_testing_num_agent):
print(testing_num_agent)
title_text = "{}_TE{}".format(title_setup, testing_num_agent)
label_set1 = self.exp_setup[0]
label_set1_type = label_set1.split(' ')[0].lower()
label_set1_K = int(label_set1.split('K')[1].split('-HS')[0])
label_set1_HS = int(label_set1.split('-HS')[1])
searched_results_set1 = [item for item in self.data_list
if item['num_agents_trained'] == self.trained_num_agent
and item['num_agents_testing'] == testing_num_agent
and item['type'].lower() == label_set1_type
and item['K'] == label_set1_K
and item['hidden_state'] == label_set1_HS
]
label_set2 = self.exp_setup[1]
label_set2_type = label_set2.split(' ')[0].lower()
label_set2_K = int(label_set2.split('K')[1].split('-HS')[0])
label_set2_HS = int(label_set2.split('-HS')[1])
searched_results_set2 = [item for item in self.data_list
if item['num_agents_trained'] == self.trained_num_agent
and item['num_agents_testing'] == testing_num_agent
and item['type'].lower() == label_set2_type
and item['K'] == label_set2_K
and item['hidden_state'] == label_set2_HS
]
if len(searched_results_set1) == 0:
pass
else:
hist_numAgentReachGoal_set1 = searched_results_set1[0]['hist_numAgentReachGoal']
print(label_set1, hist_numAgentReachGoal_set1)
hist_numAgentReachGoal_set2 = searched_results_set2[0]['hist_numAgentReachGoal']
print(label_set2, hist_numAgentReachGoal_set2)
total_num_cases = sum(hist_numAgentReachGoal_set1)
hist_numAgentReachGoal_norm_set1 = []
hist_numAgentReachGoal_norm_set2 = []
list_numAgents = []
for index in range(len(hist_numAgentReachGoal_set1)):
list_numAgents.append(str(index))
hist_numAgentReachGoal_norm_set1.append(hist_numAgentReachGoal_set1[index]/total_num_cases)
hist_numAgentReachGoal_norm_set2.append(hist_numAgentReachGoal_set2[index]/total_num_cases)
self.plot_figure(testing_num_agent, list_numAgents, total_num_cases, hist_numAgentReachGoal_norm_set1, hist_numAgentReachGoal_norm_set2, label_set1_K, title_text, text_legend)
pass
def plot_figure(self, testing_num_agent, list_numAgents, total_num_cases, hist_data_set1, hist_data_set2, label_set1_K, title_text, text_legend, use_log_scale=False):
self.fig, self.ax = plt.subplots()
self.fig.set_size_inches(8, 6)
# title_exp_setup = ('trained on {} agents and tested on {} agents'.format(self.trained_num_agent, testing_num_agent))
# self.title_text = 'Histogram of percentage (# agents reach goal among {} cases) \n in network is {}.'.format(total_num_cases, title_exp_setup)
#
# self.ax.set_title(self.title_text)
self.ax.set_xlabel('# robots')
width = 0.35 # the width of the bars
label_width = 1.05
if len(list_numAgents)<20 and label_set1_K == 2:
step_size = 2
elif len(list_numAgents)==60:
step_size = 6
else:
step_size = 5
self.ax.set_ylabel('Proportion of cases'.format(total_num_cases))
label_pos = np.arange(len(list_numAgents))
# rects1 = self.ax.bar(x - label_width / 2 + width * 1, hist_numAgentReachGoal, width, label=text_legend)
hist_set1 = self.ax.bar(label_pos, hist_data_set1, align='center', label='{}'.format(text_legend[0]), ls='dotted', lw=3, fc=(0, 0, 1, 0.5))
hist_set2 = self.ax.bar(label_pos, hist_data_set2, align='center', label='{}'.format(text_legend[1]),lw=3, fc=(1, 0, 0, 0.5))
start, end = self.ax.get_xlim()
self.ax.xaxis.set_ticks(np.arange(0,len(list_numAgents), step_size))
# plt.xticks(label_pos)
# self.ax.set_xticklabels(label_pos)
# self.autolabel(rects1)
if use_log_scale:
self.ax.set_yscale('log')
self.ax.legend()
# plt.grid()
plt.show()
self.save_fig(title_text)
def show(self):
plt.show()
def save_fig(self, title):
# name_save_fig = os.path.join(self.SAVEDATA_FOLDER, "{}_{}.pdf".format(self.title_text, title))
name_save_fig = os.path.join(self.SAVEDATA_FOLDER, "{}.jpg".format(title))
name_save_fig_pdf = os.path.join(self.SAVEDATA_FOLDER, "{}.pdf".format(title))
self.fig.savefig(name_save_fig, bbox_inches='tight', pad_inches=0)
self.fig.savefig(name_save_fig_pdf, bbox_inches='tight', pad_inches=0)
def autolabel(self, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
if height in [0.7558, 0.7596]:
self.ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(-6, 15), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=0, fontweight='bold')
continue
self.ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(-6, 15), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=0)
if __name__ == '__main__':
#
# trained_num_agent = 8
# list_testing_num_agent = [8, 12, 16, 32]
trained_num_agent = 10
list_testing_num_agent = [60]
# # list_testing_num_agent = [10, 40]
# list_testing_num_agent = [14, 20, 40]
# list_testing_num_agent = [20, 30, 40, 50]
#
# trained_num_agent = 12
# list_testing_num_agent = [12, 14, 20, 40]
#####################################################################################
#####################################################################################
# label_exp_setup = "ImpactK"
# label_exp = 'GNN'
# select_label = ["DCP - K2-HS0", "DCP - K3-HS0"]
# text_legend = [
# "GNN - K=2", "GNN - K=3"
# ]
label_exp_setup = "ImpactK"
label_exp = 'GNNOE'
select_label = ["DCPOE - K2-HS0", "DCPOE - K3-HS0"]
text_legend = [
"GNN(OE) - K=2", "GNN(OE) - K=3"
]
# label_exp_setup = "ImpactK"
# label_exp = 'GNNOE'
# select_label = ["DCPOE - K2-HS0", "DCPOE - K3-HS0"]
# text_legend = [
# "GNN - K=2", "GNN - K=3"
# ]
#####################################################################################
# label_exp_setup = "ImpactOE"
# label_exp = 'K2'
# select_label = ["DCP - K2-HS0", "DCPOE - K2-HS0"]
# text_legend = [
# "GNN - K=2", "GNN(OE) - K=2"
# ]
#
# label_exp_setup = "ImpactOE"
# label_exp = 'K3'
# select_label = ["DCP - K3-HS0", "DCPOE - K3-HS0"]
# text_legend = [
# "GNN - K=3", "GNN(OE) - K=3"
# ]
#####################################################################################
#####################################################################################
title_text = "{}_{}".format(label_exp, label_exp_setup)
DATA_FOLDER = '../MultiAgentDataset/Results_best/Statistics_generalization_LargeScale/Set3/Statistics_generalization/'
epoch_text = "IROS"
title_text = "{}_TR_{}".format(title_text, trained_num_agent)
SAVEDATA_FOLDER = os.path.join(DATA_FOLDER, 'Summary', title_text)
try:
# Create target Directory
os.makedirs(SAVEDATA_FOLDER)
print("Directory ", SAVEDATA_FOLDER, " Created ")
except FileExistsError:
pass
ResultAnalysis = StatisticAnalysis(DATA_FOLDER, SAVEDATA_FOLDER, select_label, trained_num_agent, list_testing_num_agent)
ResultAnalysis.plot_hist_data(title_text, text_legend)
| from scipy.io import loadmat
import numpy as np
import os
import csv
import matplotlib.pyplot as plt
import matplotlib.font_manager
matplotlib.font_manager._rebuild()
plt.rcParams['font.family'] = "serif"
import matplotlib.ticker as ticker
plt.rcParams.update({'font.size': 22})
import pandas as pd
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
class StatisticAnalysis:
def __init__(self, data_root, SAVEDATA_FOLDER, exp_setup, trained_num_agent, list_testing_num_agent):
self.DATA_FOLDER = data_root
self.SAVEDATA_FOLDER = SAVEDATA_FOLDER
self.exp_setup = exp_setup
self.trained_num_agent = trained_num_agent
self.list_testing_num_agent = list_testing_num_agent
self.load_data()
def load_data(self):
data = {
'dcp': {},
'dcpOE': {},
'rdcp': {},
'rdcpOE': {},
}
data_list = []
for data_type in data.keys():
for subdir, dirs, files in os.walk(os.path.join(self.DATA_FOLDER, data_type)):
for file in files:
# print os.path.join(subdir, file)
filepath = subdir + os.sep + file
if filepath.endswith(".mat"):
# print(subdir, file)
mat_data = loadmat(filepath)
rate_ReachGoal = mat_data['rate_ReachGoal'][0][0]
mean_deltaFT = mat_data['mean_deltaFT'][0][0]
mean_deltaMP = mat_data['mean_deltaMP'][0][0]
hidden_state = mat_data['hidden_state'][0][0]
num_agents_trained = mat_data['num_agents_trained'][0][0]
num_agents_testing = mat_data['num_agents_testing'][0][0]
K = mat_data['K'][0][0]
cleaned_data = {
'filename': file,
'type': data_type,
'exp_stamps': mat_data['exp_stamps'][0],
'map_size_trained': mat_data['map_size_trained'][0],
'map_density_trained': mat_data['map_density_trained'][0][0],
'num_agents_trained': mat_data['num_agents_trained'][0][0],
'map_size_testing': mat_data['map_size_testing'][0],
'map_density_testing': mat_data['map_density_testing'][0][0],
'num_agents_testing': mat_data['num_agents_testing'][0][0],
'K': K,
'hidden_state': hidden_state,
'rate_ReachGoal': rate_ReachGoal,
'mean_deltaFT': mean_deltaFT,
'std_deltaMP': mat_data['std_deltaMP'][0][0],
'mean_deltaMP': mean_deltaMP,
'std_deltaFT': mat_data['std_deltaFT'][0][0],
'list_numAgentReachGoal': mat_data['list_numAgentReachGoal'][0],
'hist_numAgentReachGoal': mat_data['hist_numAgentReachGoal'][0],
}
data_list.append(cleaned_data)
data[data_type].setdefault(num_agents_trained, {}).setdefault(num_agents_testing, []).append(
cleaned_data)
self.data_list = data_list
self.data = data
# print(len(data_list))
# return data
def plot_hist_data(self, title_setup, text_legend):
for index, testing_num_agent in enumerate(self.list_testing_num_agent):
print(testing_num_agent)
title_text = "{}_TE{}".format(title_setup, testing_num_agent)
label_set1 = self.exp_setup[0]
label_set1_type = label_set1.split(' ')[0].lower()
label_set1_K = int(label_set1.split('K')[1].split('-HS')[0])
label_set1_HS = int(label_set1.split('-HS')[1])
searched_results_set1 = [item for item in self.data_list
if item['num_agents_trained'] == self.trained_num_agent
and item['num_agents_testing'] == testing_num_agent
and item['type'].lower() == label_set1_type
and item['K'] == label_set1_K
and item['hidden_state'] == label_set1_HS
]
label_set2 = self.exp_setup[1]
label_set2_type = label_set2.split(' ')[0].lower()
label_set2_K = int(label_set2.split('K')[1].split('-HS')[0])
label_set2_HS = int(label_set2.split('-HS')[1])
searched_results_set2 = [item for item in self.data_list
if item['num_agents_trained'] == self.trained_num_agent
and item['num_agents_testing'] == testing_num_agent
and item['type'].lower() == label_set2_type
and item['K'] == label_set2_K
and item['hidden_state'] == label_set2_HS
]
if len(searched_results_set1) == 0:
pass
else:
hist_numAgentReachGoal_set1 = searched_results_set1[0]['hist_numAgentReachGoal']
print(label_set1, hist_numAgentReachGoal_set1)
hist_numAgentReachGoal_set2 = searched_results_set2[0]['hist_numAgentReachGoal']
print(label_set2, hist_numAgentReachGoal_set2)
total_num_cases = sum(hist_numAgentReachGoal_set1)
hist_numAgentReachGoal_norm_set1 = []
hist_numAgentReachGoal_norm_set2 = []
list_numAgents = []
for index in range(len(hist_numAgentReachGoal_set1)):
list_numAgents.append(str(index))
hist_numAgentReachGoal_norm_set1.append(hist_numAgentReachGoal_set1[index]/total_num_cases)
hist_numAgentReachGoal_norm_set2.append(hist_numAgentReachGoal_set2[index]/total_num_cases)
self.plot_figure(testing_num_agent, list_numAgents, total_num_cases, hist_numAgentReachGoal_norm_set1, hist_numAgentReachGoal_norm_set2, label_set1_K, title_text, text_legend)
pass
def plot_figure(self, testing_num_agent, list_numAgents, total_num_cases, hist_data_set1, hist_data_set2, label_set1_K, title_text, text_legend, use_log_scale=False):
self.fig, self.ax = plt.subplots()
self.fig.set_size_inches(8, 6)
# title_exp_setup = ('trained on {} agents and tested on {} agents'.format(self.trained_num_agent, testing_num_agent))
# self.title_text = 'Histogram of percentage (# agents reach goal among {} cases) \n in network is {}.'.format(total_num_cases, title_exp_setup)
#
# self.ax.set_title(self.title_text)
self.ax.set_xlabel('# robots')
width = 0.35 # the width of the bars
label_width = 1.05
if len(list_numAgents)<20 and label_set1_K == 2:
step_size = 2
elif len(list_numAgents)==60:
step_size = 6
else:
step_size = 5
self.ax.set_ylabel('Proportion of cases'.format(total_num_cases))
label_pos = np.arange(len(list_numAgents))
# rects1 = self.ax.bar(x - label_width / 2 + width * 1, hist_numAgentReachGoal, width, label=text_legend)
hist_set1 = self.ax.bar(label_pos, hist_data_set1, align='center', label='{}'.format(text_legend[0]), ls='dotted', lw=3, fc=(0, 0, 1, 0.5))
hist_set2 = self.ax.bar(label_pos, hist_data_set2, align='center', label='{}'.format(text_legend[1]),lw=3, fc=(1, 0, 0, 0.5))
start, end = self.ax.get_xlim()
self.ax.xaxis.set_ticks(np.arange(0,len(list_numAgents), step_size))
# plt.xticks(label_pos)
# self.ax.set_xticklabels(label_pos)
# self.autolabel(rects1)
if use_log_scale:
self.ax.set_yscale('log')
self.ax.legend()
# plt.grid()
plt.show()
self.save_fig(title_text)
def show(self):
plt.show()
def save_fig(self, title):
# name_save_fig = os.path.join(self.SAVEDATA_FOLDER, "{}_{}.pdf".format(self.title_text, title))
name_save_fig = os.path.join(self.SAVEDATA_FOLDER, "{}.jpg".format(title))
name_save_fig_pdf = os.path.join(self.SAVEDATA_FOLDER, "{}.pdf".format(title))
self.fig.savefig(name_save_fig, bbox_inches='tight', pad_inches=0)
self.fig.savefig(name_save_fig_pdf, bbox_inches='tight', pad_inches=0)
def autolabel(self, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
if height in [0.7558, 0.7596]:
self.ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(-6, 15), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=0, fontweight='bold')
continue
self.ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(-6, 15), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=0)
if __name__ == '__main__':
#
# trained_num_agent = 8
# list_testing_num_agent = [8, 12, 16, 32]
trained_num_agent = 10
list_testing_num_agent = [60]
# # list_testing_num_agent = [10, 40]
# list_testing_num_agent = [14, 20, 40]
# list_testing_num_agent = [20, 30, 40, 50]
#
# trained_num_agent = 12
# list_testing_num_agent = [12, 14, 20, 40]
#####################################################################################
#####################################################################################
# label_exp_setup = "ImpactK"
# label_exp = 'GNN'
# select_label = ["DCP - K2-HS0", "DCP - K3-HS0"]
# text_legend = [
# "GNN - K=2", "GNN - K=3"
# ]
label_exp_setup = "ImpactK"
label_exp = 'GNNOE'
select_label = ["DCPOE - K2-HS0", "DCPOE - K3-HS0"]
text_legend = [
"GNN(OE) - K=2", "GNN(OE) - K=3"
]
# label_exp_setup = "ImpactK"
# label_exp = 'GNNOE'
# select_label = ["DCPOE - K2-HS0", "DCPOE - K3-HS0"]
# text_legend = [
# "GNN - K=2", "GNN - K=3"
# ]
#####################################################################################
# label_exp_setup = "ImpactOE"
# label_exp = 'K2'
# select_label = ["DCP - K2-HS0", "DCPOE - K2-HS0"]
# text_legend = [
# "GNN - K=2", "GNN(OE) - K=2"
# ]
#
# label_exp_setup = "ImpactOE"
# label_exp = 'K3'
# select_label = ["DCP - K3-HS0", "DCPOE - K3-HS0"]
# text_legend = [
# "GNN - K=3", "GNN(OE) - K=3"
# ]
#####################################################################################
#####################################################################################
title_text = "{}_{}".format(label_exp, label_exp_setup)
DATA_FOLDER = '../MultiAgentDataset/Results_best/Statistics_generalization_LargeScale/Set3/Statistics_generalization/'
epoch_text = "IROS"
title_text = "{}_TR_{}".format(title_text, trained_num_agent)
SAVEDATA_FOLDER = os.path.join(DATA_FOLDER, 'Summary', title_text)
try:
# Create target Directory
os.makedirs(SAVEDATA_FOLDER)
print("Directory ", SAVEDATA_FOLDER, " Created ")
except FileExistsError:
pass
ResultAnalysis = StatisticAnalysis(DATA_FOLDER, SAVEDATA_FOLDER, select_label, trained_num_agent, list_testing_num_agent)
ResultAnalysis.plot_hist_data(title_text, text_legend) | en | 0.467584 | # print os.path.join(subdir, file) # print(subdir, file) # print(len(data_list)) # return data # title_exp_setup = ('trained on {} agents and tested on {} agents'.format(self.trained_num_agent, testing_num_agent)) # self.title_text = 'Histogram of percentage (# agents reach goal among {} cases) \n in network is {}.'.format(total_num_cases, title_exp_setup) # # self.ax.set_title(self.title_text) # the width of the bars # rects1 = self.ax.bar(x - label_width / 2 + width * 1, hist_numAgentReachGoal, width, label=text_legend) # plt.xticks(label_pos) # self.ax.set_xticklabels(label_pos) # self.autolabel(rects1) # plt.grid() # name_save_fig = os.path.join(self.SAVEDATA_FOLDER, "{}_{}.pdf".format(self.title_text, title)) Attach a text label above each bar in *rects*, displaying its height. # 3 points vertical offset # 3 points vertical offset # # trained_num_agent = 8 # list_testing_num_agent = [8, 12, 16, 32] # # list_testing_num_agent = [10, 40] # list_testing_num_agent = [14, 20, 40] # list_testing_num_agent = [20, 30, 40, 50] # # trained_num_agent = 12 # list_testing_num_agent = [12, 14, 20, 40] ##################################################################################### ##################################################################################### # label_exp_setup = "ImpactK" # label_exp = 'GNN' # select_label = ["DCP - K2-HS0", "DCP - K3-HS0"] # text_legend = [ # "GNN - K=2", "GNN - K=3" # ] # label_exp_setup = "ImpactK" # label_exp = 'GNNOE' # select_label = ["DCPOE - K2-HS0", "DCPOE - K3-HS0"] # text_legend = [ # "GNN - K=2", "GNN - K=3" # ] ##################################################################################### # label_exp_setup = "ImpactOE" # label_exp = 'K2' # select_label = ["DCP - K2-HS0", "DCPOE - K2-HS0"] # text_legend = [ # "GNN - K=2", "GNN(OE) - K=2" # ] # # label_exp_setup = "ImpactOE" # label_exp = 'K3' # select_label = ["DCP - K3-HS0", "DCPOE - 
K3-HS0"] # text_legend = [ # "GNN - K=3", "GNN(OE) - K=3" # ] ##################################################################################### ##################################################################################### # Create target Directory | 2.421584 | 2 |
Code/compute_all_test.py | Noixas/Evaluating-Bias-In-Dutch-Word-Embeddings | 0 | 6625546 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %% [markdown]
# # Streamlined testing for word embeddings
# %%
import numpy as np
import pandas as pd
from numpy import linalg
import fasttext.util
from gensim.models.fasttext import FastText, load_facebook_vectors, load_facebook_model
from gensim.models import KeyedVectors
from tqdm import tqdm
import random
import string
random_state = 1
random.seed(random_state)
# %%
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import json
import bias_neighbors as bias_neighbors
import bias_projection as bias_projection
import Utils_R as util_r
import WEAT
import debias_weat as debias_weat
from relation import Relation
import pickle
# %%
#visualize imports
import matplotlib as mpl
import matplotlib.pyplot as plt
from cycler import cycler
get_ipython().run_line_magic('matplotlib', 'inline')
mpl.rc("savefig", dpi=200)
mpl.rcParams['figure.figsize'] = (8,8)
mpl.rcParams['axes.prop_cycle'] = cycler(color='rc')
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
# %% [markdown]
# ## Load models
# Methods used to load different combinations of models
# %%
embed_path = "../Rodrigo-data/Embeddings/"
# %%
def load_fasttext(debiased = False, model_name = 'fasttext_320'):
load_path = embed_path+'FastText/'
model_fast = load_facebook_vectors(load_path+model_name+".bin")# old name -> "cc.nl.300_fasttext.bin")
model_fast_debiased = KeyedVectors.load(load_path+"Debiased/"+model_name+".model") if debiased else None
return [{"model":model_fast,"vec_len":300,"name":model_name,"model_debiased":model_fast_debiased,"load_path":load_path}]
# %%
def load_cow(debiased = False, model_name_small = 'cow-320', model_name_big = 'cow-big', big=True, small=True):
load_path = embed_path+'Clips/COW/'
model_cow_small = KeyedVectors.load_word2vec_format(load_path+model_name_small+".txt", binary=False,unicode_errors='replace') if small else None# uncomment if there is some problem when using embedding,limit = 603304) #from txt?
model_cow_big = KeyedVectors.load_word2vec_format(load_path+model_name_big+".txt", binary=False,unicode_errors='replace') if big else None
model_cow_small_debiased = KeyedVectors.load(load_path+"/Debiased/"+model_name_small+".model") if small and debiased else None
model_cow_big_debiased = KeyedVectors.load(load_path+"/Debiased/"+model_name_big+".model") if big and debiased else None
return [
{"model":model_cow_small,"vec_len":320,"name":model_name_small,"model_debiased":model_cow_small_debiased,"load_path":load_path},
{"model":model_cow_big,"vec_len":320,"name":model_name_big,"model_debiased":model_cow_big_debiased,"load_path":load_path}]
# %%
def load_sonar(debiased = False, model_name_160 = 'sonar-160', model_name_320 = 'sonar-320', big=True, small=True):
load_path = embed_path+'Clips/Sonar/'
model_sonar_160 = KeyedVectors.load_word2vec_format(load_path+model_name_160+".txt", binary=False,unicode_errors='replace') if small else None# uncomment if there is some problem when using embedding,limit = 603304) #from txt?
model_sonar_320 = KeyedVectors.load_word2vec_format(load_path+model_name_320+".txt", binary=False,unicode_errors='replace') if big else None
model_sonar_160_debiased = KeyedVectors.load(load_path+"/Debiased/"+model_name_160+".model") if small and debiased else None
model_sonar_320_debiased = KeyedVectors.load(load_path+"/Debiased/"+model_name_320+".model") if big and debiased else None
return [
{"model":model_sonar_160,"vec_len":160,"name":model_name_160,"model_debiased":model_sonar_160_debiased,"load_path":load_path},
{"model":model_sonar_320,"vec_len":320,"name":model_name_320,"model_debiased":model_sonar_320_debiased,"load_path":load_path}]
# %% [markdown]
# # Main
# The main code with functions and other stuff goes down here.
# %% [markdown]
# ## Projection steps
# %%
def projection_bias_steps(vocab_limited, wv_limited, model, gender_bias_projection, model_debiased):
""" Encapsulates the steps related to the projection method.
1. Compute bias projection.
2. Encode lists of male & female words.
3. Generate 2 clusters by using KMeans.
- Get cluster statistic based on how accurate we can separate male and female words.
Parameters:
vocab_limited (list[word]): vocab of model without excluded words (gender specific words).
wv_limited (list[i,vector]): the vectors corresponding to the vocab_limited list.
model : current model from gensim.
"""
size = 500
male, female = bias_projection.get_male_and_female_lists(gender_bias_projection, size)
male_female = male + female
y_true = [0]*size + [1]*size
X_orig = bias_projection.extract_vectors(male_female, model)#get bias and debiased here
X_debiased = bias_projection.extract_vectors(male_female, model_debiased)
cluster_metric_a = bias_projection.cluster_and_visualize(male_female, X_orig, X_debiased, random_state, y_true)
return cluster_metric_a
# %% [markdown]
# ## Pipeline
# %%
def compute_all_tests(model,model_vec_len, model_name, exclude_words,cluster_results, downstream_results, model_debiased = None):
"""
Parameters:
cluster_results: Referenced dict, modify in place and reuse per every model. No need to use return.
"""
print("----------------Processing new model!------------------------------------------------------")
print("NAME:",model_name)
# get the embeddings without the excluded words to make the analysis -R
vocab_limited, wv_limited = util_r.limit_vocab(model, exclude = exclude_words, vec_len=model_vec_len)
########################################################################################################
# compute bias-by-projection before and after debiasing
gender_bias_projection = bias_projection.compute_bias_by_projection(wv_limited, vocab_limited, model)
bias_projection.report_bias(gender_bias_projection)
########################################################################################################
up_name = model_name.upper()
print("PROJECTION STEP:",up_name)
#Projection
cluster_metric_a = projection_bias_steps(vocab_limited, wv_limited, model, gender_bias_projection, model_debiased)
cluster_results[model_name] = cluster_metric_a
print('Cluster metric results: [orig,debiased] ',cluster_metric_a)
# cluster_results[model_name+' debiased'] = cluster_metric_a[1]
################################################################################################################
#WEAT
print("WEAT ORIGINAL STEP:",up_name)
results_weat = WEAT.WEAT_Test(model, model_name,verbose=False)
results_weat_2 = results_weat.copy()
print("WEAT DEBIASED STEP:",up_name)
results_weat_debiased = WEAT.WEAT_Test(model_debiased, model_name+'_debiased',verbose=False)
results_weat_debiased.drop(['Model','XYAB'], axis=1,inplace=True)
########################################################################################################
print("LATEX:")
latex_ = util_r.create_latex_table_weat(results_weat_2,results_weat_debiased)
save_latex = '../Rodrigo-data/Results/Latex_tables/latex_'+model_name+'.txt'
print(latex_,file=open(save_latex, 'w'))
########################################################################################################
#Downstream task
print("(LONG WAIT)DOWNSTREAM STEP:",up_name)
questions_task = "WEAT_clips/data/question-words.txt"
biased_down = Relation(questions_task).test_model_2020(model)
debiased_down = Relation(questions_task).test_model_2020(model_debiased)
downstream_results[model_name] = [biased_down[0],debiased_down[0]]
print('Downstream biased:',biased_down[0])
print('Downstream debiased:',debiased_down[0])
pickle_path= '../Rodrigo-data/Results/downstream_pickle/'
pickle.dump(biased_down, open( pickle_path+model_name+"_biased.p", "wb" ) ) #save for later processing
pickle.dump(debiased_down, open( pickle_path+model_name+"_debiased.p", "wb" ) )
########################################################################################################
print("END of model:", up_name)
return results_weat
# %%
# #SAVE PICKE
# """SAVE PICKLE"""
# modedl_name = 'testtt'
# pickle_path= '../Rodrigo-data/Results/downstream_pickle/'
# biased_down = ['a','b']
# pickle.dump(biased_down, open(pickle_path+modedl_name+"_biased.p", "wb" ) )
# # pickle.dump(debiased_down, open( pickle_path+modedl_name+"_debiased.p", "wb" ) )
# %%
# #LOAD PICKE
# """LOAD PICKLE"""
# favorite_color = pickle.load(open(pickle_path+modedl_name+"_biased.p", "rb" ) )
# favorite_color
# %% [markdown]
# # Call functions
#
# %%
exclude_words = debias_weat.load_gender_specific_words()
gender_specific_words = debias_weat.load_gender_specific_words()
defs, equalize_pairs = debias_weat.load_def_and_equ_words()
# %%
cluster_1817 = {}
downstream_1817 = {}
debias_save_models = True
from multiprocessing import Pool
if __name__ == '__main__':
p = Pool(5)
print(p.map(f, [1, 2, 3]))
# %%
if debias_save_models:
models = None
models = load_fasttext(True) #biggest bottleneck
for model_info in models:
res_weat = compute_all_tests(model_info['model'],model_info['vec_len'],model_info['name'], exclude_words, cluster_1817, downstream_1817, model_info['model_debiased'])
print("RESULTS WEAT")
print(res_weat)
print("ACTUALLY END................................................................................")
model_info = None #free memory
# %%
if debias_save_models:
models = None
models = load_cow(True) #biggest bottleneck
for model_info in models:
res_weat = compute_all_tests(model_info['model'],model_info['vec_len'],model_info['name'], exclude_words, cluster_1817, downstream_1817, model_info['model_debiased'])
print("RESULTS WEAT")
print(res_weat)
print("ACTUALLY END................................................................................")
model_info = None #free memory
# %%
if debias_save_models:
models = None
models = load_sonar(True) #biggest bottleneck
for model_info in models:
res_weat = compute_all_tests(model_info['model'],model_info['vec_len'],model_info['name'], exclude_words, cluster_1817, downstream_1817, model_info['model_debiased'])
print("RESULTS WEAT")
print(res_weat)
print("ACTUALLY END................................................................................")
model_info = None #free memory
# %%
d_res_latex = util_r.create_latex_table_downstream(downstream_1817)
print(d_res_latex)
c_res_latex = util_r.create_latex_table_cluster(cluster_1817)
print(c_res_latex)
| # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %% [markdown]
# # Streamlined testing for word embeddings
# %%
import numpy as np
import pandas as pd
from numpy import linalg
import fasttext.util
from gensim.models.fasttext import FastText, load_facebook_vectors, load_facebook_model
from gensim.models import KeyedVectors
from tqdm import tqdm
import random
import string
random_state = 1
random.seed(random_state)
# %%
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import json
import bias_neighbors as bias_neighbors
import bias_projection as bias_projection
import Utils_R as util_r
import WEAT
import debias_weat as debias_weat
from relation import Relation
import pickle
# %%
#visualize imports
import matplotlib as mpl
import matplotlib.pyplot as plt
from cycler import cycler
get_ipython().run_line_magic('matplotlib', 'inline')
# High-resolution figure export and a fixed 8x8 inch canvas for all plots.
mpl.rc("savefig", dpi=200)
mpl.rcParams['figure.figsize'] = (8,8)
# Two-color cycle (red/cyan) used to tell the two gender clusters apart.
mpl.rcParams['axes.prop_cycle'] = cycler(color='rc')
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
# %% [markdown]
# ## Load models
# Methods used to load different combinations of models
# %%
embed_path = "../Rodrigo-data/Embeddings/"  # root folder holding every embedding file loaded below
# %%
def load_fasttext(debiased = False, model_name = 'fasttext_320'):
    """Load the FastText embedding and, optionally, its debiased counterpart.

    Returns a one-element list with the same dict layout as the other loaders:
    model / vec_len / name / model_debiased / load_path.
    """
    base_dir = embed_path + 'FastText/'
    # old name -> "cc.nl.300_fasttext.bin"
    embedding = load_facebook_vectors(base_dir + model_name + ".bin")
    debiased_embedding = None
    if debiased:
        debiased_embedding = KeyedVectors.load(base_dir + "Debiased/" + model_name + ".model")
    return [{
        "model": embedding,
        "vec_len": 300,
        "name": model_name,
        "model_debiased": debiased_embedding,
        "load_path": base_dir,
    }]
# %%
def load_cow(debiased = False, model_name_small = 'cow-320', model_name_big = 'cow-big', big=True, small=True):
    """Load the COW word2vec embeddings (small and/or big variant).

    Each entry follows the shared loader dict layout:
    model / vec_len / name / model_debiased / load_path.
    """
    base_dir = embed_path + 'Clips/COW/'
    small_model = None
    small_debiased = None
    big_model = None
    big_debiased = None
    if small:
        # uncomment if there is some problem when using embedding, limit=603304 (from txt)
        small_model = KeyedVectors.load_word2vec_format(base_dir + model_name_small + ".txt",
                                                        binary=False, unicode_errors='replace')
        if debiased:
            small_debiased = KeyedVectors.load(base_dir + "/Debiased/" + model_name_small + ".model")
    if big:
        big_model = KeyedVectors.load_word2vec_format(base_dir + model_name_big + ".txt",
                                                      binary=False, unicode_errors='replace')
        if debiased:
            big_debiased = KeyedVectors.load(base_dir + "/Debiased/" + model_name_big + ".model")
    return [
        {"model": small_model, "vec_len": 320, "name": model_name_small,
         "model_debiased": small_debiased, "load_path": base_dir},
        {"model": big_model, "vec_len": 320, "name": model_name_big,
         "model_debiased": big_debiased, "load_path": base_dir}]
# %%
def load_sonar(debiased = False, model_name_160 = 'sonar-160', model_name_320 = 'sonar-320', big=True, small=True):
    """Load the SoNaR word2vec embeddings (160-dim and/or 320-dim variant).

    Each entry follows the shared loader dict layout:
    model / vec_len / name / model_debiased / load_path.
    """
    base_dir = embed_path + 'Clips/Sonar/'
    model_160 = None
    model_160_debiased = None
    model_320 = None
    model_320_debiased = None
    if small:
        # uncomment if there is some problem when using embedding, limit=603304 (from txt)
        model_160 = KeyedVectors.load_word2vec_format(base_dir + model_name_160 + ".txt",
                                                      binary=False, unicode_errors='replace')
        if debiased:
            model_160_debiased = KeyedVectors.load(base_dir + "/Debiased/" + model_name_160 + ".model")
    if big:
        model_320 = KeyedVectors.load_word2vec_format(base_dir + model_name_320 + ".txt",
                                                      binary=False, unicode_errors='replace')
        if debiased:
            model_320_debiased = KeyedVectors.load(base_dir + "/Debiased/" + model_name_320 + ".model")
    return [
        {"model": model_160, "vec_len": 160, "name": model_name_160,
         "model_debiased": model_160_debiased, "load_path": base_dir},
        {"model": model_320, "vec_len": 320, "name": model_name_320,
         "model_debiased": model_320_debiased, "load_path": base_dir}]
# %% [markdown]
# # Main
# The main code with functions and other stuff goes down here.
# %% [markdown]
# ## Projection steps
# %%
def projection_bias_steps(vocab_limited, wv_limited, model, gender_bias_projection, model_debiased):
    """Run the projection-based bias evaluation for one model pair.

    Takes the 500 most male- and 500 most female-biased words (ranked by
    projection score), embeds them with both the original and the debiased
    model, and lets KMeans measure how well the two gender groups can still
    be separated in each space.

    NOTE(review): vocab_limited and wv_limited are not used inside this
    function; they are kept for signature compatibility with the caller.

    Returns:
        The [original, debiased] cluster-accuracy pair from
        bias_projection.cluster_and_visualize.
    """
    group_size = 500
    male_words, female_words = bias_projection.get_male_and_female_lists(gender_bias_projection, group_size)
    words = male_words + female_words
    # First half is male (label 0), second half female (label 1).
    labels = [0] * group_size + [1] * group_size
    # Embed the same word list with the biased and the debiased model.
    vectors_original = bias_projection.extract_vectors(words, model)
    vectors_debiased = bias_projection.extract_vectors(words, model_debiased)
    accuracy_pair = bias_projection.cluster_and_visualize(
        words, vectors_original, vectors_debiased, random_state, labels)
    return accuracy_pair
# %% [markdown]
# ## Pipeline
# %%
def compute_all_tests(model,model_vec_len, model_name, exclude_words,cluster_results, downstream_results, model_debiased = None):
    """
    Run the full bias pipeline (projection, WEAT, downstream task) for one
    biased/debiased model pair, writing reports and pickles to disk.

    Parameters:
        model: gensim embedding model (original, biased).
        model_vec_len (int): dimensionality of the model's vectors.
        model_name (str): used in logs and in output file names.
        exclude_words (list): gender-specific words excluded from analysis.
        cluster_results: Referenced dict, modify in place and reuse per every model. No need to use return.
        downstream_results: referenced dict, also filled in place.
        model_debiased: debiased counterpart of `model`.

    Returns:
        The WEAT results table of the original (biased) model.
    """
    print("----------------Processing new model!------------------------------------------------------")
    print("NAME:",model_name)
    # get the embeddings without the excluded words to make the analysis -R
    vocab_limited, wv_limited = util_r.limit_vocab(model, exclude = exclude_words, vec_len=model_vec_len)
    ########################################################################################################
    # compute bias-by-projection before and after debiasing
    gender_bias_projection = bias_projection.compute_bias_by_projection(wv_limited, vocab_limited, model)
    bias_projection.report_bias(gender_bias_projection)
    ########################################################################################################
    up_name = model_name.upper()
    print("PROJECTION STEP:",up_name)
    #Projection
    cluster_metric_a = projection_bias_steps(vocab_limited, wv_limited, model, gender_bias_projection, model_debiased)
    cluster_results[model_name] = cluster_metric_a
    print('Cluster metric results: [orig,debiased] ',cluster_metric_a)
    # cluster_results[model_name+' debiased'] = cluster_metric_a[1]
    ################################################################################################################
    #WEAT
    print("WEAT ORIGINAL STEP:",up_name)
    results_weat = WEAT.WEAT_Test(model, model_name,verbose=False)
    # Copy kept for the LaTeX table so the returned frame stays untouched.
    results_weat_2 = results_weat.copy()
    print("WEAT DEBIASED STEP:",up_name)
    results_weat_debiased = WEAT.WEAT_Test(model_debiased, model_name+'_debiased',verbose=False)
    results_weat_debiased.drop(['Model','XYAB'], axis=1,inplace=True)
    ########################################################################################################
    print("LATEX:")
    latex_ = util_r.create_latex_table_weat(results_weat_2,results_weat_debiased)
    save_latex = '../Rodrigo-data/Results/Latex_tables/latex_'+model_name+'.txt'
    # Fixed: `print(latex_, file=open(save_latex, 'w'))` never closed the handle.
    with open(save_latex, 'w') as latex_file:
        print(latex_, file=latex_file)
    ########################################################################################################
    #Downstream task
    print("(LONG WAIT)DOWNSTREAM STEP:",up_name)
    questions_task = "WEAT_clips/data/question-words.txt"
    biased_down = Relation(questions_task).test_model_2020(model)
    debiased_down = Relation(questions_task).test_model_2020(model_debiased)
    downstream_results[model_name] = [biased_down[0],debiased_down[0]]
    print('Downstream biased:',biased_down[0])
    print('Downstream debiased:',debiased_down[0])
    pickle_path= '../Rodrigo-data/Results/downstream_pickle/'
    # save for later processing; with-blocks close the files (the original
    # pickle.dump(..., open(...)) calls leaked the handles)
    with open(pickle_path+model_name+"_biased.p", "wb") as pickle_file:
        pickle.dump(biased_down, pickle_file)
    with open(pickle_path+model_name+"_debiased.p", "wb") as pickle_file:
        pickle.dump(debiased_down, pickle_file)
    ########################################################################################################
    print("END of model:", up_name)
    return results_weat
# %%
# #SAVE PICKE
# """SAVE PICKLE"""
# modedl_name = 'testtt'
# pickle_path= '../Rodrigo-data/Results/downstream_pickle/'
# biased_down = ['a','b']
# pickle.dump(biased_down, open(pickle_path+modedl_name+"_biased.p", "wb" ) )
# # pickle.dump(debiased_down, open( pickle_path+modedl_name+"_debiased.p", "wb" ) )
# %%
# #LOAD PICKE
# """LOAD PICKLE"""
# favorite_color = pickle.load(open(pickle_path+modedl_name+"_biased.p", "rb" ) )
# favorite_color
# %% [markdown]
# # Call functions
#
# %%
# Word lists shared by every test below.
exclude_words = debias_weat.load_gender_specific_words()
gender_specific_words = debias_weat.load_gender_specific_words()
defs, equalize_pairs = debias_weat.load_def_and_equ_words()
# %%
# Result accumulators, filled in place by compute_all_tests for every model.
cluster_1817 = {}
downstream_1817 = {}
debias_save_models = True
# NOTE: a leftover multiprocessing demo (`Pool(5)` + `p.map(f, [1, 2, 3])`)
# was removed here: `f` was never defined anywhere in this script, so the
# call raised NameError before any evaluation could run.
# %%
# Run the full evaluation once per embedding family, in the original order
# (FastText, COW, SoNaR).  This replaces three copy-pasted, byte-identical
# blocks; `models = None` before each load lets the previous family's
# vectors be garbage-collected before the next (large) load.
if debias_save_models:
    for load_family in (load_fasttext, load_cow, load_sonar):
        models = None
        models = load_family(True)  # biggest bottleneck
        for model_info in models:
            res_weat = compute_all_tests(model_info['model'], model_info['vec_len'], model_info['name'],
                                         exclude_words, cluster_1817, downstream_1817,
                                         model_info['model_debiased'])
        # As in the original: echo only the last model's WEAT table per family.
        print("RESULTS WEAT")
        print(res_weat)
        print("ACTUALLY END................................................................................")
        model_info = None  # free memory
# %%
# Render LaTeX tables from the downstream / cluster result dicts accumulated above.
d_res_latex = util_r.create_latex_table_downstream(downstream_1817)
print(d_res_latex)
c_res_latex = util_r.create_latex_table_cluster(cluster_1817)
print(c_res_latex)
| en | 0.394655 | # To add a new cell, type '# %%' # To add a new markdown cell, type '# %% [markdown]' # %% # %% [markdown] # # Streamlined testing for word embeddings # %% # %% # %% #visualize imports # %% [markdown] # ## Load models # Methods used to load different combinations of models # %% # %% # old name -> "cc.nl.300_fasttext.bin") # %% # uncomment if there is some problem when using embedding,limit = 603304) #from txt? # %% # uncomment if there is some problem when using embedding,limit = 603304) #from txt? # %% [markdown] # # Main # The main code with functions and other stuff goes down here. # %% [markdown] # ## Projection steps # %% Encapsulates the steps related to the projection method. 1. Compute bias projection. 2. Encode lists of male & female words. 3. Generate 2 clusters by using KMeans. - Get cluster statistic based on how accurate we can separate male and female words. Parameters: vocab_limited (list[word]): vocab of model without excluded words (gender specific words). wv_limited (list[i,vector]): the vectors corresponding to the vocab_limited list. model : current model from gensim. #get bias and debiased here # %% [markdown] # ## Pipeline # %% Parameters: cluster_results: Referenced dict, modify in place and reuse per every model. No need to use return. 
# get the embeddings without the excluded words to make the analysis -R ######################################################################################################## # compute bias-by-projection before and after debiasing ######################################################################################################## #Projection # cluster_results[model_name+' debiased'] = cluster_metric_a[1] ################################################################################################################ #WEAT ######################################################################################################## ######################################################################################################## #Downstream task #save for later processing ######################################################################################################## # %% # #SAVE PICKE # """SAVE PICKLE""" # modedl_name = 'testtt' # pickle_path= '../Rodrigo-data/Results/downstream_pickle/' # biased_down = ['a','b'] # pickle.dump(biased_down, open(pickle_path+modedl_name+"_biased.p", "wb" ) ) # # pickle.dump(debiased_down, open( pickle_path+modedl_name+"_debiased.p", "wb" ) ) # %% # #LOAD PICKE # """LOAD PICKLE""" # favorite_color = pickle.load(open(pickle_path+modedl_name+"_biased.p", "rb" ) ) # favorite_color # %% [markdown] # # Call functions # # %% # %% # %% #biggest bottleneck #free memory # %% #biggest bottleneck #free memory # %% #biggest bottleneck #free memory # %% | 2.281811 | 2 |
django_tenants/models.py | safaariman/django-tenants | 0 | 6625547 | <filename>django_tenants/models.py
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.management import call_command
from django.db import models, connections, transaction
from django.urls import reverse
from django_tenants.clone import CloneSchema
from .postgresql_backend.base import _check_schema_name
from .signals import post_schema_sync, schema_needs_to_be_sync
from .utils import get_creation_fakes_migrations, get_tenant_base_schema
from .utils import schema_exists, get_tenant_domain_model, get_public_schema_name, get_tenant_database_alias
class TenantMixin(models.Model):
    """
    All tenant models must inherit this class.
    """

    auto_drop_schema = False
    """
    USE THIS WITH CAUTION!
    Set this flag to true on a parent class if you want the schema to be
    automatically deleted if the tenant row gets deleted.
    """

    auto_create_schema = True
    """
    Set this flag to false on a parent class if you don't want the schema
    to be automatically created upon save.
    """

    schema_name = models.CharField(max_length=63, unique=True,
                                   validators=[_check_schema_name])

    domain_url = None
    """
    Leave this as None. Stores the current domain url so it can be used in the logs
    """

    # Class-level stack shared by __enter__/__exit__ so that nested
    # ``with tenant:`` blocks restore the previously active tenant in LIFO order.
    _previous_tenant = []

    class Meta:
        abstract = True

    def __enter__(self):
        """
        Syntax sugar which helps in celery tasks, cron jobs, and other scripts

        Usage:
            with Tenant.objects.get(schema_name='test') as tenant:
                # run some code in tenant test
            # run some code in previous tenant (public probably)
        """
        connection = connections[get_tenant_database_alias()]
        self._previous_tenant.append(connection.tenant)
        self.activate()
        # Bug fix: return the tenant so ``with ... as tenant:`` binds this
        # instance instead of None (the docstring's own example relies on it).
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore whichever tenant was active before __enter__ ran.
        connection = connections[get_tenant_database_alias()]
        connection.set_tenant(self._previous_tenant.pop())

    def activate(self):
        """
        Syntax sugar that helps at django shell with fast tenant changing

        Usage:
            Tenant.objects.get(schema_name='test').activate()
        """
        connection = connections[get_tenant_database_alias()]
        connection.set_tenant(self)

    @classmethod
    def deactivate(cls):
        """
        Syntax sugar, return to public schema

        Usage:
            test_tenant.deactivate()
            # or simpler
            Tenant.deactivate()
        """
        connection = connections[get_tenant_database_alias()]
        connection.set_schema_to_public()

    def save(self, verbosity=1, *args, **kwargs):
        """
        Save the tenant row; for new tenants with ``auto_create_schema`` set,
        also create and migrate the schema.  Creation must run from the
        public schema; updates only from the tenant's own or the public one.
        """
        connection = connections[get_tenant_database_alias()]
        is_new = self.pk is None
        has_schema = hasattr(connection, 'schema_name')
        if has_schema and is_new and connection.schema_name != get_public_schema_name():
            raise Exception("Can't create tenant outside the public schema. "
                            "Current schema is %s." % connection.schema_name)
        elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
            raise Exception("Can't update tenant outside it's own schema or "
                            "the public schema. Current schema is %s."
                            % connection.schema_name)

        super().save(*args, **kwargs)

        if has_schema and is_new and self.auto_create_schema:
            try:
                self.create_schema(check_if_exists=True, verbosity=verbosity)
                post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
            except Exception:
                # We failed creating the tenant, delete what we created and
                # re-raise the exception
                self.delete(force_drop=True)
                raise
        elif is_new:
            # although we are not using the schema functions directly, the signal might be registered by a listener
            schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
        elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
            # Create schemas for existing models, deleting only the schema on failure
            try:
                self.create_schema(check_if_exists=True, verbosity=verbosity)
                post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
            except Exception:
                # We failed creating the schema, delete what we created and
                # re-raise the exception
                self._drop_schema()
                raise

    def serializable_fields(self):
        """ in certain cases the user model isn't serializable so you may want to only send the id """
        return self

    def _drop_schema(self, force_drop=False):
        """ Drops the schema"""
        connection = connections[get_tenant_database_alias()]
        has_schema = hasattr(connection, 'schema_name')
        if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
            raise Exception("Can't delete tenant outside it's own schema or "
                            "the public schema. Current schema is %s."
                            % connection.schema_name)

        if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
            self.pre_drop()
            cursor = connection.cursor()
            # schema_name was validated by _check_schema_name, so this
            # %-interpolation cannot inject arbitrary SQL.
            cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)

    def pre_drop(self):
        """
        This is a routine which you could override to backup the tenant schema before dropping.
        :return:
        """

    def delete(self, force_drop=False, *args, **kwargs):
        """
        Deletes this row. Drops the tenant's schema if the attribute
        auto_drop_schema set to True.
        """
        self._drop_schema(force_drop)
        super().delete(*args, **kwargs)

    def create_schema(self, check_if_exists=False, sync_schema=True,
                      verbosity=1):
        """
        Creates the schema 'schema_name' for this tenant. Optionally checks if
        the schema already exists before creating it. Returns true if the
        schema was created, false otherwise.
        """
        # safety check
        connection = connections[get_tenant_database_alias()]
        _check_schema_name(self.schema_name)
        cursor = connection.cursor()

        if check_if_exists and schema_exists(self.schema_name):
            return False

        fake_migrations = get_creation_fakes_migrations()

        if sync_schema:
            if fake_migrations:
                # copy tables and data from provided model schema
                base_schema = get_tenant_base_schema()
                clone_schema = CloneSchema()
                clone_schema.clone_schema(base_schema, self.schema_name)

                call_command('migrate_schemas',
                             tenant=True,
                             fake=True,
                             schema_name=self.schema_name,
                             interactive=False,
                             verbosity=verbosity)
            else:
                # create the schema
                cursor.execute('CREATE SCHEMA %s' % self.schema_name)
                call_command('migrate_schemas',
                             tenant=True,
                             schema_name=self.schema_name,
                             interactive=False,
                             verbosity=verbosity)

        connection.set_schema_to_public()

    def get_primary_domain(self):
        """
        Returns the primary domain of the tenant
        """
        try:
            domain = self.domains.get(is_primary=True)
            return domain
        except get_tenant_domain_model().DoesNotExist:
            return None

    def reverse(self, request, view_name):
        """
        Returns the URL of this tenant.
        """
        http_type = 'https://' if request.is_secure() else 'http://'
        domain = get_current_site(request).domain
        url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
        return url
class DomainMixin(models.Model):
    """
    All models that store the domains must inherit this class
    """
    domain = models.CharField(max_length=253, unique=True, db_index=True)
    tenant = models.ForeignKey(settings.TENANT_MODEL, db_index=True, related_name='domains',
                               on_delete=models.CASCADE)

    # Set this to true if this is the primary domain
    is_primary = models.BooleanField(default=True)

    @transaction.atomic
    def save(self, *args, **kwargs):
        """
        Save the domain, keeping exactly one primary domain per tenant.
        Runs atomically so the demotion of the other domains and this save
        either both happen or neither does.
        """
        # Get all other primary domains with the same tenant
        domain_list = self.__class__.objects.filter(tenant=self.tenant, is_primary=True).exclude(pk=self.pk)
        # If we have no primary domain yet, set as primary domain by default
        self.is_primary = self.is_primary or (not domain_list.exists())
        if self.is_primary:
            # Remove primary status of existing domains for tenant
            # (done before saving self, inside the same transaction)
            domain_list.update(is_primary=False)
        super().save(*args, **kwargs)

    class Meta:
        abstract = True
| <filename>django_tenants/models.py
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.management import call_command
from django.db import models, connections, transaction
from django.urls import reverse
from django_tenants.clone import CloneSchema
from .postgresql_backend.base import _check_schema_name
from .signals import post_schema_sync, schema_needs_to_be_sync
from .utils import get_creation_fakes_migrations, get_tenant_base_schema
from .utils import schema_exists, get_tenant_domain_model, get_public_schema_name, get_tenant_database_alias
class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
schema_name = models.CharField(max_length=63, unique=True,
validators=[_check_schema_name])
domain_url = None
"""
Leave this as None. Stores the current domain url so it can be used in the logs
"""
_previous_tenant = []
class Meta:
abstract = True
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
"""
Syntax sugar that helps at django shell with fast tenant changing
Usage:
Tenant.objects.get(schema_name='test').activate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
@classmethod
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
def pre_drop(self):
"""
This is a routine which you could override to backup the tenant schema before dropping.
:return:
"""
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None
def reverse(self, request, view_name):
"""
Returns the URL of this tenant.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url
class DomainMixin(models.Model):
"""
All models that store the domains must inherit this class
"""
domain = models.CharField(max_length=253, unique=True, db_index=True)
tenant = models.ForeignKey(settings.TENANT_MODEL, db_index=True, related_name='domains',
on_delete=models.CASCADE)
# Set this to true if this is the primary domain
is_primary = models.BooleanField(default=True)
@transaction.atomic
def save(self, *args, **kwargs):
# Get all other primary domains with the same tenant
domain_list = self.__class__.objects.filter(tenant=self.tenant, is_primary=True).exclude(pk=self.pk)
# If we have no primary domain yet, set as primary domain by default
self.is_primary = self.is_primary or (not domain_list.exists())
if self.is_primary:
# Remove primary status of existing domains for tenant
domain_list.update(is_primary=False)
super().save(*args, **kwargs)
class Meta:
abstract = True
| en | 0.818159 | All tenant models must inherit this class. USE THIS WITH CAUTION! Set this flag to true on a parent class if you want the schema to be automatically deleted if the tenant row gets deleted. Set this flag to false on a parent class if you don't want the schema to be automatically created upon save. Leave this as None. Stores the current domain url so it can be used in the logs Syntax sugar which helps in celery tasks, cron jobs, and other scripts Usage: with Tenant.objects.get(schema_name='test') as tenant: # run some code in tenant test # run some code in previous tenant (public probably) Syntax sugar that helps at django shell with fast tenant changing Usage: Tenant.objects.get(schema_name='test').activate() Syntax sugar, return to public schema Usage: test_tenant.deactivate() # or simpler Tenant.deactivate() # We failed creating the tenant, delete what we created and # re-raise the exception # although we are not using the schema functions directly, the signal might be registered by a listener # Create schemas for existing models, deleting only the schema on failure # We failed creating the schema, delete what we created and # re-raise the exception in certain cases the user model isn't serializable so you may want to only send the id Drops the schema This is a routine which you could override to backup the tenant schema before dropping. :return: Deletes this row. Drops the tenant's schema if the attribute auto_drop_schema set to True. Creates the schema 'schema_name' for this tenant. Optionally checks if the schema already exists before creating it. Returns true if the schema was created, false otherwise. # safety check # copy tables and data from provided model schema # create the schema Returns the primary domain of the tenant Returns the URL of this tenant. 
All models that store the domains must inherit this class # Set this to true if this is the primary domain # Get all other primary domains with the same tenant # If we have no primary domain yet, set as primary domain by default # Remove primary status of existing domains for tenant | 2.050719 | 2 |
sdpp_seller/seller_websockets.py | AdnanMuhib/DDM | 0 | 6625548 | <filename>sdpp_seller/seller_websockets.py<gh_stars>0
# Copyright (c) 2018, Autonomous Networks Research Group. All rights reserved.
# Read license file in main directory for more details
#!/usr/bin/env python
import asyncio
import json
import random
import iota
import websockets
# Connect to the tangle
# NOTE(review): an empty seed means this node cannot sign value transfers —
# presumably fine for the 0-value invoice transactions sent below; confirm.
seed = ""
client = "http://node02.iotatoken.nl:14265"  # public IOTA node endpoint
iota_api = iota.Iota(client, seed)
# TODO receive it from the buyer
payment_address = iota.Address(
    'RFQASBVGDTTPDEYVSPIWHG9YUMHAGHFDUZVVXEMDRNNMWJHQYBWHXWQ9JST9NZFBFMFPPFETFLE9RMUJCTNXFZJDGW')
def sendTransaction(transaction):
    """Publish *transaction* on the Tangle and print a Tangle-explorer link.

    On a bad node response the error is printed and the function returns
    normally (best effort, no re-raise).
    """
    try:
        bundle = iota_api.send_transfer(depth=2, transfers=[transaction])
        bundle_url = f'https://thetangle.org/bundle/{bundle["bundle"].hash}'
        print(f"Invoice - {bundle_url}")
    except iota.adapter.BadApiResponse as error:
        print(error)
def prepareTransaction(message=None, value=0):
    """Build an invoice transaction and hand it to sendTransaction.

    Parameters:
        message: optional human-readable payload; defaults to "Data Invoice"
                 when None (fixes the original code, which accepted the
                 parameter but always hard-coded the message — see its TODO).
        value:   IOTA amount to attach; 0 means a data-only transaction.

    Returns:
        Whatever sendTransaction returns (currently None).
    """
    payload = "Data Invoice" if message is None else message
    transaction = iota.ProposedTransaction(
        address=payment_address,
        value=value,
        message=iota.TryteString.from_string(payload),
        tag=iota.Tag(b"SDPPBUYER")
    )
    return sendTransaction(transaction)
def read_data_from_file(data):
    """Return at most data['quantity'] stripped lines from actual_data/<type>.txt.

    `data` is the decoded request dict with keys 'type' (file stem) and
    'quantity' (maximum number of lines to return).
    """
    source_path = "actual_data/" + data['type'] + ".txt"
    limit = data['quantity']
    selected = []
    with open(source_path) as source:
        for index, raw_line in enumerate(source):
            if index >= limit:
                break
            selected.append(raw_line.strip())
    return selected
async def time(websocket, path):
    """Serve a single data request over one websocket connection.

    Receives a JSON request ({'type', 'quantity'}), streams the matching
    lines back one message at a time, and emits a payment invoice on the
    Tangle after every 3rd item sent.
    """
    print("Data Transfer starts!")
    while True:
        data = await websocket.recv()
        data = read_data_from_file(json.loads(data))
        print(data)
        # Issue an invoice transaction once every k delivered items.
        k = 3
        counter = 1
        for d in data:
            if counter % k == 0:
                prepareTransaction()
            await websocket.send(d)
            counter = counter + 1
        print("Data Transfer completed!\n\n")
        # NOTE(review): this break makes the `while True` handle exactly one
        # request per connection — presumably intentional; confirm.
        break
        # await asyncio.sleep(random.random() * 3)
# Bind the handler to a local websocket server and run the event loop forever.
start_server = websockets.serve(time, '127.0.0.1', 5678)

asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
# Copyright (c) 2018, Autonomous Networks Research Group. All rights reserved.
# Read license file in main directory for more details
#!/usr/bin/env python
import asyncio
import json
import random
import iota
import websockets
# Connect to the tangle
seed = ""
client = "http://node02.iotatoken.nl:14265"
iota_api = iota.Iota(client, seed)
# TODO receive it from the buyer
payment_address = iota.Address(
'RFQASBVGDTTPDEYVSPIWHG9YUMHAGHFDUZVVXEMDRNNMWJHQYBWHXWQ9JST9NZFBFMFPPFETFLE9RMUJCTNXFZJDGW')
def sendTransaction(transaction):
try:
bundle = iota_api.send_transfer(depth=2, transfers=[transaction])
url = "https://thetangle.org/bundle/" + str(bundle["bundle"].hash)
print("Invoice - " + url)
except iota.adapter.BadApiResponse as error:
print(error)
def prepareTransaction(message=None, value=0):
    """Build an invoice transaction for the buyer and broadcast it.

    message: optional payload text; falls back to "Data Invoice" when omitted.
             (Previously the argument was accepted but silently ignored.)
    value:   amount of IOTA to transfer; 0 yields a pure data transaction.
    Returns whatever sendTransaction returns.
    """
    transaction = iota.ProposedTransaction(
        address=payment_address,
        value=value,
        # Honour the caller-supplied message instead of ignoring it.
        message=iota.TryteString.from_string(
            message if message is not None else "Data Invoice"),
        tag=iota.Tag(b"SDPPBUYER")
    )
    return sendTransaction(transaction)
def read_data_from_file(data):
    """Return up to data['quantity'] whitespace-stripped lines.

    The source file is actual_data/<data['type']>.txt; reading stops as soon
    as the requested number of lines has been collected.
    """
    limit = data['quantity']
    source_path = "actual_data/" + data['type'] + ".txt"
    collected = []
    with open(source_path) as handle:
        for raw_line in handle:
            if len(collected) >= limit:
                break
            collected.append(raw_line.strip())
    return collected
async def time(websocket, path):
    """Websocket handler: serve one buyer request, then return.

    Expects a JSON message with 'type' and 'quantity', streams the matching
    file lines back one message at a time, and fires an invoice transaction
    before every third record sent.
    """
    print("Data Transfer starts!")
    while True:
        data = await websocket.recv()
        data = read_data_from_file(json.loads(data))
        print(data)
        k = 3  # invoice period: bill on every k-th record
        counter = 1
        for d in data:
            if counter % k == 0:
                prepareTransaction()
            await websocket.send(d)
            counter = counter + 1
        print("Data Transfer completed!\n\n")
        # NOTE(review): unconditional break -- only one request is served per
        # connection, so the surrounding "while True" never actually loops.
        break
        # await asyncio.sleep(random.random() * 3)
# Start the seller websocket on localhost:5678 and block serving forever.
start_server = websockets.serve(time, '127.0.0.1', 5678)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
| en | 0.71355 | # Copyright (c) 2018, Autonomous Networks Research Group. All rights reserved. # Read license file in main directory for more details #!/usr/bin/env python # Connect to the tangle # TODO receive it from the buyer # TODO: put the actual value # await asyncio.sleep(random.random() * 3) | 2.46214 | 2 |
music player.py | vijayeshmt/Musicplayer | 1 | 6625549 | <reponame>vijayeshmt/Musicplayer
# Simple console music player built on pygame's mixer.
from pygame import mixer
mixer.init()
# Playlist; the user picks by 1-based menu index below.
l = ['Nadiyonpaar.mp3','chandh.mp3']
m = int(input("Choose song\n1.Nadiyoonpaar\n2.Chandh"))
# NOTE(review): no range/ValueError check -- an out-of-range or non-numeric
# choice raises IndexError/ValueError.
s = l[m-1]
mixer.music.load(s)
mixer.music.set_volume(0.7)  # volume on pygame's 0.0-1.0 scale
mixer.music.play()
# Interactive control loop: pause / resume / exit.
while True:
    print("Press 'p' to pause, 'r' to resume")
    print("Press 'e' to exit the program")
    inst = input(" ")
    if inst == 'p':
        # Pausing the music
        mixer.music.pause()
    elif inst == 'r':
        # Resuming the music
        mixer.music.unpause()
    elif inst == 'e':
        # Stop the mixer
        mixer.music.stop()
        break
# Simple console music player built on pygame's mixer.
from pygame import mixer
mixer.init()
# Playlist; the user picks by 1-based menu index below.
l = ['Nadiyonpaar.mp3','chandh.mp3']
m = int(input("Choose song\n1.Nadiyoonpaar\n2.Chandh"))
# NOTE(review): no range/ValueError check -- an out-of-range or non-numeric
# choice raises IndexError/ValueError.
s = l[m-1]
mixer.music.load(s)
mixer.music.set_volume(0.7)  # volume on pygame's 0.0-1.0 scale
mixer.music.play()
# Interactive control loop: pause / resume / exit (stop + break on 'e').
while True:
    print("Press 'p' to pause, 'r' to resume")
    print("Press 'e' to exit the program")
    inst = input(" ")
    if inst == 'p':
        # Pausing the music
        mixer.music.pause()
    elif inst == 'r':
        # Resuming the music
        mixer.music.unpause()
    elif inst == 'e':
        # Stop the mixer
        mixer.music.stop()
break | en | 0.718533 | # Pausing the music # Resuming the music # Stop the mixer | 3.123015 | 3 |
test/test_convert.py | NextSecurity/sast-scanner-modified | 1 | 6625550 | import lib.convert as convertLib
import lib.issue as issueLib
import importlib
import json
import os
import tempfile
import uuid
def test_nodejsscan_convert_empty():
    """An empty nodejsscan report yields a SARIF run with zeroed metrics."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as report_file:
        sarif = json.loads(
            convertLib.report("nodejsscan", [], ".", {}, {}, [], report_file.name)
        )
    run = sarif["runs"][0]
    assert run["tool"]["driver"]["name"] == "Static security code scan by NodeJsScan"
    expected_desc = "Static Analysis Security Test results using @AppThreat/sast-scan"
    assert run["automationDetails"]["description"]["text"] == expected_desc
    # The inline external property carries a random (version 4) GUID.
    assert uuid.UUID(sarif["inlineExternalProperties"][0]["guid"]).version == 4
    assert not run["results"]
    assert run["properties"]["metrics"] == {
        "total": 0,
        "critical": 0,
        "high": 0,
        "low": 0,
        "medium": 0,
    }
def test_nodejsscan_convert_issue():
    """A single nodejsscan finding should surface as one SARIF result."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        # Fixture mirrors nodejsscan's native JSON finding format.
        data = convertLib.report(
            "nodejsscan",
            [],
            ".",
            {},
            {},
            [
                {
                    "description": "MD5 is a a weak hash which is known to have collision. Use a strong hashing function.",
                    "filename": "InsufficientPasswordHash.js",
                    "line": 3,
                    "lines": 'function hashPassword(password) {\n var crypto = require("crypto");\n var hasher = crypto.createHash(\'md5\');\n var hashed = hasher.update(password).digest("hex"); // BAD\n return hashed;\n}',
                    "path": "/github/workspace/CWE-916/examples/InsufficientPasswordHash.js",
                    "sha2": "bfc3a2dfec54a8e77e41c3e3d7a6d87477ea1ed6d1cb3b1b60b8e135b0d18368",
                    "tag": "node",
                    "title": "Weak Hash used - MD5",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert (
            jsondata["runs"][0]["tool"]["driver"]["name"]
            == "Static security code scan by NodeJsScan"
        )
        # The finding's description is carried through as the result message.
        assert (
            jsondata["runs"][0]["results"][0]["message"]["text"]
            == "MD5 is a a weak hash which is known to have collision. Use a strong hashing function."
        )
def test_nodejsscan_convert_metrics():
    """Aggregate nodejsscan counters should populate the run's metrics."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "nodejsscan",
            [],
            ".",
            # Summary counters as emitted by nodejsscan; no individual findings.
            {
                "total_count": {"good": 0, "mis": 8, "sec": 4},
                "vuln_count": {
                    "Loading of untrusted YAML can cause Remote Code Injection": 1,
                    "Weak Hash used - MD5": 1,
                    "XSS - Reflected Cross Site Scripting": 2,
                },
            },
            {},
            [],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert (
            jsondata["runs"][0]["tool"]["driver"]["name"]
            == "Static security code scan by NodeJsScan"
        )
        # Only presence is asserted; the metrics shape is covered elsewhere.
        assert jsondata["runs"][0]["properties"]["metrics"]
def test_create_result():
    """create_result should rebase finding paths onto the WORKSPACE override."""
    issue = issueLib.issue_from_dict(
        {
            "description": "MD5 is a a weak hash which is known to have collision. Use a strong hashing function.",
            "filename": "InsufficientPasswordHash.js",
            "line": 3,
            "lines": 'function hashPassword(password) {\n var crypto = require("crypto");\n var hasher = crypto.createHash(\'md5\');\n var hashed = hasher.update(password).digest("hex"); // BAD\n return hashed;\n}',
            "path": "/app/src/CWE-916/examples/InsufficientPasswordHash.js",
            "sha2": "bfc3a2dfec54a8e77e41c3e3d7a6d87477ea1ed6d1cb3b1b60b8e135b0d18368",
            "tag": "node",
            "title": "Weak Hash used - MD5",
        }
    )
    # No WORKSPACE override: the issue's own absolute path is used.
    data = convertLib.create_result("nodetest", issue, {}, {}, None, "/app/src")
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "file:///app/src/CWE-916/examples/InsufficientPasswordHash.js"
    )
    # Override the workspace and check the location
    os.environ["WORKSPACE"] = "/foo/bar"
    # Reload so convertLib re-reads WORKSPACE (apparently read at import time).
    importlib.reload(convertLib)
    data = convertLib.create_result("nodetest", issue, {}, {}, None, "/app/src")
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "file:///foo/bar/CWE-916/examples/InsufficientPasswordHash.js"
    )
    # Override the workspace and check the location
    # A URL workspace yields repository-browser links instead of file:// URIs.
    os.environ["WORKSPACE"] = "https://github.com/appthreat/cdxgen/blob/master"
    importlib.reload(convertLib)
    data = convertLib.create_result("nodetest", issue, {}, {}, None, "/app/src")
    # NOTE(review): WORKSPACE is not restored afterwards -- later tests must
    # reset it themselves (test_create_result_relative does).
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "https://github.com/appthreat/cdxgen/blob/master/CWE-916/examples/InsufficientPasswordHash.js"
    )
def test_create_result_relative():
    """A relative finding path is joined onto the working dir argument."""
    # Clear any WORKSPACE left over from test_create_result and reload.
    os.environ["WORKSPACE"] = ""
    importlib.reload(convertLib)
    issue = issueLib.issue_from_dict(
        {
            "line": "VERY_REDACTED ",
            "offender": "REDACTED",
            "commit": "06fd7b1f844f88fb7821df498ce6d209cb9ad875",
            "repo": "app",
            "rule": "Generic Credential",
            "commitMessage": "Add secret\n",
            "author": "<NAME>",
            "email": "<EMAIL>",
            "file": "src/main/README-new.md",
            "date": "2020-01-12T19:45:43Z",
            "tags": "key, API, generic",
        }
    )
    data = convertLib.create_result("gitleaks", issue, {}, {}, None, "/app")
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "file:///app/src/main/README-new.md"
    )
def test_credscan_convert_issue():
    """A gitleaks-style credential finding maps to one high-severity result."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "credscan",
            [],
            ".",
            {},
            {},
            [
                {
                    "line": "VERY_SECRET_TOO = 'f6CGV4aMM9zedoh3OUNbSakBymo7yplB' ",
                    "offender": "SECRET_TOO = 'f6CGV4aMM9zedoh3OUNbSakBymo7yplB'",
                    "commit": "f5cf9d795d00ac5540f3ba26a1d98d9bc9c4bbbc",
                    "repo": "app",
                    "rule": "Generic Credential",
                    "commitMessage": "password\n",
                    "author": "<NAME>",
                    "email": "<EMAIL>",
                    "file": "README.md",
                    "date": "2020-01-02T21:02:40Z",
                    "tags": "key, API, generic",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert jsondata["runs"][0]["tool"]["driver"]["name"] == "credscan"
        assert jsondata["runs"][0]["results"][0]["message"]["text"]
        # Credential leaks are counted under "high" in the metrics summary.
        assert jsondata["runs"][0]["properties"]["metrics"] == {
            "high": 1,
            "total": 1,
            "critical": 0,
            "medium": 0,
            "low": 0,
        }
def test_gosec_convert_issue():
    """A single gosec finding maps to one MEDIUM-severity SARIF result."""
    finding = {
        "severity": "MEDIUM",
        "confidence": "HIGH",
        "rule_id": "G104",
        "details": "Errors unhandled.",
        "file": "/app/lib/plugins/capture/capture.go",
        "code": "io.Copy(reqbody, cwc.r.Request.Body)",
        "line": "57",
    }
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as report_file:
        sarif = json.loads(
            convertLib.report("gosec", [], ".", {}, {}, [finding], report_file.name)
        )
    run = sarif["runs"][0]
    assert run["tool"]["driver"]["name"] == "Golang security checks by gosec"
    assert run["results"][0]["message"]["text"]
    assert run["properties"]["metrics"] == {
        "medium": 1,
        "total": 1,
        "critical": 0,
        "high": 0,
        "low": 0,
    }
def test_tfsec_convert_issue():
    """A tfsec ERROR finding converts to a result counted as critical."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "tfsec",
            [],
            ".",
            {},
            {},
            [
                {
                    "rule_id": "AWS018",
                    "link": "https://github.com/liamg/tfsec/wiki/AWS018",
                    "location": {
                        "filename": "/app/main.tf",
                        "start_line": 1,
                        "end_line": 4,
                    },
                    "description": "Resource 'aws_security_group_rule.my-rule' should include a description for auditing purposes.",
                    "severity": "ERROR",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert (
            jsondata["runs"][0]["tool"]["driver"]["name"]
            == "Terraform static analysis by tfsec"
        )
        # The tfsec description becomes the result message verbatim.
        assert (
            jsondata["runs"][0]["results"][0]["message"]["text"]
            == "Resource 'aws_security_group_rule.my-rule' should include a description for auditing purposes."
        )
        # tfsec "ERROR" severity is tallied under "critical".
        assert jsondata["runs"][0]["properties"]["metrics"] == {
            "critical": 1,
            "total": 1,
            "high": 0,
            "medium": 0,
            "low": 0,
        }
def test_staticcheck_convert_issue():
    """A staticcheck diagnostic converts to a medium-severity Go result."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "staticcheck",
            [],
            ".",
            {},
            {},
            [
                {
                    "code": "ST1005",
                    "severity": "error",
                    "location": {
                        "file": "/Users/prabhu/go/kube-score/cmd/kube-score/main.go",
                        "line": 156,
                        "column": 10,
                    },
                    "end": {
                        "file": "/Users/prabhu/go/kube-score/cmd/kube-score/main.go",
                        "line": 156,
                        "column": 86,
                    },
                    "message": "error strings should not be capitalized",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert jsondata["runs"][0]["tool"]["driver"]["name"] == "Go static analysis"
        # The converter appends a trailing period to the diagnostic message.
        assert (
            jsondata["runs"][0]["results"][0]["message"]["text"]
            == "error strings should not be capitalized."
        )
        # staticcheck "error" severity is tallied under "medium".
        assert jsondata["runs"][0]["properties"]["metrics"] == {
            "critical": 0,
            "total": 1,
            "high": 0,
            "medium": 1,
            "low": 0,
        }
| import lib.convert as convertLib
import lib.issue as issueLib
import importlib
import json
import os
import tempfile
import uuid
def test_nodejsscan_convert_empty():
    """An empty nodejsscan report yields a SARIF run with zeroed metrics."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as report_file:
        sarif = json.loads(
            convertLib.report("nodejsscan", [], ".", {}, {}, [], report_file.name)
        )
    run = sarif["runs"][0]
    assert run["tool"]["driver"]["name"] == "Static security code scan by NodeJsScan"
    expected_desc = "Static Analysis Security Test results using @AppThreat/sast-scan"
    assert run["automationDetails"]["description"]["text"] == expected_desc
    # The inline external property carries a random (version 4) GUID.
    assert uuid.UUID(sarif["inlineExternalProperties"][0]["guid"]).version == 4
    assert not run["results"]
    assert run["properties"]["metrics"] == {
        "total": 0,
        "critical": 0,
        "high": 0,
        "low": 0,
        "medium": 0,
    }
def test_nodejsscan_convert_issue():
    """A single nodejsscan finding should surface as one SARIF result."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        # Fixture mirrors nodejsscan's native JSON finding format.
        data = convertLib.report(
            "nodejsscan",
            [],
            ".",
            {},
            {},
            [
                {
                    "description": "MD5 is a a weak hash which is known to have collision. Use a strong hashing function.",
                    "filename": "InsufficientPasswordHash.js",
                    "line": 3,
                    "lines": 'function hashPassword(password) {\n var crypto = require("crypto");\n var hasher = crypto.createHash(\'md5\');\n var hashed = hasher.update(password).digest("hex"); // BAD\n return hashed;\n}',
                    "path": "/github/workspace/CWE-916/examples/InsufficientPasswordHash.js",
                    "sha2": "bfc3a2dfec54a8e77e41c3e3d7a6d87477ea1ed6d1cb3b1b60b8e135b0d18368",
                    "tag": "node",
                    "title": "Weak Hash used - MD5",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert (
            jsondata["runs"][0]["tool"]["driver"]["name"]
            == "Static security code scan by NodeJsScan"
        )
        # The finding's description is carried through as the result message.
        assert (
            jsondata["runs"][0]["results"][0]["message"]["text"]
            == "MD5 is a a weak hash which is known to have collision. Use a strong hashing function."
        )
def test_nodejsscan_convert_metrics():
    """Aggregate nodejsscan counters should populate the run's metrics."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "nodejsscan",
            [],
            ".",
            # Summary counters as emitted by nodejsscan; no individual findings.
            {
                "total_count": {"good": 0, "mis": 8, "sec": 4},
                "vuln_count": {
                    "Loading of untrusted YAML can cause Remote Code Injection": 1,
                    "Weak Hash used - MD5": 1,
                    "XSS - Reflected Cross Site Scripting": 2,
                },
            },
            {},
            [],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert (
            jsondata["runs"][0]["tool"]["driver"]["name"]
            == "Static security code scan by NodeJsScan"
        )
        # Only presence is asserted; the metrics shape is covered elsewhere.
        assert jsondata["runs"][0]["properties"]["metrics"]
def test_create_result():
    """create_result should rebase finding paths onto the WORKSPACE override."""
    issue = issueLib.issue_from_dict(
        {
            "description": "MD5 is a a weak hash which is known to have collision. Use a strong hashing function.",
            "filename": "InsufficientPasswordHash.js",
            "line": 3,
            "lines": 'function hashPassword(password) {\n var crypto = require("crypto");\n var hasher = crypto.createHash(\'md5\');\n var hashed = hasher.update(password).digest("hex"); // BAD\n return hashed;\n}',
            "path": "/app/src/CWE-916/examples/InsufficientPasswordHash.js",
            "sha2": "bfc3a2dfec54a8e77e41c3e3d7a6d87477ea1ed6d1cb3b1b60b8e135b0d18368",
            "tag": "node",
            "title": "Weak Hash used - MD5",
        }
    )
    # No WORKSPACE override: the issue's own absolute path is used.
    data = convertLib.create_result("nodetest", issue, {}, {}, None, "/app/src")
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "file:///app/src/CWE-916/examples/InsufficientPasswordHash.js"
    )
    # Override the workspace and check the location
    os.environ["WORKSPACE"] = "/foo/bar"
    # Reload so convertLib re-reads WORKSPACE (apparently read at import time).
    importlib.reload(convertLib)
    data = convertLib.create_result("nodetest", issue, {}, {}, None, "/app/src")
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "file:///foo/bar/CWE-916/examples/InsufficientPasswordHash.js"
    )
    # Override the workspace and check the location
    # A URL workspace yields repository-browser links instead of file:// URIs.
    os.environ["WORKSPACE"] = "https://github.com/appthreat/cdxgen/blob/master"
    importlib.reload(convertLib)
    data = convertLib.create_result("nodetest", issue, {}, {}, None, "/app/src")
    # NOTE(review): WORKSPACE is not restored afterwards -- later tests must
    # reset it themselves (test_create_result_relative does).
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "https://github.com/appthreat/cdxgen/blob/master/CWE-916/examples/InsufficientPasswordHash.js"
    )
def test_create_result_relative():
    """A relative finding path is joined onto the working dir argument."""
    # Clear any WORKSPACE left over from test_create_result and reload.
    os.environ["WORKSPACE"] = ""
    importlib.reload(convertLib)
    issue = issueLib.issue_from_dict(
        {
            "line": "VERY_REDACTED ",
            "offender": "REDACTED",
            "commit": "06fd7b1f844f88fb7821df498ce6d209cb9ad875",
            "repo": "app",
            "rule": "Generic Credential",
            "commitMessage": "Add secret\n",
            "author": "<NAME>",
            "email": "<EMAIL>",
            "file": "src/main/README-new.md",
            "date": "2020-01-12T19:45:43Z",
            "tags": "key, API, generic",
        }
    )
    data = convertLib.create_result("gitleaks", issue, {}, {}, None, "/app")
    assert (
        data.locations[0].physical_location.artifact_location.uri
        == "file:///app/src/main/README-new.md"
    )
def test_credscan_convert_issue():
    """A gitleaks-style credential finding maps to one high-severity result."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "credscan",
            [],
            ".",
            {},
            {},
            [
                {
                    "line": "VERY_SECRET_TOO = 'f6CGV4aMM9zedoh3OUNbSakBymo7yplB' ",
                    "offender": "SECRET_TOO = 'f6CGV4aMM9zedoh3OUNbSakBymo7yplB'",
                    "commit": "f5cf9d795d00ac5540f3ba26a1d98d9bc9c4bbbc",
                    "repo": "app",
                    "rule": "Generic Credential",
                    "commitMessage": "password\n",
                    "author": "<NAME>",
                    "email": "<EMAIL>",
                    "file": "README.md",
                    "date": "2020-01-02T21:02:40Z",
                    "tags": "key, API, generic",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert jsondata["runs"][0]["tool"]["driver"]["name"] == "credscan"
        assert jsondata["runs"][0]["results"][0]["message"]["text"]
        # Credential leaks are counted under "high" in the metrics summary.
        assert jsondata["runs"][0]["properties"]["metrics"] == {
            "high": 1,
            "total": 1,
            "critical": 0,
            "medium": 0,
            "low": 0,
        }
def test_gosec_convert_issue():
    """A single gosec finding maps to one MEDIUM-severity SARIF result."""
    finding = {
        "severity": "MEDIUM",
        "confidence": "HIGH",
        "rule_id": "G104",
        "details": "Errors unhandled.",
        "file": "/app/lib/plugins/capture/capture.go",
        "code": "io.Copy(reqbody, cwc.r.Request.Body)",
        "line": "57",
    }
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as report_file:
        sarif = json.loads(
            convertLib.report("gosec", [], ".", {}, {}, [finding], report_file.name)
        )
    run = sarif["runs"][0]
    assert run["tool"]["driver"]["name"] == "Golang security checks by gosec"
    assert run["results"][0]["message"]["text"]
    assert run["properties"]["metrics"] == {
        "medium": 1,
        "total": 1,
        "critical": 0,
        "high": 0,
        "low": 0,
    }
def test_tfsec_convert_issue():
    """A tfsec ERROR finding converts to a result counted as critical."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "tfsec",
            [],
            ".",
            {},
            {},
            [
                {
                    "rule_id": "AWS018",
                    "link": "https://github.com/liamg/tfsec/wiki/AWS018",
                    "location": {
                        "filename": "/app/main.tf",
                        "start_line": 1,
                        "end_line": 4,
                    },
                    "description": "Resource 'aws_security_group_rule.my-rule' should include a description for auditing purposes.",
                    "severity": "ERROR",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert (
            jsondata["runs"][0]["tool"]["driver"]["name"]
            == "Terraform static analysis by tfsec"
        )
        # The tfsec description becomes the result message verbatim.
        assert (
            jsondata["runs"][0]["results"][0]["message"]["text"]
            == "Resource 'aws_security_group_rule.my-rule' should include a description for auditing purposes."
        )
        # tfsec "ERROR" severity is tallied under "critical".
        assert jsondata["runs"][0]["properties"]["metrics"] == {
            "critical": 1,
            "total": 1,
            "high": 0,
            "medium": 0,
            "low": 0,
        }
def test_staticcheck_convert_issue():
    """A staticcheck diagnostic converts to a medium-severity Go result."""
    with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", delete=True) as cfile:
        data = convertLib.report(
            "staticcheck",
            [],
            ".",
            {},
            {},
            [
                {
                    "code": "ST1005",
                    "severity": "error",
                    "location": {
                        "file": "/Users/prabhu/go/kube-score/cmd/kube-score/main.go",
                        "line": 156,
                        "column": 10,
                    },
                    "end": {
                        "file": "/Users/prabhu/go/kube-score/cmd/kube-score/main.go",
                        "line": 156,
                        "column": 86,
                    },
                    "message": "error strings should not be capitalized",
                }
            ],
            cfile.name,
        )
        jsondata = json.loads(data)
        assert jsondata["runs"][0]["tool"]["driver"]["name"] == "Go static analysis"
        # The converter appends a trailing period to the diagnostic message.
        assert (
            jsondata["runs"][0]["results"][0]["message"]["text"]
            == "error strings should not be capitalized."
        )
        # staticcheck "error" severity is tallied under "medium".
        assert jsondata["runs"][0]["properties"]["metrics"] == {
            "critical": 0,
            "total": 1,
            "high": 0,
            "medium": 1,
            "low": 0,
        }
| en | 0.782037 | # Override the workspace and check the location # Override the workspace and check the location | 2.524684 | 3 |