hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7295e891b8f82eac3622be486b13ebd9a4de231 | 582 | py | Python | math/increment_num.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | math/increment_num.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | math/increment_num.py | ethyl2/code_challenges | 3c9ccca1782f92728e60a515a7ca797f6d470e81 | [
"MIT"
] | null | null | null | """
https://leetcode.com/problems/plus-one/
Given a non-empty arr of digits representing a non-neg int, increment that int by 1.
Most significant digit is at the head of the list.
Each el contains a single digit.
The int doesn't contain leading zeros, except for the int 0 itself.
examples:
[1,2,3] -> [1,2,4]
[4,3,2,1] -> [4,3,2,2]
"""
from typing import List
class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        """Increment by one the non-negative integer represented by `digits`.

        Parameters
        ----------
        digits: List[int]
            Digits of a non-negative integer, most significant digit first.
            Not mutated by this call

        Returns
        -------
        List[int]
            Digits of the incremented integer, most significant digit first
            (one element longer than the input when a carry overflows, e.g.
            [9, 9] -> [1, 0, 0])"""
        # BUG FIX: the original implementation returned a plain int (via
        # `int(''.join(...))`) despite the declared `List[int]` return type,
        # and also mutated the caller's list in place (`digits[-1] += 1`).
        number = int("".join(map(str, digits))) + 1
        return [int(digit) for digit in str(number)]
s = Solution()
print(s.plusOne([1, 2, 3]))
print(s.plusOne([4, 3, 2, 1]))
| 23.28 | 84 | 0.652921 | from typing import List
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
digits[-1] += 1
return int(''.join(map(str, digits)))
s = Solution()
print(s.plusOne([1, 2, 3]))
print(s.plusOne([4, 3, 2, 1]))
| true | true |
f729608a38e760963ece4719b36116f0a7dd4182 | 511 | py | Python | py/signal.py | jzhou/ai | a5efbfb5e93e404129c974491705c24e9bc49c9d | [
"MIT"
] | null | null | null | py/signal.py | jzhou/ai | a5efbfb5e93e404129c974491705c24e9bc49c9d | [
"MIT"
] | null | null | null | py/signal.py | jzhou/ai | a5efbfb5e93e404129c974491705c24e9bc49c9d | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import signal, os
import time
def handler(signum, frame):
    """Signal handler: announce which signal arrived, then note that the
    alarm-time work would run here.

    Parameters
    ----------
    signum: Int
        Number of the signal that was delivered (e.g. ``signal.SIGALRM``)
    frame: Frame, or None
        Stack frame that was active when the signal was caught (unused)"""
    announcement = "Signal handler called with signal: " + str(signum)
    print(announcement)
    print("complete operations needed when alarm received")
def main():
    """Demonstrate SIGALRM handling: install :func:`handler`, arm a 3-second
    alarm, sleep past its delivery, then disable any pending alarm.

    NOTE(review): ``signal.SIGALRM`` / ``signal.alarm`` are POSIX-only, so
    this demo will not run on Windows."""
    signal.signal(signal.SIGALRM, handler)
    print ("set alarm signal")
    signal.alarm(3)
    print ("before alarm")
    time.sleep(5)  # SIGALRM is delivered ~3s into this sleep, invoking handler()
    signal.alarm(0) # Disable the alarm
    print ("after alarm, alarm disabled")
# Run the demo only when executed as a script (importing this module has no side effects)
if __name__=="__main__":
    main()
| 22.217391 | 62 | 0.643836 |
import signal, os
import time
def handler(signum, frame):
print ("Signal handler called with signal: " +str(signum))
print ("complete operations needed when alarm received")
def main():
signal.signal(signal.SIGALRM, handler)
print ("set alarm signal")
signal.alarm(3)
print ("before alarm")
time.sleep(5)
signal.alarm(0)
print ("after alarm, alarm disabled")
if __name__=="__main__":
main()
| true | true |
f72960a610aeb01762a3fbc0fba450b38edd8a76 | 489 | py | Python | pyhammer/tasks/svn/tortoisesvncommittask.py | webbers/pyhammer | 84efafed65ab05c071a55944b91343b9fd1ef58e | [
"MIT"
] | 2 | 2015-07-06T15:57:33.000Z | 2016-09-10T11:46:24.000Z | pyhammer/tasks/svn/tortoisesvncommittask.py | webbers/pyhammer | 84efafed65ab05c071a55944b91343b9fd1ef58e | [
"MIT"
] | null | null | null | pyhammer/tasks/svn/tortoisesvncommittask.py | webbers/pyhammer | 84efafed65ab05c071a55944b91343b9fd1ef58e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import subprocess
from pyhammer.tasks.taskbase import TaskBase
class TortoiseSvnCommitTask( TaskBase ):
def __init__( self, path ):
super(TortoiseSvnCommitTask, self).__init__()
self.__path = path
def RunCmd( self, CmdLine, CmdDir = None ):
p = subprocess.Popen( CmdLine, cwd = CmdDir )
return p.wait()
def build( self ):
self.RunCmd( "TortoiseProc /command:commit /path:" + self.__path )
return True | 28.764706 | 74 | 0.642127 |
import subprocess
from pyhammer.tasks.taskbase import TaskBase
class TortoiseSvnCommitTask( TaskBase ):
def __init__( self, path ):
super(TortoiseSvnCommitTask, self).__init__()
self.__path = path
def RunCmd( self, CmdLine, CmdDir = None ):
p = subprocess.Popen( CmdLine, cwd = CmdDir )
return p.wait()
def build( self ):
self.RunCmd( "TortoiseProc /command:commit /path:" + self.__path )
return True | true | true |
f72960c7b2824b79159bedb69ae89d8779ae572d | 454 | py | Python | app/utils/data_imports.py | thiagomurtinho/Iris_Classification | 8b04fed7f7162c3a6bd276c0dbfb2c291c02492c | [
"MIT"
] | null | null | null | app/utils/data_imports.py | thiagomurtinho/Iris_Classification | 8b04fed7f7162c3a6bd276c0dbfb2c291c02492c | [
"MIT"
] | null | null | null | app/utils/data_imports.py | thiagomurtinho/Iris_Classification | 8b04fed7f7162c3a6bd276c0dbfb2c291c02492c | [
"MIT"
] | null | null | null | def extraction_colunms_value(DataFrame, DataCompare, ColumName):
data = []
index = DataFrame.Species.str.contains(DataCompare)
if(ColumName == 'SepalLengthCm'):
data = DataFrame[index].SepalLengthCm
if(ColumName == 'SepalWidthCm'):
data = DataFrame[index].SepalWidthCm
if(ColumName == 'PetalWidthCm'):
data = DataFrame[index].PetalWidthCm
if(ColumName == 'PetalLengthCm'):
data = DataFrame[index].PetalLengthCm
return data | 32.428571 | 64 | 0.72467 | def extraction_colunms_value(DataFrame, DataCompare, ColumName):
data = []
index = DataFrame.Species.str.contains(DataCompare)
if(ColumName == 'SepalLengthCm'):
data = DataFrame[index].SepalLengthCm
if(ColumName == 'SepalWidthCm'):
data = DataFrame[index].SepalWidthCm
if(ColumName == 'PetalWidthCm'):
data = DataFrame[index].PetalWidthCm
if(ColumName == 'PetalLengthCm'):
data = DataFrame[index].PetalLengthCm
return data | true | true |
f72961a71ee62380a011d47d8b613f6a186272ad | 3,766 | py | Python | modules/fluid/fluid_variationalform.py | marchirschvogel/ambit | 9c21852d2c7c562b7accdd34025fc6b829eb1d3e | [
"BSD-4-Clause"
] | 3 | 2021-03-22T14:17:09.000Z | 2021-05-03T15:24:09.000Z | modules/fluid/fluid_variationalform.py | marchirschvogel/ambit | 9c21852d2c7c562b7accdd34025fc6b829eb1d3e | [
"BSD-4-Clause"
] | null | null | null | modules/fluid/fluid_variationalform.py | marchirschvogel/ambit | 9c21852d2c7c562b7accdd34025fc6b829eb1d3e | [
"BSD-4-Clause"
] | 2 | 2021-03-29T10:52:09.000Z | 2021-11-26T15:56:38.000Z | #!/usr/bin/env python3
# Copyright (c) 2019-2022, Dr.-Ing. Marc Hirschvogel
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ufl
# fluid mechanics variational forms class
# Principle of Virtual Power
# TeX: \delta \mathcal{P} = \delta \mathcal{P}_{\mathrm{kin}} + \delta \mathcal{P}_{\mathrm{int}} - \delta \mathcal{P}_{\mathrm{ext}} = 0, \quad \forall \; \delta\boldsymbol{v}
class variationalform:
def __init__(self, var_v, dv, var_p, dp, n=None):
self.var_v = var_v
self.var_p = var_p
self.dv = dv
self.dp = dp
self.n = n
### Kinetic virtual power
# TeX: \delta \mathcal{P}_{\mathrm{kin}} := \int\limits_{\Omega} \rho \left(\frac{\partial\boldsymbol{v}}{\partial t} + (\boldsymbol{\nabla}\otimes\boldsymbol{v})^{\mathrm{T}}\boldsymbol{v}\right) \cdot \delta\boldsymbol{v} \,\mathrm{d}v
def deltaP_kin(self, a, v, rho, ddomain, v_old=None):
if v_old is None:
return rho*ufl.dot(a + ufl.grad(v) * v, self.var_v)*ddomain
else:
return rho*ufl.dot(a + ufl.grad(v) * v_old, self.var_v)*ddomain
### Internal virtual power
# TeX: \delta \mathcal{P}_{\mathrm{int}} := \int\limits_{\Omega} \boldsymbol{\sigma} : \delta\boldsymbol{\gamma} \,\mathrm{d}v
def deltaP_int(self, sig, ddomain):
# TeX: \int\limits_{\Omega}\boldsymbol{\sigma} : \delta \boldsymbol{\gamma}\,\mathrm{d}v
var_gamma = 0.5*(ufl.grad(self.var_v).T + ufl.grad(self.var_v))
return ufl.inner(sig, var_gamma)*ddomain
def deltaP_int_pres(self, v, ddomain):
# TeX: \int\limits_{\Omega}\mathrm{div}\boldsymbol{v}\,\delta p\,\mathrm{d}v
return ufl.div(v)*self.var_p*ddomain
def residual_v_strong(self, a, v, rho, sig):
return rho*(a + ufl.grad(v) * v) - ufl.div(sig)
def residual_p_strong(self, v):
return ufl.div(v)
def f_inert(self, a, v, rho):
return rho*(a + ufl.grad(v) * v)
def f_viscous(self, sig):
return ufl.div(dev(sig))
### External virtual power
# Neumann load (Cauchy traction)
# TeX: \int\limits_{\Gamma} \hat{\boldsymbol{t}} \cdot \delta\boldsymbol{v} \,\mathrm{d}a
def deltaP_ext_neumann(self, func, dboundary):
return ufl.dot(func, self.var_v)*dboundary
# Neumann load in normal direction (Cauchy traction)
# TeX: \int\limits_{\Gamma} p\,\boldsymbol{n}\cdot\delta\boldsymbol{v}\;\mathrm{d}a
def deltaP_ext_neumann_normal(self, func, dboundary):
return func*ufl.dot(self.n, self.var_v)*dboundary
# Robin condition (dashpot)
# TeX: \int\limits_{\Gamma} c\,\boldsymbol{v}\cdot\delta\boldsymbol{v}\;\mathrm{d}a
def deltaP_ext_robin_dashpot(self, v, c, dboundary):
return -c*(ufl.dot(v, self.var_v)*dboundary)
# Robin condition (dashpot) in normal direction
# TeX: \int\limits_{\Gamma} (\boldsymbol{n}\otimes \boldsymbol{n})\,c\,\boldsymbol{v}\cdot\delta\boldsymbol{v}\;\mathrm{d}a
def deltaP_ext_robin_dashpot_normal(self, v, c_n, dboundary):
return -c_n*(ufl.dot(v, self.n)*ufl.dot(self.n, self.var_v)*dboundary)
### Flux coupling conditions
# flux
# TeX: \int\limits_{\Gamma} \boldsymbol{n}\cdot\boldsymbol{v}\;\mathrm{d}a
def flux(self, v, dboundary):
return ufl.dot(self.n, v)*dboundary
# surface - derivative of pressure load w.r.t. pressure
# TeX: \int\limits_{\Gamma} \boldsymbol{n}\cdot\delta\boldsymbol{v}\;\mathrm{d}a
def surface(self, dboundary):
return ufl.dot(self.n, self.var_v)*dboundary
| 36.211538 | 241 | 0.624535 |
import ufl
class variationalform:
def __init__(self, var_v, dv, var_p, dp, n=None):
self.var_v = var_v
self.var_p = var_p
self.dv = dv
self.dp = dp
self.n = n
ddomain, v_old=None):
if v_old is None:
return rho*ufl.dot(a + ufl.grad(v) * v, self.var_v)*ddomain
else:
return rho*ufl.dot(a + ufl.grad(v) * v_old, self.var_v)*ddomain
var_gamma = 0.5*(ufl.grad(self.var_v).T + ufl.grad(self.var_v))
return ufl.inner(sig, var_gamma)*ddomain
def deltaP_int_pres(self, v, ddomain):
return ufl.div(v)*self.var_p*ddomain
def residual_v_strong(self, a, v, rho, sig):
return rho*(a + ufl.grad(v) * v) - ufl.div(sig)
def residual_p_strong(self, v):
return ufl.div(v)
def f_inert(self, a, v, rho):
return rho*(a + ufl.grad(v) * v)
def f_viscous(self, sig):
return ufl.div(dev(sig))
func, dboundary):
return ufl.dot(func, self.var_v)*dboundary
def deltaP_ext_neumann_normal(self, func, dboundary):
return func*ufl.dot(self.n, self.var_v)*dboundary
def deltaP_ext_robin_dashpot(self, v, c, dboundary):
return -c*(ufl.dot(v, self.var_v)*dboundary)
def deltaP_ext_robin_dashpot_normal(self, v, c_n, dboundary):
return -c_n*(ufl.dot(v, self.n)*ufl.dot(self.n, self.var_v)*dboundary)
return ufl.dot(self.n, v)*dboundary
def surface(self, dboundary):
return ufl.dot(self.n, self.var_v)*dboundary
| true | true |
f72961c755fe3f726acfd0ab5f4be711323edce6 | 51 | py | Python | paddlenlp/transformers/nezha/__init__.py | tanhanzhuo/PaddleNLP | d0d20678f2bec820570b4f09ca49cd402d20c3b6 | [
"Apache-2.0"
] | 7,091 | 2021-02-05T13:56:25.000Z | 2022-03-31T11:42:50.000Z | paddlenlp/transformers/nezha/__init__.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | 844 | 2021-02-10T01:09:29.000Z | 2022-03-31T12:12:58.000Z | paddlenlp/transformers/nezha/__init__.py | xihuanafeng/PaddleNLP | 14c3209118b2cadcce9a8f66b760c9cddb3a02ad | [
"Apache-2.0"
] | 1,035 | 2021-02-05T14:26:48.000Z | 2022-03-31T11:42:57.000Z | from .modeling import *
from .tokenizer import *
| 17 | 25 | 0.72549 | from .modeling import *
from .tokenizer import *
| true | true |
f72961fc4ed12b1c027b054640810df1e55e9e82 | 31,380 | py | Python | hyperparameter_hunter/reporting.py | mdjabc/hyperparameter_hunter | bfbd1faf63272a62e6f971d7e9a0487d71aea8f6 | [
"MIT"
] | 1 | 2019-04-22T02:22:03.000Z | 2019-04-22T02:22:03.000Z | hyperparameter_hunter/reporting.py | mdjabc/hyperparameter_hunter | bfbd1faf63272a62e6f971d7e9a0487d71aea8f6 | [
"MIT"
] | null | null | null | hyperparameter_hunter/reporting.py | mdjabc/hyperparameter_hunter | bfbd1faf63272a62e6f971d7e9a0487d71aea8f6 | [
"MIT"
] | null | null | null | ##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter import exceptions
from hyperparameter_hunter.settings import G
from hyperparameter_hunter.utils.general_utils import now_time, expand_mins_secs
##################################################
# Import Miscellaneous Assets
##################################################
from contextlib import suppress
from datetime import datetime
import inspect
import logging
import os.path
import sys
class ReportingHandler(object):
    """Central dispatcher for all Experiment logging.

    Exposes three callables (:meth:`log`, :meth:`debug`, :meth:`warn`) that start life as
    no-op placeholders and are rebound to the concrete ``_logging_*`` implementations by
    :meth:`_configure_reporting_type` during initialization"""

    def __init__(
        self,
        heartbeat_path=None,
        float_format="{:.5f}",
        console_params=None,
        heartbeat_params=None,
        add_frame=False,
    ):
        """Class in control of logging methods, log formatting, and initializing Experiment logging

        Parameters
        ----------
        heartbeat_path: Str path, or None, default=None
            If string and valid heartbeat path, logging messages will also be saved in this file
        float_format: String, default='{:.5f}'
            If not default, must be a valid formatting string for floating point values. If invalid,
            default will be used
        console_params: Dict, or None, default=None
            Parameters passed to :meth:`_configure_console_handler`
        heartbeat_params: Dict, or None, default=None
            Parameters passed to :meth:`_configure_heartbeat_handler`
        add_frame: Boolean, default=False
            If True, whenever :meth:`log` is called, the source of the call will be prepended to
            the content being logged"""
        self.reporting_type = "logging"  # TODO: Add `reporting_type` kwarg (logging, advanced)
        self.heartbeat_path = heartbeat_path
        self.float_format = float_format
        # Fall back to empty dicts so the handler-configuration methods can safely `**`-expand
        self.console_params = console_params or {}
        self.heartbeat_params = heartbeat_params or {}
        self.add_frame = add_frame

        self._validate_parameters()
        self._configure_reporting_type()

    def _validate_parameters(self):
        """Ensure all logging parameters are properly formatted

        Raises
        ------
        TypeError
            If `reporting_type`, `heartbeat_path`, `float_format`, `console_params`, or
            `heartbeat_params` is of the wrong type
        ValueError
            If `reporting_type` is unrecognized, `heartbeat_path` does not end in '.log', or
            `float_format` is not wrapped in braces
        FileNotFoundError
            If the directory portion of `heartbeat_path` does not exist"""
        #################### reporting_type ####################
        valid_types = ["logging", "standard", "advanced"]
        if not isinstance(self.reporting_type, str):
            raise TypeError(f"reporting_type must be a str. Received {self.reporting_type}")
        if self.reporting_type not in valid_types:
            raise ValueError(f"reporting_type must be in {valid_types}, not {self.reporting_type}")
        #################### heartbeat_path ####################
        if self.heartbeat_path is not None:
            if not isinstance(self.heartbeat_path, str):
                raise TypeError(f"heartbeat_path must be a str. Received {self.heartbeat_path}")
            head, tail = os.path.split(self.heartbeat_path)
            if not tail.endswith(".log"):
                raise ValueError(f"heartbeat_path must end in '.log'. Given {self.heartbeat_path}")
            if not os.path.exists(head):
                raise FileNotFoundError(
                    f"heartbeat_path must start with an existing dir. Given {self.heartbeat_path}"
                )
        #################### float_format ####################
        if not isinstance(self.float_format, str):
            raise TypeError(f"float_format must be a format str. Received {self.float_format}")
        if (not self.float_format.startswith("{")) or (not self.float_format.endswith("}")):
            raise ValueError(f"float_format must be inside '{{' and '}}'. Got {self.float_format}")
        #################### console_params ####################
        if not isinstance(self.console_params, dict):
            raise TypeError(f"console_params must be dict or None. Given {self.console_params}")
        #################### heartbeat_params ####################
        if not isinstance(self.heartbeat_params, dict):
            raise TypeError(f"heartbeat_params must be dict or None. Given {self.heartbeat_params}")

    def _configure_reporting_type(self):
        """Set placeholder logging methods to :attr:`reporting_type` specs and initialize logging

        Rebinds the :meth:`log`/:meth:`debug`/:meth:`warn` placeholders to the concrete
        ``_logging_*`` implementations when `reporting_type` is 'logging'; the other two
        recognized modes are currently unimplemented and raise"""
        if self.reporting_type == "standard":
            raise ValueError("Standard logging is not yet implemented. Please choose 'logging'")
            # setattr(self, 'log', self._standard_log)
            # setattr(self, 'debug', self._standard_debug)
            # setattr(self, 'warn', self._standard_warn)
        elif self.reporting_type == "logging":
            setattr(self, "log", self._logging_log)
            setattr(self, "debug", self._logging_debug)
            setattr(self, "warn", self._logging_warn)
            self._initialize_logging_logging()
        elif self.reporting_type == "advanced":
            raise ValueError("Advanced logging unimplemented. Please use 'logging'")

    def _initialize_logging_logging(self):
        """Initialize and configure logging to be handled by the `logging` library"""
        #################### Clear Logging Configuration ####################
        # Strip any handlers/filters installed by earlier runs or imports so the
        # `logging.basicConfig` call below starts from a clean slate
        root = logging.getLogger()
        list(map(root.removeHandler, root.handlers[:]))
        list(map(root.removeFilter, root.filters[:]))
        #################### Configure Logging ####################
        exceptions.hook_exception_handler()
        _logger = logging.getLogger(__name__)
        _logger.setLevel(logging.DEBUG)
        handlers = [self._configure_console_handler(**self.console_params)]
        # Suppress FileExistsError - Raised when self.heartbeat_path is None, meaning heartbeat blacklisted
        with suppress(FileExistsError):
            handlers.append(self._configure_heartbeat_handler(**self.heartbeat_params))
        logging.basicConfig(handlers=handlers, level=logging.DEBUG)
        self.debug("Logging Logging has been initialized!")

    # noinspection PyUnusedLocal
    @staticmethod
    def _configure_console_handler(level="INFO", fmt=None, datefmt="%H:%M:%S", style="%", **kwargs):
        """Configure the console handler in charge of printing log messages

        Parameters
        ----------
        level: String, or Int, default='INFO'
            Minimum message level for the console. Passed to :meth:`logging.StreamHandler.setlevel`
        fmt: String, or None, default=None
            Message formatting string for the console. Passed to :meth:`logging.Formatter.__init__`
        datefmt: String, or None, default="%H:%M:%S"
            Date formatting string for the console. Passed to :meth:`logging.Formatter.__init__`.
            For the `logging` library default, use `datefmt=None` ("%Y-%m-%d %H:%M:%S" + <ms>)
        style: String, default='%'
            Type of string formatting used. Passed to :meth:`logging.Formatter.__init__`
        **kwargs: Dict
            Extra keyword arguments

        Returns
        -------
        console_handler: `logging.StreamHandler` instance
            The instantiated handler for the console"""
        console_handler = logging.StreamHandler(stream=sys.stdout)
        console_handler.setLevel(level)
        fmt = fmt or "<%(asctime)s> %(message)s"
        formatter = logging.Formatter(fmt=fmt, datefmt=datefmt, style=style)
        console_handler.setFormatter(formatter)
        return console_handler

    # noinspection PyUnusedLocal
    def _configure_heartbeat_handler(
        self, level="DEBUG", fmt=None, datefmt=None, style="%", **kwargs
    ):
        """Configure the file handler in charge of adding log messages to the heartbeat file

        Parameters
        ----------
        level: String, or Int, default='DEBUG'
            Minimum message level for the heartbeat file. Passed to
            :meth:`logging.FileHandler.setlevel`
        fmt: String, or None, default=None
            Message formatting string for the heartbeat file. Passed to
            :meth:`logging.Formatter.__init__`
        datefmt: String, or None, default=None
            Date formatting string for the heartbeat file. Passed to
            :meth:`logging.Formatter.__init__`
        style: String, default='%'
            Type of string formatting used. Passed to :meth:`logging.Formatter.__init__`
        **kwargs: Dict
            Extra keyword arguments

        Returns
        -------
        file_handler: `logging.FileHandler` instance
            The instantiated handler for the heartbeat file

        Raises
        ------
        FileExistsError
            If `heartbeat_path` is None (heartbeat blacklisted) — caught and suppressed by
            :meth:`_initialize_logging_logging`"""
        if self.heartbeat_path is None:
            raise FileExistsError
        file_handler = logging.FileHandler(self.heartbeat_path, mode="w")
        file_handler.setLevel(level)
        fmt = fmt or "<%(asctime)s> %(levelname)-8s - %(message)s"
        formatter = logging.Formatter(fmt=fmt, datefmt=datefmt, style=style)
        file_handler.setFormatter(formatter)
        return file_handler

    ##################################################
    # Placeholder Methods:
    ##################################################
    # The following no-ops are rebound to the concrete `_logging_*` implementations by
    # `_configure_reporting_type` during `__init__`
    def log(self, content, **kwargs):
        """Placeholder method before proper initialization"""

    def debug(self, content, **kwargs):
        """Placeholder method before proper initialization"""

    def warn(self, content, **kwargs):
        """Placeholder method before proper initialization"""

    ##################################################
    # Logging-Logging Methods:
    ##################################################
    # noinspection PyUnusedLocal
    def _logging_log(
        self, content, verbose_threshold=None, previous_frame=None, add_time=False, **kwargs
    ):
        """Log an info message via the `logging` library

        Parameters
        ----------
        content: String
            The message to log
        verbose_threshold: Int, or None, default=None
            If None, `content` logged normally. If int and `G.Env.verbose` >= `verbose_threshold`,
            `content` is logged normally. Else if int and `G.Env.verbose` < `verbose_threshold`,
            then `content` is logged on the `logging.debug` level, instead of `logging.info`
        previous_frame: Frame, or None, default=None
            The frame preceding the log call. If not provided, it will be inferred
        add_time: Boolean, default=False
            If True, the current time will be added to `content` before logging
        **kwargs: Dict
            Extra keyword arguments"""
        if self.add_frame is True:
            previous_frame = previous_frame or inspect.currentframe().f_back
            try:
                frame_source = format_frame_source(previous_frame)
            finally:
                # Drop the frame reference promptly — holding frames alive can create
                # reference cycles (see the `inspect` module docs)
                del previous_frame
            content = f"{frame_source} - {content}"
        content = add_time_to_content(content, add_time=add_time)
        if (verbose_threshold is None) or (G.Env.verbose >= verbose_threshold):
            logging.info(content)
        else:
            logging.debug(content)

    # noinspection PyUnusedLocal
    def _logging_debug(self, content, previous_frame=None, add_time=False, **kwargs):
        """Log a debug message via the `logging` library

        Parameters
        ----------
        content: String
            The message to log
        previous_frame: Frame, or None, default=None
            The frame preceding the debug call. If not provided, it will be inferred
        add_time: Boolean, default=False
            If True, the current time will be added to `content` before logging
        **kwargs: Dict
            Extra keyword arguments"""
        if self.add_frame is True:
            previous_frame = previous_frame or inspect.currentframe().f_back
            try:
                frame_source = format_frame_source(previous_frame)
            finally:
                # Drop the frame reference promptly to avoid reference cycles
                del previous_frame
            content = f"{frame_source} - {content}"
        content = add_time_to_content(content, add_time=add_time)
        logging.debug(content)

    # noinspection PyUnusedLocal
    def _logging_warn(self, content, **kwargs):
        """Log a warning message via the `logging` library

        Parameters
        ----------
        content: String
            The message to log
        **kwargs: Dict
            Extra keyword arguments"""
        if self.add_frame is True:
            previous_frame = inspect.currentframe().f_back
            try:
                frame_source = format_frame_source(previous_frame)
            finally:
                # Drop the frame reference promptly to avoid reference cycles
                del previous_frame
            content = f"{frame_source} - {content}"
        logging.warning(content)
class _Color:
    """Object defining color codes for use with logging"""

    BLUE = "\033[34m"  # ANSI escape: blue foreground
    CYAN = "\033[36m"  # ANSI escape: cyan foreground
    GREEN = "\033[32m"  # ANSI escape: green foreground
    MAGENTA = "\033[35m"  # ANSI escape: magenta foreground
    RED = "\033[31m"  # ANSI escape: red foreground
    STOP = "\033[0m"  # ANSI escape: reset attributes (terminates a colored span)
class OptimizationReporter:
    def __init__(self, parameter_names, verbose=1, show_experiment_id=8, do_maximize=True):
        """A MixIn class for reporting the results of hyperparameter optimization rounds

        Parameters
        ----------
        parameter_names: List
            The names of the hyperparameters being evaluated and optimized
        verbose: Int in [0, 1, 2], default=1
            If 0, all but critical logging is silenced. If 1, normal logging is performed. If 2,
            detailed logging is performed
        show_experiment_id: Int, or Boolean, default=8
            If True, the experiment_id will be printed in each result row. If False, it will not.
            If int, the first `show_experiment_id`-many characters of each experiment_id will be
            printed in each row
        do_maximize: Boolean, default=True
            If False, smaller metric values will be considered preferred and will be highlighted to
            stand out. Else larger metric values will be treated as preferred"""
        self.original_parameter_names = parameter_names
        self.verbose = verbose
        # Cap the ID column width at 36 characters — the length of a full UUID string
        self.show_experiment_id = (
            36 if (show_experiment_id is True or show_experiment_id > 36) else show_experiment_id
        )
        self.do_maximize = do_maximize

        self.end = " | "  # Column separator used by every header/row print
        self.y_max = None  # Best metric value seen so far
        self.x_max = None  # Hyperparameters that produced `y_max`
        self.iteration = 0
        self.start_time = datetime.now()
        self.last_round = datetime.now()

        # Strip leading group names from tuple-form hyperparameter paths so column headers
        # show only the meaningful tail of each path
        skip = ("model_init_params", "model_extra_params", "feature_engineer", "feature_selector")
        self.parameter_names = [_[1:] if _[0] in skip else _ for _ in self.original_parameter_names]
        self.parameter_names = [_[1:] if _[0] == "params" else _ for _ in self.parameter_names]
        self.parameter_names = [
            _[0] if len(_) == 1 else str(_).replace("'", "").replace('"', "")
            for _ in self.parameter_names
        ]
        self.sizes = [max(len(_), 7) for _ in self.parameter_names]  # Minimum column width of 7
        # Display parameter columns in alphabetical order
        self.sorted_indexes = sorted(
            range(len(self.parameter_names)), key=self.parameter_names.__getitem__
        )

    def print_saved_results_header(self):
        """Print a header signifying that saved Experiment results are being read"""
        header = f"{_Color.RED}Saved Result Files{_Color.STOP}"
        self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))

    def print_random_points_header(self):
        """Print a header signifying that random point evaluation rounds are starting"""
        header = f"{_Color.RED}Random Point Evaluation{_Color.STOP}"
        self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))

    def print_optimization_header(self):
        """Print a header signifying that Optimization rounds are starting"""
        header = f"{_Color.RED}Hyperparameter Optimization{_Color.STOP}"
        self.print_header(header, (_Color.RED + "_" * self._line_len() + _Color.STOP))

    def _line_len(self):
        """Calculate number of characters a header's underlining should span

        Returns
        -------
        line_len: Int
            The number of characters the line should span"""
        line_len = 29  # Fixed width of the Step/Time/Value columns plus separators
        line_len += sum([_ + 5 for _ in self.sizes])
        line_len += self.show_experiment_id + 3 if self.show_experiment_id else 0
        return line_len

    def print_header(self, header, line):
        """Utility to perform actual printing of headers given formatted inputs

        Parameters
        ----------
        header: String
            Specifies the stage of optimization being entered, and the type of results to follow
        line: String
            The underlining to follow `header`"""
        print(header)
        print(line)

        self._print_column_name("Step", 5)
        if self.show_experiment_id:
            self._print_column_name("ID", self.show_experiment_id)
        self._print_column_name("Time", 6)
        self._print_column_name("Value", 10)

        for index in self.sorted_indexes:
            self._print_column_name(self.parameter_names[index], self.sizes[index] + 2)
        print("")

    def _print_column_name(self, value, size):
        """Print a column name within a specified `size` constraint

        Parameters
        ----------
        value: String
            The name of the column to print
        size: Int
            The number of characters that `value` should span"""
        try:
            print("{0:>{1}}".format(value, size), end=self.end)
        except TypeError:  # Probably given tuple including param origin (init_params, extra_params, etc.)
            if len(value) == 1:
                print("{0:>{1}}".format(value[0], size), end=self.end)
            else:
                print("{0:>{1}}".format(str(value), size), end=self.end)

    def print_result(self, hyperparameters, evaluation, experiment_id=None):
        """Print a row containing the results of an Experiment just executed

        Parameters
        ----------
        hyperparameters: List
            List of hyperparameter values in the same order as :attr:`parameter_names`
        evaluation: Float
            An evaluation of the performance of `hyperparameters`
        experiment_id: Str, or None, default=None
            If not None, should be a string that is the UUID of the Experiment"""
        if not self.verbose:
            return

        print("{:>5d}".format(self.iteration), end=self.end)

        #################### Experiment ID ####################
        if self.show_experiment_id:
            if experiment_id is not None:
                print("{}".format(experiment_id[: self.show_experiment_id]), end=self.end)
            else:
                print(" " * self.show_experiment_id, end=self.end)

        #################### Time Elapsed ####################
        minutes, seconds = divmod((datetime.now() - self.last_round).total_seconds(), 60)
        print(expand_mins_secs(minutes, seconds), end=self.end)

        #################### Evaluation Result ####################
        # Highlight the row (magenta value, green hyperparameters) when it is a new best
        if (
            (self.y_max is None)  # First evaluation
            or (self.do_maximize and self.y_max < evaluation)  # Found new max (best)
            or (not self.do_maximize and self.y_max > evaluation)  # Found new min (best)
        ):
            self.y_max, self.x_max = evaluation, hyperparameters
            self._print_target_value(evaluation, pre=_Color.MAGENTA, post=_Color.STOP)
            self._print_input_values(hyperparameters, pre=_Color.GREEN, post=_Color.STOP)
        else:
            self._print_target_value(evaluation)
            self._print_input_values(hyperparameters)

        print("")
        self.last_round = datetime.now()
        self.iteration += 1

    def _print_target_value(self, value, pre="", post=""):
        """Print the utility of an Experiment

        Parameters
        ----------
        value: Float
            The utility value to print
        pre: String, default=''
            Content to prepend to the formatted `value` string before printing
        post: String, default=''
            Content to append to the formatted `value` string before printing"""
        content = pre + "{: >10.5f}".format(value) + post
        print(content, end=self.end)

    def _print_input_values(self, values, pre="", post=""):
        """Print the values of the hyperparameters used by an Experiment

        Parameters
        ----------
        values: List
            The hyperparameter values to print, indexed like :attr:`parameter_names`
        pre: String, default=''
            Content to prepend to each formatted value string before printing
        post: String, default=''
            Content to append to each formatted value string before printing"""
        for index in self.sorted_indexes:
            if isinstance(values[index], float):
                # Floats: right-align with precision capped at 4 decimal places (6 - 2),
                # never wider than the column allows
                content = "{0: >{1}.{2}f}".format(
                    values[index], self.sizes[index] + 2, min(self.sizes[index] - 3, 6 - 2)
                )
            else:
                content = "{0: >{1}}".format(values[index], self.sizes[index] + 2)
            print(pre + content + post, end=self.end)

    def reset_timer(self):
        """Set :attr:`start_time`, and :attr:`last_round` to the current time"""
        self.start_time = datetime.now()
        self.last_round = datetime.now()

    def print_summary(self):
        """Print a summary of the results of hyperparameter optimization upon completion"""
        # TODO: Finish this
        if not self.verbose:
            return
def format_frame_source(previous_frame, **kwargs):
    """Construct a string describing the location at which a call was made

    Parameters
    ----------
    previous_frame: Frame
        A frame depicting the location at which a call was made
    **kwargs: Dict
        Any additional kwargs to supply to :func:`reporting.stringify_frame_source`

    Returns
    -------
    The stringified frame source information of `previous_frame`"""
    frame_info = inspect.getframeinfo(previous_frame)

    caller_class = None
    try:
        # If the frame belongs to a bound method, also report the enclosing class name
        caller_class = type(previous_frame.f_locals["self"]).__name__
    except (AttributeError, KeyError):
        pass

    return stringify_frame_source(
        frame_info[0], frame_info[1], frame_info[2], caller_class, **kwargs
    )
def stringify_frame_source(
    src_file,
    src_line_no,
    src_func,
    src_class,
    add_line_no=True,
    max_line_no_size=4,
    total_max_size=80,
):
    """Construct a string that neatly displays the location in the code at which a call was made

    Parameters
    ----------
    src_file: Str
        A filepath; only its basename (without extension) appears in the result
    src_line_no: Int
        The line number in `src_file` at which the call was made
    src_func: Str
        The name of the function in `src_file` in which the call was made
    src_class: Str, or None
        If not None, the class in `src_file` in which the call was made
    add_line_no: Boolean, default=True
        If True, `src_line_no` (left-aligned to `max_line_no_size` characters,
        followed by ' - ') prefixes the result
    max_line_no_size: Int, default=4
        Total number (including padding) of characters to be occupied by `src_line_no`
    total_max_size: Int, default=80
        Total number (including padding) of characters to be occupied by the result

    Returns
    -------
    source_content: Str
        A formatted, left-justified string locating the call site"""
    module_name = os.path.splitext(os.path.basename(src_file))[0]
    prefix = ""
    if add_line_no is True:
        prefix = "{0:<{1}}".format(src_line_no, max_line_no_size) + " - "
    if src_class is not None:
        call_site = "{}.{}.{}()".format(module_name, src_class, src_func)
    else:
        call_site = "{}.{}()".format(module_name, src_func)
    # Pad the assembled string out to the requested total width
    return "{0:<{1}}".format(prefix + call_site, total_max_size)
def add_time_to_content(content, add_time=False):
    """Construct a string containing the original `content`, plus the current time

    Parameters
    ----------
    content: Str
        The original string, to which the time may be concatenated
    add_time: Boolean or Str, default=False
        If True, the current time is appended; if a truthy non-True value, that
        value is used verbatim as the time; if falsy, `content` is unchanged

    Returns
    -------
    content: Str
        Original `content`, optionally followed by ' Time: <time>'"""
    if add_time is True:
        add_time = now_time()
    if not add_time:
        return content
    suffix = "Time: {}".format(add_time)
    # Insert a separating space unless content is empty or already ends in one
    if content and not content.endswith(" "):
        content += " "
    return content + suffix
def format_fold_run(rep=None, fold=None, run=None, mode="concise"):
    """Construct a string to display the repetition, fold, and run currently being executed

    Parameters
    ----------
    rep: Int, or None, default=None
        The repetition number currently being executed
    fold: Int, or None, default=None
        The fold number currently being executed
    run: Int, or None, default=None
        The run number currently being executed
    mode: {"concise", "verbose"}, default="concise"
        If "concise", the result will contain abbreviations for rep/fold/run

    Returns
    -------
    content: Str
        A clean display of the current repetition/fold/run

    Raises
    ------
    ValueError
        If `mode` is neither "concise" nor "verbose"

    Examples
    --------
    >>> format_fold_run(rep=0, fold=3, run=2, mode="concise")
    'R0-f3-r2'
    >>> format_fold_run(rep=0, fold=3, run=2, mode="verbose")
    'Rep-Fold-Run: 0-3-2'
    >>> format_fold_run(rep=0, fold=3, run="*", mode="concise")
    'R0-f3-r*'
    """
    # The original built the string from ten near-identical `format(...)`
    # concatenations; `format(x)` with no spec is just `str(x)`, so the
    # duplicated pairwise-dash logic is factored into one helper instead.
    def _dash_chain(rep_part, fold_part, run_part):
        """Join the given parts, inserting '-' only between adjacent present
        values (e.g. rep and run present but fold absent -> no dash)."""
        out = ""
        if rep is not None:
            out += rep_part
        if rep is not None and fold is not None:
            out += "-"
        if fold is not None:
            out += fold_part
        if fold is not None and run is not None:
            out += "-"
        if run is not None:
            out += run_part
        return out

    if mode == "verbose":
        content = _dash_chain("Rep", "Fold", "Run")
        if any(_ is not None for _ in (rep, fold, run)):
            content += ": "
        content += _dash_chain(str(rep), str(fold), str(run))
    elif mode == "concise":
        content = _dash_chain("R{}".format(rep), "f{}".format(fold), "r{}".format(run))
    else:
        raise ValueError("Received invalid mode value: '{}'".format(mode))
    return content
def format_evaluation(results, separator=" | ", float_format="{:.5f}"):
    """Construct a string to neatly display the results of a model evaluation

    Parameters
    ----------
    results: Dict
        The results of a model evaluation, in which keys represent the dataset
        type evaluated, and values are dicts mapping metric names to values
        (or None, in which case the dataset type is skipped)
    separator: Str, default=' | '
        The string used to join all the metric values into a single string
    float_format: Str, default='{:.5f}'
        A python string float formatter, applied to numeric metric values;
        non-numeric values fall back to plain `str` formatting

    Returns
    -------
    content: Str
        The model's evaluation results"""
    # Pretty display names for the known dataset types
    display_names = {"oof": "OOF", "holdout": "Holdout", "in_fold": "In-Fold"}
    entries = []
    for data_type, values in results.items():
        if values is None:
            continue
        label = display_names.get(data_type, data_type)
        formatted = []
        for metric_id, metric_value in values.items():
            try:
                value_str = float_format.format(metric_value)
            except ValueError:
                # Non-numeric metric (e.g. a string) -- display it verbatim
                value_str = "{}".format(metric_value)
            formatted.append("{}={}".format(metric_id, value_str))
        entries.append("{}({})".format(label, ", ".join(formatted)))
    return separator.join(entries)
# ADVANCED_FIT_LOGGING_DISPLAY_LAYOUT = [
# {
# "column_name": "General",
# "sub_columns_names": [
# ["fold", "Fold"],
# ["run", "Run"],
# ["seed", "Seed"],
# ["step", "Step"],
# ["start_time", "Start Time"],
# ["end_time", "End Time"],
# ["time_elapsed", "Time Elapsed"]
# ],
# "sub_column_min_sizes": [10, 10, 10, 20, 12, 12, 12]
# },
# # Will need to alter default "Score" sub-columns according to what metrics are actually being used
# {
# "column_name": "OOF Scores",
# "sub_columns_names": [
# ["oof_f1", "F1"],
# ["oof_roc_auc", "ROC_AUC"]
# ]
# },
# # Check that Holdout dataset is in use before adding "Holdout Scores" column
# {
# "column_name": "Holdout Scores",
# "sub_columns_names": [
# ["holdout_f1", "F1"],
# ["holdout_roc_auc", "ROC_AUC"]
# ]
# },
# {
# "column_name": "Losses",
# "sub_columns_names": [
# ["train_loss", "Train"],
# ["validation_loss", "Validation"]
# ]
# },
# ]
#
#
# class AdvancedDisplayLayout(object):
# def __init__(self):
# pass
#
#
# class AdvancedFitLogging(object):
# def __init__(self, display_layout=None, ):
# self.display_layout = display_layout or ADVANCED_FIT_LOGGING_DISPLAY_LAYOUT
#
# def _validate_parameters(self):
# pass
#
# def validate_display_layout(self):
# pass
| 40.438144 | 107 | 0.605736 | true | true | |
f7296230975ff63d0a67cb8dd9d9df2ce6ab8ea5 | 11,474 | py | Python | ez_setup.py | mrmin123/snovault | 88cbd863ef556c1032c88fdc0fc243e07fb2f922 | [
"MIT"
] | null | null | null | ez_setup.py | mrmin123/snovault | 88cbd863ef556c1032c88fdc0fc243e07fb2f922 | [
"MIT"
] | null | null | null | ez_setup.py | mrmin123/snovault | 88cbd863ef556c1032c88fdc0fc243e07fb2f922 | [
"MIT"
] | 2 | 2021-07-07T18:41:25.000Z | 2021-07-27T23:45:27.000Z | #!/usr/bin/env python
"""
Setuptools bootstrapping installer.
Run this script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
import warnings
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "18.5"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
DEFAULT_SAVE_DIR = os.curdir
def _python_cmd(*args):
"""
Execute a command.
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
    """Install Setuptools from a downloaded source archive.

    Parameters
    ----------
    archive_filename : str
        Path to the setuptools source zip archive.
    install_args : tuple, default=()
        Extra command-line arguments for ``setup.py install`` (e.g. ``('--user',)``).

    Returns
    -------
    int or None
        2 if the installation subprocess failed (used by the caller as the
        process exit code); None on success.
    """
    with archive_context(archive_filename):
        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
def _build_egg(egg, archive_filename, to_dir):
    """Build a Setuptools egg from a source archive.

    Parameters
    ----------
    egg : str
        Expected path of the resulting ``.egg`` file.
    archive_filename : str
        Path to the setuptools source zip archive.
    to_dir : str
        Directory into which the egg is built (passed as ``--dist-dir``).

    Raises
    ------
    IOError
        If the egg file does not exist after the build completes.
    """
    with archive_context(archive_filename):
        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
        # returning the result
        log.warn(egg)
        if not os.path.exists(egg):
            raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
    """Supplement ZipFile class to support context manager for Python 2.6.

    On interpreters where ``zipfile.ZipFile`` already implements the context
    manager protocol, ``__new__`` short-circuits and returns a plain
    ``ZipFile``; this subclass is only actually instantiated on interpreters
    lacking ``__exit__``.
    """
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def __new__(cls, *args, **kwargs):
        """Construct a ZipFile or ContextualZipFile as appropriate."""
        # Native context-manager support present: bypass this subclass
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
    """
    Unzip filename to a temporary directory, set to the cwd.

    The archive is extracted into a fresh temp directory and the process
    working directory is changed to the archive's single top-level folder
    for the duration of the ``with`` body. The unzipped target is cleaned
    up after, and the original working directory is restored even if the
    body raises.
    """
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        with ContextualZipFile(filename) as archive:
            archive.extractall()
        # going in the directory
        # assumes the archive contains exactly one top-level directory
        # (true for setuptools sdists)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        yield
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
    """Download Setuptools, build its egg if needed, and import it.

    The egg is inserted at the front of ``sys.path`` and any previously
    imported ``pkg_resources`` is evicted so that the freshly downloaded
    code takes precedence. Also sets ``setuptools.bootstrap_install_from``
    to the egg path for later installation.
    """
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        archive = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, archive, to_dir)
    sys.path.insert(0, egg)
    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(
        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=DEFAULT_SAVE_DIR, download_delay=15):
    """
    Ensure that a setuptools version is installed.

    A download is attempted only when no suitable installed version
    (``>= version``) can be found via ``pkg_resources``.

    Return None. Raise SystemExit if the requested version
    or later cannot be installed.
    """
    to_dir = os.path.abspath(to_dir)
    # prior to importing, capture the module state for
    # representative modules.
    rep_modules = 'pkg_resources', 'setuptools'
    imported = set(sys.modules).intersection(rep_modules)
    try:
        import pkg_resources
        pkg_resources.require("setuptools>=" + version)
        # a suitable version is already installed
        return
    except ImportError:
        # pkg_resources not available; setuptools is not installed; download
        pass
    except pkg_resources.DistributionNotFound:
        # no version of setuptools was found; allow download
        pass
    except pkg_resources.VersionConflict as VC_err:
        # an older setuptools is installed; if it was imported before this
        # call, it cannot safely be unloaded -- bail out instead
        if imported:
            _conflict_bail(VC_err, version)
        # otherwise, unload pkg_resources to allow the downloaded version to
        # take precedence.
        del pkg_resources
        _unload_pkg_resources()
    return _do_download(version, download_base, to_dir, download_delay)
def _conflict_bail(VC_err, version):
    """
    Setuptools was imported prior to invocation, so it is
    unsafe to unload it. Bail out.

    Writes an explanatory message to stderr and exits with status 2.
    """
    # The template is expanded with locals(), so both `version` and `VC_err`
    # are interpolated below.
    conflict_tmpl = textwrap.dedent("""
    The required version of setuptools (>={version}) is not available,
    and can't be installed while this script is running. Please
    install a more recent version first, using
    'easy_install -U setuptools'.
    (Currently using {VC_err.args[0]!r})
    """)
    msg = conflict_tmpl.format(**locals())
    sys.stderr.write(msg)
    sys.exit(2)
def _unload_pkg_resources():
del_modules = [
name for name in sys.modules
if name.startswith('pkg_resources')
]
for mod_name in del_modules:
del sys.modules[mod_name]
def _clean_check(cmd, target):
"""
Run the command to download target.
If the command fails, clean up before re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell.

    Powershell will validate trust.
    Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    # vars() interpolates the local `url` and `target` with %r so they are
    # quoted as string literals inside the PowerShell command
    ps_cmd = (
        "[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
        "[System.Net.CredentialCache]::DefaultCredentials; "
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
        % vars()
    )
    cmd = [
        'powershell',
        '-Command',
        ps_cmd,
    ]
    # _clean_check removes the partial target file if the download fails
    _clean_check(cmd, target)
def has_powershell():
    """Return True when running on Windows and `powershell` executes
    successfully; False otherwise."""
    if platform.system() != 'Windows':
        return False
    probe = ['powershell', '-Command', 'echo test']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(probe, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Download `url` to `target` with the curl command-line tool."""
    _clean_check(['curl', url, '--silent', '--output', target], target)


def has_curl():
    """Return True when a working curl binary is available on PATH."""
    probe = ['curl', '--version']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(probe, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Download `url` to `target` with the wget command-line tool."""
    _clean_check(['wget', url, '--quiet', '--output-document', target], target)


def has_wget():
    """Return True when a working wget binary is available on PATH."""
    probe = ['wget', '--version']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(probe, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
    """Use Python to download the file, without connection authentication."""
    with contextlib.closing(urlopen(url)) as src:
        # Read all the data in one block.
        payload = src.read()
    # Write all the data in one block to avoid creating a partial file.
    with open(target, "wb") as dst:
        dst.write(payload)
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the most trustworthy viable downloader function, or None if no
    downloader reports itself viable (in practice the insecure fallback is
    always viable)."""
    ranked = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    for candidate in ranked:
        if candidate.viable():
            return candidate
    return None
def download_setuptools(
        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=DEFAULT_SAVE_DIR, delay=15,
        downloader_factory=get_best_downloader):
    """
    Download setuptools from a specified location and return its filename.

    `version` should be a valid setuptools version number that is available
    as an sdist for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is currently unused by this function (the body never references
    it); it is kept for backward compatibility with existing callers.
    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    zip_name = "setuptools-%s.zip" % version
    url = download_base + zip_name
    saveto = os.path.join(to_dir, zip_name)
    if not os.path.exists(saveto): # Avoid repeated downloads
        log.warn("Downloading %s", url)
        downloader = downloader_factory()
        downloader(url, saveto)
    return os.path.realpath(saveto)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package.
Returns list of command line arguments.
"""
return ['--user'] if options.user_install else []
def _parse_args():
    """Parse the command line for options.

    Returns
    -------
    optparse.Values
        Parsed options with attributes ``user_install``, ``download_base``,
        ``downloader_factory``, ``version``, and ``to_dir``.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    parser.add_option(
        '--version', help="Specify which version to download",
        default=DEFAULT_VERSION,
    )
    parser.add_option(
        '--to-dir',
        help="Directory to save (and re-use) package",
        default=DEFAULT_SAVE_DIR,
    )
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options
def _download_args(options):
"""Return args for download_setuptools function from cmdline args."""
return dict(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
to_dir=options.to_dir,
)
def main():
    """Install or upgrade setuptools and EasyInstall.

    Parses command-line options, downloads the requested setuptools
    archive, and installs it. Returns the value used as the process exit
    status (2 on installation failure, None on success).
    """
    options = _parse_args()
    archive = download_setuptools(**_download_args(options))
    return _install(archive, _build_install_args(options))
# Script entry point: propagate main()'s return value as the exit status
if __name__ == '__main__':
    sys.exit(main())
| 29.270408 | 79 | 0.66228 |
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
import warnings
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "18.5"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
DEFAULT_SAVE_DIR = os.curdir
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
with archive_context(archive_filename):
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
return 2
def _build_egg(egg, archive_filename, to_dir):
with archive_context(archive_filename):
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
with ContextualZipFile(filename) as archive:
archive.extractall()
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=DEFAULT_SAVE_DIR, download_delay=15):
to_dir = os.path.abspath(to_dir)
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
try:
import pkg_resources
pkg_resources.require("setuptools>=" + version)
return
except ImportError:
pass
except pkg_resources.DistributionNotFound:
pass
except pkg_resources.VersionConflict as VC_err:
if imported:
_conflict_bail(VC_err, version)
del pkg_resources
_unload_pkg_resources()
return _do_download(version, download_base, to_dir, download_delay)
def _conflict_bail(VC_err, version):
conflict_tmpl = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""")
msg = conflict_tmpl.format(**locals())
sys.stderr.write(msg)
sys.exit(2)
def _unload_pkg_resources():
del_modules = [
name for name in sys.modules
if name.startswith('pkg_resources')
]
for mod_name in del_modules:
del sys.modules[mod_name]
def _clean_check(cmd, target):
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
% vars()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=DEFAULT_SAVE_DIR, delay=15,
downloader_factory=get_best_downloader):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
def _build_install_args(options):
return ['--user'] if options.user_install else []
def _parse_args():
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
parser.add_option(
'--to-dir',
help="Directory to save (and re-use) package",
default=DEFAULT_SAVE_DIR,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def _download_args(options):
return dict(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
to_dir=options.to_dir,
)
def main():
options = _parse_args()
archive = download_setuptools(**_download_args(options))
return _install(archive, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
| true | true |
f72962892eadfb62069ec0593f97f3237cf5cc30 | 4,662 | py | Python | rsnet/dataset/raster.py | xyt556/rsnet | 5f20f5308f89695e9f26ee4724d5591201d0c52d | [
"MIT"
] | 1 | 2022-03-01T08:47:14.000Z | 2022-03-01T08:47:14.000Z | rsnet/dataset/raster.py | xyt556/rsnet | 5f20f5308f89695e9f26ee4724d5591201d0c52d | [
"MIT"
] | null | null | null | rsnet/dataset/raster.py | xyt556/rsnet | 5f20f5308f89695e9f26ee4724d5591201d0c52d | [
"MIT"
] | 1 | 2022-03-07T06:08:38.000Z | 2022-03-07T06:08:38.000Z | import os
import rasterio
import numpy as np
from ..utils import pair, bytescale
from .base import BaseRasterData
class RasterSampleDataset(BaseRasterData):
    """Sliding-window sample dataset over a remote-sensing raster.

    Indexing yields ``(patch, x, y)`` tuples where ``patch`` is a numpy
    array read from the raster with shape ``(H, W, C)`` (or ``(C, H, W)``
    when ``data_format='channel_first'``) and ``(x, y)`` is the left-top
    corner of the window in pixel coordinates.

    Args:
        fname: Path to the raster file (opened by ``BaseRasterData``).
        win_size: Window size; an int or an ``(x, y)`` pair.
        step_size: Stride between windows; an int or an ``(x, y)`` pair.
        pad_size: Extra context read around each window; int or ``(x, y)``.
        band_index: 1-based band indices to read (rasterio convention);
            None selects all bands.
        to_type: Optional target dtype name; when it differs from the raster
            dtype, bands are rescaled with ``bytescale`` using per-band
            min/max and the band masks.
        data_format: 'channel_last' (H, W, C) or 'channel_first' (C, H, W).
        transform: Optional callable applied to each sampled patch.
    """

    def __init__(self,
                 fname,
                 win_size=512,
                 step_size=512,
                 pad_size=0,
                 band_index=None,
                 to_type=None,
                 data_format='channel_last',
                 transform=None):
        super().__init__(fname=fname)
        # BUGFIX: the assert message must be a single string expression.
        # Previously the f-string was a separate, discarded statement, so the
        # message shown on failure was truncated.
        assert data_format in ('channel_first', 'channel_last'), (
            "data format must be 'channel_first' or 'channel_last', "
            f"but got type {data_format}")
        self.data_format = data_format
        self.win_size = pair(win_size)
        self.step_size = pair(step_size)
        self.pad_size = pair(pad_size)
        # Band indices are 1-based, following rasterio's convention.
        total_band_index = [i + 1 for i in range(self.count)]
        if band_index is None:
            self.band_index = total_band_index
        else:
            assert set(band_index).issubset(set(total_band_index))
            self.band_index = band_index
        self.to_type = to_type
        self.window_ids = self.get_windows_info()
        self.transform = transform
        self.start = 0
        self.end = len(self)

    def get_windows_info(self):
        """Compute the left-top ``(x, y)`` corner of every sampling window.

        Windows advance by ``step_size`` and are clamped at the right/bottom
        edges so the final row/column of windows still fits inside the image.
        """
        left, top = 0, 0
        width, height = self.width, self.height
        left_top_xy = []  # left-top corner coordinates (xmin, ymin)
        while left < width:
            if left + self.win_size[0] >= width:
                left = max(width - self.win_size[0], 0)
            top = 0
            while top < height:
                if top + self.win_size[1] >= height:
                    top = max(height - self.win_size[1], 0)
                left_top_xy.append((left, top))
                if top + self.win_size[1] >= height:
                    break
                top += self.step_size[1]
            if left + self.win_size[0] >= width:
                break
            left += self.step_size[0]
        return left_top_xy

    def sample(self, x, y):
        """Read one padded window whose (unpadded) left-top corner is ``(x, y)``.

        Returns a numpy array of shape ``(win_h + 2*ypad, win_w + 2*xpad, C)``
        (transposed when ``data_format='channel_first'``). Padded regions that
        fall outside the raster remain zero-filled.
        """
        xsize, ysize = self.win_size
        xpad, ypad = self.pad_size
        xmin = x - xpad
        ymin = y - ypad
        # Clamp the padded window to the raster bounds, remembering where the
        # valid data must be placed inside the zero-filled output buffer.
        left, top = 0, 0
        if xmin < 0:
            xmin = 0
            xsize += xpad
            left = xpad
        elif xmin + xsize + 2 * xpad > self.width:
            xsize += xpad
        else:
            xsize += 2 * xpad
        if ymin < 0:
            ymin = 0
            ysize += ypad
            top = ypad
        elif ymin + ysize + 2 * ypad > self.height:
            ysize += ypad
        else:
            ysize += 2 * ypad
        # rasterio window arguments: (col_off, row_off, width, height)
        window = rasterio.windows.Window(xmin, ymin, xsize, ysize)
        bands = [self._band.read(k, window=window) for k in self.band_index]
        if self.to_type and np.dtype(self.to_type) != np.dtype(self.dtype):
            # Rescale each band to the target dtype using per-band min/max,
            # honouring the band masks (nodata pixels).
            bmin, bmax = self.minmax
            msks = [
                self._band.read_masks(k, window=window)
                for k in self.band_index
            ]
            bands = [
                bytescale(b, msk, bmin[i], bmax[i], dtype=self.to_type)
                for i, (b, msk) in enumerate(zip(bands, msks))
            ]
        tile_image = np.stack(bands, axis=-1)
        # BUGFIX: both spatial dims of the output buffer were previously
        # derived from win_size[0]; for non-square windows that produced a
        # wrongly shaped buffer. Rows use win_size[1] + 2*ypad, columns use
        # win_size[0] + 2*xpad.
        img = np.zeros(
            (self.win_size[1] + 2 * ypad, self.win_size[0] + 2 * xpad,
             len(self.band_index)),
            dtype=tile_image.dtype)
        img[top:top + ysize, left:left + xsize] = tile_image
        if self.data_format == 'channel_first':
            img = img.transpose(2, 0, 1)
        return img

    def __getitem__(self, idx):
        """Return ``(patch, x, y)`` for sampling window number ``idx``."""
        x, y = self.window_ids[idx]
        img = self.sample(x, y)
        if self.transform is not None:
            img = self.transform(img)
        return img, x, y

    def __len__(self):
        """Number of sampling windows covering the raster."""
        return len(self.window_ids)

    @property
    def step(self):
        """(x, y) stride between consecutive windows."""
        return self.step_size

    @property
    def pad(self):
        """(x, y) padding read around each window."""
        return self.pad_size
| 29.506329 | 80 | 0.515444 | import os
import rasterio
import numpy as np
from ..utils import pair, bytescale
from .base import BaseRasterData
class RasterSampleDataset(BaseRasterData):
def __init__(self,
fname,
win_size=512,
step_size=512,
pad_size=0,
band_index=None,
to_type=None,
data_format='channel_last',
transform=None):
super().__init__(fname=fname)
assert data_format in (
'channel_first',
'channel_last'), "data format must be 'channel_first' or "
f"'channel_last', but got type {data_format}"
self.data_format = data_format
self.win_size = pair(win_size)
self.step_size = pair(step_size)
self.pad_size = pair(pad_size)
total_band_index = [i + 1 for i in range(self.count)]
if band_index is None:
self.band_index = total_band_index
else:
assert set(band_index).issubset(set(total_band_index))
self.band_index = band_index
self.to_type = to_type
self.window_ids = self.get_windows_info()
self.transform = transform
self.start = 0
self.end = len(self)
def get_windows_info(self):
left, top = 0, 0
width, height = self.width, self.height
left_top_xy = []
while left < width:
if left + self.win_size[0] >= width:
left = max(width - self.win_size[0], 0)
top = 0
while top < height:
if top + self.win_size[1] >= height:
top = max(height - self.win_size[1], 0)
left_top_xy.append((left, top))
if top + self.win_size[1] >= height:
break
else:
top += self.step_size[1]
if left + self.win_size[0] >= width:
break
else:
left += self.step_size[0]
return left_top_xy
def sample(self, x, y):
xmin, ymin = x, y
xsize, ysize = self.win_size
xpad, ypad = self.pad_size
xmin -= xpad
ymin -= ypad
left, top = 0, 0
if xmin < 0:
xmin = 0
xsize += xpad
left = xpad
elif xmin + xsize + 2 * xpad > self.width:
xsize += xpad
else:
xsize += 2 * xpad
if ymin < 0:
ymin = 0
ysize += ypad
top = ypad
elif ymin + ysize + 2 * ypad > self.height:
ysize += ypad
else:
ysize += 2 * ypad
window = rasterio.windows.Window(xmin, ymin, xsize, ysize)
bands = [self._band.read(k, window=window) for k in self.band_index]
if self.to_type and np.dtype(self.to_type) != np.dtype(self.dtype):
bmin, bmax = self.minmax
msks = [
self._band.read_masks(k, window=window)
for k in self.band_index
]
bands = [
bytescale(b, msk, bmin[i], bmax[i], dtype=self.to_type)
for i, (b, msk) in enumerate(zip(bands, msks))
]
tile_image = np.stack(bands, axis=-1)
img = np.zeros(
(self.win_size[0] + 2 * xpad, self.win_size[0] + 2 * ypad,
len(self.band_index)),
dtype=tile_image.dtype)
img[top:top + ysize, left:left + xsize] = tile_image
if self.data_format == 'channel_first':
img = img.transpose(2, 0, 1)
return img
def __getitem__(self, idx):
x, y = self.window_ids[idx]
img = self.sample(x, y)
if self.transform is not None:
img = self.transform(img)
return img, x, y
def __len__(self):
return len(self.window_ids)
@property
def step(self):
return self.step_size
    @property
    def pad(self):
        """Per-side padding ``(x, y)`` in pixels added around each window."""
        return self.pad_size
| true | true |
f729638d4432f934d5191c5b2c629e6a492d192e | 12,736 | py | Python | sedfitter/sed/sed.py | KainRasleafar/sedfitter | 4f0e9e46f7903a853166835bb74857cc15eef219 | [
"BSD-2-Clause"
] | 15 | 2015-07-04T02:00:30.000Z | 2021-05-13T09:03:10.000Z | sedfitter/sed/sed.py | KainRasleafar/sedfitter | 4f0e9e46f7903a853166835bb74857cc15eef219 | [
"BSD-2-Clause"
] | 45 | 2015-04-27T20:19:22.000Z | 2022-01-28T06:24:31.000Z | sedfitter/sed/sed.py | KainRasleafar/sedfitter | 4f0e9e46f7903a853166835bb74857cc15eef219 | [
"BSD-2-Clause"
] | 19 | 2015-04-21T15:32:04.000Z | 2022-03-02T21:53:46.000Z | from __future__ import print_function, division
import os
import numpy as np
from astropy import log
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import interp1d
from astropy import units as u
from ..utils.validator import validate_array
from .helpers import parse_unit_safe, assert_allclose_quantity, convert_flux
__all__ = ['SED']
class SED(object):
    """
    A model spectral energy distribution.

    Stores the wavelength/frequency grid, the apertures, and the flux and
    flux-error arrays (shape ``(n_ap, n_wav)``), together with the model
    name and the distance at which the fluxes are defined.
    """

    def __init__(self):

        # Metadata
        self.name = None
        self.distance = None

        # Spectral info
        self.wav = None
        self.nu = None

        # Apertures
        self.apertures = None

        # Fluxes
        self.flux = None
        self.error = None

    def __eq__(self, other):
        """
        Two SEDs are equal when all metadata and data arrays agree (to
        within the tolerance of ``assert_allclose_quantity``).
        """
        try:
            assert self.name == other.name
            assert_allclose_quantity(self.distance, other.distance)
            assert_allclose_quantity(self.wav, other.wav)
            assert_allclose_quantity(self.nu, other.nu)
            assert_allclose_quantity(self.apertures, other.apertures)
            assert_allclose_quantity(self.flux, other.flux)
            assert_allclose_quantity(self.error, other.error)
        except AssertionError:
            # Bug fix: this previously re-raised the AssertionError, so
            # ``sed1 == sed2`` could never evaluate to False (the return
            # below was unreachable dead code).
            return False
        else:
            return True

    def copy(self):
        """Return an independent deep copy of this SED."""
        from copy import deepcopy
        return deepcopy(self)

    def scale_to_distance(self, distance):
        """
        Returns the SED scaled to distance `distance`

        Parameters
        ----------
        distance : float
            The distance in cm

        Returns
        -------
        sed : SED
            The SED, scaled to the new distance
        """
        sed = self.copy()
        sed.distance = distance * u.cm
        # Fluxes fall off as the inverse square of the distance.
        sed.flux = sed.flux * (self.distance.to(u.cm) / sed.distance) ** 2
        sed.error = sed.error * (self.distance.to(u.cm) / sed.distance) ** 2
        return sed

    def scale_to_av(self, av, law):
        """
        Return a copy of the SED reddened by ``av`` magnitudes of visual
        extinction, using extinction law ``law`` (a callable mapping
        wavelength to the per-magnitude attenuation exponent).
        """
        sed = self.copy()
        sed.flux = sed.flux * 10. ** (av * law(sed.wav))
        sed.error = sed.error * 10. ** (av * law(sed.wav))
        return sed

    @property
    def wav(self):
        """
        The wavelengths at which the SED is defined
        """
        # Derive from the frequencies when only those were set.
        if self._wav is None and self._nu is not None:
            return self._nu.to(u.micron, equivalencies=u.spectral())
        else:
            return self._wav

    @wav.setter
    def wav(self, value):
        if value is None:
            self._wav = None
        else:
            self._wav = validate_array('wav', value, domain='positive', ndim=1,
                                       shape=None if self.nu is None else (len(self.nu),),
                                       physical_type='length')

    @property
    def nu(self):
        """
        The frequencies at which the SED is defined
        """
        # Derive from the wavelengths when only those were set.
        if self._nu is None and self._wav is not None:
            return self._wav.to(u.Hz, equivalencies=u.spectral())
        else:
            return self._nu

    @nu.setter
    def nu(self, value):
        if value is None:
            self._nu = None
        else:
            self._nu = validate_array('nu', value, domain='positive', ndim=1,
                                      shape=None if self.wav is None else (len(self.wav),),
                                      physical_type='frequency')

    @property
    def apertures(self):
        """
        The apertures at which the SED is defined
        """
        return self._apertures

    @apertures.setter
    def apertures(self, value):
        if value is None:
            self._apertures = None
        else:
            self._apertures = validate_array('apertures', value, domain='positive',
                                             ndim=1, physical_type='length')

    @property
    def flux(self):
        """
        The SED fluxes
        """
        return self._flux

    @flux.setter
    def flux(self, value):
        if value is None:
            self._flux = value
        else:
            self._flux = validate_array('flux', value, ndim=2,
                                        shape=(self.n_ap, self.n_wav),
                                        physical_type=('power', 'flux', 'spectral flux density'))

    @property
    def error(self):
        """
        The convolved flux errors
        """
        return self._error

    @error.setter
    def error(self, value):
        if value is None:
            self._error = value
        else:
            self._error = validate_array('error', value, ndim=2,
                                         shape=(self.n_ap, self.n_wav),
                                         physical_type=('power', 'flux', 'spectral flux density'))

    @property
    def n_ap(self):
        """Number of apertures (1 when no aperture axis is defined)."""
        if self.apertures is None:
            return 1
        else:
            return len(self.apertures)

    @property
    def n_wav(self):
        """Number of wavelength points, or None when not yet set."""
        if self.wav is None:
            return None
        else:
            return len(self.wav)

    @classmethod
    def read(cls, filename, unit_wav=u.micron, unit_freq=u.Hz,
             unit_flux=u.erg / u.cm ** 2 / u.s, order='nu'):
        """
        Read an SED from a FITS file.

        Parameters
        ----------
        filename: str
            The name of the file to read the SED from.
        unit_wav: `~astropy.units.Unit`, optional
            The units to convert the wavelengths to.
        unit_freq: `~astropy.units.Unit`, optional
            The units to convert the frequency to.
        unit_flux: `~astropy.units.Unit`, optional
            The units to convert the flux to.
        order: str, optional
            Whether to sort the SED by increasing wavelength (`wav`) or
            frequency ('nu').

        Raises
        ------
        ValueError
            If ``order`` is neither 'nu' nor 'wav'.
        """

        # Instantiate SED class
        sed = cls()

        # Assume that the filename may be missing the .gz extension
        if not os.path.exists(filename) and os.path.exists(filename + '.gz'):
            filename += ".gz"

        # Open FITS file; fixed a resource leak -- the HDUList was never
        # closed before. memmap=False, so the arrays extracted below stay
        # valid after the file is closed.
        with fits.open(filename, memmap=False) as hdulist:

            # Extract model name
            sed.name = hdulist[0].header['MODEL']

            # Check if distance is specified in header, otherwise assume 1kpc
            if 'DISTANCE' in hdulist[0].header:
                sed.distance = hdulist[0].header['DISTANCE'] * u.cm
            else:
                log.debug("No distance found in SED file, assuming 1kpc")
                sed.distance = 1. * u.kpc

            # Extract SED values
            wav = hdulist[1].data.field('WAVELENGTH') * parse_unit_safe(hdulist[1].columns[0].unit)
            nu = hdulist[1].data.field('FREQUENCY') * parse_unit_safe(hdulist[1].columns[1].unit)
            ap = hdulist[2].data.field('APERTURE') * parse_unit_safe(hdulist[2].columns[0].unit)
            flux = hdulist[3].data.field('TOTAL_FLUX') * parse_unit_safe(hdulist[3].columns[0].unit)
            error = hdulist[3].data.field('TOTAL_FLUX_ERR') * parse_unit_safe(hdulist[3].columns[1].unit)

        # Set SED attributes
        sed.apertures = ap

        # Convert wavelength and frequencies to requested units
        sed.wav = wav.to(unit_wav)
        sed.nu = nu.to(unit_freq)

        # Set fluxes
        sed.flux = convert_flux(nu, flux, unit_flux, distance=sed.distance)
        sed.error = convert_flux(nu, error, unit_flux, distance=sed.distance)

        # Sort SED
        if order not in ('nu', 'wav'):
            raise ValueError('order should be nu or wav')

        if (order == 'nu' and sed.nu[0] > sed.nu[-1]) or \
           (order == 'wav' and sed.wav[0] > sed.wav[-1]):
            sed.wav = sed.wav[::-1]
            sed.nu = sed.nu[::-1]
            sed.flux = sed.flux[..., ::-1]
            sed.error = sed.error[..., ::-1]

        return sed

    def write(self, filename, overwrite=False):
        """
        Write an SED to a FITS file.

        Parameters
        ----------
        filename: str
            The name of the file to write the SED to.
        overwrite: bool, optional
            Whether to overwrite an existing file.

        Raises
        ------
        ValueError
            If any of name, distance, wavelengths, fluxes or errors is
            not set.
        """

        # Create first HDU with meta-data
        hdu0 = fits.PrimaryHDU()

        if self.name is None:
            raise ValueError("Model name is not set")
        else:
            hdu0.header['MODEL'] = self.name

        if self.distance is None:
            raise ValueError("Model distance is not set")
        else:
            hdu0.header['DISTANCE'] = self.distance.to(u.cm).value

        hdu0.header['NAP'] = self.n_ap
        hdu0.header['NWAV'] = self.n_wav

        # Create wavelength table
        twav = Table()
        if self.wav is None:
            raise ValueError("Wavelengths are not set")
        else:
            twav['WAVELENGTH'] = self.wav
        if self.nu is None:
            raise ValueError("Frequencies are not set")
        else:
            twav['FREQUENCY'] = self.nu
        twav.sort('FREQUENCY')

        # TODO: here sorting needs to be applied to fluxes too?

        hdu1 = fits.BinTableHDU(np.array(twav))
        hdu1.columns[0].unit = self.wav.unit.to_string(format='fits')
        hdu1.columns[1].unit = self.nu.unit.to_string(format='fits')
        hdu1.header['EXTNAME'] = "WAVELENGTHS"

        # Create aperture table
        tap = Table()
        if self.apertures is None:
            # Sentinel value for SEDs with no aperture axis.
            tap['APERTURE'] = [1.e-30]
        else:
            tap['APERTURE'] = self.apertures

        hdu2 = fits.BinTableHDU(np.array(tap))
        if self.apertures is None:
            hdu2.columns[0].unit = 'cm'
        else:
            hdu2.columns[0].unit = self.apertures.unit.to_string(format='fits')
        hdu2.header['EXTNAME'] = "APERTURES"

        # Create flux table. Bug fix: the flux column used to be assigned
        # once more *before* the None check, so a missing flux crashed in
        # the Table assignment instead of raising the intended ValueError.
        tflux = Table()
        if self.flux is None:
            raise ValueError("Fluxes are not set")
        else:
            tflux['TOTAL_FLUX'] = self.flux
        if self.error is None:
            raise ValueError("Errors are not set")
        else:
            tflux['TOTAL_FLUX_ERR'] = self.error

        hdu3 = fits.BinTableHDU(np.array(tflux))
        hdu3.columns[0].unit = self.flux.unit.to_string(format='fits')
        hdu3.columns[1].unit = self.error.unit.to_string(format='fits')
        hdu3.header['EXTNAME'] = "SEDS"

        hdus = [hdu0, hdu1, hdu2, hdu3]

        # Create overall FITS file
        # NOTE(review): `clobber` was renamed to `overwrite` in astropy 1.3
        # and removed in 3.0 -- kept for the astropy version this project
        # pins; confirm before upgrading astropy.
        hdulist = fits.HDUList(hdus)
        hdulist.writeto(filename, clobber=overwrite)

    def interpolate(self, apertures):
        """
        Interpolate the SED to different apertures

        Values above the largest defined aperture are clipped to it
        (NOTE(review): this mutates the caller's array in place);
        values below the smallest defined aperture raise.
        """

        # If there is only one aperture, we can't interpolate, we can only repeat
        if self.n_ap == 1:
            return np.repeat(self.flux[0, :], len(apertures)).reshape(self.n_wav, len(apertures))

        # Create interpolating function
        flux_interp = interp1d(self.apertures, self.flux.swapaxes(0, 1))

        # If any apertures are larger than the defined max, reset to max
        apertures[apertures > self.apertures.max()] = self.apertures.max()

        # If any apertures are smaller than the defined min, raise Exception
        if np.any(apertures < self.apertures.min()):
            raise Exception("Aperture(s) requested too small")

        return flux_interp(apertures)

    def interpolate_variable(self, wavelengths, apertures):
        """
        Interpolate the SED to a variable aperture as a function of
        wavelength. This method should be called with an interpolating
        function for aperture as a function of wavelength, in log10 space.
        """

        if self.n_ap == 1:
            return self.flux[0, :]

        sed_apertures = self.apertures.to(u.au).value
        sed_wav = self.wav.to(u.micron).value

        # If any apertures are larger than the defined max, reset to max
        # (the 0.999 factor keeps the value strictly inside the
        # interpolation range; also mutates the caller's array in place)
        apertures[apertures > sed_apertures.max()] = sed_apertures.max() * 0.999

        # If any apertures are smaller than the defined min, raise Exception
        if np.any(apertures < sed_apertures.min()):
            raise Exception("Aperture(s) requested too small")

        # Find wavelength order
        order = np.argsort(wavelengths)

        # Interpolate apertures vs wavelength
        log10_ap_interp = interp1d(np.log10(wavelengths[order]), np.log10(apertures[order]), bounds_error=False, fill_value=np.nan)

        # Create interpolating function
        flux_interp = interp1d(sed_apertures, self.flux.swapaxes(0, 1))

        # Interpolate the apertures
        apertures = 10. ** log10_ap_interp(np.log10(sed_wav))

        # Extrapolate on either side (constant beyond the defined range)
        apertures[np.log10(sed_wav) < log10_ap_interp.x[0]] = 10. ** log10_ap_interp.y[0]
        apertures[np.log10(sed_wav) > log10_ap_interp.x[-1]] = 10. ** log10_ap_interp.y[-1]

        # Interpolate and return only diagonal elements
        return flux_interp(apertures).diagonal()
| 31.760599 | 131 | 0.568153 | from __future__ import print_function, division
import os
import numpy as np
from astropy import log
from astropy.io import fits
from astropy.table import Table
from scipy.interpolate import interp1d
from astropy import units as u
from ..utils.validator import validate_array
from .helpers import parse_unit_safe, assert_allclose_quantity, convert_flux
__all__ = ['SED']
class SED(object):
def __init__(self):
self.name = None
self.distance = None
self.wav = None
self.nu = None
self.apertures = None
self.flux = None
self.error = None
def __eq__(self, other):
try:
assert self.name == other.name
assert_allclose_quantity(self.distance, other.distance)
assert_allclose_quantity(self.wav, other.wav)
assert_allclose_quantity(self.nu, other.nu)
assert_allclose_quantity(self.apertures, other.apertures)
assert_allclose_quantity(self.flux, other.flux)
assert_allclose_quantity(self.error, other.error)
except AssertionError:
raise
return False
else:
return True
def copy(self):
from copy import deepcopy
return deepcopy(self)
def scale_to_distance(self, distance):
sed = self.copy()
sed.distance = distance * u.cm
sed.flux = sed.flux * (self.distance.to(u.cm) / sed.distance) ** 2
sed.error = sed.error * (self.distance.to(u.cm) / sed.distance) ** 2
return sed
def scale_to_av(self, av, law):
sed = self.copy()
sed.flux = sed.flux * 10. ** (av * law(sed.wav))
sed.error = sed.error * 10. ** (av * law(sed.wav))
return sed
@property
def wav(self):
if self._wav is None and self._nu is not None:
return self._nu.to(u.micron, equivalencies=u.spectral())
else:
return self._wav
@wav.setter
def wav(self, value):
if value is None:
self._wav = None
else:
self._wav = validate_array('wav', value, domain='positive', ndim=1,
shape=None if self.nu is None else (len(self.nu),),
physical_type='length')
@property
def nu(self):
if self._nu is None and self._wav is not None:
return self._wav.to(u.Hz, equivalencies=u.spectral())
else:
return self._nu
@nu.setter
def nu(self, value):
if value is None:
self._nu = None
else:
self._nu = validate_array('nu', value, domain='positive', ndim=1,
shape=None if self.wav is None else (len(self.wav),),
physical_type='frequency')
@property
def apertures(self):
return self._apertures
@apertures.setter
def apertures(self, value):
if value is None:
self._apertures = None
else:
self._apertures = validate_array('apertures', value, domain='positive',
ndim=1, physical_type='length')
@property
def flux(self):
return self._flux
@flux.setter
def flux(self, value):
if value is None:
self._flux = value
else:
self._flux = validate_array('flux', value, ndim=2,
shape=(self.n_ap, self.n_wav),
physical_type=('power', 'flux', 'spectral flux density'))
@property
def error(self):
return self._error
@error.setter
def error(self, value):
if value is None:
self._error = value
else:
self._error = validate_array('error', value, ndim=2,
shape=(self.n_ap, self.n_wav),
physical_type=('power', 'flux', 'spectral flux density'))
@property
def n_ap(self):
if self.apertures is None:
return 1
else:
return len(self.apertures)
@property
def n_wav(self):
if self.wav is None:
return None
else:
return len(self.wav)
@classmethod
def read(cls, filename, unit_wav=u.micron, unit_freq=u.Hz,
unit_flux=u.erg / u.cm ** 2 / u.s, order='nu'):
sed = cls()
if not os.path.exists(filename) and os.path.exists(filename + '.gz'):
filename += ".gz"
hdulist = fits.open(filename, memmap=False)
sed.name = hdulist[0].header['MODEL']
if 'DISTANCE' in hdulist[0].header:
sed.distance = hdulist[0].header['DISTANCE'] * u.cm
else:
log.debug("No distance found in SED file, assuming 1kpc")
sed.distance = 1. * u.kpc
wav = hdulist[1].data.field('WAVELENGTH') * parse_unit_safe(hdulist[1].columns[0].unit)
nu = hdulist[1].data.field('FREQUENCY') * parse_unit_safe(hdulist[1].columns[1].unit)
ap = hdulist[2].data.field('APERTURE') * parse_unit_safe(hdulist[2].columns[0].unit)
flux = hdulist[3].data.field('TOTAL_FLUX') * parse_unit_safe(hdulist[3].columns[0].unit)
error = hdulist[3].data.field('TOTAL_FLUX_ERR') * parse_unit_safe(hdulist[3].columns[1].unit)
sed.apertures = ap
sed.wav = wav.to(unit_wav)
sed.nu = nu.to(unit_freq)
sed.flux = convert_flux(nu, flux, unit_flux, distance=sed.distance)
sed.error = convert_flux(nu, error, unit_flux, distance=sed.distance)
if order not in ('nu', 'wav'):
raise ValueError('order should be nu or wav')
if (order == 'nu' and sed.nu[0] > sed.nu[-1]) or \
(order == 'wav' and sed.wav[0] > sed.wav[-1]):
sed.wav = sed.wav[::-1]
sed.nu = sed.nu[::-1]
sed.flux = sed.flux[..., ::-1]
sed.error = sed.error[..., ::-1]
return sed
def write(self, filename, overwrite=False):
hdu0 = fits.PrimaryHDU()
if self.name is None:
raise ValueError("Model name is not set")
else:
hdu0.header['MODEL'] = self.name
if self.distance is None:
raise ValueError("Model distance is not set")
else:
hdu0.header['DISTANCE'] = self.distance.to(u.cm).value
hdu0.header['NAP'] = self.n_ap
hdu0.header['NWAV'] = self.n_wav
twav = Table()
if self.wav is None:
raise ValueError("Wavelengths are not set")
else:
twav['WAVELENGTH'] = self.wav
if self.nu is None:
raise ValueError("Frequencies are not set")
else:
twav['FREQUENCY'] = self.nu
twav.sort('FREQUENCY')
hdu1 = fits.BinTableHDU(np.array(twav))
hdu1.columns[0].unit = self.wav.unit.to_string(format='fits')
hdu1.columns[1].unit = self.nu.unit.to_string(format='fits')
hdu1.header['EXTNAME'] = "WAVELENGTHS"
tap = Table()
if self.apertures is None:
tap['APERTURE'] = [1.e-30]
else:
tap['APERTURE'] = self.apertures
hdu2 = fits.BinTableHDU(np.array(tap))
if self.apertures is None:
hdu2.columns[0].unit = 'cm'
else:
hdu2.columns[0].unit = self.apertures.unit.to_string(format='fits')
hdu2.header['EXTNAME'] = "APERTURES"
tflux = Table()
tflux['TOTAL_FLUX'] = self.flux
if self.flux is None:
raise ValueError("Fluxes are not set")
else:
tflux['TOTAL_FLUX'] = self.flux
if self.error is None:
raise ValueError("Errors are not set")
else:
tflux['TOTAL_FLUX_ERR'] = self.error
hdu3 = fits.BinTableHDU(np.array(tflux))
hdu3.columns[0].unit = self.flux.unit.to_string(format='fits')
hdu3.columns[1].unit = self.error.unit.to_string(format='fits')
hdu3.header['EXTNAME'] = "SEDS"
hdus = [hdu0, hdu1, hdu2, hdu3]
hdulist = fits.HDUList(hdus)
hdulist.writeto(filename, clobber=overwrite)
def interpolate(self, apertures):
if self.n_ap == 1:
return np.repeat(self.flux[0, :], len(apertures)).reshape(self.n_wav, len(apertures))
# Create interpolating function
flux_interp = interp1d(self.apertures, self.flux.swapaxes(0, 1))
# If any apertures are larger than the defined max, reset to max
apertures[apertures > self.apertures.max()] = self.apertures.max()
# If any apertures are smaller than the defined min, raise Exception
if np.any(apertures < self.apertures.min()):
raise Exception("Aperture(s) requested too small")
return flux_interp(apertures)
def interpolate_variable(self, wavelengths, apertures):
if self.n_ap == 1:
return self.flux[0, :]
sed_apertures = self.apertures.to(u.au).value
sed_wav = self.wav.to(u.micron).value
# If any apertures are larger than the defined max, reset to max
apertures[apertures > sed_apertures.max()] = sed_apertures.max() * 0.999
# If any apertures are smaller than the defined min, raise Exception
if np.any(apertures < sed_apertures.min()):
raise Exception("Aperture(s) requested too small")
# Find wavelength order
order = np.argsort(wavelengths)
# Interpolate apertures vs wavelength
log10_ap_interp = interp1d(np.log10(wavelengths[order]), np.log10(apertures[order]), bounds_error=False, fill_value=np.nan)
# Create interpolating function
flux_interp = interp1d(sed_apertures, self.flux.swapaxes(0, 1))
# Interpolate the apertures
apertures = 10. ** log10_ap_interp(np.log10(sed_wav))
# Extrapolate on either side
apertures[np.log10(sed_wav) < log10_ap_interp.x[0]] = 10. ** log10_ap_interp.y[0]
apertures[np.log10(sed_wav) > log10_ap_interp.x[-1]] = 10. ** log10_ap_interp.y[-1]
# Interpolate and return only diagonal elements
return flux_interp(apertures).diagonal()
| true | true |
f729647581b758f0c2709642d4b1e0819a3ee950 | 1,310 | py | Python | shopyo/config.py | ChristianCelora/shopyo | 9e602b1f6bc118850875c33f7f2ae5d179767c88 | [
"MIT"
] | 1 | 2020-12-23T18:22:21.000Z | 2020-12-23T18:22:21.000Z | shopyo/config.py | ChristianCelora/shopyo | 9e602b1f6bc118850875c33f7f2ae5d179767c88 | [
"MIT"
] | null | null | null | shopyo/config.py | ChristianCelora/shopyo | 9e602b1f6bc118850875c33f7f2ae5d179767c88 | [
"MIT"
] | null | null | null | import os
base_path = os.path.dirname(os.path.abspath(__file__))
class Config:
"""Parent configuration class."""
DEBUG = False
SQLALCHEMY_DATABASE_URI = "sqlite:///shopyo.db"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = os.urandom(24)
BASE_DIR = base_path
STATIC = os.path.join(BASE_DIR, "static")
UPLOADED_PATH_IMAGE = os.path.join(STATIC, "uploads", "images")
UPLOADED_PATH_THUM = os.path.join(STATIC, "uploads", "thumbs")
UPLOADED_PRODUCTPHOTOS_DEST = os.path.join(STATIC, "uploads", "products")
UPLOADED_CATEGORYPHOTOS_DEST = os.path.join(STATIC, "uploads", "category")
UPLOADED_SUBCATEGORYPHOTOS_DEST = os.path.join(STATIC, "uploads", "subcategory")
PASSWORD_SALT = "abcdefghi"
class DevelopmentConfig(Config):
    """Configurations for development"""

    ENV = "development"
    DEBUG = True
    # Log template resolution details to help debug template lookups.
    EXPLAIN_TEMPLATE_LOADING = True
    # NOTE(review): disables all login checks -- must never leak into a
    # production config.
    LOGIN_DISABLED = True
class TestingConfig(Config):
    """Configurations for testsing"""

    # Separate throwaway database for the test suite.
    SQLALCHEMY_DATABASE_URI = "sqlite:///testing.db"
    DEBUG = True
    LIVESERVER_PORT = 8943
    LIVESERVER_TIMEOUT = 10
    # Low bcrypt cost so password hashing is fast in tests.
    BCRYPT_LOG_ROUNDS = 4
    TESTING = True
    # CSRF tokens off so test clients can POST forms directly.
    WTF_CSRF_ENABLED = False
# Registry mapping the config name (e.g. from an environment variable)
# to the corresponding configuration class.
app_config = {
    "development": DevelopmentConfig,
    "production": Config,
    "testing": TestingConfig,
}
| 25.686275 | 84 | 0.701527 | import os
base_path = os.path.dirname(os.path.abspath(__file__))
class Config:
DEBUG = False
SQLALCHEMY_DATABASE_URI = "sqlite:///shopyo.db"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = os.urandom(24)
BASE_DIR = base_path
STATIC = os.path.join(BASE_DIR, "static")
UPLOADED_PATH_IMAGE = os.path.join(STATIC, "uploads", "images")
UPLOADED_PATH_THUM = os.path.join(STATIC, "uploads", "thumbs")
UPLOADED_PRODUCTPHOTOS_DEST = os.path.join(STATIC, "uploads", "products")
UPLOADED_CATEGORYPHOTOS_DEST = os.path.join(STATIC, "uploads", "category")
UPLOADED_SUBCATEGORYPHOTOS_DEST = os.path.join(STATIC, "uploads", "subcategory")
PASSWORD_SALT = "abcdefghi"
class DevelopmentConfig(Config):
ENV = "development"
DEBUG = True
EXPLAIN_TEMPLATE_LOADING = True
LOGIN_DISABLED = True
class TestingConfig(Config):
SQLALCHEMY_DATABASE_URI = "sqlite:///testing.db"
DEBUG = True
LIVESERVER_PORT = 8943
LIVESERVER_TIMEOUT = 10
BCRYPT_LOG_ROUNDS = 4
TESTING = True
WTF_CSRF_ENABLED = False
app_config = {
"development": DevelopmentConfig,
"production": Config,
"testing": TestingConfig,
}
| true | true |
f7296659d3869c20f2288c83afc6f34918f2cc2f | 10,867 | py | Python | main/MCN.py | Rick0514/VPR_SMCN | 7a00dc8e4de0c21438474c05a4a7be18d05367fa | [
"MIT"
] | null | null | null | main/MCN.py | Rick0514/VPR_SMCN | 7a00dc8e4de0c21438474c05a4a7be18d05367fa | [
"MIT"
] | null | null | null | main/MCN.py | Rick0514/VPR_SMCN | 7a00dc8e4de0c21438474c05a4a7be18d05367fa | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import main.utils as utils
import time
# ---------------------------- Notes ----------------------------------
# Python re-implementation of MCN (minicolumn network)
# ---------------------------- Notes ----------------------------------
class MCNParams:
    """
    Parameter struct consumed by the MCN class.

    Attributes
    ----------
    probAddCon : float
        Probability of adding a further prediction connection when a
        connection between the two columns already exists.
    nCellPerCol : int
        Number of cells per minicolumn.
    nConPerCol : int
        Number of feed-forward connections per minicolumn.
    minColActivity : float
        Minimum similarity score for a column to count as active
        during learning.
    nColPerPattern : int
        Minimum number of active columns per input pattern; new columns
        are created to reach this count.
    kActiveCol : int
        Number of top-scoring columns considered for activation.
    """
    def __init__(self, probAddCon, nCellPerCol, nConPerCol,
                 minColActivity, nColPerPattern, kActiveCol):
        self.probAddCon = probAddCon
        self.nCellPerCol = nCellPerCol
        self.nConPerCol = nConPerCol
        self.minColActivity = minColActivity
        self.nColPerPattern = nColPerPattern
        self.kActiveCol = kActiveCol
class MCN:
    """
    Minicolumn network for sequence-based descriptor encoding.

    Minicolumns are grown on the fly: each column holds feed-forward
    connections into the input SDR and a set of cells whose prediction
    connections encode temporal context. ``compute`` maps one input SDR
    to a list of winner-cell indices.
    """

    def __init__(self, params):
        # MCNParams instance with the hyper-parameters.
        self.params = params

        self.nCols = 0
        self.winnerCells = []
        self.prevWinnerCells = []
        # Fix: np.int / np.bool were removed in NumPy 1.24 -- use the
        # builtin types for all dtype arguments in this class.
        # FF[:, c] holds the input-SDR indices column c is wired to.
        self.FF = np.empty((self.params.nConPerCol, self.nCols), dtype=int)
        # Cell prediction state for the current / previous iteration
        # (zero-initialised; zero columns here, grown in createNewColumn).
        self.P = np.zeros((self.params.nCellPerCol, self.nCols), dtype=bool)
        self.prevP = np.zeros_like(self.P, dtype=bool)
        self.burstedCol = np.zeros((self.nCols, ), dtype=bool)
        # predicitionConnections[cell] lists the cells predicted by `cell`.
        self.predicitionConnections = []

    def prepareNewIteration(self):
        # winnerCells and P carry over as prev* and are reset each step.
        self.prevWinnerCells = self.winnerCells
        self.prevP = self.P
        self.winnerCells = []
        if self.nCols > 0:
            self.P = np.zeros_like(self.P)
            self.burstedCol = np.zeros_like(self.burstedCol)

    def resetPredP(self):
        # Clear the prediction state carried over from training before
        # inference starts. Bug fix: this previously used np.empty, which
        # left *uninitialized* memory that compute() then read as
        # predictions, making the first inference step nondeterministic.
        self.prevP = np.zeros((self.params.nCellPerCol, self.nCols), dtype=bool)

    def createNewColumn(self, inputSDR, nNewColumn):
        """Grow nNewColumn columns wired to random active bits of inputSDR;
        return the indices of the newly created columns."""
        nonZeroIdx = np.where(inputSDR > 0)[0]
        start_id = self.nCols
        for i in range(nNewColumn):
            self.nCols += 1
            # Sample (with replacement) active input bits as FF connections.
            sampleIdx = np.random.randint(0, len(nonZeroIdx), self.params.nConPerCol)
            tmp = nonZeroIdx[sampleIdx].reshape((-1, 1))
            self.FF = np.concatenate((self.FF, tmp), axis=1)

            newPcol = np.zeros((self.params.nCellPerCol, 1), dtype=bool)
            self.P = np.concatenate((self.P, newPcol), axis=1)
            self.prevP = np.concatenate((self.prevP, newPcol), axis=1)
            self.burstedCol = np.concatenate((self.burstedCol, np.array([0], dtype=bool)))

        for k in range(nNewColumn * self.params.nCellPerCol):
            self.predicitionConnections.append([])

        return np.arange(start_id, self.nCols)

    def compute(self, inputSDR, supressLearningFlag):
        """
        compute sequence descriptor
        :param inputSDR: binary (0/1) input vector
        :param supressLearningFlag: in case of inference, not learning
        :return: list of winner-cell indices
        """
        self.prepareNewIteration()
        # compare SDR with minicolumn feed-forward connections
        simScore = np.sum(inputSDR[self.FF], axis=0) / self.params.nConPerCol

        sort_idx = np.argsort(simScore)
        topk_sort_idx = sort_idx[-self.params.kActiveCol:]
        topk_sort_score = simScore[topk_sort_idx]

        if not supressLearningFlag:
            # if all activities are below threshold, create new columns
            # and make them active; otherwise keep the sufficiently
            # active top-k columns
            if len(simScore):
                activeCols = topk_sort_idx[topk_sort_score > self.params.minColActivity]
            else:
                activeCols = np.empty((0, ), dtype=int)

            activeCols = np.concatenate(
                (activeCols,
                 self.createNewColumn(inputSDR, max(0, self.params.nColPerPattern - len(activeCols)))))
        else:
            # in non-learning mode, take the k most active columns
            activeCols = topk_sort_idx

        for eachActiveCol in activeCols:
            predictedIdx = np.where(self.prevP[:, eachActiveCol] > 0)[0]
            if len(predictedIdx):
                # Column was predicted: its predicted cells win.
                for each_predictedIdx in predictedIdx:
                    self.activatePredictions(eachActiveCol, each_predictedIdx)
                    self.winnerCells.append(eachActiveCol * self.params.nCellPerCol + each_predictedIdx)
            else:
                # Unpredicted column: burst it.
                winnerCell = self.burst(eachActiveCol, supressLearningFlag)
                for each in winnerCell:
                    self.winnerCells.append(eachActiveCol * self.params.nCellPerCol + each)

        if not supressLearningFlag:
            self.learnPreditions()
            # predict newly learned predictions for bursted columns
            for colIdx in range(self.nCols):
                if self.burstedCol[colIdx]:
                    for i in range(self.params.nCellPerCol):
                        self.activatePredictions(colIdx, i)

        return self.winnerCells

    def activatePredictions(self, colIdx, cellIdx):
        """Mark every cell predicted by (colIdx, cellIdx) in self.P."""
        predIdx = self.predicitionConnections[colIdx * self.params.nCellPerCol + cellIdx]
        for each in predIdx:
            c = each // self.params.nCellPerCol
            r = each % self.params.nCellPerCol
            self.P[r, c] = True

    def burst(self, colIdx, supressLearningFlag):
        """Activate all predictions of a column and pick its winner cell(s)."""
        self.burstedCol[colIdx] = True
        for i in range(self.params.nCellPerCol):
            self.activatePredictions(colIdx, i)

        # winnerCell is the cell with the fewest outgoing connections
        st = colIdx * self.params.nCellPerCol
        nCon = []
        for i in range(self.params.nCellPerCol):
            nCon.append(len(self.predicitionConnections[st + i]))

        if not supressLearningFlag:
            # inhibit winning cells from the last iteration
            for i in self.prevWinnerCells:
                col = i // self.params.nCellPerCol
                if col == colIdx:
                    nCon[i % self.params.nCellPerCol] += self.params.nCellPerCol

            # find the cells with the fewest connections
            candidateIdx = [0]
            minV = nCon[0]
            for i in range(1, len(nCon)):
                if nCon[i] < minV:
                    candidateIdx = [i]
                    minV = nCon[i]
                elif nCon[i] == minV:
                    candidateIdx.append(i)

            nCan = len(candidateIdx)
            if nCan == 1:
                return [candidateIdx[0]]
            else:
                # break ties randomly
                chosenIdx = np.random.randint(0, nCan, 1)
                return [candidateIdx[chosenIdx[0]]]
        else:
            # in case of inference, return all used winner cells
            winnerIdx = np.where(np.array(nCon) > 0)[0]
            if len(winnerIdx):
                return winnerIdx

            return [np.random.randint(0, self.params.nCellPerCol, 1)[0]]

    def learnPreditions(self):
        """Grow prediction connections from previous to current winners."""
        for prevIdx in self.prevWinnerCells:
            prevIdxCol = prevIdx // self.params.nCellPerCol
            for curIdx in self.winnerCells:
                curIdxCol = curIdx // self.params.nCellPerCol
                # no self-predictions within one column
                if prevIdxCol == curIdxCol:
                    continue
                existingPredConFlag = self.checkExistingPredCon(prevIdxCol, curIdx)
                # always connect when no column-level connection exists;
                # otherwise only with probability probAddCon
                if not existingPredConFlag or np.random.rand() <= self.params.probAddCon:
                    if curIdx not in self.predicitionConnections[prevIdx]:
                        self.predicitionConnections[prevIdx].append(curIdx)

    def checkExistingPredCon(self, prevColIdx, curCellIdx):
        """Return True if any cell of prevColIdx already predicts curCellIdx."""
        st = prevColIdx * self.params.nCellPerCol
        for i in range(self.params.nCellPerCol):
            if curCellIdx in self.predicitionConnections[st + i]:
                return True
        return False

    def visualizeCon(self, displayCol=10):
        """Plot the prediction connections of the first displayCol columns."""
        plt.figure()
        dis = 5
        dCol = displayCol
        plt.title('Prediction Connections')
        plt.xlim(0, dCol * dis)
        plt.ylim(0, self.params.nCellPerCol * dis)
        for k, con in enumerate(self.predicitionConnections):
            x = k // self.params.nCellPerCol * dis
            if x >= dCol * dis:
                break
            y = k % self.params.nCellPerCol
            y = (self.params.nCellPerCol - 1 - y) * dis
            plt.plot(x, y, 'o', color='blue')
            if len(con):
                for each in con:
                    cx = each // self.params.nCellPerCol * dis
                    cy = each % self.params.nCellPerCol
                    cy = (self.params.nCellPerCol - 1 - cy) * dis
                    plt.plot([x, cx], [y, cy], '-', color='red')
def getSim(w1, w2):
    """
    Jaccard similarity between two winner-cell collections.

    :param w1: winner cells, an iterable of ints (duplicates ignored)
    :param w2: winner cells, an iterable of ints (duplicates ignored)
    :return: similarity score |w1 & w2| / |w1 | w2| in [0, 1];
        0.0 when both inputs are empty (the previous implementation
        raised ZeroDivisionError in that case)
    """
    s1 = set(w1)
    s2 = set(w2)
    union = s1 | s2
    if not union:
        return 0.0
    return len(s1 & s2) / len(union)
def runMCN(params, dbFeat, qFeat, gt):
    """
    Run the MCN pipeline on dense descriptors and return the similarity
    matrix between database and query sequences.

    :param params: MCNParams instance forwarded to MCN
    :param dbFeat: database descriptors, shape (n_db, dims)
    :param qFeat: query descriptors, shape (n_q, dims)
    :param gt: unused; kept for signature compatibility with other runners
    :return: S_mcn of shape (n_db, n_q) with pairwise Jaccard similarities
        of the winner-cell sets
    """
    _, old_dims = dbFeat.shape
    new_dims = 8192

    # Random projection used by the sparse LSBH binarization; rows are
    # L2-normalised. Note: not seeded, so results vary run to run.
    P = np.random.rand(old_dims, new_dims // 2)
    P /= np.linalg.norm(P, axis=1, keepdims=True)
    D1_slsbh = utils.getLSBH(dbFeat, P, 0.25)
    D2_slsbh = utils.getLSBH(qFeat, P, 0.25)

    # Training pass over the database sequence (learning enabled).
    mcn = MCN(params)
    train_winnerCells = []
    for i in range(D1_slsbh.shape[0]):
        train_winnerCells.append(mcn.compute(D1_slsbh[i, :], False))

    # Inference pass over the query sequence (learning suppressed);
    # prediction state from training is cleared first.
    valid_winnerCells = []
    mcn.resetPredP()
    for i in range(D2_slsbh.shape[0]):
        valid_winnerCells.append(mcn.compute(D2_slsbh[i, :], True))

    # Pairwise Jaccard similarity of query vs. database winner cells.
    S_mcn = np.zeros((dbFeat.shape[0], qFeat.shape[0]))
    for k1, each_v in enumerate(valid_winnerCells):
        for k2, each_t in enumerate(train_winnerCells):
            S_mcn[k2, k1] = getSim(each_v, each_t)

    # Release the large intermediates before returning.
    del train_winnerCells, valid_winnerCells, mcn
    return S_mcn
def runMCN_SDR(params, dbFeat, qFeat, gt):
    """
    Run the MCN pipeline on descriptors that are already binary SDRs
    (no LSBH projection step) and return the similarity matrix.

    :param params: MCNParams instance forwarded to MCN
    :param dbFeat: database SDRs, shape (n_db, dims), binary
    :param qFeat: query SDRs, shape (n_q, dims), binary
    :param gt: unused; kept for signature compatibility with runMCN
    :return: S_mcn of shape (n_db, n_q) with pairwise Jaccard similarities
        of the winner-cell sets
    """
    # Training pass over the database sequence (learning enabled).
    mcn = MCN(params)
    train_winnerCells = []
    for i in range(dbFeat.shape[0]):
        train_winnerCells.append(mcn.compute(dbFeat[i, :], False))

    # Inference pass over the query sequence (learning suppressed);
    # prediction state from training is cleared first.
    valid_winnerCells = []
    mcn.resetPredP()
    for i in range(qFeat.shape[0]):
        valid_winnerCells.append(mcn.compute(qFeat[i, :], True))

    # Pairwise Jaccard similarity of query vs. database winner cells.
    S_mcn = np.zeros((dbFeat.shape[0], qFeat.shape[0]))
    for k1, each_v in enumerate(valid_winnerCells):
        for k2, each_t in enumerate(train_winnerCells):
            S_mcn[k2, k1] = getSim(each_v, each_t)

    # Release the large intermediates before returning.
    del train_winnerCells, valid_winnerCells, mcn
    return S_mcn
| 35.168285 | 139 | 0.587743 | import numpy as np
import matplotlib.pyplot as plt
import main.utils as utils
import time
class MCNParams:
def __init__(self, probAddCon, nCellPerCol, nConPerCol,
minColActivity, nColPerPattern, kActiveCol):
self.probAddCon = probAddCon
self.nCellPerCol = nCellPerCol
self.nConPerCol = nConPerCol
self.minColActivity = minColActivity
self.nColPerPattern = nColPerPattern
self.kActiveCol = kActiveCol
class MCN:
    """Minicolumn network for sequence-based place recognition.

    Operates on sparse binary input vectors (SDRs). In learning mode new
    minicolumns are created for patterns that no existing column matches;
    in inference mode (`supressLearningFlag=True`) only existing columns
    are activated. Cells are addressed by the flat id
    `col * nCellPerCol + cell`.

    FIX: the removed NumPy aliases `np.int`/`np.bool` (gone since
    NumPy 1.24 / deprecated since 1.20) are replaced with the builtin
    `int`/`bool`, which select the same dtypes.
    """

    def __init__(self, params):
        self.params = params
        self.nCols = 0
        self.winnerCells = []
        self.prevWinnerCells = []
        # feedforward connections: one column of nConPerCol input indices per minicolumn
        self.FF = np.empty((self.params.nConPerCol, self.nCols), dtype=int)
        # per-cell prediction flags for the current / previous iteration
        self.P = np.empty((self.params.nCellPerCol, self.nCols), dtype=bool)
        self.prevP = np.empty_like(self.P, dtype=bool)
        self.burstedCol = np.empty((self.nCols, ), dtype=bool)
        # flat list: outgoing prediction connections of cell `col * nCellPerCol + cell`
        self.predicitionConnections = []

    def prepareNewIteration(self):
        """Shift current winners/predictions into `prev*` and clear current state."""
        self.prevWinnerCells = self.winnerCells
        self.prevP = self.P
        self.winnerCells = []
        if self.nCols > 0:
            self.P = np.zeros_like(self.P)
            self.burstedCol = np.zeros_like(self.burstedCol)

    def resetPredP(self):
        """Drop pending predictions (e.g. between the training and the query pass)."""
        self.prevP = np.empty((self.params.nCellPerCol, self.nCols), dtype=bool)

    def createNewColumn(self, inputSDR, nNewColumn):
        """Create `nNewColumn` minicolumns whose feedforward connections are
        sampled from the active bits of `inputSDR`; return the new column ids."""
        nonZeroIdx = np.where(inputSDR > 0)[0]
        start_id = self.nCols
        for i in range(nNewColumn):
            self.nCols += 1
            sampleIdx = np.random.randint(0, len(nonZeroIdx), self.params.nConPerCol)
            tmp = nonZeroIdx[sampleIdx].reshape((-1, 1))
            self.FF = np.concatenate((self.FF, tmp), axis=1)
            newPcol = np.zeros((self.params.nCellPerCol, 1), dtype=bool)
            self.P = np.concatenate((self.P, newPcol), axis=1)
            self.prevP = np.concatenate((self.prevP, newPcol), axis=1)
            self.burstedCol = np.concatenate((self.burstedCol, np.array([0], dtype=bool)))
        # one (initially empty) outgoing-connection list per newly created cell
        for k in range(nNewColumn * self.params.nCellPerCol):
            self.predicitionConnections.append([])
        return np.arange(start_id, self.nCols)

    def compute(self, inputSDR, supressLearningFlag):
        """Process one SDR; returns the list of winner-cell flat ids."""
        self.prepareNewIteration()
        # fraction of each column's feedforward connections hit by the input
        simScore = np.sum(inputSDR[self.FF], axis=0) / self.params.nConPerCol
        sort_idx = np.argsort(simScore)
        topk_sort_idx = sort_idx[-self.params.kActiveCol:]
        topk_sort_score = simScore[topk_sort_idx]
        if not supressLearningFlag:
            if len(simScore):
                activeCols = topk_sort_idx[topk_sort_score > self.params.minColActivity]
            else:
                activeCols = np.empty((0, ), dtype=int)
            # top-up with fresh columns until nColPerPattern columns are active
            activeCols = np.concatenate((activeCols, self.createNewColumn(inputSDR, max(0, self.params.nColPerPattern - len(activeCols)))))
        else:
            activeCols = topk_sort_idx
        for eachActiveCol in activeCols:
            predictedIdx = np.where(self.prevP[:, eachActiveCol] > 0)[0]
            if len(predictedIdx):
                # predicted cells win directly
                for each_predictedIdx in predictedIdx:
                    self.activatePredictions(eachActiveCol, each_predictedIdx)
                    self.winnerCells.append(eachActiveCol * self.params.nCellPerCol + each_predictedIdx)
            else:
                # no prediction -> burst the column and pick winner cell(s)
                winnerCell = self.burst(eachActiveCol, supressLearningFlag)
                for each in winnerCell:
                    self.winnerCells.append(eachActiveCol * self.params.nCellPerCol + each)
        if not supressLearningFlag:
            self.learnPreditions()
            # bursted columns predict from all their cells
            for colIdx in range(self.nCols):
                if self.burstedCol[colIdx]:
                    for i in range(self.params.nCellPerCol):
                        self.activatePredictions(colIdx, i)
        return self.winnerCells

    def activatePredictions(self, colIdx, cellIdx):
        """Mark every cell predicted by (colIdx, cellIdx) in `self.P`."""
        predIdx = self.predicitionConnections[colIdx * self.params.nCellPerCol + cellIdx]
        for each in predIdx:
            c = each // self.params.nCellPerCol
            r = each % self.params.nCellPerCol
            self.P[r, c] = True

    def burst(self, colIdx, supressLearningFlag):
        """Activate all predictions of a column and select its winner cell(s)."""
        self.burstedCol[colIdx] = True
        for i in range(self.params.nCellPerCol):
            self.activatePredictions(colIdx, i)
        # winnerCell is the cells with fewest connections with other cells
        st = colIdx * self.params.nCellPerCol
        nCon = []
        for i in range(self.params.nCellPerCol):
            nCon.append(len(self.predicitionConnections[st + i]))
        if not supressLearningFlag:
            # inhibit winning cells from the last iteration
            for i in self.prevWinnerCells:
                col = i // self.params.nCellPerCol
                if col == colIdx:
                    nCon[i % self.params.nCellPerCol] += self.params.nCellPerCol
            # find the fewest ones
            candidateIdx = [0]
            minV = nCon[0]
            for i in range(1, len(nCon)):
                if nCon[i] < minV:
                    candidateIdx = [i]
                    minV = nCon[i]
                elif nCon[i] == minV:
                    candidateIdx.append(i)
            nCan = len(candidateIdx)
            if nCan == 1:
                return [candidateIdx[0]]
            else:
                chosenIdx = np.random.randint(0, nCan, 1)
                return [candidateIdx[chosenIdx[0]]]
        else:
            # in case of inference, return all used winner cells
            winnerIdx = np.where(np.array(nCon) > 0)[0]
            if len(winnerIdx):
                return winnerIdx
            return [np.random.randint(0, self.params.nCellPerCol, 1)[0]]

    def learnPreditions(self):
        """Connect previous winner cells to current winner cells (cross-column)."""
        for prevIdx in self.prevWinnerCells:
            prevIdxCol = prevIdx // self.params.nCellPerCol
            for curIdx in self.winnerCells:
                curIdxCol = curIdx // self.params.nCellPerCol
                if prevIdxCol == curIdxCol:
                    continue
                existingPredConFlag = self.checkExistingPredCon(prevIdxCol, curIdx)
                # always connect when unconnected; otherwise add with probAddCon
                if not existingPredConFlag or np.random.rand() <= self.params.probAddCon:
                    if curIdx not in self.predicitionConnections[prevIdx]:
                        self.predicitionConnections[prevIdx].append(curIdx)

    def checkExistingPredCon(self, prevColIdx, curCellIdx):
        """True iff any cell of `prevColIdx` already predicts `curCellIdx`."""
        st = prevColIdx * self.params.nCellPerCol
        for i in range(self.params.nCellPerCol):
            if curCellIdx in self.predicitionConnections[st + i]:
                return True
        return False

    def visualizeCon(self, displayCol=10):
        """Plot prediction connections of the first `displayCol` columns (matplotlib)."""
        plt.figure()
        dis = 5
        dCol = displayCol
        plt.title('Prediction Connections')
        plt.xlim(0, dCol * dis)
        plt.ylim(0, self.params.nCellPerCol * dis)
        for k, con in enumerate(self.predicitionConnections):
            x = k // self.params.nCellPerCol * dis
            if x >= dCol * dis:
                break
            y = k % self.params.nCellPerCol
            y = (self.params.nCellPerCol - 1 - y) * dis
            plt.plot(x, y, 'o', color='blue')
            if len(con):
                for each in con:
                    cx = each // self.params.nCellPerCol * dis
                    cy = each % self.params.nCellPerCol
                    cy = (self.params.nCellPerCol - 1 - cy) * dis
                    plt.plot([x, cx], [y, cy], '-', color='red')
def getSim(w1, w2):
    """Jaccard similarity |w1 & w2| / |w1 | w2| of two id collections.

    FIX: defined as 1.0 when both collections are empty (two identical,
    empty activations); previously this raised ZeroDivisionError.
    """
    s1, s2 = set(w1), set(w2)
    if not s1 and not s2:
        return 1.0
    return len(s1 & s2) / len(s1 | s2)
def runMCN(params, dbFeat, qFeat, gt):
    """Run MCN place recognition on dense descriptors.

    Projects database/query features into sparse binary codes via
    `utils.getLSBH`, trains an MCN on the database codes, replays the query
    codes with learning suppressed, and returns the pairwise winner-cell
    similarity matrix S_mcn of shape (num_db, num_query).

    NOTE(review): `gt` is currently unused — the PR/AP evaluation below is
    commented out; the parameter is kept for interface compatibility.
    """
    # st = time.time()
    _, old_dims = dbFeat.shape
    # target binary code length; presumably getLSBH emits 2 * P.shape[1] bits
    # — confirm against utils.getLSBH
    new_dims = 8192
    # random unit-norm projection shared by database and query features
    P = np.random.rand(old_dims, new_dims // 2)
    P /= np.linalg.norm(P, axis=1, keepdims=True)
    D1_slsbh = utils.getLSBH(dbFeat, P, 0.25)
    D2_slsbh = utils.getLSBH(qFeat, P, 0.25)
    mcn = MCN(params)
    # training pass: database codes with learning enabled
    train_winnerCells = []
    for i in range(D1_slsbh.shape[0]):
        train_winnerCells.append(mcn.compute(D1_slsbh[i, :], False))
    valid_winnerCells = []
    # clear pending predictions so the query replay starts from a clean state
    mcn.resetPredP()
    for i in range(D2_slsbh.shape[0]):
        valid_winnerCells.append(mcn.compute(D2_slsbh[i, :], True))
    # print('Done! cost : %.3f' % (time.time() - st))
    # get similarity matrix
    S_mcn = np.zeros((dbFeat.shape[0], qFeat.shape[0]))
    for k1, each_v in enumerate(valid_winnerCells):
        for k2, each_t in enumerate(train_winnerCells):
            S_mcn[k2, k1] = getSim(each_v, each_t)
    # time_cost = time.time() - st
    # P, R = utils.drawPR(S_mcn, gt)
    # ap = utils.calAvgPred(P, R)
    del train_winnerCells, valid_winnerCells, mcn
    return S_mcn
def runMCN_SDR(params, dbFeat, qFeat, gt):
    """Same as `runMCN` but assumes `dbFeat`/`qFeat` are already sparse binary
    SDRs (one row per frame), so no random projection / binarization is done.

    Returns the (num_db, num_query) winner-cell similarity matrix.
    NOTE(review): `gt` is unused here as well (evaluation commented out).
    """
    mcn = MCN(params)
    # training pass over database SDRs (learning enabled)
    train_winnerCells = []
    for i in range(dbFeat.shape[0]):
        train_winnerCells.append(mcn.compute(dbFeat[i, :], False))
    valid_winnerCells = []
    # drop pending predictions before the inference pass
    mcn.resetPredP()
    for i in range(qFeat.shape[0]):
        valid_winnerCells.append(mcn.compute(qFeat[i, :], True))
    # print('Done! cost : %.3f' % (time.time() - st))
    # get similarity matrix
    S_mcn = np.zeros((dbFeat.shape[0], qFeat.shape[0]))
    for k1, each_v in enumerate(valid_winnerCells):
        for k2, each_t in enumerate(train_winnerCells):
            S_mcn[k2, k1] = getSim(each_v, each_t)
    # time_cost = time.time() - st
    # P, R = utils.drawPR(S_mcn, gt)
    # ap = utils.calAvgPred(P, R)
    del train_winnerCells, valid_winnerCells, mcn
    return S_mcn
| true | true |
f7296673f74ba92c24a65b3326f5d137140b8592 | 25,486 | py | Python | pytorch3dunet/augment/transforms.py | FynnBe/pytorch-3dunet | 34918e82c3afeff02360b03964de973eac3a4f75 | [
"MIT"
] | null | null | null | pytorch3dunet/augment/transforms.py | FynnBe/pytorch-3dunet | 34918e82c3afeff02360b03964de973eac3a4f75 | [
"MIT"
] | null | null | null | pytorch3dunet/augment/transforms.py | FynnBe/pytorch-3dunet | 34918e82c3afeff02360b03964de973eac3a4f75 | [
"MIT"
] | null | null | null | import importlib
import numpy as np
import torch
from scipy.ndimage import rotate, map_coordinates, gaussian_filter
from scipy.ndimage.filters import convolve
from skimage.filters import gaussian
from skimage.segmentation import find_boundaries
from torchvision.transforms import Compose
# WARN: use fixed random state for reproducibility; if you want to randomize on each run seed with `time.time()` e.g.
GLOBAL_RANDOM_STATE = np.random.RandomState(47)
class RandomFlip:
    """
    Randomly mirrors a 3D (DxHxW) or 4D (CxDxHxW) image along the spatial axes.

    Each of the three axes is tested independently against `axis_prob`. Use the
    same RandomState for raw and labeled datasets so both receive identical
    flips, otherwise the models won't converge.
    """

    def __init__(self, random_state, axis_prob=0.5, **kwargs):
        assert random_state is not None, 'RandomState cannot be None'
        self.random_state = random_state
        self.axes = (0, 1, 2)
        self.axis_prob = axis_prob

    def __call__(self, m):
        assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'

        for ax in self.axes:
            # exactly one uniform draw per axis keeps raw/label RNG streams aligned;
            # the flip fires when the draw exceeds axis_prob
            if self.random_state.uniform() > self.axis_prob:
                if m.ndim == 3:
                    m = np.flip(m, ax)
                else:
                    m = np.stack([np.flip(channel, ax) for channel in m], axis=0)

        return m
class RandomRotate90:
    """
    Rotate a 3D (DxHxW) or 4D (CxDxHxW) image by a random multiple of 90 degrees
    in the HW plane (assumes DHW axis order, hence rotation across axes (1, 2)).

    Share the RandomState between raw and labeled datasets so both receive the
    same number of quarter turns, otherwise the models won't converge.
    """

    def __init__(self, random_state, **kwargs):
        self.random_state = random_state
        # always rotate around z-axis
        self.axis = (1, 2)

    def __call__(self, m):
        assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'

        # number of quarter turns, drawn once per call
        k = self.random_state.randint(0, 4)
        if m.ndim == 3:
            return np.rot90(m, k, self.axis)
        # rotate every channel by the same k
        return np.stack([np.rot90(channel, k, self.axis) for channel in m], axis=0)
class RandomRotate:
    """
    Rotate an array by a random degrees from taken from (-angle_spectrum, angle_spectrum) interval.
    Rotation axis is picked at random from the list of provided axes.
    """

    def __init__(self, random_state, angle_spectrum=30, axes=None, mode='reflect', order=0, **kwargs):
        if axes is None:
            # default rotation planes for a (D, H, W)-style volume
            axes = [(1, 0), (2, 1), (2, 0)]
        else:
            assert isinstance(axes, list) and len(axes) > 0

        self.random_state = random_state
        self.angle_spectrum = angle_spectrum
        self.axes = axes
        self.mode = mode  # out-of-bounds handling for scipy.ndimage.rotate
        self.order = order  # spline interpolation order (use 0 for label volumes)

    def __call__(self, m):
        # one plane + one angle per call; raw/label stay in sync via a shared RandomState
        axis = self.axes[self.random_state.randint(len(self.axes))]
        angle = self.random_state.randint(-self.angle_spectrum, self.angle_spectrum)

        if m.ndim == 3:
            m = rotate(m, angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1)
        else:
            # 4D input: rotate each channel independently with the same angle/plane
            channels = [rotate(m[c], angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1) for c
                        in range(m.shape[0])]
            m = np.stack(channels, axis=0)

        return m
class RandomContrast:
    """
    Adjust contrast by scaling each voxel to `mean + alpha * (v - mean)`,
    where alpha is drawn uniformly from the `alpha` range; the result is
    clipped to [-1, 1].
    """

    def __init__(self, random_state, alpha=(0.5, 1.5), mean=0.0, execution_probability=0.1, **kwargs):
        self.random_state = random_state
        assert len(alpha) == 2
        self.alpha = alpha
        self.mean = mean
        self.execution_probability = execution_probability

    def __call__(self, m):
        # the transform fires with probability `execution_probability`
        if self.random_state.uniform() >= self.execution_probability:
            return m
        lo, hi = self.alpha
        scale = self.random_state.uniform(lo, hi)
        adjusted = self.mean + scale * (m - self.mean)
        return np.clip(adjusted, -1, 1)
# it's relatively slow, i.e. ~1s per patch of size 64x200x200, so use multiple workers in the DataLoader
# remember to use spline_order=0 when transforming the labels
class ElasticDeformation:
    """
    Apply elastic deformations of 3D patches on a per-voxel mesh. Assumes ZYX axis order (or CZYX if the data is 4D).
    Based on: https://github.com/fcalvet/image_tools/blob/master/image_augmentation.py#L62
    """

    def __init__(self, random_state, spline_order, alpha=2000, sigma=50, execution_probability=0.1, apply_3d=True,
                 **kwargs):
        """
        :param spline_order: the order of spline interpolation (use 0 for labeled images)
        :param alpha: scaling factor for deformations
        :param sigma: smoothing factor for Gaussian filter
        :param execution_probability: probability of executing this transform
        :param apply_3d: if True apply deformations in each axis
        """
        self.random_state = random_state
        self.spline_order = spline_order
        self.alpha = alpha
        self.sigma = sigma
        self.execution_probability = execution_probability
        self.apply_3d = apply_3d

    def __call__(self, m):
        if self.random_state.uniform() < self.execution_probability:
            assert m.ndim in [3, 4]

            if m.ndim == 3:
                volume_shape = m.shape
            else:
                volume_shape = m[0].shape

            if self.apply_3d:
                dz = gaussian_filter(self.random_state.randn(*volume_shape), self.sigma, mode="reflect") * self.alpha
            else:
                # FIX: was `np.zeros_like(m)` — for 4D (CxDxHxW) input that carried
                # the channel dimension into the displacement field and broke the
                # meshgrid arithmetic below; the displacement must be per-volume.
                dz = np.zeros(volume_shape)

            # smoothed random displacement fields for the Y and X axes
            dy, dx = [
                gaussian_filter(
                    self.random_state.randn(*volume_shape),
                    self.sigma, mode="reflect"
                ) * self.alpha for _ in range(2)
            ]

            z_dim, y_dim, x_dim = volume_shape
            z, y, x = np.meshgrid(np.arange(z_dim), np.arange(y_dim), np.arange(x_dim), indexing='ij')
            indices = z + dz, y + dy, x + dx

            if m.ndim == 3:
                return map_coordinates(m, indices, order=self.spline_order, mode='reflect')
            else:
                # deform every channel with the same displacement field
                channels = [map_coordinates(c, indices, order=self.spline_order, mode='reflect') for c in m]
                return np.stack(channels, axis=0)

        return m
def blur_boundary(boundary, sigma):
    """Gaussian-smooth a boundary map and re-binarize it at the 0.5 threshold."""
    smoothed = gaussian(boundary, sigma=sigma)
    return np.where(smoothed >= 0.5, 1.0, 0.0)
class CropToFixed:
    """Crop (or reflect-pad) the HW plane of a (C, Y, X) patch to a fixed size.

    If an axis is larger than the target, a random (or centered) crop is taken
    along it; if smaller, the result is reflect-padded symmetrically up to the
    target size.
    """

    def __init__(self, random_state, size=(256, 256), centered=False, **kwargs):
        self.random_state = random_state
        self.crop_y, self.crop_x = size
        self.centered = centered

    def __call__(self, m):
        def _padding(pad_total):
            # split total padding into a (before, after) pair
            half_total = pad_total // 2
            return (half_total, pad_total - half_total)

        def _rand_range_and_pad(crop_size, max_size):
            """
            Returns a tuple:
                max_value (int) for the corner dimension. The corner dimension is chosen as `self.random_state(max_value)`
                pad (int): padding in both directions; if crop_size is lt max_size the pad is 0
            """
            if crop_size < max_size:
                return max_size - crop_size, (0, 0)
            else:
                # crop covers the whole axis: corner is always 0 (randint(1)), pad the rest
                return 1, _padding(crop_size - max_size)

        def _start_and_pad(crop_size, max_size):
            # centered variant: deterministic start index instead of a random one
            if crop_size < max_size:
                return (max_size - crop_size) // 2, (0, 0)
            else:
                return 0, _padding(crop_size - max_size)

        _, y, x = m.shape
        if not self.centered:
            y_range, y_pad = _rand_range_and_pad(self.crop_y, y)
            x_range, x_pad = _rand_range_and_pad(self.crop_x, x)
            y_start = self.random_state.randint(y_range)
            x_start = self.random_state.randint(x_range)

        else:
            y_start, y_pad = _start_and_pad(self.crop_y, y)
            x_start, x_pad = _start_and_pad(self.crop_x, x)

        result = m[:, y_start:y_start + self.crop_y, x_start:x_start + self.crop_x]
        return np.pad(result, pad_width=((0, 0), y_pad, x_pad), mode='reflect')
class AbstractLabelToBoundary:
    """Base class for label -> boundary/affinity transforms.

    Subclasses provide 1D difference kernels (via `get_kernels`); a voxel is
    marked as boundary wherever the kernel response on the label volume is
    non-zero.

    FIX: the removed NumPy alias `np.int` (gone since NumPy 1.24) is replaced
    with the builtin `int`, which selects the same dtype.
    """

    # transpositions orienting a 1D difference kernel along each axis
    AXES_TRANSPOSE = [
        (0, 1, 2),  # X
        (0, 2, 1),  # Y
        (2, 0, 1)  # Z
    ]

    def __init__(self, ignore_index=None, aggregate_affinities=False, append_label=False, **kwargs):
        """
        :param ignore_index: label to be ignored in the output, i.e. after computing the boundary the label ignore_index
            will be restored where is was in the patch originally
        :param aggregate_affinities: aggregate affinities with the same offset across Z,Y,X axes
        :param append_label: if True append the orignal ground truth labels to the last channel
        """
        self.ignore_index = ignore_index
        self.aggregate_affinities = aggregate_affinities
        self.append_label = append_label

    def __call__(self, m):
        """
        Extract boundaries from a given 3D label tensor.
        :param m: input 3D tensor
        :return: binary mask, with 1-label corresponding to the boundary and 0-label corresponding to the background
        """
        assert m.ndim == 3

        kernels = self.get_kernels()
        # non-zero kernel response == labels differ across the offset -> boundary
        boundary_arr = [np.where(np.abs(convolve(m, kernel)) > 0, 1, 0) for kernel in kernels]
        channels = np.stack(boundary_arr)
        results = []
        if self.aggregate_affinities:
            assert len(kernels) % 3 == 0, "Number of kernels must be divided by 3 (one kernel per offset per Z,Y,X axes"
            # aggregate affinities with the same offset
            for i in range(0, len(kernels), 3):
                # merge across X,Y,Z axes (logical OR)
                xyz_aggregated_affinities = np.logical_or.reduce(channels[i:i + 3, ...]).astype(int)
                # recover ignore index
                xyz_aggregated_affinities = _recover_ignore_index(xyz_aggregated_affinities, m, self.ignore_index)
                results.append(xyz_aggregated_affinities)
        else:
            results = [_recover_ignore_index(channels[i], m, self.ignore_index) for i in range(channels.shape[0])]

        if self.append_label:
            # append original input data
            results.append(m)

        # stack across channel dim
        return np.stack(results, axis=0)

    @staticmethod
    def create_kernel(axis, offset):
        # 1D difference kernel (+1 at 0, -1 at `offset`), transposed onto `axis`
        k_size = offset + 1
        k = np.zeros((1, 1, k_size), dtype=int)
        k[0, 0, 0] = 1
        k[0, 0, offset] = -1
        return np.transpose(k, axis)

    def get_kernels(self):
        raise NotImplementedError
class StandardLabelToBoundary:
    """Label volume -> boundary map via `skimage.segmentation.find_boundaries`.

    Output channels: optional foreground-blob mask (`blobs=True`), the boundary
    map (optionally blurred + re-thresholded), and optionally the original
    labels (`append_label=True`).
    """

    def __init__(self, ignore_index=None, append_label=False, blur=False, sigma=1, mode='thick', blobs=False, **kwargs):
        self.ignore_index = ignore_index  # restored into the output where present in the input
        self.append_label = append_label
        self.blur = blur
        self.sigma = sigma
        self.mode = mode  # find_boundaries mode, e.g. 'thick'/'inner'/'outer'
        self.blobs = blobs

    def __call__(self, m):
        assert m.ndim == 3

        boundaries = find_boundaries(m, connectivity=2, mode=self.mode)
        if self.blur:
            boundaries = blur_boundary(boundaries, self.sigma)

        results = []
        if self.blobs:
            # binary foreground mask as an extra leading channel
            blobs = (m > 0).astype('uint8')
            results.append(_recover_ignore_index(blobs, m, self.ignore_index))

        results.append(_recover_ignore_index(boundaries, m, self.ignore_index))

        if self.append_label:
            # append original input data
            results.append(m)

        return np.stack(results, axis=0)
class BlobsWithBoundary:
    """Foreground mask in channel 0 plus one boundary channel per entry of
    `mode` (defaults to the 'thick', 'inner' and 'outer' find_boundaries modes).
    """

    def __init__(self, mode=None, append_label=False, blur=False, sigma=1, **kwargs):
        if mode is None:
            mode = ['thick', 'inner', 'outer']
        self.mode = mode
        self.append_label = append_label
        self.blur = blur  # if True, Gaussian-blur + re-threshold each boundary map
        self.sigma = sigma

    def __call__(self, m):
        assert m.ndim == 3

        # get the segmentation mask
        results = [(m > 0).astype('uint8')]

        for bm in self.mode:
            boundary = find_boundaries(m, connectivity=2, mode=bm)
            if self.blur:
                boundary = blur_boundary(boundary, self.sigma)
            results.append(boundary)

        if self.append_label:
            results.append(m)

        return np.stack(results, axis=0)
class BlobsToMask:
    """
    Returns binary mask from labeled image, i.e. every label greater than 0 is
    treated as foreground. Optionally adds an outer-boundary channel, either as
    a separate channel or folded into the mask as class 2 (`cross_entropy=True`).
    """

    def __init__(self, append_label=False, boundary=False, cross_entropy=False, **kwargs):
        self.cross_entropy = cross_entropy
        self.boundary = boundary
        self.append_label = append_label

    def __call__(self, m):
        assert m.ndim == 3

        # binary foreground mask
        mask = (m > 0).astype('uint8')
        results = [mask]

        if self.boundary:
            outer = find_boundaries(m, connectivity=2, mode='outer')
            if self.cross_entropy:
                # single-channel 3-class target: boundary voxels become class 2
                mask[outer > 0] = 2
                results = [mask]
            else:
                results.append(outer)

        if self.append_label:
            results.append(m)

        return np.stack(results, axis=0)
class RandomLabelToAffinities(AbstractLabelToBoundary):
    """
    Converts a given volumetric label array to binary mask corresponding to borders between labels.
    One specify the max_offset (thickness) of the border. Then the offset is picked at random every time you call
    the transformer (offset is picked form the range 1:max_offset) for each axis and the boundary computed.
    One may use this scheme in order to make the network more robust against various thickness of borders in the ground
    truth (think of it as a boundary denoising scheme).
    """

    def __init__(self, random_state, max_offset=10, ignore_index=None, append_label=False, z_offset_scale=2, **kwargs):
        super().__init__(ignore_index=ignore_index, append_label=append_label, aggregate_affinities=False)
        self.random_state = random_state
        self.offsets = tuple(range(1, max_offset + 1))
        self.z_offset_scale = z_offset_scale  # z offsets are divided by this factor (anisotropic volumes)

    def get_kernels(self):
        # pick a random offset and a random axis per call
        rand_offset = self.random_state.choice(self.offsets)
        axis_ind = self.random_state.randint(3)
        # scale down z-affinities due to anisotropy
        if axis_ind == 2:
            rand_offset = max(1, rand_offset // self.z_offset_scale)

        rand_axis = self.AXES_TRANSPOSE[axis_ind]
        # return a single kernel
        return [self.create_kernel(rand_axis, rand_offset)]
class LabelToAffinities(AbstractLabelToBoundary):
    """
    Converts a given volumetric label array to binary mask corresponding to borders between labels (which can be seen
    as an affinity graph: https://arxiv.org/pdf/1706.00120.pdf)
    One specify the offsets (thickness) of the border. The boundary will be computed via the convolution operator.
    """

    def __init__(self, offsets, ignore_index=None, append_label=False, aggregate_affinities=False, z_offsets=None,
                 **kwargs):
        super().__init__(ignore_index=ignore_index, append_label=append_label,
                         aggregate_affinities=aggregate_affinities)

        assert isinstance(offsets, list) or isinstance(offsets, tuple), 'offsets must be a list or a tuple'
        assert all(a > 0 for a in offsets), "'offsets must be positive"
        assert len(set(offsets)) == len(offsets), "'offsets' must be unique"
        if z_offsets is not None:
            # separate z offsets allow accounting for anisotropy along z
            assert len(offsets) == len(z_offsets), 'z_offsets length must be the same as the length of offsets'
        else:
            # if z_offsets is None just use the offsets for z-affinities
            z_offsets = list(offsets)
        self.z_offsets = z_offsets

        self.kernels = []
        # create kernel for every axis-offset pair
        for xy_offset, z_offset in zip(offsets, z_offsets):
            for axis_ind, axis in enumerate(self.AXES_TRANSPOSE):
                final_offset = xy_offset
                if axis_ind == 2:
                    final_offset = z_offset
                # create kernels for a given offset in every direction
                self.kernels.append(self.create_kernel(axis, final_offset))

    def get_kernels(self):
        return self.kernels
class LabelToZAffinities(AbstractLabelToBoundary):
    """
    Converts a given volumetric label array to binary mask corresponding to borders between labels (which can be seen
    as an affinity graph: https://arxiv.org/pdf/1706.00120.pdf)
    One specify the offsets (thickness) of the border. The boundary will be computed via the convolution operator.

    Only z-direction affinities are produced: one output channel per offset.
    """

    def __init__(self, offsets, ignore_index=None, append_label=False, **kwargs):
        super().__init__(ignore_index=ignore_index, append_label=append_label)

        assert isinstance(offsets, list) or isinstance(offsets, tuple), 'offsets must be a list or a tuple'
        assert all(a > 0 for a in offsets), "'offsets must be positive"
        assert len(set(offsets)) == len(offsets), "'offsets' must be unique"

        self.kernels = []
        z_axis = self.AXES_TRANSPOSE[2]
        # create kernels
        for z_offset in offsets:
            self.kernels.append(self.create_kernel(z_axis, z_offset))

    def get_kernels(self):
        return self.kernels
class LabelToBoundaryAndAffinities:
    """
    Combines the StandardLabelToBoundary and LabelToAffinities in the hope
    that that training the network to predict both would improve the main task: boundary prediction.
    """

    def __init__(self, xy_offsets, z_offsets, append_label=False, blur=False, sigma=1, ignore_index=None, mode='thick',
                 blobs=False, **kwargs):
        # blur only StandardLabelToBoundary results; we don't want to blur the affinities
        self.l2b = StandardLabelToBoundary(blur=blur, sigma=sigma, ignore_index=ignore_index, mode=mode, blobs=blobs)
        self.l2a = LabelToAffinities(offsets=xy_offsets, z_offsets=z_offsets, append_label=append_label,
                                     ignore_index=ignore_index)

    def __call__(self, m):
        # channel-wise concat: boundary channel(s) first, then affinity channels
        boundary = self.l2b(m)
        affinities = self.l2a(m)
        return np.concatenate((boundary, affinities), axis=0)
class FlyWingBoundary:
    """
    Use if the volume contains a single pixel boundaries between labels. Gives the single pixel boundary in the 1st
    channel and the 'thick' boundary in the 2nd channel and optional z-affinities
    """

    def __init__(self, append_label=False, thick_boundary=True, ignore_index=None, z_offsets=None, **kwargs):
        self.append_label = append_label
        self.thick_boundary = thick_boundary
        self.ignore_index = ignore_index
        self.lta = None
        if z_offsets is not None:
            self.lta = LabelToZAffinities(z_offsets, ignore_index=ignore_index)

    def __call__(self, m):
        # channel 0: the single-pixel boundary, encoded as label 0 in the input
        boundary = (m == 0).astype('uint8')
        results = [boundary]

        if self.thick_boundary:
            t_boundary = find_boundaries(m, connectivity=1, mode='outer', background=0)
            results.append(t_boundary)

        if self.lta is not None:
            # one extra channel per configured z offset
            z_affs = self.lta(m)
            for z_aff in z_affs:
                results.append(z_aff)

        if self.ignore_index is not None:
            # restore the ignore label in every output channel
            for b in results:
                b[m == self.ignore_index] = self.ignore_index

        if self.append_label:
            # append original input data
            results.append(m)

        return np.stack(results, axis=0)
class LabelToMaskAndAffinities:
    """Foreground mask (labels > background) in channel 0 followed by the
    xy/z affinity channels computed by LabelToAffinities."""

    def __init__(self, xy_offsets, z_offsets, append_label=False, background=0, ignore_index=None, **kwargs):
        self.background = background
        self.l2a = LabelToAffinities(offsets=xy_offsets, z_offsets=z_offsets, append_label=append_label,
                                     ignore_index=ignore_index)

    def __call__(self, m):
        mask = m > self.background
        # add the channel axis so the mask can be concatenated with affinities
        mask = np.expand_dims(mask.astype(np.uint8), axis=0)
        affinities = self.l2a(m)
        return np.concatenate((mask, affinities), axis=0)
class Standardize:
    """
    Z-score normalization: re-scales the input to zero mean and unit std using
    the explicitly supplied `mean` and `std`; the std is clipped from below by
    `eps` to avoid division by zero.
    """

    def __init__(self, mean, std, eps=1e-6, **kwargs):
        self.mean = mean
        self.std = std
        self.eps = eps

    def __call__(self, m):
        safe_std = np.clip(self.std, a_min=self.eps, a_max=None)
        return (m - self.mean) / safe_std
class Normalize:
    """
    Min-max scaling into the fixed range [-1, 1] given the data's
    (min_value, max_value); values outside the range are clipped.
    """

    def __init__(self, min_value, max_value, **kwargs):
        assert max_value > min_value
        self.min_value = min_value
        self.value_range = max_value - min_value

    def __call__(self, m):
        scaled = 2 * (m - self.min_value) / self.value_range - 1
        return np.clip(scaled, -1, 1)
class AdditiveGaussianNoise:
    """Add zero-mean Gaussian noise; the std is drawn uniformly from `scale`."""

    def __init__(self, random_state, scale=(0.0, 1.0), execution_probability=0.1, **kwargs):
        self.execution_probability = execution_probability
        self.random_state = random_state
        self.scale = scale

    def __call__(self, m):
        # fires with probability `execution_probability`; same draw order as before
        if self.random_state.uniform() >= self.execution_probability:
            return m
        std = self.random_state.uniform(*self.scale)
        noise = self.random_state.normal(0, std, size=m.shape)
        return m + noise
class AdditivePoissonNoise:
    """Add Poisson-distributed noise; lambda is drawn uniformly from `lam`."""

    def __init__(self, random_state, lam=(0.0, 1.0), execution_probability=0.1, **kwargs):
        self.execution_probability = execution_probability
        self.random_state = random_state
        self.lam = lam

    def __call__(self, m):
        # fires with probability `execution_probability`; same draw order as before
        if self.random_state.uniform() >= self.execution_probability:
            return m
        lam = self.random_state.uniform(*self.lam)
        noise = self.random_state.poisson(lam, size=m.shape)
        return m + noise
class ToTensor:
    """
    Convert a numpy.ndarray into a torch.Tensor. When `expand_dims` is True and
    the input is 3D (D, H, W), a singleton channel axis is prepended first
    (use for raw data).
    """

    def __init__(self, expand_dims, dtype=np.float32, **kwargs):
        self.expand_dims = expand_dims
        self.dtype = dtype

    def __call__(self, m):
        assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
        if m.ndim == 3 and self.expand_dims:
            # add channel dimension
            m = m[np.newaxis, ...]
        return torch.from_numpy(m.astype(dtype=self.dtype))
class Relabel:
    """
    Remap labels to consecutive integers, e.g. [10, 10, 0, 6, 6] -> [2, 2, 0, 1, 1]
    (order follows the sorted unique values). Useful before one-hot encoding an
    instance segmentation volume.
    """

    def __init__(self, **kwargs):
        pass

    def __call__(self, m):
        _, flat_ids = np.unique(m, return_inverse=True)
        return flat_ids.reshape(m.shape)
class Identity:
    """No-op transform: returns its input unchanged (extra kwargs ignored)."""

    def __init__(self, **kwargs):
        pass

    def __call__(self, m):
        return m
def get_transformer(config, min_value, max_value, mean, std):
    """Build a Transformer whose per-phase augmentations share the dataset statistics."""
    stats = {'min_value': min_value, 'max_value': max_value, 'mean': mean, 'std': std}
    return Transformer(config, stats)
class Transformer:
    """Builds torchvision `Compose` pipelines from a phase config.

    Every augmentation created by one Transformer instance receives a
    `np.random.RandomState` seeded with the same value, so the raw, label and
    weight pipelines apply identical random geometry.
    """

    def __init__(self, phase_config, base_config):
        self.phase_config = phase_config
        self.config_base = base_config
        # one shared seed per Transformer instance
        self.seed = GLOBAL_RANDOM_STATE.randint(10000000)

    def raw_transform(self):
        return self._create_transform('raw')

    def label_transform(self):
        return self._create_transform('label')

    def weight_transform(self):
        return self._create_transform('weight')

    @staticmethod
    def _transformer_class(class_name):
        # resolve the augmentation class by name from this module
        m = importlib.import_module('pytorch3dunet.augment.transforms')
        clazz = getattr(m, class_name)
        return clazz

    def _create_transform(self, name):
        assert name in self.phase_config, f'Could not find {name} transform'
        return Compose([
            self._create_augmentation(c) for c in self.phase_config[name]
        ])

    def _create_augmentation(self, c):
        # per-augmentation config = dataset statistics overlaid with the entry's own keys
        config = dict(self.config_base)
        config.update(c)
        config['random_state'] = np.random.RandomState(self.seed)
        aug_class = self._transformer_class(config['name'])
        return aug_class(**config)
def _recover_ignore_index(input, orig, ignore_index):
if ignore_index is not None:
mask = orig == ignore_index
input[mask] = ignore_index
return input
| 36.723343 | 122 | 0.635996 | import importlib
import numpy as np
import torch
from scipy.ndimage import rotate, map_coordinates, gaussian_filter
from scipy.ndimage.filters import convolve
from skimage.filters import gaussian
from skimage.segmentation import find_boundaries
from torchvision.transforms import Compose
GLOBAL_RANDOM_STATE = np.random.RandomState(47)
class RandomFlip:
def __init__(self, random_state, axis_prob=0.5, **kwargs):
assert random_state is not None, 'RandomState cannot be None'
self.random_state = random_state
self.axes = (0, 1, 2)
self.axis_prob = axis_prob
def __call__(self, m):
assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
for axis in self.axes:
if self.random_state.uniform() > self.axis_prob:
if m.ndim == 3:
m = np.flip(m, axis)
else:
channels = [np.flip(m[c], axis) for c in range(m.shape[0])]
m = np.stack(channels, axis=0)
return m
class RandomRotate90:
def __init__(self, random_state, **kwargs):
self.random_state = random_state
self.axis = (1, 2)
def __call__(self, m):
assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
k = self.random_state.randint(0, 4)
if m.ndim == 3:
m = np.rot90(m, k, self.axis)
else:
channels = [np.rot90(m[c], k, self.axis) for c in range(m.shape[0])]
m = np.stack(channels, axis=0)
return m
class RandomRotate:
def __init__(self, random_state, angle_spectrum=30, axes=None, mode='reflect', order=0, **kwargs):
if axes is None:
axes = [(1, 0), (2, 1), (2, 0)]
else:
assert isinstance(axes, list) and len(axes) > 0
self.random_state = random_state
self.angle_spectrum = angle_spectrum
self.axes = axes
self.mode = mode
self.order = order
def __call__(self, m):
axis = self.axes[self.random_state.randint(len(self.axes))]
angle = self.random_state.randint(-self.angle_spectrum, self.angle_spectrum)
if m.ndim == 3:
m = rotate(m, angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1)
else:
channels = [rotate(m[c], angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1) for c
in range(m.shape[0])]
m = np.stack(channels, axis=0)
return m
class RandomContrast:
def __init__(self, random_state, alpha=(0.5, 1.5), mean=0.0, execution_probability=0.1, **kwargs):
self.random_state = random_state
assert len(alpha) == 2
self.alpha = alpha
self.mean = mean
self.execution_probability = execution_probability
def __call__(self, m):
if self.random_state.uniform() < self.execution_probability:
alpha = self.random_state.uniform(self.alpha[0], self.alpha[1])
result = self.mean + alpha * (m - self.mean)
return np.clip(result, -1, 1)
return m
# remember to use spline_order=0 when transforming the labels
class ElasticDeformation:
def __init__(self, random_state, spline_order, alpha=2000, sigma=50, execution_probability=0.1, apply_3d=True,
**kwargs):
self.random_state = random_state
self.spline_order = spline_order
self.alpha = alpha
self.sigma = sigma
self.execution_probability = execution_probability
self.apply_3d = apply_3d
def __call__(self, m):
if self.random_state.uniform() < self.execution_probability:
assert m.ndim in [3, 4]
if m.ndim == 3:
volume_shape = m.shape
else:
volume_shape = m[0].shape
if self.apply_3d:
dz = gaussian_filter(self.random_state.randn(*volume_shape), self.sigma, mode="reflect") * self.alpha
else:
dz = np.zeros_like(m)
dy, dx = [
gaussian_filter(
self.random_state.randn(*volume_shape),
self.sigma, mode="reflect"
) * self.alpha for _ in range(2)
]
z_dim, y_dim, x_dim = volume_shape
z, y, x = np.meshgrid(np.arange(z_dim), np.arange(y_dim), np.arange(x_dim), indexing='ij')
indices = z + dz, y + dy, x + dx
if m.ndim == 3:
return map_coordinates(m, indices, order=self.spline_order, mode='reflect')
else:
channels = [map_coordinates(c, indices, order=self.spline_order, mode='reflect') for c in m]
return np.stack(channels, axis=0)
return m
def blur_boundary(boundary, sigma):
boundary = gaussian(boundary, sigma=sigma)
boundary[boundary >= 0.5] = 1
boundary[boundary < 0.5] = 0
return boundary
class CropToFixed:
def __init__(self, random_state, size=(256, 256), centered=False, **kwargs):
self.random_state = random_state
self.crop_y, self.crop_x = size
self.centered = centered
def __call__(self, m):
def _padding(pad_total):
half_total = pad_total // 2
return (half_total, pad_total - half_total)
def _rand_range_and_pad(crop_size, max_size):
if crop_size < max_size:
return max_size - crop_size, (0, 0)
else:
return 1, _padding(crop_size - max_size)
def _start_and_pad(crop_size, max_size):
if crop_size < max_size:
return (max_size - crop_size) // 2, (0, 0)
else:
return 0, _padding(crop_size - max_size)
_, y, x = m.shape
if not self.centered:
y_range, y_pad = _rand_range_and_pad(self.crop_y, y)
x_range, x_pad = _rand_range_and_pad(self.crop_x, x)
y_start = self.random_state.randint(y_range)
x_start = self.random_state.randint(x_range)
else:
y_start, y_pad = _start_and_pad(self.crop_y, y)
x_start, x_pad = _start_and_pad(self.crop_x, x)
result = m[:, y_start:y_start + self.crop_y, x_start:x_start + self.crop_x]
return np.pad(result, pad_width=((0, 0), y_pad, x_pad), mode='reflect')
class AbstractLabelToBoundary:
    """Base class for transforms turning a 3D label volume into boundary/affinity channels.

    Subclasses provide convolution kernels via ``get_kernels()``; each kernel
    yields one binary channel marking voxels where the label changes along a
    given axis/offset.
    """
    # axis permutations that orient a 1D difference kernel along X, Y or Z
    AXES_TRANSPOSE = [
        (0, 1, 2),  # X
        (0, 2, 1),  # Y
        (2, 0, 1)  # Z
    ]
    def __init__(self, ignore_index=None, aggregate_affinities=False, append_label=False, **kwargs):
        """
        :param ignore_index: label value whose voxels are propagated unchanged into the output
        :param aggregate_affinities: if True, merge each consecutive X/Y/Z channel triple with logical OR
        :param append_label: if True, append the original label volume as the last channel
        """
        self.ignore_index = ignore_index
        self.aggregate_affinities = aggregate_affinities
        self.append_label = append_label
    def __call__(self, m):
        """Compute boundary channels for a 3D label array and stack them along a new channel dim."""
        assert m.ndim == 3
        kernels = self.get_kernels()
        # one binary boundary map per kernel: 1 where the convolution response is non-zero
        boundary_arr = [np.where(np.abs(convolve(m, kernel)) > 0, 1, 0) for kernel in kernels]
        channels = np.stack(boundary_arr)
        results = []
        if self.aggregate_affinities:
            assert len(kernels) % 3 == 0, "Number of kernels must be divided by 3 (one kernel per offset per Z,Y,X axes)"
            # aggregate affinities with the same offset
            for i in range(0, len(kernels), 3):
                # merge across X,Y,Z axes (logical OR); use builtin int — np.int was removed in NumPy 1.24
                xyz_aggregated_affinities = np.logical_or.reduce(channels[i:i + 3, ...]).astype(int)
                # recover ignore index
                xyz_aggregated_affinities = _recover_ignore_index(xyz_aggregated_affinities, m, self.ignore_index)
                results.append(xyz_aggregated_affinities)
        else:
            results = [_recover_ignore_index(channels[i], m, self.ignore_index) for i in range(channels.shape[0])]
        if self.append_label:
            # append original input data
            results.append(m)
        # stack across channel dim
        return np.stack(results, axis=0)
    @staticmethod
    def create_kernel(axis, offset):
        """Build a 1D difference kernel of length ``offset + 1`` oriented along ``axis``.

        The kernel has +1 at the origin and -1 at ``offset``, so convolving it
        with a label volume is non-zero exactly where labels differ at that offset.
        """
        k_size = offset + 1
        # dtype=int (np.int alias was removed in NumPy 1.24; int is the exact former alias)
        k = np.zeros((1, 1, k_size), dtype=int)
        k[0, 0, 0] = 1
        k[0, 0, offset] = -1
        return np.transpose(k, axis)
    def get_kernels(self):
        """Return the list of convolution kernels; must be implemented by subclasses."""
        raise NotImplementedError
class StandardLabelToBoundary:
    """Convert a 3D label volume into a boundary map via ``find_boundaries``.

    Optionally prepends a foreground (blob) mask, blurs/re-binarizes the
    boundary, restores the ignore label, and appends the raw labels.
    """
    def __init__(self, ignore_index=None, append_label=False, blur=False, sigma=1, mode='thick', blobs=False, **kwargs):
        self.ignore_index = ignore_index
        self.append_label = append_label
        self.blur = blur
        self.sigma = sigma
        self.mode = mode
        self.blobs = blobs

    def __call__(self, m):
        assert m.ndim == 3
        boundaries = find_boundaries(m, connectivity=2, mode=self.mode)
        if self.blur:
            # smooth the boundary map and threshold it back to {0, 1}
            boundaries = blur_boundary(boundaries, self.sigma)
        channels = []
        if self.blobs:
            # foreground mask as an extra (first) channel
            foreground = (m > 0).astype('uint8')
            channels.append(_recover_ignore_index(foreground, m, self.ignore_index))
        channels.append(_recover_ignore_index(boundaries, m, self.ignore_index))
        if self.append_label:
            # keep the original labels as the last channel
            channels.append(m)
        return np.stack(channels, axis=0)
class BlobsWithBoundary:
    """Produce a foreground mask plus one boundary channel per requested mode."""
    def __init__(self, mode=None, append_label=False, blur=False, sigma=1, **kwargs):
        # default to all three boundary styles
        self.mode = ['thick', 'inner', 'outer'] if mode is None else mode
        self.append_label = append_label
        self.blur = blur
        self.sigma = sigma

    def __call__(self, m):
        assert m.ndim == 3
        # first channel: binary segmentation (foreground) mask
        channels = [(m > 0).astype('uint8')]
        for boundary_mode in self.mode:
            boundary = find_boundaries(m, connectivity=2, mode=boundary_mode)
            if self.blur:
                boundary = blur_boundary(boundary, self.sigma)
            channels.append(boundary)
        if self.append_label:
            channels.append(m)
        return np.stack(channels, axis=0)
class BlobsToMask:
    """Turn a label volume into a binary foreground mask, optionally with boundaries.

    With ``cross_entropy=True`` the outer boundary is encoded as class 2 inside
    a single channel; otherwise it becomes a separate channel.
    """
    def __init__(self, append_label=False, boundary=False, cross_entropy=False, **kwargs):
        self.cross_entropy = cross_entropy
        self.boundary = boundary
        self.append_label = append_label

    def __call__(self, m):
        assert m.ndim == 3
        # binary segmentation (foreground) mask
        mask = (m > 0).astype('uint8')
        channels = [mask]
        if self.boundary:
            outer = find_boundaries(m, connectivity=2, mode='outer')
            if self.cross_entropy:
                # encode the boundary as class 2 in the mask channel itself
                mask[outer > 0] = 2
                channels = [mask]
            else:
                channels.append(outer)
        if self.append_label:
            channels.append(m)
        return np.stack(channels, axis=0)
class RandomLabelToAffinities(AbstractLabelToBoundary):
    """Affinity transform that samples a single random (axis, offset) kernel per call."""
    def __init__(self, random_state, max_offset=10, ignore_index=None, append_label=False, z_offset_scale=2, **kwargs):
        super().__init__(ignore_index=ignore_index, append_label=append_label, aggregate_affinities=False)
        self.random_state = random_state
        # candidate offsets: 1..max_offset (inclusive)
        self.offsets = tuple(range(1, max_offset + 1))
        self.z_offset_scale = z_offset_scale

    def get_kernels(self):
        offset = self.random_state.choice(self.offsets)
        axis_index = self.random_state.randint(3)
        if axis_index == 2:
            # scale down z-affinities due to anisotropy (but keep at least 1)
            offset = max(1, offset // self.z_offset_scale)
        # return a single kernel
        return [self.create_kernel(self.AXES_TRANSPOSE[axis_index], offset)]
class LabelToAffinities(AbstractLabelToBoundary):
    """Affinity transform with a fixed set of XY offsets and (optionally separate) Z offsets."""
    def __init__(self, offsets, ignore_index=None, append_label=False, aggregate_affinities=False, z_offsets=None,
                 **kwargs):
        super().__init__(ignore_index=ignore_index, append_label=append_label,
                         aggregate_affinities=aggregate_affinities)
        assert isinstance(offsets, (list, tuple)), 'offsets must be a list or a tuple'
        assert all(a > 0 for a in offsets), "'offsets must be positive"
        assert len(set(offsets)) == len(offsets), "'offsets' must be unique"
        if z_offsets is not None:
            assert len(offsets) == len(z_offsets), 'z_offsets length must be the same as the length of offsets'
        else:
            # fall back to the XY offsets for the Z axis as well
            z_offsets = list(offsets)
        self.z_offsets = z_offsets
        # one kernel per (offset, axis); the Z axis uses its dedicated offset
        self.kernels = []
        for xy_offset, z_offset in zip(offsets, z_offsets):
            for axis_index, axis in enumerate(self.AXES_TRANSPOSE):
                self.kernels.append(self.create_kernel(axis, z_offset if axis_index == 2 else xy_offset))

    def get_kernels(self):
        return self.kernels
class LabelToZAffinities(AbstractLabelToBoundary):
    """Affinity transform that creates kernels for the Z axis only."""
    def __init__(self, offsets, ignore_index=None, append_label=False, **kwargs):
        super().__init__(ignore_index=ignore_index, append_label=append_label)
        assert isinstance(offsets, (list, tuple)), 'offsets must be a list or a tuple'
        assert all(a > 0 for a in offsets), "'offsets must be positive"
        assert len(set(offsets)) == len(offsets), "'offsets' must be unique"
        z_axis = self.AXES_TRANSPOSE[2]
        # one Z-oriented kernel per requested offset
        self.kernels = [self.create_kernel(z_axis, z_offset) for z_offset in offsets]

    def get_kernels(self):
        return self.kernels
class LabelToBoundaryAndAffinities:
    """Concatenate an (optionally blurred) boundary map with XY/Z affinity channels."""
    def __init__(self, xy_offsets, z_offsets, append_label=False, blur=False, sigma=1, ignore_index=None, mode='thick',
                 blobs=False, **kwargs):
        # blur only StandardLabelToBoundary results; we don't want to blur the affinities
        self.l2b = StandardLabelToBoundary(blur=blur, sigma=sigma, ignore_index=ignore_index, mode=mode, blobs=blobs)
        self.l2a = LabelToAffinities(offsets=xy_offsets, z_offsets=z_offsets, append_label=append_label,
                                     ignore_index=ignore_index)

    def __call__(self, m):
        # boundary channels first, affinity channels after
        return np.concatenate((self.l2b(m), self.l2a(m)), axis=0)
class FlyWingBoundary:
    """Boundary transform for fly-wing data: label 0 is treated as the boundary.

    Optionally adds a thick outer boundary channel and Z-affinity channels.
    """
    def __init__(self, append_label=False, thick_boundary=True, ignore_index=None, z_offsets=None, **kwargs):
        self.append_label = append_label
        self.thick_boundary = thick_boundary
        self.ignore_index = ignore_index
        # optional Z-affinity generator
        self.lta = LabelToZAffinities(z_offsets, ignore_index=ignore_index) if z_offsets is not None else None

    def __call__(self, m):
        # background (label 0) is the boundary channel
        channels = [(m == 0).astype('uint8')]
        if self.thick_boundary:
            channels.append(find_boundaries(m, connectivity=1, mode='outer', background=0))
        if self.lta is not None:
            # each Z-affinity channel becomes its own output channel
            for z_aff in self.lta(m):
                channels.append(z_aff)
        if self.ignore_index is not None:
            # stamp the ignore label into every channel computed so far
            for channel in channels:
                channel[m == self.ignore_index] = self.ignore_index
        if self.append_label:
            channels.append(m)
        return np.stack(channels, axis=0)
class LabelToMaskAndAffinities:
    """Concatenate a binary foreground mask with XY/Z affinity channels."""
    def __init__(self, xy_offsets, z_offsets, append_label=False, background=0, ignore_index=None, **kwargs):
        self.background = background
        self.l2a = LabelToAffinities(offsets=xy_offsets, z_offsets=z_offsets, append_label=append_label,
                                     ignore_index=ignore_index)

    def __call__(self, m):
        # single-channel foreground mask: anything above the background label
        mask = np.expand_dims((m > self.background).astype(np.uint8), axis=0)
        return np.concatenate((mask, self.l2a(m)), axis=0)
class Standardize:
    """Z-score normalization: (m - mean) / std, with std clipped below by eps."""
    def __init__(self, mean, std, eps=1e-6, **kwargs):
        self.mean = mean
        self.std = std
        self.eps = eps

    def __call__(self, m):
        # guard against division by a (near-)zero std
        denominator = np.clip(self.std, a_min=self.eps, a_max=None)
        return (m - self.mean) / denominator
class Normalize:
    """Linearly rescale values from [min_value, max_value] to [-1, 1] (clipped)."""
    def __init__(self, min_value, max_value, **kwargs):
        assert max_value > min_value
        self.min_value = min_value
        self.value_range = max_value - min_value

    def __call__(self, m):
        # first map to [0, 1], then stretch to [-1, 1] and clip outliers
        scaled = (m - self.min_value) / self.value_range
        return np.clip(2 * scaled - 1, -1, 1)
class AdditiveGaussianNoise:
    """With probability ``execution_probability``, add zero-mean Gaussian noise."""
    def __init__(self, random_state, scale=(0.0, 1.0), execution_probability=0.1, **kwargs):
        self.execution_probability = execution_probability
        self.random_state = random_state
        self.scale = scale

    def __call__(self, m):
        # skip the augmentation most of the time (guard clause)
        if self.random_state.uniform() >= self.execution_probability:
            return m
        # draw the noise std uniformly from the configured range
        std = self.random_state.uniform(self.scale[0], self.scale[1])
        return m + self.random_state.normal(0, std, size=m.shape)
class AdditivePoissonNoise:
    """With probability ``execution_probability``, add Poisson-distributed noise."""
    def __init__(self, random_state, lam=(0.0, 1.0), execution_probability=0.1, **kwargs):
        self.execution_probability = execution_probability
        self.random_state = random_state
        self.lam = lam

    def __call__(self, m):
        # skip the augmentation most of the time (guard clause)
        if self.random_state.uniform() >= self.execution_probability:
            return m
        # draw the Poisson rate uniformly from the configured range
        lam = self.random_state.uniform(self.lam[0], self.lam[1])
        return m + self.random_state.poisson(lam, size=m.shape)
class ToTensor:
    """Convert a 3D/4D numpy array into a torch tensor of the given dtype.

    With ``expand_dims=True`` a 3D (DxHxW) input gains a singleton channel dim.
    """
    def __init__(self, expand_dims, dtype=np.float32, **kwargs):
        self.expand_dims = expand_dims
        self.dtype = dtype

    def __call__(self, m):
        assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
        # add a channel dimension for 3D inputs when requested
        if self.expand_dims and m.ndim == 3:
            m = np.expand_dims(m, axis=0)
        return torch.from_numpy(m.astype(dtype=self.dtype))
class Relabel:
    """Map the labels in the input to consecutive integers starting at 0."""
    def __init__(self, **kwargs):
        pass

    def __call__(self, m):
        # np.unique's inverse gives each voxel the index of its label in sorted order
        _, inverse = np.unique(m, return_inverse=True)
        return inverse.reshape(m.shape)
class Identity:
    """No-op transform: returns its input unchanged (extra kwargs are ignored)."""
    def __init__(self, **kwargs):
        pass

    def __call__(self, m):
        return m
def get_transformer(config, min_value, max_value, mean, std):
    """Build a :class:`Transformer` from a phase config plus dataset statistics."""
    base_config = {
        'min_value': min_value,
        'max_value': max_value,
        'mean': mean,
        'std': std,
    }
    return Transformer(config, base_config)
class Transformer:
    """Factory that instantiates augmentation pipelines from a phase configuration.

    Every augmentation created by one Transformer instance shares the same
    random seed, so paired raw/label/weight pipelines stay spatially aligned.
    """
    def __init__(self, phase_config, base_config):
        self.phase_config = phase_config
        self.config_base = base_config
        # one shared seed per Transformer so paired pipelines are in sync
        self.seed = GLOBAL_RANDOM_STATE.randint(10000000)

    def raw_transform(self):
        return self._create_transform('raw')

    def label_transform(self):
        return self._create_transform('label')

    def weight_transform(self):
        return self._create_transform('weight')

    @staticmethod
    def _transformer_class(class_name):
        # look the augmentation class up by name in this module
        module = importlib.import_module('pytorch3dunet.augment.transforms')
        return getattr(module, class_name)

    def _create_transform(self, name):
        assert name in self.phase_config, f'Could not find {name} transform'
        return Compose([self._create_augmentation(c) for c in self.phase_config[name]])

    def _create_augmentation(self, c):
        # merge the base (dataset statistics) config with the per-augmentation one
        config = dict(self.config_base)
        config.update(c)
        config['random_state'] = np.random.RandomState(self.seed)
        aug_class = self._transformer_class(config['name'])
        return aug_class(**config)
def _recover_ignore_index(input, orig, ignore_index):
if ignore_index is not None:
mask = orig == ignore_index
input[mask] = ignore_index
return input
| true | true |
f7296753d900057b6ee906f435a24ab68c98469f | 2,040 | py | Python | tests/providers/google/cloud/operators/test_video_intelligence_system.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | tests/providers/google/cloud/operators/test_video_intelligence_system.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 210 | 2021-07-17T00:25:52.000Z | 2021-12-29T00:44:48.000Z | tests/providers/google/cloud/operators/test_video_intelligence_system.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from airflow.providers.google.cloud.example_dags.example_video_intelligence import GCP_BUCKET_NAME
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY, GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_VIDEO_SOURCE_URL = "https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4"
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_AI_KEY)
class CloudVideoIntelligenceExampleDagsTest(GoogleSystemTest):
    """System test that runs the example Video Intelligence DAG end-to-end.

    Requires GCP credentials (``GCP_AI_KEY``/``GCP_GCS_KEY``) and network
    access; it provisions a GCS bucket with a sample video before the DAG run.
    """
    @provide_gcp_context(GCP_AI_KEY)
    def setUp(self):
        # create the bucket, then stream the sample video into it before the DAG runs
        self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
        self.execute_with_ctx(
            cmd=["bash", "-c", f"curl {GCP_VIDEO_SOURCE_URL} | gsutil cp - gs://{GCP_BUCKET_NAME}/video.mp4"],
            key=GCP_GCS_KEY,
        )
        super().setUp()
    @provide_gcp_context(GCP_AI_KEY)
    def tearDown(self):
        # remove the bucket (and the uploaded video) after the test
        self.delete_gcs_bucket(GCP_BUCKET_NAME)
        super().tearDown()
    @provide_gcp_context(GCP_AI_KEY)
    def test_example_dag(self):
        # run the example DAG from the cloud examples folder; failures surface via run_dag
        self.run_dag('example_gcp_video_intelligence', CLOUD_DAG_FOLDER)
| 40.8 | 110 | 0.763725 |
import os
import pytest
from airflow.providers.google.cloud.example_dags.example_video_intelligence import GCP_BUCKET_NAME
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY, GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_VIDEO_SOURCE_URL = "https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4"
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_AI_KEY)
class CloudVideoIntelligenceExampleDagsTest(GoogleSystemTest):
@provide_gcp_context(GCP_AI_KEY)
def setUp(self):
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
self.execute_with_ctx(
cmd=["bash", "-c", f"curl {GCP_VIDEO_SOURCE_URL} | gsutil cp - gs://{GCP_BUCKET_NAME}/video.mp4"],
key=GCP_GCS_KEY,
)
super().setUp()
@provide_gcp_context(GCP_AI_KEY)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown()
@provide_gcp_context(GCP_AI_KEY)
def test_example_dag(self):
self.run_dag('example_gcp_video_intelligence', CLOUD_DAG_FOLDER)
| true | true |
f7296786eaf8c32bfc2c367f3dc5235847f3635e | 2,002 | py | Python | python3.4Smartforest/lib/python3.4/site-packages/setuptools/lib2to3_ex.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | 1 | 2019-06-30T20:04:39.000Z | 2019-06-30T20:04:39.000Z | python3.4Smartforest/lib/python3.4/site-packages/setuptools/lib2to3_ex.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | 4 | 2021-03-18T20:36:02.000Z | 2022-01-13T00:47:28.000Z | python3.4Smartforest/lib/python3.4/site-packages/setuptools/lib2to3_ex.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | [
"MIT"
] | 2 | 2016-07-23T22:12:34.000Z | 2018-06-23T03:40:58.000Z | """
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
    """RefactoringTool that routes lib2to3 log output through distutils' logger."""
    def log_error(self, msg, *args, **kw):
        # `kw` (e.g. exc_info) is not forwarded to distutils' log
        log.error(msg, *args)
    def log_message(self, msg, *args):
        log.info(msg, *args)
    def log_debug(self, msg, *args):
        log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
    """Distutils Mixin2to3 extension that honors setuptools' 2to3 options.

    Adds doctest conversion support and per-distribution fixer
    inclusion/exclusion lists.
    """
    def run_2to3(self, files, doctests=False):
        # Respect the distribution-level switch before doing any work.
        if self.distribution.use_2to3 is not True:
            return
        if not files:
            return
        log.info("Fixing "+" ".join(files))
        self.__build_fixer_names()
        self.__exclude_fixers()
        if not doctests:
            _Mixin2to3.run_2to3(self, files)
        elif setuptools.run_2to3_on_doctests:
            # convert doctests in place with the distutils-aware refactoring tool
            refactorer = DistutilsRefactoringTool(self.fixer_names)
            refactorer.refactor(files, write=True, doctests_only=True)
    def __build_fixer_names(self):
        # fixer_names is cached on the instance once computed
        if self.fixer_names:
            return
        self.fixer_names = []
        for package in setuptools.lib2to3_fixer_packages:
            self.fixer_names.extend(get_fixers_from_package(package))
        if self.distribution.use_2to3_fixers is not None:
            for package in self.distribution.use_2to3_fixers:
                self.fixer_names.extend(get_fixers_from_package(package))
    def __exclude_fixers(self):
        # NOTE: when self.exclude_fixers exists, it is extended in place (as before)
        excluded_fixers = getattr(self, 'exclude_fixers', [])
        if self.distribution.use_2to3_exclude_fixers is not None:
            excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
        for name in excluded_fixers:
            if name in self.fixer_names:
                self.fixer_names.remove(name)
| 31.777778 | 77 | 0.668332 |
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests = False):
if self.distribution.use_2to3 is not True:
return
if not files:
return
log.info("Fixing "+" ".join(files))
self.__build_fixer_names()
self.__exclude_fixers()
if doctests:
if setuptools.run_2to3_on_doctests:
r = DistutilsRefactoringTool(self.fixer_names)
r.refactor(files, write=True, doctests_only=True)
else:
_Mixin2to3.run_2to3(self, files)
def __build_fixer_names(self):
if self.fixer_names: return
self.fixer_names = []
for p in setuptools.lib2to3_fixer_packages:
self.fixer_names.extend(get_fixers_from_package(p))
if self.distribution.use_2to3_fixers is not None:
for p in self.distribution.use_2to3_fixers:
self.fixer_names.extend(get_fixers_from_package(p))
def __exclude_fixers(self):
excluded_fixers = getattr(self, 'exclude_fixers', [])
if self.distribution.use_2to3_exclude_fixers is not None:
excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
for fixer_name in excluded_fixers:
if fixer_name in self.fixer_names:
self.fixer_names.remove(fixer_name)
| true | true |
f729699287f520c8ea12e89ebedc305b0d14814f | 512 | py | Python | pavement.py | yueranyuan/pyscreenshot | 3287b798691de8791bc3b3314f2545f7b0b1cb99 | [
"BSD-2-Clause"
] | null | null | null | pavement.py | yueranyuan/pyscreenshot | 3287b798691de8791bc3b3314f2545f7b0b1cb99 | [
"BSD-2-Clause"
] | null | null | null | pavement.py | yueranyuan/pyscreenshot | 3287b798691de8791bc3b3314f2545f7b0b1cb99 | [
"BSD-2-Clause"
] | null | null | null | from path import Path
from paver.doctools import cog, html
from paver.easy import options
from paver.options import Bunch
from paver.setuputils import setup
IMPORTS=[cog, html, setup]
# cog configuration: expand code snippets from the `pyscreenshot` directory
# into README.rst between the '#--' / '--#' / '#-#' markers
options(
    cog=Bunch(
        basedir='.',
        pattern='README.rst',
        includedir='pyscreenshot',
        beginspec='#--',
        endspec='--#',
        endoutput='#-#',
    )
)
# get info from setup.py: read it with every line mentioning 'setuptools'
# dropped, then exec the remainder in this module (presumably so its setup()
# call resolves to paver's `setup` imported above — verify).
# NOTE(review): exec() of a local file is trusted here; never point this at
# untrusted input.
setup_py = ''.join(
    [x for x in Path('setup.py').lines() if 'setuptools' not in x])
exec(setup_py)
| 19.692308 | 67 | 0.621094 | from path import Path
from paver.doctools import cog, html
from paver.easy import options
from paver.options import Bunch
from paver.setuputils import setup
IMPORTS=[cog, html, setup]
options(
cog=Bunch(
basedir='.',
pattern='README.rst',
includedir='pyscreenshot',
beginspec='#--',
endspec='--#',
endoutput='#-#',
)
)
setup_py = ''.join(
[x for x in Path('setup.py').lines() if 'setuptools' not in x])
exec(setup_py)
| true | true |
f72969afd180df2df3d7f7508b8f7475f20eba44 | 213 | py | Python | .history/my_classes/FirstClassFunctions/reducing_functions_20210707134010.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707134010.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/reducing_functions_20210707134010.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """Reducing Functions in Python
These are functions that recursively combine the elements of an iterable, ending up with a single return value.
They are also called accumulators, aggregators, or folding functions.
Example
""" | 21.3 | 96 | 0.769953 | true | true | |
f72969ff813909f4ad42f15958aa9f086b6a58bf | 2,694 | py | Python | qgis3script-importernvdbdata.py | alexdiem/nvdbapi-V3 | 18265ee6d02aed17d6199e5ed42fe731c9320a08 | [
"MIT"
] | null | null | null | qgis3script-importernvdbdata.py | alexdiem/nvdbapi-V3 | 18265ee6d02aed17d6199e5ed42fe731c9320a08 | [
"MIT"
] | null | null | null | qgis3script-importernvdbdata.py | alexdiem/nvdbapi-V3 | 18265ee6d02aed17d6199e5ed42fe731c9320a08 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Script for å interaktivt legge til NVDB-vegnett og fagdata via python
kommandolinje i QGIS. Se dokumentasjon på bruk av nvdbapi - funskjoner på
https://github.com/LtGlahn/nvdbapi-V3
Legg dette scriptet et sted hvor det er lettvint
å finne fra QGIS. F.eks. C:/Users/<dittbrukernavn>.
EKSEMPLER
#Vegnett europaveger Trondheim kommune
v = nvdbVegnett()
v.addfilter_geo({ 'kommune' : 1601, 'vegreferanse' : 'E' })" )
nvdbsok2qgis( v, lagnavn='Europaveger Trondheim')
# Vegnett innenfor kartutsnitt
v = nvdbVegnett()
nvdb2kart( v, iface)
# Bomstasjoner
b = nvdbFagdata(45)
nvdbsok2qgis( b)
# Søk etter fartsgrenser innenfor kartflaten, legg til
f = nvdbFagdata(105)
nvdb2kart( f, iface)
# Søk etter kjent objektID, legg til kartflaten
nvdb2kart( 572672190, iface )
"""
import sys
#########################################################33
##
## Endre stien til den mappen der du har lastet ned dette
## reposet https://github.com/LtGlahn/nvdbapi-V3
##
## Merk at hvis du laster ned repos som zip-fil og høyrekikker->Pakk ut alle
## så vil stien være NEDLASTING\\nvdbapi-V3-master\\nvdbapi-V3-master
##
# nvdblibrary = 'C:/Data/leveranser/nvdbapi-V3'
nvdblibrary = 'C:\\Users\\jajens\Downloads\\nvdbapi-V3-master\\nvdbapi-V3-master'
# nvdblibrary = 'C:\Users\<DITT BRUKERNAVN>\Downloads\\nvdbapi-V3-master\nvdbapi-V3-master'
# nvdblibrary = '/home/jan/Documents/jobb/nvdbapi-V3'
## Hvis vi ikke klarer å importere nvdbapiv3 så prøver vi å føye
## mappen nvdblibrary til søkestien.
try:
import nvdbapiv3
except ModuleNotFoundError:
print( "Fant ikke nvdbapiv3 i sys.path, legger til mappen", nvdblibrary)
sys.path.append( nvdblibrary )
try:
import nvdbapiv3
except ModuleNotFoundError as e:
print( "\nImport av nvdbapiv3 feiler for", nvdblibrary )
raise ModuleNotFoundError( "==> Variabel nvdblibrary skal peke til mappen https://github.com/LtGlahn/nvdbapi-V3 <==" )
else:
print( "SUKSESS - kan importere nvdbapiv3 etter at vi la til", nvdblibrary, "i sys.path" )
else:
print( "HURRA - vi kan importere nvdbapiv3 " )
## Her importerer vi de funksjonene vi trenger
from nvdbapiv3 import nvdbFagdata, nvdbVegnett
from nvdbapiV3qgis3 import nvdb2kart, nvdbsok2qgis, url2kart, nvdb2kartListe
## Bruk linjene nedenfor for debugging
## Funksjonskallene på python-konsollet i QGIS blir da
##
## >>> sok = nvdbapiv3.nvdbFagdata(86)
## >>> nvdbapiV3qgis3.nvdb2kart( sok, iface )
##
# import importlib
# import nvdbapiV3qgis3
# import nvdbapiv3
# importlib.reload(nvdbapiV3qgis3 )
# importlib.reload(nvdbapiv3 )
| 30.269663 | 128 | 0.69562 |
import sys
| true | true |
f7296b28fa92684ef445111a79a67994059f7ec4 | 1,221 | py | Python | uer/layers/layer_norm.py | krevas/ET-BERT | 464ce3e7942d4450f55021e267ceb9dd48a36b1f | [
"MIT"
] | null | null | null | uer/layers/layer_norm.py | krevas/ET-BERT | 464ce3e7942d4450f55021e267ceb9dd48a36b1f | [
"MIT"
] | null | null | null | uer/layers/layer_norm.py | krevas/ET-BERT | 464ce3e7942d4450f55021e267ceb9dd48a36b1f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization.
https://arxiv.org/abs/1607.06450
"""
def __init__(self, hidden_size, eps=1e-6):
super(LayerNorm, self).__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
hidden_states = self.gamma * (x-mean) / (std + self.eps)
return hidden_states + self.beta
class T5LayerNorm(nn.Module):
"""
Construct a layernorm module in the T5 style No bias and no subtraction of mean.
"""
def __init__(self, hidden_size, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.type_as(self.weight)
| 30.525 | 85 | 0.648649 | import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
super(LayerNorm, self).__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
hidden_states = self.gamma * (x-mean) / (std + self.eps)
return hidden_states + self.beta
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.type_as(self.weight)
| true | true |
f7296ddca1f1526d81de73d1fd2bc229a1a53869 | 777 | py | Python | chapter_3/my_service/scrap/zoo.py | rinjyu/the_red | c099e830ae3ee9063c3e9d29f4ee627241c7eeed | [
"Apache-2.0"
] | 13 | 2021-07-26T06:09:19.000Z | 2022-03-22T07:01:22.000Z | chapter_3/my_service/scrap/zoo.py | rinjyu/the_red | c099e830ae3ee9063c3e9d29f4ee627241c7eeed | [
"Apache-2.0"
] | 11 | 2021-07-25T03:35:25.000Z | 2021-08-13T23:05:38.000Z | chapter_3/my_service/scrap/zoo.py | rinjyu/the_red | c099e830ae3ee9063c3e9d29f4ee627241c7eeed | [
"Apache-2.0"
] | 8 | 2021-09-02T14:54:17.000Z | 2022-03-14T10:28:37.000Z | from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError
from kazoo.exceptions import NodeExistsError
_callback = None
_zk = None
def init_kazoo(hosts, data_path, callback, children=True):
global _zk
global _callback
_zk = KazooClient(hosts=hosts)
_zk.start()
_callback = callback
if data_path:
if children:
@_zk.ChildrenWatch(data_path)
def watch_children(children):
print("Watch Children")
if _callback:
_callback(children)
else:
@_zk.DataWatch(data_path)
def watch_node(data, stat):
print("Watch Node")
if _callback:
_callback(data, stat)
return _zk
| 23.545455 | 58 | 0.594595 | from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError
from kazoo.exceptions import NodeExistsError
_callback = None
_zk = None
def init_kazoo(hosts, data_path, callback, children=True):
global _zk
global _callback
_zk = KazooClient(hosts=hosts)
_zk.start()
_callback = callback
if data_path:
if children:
@_zk.ChildrenWatch(data_path)
def watch_children(children):
print("Watch Children")
if _callback:
_callback(children)
else:
@_zk.DataWatch(data_path)
def watch_node(data, stat):
print("Watch Node")
if _callback:
_callback(data, stat)
return _zk
| true | true |
f7296e2a3e9328088302e2141a4ad4afd4c901b1 | 1,166 | py | Python | docstringer/decorators.py | ttamg/docstringer | f28cdc178e8cb5dd7ca16c885f3837a052807e16 | [
"MIT"
] | null | null | null | docstringer/decorators.py | ttamg/docstringer | f28cdc178e8cb5dd7ca16c885f3837a052807e16 | [
"MIT"
] | null | null | null | docstringer/decorators.py | ttamg/docstringer | f28cdc178e8cb5dd7ca16c885f3837a052807e16 | [
"MIT"
] | null | null | null | import functools
from .events import FunctionEvent
from .formatters import BaseFormatter, DefaultFormatter
def docstringer(
_func=None, *, active=True, formatter: BaseFormatter = DefaultFormatter()
):
"""
A decorator that will output the function docstring, call values and return value when the function is called.
Add this decorator to all functions you want to have documented this way.
Parameters:
- active (bool) default=True - this controls if the docstringer is active
- formatter (Formatter instance) - this allows docstringer to output the results in a different format or data structure
"""
def wrapper(func):
@functools.wraps(func)
def inner(*args, **kwargs):
if not active:
return func(*args, **kwargs)
event = FunctionEvent(func, *args, **kwargs)
formatter.call(event)
return_value = func(*args, **kwargs)
event.return_value = return_value
formatter.end(event)
return return_value
return inner
if _func is None:
return wrapper
else:
return wrapper(_func)
| 27.116279 | 124 | 0.651801 | import functools
from .events import FunctionEvent
from .formatters import BaseFormatter, DefaultFormatter
def docstringer(
_func=None, *, active=True, formatter: BaseFormatter = DefaultFormatter()
):
def wrapper(func):
@functools.wraps(func)
def inner(*args, **kwargs):
if not active:
return func(*args, **kwargs)
event = FunctionEvent(func, *args, **kwargs)
formatter.call(event)
return_value = func(*args, **kwargs)
event.return_value = return_value
formatter.end(event)
return return_value
return inner
if _func is None:
return wrapper
else:
return wrapper(_func)
| true | true |
f7296e3c7738ac9586479e919858bf2e64daf411 | 2,066 | py | Python | api/core/management/commands/populate_db.py | mf-tech-solutions/cusgeo | 7e15b707bc7f1ae1fd7a091e64c41a6f7c8092c3 | [
"MIT"
] | null | null | null | api/core/management/commands/populate_db.py | mf-tech-solutions/cusgeo | 7e15b707bc7f1ae1fd7a091e64c41a6f7c8092c3 | [
"MIT"
] | null | null | null | api/core/management/commands/populate_db.py | mf-tech-solutions/cusgeo | 7e15b707bc7f1ae1fd7a091e64c41a6f7c8092c3 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand
from django.db.utils import OperationalError
from customers.models import Customer
from geolocation.models import Location
import csv
import sys
class Command(BaseCommand):
"""
Command that populates the Customers table
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.customers = self._get_customer_from_file()
self.cities = [c['city'] for c in self.customers]
def handle(self, *args, **options):
sys.stdout.write("Populating db...\n")
try:
for customer in self.customers:
Customer.objects.get_or_create(
id=customer['id'],
email=customer['email'],
first_name=customer['first_name'],
last_name=customer['last_name'],
gender=customer['gender'],
company=customer['company'],
title=customer['title']
)
i = 1
for city in self.cities:
customer = Customer.objects.get(id=i)
Location.objects.get_or_create(
customer=customer,
city=city,
latitude=0,
longitude=0
)
i += 1
except OperationalError as error:
raise error
sys.stdout.write("Db populated\n")
def _get_customer_from_file(self):
with open('./customers.csv') as file:
reader = csv.DictReader(file)
return [{
'id': row['id'],
'email': row['email'],
'first_name': row['first_name'],
'last_name': row['last_name'],
'gender': row['gender'],
'company': row['company'],
'title': row['title'],
'city': row['city']
}
for row in reader]
| 30.835821 | 57 | 0.484511 | from django.core.management.base import BaseCommand
from django.db.utils import OperationalError
from customers.models import Customer
from geolocation.models import Location
import csv
import sys
class Command(BaseCommand):
    """Populate the Customer and Location tables from ``./customers.csv``.

    Idempotent: rows are inserted with ``get_or_create``.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but not forwarded to
        # BaseCommand.__init__ -- confirm this is intentional.
        super().__init__()
        self.customers = self._get_customer_from_file()
        self.cities = [c['city'] for c in self.customers]
    def handle(self, *args, **options):
        # Insert every customer row first, then one Location per city.
        sys.stdout.write("Populating db...\n")
        try:
            for customer in self.customers:
                Customer.objects.get_or_create(
                    id=customer['id'],
                    email=customer['email'],
                    first_name=customer['first_name'],
                    last_name=customer['last_name'],
                    gender=customer['gender'],
                    company=customer['company'],
                    title=customer['title']
                )
            # assumes CSV ids are the contiguous sequence 1..N in file
            # order -- TODO confirm against customers.csv
            i = 1
            for city in self.cities:
                customer = Customer.objects.get(id=i)
                Location.objects.get_or_create(
                    customer=customer,
                    city=city,
                    latitude=0,
                    longitude=0
                )
                i += 1
        except OperationalError as error:
            # Re-raise: population cannot proceed without a working DB.
            raise error
        sys.stdout.write("Db populated\n")
    def _get_customer_from_file(self):
        """Read ./customers.csv and return a list of row dicts."""
        with open('./customers.csv') as file:
            reader = csv.DictReader(file)
            return [{
                'id': row['id'],
                'email': row['email'],
                'first_name': row['first_name'],
                'last_name': row['last_name'],
                'gender': row['gender'],
                'company': row['company'],
                'title': row['title'],
                'city': row['city']
            }
            for row in reader]
| true | true |
f7296e7682b97a09000c105cac73b1070d2a032f | 2,284 | py | Python | lib/rtc.py | FlorianPoot/M5StickCWatch | c5e63b5915b1163636084666a14f1d61ae708adf | [
"MIT"
] | 21 | 2019-11-15T15:29:12.000Z | 2022-03-20T12:15:48.000Z | lib/rtc.py | FlorianPoot/M5StickCWatch | c5e63b5915b1163636084666a14f1d61ae708adf | [
"MIT"
] | 4 | 2019-10-01T08:48:04.000Z | 2020-11-17T12:05:43.000Z | lib/rtc.py | FlorianPoot/M5StickCWatch | c5e63b5915b1163636084666a14f1d61ae708adf | [
"MIT"
] | 1 | 2019-11-15T15:29:17.000Z | 2019-11-15T15:29:17.000Z | import ustruct
import i2c_bus
class RTC:
    """Driver for a BCD-register I2C real-time clock at address 0x51.

    Time and date registers hold packed-BCD values; the static helpers at
    the bottom convert between packed BCD and plain integers.
    (Register map matches the PCF8563 family -- TODO confirm exact part.)
    """

    def __init__(self):
        # 7-bit I2C address of the RTC chip.
        self.addr = 0x51
        self.i2c = i2c_bus.get(i2c_bus.M_BUS)

    def get_time(self):
        """Return the current time as ``(hours, minutes, seconds)``."""
        buf = self._regchar(0x02, buf=bytearray(3))
        # Mask off status/unused bits before BCD decoding.
        seconds = self.bcd2_to_byte(buf[0] & 0x7f)
        minutes = self.bcd2_to_byte(buf[1] & 0x7f)
        hours = self.bcd2_to_byte(buf[2] & 0x3f)
        return hours, minutes, seconds

    def set_time(self, hours, minutes, seconds):
        """Encode the given time to BCD and write the time registers."""
        seconds = self.byte_to_bcd2(seconds)
        minutes = self.byte_to_bcd2(minutes)
        hours = self.byte_to_bcd2(hours)
        self._regchar(0x02, (seconds, minutes, hours), buf=bytearray(3))

    def get_date(self):
        """Return the current date as ``(year, month, date, week_day)``."""
        buf = self._regchar(0x05, buf=bytearray(4))
        date = self.bcd2_to_byte(buf[0] & 0x3f)
        week_day = self.bcd2_to_byte(buf[1] & 0x07)
        month = self.bcd2_to_byte(buf[2] & 0x1f)
        # The century flag lives in the top bit of the month register.
        if buf[2] & 0x80:
            year = 1900 + self.bcd2_to_byte(buf[3] & 0xff)
        else:
            year = 2000 + self.bcd2_to_byte(buf[3] & 0xff)
        return year, month, date, week_day

    def set_date(self, year, month, date, week_day):
        """Write the date registers; years before 2000 set the century flag."""
        date = self.byte_to_bcd2(date)
        week_day = self.byte_to_bcd2(week_day)
        if year < 2000:
            month = self.byte_to_bcd2(month | 0x80)
        else:
            month = self.byte_to_bcd2(month | 0x00)
        year = self.byte_to_bcd2(year % 100)
        self._regchar(0x05, (date, week_day, month, year), buf=bytearray(4))

    def _regchar(self, reg, value=None, buf=None):
        """Read from or write to device register(s) starting at ``reg``.

        With ``value`` None, read into ``buf`` and return the single byte
        (or the whole buffer when it is longer than one byte).  Otherwise
        pack ``value`` (an int or a sequence of ints) into ``buf`` and
        write it to the device.
        """
        if buf is None:
            # Allocate a fresh one-byte buffer per call instead of the old
            # mutable default argument, which was shared across calls.
            buf = bytearray(1)
        if value is None:
            self.i2c.readfrom_mem_into(self.addr, reg, buf)
            if len(buf) == 1:
                return buf[0]
            else:
                return buf
        if type(value) is int:
            ustruct.pack_into('<b', buf, 0, value)
        else:
            ustruct.pack_into('<%db' % len(value), buf, 0, *value)
        return self.i2c.writeto_mem(self.addr, reg, buf)

    @staticmethod
    def bcd2_to_byte(value):
        """Convert a packed-BCD byte (e.g. ``0x45``) to an integer (45)."""
        tmp = ((value & 0xF0) >> 0x4) * 10
        return tmp + (value & 0x0F)

    @staticmethod
    def byte_to_bcd2(value):
        """Convert an integer 0-99 (e.g. 45) to a packed-BCD byte (0x45)."""
        bcdhigh = 0
        while value >= 10:
            bcdhigh += 1
            value -= 10
        return (bcdhigh << 4) | value
| 27.518072 | 76 | 0.569177 | import ustruct
import i2c_bus
class RTC:
    """Driver for a BCD-register I2C real-time clock at address 0x51."""
    def __init__(self):
        # 7-bit I2C address of the RTC chip.
        self.addr = 0x51
        self.i2c = i2c_bus.get(i2c_bus.M_BUS)
    def get_time(self):
        """Return (hours, minutes, seconds) decoded from the BCD registers."""
        buf = self._regchar(0x02, buf=bytearray(3))
        # Mask off status/unused bits before decoding.
        seconds = self.bcd2_to_byte(buf[0] & 0x7f)
        minutes = self.bcd2_to_byte(buf[1] & 0x7f)
        hours = self.bcd2_to_byte(buf[2] & 0x3f)
        return hours, minutes, seconds
    def set_time(self, hours, minutes, seconds):
        """Encode the given time to BCD and write the time registers."""
        seconds = self.byte_to_bcd2(seconds)
        minutes = self.byte_to_bcd2(minutes)
        hours = self.byte_to_bcd2(hours)
        self._regchar(0x02, (seconds, minutes, hours), buf=bytearray(3))
    def get_date(self):
        """Return (year, month, date, week_day); the month register's top
        bit is the century flag."""
        buf = self._regchar(0x05, buf=bytearray(4))
        date = self.bcd2_to_byte(buf[0] & 0x3f)
        week_day = self.bcd2_to_byte(buf[1] & 0x07)
        month = self.bcd2_to_byte(buf[2] & 0x1f)
        if buf[2] & 0x80:
            year = 1900 + self.bcd2_to_byte(buf[3] & 0xff)
        else:
            year = 2000 + self.bcd2_to_byte(buf[3] & 0xff)
        return year, month, date, week_day
    def set_date(self, year, month, date, week_day):
        """Write the date registers; years before 2000 set the century flag."""
        date = self.byte_to_bcd2(date)
        week_day = self.byte_to_bcd2(week_day)
        if year < 2000:
            month = self.byte_to_bcd2(month | 0x80)
        else:
            month = self.byte_to_bcd2(month | 0x00)
        year = self.byte_to_bcd2(year % 100)
        self._regchar(0x05, (date, week_day, month, year), buf=bytearray(4))
    def _regchar(self, reg, value=None, buf=bytearray(1)):
        """Read (``value`` is None) or write register(s) starting at ``reg``.

        NOTE(review): the mutable default ``buf=bytearray(1)`` is shared
        across calls; it works here because it is always fully overwritten
        before use, but it is a fragile pattern.
        """
        if value is None:
            self.i2c.readfrom_mem_into(self.addr, reg, buf)
            if len(buf) == 1:
                return buf[0]
            else:
                return buf
        if type(value) is int:
            ustruct.pack_into('<b', buf, 0, value)
        else:
            ustruct.pack_into('<%db' % len(value), buf, 0, *value)
        return self.i2c.writeto_mem(self.addr, reg, buf)
    @staticmethod
    def bcd2_to_byte(value):
        """Convert a packed-BCD byte (0x45) to an integer (45)."""
        tmp = ((value & 0xF0) >> 0x4) * 10
        return tmp + (value & 0x0F)
    @staticmethod
    def byte_to_bcd2(value):
        """Convert an integer 0-99 (45) to a packed-BCD byte (0x45)."""
        bcdhigh = 0
        while value >= 10:
            bcdhigh += 1
            value -= 10
        return (bcdhigh << 4) | value
| true | true |
f7296e825616df59c56e8fca75e277ba8e588d8a | 622 | py | Python | hw/ip/mono_fm/transform.py | xupsh/pp4fpgas-cn-hls | d14bd0769ce7f9674f206faf93b7622c5bf905bf | [
"Apache-2.0"
] | 152 | 2018-08-06T14:08:59.000Z | 2022-03-29T23:15:05.000Z | hw/ip/mono_fm/transform.py | sinjinchang/pp4fpgas-cn-hls | d14bd0769ce7f9674f206faf93b7622c5bf905bf | [
"Apache-2.0"
] | 2 | 2019-04-12T16:30:25.000Z | 2019-08-13T19:59:03.000Z | hw/ip/mono_fm/transform.py | sinjinchang/pp4fpgas-cn-hls | d14bd0769ce7f9674f206faf93b7622c5bf905bf | [
"Apache-2.0"
] | 63 | 2018-08-25T10:43:04.000Z | 2022-03-26T09:12:35.000Z | import numpy as np
# Convert the dumped numpy samples into a plain-text real/imag stream.
detection_file = 'samples.npy'
detections = None
if detection_file is not None:
    detections = np.load(detection_file)
np.savetxt('samples.txt', detections, fmt='%0.18f')

# numpy writes complex values as "(a+bj)"; strip the punctuation so each
# value becomes whitespace-separated real and imaginary parts.
# Context managers guarantee both files are closed: the previous version
# called ``f.close`` without parentheses (so the input file was never
# closed) and never closed ``out`` at all.
with open('samples.txt') as f, open('complex.txt', "w") as out:
    for line in f:
        for ch in line:
            if ch == "+":
                out.write(" ")
            elif ch == "-":
                out.write(" -")
            elif ch in ("(", ")", "j"):
                pass  # drop complex-literal punctuation
            else:
                out.write(ch)
| 20.733333 | 51 | 0.485531 | import numpy as np
# Load the recorded samples and dump them as text.
detection_file = 'samples.npy'
detections = None
if detection_file is not None:
    detections = np.load(detection_file)
np.savetxt('samples.txt', detections, fmt='%0.18f')
# Rewrite numpy's "(a+bj)" complex literals as whitespace-separated
# real/imag pairs in complex.txt.
f = open('samples.txt')
out = open('complex.txt', "w")
lines = f.readlines()
for line in lines:
    for i in line:
        if i == "+":
            out.write(" ")
        elif i == "-":
            out.write(" -")
        elif i == "(":
            i = i  # no-op: '(' is simply dropped
        elif i == ")":
            i = i  # no-op: ')' is simply dropped
        elif i == "j":
            i = i  # no-op: 'j' is simply dropped
        else:
            out.write(str(i))
# NOTE(review): ``f.close`` is missing parentheses, so the input file is
# never actually closed, and ``out`` is never closed at all -- both should
# use ``with`` blocks.
f.close
| true | true |
f7296efbb9bd563afe90dc691e822b8ad26cace4 | 15,124 | py | Python | coremltools/converters/mil/mil/ops/defs/recurrent.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | 1 | 2020-12-23T15:42:01.000Z | 2020-12-23T15:42:01.000Z | coremltools/converters/mil/mil/ops/defs/recurrent.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | 75 | 2020-11-24T05:37:45.000Z | 2022-02-25T15:14:23.000Z | coremltools/converters/mil/mil/ops/defs/recurrent.py | freedomtan/coremltools | 5ee9b537b81c44c140a2fa7571e547dfaa24e1ea | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import get_new_symbol
from ._op_reqs import *
@register_op(doc_str="")
class gru(Operation):
    """Gated recurrent unit (GRU) sequence op.

    Runs a single-direction GRU over ``x`` of shape ``(s, b, I)``
    (sequence length, batch, input size) starting from hidden state
    ``initial_h`` of shape ``(b, H)``.  ``weight`` stacks the input-hidden
    rows on top of the hidden-hidden rows, shape ``(I + H, 3*H)``, with
    three gate columns per hidden unit.  Returns the per-step hidden
    states (all ``s`` steps when ``output_sequence`` is True, otherwise
    only the final step) together with the final hidden state ``(b, H)``.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        initial_h=TensorInputType(),
        weight=TensorInputType(const=True),
        bias=TensorInputType(const=True, optional=True, default=None),
        direction=StringInputType(const=True, default="forward"),
        output_sequence=BoolInputType(const=True, default=False),
        recurrent_activation=StringInputType(const=True, default="sigmoid"),
        activation=StringInputType(const=True, default="tanh")
    )

    def __init__(self, **kwargs):
        super(gru, self).__init__(**kwargs)

    def type_inference(self):
        """Validate input/weight ranks and shapes, then build output types."""
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    len(self.x.shape)
                )
            )
        seq_len, batch, input_dim = self.x.shape

        if self.weight.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    len(self.weight.shape)
                )
            )
        stacked_rows, gate_cols = self.weight.shape
        # Rows stack [input-hidden; hidden-hidden], so the hidden size is
        # whatever remains once the input rows are removed.
        state_dim = stacked_rows - input_dim

        chosen = self.direction.val
        valid_directions = {"forward", "reverse"}
        if chosen not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    chosen, valid_directions
                )
            )

        # Three gates (update / reset / output) per hidden unit.
        dim_factor = 3
        if state_dim != (gate_cols // dim_factor):
            raise ValueError(
                "Incorrect weight matrix: hidden dim size mismatch. \
                Provided {}. Expecting <b, 3*H>".format(
                    self.weight.shape
                )
            )

        if self.output_sequence.val:
            steps_out = seq_len
        else:
            steps_out = 1
        return (
            types.tensor(self.x.dtype, (steps_out, batch, state_dim)),
            types.tensor(self.x.dtype, (batch, state_dim)),
        )
@register_op(doc_str="")
class lstm(Operation):
    """Long short-term memory (LSTM) sequence op.

    Runs an LSTM over ``x`` of shape ``(s, b, I)`` from initial hidden and
    cell states of shape ``(b, DIRECTION*H)``, where ``DIRECTION`` is 1
    for ``forward``/``reverse`` and 2 for ``bidirectional``.  ``weight``
    has shape ``(I + H, 4*DIRECTION*H)`` with gate columns in i/f/o/z
    order.  Returns the per-step hidden states (all steps when
    ``output_sequence`` is True, else only the last), plus the final
    hidden and cell states.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        initial_h=TensorInputType(),
        initial_c=TensorInputType(),
        weight=TensorInputType(const=True),  # ifoz layout
        bias=TensorInputType(const=True, optional=True, default=None),  # ifoz layout
        direction=StringInputType(const=True, default="forward"),
        output_sequence=BoolInputType(const=True, default=False),
        recurrent_activation=StringInputType(const=True, default="sigmoid"),
        cell_activation=StringInputType(const=True, default="tanh"),
        activation=StringInputType(const=True, default="tanh"),
        peephole=TensorInputType(const=True, optional=True, default=None),  # ifo layout
        clip=FloatInputType(const=True, optional=True, default=None),
    )

    def __init__(self, **kwargs):
        super(lstm, self).__init__(**kwargs)

    def type_inference(self):
        """Validate ranks/shapes and derive the three output tensor types."""
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    len(self.x.shape)
                )
            )
        seq_len, batch, input_dim = self.x.shape

        if self.weight.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    len(self.weight.shape)
                )
            )
        stacked_rows, gate_cols = self.weight.shape
        # Hidden size: weight rows left over after the input rows.
        state_dim = stacked_rows - input_dim

        chosen = self.direction.val
        valid_directions = {"forward", "reverse", "bidirectional"}
        if chosen not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    chosen, valid_directions
                )
            )

        # Four gates per hidden unit, doubled for bidirectional runs.
        if chosen == "bidirectional":
            dim_factor = 8
        else:
            dim_factor = 4
        if state_dim != (gate_cols // dim_factor):
            raise ValueError(
                "Incorrect weight matrix: hidden dim size mismatch. \
                Provided {}. Expecting <b, 4*DIRECTION*H>".format(
                    self.weight.shape
                )
            )

        steps_out = seq_len if self.output_sequence.val else 1
        num_directions = dim_factor // 4
        state_cols = num_directions * state_dim
        return (
            types.tensor(self.x.dtype, (steps_out, batch, state_cols)),
            types.tensor(self.x.dtype, (batch, state_cols)),
            types.tensor(self.x.dtype, (batch, state_cols)),
        )
@register_op(doc_str="")
class rnn(Operation):
    """Vanilla recurrent neural network (RNN) sequence op.

    Runs a single-direction RNN over ``x`` of shape ``(s, b, I)`` starting
    from hidden state ``initial_h`` of shape ``(b, H)``.  Returns the
    per-step hidden states (all ``s`` steps when ``output_sequence`` is
    True, otherwise only the final step) and the final hidden state.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        initial_h=TensorInputType(),
        weight=TensorInputType(const=True),
        bias=TensorInputType(const=True, optional=True, default=None),
        direction=StringInputType(const=True, default="forward"),
        output_sequence=BoolInputType(const=True, default=False),
        activation=StringInputType(const=True, default="tanh"),
    )

    def __init__(self, **kwargs):
        super(rnn, self).__init__(**kwargs)

    def type_inference(self):
        """Validate ranks and direction, then build the two output types."""
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    len(self.x.shape)
                )
            )
        seq_len, batch, _ = self.x.shape

        if self.weight.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    len(self.weight.shape)
                )
            )
        # Hidden size is the column count of the weight matrix.
        state_dim = self.weight.shape[1]

        chosen = self.direction.val
        valid_directions = {"forward", "reverse"}
        if chosen not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    chosen, valid_directions
                )
            )

        steps_out = seq_len if self.output_sequence.val else 1
        return (
            types.tensor(self.x.dtype, (steps_out, batch, state_dim)),
            types.tensor(self.x.dtype, (batch, state_dim)),
        )
| 36.009524 | 89 | 0.560698 |
from coremltools.converters.mil.mil import get_new_symbol
from ._op_reqs import *
@register_op(doc_str="")
class gru(Operation):
    """Gated recurrent unit (GRU) over a full input sequence.

    ``x`` is ``(s, b, I)``; ``initial_h`` is ``(b, H)``; ``weight`` stacks
    input-hidden rows over hidden-hidden rows, ``(I + H, 3*H)``.  Emits the
    per-step hidden states (all steps if ``output_sequence`` else just the
    last) and the final hidden state.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        initial_h=TensorInputType(),
        weight=TensorInputType(const=True),
        bias=TensorInputType(const=True, optional=True, default=None),
        direction=StringInputType(const=True, default="forward"),
        output_sequence=BoolInputType(const=True, default=False),
        recurrent_activation=StringInputType(const=True, default="sigmoid"),
        activation=StringInputType(const=True, default="tanh")
    )

    def __init__(self, **kwargs):
        super(gru, self).__init__(**kwargs)

    def type_inference(self):
        """Shape-check inputs and produce the two output tensor types."""
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    len(self.x.shape)
                )
            )
        n_steps, n_batch, n_inputs = self.x.shape

        if self.weight.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    len(self.weight.shape)
                )
            )
        total_rows, total_cols = self.weight.shape
        # Hidden size = stacked weight rows minus the input rows.
        n_hidden = total_rows - n_inputs

        picked = self.direction.val
        valid_directions = {"forward", "reverse"}
        if picked not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    picked, valid_directions
                )
            )

        # The GRU packs three gates per hidden unit into the columns.
        gates = 3
        if n_hidden != (total_cols // gates):
            raise ValueError(
                "Incorrect weight matrix: hidden dim size mismatch. \
                Provided {}. Expecting <b, 3*H>".format(
                    self.weight.shape
                )
            )

        emitted = n_steps if self.output_sequence.val else 1
        return (
            types.tensor(self.x.dtype, (emitted, n_batch, n_hidden)),
            types.tensor(self.x.dtype, (n_batch, n_hidden)),
        )
@register_op(doc_str="")
class lstm(Operation):
    """Long short-term memory (LSTM) over a full input sequence.

    ``x`` is ``(s, b, I)``; the initial hidden/cell states are
    ``(b, DIRECTION*H)`` where ``DIRECTION`` is 2 only for
    ``bidirectional``.  ``weight`` is ``(I + H, 4*DIRECTION*H)`` in
    i/f/o/z gate order.  Emits the per-step hidden states plus the final
    hidden and cell states.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        initial_h=TensorInputType(),
        initial_c=TensorInputType(),
        weight=TensorInputType(const=True),
        bias=TensorInputType(const=True, optional=True, default=None),
        direction=StringInputType(const=True, default="forward"),
        output_sequence=BoolInputType(const=True, default=False),
        recurrent_activation=StringInputType(const=True, default="sigmoid"),
        cell_activation=StringInputType(const=True, default="tanh"),
        activation=StringInputType(const=True, default="tanh"),
        peephole=TensorInputType(const=True, optional=True, default=None),
        clip=FloatInputType(const=True, optional=True, default=None),
    )

    def __init__(self, **kwargs):
        super(lstm, self).__init__(**kwargs)

    def type_inference(self):
        """Shape-check inputs and produce the three output tensor types."""
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    len(self.x.shape)
                )
            )
        n_steps, n_batch, n_inputs = self.x.shape

        if self.weight.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    len(self.weight.shape)
                )
            )
        total_rows, total_cols = self.weight.shape
        n_hidden = total_rows - n_inputs

        picked = self.direction.val
        valid_directions = {"forward", "reverse", "bidirectional"}
        if picked not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    picked, valid_directions
                )
            )

        # Four gate columns per hidden unit; doubled when bidirectional.
        if picked == "bidirectional":
            gates = 8
        else:
            gates = 4
        if n_hidden != (total_cols // gates):
            raise ValueError(
                "Incorrect weight matrix: hidden dim size mismatch. \
                Provided {}. Expecting <b, 4*DIRECTION*H>".format(
                    self.weight.shape
                )
            )

        emitted = n_steps if self.output_sequence.val else 1
        n_dirs = gates // 4
        width = n_dirs * n_hidden
        return (
            types.tensor(self.x.dtype, (emitted, n_batch, width)),
            types.tensor(self.x.dtype, (n_batch, width)),
            types.tensor(self.x.dtype, (n_batch, width)),
        )
@register_op(doc_str="")
class rnn(Operation):
    """Plain RNN over a full input sequence.

    ``x`` is ``(s, b, I)``; ``initial_h`` is ``(b, H)``.  Emits the
    per-step hidden states (all steps if ``output_sequence`` else just the
    last) and the final hidden state.
    """

    input_spec = InputSpec(
        x=TensorInputType(),
        initial_h=TensorInputType(),
        weight=TensorInputType(const=True),
        bias=TensorInputType(const=True, optional=True, default=None),
        direction=StringInputType(const=True, default="forward"),
        output_sequence=BoolInputType(const=True, default=False),
        activation=StringInputType(const=True, default="tanh"),
    )

    def __init__(self, **kwargs):
        super(rnn, self).__init__(**kwargs)

    def type_inference(self):
        """Shape-check inputs and produce the two output tensor types."""
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    len(self.x.shape)
                )
            )
        n_steps, n_batch, _ = self.x.shape

        if self.weight.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    len(self.weight.shape)
                )
            )
        # The weight column count is the hidden size.
        n_hidden = self.weight.shape[1]

        picked = self.direction.val
        valid_directions = {"forward", "reverse"}
        if picked not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    picked, valid_directions
                )
            )

        emitted = n_steps if self.output_sequence.val else 1
        return (
            types.tensor(self.x.dtype, (emitted, n_batch, n_hidden)),
            types.tensor(self.x.dtype, (n_batch, n_hidden)),
        )
| true | true |
f7296f92c8bd8dfa5fbda73084e6170fffd08c76 | 1,403 | py | Python | RPSJacobi2.py.py | KuhlersClassroom/PythonRPS | 6393a4d8a2758433bcb5a16190d1699239f8991b | [
"MIT"
] | null | null | null | RPSJacobi2.py.py | KuhlersClassroom/PythonRPS | 6393a4d8a2758433bcb5a16190d1699239f8991b | [
"MIT"
] | null | null | null | RPSJacobi2.py.py | KuhlersClassroom/PythonRPS | 6393a4d8a2758433bcb5a16190d1699239f8991b | [
"MIT"
] | null | null | null | import random
# Rock-paper-scissors against the computer: first to 3 round wins.
symbols = ['rock', 'paper', 'scissors']
# Table-driven scoring: maps each symbol to the symbol it defeats.  This
# replaces three copies of the same win/lose branching.
beats = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}

player_wins = 0
computer_wins = 0
while max([player_wins, computer_wins]) < 3:
    # Re-prompt until the player types a valid symbol.
    player_symbol = None
    while player_symbol is None:
        input_symbol = input("What symbol do you want? ")
        if input_symbol in symbols:
            player_symbol = input_symbol
        else:
            print('Please pick rock, paper, or scissors.')
    computer_symbol = random.choice(symbols)
    print('Player: ', player_symbol)
    print('Computer: ', computer_symbol)
    # Score the round via the beats table.
    if player_symbol == computer_symbol:
        print('Tie!')
    elif beats[player_symbol] == computer_symbol:
        print('player wins!')
        player_wins += 1
    else:
        print('computer wins!')
        computer_wins += 1

# Final score.
print('player wins!: ')
print(player_wins)
print('computer wins!: ')
print(computer_wins)
| 26.471698 | 59 | 0.548824 | import random
# Rock-paper-scissors against the computer: first to 3 round wins.
symbols = ['rock', 'paper', 'scissors']
player_wins = 0
computer_wins = 0
# Play rounds until either side reaches 3 wins.
while max([player_wins, computer_wins]) < 3:
    player_symbol = None
    # Re-prompt until the player types a valid symbol.
    while player_symbol is None:
        input_symbol = input("What symbol do you want? ")
        if input_symbol in symbols:
            player_symbol = input_symbol
        else:
            print('Please pick rock, paper, or scissors.')
    computer_symbol = random.choice(symbols)
    print('Player: ', player_symbol)
    print('Computer: ', computer_symbol)
    # Score the round: tie first, then one branch per player symbol.
    if player_symbol == computer_symbol:
        print('Tie!')
    elif player_symbol == 'rock':
        if computer_symbol == 'paper':
            print('computer wins!')
            computer_wins += 1
        else:
            print('player wins!')
            player_wins += 1
    elif player_symbol == 'paper':
        if computer_symbol == 'scissors':
            print('computer wins!')
            computer_wins += 1
        else:
            print('player wins!')
            player_wins += 1
    elif player_symbol == 'scissors':
        if computer_symbol == 'rock':
            print('computer wins!')
            computer_wins += 1
        else:
            print('player wins!')
            player_wins += 1
# Final score.
print('player wins!: ')
print(player_wins)
print('computer wins!: ')
print(computer_wins)
| true | true |
f729700d53fe2914422fc5bfb94c839529955fce | 9,972 | py | Python | src/openfermion/utils/rdm_mapping_functions_test.py | Emieeel/OpenFermion | c19d9667c5970473893f9bc0183556c4cd354dd7 | [
"Apache-2.0"
] | 1,291 | 2017-09-27T22:00:26.000Z | 2022-03-25T14:34:50.000Z | src/openfermion/utils/rdm_mapping_functions_test.py | SamarthVadia/OpenFermion | 865d8591cad9b0681f6dd25a391a5292ed2de1d4 | [
"Apache-2.0"
] | 521 | 2017-09-27T21:36:17.000Z | 2022-03-02T12:45:56.000Z | src/openfermion/utils/rdm_mapping_functions_test.py | SamarthVadia/OpenFermion | 865d8591cad9b0681f6dd25a391a5292ed2de1d4 | [
"Apache-2.0"
] | 365 | 2017-09-27T21:25:38.000Z | 2022-03-29T19:28:46.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rdm_mapping_functions.py"""
import os
import unittest
import numpy
import h5py
from openfermion.config import DATA_DIRECTORY, THIS_DIRECTORY
from openfermion.chem import MolecularData
from openfermion.utils.rdm_mapping_functions import (
kronecker_delta, map_two_pdm_to_two_hole_dm, map_two_pdm_to_one_pdm,
map_one_pdm_to_one_hole_dm, map_one_hole_dm_to_one_pdm,
map_two_pdm_to_particle_hole_dm, map_two_hole_dm_to_two_pdm,
map_two_hole_dm_to_one_hole_dm, map_particle_hole_dm_to_one_pdm,
map_particle_hole_dm_to_two_pdm)
class RDMMappingTest(unittest.TestCase):
def setUp(self):
# load files and marginals from testing folder
tqdm_h2_sto3g = os.path.join(THIS_DIRECTORY,
'testing/tqdm_H2_sto-3g_singlet_1.4.hdf5')
with h5py.File(tqdm_h2_sto3g, 'r') as fid:
self.tqdm_h2_sto3g = fid['tqdm'][...]
phdm_h2_sto3g = os.path.join(THIS_DIRECTORY,
'testing/phdm_H2_sto-3g_singlet_1.4.hdf5')
with h5py.File(phdm_h2_sto3g, 'r') as fid:
self.phdm_h2_sto3g = fid['phdm'][...]
tqdm_h2_6_31g = os.path.join(THIS_DIRECTORY,
'testing/tqdm_H2_6-31g_singlet_0.75.hdf5')
with h5py.File(tqdm_h2_6_31g, 'r') as fid:
self.tqdm_h2_6_31g = fid['tqdm'][...]
phdm_h2_6_31g = os.path.join(THIS_DIRECTORY,
'testing/phdm_H2_6-31g_singlet_0.75.hdf5')
with h5py.File(phdm_h2_6_31g, 'r') as fid:
self.phdm_h2_6_31g = fid['phdm'][...]
tqdm_lih_sto3g = os.path.join(
THIS_DIRECTORY, 'testing/tqdm_H1-Li1_sto-3g_singlet_1.45.hdf5')
with h5py.File(tqdm_lih_sto3g, 'r') as fid:
self.tqdm_lih_sto3g = fid['tqdm'][...]
phdm_lih_sto3g = os.path.join(
THIS_DIRECTORY, 'testing/phdm_H1-Li1_sto-3g_singlet_1.45.hdf5')
with h5py.File(phdm_lih_sto3g, 'r') as fid:
self.phdm_lih_sto3g = fid['phdm'][...]
def test_kronecker_delta_00(self):
assert kronecker_delta(0, 0) == 1
def test_kronecker_delta_01(self):
assert kronecker_delta(0, 1) == 0
def test_kronecker_delta_10(self):
assert kronecker_delta(1, 0) == 0
def test_kronecker_delta_11(self):
assert kronecker_delta(1, 1) == 1
def test_kronecker_delta_nonunit_args(self):
assert kronecker_delta(3, 3) == 1
def test_tpdm_to_opdm(self):
# for all files in datadirectory check if this map holds
for file in filter(lambda x: x.endswith(".hdf5"),
os.listdir(DATA_DIRECTORY)):
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, file))
if (molecule.fci_one_rdm is not None and
molecule.fci_two_rdm is not None):
test_opdm = map_two_pdm_to_one_pdm(molecule.fci_two_rdm,
molecule.n_electrons)
assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
def test_opdm_to_oqdm(self):
for file in filter(lambda x: x.endswith(".hdf5"),
os.listdir(DATA_DIRECTORY)):
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, file))
if molecule.fci_one_rdm is not None:
test_oqdm = map_one_pdm_to_one_hole_dm(molecule.fci_one_rdm)
true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
assert numpy.allclose(test_oqdm, true_oqdm)
def test_oqdm_to_opdm(self):
for file in filter(lambda x: x.endswith(".hdf5"),
os.listdir(DATA_DIRECTORY)):
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, file))
if molecule.fci_one_rdm is not None:
true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
test_opdm = map_one_hole_dm_to_one_pdm(true_oqdm)
assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
def test_tqdm_conversions_h2_631g(self):
# construct the 2-hole-RDM for LiH the slow way
# TODO: speed up this calculation by directly contracting from the wf.
filename = "H2_6-31g_singlet_0.75.hdf5"
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, filename))
true_tqdm = self.tqdm_h2_6_31g
test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,
molecule.fci_one_rdm)
assert numpy.allclose(true_tqdm, test_tqdm)
true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
test_oqdm = map_two_hole_dm_to_one_hole_dm(
true_tqdm, molecule.n_qubits - molecule.n_electrons)
assert numpy.allclose(true_oqdm, test_oqdm)
test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)
assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)
def test_tqdm_conversions_h2_sto3g(self):
filename = "H2_sto-3g_singlet_1.4.hdf5"
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, filename))
true_tqdm = self.tqdm_h2_sto3g
test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,
molecule.fci_one_rdm)
assert numpy.allclose(true_tqdm, test_tqdm)
true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
test_oqdm = map_two_hole_dm_to_one_hole_dm(
true_tqdm, molecule.n_qubits - molecule.n_electrons)
assert numpy.allclose(true_oqdm, test_oqdm)
test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)
assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)
def test_tqdm_conversions_lih_sto3g(self):
filename = "H1-Li1_sto-3g_singlet_1.45.hdf5"
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, filename))
true_tqdm = self.tqdm_lih_sto3g
test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,
molecule.fci_one_rdm)
assert numpy.allclose(true_tqdm, test_tqdm)
true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
test_oqdm = map_two_hole_dm_to_one_hole_dm(
true_tqdm, molecule.n_qubits - molecule.n_electrons)
assert numpy.allclose(true_oqdm, test_oqdm)
test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)
assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)
def test_phdm_conversions_h2_631g(self):
filename = "H2_6-31g_singlet_0.75.hdf5"
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, filename))
true_phdm = self.phdm_h2_6_31g
test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,
molecule.fci_one_rdm)
assert numpy.allclose(test_phdm, true_phdm)
test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,
molecule.n_electrons,
molecule.n_qubits)
assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,
molecule.fci_one_rdm)
assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)
def test_phdm_conversions_h2_sto3g(self):
filename = "H2_sto-3g_singlet_1.4.hdf5"
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, filename))
true_phdm = self.phdm_h2_sto3g
test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,
molecule.fci_one_rdm)
assert numpy.allclose(test_phdm, true_phdm)
test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,
molecule.n_electrons,
molecule.n_qubits)
assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,
molecule.fci_one_rdm)
assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)
def test_phdm_conversions_lih_sto3g(self):
filename = "H1-Li1_sto-3g_singlet_1.45.hdf5"
molecule = MolecularData(
filename=os.path.join(DATA_DIRECTORY, filename))
true_phdm = self.phdm_lih_sto3g
test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,
molecule.fci_one_rdm)
assert numpy.allclose(test_phdm, true_phdm)
test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,
molecule.n_electrons,
molecule.n_qubits)
assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,
molecule.fci_one_rdm)
assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)
| 45.122172 | 79 | 0.632872 |
import os
import unittest
import numpy
import h5py
from openfermion.config import DATA_DIRECTORY, THIS_DIRECTORY
from openfermion.chem import MolecularData
from openfermion.utils.rdm_mapping_functions import (
kronecker_delta, map_two_pdm_to_two_hole_dm, map_two_pdm_to_one_pdm,
map_one_pdm_to_one_hole_dm, map_one_hole_dm_to_one_pdm,
map_two_pdm_to_particle_hole_dm, map_two_hole_dm_to_two_pdm,
map_two_hole_dm_to_one_hole_dm, map_particle_hole_dm_to_one_pdm,
map_particle_hole_dm_to_two_pdm)
class RDMMappingTest(unittest.TestCase):
    """Round-trip tests for OpenFermion's RDM mapping functions.

    Checks the maps between particle (PDM), hole (QDM), and particle-hole
    density matrices against FCI data shipped in ``DATA_DIRECTORY`` and
    against precomputed reference matrices stored under ``testing/``.
    """

    def setUp(self):
        """Load precomputed reference tqdm/phdm arrays from HDF5 files."""
        # Two-hole and particle-hole reference matrices for H2 (sto-3g).
        tqdm_h2_sto3g = os.path.join(THIS_DIRECTORY,
                                     'testing/tqdm_H2_sto-3g_singlet_1.4.hdf5')
        with h5py.File(tqdm_h2_sto3g, 'r') as fid:
            self.tqdm_h2_sto3g = fid['tqdm'][...]
        phdm_h2_sto3g = os.path.join(THIS_DIRECTORY,
                                     'testing/phdm_H2_sto-3g_singlet_1.4.hdf5')
        with h5py.File(phdm_h2_sto3g, 'r') as fid:
            self.phdm_h2_sto3g = fid['phdm'][...]
        # Same pair for H2 (6-31g).
        tqdm_h2_6_31g = os.path.join(THIS_DIRECTORY,
                                     'testing/tqdm_H2_6-31g_singlet_0.75.hdf5')
        with h5py.File(tqdm_h2_6_31g, 'r') as fid:
            self.tqdm_h2_6_31g = fid['tqdm'][...]
        phdm_h2_6_31g = os.path.join(THIS_DIRECTORY,
                                     'testing/phdm_H2_6-31g_singlet_0.75.hdf5')
        with h5py.File(phdm_h2_6_31g, 'r') as fid:
            self.phdm_h2_6_31g = fid['phdm'][...]
        # Same pair for LiH (sto-3g).
        tqdm_lih_sto3g = os.path.join(
            THIS_DIRECTORY, 'testing/tqdm_H1-Li1_sto-3g_singlet_1.45.hdf5')
        with h5py.File(tqdm_lih_sto3g, 'r') as fid:
            self.tqdm_lih_sto3g = fid['tqdm'][...]
        phdm_lih_sto3g = os.path.join(
            THIS_DIRECTORY, 'testing/phdm_H1-Li1_sto-3g_singlet_1.45.hdf5')
        with h5py.File(phdm_lih_sto3g, 'r') as fid:
            self.phdm_lih_sto3g = fid['phdm'][...]

    def test_kronecker_delta_00(self):
        """delta(0, 0) == 1."""
        assert kronecker_delta(0, 0) == 1

    def test_kronecker_delta_01(self):
        """delta(0, 1) == 0."""
        assert kronecker_delta(0, 1) == 0

    def test_kronecker_delta_10(self):
        """delta(1, 0) == 0."""
        assert kronecker_delta(1, 0) == 0

    def test_kronecker_delta_11(self):
        """delta(1, 1) == 1."""
        assert kronecker_delta(1, 1) == 1

    def test_kronecker_delta_nonunit_args(self):
        """Equal indices other than 0/1 also evaluate to 1."""
        assert kronecker_delta(3, 3) == 1

    def test_tpdm_to_opdm(self):
        """Contracting the 2-PDM reproduces the stored FCI 1-PDM."""
        # For all files in the data directory, check that this map holds.
        for file in filter(lambda x: x.endswith(".hdf5"),
                           os.listdir(DATA_DIRECTORY)):
            molecule = MolecularData(
                filename=os.path.join(DATA_DIRECTORY, file))
            if (molecule.fci_one_rdm is not None and
                    molecule.fci_two_rdm is not None):
                test_opdm = map_two_pdm_to_one_pdm(molecule.fci_two_rdm,
                                                   molecule.n_electrons)
                assert numpy.allclose(test_opdm, molecule.fci_one_rdm)

    def test_opdm_to_oqdm(self):
        """1-PDM -> 1-hole-DM must equal I - 1-PDM."""
        for file in filter(lambda x: x.endswith(".hdf5"),
                           os.listdir(DATA_DIRECTORY)):
            molecule = MolecularData(
                filename=os.path.join(DATA_DIRECTORY, file))
            if molecule.fci_one_rdm is not None:
                test_oqdm = map_one_pdm_to_one_hole_dm(molecule.fci_one_rdm)
                true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
                assert numpy.allclose(test_oqdm, true_oqdm)

    def test_oqdm_to_opdm(self):
        """Round trip 1-hole-DM -> 1-PDM recovers the stored 1-PDM."""
        for file in filter(lambda x: x.endswith(".hdf5"),
                           os.listdir(DATA_DIRECTORY)):
            molecule = MolecularData(
                filename=os.path.join(DATA_DIRECTORY, file))
            if molecule.fci_one_rdm is not None:
                true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
                test_opdm = map_one_hole_dm_to_one_pdm(true_oqdm)
                assert numpy.allclose(test_opdm, molecule.fci_one_rdm)

    def test_tqdm_conversions_h2_631g(self):
        """2-hole-DM maps for H2/6-31g against the stored reference."""
        filename = "H2_6-31g_singlet_0.75.hdf5"
        molecule = MolecularData(
            filename=os.path.join(DATA_DIRECTORY, filename))
        true_tqdm = self.tqdm_h2_6_31g
        test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,
                                               molecule.fci_one_rdm)
        assert numpy.allclose(true_tqdm, test_tqdm)
        # Contraction of the 2-hole-DM gives the 1-hole-DM.
        true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
        test_oqdm = map_two_hole_dm_to_one_hole_dm(
            true_tqdm, molecule.n_qubits - molecule.n_electrons)
        assert numpy.allclose(true_oqdm, test_oqdm)
        # Inverse map recovers the 2-PDM.
        test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)
        assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)

    def test_tqdm_conversions_h2_sto3g(self):
        """2-hole-DM maps for H2/sto-3g against the stored reference."""
        filename = "H2_sto-3g_singlet_1.4.hdf5"
        molecule = MolecularData(
            filename=os.path.join(DATA_DIRECTORY, filename))
        true_tqdm = self.tqdm_h2_sto3g
        test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,
                                               molecule.fci_one_rdm)
        assert numpy.allclose(true_tqdm, test_tqdm)
        true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
        test_oqdm = map_two_hole_dm_to_one_hole_dm(
            true_tqdm, molecule.n_qubits - molecule.n_electrons)
        assert numpy.allclose(true_oqdm, test_oqdm)
        test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)
        assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)

    def test_tqdm_conversions_lih_sto3g(self):
        """2-hole-DM maps for LiH/sto-3g against the stored reference."""
        filename = "H1-Li1_sto-3g_singlet_1.45.hdf5"
        molecule = MolecularData(
            filename=os.path.join(DATA_DIRECTORY, filename))
        true_tqdm = self.tqdm_lih_sto3g
        test_tqdm = map_two_pdm_to_two_hole_dm(molecule.fci_two_rdm,
                                               molecule.fci_one_rdm)
        assert numpy.allclose(true_tqdm, test_tqdm)
        true_oqdm = numpy.eye(molecule.n_qubits) - molecule.fci_one_rdm
        test_oqdm = map_two_hole_dm_to_one_hole_dm(
            true_tqdm, molecule.n_qubits - molecule.n_electrons)
        assert numpy.allclose(true_oqdm, test_oqdm)
        test_tpdm = map_two_hole_dm_to_two_pdm(true_tqdm, molecule.fci_one_rdm)
        assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)

    def test_phdm_conversions_h2_631g(self):
        """Particle-hole-DM maps for H2/6-31g against the stored reference."""
        filename = "H2_6-31g_singlet_0.75.hdf5"
        molecule = MolecularData(
            filename=os.path.join(DATA_DIRECTORY, filename))
        true_phdm = self.phdm_h2_6_31g
        test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,
                                                    molecule.fci_one_rdm)
        assert numpy.allclose(test_phdm, true_phdm)
        test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,
                                                    molecule.n_electrons,
                                                    molecule.n_qubits)
        assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
        test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,
                                                    molecule.fci_one_rdm)
        assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)

    def test_phdm_conversions_h2_sto3g(self):
        """Particle-hole-DM maps for H2/sto-3g against the stored reference."""
        filename = "H2_sto-3g_singlet_1.4.hdf5"
        molecule = MolecularData(
            filename=os.path.join(DATA_DIRECTORY, filename))
        true_phdm = self.phdm_h2_sto3g
        test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,
                                                    molecule.fci_one_rdm)
        assert numpy.allclose(test_phdm, true_phdm)
        test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,
                                                    molecule.n_electrons,
                                                    molecule.n_qubits)
        assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
        test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,
                                                    molecule.fci_one_rdm)
        assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)

    def test_phdm_conversions_lih_sto3g(self):
        """Particle-hole-DM maps for LiH/sto-3g against the stored reference."""
        filename = "H1-Li1_sto-3g_singlet_1.45.hdf5"
        molecule = MolecularData(
            filename=os.path.join(DATA_DIRECTORY, filename))
        true_phdm = self.phdm_lih_sto3g
        test_phdm = map_two_pdm_to_particle_hole_dm(molecule.fci_two_rdm,
                                                    molecule.fci_one_rdm)
        assert numpy.allclose(test_phdm, true_phdm)
        test_opdm = map_particle_hole_dm_to_one_pdm(true_phdm,
                                                    molecule.n_electrons,
                                                    molecule.n_qubits)
        assert numpy.allclose(test_opdm, molecule.fci_one_rdm)
        test_tpdm = map_particle_hole_dm_to_two_pdm(true_phdm,
                                                    molecule.fci_one_rdm)
        assert numpy.allclose(test_tpdm, molecule.fci_two_rdm)
| true | true |
f72971baca3ca517c2e3eac6e71581887c328f74 | 20,278 | py | Python | botorch/test_functions/multi_objective.py | NTR0314/botorch | f0310c9a415947f3264dac7f3438744784843323 | [
"MIT"
] | null | null | null | botorch/test_functions/multi_objective.py | NTR0314/botorch | f0310c9a415947f3264dac7f3438744784843323 | [
"MIT"
] | 1 | 2021-04-17T11:04:24.000Z | 2021-04-17T11:18:12.000Z | botorch/test_functions/multi_objective.py | NTR0314/botorch | f0310c9a415947f3264dac7f3438744784843323 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-objective optimization benchmark problems.
References
.. [Deb2005dtlz]
K. Deb, L. Thiele, M. Laumanns, E. Zitzler, A. Abraham, L. Jain, R. Goldberg.
"Scalable test problems for evolutionary multi-objective optimization"
in Evolutionary Multiobjective Optimization, London, U.K.: Springer-Verlag,
pp. 105-145, 2005.
.. [GarridoMerchan2020]
E. C. Garrido-Merch ́an and D. Hern ́andez-Lobato. Parallel Predictive Entropy
Search for Multi-objective Bayesian Optimization with Constraints.
arXiv e-prints, arXiv:2004.00601, Apr. 2020.
.. [Gelbart2014]
Michael A. Gelbart, Jasper Snoek, and Ryan P. Adams. 2014. Bayesian
optimization with unknown constraints. In Proceedings of the Thirtieth
Conference on Uncertainty in Artificial Intelligence (UAI’14).
AUAI Press, Arlington, Virginia, USA, 250–259.
.. [Oszycka1995]
A. Osyczka, S. Kundu. 1995. A new method to solve generalized multicriteria
optimization problems using the simple genetic algorithm. In Structural
Optimization 10. 94–99.
.. [Tanabe2020]
Ryoji Tanabe, Hisao Ishibuchi, An easy-to-use real-world multi-objective
optimization problem suite, Applied Soft Computing,Volume 89, 2020.
.. [Yang2019a]
K. Yang, M. Emmerich, A. Deutz, and T. Bäck. 2019.
"Multi-Objective Bayesian Global Optimization using expected hypervolume
improvement gradient" in Swarm and evolutionary computation 44, pp. 945--956,
2019.
.. [Zitzler2000]
E. Zitzler, K. Deb, and L. Thiele, “Comparison of multiobjective
evolutionary algorithms: Empirical results,” Evol. Comput., vol. 8, no. 2,
pp. 173–195, 2000.
"""
from __future__ import annotations
import math
from typing import Optional
import torch
from botorch.test_functions.base import (
ConstrainedBaseTestProblem,
MultiObjectiveTestProblem,
)
from botorch.test_functions.synthetic import Branin
from botorch.utils.sampling import sample_hypersphere, sample_simplex
from botorch.utils.transforms import unnormalize
from scipy.special import gamma
from torch import Tensor
class BraninCurrin(MultiObjectiveTestProblem):
    r"""Two objective problem composed of the Branin and Currin functions.

    Branin (rescaled):

        f(x) = (
        15*x_1 - 5.1 * (15 * x_0 - 5) ** 2 / (4 * pi ** 2) + 5 * (15 * x_0 - 5)
        / pi - 5
        ) ** 2 + (10 - 10 / (8 * pi)) * cos(15 * x_0 - 5))

    Currin:

        f(x) = (1 - exp(-1 / (2 * x_1))) * (
        2300 * x_0 ** 3 + 1900 * x_0 ** 2 + 2092 * x_0 + 60
        ) / 100 * x_0 ** 3 + 500 * x_0 ** 2 + 4 * x_0 + 20
    """

    dim = 2
    num_objectives = 2
    _bounds = [(0.0, 1.0), (0.0, 1.0)]
    _ref_point = [18.0, 6.0]
    _max_hv = 59.36011874867746  # this is approximated using NSGA-II

    def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
        r"""Constructor for Branin-Currin.

        Args:
            noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objectives.
        """
        super().__init__(noise_std=noise_std, negate=negate)
        self._branin = Branin()

    def _rescaled_branin(self, X: Tensor) -> Tensor:
        # Map the unit-cube inputs back to the standard Branin bounds:
        # x_0 in [-5, 10], x_1 in [0, 15].
        x_0 = 15 * X[..., 0] - 5
        x_1 = 15 * X[..., 1]
        return self._branin(torch.stack([x_0, x_1], dim=-1))

    @staticmethod
    def _currin(X: Tensor) -> Tensor:
        """Evaluate the Currin exponential function on `[0, 1]^2` inputs."""
        x_0 = X[..., 0]
        x_1 = X[..., 1]
        factor1 = 1 - torch.exp(-1 / (2 * x_1))
        numer = 2300 * x_0.pow(3) + 1900 * x_0.pow(2) + 2092 * x_0 + 60
        denom = 100 * x_0.pow(3) + 500 * x_0.pow(2) + 4 * x_0 + 20
        return factor1 * numer / denom

    def evaluate_true(self, X: Tensor) -> Tensor:
        # Branin rescaled with inputs to [0, 1]^2; Currin evaluated directly.
        branin = self._rescaled_branin(X=X)
        currin = self._currin(X=X)
        return torch.stack([branin, currin], dim=-1)
class DTLZ(MultiObjectiveTestProblem):
    r"""Base class for DTLZ problems.

    See [Deb2005dtlz]_ for more details on DTLZ.
    """

    def __init__(
        self,
        dim: int,
        num_objectives: int = 2,
        noise_std: Optional[float] = None,
        negate: bool = False,
    ) -> None:
        r"""Constructor for the DTLZ problem family.

        Args:
            dim: Dimension of the input space; must be > ``num_objectives``.
            num_objectives: Number of objectives.
            noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objectives.

        Raises:
            ValueError: If ``dim <= num_objectives``.
        """
        if dim <= num_objectives:
            raise ValueError(
                f"dim must be > num_objectives, but got {dim} and {num_objectives}"
            )
        self.num_objectives = num_objectives
        self.dim = dim
        # Size of the trailing "distance" variable group X_m used by the
        # g(x) functions of the concrete subclasses.
        self.k = self.dim - self.num_objectives + 1
        self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
        # _ref_val is supplied by the concrete subclass.
        self._ref_point = [self._ref_val for _ in range(num_objectives)]
        super().__init__(noise_std=noise_std, negate=negate)
class DTLZ1(DTLZ):
    r"""DLTZ1 test problem.

    d-dimensional problem evaluated on `[0, 1]^d`:

        f_0(x) = 0.5 * x_0 * (1 + g(x))
        f_1(x) = 0.5 * (1 - x_0) * (1 + g(x))
        g(x) = 100 * \sum_{i=m}^{n-1} (
        k + (x_i - 0.5)^2 - cos(20 * pi * (x_i - 0.5))
        )

    where k = n - m + 1.

    The pareto front is given by the line (or hyperplane) \sum_i f_i(x) = 0.5.
    The goal is to minimize both objectives. The reference point comes from
    [Yang2019a]_.
    """

    _ref_val = 400.0

    @property
    def _max_hv(self) -> float:
        # Volume of [0, ref]^M minus the volume dominated by the simplex front.
        return self._ref_val ** self.num_objectives - 1 / 2 ** self.num_objectives

    def evaluate_true(self, X: Tensor) -> Tensor:
        # Trailing k "distance" variables feed the g(x) penalty term.
        X_m = X[..., -self.k :]
        X_m_minus_half = X_m - 0.5
        sum_term = (
            X_m_minus_half.pow(2) - torch.cos(20 * math.pi * X_m_minus_half)
        ).sum(dim=-1)
        g_X_m = 100 * (self.k + sum_term)
        g_X_m_term = 0.5 * (1 + g_X_m)
        fs = []
        for i in range(self.num_objectives):
            # Each objective multiplies a prefix-product of position
            # variables; all but the first also pick up a (1 - x_idx) factor.
            idx = self.num_objectives - 1 - i
            f_i = g_X_m_term * X[..., :idx].prod(dim=-1)
            if i > 0:
                f_i *= 1 - X[..., idx]
            fs.append(f_i)
        return torch.stack(fs, dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        r"""Generate `n` pareto optimal points.

        The pareto points randomly sampled from the hyperplane sum_i f(x_i) = 0.5.
        """
        f_X = 0.5 * sample_simplex(
            n=n,
            d=self.num_objectives,
            qmc=True,
            dtype=self.ref_point.dtype,
            device=self.ref_point.device,
        )
        if self.negate:
            f_X *= -1
        return f_X
class DTLZ2(DTLZ):
    r"""DLTZ2 test problem.

    d-dimensional problem evaluated on `[0, 1]^d`:

        f_0(x) = (1 + g(x)) * cos(x_0 * pi / 2)
        f_1(x) = (1 + g(x)) * sin(x_0 * pi / 2)
        g(x) = \sum_{i=m}^{n-1} (x_i - 0.5)^2

    The pareto front is given by the unit hypersphere \sum{i} f_i^2 = 1.
    Note: the pareto front is completely concave. The goal is to minimize
    both objectives.
    """

    _ref_val = 1.1

    @property
    def _max_hv(self) -> float:
        # hypercube - volume of hypersphere in R^n such that all coordinates
        # are positive
        hypercube_vol = self._ref_val ** self.num_objectives
        pos_hypersphere_vol = (
            math.pi ** (self.num_objectives / 2)
            / gamma(self.num_objectives / 2 + 1)
            / 2 ** self.num_objectives
        )
        return hypercube_vol - pos_hypersphere_vol

    def evaluate_true(self, X: Tensor) -> Tensor:
        # Trailing k "distance" variables feed the g(x) penalty term.
        X_m = X[..., -self.k :]
        g_X = (X_m - 0.5).pow(2).sum(dim=-1)
        g_X_plus1 = 1 + g_X
        fs = []
        pi_over_2 = math.pi / 2
        for i in range(self.num_objectives):
            # Spherical coordinates: cosine prefix product, with a sine
            # factor for every objective after the first.
            idx = self.num_objectives - 1 - i
            f_i = g_X_plus1.clone()
            f_i *= torch.cos(X[..., :idx] * pi_over_2).prod(dim=-1)
            if i > 0:
                f_i *= torch.sin(X[..., idx] * pi_over_2)
            fs.append(f_i)
        return torch.stack(fs, dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        r"""Generate `n` pareto optimal points.

        The pareto points are randomly sampled from the hypersphere's
        positive section.
        """
        f_X = sample_hypersphere(
            n=n,
            d=self.num_objectives,
            dtype=self.ref_point.dtype,
            device=self.ref_point.device,
            qmc=True,
        ).abs()
        if self.negate:
            f_X *= -1
        return f_X
class VehicleSafety(MultiObjectiveTestProblem):
    r"""Optimize Vehicle crash-worthiness.

    See [Tanabe2020]_ for details.

    The reference point is 1.1 * the nadir point from
    approximate front provided by [Tanabe2020]_.

    The maximum hypervolume is computed using the approximate
    pareto front from [Tanabe2020]_.
    """

    _ref_point = [1864.72022, 11.81993945, 0.2903999384]
    _max_hv = 246.81607081187002
    _bounds = [(1.0, 3.0)] * 5
    dim = 5
    num_objectives = 3

    def evaluate_true(self, X: Tensor) -> Tensor:
        # Three quadratic response-surface objectives in the five design
        # variables; coefficients are taken from [Tanabe2020]_.
        X1, X2, X3, X4, X5 = torch.split(X, 1, -1)
        # f1: linear in all five variables.
        f1 = (
            1640.2823
            + 2.3573285 * X1
            + 2.3220035 * X2
            + 4.5688768 * X3
            + 7.7213633 * X4
            + 4.4559504 * X5
        )
        # f2: quadratic with interaction terms.
        f2 = (
            6.5856
            + 1.15 * X1
            - 1.0427 * X2
            + 0.9738 * X3
            + 0.8364 * X4
            - 0.3695 * X1 * X4
            + 0.0861 * X1 * X5
            + 0.3628 * X2 * X4
            - 0.1106 * X1.pow(2)
            - 0.3437 * X3.pow(2)
            + 0.1764 * X4.pow(2)
        )
        # f3: quadratic with interaction terms.
        f3 = (
            -0.0551
            + 0.0181 * X1
            + 0.1024 * X2
            + 0.0421 * X3
            - 0.0073 * X1 * X2
            + 0.024 * X2 * X3
            - 0.0118 * X2 * X4
            - 0.0204 * X3 * X4
            - 0.008 * X3 * X5
            - 0.0241 * X2.pow(2)
            + 0.0109 * X4.pow(2)
        )
        f_X = torch.cat([f1, f2, f3], dim=-1)
        return f_X
class ZDT(MultiObjectiveTestProblem):
    r"""Base class for ZDT problems.

    See [Zitzler2000]_ for more details on ZDT.
    """

    _ref_point = [11.0, 11.0]

    def __init__(
        self,
        dim: int,
        num_objectives: int = 2,
        noise_std: Optional[float] = None,
        negate: bool = False,
    ) -> None:
        r"""Constructor for the ZDT problem family.

        Args:
            dim: Dimension of the input space; must be >= ``num_objectives``.
            num_objectives: Number of objectives; only 2 is supported.
            noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objectives.

        Raises:
            NotImplementedError: If ``num_objectives != 2``.
            ValueError: If ``dim < num_objectives``.
        """
        if num_objectives != 2:
            raise NotImplementedError(
                f"{type(self).__name__} currently only supports 2 objectives."
            )
        if dim < num_objectives:
            raise ValueError(
                f"dim must be >= num_objectives, but got {dim} and {num_objectives}"
            )
        self.num_objectives = num_objectives
        self.dim = dim
        self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
        super().__init__(noise_std=noise_std, negate=negate)

    @staticmethod
    def _g(X: Tensor) -> Tensor:
        # Shared "distance" function: g(x) = 1 + 9 * mean(x_1, ..., x_{d-1}).
        return 1 + 9 * X[..., 1:].mean(dim=-1)
class ZDT1(ZDT):
    r"""ZDT1 test problem.

    d-dimensional problem evaluated on `[0, 1]^d`:

        f_0(x) = x_0
        f_1(x) = g(x) * (1 - sqrt(x_0 / g(x))
        g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i

    The reference point comes from [Yang2019a]_.

    The pareto front is convex.
    """

    _max_hv = 120 + 2 / 3

    def evaluate_true(self, X: Tensor) -> Tensor:
        first_obj = X[..., 0]
        g_val = self._g(X=X)
        second_obj = g_val * (1 - torch.sqrt(first_obj / g_val))
        return torch.stack([first_obj, second_obj], dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        r"""Return `n` evenly spaced points on the (convex) pareto front."""
        first_obj = torch.linspace(
            0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
        )
        front = torch.stack([first_obj, 1 - torch.sqrt(first_obj)], dim=-1)
        return -front if self.negate else front
class ZDT2(ZDT):
    r"""ZDT2 test problem.

    d-dimensional problem evaluated on `[0, 1]^d`:

        f_0(x) = x_0
        f_1(x) = g(x) * (1 - (x_0 / g(x))^2)
        g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i

    The reference point comes from [Yang2019a]_.

    The pareto front is concave.
    """

    _max_hv = 120 + 1 / 3

    def evaluate_true(self, X: Tensor) -> Tensor:
        first_obj = X[..., 0]
        g_val = self._g(X=X)
        second_obj = g_val * (1 - (first_obj / g_val) ** 2)
        return torch.stack([first_obj, second_obj], dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        r"""Return `n` evenly spaced points on the (concave) pareto front."""
        first_obj = torch.linspace(
            0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
        )
        front = torch.stack([first_obj, 1 - first_obj ** 2], dim=-1)
        return -front if self.negate else front
class ZDT3(ZDT):
    r"""ZDT3 test problem.

    d-dimensional problem evaluated on `[0, 1]^d`:

        f_0(x) = x_0
        f_1(x) = 1 - sqrt(x_0 / g(x)) - x_0 / g * sin(10 * pi * x_0)
        g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i

    The reference point comes from [Yang2019a]_.

    The pareto front consists of several discontinuous convex parts.
    """

    _max_hv = 128.77811613069076060
    # Intervals of f_0 values covered by the disconnected front segments.
    _parts = [
        # this interval includes both end points
        [0, 0.0830015349],
        # this interval includes only the right end points
        [0.1822287280, 0.2577623634],
        [0.4093136748, 0.4538821041],
        [0.6183967944, 0.6525117038],
        [0.8233317983, 0.8518328654],
    ]
    # nugget to make sure linspace returns elements within the specified range
    _eps = 1e-6

    def evaluate_true(self, X: Tensor) -> Tensor:
        f_0 = X[..., 0]
        g = self._g(X=X)
        f_1 = 1 - (f_0 / g).sqrt() - f_0 / g * torch.sin(10 * math.pi * f_0)
        return torch.stack([f_0, f_1], dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        r"""Generate `n` pareto points spread over the disconnected segments."""
        n_parts = len(self._parts)
        # Distribute n as evenly as possible; the first `n % n_parts`
        # segments get one extra point.
        n_per_part = torch.full(
            torch.Size([n_parts]),
            n // n_parts,
            dtype=torch.long,
            device=self.bounds.device,
        )
        left_over = n % n_parts
        n_per_part[:left_over] += 1
        f_0s = []
        for i, p in enumerate(self._parts):
            left, right = p
            f_0s.append(
                torch.linspace(
                    left + self._eps,
                    right - self._eps,
                    n_per_part[i],
                    dtype=self.bounds.dtype,
                    device=self.bounds.device,
                )
            )
        f_0 = torch.cat(f_0s, dim=0)
        # On the front g(x) = 1, so f_1 simplifies accordingly.
        f_1 = 1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0)
        f_X = torch.stack([f_0, f_1], dim=-1)
        if self.negate:
            f_X *= -1
        return f_X
# ------ Constrained Multi-Objective Test Problems ----- #
class BNH(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    r"""The constrained BNH problem.

    See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
    minimization problem.
    """

    dim = 2
    num_objectives = 2
    num_constraints = 2
    _bounds = [(0.0, 5.0), (0.0, 3.0)]
    _ref_point = [0.0, 0.0]  # TODO: Determine proper reference point

    def evaluate_true(self, X: Tensor) -> Tensor:
        # f1 = 4 * (x0^2 + x1^2); f2 = (x0 - 5)^2 + (x1 - 5)^2.
        return torch.stack(
            [4.0 * (X ** 2).sum(dim=-1), ((X - 5.0) ** 2).sum(dim=-1)], dim=-1
        )

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # Slacks are >= 0 when feasible:
        # c1: (x0 - 5)^2 + x1^2 <= 25; c2: (x0 - 8)^2 + (x1 + 3)^2 >= 7.7.
        c1 = 25.0 - (X[..., 0] - 5.0) ** 2 - X[..., 1] ** 2
        c2 = (X[..., 0] - 8.0) ** 2 + (X[..., 1] + 3.0) ** 2 - 7.7
        return torch.stack([c1, c2], dim=-1)
class SRN(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    r"""The constrained SRN problem.

    See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
    minimization problem.
    """

    dim = 2
    num_objectives = 2
    num_constraints = 2
    _bounds = [(-20.0, 20.0), (-20.0, 20.0)]
    _ref_point = [0.0, 0.0]  # TODO: Determine proper reference point

    def evaluate_true(self, X: Tensor) -> Tensor:
        # f1 = 2 + (x0 - 2)^2 + (x1 - 2)^2; f2 = 9 * x0 - (x1 - 1)^2.
        obj1 = 2.0 + ((X - 2.0) ** 2).sum(dim=-1)
        obj2 = 9.0 * X[..., 0] - (X[..., 1] - 1.0) ** 2
        return torch.stack([obj1, obj2], dim=-1)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # c1 (>= 0 when feasible): x0^2 + x1^2 <= 225. The previous
        # implementation squared twice (`((X ** 2) ** 2)`), i.e. used x^4,
        # which does not match the SRN constraint from the literature.
        c1 = 225.0 - (X ** 2).sum(dim=-1)
        # c2 (>= 0 when feasible): x0 - 3 * x1 + 10 <= 0.
        c2 = -10.0 - X[..., 0] + 3 * X[..., 1]
        return torch.stack([c1, c2], dim=-1)
class CONSTR(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    r"""The constrained CONSTR problem.

    See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
    minimization problem.
    """

    dim = 2
    num_objectives = 2
    num_constraints = 2
    _bounds = [(0.1, 10.0), (0.0, 5.0)]
    _ref_point = [10.0, 10.0]

    def evaluate_true(self, X: Tensor) -> Tensor:
        # f1 = x0; f2 = (1 + x1) / x0 (x0 >= 0.1 by the bounds).
        obj1 = X[..., 0]
        obj2 = (1.0 + X[..., 1]) / X[..., 0]
        return torch.stack([obj1, obj2], dim=-1)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # Slacks are >= 0 when feasible:
        # c1: 9 * x0 + x1 >= 6; c2: 9 * x0 - x1 >= 1.
        c1 = 9.0 * X[..., 0] + X[..., 1] - 6.0
        c2 = 9.0 * X[..., 0] - X[..., 1] - 1.0
        return torch.stack([c1, c2], dim=-1)
class ConstrainedBraninCurrin(BraninCurrin, ConstrainedBaseTestProblem):
    r"""Constrained Branin Currin Function.

    This uses the disk constraint from [Gelbart2014]_.
    """

    dim = 2
    num_objectives = 2
    num_constraints = 1
    _bounds = [(0.0, 1.0), (0.0, 1.0)]
    # Bounds of the un-normalized space in which the disk constraint
    # is defined (the standard Branin domain).
    _con_bounds = [(-5.0, 10.0), (0.0, 15.0)]
    _ref_point = [80.0, 12.0]
    _max_hv = 608.4004237022673  # from NSGA-II with 90k evaluations

    def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
        r"""Constructor.

        Args:
            noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objectives.
        """
        super().__init__(noise_std=noise_std, negate=negate)
        con_bounds = torch.tensor(self._con_bounds, dtype=torch.float).transpose(-1, -2)
        self.register_buffer("con_bounds", con_bounds)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # Disk constraint (slack >= 0 when feasible):
        # (x0 - 2.5)^2 + (x1 - 7.5)^2 <= 50 in the un-normalized space.
        X_tf = unnormalize(X, self.con_bounds)
        return 50 - (X_tf[..., 0:1] - 2.5).pow(2) - (X_tf[..., 1:2] - 7.5).pow(2)
class C2DTLZ2(DTLZ2, ConstrainedBaseTestProblem):
    # DTLZ2 with the "C2" constraint: feasible points lie within distance
    # `_r` of either one of the objective-space axis points e_j or the
    # symmetric point (1/sqrt(M), ..., 1/sqrt(M)).

    num_constraints = 1
    _r = 0.2  # radius of the feasible spheres in objective space
    # approximate from nsga-ii, TODO: replace with analytic
    _max_hv = 0.3996406303723544

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # Only unbatched (n x d) inputs are supported by the gather logic.
        if X.ndim > 2:
            raise NotImplementedError("Batch X is not supported.")
        f_X = self.evaluate_true(X)
        # term1[n, j] = (f_j - 1)^2 for each objective j.
        term1 = (f_X - 1).pow(2)
        # Build, for each objective j, the indices of the other objectives
        # (all columns of an M x M index grid except the diagonal).
        mask = ~(torch.eye(f_X.shape[-1], device=f_X.device).bool())
        indices = torch.arange(f_X.shape[1], device=f_X.device).repeat(f_X.shape[1], 1)
        indexer = indices[mask].view(f_X.shape[1], f_X.shape[-1] - 1)
        # term2_inner[n, j, :] = objectives other than j.
        term2_inner = (
            f_X.unsqueeze(1)
            .expand(f_X.shape[0], f_X.shape[-1], f_X.shape[-1])
            .gather(dim=-1, index=indexer.repeat(f_X.shape[0], 1, 1))
        )
        term2 = (term2_inner.pow(2) - self._r ** 2).sum(dim=-1)
        # min over objectives j of (f_j - 1)^2 + sum_{i != j} (f_i^2 - r^2).
        min1 = (term1 + term2).min(dim=-1).values
        # Distance-squared terms to the symmetric point minus r^2.
        min2 = ((f_X - 1 / math.sqrt(f_X.shape[-1])).pow(2) - self._r ** 2).sum(dim=-1)
        # Slack is >= 0 when either condition is satisfied.
        return -torch.min(min1, min2).unsqueeze(-1)
class OSY(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    r"""
    The OSY test problem from [Oszycka1995]_.
    Implementation from
    https://github.com/msu-coinlab/pymoo/blob/master/pymoo/problems/multi/osy.py
    Note that this implementation assumes minimization, so please choose negate=True.
    """

    dim = 6
    num_constraints = 6
    num_objectives = 2
    _bounds = [
        (0.0, 10.0),
        (0.0, 10.0),
        (1.0, 5.0),
        (0.0, 6.0),
        (1.0, 5.0),
        (0.0, 10.0),
    ]
    _ref_point = [-75.0, 75.0]

    def evaluate_true(self, X: Tensor) -> Tensor:
        # f1: negated weighted sum of squared deviations (x5 unused here).
        f1 = -(
            25 * (X[..., 0] - 2) ** 2
            + (X[..., 1] - 2) ** 2
            + (X[..., 2] - 1) ** 2
            + (X[..., 3] - 4) ** 2
            + (X[..., 4] - 1) ** 2
        )
        # f2: sum of squares of all six variables.
        f2 = (X ** 2).sum(-1)
        return torch.stack([f1, f2], dim=-1)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # Six inequality constraints; each slack is >= 0 when feasible.
        g1 = X[..., 0] + X[..., 1] - 2.0
        g2 = 6.0 - X[..., 0] - X[..., 1]
        g3 = 2.0 - X[..., 1] + X[..., 0]
        g4 = 2.0 - X[..., 0] + 3.0 * X[..., 1]
        g5 = 4.0 - (X[..., 2] - 3.0) ** 2 - X[..., 3]
        g6 = (X[..., 4] - 3.0) ** 2 + X[..., 5] - 4.0
        return torch.stack([g1, g2, g3, g4, g5, g6], dim=-1)
| 31.149002 | 88 | 0.547589 |
from __future__ import annotations
import math
from typing import Optional
import torch
from botorch.test_functions.base import (
ConstrainedBaseTestProblem,
MultiObjectiveTestProblem,
)
from botorch.test_functions.synthetic import Branin
from botorch.utils.sampling import sample_hypersphere, sample_simplex
from botorch.utils.transforms import unnormalize
from scipy.special import gamma
from torch import Tensor
class BraninCurrin(MultiObjectiveTestProblem):
    r"""Two-objective problem composed of the Branin and Currin functions,
    both evaluated on the unit square `[0, 1]^2`."""

    dim = 2
    num_objectives = 2
    _bounds = [(0.0, 1.0), (0.0, 1.0)]
    _ref_point = [18.0, 6.0]
    _max_hv = 59.36011874867746  # approximate maximum hypervolume

    def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
        r"""Args:
            noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objectives.
        """
        super().__init__(noise_std=noise_std, negate=negate)
        self._branin = Branin()

    def _rescaled_branin(self, X: Tensor) -> Tensor:
        # Map unit-cube inputs to the standard Branin bounds:
        # x_0 in [-5, 10], x_1 in [0, 15].
        x_0 = 15 * X[..., 0] - 5
        x_1 = 15 * X[..., 1]
        return self._branin(torch.stack([x_0, x_1], dim=-1))

    @staticmethod
    def _currin(X: Tensor) -> Tensor:
        """Evaluate the Currin exponential function."""
        x_0 = X[..., 0]
        x_1 = X[..., 1]
        factor1 = 1 - torch.exp(-1 / (2 * x_1))
        numer = 2300 * x_0.pow(3) + 1900 * x_0.pow(2) + 2092 * x_0 + 60
        denom = 100 * x_0.pow(3) + 500 * x_0.pow(2) + 4 * x_0 + 20
        return factor1 * numer / denom

    def evaluate_true(self, X: Tensor) -> Tensor:
        """Return the stacked (branin, currin) objective values."""
        branin = self._rescaled_branin(X=X)
        currin = self._currin(X=X)
        return torch.stack([branin, currin], dim=-1)
class DTLZ(MultiObjectiveTestProblem):
    r"""Base class for the DTLZ multi-objective problem family."""

    def __init__(
        self,
        dim: int,
        num_objectives: int = 2,
        noise_std: Optional[float] = None,
        negate: bool = False,
    ) -> None:
        r"""Args:
            dim: Input dimension; must be > ``num_objectives``.
            num_objectives: Number of objectives.
            noise_std: Standard deviation of the observation noise.
            negate: If True, negate the objectives.

        Raises:
            ValueError: If ``dim <= num_objectives``.
        """
        if dim <= num_objectives:
            raise ValueError(
                f"dim must be > num_objectives, but got {dim} and {num_objectives}"
            )
        self.num_objectives = num_objectives
        self.dim = dim
        # Size of the trailing "distance" variable group used by subclasses.
        self.k = self.dim - self.num_objectives + 1
        self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
        # _ref_val is supplied by the concrete subclass.
        self._ref_point = [self._ref_val for _ in range(num_objectives)]
        super().__init__(noise_std=noise_std, negate=negate)
class DTLZ1(DTLZ):
    r"""DTLZ1 test problem: linear pareto front sum_i f_i = 0.5 (minimization)."""

    _ref_val = 400.0

    @property
    def _max_hv(self) -> float:
        # Volume of [0, ref]^M minus the region dominated by the simplex front.
        return self._ref_val ** self.num_objectives - 1 / 2 ** self.num_objectives

    def evaluate_true(self, X: Tensor) -> Tensor:
        # Trailing k "distance" variables feed the g(x) penalty term.
        X_m = X[..., -self.k :]
        X_m_minus_half = X_m - 0.5
        sum_term = (
            X_m_minus_half.pow(2) - torch.cos(20 * math.pi * X_m_minus_half)
        ).sum(dim=-1)
        g_X_m = 100 * (self.k + sum_term)
        g_X_m_term = 0.5 * (1 + g_X_m)
        fs = []
        for i in range(self.num_objectives):
            # Prefix product of position variables, with a (1 - x_idx)
            # factor for every objective after the first.
            idx = self.num_objectives - 1 - i
            f_i = g_X_m_term * X[..., :idx].prod(dim=-1)
            if i > 0:
                f_i *= 1 - X[..., idx]
            fs.append(f_i)
        return torch.stack(fs, dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        r"""Sample `n` pareto-optimal points from the hyperplane
        sum_i f_i = 0.5."""
        f_X = 0.5 * sample_simplex(
            n=n,
            d=self.num_objectives,
            qmc=True,
            dtype=self.ref_point.dtype,
            device=self.ref_point.device,
        )
        if self.negate:
            f_X *= -1
        return f_X
class DTLZ2(DTLZ):
    r"""DTLZ2 test problem: concave pareto front on the unit hypersphere
    sum_i f_i^2 = 1 (minimization)."""

    _ref_val = 1.1

    @property
    def _max_hv(self) -> float:
        # Hypercube volume minus the positive section of the unit hypersphere.
        hypercube_vol = self._ref_val ** self.num_objectives
        pos_hypersphere_vol = (
            math.pi ** (self.num_objectives / 2)
            / gamma(self.num_objectives / 2 + 1)
            / 2 ** self.num_objectives
        )
        return hypercube_vol - pos_hypersphere_vol

    def evaluate_true(self, X: Tensor) -> Tensor:
        # Trailing k "distance" variables feed the g(x) penalty term.
        X_m = X[..., -self.k :]
        g_X = (X_m - 0.5).pow(2).sum(dim=-1)
        g_X_plus1 = 1 + g_X
        fs = []
        pi_over_2 = math.pi / 2
        for i in range(self.num_objectives):
            # Spherical coordinates: cosine prefix product plus a sine
            # factor for every objective after the first.
            idx = self.num_objectives - 1 - i
            f_i = g_X_plus1.clone()
            f_i *= torch.cos(X[..., :idx] * pi_over_2).prod(dim=-1)
            if i > 0:
                f_i *= torch.sin(X[..., idx] * pi_over_2)
            fs.append(f_i)
        return torch.stack(fs, dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        r"""Sample `n` pareto-optimal points from the positive section of
        the unit hypersphere."""
        f_X = sample_hypersphere(
            n=n,
            d=self.num_objectives,
            dtype=self.ref_point.dtype,
            device=self.ref_point.device,
            qmc=True,
        ).abs()
        if self.negate:
            f_X *= -1
        return f_X
class VehicleSafety(MultiObjectiveTestProblem):
    r"""Vehicle crash-worthiness problem with three polynomial objectives
    in five design variables on `[1, 3]^5`."""

    _ref_point = [1864.72022, 11.81993945, 0.2903999384]
    _max_hv = 246.81607081187002
    _bounds = [(1.0, 3.0)] * 5
    dim = 5
    num_objectives = 3

    def evaluate_true(self, X: Tensor) -> Tensor:
        X1, X2, X3, X4, X5 = torch.split(X, 1, -1)
        # f1: linear in all five variables.
        f1 = (
            1640.2823
            + 2.3573285 * X1
            + 2.3220035 * X2
            + 4.5688768 * X3
            + 7.7213633 * X4
            + 4.4559504 * X5
        )
        # f2: quadratic with interaction terms.
        f2 = (
            6.5856
            + 1.15 * X1
            - 1.0427 * X2
            + 0.9738 * X3
            + 0.8364 * X4
            - 0.3695 * X1 * X4
            + 0.0861 * X1 * X5
            + 0.3628 * X2 * X4
            - 0.1106 * X1.pow(2)
            - 0.3437 * X3.pow(2)
            + 0.1764 * X4.pow(2)
        )
        # f3: quadratic with interaction terms.
        f3 = (
            -0.0551
            + 0.0181 * X1
            + 0.1024 * X2
            + 0.0421 * X3
            - 0.0073 * X1 * X2
            + 0.024 * X2 * X3
            - 0.0118 * X2 * X4
            - 0.0204 * X3 * X4
            - 0.008 * X3 * X5
            - 0.0241 * X2.pow(2)
            + 0.0109 * X4.pow(2)
        )
        f_X = torch.cat([f1, f2, f3], dim=-1)
        return f_X
class ZDT(MultiObjectiveTestProblem):
    """Shared base class for the bi-objective ZDT suite on the unit cube."""

    _ref_point = [11.0, 11.0]

    def __init__(
        self,
        dim: int,
        num_objectives: int = 2,
        noise_std: Optional[float] = None,
        negate: bool = False,
    ) -> None:
        """Validate dimensions and set up the [0, 1]^dim domain.

        Raises:
            NotImplementedError: if `num_objectives` is not 2.
            ValueError: if `dim` is smaller than `num_objectives`.
        """
        # The ZDT formulations are only defined for two objectives.
        if num_objectives != 2:
            raise NotImplementedError(
                f"{type(self).__name__} currently only supports 2 objectives."
            )
        if dim < num_objectives:
            raise ValueError(
                f"dim must be >= num_objectives, but got {dim} and {num_objectives}"
            )
        self.num_objectives = num_objectives
        self.dim = dim
        self._bounds = [(0.0, 1.0)] * self.dim
        super().__init__(noise_std=noise_std, negate=negate)

    @staticmethod
    def _g(X: Tensor) -> Tensor:
        # Distance component shared by the suite; equals 1 when every
        # coordinate after the first is 0 (i.e. on the Pareto set).
        return 1 + 9 * X[..., 1:].mean(dim=-1)
class ZDT1(ZDT):
    """ZDT1: convex Pareto front f_1 = 1 - sqrt(f_0)."""

    _max_hv = 120 + 2 / 3

    def evaluate_true(self, X: Tensor) -> Tensor:
        first = X[..., 0]
        g = self._g(X=X)
        second = g * (1 - torch.sqrt(first / g))
        return torch.stack([first, second], dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        # Evenly spaced points along the analytic front (g = 1).
        f_0 = torch.linspace(
            0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
        )
        front = torch.stack([f_0, 1 - torch.sqrt(f_0)], dim=-1)
        return -front if self.negate else front
class ZDT2(ZDT):
    """ZDT2: concave Pareto front f_1 = 1 - f_0 ** 2."""

    _max_hv = 120 + 1 / 3

    def evaluate_true(self, X: Tensor) -> Tensor:
        first = X[..., 0]
        g = self._g(X=X)
        second = g * (1 - (first / g) ** 2)
        return torch.stack([first, second], dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        # Evenly spaced points along the analytic front (g = 1).
        f_0 = torch.linspace(
            0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
        )
        front = torch.stack([f_0, 1 - f_0 ** 2], dim=-1)
        return -front if self.negate else front
class ZDT3(ZDT):
    """ZDT3: disconnected Pareto front made up of five separate segments."""

    _max_hv = 128.77811613069076060
    # (left, right) f_0 intervals of the five disconnected front segments.
    _parts = [
        [0, 0.0830015349],
        [0.1822287280, 0.2577623634],
        [0.4093136748, 0.4538821041],
        [0.6183967944, 0.6525117038],
        [0.8233317983, 0.8518328654],
    ]
    # Shrink each segment slightly to avoid evaluating exactly at the edges.
    _eps = 1e-6

    def evaluate_true(self, X: Tensor) -> Tensor:
        # NOTE(review): unlike ZDT1/ZDT2 above (and the classical ZDT3
        # formulation), f_1 here is not multiplied by g — presumably
        # deliberate to match _max_hv; confirm before "fixing".
        first = X[..., 0]
        g = self._g(X=X)
        ratio = first / g
        second = 1 - ratio.sqrt() - ratio * torch.sin(10 * math.pi * first)
        return torch.stack([first, second], dim=-1)

    def gen_pareto_front(self, n: int) -> Tensor:
        n_parts = len(self._parts)
        # Spread n as evenly as possible across the segments; the first
        # (n % n_parts) segments get one extra point each.
        counts = torch.full(
            torch.Size([n_parts]),
            n // n_parts,
            dtype=torch.long,
            device=self.bounds.device,
        )
        counts[: n % n_parts] += 1
        segments = [
            torch.linspace(
                left + self._eps,
                right - self._eps,
                counts[i],
                dtype=self.bounds.dtype,
                device=self.bounds.device,
            )
            for i, (left, right) in enumerate(self._parts)
        ]
        f_0 = torch.cat(segments, dim=0)
        f_1 = 1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0)
        front = torch.stack([f_0, f_1], dim=-1)
        return -front if self.negate else front
class BNH(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    """BNH: constrained bi-objective problem on [0, 5] x [0, 3]."""

    dim = 2
    num_objectives = 2
    num_constraints = 2
    _bounds = [(0.0, 5.0), (0.0, 3.0)]
    _ref_point = [0.0, 0.0]

    def evaluate_true(self, X: Tensor) -> Tensor:
        obj1 = 4.0 * X.pow(2).sum(dim=-1)
        obj2 = (X - 5.0).pow(2).sum(dim=-1)
        return torch.stack([obj1, obj2], dim=-1)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        x1, x2 = X[..., 0], X[..., 1]
        # Slack >= 0 means the corresponding constraint is satisfied.
        slack1 = 25.0 - (x1 - 5.0) ** 2 - x2 ** 2
        slack2 = (x1 - 8.0) ** 2 + (x2 + 3.0) ** 2 - 7.7
        return torch.stack([slack1, slack2], dim=-1)
class SRN(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    """The constrained SRN problem of Srinivas & Deb.

    Two objectives and two constraints on [-20, 20]^2:
        c1: x1^2 + x2^2 <= 225
        c2: x1 - 3 * x2 + 10 <= 0
    Slack values returned by `evaluate_slack_true` are >= 0 when feasible.
    """

    dim = 2
    num_objectives = 2
    num_constraints = 2
    _bounds = [(-20.0, 20.0), (-20.0, 20.0)]
    _ref_point = [0.0, 0.0]

    def evaluate_true(self, X: Tensor) -> Tensor:
        obj1 = 2.0 + ((X - 2.0) ** 2).sum(dim=-1)
        obj2 = 9.0 * X[..., 0] - (X[..., 1] - 1.0) ** 2
        return torch.stack([obj1, obj2], dim=-1)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # Bug fix: the first constraint is 225 - (x1^2 + x2^2). The previous
        # expression ((X ** 2) ** 2) erroneously raised the coordinates to
        # the 4th power, shrinking the feasible region.
        c1 = 225.0 - (X ** 2).sum(dim=-1)
        c2 = -10.0 - X[..., 0] + 3 * X[..., 1]
        return torch.stack([c1, c2], dim=-1)
class CONSTR(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    """CONSTR: constrained bi-objective problem on [0.1, 10] x [0, 5]."""

    dim = 2
    num_objectives = 2
    num_constraints = 2
    _bounds = [(0.1, 10.0), (0.0, 5.0)]
    _ref_point = [10.0, 10.0]

    def evaluate_true(self, X: Tensor) -> Tensor:
        x1, x2 = X[..., 0], X[..., 1]
        return torch.stack([x1, (1.0 + x2) / x1], dim=-1)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        x1, x2 = X[..., 0], X[..., 1]
        # Slack >= 0 means the corresponding constraint is satisfied.
        slack1 = 9.0 * x1 + x2 - 6.0
        slack2 = 9.0 * x1 - x2 - 1.0
        return torch.stack([slack1, slack2], dim=-1)
class ConstrainedBraninCurrin(BraninCurrin, ConstrainedBaseTestProblem):
    """BraninCurrin with a disk constraint expressed in the native Branin domain."""

    dim = 2
    num_objectives = 2
    num_constraints = 1
    _bounds = [(0.0, 1.0), (0.0, 1.0)]
    # Bounds used only for unnormalizing inputs inside the constraint.
    _con_bounds = [(-5.0, 10.0), (0.0, 15.0)]
    _ref_point = [80.0, 12.0]
    _max_hv = 608.4004237022673

    def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
        super().__init__(noise_std=noise_std, negate=negate)
        raw_bounds = torch.tensor(self._con_bounds, dtype=torch.float).transpose(-1, -2)
        # Register as a buffer so it moves with .to(device/dtype).
        self.register_buffer("con_bounds", raw_bounds)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        # Map the unit-cube inputs back to the constraint's native domain.
        X_con = unnormalize(X, self.con_bounds)
        dist_sq = (X_con[..., 0:1] - 2.5).pow(2) + (X_con[..., 1:2] - 7.5).pow(2)
        return 50 - dist_sq
class C2DTLZ2(DTLZ2, ConstrainedBaseTestProblem):
    """DTLZ2 with the C2 constraint: feasible points lie within distance `_r`
    of either one of the objective-axis unit points or the front's center.
    """

    num_constraints = 1
    # Radius of the feasible balls in objective space.
    _r = 0.2
    _max_hv = 0.3996406303723544
    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        """Return the (negated-violation) slack, shape `(q, 1)`; >= 0 is feasible."""
        if X.ndim > 2:
            raise NotImplementedError("Batch X is not supported.")
        f_X = self.evaluate_true(X)
        # Squared distance of each objective value from 1 (per objective).
        term1 = (f_X - 1).pow(2)
        # Boolean mask selecting, for each objective i, all objectives j != i.
        mask = ~(torch.eye(f_X.shape[-1], device=f_X.device).bool())
        indices = torch.arange(f_X.shape[1], device=f_X.device).repeat(f_X.shape[1], 1)
        indexer = indices[mask].view(f_X.shape[1], f_X.shape[-1] - 1)
        # For each objective i, gather the values of the other objectives.
        term2_inner = (
            f_X.unsqueeze(1)
            .expand(f_X.shape[0], f_X.shape[-1], f_X.shape[-1])
            .gather(dim=-1, index=indexer.repeat(f_X.shape[0], 1, 1))
        )
        term2 = (term2_inner.pow(2) - self._r ** 2).sum(dim=-1)
        # Distance to the nearest axis point e_i (minus r^2)...
        min1 = (term1 + term2).min(dim=-1).values
        # ...vs. distance to the front's center point (1/sqrt(M), ..., 1/sqrt(M)).
        min2 = ((f_X - 1 / math.sqrt(f_X.shape[-1])).pow(2) - self._r ** 2).sum(dim=-1)
        return -torch.min(min1, min2).unsqueeze(-1)
class OSY(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
    """OSY (Osyczka & Kundu): 6 inputs, 2 objectives, 6 inequality constraints."""

    dim = 6
    num_constraints = 6
    num_objectives = 2
    _bounds = [
        (0.0, 10.0),
        (0.0, 10.0),
        (1.0, 5.0),
        (0.0, 6.0),
        (1.0, 5.0),
        (0.0, 10.0),
    ]
    _ref_point = [-75.0, 75.0]

    def evaluate_true(self, X: Tensor) -> Tensor:
        x0, x1, x2, x3, x4 = (X[..., i] for i in range(5))
        obj1 = -(
            25 * (x0 - 2) ** 2
            + (x1 - 2) ** 2
            + (x2 - 1) ** 2
            + (x3 - 4) ** 2
            + (x4 - 1) ** 2
        )
        obj2 = X.pow(2).sum(dim=-1)
        return torch.stack([obj1, obj2], dim=-1)

    def evaluate_slack_true(self, X: Tensor) -> Tensor:
        x0, x1, x2, x3, x4, x5 = (X[..., i] for i in range(6))
        # Slack >= 0 means the corresponding constraint is satisfied.
        slacks = [
            x0 + x1 - 2.0,
            6.0 - x0 - x1,
            2.0 - x1 + x0,
            2.0 - x0 + 3.0 * x1,
            4.0 - (x2 - 3.0) ** 2 - x3,
            (x4 - 3.0) ** 2 + x5 - 4.0,
        ]
        return torch.stack(slacks, dim=-1)
| true | true |
f7297230ad6a4050958a36c1f28f9d0abd69b6a2 | 15,799 | py | Python | ptf-tests/tests/lib/helper.py | robertmacdavid/up4-abstract | b1d184ef5528b7a96da9c26c3d22ff2616d41fa3 | [
"Apache-2.0"
] | 3 | 2021-11-18T00:00:13.000Z | 2021-11-18T02:09:19.000Z | ptf-tests/tests/lib/helper.py | robertmacdavid/up4-abstract | b1d184ef5528b7a96da9c26c3d22ff2616d41fa3 | [
"Apache-2.0"
] | null | null | null | ptf-tests/tests/lib/helper.py | robertmacdavid/up4-abstract | b1d184ef5528b7a96da9c26c3d22ff2616d41fa3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2021 Open Networking Foundation
# Copyright 2021-present Princeton University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import google.protobuf.text_format
import grpc
from ptf import testutils as testutils
from p4.config.v1 import p4info_pb2
from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc, p4data_pb2
from convert import encode
def get_match_field_value(match_field):
    """Extract the value(s) carried by a P4Runtime FieldMatch message.

    Returns a single value for 'valid'/'exact' matches, and a 2-tuple for
    'lpm' (value, prefix_len), 'ternary' (value, mask) and 'range'
    (low, high) matches. Raises for any other match kind.
    """
    kind = match_field.WhichOneof("field_match_type")
    if kind == 'valid':
        return match_field.valid.value
    if kind == 'exact':
        return match_field.exact.value
    if kind == 'lpm':
        lpm = match_field.lpm
        return lpm.value, lpm.prefix_len
    if kind == 'ternary':
        ternary = match_field.ternary
        return ternary.value, ternary.mask
    if kind == 'range':
        rng = match_field.range
        return rng.low, rng.high
    raise Exception("Unsupported match type with type %r" % kind)
class P4InfoHelper(object):
    """Convenience wrapper around a parsed P4Info protobuf.

    Provides name/id lookups for P4 entities and builders for P4Runtime
    messages (table entries, action-profile members/groups, packet-outs,
    digest entries, P4Data), plus simple counter read helpers.

    NOTE(review): some methods reference names that are not defined in this
    file or set by __init__ (P4RuntimeException, self._write, self.reqs,
    self.election_id) — presumably supplied by a subclass or sibling module;
    confirm before using this class stand-alone.
    """

    def __init__(self, p4info):
        # p4info: p4.config.v1.P4Info message describing the pipeline.
        self.p4info = p4info
        self.next_mbr_id = 1
        self.next_grp_id = 1
    def get_next_mbr_id(self):
        """Return a fresh, monotonically increasing action-profile member id."""
        mbr_id = self.next_mbr_id
        self.next_mbr_id = self.next_mbr_id + 1
        return mbr_id
    def read_pkt_count(self, c_name, line_id):
        """Read the packet count of counter `c_name` at index `line_id`."""
        counter = self.read_counter(c_name, line_id, typ="BOTH")
        return counter.data.packet_count
    def read_byte_count(self, c_name, line_id):
        """Read the byte count of counter `c_name` at index `line_id`."""
        counter = self.read_counter(c_name, line_id, typ="BYTES")
        return counter.data.byte_count
    def read_counter(self, c_name, c_index, typ):
        """Read one cell of an indexed counter.

        `typ` ("PACKETS", "BYTES" or "BOTH") is validated against the
        counter's unit declared in P4Info. Returns the CounterEntry for the
        requested index, or None if the switch returned no matching entity.
        """
        # Check counter type with P4Info
        counter = self.get_obj('counters', c_name)
        counter_type_unit = p4info_pb2.CounterSpec.Unit.items()[counter.spec.unit][0]
        if counter_type_unit != "BOTH" and counter_type_unit != typ:
            raise Exception("Counter " + c_name + " is of type " + counter_type_unit +
                            ", but requested: " + typ)
        req = self.get_new_read_request()
        entity = req.entities.add()
        counter_entry = entity.counter_entry
        c_id = self.get_id('counters', c_name)
        counter_entry.counter_id = c_id
        index = counter_entry.index
        index.index = c_index
        for entity in self.read_request(req):
            if entity.HasField("counter_entry"):
                return entity.counter_entry
        return None
    def clear_counters(self):
        """Placeholder — counters are not actually cleared."""
        pass
    def read_request(self, req):
        """Stream a P4Runtime ReadRequest over a fresh gRPC channel.

        Returns the concatenated list of entities from all responses.
        NOTE(review): P4RuntimeException is not imported in this file, so the
        UNKNOWN-status path would raise NameError as written.
        """
        entities = []
        grpc_addr = testutils.test_param_get("grpcaddr")
        channel = grpc.insecure_channel(grpc_addr)
        stub = p4runtime_pb2_grpc.P4RuntimeStub(channel)
        try:
            for resp in stub.Read(req):
                entities.extend(resp.entities)
        except grpc.RpcError as e:
            if e.code() != grpc.StatusCode.UNKNOWN:
                raise e
            raise P4RuntimeException(e)
        return entities
    def write_request(self, req, store=True):
        """Send a WriteRequest; when `store`, remember it (e.g. for cleanup).

        NOTE(review): relies on self._write and self.reqs being provided
        elsewhere — neither is defined in this class.
        """
        rep = self._write(req)
        if store:
            self.reqs.append(req)
        return rep
    def get_new_write_request(self):
        """Return a WriteRequest pre-filled with device id and election id."""
        req = p4runtime_pb2.WriteRequest()
        req.device_id = int(testutils.test_param_get("device_id"))
        election_id = req.election_id
        election_id.high = 0
        election_id.low = self.election_id
        return req
    def get_new_read_request(self):
        """Return a ReadRequest pre-filled with the device id test parameter."""
        req = p4runtime_pb2.ReadRequest()
        req.device_id = int(testutils.test_param_get("device_id"))
        return req
    def get_next_grp_id(self):
        """Return a fresh, monotonically increasing action-profile group id."""
        grp_id = self.next_grp_id
        self.next_grp_id = self.next_grp_id + 1
        return grp_id
    def get_enum_member_val(self, enum_name, enum_member):
        """Return the numeric value of `enum_member` within enum `enum_name`."""
        members = self.get_enum_members(name=enum_name)
        val = members.get(enum_member, None)
        if val is None:
            raise Exception("%s not a member of enum %s. Available Members: %s" \
                            % (enum_member, enum_name, str(list(members.keys()))))
        return val
    def get_enum_obj(self, name):
        """Return the P4Info serializable-enum object called `name`."""
        if "type_info" in dir(self.p4info):
            type_info = self.p4info.type_info
            if "serializable_enums" in dir(type_info):
                for key, val in type_info.serializable_enums.items():
                    if key == name:
                        return val
        raise AttributeError("Could not find enum named %s" % name)
    def get_enum_members(self, name=None, obj=None):
        """Return {member_name: value} for an enum given by name or object."""
        if obj is None:
            if name is None:
                raise AssertionError("Must provide either an enum name or enum object")
            obj = self.get_enum_obj(name)
        return {member.name: member.value for member in obj.members}
    def get_enum_width(self, name):
        """Return the bit width of the enum's underlying type."""
        return self.get_enum_obj(name).underlying_type.bitwidth
    def get(self, entity_type, name=None, id=None):
        """Look up a P4Info entity of `entity_type` by name XOR id."""
        if name is not None and id is not None:
            raise AssertionError("name or id must be None")
        for o in getattr(self.p4info, entity_type):
            pre = o.preamble
            if name:
                if pre.name == name:
                    return o
            else:
                if pre.id == id:
                    return o
        if name:
            raise AttributeError("Could not find %r of type %s" % (name, entity_type))
        else:
            raise AttributeError("Could not find id %r of type %s" % (id, entity_type))
    def get_id(self, entity_type, name):
        """Return the numeric id of the named entity."""
        return self.get(entity_type, name=name).preamble.id
    def get_name(self, entity_type, id):
        """Return the name of the entity with the given id."""
        return self.get(entity_type, id=id).preamble.name
    def get_obj(self, entity_type, name):
        """Return the named entity object itself."""
        return self.get(entity_type, name=name)
    def __getattr__(self, attr):
        # Synthesize convenience functions for name to id lookups for top-level
        # entities e.g. get_tables_id(name_string) or
        # get_actions_id(name_string)
        m = re.search(r"^get_(\w+)_id$", attr)
        if m:
            primitive = m.group(1)
            return lambda name: self.get_id(primitive, name)
        # Synthesize convenience functions for id to name lookups
        # e.g. get_tables_name(id) or get_actions_name(id)
        m = re.search(r"^get_(\w+)_name$", attr)
        if m:
            primitive = m.group(1)
            return lambda x: self.get_name(primitive, x)
        raise AttributeError("%r object has no attribute %r (check your P4Info)" %
                             (self.__class__, attr))
    def get_match_field(self, table_name, name=None, id=None):
        """Return the MatchField of `table_name` selected by name or id."""
        t = None
        for t in self.p4info.tables:
            if t.preamble.name == table_name:
                break
        if not t:
            raise AttributeError("No such table %r in P4Info" % table_name)
        for mf in t.match_fields:
            if name is not None:
                if mf.name == name:
                    return mf
            elif id is not None:
                if mf.id == id:
                    return mf
        raise AttributeError("%r has no match field %r (check your P4Info)" %
                             (table_name, name if name is not None else id))
    def get_packet_metadata(self, meta_type, name=None, id=None):
        """Return a controller packet-metadata field of `meta_type`
        ("packet_in" or "packet_out"), selected by name or id."""
        for t in self.p4info.controller_packet_metadata:
            pre = t.preamble
            if pre.name == meta_type:
                for m in t.metadata:
                    if name is not None:
                        if m.name == name:
                            return m
                    elif id is not None:
                        if m.id == id:
                            return m
        raise AttributeError("ControllerPacketMetadata %r has no metadata %r (check your P4Info)" %
                             (meta_type, name if name is not None else id))
    def get_match_field_id(self, table_name, match_field_name):
        """Return the id of the named match field."""
        return self.get_match_field(table_name, name=match_field_name).id
    def get_match_field_name(self, table_name, match_field_id):
        """Return the name of the match field with the given id."""
        return self.get_match_field(table_name, id=match_field_id).name
    def get_match_field_pb(self, table_name, match_field_name, value):
        """Build a p4runtime FieldMatch for the given table/field/value.

        `value` depends on the field's match kind: a scalar for EXACT;
        (value, prefix_len) or an "addr/len" string for LPM; (value, mask)
        for TERNARY; (low, high) for RANGE.
        """
        p4info_match = self.get_match_field(table_name, match_field_name)
        bitwidth = p4info_match.bitwidth
        p4runtime_match = p4runtime_pb2.FieldMatch()
        p4runtime_match.field_id = p4info_match.id
        match_type = p4info_match.match_type
        if match_type == p4info_pb2.MatchField.EXACT:
            exact = p4runtime_match.exact
            exact.value = encode(value, bitwidth)
        elif match_type == p4info_pb2.MatchField.LPM:
            # Accept "addr/len" strings and split them into (addr, len).
            if type(value) is str and '/' in value:
                value = value.split('/')
                value[1] = int(value[1])
            lpm = p4runtime_match.lpm
            lpm.value = encode(value[0], bitwidth)
            lpm.prefix_len = value[1]
        elif match_type == p4info_pb2.MatchField.TERNARY:
            lpm = p4runtime_match.ternary
            lpm.value = encode(value[0], bitwidth)
            lpm.mask = encode(value[1], bitwidth)
        elif match_type == p4info_pb2.MatchField.RANGE:
            lpm = p4runtime_match.range
            lpm.low = encode(value[0], bitwidth)
            lpm.high = encode(value[1], bitwidth)
        else:
            raise Exception("Unsupported match type with type %r" % match_type)
        return p4runtime_match
    def get_action_param(self, action_name, name=None, id=None):
        """Return an action parameter descriptor selected by name or id."""
        for a in self.p4info.actions:
            pre = a.preamble
            if pre.name == action_name:
                for p in a.params:
                    if name is not None:
                        if p.name == name:
                            return p
                    elif id is not None:
                        if p.id == id:
                            return p
        raise AttributeError("Action %r has no param %r (check your P4Info)" %
                             (action_name, name if name is not None else id))
    def get_counter(self, counter_name):
        """Return the named direct counter descriptor."""
        for a in self.p4info.direct_counters:
            pre = a.preamble
            if pre.name == counter_name:
                return a
        raise AttributeError("Counter %r doesnt exist (check your P4Info)" % (counter_name))
    def get_action_param_id(self, action_name, param_name):
        """Return the id of the named action parameter."""
        return self.get_action_param(action_name, name=param_name).id
    def get_action_param_name(self, action_name, param_id):
        """Return the name of the action parameter with the given id."""
        return self.get_action_param(action_name, id=param_id).name
    def get_action_param_pb(self, action_name, param_name, value):
        """Build a p4runtime Action.Param, encoding `value` to the declared width."""
        p4info_param = self.get_action_param(action_name, param_name)
        p4runtime_param = p4runtime_pb2.Action.Param()
        p4runtime_param.param_id = p4info_param.id
        p4runtime_param.value = encode(value, p4info_param.bitwidth)
        return p4runtime_param
    def build_table_entry(self, table_name, match_fields=None, default_action=False,
                          action_name=None, action_params=None, group_id=None, priority=None):
        """Build a TableEntry; `match_fields`/`action_params` are {name: value} dicts."""
        table_entry = p4runtime_pb2.TableEntry()
        table_entry.table_id = self.get_tables_id(table_name)
        if priority is not None:
            table_entry.priority = priority
        if match_fields:
            table_entry.match.extend([
                self.get_match_field_pb(table_name, match_field_name, value)
                for match_field_name, value in match_fields.items()
            ])
        if default_action:
            table_entry.is_default_action = True
        if action_name:
            action = table_entry.action.action
            action.CopyFrom(self.build_action(action_name, action_params))
        if group_id:
            table_entry.action.action_profile_group_id = group_id
        return table_entry
    def build_action(self, action_name, action_params=None):
        """Build an Action message from a name and a {param_name: value} dict."""
        action = p4runtime_pb2.Action()
        action.action_id = self.get_actions_id(action_name)
        if action_params:
            action.params.extend([
                self.get_action_param_pb(action_name, field_name, value)
                for field_name, value in action_params.items()
            ])
        return action
    def build_act_prof_member(self, act_prof_name, action_name, action_params=None, member_id=None):
        """Build an ActionProfileMember; allocates a member id if none is given."""
        member = p4runtime_pb2.ActionProfileMember()
        member.action_profile_id = self.get_action_profiles_id(act_prof_name)
        member.member_id = member_id if member_id else self.get_next_mbr_id()
        member.action.CopyFrom(self.build_action(action_name, action_params))
        return member
    def build_act_prof_group(self, act_prof_name, group_id, actions=()):
        """Build member messages plus the group message for an action profile.

        `actions` is a sequence of (action_name,) or (action_name, params)
        tuples. Returns [member, ..., group] in the order they must be written.
        """
        messages = []
        group = p4runtime_pb2.ActionProfileGroup()
        group.action_profile_id = self.get_action_profiles_id(act_prof_name)
        group.group_id = group_id
        for action in actions:
            action_name = action[0]
            if len(action) > 1:
                action_params = action[1]
            else:
                action_params = None
            member = self.build_act_prof_member(act_prof_name, action_name, action_params)
            messages.extend([member])
            group_member = p4runtime_pb2.ActionProfileGroup.Member()
            group_member.member_id = member.member_id
            group_member.weight = 1
            group.members.extend([group_member])
        messages.append(group)
        return messages
    def build_packet_out(self, payload, metadata=None):
        """Build a PacketOut with optional {metadata_name: value} fields."""
        packet_out = p4runtime_pb2.PacketOut()
        packet_out.payload = bytes(payload)
        if not metadata:
            return packet_out
        for name, value in metadata.items():
            p4info_meta = self.get_packet_metadata("packet_out", name)
            meta = packet_out.metadata.add()
            meta.metadata_id = p4info_meta.id
            meta.value = encode(value, p4info_meta.bitwidth)
        return packet_out
    def build_packet_in(self, payload, metadata=None):
        """Build a PacketIn with optional {metadata_name: value} fields."""
        packet_in = p4runtime_pb2.PacketIn()
        packet_in.payload = bytes(payload)
        if not metadata:
            return packet_in
        for name, value in metadata.items():
            p4info_meta = self.get_packet_metadata("packet_in", name)
            meta = packet_in.metadata.add()
            meta.metadata_id = p4info_meta.id
            meta.value = encode(value, p4info_meta.bitwidth)
        return packet_in
    def build_digest_entry(self, digest_name, max_timeout_ns, max_list_size, ack_timeout_ns):
        """Build a DigestEntry configuring timeout/list-size/ack for `digest_name`."""
        digest_entry = p4runtime_pb2.DigestEntry()
        digest_entry.digest_id = self.get_digests_id(digest_name)
        config = digest_entry.config
        config.max_timeout_ns = max_timeout_ns
        config.max_list_size = max_list_size
        config.ack_timeout_ns = ack_timeout_ns
        return digest_entry
    def build_p4data_bitstring(self, value):
        """Wrap raw bytes in a P4Data bitstring."""
        data = p4data_pb2.P4Data()
        data.bitstring = value
        return data
    def build_p4data_struct(self, members):
        """Wrap a sequence of P4Data members in a P4Data struct."""
        data = p4data_pb2.P4Data()
        struct = data.struct
        for m in members:
            x = struct.members.add()
            x.CopyFrom(m)
        return data
| 39.009877 | 100 | 0.62352 |
import re
import google.protobuf.text_format
import grpc
from ptf import testutils as testutils
from p4.config.v1 import p4info_pb2
from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc, p4data_pb2
from convert import encode
def get_match_field_value(match_field):
match_type = match_field.WhichOneof("field_match_type")
if match_type == 'valid':
return match_field.valid.value
elif match_type == 'exact':
return match_field.exact.value
elif match_type == 'lpm':
return match_field.lpm.value, match_field.lpm.prefix_len
elif match_type == 'ternary':
return match_field.ternary.value, match_field.ternary.mask
elif match_type == 'range':
return match_field.range.low, match_field.range.high
else:
raise Exception("Unsupported match type with type %r" % match_type)
class P4InfoHelper(object):
def __init__(self, p4info):
self.p4info = p4info
self.next_mbr_id = 1
self.next_grp_id = 1
def get_next_mbr_id(self):
mbr_id = self.next_mbr_id
self.next_mbr_id = self.next_mbr_id + 1
return mbr_id
def read_pkt_count(self, c_name, line_id):
counter = self.read_counter(c_name, line_id, typ="BOTH")
return counter.data.packet_count
def read_byte_count(self, c_name, line_id):
counter = self.read_counter(c_name, line_id, typ="BYTES")
return counter.data.byte_count
def read_counter(self, c_name, c_index, typ):
counter = self.get_obj('counters', c_name)
counter_type_unit = p4info_pb2.CounterSpec.Unit.items()[counter.spec.unit][0]
if counter_type_unit != "BOTH" and counter_type_unit != typ:
raise Exception("Counter " + c_name + " is of type " + counter_type_unit +
", but requested: " + typ)
req = self.get_new_read_request()
entity = req.entities.add()
counter_entry = entity.counter_entry
c_id = self.get_id('counters', c_name)
counter_entry.counter_id = c_id
index = counter_entry.index
index.index = c_index
for entity in self.read_request(req):
if entity.HasField("counter_entry"):
return entity.counter_entry
return None
def clear_counters(self):
pass
def read_request(self, req):
entities = []
grpc_addr = testutils.test_param_get("grpcaddr")
channel = grpc.insecure_channel(grpc_addr)
stub = p4runtime_pb2_grpc.P4RuntimeStub(channel)
try:
for resp in stub.Read(req):
entities.extend(resp.entities)
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.UNKNOWN:
raise e
raise P4RuntimeException(e)
return entities
def write_request(self, req, store=True):
rep = self._write(req)
if store:
self.reqs.append(req)
return rep
def get_new_write_request(self):
req = p4runtime_pb2.WriteRequest()
req.device_id = int(testutils.test_param_get("device_id"))
election_id = req.election_id
election_id.high = 0
election_id.low = self.election_id
return req
def get_new_read_request(self):
req = p4runtime_pb2.ReadRequest()
req.device_id = int(testutils.test_param_get("device_id"))
return req
def get_next_grp_id(self):
grp_id = self.next_grp_id
self.next_grp_id = self.next_grp_id + 1
return grp_id
def get_enum_member_val(self, enum_name, enum_member):
members = self.get_enum_members(name=enum_name)
val = members.get(enum_member, None)
if val is None:
raise Exception("%s not a member of enum %s. Available Members: %s" \
% (enum_member, enum_name, str(list(members.keys()))))
return val
def get_enum_obj(self, name):
if "type_info" in dir(self.p4info):
type_info = self.p4info.type_info
if "serializable_enums" in dir(type_info):
for key, val in type_info.serializable_enums.items():
if key == name:
return val
raise AttributeError("Could not find enum named %s" % name)
def get_enum_members(self, name=None, obj=None):
if obj is None:
if name is None:
raise AssertionError("Must provide either an enum name or enum object")
obj = self.get_enum_obj(name)
return {member.name: member.value for member in obj.members}
def get_enum_width(self, name):
return self.get_enum_obj(name).underlying_type.bitwidth
def get(self, entity_type, name=None, id=None):
if name is not None and id is not None:
raise AssertionError("name or id must be None")
for o in getattr(self.p4info, entity_type):
pre = o.preamble
if name:
if pre.name == name:
return o
else:
if pre.id == id:
return o
if name:
raise AttributeError("Could not find %r of type %s" % (name, entity_type))
else:
raise AttributeError("Could not find id %r of type %s" % (id, entity_type))
def get_id(self, entity_type, name):
return self.get(entity_type, name=name).preamble.id
def get_name(self, entity_type, id):
return self.get(entity_type, id=id).preamble.name
def get_obj(self, entity_type, name):
return self.get(entity_type, name=name)
def __getattr__(self, attr):
m = re.search(r"^get_(\w+)_id$", attr)
if m:
primitive = m.group(1)
return lambda name: self.get_id(primitive, name)
m = re.search(r"^get_(\w+)_name$", attr)
if m:
primitive = m.group(1)
return lambda x: self.get_name(primitive, x)
raise AttributeError("%r object has no attribute %r (check your P4Info)" %
(self.__class__, attr))
def get_match_field(self, table_name, name=None, id=None):
t = None
for t in self.p4info.tables:
if t.preamble.name == table_name:
break
if not t:
raise AttributeError("No such table %r in P4Info" % table_name)
for mf in t.match_fields:
if name is not None:
if mf.name == name:
return mf
elif id is not None:
if mf.id == id:
return mf
raise AttributeError("%r has no match field %r (check your P4Info)" %
(table_name, name if name is not None else id))
def get_packet_metadata(self, meta_type, name=None, id=None):
for t in self.p4info.controller_packet_metadata:
pre = t.preamble
if pre.name == meta_type:
for m in t.metadata:
if name is not None:
if m.name == name:
return m
elif id is not None:
if m.id == id:
return m
raise AttributeError("ControllerPacketMetadata %r has no metadata %r (check your P4Info)" %
(meta_type, name if name is not None else id))
def get_match_field_id(self, table_name, match_field_name):
return self.get_match_field(table_name, name=match_field_name).id
def get_match_field_name(self, table_name, match_field_id):
return self.get_match_field(table_name, id=match_field_id).name
def get_match_field_pb(self, table_name, match_field_name, value):
p4info_match = self.get_match_field(table_name, match_field_name)
bitwidth = p4info_match.bitwidth
p4runtime_match = p4runtime_pb2.FieldMatch()
p4runtime_match.field_id = p4info_match.id
match_type = p4info_match.match_type
if match_type == p4info_pb2.MatchField.EXACT:
exact = p4runtime_match.exact
exact.value = encode(value, bitwidth)
elif match_type == p4info_pb2.MatchField.LPM:
if type(value) is str and '/' in value:
value = value.split('/')
value[1] = int(value[1])
lpm = p4runtime_match.lpm
lpm.value = encode(value[0], bitwidth)
lpm.prefix_len = value[1]
elif match_type == p4info_pb2.MatchField.TERNARY:
lpm = p4runtime_match.ternary
lpm.value = encode(value[0], bitwidth)
lpm.mask = encode(value[1], bitwidth)
elif match_type == p4info_pb2.MatchField.RANGE:
lpm = p4runtime_match.range
lpm.low = encode(value[0], bitwidth)
lpm.high = encode(value[1], bitwidth)
else:
raise Exception("Unsupported match type with type %r" % match_type)
return p4runtime_match
def get_action_param(self, action_name, name=None, id=None):
for a in self.p4info.actions:
pre = a.preamble
if pre.name == action_name:
for p in a.params:
if name is not None:
if p.name == name:
return p
elif id is not None:
if p.id == id:
return p
raise AttributeError("Action %r has no param %r (check your P4Info)" %
(action_name, name if name is not None else id))
def get_counter(self, counter_name):
for a in self.p4info.direct_counters:
pre = a.preamble
if pre.name == counter_name:
return a
raise AttributeError("Counter %r doesnt exist (check your P4Info)" % (counter_name))
def get_action_param_id(self, action_name, param_name):
return self.get_action_param(action_name, name=param_name).id
def get_action_param_name(self, action_name, param_id):
return self.get_action_param(action_name, id=param_id).name
def get_action_param_pb(self, action_name, param_name, value):
p4info_param = self.get_action_param(action_name, param_name)
p4runtime_param = p4runtime_pb2.Action.Param()
p4runtime_param.param_id = p4info_param.id
p4runtime_param.value = encode(value, p4info_param.bitwidth)
return p4runtime_param
def build_table_entry(self, table_name, match_fields=None, default_action=False,
action_name=None, action_params=None, group_id=None, priority=None):
table_entry = p4runtime_pb2.TableEntry()
table_entry.table_id = self.get_tables_id(table_name)
if priority is not None:
table_entry.priority = priority
if match_fields:
table_entry.match.extend([
self.get_match_field_pb(table_name, match_field_name, value)
for match_field_name, value in match_fields.items()
])
if default_action:
table_entry.is_default_action = True
if action_name:
action = table_entry.action.action
action.CopyFrom(self.build_action(action_name, action_params))
if group_id:
table_entry.action.action_profile_group_id = group_id
return table_entry
def build_action(self, action_name, action_params=None):
action = p4runtime_pb2.Action()
action.action_id = self.get_actions_id(action_name)
if action_params:
action.params.extend([
self.get_action_param_pb(action_name, field_name, value)
for field_name, value in action_params.items()
])
return action
def build_act_prof_member(self, act_prof_name, action_name, action_params=None, member_id=None):
member = p4runtime_pb2.ActionProfileMember()
member.action_profile_id = self.get_action_profiles_id(act_prof_name)
member.member_id = member_id if member_id else self.get_next_mbr_id()
member.action.CopyFrom(self.build_action(action_name, action_params))
return member
def build_act_prof_group(self, act_prof_name, group_id, actions=()):
messages = []
group = p4runtime_pb2.ActionProfileGroup()
group.action_profile_id = self.get_action_profiles_id(act_prof_name)
group.group_id = group_id
for action in actions:
action_name = action[0]
if len(action) > 1:
action_params = action[1]
else:
action_params = None
member = self.build_act_prof_member(act_prof_name, action_name, action_params)
messages.extend([member])
group_member = p4runtime_pb2.ActionProfileGroup.Member()
group_member.member_id = member.member_id
group_member.weight = 1
group.members.extend([group_member])
messages.append(group)
return messages
def build_packet_out(self, payload, metadata=None):
packet_out = p4runtime_pb2.PacketOut()
packet_out.payload = bytes(payload)
if not metadata:
return packet_out
for name, value in metadata.items():
p4info_meta = self.get_packet_metadata("packet_out", name)
meta = packet_out.metadata.add()
meta.metadata_id = p4info_meta.id
meta.value = encode(value, p4info_meta.bitwidth)
return packet_out
def build_packet_in(self, payload, metadata=None):
packet_in = p4runtime_pb2.PacketIn()
packet_in.payload = bytes(payload)
if not metadata:
return packet_in
for name, value in metadata.items():
p4info_meta = self.get_packet_metadata("packet_in", name)
meta = packet_in.metadata.add()
meta.metadata_id = p4info_meta.id
meta.value = encode(value, p4info_meta.bitwidth)
return packet_in
def build_digest_entry(self, digest_name, max_timeout_ns, max_list_size, ack_timeout_ns):
digest_entry = p4runtime_pb2.DigestEntry()
digest_entry.digest_id = self.get_digests_id(digest_name)
config = digest_entry.config
config.max_timeout_ns = max_timeout_ns
config.max_list_size = max_list_size
config.ack_timeout_ns = ack_timeout_ns
return digest_entry
def build_p4data_bitstring(self, value):
data = p4data_pb2.P4Data()
data.bitstring = value
return data
def build_p4data_struct(self, members):
data = p4data_pb2.P4Data()
struct = data.struct
for m in members:
x = struct.members.add()
x.CopyFrom(m)
return data
| true | true |
f729748acc8620809f6dd195df4ac38d7f1b2eb8 | 5,457 | py | Python | distributed/protocol/compression.py | gjoseph92/distributed | af64e07a01e8ce0d76744099a93ca2155d835ba8 | [
"BSD-3-Clause"
] | null | null | null | distributed/protocol/compression.py | gjoseph92/distributed | af64e07a01e8ce0d76744099a93ca2155d835ba8 | [
"BSD-3-Clause"
] | null | null | null | distributed/protocol/compression.py | gjoseph92/distributed | af64e07a01e8ce0d76744099a93ca2155d835ba8 | [
"BSD-3-Clause"
] | null | null | null | """
Record known compressors
Includes utilities for determining whether or not to compress
"""
from __future__ import print_function, division, absolute_import
import logging
import random
import dask
from toolz import identity, partial
try:
    import blosc

    # Use a small private thread pool for blosc (de)compression.
    n = blosc.set_nthreads(2)
    # Fix: hasattr was previously called on the string "blosc" instead of the
    # module object, so it was always False and set_releasegil(True) was
    # never invoked on blosc versions that support it.
    if hasattr(blosc, "releasegil"):
        blosc.set_releasegil(True)
except ImportError:
    # Sentinel: downstream code checks `blosc` truthiness before using it.
    blosc = False
from ..utils import ignoring, ensure_bytes
# Registry of available codecs: name -> {"compress": fn, "decompress": fn}.
# Optional codecs register themselves below only if their import succeeds.
compressions = {None: {"compress": identity, "decompress": identity}}
compressions[False] = compressions[None]  # alias
default_compression = None
logger = logging.getLogger(__name__)
with ignoring(ImportError):
    import zlib

    compressions["zlib"] = {"compress": zlib.compress, "decompress": zlib.decompress}
with ignoring(ImportError):
    import snappy

    def _fixed_snappy_decompress(data):
        # snappy.decompress() doesn't accept memoryviews
        if isinstance(data, (memoryview, bytearray)):
            data = bytes(data)
        return snappy.decompress(data)

    compressions["snappy"] = {
        "compress": snappy.compress,
        "decompress": _fixed_snappy_decompress,
    }
    default_compression = "snappy"
with ignoring(ImportError):
    import lz4

    try:
        # try using the new lz4 API
        import lz4.block

        lz4_compress = lz4.block.compress
        lz4_decompress = lz4.block.decompress
    except ImportError:
        # fall back to old one
        lz4_compress = lz4.LZ4_compress
        lz4_decompress = lz4.LZ4_uncompress

    # helper to bypass missing memoryview support in current lz4
    # (fixed in later versions)
    def _fixed_lz4_compress(data):
        try:
            return lz4_compress(data)
        except TypeError:
            if isinstance(data, (memoryview, bytearray)):
                return lz4_compress(bytes(data))
            else:
                raise

    def _fixed_lz4_decompress(data):
        try:
            return lz4_decompress(data)
        except (ValueError, TypeError):
            if isinstance(data, (memoryview, bytearray)):
                return lz4_decompress(bytes(data))
            else:
                raise

    # lz4 takes priority over snappy when both are installed (registered last).
    compressions["lz4"] = {
        "compress": _fixed_lz4_compress,
        "decompress": _fixed_lz4_decompress,
    }
    default_compression = "lz4"
with ignoring(ImportError):
    import blosc

    # Note: blosc is registered but deliberately not made the default.
    compressions["blosc"] = {
        "compress": partial(blosc.compress, clevel=5, cname="lz4"),
        "decompress": blosc.decompress,
    }
# Honor an explicit user choice from the dask config, if given.
default = dask.config.get("distributed.comm.compression")
if default != "auto":
    if default in compressions:
        default_compression = default
    else:
        raise ValueError(
            "Default compression '%s' not found.\n"
            "Choices include auto, %s"
            % (default, ", ".join(sorted(map(str, compressions))))
        )
def byte_sample(b, size, n):
    """ Sample a bytestring from many locations

    Concatenates ``n`` slices of up to ``size`` bytes taken at random,
    sorted offsets of ``b``.  Overlapping samples are truncated at the next
    sample's start so each byte is included at most once.

    Parameters
    ----------
    b: bytes or memoryview
    size: int
        size of each sample to collect
    n: int
        number of samples to collect

    Returns
    -------
    bytes
    """
    if n <= 0 or len(b) == 0:
        # Nothing to sample; the original raised on these inputs
        return b""
    # Clamp so randint() never sees a negative upper bound when b is short
    size = min(int(size), len(b))
    # Sorting makes the end-capping below meaningful: without it, a later
    # (smaller) start could produce an inverted, empty slice
    starts = sorted(random.randint(0, len(b) - size) for _ in range(n))
    ends = []
    for i, start in enumerate(starts[:-1]):
        # Truncate at the next sample's start so slices never overlap
        ends.append(min(start + size, starts[i + 1]))
    ends.append(starts[-1] + size)
    # bytes() copies memoryview/bytearray slices into joinable bytes
    parts = [bytes(b[start:end]) for start, end in zip(starts, ends)]
    return b"".join(parts)
def maybe_compress(payload, min_size=1e4, sample_size=1e4, nsamples=5):
    """
    Maybe compress payload

    1. We don't compress small messages
    2. We sample the payload in a few spots, compress that, and if it doesn't
       do any good we return the original
    3. We then compress the full original, if it doesn't compress well then we
       return the original
    4. We return the compressed result

    Returns a ``(compression, data)`` pair where *compression* is the scheme
    name used (or ``None`` when the payload is returned uncompressed).
    """
    # Resolve the active scheme; "auto" defers to the best library found
    # at import time (module-global default_compression)
    compression = dask.config.get("distributed.comm.compression")
    if compression == "auto":
        compression = default_compression
    if not compression:
        return None, payload

    if len(payload) < min_size:
        return None, payload
    if len(payload) > 2 ** 31:  # Too large, compression libraries often fail
        return None, payload

    min_size = int(min_size)
    sample_size = int(sample_size)

    compress = compressions[compression]["compress"]

    # Compress a sample, return original if not very compressed
    sample = byte_sample(payload, sample_size, nsamples)
    if len(compress(sample)) > 0.9 * len(sample):  # sample not very compressible
        return None, payload

    # memoryview length counts items, not bytes, so scale by itemsize
    if type(payload) is memoryview:
        nbytes = payload.itemsize * len(payload)
    else:
        nbytes = len(payload)

    if default_compression and blosc and type(payload) is memoryview:
        # Blosc does itemsize-aware shuffling, resulting in better compression
        compressed = blosc.compress(
            payload, typesize=payload.itemsize, cname="lz4", clevel=5
        )
        compression = "blosc"
    else:
        compressed = compress(ensure_bytes(payload))

    if len(compressed) > 0.9 * nbytes:  # full data not very compressible
        return None, payload
    else:
        return compression, compressed
def decompress(header, frames):
    """ Decompress frames according to information in the header """
    # header["compression"] holds one scheme name (or None) per frame
    out = []
    for scheme, frame in zip(header["compression"], frames):
        out.append(compressions[scheme]["decompress"](frame))
    return out
| 27.560606 | 85 | 0.645226 | from __future__ import print_function, division, absolute_import
import logging
import random
import dask
from toolz import identity, partial
try:
import blosc
n = blosc.set_nthreads(2)
if hasattr("blosc", "releasegil"):
blosc.set_releasegil(True)
except ImportError:
blosc = False
from ..utils import ignoring, ensure_bytes
compressions = {None: {"compress": identity, "decompress": identity}}
compressions[False] = compressions[None]
default_compression = None
logger = logging.getLogger(__name__)
with ignoring(ImportError):
import zlib
compressions["zlib"] = {"compress": zlib.compress, "decompress": zlib.decompress}
with ignoring(ImportError):
import snappy
def _fixed_snappy_decompress(data):
if isinstance(data, (memoryview, bytearray)):
data = bytes(data)
return snappy.decompress(data)
compressions["snappy"] = {
"compress": snappy.compress,
"decompress": _fixed_snappy_decompress,
}
default_compression = "snappy"
with ignoring(ImportError):
import lz4
try:
# try using the new lz4 API
import lz4.block
lz4_compress = lz4.block.compress
lz4_decompress = lz4.block.decompress
except ImportError:
# fall back to old one
lz4_compress = lz4.LZ4_compress
lz4_decompress = lz4.LZ4_uncompress
# helper to bypass missing memoryview support in current lz4
# (fixed in later versions)
def _fixed_lz4_compress(data):
try:
return lz4_compress(data)
except TypeError:
if isinstance(data, (memoryview, bytearray)):
return lz4_compress(bytes(data))
else:
raise
def _fixed_lz4_decompress(data):
try:
return lz4_decompress(data)
except (ValueError, TypeError):
if isinstance(data, (memoryview, bytearray)):
return lz4_decompress(bytes(data))
else:
raise
compressions["lz4"] = {
"compress": _fixed_lz4_compress,
"decompress": _fixed_lz4_decompress,
}
default_compression = "lz4"
with ignoring(ImportError):
import blosc
compressions["blosc"] = {
"compress": partial(blosc.compress, clevel=5, cname="lz4"),
"decompress": blosc.decompress,
}
default = dask.config.get("distributed.comm.compression")
if default != "auto":
if default in compressions:
default_compression = default
else:
raise ValueError(
"Default compression '%s' not found.\n"
"Choices include auto, %s"
% (default, ", ".join(sorted(map(str, compressions))))
)
def byte_sample(b, size, n):
starts = [random.randint(0, len(b) - size) for j in range(n)]
ends = []
for i, start in enumerate(starts[:-1]):
ends.append(min(start + size, starts[i + 1]))
ends.append(starts[-1] + size)
parts = [b[start:end] for start, end in zip(starts, ends)]
return b"".join(map(ensure_bytes, parts))
def maybe_compress(payload, min_size=1e4, sample_size=1e4, nsamples=5):
compression = dask.config.get("distributed.comm.compression")
if compression == "auto":
compression = default_compression
if not compression:
return None, payload
if len(payload) < min_size:
return None, payload
if len(payload) > 2 ** 31: # Too large, compression libraries often fail
return None, payload
min_size = int(min_size)
sample_size = int(sample_size)
compress = compressions[compression]["compress"]
# Compress a sample, return original if not very compressed
sample = byte_sample(payload, sample_size, nsamples)
if len(compress(sample)) > 0.9 * len(sample): # sample not very compressible
return None, payload
if type(payload) is memoryview:
nbytes = payload.itemsize * len(payload)
else:
nbytes = len(payload)
if default_compression and blosc and type(payload) is memoryview:
# Blosc does itemsize-aware shuffling, resulting in better compression
compressed = blosc.compress(
payload, typesize=payload.itemsize, cname="lz4", clevel=5
)
compression = "blosc"
else:
compressed = compress(ensure_bytes(payload))
if len(compressed) > 0.9 * nbytes: # full data not very compressible
return None, payload
else:
return compression, compressed
def decompress(header, frames):
return [
compressions[c]["decompress"](frame)
for c, frame in zip(header["compression"], frames)
]
| true | true |
f72974964b34bc8965a973df3379b8aa79b3c1da | 1,819 | py | Python | src/sas/sasgui/perspectives/calculator/console.py | opendatafit/sasview | c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39 | [
"BSD-3-Clause"
] | null | null | null | src/sas/sasgui/perspectives/calculator/console.py | opendatafit/sasview | c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39 | [
"BSD-3-Clause"
] | 1 | 2021-09-20T13:20:35.000Z | 2021-09-20T13:20:35.000Z | src/sas/sasgui/perspectives/calculator/console.py | opendatafit/sasview | c470220eecfc9f6d8a0e27e2ea8919dcb1b38e39 | [
"BSD-3-Clause"
] | null | null | null | """
Console Module display message of a dialog
"""
import wx
import sys
from sas.sascalc.dataloader.loader import Loader
# Layout constants for the dialog (pixels)
_BOX_WIDTH = 60
CONSOLE_WIDTH = 340
CONSOLE_HEIGHT = 240

# Platform-specific sizing: win32 gets smaller panel dimensions and font
# variant 0; everything else gets the larger layout and variant 1.
if sys.platform.count("win32") > 0:
    _STATICBOX_WIDTH = 450
    PANEL_WIDTH = 500
    PANEL_HEIGHT = 550
    FONT_VARIANT = 0
else:
    _STATICBOX_WIDTH = 480
    PANEL_WIDTH = 530
    PANEL_HEIGHT = 560
    FONT_VARIANT = 1
class ConsoleDialog(wx.Dialog):
    """
    Modal dialog presenting a read-only text summary of a data object.
    """

    def __init__(self, parent=None, manager=None, data=None,
                 title="Data Summary", size=(PANEL_WIDTH, PANEL_HEIGHT)):
        wx.Dialog.__init__(self, parent=parent, title=title, size=size)
        self.parent = parent
        self._manager = manager
        self._data = data
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        # Multi-line, non-editable text area that fills the dialog
        self.msg_txt = wx.TextCtrl(
            self,
            size=(PANEL_WIDTH - 40, PANEL_HEIGHT - 60),
            style=wx.TE_MULTILINE,
        )
        self.msg_txt.SetEditable(False)
        self.msg_txt.SetValue('No message available')
        self.sizer.Add(self.msg_txt, 1, wx.EXPAND | wx.ALL, 10)
        if self._data is not None:
            self.set_message(msg=str(self._data))
        self.SetSizer(self.sizer)

    def set_manager(self, manager):
        """
        Set the manager of this window
        """
        self._manager = manager

    def set_message(self, msg=""):
        """
        Display the message received
        """
        self.msg_txt.SetValue(str(msg))
if __name__ == "__main__":
    # Manual smoke test: load a sample data file and show it in the dialog.
    app = wx.App()
    # Instantiate a loader
    loader = Loader()
    # Load data (file expected in the current working directory)
    test_data = loader.load("MAR07232_rest.ASC")
    dlg = ConsoleDialog(data=test_data)
    dlg.ShowModal()
    app.MainLoop()
| 27.149254 | 76 | 0.59978 | import wx
import sys
from sas.sascalc.dataloader.loader import Loader
_BOX_WIDTH = 60
CONSOLE_WIDTH = 340
CONSOLE_HEIGHT = 240
if sys.platform.count("win32") > 0:
_STATICBOX_WIDTH = 450
PANEL_WIDTH = 500
PANEL_HEIGHT = 550
FONT_VARIANT = 0
else:
_STATICBOX_WIDTH = 480
PANEL_WIDTH = 530
PANEL_HEIGHT = 560
FONT_VARIANT = 1
class ConsoleDialog(wx.Dialog):
def __init__(self, parent=None, manager=None, data=None,
title="Data Summary", size=(PANEL_WIDTH, PANEL_HEIGHT)):
wx.Dialog.__init__(self, parent=parent, title=title, size=size)
self.parent = parent
self._manager = manager
self._data = data
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.msg_txt = wx.TextCtrl(self, size=(PANEL_WIDTH - 40,
PANEL_HEIGHT - 60),
style=wx.TE_MULTILINE)
self.msg_txt.SetEditable(False)
self.msg_txt.SetValue('No message available')
self.sizer.Add(self.msg_txt, 1, wx.EXPAND | wx.ALL, 10)
if self._data is not None:
self.set_message(msg=self._data.__str__())
self.SetSizer(self.sizer)
def set_manager(self, manager):
self._manager = manager
def set_message(self, msg=""):
self.msg_txt.SetValue(str(msg))
if __name__ == "__main__":
app = wx.App()
loader = Loader()
test_data = loader.load("MAR07232_rest.ASC")
dlg = ConsoleDialog(data=test_data)
dlg.ShowModal()
app.MainLoop()
| true | true |
f72974cef8a7f46849e53080deca073565237e4d | 305 | py | Python | core/libs/sqlsyntax.py | PanDAWMS/panda-bigmon-core-new | 4b806af8b0616657dcf293af376c48f61c32b86f | [
"Apache-2.0"
] | null | null | null | core/libs/sqlsyntax.py | PanDAWMS/panda-bigmon-core-new | 4b806af8b0616657dcf293af376c48f61c32b86f | [
"Apache-2.0"
] | null | null | null | core/libs/sqlsyntax.py | PanDAWMS/panda-bigmon-core-new | 4b806af8b0616657dcf293af376c48f61c32b86f | [
"Apache-2.0"
] | null | null | null | """
A set of functions to handle syntax differences between DBs
"""
def bind_var(var, db='oracle'):
    """Format of named bind variable"""
    # PostgreSQL uses pyformat placeholders; every other backend,
    # including the Oracle default, uses colon-prefixed named style.
    if db == 'postgresql':
        return '%({})s'.format(var)
    return ':{}'.format(var)
def bind_var(var, db='oracle'):
if db == 'postgresql':
return '%({})s'.format(var)
elif db == 'oracle':
return ':{}'.format(var)
else:
return ':{}'.format(var) | true | true |
f729751bd00eae316fcc00d7b6a627402edd0ae3 | 6,271 | py | Python | corelib.py | jaredvann/UntitledLanguage | 456f3a636d62028a85ee61eb3a9f04214f799e78 | [
"MIT"
] | 2 | 2020-02-07T13:20:03.000Z | 2020-06-29T15:58:30.000Z | corelib.py | jaredvann/UntitledLanguage | 456f3a636d62028a85ee61eb3a9f04214f799e78 | [
"MIT"
] | 1 | 2020-05-09T07:22:42.000Z | 2020-05-09T07:22:42.000Z | corelib.py | jaredvann/Untitled-Language | 456f3a636d62028a85ee61eb3a9f04214f799e78 | [
"MIT"
] | null | null | null | import ctypes
import llvmlite.ir as ir
from CodeGen import LLVMCodeGenerator
from coretypes import *
from scopes import Scope
from typelib import *
ZERO = ir.Constant(ir.IntType(64), 0)
# --- Comparison codegen callbacks -------------------------------------------
# Each helper has the (cg, args, arg_types) signature FunctionType expects:
# *cg* is the active LLVMCodeGenerator, *args* the evaluated LLVM operand
# values, *arg_types* their LLVM types.  Float comparisons use *ordered*
# fcmp, so any NaN operand yields false.

def _eq_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fcmp_ordered("==", args[0], args[1])

def _neq_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fcmp_ordered("!=", args[0], args[1])

def _eq_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.icmp_signed("==", args[0], args[1])

def _neq_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.icmp_signed("!=", args[0], args[1])

def _lt_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fcmp_ordered("<", args[0], args[1])

def _gt_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fcmp_ordered(">", args[0], args[1])

def _lte_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fcmp_ordered("<=", args[0], args[1])

def _gte_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fcmp_ordered(">=", args[0], args[1])

def _lt_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.icmp_signed("<", args[0], args[1])

def _gt_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.icmp_signed(">", args[0], args[1])

def _lte_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.icmp_signed("<=", args[0], args[1])

def _gte_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.icmp_signed(">=", args[0], args[1])
# --- Arithmetic codegen callbacks -------------------------------------------
# Same (cg, args, arg_types) signature as the comparison helpers above.
# Int division/remainder are signed; Float pow/abs lower to libm calls.

def _add_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.add(args[0], args[1])

def _sub_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.sub(args[0], args[1])

def _mul_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.mul(args[0], args[1])

def _div_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.sdiv(args[0], args[1])

def _rem_Int(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.srem(args[0], args[1])

def _add_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fadd(args[0], args[1])

def _sub_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fsub(args[0], args[1])

def _mul_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fmul(args[0], args[1])

def _div_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.fdiv(args[0], args[1])

def _rem_Float(cg: LLVMCodeGenerator, args, arg_types):
    return cg.builder.frem(args[0], args[1])

def _pow_Float(cg: LLVMCodeGenerator, args, arg_types):
    # Emit a call to the external "pow" function (libm)
    return cg.builder.call(ir.Function(cg.module, ir.FunctionType(arg_types[0], arg_types), "pow"), args)

def _abs_Float(cg: LLVMCodeGenerator, args, arg_types):
    # Emit a call to the external "fabs" function (libm)
    return cg.builder.call(ir.Function(cg.module, ir.FunctionType(arg_types[0], arg_types), "fabs"), args)

def _index(cg: LLVMCodeGenerator, args, arg_types):
    # GEP into the array: first index (ZERO) steps through the pointer,
    # second selects the element -- result is a pointer (reference).
    arr_ptr, index = args
    return cg.builder.gep(arr_ptr, [ZERO, index])
scope = Scope()
scope.add_type(Array)
scope.add_type(Bool)
scope.add_type(Float)
scope.add_type(Int)
# Equalities
scope.add_function(FunctionType("==", [Bool, Bool], Bool, _eq_Int))
scope.add_function(FunctionType("!=", [Bool, Bool], Bool, _neq_Int))
scope.add_function(FunctionType("==", [Float, Float], Bool, _eq_Float))
scope.add_function(FunctionType("!=", [Float, Float], Bool, _neq_Float))
scope.add_function(FunctionType("==", [Int, Int], Bool, _eq_Int))
scope.add_function(FunctionType("!=", [Int, Int], Bool, _neq_Int))
# Comparisons
scope.add_function(FunctionType("<", [Float, Float], Bool, _lt_Float))
scope.add_function(FunctionType(">", [Float, Float], Bool, _gt_Float))
scope.add_function(FunctionType("<=", [Float, Float], Bool, _lte_Float))
scope.add_function(FunctionType(">=", [Float, Float], Bool, _gte_Float))
scope.add_function(FunctionType("<", [Int, Int], Bool, _lt_Int))
scope.add_function(FunctionType(">", [Int, Int], Bool, _gt_Int))
scope.add_function(FunctionType("<=", [Int, Int], Bool, _lte_Int))
scope.add_function(FunctionType(">=", [Int, Int], Bool, _gte_Int))
# Math Operators
scope.add_function(FunctionType("+", [Float, Float], Float, _add_Float))
scope.add_function(FunctionType("-", [Float, Float], Float, _sub_Float))
scope.add_function(FunctionType("*", [Float, Float], Float, _mul_Float))
scope.add_function(FunctionType("/", [Float, Float], Float, _div_Float))
scope.add_function(FunctionType("%", [Float, Float], Float, _rem_Float))
scope.add_function(FunctionType("^", [Float, Float], Float, _pow_Float))
scope.add_function(FunctionType("+", [Int, Int], Int, _add_Int))
scope.add_function(FunctionType("-", [Int, Int], Int, _sub_Int))
scope.add_function(FunctionType("*", [Int, Int], Int, _mul_Int))
scope.add_function(FunctionType("/", [Int, Int], Int, _div_Int))
scope.add_function(FunctionType("%", [Int, Int], Int, _rem_Int))
# Math Functions
scope.add_function(FunctionType("abs", [Float], Float, _abs_Float, no_mangle=True))
scope.add_function(FunctionType("floor", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("ceil", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("round", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("sqrt", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("exp", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("log", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("log10", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("log2", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("sin", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("cos", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("tan", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("index", [Type("Array", [Bool], ['N']), Int], Bool.to_ref(), _index))
scope.add_function(FunctionType("index", [Type("Array", [Int], ['N']), Int], Int.to_ref(), _index))
scope.add_function(FunctionType("index", [Type("Array", [Float], ['N']), Int], Float.to_ref(), _index))
scope.add_function(FunctionType("putchar", [Int], Int, no_mangle=True))
# scope.add_function(FunctionType("sum", [Type("Array", [Int], ['N'])], Int)) | 39.19375 | 106 | 0.721735 | import ctypes
import llvmlite.ir as ir
from CodeGen import LLVMCodeGenerator
from coretypes import *
from scopes import Scope
from typelib import *
ZERO = ir.Constant(ir.IntType(64), 0)
def _eq_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fcmp_ordered("==", args[0], args[1])
def _neq_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fcmp_ordered("!=", args[0], args[1])
def _eq_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.icmp_signed("==", args[0], args[1])
def _neq_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.icmp_signed("!=", args[0], args[1])
def _lt_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fcmp_ordered("<", args[0], args[1])
def _gt_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fcmp_ordered(">", args[0], args[1])
def _lte_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fcmp_ordered("<=", args[0], args[1])
def _gte_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fcmp_ordered(">=", args[0], args[1])
def _lt_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.icmp_signed("<", args[0], args[1])
def _gt_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.icmp_signed(">", args[0], args[1])
def _lte_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.icmp_signed("<=", args[0], args[1])
def _gte_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.icmp_signed(">=", args[0], args[1])
def _add_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.add(args[0], args[1])
def _sub_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.sub(args[0], args[1])
def _mul_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.mul(args[0], args[1])
def _div_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.sdiv(args[0], args[1])
def _rem_Int(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.srem(args[0], args[1])
def _add_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fadd(args[0], args[1])
def _sub_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fsub(args[0], args[1])
def _mul_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fmul(args[0], args[1])
def _div_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.fdiv(args[0], args[1])
def _rem_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.frem(args[0], args[1])
def _pow_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.call(ir.Function(cg.module, ir.FunctionType(arg_types[0], arg_types), "pow"), args)
def _abs_Float(cg: LLVMCodeGenerator, args, arg_types):
return cg.builder.call(ir.Function(cg.module, ir.FunctionType(arg_types[0], arg_types), "fabs"), args)
def _index(cg: LLVMCodeGenerator, args, arg_types):
arr_ptr, index = args
return cg.builder.gep(arr_ptr, [ZERO, index])
scope = Scope()
scope.add_type(Array)
scope.add_type(Bool)
scope.add_type(Float)
scope.add_type(Int)
scope.add_function(FunctionType("==", [Bool, Bool], Bool, _eq_Int))
scope.add_function(FunctionType("!=", [Bool, Bool], Bool, _neq_Int))
scope.add_function(FunctionType("==", [Float, Float], Bool, _eq_Float))
scope.add_function(FunctionType("!=", [Float, Float], Bool, _neq_Float))
scope.add_function(FunctionType("==", [Int, Int], Bool, _eq_Int))
scope.add_function(FunctionType("!=", [Int, Int], Bool, _neq_Int))
scope.add_function(FunctionType("<", [Float, Float], Bool, _lt_Float))
scope.add_function(FunctionType(">", [Float, Float], Bool, _gt_Float))
scope.add_function(FunctionType("<=", [Float, Float], Bool, _lte_Float))
scope.add_function(FunctionType(">=", [Float, Float], Bool, _gte_Float))
scope.add_function(FunctionType("<", [Int, Int], Bool, _lt_Int))
scope.add_function(FunctionType(">", [Int, Int], Bool, _gt_Int))
scope.add_function(FunctionType("<=", [Int, Int], Bool, _lte_Int))
scope.add_function(FunctionType(">=", [Int, Int], Bool, _gte_Int))
scope.add_function(FunctionType("+", [Float, Float], Float, _add_Float))
scope.add_function(FunctionType("-", [Float, Float], Float, _sub_Float))
scope.add_function(FunctionType("*", [Float, Float], Float, _mul_Float))
scope.add_function(FunctionType("/", [Float, Float], Float, _div_Float))
scope.add_function(FunctionType("%", [Float, Float], Float, _rem_Float))
scope.add_function(FunctionType("^", [Float, Float], Float, _pow_Float))
scope.add_function(FunctionType("+", [Int, Int], Int, _add_Int))
scope.add_function(FunctionType("-", [Int, Int], Int, _sub_Int))
scope.add_function(FunctionType("*", [Int, Int], Int, _mul_Int))
scope.add_function(FunctionType("/", [Int, Int], Int, _div_Int))
scope.add_function(FunctionType("%", [Int, Int], Int, _rem_Int))
scope.add_function(FunctionType("abs", [Float], Float, _abs_Float, no_mangle=True))
scope.add_function(FunctionType("floor", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("ceil", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("round", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("sqrt", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("exp", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("log", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("log10", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("log2", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("sin", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("cos", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("tan", [Float], Float, no_mangle=True))
scope.add_function(FunctionType("index", [Type("Array", [Bool], ['N']), Int], Bool.to_ref(), _index))
scope.add_function(FunctionType("index", [Type("Array", [Int], ['N']), Int], Int.to_ref(), _index))
scope.add_function(FunctionType("index", [Type("Array", [Float], ['N']), Int], Float.to_ref(), _index))
scope.add_function(FunctionType("putchar", [Int], Int, no_mangle=True))
| true | true |
f7297760253eb2b85eb98ece71815fc89a9a9ce5 | 16 | py | Python | mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/development/manual_tests/incidental_tests/indirect_import_error/main.py | ang-jason/fip_powerx_mini_projects-foxtrot | 37e3671969b516369e2d1c7cab5890b75c489f56 | [
"MIT"
] | 2,200 | 2016-10-12T16:47:13.000Z | 2022-03-30T16:40:35.000Z | mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/development/manual_tests/incidental_tests/indirect_import_error/main.py | ang-jason/fip_powerx_mini_projects-foxtrot | 37e3671969b516369e2d1c7cab5890b75c489f56 | [
"MIT"
] | 672 | 2016-10-12T16:36:48.000Z | 2022-03-25T00:57:04.000Z | mp_sort/virtenv/lib/python3.6/site-packages/transcrypt/development/manual_tests/incidental_tests/indirect_import_error/main.py | ang-jason/fip_powerx_mini_projects-foxtrot | 37e3671969b516369e2d1c7cab5890b75c489f56 | [
"MIT"
] | 230 | 2016-10-20T14:31:40.000Z | 2022-03-16T15:57:15.000Z | import module1
| 8 | 15 | 0.8125 | import module1
| true | true |
f72977954a5e1f601a1400481980aba248b7012f | 736 | py | Python | bird/tests/test_mdct_tools.py | mmoussallam/bird | 6a362de7d3a52dfcddaed13e8c736d039b03fbb4 | [
"BSD-3-Clause"
] | 11 | 2015-02-02T21:41:41.000Z | 2022-03-12T17:23:01.000Z | bird/tests/test_mdct_tools.py | mmoussallam/bird | 6a362de7d3a52dfcddaed13e8c736d039b03fbb4 | [
"BSD-3-Clause"
] | 1 | 2021-01-03T20:45:36.000Z | 2021-01-04T16:02:49.000Z | bird/tests/test_mdct_tools.py | mmoussallam/bird | 6a362de7d3a52dfcddaed13e8c736d039b03fbb4 | [
"BSD-3-Clause"
] | 5 | 2016-04-06T20:42:27.000Z | 2021-01-03T20:42:53.000Z | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Manuel Moussallam <manuel.moussallam@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_array_almost_equal
from bird.mdct_tools import mdct, imdct
def test_mdct():
    "Test mdct and imdct tight frame property"
    sfreq = 1000.  # Hz
    f = 7.  # Hz
    x1 = np.sin(2. * np.pi * f * np.arange(128, dtype=float) / sfreq)
    x2 = np.sin(2. * np.pi * f * np.arange(512, dtype=float) / sfreq)
    rng = np.random.RandomState(42)
    x3 = rng.standard_normal(x1.shape)
    wsize = 32
    # A tight frame reconstructs each signal exactly (up to float precision)
    for signal in (x1, x2, x3):
        reconstructed = imdct(mdct(signal, wsize), wsize)
        assert_array_almost_equal(signal, reconstructed, decimal=12)
| 26.285714 | 69 | 0.638587 |
import numpy as np
from numpy.testing import assert_array_almost_equal
from bird.mdct_tools import mdct, imdct
def test_mdct():
sfreq = 1000.
f = 7.
x1 = np.sin(2. * np.pi * f * np.arange(128, dtype=float) / sfreq)
x2 = np.sin(2. * np.pi * f * np.arange(512, dtype=float) / sfreq)
rng = np.random.RandomState(42)
x3 = rng.standard_normal(x1.shape)
wsize = 32
for x in [x1, x2, x3]:
X = mdct(x, wsize)
xp = imdct(X, wsize)
assert_array_almost_equal(x, xp, decimal=12)
| true | true |
f729799211cb994510bf480b3445854fbf4b7f51 | 230 | py | Python | worms/criteria/__init__.py | abiedermann/worms | 026c45a88d5c71b0e035ac83de6f4dc107316ed8 | [
"Apache-2.0"
] | 4 | 2018-01-30T23:13:43.000Z | 2021-02-12T22:36:54.000Z | worms/criteria/__init__.py | abiedermann/worms | 026c45a88d5c71b0e035ac83de6f4dc107316ed8 | [
"Apache-2.0"
] | 9 | 2018-02-23T00:52:25.000Z | 2022-01-26T00:02:32.000Z | worms/criteria/__init__.py | abiedermann/worms | 026c45a88d5c71b0e035ac83de6f4dc107316ed8 | [
"Apache-2.0"
] | 4 | 2018-06-28T21:30:14.000Z | 2022-03-30T17:50:42.000Z | from .base import *
from .hash_util import *
from .cyclic import *
from .bounded import *
from .unbounded import *
from .dihedral_lattice import *
from .null import *
from .bridge import *
from .stack import *
from .f222 import *
| 20.909091 | 31 | 0.73913 | from .base import *
from .hash_util import *
from .cyclic import *
from .bounded import *
from .unbounded import *
from .dihedral_lattice import *
from .null import *
from .bridge import *
from .stack import *
from .f222 import *
| true | true |
f7297b112e68f422996bc6c06de8e71039f120a1 | 3,024 | py | Python | 2018/day12/puzzle2.py | tcmitchell/AdventOfCode | caaac1aa37c999d4804f9f4154bf7033a06e98af | [
"MIT"
] | null | null | null | 2018/day12/puzzle2.py | tcmitchell/AdventOfCode | caaac1aa37c999d4804f9f4154bf7033a06e98af | [
"MIT"
] | null | null | null | 2018/day12/puzzle2.py | tcmitchell/AdventOfCode | caaac1aa37c999d4804f9f4154bf7033a06e98af | [
"MIT"
] | null | null | null | import argparse
import collections
import datetime
import itertools
import logging
import re
import sys
import time
# 500 17458
# 5000 179458
# 50000 1799458
# 50B 1799999999458
def parse_args(args):
    """Parse command-line options.

    Parameters
    ----------
    args : list of str
        Full ``sys.argv``-style vector (program name first), as passed by
        ``main``.  A falsy value falls back to the real ``sys.argv``.

    Returns
    -------
    argparse.Namespace with ``input`` (open file) and ``debug`` (bool).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input", type=argparse.FileType('r'),
                        metavar="PUZZLE_INPUT")
    parser.add_argument('-d', '--debug', action='store_true')
    # Bug fix: the passed-in vector was previously ignored entirely
    # (parser.parse_args() always read sys.argv).  Strip argv[0] (the
    # program name) before handing the rest to argparse.
    return parser.parse_args(args[1:] if args else None)
def init_logging(debug=False):
    """Configure root logging; DEBUG level when *debug* is true, else INFO."""
    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.DEBUG if debug else logging.INFO,
    )
def load_input(fp):
    """Read the initial pot state and the spread rules from puzzle input.

    Returns (pots, rules): *pots* is the '#'/'.' string after
    "initial state:", *rules* maps each five-pot pattern to its result.
    """
    pots = fp.readline().strip().split()[2]
    fp.readline()  # skip the blank separator line
    rules = {}
    for line in fp:
        pattern, _, result = line.strip().split()
        rules[pattern] = result
    return pots, rules
def next_gen(rules, pots):
    """Return the next state ('#' or '.') of the pot centered in *pots*.

    *pots* is a five-element sequence of '#'/'.' characters; patterns
    absent from *rules* default to an empty pot.
    """
    return rules.get(''.join(pots), '.')
# Dynamically pad on both left and right.
# Track left padding for subtraction in final computation
def main(argv):
    """Evolve the plant cellular automaton and print the pot-number sum."""
    if not argv:
        argv = sys.argv
    args = parse_args(argv)

    # Init logging
    init_logging(args.debug)

    pots, rules = load_input(args.input)
    logging.info('pots: {}'.format(pots))
    logging.info('rules: {}'.format(rules))

    # Convert pots to a list so elements can be replaced
    pots = list(pots)
    # Number of '.' cells prepended so far; subtracted from list indices at
    # the end so pot numbers stay anchored to the original position 0.
    lpadding = 0
    extension_size = 3

    # Maybe pad the pots on either side
    if '#' in pots[:3]:
        for i in range(extension_size):
            pots.insert(0, '.')
        lpadding += extension_size
    if '#' in pots[-3:]:
        pots.extend(['.'] * extension_size)

    # logging.debug(' 0: {}'.format(''.join(pots)))
    # Last assignment wins: earlier values kept for the extrapolation notes
    # at the top of the file (the 50B answer is extrapolated, not simulated).
    generations = 50000000000
    generations = 500
    generations = 5000
    generations = 50000
    # generations = 20
    for gen in range(generations):
        # First/last two pots are copied unchanged; the inner pots each look
        # at a sliding window of five neighbors.
        new_pots = [pots[0], pots[1]]
        for i in range(len(pots) - 4):
            new_pots.append(next_gen(rules, pots[i:i + 5]))
        new_pots.extend(pots[-2:])
        pots = new_pots
        # Maybe pad the pots on either side
        if '#' in pots[:3]:
            for i in range(extension_size):
                pots.insert(0, '.')
            lpadding += extension_size
        if '#' in pots[-3:]:
            pots.extend(['.'] * extension_size)
        # if gen % 10000 == 0:
        #     logging.debug(gen)
        # logging.debug('{: >2d}: {}'.format(gen + 1, ''.join(pots)))
    logging.debug('pots length = {}'.format(len(pots)))
    # Sum the (original-frame) pot numbers of every pot holding a plant
    potsum = 0
    for i in range(len(pots)):
        if pots[i] == '#':
            logging.debug('pot[{}] has a plant'.format(i - 20))
            potsum += i - lpadding
    print('Answer is {}'.format(potsum))
if __name__ == '__main__':
main(sys.argv)
| 26.068966 | 74 | 0.585317 | import argparse
import collections
import datetime
import itertools
import logging
import re
import sys
import time
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("input", type=argparse.FileType('r'),
metavar="PUZZLE_INPUT")
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args()
return args
def init_logging(debug=False):
    """Configure the root logger; DEBUG verbosity when *debug* is true."""
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=level)
def load_input(fp):
    """Parse the puzzle input.

    The first line looks like ``initial state: #..#``, followed by a blank
    line and one ``PATTERN => RESULT`` rule per line.  Returns the initial
    pots string and a ``{pattern: result}`` dict.
    """
    pots = fp.readline().strip().split()[2]
    fp.readline()  # skip the blank separator line
    rules = {}
    for line in fp:
        fields = line.strip().split()
        rules[fields[0]] = fields[2]
    return pots, rules
def next_gen(rules, pots):
    """Return the next state ('#' or '.') for a five-pot neighbourhood.

    Neighbourhoods with no matching rule default to an empty pot.
    """
    return rules.get(''.join(pots), '.')
def main(argv):
    """Entry point: parse arguments, run the pot simulation, print the answer.

    ``argv`` is a full ``sys.argv``-shaped vector; when empty or None,
    ``sys.argv`` is used instead.
    """
    if not argv:
        argv = sys.argv
    args = parse_args(argv)
    init_logging(args.debug)

    pots, rules = load_input(args.input)
    logging.info('pots: {}'.format(pots))
    logging.info('rules: {}'.format(rules))

    # Convert pots to a list so elements can be replaced in place.
    pots = list(pots)
    # lpadding tracks how many '.' pots were prepended so real pot numbers
    # can be recovered in the final sum.
    lpadding = 0
    extension_size = 3
    if '#' in pots[:3]:
        for _ in range(extension_size):
            pots.insert(0, '.')
        lpadding += extension_size
    if '#' in pots[-3:]:
        pots.extend(['.'] * extension_size)

    # Single authoritative value (the previous chain of reassignments was
    # dead code; only the last one ever took effect).
    generations = 50000
    for _gen in range(generations):
        # The two outermost pots keep their value: their 5-pot windows
        # would reach past the padded edges.
        new_pots = [pots[0], pots[1]]
        for i in range(len(pots) - 4):
            new_pots.append(next_gen(rules, pots[i:i + 5]))
        new_pots.extend(pots[-2:])
        pots = new_pots
        # Re-pad whenever a plant grows close to either edge.
        if '#' in pots[:3]:
            for _ in range(extension_size):
                pots.insert(0, '.')
            lpadding += extension_size
        if '#' in pots[-3:]:
            pots.extend(['.'] * extension_size)

    logging.debug('pots length = {}'.format(len(pots)))
    potsum = 0
    for i, pot in enumerate(pots):
        if pot == '#':
            # Use the tracked left padding; the old hard-coded "i - 20"
            # was only valid for one particular input.
            logging.debug('pot[{}] has a plant'.format(i - lpadding))
            potsum += i - lpadding
    print('Answer is {}'.format(potsum))


if __name__ == '__main__':
    main(sys.argv)
| true | true |
f7297b3454c8f6b937a03623d2c2441074999c25 | 4,066 | py | Python | src/aks-preview/azext_aks_preview/_completers.py | santosh02iiit/azure-cli-extensions | 24247cfa19e2a5894937f19e17fbdc8308b28ef6 | [
"MIT"
] | 1 | 2021-08-03T18:32:54.000Z | 2021-08-03T18:32:54.000Z | src/aks-preview/azext_aks_preview/_completers.py | santosh02iiit/azure-cli-extensions | 24247cfa19e2a5894937f19e17fbdc8308b28ef6 | [
"MIT"
] | 4 | 2020-09-07T12:56:24.000Z | 2021-02-04T12:19:20.000Z | src/aks-preview/azext_aks_preview/_completers.py | santosh02iiit/azure-cli-extensions | 24247cfa19e2a5894937f19e17fbdc8308b28ef6 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.parameters import get_one_of_subscription_locations
from azure.cli.core.decorators import Completer
# pylint: disable=line-too-long
from azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.v2021_07_01.models import ContainerServiceVMSizeTypes
@Completer
def get_k8s_upgrades_completion_list(cmd, prefix, namespace, **kwargs):  # pylint: disable=unused-argument
    """Return Kubernetes versions available for upgrading an existing cluster."""
    resource_group = getattr(namespace, 'resource_group_name', None)
    name = getattr(namespace, 'name', None)
    # Both the resource group and the cluster name are needed to query the
    # upgrade profile; otherwise no completions can be offered.
    if not (resource_group and name):
        return None
    return get_k8s_upgrades(cmd.cli_ctx, resource_group, name)
def get_k8s_upgrades(cli_ctx, resource_group, name):
    """Return the control-plane upgrade versions reported for an existing managed cluster."""
    from ._client_factory import cf_managed_clusters
    results = cf_managed_clusters(cli_ctx).get_upgrade_profile(resource_group, name).as_dict()
    return results['control_plane_profile']['upgrades']
@Completer
def get_k8s_versions_completion_list(cmd, prefix, namespace, **kwargs):  # pylint: disable=unused-argument
    """Return Kubernetes versions available for provisioning a new cluster."""
    location = _get_location(cmd.cli_ctx, namespace)
    if not location:
        return None
    return get_k8s_versions(cmd.cli_ctx, location)
def get_k8s_versions(cli_ctx, location):
    """Return a list of Kubernetes versions available for a new cluster."""
    from ._client_factory import cf_container_services
    from jmespath import search  # pylint: disable=import-error
    orchestrators = cf_container_services(cli_ctx).list_orchestrators(
        location, resource_type='managedClusters').as_dict()
    # Flatten every "orchestrator_version" field into a single list.
    return search('orchestrators[*].orchestrator_version', orchestrators)
@Completer
def get_vm_size_completion_list(cmd, prefix, namespace, **kwargs):  # pylint: disable=unused-argument
    """Return the intersection of the VM sizes allowed by the ACS SDK with those returned by the Compute Service."""
    location = _get_location(cmd.cli_ctx, namespace)
    compute_sizes = {size.name for size in get_vm_sizes(cmd.cli_ctx, location)}
    sdk_sizes = {allowed.value for allowed in ContainerServiceVMSizeTypes}
    return compute_sizes & sdk_sizes
def get_vm_sizes(cli_ctx, location):
    """Return the VM sizes offered by the Compute service in *location*."""
    from ._client_factory import cf_compute_service
    return cf_compute_service(cli_ctx).virtual_machine_sizes.list(location)
@Completer
def get_ossku_completion_list(cmd, prefix, namespace, **kwargs):  # pylint: disable=unused-argument
    """Return the list of allowed os-sku values"""
    # Fixed set accepted by the service for the --os-sku argument.
    return ["Ubuntu", "CBLMariner"]
def _get_location(cli_ctx, namespace):
    """
    Return an Azure location by using an explicit `--location` argument, then by `--resource-group`, and
    finally by the subscription if neither argument was provided.
    """
    explicit = getattr(namespace, 'location', None)
    if explicit:
        return explicit
    location = None
    if getattr(namespace, 'resource_group_name', None):
        location = _get_location_from_resource_group(cli_ctx, namespace.resource_group_name)
    # The resource-group lookup may fail and yield nothing; fall back to
    # any location from the subscription.
    return location or get_one_of_subscription_locations(cli_ctx)
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the location of *resource_group_name*, or None when it cannot be resolved."""
    from ._client_factory import cf_resource_groups
    from msrestazure.azure_exceptions import CloudError
    try:
        return cf_resource_groups(cli_ctx).get(resource_group_name).location
    except CloudError as err:
        # Print a warning if the user hit [TAB] but the `--resource-group`
        # argument was incorrect.
        # For example: "Warning: Resource group 'bogus' could not be found."
        from argcomplete import warn
        warn('Warning: {}'.format(err.message))
| 43.255319 | 116 | 0.727496 |
from azure.cli.core.commands.parameters import get_one_of_subscription_locations
from azure.cli.core.decorators import Completer
from azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.v2021_07_01.models import ContainerServiceVMSizeTypes
@Completer
def get_k8s_upgrades_completion_list(cmd, prefix, namespace, **kwargs):
    """Return Kubernetes versions available for upgrading an existing cluster."""
    resource_group = getattr(namespace, 'resource_group_name', None)
    name = getattr(namespace, 'name', None)
    return get_k8s_upgrades(cmd.cli_ctx, resource_group, name) if resource_group and name else None
def get_k8s_upgrades(cli_ctx, resource_group, name):
    """Return the control-plane upgrade versions reported for an existing managed cluster."""
    from ._client_factory import cf_managed_clusters
    results = cf_managed_clusters(cli_ctx).get_upgrade_profile(resource_group, name).as_dict()
    return results['control_plane_profile']['upgrades']
@Completer
def get_k8s_versions_completion_list(cmd, prefix, namespace, **kwargs):
    """Return Kubernetes versions available for provisioning a new cluster."""
    location = _get_location(cmd.cli_ctx, namespace)
    return get_k8s_versions(cmd.cli_ctx, location) if location else None
def get_k8s_versions(cli_ctx, location):
    """Return a list of Kubernetes versions available for a new cluster."""
    from ._client_factory import cf_container_services
    from jmespath import search
    results = cf_container_services(cli_ctx).list_orchestrators(location, resource_type='managedClusters').as_dict()
    # Flatten all "orchestrator_version" fields into one list.
    return search('orchestrators[*].orchestrator_version', results)
@Completer
def get_vm_size_completion_list(cmd, prefix, namespace, **kwargs):
    """Return the intersection of the VM sizes allowed by the ACS SDK with those from the Compute service."""
    location = _get_location(cmd.cli_ctx, namespace)
    result = get_vm_sizes(cmd.cli_ctx, location)
    return set(r.name for r in result) & set(c.value for c in ContainerServiceVMSizeTypes)
def get_vm_sizes(cli_ctx, location):
    """Return the VM sizes offered by the Compute service in *location*."""
    from ._client_factory import cf_compute_service
    return cf_compute_service(cli_ctx).virtual_machine_sizes.list(location)
@Completer
def get_ossku_completion_list(cmd, prefix, namespace, **kwargs):
    """Return the list of allowed os-sku values."""
    return ["Ubuntu", "CBLMariner"]
def _get_location(cli_ctx, namespace):
    """Resolve a location from --location, then --resource-group, then any subscription location."""
    location = None
    if getattr(namespace, 'location', None):
        location = namespace.location
    elif getattr(namespace, 'resource_group_name', None):
        location = _get_location_from_resource_group(cli_ctx, namespace.resource_group_name)
    if not location:
        location = get_one_of_subscription_locations(cli_ctx)
    return location
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the resource group's location, or None (after warning) when it cannot be resolved."""
    from ._client_factory import cf_resource_groups
    from msrestazure.azure_exceptions import CloudError
    try:
        rg = cf_resource_groups(cli_ctx).get(resource_group_name)
        return rg.location
    except CloudError as err:
        # Warn instead of raising so shell completion degrades gracefully.
        from argcomplete import warn
        warn('Warning: {}'.format(err.message))
| true | true |
f7297b8786de15ec51495c45a86897ab6e14ca5f | 1,167 | py | Python | activators/src/motdactivator.py | alexandruavadanii/cm-plugins | 5c3f9f389f46f719579ac4cd4065490b1723ebff | [
"Apache-2.0"
] | null | null | null | activators/src/motdactivator.py | alexandruavadanii/cm-plugins | 5c3f9f389f46f719579ac4cd4065490b1723ebff | [
"Apache-2.0"
] | null | null | null | activators/src/motdactivator.py | alexandruavadanii/cm-plugins | 5c3f9f389f46f719579ac4cd4065490b1723ebff | [
"Apache-2.0"
] | 1 | 2021-04-24T16:48:17.000Z | 2021-04-24T16:48:17.000Z | #! /usr/bin/python
# Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cmframework.apis import cmactivator
class motdactivator(cmactivator.CMGlobalActivator):
    """Activator that re-runs the motd Ansible playbook when the
    ``cloud.motd`` configuration subtree changes."""
    # Playbook that renders the message of the day on the managed hosts.
    playbook = '/opt/openstack-ansible/playbooks/motd.yml'
    def __init__(self):
        super(motdactivator, self).__init__()
    def get_subscription_info(self):
        """Return the configuration key prefix this activator listens to."""
        return 'cloud.motd'
    def activate_set(self, props):
        """Handle creation or update of the subscribed configuration."""
        self._activate()
    def activate_delete(self, props):
        """Handle deletion of the subscribed configuration."""
        self._activate()
    def activate_full(self, target):
        """Handle a full (re)activation, optionally limited to *target*."""
        self._activate(target=target)
    def _activate(self, target=None):
        # Delegate to the framework; target=None applies to all hosts.
        self.run_playbook(self.playbook, target)
| 30.710526 | 74 | 0.725793 |
from cmframework.apis import cmactivator
class motdactivator(cmactivator.CMGlobalActivator):
    """Activator that re-runs the motd Ansible playbook when the
    ``cloud.motd`` configuration subtree changes."""
    # Playbook that renders the message of the day on the managed hosts.
    playbook = '/opt/openstack-ansible/playbooks/motd.yml'
    def __init__(self):
        super(motdactivator, self).__init__()
    def get_subscription_info(self):
        """Return the configuration key prefix this activator listens to."""
        return 'cloud.motd'
    def activate_set(self, props):
        """Handle creation or update of the subscribed configuration."""
        self._activate()
    def activate_delete(self, props):
        """Handle deletion of the subscribed configuration."""
        self._activate()
    def activate_full(self, target):
        """Handle a full (re)activation, optionally limited to *target*."""
        self._activate(target=target)
    def _activate(self, target=None):
        # Delegate to the framework; target=None applies to all hosts.
        self.run_playbook(self.playbook, target)
| true | true |
f7297bf8b18239cca9640934586a3c6e0d1b0006 | 563 | py | Python | code/202011/20201119_283_moveZeroes.py | ace7chan/leetcode-daily | d07fe58fdb635dfa2a092515e8e235c476826b09 | [
"MIT"
] | null | null | null | code/202011/20201119_283_moveZeroes.py | ace7chan/leetcode-daily | d07fe58fdb635dfa2a092515e8e235c476826b09 | [
"MIT"
] | null | null | null | code/202011/20201119_283_moveZeroes.py | ace7chan/leetcode-daily | d07fe58fdb635dfa2a092515e8e235c476826b09 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    def moveZeroes(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Compacts every non-zero value toward the front (stable order),
        then zero-fills the remaining tail (LeetCode 283).
        """
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        for tail in range(write, len(nums)):
            nums[tail] = 0
if __name__ == '__main__':
    solution = Solution()
    nums = [0, 1, 0, 3, 12]
    # moveZeroes mutates in place and returns None, so print the mutated
    # list rather than the (always-None) return value.
    solution.moveZeroes(nums)
    print(nums)
| 22.52 | 61 | 0.50444 | from typing import List
class Solution:
    def moveZeroes(self, nums: List[int]) -> None:
        """Do not return anything, modify nums in-place instead."""
        # Compact non-zero values to the front, keeping their order.
        cur_idx = 0
        for num in nums:
            if num == 0:
                continue
            nums[cur_idx] = num
            cur_idx += 1
        # Zero-fill the remaining tail positions.
        while cur_idx < len(nums):
            nums[cur_idx] = 0
            cur_idx += 1
if __name__ == '__main__':
    solution = Solution()
    nums = [0, 1, 0, 3, 12]
    # moveZeroes mutates in place and returns None, so print the mutated
    # list rather than the (always-None) return value.
    solution.moveZeroes(nums)
    print(nums)
| true | true |
f7297e07cf922dfa791b82c22a538dc4f2b6e22c | 1,588 | py | Python | misc/zkbreaker.py | hubo1016/vlcp | 61c4c2595b610675ac0cbc4dbc46f70ec40090d3 | [
"Apache-2.0"
] | 252 | 2015-11-17T14:21:50.000Z | 2022-03-11T10:19:47.000Z | misc/zkbreaker.py | SarahZarei/vlcp | 61c4c2595b610675ac0cbc4dbc46f70ec40090d3 | [
"Apache-2.0"
] | 23 | 2018-01-09T13:28:52.000Z | 2019-12-12T06:11:44.000Z | misc/zkbreaker.py | SarahZarei/vlcp | 61c4c2595b610675ac0cbc4dbc46f70ec40090d3 | [
"Apache-2.0"
] | 37 | 2016-08-03T04:42:22.000Z | 2021-12-30T16:57:10.000Z | '''
Created on 2016/10/25
:author: hubo
'''
from vlcp.config import config
from vlcp.protocol.zookeeper import ZooKeeper
import vlcp.protocol.zookeeper
from random import random
from vlcp.event.core import syscall_clearqueue
from logging import getLogger
_logger = getLogger(__name__)
@config('protocol.zookeeper')
class BreakingZooKeeper(ZooKeeper):
    '''
    This evil protocol breaks ZooKeeper connection from time to time to validate your client
    and service code
    '''
    # Per-operation drop probabilities (configurable via the config system,
    # presumably mapped to self.senddrop / self.receivedrop -- verify in vlcp.config).
    _default_senddrop = 0.001
    _default_receivedrop = 0.01
    async def _senddata(self, connection, data, container, priority = 0):
        # Randomly reset the connection before delegating to the real send.
        if random() < self.senddrop:
            _logger.warning("Oops, I break a connection when sending")
            await connection.reset(True)
        return await ZooKeeper._senddata(self, connection, data, container, priority)
    async def requests(self, connection, requests, container, callback=None, priority = 0):
        # Wrap the caller's callback so each response may randomly reset the
        # connection and clear its pending message queue before forwarding.
        def evil_callback(request, response):
            if random() < self.receivedrop:
                _logger.warning("Oops, I break a connection when receiving")
                connection.subroutine(connection.reset(True), False)
                connection.subroutine(connection.syscall_noreturn(syscall_clearqueue(connection.scheduler.queue[('message', connection)])))
            if callback:
                callback(request, response)
        return await ZooKeeper.requests(self, connection, requests, container, evil_callback, priority)
def patch_zookeeper():
    """Globally replace the ZooKeeper protocol class with the breaking variant."""
    vlcp.protocol.zookeeper.ZooKeeper = BreakingZooKeeper
| 34.521739 | 139 | 0.707809 | from vlcp.config import config
from vlcp.protocol.zookeeper import ZooKeeper
import vlcp.protocol.zookeeper
from random import random
from vlcp.event.core import syscall_clearqueue
from logging import getLogger
_logger = getLogger(__name__)
@config('protocol.zookeeper')
class BreakingZooKeeper(ZooKeeper):
    """Fault-injecting ZooKeeper protocol: randomly breaks the connection
    on sends and receives to validate client/service resilience."""
    # Per-operation drop probabilities.
    _default_senddrop = 0.001
    _default_receivedrop = 0.01
    async def _senddata(self, connection, data, container, priority = 0):
        # Randomly reset the connection before delegating to the real send.
        if random() < self.senddrop:
            _logger.warning("Oops, I break a connection when sending")
            await connection.reset(True)
        return await ZooKeeper._senddata(self, connection, data, container, priority)
    async def requests(self, connection, requests, container, callback=None, priority = 0):
        # Wrap the caller's callback so each response may randomly reset the
        # connection and clear its pending message queue before forwarding.
        def evil_callback(request, response):
            if random() < self.receivedrop:
                _logger.warning("Oops, I break a connection when receiving")
                connection.subroutine(connection.reset(True), False)
                connection.subroutine(connection.syscall_noreturn(syscall_clearqueue(connection.scheduler.queue[('message', connection)])))
            if callback:
                callback(request, response)
        return await ZooKeeper.requests(self, connection, requests, container, evil_callback, priority)
def patch_zookeeper():
    """Globally replace the ZooKeeper protocol class with the breaking variant."""
    vlcp.protocol.zookeeper.ZooKeeper = BreakingZooKeeper
| true | true |
f7297e1604cfd737ba606eafbf1451eb0a2c2972 | 1,504 | py | Python | test/functional/feature_blocksdir.py | fancywarlock/bitcoinr | 12b4dee6342556c0890218b843f29cadfab06214 | [
"MIT"
] | 2 | 2020-05-31T01:06:06.000Z | 2021-06-07T22:29:32.000Z | test/functional/feature_blocksdir.py | fancywarlock/bitcoinr | 12b4dee6342556c0890218b843f29cadfab06214 | [
"MIT"
] | null | null | null | test/functional/feature_blocksdir.py | fancywarlock/bitcoinr | 12b4dee6342556c0890218b843f29cadfab06214 | [
"MIT"
] | 3 | 2020-09-24T16:46:45.000Z | 2021-06-07T22:29:33.000Z | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the blocksdir option.
"""
import os
import shutil
from test_framework.test_framework import bitcoinRTestFramework, initialize_datadir
class BlocksdirTest(bitcoinRTestFramework):
    """Functional test for the -blocksdir startup option."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        # Recreate the node's datadir from scratch so no blocksdir exists yet.
        self.stop_node(0)
        shutil.rmtree(self.nodes[0].datadir)
        initialize_datadir(self.options.tmpdir, 0)
        self.log.info("Starting with non exiting blocksdir ...")
        blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
        # Startup must fail while the blocks directory is missing.
        self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path))
        os.mkdir(blocksdir_path)
        self.log.info("Starting with exiting blocksdir ...")
        self.start_node(0, ["-blocksdir=" + blocksdir_path])
        self.log.info("mining blocks..")
        self.nodes[0].generate(10)
        # Block files go to the custom blocksdir; the index stays in datadir.
        assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blocks", "blk00000.dat"))
        assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks", "index"))
if __name__ == '__main__':
    BlocksdirTest().main()
| 37.6 | 167 | 0.698803 |
import os
import shutil
from test_framework.test_framework import bitcoinRTestFramework, initialize_datadir
class BlocksdirTest(bitcoinRTestFramework):
    """Functional test for the -blocksdir startup option."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        # Recreate the node's datadir from scratch so no blocksdir exists yet.
        self.stop_node(0)
        shutil.rmtree(self.nodes[0].datadir)
        initialize_datadir(self.options.tmpdir, 0)
        self.log.info("Starting with non exiting blocksdir ...")
        blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
        # Startup must fail while the blocks directory is missing.
        self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path))
        os.mkdir(blocksdir_path)
        self.log.info("Starting with exiting blocksdir ...")
        self.start_node(0, ["-blocksdir=" + blocksdir_path])
        self.log.info("mining blocks..")
        self.nodes[0].generate(10)
        # Block files go to the custom blocksdir; the index stays in datadir.
        assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blocks", "blk00000.dat"))
        assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks", "index"))
if __name__ == '__main__':
    BlocksdirTest().main()
| true | true |
f7297e96bfe1de8e74a8448f1f7dfe04aa9d2b63 | 6,235 | py | Python | tests/test_stac.py | stactools-packages/cop-dem | 25a4bb69eb60caa339e11b8293291cdc192e2de8 | [
"Apache-2.0"
] | null | null | null | tests/test_stac.py | stactools-packages/cop-dem | 25a4bb69eb60caa339e11b8293291cdc192e2de8 | [
"Apache-2.0"
] | 3 | 2021-07-29T16:58:52.000Z | 2021-08-12T18:18:42.000Z | tests/test_stac.py | stactools-packages/cop-dem | 25a4bb69eb60caa339e11b8293291cdc192e2de8 | [
"Apache-2.0"
] | 1 | 2021-08-05T23:17:51.000Z | 2021-08-05T23:17:51.000Z | import datetime
from unittest import TestCase
from pystac import Provider, MediaType
from pystac.extensions.projection import ProjectionExtension
from pystac.provider import ProviderRole
from stactools.cop_dem import stac
from tests import test_data
class StacTest(TestCase):
    """Tests for :func:`stactools.cop_dem.stac.create_item`."""

    def setUp(self):
        self.glo30_path = test_data.get_external_data(
            "Copernicus_DSM_COG_10_N53_00_W115_00_DEM.tif")
        self.glo90_path = test_data.get_external_data(
            "Copernicus_DSM_COG_30_N53_00_W115_00_DEM.tif")

    def _assert_dem_item(self, item, item_id, bbox, gsd, data_href, shape,
                         transform):
        """Assert the invariants shared by GLO-30 and GLO-90 items.

        The two product tests previously duplicated this whole body; only
        the id/bbox/gsd/asset-path/shape/transform values differ.
        """
        self.assertEqual(item.id, item_id)
        self.assertIsNotNone(item.geometry)
        self.assertEqual(list(item.bbox), bbox)
        self.assertEqual(
            item.datetime,
            datetime.datetime(2021, 4, 22, tzinfo=datetime.timezone.utc))
        common_metadata = item.common_metadata
        self.assertEqual(common_metadata.platform, "TanDEM-X")
        self.assertEqual(common_metadata.gsd, gsd)
        expected_providers = [
            Provider("European Space Agency",
                     roles=[ProviderRole.LICENSOR],
                     url=("https://spacedata.copernicus.eu/documents/20126/0/"
                          "CSCDA_ESA_Mission-specific+Annex.pdf")),
            Provider("Sinergise",
                     roles=[ProviderRole.PRODUCER, ProviderRole.PROCESSOR],
                     url="https://registry.opendata.aws/copernicus-dem/"),
            Provider("OpenTopography",
                     roles=[ProviderRole.HOST],
                     url=("https://portal.opentopography.org/"
                          "datasetMetadata?otCollectionID=OT.032021.4326.1"))
        ]
        for expected, actual in zip(expected_providers,
                                    common_metadata.providers):
            self.assertDictEqual(expected.to_dict(), actual.to_dict())
        self.assertEqual(common_metadata.license, "proprietary")
        projection = ProjectionExtension.ext(item)
        self.assertEqual(projection.epsg, 4326)
        self.assertEqual(projection.shape, shape)
        self.assertEqual(list(projection.transform), transform)
        handbook = item.get_single_link("handbook")
        self.assertIsNotNone(handbook)
        self.assertEqual(handbook.title, "Copernicus DEM User handbook")
        self.assertEqual(handbook.rel, "handbook")
        self.assertEqual(
            handbook.href,
            "https://object.cloud.sdsc.edu/v1/AUTH_opentopography/www/metadata"
            "/Copernicus_metadata.pdf")
        self.assertEqual(handbook.media_type, "application/pdf")
        data = item.assets["data"]
        self.assertEqual(data.href, data_href)
        self.assertEqual(data.title, "N53_00_W115_00")
        self.assertIsNone(data.description)
        self.assertEqual(data.media_type, MediaType.COG)
        self.assertEqual(data.roles, ["data"])
        item.validate()

    def test_create_glo30_item(self):
        item = stac.create_item(self.glo30_path)
        self._assert_dem_item(
            item,
            item_id="Copernicus_DSM_COG_10_N53_00_W115_00_DEM",
            bbox=[
                -115.00020833333333, 53.00013888888889, -114.00020833333333,
                54.00013888888889
            ],
            gsd=30,
            data_href=self.glo30_path,
            shape=(3600, 2400),
            transform=[
                0.00041666666666666664, 0.0, -115.00020833333333, 0.0,
                -0.0002777777777777778, 54.00013888888889
            ])

    def test_create_glo90_item(self):
        item = stac.create_item(self.glo90_path)
        self._assert_dem_item(
            item,
            item_id="Copernicus_DSM_COG_30_N53_00_W115_00_DEM",
            bbox=[-115.000625, 53.000416666666666, -114.000625,
                  54.000416666666666],
            gsd=90,
            data_href=self.glo90_path,
            shape=(1200, 800),
            transform=[
                0.00125, 0.0, -115.000625, 0.0, -0.0008333333333333334,
                54.000416666666666
            ])

    def test_create_item_with_read_href_modifier(self):
        done = False

        def do_it(href):
            nonlocal done
            done = True
            return href

        _ = stac.create_item(self.glo30_path, read_href_modifier=do_it)
        self.assertTrue(done, "Didn't do it")
| 41.845638 | 79 | 0.631275 | import datetime
from unittest import TestCase
from pystac import Provider, MediaType
from pystac.extensions.projection import ProjectionExtension
from pystac.provider import ProviderRole
from stactools.cop_dem import stac
from tests import test_data
class StacTest(TestCase):
    """Tests for :func:`stactools.cop_dem.stac.create_item`."""

    def setUp(self):
        self.glo30_path = test_data.get_external_data(
            "Copernicus_DSM_COG_10_N53_00_W115_00_DEM.tif")
        self.glo90_path = test_data.get_external_data(
            "Copernicus_DSM_COG_30_N53_00_W115_00_DEM.tif")

    def _assert_dem_item(self, item, item_id, bbox, gsd, data_href, shape,
                         transform):
        """Assert the invariants shared by GLO-30 and GLO-90 items.

        The two product tests previously duplicated this whole body; only
        the id/bbox/gsd/asset-path/shape/transform values differ.
        """
        self.assertEqual(item.id, item_id)
        self.assertIsNotNone(item.geometry)
        self.assertEqual(list(item.bbox), bbox)
        self.assertEqual(
            item.datetime,
            datetime.datetime(2021, 4, 22, tzinfo=datetime.timezone.utc))
        common_metadata = item.common_metadata
        self.assertEqual(common_metadata.platform, "TanDEM-X")
        self.assertEqual(common_metadata.gsd, gsd)
        expected_providers = [
            Provider("European Space Agency",
                     roles=[ProviderRole.LICENSOR],
                     url=("https://spacedata.copernicus.eu/documents/20126/0/"
                          "CSCDA_ESA_Mission-specific+Annex.pdf")),
            Provider("Sinergise",
                     roles=[ProviderRole.PRODUCER, ProviderRole.PROCESSOR],
                     url="https://registry.opendata.aws/copernicus-dem/"),
            Provider("OpenTopography",
                     roles=[ProviderRole.HOST],
                     url=("https://portal.opentopography.org/"
                          "datasetMetadata?otCollectionID=OT.032021.4326.1"))
        ]
        for expected, actual in zip(expected_providers,
                                    common_metadata.providers):
            self.assertDictEqual(expected.to_dict(), actual.to_dict())
        self.assertEqual(common_metadata.license, "proprietary")
        projection = ProjectionExtension.ext(item)
        self.assertEqual(projection.epsg, 4326)
        self.assertEqual(projection.shape, shape)
        self.assertEqual(list(projection.transform), transform)
        handbook = item.get_single_link("handbook")
        self.assertIsNotNone(handbook)
        self.assertEqual(handbook.title, "Copernicus DEM User handbook")
        self.assertEqual(handbook.rel, "handbook")
        self.assertEqual(
            handbook.href,
            "https://object.cloud.sdsc.edu/v1/AUTH_opentopography/www/metadata"
            "/Copernicus_metadata.pdf")
        self.assertEqual(handbook.media_type, "application/pdf")
        data = item.assets["data"]
        self.assertEqual(data.href, data_href)
        self.assertEqual(data.title, "N53_00_W115_00")
        self.assertIsNone(data.description)
        self.assertEqual(data.media_type, MediaType.COG)
        self.assertEqual(data.roles, ["data"])
        item.validate()

    def test_create_glo30_item(self):
        item = stac.create_item(self.glo30_path)
        self._assert_dem_item(
            item,
            item_id="Copernicus_DSM_COG_10_N53_00_W115_00_DEM",
            bbox=[
                -115.00020833333333, 53.00013888888889, -114.00020833333333,
                54.00013888888889
            ],
            gsd=30,
            data_href=self.glo30_path,
            shape=(3600, 2400),
            transform=[
                0.00041666666666666664, 0.0, -115.00020833333333, 0.0,
                -0.0002777777777777778, 54.00013888888889
            ])

    def test_create_glo90_item(self):
        item = stac.create_item(self.glo90_path)
        self._assert_dem_item(
            item,
            item_id="Copernicus_DSM_COG_30_N53_00_W115_00_DEM",
            bbox=[-115.000625, 53.000416666666666, -114.000625,
                  54.000416666666666],
            gsd=90,
            data_href=self.glo90_path,
            shape=(1200, 800),
            transform=[
                0.00125, 0.0, -115.000625, 0.0, -0.0008333333333333334,
                54.000416666666666
            ])

    def test_create_item_with_read_href_modifier(self):
        done = False

        def do_it(href):
            nonlocal done
            done = True
            return href

        _ = stac.create_item(self.glo30_path, read_href_modifier=do_it)
        self.assertTrue(done, "Didn't do it")
| true | true |
f7297f1d91ce8e6b39e55cc5ac6ecf497f9f71db | 1,068 | py | Python | example_workspace/inverted_hierarchy/model.py | anicokatz/PyMultiNestPlus | d223ac90bef7c1b61e337b70c2bdb41ed46cb2b7 | [
"OML"
] | null | null | null | example_workspace/inverted_hierarchy/model.py | anicokatz/PyMultiNestPlus | d223ac90bef7c1b61e337b70c2bdb41ed46cb2b7 | [
"OML"
] | null | null | null | example_workspace/inverted_hierarchy/model.py | anicokatz/PyMultiNestPlus | d223ac90bef7c1b61e337b70c2bdb41ed46cb2b7 | [
"OML"
] | null | null | null | # INVERTED HIERARCHY
import prior_handler as phandle
import math
import numpy as np
import os
# Module-level setup: build the prior handler from the prior definitions
# stored next to this file and expose its constants.
cwd = os.path.dirname(os.path.realpath(__file__))
print(cwd)  # NOTE(review): debug output kept intentionally unchanged
prior_handler = phandle.PriorHandler(cwd)
con = prior_handler.c  # con[0]/con[1] act as mean and sigma in loglikelihood below
n_pars = prior_handler.n_pars  # number of sampled parameters
def prior(cube, n_dims, n_pars):
    """Prior transform (PyMultiNest-style signature): scale unit-cube samples into parameter space."""
    return prior_handler.scale(cube)
def observables(pars):
    """Compute the observable for one parameter vector.

    Returns a one-element list containing
    |a1*m1*e^{-i p1} + a2*m2*e^{-i p2} + a3*m3|, where m1/m2 are derived
    from m3 and the mass splittings (inverted hierarchy: m3 is pars[0]).
    """
    # Nuisances are derived from the parameter-based seed; the call is kept
    # even though the value is unused here (presumably for its seeding
    # side effects -- TODO confirm in prior_handler).
    nui = prior_handler.get_nui(pars)
    cos13 = math.cos(pars[4])
    weight1 = abs(math.cos(pars[3] * cos13)) ** 2
    weight2 = abs(math.sin(pars[3] * cos13)) ** 2
    weight3 = abs(math.sin(pars[4])) ** 2
    small_split = pars[5]
    big_split = pars[6]
    lightest = pars[0]
    # Clamp at zero so rounding cannot produce a negative sqrt argument.
    mass2 = math.sqrt(max(0, lightest ** 2 + big_split + small_split / 2))
    mass1 = math.sqrt(max(0, lightest ** 2 + big_split - small_split / 2))
    phase1 = np.exp(-1j * pars[1])
    phase2 = np.exp(-1j * pars[2])
    return [abs(weight1 * mass1 * phase1 + weight2 * mass2 * phase2 + weight3 * lightest)]
def loglikelihood(pars, n_dims, n_pars):
    """Gaussian log-likelihood of the observable against con = (mean, sigma)."""
    mval = observables(pars)
    mval = mval[0]
    # -(m - con[0])^2 / (2 * con[1]^2): Gaussian with mean con[0], width con[1].
    loglikelihood = (-((mval-con[0])**2)/(2*(con[1]**2)))
return loglikelihood | 26.04878 | 80 | 0.627341 |
import prior_handler as phandle
import math
import numpy as np
import os
# Module-level setup: build the prior handler from the prior definitions
# stored next to this file and expose its constants.
cwd = os.path.dirname(os.path.realpath(__file__))
print(cwd)  # NOTE(review): debug output kept intentionally unchanged
prior_handler = phandle.PriorHandler(cwd)
con = prior_handler.c  # con[0]/con[1] act as mean and sigma in loglikelihood below
n_pars = prior_handler.n_pars  # number of sampled parameters
def prior(cube, n_dims, n_pars):
    """Prior transform (PyMultiNest-style signature): scale unit-cube samples into parameter space."""
    return prior_handler.scale(cube)
def observables(pars):
    """Return [ |a1*m1*e^{-i p1} + a2*m2*e^{-i p2} + a3*m3| ] (inverted hierarchy: m3 = pars[0])."""
    # Nuisances derive from the parameter-based seed; result unused here
    # (presumably kept for seeding side effects -- TODO confirm).
    nui = prior_handler.get_nui(pars)
    c13 = math.cos(pars[4])
    a1 = abs(math.cos(pars[3]*c13))**2
    a2 = abs(math.sin(pars[3]*c13))**2
    a3 = abs(math.sin(pars[4]))**2
    # Mass splittings and the lightest mass for the inverted ordering;
    # max(0, ...) guards the sqrt against negative arguments.
    dm2 = pars[5]
    Dm2 = pars[6]
    m3 = pars[0]
    m2 = math.sqrt(max([0, m3**2 + Dm2 + dm2/2]))
    m1 = math.sqrt(max([0, m3**2 + Dm2 - dm2/2]))
    return [abs(a1*m1*np.exp(-1j*pars[1]) + a2*m2*np.exp(-1j*pars[2]) + a3*m3 )]
def loglikelihood(pars, n_dims, n_pars):
    """Gaussian log-likelihood of the observable against con = (mean, sigma)."""
    mval = observables(pars)
    mval = mval[0]
    # -(m - con[0])^2 / (2 * con[1]^2): Gaussian with mean con[0], width con[1].
    loglikelihood = (-((mval-con[0])**2)/(2*(con[1]**2)))
return loglikelihood | true | true |
f7297fc788219d05bf55769bec2813a65e0f1710 | 1,478 | py | Python | tests/test_documentation_eager.py | burgerkingeater/io | f2de208f474d6ba4926e2c7f9e901e102ca5c254 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:44:08.000Z | 2021-05-10T10:44:08.000Z | tests/test_documentation_eager.py | burgerkingeater/io | f2de208f474d6ba4926e2c7f9e901e102ca5c254 | [
"Apache-2.0"
] | 1 | 2021-02-24T10:37:32.000Z | 2021-02-24T10:37:32.000Z | tests/test_documentation_eager.py | burgerkingeater/io | f2de208f474d6ba4926e2c7f9e901e102ca5c254 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for MNIST Dataset with tf.keras."""
import os
def extract_block(filename, lang):
    """Return the body of the first ```<lang> fenced code block in *filename*.

    The closing ``` fence must start at the same column as the opening fence.
    The fence lines themselves are excluded; an empty string is returned when
    no matching block exists.
    """
    marker = "```" + lang
    collected = []
    fence_col = -1  # column of the opening fence; -1 while still searching
    with open(filename) as handle:
        for line in handle:
            if fence_col < 0:
                if line.strip().startswith(marker):
                    fence_col = line.find(marker)
                continue
            if line.strip().startswith("```") and line.find("```") == fence_col:
                break
            collected.append(line)
    return "".join(collected)
def test_readme():
    """Execute the first ```python code block of the repository README."""
    # Note: From README.md, one directory above this test module.
    here = os.path.dirname(os.path.abspath(__file__))
    readme_path = os.path.join(here, "..", "README.md")
    snippet = extract_block(readme_path, "python")
    # Run in globals() so the snippet can use this module's imports.
    exec(snippet, globals())  # pylint: disable=exec-used
| 32.844444 | 80 | 0.587957 |
import os
def extract_block(filename, lang):
source = ""
with open(filename) as f:
hit = -1
for line in f:
if hit < 0:
if line.strip().startswith("```" + lang):
hit = line.find("```" + lang)
else:
if line.strip().startswith("```") and line.find("```") == hit:
break
source += line
return source
def test_readme():
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "README.md"
)
source = extract_block(filename, "python")
exec(source, globals())
| true | true |
f72980142790efdf80fe4386352f892f576b4b05 | 664 | py | Python | game/combat/effects/moveeffect/avghp.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | 5 | 2021-06-25T16:44:38.000Z | 2021-12-31T01:29:00.000Z | game/combat/effects/moveeffect/avghp.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | null | null | null | game/combat/effects/moveeffect/avghp.py | Sipondo/ulix-dexflow | de46482fe08e3d600dd5da581f0524b55e5df961 | [
"MIT"
] | 1 | 2021-06-25T20:33:47.000Z | 2021-06-25T20:33:47.000Z | from game.combat.effects.moveeffect.basemoveeffect import BaseMoveEffect
class Avghp(BaseMoveEffect):
    """Move effect that averages the current HP of the move's user and target."""

    def after_action(self):
        board = self.scene.board
        user, target = self.move.user, self.move.target
        # Pool both combatants' current HP and split it evenly (integer floor).
        pooled = (
            board.get_data(user).current_hp + board.get_data(target).current_hp
        ) // 2
        # stats[0] is the max-HP stat; never raise HP above a combatant's maximum.
        board.set_hp(user, min(pooled, board.get_actor(user).stats[0]))
        board.set_hp(target, min(pooled, board.get_actor(target).stats[0]))
        return True, False, False
| 44.266667 | 78 | 0.716867 | from game.combat.effects.moveeffect.basemoveeffect import BaseMoveEffect
class Avghp(BaseMoveEffect):
def after_action(self):
user_hp = self.scene.board.get_data(self.move.user).current_hp
user_max_hp = self.scene.board.get_actor(self.move.user).stats[0]
target_hp = self.scene.board.get_data(self.move.target).current_hp
target_max_hp = self.scene.board.get_actor(self.move.target).stats[0]
average = (user_hp + target_hp) // 2
self.scene.board.set_hp(self.move.user, min(average, user_max_hp))
self.scene.board.set_hp(self.move.target, min(average, target_max_hp))
return True, False, False
| true | true |
f7298048176e7ee84c55af3bf97f19b8217d90b0 | 494 | py | Python | ProjectEuler.Problem.039.py | jihunroh/ProjectEuler-Python | 2fceaf5c3dd61038004b6128c5d9ee7a76142bca | [
"MIT"
] | null | null | null | ProjectEuler.Problem.039.py | jihunroh/ProjectEuler-Python | 2fceaf5c3dd61038004b6128c5d9ee7a76142bca | [
"MIT"
] | null | null | null | ProjectEuler.Problem.039.py | jihunroh/ProjectEuler-Python | 2fceaf5c3dd61038004b6128c5d9ee7a76142bca | [
"MIT"
] | null | null | null | from ProjectEulerCommons.Base import *
def get_triangle_length_pairs(p):
    """Count right triangles with integer sides a <= b < c and perimeter p."""
    count = 0
    for a in range(1, p - 2):
        for b in range(a, p - a - 1):
            c = p - a - b
            if c > b and a ** 2 + b ** 2 == c ** 2:
                count += 1
    return count
# Problem 39: find the perimeter p <= 1000 with the maximum number of integer
# right-triangle solutions. Presumably max_index picks the entry with the
# largest count and [0] extracts the winning p — TODO confirm max_index's contract.
Answer(
    max_index([(p, get_triangle_length_pairs(p)) for p in range(3, 1000 + 1)])[0]
)
"""
------------------------------------------------
ProjectEuler.Problem.039.py
The Answer is: 840
Time Elasped: 43.653260707855225sec
------------------------------------------------
"""
| 29.058824 | 127 | 0.506073 | from ProjectEulerCommons.Base import *
def get_triangle_length_pairs(p):
return sum([True for a in range(1, p - 2) for b in range(a, p - a - 1) if p - a - b > b and a**2 + b**2 == (p - a - b)**2])
Answer(
max_index([(p, get_triangle_length_pairs(p)) for p in range(3, 1000 + 1)])[0]
)
| true | true |
f72981074d6cef71e7e9323c24ea3c9a0020ffe1 | 2,773 | py | Python | neptune-python-utils/neptune_python_utils/glue_neptune_connection_info.py | Alfian878787/amazon-neptune-tools | a447da238e99612a290babc66878fe772727a19e | [
"Apache-2.0"
] | null | null | null | neptune-python-utils/neptune_python_utils/glue_neptune_connection_info.py | Alfian878787/amazon-neptune-tools | a447da238e99612a290babc66878fe772727a19e | [
"Apache-2.0"
] | null | null | null | neptune-python-utils/neptune_python_utils/glue_neptune_connection_info.py | Alfian878787/amazon-neptune-tools | a447da238e99612a290babc66878fe772727a19e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys, boto3, os, uuid
from urllib.parse import urlparse
from botocore.credentials import Credentials
from neptune_python_utils.endpoints import Endpoints
class GlueNeptuneConnectionInfo:
    """Resolves Neptune endpoint information stored in the Glue Data Catalog.

    Endpoints are stored as JDBC connections of the form
    'jdbc:<protocol>://<dns_name>:<port>/<endpoint>'; credentials are obtained
    by assuming the supplied IAM role via STS.
    """

    def __init__(self, region, role_arn):
        self.region = region
        self.role_arn = role_arn

    def neptune_endpoints(self, connection_name):
        """Get Neptune endpoint information from the Glue Data Catalog.

        You may need to install a Glue VPC Endpoint in your VPC for this
        method to work. For example, if the catalog stores
        'jdbc:wss://my-neptune-cluster.us-east-1.neptune.amazonaws.com:8182/gremlin'
        this method returns an Endpoints object for
        'wss://my-neptune-cluster.us-east-1.neptune.amazonaws.com:8182/gremlin'.

        Example:
        >>> endpoints = GlueNeptuneConnectionInfo(region, role_arn).neptune_endpoints('neptune')
        """
        glue_client = boto3.client('glue', region_name=self.region)
        connection_info = glue_client.get_connection(Name=connection_name)
        jdbc_url = connection_info['Connection']['ConnectionProperties']['JDBC_CONNECTION_URL']
        # Drop the leading 'jdbc:' prefix; what remains is an ordinary URI.
        parsed = urlparse(jdbc_url[5:])
        host, port = parsed.netloc.split(':')[0], parsed.netloc.split(':')[1]
        sts_client = boto3.client('sts', region_name=self.region)
        assumed = sts_client.assume_role(
            RoleArn=self.role_arn,
            RoleSessionName=uuid.uuid4().hex,
            DurationSeconds=3600,
        )
        temp_credentials = Credentials(
            access_key=assumed['Credentials']['AccessKeyId'],
            secret_key=assumed['Credentials']['SecretAccessKey'],
            token=assumed['Credentials']['SessionToken'])
        return Endpoints(neptune_endpoint=host, neptune_port=port,
                         region_name=self.region, credentials=temp_credentials)
| 39.614286 | 116 | 0.662099 |
import sys, boto3, os, uuid
from urllib.parse import urlparse
from botocore.credentials import Credentials
from neptune_python_utils.endpoints import Endpoints
class GlueNeptuneConnectionInfo:
def __init__(self, region, role_arn):
self.region = region
self.role_arn = role_arn
def neptune_endpoints(self, connection_name):
glue = boto3.client('glue', region_name=self.region)
connection = glue.get_connection(Name=connection_name)
neptune_uri = connection['Connection']['ConnectionProperties']['JDBC_CONNECTION_URL'][5:]
parse_result = urlparse(neptune_uri)
netloc_parts = parse_result.netloc.split(':')
host = netloc_parts[0]
port = netloc_parts[1]
sts = boto3.client('sts', region_name=self.region)
role = sts.assume_role(
RoleArn=self.role_arn,
RoleSessionName=uuid.uuid4().hex,
DurationSeconds=3600
)
credentials = Credentials(
access_key=role['Credentials']['AccessKeyId'],
secret_key=role['Credentials']['SecretAccessKey'],
token=role['Credentials']['SessionToken'])
return Endpoints(neptune_endpoint=host, neptune_port=port, region_name=self.region, credentials=credentials)
| true | true |
f729829b0184b217d4f1bd32504cb211255b7245 | 4,125 | py | Python | indexclient/parser.py | palkeo/indexd | f55ca6ab9f545d4df3e37b7d48a7bff907e6b27f | [
"BSD-3-Clause"
] | 7 | 2016-10-10T09:36:43.000Z | 2020-09-11T06:55:42.000Z | indexclient/parser.py | palkeo/indexd | f55ca6ab9f545d4df3e37b7d48a7bff907e6b27f | [
"BSD-3-Clause"
] | null | null | null | indexclient/parser.py | palkeo/indexd | f55ca6ab9f545d4df3e37b7d48a7bff907e6b27f | [
"BSD-3-Clause"
] | 6 | 2016-04-11T15:57:05.000Z | 2019-01-06T10:12:29.000Z | import nodes
from parsers import parse
"""
This file contains a function to parse a query.
It will have to return a rootnode.
"""
# Token-type tags used by get_type() and parse_query().
BINARY_OPERATOR, UNARY_OPERATOR, MODIFIER, TERM = range(4)
# Maps an operator token to a tuple (node class, priority, arity tag).
# A larger priority number means the operator binds more strongly.
OPERATORS = {
    'AND': (nodes.AndNode, 5, BINARY_OPERATOR),
    'OR': (nodes.OrNode, 10, BINARY_OPERATOR),
    '&&': (nodes.AndNode, 5, BINARY_OPERATOR),
    '||': (nodes.OrNode, 10, BINARY_OPERATOR),
    #' -': (nodes.NotNode, 20, UNARY_OPERATOR),  # disabled alias for NOT
    'NOT': (nodes.NotNode, 20, UNARY_OPERATOR),
}
# Phrase delimiters: '"' wraps an exact phrase, '`'/'['/']' an approximate one.
# Opening and closing delimiters must map to the same node class.
MODIFIERS = {
    '"': nodes.ExactNode,
    '`': nodes.ApproxNode,
    '[': nodes.ApproxNode,
    ']': nodes.ApproxNode,
}
def get_type(term):
if term in OPERATORS:
return OPERATORS[term][2]
elif term in MODIFIERS:
return MODIFIER
return TERM
def parse_query(query):
    """Parse a textual search query into a tree of `nodes` objects (the root node).

    Supports binary operators (AND/&&, OR/||), the unary NOT operator,
    parenthesised sub-queries, and phrase modifiers (see MODIFIERS).
    Two adjacent terms are implicitly joined with AND.
    """
    def append_operator(term):
        # Shunting-yard style: before pushing `term`, reduce every operator
        # already on the stack that out-prioritises it.
        assert not(lastType in (BINARY_OPERATOR, UNARY_OPERATOR) and get_type(term) == BINARY_OPERATOR)
        if get_type(term) == UNARY_OPERATOR and lastType == TERM:
            operators.append('AND')  # implicit AND between a term and NOT
        while len(operators) > 0 and OPERATORS[term][1] < OPERATORS[operators[-1]][1]:
            if get_type(operators[-1]) == UNARY_OPERATOR:
                terms.append( OPERATORS[ operators.pop() ][0](terms.pop()) )
            else:
                assert get_type(operators[-1]) == BINARY_OPERATOR
                terms.append( OPERATORS[ operators.pop() ][0] ( terms.pop(), terms.pop() ) )
        operators.append(term)

    # Surround every special token with spaces so split() isolates it.
    for r in list(OPERATORS.keys()) + list(MODIFIERS.keys()) + ['(',')']:
        query = query.replace(r, ' ' + r + ' ')
    query = query.split(' ')

    terms = []      # operand stack (parse-tree fragments)
    operators = []  # operator stack (token strings)
    lastType = BINARY_OPERATOR
    parenthesis_level = 0
    parenthesis_start = -1
    modifier = None        # node class of the currently open phrase, if any
    modifier_terms = []    # keyword nodes collected inside that phrase
    for pos, term in enumerate(query):
        if not term:
            continue
        # Parenthesis: recursively parse the enclosed sub-query as one term.
        if term == '(':
            parenthesis_level += 1
            if parenthesis_level == 1:
                parenthesis_start = pos + 1
        elif term == ')':
            parenthesis_level -= 1
            if parenthesis_level == 0:
                if lastType == TERM:
                    append_operator('AND')
                terms.append( parse_query(' '.join(query[parenthesis_start:pos])) )
                lastType = TERM
            continue
        if parenthesis_level > 0:
            continue  # inner tokens are handled by the recursive call above
        # Modifier: opening or closing delimiter of an exact/approx phrase.
        if get_type(term) == MODIFIER:
            if modifier is None:
                modifier = MODIFIERS[term]
            else:
                assert MODIFIERS[term] == modifier  # delimiters must match
                if lastType == TERM:
                    append_operator('AND')
                terms.append(modifier(modifier_terms))
                lastType = TERM
                modifier = None
                modifier_terms = []
            continue
        if modifier is not None:
            # Inside a phrase: just accumulate keywords, no operator handling.
            term_list = parse(term)
            modifier_terms.extend(nodes.KwNode(i) for i in term_list)
            continue
        # Operator or plain term.
        if get_type(term) in (BINARY_OPERATOR, UNARY_OPERATOR):
            append_operator(term)
        else:
            term_list = tuple(parse(term))
            if len(term_list) == 0:
                continue
            elif len(term_list) == 1:
                terms.append(nodes.KwNode(term_list[0]))
            else:
                # One input word expanded into several tokens: exact phrase.
                terms.append(nodes.ExactNode([nodes.KwNode(i) for i in term_list]))
            if lastType == TERM:
                append_operator('AND')
        lastType = get_type(term)

    assert len(terms) > 0
    # Reduce the remaining operators. Bug fix: the original looped on
    # `len(terms) > 1`, which never applied a trailing unary operator when a
    # single operand was left — e.g. "NOT foo" silently parsed as "foo".
    while operators:
        op = operators.pop()
        if get_type(op) == UNARY_OPERATOR:
            terms.append( OPERATORS[op][0](terms.pop()) )
        elif len(terms) > 1:
            terms.append( OPERATORS[op][0]( terms.pop(), terms.pop() ) )
        # else: dangling binary operator in a malformed query — ignore it,
        # matching the original's lenient behaviour.
    return terms[0]
| 30.330882 | 156 | 0.557091 | import nodes
from parsers import parse
BINARY_OPERATOR, UNARY_OPERATOR, MODIFIER, TERM = range(4)
OPERATORS = {
'AND': (nodes.AndNode, 5, BINARY_OPERATOR),
'OR': (nodes.OrNode, 10, BINARY_OPERATOR),
'&&': (nodes.AndNode, 5, BINARY_OPERATOR),
'||': (nodes.OrNode, 10, BINARY_OPERATOR),
'NOT': (nodes.NotNode, 20, UNARY_OPERATOR),
}
MODIFIERS = {
'"': nodes.ExactNode,
'`': nodes.ApproxNode,
'[': nodes.ApproxNode,
']': nodes.ApproxNode,
}
def get_type(term):
if term in OPERATORS:
return OPERATORS[term][2]
elif term in MODIFIERS:
return MODIFIER
return TERM
def parse_query(query):
def append_operator(term):
assert not(lastType in (BINARY_OPERATOR, UNARY_OPERATOR) and get_type(term) == BINARY_OPERATOR)
if get_type(term) == UNARY_OPERATOR and lastType == TERM:
operators.append('AND')
while len(operators) > 0 and OPERATORS[term][1] < OPERATORS[operators[-1]][1]:
if get_type(operators[-1]) == UNARY_OPERATOR:
terms.append( OPERATORS[ operators.pop() ][0](terms.pop()) )
else:
assert get_type(operators[-1]) == BINARY_OPERATOR
terms.append( OPERATORS[ operators.pop() ][0] ( terms.pop(), terms.pop() ) )
operators.append(term)
for r in list(OPERATORS.keys()) + list(MODIFIERS.keys()) + ['(',')']:
query = query.replace(r, ' ' + r + ' ')
query = query.split(' ')
terms = []
operators = []
lastType = BINARY_OPERATOR
parenthesis_level = 0
parenthesis_start = -1
modifier = None
modifier_terms = []
for pos, term in enumerate(query):
if not term:
continue
# Parenthesis
if term == '(':
parenthesis_level += 1
if parenthesis_level == 1:
parenthesis_start = pos + 1
elif term == ')':
parenthesis_level -= 1
if parenthesis_level == 0:
if lastType == TERM:
append_operator('AND')
terms.append( parse_query(' '.join(query[parenthesis_start:pos])) )
lastType = TERM
continue
if parenthesis_level > 0:
continue
# Modifier
if get_type(term) == MODIFIER:
if modifier is None:
modifier = MODIFIERS[term]
else:
assert MODIFIERS[term] == modifier
if lastType == TERM:
append_operator('AND')
terms.append(modifier(modifier_terms))
lastType = TERM
modifier = None
modifier_terms = []
continue
if modifier is not None:
term_list = parse(term)
modifier_terms.extend(nodes.KwNode(i) for i in term_list)
continue
# Operator or terms
if get_type(term) in (BINARY_OPERATOR, UNARY_OPERATOR):
append_operator(term)
else:
term_list = tuple(parse(term))
if len(term_list) == 0:
continue
elif len(term_list) == 1:
terms.append(nodes.KwNode(term_list[0]))
else:
terms.append(nodes.ExactNode([nodes.KwNode(i) for i in term_list]))
if lastType == TERM:
append_operator('AND')
lastType = get_type(term)
assert len(terms) > 0
while len(terms) > 1:
if get_type(operators[-1]) == UNARY_OPERATOR:
terms.append( OPERATORS[ operators.pop() ][0](terms.pop()) )
else:
assert get_type(operators[-1]) == BINARY_OPERATOR
terms.append( OPERATORS[ operators.pop() ][0] ( terms.pop(), terms.pop() ) )
return terms[0]
| true | true |
f72982eaa140f4ef236992892c40e97c081380db | 9,193 | py | Python | src/brouwers/forum_tools/models.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 6 | 2015-03-03T13:23:07.000Z | 2021-12-19T18:12:41.000Z | src/brouwers/forum_tools/models.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 95 | 2015-02-07T00:55:39.000Z | 2022-02-08T20:22:05.000Z | src/brouwers/forum_tools/models.py | modelbrouwers/modelbrouwers | e0ba4819bf726d6144c0a648fdd4731cdc098a52 | [
"MIT"
] | 2 | 2016-03-22T16:53:26.000Z | 2019-02-09T22:46:04.000Z | import zlib
from datetime import datetime
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.http import urlencode
from django.utils.timesince import timesince
from django.utils.translation import ugettext_lazy as _
from dateutil.relativedelta import relativedelta
from brouwers.general.utils import clean_username
from .fields import ForumToolsIDField
class ForumLinkBase(models.Model):
    """Base anchor of a forum link; ForumLinkSynced rows are kept in sync with it."""

    # HTML id of the anchor identifying this link in the forum markup.
    link_id = models.CharField(
        _("link id"), max_length=128, help_text=_("HTML id of the base anchor.")
    )
    short_description = models.CharField(
        _("short description"), max_length=64, blank=True
    )
    enabled = models.BooleanField(
        _("enabled"), default=True, help_text=_("Enable the syncing of this link.")
    )
    from_date = models.DateField(
        _("from date"), help_text=_("Start date from when this link is enabled.")
    )
    to_date = models.DateField(
        _("to date"),
        help_text=_("End date from when this link is enabled, this date included."),
    )

    class Meta:
        verbose_name = _("base forum link")
        verbose_name_plural = _("base forum links")

    def __str__(self):
        # Prefer the human-readable description; fall back to the HTML id.
        if self.short_description:
            return _("base forum link: %(desc)s") % {"desc": self.short_description}
        else:
            return _("base forum link: %(id)s") % {"id": self.link_id}
class ForumLinkSynced(models.Model):
    """A forum link kept in sync with a ForumLinkBase anchor."""

    base = models.ForeignKey(
        ForumLinkBase,
        verbose_name=_("base link"),
        help_text=_("Link this link syncs with."),
        on_delete=models.CASCADE,
    )
    # HTML id of the anchor that mirrors the base link.
    link_id = models.CharField(
        _("link id"), max_length=128, help_text=_("HTML id of the anchor to be synced.")
    )

    class Meta:
        verbose_name = _("synced forum link")
        verbose_name_plural = _("synced forum links")

    def __str__(self):
        return u"%s -- %s" % (self.base, self.link_id)
class BuildReportsForum(models.Model):
    """Model which tells us which forums hold build reports."""

    forum = ForumToolsIDField(_("forum"), type="forum")

    class Meta:
        verbose_name = _(u"build report forum")
        verbose_name_plural = _(u"build report forums")
        ordering = ["forum"]

    def __str__(self):
        # The referenced phpBB forum row may have been deleted in the meantime.
        return self.forum.forum_name if self.forum else _("(forum does not exist)")
class ForumCategory(models.Model):
    """Django-side grouping of phpBB forums, with an optional display icon."""

    name = models.CharField(_("name"), max_length=255)
    forum = ForumToolsIDField(_("forum"), type="forum", blank=True, null=True)
    # CSS class used to render the category's icon.
    icon_class = models.CharField(_("icon class"), max_length=50, blank=True)

    class Meta:
        verbose_name = _(u"forum category")
        verbose_name_plural = _(u"forum categories")
        ordering = ("name",)

    def __str__(self):
        return self.name
# Models to interact with the MYSQL database #############################
class ForumUser(models.Model):
    """MySQL phpBB3 user, managed by phpBB3 (Django does not migrate this table)."""

    # mediumint(8) unsigned
    user_id = models.AutoField(primary_key=True, help_text=_("Primary key"))
    username = models.CharField(_("username"), max_length=255)
    # Normalised variant of `username`, kept up to date in save().
    username_clean = models.CharField(_("username"), max_length=255)
    user_posts = models.IntegerField()
    user_email = models.CharField(_("email"), max_length=100)
    # bigint(20)
    user_email_hash = models.BigIntegerField(
        db_column="user_email_hash",
        default=0,
        help_text=_("A hash of the user's email address."),
    )
    user_permissions = models.TextField(blank=True)
    user_sig = models.TextField(blank=True)
    user_interests = models.TextField(blank=True)
    user_actkey = models.TextField(blank=True)
    user_occ = models.TextField(blank=True)

    class Meta:
        managed = False
        verbose_name = _("forum user")
        verbose_name_plural = _("forum users")
        ordering = ("username",)
        db_table = u"%susers" % settings.PHPBB_TABLE_PREFIX

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        """Return the phpBB member-profile URL for this user."""
        qs = {
            "mode": "viewprofile",
            "u": self.user_id,
        }
        return "{0}?{1}".format(reverse("phpBB:memberlist"), urlencode(qs))

    def get_email_hash(self):
        """CRC32 (unsigned) of the lowercased address, concatenated with its length.

        Presumably mirrors phpBB's own email-hash scheme — confirm against phpBB.
        """
        email = self.user_email
        h = zlib.crc32(email.lower().encode("ascii")) & 0xFFFFFFFF
        return "%s%s" % (h, len(email))

    def save(self, *args, **kwargs):
        """Refresh the derived columns (email hash, clean username) before saving."""
        self.user_email_hash = self.get_email_hash()
        if not self.username_clean:
            self._clean_username()
        super().save(*args, **kwargs)

    def _clean_username(self):
        self.username_clean = clean_username(self.username)
class Forum(models.Model):
    """
    MySQL phpBB3 forum, managed by phpBB3 (Django does not migrate this table).
    """

    forum_id = models.AutoField(primary_key=True)
    forum_name = models.CharField(max_length=60)
    forum_topics = models.IntegerField(default=0)
    forum_posts = models.IntegerField(default=0)
    forum_desc = models.TextField()
    # Self-reference: forums nest; NULL parent means a top-level forum.
    parent = models.ForeignKey(
        "self",
        related_name="child",
        null=True,
        blank=True,
        default=None,
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return self.forum_name

    def get_absolute_url(self):
        """Return the phpBB viewforum URL for this forum."""
        qs = {"f": self.forum_id}
        return "{0}?{1}".format(reverse("phpBB:viewforum"), urlencode(qs))

    class Meta:
        managed = False
        db_table = settings.PHPBB_TABLE_PREFIX + "forums"
        ordering = ["forum_name"]
class Topic(models.Model):
    """MySQL phpBB3 topic, managed by phpBB3 (Django does not migrate this table)."""

    topic_id = models.AutoField(primary_key=True)
    forum = models.ForeignKey(Forum, on_delete=models.CASCADE)
    topic_title = models.CharField(max_length=255)
    # phpBB stores timestamps as UNIX epoch seconds.
    last_post_time = models.BigIntegerField(db_column="topic_last_post_time", default=0)
    create_time = models.BigIntegerField(db_column="topic_time", default=0)
    author = models.ForeignKey(
        ForumUser,
        db_column="topic_poster",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )

    class Meta:
        managed = False
        db_table = settings.PHPBB_TABLE_PREFIX + "topics"
        ordering = ["topic_id"]

    def __str__(self):
        return self.topic_title

    def get_absolute_url(self):
        """Return the phpBB viewtopic URL, including the forum id when known."""
        qs = {"t": self.topic_id}
        if self.forum.pk:
            qs["f"] = self.forum.pk
        return "{0}?{1}".format(reverse("phpBB:viewtopic"), urlencode(qs))

    @property
    def created(self):
        """Creation time as a timezone-aware UTC datetime."""
        return datetime.utcfromtimestamp(self.create_time).replace(tzinfo=timezone.utc)

    def get_last_post_time(self):
        """Last-post time as a timezone-aware UTC datetime."""
        return datetime.utcfromtimestamp(self.last_post_time).replace(
            tzinfo=timezone.utc
        )

    @property
    def is_dead(self):
        """
        If the last post is older than settings.TOPIC_DEAD_TIME, it's considered
        dead.
        """
        last = self.get_last_post_time()
        lower = timezone.now() - relativedelta(months=settings.TOPIC_DEAD_TIME)
        return last <= lower

    @property
    def age(self):
        """Human-readable age of the last post (e.g. '3 weeks')."""
        return timesince(self.get_last_post_time())

    @property
    def text_dead(self):
        """Warning message shown when replying to a dead topic."""
        return _(
            "This topic has been inactive for: {0}. Please consider "
            "sending the author a private message instead of replying "
            "and thus bumping the topic."
        ).format(self.age)
class ForumPostCountRestriction(models.Model):
    """Model to hold information on the minimum post-count and level of posting rights.
    Managed by Django."""

    POSTING_LEVELS = (
        ("T", _("Topic")),
        ("R", _("Reply")),
    )
    forum = ForumToolsIDField(_("forum id"), type="forum", blank=True, null=True)
    # Minimum number of posts required before the posting level applies.
    min_posts = models.PositiveSmallIntegerField(_("minimum number of posts"))
    posting_level = models.CharField(
        _("posting level"), max_length=1, choices=POSTING_LEVELS
    )

    class Meta:
        verbose_name = _("forum post count restriction")
        verbose_name_plural = _("forum post count restrictions")
        ordering = ["forum"]

    def __str__(self):
        return _("Restriction for %(forum)s") % {"forum": self.forum.forum_name}
class Report(models.Model):
    """MySQL Report model, managed by phpBB3 (Django does not migrate this table)."""

    report_id = models.AutoField(primary_key=True, help_text="Primary key")
    # reason_id = FK to reasons, not implement in Django yet
    report_closed = models.BooleanField(
        _("closed"),
        default=False,
        help_text=_("Closed reports need no more attention."),
    )
    report_time_int = models.IntegerField(
        _("time"),
        db_column="report_time",
        help_text=_("UNIX time when the report was added."),
    )
    report_text = models.TextField("text", blank=True)

    class Meta:
        managed = False
        verbose_name = _("report")
        verbose_name_plural = _("reports")
        db_table = u"%sreports" % settings.PHPBB_TABLE_PREFIX
        permissions = (("can_see_reports", _("Can see (number of) open reports")),)

    def __str__(self):
        return _("Report %(id)s" % {"id": self.report_id})

    def report_time(self):
        # NOTE(review): returns a *naive local-time* datetime, unlike Topic,
        # which converts epoch seconds to aware UTC — confirm this is intended.
        return datetime.fromtimestamp(self.report_time_int)
| 30.952862 | 88 | 0.643207 | import zlib
from datetime import datetime
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.http import urlencode
from django.utils.timesince import timesince
from django.utils.translation import ugettext_lazy as _
from dateutil.relativedelta import relativedelta
from brouwers.general.utils import clean_username
from .fields import ForumToolsIDField
class ForumLinkBase(models.Model):
link_id = models.CharField(
_("link id"), max_length=128, help_text=_("HTML id of the base anchor.")
)
short_description = models.CharField(
_("short description"), max_length=64, blank=True
)
enabled = models.BooleanField(
_("enabled"), default=True, help_text=_("Enable the syncing of this link.")
)
from_date = models.DateField(
_("from date"), help_text=_("Start date from when this link is enabled.")
)
to_date = models.DateField(
_("to date"),
help_text=_("End date from when this link is enabled, this date included."),
)
class Meta:
verbose_name = _("base forum link")
verbose_name_plural = _("base forum links")
def __str__(self):
if self.short_description:
return _("base forum link: %(desc)s") % {"desc": self.short_description}
else:
return _("base forum link: %(id)s") % {"id": self.link_id}
class ForumLinkSynced(models.Model):
base = models.ForeignKey(
ForumLinkBase,
verbose_name=_("base link"),
help_text=_("Link this link syncs with."),
on_delete=models.CASCADE,
)
link_id = models.CharField(
_("link id"), max_length=128, help_text=_("HTML id of the anchor to be synced.")
)
class Meta:
verbose_name = _("synced forum link")
verbose_name_plural = _("synced forum links")
def __str__(self):
return u"%s -- %s" % (self.base, self.link_id)
class BuildReportsForum(models.Model):
forum = ForumToolsIDField(_("forum"), type="forum")
class Meta:
verbose_name = _(u"build report forum")
verbose_name_plural = _(u"build report forums")
ordering = ["forum"]
def __str__(self):
return self.forum.forum_name if self.forum else _("(forum does not exist)")
class ForumCategory(models.Model):
name = models.CharField(_("name"), max_length=255)
forum = ForumToolsIDField(_("forum"), type="forum", blank=True, null=True)
icon_class = models.CharField(_("icon class"), max_length=50, blank=True)
class Meta:
verbose_name = _(u"forum category")
verbose_name_plural = _(u"forum categories")
ordering = ("name",)
def __str__(self):
return self.name
",
default=0,
help_text=_("A hash of the user's email address."),
)
user_permissions = models.TextField(blank=True)
user_sig = models.TextField(blank=True)
user_interests = models.TextField(blank=True)
user_actkey = models.TextField(blank=True)
user_occ = models.TextField(blank=True)
class Meta:
managed = False
verbose_name = _("forum user")
verbose_name_plural = _("forum users")
ordering = ("username",)
db_table = u"%susers" % settings.PHPBB_TABLE_PREFIX
def __str__(self):
return self.username
def get_absolute_url(self):
qs = {
"mode": "viewprofile",
"u": self.user_id,
}
return "{0}?{1}".format(reverse("phpBB:memberlist"), urlencode(qs))
def get_email_hash(self):
email = self.user_email
h = zlib.crc32(email.lower().encode("ascii")) & 0xFFFFFFFF
return "%s%s" % (h, len(email))
def save(self, *args, **kwargs):
self.user_email_hash = self.get_email_hash()
if not self.username_clean:
self._clean_username()
super().save(*args, **kwargs)
def _clean_username(self):
self.username_clean = clean_username(self.username)
class Forum(models.Model):
forum_id = models.AutoField(primary_key=True)
forum_name = models.CharField(max_length=60)
forum_topics = models.IntegerField(default=0)
forum_posts = models.IntegerField(default=0)
forum_desc = models.TextField()
parent = models.ForeignKey(
"self",
related_name="child",
null=True,
blank=True,
default=None,
on_delete=models.CASCADE,
)
def __str__(self):
return self.forum_name
def get_absolute_url(self):
qs = {"f": self.forum_id}
return "{0}?{1}".format(reverse("phpBB:viewforum"), urlencode(qs))
class Meta:
managed = False
db_table = settings.PHPBB_TABLE_PREFIX + "forums"
ordering = ["forum_name"]
class Topic(models.Model):
topic_id = models.AutoField(primary_key=True)
forum = models.ForeignKey(Forum, on_delete=models.CASCADE)
topic_title = models.CharField(max_length=255)
last_post_time = models.BigIntegerField(db_column="topic_last_post_time", default=0)
create_time = models.BigIntegerField(db_column="topic_time", default=0)
author = models.ForeignKey(
ForumUser,
db_column="topic_poster",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
class Meta:
managed = False
db_table = settings.PHPBB_TABLE_PREFIX + "topics"
ordering = ["topic_id"]
def __str__(self):
return self.topic_title
def get_absolute_url(self):
qs = {"t": self.topic_id}
if self.forum.pk:
qs["f"] = self.forum.pk
return "{0}?{1}".format(reverse("phpBB:viewtopic"), urlencode(qs))
@property
def created(self):
return datetime.utcfromtimestamp(self.create_time).replace(tzinfo=timezone.utc)
def get_last_post_time(self):
return datetime.utcfromtimestamp(self.last_post_time).replace(
tzinfo=timezone.utc
)
@property
def is_dead(self):
last = self.get_last_post_time()
lower = timezone.now() - relativedelta(months=settings.TOPIC_DEAD_TIME)
return last <= lower
@property
def age(self):
return timesince(self.get_last_post_time())
@property
def text_dead(self):
return _(
"This topic has been inactive for: {0}. Please consider "
"sending the author a private message instead of replying "
"and thus bumping the topic."
).format(self.age)
class ForumPostCountRestriction(models.Model):
POSTING_LEVELS = (
("T", _("Topic")),
("R", _("Reply")),
)
forum = ForumToolsIDField(_("forum id"), type="forum", blank=True, null=True)
min_posts = models.PositiveSmallIntegerField(_("minimum number of posts"))
posting_level = models.CharField(
_("posting level"), max_length=1, choices=POSTING_LEVELS
)
class Meta:
verbose_name = _("forum post count restriction")
verbose_name_plural = _("forum post count restrictions")
ordering = ["forum"]
def __str__(self):
return _("Restriction for %(forum)s") % {"forum": self.forum.forum_name}
class Report(models.Model):
report_id = models.AutoField(primary_key=True, help_text="Primary key")
# reason_id = FK to reasons, not implement in Django yet
report_closed = models.BooleanField(
_("closed"),
default=False,
help_text=_("Closed reports need no more attention."),
)
report_time_int = models.IntegerField(
_("time"),
db_column="report_time",
help_text=_("UNIX time when the report was added."),
)
report_text = models.TextField("text", blank=True)
class Meta:
managed = False
verbose_name = _("report")
verbose_name_plural = _("reports")
db_table = u"%sreports" % settings.PHPBB_TABLE_PREFIX
permissions = (("can_see_reports", _("Can see (number of) open reports")),)
def __str__(self):
return _("Report %(id)s" % {"id": self.report_id})
def report_time(self):
return datetime.fromtimestamp(self.report_time_int)
| true | true |
f72984a9a6003802c913b46bb52ecb628180101d | 915 | py | Python | day_09/part1.py | pawlodkowski/advent_of_code_2020 | ca41416c340747d7e37eeab60046b770c240338b | [
"MIT"
] | 2 | 2020-12-02T09:14:14.000Z | 2020-12-02T22:14:21.000Z | day_09/part1.py | pawlodkowski/advent_of_code_2020 | ca41416c340747d7e37eeab60046b770c240338b | [
"MIT"
] | null | null | null | day_09/part1.py | pawlodkowski/advent_of_code_2020 | ca41416c340747d7e37eeab60046b770c240338b | [
"MIT"
] | null | null | null | """
Part 1 of https://adventofcode.com/2020/day/9
"""
def read_data(filename: str) -> list:
    """Read *filename* and return its contents split on newline characters."""
    with open(filename, "r") as handle:
        return handle.read().split("\n")
def sum_to_n(n, options):
    """Return the first pair of numbers from *options* that sums to *n*, or False.

    Helper function adapted from Day 1. The original relied on an
    UnboundLocalError being raised (and caught) when no pair existed; this
    version returns False explicitly instead of using exceptions for control
    flow. Behaviour is otherwise preserved, including the quirk that a number
    equal to n/2 matches itself when present only once.
    """
    for num in options:
        complement = n - num
        if complement in options:
            return num, complement
    return False
if __name__ == "__main__":
data = [int(i) for i in read_data("input.txt")]
for i, num in enumerate(data):
prev25 = data[i - 25 : i]
if prev25:
if not sum_to_n(num, prev25):
print(
f"Solution: The first number that is not the sum of any two of the 25 numbers before it is {num}."
)
| 22.875 | 118 | 0.525683 |
def read_data(filename: str) -> list:
with open(filename, "r") as f:
data = f.read().split("\n")
return data
def sum_to_n(n, options):
try:
for num in options:
complement = n - num
if complement in options:
first = num
second = complement
break
return first, second
except UnboundLocalError:
return False
if __name__ == "__main__":
data = [int(i) for i in read_data("input.txt")]
for i, num in enumerate(data):
prev25 = data[i - 25 : i]
if prev25:
if not sum_to_n(num, prev25):
print(
f"Solution: The first number that is not the sum of any two of the 25 numbers before it is {num}."
)
| true | true |
f72985e3c3835b49230c4dfe3c6d5af8845591fc | 8,470 | py | Python | tests/test_txpool.py | rsoliha/LabChain | 4694913f52b3d0567f9a289dfc0b7bec30eccfb9 | [
"Apache-2.0"
] | null | null | null | tests/test_txpool.py | rsoliha/LabChain | 4694913f52b3d0567f9a289dfc0b7bec30eccfb9 | [
"Apache-2.0"
] | null | null | null | tests/test_txpool.py | rsoliha/LabChain | 4694913f52b3d0567f9a289dfc0b7bec30eccfb9 | [
"Apache-2.0"
] | null | null | null | import unittest
import os
from labchain.datastructure.txpool import TxPool
from labchain.datastructure.transaction import Transaction
from labchain.util.cryptoHelper import CryptoHelper
from labchain.datastructure.blockchain import BlockChain
from labchain.util.configReader import ConfigReader
from labchain.consensus.consensus import Consensus
class TxPoolTestCase(unittest.TestCase):
    """Testcases for the TxPool module.

    TxPool is a singleton, so every test shares one pool instance; setUp
    seeds it with three transactions and tearDown resets the singleton
    flag so the next test starts from a fresh pool.
    """
    def init_blockchain(self):
        """Create a BlockChain wired to the shared TxPool, using the test config."""
        test_resources_dic_path = os.path.abspath(os.path.join(os.path.dirname(__file__), './resources'))
        test_node_config_file = test_resources_dic_path + '/node_configuration.ini'
        config_reader = ConfigReader(test_node_config_file)
        tolerance = config_reader.get_config(
            section='BLOCK_CHAIN',
            option='TOLERANCE_LEVEL')
        pruning = config_reader.get_config(
            section='BLOCK_CHAIN',
            option='TIME_TO_PRUNE')
        min_blocks = config_reader.get_config(
            section='MINING',
            option='NUM_OF_BLOCKS_FOR_DIFFICULTY')
        consensus = Consensus()
        self.block_list = []
        self.blockchain_obj = BlockChain(node_id="nodeId1", tolerance_value=tolerance,
                                         pruning_interval=pruning,
                                         consensus_obj=consensus,
                                         txpool_obj=self._txPoolObj,
                                         crypto_helper_obj=self.crypto_helper_obj,
                                         min_blocks_for_difficulty=min_blocks,
                                         db=None,
                                         q=None)
    def setUp(self):
        """Seed the (singleton) pool with three signed transactions "a"-"c"."""
        self.crypto_helper_obj = CryptoHelper.instance()
        self.private_key1, self.public_key1 = self.crypto_helper_obj.generate_key_pair()
        self.private_key2, self.public_key2 = self.crypto_helper_obj.generate_key_pair()
        self._txPoolObj = TxPool(self.crypto_helper_obj)
        self.init_blockchain()
        t1 = Transaction(self.public_key1, self.public_key2, "a")
        t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
        self._txPoolObj.add_transaction_if_not_exist(t1, self.blockchain_obj)
        t2 = Transaction(self.public_key1, self.public_key2, "b")
        t2.sign_transaction(self.crypto_helper_obj, self.private_key1)
        self._txPoolObj.add_transaction_if_not_exist(t2, self.blockchain_obj)
        t3 = Transaction(self.public_key1, self.public_key2, "c")
        t3.sign_transaction(self.crypto_helper_obj, self.private_key1)
        self._txPoolObj.add_transaction_if_not_exist(t3, self.blockchain_obj)
    def tearDown(self):
        # Reset the TxPool singleton flag so the next test builds a fresh pool.
        self._txPoolObj._first_time = True
    def test_add_transaction(self):
        """Test for add transaction, get transaction count and
        get transaction"""
        transaction = Transaction(self.public_key1, self.public_key2, "d")
        transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
        txpool_size = self._txPoolObj.get_transaction_count()
        status = self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
        self.assertEqual(status, True)
        self.assertEqual(txpool_size + 1, self._txPoolObj.get_transaction_count())
        self.assertEqual(transaction.get_json(), self._txPoolObj.get_transaction().get_json())
    def test_get_transactions(self):
        """Test to get a set of transactions"""
        tx_pool_count = self._txPoolObj.get_transaction_count()
        t1 = Transaction(self.public_key1, self.public_key2, "e")
        t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
        t2 = Transaction(self.public_key2, self.public_key1, "f")
        t2.sign_transaction(self.crypto_helper_obj, self.private_key2)
        self._txPoolObj.add_transaction_if_not_exist(t1, self.blockchain_obj)
        self._txPoolObj.add_transaction_if_not_exist(t2, self.blockchain_obj)
        # 3 from setUp, plus the 2 added above; get_transactions(3) drains 3.
        self.assertEqual(3, tx_pool_count)
        self.assertEqual(5, self._txPoolObj.get_transaction_count())
        transactions = self._txPoolObj.get_transactions(3)
        self.assertEqual(len(transactions), 3)
        self.assertEqual(2, self._txPoolObj.get_transaction_count())
    def test_remove_transaction(self):
        """Test remove transaction"""
        transaction = Transaction(self.public_key1, self.public_key2, "g")
        transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
        self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
        tx_pool_count = self._txPoolObj.get_transaction_count()
        transactions = self._txPoolObj.get_transactions(tx_pool_count)
        self._txPoolObj.return_transactions_to_pool(transactions, self.blockchain_obj)
        self.assertTrue(transaction in transactions)
        status = self._txPoolObj.remove_transaction(transaction)
        self.assertEqual(status, True)
        tx_pool_count = self._txPoolObj.get_transaction_count()
        transactions = self._txPoolObj.get_transactions(tx_pool_count)
        self.assertFalse(transaction in transactions)
    def test_return_transactions_to_pool(self):
        """Test for return transactions to pool"""
        t1 = Transaction(self.public_key1, self.public_key2, "h")
        t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
        t2 = Transaction(self.public_key1, self.public_key2, "i")
        t2.sign_transaction(self.crypto_helper_obj, self.private_key1)
        t3 = Transaction(self.public_key1, self.public_key2, "j")
        t3.sign_transaction(self.crypto_helper_obj, self.private_key1)
        transactions = [t1, t2, t3]
        tx_pool_count = self._txPoolObj.get_transaction_count()
        status = self._txPoolObj.return_transactions_to_pool(transactions, self.blockchain_obj)
        self.assertEqual(status, True)
        transactions_new = self._txPoolObj.get_transactions(tx_pool_count + 3)
        status = any(transaction in transactions for transaction in transactions_new)
        self.assertEqual(status, True)
    def test_singleton(self):
        """Test the singleton behaviour: a second TxPool() returns the same instance."""
        transaction = Transaction(self.public_key1, self.public_key2, "s")
        transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
        self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
        tx_pool_count = self._txPoolObj.get_transaction_count()
        txpool = TxPool(self.crypto_helper_obj)
        self.assertEqual(txpool, self._txPoolObj)
        self.assertEqual(txpool.get_transaction_count(), tx_pool_count)
    def test_get_transaction_count(self):
        """Test the transaction count"""
        transaction = Transaction(self.public_key1, self.public_key2, "g")
        transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
        status = self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
        self.assertTrue(status)
        self.assertEqual(4, self._txPoolObj.get_transaction_count())
    def test_add_transaction_if_not_exist(self):
        """Test adding transaction in txpool only when it is empty"""
        transaction = Transaction(self.public_key1, self.public_key2, "h")
        transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
        status = self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
        self.assertTrue(status)
    def test_get_transaction_by_hash(self):
        """Test getting transaction from txpool by hash"""
        tx_pool_count = self._txPoolObj.get_transaction_count()
        t1 = Transaction(self.public_key1, self.public_key2, "e")
        t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
        t2 = Transaction(self.public_key1, self.public_key2, "f")
        t2.sign_transaction(self.crypto_helper_obj, self.private_key1)
        self._txPoolObj.add_transaction_if_not_exist(t1, self.blockchain_obj)
        self._txPoolObj.add_transaction_if_not_exist(t2, self.blockchain_obj)
        self.assertEqual(tx_pool_count, 3)
        tx_pool_count = self._txPoolObj.get_transaction_count()
        self.assertEqual(tx_pool_count, 5)
        hash_val = t1.transaction_hash
        transaction = self._txPoolObj.get_transaction_by_hash(hash_val)[0]
        self.assertEqual(t1, transaction)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 52.608696 | 105 | 0.710862 | import unittest
import os
from labchain.datastructure.txpool import TxPool
from labchain.datastructure.transaction import Transaction
from labchain.util.cryptoHelper import CryptoHelper
from labchain.datastructure.blockchain import BlockChain
from labchain.util.configReader import ConfigReader
from labchain.consensus.consensus import Consensus
class TxPoolTestCase(unittest.TestCase):
def init_blockchain(self):
test_resources_dic_path = os.path.abspath(os.path.join(os.path.dirname(__file__), './resources'))
test_node_config_file = test_resources_dic_path + '/node_configuration.ini'
config_reader = ConfigReader(test_node_config_file)
tolerance = config_reader.get_config(
section='BLOCK_CHAIN',
option='TOLERANCE_LEVEL')
pruning = config_reader.get_config(
section='BLOCK_CHAIN',
option='TIME_TO_PRUNE')
min_blocks = config_reader.get_config(
section='MINING',
option='NUM_OF_BLOCKS_FOR_DIFFICULTY')
consensus = Consensus()
self.block_list = []
self.blockchain_obj = BlockChain(node_id="nodeId1", tolerance_value=tolerance,
pruning_interval=pruning,
consensus_obj=consensus,
txpool_obj=self._txPoolObj,
crypto_helper_obj=self.crypto_helper_obj,
min_blocks_for_difficulty=min_blocks,
db=None,
q=None)
def setUp(self):
self.crypto_helper_obj = CryptoHelper.instance()
self.private_key1, self.public_key1 = self.crypto_helper_obj.generate_key_pair()
self.private_key2, self.public_key2 = self.crypto_helper_obj.generate_key_pair()
self._txPoolObj = TxPool(self.crypto_helper_obj)
self.init_blockchain()
t1 = Transaction(self.public_key1, self.public_key2, "a")
t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
self._txPoolObj.add_transaction_if_not_exist(t1, self.blockchain_obj)
t2 = Transaction(self.public_key1, self.public_key2, "b")
t2.sign_transaction(self.crypto_helper_obj, self.private_key1)
self._txPoolObj.add_transaction_if_not_exist(t2, self.blockchain_obj)
t3 = Transaction(self.public_key1, self.public_key2, "c")
t3.sign_transaction(self.crypto_helper_obj, self.private_key1)
self._txPoolObj.add_transaction_if_not_exist(t3, self.blockchain_obj)
def tearDown(self):
self._txPoolObj._first_time = True
def test_add_transaction(self):
transaction = Transaction(self.public_key1, self.public_key2, "d")
transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
txpool_size = self._txPoolObj.get_transaction_count()
status = self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
self.assertEqual(status, True)
self.assertEqual(txpool_size + 1, self._txPoolObj.get_transaction_count())
self.assertEqual(transaction.get_json(), self._txPoolObj.get_transaction().get_json())
def test_get_transactions(self):
tx_pool_count = self._txPoolObj.get_transaction_count()
t1 = Transaction(self.public_key1, self.public_key2, "e")
t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
t2 = Transaction(self.public_key2, self.public_key1, "f")
t2.sign_transaction(self.crypto_helper_obj, self.private_key2)
self._txPoolObj.add_transaction_if_not_exist(t1, self.blockchain_obj)
self._txPoolObj.add_transaction_if_not_exist(t2, self.blockchain_obj)
self.assertEqual(3, tx_pool_count)
self.assertEqual(5, self._txPoolObj.get_transaction_count())
transactions = self._txPoolObj.get_transactions(3)
self.assertEqual(len(transactions), 3)
self.assertEqual(2, self._txPoolObj.get_transaction_count())
def test_remove_transaction(self):
transaction = Transaction(self.public_key1, self.public_key2, "g")
transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
tx_pool_count = self._txPoolObj.get_transaction_count()
transactions = self._txPoolObj.get_transactions(tx_pool_count)
self._txPoolObj.return_transactions_to_pool(transactions, self.blockchain_obj)
self.assertTrue(transaction in transactions)
status = self._txPoolObj.remove_transaction(transaction)
self.assertEqual(status, True)
tx_pool_count = self._txPoolObj.get_transaction_count()
transactions = self._txPoolObj.get_transactions(tx_pool_count)
self.assertFalse(transaction in transactions)
def test_return_transactions_to_pool(self):
t1 = Transaction(self.public_key1, self.public_key2, "h")
t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
t2 = Transaction(self.public_key1, self.public_key2, "i")
t2.sign_transaction(self.crypto_helper_obj, self.private_key1)
t3 = Transaction(self.public_key1, self.public_key2, "j")
t3.sign_transaction(self.crypto_helper_obj, self.private_key1)
transactions = [t1, t2, t3]
tx_pool_count = self._txPoolObj.get_transaction_count()
status = self._txPoolObj.return_transactions_to_pool(transactions, self.blockchain_obj)
self.assertEqual(status, True)
transactions_new = self._txPoolObj.get_transactions(tx_pool_count + 3)
status = any(transaction in transactions for transaction in transactions_new)
self.assertEqual(status, True)
def test_singleton(self):
transaction = Transaction(self.public_key1, self.public_key2, "s")
transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
tx_pool_count = self._txPoolObj.get_transaction_count()
txpool = TxPool(self.crypto_helper_obj)
self.assertEqual(txpool, self._txPoolObj)
self.assertEqual(txpool.get_transaction_count(), tx_pool_count)
def test_get_transaction_count(self):
transaction = Transaction(self.public_key1, self.public_key2, "g")
transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
status = self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
self.assertTrue(status)
self.assertEqual(4, self._txPoolObj.get_transaction_count())
def test_add_transaction_if_not_exist(self):
transaction = Transaction(self.public_key1, self.public_key2, "h")
transaction.sign_transaction(self.crypto_helper_obj, self.private_key1)
status = self._txPoolObj.add_transaction_if_not_exist(transaction, self.blockchain_obj)
self.assertTrue(status)
def test_get_transaction_by_hash(self):
tx_pool_count = self._txPoolObj.get_transaction_count()
t1 = Transaction(self.public_key1, self.public_key2, "e")
t1.sign_transaction(self.crypto_helper_obj, self.private_key1)
t2 = Transaction(self.public_key1, self.public_key2, "f")
t2.sign_transaction(self.crypto_helper_obj, self.private_key1)
self._txPoolObj.add_transaction_if_not_exist(t1, self.blockchain_obj)
self._txPoolObj.add_transaction_if_not_exist(t2, self.blockchain_obj)
self.assertEqual(tx_pool_count, 3)
tx_pool_count = self._txPoolObj.get_transaction_count()
self.assertEqual(tx_pool_count, 5)
hash_val = t1.transaction_hash
transaction = self._txPoolObj.get_transaction_by_hash(hash_val)[0]
self.assertEqual(t1, transaction)
if __name__ == '__main__':
unittest.main()
| true | true |
f729864796f49818be1ba8f4a31b3aafd57c5c19 | 589 | py | Python | gdsfactory/simulation/modes/tests/test_find_modes_dispersion.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/simulation/modes/tests/test_find_modes_dispersion.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/simulation/modes/tests/test_find_modes_dispersion.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | import numpy as np
from gdsfactory.simulation.modes.find_mode_dispersion import find_mode_dispersion
def test_find_modes_waveguide_dispersion() -> None:
    """Regression-check neff and ng of a 450 nm wide waveguide.

    Reference values were computed once with the same resolution and
    settings; `np.isclose` absorbs solver round-off.
    """
    mode = find_mode_dispersion(wg_width=0.45, resolution=20, cache=None)
    neff_ref = 2.362907833437435
    ng_ref = 4.202169359808116
    assert np.isclose(mode.neff, neff_ref), (mode.neff, neff_ref)
    assert np.isclose(mode.ng, ng_ref), (mode.ng, ng_ref)
if __name__ == "__main__":
    test_find_modes_waveguide_dispersion()
| 23.56 | 81 | 0.689304 | import numpy as np
from gdsfactory.simulation.modes.find_mode_dispersion import find_mode_dispersion
def test_find_modes_waveguide_dispersion() -> None:
modes = find_mode_dispersion(wg_width=0.45, resolution=20, cache=None)
m1 = modes
neff1 = 2.362907833437435
ng1 = 4.202169359808116
assert np.isclose(m1.neff, neff1), (m1.neff, neff1)
assert np.isclose(m1.ng, ng1), (m1.ng, ng1)
if __name__ == "__main__":
test_find_modes_waveguide_dispersion()
| true | true |
f72986a5df5acfae915ca08e7901dfdce218c1f8 | 45,453 | py | Python | Utilities/JSBSimWriteXml.py | elke0011/OpenFlightSim | 1e28c54864ffd188f27425c8a71cce8b70a4bd7f | [
"MIT"
] | 15 | 2019-03-15T17:28:23.000Z | 2022-03-21T23:52:53.000Z | Utilities/JSBSimWriteXml.py | elke0011/OpenFlightSim | 1e28c54864ffd188f27425c8a71cce8b70a4bd7f | [
"MIT"
] | null | null | null | Utilities/JSBSimWriteXml.py | elke0011/OpenFlightSim | 1e28c54864ffd188f27425c8a71cce8b70a4bd7f | [
"MIT"
] | 5 | 2019-03-28T17:35:50.000Z | 2022-03-04T19:38:03.000Z | """
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota.
See: LICENSE.md for complete license details.
Author: Louis Mueller, Chris Regan
"""
import os.path
from xml.etree import ElementTree as ET
import numpy as np
ft2m = 0.3048  # feet to meters
psf2pa = 47.88026  # pounds-force per square foot to pascals
#%% Save the XML in pretty-ish print
def SaveXml(elem, saveFile):
    """Pretty-print the ElementTree element *elem* and write it to *saveFile*.

    Any missing parent directories of *saveFile* are created first.
    """
    from xml.dom import minidom

    uglyXml = ET.tostring(elem, 'utf-8')
    prettyXml = minidom.parseString(uglyXml).toprettyxml(indent='    ', newl = '\r\n')

    os.makedirs(os.path.dirname(saveFile), exist_ok=True)
    # The context manager closes the file; the explicit close() the
    # original called inside the 'with' block was redundant.
    with open(saveFile, 'w') as saveXML:
        saveXML.write(prettyXml)
#%% Function
def Aircraft(oFdm, convertFdm2Jsb, saveJsbPath, aircraftName):
    """Write the top-level JSBSim aircraft definition and all subsystem files.

    Each subsystem (flight control, effectors, mass, gear, propulsion,
    metrics, aero, plus optional winch and sensor models when present in
    oFdm) is written to its own XML file under *saveJsbPath* and referenced
    from the main '<aircraftName>.xml'.  Returns the root <fdm_config>
    Element.
    """
    # Start JSB-ML with etree
    elemAircraft = ET.Element('fdm_config', version = '2.0', release = 'Alpha')
    # Create the Pilot input as a separate XML file, direct the Aircraft definition to use
    fcsFile = 'FlightControl.xml'
    ET.SubElement(elemAircraft, 'flight_control', file = fcsFile)
    SaveXml(FlightControl(oFdm), os.path.join(saveJsbPath, fcsFile))
    # Effectors as a separate XML file, direct the Aircraft definition to use
    effFile = 'Effectors.xml'
    ET.SubElement(elemAircraft, 'system', file = effFile)
    SaveXml(Effectors(oFdm), os.path.join(saveJsbPath, effFile))
    # Create the Mass Properties input as a separate XML file, direct the Aircraft definition to use
    massFile = 'Mass.xml'
    ET.SubElement(elemAircraft, 'mass_balance', file = massFile)
    SaveXml(MassBalance(oFdm), os.path.join(saveJsbPath, massFile))
    # Create the Gear input as a separate XML file, direct the Aircraft definition to use
    gearFile = 'Gear.xml'
    ET.SubElement(elemAircraft, 'ground_reactions', file = gearFile)
    SaveXml(GroundReactions(oFdm), os.path.join(saveJsbPath, gearFile))
    # Create the Propulsion input as a separate XML file, direct the Aircraft definition to use
    propFile = 'Propulsion.xml'
    ET.SubElement(elemAircraft, 'propulsion', file = propFile)
    SaveXml(Propulsion(oFdm), os.path.join(saveJsbPath, propFile))
    # Metrics and Aerodynamics as a separate XML file, direct the Aircraft definition to use
    # Group the Metrics and Aero by similar naming; the dimensionalization inherent to Aero is provided by the Metrics
    metricsFile = 'Metrics.xml'
    ET.SubElement(elemAircraft, 'metrics', file = metricsFile)
    SaveXml(Metrics(oFdm), os.path.join(saveJsbPath, metricsFile))
    aeroFile = 'Aero.xml'
    ET.SubElement(elemAircraft, 'aerodynamics', file = aeroFile)
    SaveXml(Aerodynamics(oFdm, convertFdm2Jsb), os.path.join(saveJsbPath, aeroFile))
    # Launcher as a separate XML file, direct the Aircraft definition to use
    if 'Winch' in oFdm.keys() :
        winchFile = 'Winch.xml'
        ET.SubElement(elemAircraft, 'external_reactions', file = winchFile)
        SaveXml(Winch(oFdm), os.path.join(saveJsbPath, winchFile))
    # Imu as a separate XML file, direct the Aircraft definition to use
    if 'Imu' in oFdm['Sensor'].keys() :
        imuFile = 'SensorImu.xml'
        ET.SubElement(elemAircraft, 'system', file = imuFile)
        SaveXml(SensorImu(oFdm), os.path.join(saveJsbPath, imuFile))
    # Gps as a separate XML file, direct the Aircraft definition to use
    if 'Gps' in oFdm['Sensor'].keys() :
        gpsFile = 'SensorGps.xml'
        ET.SubElement(elemAircraft, 'system', file = gpsFile)
        SaveXml(SensorGps(oFdm), os.path.join(saveJsbPath, gpsFile))
    # Pitot as a separate XML file, direct the Aircraft definition to use
    if 'Pitot' in oFdm['Sensor'].keys() :
        pitotFile = 'SensorPitot.xml'
        ET.SubElement(elemAircraft, 'system', file = pitotFile)
        SaveXml(SensorPitot(oFdm), os.path.join(saveJsbPath, pitotFile))
    # 5Hole as a separate XML file, direct the Aircraft definition to use
    if '5Hole' in oFdm['Sensor'].keys() :
        fiveHoleFile = 'Sensor5Hole.xml'
        ET.SubElement(elemAircraft, 'system', file = fiveHoleFile)
        SaveXml(Sensor5Hole(oFdm), os.path.join(saveJsbPath, fiveHoleFile))
    # Write the Aircraft XML file
    saveFile = os.path.join(saveJsbPath, aircraftName + '.xml')
    SaveXml(elemAircraft, saveFile)
    return(elemAircraft)
#%% Table Generator, Wrapper
def TableGen(elemParent, tableArray, tableSignals, tableBreakPts):
    """Append a JSBSim lookup table to *elemParent*, dispatching on dimension.

    tableArray : numpy array of table data (0-3 dimensions).
    tableSignals : JSBSim property name(s) for the independent variable(s).
    tableBreakPts : breakpoint vector(s) matching tableArray's axes.

    Returns the created <table> Element, or the value string when the data
    reduces to a scalar <value> element.

    (The original carried dead scaffolding that collected singleton axes
    for a commented-out squeeze; it was unused and has been removed.)
    """
    nDim = len(tableArray.shape)

    if nDim == 3:
        table = TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts)
    elif nDim == 2:
        table = TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts)
    elif (nDim == 1) and (tableArray.size > 1):
        table = TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts)
    else:
        # Scalar (or single-entry) data: emit a constant <value> element.
        table = str(tableArray)
        ET.SubElement(elemParent, 'value').text = table

    return table
#%% Table Generator, 3D
def TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts):
    """Emit a JSBSim 3D <table> (row/column/table lookup) under *elemParent*."""
    elemTable = ET.SubElement(elemParent, 'table')
    ET.SubElement(elemTable, 'independentVar', lookup = 'row').text = tableSignals[0]
    ET.SubElement(elemTable, 'independentVar', lookup = 'column').text = tableSignals[1]
    ET.SubElement(elemTable, 'independentVar', lookup = 'table').text = tableSignals[2]

    pad = ' ' * 4  # one indent unit in the output file
    lvl = 4        # nesting depth of the table data

    nRow, nCol, nTab = np.shape(tableArray)

    # Column breakpoint values across the top of every page
    header = pad * lvl + ''.join('      ' + str(v) for v in tableBreakPts[1])

    # One <tableData> page per breakpoint of the third axis
    for iTab in range(nTab):
        lines = ['\n' + header]
        for iRow in range(nRow):
            rowTxt = str(tableArray[iRow, :, iTab]).replace('[', '').replace(']', '').replace('\n', '')
            lines.append(lvl * pad + str(tableBreakPts[0][iRow]) + pad + rowTxt)
        body = '\n'.join(lines) + '\n' + lvl * pad
        ET.SubElement(elemTable, 'tableData', breakPoint = str(tableBreakPts[2][iTab])).text = body

    return elemTable
#%% Table Generator, 2D
def TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts):
    """Emit a JSBSim 2D <table> (row/column lookup) under *elemParent*.

    NOTE(review): the data array is transposed before printing while the
    row labels come from tableBreakPts[0]; this assumes the input array is
    stored [column, row] relative to the breakpoint vectors — confirm
    against the oFdm table layout (the 3D generator does not transpose).
    """
    elemTable = ET.SubElement(elemParent, 'table')
    ET.SubElement(elemTable, 'independentVar', lookup = 'row').text = tableSignals[0]
    ET.SubElement(elemTable, 'independentVar', lookup = 'column').text = tableSignals[1]

    pad = ' ' * 4  # one indent unit in the output file
    lvl = 4        # nesting depth of the table data

    data = tableArray.transpose()
    nRow = np.shape(data)[0]

    # Column breakpoint values across the top of the table
    header = pad * lvl + ''.join('      ' + str(v) for v in tableBreakPts[1])

    lines = ['\n' + header]
    for iRow in range(nRow):
        rowTxt = str(data[iRow]).replace('[', '').replace(']', '').replace('\n', '')
        lines.append(lvl * pad + str(tableBreakPts[0][iRow]) + pad + rowTxt)

    ET.SubElement(elemTable, 'tableData').text = '\n'.join(lines) + '\n' + lvl * pad

    return elemTable
#%% Table Generator, 1D
def TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts):
    """Emit a JSBSim 1D <table> (single row-lookup variable) under *elemParent*."""
    elemTable = ET.SubElement(elemParent, 'table')
    ET.SubElement(elemTable, 'independentVar', lookup = 'row').text = tableSignals

    pad = ' ' * 4  # one indent unit in the output file
    lvl = 4        # nesting depth of the table data

    lines = ['\n']
    for iRow in range(np.shape(tableArray)[0]):
        valTxt = str(tableArray[iRow]).replace('[', '').replace(']', '').replace('\n', '')
        lines.append(lvl * pad + str(tableBreakPts[iRow]) + pad + valTxt)

    ET.SubElement(elemTable, 'tableData').text = '\n'.join(lines) + '\n' + lvl * pad

    return elemTable
#%%
def MassBalance(oFdm):
    """Build the JSBSim <mass_balance> element from oFdm['MassProp'].

    Emits empty weight, CG location in the structural frame, and the six
    independent entries of the inertia tensor.
    """
    elemMass = ET.Element('mass_balance')
    massProp = oFdm['MassProp']

    # Mass
    ET.SubElement(elemMass, 'emptywt', unit = 'KG').text = str(massProp['mass_kg'])

    # CG location
    location = ET.SubElement(elemMass, 'location', name = 'CG', unit = 'M')
    for iAxis, axis in enumerate(('x', 'y', 'z')):
        ET.SubElement(location, axis).text = str(massProp['rCG_S_m'][iAxis])

    # Inertia tensor entries (symmetric, so only six are written)
    inertia = massProp['inertia_kgm2']
    for tag, (i, j) in (('ixx', (0, 0)), ('iyy', (1, 1)), ('izz', (2, 2)),
                        ('ixy', (0, 1)), ('ixz', (0, 2)), ('iyz', (1, 2))):
        ET.SubElement(elemMass, tag, unit = 'KG*M2').text = str(inertia[i, j])

    return elemMass
#%%
def GroundReactions(oFdm):
    """Build the JSBSim <ground_reactions> element: one BOGEY contact per gear."""
    elemGround = ET.Element('ground_reactions')

    for name, gearDef in oFdm['Gear'].items():
        contact = ET.SubElement(elemGround, 'contact', type = 'BOGEY', name = name)

        # Contact point location in the structural frame
        location = ET.SubElement(contact, 'location', unit = 'M')
        for iAxis, axis in enumerate(('x', 'y', 'z')):
            ET.SubElement(location, axis).text = str(gearDef['rGear_S_m'][iAxis])

        # Friction coefficients and strut spring/damper parameters
        ET.SubElement(contact, 'static_friction').text = str(gearDef['FricStatic'])
        ET.SubElement(contact, 'dynamic_friction').text = str(gearDef['FricDynamic'])
        ET.SubElement(contact, 'rolling_friction').text = str(gearDef['FricRoll'])
        ET.SubElement(contact, 'spring_coeff', unit = 'N/M').text = str(gearDef['kSpring_Npm'])
        ET.SubElement(contact, 'damping_coeff', unit = 'N/M/SEC').text = str(gearDef['dampSpring_Nspm'])

        ET.SubElement(contact, 'max_steer', unit = 'DEG').text = '0.0'

    return elemGround
#%%
def Metrics(oFdm):
    """Build the JSBSim <metrics> element from oFdm['Aero']['Ref'].

    Emits the reference wing geometry, then three reference points:
    AERORP and EYEPOINT (both at the aero reference point) and VRP at the
    structural origin.
    """
    ref = oFdm['Aero']['Ref']

    elemMetrics = ET.Element('metrics')

    # Reference dimensions
    ET.SubElement(elemMetrics, 'wingarea', unit = 'M2').text = str(ref['S_m2'])
    ET.SubElement(elemMetrics, 'wingspan', unit = 'M').text = str(ref['b_m'])
    ET.SubElement(elemMetrics, 'chord', unit = 'M').text = str(ref['cBar_m'])

    # AERORP and EYEPOINT are co-located at the aero reference point
    for name in ('AERORP', 'EYEPOINT'):
        location = ET.SubElement(elemMetrics, 'location', name = name, unit = 'M')
        for iAxis, axis in enumerate(('x', 'y', 'z')):
            ET.SubElement(location, axis).text = str(ref['rAero_S_m'][iAxis])

    # VRP at the structural origin
    location = ET.SubElement(elemMetrics, 'location', name = 'VRP', unit = 'M')
    for axis in ('x', 'y', 'z'):
        ET.SubElement(location, axis).text = '0.0'

    return elemMetrics
#%%
def Aerodynamics(oFdm, convertFdm2Jsb):
    """Build the JSBSim <aerodynamics> element from the oFdm aero tables.

    For every coefficient (CL, CD, CY, CMl, CMm, CMn) this emits:
      1) table look-up functions for the base coefficient and for each
         damping/control derivative,
      2) product functions multiplying each derivative by its dependent
         JSBSim signal (and optional scale factor),
      3) a summation function totalling the contributions, and
      4) an <axis> function dimensionalizing the total with qbar*S (plus
         span or chord for the moments).

    convertFdm2Jsb supplies the dependency signal names, scalings and the
    breakpoint/table-axis definitions that map oFdm onto JSBSim.

    Fixes relative to the original: a duplicated coefNamesFdm assignment
    was removed, unused enumerate indices dropped, and None comparisons
    use 'is not None'.
    """
    import copy

    # Aero Coef definitions
    coefNamesFdm = convertFdm2Jsb['Coef']['oFdm']

    # Aero Deriv dependencies definitions
    depNamesFdm = convertFdm2Jsb['Dep']['oFdm']
    depNamesJsb = convertFdm2Jsb['Dep']['jsb']
    depScale = convertFdm2Jsb['Dep']['scale']

    # Aero Breakpoint Table defintions
    indVarTable = convertFdm2Jsb['TableDef']['jsb']
    breakPtsTable = convertFdm2Jsb['TableDef']['brkPts']

    # Aero Table data to use
    aeroTable = oFdm['Aero']['Coef']

    # Define the conversion from oFdm to JSB-ML # FIXIT - switch to a CDo+CDi drag computation
    coefTable = {'CL': {'axis': 'LIFT', 'scale': None, 'type': 'force', 'deriv': 'dCL'}, \
                 'CD': {'axis': 'DRAG', 'scale': None, 'type': 'force', 'deriv': 'dCD'}, \
                 'CY': {'axis': 'SIDE', 'scale': None, 'type': 'force', 'deriv': 'dCY'}, \
                 'CMl': {'axis': 'ROLL', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMl'}, \
                 'CMm': {'axis': 'PITCH', 'scale': 'metrics/cbarw-ft', 'type': 'moment', 'deriv': 'dCMm'}, \
                 'CMn': {'axis': 'YAW', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMn'}}

    aerodynamics = ET.Element('aerodynamics')

    #
    # Create each coefficient individually, just the table look-up
    coefNames = coefTable.keys()
    for coef in coefNames:
        convertCoef = coefTable[coef]

        # For each coefficient: create just the table look-up, then the Multiplication, then the summation
        for dep in coefNamesFdm:
            function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))
            ET.SubElement(function, 'description').text = str(coef + '__' + dep)

            # Use the Table Generator to create the properly formated Table for JSB-ML
            tableArray = aeroTable[coef][dep]
            tableSignals = indVarTable
            tableBreakPts = breakPtsTable

            table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(tableSignals), copy.deepcopy(tableBreakPts))

        # For each derivative: create just the table look-up, then the Multiplication, then the summation
        deriv = convertCoef['deriv']
        for iDep, dep in enumerate(depNamesFdm):
            function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + deriv + '__' + dep))
            ET.SubElement(function, 'description').text = str(deriv + '__' + dep)

            # Use the Table Generator to create the properly formated Table for JSB-ML
            tableArray = aeroTable[deriv][dep]
            tableSignals = indVarTable
            tableBreakPts = breakPtsTable

            table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(tableSignals), copy.deepcopy(tableBreakPts))

            # Multiply each derivative by it's dependent variable
            function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))
            ET.SubElement(function, 'description').text = str(coef + '__' + dep + ' = ' + deriv + '__' + dep + ' * ' + dep)

            product = ET.SubElement(function, 'product')
            ET.SubElement(product, 'property').text = 'aero/coefficient/' + deriv + '__' + dep

            depSignal = depNamesJsb[iDep]
            if depSignal is not None:
                ET.SubElement(product, 'property').text = depSignal # Dependent Variable/Signal

            scale = depScale[iDep]
            if scale is not None:
                if isinstance(scale, str):
                    ET.SubElement(product, 'property').text = str(scale) # Dependent Variable Scaling
                else:
                    ET.SubElement(product, 'value').text = str(scale) # Dependent Variable Scaling

        # Sum the Coeficients
        function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef))
        ET.SubElement(function, 'description').text = str(coef + ' summation')

        summation = ET.SubElement(function, 'sum')
        for dep in coefNamesFdm:
            ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep
        for dep in depNamesFdm:
            ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep

    #
    # Dimensionalize the Coefficients into Forces and Moments
    for coef in coefNames:
        convertCoef = coefTable[coef]

        axis = ET.SubElement(aerodynamics, 'axis', name = convertCoef['axis'])

        function = ET.SubElement(axis, 'function', name = str('aero/' + convertCoef['type'] + '/' + convertCoef['axis'] + '__' + coef))
        ET.SubElement(function, 'description').text = str(convertCoef['axis'] + ' from ' + coef)

        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/qbar-area' # qBar * sRef
        if convertCoef['scale'] is not None:
            ET.SubElement(product, 'property').text = convertCoef['scale'] # Coefficient Scaling
        ET.SubElement(product, 'property').text = 'aero/coefficient/' + coef

    return(aerodynamics)
#%%
def Propulsion(oFdm):
    """Build the JSBSim <propulsion> element from the oFdm['Prop'] definitions.

    Each entry references an external motor file and an external prop
    (thruster) file, and places the thruster in the body frame.
    """
    propulsion = ET.Element('propulsion')

    for propKey, prop in oFdm['Prop'].items():
        # Motor/Engine definition is pulled in from an external file.
        engine = ET.SubElement(propulsion, 'engine', file = prop['nameMotor'])

        # Thruster/Prop as an element of the Engine.
        thruster = ET.SubElement(engine, 'thruster', file = prop['nameProp'])

        location = ET.SubElement(thruster, 'location', unit = 'M')
        for axis, val in zip(('x', 'y', 'z'), prop['rProp_S_m']):
            ET.SubElement(location, axis).text = str(val)

        orient = ET.SubElement(thruster, 'orient', unit = 'DEG')
        for axis, val in zip(('roll', 'pitch', 'yaw'), prop['sProp_deg']):
            ET.SubElement(orient, axis).text = str(val)

        ET.SubElement(thruster, 'sense').text = str(prop['sense'])  # 1 = CW as viewed from cockpit, -1 = CCW
        ET.SubElement(thruster, 'p_factor').text = str(prop['p_factor'])

    return propulsion
#%% FCS
def FlightControl(oFdm):
    """Build the JSBSim <flight_control> element.

    Channels generated, in order:
      1. 'Pilot_Inputs'  - trims the FG normalized sticks and scales them
         with the oFdm['FCS']['Pilot'] gains (kRoll, kPitch, kYaw, kFlap).
      2. 'Control Mixer' - maps the command signals onto surfaces via the
         oFdm['FCS']['Mixer'] matrix ('surfNames' x 'inputs' -> 'surfMix').
      3. 'External Input Summations' - adds the externally-writable
         'cmd<Surf>_ext_rad' / 'cmdMotor_ext_nd' properties into the
         surface and throttle commands.

    Fixes vs. original: removed loop variables shadowing builtins
    ('type', 'input'), removed dead locals ('prop', 'motor', 'cmdMotor'),
    and removed misleading chained assignments; generated XML is unchanged.
    """
    # Pilot Inputs, use the FG normalized sticks
    fcsPilotDef = {'summer': {}, 'gain': {}}

    fcsPilotDef['summer']['pilotRoll_norm'] = {
        'inputList': ['fcs/aileron-cmd-norm', 'fcs/roll-trim-cmd-norm'],
        'min': -1.0, 'max': 1.0}
    fcsPilotDef['gain']['cmdRoll_rps'] = {
        'input': 'fcs/pilotRoll_norm', 'gain': oFdm['FCS']['Pilot']['kRoll']}

    fcsPilotDef['summer']['pilotPitch_norm'] = {
        'inputList': ['fcs/elevator-cmd-norm', 'fcs/pitch-trim-cmd-norm'],
        'min': -1.0, 'max': 1.0}
    fcsPilotDef['gain']['cmdPitch_rps'] = {
        'input': 'fcs/pilotPitch_norm', 'gain': oFdm['FCS']['Pilot']['kPitch']}

    fcsPilotDef['summer']['pilotYaw_norm'] = {
        'inputList': ['fcs/rudder-cmd-norm', 'fcs/yaw-trim-cmd-norm'],
        'min': -1.0, 'max': 1.0}
    fcsPilotDef['gain']['cmdYaw_rps'] = {
        'input': 'fcs/pilotYaw_norm', 'gain': oFdm['FCS']['Pilot']['kYaw']}

    fcsPilotDef['summer']['pilotFlap_norm'] = {
        'inputList': ['fcs/flap-cmd-norm'],
        'min': -1.0, 'max': 1.0}
    fcsPilotDef['gain']['cmdFlap_rad'] = {
        'input': 'fcs/pilotFlap_norm', 'gain': oFdm['FCS']['Pilot']['kFlap']}

    # Create the JSB-ML
    elemFCS = ET.Element('flight_control', name = 'Generic Flight Control')

    pilot = ET.SubElement(elemFCS, 'channel', name = 'Pilot_Inputs')

    # Summers first, then gains — matches the original dict iteration order.
    for keySum, entry in fcsPilotDef['summer'].items():
        summer = ET.SubElement(pilot, 'summer', name = keySum)
        for inSig in entry['inputList']:
            ET.SubElement(summer, 'input').text = inSig
        if ('min' in entry) or ('max' in entry):
            clipto = ET.SubElement(summer, 'clipto')
            if 'min' in entry:
                ET.SubElement(clipto, 'min').text = str(entry['min'])
            if 'max' in entry:
                ET.SubElement(clipto, 'max').text = str(entry['max'])
        ET.SubElement(summer, 'output').text = 'fcs/' + keySum

    for keyGain, entry in fcsPilotDef['gain'].items():
        gain = ET.SubElement(pilot, 'pure_gain', name = keyGain)
        ET.SubElement(gain, 'input').text = entry['input']
        ET.SubElement(gain, 'gain').text = str(entry['gain'])
        if ('min' in entry) or ('max' in entry):  # gains carry no limits today, kept for parity
            clipto = ET.SubElement(gain, 'clipto')
            if 'min' in entry:
                ET.SubElement(clipto, 'min').text = str(entry['min'])
            if 'max' in entry:
                ET.SubElement(clipto, 'max').text = str(entry['max'])
        ET.SubElement(gain, 'output').text = 'fcs/' + keyGain

    # Control System Surface Mixer
    mixer = ET.SubElement(elemFCS, 'channel', name = 'Control Mixer')
    fcsMixerDef = oFdm['FCS']['Mixer']

    for iSurf, surf in enumerate(fcsMixerDef['surfNames']):
        cmdSurf = 'cmd' + surf + '_rad'
        keyList = []
        for iIn, inName in enumerate(fcsMixerDef['inputs']):
            val = fcsMixerDef['surfMix'][iSurf][iIn]
            key = inName + '_2_' + surf
            if val != 0.0:  # only emit gains for non-zero mixer entries
                keyList.append(key)
                gain = ET.SubElement(mixer, 'pure_gain', name = key.replace('fcs/',''))
                ET.SubElement(gain, 'input').text = 'fcs/' + inName
                ET.SubElement(gain, 'gain').text = str(val)
                ET.SubElement(gain, 'output').text = 'fcs/' + key
        if keyList:
            summer = ET.SubElement(mixer, 'summer', name = cmdSurf)
            for key in keyList:
                ET.SubElement(summer, 'input').text = 'fcs/' + key
            ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf

    # Inputs for External Commands; these just add properties to create the nodes in the tree
    for surf in fcsMixerDef['surfNames']:
        ET.SubElement(elemFCS, 'property').text = 'fcs/' + 'cmd' + surf + '_ext_rad'
    ET.SubElement(elemFCS, 'property').text = 'fcs/' + 'cmdMotor_ext_nd'  # Motor external command

    # Sum the external commands into the surface and throttle commands
    extern = ET.SubElement(elemFCS, 'channel', name = 'External Input Summations')
    for surf in fcsMixerDef['surfNames']:
        cmdSurf = 'cmd' + surf + '_rad'
        cmdSurfExt = 'cmd' + surf + '_ext_rad'
        summer = ET.SubElement(extern, 'summer')
        ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurf
        ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurfExt
        ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf

    summer = ET.SubElement(extern, 'summer')
    ET.SubElement(summer, 'input').text = 'fcs/throttle-cmd-norm'
    ET.SubElement(summer, 'input').text = 'fcs/' + 'cmdMotor_ext_nd'
    ET.SubElement(summer, 'output').text = 'fcs/throttle-pos-norm'

    return elemFCS
#%% Effectors, for each surface define the 2nd order TF, and an 'actuator'
def Effectors(oFdm):
    """Build the 'Effectors' <system>: one JSBSim <actuator> per surface in
    oFdm['Act'], modeling lag, freeplay (hysteresis), transport delay, and
    optional position limits."""
    effectors = ET.Element('system', name = 'Effectors')
    channel = ET.SubElement(effectors, 'channel', name = 'Actuator Models')

    for surf, entry in oFdm['Act'].items():
        # Actuator - lag, freeplay, and delay
        actuator = ET.SubElement(channel, 'actuator', name = 'act' + surf)
        ET.SubElement(actuator, 'input').text = 'fcs/' + 'cmd' + surf + '_rad'
        ET.SubElement(actuator, 'lag').text = str(entry['lag_nd'])
        ET.SubElement(actuator, 'hysteresis_width').text = str(entry['freeplay_rad'])
        ET.SubElement(actuator, 'delay').text = str(entry['delay_s'])

        # Optional position limits
        if ('min' in entry) or ('max' in entry):
            clipto = ET.SubElement(actuator, 'clipto')
            if 'min' in entry:
                ET.SubElement(clipto, 'min').text = str(entry['min'])
            if 'max' in entry:
                ET.SubElement(clipto, 'max').text = str(entry['max'])

        ET.SubElement(actuator, 'output').text = 'fcs/' + 'pos' + surf + '_rad'

    return effectors
#%%
def Winch(oFdm):
    """Build the <external_reactions> element holding the winch 'hitch'
    force: application point and direction from oFdm['Winch']."""
    external_reactions = ET.Element('external_reactions')

    # Winch hitch force in the body frame
    force = ET.SubElement(external_reactions, 'force', name='hitch' , frame = 'BODY', unit='N')

    location = ET.SubElement(force, 'location', unit = 'M')
    for axis, val in zip(('x', 'y', 'z'), oFdm['Winch']['rHook_S_m']):
        ET.SubElement(location, axis).text = str(val)

    direction = ET.SubElement(force, 'direction')
    for axis, val in zip(('x', 'y', 'z'), oFdm['Winch']['sHook_deg']):
        ET.SubElement(direction, axis).text = str(val)

    return external_reactions
#%% IMU
def SensorImu(oFdm):
    """Build the 'Sensor - IMU' <system>: accelerometers, gyros, and
    magnetometers with their error models, from oFdm['Sensor']['Imu'].

    Each instrument family ('Accel', 'Gyro', 'Mag') is optional; only the
    keys present are generated.

    Fix: axisList was previously defined inside the 'Accel' branch only,
    which raised NameError for configs with 'Gyro'/'Mag' but no 'Accel'.
    It is now defined up front.
    """
    imu = ET.Element('system', name = 'Sensor - IMU')

    # Sensor axes shared by all instrument families.
    axisList = ['X', 'Y', 'Z']

    # Create time in us
    function = ET.SubElement(imu, 'function', name = 'sensor/imu/time_us')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
    ET.SubElement(product, 'value').text = str(1e6)

    # Accelerometers
    if 'Accel' in oFdm['Sensor']['Imu'].keys() :
        accelDef = oFdm['Sensor']['Imu']['Accel']
        channel = ET.SubElement(imu, 'channel', name = 'Temp Accelerometers')
        for axisName in axisList:
            accel = ET.SubElement(channel, 'accelerometer', name = 'Accel' + axisName)
            ET.SubElement(accel, 'axis').text = axisName

            location = ET.SubElement(accel, 'location', unit = 'M')
            ET.SubElement(location, 'x').text = str(accelDef['r_S_m'][0])
            ET.SubElement(location, 'y').text = str(accelDef['r_S_m'][1])
            ET.SubElement(location, 'z').text = str(accelDef['r_S_m'][2])

            orientation = ET.SubElement(accel, 'orientation', unit='DEG')
            ET.SubElement(orientation, 'roll').text = str(accelDef['s_deg'][0])
            ET.SubElement(orientation, 'pitch').text = str(accelDef['s_deg'][1])
            ET.SubElement(orientation, 'yaw').text = str(accelDef['s_deg'][2])

            ET.SubElement(accel, 'output').text = 'sensor/imu/accel' + axisName + '_true_fps2'

        # Convert Units Accelerometer to mps2
        # (kept inside the Accel branch; the converted signals only exist
        # when the accelerometers are generated — TODO confirm intent)
        for axisName in axisList:
            function = ET.SubElement(imu, 'function', name = 'sensor/imu/accel' + axisName + '_true_mps2')
            product = ET.SubElement(function, 'product')
            ET.SubElement(product, 'property').text = 'sensor/imu/accel' + axisName + '_true_fps2'
            ET.SubElement(product, 'value').text = str(ft2m)

        # Accelerometer Error Model
        channel = ET.SubElement(imu, 'channel', name = 'Accelerometer Error Model')
        for iAxis, axisName in enumerate(axisList):
            sensor = ET.SubElement(channel, 'sensor', name = 'Accel' + axisName)
            ET.SubElement(sensor, 'input').text = 'sensor/imu/accel' + axisName + '_true_mps2'
            ET.SubElement(sensor, 'lag').text = str(accelDef['lag'][iAxis])
            # noise magnitude derived from noiseVar scaled by 1/3 — matches the other sensor models
            ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * accelDef['noiseVar'][iAxis])
            ET.SubElement(sensor, 'drift_rate').text = str(accelDef['drift_ps'][iAxis])
            ET.SubElement(sensor, 'gain').text = str(accelDef['gain_nd'][iAxis])
            ET.SubElement(sensor, 'bias').text = str(accelDef['bias'][iAxis])
            ET.SubElement(sensor, 'delay').text = str(accelDef['delay_s'][iAxis])
            ET.SubElement(sensor, 'output').text = 'sensor/imu/accel' + axisName + '_mps2'

    # Gyros (error model parameters embedded directly in the <gyro> element)
    if 'Gyro' in oFdm['Sensor']['Imu'].keys() :
        errMod = oFdm['Sensor']['Imu']['Gyro']
        channel = ET.SubElement(imu, 'channel', name = 'Gyros')
        for iAxis, axisName in enumerate(axisList):
            gyro = ET.SubElement(channel, 'gyro', name = 'Gyro' + axisName)
            ET.SubElement(gyro, 'axis').text = axisName

            location = ET.SubElement(gyro, 'location', unit = 'M')
            ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])
            ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])
            ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])

            orientation = ET.SubElement(gyro, 'orientation', unit='DEG')
            ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])
            ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])
            ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])

            ET.SubElement(gyro, 'lag').text = str(errMod['lag'][iAxis])
            ET.SubElement(gyro, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
            ET.SubElement(gyro, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
            ET.SubElement(gyro, 'gain').text = str(errMod['gain_nd'][iAxis])
            ET.SubElement(gyro, 'bias').text = str(errMod['bias'][iAxis])
            ET.SubElement(gyro, 'delay').text = str(errMod['delay_s'][iAxis])
            ET.SubElement(gyro, 'output').text = 'sensor/imu/gyro' + axisName + '_rps'

    # Magnetometers
    if 'Mag' in oFdm['Sensor']['Imu'].keys() :
        errMod = oFdm['Sensor']['Imu']['Mag']
        channel = ET.SubElement(imu, 'channel', name = 'Magnetometers')
        for iAxis, axisName in enumerate(axisList):
            mag = ET.SubElement(channel, 'magnetometer', name = 'Mag' + axisName)
            ET.SubElement(mag, 'axis').text = axisName

            location = ET.SubElement(mag, 'location', unit = 'M')
            ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])
            ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])
            ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])

            orientation = ET.SubElement(mag, 'orientation', unit='DEG')
            ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])
            ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])
            ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])

            ET.SubElement(mag, 'lag').text = str(errMod['lag'][iAxis])
            ET.SubElement(mag, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
            ET.SubElement(mag, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
            ET.SubElement(mag, 'gain').text = str(errMod['gain_nd'][iAxis])
            ET.SubElement(mag, 'bias').text = str(errMod['bias'][iAxis])
            ET.SubElement(mag, 'delay').text = str(errMod['delay_s'][iAxis])
            ET.SubElement(mag, 'output').text = 'sensor/imu/mag' + axisName + '_nT'

        # Magnetometer unit conversion (nT -> uT)
        for axisName in axisList:
            function = ET.SubElement(imu, 'function', name = 'sensor/imu/mag' + axisName + '_uT')
            product = ET.SubElement(function, 'product')
            ET.SubElement(product, 'property').text = 'sensor/imu/mag' + axisName + '_nT'
            ET.SubElement(product, 'value').text = str(0.001)

    return imu
#%% GPS
def SensorGps(oFdm):
    """Build the 'Sensor - GPS' <system>: true position/velocity signals
    converted to SI units, plus the GPS error models driven by
    oFdm['Sensor']['Gps'] ('Pos' and 'Vel' entries)."""
    gps = ET.Element('system', name = 'Sensor - GPS')

    # Create time in us
    function = ET.SubElement(gps, 'function', name = 'sensor/gps/time_us')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
    ET.SubElement(product, 'value').text = str(1e6)

    # GPS Position (geodetic latitude, longitude; altitude converted ft -> m)
    function = ET.SubElement(gps, 'function', name = 'sensor/gps/lat_true_rad')
    ET.SubElement(function, 'property').text = 'position/lat-geod-rad'

    function = ET.SubElement(gps, 'function', name = 'sensor/gps/long_true_rad')
    ET.SubElement(function, 'property').text = 'position/long-gc-rad'

    function = ET.SubElement(gps, 'function', name = 'sensor/gps/alt_true_m')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'position/h-sl-ft'
    ET.SubElement(product, 'value').text = str(ft2m)

    # GPS Velocity (NED components, converted fps -> mps)
    function = ET.SubElement(gps, 'function', name = 'sensor/gps/vNorth_true_mps')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'velocities/v-north-fps'
    ET.SubElement(product, 'value').text = str(ft2m)

    function = ET.SubElement(gps, 'function', name = 'sensor/gps/vEast_true_mps')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'velocities/v-east-fps'
    ET.SubElement(product, 'value').text = str(ft2m)

    function = ET.SubElement(gps, 'function', name = 'sensor/gps/vDown_true_mps')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'velocities/v-down-fps'
    ET.SubElement(product, 'value').text = str(ft2m)

    # GPS Error Model
    channel = ET.SubElement(gps, 'channel', name = 'GPS Error Models')

    # Position error models: e.g. 'lat_rad' reads 'sensor/gps/lat_true_rad'
    # (the .replace inserts '_true_' at the unit-suffix underscore)
    axisList = ['lat_rad', 'long_rad', 'alt_m']
    errMod = oFdm['Sensor']['Gps']['Pos']
    for iAxis, axisName in enumerate(axisList):
        sensor = ET.SubElement(channel, 'sensor', name = axisName)
        ET.SubElement(sensor, 'input').text = 'sensor/gps/' + axisName.replace('_', '_true_')
        ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
        # noise magnitude derived from noiseVar scaled by 1/3 — matches the other sensor models
        ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
        ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
        ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
        ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
        ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
        ET.SubElement(sensor, 'output').text = 'sensor/gps/' + axisName

    # Velocity error models (same structure as the position models)
    axisList = ['vNorth_mps', 'vEast_mps', 'vDown_mps']
    errMod = oFdm['Sensor']['Gps']['Vel']
    for iAxis, axisName in enumerate(axisList):
        sensor = ET.SubElement(channel, 'sensor', name = axisName)
        ET.SubElement(sensor, 'input').text = 'sensor/gps/' + axisName.replace('_', '_true_')
        ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
        ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
        ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
        ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
        ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
        ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
        ET.SubElement(sensor, 'output').text = 'sensor/gps/' + axisName

    return(gps)
#%%
def SensorPitot(oFdm):
    """Build the 'Sensor - Pitot-Static Probe' <system>: true static/tip
    pressures (psf -> Pa) and temperature (Rankine -> Celsius), plus the
    pitot error models.

    Fix: the error model was previously read from oFdm['Sensor']['Gps']['Vel']
    (a copy-paste from SensorGps); it now reads oFdm['Sensor']['Pitot'],
    matching the pattern used by every other sensor builder.
    """
    pitot = ET.Element('system', name = 'Sensor - Pitot-Static Probe')

    # Create time in us
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/time_us')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
    ET.SubElement(product, 'value').text = str(1e6)

    # Airdata Static (psf -> Pa)
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presStatic_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'atmosphere/P-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Tip (Dynamic ~= Impact)
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presTip_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'aero/qbar-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Temperature: (T_Rankine - 491.67) * 5/9 = T_Celsius
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/temp_true_C')
    product = ET.SubElement(function, 'product')
    summation = ET.SubElement(product, 'sum')
    ET.SubElement(summation, 'property').text = 'atmosphere/T-R'
    ET.SubElement(summation, 'value').text = str(-491.67)
    ET.SubElement(product, 'value').text = str(5.0/9.0)

    # Pitot Error Model
    channel = ET.SubElement(pitot, 'channel', name = 'Pitot Error Models')
    axisList = ['presStatic_Pa', 'presTip_Pa', 'temp_C']
    errMod = oFdm['Sensor']['Pitot']  # was oFdm['Sensor']['Gps']['Vel'] — copy-paste bug
    for iAxis, axisName in enumerate(axisList):
        sensor = ET.SubElement(channel, 'sensor', name = axisName)
        ET.SubElement(sensor, 'input').text = 'sensor/pitot/' + axisName.replace('_', '_true_')
        ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
        ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
        ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
        ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
        ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
        ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
        ET.SubElement(sensor, 'output').text = 'sensor/pitot/' + axisName

    return pitot
#%%
def Sensor5Hole(oFdm):
    """Build the 'Sensor - 5Hole Probe' <system> from oFdm['Sensor']['5Hole'].

    Two calibration methods are supported:
      method 1: per-port pressures (alpha top/bottom, beta left/right) via the
                2-element 'alphaK1'/'betaK1' gains;
      method 2: single alpha/beta differential pressures via the scalar
                'alphaK2'/'betaK2' gains.

    Fixes: the method detection previously evaluated
    `'alphaK1' and 'betaK1' in keys`, which only tested 'betaK1' membership
    (the string literal is always truthy); both keys are now checked.  An
    unsupported configuration now raises ValueError instead of printing and
    then crashing later with NameError on `method`.
    """
    fiveHole = ET.Element('system', name = 'Sensor - 5Hole Probe')

    # Determine whether method #1 or method #2
    probeKeys = oFdm['Sensor']['5Hole'].keys()
    if ('alphaK1' in probeKeys) and ('betaK1' in probeKeys):
        method = 1
    elif ('alphaK2' in probeKeys) and ('betaK2' in probeKeys):
        method = 2
    else:
        raise ValueError('5Hole Probe: Need either (alphaK1 and betaK1) or (alphaK2 and betaK2)')

    # Create time in us
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/time_us')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
    ET.SubElement(product, 'value').text = str(1e6)

    # Airdata Static (psf -> Pa)
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presStatic_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'atmosphere/P-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Tip (Dynamic ~= Impact)
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presTip_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'aero/qbar-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Temperature: (T_Rankine - 491.67) * 5/9 = T_Celsius
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/temp_true_C')
    product = ET.SubElement(function, 'product')
    summation = ET.SubElement(product, 'sum')
    ET.SubElement(summation, 'property').text = 'atmosphere/T-R'
    ET.SubElement(summation, 'value').text = str(-491.67)
    ET.SubElement(product, 'value').text = str(5.0/9.0)

    # [Method 1] per-port pressures
    if method == 1:
        axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlphaBot_Pa', 'presAlphaTop_Pa', 'presBetaRight_Pa', 'presBetaLeft_Pa', 'temp_C']

        # Alpha Difference (presAlphaBot - presAlphaTop)
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaBot_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/alpha-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][0])

        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaTop_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/alpha-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][1])

        # Beta Difference (presBetaRight - presBetaLeft)
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaRight_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/beta-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][0])

        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaLeft_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/beta-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][1])

    # [Method 2] differential pressures
    elif method == 2:
        axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlpha_Pa', 'presBeta_Pa', 'temp_C']

        # Alpha differential pressure
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlpha_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/alpha-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK2'])

        # Beta differential pressure
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBeta_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/beta-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK2'])

    # 5Hole Error Model
    channel = ET.SubElement(fiveHole, 'channel', name = '5Hole Error Models')
    errMod = oFdm['Sensor']['5Hole']
    for iAxis, axisName in enumerate(axisList):
        sensor = ET.SubElement(channel, 'sensor', name = axisName)
        ET.SubElement(sensor, 'input').text = 'sensor/fiveHole/' + axisName.replace('_', '_true_')
        ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
        ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
        ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
        ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
        ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
        ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
        ET.SubElement(sensor, 'output').text = 'sensor/fiveHole/' + axisName

    return fiveHole
| 43.124288 | 143 | 0.628451 |
import os.path
from xml.etree import ElementTree as ET
import numpy as np
ft2m = 0.3048
psf2pa = 47.88026
def SaveXml(elem, saveFile):
    """Pretty-print ElementTree element `elem` and write it to `saveFile`,
    creating the destination directory if needed.

    Fixes: removed the redundant close() inside the `with` block; guard
    os.makedirs against a bare filename (empty dirname would raise).
    """
    from xml.dom import minidom

    uglyXml = ET.tostring(elem, 'utf-8')
    prettyXml = minidom.parseString(uglyXml).toprettyxml(indent='    ', newl = '\r\n')

    saveDir = os.path.dirname(saveFile)
    if saveDir:  # empty for a bare filename in the current directory
        os.makedirs(saveDir, exist_ok=True)
    with open(saveFile, 'w') as saveXML:
        saveXML.write(prettyXml)
def Aircraft(oFdm, convertFdm2Jsb, saveJsbPath, aircraftName):
    """Assemble the top-level JSBSim <fdm_config> for `aircraftName`.

    Each subsystem is generated from oFdm, written to its own XML file under
    saveJsbPath, and referenced from the aircraft definition, which is then
    written as '<aircraftName>.xml'.  Winch and the individual sensors are
    optional and only emitted when present in oFdm.
    """
    elemAircraft = ET.Element('fdm_config', version = '2.0', release = 'Alpha')

    def _Attach(tag, fileName, elem):
        # Reference the component file from the aircraft definition, then write it out.
        ET.SubElement(elemAircraft, tag, file = fileName)
        SaveXml(elem, os.path.join(saveJsbPath, fileName))

    # Mandatory subsystems, in the original emission order.
    _Attach('flight_control', 'FlightControl.xml', FlightControl(oFdm))
    _Attach('system', 'Effectors.xml', Effectors(oFdm))
    _Attach('mass_balance', 'Mass.xml', MassBalance(oFdm))
    _Attach('ground_reactions', 'Gear.xml', GroundReactions(oFdm))
    _Attach('propulsion', 'Propulsion.xml', Propulsion(oFdm))
    _Attach('metrics', 'Metrics.xml', Metrics(oFdm))
    _Attach('aerodynamics', 'Aero.xml', Aerodynamics(oFdm, convertFdm2Jsb))

    # Optional subsystems.
    if 'Winch' in oFdm.keys() :
        _Attach('external_reactions', 'Winch.xml', Winch(oFdm))
    if 'Imu' in oFdm['Sensor'].keys() :
        _Attach('system', 'SensorImu.xml', SensorImu(oFdm))
    if 'Gps' in oFdm['Sensor'].keys() :
        _Attach('system', 'SensorGps.xml', SensorGps(oFdm))
    if 'Pitot' in oFdm['Sensor'].keys() :
        _Attach('system', 'SensorPitot.xml', SensorPitot(oFdm))
    if '5Hole' in oFdm['Sensor'].keys() :
        _Attach('system', 'Sensor5Hole.xml', Sensor5Hole(oFdm))

    # Write the top-level aircraft definition.
    SaveXml(elemAircraft, os.path.join(saveJsbPath, aircraftName + '.xml'))

    return elemAircraft
def TableGen(elemParent, tableArray, tableSignals, tableBreakPts):
    """Dispatch to the 1/2/3-D table generators based on the dimensionality
    of `tableArray`; degenerate (scalar / single-element) tables are emitted
    as a plain <value> element.

    Returns the created <table> element, or — for the degenerate case — the
    value string assigned to the <value> element (preserved from the
    original chained-assignment behavior).

    Fixes: removed the dead iAxisRemList computation (built, never used);
    replaced the bitwise '&' on booleans with logical 'and'.
    """
    ndim = len(tableArray.shape)

    if ndim == 3:
        table = TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts)
    elif ndim == 2:
        table = TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts)
    elif (ndim == 1) and (tableArray.size > 1):
        table = TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts)
    else:
        # Degenerate table: a single value
        table = ET.SubElement(elemParent, 'value').text = str(tableArray)

    return table
def TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts):
    """Append a 3-D JSBSim <table> (row/column/table lookup) under elemParent.

    tableArray is indexed [row, column, table]; tableBreakPts holds the
    breakpoint vectors for the three lookup dimensions in the same order.
    One <tableData> block is emitted per 'table' breakpoint.
    """
    table = ET.SubElement(elemParent, 'table')
    ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals[0]
    ET.SubElement(table, 'independentVar', lookup = 'column').text = tableSignals[1]
    ET.SubElement(table, 'independentVar', lookup = 'table').text = tableSignals[2]

    # Formatting constants for the ASCII table body
    indentSpace = ' '*4
    indentLvl = 4

    numRows, numColumns, numTables = np.shape(tableArray)

    # Column-breakpoint header line, shared by every <tableData> block
    columnHeader = indentSpace*(indentLvl)
    for columnVal in tableBreakPts[1]:
        columnHeader += ' '*6 + str(columnVal)

    for iTable in range(0, numTables):
        tableStr = ['\n' + columnHeader]
        for iRow in range(0, numRows):
            # Strip numpy's brackets/newlines from the printed row vector
            rowStr = str(tableArray[iRow, :, iTable]).replace('[','').replace(']','').replace('\n', '')
            tableStr.append(indentLvl*indentSpace + str(tableBreakPts[0][iRow]) + indentSpace + rowStr)
        tableStr = '\n'.join(tableStr) + '\n' + indentLvl*indentSpace
        ET.SubElement(table, 'tableData', breakPoint = str(tableBreakPts[2][iTable])).text = tableStr

    return table
def TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts):
    """Append a 2-D JSBSim <table> (row/column lookup) under elemParent.

    NOTE(review): the array is transposed before writing, so the incoming
    tableArray appears to be indexed [column, row] — confirm with callers.
    """
    table = ET.SubElement(elemParent, 'table')
    ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals[0]
    ET.SubElement(table, 'independentVar', lookup = 'column').text = tableSignals[1]

    # Formatting constants for the ASCII table body
    indentSpace = ' '*4
    indentLvl = 4

    # Written rows correspond to the transposed input
    tableArray = tableArray.transpose()
    numRows, numColumns = np.shape(tableArray)

    # Column-breakpoint header line
    columnHeader = indentSpace*(indentLvl)
    for columnVal in tableBreakPts[1]:
        columnHeader += ' '*6 + str(columnVal)

    tableStr = ['\n' + columnHeader]
    for iRow in range(0, numRows):
        # Strip numpy's brackets/newlines from the printed row vector
        rowStr = str(tableArray[iRow]).replace('[','').replace(']','').replace('\n', '')
        tableStr.append(indentLvl*indentSpace + str(tableBreakPts[0][iRow]) + indentSpace + rowStr)
    tableStr = '\n'.join(tableStr) + '\n' + indentLvl*indentSpace

    ET.SubElement(table, 'tableData').text = tableStr

    return table
def TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts):
    """Append a 1-D JSBSim lookup <table> (row lookup only) under elemParent."""
    table = ET.SubElement(elemParent, 'table')
    ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals

    pad = '    ' * 4  # leading indent for each table row

    rows = ['\n']
    for iRow, val in enumerate(tableArray):
        # Strip numpy's brackets/newlines from the printed value
        valStr = str(val).replace('[', '').replace(']', '').replace('\n', '')
        rows.append(pad + str(tableBreakPts[iRow]) + '    ' + valStr)

    ET.SubElement(table, 'tableData').text = '\n'.join(rows) + '\n' + pad

    return table
def MassBalance(oFdm):
    """Build the JSBSim <mass_balance> element from oFdm['MassProp']:
    empty weight, CG location, and the inertia tensor entries."""
    massProp = oFdm['MassProp']

    mass_balance = ET.Element('mass_balance')
    ET.SubElement(mass_balance, 'emptywt', unit = 'KG').text = str(massProp['mass_kg'])

    location = ET.SubElement(mass_balance, 'location', name = 'CG', unit = 'M')
    for axis, val in zip(('x', 'y', 'z'), massProp['rCG_S_m']):
        ET.SubElement(location, axis).text = str(val)

    # Inertia tensor entries; only the upper triangle is written.
    inertia = massProp['inertia_kgm2']
    for tag, (i, j) in (('ixx', (0, 0)), ('iyy', (1, 1)), ('izz', (2, 2)),
                        ('ixy', (0, 1)), ('ixz', (0, 2)), ('iyz', (1, 2))):
        ET.SubElement(mass_balance, tag, unit = 'KG*M2').text = str(inertia[i, j])

    return mass_balance
def GroundReactions(oFdm):
    """Build the JSBSim <ground_reactions> element: one BOGEY contact per
    entry in oFdm['Gear'] with its location, friction, and spring/damper."""
    ground_reactions = ET.Element('ground_reactions')

    for gearName, gearDef in oFdm['Gear'].items():
        contact = ET.SubElement(ground_reactions, 'contact', type = 'BOGEY', name = gearName)

        location = ET.SubElement(contact, 'location', unit = 'M')
        for axis, val in zip(('x', 'y', 'z'), gearDef['rGear_S_m']):
            ET.SubElement(location, axis).text = str(val)

        ET.SubElement(contact, 'static_friction').text = str(gearDef['FricStatic'])
        ET.SubElement(contact, 'dynamic_friction').text = str(gearDef['FricDynamic'])
        ET.SubElement(contact, 'rolling_friction').text = str(gearDef['FricRoll'])
        ET.SubElement(contact, 'spring_coeff', unit = 'N/M').text = str(gearDef['kSpring_Npm'])
        ET.SubElement(contact, 'damping_coeff', unit = 'N/M/SEC').text = str(gearDef['dampSpring_Nspm'])
        ET.SubElement(contact, 'max_steer', unit = 'DEG').text = '0.0'  # steering disabled

    return ground_reactions
def Metrics(oFdm):
    """Build the JSBSim <metrics> element: reference geometry plus the
    AERORP, EYEPOINT, and VRP reference-point locations.

    :param oFdm: dict; reads oFdm['Aero']['Ref'] (S_m2, b_m, cBar_m, rAero_S_m).
    :return: xml.etree.ElementTree.Element for <metrics>.
    """
    ref = oFdm['Aero']['Ref']
    metrics = ET.Element('metrics')
    ET.SubElement(metrics, 'wingarea', unit = 'M2').text = str(ref['S_m2'])
    ET.SubElement(metrics, 'wingspan', unit = 'M').text = str(ref['b_m'])
    ET.SubElement(metrics, 'chord', unit = 'M').text = str(ref['cBar_m'])

    def _addLocation(name, coords):
        # Append one named <location> with x/y/z children.
        loc = ET.SubElement(metrics, 'location', name = name, unit = 'M')
        for axis, value in zip('xyz', coords):
            ET.SubElement(loc, axis).text = str(value)

    _addLocation('AERORP', ref['rAero_S_m'])
    # Eyepoint is co-located with the aero reference point
    _addLocation('EYEPOINT', ref['rAero_S_m'])
    _addLocation('VRP', ['0.0', '0.0', '0.0'])
    return metrics
def Aerodynamics(oFdm, convertFdm2Jsb):
    """Build the JSBSim <aerodynamics> element from the oFdm aero tables.

    For every coefficient (CL, CD, CY, CMl, CMm, CMn) this writes:
      1. one lookup <table> per base dependency in convertFdm2Jsb['Coef'],
      2. one derivative <table> plus a <product> (derivative * signal * scale)
         per rate/control dependency in convertFdm2Jsb['Dep'],
      3. a <sum> collecting all contributions into aero/coefficient/<coef>,
      4. an <axis> entry dimensionalizing the summed coefficient with
         qbar*S (and span/chord scaling for the moments).

    NOTE(review): the source line defining coefTable was corrupted in this
    file ("e': None, ..."); the 'CL' entry below is reconstructed from the
    pattern of the five intact entries — confirm against the original repo.

    :param oFdm: dict holding oFdm['Aero']['Coef'] table data.
    :param convertFdm2Jsb: naming/scaling/table-breakpoint conversion defs.
    :return: xml.etree.ElementTree.Element for <aerodynamics>.
    """
    import copy

    # Naming and table definitions
    coefNamesFdm = convertFdm2Jsb['Coef']['oFdm']
    depNamesFdm = convertFdm2Jsb['Dep']['oFdm']
    depNamesJsb = convertFdm2Jsb['Dep']['jsb']
    depScale = convertFdm2Jsb['Dep']['scale']
    indVarTable = convertFdm2Jsb['TableDef']['jsb']
    breakPtsTable = convertFdm2Jsb['TableDef']['brkPts']

    aeroTable = oFdm['Aero']['Coef']

    # Coefficient -> JSBSim axis/type mapping ('CL' entry reconstructed)
    coefTable = {'CL': {'axis': 'LIFT', 'scale': None, 'type': 'force', 'deriv': 'dCL'}, \
        'CD': {'axis': 'DRAG', 'scale': None, 'type': 'force', 'deriv': 'dCD'}, \
        'CY': {'axis': 'SIDE', 'scale': None, 'type': 'force', 'deriv': 'dCY'}, \
        'CMl': {'axis': 'ROLL', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMl'}, \
        'CMm': {'axis': 'PITCH', 'scale': 'metrics/cbarw-ft', 'type': 'moment', 'deriv': 'dCMm'}, \
        'CMn': {'axis': 'YAW', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMn'}}

    aerodynamics = ET.Element('aerodynamics')

    coefNames = coefTable.keys()
    for iCoef, coef in enumerate(coefNames):
        convertCoef = coefTable[coef]

        # Base coefficient lookup tables
        for iDep, dep in enumerate(coefNamesFdm):
            function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))
            ET.SubElement(function, 'description').text = str(coef + '__' + dep)
            tableArray = aeroTable[coef][dep]
            table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(indVarTable), copy.deepcopy(breakPtsTable))

        # Derivative tables, then product terms (derivative * signal * scale)
        deriv = convertCoef['deriv']
        for iDep, dep in enumerate(depNamesFdm):
            function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + deriv + '__' + dep))
            ET.SubElement(function, 'description').text = str(deriv + '__' + dep)
            tableArray = aeroTable[deriv][dep]
            table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(indVarTable), copy.deepcopy(breakPtsTable))

            function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))
            ET.SubElement(function, 'description').text = str(coef + '__' + dep + ' = ' + deriv + '__' + dep + ' * ' + dep)
            product = ET.SubElement(function, 'product')
            ET.SubElement(product, 'property').text = 'aero/coefficient/' + deriv + '__' + dep
            depSignal = depNamesJsb[iDep]
            if depSignal != None:
                ET.SubElement(product, 'property').text = depSignal  # Dependent Variable/Signal
            scale = depScale[iDep]
            if scale != None:
                if isinstance(scale, str):
                    ET.SubElement(product, 'property').text = str(scale)  # Dependent Variable Scaling (property)
                else:
                    ET.SubElement(product, 'value').text = str(scale)  # Dependent Variable Scaling (constant)

        # Sum the coefficient contributions
        function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef))
        ET.SubElement(function, 'description').text = str(coef + ' summation')
        summation = ET.SubElement(function, 'sum')
        for dep in coefNamesFdm:
            ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep
        for dep in depNamesFdm:
            ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep

    # Dimensionalize the coefficients into forces and moments
    for iCoef, coef in enumerate(coefNames):
        convertCoef = coefTable[coef]
        axis = ET.SubElement(aerodynamics, 'axis', name = convertCoef['axis'])
        function = ET.SubElement(axis, 'function', name = str('aero/' + convertCoef['type'] + '/' + convertCoef['axis'] + '__' + coef))
        ET.SubElement(function, 'description').text = str(convertCoef['axis'] + ' from ' + coef)
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/qbar-area'  # qBar * sRef
        if convertCoef['scale'] != None:
            ET.SubElement(product, 'property').text = convertCoef['scale']  # span/chord scaling for moments
        ET.SubElement(product, 'property').text = 'aero/coefficient/' + coef

    return(aerodynamics)
#%%
def Propulsion(oFdm):
    """Build the JSBSim <propulsion> element: one <engine> referencing the
    motor definition file, with its <thruster> (prop) placed and oriented.

    :param oFdm: dict; reads oFdm['Prop'][<name>] entries.
    :return: xml.etree.ElementTree.Element for <propulsion>.
    """
    propulsion = ET.Element('propulsion')
    for propDef in oFdm['Prop'].values():
        # Motor/Engine, defined in its own file
        engine = ET.SubElement(propulsion, 'engine', file = propDef['nameMotor'])
        # Thruster/Prop as an element of the engine
        thruster = ET.SubElement(engine, 'thruster', file = propDef['nameProp'])
        location = ET.SubElement(thruster, 'location', unit = 'M')
        for axis, value in zip('xyz', propDef['rProp_S_m']):
            ET.SubElement(location, axis).text = str(value)
        orient = ET.SubElement(thruster, 'orient', unit = 'DEG')
        for axis, value in zip(('roll', 'pitch', 'yaw'), propDef['sProp_deg']):
            ET.SubElement(orient, axis).text = str(value)
        # sense: 1 = CW as viewed from cockpit, -1 = CCW
        ET.SubElement(thruster, 'sense').text = str(propDef['sense'])
        ET.SubElement(thruster, 'p_factor').text = str(propDef['p_factor'])
    return propulsion
#%% FCS
def FlightControl(oFdm):
    """Build the JSBSim <flight_control> element.

    Creates three channels:
      1. 'Pilot_Inputs' - FlightGear normalized stick inputs summed with
         trim, clipped to [-1, 1], then scaled to command units.
      2. 'Control Mixer' - gains mapping commands to individual surfaces
         plus a summation per surface, driven by oFdm['FCS']['Mixer'].
      3. 'External Input Summations' - adds externally injected commands
         (fcs/cmd<surf>_ext_rad, fcs/cmdMotor_ext_nd) onto the internal ones.

    Fixes vs. original: loop variables no longer shadow the builtins
    `type` and `input`; removed the unused local `cmdMotor`.

    :param oFdm: dict with oFdm['FCS']['Pilot'] gains (kRoll, kPitch, kYaw,
        kFlap) and oFdm['FCS']['Mixer'] (surfNames, inputs, surfMix).
    :return: xml.etree.ElementTree.Element for <flight_control>.
    """
    # --- Pilot input definition: summers (stick + trim) and gains (to command units)
    fcsPilotDef = {}
    fcsPilotDef['summer'] = {}
    fcsPilotDef['gain'] = {}

    fcsPilotDef['summer']['pilotRoll_norm'] = {}
    fcsPilotDef['summer']['pilotRoll_norm']['inputList'] = ['fcs/aileron-cmd-norm', 'fcs/roll-trim-cmd-norm']
    fcsPilotDef['summer']['pilotRoll_norm']['min'] = -1.0
    fcsPilotDef['summer']['pilotRoll_norm']['max'] = 1.0

    fcsPilotDef['gain']['cmdRoll_rps'] = {}
    fcsPilotDef['gain']['cmdRoll_rps']['input'] = 'fcs/pilotRoll_norm'
    fcsPilotDef['gain']['cmdRoll_rps']['gain'] = oFdm['FCS']['Pilot']['kRoll']

    fcsPilotDef['summer']['pilotPitch_norm'] = {}
    fcsPilotDef['summer']['pilotPitch_norm']['inputList'] = ['fcs/elevator-cmd-norm', 'fcs/pitch-trim-cmd-norm']
    fcsPilotDef['summer']['pilotPitch_norm']['min'] = -1.0
    fcsPilotDef['summer']['pilotPitch_norm']['max'] = 1.0

    fcsPilotDef['gain']['cmdPitch_rps'] = {}
    fcsPilotDef['gain']['cmdPitch_rps']['input'] = 'fcs/pilotPitch_norm'
    fcsPilotDef['gain']['cmdPitch_rps']['gain'] = oFdm['FCS']['Pilot']['kPitch']

    fcsPilotDef['summer']['pilotYaw_norm'] = {}
    fcsPilotDef['summer']['pilotYaw_norm']['inputList'] = ['fcs/rudder-cmd-norm', 'fcs/yaw-trim-cmd-norm']
    fcsPilotDef['summer']['pilotYaw_norm']['min'] = -1.0
    fcsPilotDef['summer']['pilotYaw_norm']['max'] = 1.0

    fcsPilotDef['gain']['cmdYaw_rps'] = {}
    fcsPilotDef['gain']['cmdYaw_rps']['input'] = 'fcs/pilotYaw_norm'
    fcsPilotDef['gain']['cmdYaw_rps']['gain'] = oFdm['FCS']['Pilot']['kYaw']

    fcsPilotDef['summer']['pilotFlap_norm'] = {}
    fcsPilotDef['summer']['pilotFlap_norm']['inputList'] = ['fcs/flap-cmd-norm']
    fcsPilotDef['summer']['pilotFlap_norm']['min'] = -1.0
    fcsPilotDef['summer']['pilotFlap_norm']['max'] = 1.0

    fcsPilotDef['gain']['cmdFlap_rad'] = {}
    fcsPilotDef['gain']['cmdFlap_rad']['input'] = 'fcs/pilotFlap_norm'
    fcsPilotDef['gain']['cmdFlap_rad']['gain'] = oFdm['FCS']['Pilot']['kFlap']

    # --- Create the JSB-ML
    elemFCS = ET.Element('flight_control', name = 'Generic Flight Control')

    pilot = ET.SubElement(elemFCS, 'channel', name = 'Pilot_Inputs')
    for defType in fcsPilotDef:  # 'defType' avoids shadowing builtin `type`
        if defType == 'summer':
            for key in fcsPilotDef['summer'].keys():
                entry = fcsPilotDef['summer'][key]
                summer = ET.SubElement(pilot, 'summer', name = key)
                for inputName in entry['inputList']:  # avoids shadowing builtin `input`
                    ET.SubElement(summer, 'input').text = inputName
                if ('min' in entry.keys()) or ('max' in entry.keys()):
                    clipto = ET.SubElement(summer, 'clipto')
                    if 'min' in entry.keys():
                        ET.SubElement(clipto, 'min').text = str(entry['min'])
                    if 'max' in entry.keys():
                        ET.SubElement(clipto, 'max').text = str(entry['max'])
                ET.SubElement(summer, 'output').text = 'fcs/' + key
        if defType == 'gain':
            for key in fcsPilotDef['gain'].keys():
                entry = fcsPilotDef['gain'][key]
                gain = ET.SubElement(pilot, 'pure_gain', name = key)
                ET.SubElement(gain, 'input').text = entry['input']
                ET.SubElement(gain, 'gain').text = str(entry['gain'])
                if ('min' in entry.keys()) or ('max' in entry.keys()):
                    clipto = ET.SubElement(gain, 'clipto')
                    if 'min' in entry.keys():
                        ET.SubElement(clipto, 'min').text = str(entry['min'])
                    if 'max' in entry.keys():
                        ET.SubElement(clipto, 'max').text = str(entry['max'])
                ET.SubElement(gain, 'output').text = 'fcs/' + key

    # --- Control System Surface Mixer
    mixer = ET.SubElement(elemFCS, 'channel', name = 'Control Mixer')
    fcsMixerDef = oFdm['FCS']['Mixer']
    for iSurf, surf in enumerate(fcsMixerDef['surfNames']):
        cmdSurf = 'cmd' + surf + '_rad'
        keyList = []
        for iInput, inputName in enumerate(fcsMixerDef['inputs']):
            val = fcsMixerDef['surfMix'][iSurf][iInput]
            key = inputName + '_2_' + surf
            if val != 0.0:  # only create gains for non-zero mixer entries
                keyList.append(key)
                gain = ET.SubElement(mixer, 'pure_gain', name = key.replace('fcs/', ''))
                ET.SubElement(gain, 'input').text = 'fcs/' + inputName
                ET.SubElement(gain, 'gain').text = str(val)
                ET.SubElement(gain, 'output').text = 'fcs/' + key
        if any(keyList):
            summer = ET.SubElement(mixer, 'summer', name = cmdSurf)
            for key in keyList:
                ET.SubElement(summer, 'input').text = 'fcs/' + key
            ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf

    # --- Inputs for External Commands; bare properties just create the tree nodes
    for surf in fcsMixerDef['surfNames']:
        cmdSurfExt = 'cmd' + surf + '_ext_rad'
        ET.SubElement(elemFCS, 'property').text = 'fcs/' + cmdSurfExt
    cmdMotorExt = 'cmdMotor_ext_nd'
    ET.SubElement(elemFCS, 'property').text = 'fcs/' + cmdMotorExt  # Motor external command

    # --- Sum external commands onto the internal ones
    extern = ET.SubElement(elemFCS, 'channel', name = 'External Input Summations')
    for surf in fcsMixerDef['surfNames']:
        cmdSurf = 'cmd' + surf + '_rad'
        cmdSurfExt = 'cmd' + surf + '_ext_rad'
        summer = ET.SubElement(extern, 'summer')
        ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurf
        ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurfExt
        ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf
    summer = ET.SubElement(extern, 'summer')
    ET.SubElement(summer, 'input').text = 'fcs/throttle-cmd-norm'
    ET.SubElement(summer, 'input').text = 'fcs/' + cmdMotorExt
    ET.SubElement(summer, 'output').text = 'fcs/throttle-pos-norm'

    return elemFCS
#%% Effectors, for each surface define the 2nd order TF, and an 'actuator'
def Effectors(oFdm):
    """Build the 'Effectors' <system>: one JSBSim <actuator> per control
    surface, modeling lag, freeplay (hysteresis), delay, and travel limits.

    :param oFdm: dict; reads oFdm['Act'][<surf>] actuator definitions.
    :return: xml.etree.ElementTree.Element for the <system>.
    """
    effectors = ET.Element('system', name = 'Effectors')
    channel = ET.SubElement(effectors, 'channel', name = 'Actuator Models')
    for surf, actDef in oFdm['Act'].items():
        actuator = ET.SubElement(channel, 'actuator', name = 'act' + surf)
        ET.SubElement(actuator, 'input').text = 'fcs/cmd' + surf + '_rad'
        ET.SubElement(actuator, 'lag').text = str(actDef['lag_nd'])
        ET.SubElement(actuator, 'hysteresis_width').text = str(actDef['freeplay_rad'])
        ET.SubElement(actuator, 'delay').text = str(actDef['delay_s'])
        # Optional travel limits
        hasMin = 'min' in actDef
        hasMax = 'max' in actDef
        if hasMin or hasMax:
            clipto = ET.SubElement(actuator, 'clipto')
            if hasMin:
                ET.SubElement(clipto, 'min').text = str(actDef['min'])
            if hasMax:
                ET.SubElement(clipto, 'max').text = str(actDef['max'])
        ET.SubElement(actuator, 'output').text = 'fcs/pos' + surf + '_rad'
    return effectors
#%%
def Winch(oFdm):
    """Build <external_reactions> containing the winch 'hitch' force
    (body-frame point of application and direction).

    :param oFdm: dict; reads oFdm['Winch'] (rHook_S_m, sHook_deg).
    :return: xml.etree.ElementTree.Element for <external_reactions>.
    """
    external_reactions = ET.Element('external_reactions')
    winchDef = oFdm['Winch']
    force = ET.SubElement(external_reactions, 'force', name='hitch', frame = 'BODY', unit='N')
    # Hook attach point
    location = ET.SubElement(force, 'location', unit = 'M')
    for axis, value in zip('xyz', winchDef['rHook_S_m']):
        ET.SubElement(location, axis).text = str(value)
    # Force direction
    direction = ET.SubElement(force, 'direction')
    for axis, value in zip('xyz', winchDef['sHook_deg']):
        ET.SubElement(direction, axis).text = str(value)
    return external_reactions
#%% IMU
def SensorImu(oFdm):
    """Build the 'Sensor - IMU' <system>: accelerometer, gyro, and
    magnetometer truth signals with per-axis error models
    (lag, noise, drift, gain, bias, delay).

    Each of the 'Accel', 'Gyro', and 'Mag' sections is emitted only when the
    corresponding key exists in oFdm['Sensor']['Imu'].

    Fix vs. original: axisList was defined inside the 'Accel' branch but used
    by the 'Gyro'/'Mag' branches, raising NameError whenever 'Accel' was
    absent; it is now defined up front.

    :param oFdm: dict; reads oFdm['Sensor']['Imu'][...] error-model entries.
    :return: xml.etree.ElementTree.Element for the IMU <system>.
    """
    imu = ET.Element('system', name = 'Sensor - IMU')

    # Sensor axes, shared by all three sections
    axisList = ['X', 'Y', 'Z']

    # Create time in us
    function = ET.SubElement(imu, 'function', name = 'sensor/imu/time_us')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
    ET.SubElement(product, 'value').text = str(1e6)

    # Accelerometers
    if 'Accel' in oFdm['Sensor']['Imu'].keys():
        channel = ET.SubElement(imu, 'channel', name = 'Temp Accelerometers')
        for axisName in axisList:
            accel = ET.SubElement(channel, 'accelerometer', name = 'Accel' + axisName)
            ET.SubElement(accel, 'axis').text = axisName
            location = ET.SubElement(accel, 'location', unit = 'M')
            ET.SubElement(location, 'x').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][0])
            ET.SubElement(location, 'y').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][1])
            ET.SubElement(location, 'z').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][2])
            orientation = ET.SubElement(accel, 'orientation', unit='DEG')
            ET.SubElement(orientation, 'roll').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][0])
            ET.SubElement(orientation, 'pitch').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][1])
            ET.SubElement(orientation, 'yaw').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][2])
            ET.SubElement(accel, 'output').text = 'sensor/imu/accel' + axisName + '_true_fps2'

        # Convert accelerometer truth from ft/s^2 to m/s^2
        for axisName in axisList:
            function = ET.SubElement(imu, 'function', name = 'sensor/imu/accel' + axisName + '_true_mps2')
            product = ET.SubElement(function, 'product')
            ET.SubElement(product, 'property').text = 'sensor/imu/accel' + axisName + '_true_fps2'
            ET.SubElement(product, 'value').text = str(ft2m)

        # Accelerometer Error Model
        channel = ET.SubElement(imu, 'channel', name = 'Accelerometer Error Model')
        errMod = oFdm['Sensor']['Imu']['Accel']
        for iAxis, axisName in enumerate(axisList):
            sensor = ET.SubElement(channel, 'sensor', name = 'Accel' + axisName)
            ET.SubElement(sensor, 'input').text = 'sensor/imu/accel' + axisName + '_true_mps2'
            ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
            # noiseVar entry is treated as 3-sigma; JSBSim noise takes 1-sigma
            ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
            ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
            ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
            ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
            ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
            ET.SubElement(sensor, 'output').text = 'sensor/imu/accel' + axisName + '_mps2'

    # Gyros
    if 'Gyro' in oFdm['Sensor']['Imu'].keys():
        errMod = oFdm['Sensor']['Imu']['Gyro']
        channel = ET.SubElement(imu, 'channel', name = 'Gyros')
        for iAxis, axisName in enumerate(axisList):
            gyro = ET.SubElement(channel, 'gyro', name = 'Gyro' + axisName)
            ET.SubElement(gyro, 'axis').text = axisName
            location = ET.SubElement(gyro, 'location', unit = 'M')
            ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])
            ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])
            ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])
            orientation = ET.SubElement(gyro, 'orientation', unit='DEG')
            ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])
            ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])
            ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])
            ET.SubElement(gyro, 'lag').text = str(errMod['lag'][iAxis])
            ET.SubElement(gyro, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
            ET.SubElement(gyro, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
            ET.SubElement(gyro, 'gain').text = str(errMod['gain_nd'][iAxis])
            ET.SubElement(gyro, 'bias').text = str(errMod['bias'][iAxis])
            ET.SubElement(gyro, 'delay').text = str(errMod['delay_s'][iAxis])
            ET.SubElement(gyro, 'output').text = 'sensor/imu/gyro' + axisName + '_rps'

    # Magnetometers
    if 'Mag' in oFdm['Sensor']['Imu'].keys():
        errMod = oFdm['Sensor']['Imu']['Mag']
        channel = ET.SubElement(imu, 'channel', name = 'Magnetometers')
        for iAxis, axisName in enumerate(axisList):
            mag = ET.SubElement(channel, 'magnetometer', name = 'Mag' + axisName)
            ET.SubElement(mag, 'axis').text = axisName
            location = ET.SubElement(mag, 'location', unit = 'M')
            ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])
            ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])
            ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])
            orientation = ET.SubElement(mag, 'orientation', unit='DEG')
            ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])
            ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])
            ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])
            ET.SubElement(mag, 'lag').text = str(errMod['lag'][iAxis])
            ET.SubElement(mag, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
            ET.SubElement(mag, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
            ET.SubElement(mag, 'gain').text = str(errMod['gain_nd'][iAxis])
            ET.SubElement(mag, 'bias').text = str(errMod['bias'][iAxis])
            ET.SubElement(mag, 'delay').text = str(errMod['delay_s'][iAxis])
            ET.SubElement(mag, 'output').text = 'sensor/imu/mag' + axisName + '_nT'

        # Magnetometer unit conversion (nT -> uT)
        for axisName in axisList:
            function = ET.SubElement(imu, 'function', name = 'sensor/imu/mag' + axisName + '_uT')
            product = ET.SubElement(function, 'product')
            ET.SubElement(product, 'property').text = 'sensor/imu/mag' + axisName + '_nT'
            ET.SubElement(product, 'value').text = str(0.001)

    return(imu)
#%% GPS
def SensorGps(oFdm):
    """Build the 'Sensor - GPS' <system>: truth position/velocity signals
    (converted to SI units) plus per-axis error models.

    :param oFdm: dict; reads oFdm['Sensor']['Gps']['Pos'] and ['Vel']
        error-model arrays.
    :return: xml.etree.ElementTree.Element for the GPS <system>.
    """
    gps = ET.Element('system', name = 'Sensor - GPS')

    def _scaled(name, srcProp, scaleVal):
        # <function> multiplying a JSBSim property by a constant factor.
        function = ET.SubElement(gps, 'function', name = name)
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = srcProp
        ET.SubElement(product, 'value').text = str(scaleVal)

    # Simulation time in microseconds
    _scaled('sensor/gps/time_us', 'simulation/sim-time-sec', 1e6)

    # GPS truth position
    function = ET.SubElement(gps, 'function', name = 'sensor/gps/lat_true_rad')
    ET.SubElement(function, 'property').text = 'position/lat-geod-rad'
    function = ET.SubElement(gps, 'function', name = 'sensor/gps/long_true_rad')
    ET.SubElement(function, 'property').text = 'position/long-gc-rad'
    _scaled('sensor/gps/alt_true_m', 'position/h-sl-ft', ft2m)

    # GPS truth NED velocity (ft/s -> m/s)
    _scaled('sensor/gps/vNorth_true_mps', 'velocities/v-north-fps', ft2m)
    _scaled('sensor/gps/vEast_true_mps', 'velocities/v-east-fps', ft2m)
    _scaled('sensor/gps/vDown_true_mps', 'velocities/v-down-fps', ft2m)

    # GPS Error Model
    channel = ET.SubElement(gps, 'channel', name = 'GPS Error Models')

    def _errorSensors(axisNames, errMod):
        # One <sensor> per channel: input is the matching *_true_* signal.
        for iAxis, axisName in enumerate(axisNames):
            sensor = ET.SubElement(channel, 'sensor', name = axisName)
            ET.SubElement(sensor, 'input').text = 'sensor/gps/' + axisName.replace('_', '_true_')
            ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
            ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
            ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
            ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
            ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
            ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
            ET.SubElement(sensor, 'output').text = 'sensor/gps/' + axisName

    _errorSensors(['lat_rad', 'long_rad', 'alt_m'], oFdm['Sensor']['Gps']['Pos'])
    _errorSensors(['vNorth_mps', 'vEast_mps', 'vDown_mps'], oFdm['Sensor']['Gps']['Vel'])
    return gps
#%%
def SensorPitot(oFdm):
    """Build the 'Sensor - Pitot-Static Probe' <system>: static pressure,
    tip (impact) pressure, and temperature truth signals plus error models.

    Fix vs. original: the error model previously read
    oFdm['Sensor']['Gps']['Vel'] (copy/paste from the GPS writer), applying
    velocity error statistics to pressure/temperature channels; it now reads
    oFdm['Sensor']['Pitot'], matching how Sensor5Hole reads
    oFdm['Sensor']['5Hole'].

    :param oFdm: dict; reads oFdm['Sensor']['Pitot'] error-model arrays
        ordered as [presStatic_Pa, presTip_Pa, temp_C].
    :return: xml.etree.ElementTree.Element for the probe <system>.
    """
    pitot = ET.Element('system', name = 'Sensor - Pitot-Static Probe')

    # Create time in us
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/time_us')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
    ET.SubElement(product, 'value').text = str(1e6)

    # Airdata Static (psf -> Pa)
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presStatic_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'atmosphere/P-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Tip (Dynamic ~= Impact) (psf -> Pa)
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presTip_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'aero/qbar-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Temperature (Rankine -> Celsius: (T_R - 491.67) * 5/9)
    function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/temp_true_C')
    product = ET.SubElement(function, 'product')
    summation = ET.SubElement(product, 'sum')
    ET.SubElement(summation, 'property').text = 'atmosphere/T-R'
    ET.SubElement(summation, 'value').text = str(-491.67)
    ET.SubElement(product, 'value').text = str(5.0/9.0)

    # Pitot Error Model
    channel = ET.SubElement(pitot, 'channel', name = 'Pitot Error Models')
    axisList = ['presStatic_Pa', 'presTip_Pa', 'temp_C']
    errMod = oFdm['Sensor']['Pitot']  # FIX: was oFdm['Sensor']['Gps']['Vel']
    for iAxis, axisName in enumerate(axisList):
        sensor = ET.SubElement(channel, 'sensor', name = axisName)
        ET.SubElement(sensor, 'input').text = 'sensor/pitot/' + axisName.replace('_', '_true_')
        ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
        ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
        ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
        ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
        ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
        ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
        ET.SubElement(sensor, 'output').text = 'sensor/pitot/' + axisName
    return(pitot)
#%%
def Sensor5Hole(oFdm):
    """Build the 'Sensor - 5Hole Probe' <system>.

    Two calibration parameterizations are supported:
      * method 1: separate bottom/top (alpha) and right/left (beta) port
        pressures, via 'alphaK1'/'betaK1' (each a 2-element gain pair).
      * method 2: pre-differenced alpha/beta pressures, via scalar
        'alphaK2'/'betaK2' gains.

    Fixes vs. original:
      * `if 'alphaK1' and 'betaK1' in keys` evaluated as
        `('alphaK1') and ('betaK1' in keys)` — i.e. only betaK1 was checked.
        Both keys are now tested explicitly (same for the K2 pair).
      * On no match the original only printed and then crashed later with a
        NameError on `method`; it now raises ValueError immediately.

    :param oFdm: dict; reads oFdm['Sensor']['5Hole'] for the calibration
        gains and the per-channel error-model arrays.
    :return: xml.etree.ElementTree.Element for the probe <system>.
    :raises ValueError: if neither (alphaK1, betaK1) nor (alphaK2, betaK2)
        is present.
    """
    fiveHole = ET.Element('system', name = 'Sensor - 5Hole Probe')

    # Determine whether method #1 or method #2
    keys5Hole = oFdm['Sensor']['5Hole'].keys()
    if ('alphaK1' in keys5Hole) and ('betaK1' in keys5Hole):
        method = 1
    elif ('alphaK2' in keys5Hole) and ('betaK2' in keys5Hole):
        method = 2
    else:
        raise ValueError('5Hole Probe: Need either (alphaK1 and betaK1) or (alphaK2 and betaK2)')

    # Create time in us
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/time_us')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
    ET.SubElement(product, 'value').text = str(1e6)

    # Airdata Static (psf -> Pa)
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presStatic_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'atmosphere/P-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Tip (Dynamic ~= Impact) (psf -> Pa)
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presTip_true_Pa')
    product = ET.SubElement(function, 'product')
    ET.SubElement(product, 'property').text = 'aero/qbar-psf'
    ET.SubElement(product, 'value').text = str(psf2pa)

    # Airdata Temperature (Rankine -> Celsius)
    function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/temp_true_C')
    product = ET.SubElement(function, 'product')
    summation = ET.SubElement(product, 'sum')
    ET.SubElement(summation, 'property').text = 'atmosphere/T-R'
    ET.SubElement(summation, 'value').text = str(-491.67)
    ET.SubElement(product, 'value').text = str(5.0/9.0)

    # [Method 1] separate port pressures
    if method == 1:
        axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlphaBot_Pa', 'presAlphaTop_Pa', 'presBetaRight_Pa', 'presBetaLeft_Pa', 'temp_C']

        # Alpha Difference ports (presAlphaBot - presAlphaTop)
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaBot_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/alpha-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][0])

        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaTop_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/alpha-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][1])

        # Beta Difference ports (presBetaRight - presBetaLeft)
        # (original comment mislabeled this as "[Method 2]")
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaRight_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/beta-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][0])

        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaLeft_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/beta-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][1])

    # [Method 2] pre-differenced pressures
    elif method == 2:
        axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlpha_Pa', 'presBeta_Pa', 'temp_C']

        # Alpha Difference (presAlphaBot - presAlphaTop)
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlpha_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/alpha-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK2'])

        # Beta Difference (presBetaRight - presBetaLeft)
        function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBeta_true_Pa')
        product = ET.SubElement(function, 'product')
        ET.SubElement(product, 'property').text = 'aero/beta-deg'
        ET.SubElement(product, 'property').text = 'aero/qbar-psf'
        ET.SubElement(product, 'value').text = str(psf2pa)
        ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK2'])

    # 5Hole Error Model
    channel = ET.SubElement(fiveHole, 'channel', name = '5Hole Error Models')
    errMod = oFdm['Sensor']['5Hole']
    for iAxis, axisName in enumerate(axisList):
        sensor = ET.SubElement(channel, 'sensor', name = axisName)
        ET.SubElement(sensor, 'input').text = 'sensor/fiveHole/' + axisName.replace('_', '_true_')
        ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
        ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
        ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
        ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
        ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
        ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
        ET.SubElement(sensor, 'output').text = 'sensor/fiveHole/' + axisName
    return(fiveHole)
| true | true |
f72986e13bc4533e79920b6bd0a416f26e97ee2f | 1,190 | py | Python | app.py | ajoss/tk-houdini-geometrynode | b732002c0014f78ff13bea2e86cbe23b890bbbf4 | [
"MIT"
] | 3 | 2019-04-17T12:39:20.000Z | 2019-11-04T07:25:59.000Z | app.py | ajoss/tk-houdini-geometrynode | b732002c0014f78ff13bea2e86cbe23b890bbbf4 | [
"MIT"
] | null | null | null | app.py | ajoss/tk-houdini-geometrynode | b732002c0014f78ff13bea2e86cbe23b890bbbf4 | [
"MIT"
] | 5 | 2018-09-19T08:13:14.000Z | 2020-02-15T14:50:01.000Z | # Copyright (c) 2015 Pixomondo
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the MIT License included in this
# distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the MIT License. All rights
# not expressly granted therein are reserved by Pixomondo.
"""
Geometry Output App for Houdini
"""
import sgtk
class GeometryOutputNode(sgtk.platform.Application):
    """Toolkit Application managing the Shotgun Geometry output node for
    Houdini; the node logic lives in the ``tk_houdini_geometrynode`` module.
    """

    def init_app(self):
        """App entry point: import the app module and create the node handler."""
        module = self.import_module("tk_houdini_geometrynode")
        # Handler implements the node creation/conversion logic for this app.
        self.handler = module.ToolkitGeometryNodeHandler(self)

    def convert_to_geometry_nodes(self):
        """
        Convert all Shotgun Geometry nodes found in the current Script to regular
        Geometry nodes. Additional toolkit information will be stored in
        user data named 'tk_*'
        """
        self.handler.convert_sg_to_geometry_nodes()

    def convert_from_geometry_nodes(self):
        """
        Convert all regular Geometry nodes that have previously been converted
        from Shotgun Geometry nodes, back into Shotgun Geometry nodes.
        """
        self.handler.convert_geometry_to_sg_nodes()
| 32.162162 | 81 | 0.721008 |
import sgtk
class GeometryOutputNode(sgtk.platform.Application):
    """Toolkit Application wrapping the Houdini Geometry output node; the
    node logic lives in the ``tk_houdini_geometrynode`` module.
    """

    def init_app(self):
        """App entry point: import the app module and create the node handler."""
        module = self.import_module("tk_houdini_geometrynode")
        self.handler = module.ToolkitGeometryNodeHandler(self)

    def convert_to_geometry_nodes(self):
        """Convert Shotgun Geometry nodes in the current script to regular
        Geometry nodes (toolkit info is kept in 'tk_*' user data)."""
        self.handler.convert_sg_to_geometry_nodes()

    def convert_from_geometry_nodes(self):
        """Convert previously-converted regular Geometry nodes back into
        Shotgun Geometry nodes."""
        self.handler.convert_geometry_to_sg_nodes()
| true | true |
f729876b03555c0a2ee2deffa22d9c2809f617fd | 5,342 | py | Python | urbanairship/push/schedule.py | urbanairship/python-library | f59d7140c16db7aec48e8ebbaf26f31e7f02ab26 | [
"Apache-2.0"
] | 26 | 2015-01-05T21:08:07.000Z | 2021-05-13T07:27:19.000Z | urbanairship/push/schedule.py | urbanairship/python-library | f59d7140c16db7aec48e8ebbaf26f31e7f02ab26 | [
"Apache-2.0"
] | 32 | 2015-01-08T23:46:36.000Z | 2022-02-02T18:17:58.000Z | urbanairship/push/schedule.py | urbanairship/python-library | f59d7140c16db7aec48e8ebbaf26f31e7f02ab26 | [
"Apache-2.0"
] | 33 | 2015-01-21T08:02:40.000Z | 2022-03-25T06:02:04.000Z | from datetime import datetime
from urbanairship import common
from urbanairship.push import ScheduledPush
VALID_DAYS = [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]
VALID_RECURRING_TYPES = ["hourly", "daily", "weekly", "monthly", "yearly"]
class ScheduledList(common.IteratorParent):
    """Iterator over every scheduled push message known to the API.

    :ivar limit: Number of entries to fetch in a paginated request.
    :returns Each ``next`` returns a :py:class:`ScheduledPush` object.
    """
    next_url = None
    data_attribute = "schedules"
    id_key = "url"
    instance_class = ScheduledPush

    def __init__(self, airship, limit=None):
        self.next_url = airship.urls.get("schedules_url")
        if limit:
            params = {"limit": limit}
        else:
            params = {}
        super(ScheduledList, self).__init__(airship, params)
def scheduled_time(timestamp):
    """Specify an absolute delivery time for this push.

    :param timestamp: A ``datetime.datetime`` object.
    """
    formatted = timestamp.strftime("%Y-%m-%dT%H:%M:%S")
    return {"scheduled_time": formatted}
def local_scheduled_time(timestamp):
    """Specify a delivery time for this push in device local time.

    :param timestamp: A ``datetime.datetime`` object.
    """
    formatted = timestamp.strftime("%Y-%m-%dT%H:%M:%S")
    return {"local_scheduled_time": formatted}
def best_time(timestamp):
    """Specify a date on which to send the push at the best per-device time.

    Only the date portion is used; hour/minute/second are discarded.

    :param timestamp: A ``datetime.datetime`` object.
    """
    send_date = timestamp.strftime("%Y-%m-%d")
    return {"best_time": {"send_date": send_date}}
def schedule_exclusion(
    start_hour=None, end_hour=None, start_date=None, end_date=None, days_of_week=None
):
    """
    Date-time ranges when messages are not sent.

    At least one of the hour range (start_hour and end_hour), the date range
    (start_date and end_date), or days_of_week must be included. All dates
    and times are inclusive.

    :param start_hour: Optional. An integer 0-23 representing the UTC hour to
        start exclusion. Must be passed together with end_hour.
    :param end_hour: Optional. An integer 0-23 representing the UTC hour to
        stop exclusion. Must be passed together with start_hour.
    :param start_date: Optional. A datetime.datetime object representing the
        UTC date to start exclusion. Hour/minute/seconds are included in the
        formatted range. Must be passed together with end_date.
    :param end_date: Optional. A datetime.datetime object representing the
        UTC date to stop exclusion. Must be passed together with start_date.
    :param days_of_week: Optional. A list of the days of the week to exclude
        on. Possible values: monday, tuesday, wednesday, thursday, friday,
        saturday, sunday.
    :raises ValueError: If a range is half-specified, an hour is outside
        0-23, a date is not a datetime.datetime, a day name is invalid, or
        no exclusion criterion is supplied at all.
    """
    exclusion = {}

    # Hour range: previously None defaults crashed with TypeError (None < 24)
    # and the error message wrongly referred to start_date/end_date; the
    # range is now genuinely optional and validated as 0-23 integers.
    if start_hour is not None or end_hour is not None:
        if start_hour is None or end_hour is None:
            raise ValueError("start_hour and end_hour must be used together")
        if not all(isinstance(hour, int) and 0 <= hour <= 23
                   for hour in (start_hour, end_hour)):
            raise ValueError("start_hour and end_hour must be integers 0-23")
        exclusion["hour_range"] = "{}-{}".format(start_hour, end_hour)

    # Date range: optional as documented; isinstance also accepts
    # datetime subclasses, unlike the old type(x) == datetime check.
    if start_date is not None or end_date is not None:
        if not all(isinstance(date, datetime) for date in (start_date, end_date)):
            raise ValueError("start_date and end_date must be datetime.datetime")
        exclusion["date_range"] = "{}/{}".format(
            start_date.strftime("%Y-%m-%dT%H:%M:%S"),
            end_date.strftime("%Y-%m-%dT%H:%M:%S"),
        )

    if days_of_week:
        for day in days_of_week:
            if day not in VALID_DAYS:
                raise ValueError("days_of_week must be {}".format(VALID_DAYS))
        exclusion["days_of_week"] = days_of_week

    if not exclusion:
        # Enforce the documented contract: at least one criterion required.
        raise ValueError(
            "at least one of hour range, date range, or days_of_week is required"
        )
    return exclusion
def recurring_schedule(
    count, type, end_time=None, days_of_week=None, exclusions=None, paused=False
):
    """
    Sets the cadence, end time, and excluded times for a recurring scheduled
    message.

    :param count: Required. The frequency of messaging corresponding to the
        type. For example, a count of 2 results in a message every 2 hours,
        days, weeks, months, etc. based on the type.
    :param type: Required. The unit of measurement for the cadence. Possible
        values: hourly, daily, weekly, monthly, yearly.
    :param days_of_week: Required when type is weekly; the days of the week
        on which Airship can send your message. Ignored for other types.
    :param end_time: Optional. A datetime.datetime object representing when
        the recurring schedule will end and stop sending messages.
    :param exclusions: Optional. A list of urbanairship.schedule_exclusion
        values defining times in which Airship will not send your message.
    :param paused: Optional. A boolean value representing the paused state of
        the scheduled message.
    :raises ValueError: On an unknown type or day name, or when type is
        weekly and days_of_week is missing.
    """
    if days_of_week is not None:
        for day in days_of_week:
            if day not in VALID_DAYS:
                raise ValueError("days of week can only include {}".format(VALID_DAYS))
    if type not in VALID_RECURRING_TYPES:
        raise ValueError("type must be one of {}".format(VALID_RECURRING_TYPES))
    cadence = {"type": type, "count": count}
    if type == "weekly":
        # The docstring marks days_of_week as required for weekly cadences;
        # previously a missing value silently produced "days_of_week": None.
        if not days_of_week:
            raise ValueError("days_of_week is required when type is 'weekly'")
        cadence["days_of_week"] = days_of_week
    recurring = {"cadence": cadence}
    if end_time:
        recurring["end_time"] = end_time.strftime("%Y-%m-%dT%H:%M:%S")
    if exclusions:
        recurring["exclusions"] = exclusions
    if paused is not None:
        # Always true with the False default, so "paused" is always emitted.
        recurring["paused"] = paused
    return {"recurring": recurring}
| 34.24359 | 87 | 0.672595 | from datetime import datetime
from urbanairship import common
from urbanairship.push import ScheduledPush
VALID_DAYS = [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]
VALID_RECURRING_TYPES = ["hourly", "daily", "weekly", "monthly", "yearly"]
class ScheduledList(common.IteratorParent):
    """Iterator for listing all scheduled messages.

    :ivar limit: Number of entries to fetch in a paginated request.
    :returns Each ``next`` returns a :py:class:`ScheduledPush` object.
    """
    next_url = None
    data_attribute = "schedules"
    id_key = "url"
    instance_class = ScheduledPush
    def __init__(self, airship, limit=None):
        # Entry-point URL for the schedules listing endpoint.
        self.next_url = airship.urls.get("schedules_url")
        params = {"limit": limit} if limit else {}
        super(ScheduledList, self).__init__(airship, params)
def scheduled_time(timestamp):
    """Specify a time for the delivery of this push.

    :param timestamp: A ``datetime.datetime`` object.
    """
    return {"scheduled_time": timestamp.strftime("%Y-%m-%dT%H:%M:%S")}
def local_scheduled_time(timestamp):
    """Specify a time for the delivery of this push in device local time.

    :param timestamp: A ``datetime.datetime`` object.
    """
    return {"local_scheduled_time": timestamp.strftime("%Y-%m-%dT%H:%M:%S")}
def best_time(timestamp):
    """Specify a date to send the push at the best time per-device.

    Only the date is kept; hour/minute/second information is discarded by
    the "%Y-%m-%d" format.

    :param timestamp: A ``datetime.datetime`` object.
    """
    return {"best_time": {"send_date": timestamp.strftime("%Y-%m-%d")}}
def schedule_exclusion(
    start_hour=None, end_hour=None, start_date=None, end_date=None, days_of_week=None
):
    """Build a date-time exclusion during which messages are not sent.

    :param start_hour: UTC hour at which exclusion starts (compared < 24).
    :param end_hour: UTC hour at which exclusion stops (compared < 24).
    :param start_date: ``datetime.datetime`` start of the excluded range.
    :param end_date: ``datetime.datetime`` end of the excluded range.
    :param days_of_week: Optional list of lowercase day names (VALID_DAYS).

    NOTE(review): despite the None defaults, both the hour range and the
    date range are effectively mandatory -- ``None < 24`` raises TypeError
    and missing dates raise ValueError below.
    """
    exclusion = {}
    if all([(start_hour < 24), (end_hour < 24)]):
        exclusion["hour_range"] = "{}-{}".format(start_hour, end_hour)
    else:
        # NOTE(review): misleading message -- this branch validates hours,
        # not start_date/end_date.
        raise ValueError("start_date and end_date must be datetime.datetime")
    if all([(type(start_date) == datetime), (type(end_date) == datetime)]):
        exclusion["date_range"] = "{}/{}".format(
            start_date.strftime("%Y-%m-%dT%H:%M:%S"),
            end_date.strftime("%Y-%m-%dT%H:%M:%S"),
        )
    else:
        raise ValueError("start_date and end_date must be datetime.datetime")
    if days_of_week:
        for day in days_of_week:
            if day not in VALID_DAYS:
                raise ValueError("days_of_week must be {}".format(VALID_DAYS))
        exclusion["days_of_week"] = days_of_week
    return exclusion
def recurring_schedule(
    count, type, end_time=None, days_of_week=None, exclusions=None, paused=False
):
    """Build the "recurring" payload fragment for a scheduled message.

    :param count: Frequency multiplier for the cadence type (e.g. every 2
        hours/days/weeks, depending on ``type``).
    :param type: Cadence unit; must be one of VALID_RECURRING_TYPES.
    :param end_time: Optional ``datetime.datetime`` after which sends stop.
    :param days_of_week: Day names used only when type is "weekly".
    :param exclusions: Optional list of schedule_exclusion dicts.
    :param paused: Paused state included in the payload.
    """
    if days_of_week is not None:
        for day in days_of_week:
            if day not in VALID_DAYS:
                raise ValueError("days of week can only include {}".format(VALID_DAYS))
    if type not in VALID_RECURRING_TYPES:
        raise ValueError("type must be one of {}".format(VALID_RECURRING_TYPES))
    cadence = {"type": type, "count": count}
    if type == "weekly":
        # NOTE(review): no check that days_of_week was actually provided for
        # weekly cadences; a None value would be embedded here.
        cadence["days_of_week"] = days_of_week
    recurring = {"cadence": cadence}
    if end_time:
        recurring["end_time"] = end_time.strftime("%Y-%m-%dT%H:%M:%S")
    if exclusions:
        recurring["exclusions"] = exclusions
    if paused is not None:
        # Always true with the False default, so "paused" is always present.
        recurring["paused"] = paused
    return {"recurring": recurring}
| true | true |
f72987cba217a15bd066d0afb902992a53d03c44 | 344 | py | Python | app/forms.py | LoisaKitakaya/The-Blog | 6f2abd70e7d65de938f162b6219c468e5736a1da | [
"MIT"
] | null | null | null | app/forms.py | LoisaKitakaya/The-Blog | 6f2abd70e7d65de938f162b6219c468e5736a1da | [
"MIT"
] | null | null | null | app/forms.py | LoisaKitakaya/The-Blog | 6f2abd70e7d65de938f162b6219c468e5736a1da | [
"MIT"
] | null | null | null | from django import forms
from .models import *
# create your forms
class PostArticle(forms.ModelForm):
    """ModelForm for Article: exposes every editable model field except
    article_author, slug and posted_on."""
    class Meta:
        model = Article
        # Bug fix: the attribute was misspelled "fieldS", which Django's
        # ModelForm metaclass silently ignored, leaving field selection to
        # "exclude" alone. The corrected spelling yields the same effective
        # field set ('__all__' minus the excluded names), now explicitly.
        fields = '__all__'
        exclude = ['article_author', 'slug', 'posted_on']
class PostComment(forms.ModelForm):
class Meta:
model = Comment
fields = ['comment'] | 17.2 | 57 | 0.622093 | from django import forms
from .models import *
class PostArticle(forms.ModelForm):
    """ModelForm for Article: exposes every editable model field except
    article_author, slug and posted_on."""
    class Meta:
        model = Article
        # Bug fix: the attribute was misspelled "fieldS", which Django's
        # ModelForm metaclass silently ignored, leaving field selection to
        # "exclude" alone. The corrected spelling yields the same effective
        # field set ('__all__' minus the excluded names), now explicitly.
        fields = '__all__'
        exclude = ['article_author', 'slug', 'posted_on']
class PostComment(forms.ModelForm):
class Meta:
model = Comment
fields = ['comment'] | true | true |
f72987f95fdc25137b7bca9a81ffdf51642c3e8f | 3,178 | py | Python | django_three/basicforms/basicforms/settings.py | NNDEV1/DjangoStuff | d8b850dce41a18c807a412f7644abae80f3d7c8e | [
"MIT"
] | 1 | 2021-08-14T14:48:37.000Z | 2021-08-14T14:48:37.000Z | django_three/basicforms/basicforms/settings.py | NNDEV1/DjangoStuff | d8b850dce41a18c807a412f7644abae80f3d7c8e | [
"MIT"
] | null | null | null | django_three/basicforms/basicforms/settings.py | NNDEV1/DjangoStuff | d8b850dce41a18c807a412f7644abae80f3d7c8e | [
"MIT"
] | null | null | null | """
Django settings for basicforms project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-level template directory; wired into TEMPLATES['DIRS'] below.
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source control; load it from the
# environment before any production deployment.
SECRET_KEY = 'r7^%rvbmkoi%v%def+cf3s1a#&7d5w_731ctf0r$3q92hl=1p%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'basicapp'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'basicforms.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'basicforms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.837398 | 91 | 0.699182 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Project-level template directory; wired into TEMPLATES['DIRS'] below.
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source control; load it from the
# environment before any production deployment.
SECRET_KEY = 'r7^%rvbmkoi%v%def+cf3s1a#&7d5w_731ctf0r$3q92hl=1p%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'basicapp'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'basicforms.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'basicforms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f729888ae93196c5c5c069ec12d59f2077f3c962 | 3,467 | py | Python | azure/mgmt/network/v2017_09_01/models/application_gateway_url_path_map.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_09_01/models/application_gateway_url_path_map.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_09_01/models/application_gateway_url_path_map.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayUrlPathMap(SubResource):
    """URL path map of an application gateway: maps URL paths to backend
    pools/settings for PathBasedRouting.

    :param id: Resource ID.
    :param default_backend_address_pool: Default backend address pool
     resource of the URL path map.
    :param default_backend_http_settings: Default backend http settings
     resource of the URL path map.
    :param default_redirect_configuration: Default redirect configuration
     resource of the URL path map.
    :param path_rules: Path rules of the URL path map resource.
    :param provisioning_state: Provisioning state of the resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :param name: Name of the resource, unique within a resource group; can
     be used to access the resource.
    :param etag: A unique read-only string that changes whenever the
     resource is updated.
    :param type: Type of the resource.
    """

    # Maps each constructor argument onto its REST payload key path and
    # serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
        'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
        'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
        'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, id=None, default_backend_address_pool=None, default_backend_http_settings=None, default_redirect_configuration=None, path_rules=None, provisioning_state=None, name=None, etag=None, type=None):
        # 'id' is handled by the SubResource base; everything else is stored
        # verbatim on the instance.
        super(ApplicationGatewayUrlPathMap, self).__init__(id=id)
        self.name = name
        self.etag = etag
        self.type = type
        self.path_rules = path_rules
        self.provisioning_state = provisioning_state
        self.default_backend_address_pool = default_backend_address_pool
        self.default_backend_http_settings = default_backend_http_settings
        self.default_redirect_configuration = default_redirect_configuration
| 48.830986 | 216 | 0.674358 |
from .sub_resource import SubResource
class ApplicationGatewayUrlPathMap(SubResource):
    """UrlPathMaps give a url path to the backend mapping information for
    PathBasedRouting.

    :param id: Resource ID.
    :param default_backend_address_pool: Default backend address pool of the
        URL path map.
    :param default_backend_http_settings: Default backend http settings of
        the URL path map.
    :param default_redirect_configuration: Default redirect configuration of
        the URL path map.
    :param path_rules: Path rules of the URL path map resource.
    :param provisioning_state: Provisioning state of the resource.
    :param name: Name of the resource, unique within a resource group.
    :param etag: Read-only string that changes whenever the resource is
        updated.
    :param type: Type of the resource.
    """
    # Serialization metadata: attribute name -> REST payload key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
        'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
        'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
        'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(self, id=None, default_backend_address_pool=None, default_backend_http_settings=None, default_redirect_configuration=None, path_rules=None, provisioning_state=None, name=None, etag=None, type=None):
        # Forward the resource ID to the SubResource base; store the rest.
        super(ApplicationGatewayUrlPathMap, self).__init__(id=id)
        self.default_backend_address_pool = default_backend_address_pool
        self.default_backend_http_settings = default_backend_http_settings
        self.default_redirect_configuration = default_redirect_configuration
        self.path_rules = path_rules
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
        self.type = type
| true | true |
f729894909e1053453798c214ec730a6afb2059d | 1,694 | py | Python | ivi/agilent/agilentDSOX3034A.py | edupo/python-ivi | 8105d8064503725dde781f0378d75db58defaecb | [
"MIT"
] | null | null | null | ivi/agilent/agilentDSOX3034A.py | edupo/python-ivi | 8105d8064503725dde781f0378d75db58defaecb | [
"MIT"
] | null | null | null | ivi/agilent/agilentDSOX3034A.py | edupo/python-ivi | 8105d8064503725dde781f0378d75db58defaecb | [
"MIT"
] | null | null | null | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent3000A import *
class agilentDSOX3034A(agilent3000A):
    """Agilent InfiniiVision DSOX3034A IVI oscilloscope driver.

    Configured for 4 analog channels, 0 digital channels and a 350 MHz
    bandwidth.
    """

    def __init__(self, *args, **kwargs):
        # Record the instrument ID before the base initializer runs.
        self.__dict__.setdefault('_instrument_id', 'DSO-X 3034A')
        super(agilentDSOX3034A, self).__init__(*args, **kwargs)
        self._bandwidth = 350e6
        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = (self._analog_channel_count
                               + self._digital_channel_count)
        self._init_channels()
| 37.644444 | 86 | 0.755018 |
from .agilent3000A import *
class agilentDSOX3034A(agilent3000A):
    """Agilent InfiniiVision DSOX3034A IVI oscilloscope driver.

    4 analog channels, 0 digital channels, 350 MHz bandwidth.
    """
    def __init__(self, *args, **kwargs):
        # Record the instrument ID before the base initializer runs.
        self.__dict__.setdefault('_instrument_id', 'DSO-X 3034A')
        super(agilentDSOX3034A, self).__init__(*args, **kwargs)
        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 350e6
        self._init_channels()
| true | true |
f72989e816d65be550aa2241f0ca3cd63ca452f2 | 494 | py | Python | test.py | JiamingBai/InteractiveDataScience.github.io | c43b7bda50356a4b16b6804c3eb8bcfe33ce6b13 | [
"MIT"
] | null | null | null | test.py | JiamingBai/InteractiveDataScience.github.io | c43b7bda50356a4b16b6804c3eb8bcfe33ce6b13 | [
"MIT"
] | null | null | null | test.py | JiamingBai/InteractiveDataScience.github.io | c43b7bda50356a4b16b6804c3eb8bcfe33ce6b13 | [
"MIT"
] | null | null | null | import sys
import json
import cgi
# Parse the incoming CGI form/query-string parameters.
fs = cgi.FieldStorage()
# Emit the response header plus the blank line that terminates the headers.
sys.stdout.write("Content-Type: application/json")
sys.stdout.write("\n")
sys.stdout.write("\n")
# Echo the received parameters back as a JSON document.
result = {}
result['success'] = True
result['message'] = "The command Completed Successfully"
result['keys'] = ",".join(fs.keys())
d = {}
for k in fs.keys():
    d[k] = fs.getvalue(k)
result['data'] = d
sys.stdout.write(json.dumps(result,indent=1))
sys.stdout.write("\n")
# Bug fix: the script used to call sys.stdout.close() and then print(),
# which raised "ValueError: I/O operation on closed file" on every run
# (and any extra text after the JSON body would have corrupted the
# response anyway). Flush instead of closing and drop the stray print.
sys.stdout.flush()
| 15.4375 | 56 | 0.672065 | import sys
import json
import cgi
# Parse the incoming CGI form/query-string parameters.
fs = cgi.FieldStorage()
# Response header plus the blank line separating headers from the body.
sys.stdout.write("Content-Type: application/json")
sys.stdout.write("\n")
sys.stdout.write("\n")
# Echo the received parameters back as a JSON document.
result = {}
result['success'] = True
result['message'] = "The command Completed Successfully"
result['keys'] = ",".join(fs.keys())
d = {}
for k in fs.keys():
    d[k] = fs.getvalue(k)
result['data'] = d
sys.stdout.write(json.dumps(result,indent=1))
sys.stdout.write("\n")
sys.stdout.close()
# NOTE(review): sys.stdout was just closed above, so this print raises
# "ValueError: I/O operation on closed file" at runtime.
print("python script return value")
| true | true |
f72989fb32f8814dbb34e9d1777eb25d5a1ab37f | 4,279 | py | Python | bcbio/pipeline/merge.py | arvados/bcbio-nextgen | 2a5cfa8c3a1d540bb2f2e66f51835042195cbc87 | [
"MIT"
] | 3 | 2015-11-18T07:17:54.000Z | 2021-04-28T13:58:37.000Z | bcbio/pipeline/merge.py | yong27/bcbio-nextgen | 9320479d8f21677b61ed1274b4da23d569c686ae | [
"MIT"
] | null | null | null | bcbio/pipeline/merge.py | yong27/bcbio-nextgen | 9320479d8f21677b61ed1274b4da23d569c686ae | [
"MIT"
] | null | null | null | """Handle multiple samples present on a single flowcell
Merges samples located in multiple lanes on a flowcell. Unique sample names identify
items to combine within a group.
"""
import os
import shutil
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.provenance import do, system
def combine_fastq_files(in_files, work_dir, config):
    """Concatenate per-lane fastq pairs into a single pair inside work_dir.

    :param in_files: list of (fastq1, fastq2) tuples; fastq2 may be None for
        single-ended input.
    :param work_dir: directory that receives the combined files.
    :param config: configuration passed through to utils.save_diskspace.
    :returns: the single input tuple untouched when only one pair is given,
        otherwise (out1, out2) output paths (out2 is None for single-ended).
    """
    if len(in_files) == 1:
        # Nothing to combine; hand back the lone pair as-is.
        return in_files[0]

    first1, first2 = in_files[0]
    out1 = os.path.join(work_dir, os.path.basename(first1))
    out2 = os.path.join(work_dir, os.path.basename(first2)) if first2 else None

    def _concat(paths, target):
        # Append every input file onto the target in order.
        with open(target, "a") as out_handle:
            for path in paths:
                with open(path) as in_handle:
                    shutil.copyfileobj(in_handle, out_handle)

    # Skip concatenation when a combined file already exists.
    if not os.path.exists(out1):
        _concat([f1 for f1, _ in in_files], out1)
    if out2 and not os.path.exists(out2):
        _concat([f2 for _, f2 in in_files], out2)

    # Report each merged input to utils.save_diskspace.
    for f1, f2 in in_files:
        utils.save_diskspace(f1, "fastq merged to %s" % out1, config)
        if f2:
            utils.save_diskspace(f2, "fastq merged to %s" % out2, config)
    return out1, out2
def merge_bam_files(bam_files, work_dir, config, out_file=None, batch=None):
    """Merge multiple BAM files from a sample into a single BAM for processing.

    Checks system open file limit and merges in batches if necessary to avoid
    file handle limits.

    :param bam_files: input BAM paths; returned unchanged when only one.
    :param work_dir: directory for the merged output when out_file is None.
    :param config: run configuration (program paths, resources, core count).
    :param out_file: optional explicit output path.
    :param batch: internal batch index used by the recursive batched merge.
    :returns: path to the merged, indexed BAM file.
    """
    if len(bam_files) == 1:
        return bam_files[0]
    else:
        if out_file is None:
            out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
        if batch is not None:
            # Give each recursive sub-merge a distinct intermediate name.
            base, ext = os.path.splitext(out_file)
            out_file = "%s-b%s%s" % (base, batch, ext)
        if not utils.file_exists(out_file) or not utils.file_exists(out_file + ".bai"):
            sambamba = config_utils.get_program("sambamba", config)
            resources = config_utils.get_resources("samtools", config)
            num_cores = config["algorithm"].get("num_cores", 1)
            # NOTE(review): max_mem is computed here but never referenced by
            # the sambamba command template below.
            max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
                                                 2, "decrease").upper()
            # sambamba opens 4 handles per file, so try to guess a reasonable batch size
            batch_size = (system.open_file_limit() // 4) - 100
            if len(bam_files) > batch_size:
                # Recursively merge in batches, then merge the batch outputs.
                bam_files = [merge_bam_files(xs, work_dir, config, out_file, i)
                             for i, xs in enumerate(utils.partition_all(batch_size, bam_files))]
            with tx_tmpdir(config) as tmpdir:
                with utils.chdir(tmpdir):
                    with file_transaction(config, out_file) as tx_out_file:
                        with file_transaction(config, "%s.list" % os.path.splitext(out_file)[0]) as tx_bam_file_list:
                            with open(tx_bam_file_list, "w") as out_handle:
                                for f in sorted(bam_files):
                                    out_handle.write("%s\n" % f)
                            # cmd.format(**locals()) below relies on the local
                            # names sambamba, tx_out_file, num_cores and
                            # tx_bam_file_list -- do not rename them.
                            cmd = _sambamba_merge(bam_files)
                            do.run(cmd.format(**locals()), "Merge bam files to %s" % os.path.basename(out_file),
                                   None)
            for b in bam_files:
                utils.save_diskspace(b, "BAM merged to %s" % out_file, config)
        bam.index(out_file, config)
        return out_file
def _sambamba_merge(bam_files):
    """Return the sambamba merge command template.

    The {sambamba}, {tx_out_file}, {num_cores} and {tx_bam_file_list}
    placeholders are filled in by the caller via str.format.
    """
    if len(bam_files) > system.open_file_limit():
        msg = ("More files to merge (%s) than available open file descriptors (%s)\n"
               "See documentation on tips for changing file limits:\n"
               "https://bcbio-nextgen.readthedocs.org/en/latest/contents/"
               "parallel.html#tuning-systems-for-scale"
               % (len(bam_files), system.open_file_limit()))
        raise IOError(msg)
    return "{sambamba} merge {tx_out_file} -t {num_cores} `cat {tx_bam_file_list}`"
| 49.183908 | 117 | 0.592662 | import os
import shutil
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.provenance import do, system
def combine_fastq_files(in_files, work_dir, config):
    """Concatenate per-lane fastq pairs into a single pair inside work_dir.

    :param in_files: list of (fastq1, fastq2) tuples; fastq2 may be None.
    :param work_dir: destination directory for the combined files.
    :param config: configuration passed to utils.save_diskspace.
    :returns: the single input pair unchanged, or (out1, out2) output paths.
    """
    if len(in_files) == 1:
        return in_files[0]
    else:
        cur1, cur2 = in_files[0]
        # Output names reuse the basenames of the first input pair.
        out1 = os.path.join(work_dir, os.path.basename(cur1))
        out2 = os.path.join(work_dir, os.path.basename(cur2)) if cur2 else None
        # Skip concatenation when the combined file already exists.
        if not os.path.exists(out1):
            with open(out1, "a") as out_handle:
                for (cur1, _) in in_files:
                    with open(cur1) as in_handle:
                        shutil.copyfileobj(in_handle, out_handle)
        if out2 and not os.path.exists(out2):
            with open(out2, "a") as out_handle:
                for (_, cur2) in in_files:
                    with open(cur2) as in_handle:
                        shutil.copyfileobj(in_handle, out_handle)
        # Report each merged input to utils.save_diskspace.
        for f1, f2 in in_files:
            utils.save_diskspace(f1, "fastq merged to %s" % out1, config)
            if f2:
                utils.save_diskspace(f2, "fastq merged to %s" % out2, config)
        return out1, out2
def merge_bam_files(bam_files, work_dir, config, out_file=None, batch=None):
    """Merge multiple BAM files from a sample into a single indexed BAM.

    :param bam_files: input BAM paths; returned unchanged when only one.
    :param work_dir: directory for the merged output when out_file is None.
    :param config: run configuration (program paths, resources, core count).
    :param out_file: optional explicit output path.
    :param batch: internal batch index used by the recursive batched merge.
    :returns: path to the merged BAM file.
    """
    if len(bam_files) == 1:
        return bam_files[0]
    else:
        if out_file is None:
            out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
        if batch is not None:
            # Give each recursive sub-merge a distinct intermediate name.
            base, ext = os.path.splitext(out_file)
            out_file = "%s-b%s%s" % (base, batch, ext)
        if not utils.file_exists(out_file) or not utils.file_exists(out_file + ".bai"):
            sambamba = config_utils.get_program("sambamba", config)
            resources = config_utils.get_resources("samtools", config)
            num_cores = config["algorithm"].get("num_cores", 1)
            max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
                                                 2, "decrease").upper()
            # Batch to stay under the OS open-file-descriptor limit.
            batch_size = (system.open_file_limit() // 4) - 100
            if len(bam_files) > batch_size:
                bam_files = [merge_bam_files(xs, work_dir, config, out_file, i)
                             for i, xs in enumerate(utils.partition_all(batch_size, bam_files))]
            with tx_tmpdir(config) as tmpdir:
                with utils.chdir(tmpdir):
                    with file_transaction(config, out_file) as tx_out_file:
                        with file_transaction(config, "%s.list" % os.path.splitext(out_file)[0]) as tx_bam_file_list:
                            with open(tx_bam_file_list, "w") as out_handle:
                                for f in sorted(bam_files):
                                    out_handle.write("%s\n" % f)
                            # cmd.format(**locals()) relies on the local names
                            # sambamba, tx_out_file, num_cores and
                            # tx_bam_file_list -- do not rename them.
                            cmd = _sambamba_merge(bam_files)
                            do.run(cmd.format(**locals()), "Merge bam files to %s" % os.path.basename(out_file),
                                   None)
            for b in bam_files:
                utils.save_diskspace(b, "BAM merged to %s" % out_file, config)
        bam.index(out_file, config)
        return out_file
def _sambamba_merge(bam_files):
    """Return the sambamba merge command template.

    The {sambamba}, {tx_out_file}, {num_cores} and {tx_bam_file_list}
    placeholders are filled in by the caller via str.format.
    """
    if len(bam_files) > system.open_file_limit():
        raise IOError("More files to merge (%s) than available open file descriptors (%s)\n"
                      "See documentation on tips for changing file limits:\n"
                      "https://bcbio-nextgen.readthedocs.org/en/latest/contents/"
                      "parallel.html#tuning-systems-for-scale"
                      % (len(bam_files), system.open_file_limit()))
    return "{sambamba} merge {tx_out_file} -t {num_cores} `cat {tx_bam_file_list}`"
| true | true |
f7298bd1ea920b8cff4d35c6845a6988a31d944d | 253 | py | Python | wave/transformold/transform.py | jedhsu/wave | a05d8f4b0a96722bdc2f5a514646c7a44681982b | [
"Apache-2.0"
] | null | null | null | wave/transformold/transform.py | jedhsu/wave | a05d8f4b0a96722bdc2f5a514646c7a44681982b | [
"Apache-2.0"
] | null | null | null | wave/transformold/transform.py | jedhsu/wave | a05d8f4b0a96722bdc2f5a514646c7a44681982b | [
"Apache-2.0"
] | null | null | null | # from abc import ABCMeta, abstractmethod
# from typing import Iterator
# class Transform(metaclass=ABCMeta):
# def __init__(self):
# pass
# @property
# @abstractmethod
# def waveform(self) -> Iterator[float]:
# pass
| 18.071429 | 44 | 0.636364 | true | true | |
f7298c3dae02b6d779bb5e0dde0e7ea1087b0d3e | 2,309 | py | Python | Module1/Day04/module1_day04_variables.py | puczilka/100DaysPython | 95f761ae506b4175e96b87a43f7177dc3597e586 | [
"MIT"
] | 23 | 2019-05-31T18:00:26.000Z | 2021-11-21T19:08:19.000Z | Module1/Day04/module1_day04_variables.py | btruck552/100DaysPython | 1e45a10387da6d4ebdf8aa5fe13843a4509c8b62 | [
"MIT"
] | null | null | null | Module1/Day04/module1_day04_variables.py | btruck552/100DaysPython | 1e45a10387da6d4ebdf8aa5fe13843a4509c8b62 | [
"MIT"
] | 42 | 2019-05-31T17:54:28.000Z | 2022-02-12T22:09:51.000Z | """
Author: CaptCorpMURICA
Project: 100DaysPython
File: module1_day04_variables.py
Creation Date: 6/2/2019, 8:55 AM
Description: Learn about using variables in python.
"""
# Variables need to start with a letter or an underscore. Numbers can be used in the variable name as long as it is not
# the first character. Additionally, python is case sensitive, so the same word can store multiple items as long as the
# casing differs.
greeting = "Hello"
_name = "General Kenobi."
Greeting = "There"
_bestLine_ep3_ = "You are a bold one."
# Using string concatenation:
print(greeting + " " + Greeting + "\n\t" + _name + " " + _bestLine_ep3_)
# Using string replacement:
print("{} {}\n\t{} {}".format(greeting, Greeting, _name, _bestLine_ep3_))
# Variables can also store numeric values.
released = 2005
# Using string concatenation:
print("Revenge of the Sith was released on May 4, " + str(released) + ".")
# Using string replacement:
print("Revenge of the Sith was released on May 4, {}.".format(released))
# Variables are commonly used in arithmetic operations.
a = 3
b = 4
c = (a ** 2 + b ** 2) ** .5
print("Pythagorean Theorem: a^2 + b^2 = c^2, so when a = {} and b = {}, then c = {}".format(a, b, c))
# You can test for contents in a variable. If the test results **True**, then the tested condition is in the variable.
# Otherwise, the test returns **False**.
film = "Revenge of the Sith"
print("Sith" in film)
print("sith" in film)
print("sith" in film.lower())
# Python variables get their type with the data that is stored. Unlike other programming languages, you do not declare a
# type for the variable. Additionally, the same variable can be overwritten with new data and a different type. This
# should be taken into account when creating python programs.
var = "Variables are mutable"
# NOTE: a bare type(var) expression is evaluated and discarded when this
# file runs as a script; it only echoes a result in the interactive REPL.
type(var)
var = 3
type(var)
var = 3.5
type(var)
# If the variable contains a numeric value, it can be converted to an integer type with the int() function.
var = int(var)
type(var)
# The variable can be converted to a string with the str() function regardless of the contents.
var = str(var)
type(var)
# If the variable contains a numeric value, it can be converted to an float type with the float() function.
var = float(var)
type(var)
var = True
type(var)
| 36.078125 | 120 | 0.706366 |
# Variable names may start with a letter or an underscore; Python is
# case-sensitive, so `greeting` and `Greeting` are distinct names.
greeting = "Hello"
_name = "General Kenobi."
Greeting = "There"
_bestLine_ep3_ = "You are a bold one."
# String concatenation vs. str.format for building the same output:
print(greeting + " " + Greeting + "\n\t" + _name + " " + _bestLine_ep3_)
print("{} {}\n\t{} {}".format(greeting, Greeting, _name, _bestLine_ep3_))
# Numeric values must be converted with str() before concatenation:
released = 2005
print("Revenge of the Sith was released on May 4, " + str(released) + ".")
print("Revenge of the Sith was released on May 4, {}.".format(released))
# Basic arithmetic: hypotenuse via the Pythagorean theorem (** .5 = sqrt).
a = 3
b = 4
c = (a ** 2 + b ** 2) ** .5
print("Pythagorean Theorem: a^2 + b^2 = c^2, so when a = {} and b = {}, then c = {}".format(a, b, c))
# Membership tests with `in` are case-sensitive.
film = "Revenge of the Sith"
print("Sith" in film)
print("sith" in film)
print("sith" in film.lower())
# A variable's type follows its current value; rebinding may change it.
# NOTE: bare type(var) expressions only display a result in a REPL; as a
# script their return value is discarded.
var = "Variables are mutable"
type(var)
var = 3
type(var)
var = 3.5
type(var)
# Explicit conversions between numeric and string types:
var = int(var)
type(var)
var = str(var)
type(var)
var = float(var)
type(var)
var = True
type(var)
| true | true |
f7298cb548fa7c1de09bf4bf092c51b5d80b4b4f | 14,552 | py | Python | ansible/modules/cloud/amazon/lambda_event.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | ansible/modules/cloud/amazon/lambda_event.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | ansible/modules/cloud/amazon/lambda_event.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #!/usr/bin/python
# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_event
short_description: Creates, updates or deletes AWS Lambda function event mappings.
description:
- This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
AWS Lambda invokes the function.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself and M(lambda_alias) to manage function aliases.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
options:
lambda_function_arn:
description:
- The name or ARN of the lambda function.
required: true
aliases: ['function_name', 'function_arn']
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
required: true
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
required: false
event_source:
description:
- Source of the event that triggers the lambda function.
required: false
default: stream
choices: ['stream']
source_params:
description:
- Sub-parameters required for event source.
- I(== stream event source ==)
- C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
- C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
- C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
time of invoking your function. Default is 100.
- C(starting_position) The position in the stream where AWS Lambda should start reading.
Choices are TRIM_HORIZON or LATEST.
required: true
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Example that creates a lambda event notification for a DynamoDB stream
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: DynamoDB stream event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: stream
function_name: "{{ function_name }}"
alias: Dev
source_params:
source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
enabled: True
batch_size: 100
starting_position: TRIM_HORIZON
- name: Show source event
debug:
var: lambda_stream_events
'''
RETURN = '''
---
lambda_stream_events:
description: list of dictionaries returned by the API describing stream event mappings
returned: success
type: list
'''
import re
import sys
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, use_boto3=True):
        """Open a boto3 client for each requested AWS service.

        :param ansible_obj: AnsibleModule instance; fail_json is called on
            connection/authorization errors
        :param resources: list of AWS service names to create clients for
            (mutated in place: 'iam' is always appended)
        :param use_boto3: passed through to get_aws_connection_info
        """
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            # an IAM client is always needed for the account-id lookup below
            resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # set account ID; best-effort -- falls back to an empty string when
        # the IAM lookup fails (e.g. insufficient permissions)
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the boto3 client previously created for *resource*."""
        return self.resource_client[resource]
def pc(key):
    """Convert a snake_case key into its Pascal-case equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case string to convert
    :return: Pascal-case string
    """
    pieces = key.split('_')
    return ''.join(piece.capitalize() for piece in pieces)
def ordered_obj(obj):
    """Return a canonically ordered representation of *obj* for comparison.

    Dicts become sorted lists of (key, value) pairs and lists are sorted,
    both recursively; any other value is returned unchanged.

    :param obj: structure to normalize
    :return: order-independent representation of *obj*
    """
    if isinstance(obj, dict):
        pairs = ((key, ordered_obj(value)) for key, value in obj.items())
        return sorted(pairs)
    if isinstance(obj, list):
        return sorted(ordered_obj(item) for item in obj)
    return obj
def set_api_sub_params(params):
    """Translate module sub-parameters into boto3 API keyword form.

    Keys are converted from snake_case to Pascal case; entries whose value
    is falsy (None, 0, '', False) are dropped, preserving the original
    truthiness filter.

    :param params: dict of sub-parameters from the Ansible module
    :return: dict keyed in Pascal case with falsy values omitted
    """
    return {
        ''.join(token.capitalize() for token in name.split('_')): value
        for name, value in params.items()
        if value
    }
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Checks the lambda function name for illegal characters and length
    limits, expands a bare name into a full ARN, and appends the version
    or alias qualifier when one is supplied.

    :param module: AnsibleModule instance; fail_json is called on errors
    :param aws: AWSConnection supplying region and account_id for the ARN
    :return: None (module.params['lambda_function_arn'] is updated in place)
    """
    function_name = module.params['lambda_function_arn']

    # validate function name: only word characters, hyphens and colons.
    # Raw string avoids the invalid '\-' escape-sequence warning.
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )

    # bare names are limited to 64 characters, full ARNs to 140
    if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'):
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
    elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'):
        module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))

    # check if 'function_name' needs to be expanded in full ARN format
    if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
        function_name = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)

    # append the version/alias qualifier, if one was supplied
    qualifier = get_qualifier(module)
    if qualifier:
        function_arn = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)

    return
def get_qualifier(module):
    """Return the lambda function qualifier, if any.

    A positive 'version' takes precedence over 'alias'; when neither is
    set, None is returned.

    :param module: AnsibleModule whose params hold 'version' and 'alias'
    :return: qualifier string or None
    """
    params = module.params
    if params['version'] > 0:
        return str(params['version'])
    if params['alias']:
        return str(params['alias'])
    return None
# ---------------------------------------------------------------------------------------------------
#
# Lambda Event Handlers
#
# This section defines a lambda_event_X function where X is an AWS service capable of initiating
# the execution of a Lambda function (pull only).
#
# ---------------------------------------------------------------------------------------------------
def lambda_event_stream(module, aws):
    """
    Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.

    Looks up any existing event source mapping for the function ARN, then
    creates, updates or removes the mapping so it matches the requested state.

    :param module: AnsibleModule instance supplying params and check_mode
    :param aws: AWSConnection used to obtain the lambda client
    :return: snake_cased dict with a 'changed' flag and the resulting 'events' facts
    """

    client = aws.client('lambda')
    facts = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    api_params = dict(FunctionName=module.params['lambda_function_arn'])

    # check if required sub-parameters are present and valid
    source_params = module.params['source_params']

    source_arn = source_params.get('source_arn')
    if source_arn:
        api_params.update(EventSourceArn=source_arn)
    else:
        module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")

    # check if optional sub-parameters are valid, if present
    batch_size = source_params.get('batch_size')
    if batch_size:
        try:
            source_params['batch_size'] = int(batch_size)
        except ValueError:
            module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))

    # optional boolean value needs special treatment as not present does not imply False
    source_param_enabled = module.boolean(source_params.get('enabled', 'True'))

    # check if event mapping exist
    try:
        facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
        if facts:
            current_state = 'present'
    except ClientError as e:
        module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))

    if state == 'present':
        if current_state == 'absent':

            # a new mapping additionally needs a starting position
            starting_position = source_params.get('starting_position')
            if starting_position:
                api_params.update(StartingPosition=starting_position)
            else:
                module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")

            if source_arn:
                api_params.update(Enabled=source_param_enabled)
            if source_params.get('batch_size'):
                api_params.update(BatchSize=source_params.get('batch_size'))

            # in check mode 'changed' is still reported, but nothing is created
            try:
                if not module.check_mode:
                    facts = client.create_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))

        else:
            # current_state is 'present': diff desired vs. existing mapping
            api_params = dict(FunctionName=module.params['lambda_function_arn'])
            current_mapping = facts[0]
            api_params.update(UUID=current_mapping['UUID'])
            mapping_changed = False

            # check if anything changed
            if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
                api_params.update(BatchSize=source_params['batch_size'])
                mapping_changed = True

            # NOTE(review): module.boolean returns a bool, so this 'is not
            # None' guard appears to be always true -- confirm intent
            if source_param_enabled is not None:
                if source_param_enabled:
                    if current_mapping['State'] not in ('Enabled', 'Enabling'):
                        api_params.update(Enabled=True)
                        mapping_changed = True
                else:
                    if current_mapping['State'] not in ('Disabled', 'Disabling'):
                        api_params.update(Enabled=False)
                        mapping_changed = True

            if mapping_changed:
                try:
                    if not module.check_mode:
                        facts = client.update_event_source_mapping(**api_params)
                    changed = True
                except (ClientError, ParamValidationError, MissingParametersError) as e:
                    module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))

    else:
        if current_state == 'present':
            # remove the stream event mapping
            api_params = dict(UUID=facts[0]['UUID'])

            try:
                if not module.check_mode:
                    facts = client.delete_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))

    return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
def main():
    """Module entry point.

    Builds the argument spec, validates parameters, and dispatches to the
    'lambda_event_<event_source>' handler defined in this module.
    """
    # handle to this module so the event handler can be looked up by name
    this_module = sys.modules[__name__]

    source_choices = ["stream"]

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
            event_source=dict(required=False, default="stream", choices=source_choices),
            source_params=dict(type='dict', required=True, default=None),
            alias=dict(required=False, default=None),
            version=dict(type='int', required=False, default=0),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['alias', 'version']],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    # e.g. event_source 'stream' dispatches to lambda_event_stream()
    this_module_function = getattr(this_module, 'lambda_event_{0}'.format(module.params['event_source'].lower()))

    results = this_module_function(module, aws)

    module.exit_json(**results)
if __name__ == '__main__':
main()
| 34.24 | 134 | 0.628848 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_event
short_description: Creates, updates or deletes AWS Lambda function event mappings.
description:
- This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
AWS Lambda invokes the function.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself and M(lambda_alias) to manage function aliases.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
options:
lambda_function_arn:
description:
- The name or ARN of the lambda function.
required: true
aliases: ['function_name', 'function_arn']
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
required: true
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
required: false
event_source:
description:
- Source of the event that triggers the lambda function.
required: false
default: stream
choices: ['stream']
source_params:
description:
- Sub-parameters required for event source.
- I(== stream event source ==)
- C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
- C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
- C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
time of invoking your function. Default is 100.
- C(starting_position) The position in the stream where AWS Lambda should start reading.
Choices are TRIM_HORIZON or LATEST.
required: true
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Example that creates a lambda event notification for a DynamoDB stream
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: DynamoDB stream event mapping
lambda_event:
state: "{{ state | default('present') }}"
event_source: stream
function_name: "{{ function_name }}"
alias: Dev
source_params:
source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
enabled: True
batch_size: 100
starting_position: TRIM_HORIZON
- name: Show source event
debug:
var: lambda_stream_events
'''
RETURN = '''
---
lambda_stream_events:
description: list of dictionaries returned by the API describing stream event mappings
returned: success
type: list
'''
import re
import sys
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, use_boto3=True):
        """Open a boto3 client for each requested AWS service.

        :param ansible_obj: AnsibleModule instance; fail_json is called on
            connection/authorization errors
        :param resources: list of AWS service names to create clients for
            (mutated in place: 'iam' is always appended)
        :param use_boto3: passed through to get_aws_connection_info
        """
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)

            self.resource_client = dict()
            if not resources:
                resources = ['lambda']

            # an IAM client is always needed for the account-id lookup below
            resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region was not provided, fall back to the session's region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # set account ID; best-effort -- falls back to an empty string when
        # the IAM lookup fails (e.g. insufficient permissions)
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the boto3 client previously created for *resource*."""
        return self.resource_client[resource]
def pc(key):
    """Convert a snake_case key into its Pascal-case equivalent.

    For example, 'this_function_name' becomes 'ThisFunctionName'.

    :param key: snake_case string to convert
    :return: Pascal-case string
    """
    pieces = key.split('_')
    return ''.join(piece.capitalize() for piece in pieces)
def ordered_obj(obj):
    """Return a canonically ordered representation of *obj* for comparison.

    Dicts become sorted lists of (key, value) pairs and lists are sorted,
    both recursively; any other value is returned unchanged.

    :param obj: structure to normalize
    :return: order-independent representation of *obj*
    """
    if isinstance(obj, dict):
        pairs = ((key, ordered_obj(value)) for key, value in obj.items())
        return sorted(pairs)
    if isinstance(obj, list):
        return sorted(ordered_obj(item) for item in obj)
    return obj
def set_api_sub_params(params):
    """Translate module sub-parameters into boto3 API keyword form.

    Keys are converted from snake_case to Pascal case; entries whose value
    is falsy (None, 0, '', False) are dropped, preserving the original
    truthiness filter.

    :param params: dict of sub-parameters from the Ansible module
    :return: dict keyed in Pascal case with falsy values omitted
    """
    return {
        ''.join(token.capitalize() for token in name.split('_')): value
        for name, value in params.items()
        if value
    }
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Checks the lambda function name for illegal characters and length
    limits, expands a bare name into a full ARN, and appends the version
    or alias qualifier when one is supplied.

    :param module: AnsibleModule instance; fail_json is called on errors
    :param aws: AWSConnection supplying region and account_id for the ARN
    :return: None (module.params['lambda_function_arn'] is updated in place)
    """
    function_name = module.params['lambda_function_arn']

    # validate function name: only word characters, hyphens and colons.
    # Raw string avoids the invalid '\-' escape-sequence warning.
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )

    # bare names are limited to 64 characters, full ARNs to 140
    if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'):
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
    elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'):
        module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))

    # expand a bare function name into a full ARN
    if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
        function_name = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)

    # append the version/alias qualifier, if one was supplied
    qualifier = get_qualifier(module)
    if qualifier:
        function_arn = module.params['lambda_function_arn']
        module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)

    return
def get_qualifier(module):
    """Return the lambda function qualifier, if any.

    A positive 'version' takes precedence over 'alias'; when neither is
    set, None is returned.

    :param module: AnsibleModule whose params hold 'version' and 'alias'
    :return: qualifier string or None
    """
    params = module.params
    if params['version'] > 0:
        return str(params['version'])
    if params['alias']:
        return str(params['alias'])
    return None
def lambda_event_stream(module, aws):
    """
    Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.

    Looks up any existing event source mapping for the function ARN, then
    creates, updates or removes the mapping so it matches the requested state.

    :param module: AnsibleModule instance supplying params and check_mode
    :param aws: AWSConnection used to obtain the lambda client
    :return: snake_cased dict with a 'changed' flag and the resulting 'events' facts
    """
    client = aws.client('lambda')
    facts = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']
    api_params = dict(FunctionName=module.params['lambda_function_arn'])
    # required sub-parameter: the stream ARN
    source_params = module.params['source_params']
    source_arn = source_params.get('source_arn')
    if source_arn:
        api_params.update(EventSourceArn=source_arn)
    else:
        module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
    # optional sub-parameter: batch_size must be an integer when given
    batch_size = source_params.get('batch_size')
    if batch_size:
        try:
            source_params['batch_size'] = int(batch_size)
        except ValueError:
            module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
    # absent 'enabled' defaults to True rather than False
    source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
    # determine whether a mapping already exists
    try:
        facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
        if facts:
            current_state = 'present'
    except ClientError as e:
        module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
    if state == 'present':
        if current_state == 'absent':
            # a new mapping additionally needs a starting position
            starting_position = source_params.get('starting_position')
            if starting_position:
                api_params.update(StartingPosition=starting_position)
            else:
                module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
            if source_arn:
                api_params.update(Enabled=source_param_enabled)
            if source_params.get('batch_size'):
                api_params.update(BatchSize=source_params.get('batch_size'))
            # in check mode 'changed' is still reported, but nothing is created
            try:
                if not module.check_mode:
                    facts = client.create_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
        else:
            # mapping exists: diff desired vs. existing configuration
            api_params = dict(FunctionName=module.params['lambda_function_arn'])
            current_mapping = facts[0]
            api_params.update(UUID=current_mapping['UUID'])
            mapping_changed = False
            if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
                api_params.update(BatchSize=source_params['batch_size'])
                mapping_changed = True
            # NOTE(review): module.boolean returns a bool, so this 'is not
            # None' guard appears to be always true -- confirm intent
            if source_param_enabled is not None:
                if source_param_enabled:
                    if current_mapping['State'] not in ('Enabled', 'Enabling'):
                        api_params.update(Enabled=True)
                        mapping_changed = True
                else:
                    if current_mapping['State'] not in ('Disabled', 'Disabling'):
                        api_params.update(Enabled=False)
                        mapping_changed = True
            if mapping_changed:
                try:
                    if not module.check_mode:
                        facts = client.update_event_source_mapping(**api_params)
                    changed = True
                except (ClientError, ParamValidationError, MissingParametersError) as e:
                    module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
    else:
        if current_state == 'present':
            # state == 'absent': remove the existing mapping
            api_params = dict(UUID=facts[0]['UUID'])
            try:
                if not module.check_mode:
                    facts = client.delete_event_source_mapping(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
    return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
def main():
    """Module entry point.

    Builds the argument spec, validates parameters, and dispatches to the
    'lambda_event_<event_source>' handler defined in this module.
    """
    # handle to this module so the event handler can be looked up by name
    this_module = sys.modules[__name__]
    source_choices = ["stream"]
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
            event_source=dict(required=False, default="stream", choices=source_choices),
            source_params=dict(type='dict', required=True, default=None),
            alias=dict(required=False, default=None),
            version=dict(type='int', required=False, default=0),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['alias', 'version']],
        required_together=[]
    )
    # boto3 is a hard dependency of every code path below
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')
    aws = AWSConnection(module, ['lambda'])
    validate_params(module, aws)
    # e.g. event_source 'stream' dispatches to lambda_event_stream()
    this_module_function = getattr(this_module, 'lambda_event_{0}'.format(module.params['event_source'].lower()))
    results = this_module_function(module, aws)
    module.exit_json(**results)
if __name__ == '__main__':
main()
| true | true |
f7298dd321960e183c2e94eec3d29c49fdb011bf | 1,181 | py | Python | setup.py | mishrasanskriti802/island-backup | 6dd0b45ac9cb87418e05ccfeb1150657d8a8964b | [
"MIT"
] | 17 | 2016-01-28T06:09:26.000Z | 2020-01-19T08:37:01.000Z | setup.py | mishrasanskriti802/island-backup | 6dd0b45ac9cb87418e05ccfeb1150657d8a8964b | [
"MIT"
] | 28 | 2015-12-15T05:08:28.000Z | 2017-04-20T02:34:27.000Z | setup.py | mishrasanskriti802/island-backup | 6dd0b45ac9cb87418e05ccfeb1150657d8a8964b | [
"MIT"
] | 7 | 2016-06-09T14:02:21.000Z | 2020-10-01T13:55:29.000Z | from setuptools import setup, find_packages
# long_description for PyPI comes straight from the README
with open('README.rst', 'r', encoding='utf8') as f:
    readme = f.read()

# runtime dependencies are maintained in requirements.txt
with open('requirements.txt','r',encoding='utf8') as f:
    requirements = f.readlines()

# single-source the version from the package itself
# (note: this imports island_backup, and its dependencies, at build time)
version = __import__('island_backup').version

setup(
    name='island_backup',
    version=version,
    description="backup 4chan.org h.nimingban and kukuku.cc",
    long_description=readme,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3 :: Only',
        'License :: OSI Approved :: MIT License',
    ],
    url='https://github.com/littlezz/island-backup',
    author='littlezz',
    author_email='zz.at.field@gmail.com',
    license='MIT',
    packages=find_packages(exclude=['tests*',]),
    install_requires=requirements,
    tests_require=['pytest'],
    # include_package_data=True,
    # ship the bundled HTML templates with the package
    package_data={
        'island_backup': ['templates/*', 'templates/static/*']
    },
    zip_safe=False,
    # `island_backup` console command maps to island_backup.main:cli
    entry_points={
        'console_scripts': [
            'island_backup=island_backup.main:cli'
        ]
    },
)
| 26.244444 | 62 | 0.629975 | from setuptools import setup, find_packages
# long_description for PyPI comes straight from the README
with open('README.rst', 'r', encoding='utf8') as f:
    readme = f.read()

# runtime dependencies are maintained in requirements.txt
with open('requirements.txt','r',encoding='utf8') as f:
    requirements = f.readlines()

# single-source the version from the package itself
# (note: this imports island_backup, and its dependencies, at build time)
version = __import__('island_backup').version

setup(
    name='island_backup',
    version=version,
    description="backup 4chan.org h.nimingban and kukuku.cc",
    long_description=readme,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3 :: Only',
        'License :: OSI Approved :: MIT License',
    ],
    url='https://github.com/littlezz/island-backup',
    author='littlezz',
    author_email='zz.at.field@gmail.com',
    license='MIT',
    packages=find_packages(exclude=['tests*',]),
    install_requires=requirements,
    tests_require=['pytest'],
    # ship the bundled HTML templates with the package
    package_data={
        'island_backup': ['templates/*', 'templates/static/*']
    },
    zip_safe=False,
    # `island_backup` console command maps to island_backup.main:cli
    entry_points={
        'console_scripts': [
            'island_backup=island_backup.main:cli'
        ]
    },
)
| true | true |
f7298e2e8b7319492030a6159a8f000bb0cc80c2 | 9,886 | py | Python | sphinx/util/i18n.py | rdt12/sphinx | 830b3fbe2babcc8df33f767ce3a406b16c0cac1c | [
"BSD-2-Clause"
] | 2 | 2021-11-29T04:16:41.000Z | 2021-12-06T14:59:22.000Z | sphinx/util/i18n.py | Blendify/sphinx | dd00bade705c8cb661151aef9f7504c62cbb17ff | [
"BSD-2-Clause"
] | 1 | 2021-10-16T06:34:21.000Z | 2021-10-16T06:34:21.000Z | sphinx/util/i18n.py | Blendify/sphinx | dd00bade705c8cb661151aef9f7504c62cbb17ff | [
"BSD-2-Clause"
] | 1 | 2021-10-24T01:44:26.000Z | 2021-10-24T01:44:26.000Z | """
sphinx.util.i18n
~~~~~~~~~~~~~~~~
    Utility functions for the internationalization of builds: message
    catalog handling and locale-aware date formatting.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
from datetime import datetime, timezone
from os import path
from typing import TYPE_CHECKING, Callable, Generator, List, NamedTuple, Optional, Tuple, Union
import babel.dates
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
from sphinx.errors import SphinxError
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.osutil import SEP, canon_path, relpath
if TYPE_CHECKING:
from sphinx.environment import BuildEnvironment
logger = logging.getLogger(__name__)
class LocaleFileInfoBase(NamedTuple):
    """Typed fields for a message catalog location (NamedTuple base of CatalogInfo)."""

    base_dir: str  # directory containing the catalog files
    domain: str    # gettext domain (catalog name without extension)
    charset: str   # encoding used when reading the .po file
class CatalogInfo(LocaleFileInfoBase):
    """A gettext message catalog located in *base_dir*, with .po/.mo helpers."""

    @property
    def po_file(self) -> str:
        """Filename of the catalog source (``<domain>.po``)."""
        return self.domain + '.po'

    @property
    def mo_file(self) -> str:
        """Filename of the compiled catalog (``<domain>.mo``)."""
        return self.domain + '.mo'

    @property
    def po_path(self) -> str:
        """Full path of the .po source file."""
        return path.join(self.base_dir, self.po_file)

    @property
    def mo_path(self) -> str:
        """Full path of the compiled .mo file."""
        return path.join(self.base_dir, self.mo_file)

    def is_outdated(self) -> bool:
        """Return True if the .mo file is missing or older than the .po file."""
        return (
            not path.exists(self.mo_path) or
            path.getmtime(self.mo_path) < path.getmtime(self.po_path))

    def write_mo(self, locale: str) -> None:
        """Compile the .po file into a .mo file for *locale*.

        Read and write errors are logged as warnings instead of raised,
        so one broken catalog does not abort the build.
        """
        with open(self.po_path, encoding=self.charset) as file_po:
            try:
                po = read_po(file_po, locale)
            except Exception as exc:
                logger.warning(__('reading error: %s, %s'), self.po_path, exc)
                return

        with open(self.mo_path, 'wb') as file_mo:
            try:
                write_mo(file_mo, po)
            except Exception as exc:
                logger.warning(__('writing error: %s, %s'), self.mo_path, exc)
class CatalogRepository:
    """A repository for message catalogs."""

    def __init__(self, basedir: str, locale_dirs: List[str],
                 language: str, encoding: str) -> None:
        """
        :param basedir: directory the *locale_dirs* entries are relative to
        :param locale_dirs: candidate locale directories (relative paths)
        :param language: target language code; if falsy, no catalogs are found
        :param encoding: charset used to read .po files
        """
        self.basedir = basedir
        self._locale_dirs = locale_dirs
        self.language = language
        self.encoding = encoding

    @property
    def locale_dirs(self) -> Generator[str, None, None]:
        """Yield locale dirs that contain a ``<language>/LC_MESSAGES`` subdirectory."""
        if not self.language:
            return

        for locale_dir in self._locale_dirs:
            locale_dir = path.join(self.basedir, locale_dir)
            locale_path = path.join(locale_dir, self.language, 'LC_MESSAGES')
            if path.exists(locale_path):
                yield locale_dir
            else:
                logger.verbose(__('locale_dir %s does not exists'), locale_path)

    @property
    def pofiles(self) -> Generator[Tuple[str, str], None, None]:
        """Yield (LC_MESSAGES base dir, relative .po path) for every catalog found."""
        for locale_dir in self.locale_dirs:
            basedir = path.join(locale_dir, self.language, 'LC_MESSAGES')
            for root, dirnames, filenames in os.walk(basedir):
                # skip dot-directories
                # NOTE(review): removing from dirnames while iterating it can
                # skip an adjacent dot-directory -- confirm this is acceptable
                for dirname in dirnames:
                    if dirname.startswith('.'):
                        dirnames.remove(dirname)

                for filename in filenames:
                    if filename.endswith('.po'):
                        fullpath = path.join(root, filename)
                        yield basedir, relpath(fullpath, basedir)

    @property
    def catalogs(self) -> Generator[CatalogInfo, None, None]:
        """Yield a CatalogInfo for each .po file found under locale_dirs."""
        for basedir, filename in self.pofiles:
            domain = canon_path(path.splitext(filename)[0])
            yield CatalogInfo(basedir, domain, self.encoding)
def docname_to_domain(docname: str, compaction: Union[bool, str]) -> str:
    """Convert docname to domain for catalogs.

    A string *compaction* is used as the domain verbatim; a truthy boolean
    compacts the docname to its first path component; otherwise the
    docname itself is the domain.
    """
    if isinstance(compaction, str):
        return compaction
    return docname.split(SEP, 1)[0] if compaction else docname
# date_format mappings: ustrftime() to bable.dates.format_datetime()
date_format_mappings = {
'%a': 'EEE', # Weekday as locale’s abbreviated name.
'%A': 'EEEE', # Weekday as locale’s full name.
'%b': 'MMM', # Month as locale’s abbreviated name.
'%B': 'MMMM', # Month as locale’s full name.
'%c': 'medium', # Locale’s appropriate date and time representation.
'%-d': 'd', # Day of the month as a decimal number.
'%d': 'dd', # Day of the month as a zero-padded decimal number.
'%-H': 'H', # Hour (24-hour clock) as a decimal number [0,23].
'%H': 'HH', # Hour (24-hour clock) as a zero-padded decimal number [00,23].
'%-I': 'h', # Hour (12-hour clock) as a decimal number [1,12].
'%I': 'hh', # Hour (12-hour clock) as a zero-padded decimal number [01,12].
'%-j': 'D', # Day of the year as a decimal number.
'%j': 'DDD', # Day of the year as a zero-padded decimal number.
'%-m': 'M', # Month as a decimal number.
'%m': 'MM', # Month as a zero-padded decimal number.
'%-M': 'm', # Minute as a decimal number [0,59].
'%M': 'mm', # Minute as a zero-padded decimal number [00,59].
'%p': 'a', # Locale’s equivalent of either AM or PM.
'%-S': 's', # Second as a decimal number.
'%S': 'ss', # Second as a zero-padded decimal number.
'%U': 'WW', # Week number of the year (Sunday as the first day of the week)
# as a zero padded decimal number. All days in a new year preceding
# the first Sunday are considered to be in week 0.
'%w': 'e', # Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.
'%-W': 'W', # Week number of the year (Monday as the first day of the week)
# as a decimal number. All days in a new year preceding the first
# Monday are considered to be in week 0.
'%W': 'WW', # Week number of the year (Monday as the first day of the week)
# as a zero-padded decimal number.
'%x': 'medium', # Locale’s appropriate date representation.
'%X': 'medium', # Locale’s appropriate time representation.
'%y': 'YY', # Year without century as a zero-padded decimal number.
'%Y': 'yyyy', # Year with century as a decimal number.
'%Z': 'zzz', # Time zone name (no characters if no time zone exists).
'%z': 'ZZZ', # UTC offset in the form ±HHMM[SS[.ffffff]]
# (empty string if the object is naive).
'%%': '%',
}
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(date: datetime, format: str, locale: Optional[str],
                      formatter: Callable = babel.dates.format_date) -> str:
    """Format *date* with babel, falling back to English on locale trouble.

    ``formatter`` is one of babel's format_date/format_time/format_datetime;
    an object without a ``tzinfo`` attribute (a plain date) is always routed
    through the date-only formatter, since time patterns cannot apply to it.
    """
    effective_locale = 'en' if locale is None else locale
    # A plain date has no tzinfo attribute; time-related patterns cannot be
    # rendered for it, so force the date-only formatter.
    if not hasattr(date, 'tzinfo'):
        formatter = babel.dates.format_date
    try:
        return formatter(date, format, locale=effective_locale)
    except (ValueError, babel.core.UnknownLocaleError):
        # Unknown locale or unformattable value: retry in English.
        return formatter(date, format, locale='en')
    except AttributeError:
        logger.warning(__('Invalid date format. Quote the string by single quote '
                          'if you want to output it directly: %s'), format)
        return format
def format_date(format: str, date: datetime = None, language: Optional[str] = None) -> str:
    """Render the strftime-style *format* string for *date* via babel.

    When *date* is omitted, $SOURCE_DATE_EPOCH is honoured for reproducible
    builds (https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal);
    otherwise the current local time is used.
    """
    if date is None:
        epoch = os.getenv('SOURCE_DATE_EPOCH')
        date = (datetime.utcfromtimestamp(float(epoch))
                if epoch is not None
                else datetime.now(timezone.utc).astimezone())
    pieces = []
    for token in date_format_re.split(format):
        if token not in date_format_mappings:
            # Literal text between tokens is passed through unchanged.
            pieces.append(token)
            continue
        # '%x' / '%X' are date-only / time-only representations and need the
        # matching dedicated babel formatter; everything else goes through
        # format_datetime.
        if token == '%x':
            render = babel.dates.format_date
        elif token == '%X':
            render = babel.dates.format_time
        else:
            render = babel.dates.format_datetime
        pieces.append(babel_format_date(date, date_format_mappings[token],
                                        locale=language, formatter=render))
    return "".join(pieces)
def get_image_filename_for_language(filename: str, env: "BuildEnvironment") -> str:
    """Expand the ``figure_language_filename`` pattern for *filename*.

    Substitution fields available to the pattern: ``{root}``, ``{ext}``,
    ``{path}``, ``{basename}``, ``{docpath}`` and ``{language}``.  Returns
    *filename* unchanged when no language is configured; raises SphinxError
    when the pattern references an unknown field.
    """
    if not env.config.language:
        return filename
    root, ext = path.splitext(filename)
    # Keep a trailing separator so patterns can simply concatenate {path}.
    dirname = path.dirname(root)
    if dirname and not dirname.endswith(path.sep):
        dirname += path.sep
    docpath = path.dirname(env.docname)
    if docpath and not docpath.endswith(path.sep):
        docpath += path.sep
    fields = {
        'root': root,
        'ext': ext,
        'path': dirname,
        'basename': path.basename(root),
        'docpath': docpath,
        'language': env.config.language,
    }
    try:
        return env.config.figure_language_filename.format(**fields)
    except KeyError as exc:
        raise SphinxError('Invalid figure_language_filename: %r' % exc) from exc
def search_image_for_language(filename: str, env: "BuildEnvironment") -> str:
    """Return the language-specific image path if that file exists on disk.

    Falls back to the original *filename* when no language is configured or
    the translated image is missing.
    """
    if not env.config.language:
        return filename
    candidate = get_image_filename_for_language(filename, env)
    _, abspath = env.relfn2path(candidate)
    return candidate if path.exists(abspath) else filename
| 37.44697 | 95 | 0.606312 |
import os
import re
from datetime import datetime, timezone
from os import path
from typing import TYPE_CHECKING, Callable, Generator, List, NamedTuple, Optional, Tuple, Union
import babel.dates
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
from sphinx.errors import SphinxError
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.osutil import SEP, canon_path, relpath
if TYPE_CHECKING:
from sphinx.environment import BuildEnvironment
logger = logging.getLogger(__name__)
class LocaleFileInfoBase(NamedTuple):
    """Immutable triple locating one message catalog on disk."""
    base_dir: str
    domain: str
    charset: str


class CatalogInfo(LocaleFileInfoBase):
    """A gettext catalog (.po source plus compiled .mo) below ``base_dir``."""

    @property
    def po_file(self) -> str:
        """File name of the catalog source."""
        return self.domain + '.po'

    @property
    def mo_file(self) -> str:
        """File name of the compiled catalog."""
        return self.domain + '.mo'

    @property
    def po_path(self) -> str:
        """Full path of the catalog source."""
        return path.join(self.base_dir, self.po_file)

    @property
    def mo_path(self) -> str:
        """Full path of the compiled catalog."""
        return path.join(self.base_dir, self.mo_file)

    def is_outdated(self) -> bool:
        """True when the .mo file is missing or older than its .po source."""
        if not path.exists(self.mo_path):
            return True
        return path.getmtime(self.mo_path) < path.getmtime(self.po_path)

    def write_mo(self, locale: str) -> None:
        """Compile the .po source into a .mo file, logging (not raising) errors."""
        with open(self.po_path, encoding=self.charset) as file_po:
            try:
                catalog = read_po(file_po, locale)
            except Exception as exc:
                logger.warning(__('reading error: %s, %s'), self.po_path, exc)
                return
        with open(self.mo_path, 'wb') as file_mo:
            try:
                write_mo(file_mo, catalog)
            except Exception as exc:
                logger.warning(__('writing error: %s, %s'), self.mo_path, exc)
class CatalogRepository:
    """Repository of gettext catalogs found below *basedir*.

    Scans each of *locale_dirs* (relative to *basedir*) for
    ``<language>/LC_MESSAGES/**/*.po`` files and exposes them as
    :class:`CatalogInfo` objects.
    """

    def __init__(self, basedir: str, locale_dirs: List[str],
                 language: str, encoding: str) -> None:
        self.basedir = basedir
        self._locale_dirs = locale_dirs
        self.language = language
        self.encoding = encoding

    @property
    def locale_dirs(self) -> Generator[str, None, None]:
        """Yield the configured locale dirs that exist for ``self.language``."""
        if not self.language:
            return
        for locale_dir in self._locale_dirs:
            locale_dir = path.join(self.basedir, locale_dir)
            locale_path = path.join(locale_dir, self.language, 'LC_MESSAGES')
            if path.exists(locale_path):
                yield locale_dir
            else:
                logger.verbose(__('locale_dir %s does not exists'), locale_path)

    @property
    def pofiles(self) -> Generator[Tuple[str, str], None, None]:
        """Yield ``(basedir, relative .po path)`` pairs, skipping hidden dirs."""
        for locale_dir in self.locale_dirs:
            basedir = path.join(locale_dir, self.language, 'LC_MESSAGES')
            for root, dirnames, filenames in os.walk(basedir):
                # BUGFIX: pruning previously removed entries from ``dirnames``
                # while iterating over it, which skips the element following
                # each removed one, so consecutive dot-directories were still
                # traversed.  Rebuild the list in place instead (in place so
                # that os.walk honours the pruning).
                dirnames[:] = [dirname for dirname in dirnames
                               if not dirname.startswith('.')]
                for filename in filenames:
                    if filename.endswith('.po'):
                        fullpath = path.join(root, filename)
                        yield basedir, relpath(fullpath, basedir)

    @property
    def catalogs(self) -> "Generator[CatalogInfo, None, None]":
        """Yield a :class:`CatalogInfo` for every discovered .po file."""
        for basedir, filename in self.pofiles:
            domain = canon_path(path.splitext(filename)[0])
            yield CatalogInfo(basedir, domain, self.encoding)
def docname_to_domain(docname: str, compaction: Union[bool, str]) -> str:
    """Map a document name to its gettext domain.

    A string *compaction* is used verbatim as the domain; a truthy boolean
    collapses the docname to its first path component; otherwise the docname
    itself is the domain.
    """
    if isinstance(compaction, str):
        return compaction
    return docname.split(SEP, 1)[0] if compaction else docname
# strftime token -> babel (LDML) date-format pattern.
date_format_mappings = {
    '%a': 'EEE',      # Weekday as locale's abbreviated name.
    '%A': 'EEEE',     # Weekday as locale's full name.
    '%b': 'MMM',      # Month as locale's abbreviated name.
    '%B': 'MMMM',     # Month as locale's full name.
    '%c': 'medium',   # Locale's appropriate date and time representation.
    '%-d': 'd',       # Day of the month as a decimal number.
    '%d': 'dd',       # Day of the month as a zero-padded decimal number.
    '%-H': 'H',       # Hour (24-hour clock) as a decimal number [0,23].
    '%H': 'HH',       # Hour (24-hour clock), zero-padded [00,23].
    '%-I': 'h',       # Hour (12-hour clock) as a decimal number [1,12].
    '%I': 'hh',       # Hour (12-hour clock), zero-padded [01,12].
    '%-j': 'D',       # Day of the year as a decimal number.
    '%j': 'DDD',      # Day of the year as a zero-padded decimal number.
    '%-m': 'M',       # Month as a decimal number.
    '%m': 'MM',       # Month as a zero-padded decimal number.
    '%-M': 'm',       # Minute as a decimal number [0,59].
    '%M': 'mm',       # Minute as a zero-padded decimal number [00,59].
    '%p': 'a',        # Locale's equivalent of either AM or PM.
    '%-S': 's',       # Second as a decimal number.
    '%S': 'ss',       # Second as a zero-padded decimal number.
    '%U': 'WW',       # Week number of the year (Sunday as first day).
    '%w': 'e',        # Weekday as a decimal number, 0 = Sunday.
    '%-W': 'W',       # Week number of the year (Monday as first day).
    '%W': 'WW',       # Week number (Monday first), zero-padded.
    '%x': 'medium',   # Locale's appropriate date representation.
    '%X': 'medium',   # Locale's appropriate time representation.
    '%y': 'YY',       # Year without century, zero-padded.
    '%Y': 'yyyy',     # Year with century as a decimal number.
    '%Z': 'zzz',      # Time zone name (empty if no time zone exists).
    '%z': 'ZZZ',      # UTC offset (empty string if the object is naive).
    '%%': '%',        # Escaped literal percent sign.
}
# Regex with a capturing group so re.split() keeps the matched tokens.
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(date: datetime, format: str, locale: Optional[str],
                      formatter: Callable = babel.dates.format_date) -> str:
    """Format *date* via babel, falling back to English on locale errors."""
    if locale is None:
        locale = 'en'
    # A plain date has no tzinfo attribute; time-related patterns cannot be
    # rendered for it, so force the date-only formatter.
    if not hasattr(date, 'tzinfo'):
        formatter = babel.dates.format_date
    try:
        return formatter(date, format, locale=locale)
    except (ValueError, babel.core.UnknownLocaleError):
        # Fallback to English on unknown locale or bad value.
        return formatter(date, format, locale='en')
    except AttributeError:
        logger.warning(__('Invalid date format. Quote the string by single quote '
                          'if you want to output it directly: %s'), format)
        return format
def format_date(format: str, date: datetime = None, language: Optional[str] = None) -> str:
    """Expand the strftime-style *format* string for *date* using babel."""
    if date is None:
        # If time is not specified, try to use $SOURCE_DATE_EPOCH variable
        # See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
        source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
        if source_date_epoch is not None:
            date = datetime.utcfromtimestamp(float(source_date_epoch))
        else:
            date = datetime.now(timezone.utc).astimezone()
    result = []
    tokens = date_format_re.split(format)
    for token in tokens:
        if token in date_format_mappings:
            babel_format = date_format_mappings.get(token, '')
            # '%x' / '%X' are date-only / time-only and need the matching
            # dedicated babel formatter; everything else uses format_datetime.
            if token == '%x':
                function = babel.dates.format_date
            elif token == '%X':
                function = babel.dates.format_time
            else:
                function = babel.dates.format_datetime
            result.append(babel_format_date(date, babel_format, locale=language,
                                            formatter=function))
        else:
            result.append(token)
    return "".join(result)
def get_image_filename_for_language(filename: str, env: "BuildEnvironment") -> str:
    """Apply the ``figure_language_filename`` pattern to *filename*.

    Substitution fields: {root}, {ext}, {path}, {basename}, {docpath},
    {language}.  Returns *filename* unchanged when no language is set.
    """
    if not env.config.language:
        return filename
    filename_format = env.config.figure_language_filename
    d = dict()
    d['root'], d['ext'] = path.splitext(filename)
    # Keep a trailing separator so patterns can simply concatenate {path}.
    dirname = path.dirname(d['root'])
    if dirname and not dirname.endswith(path.sep):
        dirname += path.sep
    docpath = path.dirname(env.docname)
    if docpath and not docpath.endswith(path.sep):
        docpath += path.sep
    d['path'] = dirname
    d['basename'] = path.basename(d['root'])
    d['docpath'] = docpath
    d['language'] = env.config.language
    try:
        return filename_format.format(**d)
    except KeyError as exc:
        raise SphinxError('Invalid figure_language_filename: %r' % exc) from exc
def search_image_for_language(filename: str, env: "BuildEnvironment") -> str:
    """Prefer the language-specific image file if it exists on disk."""
    if not env.config.language:
        return filename
    translated = get_image_filename_for_language(filename, env)
    _, abspath = env.relfn2path(translated)
    if path.exists(abspath):
        return translated
    else:
        return filename
| true | true |
f7298e93853808c239027914d843e53976273ae0 | 804 | py | Python | process_exec/parallel_pipe_io_base.py | sreramk/dag_process_exec | 7f5493d44b65afa7b010cdc0081fc141d9339eb3 | [
"Apache-2.0"
] | null | null | null | process_exec/parallel_pipe_io_base.py | sreramk/dag_process_exec | 7f5493d44b65afa7b010cdc0081fc141d9339eb3 | [
"Apache-2.0"
] | null | null | null | process_exec/parallel_pipe_io_base.py | sreramk/dag_process_exec | 7f5493d44b65afa7b010cdc0081fc141d9339eb3 | [
"Apache-2.0"
] | null | null | null | import abc
class WriterBase(abc.ABC):
    """Interface for the writing side of a parallel pipe I/O pair.

    Concrete subclasses must supply the pipe attachment, the run-state
    queries and the callable entry point below; the exact loop semantics
    are defined by the implementation.
    """

    @abc.abstractmethod
    def set_pipe(self, pipe):
        """Attach the pipe endpoint this writer will write to."""

    @abc.abstractmethod
    def parallel_write_end_loop(self) -> None:
        """Request termination of the parallel write loop."""

    @abc.abstractmethod
    def is_running(self):
        """Report whether the writer is currently running."""

    @abc.abstractmethod
    def is_stopped(self):
        """Report whether the writer has stopped."""

    @abc.abstractmethod
    def start_running(self):
        """Mark/start the writer as running."""

    @abc.abstractmethod
    def __call__(self):
        """Entry point invoked to perform the writer's work."""
class ReaderBase(abc.ABC):
    """Interface for the reading side of a parallel pipe I/O pair.

    Mirrors WriterBase without the explicit end-loop hook; semantics of the
    run-state methods are defined by concrete implementations.
    """

    @abc.abstractmethod
    def set_pipe(self, pipe):
        """Attach the pipe endpoint this reader will read from."""

    @abc.abstractmethod
    def is_running(self):
        """Report whether the reader is currently running."""

    @abc.abstractmethod
    def is_stopped(self):
        """Report whether the reader has stopped."""

    @abc.abstractmethod
    def start_running(self):
        """Mark/start the reader as running."""

    @abc.abstractmethod
    def __call__(self):
        """Entry point invoked to perform the reader's work."""
| 15.461538 | 46 | 0.61194 | import abc
class WriterBase(abc.ABC):
    """Abstract interface for the writing side of a parallel pipe I/O pair."""
    @abc.abstractmethod
    def set_pipe(self, pipe):
        """Attach the pipe endpoint this writer will write to."""
        pass
    @abc.abstractmethod
    def parallel_write_end_loop(self) -> None:
        """Request termination of the parallel write loop."""
        pass
    @abc.abstractmethod
    def is_running(self):
        """Report whether the writer is currently running."""
        pass
    @abc.abstractmethod
    def is_stopped(self):
        """Report whether the writer has stopped."""
        pass
    @abc.abstractmethod
    def start_running(self):
        """Mark/start the writer as running."""
        pass
    @abc.abstractmethod
    def __call__(self):
        """Entry point invoked to perform the writer's work."""
        pass
class ReaderBase(abc.ABC):
    """Abstract interface for the reading side of a parallel pipe I/O pair."""
    @abc.abstractmethod
    def set_pipe(self, pipe):
        """Attach the pipe endpoint this reader will read from."""
        pass
    @abc.abstractmethod
    def is_running(self):
        """Report whether the reader is currently running."""
        pass
    @abc.abstractmethod
    def is_stopped(self):
        """Report whether the reader has stopped."""
        pass
    @abc.abstractmethod
    def start_running(self):
        """Mark/start the reader as running."""
        pass
    @abc.abstractmethod
    def __call__(self):
        """Entry point invoked to perform the reader's work."""
        pass
| true | true |
f7298fd9c7a6827bc9841172f616ef3715d70dfa | 2,363 | py | Python | src/muse-combineRoiMapsIter.py | CBICA/MUSE | edd01964078f957101130993899c7f4de13d48b6 | [
"Unlicense"
] | null | null | null | src/muse-combineRoiMapsIter.py | CBICA/MUSE | edd01964078f957101130993899c7f4de13d48b6 | [
"Unlicense"
] | 1 | 2020-10-22T21:58:32.000Z | 2020-12-24T18:09:43.000Z | src/muse-combineRoiMapsIter.py | CBICA/MUSE | edd01964078f957101130993899c7f4de13d48b6 | [
"Unlicense"
] | 1 | 2021-02-24T06:38:44.000Z | 2021-02-24T06:38:44.000Z | #!/usr/bin/env python
#
# @file muse_combineRoiMapsIter.py
# @brief Combine roi probability maps for a single subject
#
# Copyright (c) 2011, 2012 University of Pennsylvania. All rights reserved.<br />
# See http://www.cbica.upenn.edu/sbia/software/license.html or COPYING file.
#
# Contact: SBIA Group <sbia-software at uphs.upenn.edu>
#
#Usage
# ############################################ #
# muse_combineRoiMapsIter.py /Path/To/Input/List.txt /Path/To/Destination/outImgName
################################################
# will read roi files listed in 'List.txt' file
#The list file must have full paths to the files
import nibabel as nib
import numpy as np
import sys
import re
import time
print(str(sys.argv))
InputList=str(sys.argv[1])
DestFile=str(sys.argv[2])
### Sanity check on the arguments
if not InputList or not DestFile:
	print("ERROR: Required input options not provided!!!")
	sys.exit(0)
### Printing input arguments
print('\n\n')
print('Subject Input List :', InputList)
print('Destination File :', DestFile)
print('\n\n')
### Reading input file first line
f=open(InputList)
fline = f.readline()
f.close()
### Extract roi no from the file name (expects an "...ROI_<no>_..." pattern)
match=re.search('([\w.-]+)ROI_(\d+)_([\w.-]+)', fline)
if match:
	rnos = match.group(2)
	rno = int(rnos)
else:
	print('ERROR: No ROI_{roino} in file name !')
	exit(1)
### Read img, vectorize
# NOTE(review): img.get_data()/get_affine()/get_header() are deprecated in
# nibabel (removed in recent releases); prefer img.get_fdata(), img.affine
# and img.header when upgrading.
img = nib.load(str.rstrip(fline))
a=img.get_data()
b=np.reshape(a,-1)
isize = a.shape
vsize = b.shape
### Set index of voxels belonging to that roi, set also max values
imgMAX = b
imgIND = np.zeros(vsize)
imgIND[b>0] = rno
### Reading input file list
# NOTE(review): the first file is processed again inside the loop below;
# redundant but harmless, since b > imgMAX is False for identical data.
f=open(InputList)
lines = f.readlines()
f.close()
ctr=1  # NOTE(review): unused counter.
### Combine roi images: per voxel, keep the roi with the highest value
for line in lines:
	print(line)
	### Extract roi no
	match=re.search('([\w.-]+)ROI_(\d+)_([\w.-]+)', line)
	if match:
		rnos = match.group(2)
		rno = int(rnos)
	else:
		print('ERROR: No ROI_{roino} in file name !')
		exit(1)
	### Read img, vectorize
	img = nib.load(str.rstrip(line))
	a=img.get_data()
	b=np.reshape(a,-1)
	### Set index of voxels belonging to that roi, set also max values
	imgIND.put((b>imgMAX).nonzero(), rno)
	imgMAX = np.maximum(b,imgMAX)
### Write out img: reshape the label vector back to a volume, save as NIfTI
imgINDM = np.reshape(imgIND,isize)
aff = img.get_affine()
hdr = img.get_header()
#hdr.set_data_dtype(np.int16)
img2 = nib.Nifti1Image(imgINDM, aff, hdr)
img2.to_filename(DestFile);
| 22.084112 | 84 | 0.66314 | true | true | |
f72990938ad2cb42f00c7cdb6eff613f89c9d0de | 1,925 | py | Python | empower/cli/lomm_lns_commands/list_lenddevs.py | ericbrinckhaus/empower-runtime-modified | ecd7c1e9f1c19a629abdcb5c55257377313246ea | [
"Apache-2.0"
] | null | null | null | empower/cli/lomm_lns_commands/list_lenddevs.py | ericbrinckhaus/empower-runtime-modified | ecd7c1e9f1c19a629abdcb5c55257377313246ea | [
"Apache-2.0"
] | null | null | null | empower/cli/lomm_lns_commands/list_lenddevs.py | ericbrinckhaus/empower-runtime-modified | ecd7c1e9f1c19a629abdcb5c55257377313246ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""List LoRaWAN End Devices in the LNS database."""
import argparse
from empower.cli import command
def pa_cmd(args, cmd):
    """Parse command-line options for the list-lenddevs command.

    usage: empower-ctl.py list-lenddevs <options>

    optional arguments:
      -h, --help            show this help message and exit
      -g DEVEUI, --devEUI DEVEUI
                            show results for a specified devEUI id only
      -v, --verbose         verbose
    """
    usage = "%s <options>" % command.USAGE.format(cmd)
    desc = command.DESCS[cmd]
    parser = argparse.ArgumentParser(usage=usage, description=desc)
    # required = parser.add_argument_group('required named arguments')
    parser.add_argument(
        '-g', '--devEUI', help='show results for a specified devEUI id only',
        default=None, type=str, dest="devEUI")
    # NOTE(review): the verbose flag is stored under dest="config", not
    # "verbose" -- looks like a copy/paste slip, but consumers may already
    # read args.config, so confirm before changing.
    parser.add_argument(
        '-v', '--verbose', help='verbose', action="store_true",
        default=False, dest="config")
    (args, leftovers) = parser.parse_known_args(args)
    return args, leftovers
def do_cmd(gargs, args, _):
    """Fetch the lEndDevs registered in the LNS and print matching entries."""
    _, data = command.connect(gargs, ('GET', '/api/v1/lns/lenddevs/'), 200)
    wanted = args.devEUI
    for entry in data:
        # No filter given -> print everything; otherwise match on DevEUI.
        if not wanted or entry['DevEUI'] == wanted:
            print(entry)
| 28.731343 | 77 | 0.657143 |
import argparse
from empower.cli import command
def pa_cmd(args, cmd):
    """Parse command-line options for the list-lenddevs command."""
    usage = "%s <options>" % command.USAGE.format(cmd)
    desc = command.DESCS[cmd]
    parser = argparse.ArgumentParser(usage=usage, description=desc)
    parser.add_argument(
        '-g', '--devEUI', help='show results for a specified devEUI id only',
        default=None, type=str, dest="devEUI")
    # NOTE(review): the verbose flag is stored under dest="config", not
    # "verbose" -- confirm against consumers reading args.config before
    # changing it.
    parser.add_argument(
        '-v', '--verbose', help='verbose', action="store_true",
        default=False, dest="config")
    (args, leftovers) = parser.parse_known_args(args)
    return args, leftovers
def do_cmd(gargs, args, _):
    """Fetch the lEndDevs registered in the LNS and print matching entries."""
    url = '/api/v1/lns/lenddevs/'
    _, data = command.connect(gargs, ('GET', url), 200)
    for entry in data:
        if not args.devEUI:
            # No filter given: print every device.
            print(entry)
        elif entry['DevEUI'] == args.devEUI:
            print(entry)
| true | true |
f7299320ac92f29ec4db9777da6d4d97ea370f59 | 603 | py | Python | make_call.py | nfrumkin/ElMingle | 6e9fa6bc4a4826188c9752212ec57b7b7a225b21 | [
"MIT"
] | 1 | 2019-11-17T00:44:08.000Z | 2019-11-17T00:44:08.000Z | make_call.py | nfrumkin/ElMingle | 6e9fa6bc4a4826188c9752212ec57b7b7a225b21 | [
"MIT"
] | null | null | null | make_call.py | nfrumkin/ElMingle | 6e9fa6bc4a4826188c9752212ec57b7b7a225b21 | [
"MIT"
] | null | null | null | # Download the helper library from https://www.twilio.com/docs/python/install
import os

# Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client

# Account SID and auth token from twilio.com/console.
# SECURITY: credentials must not live in source control.  They are read from
# the environment first; the literal values remain only as a backward
# compatible fallback and should be rotated and removed.
account_sid = os.environ.get('TWILIO_ACCOUNT_SID',
                             'ACc07d36f2bedb365988af5c81d578bfef')
auth_token = os.environ.get('TWILIO_AUTH_TOKEN',
                            'd7698b135d26730bf84600c176d6815f')

client = Client(account_sid, auth_token)

# Place a recorded outbound call that plays the Twilio demo TwiML.
call = client.calls.create(
    record=True,
    url='http://demo.twilio.com/docs/voice.xml',
    to='+19392190769',
    from_='+12512835337'
)

print(call.sid)
print(call.sid) | true | true |
f7299343251025558429f8634c574058f7031345 | 1,529 | py | Python | generator.py | y3sar/painter_gan | 374fb91927ca584b4ef3fd8ba10922c7b5201780 | [
"MIT"
] | 1 | 2020-09-10T07:56:10.000Z | 2020-09-10T07:56:10.000Z | generator.py | y3sar/painter_gan | 374fb91927ca584b4ef3fd8ba10922c7b5201780 | [
"MIT"
] | 15 | 2020-09-26T00:22:47.000Z | 2022-03-02T14:59:36.000Z | generator.py | y3sar/painter_gan | 374fb91927ca584b4ef3fd8ba10922c7b5201780 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torchvision.transforms import ToTensor, ToPILImage
class Generator(nn.Module):
    """DCGAN-style generator network.

    Maps a latent tensor of shape (N, 100, 1, 1) through six transposed
    convolutions (spatial size 1 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128) to an
    image tensor of shape (N, 3, 128, 128); the final Tanh squashes values
    into [-1, 1].
    """

    def __init__(self):
        super().__init__()
        # (in_ch, out_ch, kernel, stride, padding) per upsampling stage.
        # Built as a flat list so the nn.Sequential module order (and thus
        # the state_dict keys) is identical to the historical layout.
        stages = [
            (100, 512, 4, 1, 0),
            (512, 256, 4, 2, 1),
            (256, 128, 4, 2, 1),
            (128, 64, 4, 2, 1),
            (64, 3, 4, 2, 1),
        ]
        layers = []
        for in_ch, out_ch, kernel, stride, padding in stages:
            layers.append(nn.ConvTranspose2d(in_ch, out_ch, kernel, stride, padding))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(True))
        layers.append(nn.ConvTranspose2d(3, 3, 4, 2, 1))
        layers.append(nn.Tanh())
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        """Upsample the latent batch *x* through the transposed-conv stack."""
        return self.conv_block(x)
if __name__ == '__main__':
    # Smoke test: run a single random latent vector through the generator
    # and print the resulting output tensor shape.
    img = torch.randn(1, 100, 1, 1)
    gen = Generator()
    print(gen(img).shape)
| 26.362069 | 74 | 0.35121 | import torch
import torch.nn as nn
from torchvision.transforms import ToTensor, ToPILImage
class Generator(nn.Module):
    """DCGAN-style generator network.

    Maps a latent tensor of shape (N, 100, 1, 1) through six transposed
    convolutions to an image tensor of shape (N, 3, 128, 128); the final
    Tanh squashes values into [-1, 1].
    """
    def __init__(self):
        super().__init__()
        # Upsampling stack: spatial 1 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128.
        self.conv_block = nn.Sequential(
            nn.ConvTranspose2d(100, 512, 4, 1, 0),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 64, 4, 2, 1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 3, 4, 2, 1),
            nn.BatchNorm2d(3),
            nn.ReLU(True),
            nn.ConvTranspose2d(3, 3, 4, 2, 1),
            nn.Tanh(),
        )
    def forward(self, x):
        """Upsample the latent batch *x* through the transposed-conv stack."""
        x = self.conv_block(x)
        return x
if __name__ == '__main__':
    # Smoke test: print the output shape for one random latent vector.
    img = torch.randn(1, 100, 1, 1)
    gen = Generator()
    print(gen(img).shape)
| true | true |
f7299350b5f08db93723645ff2abf0821f03a950 | 8,346 | py | Python | Engines/Engines.py | whitemike889/Machapi | 8c6f4d4d9d788cc0b56643ef054124c04b7e2728 | [
"Apache-2.0"
] | null | null | null | Engines/Engines.py | whitemike889/Machapi | 8c6f4d4d9d788cc0b56643ef054124c04b7e2728 | [
"Apache-2.0"
] | null | null | null | Engines/Engines.py | whitemike889/Machapi | 8c6f4d4d9d788cc0b56643ef054124c04b7e2728 | [
"Apache-2.0"
] | null | null | null | ## @package Engines
# The superior method to solving complex problems is by easily consumable and scalable design, not complex code.
#
# The engines package consists of virtual classes and methods that define the structure of the engine modules.
#
# New modules are installed simply by dropping a compliant engine module in the Engines package directory.
#
# The virtual classes and methods are what defines compliance with the engine. One could simply build these out
# manually without using these but this is highly discouraged unless you really know what you're doing, so long as
# you're okay with future changes in the engine breaking the functionality of the module you are developing.
#
# Your code scanner is probably going to whine because I deviate from PEP8. That's mostly because the PEP8 is
# worthless bullshit and its evangelical adherents are mostly just eating each others' regurgitations instead of
# thinking.
from abc import ABCMeta
from abc import abstractmethod
import configparser
import requests
## SessionSpec
#
# The SessionSpec class is the core of the module. This contains the structure of the class you must implement,
# prepackaged for you. Exceptions will be thrown as required until you have implemented all the necessary minimum
# pieces in the structure required to be used by a multiplexor, which is not required to utilize a module but forces
# continuity between modules when all modules use it. You are strongly advised not to build a module that does not
# use the base classes defined here if you wish to contribute.
#
# This file also serves as an excellent atomic, academic example of abstract method usage and OOP inheritance in Python.
class SessionSpec:
    # NOTE(review): ``__metaclass__`` is Python-2 syntax and has no effect on
    # Python 3, so the abstract methods below are not actually enforced at
    # instantiation time.  Switching to ``class SessionSpec(metaclass=ABCMeta)``
    # would enforce them, but could break existing engine modules that do not
    # implement every abstract method, so it is only flagged here.
    __metaclass__ = ABCMeta

    ## SessionSpec.__init__
    #
    # Binds the parsed configuration and creates the requests.Session used for
    # all HTTP traffic of the engine module.  Modules are expected to call this
    # from their own Session __init__ and then add module-specific setup.
    def __init__(self, config):
        # bind the configuration to the Session object
        self.config = config
        # handle to use for getting new pages with the Session object
        self.client = requests.Session()
        # Rudimentary proxy support: route traffic through the configured
        # proxy when enabled.
        if self.config.proxy_enabled:
            # BUGFIX: this used to be the *set* literal
            # ``{ 'http', self.config.proxy }``; requests expects a mapping of
            # scheme -> proxy URL, and dict.update() on that set raised
            # ValueError at runtime.
            proxies = {'http': self.config.proxy}
            self.client.proxies.update(proxies)

    ## SessionSpec.SessionError
    #
    # Generic exception for the Session class being implemented.  Best used as
    # a base class for each TYPE of error a module wishes to handle, to make
    # engines more robust in error handling.
    class SessionError(Exception):
        def __init__(self, value):
            # Payload describing the error condition.
            self.value = value

    ## SessionSpec.login
    #
    # Log in to the site the engine module supports.  Implementations must NOT
    # return a success flag; use SessionSpec.Parser.is_logged_in ad-hoc as a
    # conditional instead, so contributors never rely on stateful information
    # in a stateless manner.
    @abstractmethod
    def login(self):
        pass

    ## SessionSpec.Config
    #
    # Imports the shared configuration file and binds the values common to all
    # modules.  Child classes should call this initializer and expand upon it
    # for module-specific features.
    class Config:
        def __init__(self, config_file):
            settings = configparser.ConfigParser(allow_no_value=True)
            settings.read(config_file)
            # User-Agent header value for the engine's HTTP client.
            self.useragent = settings.get("general-client", "user_agent")
            # Whether outgoing traffic should go through a proxy.
            self.proxy_enabled = settings.getboolean("proxy", "enabled")
            if self.proxy_enabled:
                # Proxy URL; only read when the proxy is enabled.
                self.proxy = settings.get("proxy", "proxy")

    ## SessionSpec.User
    #
    # Must be implemented by each module: different sites give users different
    # attributes that the module will be interested in.
    class User:
        @abstractmethod
        def __init__(self):
            pass

    ## SessionSpec.Parser
    #
    # Converts raw HTTP responses into usable data for the rest of the engine.
    # All methods are static and extract a single datapoint; `scrape` routes a
    # datapoint identifier to its dedicated static helper.
    class Parser:
        ## SessionSpec.Parser.scrape
        # @param datapoint: identifier of the datapoint to extract.
        # @param text: unprocessed body of data to extract it from.
        #
        # Returns whatever the routed helper returns (extracted or abstracted
        # data); any aggregation must be done statelessly.
        @staticmethod
        @abstractmethod
        def scrape(datapoint, text):
            pass

        ## SessionSpec.Parser.is_logged_in
        # @param text: unprocessed page response identifying session state.
        #
        # Vital session-management point: check this boolean ad-hoc before any
        # action that requires an active session.  Returns a boolean value.
        @staticmethod
        @abstractmethod
        def is_logged_in(text):
            pass

        ## SessionSpec.Parser.send_message_failed
        # @param text: unprocessed page response identifying whether sending a
        # message failed.  Returns a boolean value.
        @staticmethod
        @abstractmethod
        def send_message_failed(text):
            pass
| 50.277108 | 120 | 0.711718 | okay with future changes in the engine breaking the functionality of the module you are developing.
# worthless bullshit and its evangelical adherents are mostly just eating each others' regurgitations instead of
from abc import ABCMeta
from abc import abstractmethod
import configparser
import requests
s SessionSpec:
__metaclass__ = ABCMeta
def __init__( self, config ):
self.config = config
self.client = requests.Session()
# Throwing in rudimentary proxy support:
if self.config.proxy_enabled:
proxies = { 'http', self.config.proxy }
self.client.proxies.update( proxies )
## SessionSpec.SessionError
#
# The SessionError subclass defines the Generic exception to be the Session class you are implementing.
#
# In the example modules, we use this for just about everything, but a better practice would be to use it as a base
# class for every TYPE of exception you wish to handle to make your engines more robust in error handling.
class SessionError( Exception ):
def __init__( self, value ):
self.value = value
## SessionSpec.login
#
# The method used to log in to the site your engine module supports. Self-explanatory.
#
# Note: When implementing in your module, do NOT return a boolean or use any other indicator of success. Instead,
# use SessionSpec.Parser.is_logged_in as a conditional in your logic flow for individual actions. This prevents
# your contributors from relying on stateful information in a stateless manner.
@abstractmethod
def login( self ):
pass
## SessionSpec.Config
#
# While the config file can be universal between engines so long as good naming conventions are used by the module
# implementations, each SessionSpec child class will want to import the whole config file and bind to local values
# that the module will want to implement. Here we have some operations related to that which will be common to all
# modules, providing a common set of values that I'd like the modules to have.
class Config:
def __init__( self, config_file ):
settings = configparser.ConfigParser( allow_no_value=True )
settings.read( config_file )
self.useragent = settings.get( "general-client", "user_agent" )
self.proxy_enabled = settings.getboolean( "proxy", "enabled" )
if self.proxy_enabled:
self.proxy = settings.get( "proxy", "proxy" )
class User:
@abstractmethod
def __init__(self):
pass
wsers call this something else, but the concept is the same: Code goes in that's served by the endpoint, and
class Parser:
@staticmethod
@abstractmethod
def scrape( datapoint, text ):
pass
@staticmethod
@abstractmethod
def is_logged_in( text ):
pass
@staticmethod
@abstractmethod
def send_message_failed( text ):
pass
| true | true |
f72994670ea9e8c9d9a4c64841e352b8c6248387 | 64 | py | Python | Conteudo das Aulas/129/filho.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/129/filho.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/129/filho.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | import sys, os
# Child-process greeting: print this process's PID and the first CLI argument.
print("Ola do filho", os.getpid(), sys.argv[1])
| 16 | 47 | 0.65625 | import sys, os
# Child-process greeting: print this process's PID and the first CLI argument.
print("Ola do filho", os.getpid(), sys.argv[1])
| true | true |
f7299518676a99f536019b7f897b01105167f5d1 | 1,028 | py | Python | events/templatetags/append_get.py | yellowjaguar5/lnldb | dea7708f5e4e103ef6ef968c9f3a4deaa58861c5 | [
"MIT"
] | 5 | 2017-09-25T21:24:59.000Z | 2021-12-18T17:08:13.000Z | events/templatetags/append_get.py | yellowjaguar5/lnldb | dea7708f5e4e103ef6ef968c9f3a4deaa58861c5 | [
"MIT"
] | 304 | 2015-03-24T17:44:22.000Z | 2022-03-29T14:09:41.000Z | events/templatetags/append_get.py | yellowjaguar5/lnldb | dea7708f5e4e103ef6ef968c9f3a4deaa58861c5 | [
"MIT"
] | 10 | 2017-10-24T02:18:12.000Z | 2021-09-20T20:40:25.000Z | from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def append_to_get(context, replace=True, **kwargs):
    """
    Build a querystring from the current request's GET parameters with
    *kwargs* merged in, and return it (prefixed with '?') or '' if empty.

    @argument replace: If true, existing arguments named in kwargs have
        their values overridden; if false, kwargs are appended only.
    @argument kwargs: key-val pairs to add, with a value of None requesting
        deletion of that key (pass the string "None" for a literal value).
    """
    params = context['request'].GET.copy()
    if replace:
        # Duplicates replace any existing value for the key.
        for key, value in kwargs.items():
            params[key] = value
    else:
        # Duplicates are appended to the query.
        params.update(kwargs)
    # A value of None marks the key for deletion.
    for key, value in kwargs.items():
        if value is None:
            params.pop(key)
    return "?" + params.urlencode() if params else ""
| 28.555556 | 60 | 0.642023 | from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def append_to_get(context, replace=True, **kwargs):
updated = context['request'].GET.copy()
if not replace:
updated.update(kwargs)
else:
for arg, val in kwargs.items():
updated[arg] = val
for arg, val in kwargs.items():
if val is None:
updated.pop(arg)
if updated:
return "?" + updated.urlencode()
else:
return ""
| true | true |
f7299528177ca00d16a733958ceae2e9c52189ba | 35,826 | py | Python | tools/ReportConverter/ReportConverter.py | siemens/drace | 2679067783b1d8f39e4c370cd72a7626ebf5f8e8 | [
"MIT",
"MIT-0",
"BSD-3-Clause"
] | 32 | 2019-02-19T11:37:14.000Z | 2022-01-07T16:09:27.000Z | tools/ReportConverter/ReportConverter.py | siemens/drace | 2679067783b1d8f39e4c370cd72a7626ebf5f8e8 | [
"MIT",
"MIT-0",
"BSD-3-Clause"
] | 86 | 2019-03-29T08:57:37.000Z | 2021-06-30T16:13:06.000Z | tools/ReportConverter/ReportConverter.py | siemens/drace | 2679067783b1d8f39e4c370cd72a7626ebf5f8e8 | [
"MIT",
"MIT-0",
"BSD-3-Clause"
] | 4 | 2019-04-16T18:35:02.000Z | 2021-06-17T16:49:48.000Z | #
# ReportConverter: A graphical report generator for DRace
#
# Copyright 2019 Siemens AG
#
# Authors:
# <Philip Harr> <philip.harr@siemens.com>
#
# SPDX-License-Identifier: MIT
#
## \package ReportConverter
## \brief Python XML to HTML report converter for the better visualization of drace result data
import xml.etree.ElementTree as ET
import shutil
import argparse
import pathlib
import datetime
import html
import sys
from subprocess import check_call, STDOUT, DEVNULL
from functools import lru_cache
try:
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
noMatplotLib = False
except ImportError:
noMatplotLib = True
print("Matplotlib is not installed.")
# Locate the resources directory: next to the frozen executable when
# bundled (e.g. PyInstaller sets sys.frozen), otherwise next to this script.
if getattr(sys, 'frozen', False):
    SCRIPTPATH = pathlib.Path(sys.executable)
    SCRIPTPATH = pathlib.Path(SCRIPTPATH / "..")
else :
    SCRIPTPATH = pathlib.Path(pathlib.Path(__file__).resolve().parents[0])
# resources may live one level above the script dir or directly beside it
if pathlib.Path(SCRIPTPATH / '../resources').is_dir():
    resourcesPath = pathlib.Path(SCRIPTPATH / '../resources')
else:
    if pathlib.Path(SCRIPTPATH / 'resources').is_dir():
        resourcesPath = pathlib.Path(SCRIPTPATH / 'resources')
    else:
        print("path of resources not found")
        sys.exit(-1)
# Paths to the HTML template file and the static assets copied to the output
g_HTMLTEMPLATES = resourcesPath / "entries.xml"
g_CSSPATH = resourcesPath / "css"
g_JSPATH = resourcesPath / "js"
DEBUG = False
# Source-file filter lists filled from CLI args.
# info: blacklisting overrules whitelisting
SOURCEFILE_BL = list()
SOURCEFILE_WL = list()
WHITELISTING = False
# Maximum number of code lines shown per snippet; must be even because the
# snippet is centred (half above / half below) on the line of interest.
NUMBEROFCODELINES = 400
if NUMBEROFCODELINES % 2:
    print('Number of maximum of displayed code lines must be even, but is:')
    print(str(NUMBEROFCODELINES))
    sys.exit(-1)
# Additional directories to search for source files (from CLI args)
SOURCE_DIRECTORIES = list()
class ReportCreator:
    """Parses a DRace XML report and builds the HTML report string.

    After construction, check `succesfullReportCreation`; the finished
    HTML is in `htmlReport`.
    """
    _htmlTemplatesPath = str(g_HTMLTEMPLATES)
    _topStackGraphFileName = 'topStackBarchart.png'
    _errorTimesPlot = 'errorTimes.png'
    # Probe once (at class-definition time) whether VS Code is installed;
    # if so, source links use vscode:// URIs instead of file:// links.
    try:
        if check_call(['code', '--version'], stdout=DEVNULL, stderr=STDOUT, shell=True) == 0: #check if vscode is installed, for sourcefile linking
            _vscodeFlag = True
        else:
            _vscodeFlag = False
    except:
        # NOTE(review): bare except is deliberate best-effort here (any
        # failure of the probe just disables vscode links)
        _vscodeFlag = False
    def __init__(self, pathOfReport, target):
        """Parse the report at `pathOfReport` and build the HTML report.

        :param pathOfReport: path to the DRace/valgrind XML report
        :param target: output directory for the matplotlib chart images
        """
        self.sourcefileList = list()
        self._callStackNumber = 0  # running counter for snippet reference vars
        self._errorNumber = 0
        self._snippets = str()
        self.succesfullReportCreation = True
        try:
            self._htmlTemplates = (ET.parse(self._htmlTemplatesPath)).getroot()
        except FileNotFoundError:
            print("template file is missing")
            self.succesfullReportCreation = False
            return
        self.SCM = SourceCodeManagement()
        self._pathOfReport = pathOfReport
        if self._inputValidation():
            hasErrors = self._reportRoot.find('error') != None
            # charts are only drawn when matplotlib is available and the
            # report actually contains errors
            if not noMatplotLib and hasErrors:
                self._makeHistogramm(target)
                self._countTopStackOccurences(target)
            self._createReport()
        else:
            print("input file is not valid")
            self.succesfullReportCreation = False
def _inputValidation(self):
try:
self._reportContent = ET.parse(self._pathOfReport)
except ET.ParseError:
return 0
self._reportRoot = self._reportContent.getroot()
if self._reportRoot.find('protocolversion') != None and \
self._reportRoot.find('protocoltool') != None and \
self._reportRoot.find('preamble') != None and \
self._reportRoot.find('pid') != None and \
self._reportRoot.find('tool') != None and \
self._reportRoot.find('args') != None and \
self._reportRoot.find('status') != None and \
self._reportRoot.tag == 'valgrindoutput':
return 1
else:
return 0
    def _getHeader(self):
        """Collect the report header fields as a list of escaped strings:
        [date, time, duration, duration unit, vargs, exe, protocol
        version, protocol tool]. Missing fields become "".
        """
        header = list()
        status = self._reportRoot.findall('status')
        if len(status) == 2:
            status = status[1] ##second status contains finishing values
            strDatetime = status.find('time').text
            if "T" in strDatetime:
                date = strDatetime.split('T')[0]
                time = (strDatetime.split('T')[1])[0:-1] #last digit is 'Z' -> not needed
            else:
                date = ""
                time = strDatetime
            header.append(adjText(date))
            header.append(adjText(time))
            if status.find('duration') != None:
                header.append(adjText(status.find('duration').text))
                header.append(adjText(status.find('duration').get('unit')))
            else:
                header.append("")
                header.append("")
        else:
            # run did not finish cleanly -> no date/time/duration available
            header.append("")
            header.append("")
            header.append("")
            header.append("")
        arguments = str()
        for arg in self._reportRoot.find('args').find('vargv').findall('arg'):
            arguments += arg.text
            arguments += ' '
        header.append(adjText(arguments[0:-1])) #remove last ' '
        header.append(adjText(self._reportRoot.find('args').find('argv').find('exe').text))
        header.append(adjText(self._reportRoot.find('protocolversion').text))
        header.append(adjText(self._reportRoot.find('protocoltool').text))
        return header
def _makeFileEntry(self, frame):
strDir = adjText(self._frameValues["dir"])
strFile = adjText(self._frameValues["file"])
strLine = adjText(self._frameValues["line"])
offset = adjText(self._frameValues["offset"])
if self._vscodeFlag:
entry = "<a href='vscode://file/" + strDir + "/" + strFile + ":" + strLine + ":" + offset +"'>"+ strFile +":" + strLine + ":" + offset + "</a>"
else:
entry = "<a href='file://"+ strDir + "/" + strFile + "'>" + strFile + ":" + strLine + "</a>"
return entry
def _readFrame(self, frame):
if frame is None:
self._frameValues = {"obj":"", "fn":"", "ip":"", "dir":"", "file":"", "line":"", "offset":""}
return
obj = frame.find('obj')
if obj is None:
obj = ""
else:
if obj.text is None:
obj = ""
else:
obj = obj.text
fn = frame.find('fn')
if fn is None:
fn = ""
else:
if fn.text is None:
fn = ""
else:
fn = fn.text
ip = frame.find('ip')
if ip is None:
ip = ""
else:
if ip.text is None:
ip = ""
else:
ip = ip.text
direc = frame.find('dir')
if direc is None:
direc = ""
else:
if direc.text is None:
direc = ""
else:
direc = direc.text
filename = frame.find('file')
if filename is None:
filename = ""
else:
if filename.text is None:
filename = ""
else:
filename = filename.text
line = frame.find('line')
if line is None:
line = "0"
else:
if line.text is None:
line = "0"
else:
line = line.text
offset = frame.find('offset')
if offset is None:
offset = "0"
else:
if offset.text is None:
offset = "0"
else:
offset = offset.text
self._frameValues = {"obj":obj, "fn":fn, "ip":ip, "dir":direc, "file":filename, "line":line, "offset":offset}
    def _createSnippetEntry(self, frame, elementNumber, tag, codeIndex, buttonID):
        """Fill the snippet template for one stack frame and append it to
        self._snippets.

        :param frame: the <frame> element (used only for the file link)
        :param elementNumber: position of the frame within its stack
        :param tag: HTML fragment for the code area (code or no-code entry)
        :param codeIndex: snippet index like 'code_3', or -1 if no preview
        :param buttonID: DOM id of the button toggling this snippet
        """
        newSnippet = self._htmlTemplates.find('snippet_entry').text
        newSnippet = newSnippet.replace('*SNIPPET_VAR*', ("snippet_" + str(self._callStackNumber)))
        newSnippet = newSnippet.replace('*STACK_NUMBER*', adjText(hex(elementNumber)))
        newSnippet = newSnippet.replace('*OBJ*', adjText(self._frameValues["obj"]))
        newSnippet = newSnippet.replace('*FUNCTION*', adjText(self._frameValues["fn"]))
        newSnippet = newSnippet.replace('*INSTRUCTION_POINTER*', adjText(self._frameValues["ip"]))
        newSnippet = newSnippet.replace('*CODE_TAG*', tag)
        newSnippet = newSnippet.replace('*SNIPPET_BUTTON_ID*', buttonID)
        if (self._frameValues["file"] != ""):
            newSnippet = newSnippet.replace('*FILE_NAME_ENTRY*', self._makeFileEntry(frame))
            newSnippet = newSnippet.replace('*DIRECTORY*', adjText(self._frameValues["dir"]))
            newSnippet = newSnippet.replace('*SHORT_DIR*', adjText(self._makeShortDir(self._frameValues["dir"])))
            newSnippet = newSnippet.replace('*LINE_OF_CODE*', adjText(self._frameValues["line"]))
            if(codeIndex != -1):
                # a code preview exists -> wire up the snippet variables
                newSnippet = newSnippet.replace('*CODE_ID_VAR*', "snippet_"+str(self._callStackNumber)+"_code")
                newSnippet = newSnippet.replace('*LANGUAGE*', self.SCM.determineLanguage(adjText(self._frameValues["file"])))
                newSnippet = newSnippet.replace('*FIRST_LINE*', str(self.SCM.getFirstLineOfCodeSnippet(codeIndex)))
            else:
                newSnippet = newSnippet.replace('*CODE_ID_VAR*', "'None'")
        else:
            # frame without file information
            newSnippet = newSnippet.replace('*FILE_NAME_ENTRY*', 'no filename avail.')
            newSnippet = newSnippet.replace('*DIRECTORY*', 'no directory avail.')
            newSnippet = newSnippet.replace('*SHORT_DIR*', 'no directory avail.')
        self._snippets += newSnippet #append referenced code snippet
def _makeShortDir(self, strDir):
elements = None
if "\\" in strDir:
elements = strDir.split("\\")
else:
if "/" in strDir:
elements = strDir.split("/")
if elements != None:
return elements[0] + "/" + elements[1] + "/.../" + elements[-1]
else:
return ""
    def _createCallStack(self, errorEntry, position, outputID):
        """Render one call stack of an error as HTML and return it.

        Also fills self._errorHeading from the first frame and registers
        one code snippet per frame via _createSnippetEntry (which appends
        to self._snippets).

        :param errorEntry: the <error> element
        :param position: which <stack> of the error to render (0 or 1)
        :param outputID: id prefix for the output container elements
        """
        callStack = str()
        stackTemplate = self._htmlTemplates.find('stack_entry').text
        stackArray = errorEntry.findall('stack')
        stack = stackArray[position]
        elementNumber = 0
        frames = stack.findall('frame')
        if frames is None:
            return ""
        for frame in frames:
            self._readFrame(frame) #reads all frame values and fills member var
            # updates frame dir if valid sourceDirectories are given, otherwise returns same value
            newDir = self.SCM.searchSourceDirectories(self._frameValues["dir"], self._frameValues["file"])
            self._frameValues["dir"] = adjText(newDir)
            noPreview = False
            buttonID = "button_" + str(self._errorNumber) + "_" + str(position) + "_" + str(elementNumber)
            strOutputID = outputID+str(position)
            if elementNumber == 0:
                ###make heading for the red box###
                if len(self._errorHeading) == 0:
                    self._errorHeading += "<br> Obj. 1: " + (adjText(self._frameValues["obj"]) + ': "' + adjText(self._frameValues["fn"])) + '" <br> '
                else:
                    self._errorHeading += "Obj. 2: " + (adjText(self._frameValues["obj"]) + ': "' + adjText(self._frameValues["fn"])) + '"'
            #general entries (always available)
            newStackElement = stackTemplate.replace('*STACK_NUMBER*', adjText(hex(elementNumber))+":")
            newStackElement = newStackElement.replace('*SNIPPET_VAR*', ("snippet_" + str(self._callStackNumber)))
            newStackElement = newStackElement.replace('*OUTPUT_ID*', strOutputID)
            newStackElement = newStackElement.replace('*FUNCTION*', adjText(self._frameValues['fn']))
            newStackElement = newStackElement.replace('*BUTTON_ID*', buttonID)
            if (self._frameValues["file"]!= ""): #file is in xml report defined
                codeIndex, tag = self.SCM.handleSourceCode(self._frameValues["file"], self._frameValues["dir"], self._frameValues["line"])
                newStackElement = newStackElement.replace('*FILE*', adjText(self._frameValues["file"]))
                if(codeIndex != -1):
                    newStackElement = newStackElement.replace('*CODE_VAR*', str(codeIndex))
                    newStackElement = newStackElement.replace('*CODE_ID_VAR*', "'snippet_"+str(self._callStackNumber)+"_code'")
                    newStackElement = newStackElement.replace('*LINE_OF_CODE*', adjText(self._frameValues["line"]))
                    newStackElement = newStackElement.replace('*FIRST_LINE*', str(self.SCM.getFirstLineOfCodeSnippet(codeIndex)))
                else: #file is not available on device or file is blacklisted or not whitelisted
                    noPreview = True
            else: #no filepath for file in xml is given
                codeIndex = -1
                tag = self._htmlTemplates.find('no_code_entry').text
                newStackElement = newStackElement.replace('*FILE*', 'no filename avail.')
                noPreview = True
            if noPreview:
                # no code preview -> neutralize the JS variables and grey
                # out the stack button
                newStackElement = newStackElement.replace('*CODE_VAR*', "'None'")
                newStackElement = newStackElement.replace('*CODE_ID_VAR*', "'None'")
                newStackElement = newStackElement.replace('*LINE_OF_CODE*', "'None'")
                newStackElement = newStackElement.replace('*FIRST_LINE*', "'NONE'")
                searchStr = 'class="'
                insertPosition = newStackElement.find(searchStr)+len(searchStr) #insert right after the first class=" attribute
                newStackElement = newStackElement[:insertPosition] + "grey-button " + newStackElement[insertPosition:]
            self._createSnippetEntry(frame, elementNumber, tag, codeIndex, buttonID)
            callStack += newStackElement #append stack element
            elementNumber += 1
            self._callStackNumber += 1 #increase global call stack number (used for reference variables)
        return callStack
    def _makeHistogramm(self, target):
        """Scatter-plot error occurrences over relative execution time and
        save it as PNG into `target`.

        Skips (and clears the plot filename) when the report carries no
        duration.
        """
        errorTimes = dict()
        statusNode = self._reportRoot.findall('status')[1]
        if statusNode.find('duration') is None:
            self._errorTimesPlot = ""
            return
        totalDuration = int(statusNode.find('duration').text)
        errors = self._reportRoot.findall('error')
        for error in errors:
            timePoint = (round(float(100 * int(error.find('timestamp').text) /totalDuration))) #get occurance in %
            if errorTimes.get(timePoint) != None:
                value = errorTimes.pop(timePoint)
                errorTimes.update({timePoint: int(value)+1})
            else:
                errorTimes.update({timePoint: 1})
        x = list(errorTimes.keys())
        y = list(errorTimes.values())
        #make plot
        fig = plt.figure(figsize=(10,4))
        ax = plt.axes()
        ax.scatter(x, y, color='#009999', edgecolor='black')
        xRangeEnd = max(y)+1
        if xRangeEnd < 3: #0, 1, 2 shall be always visible, even if max(y) is only 1
            xRangeEnd = 3
        ax.set_yticks([i for i in range(0, xRangeEnd)])
        ax.set_xticks([i for i in range(0, 110, 10)])
        plt.title('Error occurrences by time',fontfamily="monospace", fontweight='bold')
        plt.ylabel('Occurrences', fontfamily="monospace",fontweight='bold')
        plt.xlabel('Execution of program in %. \n Total execution time = ' + str(totalDuration) + 'ms', fontfamily="monospace",fontweight='bold')
        fig.add_axes(ax)
        figPath = pathlib.Path(target+'/'+self._errorTimesPlot)
        plt.savefig(str(figPath), dpi=300, format='png', bbox_inches='tight', orientation='landscape') # use format='svg' or 'pdf' for vectorial pictures
    def _countTopStackOccurences(self, target):
        """Draw a horizontal bar chart of the five most frequent
        top-of-stack locations (file + function) and save it as PNG into
        `target`.
        """
        topStackOccurences = dict()
        errors = self._reportRoot.findall('error')
        for error in errors:
            stacks = error.findall('stack')
            for i in range(0,2):
                topFrame = stacks[i].find('frame') #returns first element with frame tag
                if(topFrame != None):
                    self._readFrame(topFrame)
                    tmp1 = self._frameValues["file"]
                    tmp2 = self._frameValues["fn"]
                    if(tmp1 != "None" and tmp2 != "None"):
                        if(len(tmp2) > 20): #split function name in half if it is too long
                            tmp2 = tmp2[:len(tmp2)//2] + '\n' + tmp2[len(tmp2)//2:]
                        identifier = tmp1 + ":\n" + tmp2
                        if topStackOccurences.get(identifier) != None:
                            value = topStackOccurences.pop(identifier)
                            topStackOccurences.update({identifier: int(value)+1})
                        else:
                            topStackOccurences.update({identifier: 1})
        #sort dict ascending by occurrence count
        sortedOccurences = sorted(topStackOccurences.items(), key=lambda kv: kv[1])
        x=list()
        y=list()
        for ele in sortedOccurences[-5:]: #append the 5 largest values in ascending order
            if len(ele[0]) < 250:
                x.append(ele[0]) #x values (basically the function names)
            else:
                x.append(ele[0][:250]+". . .")
            y.append(ele[1]) #y values occurrences (bar height)
        #make plot
        fig = plt.figure(figsize=(10,4))
        ax = plt.axes()
        barWidth = 0.9 # the width of the bars
        xLoc = list(range(len(y))) # the x locations for the groups
        ax.barh([loc for loc in xLoc], y, barWidth, color='#009999')
        ax.set_yticks([loc for loc in xLoc])
        ax.set_yticklabels(reversed(['#'+str(rank) for rank in range(1,len(y)+1)]), minor=False)
        legend_lines = [Line2D([0], [0], color='#009999', lw=rank) for rank in range(len(y)+1, 1, -1)]
        ax.legend(legend_lines, reversed(x), loc='center', bbox_to_anchor=(0.5, -0.1*(len(y)+2)))
        plt.title('Top five functions by top of stack occurrences',fontfamily="monospace", fontweight='bold')
        plt.xlabel('No. of top of stack occurrences', fontfamily="monospace",fontweight='bold')
        for i,v in enumerate(y):
            ax.text(v, i, str(v), ha='left',color='black', fontweight='bold')
        fig.add_axes(ax)
        figPath = pathlib.Path(target+'/'+self._topStackGraphFileName)
        plt.savefig(str(figPath), dpi=300, format='png', bbox_inches='tight', orientation='landscape') # use format='svg' or 'pdf' for vectorial pictures
    def _createErrorList(self):
        """Render every <error> element into HTML and accumulate the
        result in self._strErrors; also sets self._numberOfErrors.
        """
        self._strErrors = str()
        errorTemplate = self._htmlTemplates.find('error_entry').text
        errorList = self._reportRoot.findall('error')
        self._numberOfErrors = len(errorList)
        for error in errorList:
            outputID = "output_"+str(self._errorNumber)+"_"
            newError = errorTemplate.replace('*ERROR_ID*', adjText(error.find('unique').text))
            newError = newError.replace('*ERROR_TYPE*', adjText(error.find('kind').text))
            xwhat = error.findall('xwhat')
            errortext1 = xwhat[0].find('text').text
            #fall back to xauxwhat -> valgrind format
            if(len(xwhat) == 1):
                element = error.find('xauxwhat')
                if element != None:
                    errortext2 = element.find('text').text
                else:
                    errortext2 = ""
            else:
                errortext2 = xwhat[1].find('text').text
            newError = newError.replace('*XWHAT_TEXT_1*', adjText(errortext1))
            newError = newError.replace('*XWHAT_TEXT_2*', adjText(errortext2))
            # Resolved Address info (optional block in the report)
            resolvedaddress = error.find('resolvedaddress')
            if resolvedaddress != None:
                raModname = resolvedaddress.find('modname')
                resolvedaddressEntry = "<h5>Resolved Address</h5>" + "<p class='reduced-margin'><b>Module Name: </b>" \
                    + adjText(raModname.text) + "</p>"
                raSymname = resolvedaddress.find('symname')
                if raSymname != None:
                    resolvedaddressEntry = resolvedaddressEntry + "<p class='reduced-margin'><b>Symbol Name: </b>" \
                        + adjText(raSymname.text) + "</p>"
                raFile = resolvedaddress.find('file')
                if raFile != None:
                    raLine = resolvedaddress.find('line')
                    raOffset = resolvedaddress.find('offset')
                    resolvedaddressEntry = resolvedaddressEntry + "<p class='reduced-margin'><b>File: </b>" + adjText(raFile.text) + "</p> <p class='reduced-margin'><b>Line: </b>" \
                        + adjText(raLine.text) + "</p> <p class='reduced-margin'><b>Offset: </b>" + adjText(raOffset.text) + "</p>"
            else:
                resolvedaddressEntry = ""
            newError = newError.replace('*RESOLVED_ADDRESS_ENTRY*', resolvedaddressEntry)
            self._errorHeading = str() #reset errorHeading, will be filled by _createCallStack
            newError = newError.replace('*CALL_STACK_ENTRIES_1*', self._createCallStack(error, 0, outputID))
            if errortext2 != "":
                newError = newError.replace('*CALL_STACK_ENTRIES_2*', self._createCallStack(error, 1, outputID))
            else:
                newError = newError.replace('*CALL_STACK_ENTRIES_2*', "No Callstack Available")
            newError = newError.replace('*OUTPUT_ID_1*', outputID+'0')
            newError = newError.replace('*OUTPUT_ID_2*', outputID+'1')
            newError = newError.replace('*ERROR_HEADING*', self._errorHeading)
            self._errorNumber += 1
            self._strErrors += newError
        self.SCM.searchSourceDirectories.cache_clear()
    def _createHeader(self):
        """Instantiate the base HTML template with the header fields, the
        rendered errors, and (optionally) the matplotlib chart snippet.
        Requires _createErrorList to have run first.
        """
        hasErrors = self._reportRoot.find('error') != None
        headerInformation = self._getHeader()
        self.htmlReport = self._htmlTemplates.find('base_entry').text
        self.htmlReport = self.htmlReport.replace('*DATE*', headerInformation[0])
        self.htmlReport = self.htmlReport.replace('*TIME*', headerInformation[1])
        self.htmlReport = self.htmlReport.replace('*DURATION*', headerInformation[2])
        self.htmlReport = self.htmlReport.replace('*DURATION_UNIT*', headerInformation[3])
        self.htmlReport = self.htmlReport.replace('*ARGS*', headerInformation[4])
        self.htmlReport = self.htmlReport.replace('*EXE*', headerInformation[5])
        self.htmlReport = self.htmlReport.replace('*PROTOCOLVERSION*', headerInformation[6])
        self.htmlReport = self.htmlReport.replace('*PROTOCOLTOOL*', headerInformation[7])
        self.htmlReport = self.htmlReport.replace('*NUMBER_OF_ERRORS*', str(self._numberOfErrors))
        self.htmlReport = self.htmlReport.replace('*ERROR_ENTRIES*', self._strErrors)
        if not noMatplotLib and hasErrors:
            # embed the chart images only when they were actually created
            matplotlib_snippet = self._htmlTemplates.find('matplotlib_entries').text
            matplotlib_snippet = matplotlib_snippet.replace('*TOP_OF_STACK_GRAPH*', self._topStackGraphFileName)
            matplotlib_snippet = matplotlib_snippet.replace('*ERROR_TIMES_PLOT*', self._errorTimesPlot)
            self.htmlReport = self.htmlReport.replace('*MATPLOTLIB_PICTURES*', matplotlib_snippet)
        else:
            self.htmlReport = self.htmlReport.replace('*MATPLOTLIB_PICTURES*', '')
    def _createReport(self):
        """Assemble the final HTML report string into self.htmlReport."""
        self._createErrorList()
        self._createHeader()
        self.htmlReport = self.htmlReport.replace("*SNIPPET_VARIABLES*", self._snippets)
        self.htmlReport = self.SCM.createCodeVars(self.htmlReport)
class SourceCodeManagement:
    """Caches source-file snippets referenced by the report and renders
    them as JavaScript code variables for the HTML output."""
    def __init__(self):
        # entries: [path, line of interest, whole-file-fits-in-snippet flag]
        self._sourcefilelist = list()
        self._htmlTemplatesPath = str(g_HTMLTEMPLATES)
        self._htmlTemplates = (ET.parse(self._htmlTemplatesPath)).getroot()
def _createSourcefileEntry(self, path, line):
#one entry consists of the full file path the line number of interest
sourceFile = open(path, mode='r')
sourceLineList = sourceFile.readlines()
if len(sourceLineList) > NUMBEROFCODELINES:
newElement = [path, int(line), False]
else:
newElement = [path, int(line), True]
self._sourcefilelist.append(newElement)
return self._sourcefilelist.index(newElement)
    def _returnCode(self, fullPath, justExistance, line = 0):
        """Check a source file against the black/white lists and return
        either an existence code or its escaped snippet text.

        :param justExistance: truthy -> return 0 if a snippet is available,
            -1 otherwise; falsy -> return the escaped snippet string.
        :returns: -1 on any failure (missing file, blacklisted, not
            whitelisted, line out of range).
        """
        returnSrc = False
        try: #resolve() may throw in earlier versions (until 3.6), therefore try-catch
            fp = pathlib.Path(fullPath).resolve() #returns absolute path
        except FileNotFoundError:
            return -1
        except OSError: #if path is available, but for any reason not reachable (e.g. locked by bitlocker) OSError is thrown
            return -1
        if fp.is_file():
            for element in SOURCEFILE_BL: #blacklisting routine
                if str(element) in str(fp): #both are absolute paths, so comparing is valid
                    return -1
            if WHITELISTING:
                for element in SOURCEFILE_WL:
                    if str(element) in str(fullPath):
                        returnSrc = True
                        break
                if not returnSrc:
                    return -1
            if justExistance:
                sourceCode = self._getLines(fullPath, line)
                if sourceCode == -1: ##line was not found
                    return -1
                return 0
        else:
            return -1
        #if we are here we want to return the source code
        return adjText(self._getLines(fullPath, line))
def _getLines(self, path, line):
sourceFile = open(path, mode='r')
sourceLineList = sourceFile.readlines()
if len(sourceLineList) < line: #the found file contains less lines than the target (e.g. wrong line number from drace)
return -1
if len(sourceLineList) > NUMBEROFCODELINES:
if line <= NUMBEROFCODELINES//2:
begin = 0
end = NUMBEROFCODELINES
else:
begin = (line - NUMBEROFCODELINES//2) - 1 #-1 because array starts with 0
end = begin + NUMBEROFCODELINES
sourceLineList = sourceLineList[begin:end]
sourceCode = str()
for sourceLine in sourceLineList:
sourceCode += sourceLine
sourceFile.close()
return sourceCode
    def handleSourceCode(self, filename, directory, line):
        """Resolve (or create) the snippet entry for a source location.

        :returns: ('code_<n>', code-entry template) on success, or
            (-1, no-code template) when the file cannot be previewed.
        """
        fullPath = pathlib.Path(directory +'/'+ filename)
        src = self._returnCode(fullPath, 1, int(line))
        if src == -1:
            return -1, self._htmlTemplates.find('no_code_entry').text
        index = -1
        #check if source file is already in the list; reuse an entry when
        #the whole file fits or the line of interest is close enough
        for item in self._sourcefilelist:
            if item[0] == fullPath:
                if item[2] or (int(line) - NUMBEROFCODELINES//10) <= item[1] <= (int(line) + NUMBEROFCODELINES//10):
                    index = self._sourcefilelist.index(item)
        if index == -1:
            index = self._createSourcefileEntry(fullPath, line)
        strIndex = 'code_' + str(index)
        return strIndex, (self._htmlTemplates.find('code_entry').text)
    def createCodeVars(self, report):
        """Substitute *CODE_VARIABLES* in `report` with one JavaScript
        string variable ('code_<n> = "...";') per registered snippet."""
        codeString = str()
        for sourceObject in self._sourcefilelist:
            src = self._returnCode(sourceObject[0], justExistance=0, line = sourceObject[1])
            tmpCode = "code_" + str(self._sourcefilelist.index(sourceObject)) + ' = "' + src + '";\n'
            codeString += tmpCode
        report = report.replace("*CODE_VARIABLES*", codeString)
        return report
def determineLanguage(self, filename):
fileParts = filename.split('.')
if len(fileParts) == 1:
return 'cpp' #files without file endigs are treated as cpp files
else:
ending = fileParts[-1]
if ending == 'c':
return 'c'
elif ending == 'cpp':
return 'cpp'
elif ending == 'h':
return 'cpp'
elif ending == 'cs':
return 'csharp'
elif ending == 'css':
return 'css'
elif ending == 'js':
return 'javascript'
elif ending == 'html':
return 'markup'
else:
return 'cpp'
def getFirstLineOfCodeSnippet(self, index):
codeSnippet = int(index.split("_")[-1]) #index is e.g. code_3
srcObject = self._sourcefilelist[codeSnippet]
if srcObject[2]:
return 1
else:
firstLine = srcObject[1] - NUMBEROFCODELINES//2
return firstLine #srcObject[1] is line of interest of snippet
    @lru_cache(maxsize=100)
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # the instance alive; the cache is cleared explicitly after report
    # creation (see ReportCreator._createErrorList)
    def searchSourceDirectories(self, dir, file):
        """Return a directory that actually contains `file`.

        If `dir`/`file` exists, `dir` is returned unchanged; otherwise the
        user-supplied SOURCE_DIRECTORIES are globbed recursively for `file`
        and the parent of a found instance is returned (falling back to
        `dir` when nothing is found).
        """
        if pathlib.Path(pathlib.Path(dir) / file).is_file():
            # path to file in xml file is valid
            return dir
        else:
            # path to file in xml file is NOT valid
            if not SOURCE_DIRECTORIES:
                # no sourceDirectories args given
                print(f"Cannot find file '{file}' in directory '{dir}'.")
                return dir
            else:
                print(f"Cannot find file '{file}' in directory '{dir}'. Searching through given source directories ...")
                # search in sourceDirectories given from args if applicable
                for customDirPath in SOURCE_DIRECTORIES:
                    customDir = pathlib.Path(customDirPath)
                    fileInstances = customDir.glob(f'**/{file}') # generator for found file instances
                    try:
                        f1 = next(fileInstances)
                        try:
                            f2 = next(fileInstances)
                            # Check if next found file f2 has a parent directory which supersets that of first found file f1
                            if str(f1.resolve().parent) == str(f2.resolve().parent)[:len(str(f1.resolve().parent))]:
                                return str(f2.resolve().parent) # second valid file instance in customDirPath
                            else:
                                return str(f1.resolve().parent) # first valid file instance in customDirPath
                        except StopIteration:
                            # Only one valid file instance found in customDirPath
                            return str(f1.resolve().parent)
                    except StopIteration:
                        # No file instance found in customDirPath element
                        continue
                # Search for file instances in given sourceDirectories failed
                print(f"Cannot find file '{file}' in given source directories.")
                return dir
def adjText(text):
    """Normalise `text` for embedding in the HTML report: backticks become
    single quotes, backslashes become forward slashes, real newlines become
    the two characters '\\n', and HTML-special characters are escaped."""
    normalised = text.replace('`', '\'').replace('\\', '/').replace('\n', '\\n')
    return html.escape(normalised)
def parseArgumentString(fileList, strEntries):
    """Split a comma-separated argument string into resolved pathlib.Path
    objects and append them to `fileList`.

    Backslashes are normalised to forward slashes and surrounding
    whitespace is stripped from each entry. Empty entries (e.g. produced
    by a trailing comma) are skipped; the original character-by-character
    trimming loop raised IndexError on them.
    """
    strEntries = strEntries.replace("\\","/")
    for entry in strEntries.split(','):
        entry = entry.strip()
        if not entry:
            continue
        fileList.append(pathlib.Path(entry).resolve())
    return
def returnDateString():
    """Return the current UTC time formatted as 'YYYYMMDD_HHMM'."""
    return datetime.datetime.utcnow().strftime('%Y%m%d_%H%M')
def main():
    """CLI entry point: parse arguments, generate the HTML report into a
    timestamped output directory, and copy the static assets (css/js,
    legend image) next to it.

    Returns 0 on success, -1 on failure (after removing the empty output
    directory).
    """
    global SOURCEFILE_BL, SOURCEFILE_WL, WHITELISTING, SOURCE_DIRECTORIES, DEBUG
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inputFile", help='define <input_file>', type=str)
    parser.add_argument("-o", "--outputDirectory", help='define <output_directory>', type=str)
    parser.add_argument("-b", "--blacklist", help='add blacklist entries <entry1,entry2 ...>', type=str)
    parser.add_argument("-w", "--whitelist", help='add whitelist entries <entry1,entry2 ...>', type=str)
    parser.add_argument("-s", "--sourceDirectories", help='add source directories <entry1,entry2 ...>', type=str)
    parser.add_argument("--Debug", help='Debug Mode', action="store_true")
    args = parser.parse_args()
    ###args handling
    if args.Debug:
        # Debug mode uses fixed test input/output paths next to the script
        print("Debug Mode is on")
        inFile = pathlib.Path(SCRIPTPATH / 'test_files/test.xml')
        targetDirectory = pathlib.Path(SCRIPTPATH / 'test_files/output')
    else:
        if args.inputFile != None:
            inFile = pathlib.Path(args.inputFile)
        else:
            print("You must specify an input file")
            print()
            parser.print_help()
            sys.exit(-1)
        if not inFile.is_file():
            print("Your input file does not exist")
            parser.print_help()
            sys.exit(-1)
        strDate = returnDateString()
    if not args.Debug:
        # output goes to a timestamped subdirectory
        if args.outputDirectory != None:
            targetDirectory = pathlib.Path(args.outputDirectory+'/drace_report_'+strDate)
        else:
            targetDirectory = pathlib.Path('./drace_report_'+strDate)
    if args.blacklist != None:
        parseArgumentString(SOURCEFILE_BL, args.blacklist)
    if args.whitelist != None:
        parseArgumentString(SOURCEFILE_WL, args.whitelist)
        WHITELISTING = True
    if args.sourceDirectories != None:
        parseArgumentString(SOURCE_DIRECTORIES, args.sourceDirectories)
    #end of args handling
    if not targetDirectory.is_dir():
        targetDirectory.mkdir()
    #report gets generated here
    report = ReportCreator(str(inFile), str(targetDirectory))
    if report.succesfullReportCreation:
        #write report to destination
        output = open(str(targetDirectory)+'/index.html', mode='w')
        output.write(report.htmlReport)
        output.close()
        #copy needed files to destination (replace stale css/js copies)
        cssPath = pathlib.Path(str(targetDirectory)+"/css")
        jsPath = pathlib.Path(str(targetDirectory)+"/js")
        if cssPath.is_dir():
            shutil.rmtree(str(cssPath))
        if jsPath.is_dir():
            shutil.rmtree(str(jsPath))
        shutil.copytree(str(g_CSSPATH.resolve()), str(targetDirectory / "css"))
        shutil.copytree(str(g_JSPATH.resolve()), str(targetDirectory / "js"))
        shutil.copy(str((resourcesPath / 'legend.png').resolve()), str(targetDirectory))
        print("Report creation successful")
        print("Report is at:")
        print(targetDirectory)
        return 0
    else:
        print("Report creation was NOT successful")
        targetDirectory.rmdir()
        return -1
if __name__ == "__main__":
    main()
| 40.711364 | 181 | 0.577514 |
sys
from subprocess import check_call, STDOUT, DEVNULL
from functools import lru_cache
try:
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
noMatplotLib = False
except ImportError:
noMatplotLib = True
print("Matplotlib is not installed.")
if getattr(sys, 'frozen', False):
SCRIPTPATH = pathlib.Path(sys.executable)
SCRIPTPATH = pathlib.Path(SCRIPTPATH / "..")
else :
SCRIPTPATH = pathlib.Path(pathlib.Path(__file__).resolve().parents[0])
if pathlib.Path(SCRIPTPATH / '../resources').is_dir():
resourcesPath = pathlib.Path(SCRIPTPATH / '../resources')
else:
if pathlib.Path(SCRIPTPATH / 'resources').is_dir():
resourcesPath = pathlib.Path(SCRIPTPATH / 'resources')
else:
print("path of resources not found")
sys.exit(-1)
g_HTMLTEMPLATES = resourcesPath / "entries.xml"
g_CSSPATH = resourcesPath / "css"
g_JSPATH = resourcesPath / "js"
DEBUG = False
SOURCEFILE_BL = list()
SOURCEFILE_WL = list()
WHITELISTING = False
NUMBEROFCODELINES = 400
if NUMBEROFCODELINES % 2:
print('Number of maximum of displayed code lines must be even, but is:')
print(str(NUMBEROFCODELINES))
sys.exit(-1)
SOURCE_DIRECTORIES = list()
class ReportCreator:
_htmlTemplatesPath = str(g_HTMLTEMPLATES)
_topStackGraphFileName = 'topStackBarchart.png'
_errorTimesPlot = 'errorTimes.png'
try:
if check_call(['code', '--version'], stdout=DEVNULL, stderr=STDOUT, shell=True) == 0:
_vscodeFlag = True
else:
_vscodeFlag = False
except:
_vscodeFlag = False
def __init__(self, pathOfReport, target):
self.sourcefileList = list()
self._callStackNumber = 0
self._errorNumber = 0
self._snippets = str()
self.succesfullReportCreation = True
try:
self._htmlTemplates = (ET.parse(self._htmlTemplatesPath)).getroot()
except FileNotFoundError:
print("template file is missing")
self.succesfullReportCreation = False
return
self.SCM = SourceCodeManagement()
self._pathOfReport = pathOfReport
if self._inputValidation():
hasErrors = self._reportRoot.find('error') != None
if not noMatplotLib and hasErrors:
self._makeHistogramm(target)
self._countTopStackOccurences(target)
self._createReport()
else:
print("input file is not valid")
self.succesfullReportCreation = False
def _inputValidation(self):
try:
self._reportContent = ET.parse(self._pathOfReport)
except ET.ParseError:
return 0
self._reportRoot = self._reportContent.getroot()
if self._reportRoot.find('protocolversion') != None and \
self._reportRoot.find('protocoltool') != None and \
self._reportRoot.find('preamble') != None and \
self._reportRoot.find('pid') != None and \
self._reportRoot.find('tool') != None and \
self._reportRoot.find('args') != None and \
self._reportRoot.find('status') != None and \
self._reportRoot.tag == 'valgrindoutput':
return 1
else:
return 0
def _getHeader(self):
header = list()
status = self._reportRoot.findall('status')
if len(status) == 2:
status = status[1] time').text
if "T" in strDatetime:
date = strDatetime.split('T')[0]
time = (strDatetime.split('T')[1])[0:-1]
else:
date = ""
time = strDatetime
header.append(adjText(date))
header.append(adjText(time))
if status.find('duration') != None:
header.append(adjText(status.find('duration').text))
header.append(adjText(status.find('duration').get('unit')))
else:
header.append("")
header.append("")
else:
header.append("")
header.append("")
header.append("")
header.append("")
arguments = str()
for arg in self._reportRoot.find('args').find('vargv').findall('arg'):
arguments += arg.text
arguments += ' '
header.append(adjText(arguments[0:-1]))
header.append(adjText(self._reportRoot.find('args').find('argv').find('exe').text))
header.append(adjText(self._reportRoot.find('protocolversion').text))
header.append(adjText(self._reportRoot.find('protocoltool').text))
return header
def _makeFileEntry(self, frame):
strDir = adjText(self._frameValues["dir"])
strFile = adjText(self._frameValues["file"])
strLine = adjText(self._frameValues["line"])
offset = adjText(self._frameValues["offset"])
if self._vscodeFlag:
entry = "<a href='vscode://file/" + strDir + "/" + strFile + ":" + strLine + ":" + offset +"'>"+ strFile +":" + strLine + ":" + offset + "</a>"
else:
entry = "<a href='file://"+ strDir + "/" + strFile + "'>" + strFile + ":" + strLine + "</a>"
return entry
def _readFrame(self, frame):
if frame is None:
self._frameValues = {"obj":"", "fn":"", "ip":"", "dir":"", "file":"", "line":"", "offset":""}
return
obj = frame.find('obj')
if obj is None:
obj = ""
else:
if obj.text is None:
obj = ""
else:
obj = obj.text
fn = frame.find('fn')
if fn is None:
fn = ""
else:
if fn.text is None:
fn = ""
else:
fn = fn.text
ip = frame.find('ip')
if ip is None:
ip = ""
else:
if ip.text is None:
ip = ""
else:
ip = ip.text
direc = frame.find('dir')
if direc is None:
direc = ""
else:
if direc.text is None:
direc = ""
else:
direc = direc.text
filename = frame.find('file')
if filename is None:
filename = ""
else:
if filename.text is None:
filename = ""
else:
filename = filename.text
line = frame.find('line')
if line is None:
line = "0"
else:
if line.text is None:
line = "0"
else:
line = line.text
offset = frame.find('offset')
if offset is None:
offset = "0"
else:
if offset.text is None:
offset = "0"
else:
offset = offset.text
self._frameValues = {"obj":obj, "fn":fn, "ip":ip, "dir":direc, "file":filename, "line":line, "offset":offset}
def _createSnippetEntry(self, frame, elementNumber, tag, codeIndex, buttonID):
newSnippet = self._htmlTemplates.find('snippet_entry').text
newSnippet = newSnippet.replace('*SNIPPET_VAR*', ("snippet_" + str(self._callStackNumber)))
newSnippet = newSnippet.replace('*STACK_NUMBER*', adjText(hex(elementNumber)))
newSnippet = newSnippet.replace('*OBJ*', adjText(self._frameValues["obj"]))
newSnippet = newSnippet.replace('*FUNCTION*', adjText(self._frameValues["fn"]))
newSnippet = newSnippet.replace('*INSTRUCTION_POINTER*', adjText(self._frameValues["ip"]))
newSnippet = newSnippet.replace('*CODE_TAG*', tag)
newSnippet = newSnippet.replace('*SNIPPET_BUTTON_ID*', buttonID)
if (self._frameValues["file"] != ""):
newSnippet = newSnippet.replace('*FILE_NAME_ENTRY*', self._makeFileEntry(frame))
newSnippet = newSnippet.replace('*DIRECTORY*', adjText(self._frameValues["dir"]))
newSnippet = newSnippet.replace('*SHORT_DIR*', adjText(self._makeShortDir(self._frameValues["dir"])))
newSnippet = newSnippet.replace('*LINE_OF_CODE*', adjText(self._frameValues["line"]))
if(codeIndex != -1):
newSnippet = newSnippet.replace('*CODE_ID_VAR*', "snippet_"+str(self._callStackNumber)+"_code")
newSnippet = newSnippet.replace('*LANGUAGE*', self.SCM.determineLanguage(adjText(self._frameValues["file"])))
newSnippet = newSnippet.replace('*FIRST_LINE*', str(self.SCM.getFirstLineOfCodeSnippet(codeIndex)))
else:
newSnippet = newSnippet.replace('*CODE_ID_VAR*', "'None'")
else:
newSnippet = newSnippet.replace('*FILE_NAME_ENTRY*', 'no filename avail.')
newSnippet = newSnippet.replace('*DIRECTORY*', 'no directory avail.')
newSnippet = newSnippet.replace('*SHORT_DIR*', 'no directory avail.')
self._snippets += newSnippet
def _makeShortDir(self, strDir):
elements = None
if "\\" in strDir:
elements = strDir.split("\\")
else:
if "/" in strDir:
elements = strDir.split("/")
if elements != None:
return elements[0] + "/" + elements[1] + "/.../" + elements[-1]
else:
return ""
def _createCallStack(self, errorEntry, position, outputID):
callStack = str()
stackTemplate = self._htmlTemplates.find('stack_entry').text
stackArray = errorEntry.findall('stack')
stack = stackArray[position]
elementNumber = 0
frames = stack.findall('frame')
if frames is None:
return ""
for frame in frames:
self._readFrame(frame)
newDir = self.SCM.searchSourceDirectories(self._frameValues["dir"], self._frameValues["file"])
self._frameValues["dir"] = adjText(newDir)
noPreview = False
buttonID = "button_" + str(self._errorNumber) + "_" + str(position) + "_" + str(elementNumber)
strOutputID = outputID+str(position)
if elementNumber == 0:
errorHeading += "<br> Obj. 1: " + (adjText(self._frameValues["obj"]) + ': "' + adjText(self._frameValues["fn"])) + '" <br> '
else:
self._errorHeading += "Obj. 2: " + (adjText(self._frameValues["obj"]) + ': "' + adjText(self._frameValues["fn"])) + '"'
newStackElement = stackTemplate.replace('*STACK_NUMBER*', adjText(hex(elementNumber))+":")
newStackElement = newStackElement.replace('*SNIPPET_VAR*', ("snippet_" + str(self._callStackNumber)))
newStackElement = newStackElement.replace('*OUTPUT_ID*', strOutputID)
newStackElement = newStackElement.replace('*FUNCTION*', adjText(self._frameValues['fn']))
newStackElement = newStackElement.replace('*BUTTON_ID*', buttonID)
if (self._frameValues["file"]!= ""):
codeIndex, tag = self.SCM.handleSourceCode(self._frameValues["file"], self._frameValues["dir"], self._frameValues["line"])
newStackElement = newStackElement.replace('*FILE*', adjText(self._frameValues["file"]))
if(codeIndex != -1):
newStackElement = newStackElement.replace('*CODE_VAR*', str(codeIndex))
newStackElement = newStackElement.replace('*CODE_ID_VAR*', "'snippet_"+str(self._callStackNumber)+"_code'")
newStackElement = newStackElement.replace('*LINE_OF_CODE*', adjText(self._frameValues["line"]))
newStackElement = newStackElement.replace('*FIRST_LINE*', str(self.SCM.getFirstLineOfCodeSnippet(codeIndex)))
else:
noPreview = True
else:
codeIndex = -1
tag = self._htmlTemplates.find('no_code_entry').text
newStackElement = newStackElement.replace('*FILE*', 'no filename avail.')
noPreview = True
if noPreview:
newStackElement = newStackElement.replace('*CODE_VAR*', "'None'")
newStackElement = newStackElement.replace('*CODE_ID_VAR*', "'None'")
newStackElement = newStackElement.replace('*LINE_OF_CODE*', "'None'")
newStackElement = newStackElement.replace('*FIRST_LINE*', "'NONE'")
searchStr = 'class="'
insertPosition = newStackElement.find(searchStr)+len(searchStr) #to add the ".grey" class the position before after class
#insertPosition += newStackElement[insertPosition:].find('"')
newStackElement = newStackElement[:insertPosition] + "grey-button " + newStackElement[insertPosition:]
self._createSnippetEntry(frame, elementNumber, tag, codeIndex, buttonID)
callStack += newStackElement
elementNumber += 1
self._callStackNumber += 1
return callStack
def _makeHistogramm(self, target):
errorTimes = dict()
statusNode = self._reportRoot.findall('status')[1]
if statusNode.find('duration') is None:
self._errorTimesPlot = ""
return
totalDuration = int(statusNode.find('duration').text)
errors = self._reportRoot.findall('error')
for error in errors:
timePoint = (round(float(100 * int(error.find('timestamp').text) /totalDuration)))
if errorTimes.get(timePoint) != None:
value = errorTimes.pop(timePoint)
errorTimes.update({timePoint: int(value)+1})
else:
errorTimes.update({timePoint: 1})
x = list(errorTimes.keys())
y = list(errorTimes.values())
fig = plt.figure(figsize=(10,4))
ax = plt.axes()
ax.scatter(x, y, color='#009999', edgecolor='black')
xRangeEnd = max(y)+1
if xRangeEnd < 3:
xRangeEnd = 3
ax.set_yticks([i for i in range(0, xRangeEnd)])
ax.set_xticks([i for i in range(0, 110, 10)])
plt.title('Error occurrences by time',fontfamily="monospace", fontweight='bold')
plt.ylabel('Occurrences', fontfamily="monospace",fontweight='bold')
plt.xlabel('Execution of program in %. \n Total execution time = ' + str(totalDuration) + 'ms', fontfamily="monospace",fontweight='bold')
fig.add_axes(ax)
figPath = pathlib.Path(target+'/'+self._errorTimesPlot)
plt.savefig(str(figPath), dpi=300, format='png', bbox_inches='tight', orientation='landscape')
def _countTopStackOccurences(self, target):
topStackOccurences = dict()
errors = self._reportRoot.findall('error')
for error in errors:
stacks = error.findall('stack')
for i in range(0,2):
topFrame = stacks[i].find('frame')
if(topFrame != None):
self._readFrame(topFrame)
tmp1 = self._frameValues["file"]
tmp2 = self._frameValues["fn"]
if(tmp1 != "None" and tmp2 != "None"):
if(len(tmp2) > 20):
tmp2 = tmp2[:len(tmp2)//2] + '\n' + tmp2[len(tmp2)//2:]
identifier = tmp1 + ":\n" + tmp2
if topStackOccurences.get(identifier) != None:
value = topStackOccurences.pop(identifier)
topStackOccurences.update({identifier: int(value)+1})
else:
topStackOccurences.update({identifier: 1})
sortedOccurences = sorted(topStackOccurences.items(), key=lambda kv: kv[1])
x=list()
y=list()
for ele in sortedOccurences[-5:]:
if len(ele[0]) < 250:
x.append(ele[0])
else:
x.append(ele[0][:250]+". . .")
y.append(ele[1])
fig = plt.figure(figsize=(10,4))
ax = plt.axes()
barWidth = 0.9
xLoc = list(range(len(y)))
ax.barh([loc for loc in xLoc], y, barWidth, color='#009999')
ax.set_yticks([loc for loc in xLoc])
ax.set_yticklabels(reversed(['#'+str(rank) for rank in range(1,len(y)+1)]), minor=False)
legend_lines = [Line2D([0], [0], color='#009999', lw=rank) for rank in range(len(y)+1, 1, -1)]
ax.legend(legend_lines, reversed(x), loc='center', bbox_to_anchor=(0.5, -0.1*(len(y)+2)))
plt.title('Top five functions by top of stack occurrences',fontfamily="monospace", fontweight='bold')
plt.xlabel('No. of top of stack occurrences', fontfamily="monospace",fontweight='bold')
for i,v in enumerate(y):
ax.text(v, i, str(v), ha='left',color='black', fontweight='bold')
fig.add_axes(ax)
figPath = pathlib.Path(target+'/'+self._topStackGraphFileName)
plt.savefig(str(figPath), dpi=300, format='png', bbox_inches='tight', orientation='landscape')
    def _createErrorList(self):
        """Render every <error> element of the report into HTML.

        Populates self._strErrors with the concatenated error entries and
        self._numberOfErrors with the error count. self._errorNumber and
        self._errorHeading are updated as a side effect of the per-error
        call-stack rendering, so the statement order below matters.
        """
        self._strErrors = str()
        errorTemplate = self._htmlTemplates.find('error_entry').text
        errorList = self._reportRoot.findall('error')
        self._numberOfErrors = len(errorList)
        for error in errorList:
            outputID = "output_"+str(self._errorNumber)+"_"
            newError = errorTemplate.replace('*ERROR_ID*', adjText(error.find('unique').text))
            newError = newError.replace('*ERROR_TYPE*', adjText(error.find('kind').text))
            # an error normally carries two <xwhat> texts; fall back to
            # <xauxwhat> (or empty) when only one is present
            xwhat = error.findall('xwhat')
            errortext1 = xwhat[0].find('text').text
            if(len(xwhat) == 1):
                element = error.find('xauxwhat')
                if element != None:
                    errortext2 = element.find('text').text
                else:
                    errortext2 = ""
            else:
                errortext2 = xwhat[1].find('text').text
            newError = newError.replace('*XWHAT_TEXT_1*', adjText(errortext1))
            newError = newError.replace('*XWHAT_TEXT_2*', adjText(errortext2))
            # optional <resolvedaddress> block (module / symbol / file info)
            resolvedaddress = error.find('resolvedaddress')
            if resolvedaddress != None:
                raModname = resolvedaddress.find('modname')
                resolvedaddressEntry = "<h5>Resolved Address</h5>" + "<p class='reduced-margin'><b>Module Name: </b>" \
                    + adjText(raModname.text) + "</p>"
                raSymname = resolvedaddress.find('symname')
                if raSymname != None:
                    resolvedaddressEntry = resolvedaddressEntry + "<p class='reduced-margin'><b>Symbol Name: </b>" \
                        + adjText(raSymname.text) + "</p>"
                raFile = resolvedaddress.find('file')
                if raFile != None:
                    raLine = resolvedaddress.find('line')
                    raOffset = resolvedaddress.find('offset')
                    resolvedaddressEntry = resolvedaddressEntry + "<p class='reduced-margin'><b>File: </b>" + adjText(raFile.text) + "</p> <p class='reduced-margin'><b>Line: </b>" \
                        + adjText(raLine.text) + "</p> <p class='reduced-margin'><b>Offset: </b>" + adjText(raOffset.text) + "</p>"
            else:
                resolvedaddressEntry = ""
            newError = newError.replace('*RESOLVED_ADDRESS_ENTRY*', resolvedaddressEntry)
            # _createCallStack extends self._errorHeading, so reset it before
            # the first stack of every error is rendered
            self._errorHeading = str()
            newError = newError.replace('*CALL_STACK_ENTRIES_1*', self._createCallStack(error, 0, outputID))
            if errortext2 != "":
                newError = newError.replace('*CALL_STACK_ENTRIES_2*', self._createCallStack(error, 1, outputID))
            else:
                newError = newError.replace('*CALL_STACK_ENTRIES_2*', "No Callstack Available")
            newError = newError.replace('*OUTPUT_ID_1*', outputID+'0')
            newError = newError.replace('*OUTPUT_ID_2*', outputID+'1')
            newError = newError.replace('*ERROR_HEADING*', self._errorHeading)
            self._errorNumber += 1
            self._strErrors += newError
        # drop the per-report directory-search cache so stale resolutions do
        # not leak into a subsequent report (method is lru_cache-decorated)
        self.SCM.searchSourceDirectories.cache_clear()
def _createHeader(self):
hasErrors = self._reportRoot.find('error') != None
headerInformation = self._getHeader()
self.htmlReport = self._htmlTemplates.find('base_entry').text
self.htmlReport = self.htmlReport.replace('*DATE*', headerInformation[0])
self.htmlReport = self.htmlReport.replace('*TIME*', headerInformation[1])
self.htmlReport = self.htmlReport.replace('*DURATION*', headerInformation[2])
self.htmlReport = self.htmlReport.replace('*DURATION_UNIT*', headerInformation[3])
self.htmlReport = self.htmlReport.replace('*ARGS*', headerInformation[4])
self.htmlReport = self.htmlReport.replace('*EXE*', headerInformation[5])
self.htmlReport = self.htmlReport.replace('*PROTOCOLVERSION*', headerInformation[6])
self.htmlReport = self.htmlReport.replace('*PROTOCOLTOOL*', headerInformation[7])
self.htmlReport = self.htmlReport.replace('*NUMBER_OF_ERRORS*', str(self._numberOfErrors))
self.htmlReport = self.htmlReport.replace('*ERROR_ENTRIES*', self._strErrors)
if not noMatplotLib and hasErrors:
matplotlib_snippet = self._htmlTemplates.find('matplotlib_entries').text
matplotlib_snippet = matplotlib_snippet.replace('*TOP_OF_STACK_GRAPH*', self._topStackGraphFileName)
matplotlib_snippet = matplotlib_snippet.replace('*ERROR_TIMES_PLOT*', self._errorTimesPlot)
self.htmlReport = self.htmlReport.replace('*MATPLOTLIB_PICTURES*', matplotlib_snippet)
else:
self.htmlReport = self.htmlReport.replace('*MATPLOTLIB_PICTURES*', '')
    def _createReport(self):
        """Assemble the final HTML report.

        Order matters: the error list populates the counters and snippet
        variables that the header step and the snippet substitution consume.
        """
        self._createErrorList()
        self._createHeader()
        self.htmlReport = self.htmlReport.replace("*SNIPPET_VARIABLES*", self._snippets)
        self.htmlReport = self.SCM.createCodeVars(self.htmlReport)
class SourceCodeManagement:
    """Caches source-code excerpts referenced by the report and renders them
    as JavaScript variables for the HTML output.

    Each cache entry in self._sourcefilelist is a list
    [path, line, wholeFileFitsFlag] where the flag records whether the whole
    file fits into a single snippet (<= NUMBEROFCODELINES lines).
    """
    def __init__(self):
        self._sourcefilelist = list()
        self._htmlTemplatesPath = str(g_HTMLTEMPLATES)
        self._htmlTemplates = (ET.parse(self._htmlTemplatesPath)).getroot()

    def _createSourcefileEntry(self, path, line):
        """Register *path*/*line* in the snippet cache and return its index."""
        # BUGFIX: use a context manager - the file handle used to be leaked
        with open(path, mode='r') as sourceFile:
            sourceLineList = sourceFile.readlines()
        if len(sourceLineList) > NUMBEROFCODELINES:
            newElement = [path, int(line), False]
        else:
            newElement = [path, int(line), True]
        self._sourcefilelist.append(newElement)
        return self._sourcefilelist.index(newElement)

    def _returnCode(self, fullPath, justExistance, line = 0):
        """Return the (HTML-adjusted) code snippet for *fullPath*, or -1.

        With *justExistance* truthy only availability is checked and 0 (ok)
        or -1 (missing / blacklisted / not whitelisted) is returned.
        """
        returnSrc = False
        try:
            fp = pathlib.Path(fullPath).resolve()
        except FileNotFoundError:
            return -1
        except OSError:
            return -1
        if fp.is_file():
            # blacklist wins over everything else
            for element in SOURCEFILE_BL:
                if str(element) in str(fp):
                    return -1
            if WHITELISTING:
                for element in SOURCEFILE_WL:
                    if str(element) in str(fullPath):
                        returnSrc = True
                        break
                if not returnSrc:
                    return -1
            if justExistance:
                sourceCode = self._getLines(fullPath, line)
                if sourceCode == -1: return -1
                return 0
        else:
            return -1
        return adjText(self._getLines(fullPath, line))

    def _getLines(self, path, line):
        """Return up to NUMBEROFCODELINES lines around *line* of *path* as a
        single string, or -1 when the file has fewer than *line* lines."""
        # BUGFIX: context manager - the handle was leaked on the early
        # -1 return path before
        with open(path, mode='r') as sourceFile:
            sourceLineList = sourceFile.readlines()
        if len(sourceLineList) < line:
            return -1
        if len(sourceLineList) > NUMBEROFCODELINES:
            # centre the excerpt around the requested line
            if line <= NUMBEROFCODELINES//2:
                begin = 0
                end = NUMBEROFCODELINES
            else:
                begin = (line - NUMBEROFCODELINES//2) - 1
                end = begin + NUMBEROFCODELINES
            sourceLineList = sourceLineList[begin:end]
        return ''.join(sourceLineList)

    def handleSourceCode(self, filename, directory, line):
        """Make sure the snippet for filename:line is cached.

        Returns ('code_N', code-entry template) on success or
        (-1, no-code-entry template) when the source is unavailable.
        """
        fullPath = pathlib.Path(directory +'/'+ filename)
        src = self._returnCode(fullPath, 1, int(line))
        if src == -1:
            return -1, self._htmlTemplates.find('no_code_entry').text
        # reuse an existing cache entry when the whole file is cached or the
        # requested line lies inside the cached excerpt's window
        index = -1
        for item in self._sourcefilelist:
            if item[0] == fullPath:
                if item[2] or (int(line) - NUMBEROFCODELINES//10) <= item[1] <= (int(line) + NUMBEROFCODELINES//10):
                    index = self._sourcefilelist.index(item)
        if index == -1:
            index = self._createSourcefileEntry(fullPath, line)
        strIndex = 'code_' + str(index)
        return strIndex, (self._htmlTemplates.find('code_entry').text)

    def createCodeVars(self, report):
        """Substitute *CODE_VARIABLES* in *report* with one JS string
        variable ('code_N = "...";') per cached snippet.

        NOTE(review): _returnCode may yield -1 here if a file vanished
        between passes, which would raise on the string concatenation -
        confirm whether that race is acceptable.
        """
        codeString = str()
        for sourceObject in self._sourcefilelist:
            src = self._returnCode(sourceObject[0], justExistance=0, line = sourceObject[1])
            tmpCode = "code_" + str(self._sourcefilelist.index(sourceObject)) + ' = "' + src + '";\n'
            codeString += tmpCode
        report = report.replace("*CODE_VARIABLES*", codeString)
        return report

    def determineLanguage(self, filename):
        """Map a file extension to a prism.js language identifier; unknown
        or missing extensions default to 'cpp'."""
        languageByExtension = {
            'c': 'c', 'cpp': 'cpp', 'h': 'cpp', 'cs': 'csharp',
            'css': 'css', 'js': 'javascript', 'html': 'markup',
        }
        fileParts = filename.split('.')
        if len(fileParts) == 1:
            return 'cpp'
        return languageByExtension.get(fileParts[-1], 'cpp')

    def getFirstLineOfCodeSnippet(self, index):
        """Return the 1-based first line number of cached snippet
        *index* ('code_N')."""
        codeSnippet = int(index.split("_")[-1])
        srcObject = self._sourcefilelist[codeSnippet]
        if srcObject[2]:
            # whole file cached -> snippet starts at line 1
            return 1
        else:
            firstLine = srcObject[1] - NUMBEROFCODELINES//2
            return firstLine

    # NOTE(review): lru_cache on an instance method keeps 'self' alive for
    # the cache's lifetime; the report code calls cache_clear() per report.
    @lru_cache(maxsize=100)
    def searchSourceDirectories(self, dir, file):
        """Return a directory that actually contains *file*.

        If file exists under *dir* that directory is returned unchanged;
        otherwise the configured SOURCE_DIRECTORIES are globbed recursively.
        """
        if pathlib.Path(pathlib.Path(dir) / file).is_file():
            return dir
        else:
            if not SOURCE_DIRECTORIES:
                print(f"Cannot find file '{file}' in directory '{dir}'.")
                return dir
            else:
                print(f"Cannot find file '{file}' in directory '{dir}'. Searching through given source directories ...")
                for customDirPath in SOURCE_DIRECTORIES:
                    customDir = pathlib.Path(customDirPath)
                    fileInstances = customDir.glob(f'**/{file}')
                    try:
                        f1 = next(fileInstances)
                        try:
                            f2 = next(fileInstances)
                            # NOTE(review): when the first hit's parent is a
                            # prefix of the second's, the deeper hit wins -
                            # presumably to pick the more specific match.
                            if str(f1.resolve().parent) == str(f2.resolve().parent)[:len(str(f1.resolve().parent))]:
                                return str(f2.resolve().parent)
                            else:
                                return str(f1.resolve().parent)
                        except StopIteration:
                            return str(f1.resolve().parent)
                    except StopIteration:
                        continue
                print(f"Cannot find file '{file}' in given source directories.")
                return dir
def adjText(text):
    """HTML-escape *text* after normalising it for display: backticks become
    apostrophes, backslashes become forward slashes and newlines are turned
    into the literal two-character sequence '\\n' (safe for embedding in the
    generated JavaScript strings)."""
    normalised = text.translate(str.maketrans({'`': "'", '\\': '/', '\n': '\\n'}))
    return html.escape(normalised)
def parseArgumentString(fileList, strEntries):
    """Split the comma-separated *strEntries*, resolve each entry to an
    absolute pathlib.Path and append it to *fileList* (mutated in place).

    Backslashes are normalised to forward slashes first; spaces around each
    entry are ignored.
    """
    strEntries = strEntries.replace("\\", "/")
    for entry in strEntries.split(','):
        # BUGFIX: str.strip replaces the old while-loop trimming, which
        # raised IndexError on empty or whitespace-only entries
        entry = entry.strip(' ')
        if not entry:
            # skip empty entries (e.g. trailing comma) instead of resolving
            # '' to the current working directory
            continue
        newObject = pathlib.Path(entry)
        newObject = newObject.resolve()
        fileList.append(newObject)
    return
def returnDateString():
    """Return the current UTC time formatted as 'YYYYMMDD_HHMM' (used to
    build unique report directory names)."""
    # datetime.utcnow() is deprecated since Python 3.12; an aware UTC
    # datetime produces the identical strftime output
    date = datetime.datetime.now(datetime.timezone.utc)
    return date.strftime('%Y%m%d_%H%M')
def main():
    """Command line entry point: parse the arguments, create the report
    directory, generate the HTML report and copy the static assets (css/js,
    legend image) next to it.

    Returns 0 on success and -1 on failure; note that the __main__ guard
    discards this value, so the process exit code is always 0.
    """
    global SOURCEFILE_BL, SOURCEFILE_WL, WHITELISTING, SOURCE_DIRECTORIES, DEBUG
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inputFile", help='define <input_file>', type=str)
    parser.add_argument("-o", "--outputDirectory", help='define <output_directory>', type=str)
    parser.add_argument("-b", "--blacklist", help='add blacklist entries <entry1,entry2 ...>', type=str)
    parser.add_argument("-w", "--whitelist", help='add whitelist entries <entry1,entry2 ...>', type=str)
    parser.add_argument("-s", "--sourceDirectories", help='add source directories <entry1,entry2 ...>', type=str)
    parser.add_argument("--Debug", help='Debug Mode', action="store_true")
    args = parser.parse_args()
    ### args handling: Debug mode uses fixed test fixtures, otherwise the
    ### input file is mandatory and must exist
    if args.Debug:
        print("Debug Mode is on")
        inFile = pathlib.Path(SCRIPTPATH / 'test_files/test.xml')
        targetDirectory = pathlib.Path(SCRIPTPATH / 'test_files/output')
    else:
        if args.inputFile != None:
            inFile = pathlib.Path(args.inputFile)
        else:
            print("You must specify an input file")
            print()
            parser.print_help()
            sys.exit(-1)
        if not inFile.is_file():
            print("Your input file does not exist")
            parser.print_help()
            sys.exit(-1)
    strDate = returnDateString()
    # outside Debug mode the output directory gets a unique timestamp suffix
    if not args.Debug:
        if args.outputDirectory != None:
            targetDirectory = pathlib.Path(args.outputDirectory+'/drace_report_'+strDate)
        else:
            targetDirectory = pathlib.Path('./drace_report_'+strDate)
    if args.blacklist != None:
        parseArgumentString(SOURCEFILE_BL, args.blacklist)
    if args.whitelist != None:
        parseArgumentString(SOURCEFILE_WL, args.whitelist)
        WHITELISTING = True
    if args.sourceDirectories != None:
        parseArgumentString(SOURCE_DIRECTORIES, args.sourceDirectories)
    #end of args handling
    if not targetDirectory.is_dir():
        # NOTE(review): mkdir() without parents=True assumes the parent
        # directory already exists - confirm for nested -o paths
        targetDirectory.mkdir()
    #report gets generated here
    report = ReportCreator(str(inFile), str(targetDirectory))
    if report.succesfullReportCreation:
        #write report to destination
        output = open(str(targetDirectory)+'/index.html', mode='w')
        output.write(report.htmlReport)
        output.close()
        #copy needed files to destination (replace any stale copies first)
        cssPath = pathlib.Path(str(targetDirectory)+"/css")
        jsPath = pathlib.Path(str(targetDirectory)+"/js")
        if cssPath.is_dir():
            shutil.rmtree(str(cssPath))
        if jsPath.is_dir():
            shutil.rmtree(str(jsPath))
        shutil.copytree(str(g_CSSPATH.resolve()), str(targetDirectory / "css"))
        shutil.copytree(str(g_JSPATH.resolve()), str(targetDirectory / "js"))
        shutil.copy(str((resourcesPath / 'legend.png').resolve()), str(targetDirectory))
        print("Report creation successful")
        print("Report is at:")
        print(targetDirectory)
        return 0
    else:
        print("Report creation was NOT successful")
        targetDirectory.rmdir()
        return -1
# Script entry point. NOTE(review): main()'s 0/-1 result is discarded here,
# so the process exit code is always 0; use sys.exit(main()) if shell
# consumers depend on the status.
if __name__ == "__main__":
    main()
| true | true |
f72995c0706428d76b90433f3ac5c63e2b41e814 | 3,104 | py | Python | setup.py | PhTrempe/pytest | 47200c141a78f06e5d61e183f61c41ef464283ef | [
"MIT"
] | null | null | null | setup.py | PhTrempe/pytest | 47200c141a78f06e5d61e183f61c41ef464283ef | [
"MIT"
] | 1 | 2017-12-25T20:47:50.000Z | 2017-12-25T20:47:50.000Z | setup.py | PhTrempe/pytest | 47200c141a78f06e5d61e183f61c41ef464283ef | [
"MIT"
] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Resolve paths relative to this setup.py so builds work from any CWD.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    # NOTE(review): 'pytest' is the name of a well-known existing PyPI
    # package - confirm this sample is never uploaded under this name.
    name='pytest',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.0.0.dev1',
    description='A sample Python project',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/phtrempe/pytest',
    # Author details
    author='Philippe Trempe',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 1 - Planning',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        # NOTE(review): 2.x and <=3.5 are end-of-life - confirm whether these
        # classifiers are still intended.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # What does your project relate to?
    keywords='pytest sample python project setup setuptools development',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['numpy'],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'pytest=pytest:main',
        ],
    },
)
| 33.73913 | 79 | 0.670103 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytest',
version='0.0.0.dev1',
description='A sample Python project',
long_description=long_description,
url='https://github.com/phtrempe/pytest',
# Author details
author='Philippe Trempe',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 1 - Planning',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='pytest sample python project setup setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
install_requires=['numpy'],
entry_points={
'console_scripts': [
'pytest=pytest:main',
],
},
)
| true | true |
f729968b5fa200fe31945c3588835dee308235ac | 343 | py | Python | app/forms.py | budiryan/ScholarsNet | b6a9f3830c390a4420e361752f0187d8f955acfe | [
"MIT"
] | 9 | 2017-06-08T12:05:03.000Z | 2021-11-08T12:19:46.000Z | app/forms.py | budiryan/ScholarsNet | b6a9f3830c390a4420e361752f0187d8f955acfe | [
"MIT"
] | null | null | null | app/forms.py | budiryan/ScholarsNet | b6a9f3830c390a4420e361752f0187d8f955acfe | [
"MIT"
] | null | null | null | from wtforms import Form, TextField, SelectField
from wtforms.validators import DataRequired
class QueryForm(Form):
    """Search form: a free-text query plus a category selector."""
    # NOTE(review): wtforms' TextField is a deprecated alias of StringField
    # and was removed in WTForms 3.x - confirm the pinned wtforms version.
    search_query = TextField('', validators=[DataRequired()], render_kw={"placeholder": "Your query here"})
    # (value, label) pairs; 'pa' searches both papers and authors
    search_category = SelectField('Search for', choices=[('pa', 'Paper / Author'), ('p', 'Paper'), ('a', 'Author')])
| 42.875 | 116 | 0.705539 | from wtforms import Form, TextField, SelectField
from wtforms.validators import DataRequired
class QueryForm(Form):
search_query = TextField('', validators=[DataRequired()], render_kw={"placeholder": "Your query here"})
search_category = SelectField('Search for', choices=[('pa', 'Paper / Author'), ('p', 'Paper'), ('a', 'Author')])
| true | true |
f72996a5cdda64f19e82fe2f13168ab10ac0eae9 | 1,161 | py | Python | test/functional/rpc_deprecated.py | republic-productions/finalcoin | 7c0f335ded1e5c662034c822ca2c474b8e62778f | [
"MIT"
] | null | null | null | test/functional/rpc_deprecated.py | republic-productions/finalcoin | 7c0f335ded1e5c662034c822ca2c474b8e62778f | [
"MIT"
] | null | null | null | test/functional/rpc_deprecated.py | republic-productions/finalcoin | 7c0f335ded1e5c662034c822ca2c474b8e62778f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Finalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import FinalcoinTestFramework
class DeprecatedRpcTest(FinalcoinTestFramework):
    """Functional-test scaffold for RPC methods gated behind -deprecatedrpc."""
    def set_test_params(self):
        # node 0: default flags; node 1: re-enables the deprecated bumpfee RPC
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[], ['-deprecatedrpc=bumpfee']]
    def run_test(self):
        # This test should be used to verify correct behaviour of deprecated
        # RPC methods with and without the -deprecatedrpc flags. For example:
        #
        # In set_test_params:
        #     self.extra_args = [[], ["-deprecatedrpc=generate"]]
        #
        # In run_test:
        #     self.log.info("Test generate RPC")
        #     assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1)
        #     self.generate(self.nodes[1], 1)
        self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
| 38.7 | 117 | 0.683032 |
from test_framework.test_framework import FinalcoinTestFramework
class DeprecatedRpcTest(FinalcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ['-deprecatedrpc=bumpfee']]
def run_test(self):
self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
| true | true |
f72997edfbcf28fc6d5fa7753ddb6011179db888 | 13,463 | py | Python | samples/openapi3/client/petstore/python/petstore_api/model/parent_pet.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 1 | 2022-01-03T04:40:07.000Z | 2022-01-03T04:40:07.000Z | samples/openapi3/client/petstore/python/petstore_api/model/parent_pet.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 28 | 2021-04-07T07:38:36.000Z | 2022-03-31T03:10:56.000Z | samples/openapi3/client/petstore/python/petstore_api/model/parent_pet.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 2 | 2021-11-03T10:07:15.000Z | 2021-12-17T13:00:53.000Z | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
def lazy_import():
    """Deferred import of the composed-schema model classes, registered into
    this module's globals - breaks the circular import between generated
    OpenAPI models (standard openapi-generator pattern)."""
    from petstore_api.model.child_cat import ChildCat
    from petstore_api.model.grandparent_animal import GrandparentAnimal
    globals()['ChildCat'] = ChildCat
    globals()['GrandparentAnimal'] = GrandparentAnimal
class ParentPet(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'pet_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'ChildCat': ChildCat,
}
if not val:
return None
return {'pet_type': val}
attribute_map = {
'pet_type': 'pet_type', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ParentPet - a model defined in OpenAPI
Keyword Args:
pet_type (str):
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ParentPet - a model defined in OpenAPI
Keyword Args:
pet_type (str):
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
GrandparentAnimal,
],
'oneOf': [
],
}
| 42.203762 | 174 | 0.581668 |
import re
import sys
from petstore_api.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
def lazy_import():
from petstore_api.model.child_cat import ChildCat
from petstore_api.model.grandparent_animal import GrandparentAnimal
globals()['ChildCat'] = ChildCat
globals()['GrandparentAnimal'] = GrandparentAnimal
class ParentPet(ModelComposed):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'pet_type': (str,),
}
@cached_property
def discriminator():
lazy_import()
val = {
'ChildCat': ChildCat,
}
if not val:
return None
return {'pet_type': val}
attribute_map = {
'pet_type': 'pet_type',
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
GrandparentAnimal,
],
'oneOf': [
],
}
| true | true |
f72997f84b0fae8cdf9d5355a151756c3d7c8425 | 1,530 | py | Python | test/test_sqlitedriver.py | ahplummer/jutestring | 2cb1dfda0152cae4c94be55587f34480518218ca | [
"MIT"
] | 1 | 2019-05-11T20:03:44.000Z | 2019-05-11T20:03:44.000Z | test/test_sqlitedriver.py | ahplummer/jutestring | 2cb1dfda0152cae4c94be55587f34480518218ca | [
"MIT"
] | null | null | null | test/test_sqlitedriver.py | ahplummer/jutestring | 2cb1dfda0152cae4c94be55587f34480518218ca | [
"MIT"
] | null | null | null | import pytest, sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../src/')
from sqlitedriver import sqliteclass
dbname = 'test.db'
shorturl = "shortdata"
longurl = "longdata"
@pytest.fixture(scope='module')
def resource_setup(request):
print('Setting up resources for testing')
if os.path.exists(dbname):
os.remove(dbname)
def resource_teardown():
print('Tearing down resources from testing')
if os.path.exists(dbname):
os.remove(dbname)
request.addfinalizer(resource_teardown)
def test_createTable(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.createTable()
assert None == error
assert True == result
def test_insertData(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.insertData(shorturl, longurl)
assert None == error
assert True == result
def test_readShortUrl(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.readShortUrl(longurl)
assert None == error
assert shorturl == result
result, error = sqlitedriver.readShortUrl("blah")
assert None == error
assert None == result
def test_deleteShortUrl(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.readShortUrl(longurl)
assert None == error
assert shorturl == result
result, error = sqlitedriver.deleteShortUrl(longurl)
assert None == error
assert 1 == result
| 30 | 62 | 0.711765 | import pytest, sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../src/')
from sqlitedriver import sqliteclass
dbname = 'test.db'
shorturl = "shortdata"
longurl = "longdata"
@pytest.fixture(scope='module')
def resource_setup(request):
print('Setting up resources for testing')
if os.path.exists(dbname):
os.remove(dbname)
def resource_teardown():
print('Tearing down resources from testing')
if os.path.exists(dbname):
os.remove(dbname)
request.addfinalizer(resource_teardown)
def test_createTable(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.createTable()
assert None == error
assert True == result
def test_insertData(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.insertData(shorturl, longurl)
assert None == error
assert True == result
def test_readShortUrl(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.readShortUrl(longurl)
assert None == error
assert shorturl == result
result, error = sqlitedriver.readShortUrl("blah")
assert None == error
assert None == result
def test_deleteShortUrl(resource_setup):
sqlitedriver = sqliteclass(dbname)
result, error = sqlitedriver.readShortUrl(longurl)
assert None == error
assert shorturl == result
result, error = sqlitedriver.deleteShortUrl(longurl)
assert None == error
assert 1 == result
| true | true |
f72998baf379666c230be32adf84dd43e4101c26 | 4,709 | py | Python | ShicimingjuCrawleAndDisplay/manages.py | zuojilei/ECommerceCrawlers | b92d8c48e4cfe514ef050f78e0a32f952cfef6a6 | [
"MIT"
] | null | null | null | ShicimingjuCrawleAndDisplay/manages.py | zuojilei/ECommerceCrawlers | b92d8c48e4cfe514ef050f78e0a32f952cfef6a6 | [
"MIT"
] | 14 | 2021-03-31T19:34:14.000Z | 2022-03-12T00:23:00.000Z | ShicimingjuCrawleAndDisplay/manages.py | zuojilei/ECommerceCrawlers | b92d8c48e4cfe514ef050f78e0a32f952cfef6a6 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
__author__ = 'Joynice'
import queue
import re
import threading
import requests
from faker import Faker
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from lxml import etree
from .app import create_app
from .exts import db
from .models import Poem, Poet
user_agent = Faker('zh-CN').user_agent()
app = create_app()
manager = Manager(app)
Migrate(app, db)
manager.add_command('db', MigrateCommand)
def get_header():
return {
'User-Agent': user_agent,
'Connection': 'close'
}
# 多线程爬取,由于可能导致数据爬取不全,数据诗词总数约为20w+数据
@manager.command
def spider():
class Shici(object):
def __init__(self, thread=5):
self.poet_queue = queue.Queue() # 诗人
self.thread = thread
self.base_url = 'http://www.shicimingju.com'
def get_poet_url(self):
for i in range(1, 13054):
url = 'http://www.shicimingju.com/chaxun/zuozhe/{}.html'.format(i)
self.poet_queue.put(url)
def Spider(self):
while not self.poet_queue.empty():
url = self.poet_queue.get()
req = requests.get(url, headers=get_header())
if req.status_code == 200:
req.encoding = 'utf-8'
html = etree.HTML(req.text)
name = html.xpath('/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/h4/a/text()')[0]
dynasty = html.xpath('/html/body/div[4]/div[2]/div[1]/div[3]/div[1]/div[2]/a/text()')
if len(dynasty) == 0:
dynasty = '未知'
else:
dynasty = dynasty[0]
introduction = html.xpath('/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/div[1]')[0].xpath(
'string(.)').strip()
with app.app_context():
poet = Poet(name=name, dynasty=dynasty, introduction=introduction)
db.session.add(poet)
db.session.commit()
id = poet.id
poem_num = html.xpath('/html/body/div[4]/div[2]/div[1]/div[3]/div[2]/div[2]/a/text()')[0][:-1]
poet_url_list = []
for i in range(1, int(int(poem_num) / 40) + 2):
poet_id = re.sub("\D", "", url)
poet_page_url = 'http://www.shicimingju.com/chaxun/zuozhe/{}_{}.html'.format(poet_id, i)
req1 = requests.get(url=poet_page_url, headers=get_header())
if req1.status_code == 200:
req1.encoding = 'utf-8'
list_html = etree.HTML(req1.text)
poet_url = list_html.xpath('//*/h3/a/@href')
poet_url_list += poet_url
poet_url_list = map(lambda x: self.base_url + x, poet_url_list)
for url in poet_url_list:
print(url)
req2 = requests.get(url, headers=get_header())
if req2.status_code == 200:
req2.encoding = 'utf-8'
poet_html = etree.HTML(req2.text)
title = poet_html.xpath('//*[@class="card"]/h1/text()')[0]
content = '\n'.join(poet_html.xpath('//*[@class="item_content"]/text()')).strip()
if not content:
content = '\n'.join(poet_html.xpath('//*[@class="para"]/text()')).strip()
if len(poet_html.xpath('//*[@class="shangxi_content"]')) == 0:
analysis = ''
else:
analysis = poet_html.xpath('//*[@class="shangxi_content"]')[0].xpath(
'string(.)').strip()
with app.app_context():
poem = Poem(title=title, content=content, analysis=analysis, author=id)
db.session.add(poem)
db.session.commit()
def run(self):
self.get_poet_url()
thread_list = []
for i in range(self.thread):
t = threading.Thread(target=self.Spider)
thread_list.append(t)
for t in thread_list:
t.setDaemon(True)
t.start()
for t in thread_list:
t.join()
self.Spider()
a = Shici()
a.run()
if __name__ == '__main__':
manager.run()
| 39.241667 | 114 | 0.475048 |
__author__ = 'Joynice'
import queue
import re
import threading
import requests
from faker import Faker
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from lxml import etree
from .app import create_app
from .exts import db
from .models import Poem, Poet
user_agent = Faker('zh-CN').user_agent()
app = create_app()
manager = Manager(app)
Migrate(app, db)
manager.add_command('db', MigrateCommand)
def get_header():
return {
'User-Agent': user_agent,
'Connection': 'close'
}
@manager.command
def spider():
class Shici(object):
def __init__(self, thread=5):
self.poet_queue = queue.Queue()
self.thread = thread
self.base_url = 'http://www.shicimingju.com'
def get_poet_url(self):
for i in range(1, 13054):
url = 'http://www.shicimingju.com/chaxun/zuozhe/{}.html'.format(i)
self.poet_queue.put(url)
def Spider(self):
while not self.poet_queue.empty():
url = self.poet_queue.get()
req = requests.get(url, headers=get_header())
if req.status_code == 200:
req.encoding = 'utf-8'
html = etree.HTML(req.text)
name = html.xpath('/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/h4/a/text()')[0]
dynasty = html.xpath('/html/body/div[4]/div[2]/div[1]/div[3]/div[1]/div[2]/a/text()')
if len(dynasty) == 0:
dynasty = '未知'
else:
dynasty = dynasty[0]
introduction = html.xpath('/html/body/div[4]/div[2]/div[1]/div[2]/div[1]/div[1]')[0].xpath(
'string(.)').strip()
with app.app_context():
poet = Poet(name=name, dynasty=dynasty, introduction=introduction)
db.session.add(poet)
db.session.commit()
id = poet.id
poem_num = html.xpath('/html/body/div[4]/div[2]/div[1]/div[3]/div[2]/div[2]/a/text()')[0][:-1]
poet_url_list = []
for i in range(1, int(int(poem_num) / 40) + 2):
poet_id = re.sub("\D", "", url)
poet_page_url = 'http://www.shicimingju.com/chaxun/zuozhe/{}_{}.html'.format(poet_id, i)
req1 = requests.get(url=poet_page_url, headers=get_header())
if req1.status_code == 200:
req1.encoding = 'utf-8'
list_html = etree.HTML(req1.text)
poet_url = list_html.xpath('//*/h3/a/@href')
poet_url_list += poet_url
poet_url_list = map(lambda x: self.base_url + x, poet_url_list)
for url in poet_url_list:
print(url)
req2 = requests.get(url, headers=get_header())
if req2.status_code == 200:
req2.encoding = 'utf-8'
poet_html = etree.HTML(req2.text)
title = poet_html.xpath('//*[@class="card"]/h1/text()')[0]
content = '\n'.join(poet_html.xpath('//*[@class="item_content"]/text()')).strip()
if not content:
content = '\n'.join(poet_html.xpath('//*[@class="para"]/text()')).strip()
if len(poet_html.xpath('//*[@class="shangxi_content"]')) == 0:
analysis = ''
else:
analysis = poet_html.xpath('//*[@class="shangxi_content"]')[0].xpath(
'string(.)').strip()
with app.app_context():
poem = Poem(title=title, content=content, analysis=analysis, author=id)
db.session.add(poem)
db.session.commit()
def run(self):
self.get_poet_url()
thread_list = []
for i in range(self.thread):
t = threading.Thread(target=self.Spider)
thread_list.append(t)
for t in thread_list:
t.setDaemon(True)
t.start()
for t in thread_list:
t.join()
self.Spider()
a = Shici()
a.run()
if __name__ == '__main__':
manager.run()
| true | true |
f7299913712ad38823cbff706b312ff773b30c29 | 19,889 | py | Python | manila/utils.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | [
"Apache-2.0"
] | null | null | null | manila/utils.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | [
"Apache-2.0"
] | null | null | null | manila/utils.py | redhat-openstack/manila | bef43561b303a36d99849952ba8c408b19bafd02 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import errno
import inspect
import os
import pyclbr
import random
import re
import shutil
import socket
import sys
import tempfile
from eventlet import pools
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import timeutils
import paramiko
import retrying
import six
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
synchronized = lockutils.synchronized_with_prefix('manila-')
def _get_root_helper():
return 'sudo manila-rootwrap %s' % CONF.rootwrap_config
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() function."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.execute(*cmd, **kwargs)
def trycmd(*args, **kwargs):
"""Convenience wrapper around oslo's trycmd() function."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.trycmd(*args, **kwargs)
class SSHPool(pools.Pool):
"""A simple eventlet pool to hold ssh connections."""
def __init__(self, ip, port, conn_timeout, login, password=None,
privatekey=None, *args, **kwargs):
self.ip = ip
self.port = port
self.login = login
self.password = password
self.conn_timeout = conn_timeout if conn_timeout else None
self.path_to_private_key = privatekey
super(SSHPool, self).__init__(*args, **kwargs)
def create(self):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
look_for_keys = True
if self.path_to_private_key:
self.path_to_private_key = os.path.expanduser(
self.path_to_private_key)
look_for_keys = False
elif self.password:
look_for_keys = False
try:
ssh.connect(self.ip,
port=self.port,
username=self.login,
password=self.password,
key_filename=self.path_to_private_key,
look_for_keys=look_for_keys,
timeout=self.conn_timeout)
# Paramiko by default sets the socket timeout to 0.1 seconds,
# ignoring what we set through the sshclient. This doesn't help for
# keeping long lived connections. Hence we have to bypass it, by
# overriding it after the transport is initialized. We are setting
# the sockettimeout to None and setting a keepalive packet so that,
# the server will keep the connection open. All that does is send
# a keepalive packet every ssh_conn_timeout seconds.
if self.conn_timeout:
transport = ssh.get_transport()
transport.sock.settimeout(None)
transport.set_keepalive(self.conn_timeout)
return ssh
except Exception as e:
msg = _("Check whether private key or password are correctly "
"set. Error connecting via ssh: %s") % e
LOG.error(msg)
raise exception.SSHException(msg)
def get(self):
"""Return an item from the pool, when one is available.
This may cause the calling greenthread to block. Check if a
connection is active before returning it. For dead connections
create and return a new connection.
"""
if self.free_items:
conn = self.free_items.popleft()
if conn:
if conn.get_transport().is_active():
return conn
else:
conn.close()
return self.create()
if self.current_size < self.max_size:
created = self.create()
self.current_size += 1
return created
return self.channel.get()
def remove(self, ssh):
"""Close an ssh client and remove it from free_items."""
ssh.close()
ssh = None
if ssh in self.free_items:
self.free_items.pop(ssh)
if self.current_size > 0:
self.current_size -= 1
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# Check for matching quotes on the ends
is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
if is_quoted:
# Check for unescaped quotes within the quoted argument
quoted = is_quoted.group('quoted')
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=cmd_list)
# Second, check whether danger character in command. So the shell
# special operator must be a single argument.
for c in ssh_injection_pattern:
if c not in arg:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends:
raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug('backend %s', self.__backend)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
def delete_if_exists(pathname):
"""Delete a file, but ignore file not found error."""
try:
os.unlink(pathname)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored, this function
will not throw because of None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
def is_ipv6_configured():
"""Check if system contain IPv6 capable network interface.
:rtype: bool
:raises: IOError
"""
try:
fd = open('/proc/net/if_inet6')
except IOError as e:
if e.errno != errno.ENOENT:
raise
result = False
else:
result = bool(fd.read(32))
fd.close()
return result
def is_eventlet_bug105():
"""Check if eventlet support IPv6 addresses.
See https://bitbucket.org/eventlet/eventlet/issue/105
:rtype: bool
"""
try:
mod = sys.modules['eventlet.support.greendns']
except KeyError:
return False
try:
connect_data = mod.getaddrinfo('::1', 80)
except socket.gaierror:
return True
fail = [x for x in connect_data if x[0] != socket.AF_INET6]
return bool(fail)
def monkey_patch():
    """Patch a decorator onto all functions and methods of configured modules.

    If CONF.monkey_patch is True, every module listed in
    CONF.monkey_patch_modules is patched.  Each list entry has the format
    "module.path:decorator.function", e.g.
    'manila.api.ec2.cloud:manila.openstack.common.notifier.api.notify_decorator'

    The decorator is invoked as decorator(name, function), where ``name``
    is the dotted name of the wrapped callable and ``function`` is the
    callable object itself
    (see manila.openstack.common.notifier.api.notify_decorator).
    """
    # If CONF.monkey_patch is not True, this function do nothing.
    if not CONF.monkey_patch:
        return
    # Get list of modules and decorators
    for module_and_decorator in CONF.monkey_patch_modules:
        module, decorator_name = module_and_decorator.split(':')
        # import decorator function
        decorator = importutils.import_class(decorator_name)
        __import__(module)
        # Retrieve module information using pyclbr
        module_data = pyclbr.readmodule_ex(module)
        for key in module_data.keys():
            # set the decorator for the class methods
            if isinstance(module_data[key], pyclbr.Class):
                clz = importutils.import_class("%s.%s" % (module, key))
                # NOTE(vponomaryov): we need to distinguish class methods types
                # for py2 and py3, because the concept of 'unbound methods' has
                # been removed from the python3.x
                if six.PY3:
                    member_type = inspect.isfunction
                else:
                    member_type = inspect.ismethod
                for method, func in inspect.getmembers(clz, member_type):
                    setattr(
                        clz, method,
                        decorator("%s.%s.%s" % (module, key, method), func))
            # set the decorator for the function
            if isinstance(module_data[key], pyclbr.Function):
                func = importutils.import_class("%s.%s" % (module, key))
                setattr(sys.modules[module], key,
                        decorator("%s.%s" % (module, key), func))
def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file, re-reading only if it has been modified.

    :param filename: path of the file to read
    :param cache_info: dictionary to hold opaque cache (data + mtime).
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.
    :returns: data from file
    """
    current_mtime = os.path.getmtime(filename)
    # Serve the cached copy when the file has not changed on disk.
    if cache_info and current_mtime == cache_info.get('mtime'):
        return cache_info['data']
    with open(filename) as source:
        cache_info['data'] = source.read()
    cache_info['mtime'] = current_mtime
    if reload_func:
        reload_func(cache_info['data'])
    return cache_info['data']
def file_open(*args, **kwargs):
    """Open a file.

    See the built-in open() documentation for more details.

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    # NOTE: the `file` builtin used previously does not exist in Python 3;
    # open() is the equivalent and works on both Python 2 and 3.
    return open(*args, **kwargs)
def service_is_up(service):
    """Check whether a service is up based on last heartbeat."""
    # Prefer the update timestamp; a service that never reported a
    # heartbeat only has its creation time.
    heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC.
    delta = timeutils.utcnow() - heartbeat
    return abs(timeutils.total_seconds(delta)) <= CONF.service_down_time
def validate_service_host(context, host):
    """Return the 'manila-share' service on ``host``, ensuring it is alive.

    :raises: exception.ServiceIsDown if the service heartbeat is stale.
    """
    service = db_api.service_get_by_host_and_topic(context, host,
                                                   'manila-share')
    if not service_is_up(service):
        raise exception.ServiceIsDown(service=service['host'])
    return service
def read_file_as_root(file_path):
    """Secure helper to read file as root.

    :returns: the file contents (stdout of ``cat``)
    :raises: exception.FileNotFound if the rootwrapped ``cat`` fails.
    """
    try:
        out, _err = execute('cat', file_path, run_as_root=True)
        return out
    except exception.ProcessExecutionError:
        raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :params owner_uid: UID of temporary owner (defaults to current user)
    """
    target_uid = os.getuid() if owner_uid is None else owner_uid
    original_uid = os.stat(path).st_uid
    # Skip the chown round-trip entirely when ownership already matches.
    needs_chown = original_uid != target_uid
    if needs_chown:
        execute('chown', target_uid, path, run_as_root=True)
    try:
        yield
    finally:
        if needs_chown:
            execute('chown', original_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
    """Yield a freshly created temporary directory, removing it on exit."""
    path = tempfile.mkdtemp(**kwargs)
    try:
        yield path
    finally:
        try:
            shutil.rmtree(path)
        except OSError as e:
            # Best effort cleanup: log and move on rather than masking
            # whatever the caller was doing.
            LOG.debug('Could not remove tmpdir: %s', six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
    """Walk class hierarchy, yielding most derived classes first."""
    if not encountered:
        encountered = []
    for child in clazz.__subclasses__():
        if child in encountered:
            continue
        encountered.append(child)
        # Emit leaves before their ancestors (depth-first, post-order).
        for descendant in walk_class_hierarchy(child, encountered):
            yield descendant
        yield child
def ensure_tree(path):
    """Create a directory (and any ancestor directories required).

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow the error when the path already exists as a
        # directory; everything else (including an existing non-dir)
        # propagates.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def cidr_to_netmask(cidr):
    """Convert cidr to netmask.

    :param cidr: CIDR string, e.g. '10.0.0.0/24'
    :returns: dotted-quad netmask string, e.g. '255.255.255.0'
    :raises: exception.InvalidInput if the CIDR cannot be parsed.
    """
    try:
        network = netaddr.IPNetwork(cidr)
        return str(network.netmask)
    except netaddr.AddrFormatError:
        raise exception.InvalidInput(_("Invalid cidr supplied %s") % cidr)
def is_valid_ip_address(ip_address, ip_version):
    """Validate ``ip_address`` under the given IP version (4 or 6)."""
    version = int(ip_version)
    if version == 4:
        return netaddr.valid_ipv4(ip_address)
    if version == 6:
        return netaddr.valid_ipv6(ip_address)
    raise exception.ManilaException(
        _("Provided improper IP version '%s'.") % ip_version)
class IsAMatcher(object):
    """Test matcher that compares equal to any instance of a given class."""

    def __init__(self, expected_value=None):
        # expected_value is a class (or tuple of classes) to match against.
        self.expected_value = expected_value

    def __eq__(self, actual_value):
        # Equality means "actual_value is an instance of expected_value".
        return isinstance(actual_value, self.expected_value)
class ComparableMixin(object):
    """Mixin implementing all rich comparisons via a ``_cmpkey()`` hook.

    Subclasses define ``_cmpkey()``; every comparison operator delegates
    to comparing the two objects' keys.
    """

    def _compare(self, other, method):
        try:
            return method(self._cmpkey(), other._cmpkey())
        except (AttributeError, TypeError):
            # ``other`` has no _cmpkey(), or the keys are not mutually
            # comparable; let Python try the reflected operation.
            return NotImplemented

    def __lt__(self, other):
        return self._compare(other, lambda mine, theirs: mine < theirs)

    def __le__(self, other):
        return self._compare(other, lambda mine, theirs: mine <= theirs)

    def __eq__(self, other):
        return self._compare(other, lambda mine, theirs: mine == theirs)

    def __ge__(self, other):
        return self._compare(other, lambda mine, theirs: mine >= theirs)

    def __gt__(self, other):
        return self._compare(other, lambda mine, theirs: mine > theirs)

    def __ne__(self, other):
        return self._compare(other, lambda mine, theirs: mine != theirs)
def retry(exception, interval=1, retries=10, backoff_rate=2,
          wait_random=False):
    """A wrapper around retrying library.

    This decorator allows to log and to check 'retries' input param.
    Time interval between retries is calculated in the following way:
    interval * backoff_rate ^ previous_attempt_number

    :param exception: expected exception type. When wrapped function
                      raises an exception of this type, the function
                      execution is retried.
    :param interval: param 'interval' is used to calculate time interval
                     between retries:
                     interval * backoff_rate ^ previous_attempt_number
    :param retries: number of retries.
    :param backoff_rate: param 'backoff_rate' is used to calculate time
                         interval between retries:
                         interval * backoff_rate ^ previous_attempt_number
    :param wait_random: boolean value to enable retry with random wait timer.
    """
    def _retry_on_exception(e):
        # Only retry on the exception type supplied by the caller.
        return isinstance(e, exception)

    def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
        # Exponential backoff: interval * backoff_rate ^ attempt_number.
        exp = backoff_rate ** previous_attempt_number
        wait_for = max(0, interval * exp)
        if wait_random:
            # NOTE(review): random.randrange with float endpoints is
            # deprecated and rejected on newer Pythons -- consider
            # random.uniform here; confirm supported Python versions.
            wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
        else:
            wait_val = wait_for * 1000.0
        LOG.debug("Sleeping for %s seconds.", (wait_val / 1000.0))
        # The retrying library expects the wait time in milliseconds.
        return wait_val

    def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
        delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
        LOG.debug("Failed attempt %s", previous_attempt_number)
        LOG.debug("Have been at this for %s seconds",
                  delay_since_first_attempt)
        # Stop once the configured number of retries has been reached.
        return previous_attempt_number == retries

    if retries < 1:
        raise ValueError(_('Retries must be greater than or '
                           'equal to 1 (received: %s).') % retries)

    def _decorator(f):

        @six.wraps(f)
        def _wrapper(*args, **kwargs):
            r = retrying.Retrying(retry_on_exception=_retry_on_exception,
                                  wait_func=_backoff_sleep,
                                  stop_func=_print_stop)
            return r.call(f, *args, **kwargs)
        return _wrapper

    return _decorator
| 32.983416 | 79 | 0.622052 |
import contextlib
import errno
import inspect
import os
import pyclbr
import random
import re
import shutil
import socket
import sys
import tempfile
from eventlet import pools
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import timeutils
import paramiko
import retrying
import six
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
synchronized = lockutils.synchronized_with_prefix('manila-')
def _get_root_helper():
return 'sudo manila-rootwrap %s' % CONF.rootwrap_config
def execute(*cmd, **kwargs):
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.execute(*cmd, **kwargs)
def trycmd(*args, **kwargs):
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.trycmd(*args, **kwargs)
class SSHPool(pools.Pool):
def __init__(self, ip, port, conn_timeout, login, password=None,
privatekey=None, *args, **kwargs):
self.ip = ip
self.port = port
self.login = login
self.password = password
self.conn_timeout = conn_timeout if conn_timeout else None
self.path_to_private_key = privatekey
super(SSHPool, self).__init__(*args, **kwargs)
def create(self):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
look_for_keys = True
if self.path_to_private_key:
self.path_to_private_key = os.path.expanduser(
self.path_to_private_key)
look_for_keys = False
elif self.password:
look_for_keys = False
try:
ssh.connect(self.ip,
port=self.port,
username=self.login,
password=self.password,
key_filename=self.path_to_private_key,
look_for_keys=look_for_keys,
timeout=self.conn_timeout)
# keeping long lived connections. Hence we have to bypass it, by
# overriding it after the transport is initialized. We are setting
# the sockettimeout to None and setting a keepalive packet so that,
# the server will keep the connection open. All that does is send
# a keepalive packet every ssh_conn_timeout seconds.
if self.conn_timeout:
transport = ssh.get_transport()
transport.sock.settimeout(None)
transport.set_keepalive(self.conn_timeout)
return ssh
except Exception as e:
msg = _("Check whether private key or password are correctly "
"set. Error connecting via ssh: %s") % e
LOG.error(msg)
raise exception.SSHException(msg)
def get(self):
if self.free_items:
conn = self.free_items.popleft()
if conn:
if conn.get_transport().is_active():
return conn
else:
conn.close()
return self.create()
if self.current_size < self.max_size:
created = self.create()
self.current_size += 1
return created
return self.channel.get()
def remove(self, ssh):
ssh.close()
ssh = None
if ssh in self.free_items:
self.free_items.pop(ssh)
if self.current_size > 0:
self.current_size -= 1
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# Check for matching quotes on the ends
is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
if is_quoted:
# Check for unescaped quotes within the quoted argument
quoted = is_quoted.group('quoted')
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=cmd_list)
# Second, check whether danger character in command. So the shell
# special operator must be a single argument.
for c in ssh_injection_pattern:
if c not in arg:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
class LazyPluggable(object):
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends:
raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
LOG.debug('backend %s', self.__backend)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
def delete_if_exists(pathname):
try:
os.unlink(pathname)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
def get_from_path(items, path):
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
def is_ipv6_configured():
try:
fd = open('/proc/net/if_inet6')
except IOError as e:
if e.errno != errno.ENOENT:
raise
result = False
else:
result = bool(fd.read(32))
fd.close()
return result
def is_eventlet_bug105():
try:
mod = sys.modules['eventlet.support.greendns']
except KeyError:
return False
try:
connect_data = mod.getaddrinfo('::1', 80)
except socket.gaierror:
return True
fail = [x for x in connect_data if x[0] != socket.AF_INET6]
return bool(fail)
def monkey_patch():
# If CONF.monkey_patch is not True, this function do nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
# NOTE(vponomaryov): we need to distinguish class methods types
# for py2 and py3, because the concept of 'unbound methods' has
# been removed from the python3.x
if six.PY3:
member_type = inspect.isfunction
else:
member_type = inspect.ismethod
for method, func in inspect.getmembers(clz, member_type):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def read_cached_file(filename, cache_info, reload_func=None):
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
def file_open(*args, **kwargs):
return file(*args, **kwargs)
def service_is_up(service):
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = timeutils.total_seconds(timeutils.utcnow() - last_heartbeat)
return abs(elapsed) <= CONF.service_down_time
def validate_service_host(context, host):
service = db_api.service_get_by_host_and_topic(context, host,
'manila-share')
if not service_is_up(service):
raise exception.ServiceIsDown(service=service['host'])
return service
def read_file_as_root(file_path):
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except exception.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug('Could not remove tmpdir: %s', six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
def ensure_tree(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def cidr_to_netmask(cidr):
try:
network = netaddr.IPNetwork(cidr)
return str(network.netmask)
except netaddr.AddrFormatError:
raise exception.InvalidInput(_("Invalid cidr supplied %s") % cidr)
def is_valid_ip_address(ip_address, ip_version):
if int(ip_version) == 4:
return netaddr.valid_ipv4(ip_address)
elif int(ip_version) == 6:
return netaddr.valid_ipv6(ip_address)
else:
raise exception.ManilaException(
_("Provided improper IP version '%s'.") % ip_version)
class IsAMatcher(object):
def __init__(self, expected_value=None):
self.expected_value = expected_value
def __eq__(self, actual_value):
return isinstance(actual_value, self.expected_value)
class ComparableMixin(object):
def _compare(self, other, method):
try:
return method(self._cmpkey(), other._cmpkey())
except (AttributeError, TypeError):
# _cmpkey not implemented, or return different type,
# so I can't compare with "other".
return NotImplemented
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def retry(exception, interval=1, retries=10, backoff_rate=2,
wait_random=False):
def _retry_on_exception(e):
return isinstance(e, exception)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = max(0, interval * exp)
if wait_random:
wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
else:
wait_val = wait_for * 1000.0
LOG.debug("Sleeping for %s seconds.", (wait_val / 1000.0))
return wait_val
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError(_('Retries must be greater than or '
'equal to 1 (received: %s).') % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
| true | true |
f7299b8c93a212f361068d4db0ea7d318b67843a | 17,636 | py | Python | src/models/SGNN_EBM_models.py | chao1224/SGNN-EBM | bda4c486e8ecb9775b635757dbe1071878be7b8a | [
"MIT"
] | null | null | null | src/models/SGNN_EBM_models.py | chao1224/SGNN-EBM | bda4c486e8ecb9775b635757dbe1071878be7b8a | [
"MIT"
] | 1 | 2022-03-25T01:47:18.000Z | 2022-03-25T01:50:12.000Z | src/models/SGNN_EBM_models.py | chao1224/SGNN-EBM | bda4c486e8ecb9775b635757dbe1071878be7b8a | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
from torch_scatter import scatter_add
class NCE_C_Parameter(torch.nn.Module):
    """Trainable vector of NCE normalization constants, one per data point."""

    def __init__(self, N):
        super(NCE_C_Parameter, self).__init__()
        # One learnable scalar per data point, initialized to zero.
        self.NCE_C = nn.Parameter(torch.zeros(N, requires_grad=True))
class GNN_EBM_Layer_01(torch.nn.Module):
    """One message-passing layer over a task graph with node (1st-order)
    and edge (2nd-order) states.

    Node states carry 2 label configurations per task; edge states carry
    the 4 joint label configurations per task pair.
    """

    def __init__(self, input_dim, output_dim):
        super(GNN_EBM_Layer_01, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.edge_layer = torch.nn.Linear(input_dim, output_dim)
        self.node_layer = torch.nn.Linear(input_dim, output_dim)
        self.mlp = torch.nn.Linear(input_dim, output_dim)

    def node_message_passing(self, x, x_2nd_agg, edge):
        """Add edge messages (from both endpoints, averaged) to node states.

        :param x: (B, T, d) node states
        :param x_2nd_agg: (B, M, d) per-edge messages
        :param edge: (2, M) endpoint indices
        :return: (B, T, d) updated node states
        """
        T = x.size()[1]
        node_in, node_out = edge[0], edge[1]  # M, M
        # Scatter each edge message onto both of its endpoints.
        update = (scatter_add(x_2nd_agg, node_out, dim=1, dim_size=T) +
                  scatter_add(x_2nd_agg, node_in, dim=1, dim_size=T)) / 2  # B, T, d
        x = x + update  # B, T, d
        return x

    def forward(self, x_1st, x_2nd, edge):
        '''
        :param x: (B, T, 2, d)
        :param x_2nd: (B, M, 4, d)
        :param edge: (M, 2)
        :return: (B, T, 2, d_out)
        '''
        # NOTE(review): the indexing below (edge[0], edge[1]) suggests the
        # actual edge shape is (2, M), not (M, 2) as the docstring says --
        # confirm against the caller.
        aggregate_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)
        node_i_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)
        node_j_indice = torch.LongTensor([0, 1, 0, 1]).to(x_1st.device)
        x_1st_neg = x_1st[:, :, 0, :]  # B, T, d
        x_1st_pos = x_1st[:, :, 1, :]  # B, T, d
        # Collapse the 4 joint configurations to 2 by the first label.
        x_2nd_agg = scatter_add(x_2nd, aggregate_indice, dim=2)  # B, T, 2, d
        x_2nd_neg = x_2nd_agg[:, :, 0, :]  # B, M, d
        x_2nd_pos = x_2nd_agg[:, :, 1, :]  # B, M, d
        x_neg = self.node_message_passing(x_1st_neg, x_2nd_neg, edge)  # B, T, d
        x_pos = self.node_message_passing(x_1st_pos, x_2nd_pos, edge)  # B, T, d
        x = torch.stack([x_neg, x_pos], dim=2)  # B, T, 2, d
        x = self.node_layer(x)  # B, T, 2, d
        # Expand endpoint node states to the 4 joint configurations and
        # update edge states from them.
        edge_i = torch.index_select(x_1st, 1, edge[0])  # B, M, 2, dim
        edge_i = torch.index_select(edge_i, 2, node_i_indice)  # B, M, 4, dim
        edge_j = torch.index_select(x_1st, 1, edge[1])  # B, M, 2, dim
        edge_j = torch.index_select(edge_j, 2, node_j_indice)  # B, M, 4, dim
        edge = x_2nd + self.mlp(edge_i + edge_j)  # B, M, 4, d
        edge = self.edge_layer(edge)
        return x, edge
class GNN_Energy_Model_1st_Order_01(torch.nn.Module):
    """First-order GNN energy head with a 3-layer MLP node readout."""

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):
        super(GNN_Energy_Model_1st_Order_01, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        # Number of message-passing layers actually applied.
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.output_dim = output_dim
        self.concat = concat
        dims = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList(
            [GNN_EBM_Layer_01(d_in, d_out)
             for d_in, d_out in zip(dims[:-1], dims[1:])])
        # With concat=True the readout sees every layer's node state.
        hidden_dim_sum = sum(dims) if self.concat else ebm_GNN_dim
        self.node_readout = torch.nn.Sequential(
            torch.nn.Linear(2 * hidden_dim_sum, 2 * hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim_sum, output_dim)
        )

    def forward(self, x_1st, x_2nd, edge):
        '''
        :param x_1st: B,T,2,dim
        :param x_2nd: B,M,4,dim
        :param edge: 2,M
        :return: B,T,1
        '''
        B, T = x_1st.size()[:2]
        node_states = [x_1st]
        h_node, h_edge = x_1st, x_2nd
        last = self.ebm_GNN_layer_num - 1
        for idx, layer in enumerate(self.hidden_layers):
            h_node, h_edge = layer(h_node, h_edge, edge)
            if idx < last:
                # No nonlinearity/dropout after the final layer.
                h_node = F.relu(h_node)
                h_node = F.dropout(h_node, self.dropout, training=self.training)
            node_states.append(h_node)
        if self.concat:
            out = torch.cat(node_states, dim=3).view(B, T, -1)  # B, T, 2*layer_num*d
        else:
            out = h_node.view(B, T, -1)  # B, T, 2*d
        return self.node_readout(out)  # B, T, output_dim
class GNN_Energy_Model_1st_Order_02(torch.nn.Module):
    """First-order GNN energy head with a single linear node readout.

    Same message-passing trunk as GNN_Energy_Model_1st_Order_01; only the
    readout differs (one Linear instead of a 3-layer MLP).
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):
        super(GNN_Energy_Model_1st_Order_02, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        # Number of message-passing layers actually applied.
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.output_dim = output_dim
        self.concat = concat
        hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList()
        for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
            self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
        # With concat=True the readout sees every layer's node state.
        if self.concat:
            hidden_dim_sum = sum(hidden_layers_dim)
        else:
            hidden_dim_sum = ebm_GNN_dim
        self.node_readout = torch.nn.Linear(2 * hidden_dim_sum, output_dim)
        return

    def forward(self, x_1st, x_2nd, edge):
        '''
        :param x_1st: B,T,2,dim
        :param x_2nd: B,M,4,dim
        :param edge: 2,M
        :return: B,T,1
        '''
        B, T = x_1st.size()[:2]
        h_node_list = [x_1st]
        x_node, x_edge = x_1st, x_2nd
        for i in range(self.ebm_GNN_layer_num):
            x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
            if i < self.ebm_GNN_layer_num - 1:
                # No nonlinearity/dropout after the final layer.
                x_node = F.relu(x_node)
                # x_edge = F.relu(x_edge)
                x_node = F.dropout(x_node, self.dropout, training=self.training)
                # x_edge = F.dropout(x_edge, self.dropout, training=self.training)
            h_node_list.append(x_node)
        if self.concat:
            h = torch.cat(h_node_list, dim=3).view(B, T, -1)  # B, T, 2*layer_num*d
        else:
            h = x_node.view(B, T, -1)  # B, T, 2*d
        h = self.node_readout(h)  # B, T, 1
        return h
class GNN_Energy_Model_2nd_Order_01(torch.nn.Module):
    """Second-order GNN energy head producing node and edge energies.

    Readouts are 3-layer MLPs applied per configuration (output size 1,
    then squeezed), so both node and edge states keep their configuration
    axis in the result.
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
        super(GNN_Energy_Model_2nd_Order_01, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        # Number of message-passing layers actually applied.
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.concat = concat
        hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList()
        for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
            self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
        # With concat=True the readouts see every layer's states.
        if self.concat:
            hidden_dim_sum = sum(hidden_layers_dim)
        else:
            hidden_dim_sum = ebm_GNN_dim
        self.node_readout = torch.nn.Sequential(
            torch.nn.Linear(hidden_dim_sum, 2 * hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim_sum, 1)
        )
        self.edge_readout = torch.nn.Sequential(
            torch.nn.Linear(hidden_dim_sum, 2 * hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim_sum, 1)
        )
        return

    def forward(self, x_1st, x_2nd, edge):
        '''
        :param x_1st: B,T,2,dim
        :param x_2nd: B,M,4,dim
        :param edge: 2,M
        :return: (B,T,2), (B,M,4)
        '''
        B, T = x_1st.size()[:2]
        M = edge.size()[1]
        h_node_list = [x_1st]
        h_edge_list = [x_2nd]
        x_node, x_edge = x_1st, x_2nd
        for i in range(self.ebm_GNN_layer_num):
            x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
            if i < self.ebm_GNN_layer_num - 1:
                # No nonlinearity/dropout after the final layer.
                x_node = F.relu(x_node)
                # x_edge = F.relu(x_edge)
                x_node = F.dropout(x_node, self.dropout, training=self.training)
                # x_edge = F.dropout(x_edge, self.dropout, training=self.training)
            h_node_list.append(x_node)
            h_edge_list.append(x_edge)
        if self.concat:
            h_node = torch.cat(h_node_list, dim=3)  # B, T, 2, layer_num*d
            h_edge = torch.cat(h_edge_list, dim=3)  # B, M, 4, layer_num*d
        else:
            h_node = x_node  # B, T, 2, d
            h_edge = x_edge  # B, M, 4, d
        h_node = self.node_readout(h_node)  # B, T, 2, 1
        h_edge = self.edge_readout(h_edge)  # B, M, 4, 1
        h_node = h_node.squeeze(3)  # B, T, 2
        h_edge = h_edge.squeeze(3)  # B, M, 4
        return h_node, h_edge
class GNN_Energy_Model_2nd_Order_02(torch.nn.Module):
    """Second-order GNN energy head whose readouts mix configurations.

    Unlike GNN_Energy_Model_2nd_Order_01, node/edge states are flattened
    across the configuration axis before the MLP readouts, which emit all
    2 (node) or 4 (edge) configuration energies jointly.
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
        super(GNN_Energy_Model_2nd_Order_02, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        # Number of message-passing layers actually applied.
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.concat = concat
        hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList()
        for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
            self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
        # With concat=True the readouts see every layer's states.
        if self.concat:
            hidden_dim_sum = sum(hidden_layers_dim)
        else:
            hidden_dim_sum = ebm_GNN_dim
        self.node_readout = torch.nn.Sequential(
            torch.nn.Linear(2 * hidden_dim_sum, 2 * hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim_sum, 2)
        )
        self.edge_readout = torch.nn.Sequential(
            torch.nn.Linear(4 * hidden_dim_sum, 2 * hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim_sum, 4)
        )
        return

    def forward(self, x_1st, x_2nd, edge):
        '''
        :param x_1st: B,T,2,dim
        :param x_2nd: B,M,4,dim
        :param edge: 2,M
        :return: (B,T,2), (B,M,4)
        '''
        B, T = x_1st.size()[:2]
        M = x_2nd.size()[1]
        h_node_list = [x_1st]
        h_edge_list = [x_2nd]
        x_node, x_edge = x_1st, x_2nd
        for i in range(self.ebm_GNN_layer_num):
            x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
            if i < self.ebm_GNN_layer_num - 1:
                # No nonlinearity/dropout after the final layer.
                x_node = F.relu(x_node)
                # x_edge = F.relu(x_edge)
                x_node = F.dropout(x_node, self.dropout, training=self.training)
                # x_edge = F.dropout(x_edge, self.dropout, training=self.training)
            h_node_list.append(x_node)
            h_edge_list.append(x_edge)
        if self.concat:
            h_node = torch.cat(h_node_list, dim=3).view(B, T, -1)  # B, T, 2*layer_num*d
            h_edge = torch.cat(h_edge_list, dim=3).view(B, M, -1)  # B, M, 4*layer_num*d
        else:
            h_node = x_node.view(B, T, -1)  # B, T, 2*d
            h_edge = x_edge.view(B, M, -1)  # B, M, 4*d
        h_node = self.node_readout(h_node)  # B, T, 2
        h_edge = self.edge_readout(h_edge)  # B, M, 4
        return h_node, h_edge
class GNN_Energy_Model_2nd_Order_03(torch.nn.Module):
    """Second-order GNN energy head with single linear node/edge readouts."""

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
        super(GNN_Energy_Model_2nd_Order_03, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        # Number of message-passing layers actually applied.
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.concat = concat
        dims = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList(
            [GNN_EBM_Layer_01(d_in, d_out)
             for d_in, d_out in zip(dims[:-1], dims[1:])])
        # With concat=True the readouts see every layer's states.
        hidden_dim_sum = sum(dims) if self.concat else ebm_GNN_dim
        self.node_readout = nn.Linear(2 * hidden_dim_sum, 2)
        self.edge_readout = nn.Linear(4 * hidden_dim_sum, 4)

    def forward(self, x_1st, x_2nd, edge):
        '''
        :param x_1st: B,T,2,dim
        :param x_2nd: B,M,4,dim
        :param edge: 2,M
        :return: (B,T,2), (B,M,4)
        '''
        B, T = x_1st.size()[:2]
        M = edge.size()[1]
        node_states, edge_states = [x_1st], [x_2nd]
        h_node, h_edge = x_1st, x_2nd
        last = self.ebm_GNN_layer_num - 1
        for idx, layer in enumerate(self.hidden_layers):
            h_node, h_edge = layer(h_node, h_edge, edge)
            if idx < last:
                # No nonlinearity/dropout after the final layer.
                h_node = F.relu(h_node)
                h_node = F.dropout(h_node, self.dropout, training=self.training)
            node_states.append(h_node)
            edge_states.append(h_edge)
        if self.concat:
            h_node = torch.cat(node_states, dim=3)  # B, T, 2, layer_num*d
            h_edge = torch.cat(edge_states, dim=3)  # B, M, 4, layer_num*d
        h_node = self.node_readout(h_node.view(B, T, -1))  # B, T, 2
        h_edge = self.edge_readout(h_edge.view(B, M, -1))  # B, M, 4
        return h_node, h_edge
# class GATNet(torch.nn.Module):
# def __init__(self, embedding_dim=10, hidden_dim=10, num_head=8):
# super(GATNet, self).__init__()
# self.conv1 = GATConv(embedding_dim, hidden_dim, heads=num_head, dropout=0.6)
# self.conv2 = GATConv(hidden_dim * num_head, hidden_dim, heads=1, concat=False, dropout=0.6)
# def forward(self, data):
# x = data.x
# x = F.dropout(x, p=0.6, training=self.training)
# x = F.elu(self.conv1(x, data.edge_index))
# x = F.dropout(x, p=0.6, training=self.training)
# x = self.conv2(x, data.edge_index)
# return x
# class MLP(nn.Sequential):
# def __init__(self, input_dim, output_dim, hidden_dims=[1024, 512], dropout=0.1, use_batch_norm=False):
# super(MLP, self).__init__()
# self.input_dim = input_dim
# self.output_dim = output_dim
# self.hidden_dims = hidden_dims
# self.use_batch_norm = use_batch_norm
# self.dropout = nn.Dropout(0.1)
# self.layer_size = len(self.hidden_dims) + 1
# dims = [self.input_dim] + self.hidden_dims + [self.output_dim]
# self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(self.layer_size)])
# if use_batch_norm:
# self.batch_norms = nn.ModuleList([nn.BatchNorm1d(dims[i + 1]) for i in range(self.layer_size)])
# for m in self.modules():
# if isinstance(m, nn.Linear):
# nn.init.xavier_uniform_(m.weight.data)
# if m.bias is not None:
# m.bias.data.fill_(0.0)
# def norm(self):
# with torch.no_grad():
# norm = 0
# for m in self.modules():
# if isinstance(m, nn.Linear):
# norm += torch.norm(m.weight.data).item()
# return norm
# def forward(self, v):
# '''
# : params x: (batch_size, *, input_dim)
# : output : (batch_size, *, output_dim)
# '''
# B, t, _ = v.size()
# v = v.flatten(0, -2)
# # print('input norm: %.5f' % (torch.norm(v).item()))
# for i, l in enumerate(self.predictor):
# v = l(v)
# if i != self.layer_size - 1:
# if self.use_batch_norm:
# v = self.batch_norms[i](v)
# v = F.relu(v)
# v = self.dropout(v)
# # print('layer %d norm: %.5f' % (i, torch.norm(v).item()))
# v = v.reshape(B, t, -1)
# return v
# class GradKnowledgeGraphModel(nn.Module):
# def __init__(self, num_tasks, args):
# super(GradKnowledgeGraphModel, self).__init__()
# self.num_tasks = num_tasks
# self.weights = nn.Parameter(torch.ones(self.num_tasks, 1), requires_grad=True)
# self.register_parameter('grad_KG', self.weights)
# self.softmax = nn.Softmax(dim=0)
# self.normalize_method = args.grad_KG_normalize_method
# def forward(self, task_repr):
# # ########## This won't train ##########
# # task_repr = task_repr * self.weights.data
# task_repr = task_repr * self.weights
# return task_repr
# def renormalize(self):
# if self.normalize_method == 'sum':
# ########## TODO: there might be negatives after backward ##########
# normalize_coeff = self.num_tasks / self.weights.data.sum()
# self.weights.data *= normalize_coeff
# elif self.normalize_method == 'softmax':
# self.weights.data = self.softmax(self.weights.data) * self.num_tasks
# return
# def reset_param(self):
# self.weights.data.fill_(1)
# return
| 37.845494 | 109 | 0.576435 | import torch
from torch import nn
import torch.nn.functional as F
from torch_scatter import scatter_add
class NCE_C_Parameter(torch.nn.Module):
    """Holds a learnable vector of N NCE normalization constants, one per sample.

    The parameter is registered as ``NCE_C`` and initialized to zero.
    """

    def __init__(self, N):
        super(NCE_C_Parameter, self).__init__()
        # One learnable scalar per sample, all starting at zero.
        initial = torch.zeros(N, requires_grad=True)
        self.NCE_C = nn.Parameter(initial)
class GNN_EBM_Layer_01(torch.nn.Module):
    """One message-passing layer over a task graph with binary label states.

    Assumed shapes (inferred from the indexing below — TODO confirm against callers):
      - node states ``x_1st``: (B, T, 2, input_dim), axis 2 = (neg, pos) label state
      - edge states ``x_2nd``: (B, M, 4, input_dim), axis 2 = the 4 joint label
        states (neg-neg, neg-pos, pos-neg, pos-pos) of an edge's endpoints
      - ``edge``: LongTensor (2, M) with rows (source node ids, target node ids)
    """
    def __init__(self, input_dim, output_dim):
        super(GNN_EBM_Layer_01, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Separate linear maps for edge states, node states, and the
        # endpoint-feature MLP used to update edge states.
        self.edge_layer = torch.nn.Linear(input_dim, output_dim)
        self.node_layer = torch.nn.Linear(input_dim, output_dim)
        self.mlp = torch.nn.Linear(input_dim, output_dim)
    def node_message_passing(self, x, x_2nd_agg, edge):
        """Add to each node the mean of incoming and outgoing aggregated edge messages.

        x: (B, T, D) single-label-state node features; x_2nd_agg: (B, M, D)
        per-edge messages; edge: (2, M) endpoint indices.
        """
        T = x.size()[1]
        node_in, node_out = edge[0], edge[1]
        # Scatter messages to both endpoints and average the two directions.
        update = (scatter_add(x_2nd_agg, node_out, dim=1, dim_size=T) +
                  scatter_add(x_2nd_agg, node_in, dim=1, dim_size=T)) / 2
        x = x + update
        return x
    def forward(self, x_1st, x_2nd, edge):
        """Return updated (node_states, edge_states) after one round of message passing."""
        # Index helpers: collapse the 4 joint edge states onto the 2 node states
        # (aggregate_indice), and expand each endpoint's 2 states to the 4 joint
        # states (node_i/j_indice). Presumably the joint-state ordering is
        # (i-neg,j-neg), (i-neg,j-pos), (i-pos,j-neg), (i-pos,j-pos) — TODO confirm.
        aggregate_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)
        node_i_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)
        node_j_indice = torch.LongTensor([0, 1, 0, 1]).to(x_1st.device)
        x_1st_neg = x_1st[:, :, 0, :]
        x_1st_pos = x_1st[:, :, 1, :]
        # Sum the joint edge states down to one message per endpoint label state.
        x_2nd_agg = scatter_add(x_2nd, aggregate_indice, dim=2)
        x_2nd_neg = x_2nd_agg[:, :, 0, :]
        x_2nd_pos = x_2nd_agg[:, :, 1, :]
        # Message passing is run independently for the neg and pos label states.
        x_neg = self.node_message_passing(x_1st_neg, x_2nd_neg, edge)
        x_pos = self.node_message_passing(x_1st_pos, x_2nd_pos, edge)
        x = torch.stack([x_neg, x_pos], dim=2)
        x = self.node_layer(x)
        # Edge update: gather endpoint features per joint state, mix via mlp,
        # add residually to the edge states, then project.
        edge_i = torch.index_select(x_1st, 1, edge[0])
        edge_i = torch.index_select(edge_i, 2, node_i_indice)
        edge_j = torch.index_select(x_1st, 1, edge[1])
        edge_j = torch.index_select(edge_j, 2, node_j_indice)
        edge = x_2nd + self.mlp(edge_i + edge_j)
        edge = self.edge_layer(edge)
        return x, edge
class GNN_Energy_Model_1st_Order_01(torch.nn.Module):
    """First-order energy head: GNN message passing plus a 3-layer MLP node readout.

    Runs ``ebm_GNN_layer_num - 1`` GNN_EBM_Layer_01 blocks over the node and
    edge task representations, optionally concatenates per-layer node states,
    then maps the flattened (neg, pos) node features to ``output_dim`` scores.
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):
        super(GNN_Energy_Model_1st_Order_01, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.output_dim = output_dim
        self.concat = concat

        dims = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList(
            [GNN_EBM_Layer_01(src, dst) for src, dst in zip(dims[:-1], dims[1:])]
        )

        # With concat the readout sees all per-layer features stacked together.
        readout_width = sum(dims) if self.concat else ebm_GNN_dim
        # The factor of 2 comes from flattening the (neg, pos) axis into features.
        self.node_readout = torch.nn.Sequential(
            torch.nn.Linear(2 * readout_width, 2 * readout_width),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * readout_width, readout_width),
            torch.nn.ReLU(),
            torch.nn.Linear(readout_width, output_dim)
        )

    def forward(self, x_1st, x_2nd, edge):
        """Run message passing and return node scores of shape (B, T, output_dim)."""
        B, T = x_1st.size()[:2]
        layer_outputs = [x_1st]
        node_state, edge_state = x_1st, x_2nd
        final_idx = self.ebm_GNN_layer_num - 1
        for idx in range(self.ebm_GNN_layer_num):
            node_state, edge_state = self.hidden_layers[idx](node_state, edge_state, edge)
            if idx != final_idx:
                # Nonlinearity + dropout between (but not after) GNN blocks.
                node_state = F.dropout(F.relu(node_state), self.dropout, training=self.training)
            layer_outputs.append(node_state)
        if self.concat:
            features = torch.cat(layer_outputs, dim=3).view(B, T, -1)
        else:
            features = node_state.view(B, T, -1)
        return self.node_readout(features)
class GNN_Energy_Model_1st_Order_02(torch.nn.Module):
    """First-order energy head with a single linear node readout.

    Identical message-passing scheme to GNN_Energy_Model_1st_Order_01, but the
    readout is one Linear layer instead of a 3-layer MLP.
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):
        super(GNN_Energy_Model_1st_Order_02, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.output_dim = output_dim
        self.concat = concat

        dims = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList(
            [GNN_EBM_Layer_01(src, dst) for src, dst in zip(dims[:-1], dims[1:])]
        )

        readout_width = sum(dims) if self.concat else ebm_GNN_dim
        # 2x: the (neg, pos) axis is flattened into the feature dimension.
        self.node_readout = torch.nn.Linear(2 * readout_width, output_dim)

    def forward(self, x_1st, x_2nd, edge):
        """Run message passing and return node scores of shape (B, T, output_dim)."""
        B, T = x_1st.size()[:2]
        layer_outputs = [x_1st]
        node_state, edge_state = x_1st, x_2nd
        final_idx = self.ebm_GNN_layer_num - 1
        for idx in range(self.ebm_GNN_layer_num):
            node_state, edge_state = self.hidden_layers[idx](node_state, edge_state, edge)
            if idx != final_idx:
                node_state = F.dropout(F.relu(node_state), self.dropout, training=self.training)
            layer_outputs.append(node_state)
        if self.concat:
            features = torch.cat(layer_outputs, dim=3).view(B, T, -1)
        else:
            features = node_state.view(B, T, -1)
        return self.node_readout(features)
class GNN_Energy_Model_2nd_Order_01(torch.nn.Module):
    """Second-order energy head with deep MLP readouts for nodes and edges.

    Emits one scalar energy per node label state and per edge joint-label
    state: node output (B, T, 2), edge output (B, M, 4).
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
        super(GNN_Energy_Model_2nd_Order_01, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.concat = concat

        dims = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList(
            [GNN_EBM_Layer_01(src, dst) for src, dst in zip(dims[:-1], dims[1:])]
        )

        readout_width = sum(dims) if self.concat else ebm_GNN_dim

        def _mlp_head():
            # Both heads share the same bottleneck shape: width -> 2w -> w -> 1.
            return torch.nn.Sequential(
                torch.nn.Linear(readout_width, 2 * readout_width),
                torch.nn.ReLU(),
                torch.nn.Linear(2 * readout_width, readout_width),
                torch.nn.ReLU(),
                torch.nn.Linear(readout_width, 1)
            )

        self.node_readout = _mlp_head()
        self.edge_readout = _mlp_head()

    def forward(self, x_1st, x_2nd, edge):
        """Return (node_energies (B, T, 2), edge_energies (B, M, 4))."""
        B, T = x_1st.size()[:2]
        M = edge.size()[1]  # kept for parity with the original implementation
        node_feats = [x_1st]
        edge_feats = [x_2nd]
        node_state, edge_state = x_1st, x_2nd
        final_idx = self.ebm_GNN_layer_num - 1
        for idx in range(self.ebm_GNN_layer_num):
            node_state, edge_state = self.hidden_layers[idx](node_state, edge_state, edge)
            if idx != final_idx:
                node_state = F.dropout(F.relu(node_state), self.dropout, training=self.training)
            node_feats.append(node_state)
            edge_feats.append(edge_state)
        if self.concat:
            h_node = torch.cat(node_feats, dim=3)
            h_edge = torch.cat(edge_feats, dim=3)
        else:
            h_node, h_edge = node_state, edge_state
        # Map to scalar energies, then drop the trailing singleton dimension.
        return self.node_readout(h_node).squeeze(3), self.edge_readout(h_edge).squeeze(3)
class GNN_Energy_Model_2nd_Order_02(torch.nn.Module):
    """Second-order energy head that flattens label states before MLP readouts.

    Node features are flattened to (B, T, 2*width) and mapped to 2 energies;
    edge features are flattened to (B, M, 4*width) and mapped to 4 energies.
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
        super(GNN_Energy_Model_2nd_Order_02, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.concat = concat

        dims = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList(
            [GNN_EBM_Layer_01(src, dst) for src, dst in zip(dims[:-1], dims[1:])]
        )

        readout_width = sum(dims) if self.concat else ebm_GNN_dim
        self.node_readout = torch.nn.Sequential(
            torch.nn.Linear(2 * readout_width, 2 * readout_width),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * readout_width, readout_width),
            torch.nn.ReLU(),
            torch.nn.Linear(readout_width, 2)
        )
        self.edge_readout = torch.nn.Sequential(
            torch.nn.Linear(4 * readout_width, 2 * readout_width),
            torch.nn.ReLU(),
            torch.nn.Linear(2 * readout_width, readout_width),
            torch.nn.ReLU(),
            torch.nn.Linear(readout_width, 4)
        )

    def forward(self, x_1st, x_2nd, edge):
        """Return (node_energies (B, T, 2), edge_energies (B, M, 4))."""
        B, T = x_1st.size()[:2]
        M = x_2nd.size()[1]
        node_feats = [x_1st]
        edge_feats = [x_2nd]
        node_state, edge_state = x_1st, x_2nd
        final_idx = self.ebm_GNN_layer_num - 1
        for idx in range(self.ebm_GNN_layer_num):
            node_state, edge_state = self.hidden_layers[idx](node_state, edge_state, edge)
            if idx != final_idx:
                node_state = F.dropout(F.relu(node_state), self.dropout, training=self.training)
            node_feats.append(node_state)
            edge_feats.append(edge_state)
        if self.concat:
            h_node = torch.cat(node_feats, dim=3).view(B, T, -1)
            h_edge = torch.cat(edge_feats, dim=3).view(B, M, -1)
        else:
            h_node = node_state.view(B, T, -1)
            h_edge = edge_state.view(B, M, -1)
        return self.node_readout(h_node), self.edge_readout(h_edge)
class GNN_Energy_Model_2nd_Order_03(torch.nn.Module):
    """Second-order energy head with single linear readouts.

    Like GNN_Energy_Model_2nd_Order_02 but each readout is a lone Linear:
    nodes (B, T, 2*width) -> (B, T, 2), edges (B, M, 4*width) -> (B, M, 4).
    """

    def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
        super(GNN_Energy_Model_2nd_Order_03, self).__init__()
        self.ebm_GNN_dim = ebm_GNN_dim
        self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
        self.dropout = dropout
        self.concat = concat

        dims = [ebm_GNN_dim] * ebm_GNN_layer_num
        self.hidden_layers = torch.nn.ModuleList(
            [GNN_EBM_Layer_01(src, dst) for src, dst in zip(dims[:-1], dims[1:])]
        )

        readout_width = sum(dims) if self.concat else ebm_GNN_dim
        self.node_readout = nn.Linear(2 * readout_width, 2)
        self.edge_readout = nn.Linear(4 * readout_width, 4)

    def forward(self, x_1st, x_2nd, edge):
        """Return (node_energies (B, T, 2), edge_energies (B, M, 4))."""
        B, T = x_1st.size()[:2]
        M = edge.size()[1]
        node_feats = [x_1st]
        edge_feats = [x_2nd]
        node_state, edge_state = x_1st, x_2nd
        final_idx = self.ebm_GNN_layer_num - 1
        for idx in range(self.ebm_GNN_layer_num):
            node_state, edge_state = self.hidden_layers[idx](node_state, edge_state, edge)
            if idx != final_idx:
                node_state = F.dropout(F.relu(node_state), self.dropout, training=self.training)
            node_feats.append(node_state)
            edge_feats.append(edge_state)
        if self.concat:
            h_node = torch.cat(node_feats, dim=3)
            h_edge = torch.cat(edge_feats, dim=3)
        else:
            h_node = node_state
            h_edge = edge_state
        h_node = self.node_readout(h_node.view(B, T, -1))
        h_edge = self.edge_readout(h_edge.view(B, M, -1))
        return h_node, h_edge
# : params x: (batch_size, *, input_dim)
# : output : (batch_size, *, output_dim)
# '''
# self.weights.data = self.softmax(self.weights.data) * self.num_tasks
# return
# def reset_param(self):
# self.weights.data.fill_(1)
# return
| true | true |
f7299b9017108db2507245642f537176097512d3 | 3,847 | py | Python | calculate_t2.py | kathoma/AutomaticKneeMRISegmentation | 72ea3fa96fa5de34461b5999814aa706360f4a79 | [
"Apache-2.0"
] | 7 | 2020-12-09T05:34:06.000Z | 2022-03-17T10:14:24.000Z | calculate_t2.py | kathoma/AutomaticKneeMRISegmentation | 72ea3fa96fa5de34461b5999814aa706360f4a79 | [
"Apache-2.0"
] | 8 | 2021-03-31T18:03:52.000Z | 2022-02-09T23:54:21.000Z | calculate_t2.py | kathoma/AutomaticKneeMRISegmentation | 72ea3fa96fa5de34461b5999814aa706360f4a79 | [
"Apache-2.0"
] | 1 | 2022-02-20T16:04:55.000Z | 2022-02-20T16:04:55.000Z | from __future__ import print_function, division
import sys
sys.path.insert(0, 'lib')
import numpy as np
import random
import scipy.io as sio
import os
import pandas as pd
import scipy.ndimage as ndimage
import math
import os
import scipy.linalg as la
from joblib import Parallel, delayed
from scipy.optimize import curve_fit
from skimage import measure
import scipy.stats as ss
import skimage
#########################################################
# Calculating T2 Values for Segmented Voxels
#########################################################
def exp_func(mri_time, A, m, b):
    """Mono-exponential decay model A * exp(-m * t) used by curve_fit.

    NOTE(review): the offset parameter ``b`` is accepted (curve_fit supplies a
    3-element p0) but never used, so fits implicitly decay to zero — confirm
    this is intended.
    """
    exponent = -m * mri_time
    return A * np.exp(exponent)
def running_mean(x):
    """Length-preserving 3-point moving average of a 1-D array.

    Interior samples become the mean of themselves and both neighbours; each
    endpoint becomes the mean of the two outermost samples on its side, which
    avoids the convolution's boundary shrinkage.
    """
    smoothed = np.copy(x)
    window = np.full(3, 1.0 / 3.0)
    smoothed[1:-1] = np.convolve(x, window, mode='valid')
    # Endpoints: average of the first two / last two raw samples.
    smoothed[0] = np.mean(x[:2])
    smoothed[-1] = np.mean(x[-2:])
    return smoothed
def strictly_decreasing(vec):
    """Return True iff every element of vec is smaller than its predecessor."""
    steps = np.diff(vec)
    return np.all(steps < 0)
def fit_t2(t2imgs, t2times, segmentation = None, n_jobs = 4, show_bad_pixels = True):
    '''
    Fits mono-exponential T2 decay curves to the T2-weighted images in each slice.

    IN:
        t2imgs - T2 weighted images in numpy array (nr_slices, time_steps, width, heigth)
        t2times - per-slice list of acquisition times (seconds)
        segmentation - segmentation matrix (nr_slices, width, heigth); if None,
                       every voxel of every slice is fitted
        n_jobs - number of parallel jobs (currently unused; kept for interface
                 compatibility)
        show_bad_pixels - if True, plot voxels whose fit fails or yields an
                          implausible T2 (requires matplotlib)
    OUT:
        matrix (nr_slices, width, heigth) with T2 values in milliseconds
        (NaN where no fit was performed or possible)
    '''
    # matplotlib is only needed for diagnostics; import it lazily so the fit
    # also runs in headless environments. (BUGFIX: `plt` was previously used
    # without ever being imported, raising NameError on the first bad pixel.)
    if show_bad_pixels:
        import matplotlib.pyplot as plt

    t2_tensor = np.zeros((t2imgs.shape[0], t2imgs.shape[2], t2imgs.shape[3]))

    def fit_per_slice(slice_idx, show_bad_pixels):
        # Fit every masked voxel of one slice; returns a (width, heigth) T2 map in seconds.
        scan = t2imgs[slice_idx,:,:,:]
        # Re-reference acquisition times so the first echo is at t=0.
        mri_time = np.array(t2times[slice_idx]) - t2times[slice_idx][0]

        if segmentation is not None:  # if we have a segmentation
            segmentation_mask = segmentation[slice_idx,:,:]
        else:
            # BUGFIX: with segmentation=None the mask was previously unbound
            # (NameError); fall back to fitting the whole slice.
            segmentation_mask = np.ones((scan.shape[1], scan.shape[2]), dtype=bool)
        (cartilage_indices_r, cartilage_indices_c) = np.where(segmentation_mask)

        t2_matrix = np.full((scan.shape[1], scan.shape[2]), np.nan)

        if len(cartilage_indices_r) > 0:
            for i in np.arange(len(cartilage_indices_r)):
                ir = cartilage_indices_r[i]
                ic = cartilage_indices_c[i]

                if all(scan[:,ir,ic] == scan[0,ir,ic]):  # constant signal: decay is 0, skip
                    continue
                try:
                    # Drop the first echo; smooth the remainder unless it is
                    # already strictly decreasing.
                    if strictly_decreasing(scan[1:,ir,ic]):
                        echo_corrected = scan[1:,ir,ic]
                    else:
                        echo_corrected = running_mean(scan[1:,ir,ic])

                    parameters,_ = curve_fit(exp_func,
                                             mri_time[1:],
                                             echo_corrected,
                                             p0 = [scan[0,ir,ic], .03, 0])
                    m = parameters[1]  # fitted decay rate; T2 is its reciprocal
                    t2_ = 1./m
                    t2_matrix[ir, ic] = t2_

                    if show_bad_pixels:
                        # Flag implausible fits (|T2| > 100 ms; times are in seconds here).
                        if ((t2_ > .100) or (t2_< -.100)):
                            print(t2_)
                            plt.plot(mri_time, scan[:,ir,ic])
                            plt.plot(mri_time, exp_func(mri_time, *parameters), 'r-')
                            plt.show()
                except RuntimeError:
                    # curve_fit did not converge for this voxel; leave it NaN.
                    if show_bad_pixels:
                        plt.plot(mri_time, scan[:,ir,ic])
                        plt.title("Did not converge")
                        plt.show()
        return t2_matrix

    for i in range(t2imgs.shape[0]):
        t2_tensor[i,:,:] = fit_per_slice(i, show_bad_pixels)*1000  # convert s -> ms
    return t2_tensor
import sys
sys.path.insert(0, 'lib')
import numpy as np
import random
import scipy.io as sio
import os
import pandas as pd
import scipy.ndimage as ndimage
import math
import os
import scipy.linalg as la
from joblib import Parallel, delayed
from scipy.optimize import curve_fit
from skimage import measure
import scipy.stats as ss
import skimage
| true | true |
f7299c8d741309a0fb151a29fea28020d2131a61 | 808 | py | Python | somedjango/manage.py | dvaldivia/grpc-celery-fork-bug | 421eca43daef9e138d53e6f095cf470b98c14f99 | [
"MIT"
] | null | null | null | somedjango/manage.py | dvaldivia/grpc-celery-fork-bug | 421eca43daef9e138d53e6f095cf470b98c14f99 | [
"MIT"
] | null | null | null | somedjango/manage.py | dvaldivia/grpc-celery-fork-bug | 421eca43daef9e138d53e6f095cf470b98c14f99 | [
"MIT"
] | 1 | 2019-03-14T04:09:43.000Z | 2019-03-14T04:09:43.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Django management entry point: point Django at this project's settings
    # module, then hand the CLI arguments to Django's command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "somedjango.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The import above can fail for reasons other than Django being
        # absent. Re-importing django distinguishes "Django is missing"
        # (friendly message below) from a genuine ImportError raised inside
        # Django, which is re-raised unchanged.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| 35.130435 | 77 | 0.643564 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "somedjango.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| true | true |
f7299d40298fb4401be8a4ad23a730983ca934ac | 14,248 | py | Python | src/longbow/scsplit/command.py | broadinstitute/annmas | 79da783acf41e5aaca4a14ef991c9ab0aac3c59a | [
"BSD-3-Clause"
] | null | null | null | src/longbow/scsplit/command.py | broadinstitute/annmas | 79da783acf41e5aaca4a14ef991c9ab0aac3c59a | [
"BSD-3-Clause"
] | 12 | 2021-02-02T17:16:33.000Z | 2021-03-15T20:31:28.000Z | src/longbow/scsplit/command.py | broadinstitute/annmas | 79da783acf41e5aaca4a14ef991c9ab0aac3c59a | [
"BSD-3-Clause"
] | null | null | null | import logging
import sys
import itertools
import time
import click
import click_log
import tqdm
import pysam
import multiprocessing as mp
from inspect import getframeinfo, currentframe, getdoc
from ..utils import bam_utils
from ..utils.model import LibraryModel
from ..annotate.command import get_segments
from ..meta import VERSION
# Log to stderr so stdout stays usable for piped data.
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("scsplit")
click_log.basic_config(logger)
# Placeholder cell barcode applied to every read (single-source library).
__DEFAULT_DUMMY_CBC = "CTGCCTAACCTGATCC"
# Default prefix for all output files (matches the subcommand name).
__DEFAULT_OUT_BASE_NAME = logger.name
# Default number of UMI bases taken immediately after the start element.
__DEFAULT_UMI_LENGTH = 10
# Suffixes for the mates fastq pair and the alevin barcode whitelist file.
__OUT_READ_FILE_SUFFIX = "_mates"
__OUT_WHITELIST_FILE_SUFFIX = "_whitelist.txt"
@click.command(name=logger.name)
@click_log.simple_verbosity_option(logger)
@click.option(
    "-t",
    "--threads",
    type=int,
    default=mp.cpu_count() - 1,
    show_default=True,
    help="number of threads to use (0 for all)",
)
@click.option(
    "-o",
    "--output-base-name",
    default=__DEFAULT_OUT_BASE_NAME,
    type=str,
    help=f"base name for output files [default: {__DEFAULT_OUT_BASE_NAME}]",
)
@click.option(
    "-c",
    "--cell-barcode",
    default=__DEFAULT_DUMMY_CBC,
    type=str,
    help=f"dummy cell barcode to use for the dataset [default: {__DEFAULT_DUMMY_CBC}, "
         f"length: {len(__DEFAULT_DUMMY_CBC)}]",
)
@click.option(
    "-u",
    "--umi-length",
    default=__DEFAULT_UMI_LENGTH,
    type=int,
    show_default=True,
    help=f"length of the UMI from this library",
)
@click.option(
    "-b",
    "--write-bam",
    is_flag=True,
    default=False,
    show_default=True,
    help=f"Write out an annotated bam file in addition to the mates files.",
)
@click.option(
    '--force',
    is_flag=True,
    default=False,
    show_default=True,
    help="Force scsplit to run on the input bam without checking for compatibility."
)
@click.option(
    "-m",
    "--model",
    default="mas15",
    show_default=True,
    help="The model to use for annotation. If the given value is a pre-configured model name, then that "
         "model will be used. Otherwise, the given value will be treated as a file name and Longbow will attempt to "
         "read in the file and create a LibraryModel from it. Longbow will assume the contents are the configuration "
         "of a LibraryModel as per LibraryModel.to_json()."
)
@click.argument("input-bam", default="-" if not sys.stdin.isatty() else None, type=click.File("rb"))
def main(threads, output_base_name, cell_barcode, umi_length, force, model, write_bam, input_bam):
    """Create files for use in `alevin` for single-cell analysis.
    This tool coerces a set of reads from a single source into a format that `alevin` can ingest.
    Segment names are assumed to be those in the default model (utils/model.py).
    INPUT_BAM should contain reads that have been processed by `longbow segment`.
    The output from this tool consists of several files:
    OUTPUT_BASE_NAME_mates_1.fastq:
    A file containing partial sequences for all reads in the given input file.  These partial reads consist of the
    dummy cell barcode + detected UMI for each read in the given input file.
    OUTPUT_BASE_NAME_mates_2.fastq:
    A file containing partial sequences for all reads in the given input file.  These partial reads consist of the
    transcript sequences for all reads in the given input file.  Transcript sequences include data after the UMI
    and before the Poly-A tail.  All bases outside of this range are excluded from the output.
    OUTPUT_BASE_NAME_whitelist.txt:
    A whitelist file for alevin containing the given dummy cell barcode.
    """
    t_start = time.time()
    logger.info("Invoked via: longbow %s", " ".join(sys.argv[1:]))
    # Clamp the requested worker count to the number of available CPUs.
    threads = mp.cpu_count() if threads <= 0 or threads > mp.cpu_count() else threads
    logger.info(f"Running with {threads} worker subprocess(es)")
    # Get our model (either a named pre-built model, or one loaded from a
    # LibraryModel json config file):
    if LibraryModel.has_prebuilt_model(model):
        logger.info(f"Using %s", LibraryModel.pre_configured_models[model]["description"])
        model = LibraryModel.build_pre_configured_model(model)
    else:
        logger.info(f"Loading model from json file: %s", model)
        model = LibraryModel.from_json_file(model)
    # Configure process manager:
    # NOTE: We're using processes to overcome the Global Interpreter Lock.
    manager = mp.Manager()
    process_input_data_queue = manager.Queue(threads)
    results = manager.Queue()
    # Start worker sub-processes; each pulls serialized reads from the input
    # queue and pushes extracted (UMI, transcript) tuples onto `results`:
    worker_process_pool = []
    for _ in range(threads):
        p = mp.Process(
            target=_sub_process_work_fn, args=(process_input_data_queue, results, umi_length, model, write_bam)
        )
        p.start()
        worker_process_pool.append(p)
    pysam.set_verbosity(0)  # silence message about the .bai file not being found
    with pysam.AlignmentFile(
        input_bam, "rb", check_sq=False, require_index=False
    ) as bam_file, tqdm.tqdm(
        desc="Progress",
        unit=" read",
        colour="green",
        file=sys.stderr,
        leave=False,
        disable=not sys.stdin.isatty(),
    ) as pbar:
        if force:
            logger.info("Force mode - skipping bam header check for compatibility")
        else:
            # Make sure we're given an input bam file we can work with:
            if not _validate_input_bam(bam_file.header):
                # Bad news - we have to quit.
                # Let's try to do it nicely: feed every worker its exit
                # sentinel so the pool shuts down cleanly before we bail.
                for r in (None,) * threads:
                    process_input_data_queue.put(r)
                # Wait for our input jobs to finish:
                for p in worker_process_pool:
                    p.join()
                sys.exit(1)
        # Get our header from the input bam file:
        out_header = bam_utils.create_bam_header_with_program_group(logger.name, bam_file.header, models=[model])
        # Start the single output worker; it owns all file handles so writes
        # never interleave across processes:
        res = manager.dict({"num_reads_processed": 0})
        output_worker = mp.Process(
            target=_sub_process_write_fn,
            args=(
                results,
                output_base_name,
                cell_barcode,
                pbar,
                res,
                write_bam,
                out_header
            ),
        )
        output_worker.start()
        # Add in a sentinel value at the end of the queue - one for each subprocess - so we guarantee
        # that all subprocesses will exit:
        iter_data = itertools.chain(bam_file, (None,) * threads)
        for r in iter_data:
            if r is not None:
                # Reads are serialized to strings because pysam objects
                # cannot be pickled across process boundaries.
                process_input_data_queue.put(r.to_string())
            else:
                process_input_data_queue.put(r)
        # Wait for our input jobs to finish:
        for p in worker_process_pool:
            p.join()
        # Now that our input processes are done, we can add our exit sentinel onto the output queue and
        # wait for that process to end:
        results.put(None)
        output_worker.join()
    # Write out our CBC whitelist file:
    with open(f"{output_base_name}{__OUT_WHITELIST_FILE_SUFFIX}", "w") as f:
        f.write(f"{cell_barcode}\n")
    logger.info(f"Processed {res['num_reads_processed']} reads.")
    logger.info(f"CBC length: {len(cell_barcode)}.")
    logger.info(f"UMI length: {umi_length}.")
    logger.info(f"Done. Elapsed time: %2.2fs.", time.time() - t_start)
def _validate_input_bam(input_bam_header):
"""Check that the given input_bam_header contains an `longbow segment` program group."""
in_bam_header_dict = input_bam_header.to_dict()
if "PG" not in in_bam_header_dict:
logger.warning("Could not find PG entry in header. Cannot confirm that this file is compatible.")
else:
found_segment_cmd = False
for info in [item for item in in_bam_header_dict["PG"]]:
if "PN" not in info:
continue
if info["PN"] == "longbow" and info["ID"].split("-")[1] == "segment":
found_segment_cmd = True
break
if not found_segment_cmd:
logger.error(
"Input bam file header does not indicate that it was created by longbow segment. "
"This tool requires `longbow segment` reads as input data.")
return False
return True
def _get_start_segment_from_list(seg_list, model, read_name):
"""Get the start segment segment from the list of SegmentInfo objects based on the given model.
If no start segment is found, returns None."""
# The start segment should be the first matching segment:
for s in seg_list:
if s.name in model.start_element_names:
return s
logger.warning("Could not process read: %s - No start segment found (start names: %s).",
read_name, model.start_element_names)
return None
def _get_end_segment_from_list(seg_list, model, read_name):
"""Get the end segment segment from the list of SegmentInfo objects based on the given model.
If no start segment is found, returns None."""
# The end segment should be the last matching segment, so we
# iterate from the end to the start of the list:
for s in reversed(seg_list):
if s.name in model.end_element_names:
return s
logger.warning("Could not process read: %s - No end segment found (end names: %s).",
read_name, model.start_element_names)
return None
def _sub_process_work_fn(in_queue, out_queue, umi_length, array_model, do_bam_out):
    """Function to run in each subprocess.

    Pulls serialized reads from `in_queue` until a None sentinel arrives,
    extracts the UMI (the `umi_length` bases after the model's start element)
    and the transcript (bases between the UMI and the model's end element),
    and pushes result tuples onto `out_queue`. When `do_bam_out` is set, the
    serialized read itself is appended to each tuple for bam re-emission.
    """
    while True:
        # Wait until we get some data.
        # Note: Because we have a sentinel value None inserted at the end of the input data for each
        # subprocess, we don't have to add a timeout - we're guaranteed each process will always have
        # at least one element.
        raw_data = in_queue.get()
        # Check for exit sentinel:
        if raw_data is None:
            return
        # Unpack our data here (reads travel the queue as strings because pysam
        # objects cannot be pickled; an empty header is enough to rebuild them):
        read = pysam.AlignedSegment.fromstring(
            raw_data, pysam.AlignmentHeader.from_dict(dict())
        )
        _, segments = get_segments(read)
        # Get start element position
        # (for MAS-seq it's the 10x adapter)
        start_segment = _get_start_segment_from_list(segments, array_model, read.query_name)
        if start_segment is None:
            continue
        # Get the end element position:
        # (for MAS-seq it's the Poly-a)
        end_segment = _get_end_segment_from_list(segments, array_model, read.query_name)
        if end_segment is None:
            continue
        # Now we grab the bases just after the 10x adapter as the UMI
        # and the bases between the UMI and the poly A for the transcript
        # Note: Positions are inclusive so we must add 1 to the end position to get that base as well:
        umi_start = start_segment.end+1
        umi_end = umi_start + umi_length
        umi_bases = read.query_sequence[umi_start:umi_end]
        # Qualities are re-encoded as Phred+33 ascii for fastq output:
        umi_quals = "".join([chr(i + 33) for i in read.query_alignment_qualities[umi_start:umi_end]])
        transcript_bases = read.query_sequence[umi_end:end_segment.start]
        transcript_quals = "".join(
            [chr(i + 33) for i in read.query_alignment_qualities[umi_end:end_segment.start]]
        )
        # Place our data on the output queue:
        if do_bam_out:
            out_queue.put(
                tuple([read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read.to_string()])
            )
        else:
            out_queue.put(
                tuple([read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals])
            )
def _sub_process_write_fn(
    out_queue,
    out_base_name,
    cell_barcode,
    pbar,
    res,
    do_bam_out,
    out_bam_header
):
    """Thread / process fn to write out all our data.

    Drains (read_name, umi_bases, umi_quals, transcript_bases,
    transcript_quals[, read_string]) tuples from `out_queue` until a None
    sentinel arrives. Writes mates1 (dummy CBC + UMI) and mates2 (transcript)
    fastq records, optionally a CR/UR-tagged bam, and updates
    `res["num_reads_processed"]` and the progress bar.
    """
    # BUGFIX: bind before the try block so the finally clause cannot raise a
    # NameError (masking the real exception) when opening the bam file fails
    # or when an error occurs with do_bam_out disabled.
    out_bam_file = None
    try:
        if do_bam_out:
            out_bam_file = pysam.AlignmentFile(f"{out_base_name}.cbc_umi_annotated.bam", "wb", header=out_bam_header)
        with open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}1.fastq", "w") as mates1_file, \
                open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}2.fastq", "w") as mates2_file:
            while True:
                # Wait for some output data:
                raw_data = out_queue.get()
                # Check for exit sentinel:
                if raw_data is None:
                    break
                # Unpack data (workers append the serialized read when bam output is on):
                if do_bam_out:
                    read_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read_string = raw_data
                else:
                    read_name, umi_bases, umi_quals, transcript_bases, transcript_quals = raw_data
                # Create mates1 and mates2 records; the dummy CBC gets a fixed
                # high quality (Phred 60, ascii chr(93)):
                mates_1_record = pysam.FastxRecord(
                    name=read_name,
                    sequence=cell_barcode + umi_bases,
                    quality=(chr(33 + 60) * len(cell_barcode)) + umi_quals
                )
                mates_2_record = pysam.FastxRecord(
                    name=read_name,
                    sequence=transcript_bases,
                    quality=transcript_quals
                )
                # Write out mates1 and mates2 records:
                mates1_file.write(str(mates_1_record))
                mates1_file.write("\n")
                mates2_file.write(str(mates_2_record))
                mates2_file.write("\n")
                if do_bam_out:
                    read = pysam.AlignedSegment.fromstring(
                        read_string, pysam.AlignmentHeader.from_dict(dict())
                    )
                    # Annotate the raw cell barcode and raw UMI tags:
                    read.set_tag("CR", cell_barcode)
                    read.set_tag("UR", umi_bases)
                    out_bam_file.write(read)
                # Increment our counters:
                res["num_reads_processed"] += 1
                pbar.update(1)
                # Obligatory log message:
                logger.debug("Processed read: %s", read_name)
    finally:
        if out_bam_file is not None:
            out_bam_file.close()
| 36.162437 | 123 | 0.629913 | import logging
import sys
import itertools
import time
import click
import click_log
import tqdm
import pysam
import multiprocessing as mp
from inspect import getframeinfo, currentframe, getdoc
from ..utils import bam_utils
from ..utils.model import LibraryModel
from ..annotate.command import get_segments
from ..meta import VERSION
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("scsplit")
click_log.basic_config(logger)
__DEFAULT_DUMMY_CBC = "CTGCCTAACCTGATCC"
__DEFAULT_OUT_BASE_NAME = logger.name
__DEFAULT_UMI_LENGTH = 10
__OUT_READ_FILE_SUFFIX = "_mates"
__OUT_WHITELIST_FILE_SUFFIX = "_whitelist.txt"
@click.command(name=logger.name)
@click_log.simple_verbosity_option(logger)
@click.option(
"-t",
"--threads",
type=int,
default=mp.cpu_count() - 1,
show_default=True,
help="number of threads to use (0 for all)",
)
@click.option(
"-o",
"--output-base-name",
default=__DEFAULT_OUT_BASE_NAME,
type=str,
help=f"base name for output files [default: {__DEFAULT_OUT_BASE_NAME}]",
)
@click.option(
"-c",
"--cell-barcode",
default=__DEFAULT_DUMMY_CBC,
type=str,
help=f"dummy cell barcode to use for the dataset [default: {__DEFAULT_DUMMY_CBC}, "
f"length: {len(__DEFAULT_DUMMY_CBC)}]",
)
@click.option(
"-u",
"--umi-length",
default=__DEFAULT_UMI_LENGTH,
type=int,
show_default=True,
help=f"length of the UMI from this library",
)
@click.option(
"-b",
"--write-bam",
is_flag=True,
default=False,
show_default=True,
help=f"Write out an annotated bam file in addition to the mates files.",
)
@click.option(
'--force',
is_flag=True,
default=False,
show_default=True,
help="Force scsplit to run on the input bam without checking for compatibility."
)
@click.option(
"-m",
"--model",
default="mas15",
show_default=True,
help="The model to use for annotation. If the given value is a pre-configured model name, then that "
"model will be used. Otherwise, the given value will be treated as a file name and Longbow will attempt to "
"read in the file and create a LibraryModel from it. Longbow will assume the contents are the configuration "
"of a LibraryModel as per LibraryModel.to_json()."
)
@click.argument("input-bam", default="-" if not sys.stdin.isatty() else None, type=click.File("rb"))
def main(threads, output_base_name, cell_barcode, umi_length, force, model, write_bam, input_bam):
t_start = time.time()
logger.info("Invoked via: longbow %s", " ".join(sys.argv[1:]))
threads = mp.cpu_count() if threads <= 0 or threads > mp.cpu_count() else threads
logger.info(f"Running with {threads} worker subprocess(es)")
if LibraryModel.has_prebuilt_model(model):
logger.info(f"Using %s", LibraryModel.pre_configured_models[model]["description"])
model = LibraryModel.build_pre_configured_model(model)
else:
logger.info(f"Loading model from json file: %s", model)
model = LibraryModel.from_json_file(model)
manager = mp.Manager()
process_input_data_queue = manager.Queue(threads)
results = manager.Queue()
# Start worker sub-processes:
worker_process_pool = []
for _ in range(threads):
p = mp.Process(
target=_sub_process_work_fn, args=(process_input_data_queue, results, umi_length, model, write_bam)
)
p.start()
worker_process_pool.append(p)
pysam.set_verbosity(0) # silence message about the .bai file not being found
with pysam.AlignmentFile(
input_bam, "rb", check_sq=False, require_index=False
) as bam_file, tqdm.tqdm(
desc="Progress",
unit=" read",
colour="green",
file=sys.stderr,
leave=False,
disable=not sys.stdin.isatty(),
) as pbar:
if force:
logger.info("Force mode - skipping bam header check for compatibility")
else:
# Make sure we're given an input bam file we can work with:
if not _validate_input_bam(bam_file.header):
for r in (None,) * threads:
process_input_data_queue.put(r)
# Wait for our input jobs to finish:
for p in worker_process_pool:
p.join()
sys.exit(1)
# Get our header from the input bam file:
out_header = bam_utils.create_bam_header_with_program_group(logger.name, bam_file.header, models=[model])
# Start output worker:
res = manager.dict({"num_reads_processed": 0})
output_worker = mp.Process(
target=_sub_process_write_fn,
args=(
results,
output_base_name,
cell_barcode,
pbar,
res,
write_bam,
out_header
),
)
output_worker.start()
# Add in a sentinel value at the end of the queue - one for each subprocess - so we guarantee
# that all subprocesses will exit:
iter_data = itertools.chain(bam_file, (None,) * threads)
for r in iter_data:
if r is not None:
process_input_data_queue.put(r.to_string())
else:
process_input_data_queue.put(r)
# Wait for our input jobs to finish:
for p in worker_process_pool:
p.join()
# Now that our input processes are done, we can add our exit sentinel onto the output queue and
# wait for that process to end:
results.put(None)
output_worker.join()
# Write out our CBC whitelist file:
with open(f"{output_base_name}{__OUT_WHITELIST_FILE_SUFFIX}", "w") as f:
f.write(f"{cell_barcode}\n")
logger.info(f"Processed {res['num_reads_processed']} reads.")
logger.info(f"CBC length: {len(cell_barcode)}.")
logger.info(f"UMI length: {umi_length}.")
logger.info(f"Done. Elapsed time: %2.2fs.", time.time() - t_start)
def _validate_input_bam(input_bam_header):
in_bam_header_dict = input_bam_header.to_dict()
if "PG" not in in_bam_header_dict:
logger.warning("Could not find PG entry in header. Cannot confirm that this file is compatible.")
else:
found_segment_cmd = False
for info in [item for item in in_bam_header_dict["PG"]]:
if "PN" not in info:
continue
if info["PN"] == "longbow" and info["ID"].split("-")[1] == "segment":
found_segment_cmd = True
break
if not found_segment_cmd:
logger.error(
"Input bam file header does not indicate that it was created by longbow segment. "
"This tool requires `longbow segment` reads as input data.")
return False
return True
def _get_start_segment_from_list(seg_list, model, read_name):
# The start segment should be the first matching segment:
for s in seg_list:
if s.name in model.start_element_names:
return s
logger.warning("Could not process read: %s - No start segment found (start names: %s).",
read_name, model.start_element_names)
return None
def _get_end_segment_from_list(seg_list, model, read_name):
# The end segment should be the last matching segment, so we
# iterate from the end to the start of the list:
for s in reversed(seg_list):
if s.name in model.end_element_names:
return s
logger.warning("Could not process read: %s - No end segment found (end names: %s).",
read_name, model.start_element_names)
return None
def _sub_process_work_fn(in_queue, out_queue, umi_length, array_model, do_bam_out):
    """Worker-process loop: pull SAM-formatted reads off `in_queue`, extract the
    UMI and transcript subsequences, and place result tuples on `out_queue`.

    Args:
        in_queue: queue of SAM read strings; None is the exit sentinel.
        out_queue: queue receiving per-read result tuples for the writer process.
        umi_length: number of bases immediately after the start segment taken as the UMI.
        array_model: model providing start/end element names for segment lookup.
        do_bam_out: if True, append the raw read string to each result tuple so
            the writer can also emit an annotated bam record.
    """
    while True:
        # Wait until we get some data.
        # Note: Because we have a sentinel value None inserted at the end of the input data for each
        # subprocess, we don't have to add a timeout - we're guaranteed each process will always have
        # at least one element.
        raw_data = in_queue.get()
        # Check for exit sentinel:
        if raw_data is None:
            return
        # Reads cross the process boundary as SAM strings; rebuild the pysam object here:
        read = pysam.AlignedSegment.fromstring(
            raw_data, pysam.AlignmentHeader.from_dict(dict())
        )
        _, segments = get_segments(read)
        # Get start element position
        # (for MAS-seq it's the 10x adapter)
        start_segment = _get_start_segment_from_list(segments, array_model, read.query_name)
        if start_segment is None:
            continue
        end_segment = _get_end_segment_from_list(segments, array_model, read.query_name)
        if end_segment is None:
            continue
        # Now we grab the bases just after the 10x adapter as the UMI
        # and the bases between the UMI and the poly A for the transcript
        # Note: Positions are inclusive so we must add 1 to the end position to get that base as well:
        umi_start = start_segment.end + 1
        umi_end = umi_start + umi_length
        # NOTE(review): bases are sliced from query_sequence but qualities from
        # query_alignment_qualities; those coordinate systems only agree for
        # unaligned/unclipped reads - confirm query_qualities was not intended.
        umi_bases = read.query_sequence[umi_start:umi_end]
        umi_quals = "".join([chr(i + 33) for i in read.query_alignment_qualities[umi_start:umi_end]])
        transcript_bases = read.query_sequence[umi_end:end_segment.start]
        transcript_quals = "".join(
            [chr(i + 33) for i in read.query_alignment_qualities[umi_end:end_segment.start]]
        )
        # Place our data on the output queue. (Plain tuple literals - the old
        # tuple([...]) form built a throwaway list for every read.)
        if do_bam_out:
            out_queue.put(
                (read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read.to_string())
            )
        else:
            out_queue.put(
                (read.query_name, umi_bases, umi_quals, transcript_bases, transcript_quals)
            )
def _sub_process_write_fn(
    out_queue,
    out_base_name,
    cell_barcode,
    pbar,
    res,
    do_bam_out,
    out_bam_header
):
    """Output-process loop: drain result tuples from `out_queue` and write
    mates1/mates2 fastq records (and, optionally, a CBC/UMI-annotated bam).

    Runs until the None exit sentinel is received. `res` is a manager dict used
    to report the number of reads processed back to the parent process; `pbar`
    is the shared progress bar.
    """
    # Initialize before the try so the finally-block is safe even if opening
    # the bam raises (previously `out_bam_file` could be unbound there,
    # turning the real error into a NameError).
    out_bam_file = None
    try:
        if do_bam_out:
            out_bam_file = pysam.AlignmentFile(f"{out_base_name}.cbc_umi_annotated.bam", "wb", header=out_bam_header)
        with open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}1.fastq", "w") as mates1_file, \
                open(f"{out_base_name}{__OUT_READ_FILE_SUFFIX}2.fastq", "w") as mates2_file:
            while True:
                # Wait for some output data:
                raw_data = out_queue.get()
                # Check for exit sentinel:
                if raw_data is None:
                    break
                # Unpack data (the worker appends the raw read string only in bam mode):
                if do_bam_out:
                    read_name, umi_bases, umi_quals, transcript_bases, transcript_quals, read_string = raw_data
                else:
                    read_name, umi_bases, umi_quals, transcript_bases, transcript_quals = raw_data
                # Create mates1 and mates2 records. Mates1 carries barcode + UMI;
                # the barcode gets a synthetic Q60 quality since it was supplied,
                # not sequenced.
                mates_1_record = pysam.FastxRecord(
                    name=read_name,
                    sequence=cell_barcode + umi_bases,
                    quality=(chr(33 + 60) * len(cell_barcode)) + umi_quals
                )
                mates_2_record = pysam.FastxRecord(
                    name=read_name,
                    sequence=transcript_bases,
                    quality=transcript_quals
                )
                # Write out mates1 and mates2 records:
                mates1_file.write(str(mates_1_record))
                mates1_file.write("\n")
                mates2_file.write(str(mates_2_record))
                mates2_file.write("\n")
                if do_bam_out:
                    read = pysam.AlignedSegment.fromstring(
                        read_string, pysam.AlignmentHeader.from_dict(dict())
                    )
                    read.set_tag("CR", cell_barcode)
                    read.set_tag("UR", umi_bases)
                    out_bam_file.write(read)
                # Increment our counters:
                res["num_reads_processed"] += 1
                pbar.update(1)
                # Obligatory log message:
                logger.debug("Processed read: %s", read_name)
    finally:
        # Only close a bam file that was actually opened.
        if out_bam_file is not None:
            out_bam_file.close()
| true | true |
f7299dc9ddc1e115d3f2974bfd99ca7dd9ec3b12 | 10,844 | py | Python | isign/bundle.py | chenchaozhongvip/isign | 9561e3c3fc3fe9281792c60503c5a2a6235725ad | [
"Apache-2.0"
] | null | null | null | isign/bundle.py | chenchaozhongvip/isign | 9561e3c3fc3fe9281792c60503c5a2a6235725ad | [
"Apache-2.0"
] | null | null | null | isign/bundle.py | chenchaozhongvip/isign | 9561e3c3fc3fe9281792c60503c5a2a6235725ad | [
"Apache-2.0"
] | null | null | null | """ Represents a bundle. In the words of the Apple docs, it's a convenient way to deliver
software. Really it's a particular kind of directory structure, with one main executable,
well-known places for various data files and libraries,
and tracking hashes of all those files for signing purposes.
For isign, we have two main kinds of bundles: the App, and the Framework (a reusable
library packaged along with its data files.) An App may contain many Frameworks, but
a Framework has to be re-signed independently.
See the Apple Developer Documentation "About Bundles" """
import biplist
import code_resources
from exceptions import NotMatched
import copy
import glob
import logging
import os
from os.path import basename, exists, join, splitext
from signer import openssl_command
import signable
import shutil
log = logging.getLogger(__name__)
def is_info_plist_native(plist):
    """ True when the Info.plist declares iPhoneOS among its supported
        platforms, i.e. the bundle targets native iOS. """
    if 'CFBundleSupportedPlatforms' not in plist:
        return False
    return 'iPhoneOS' in plist['CFBundleSupportedPlatforms']
class Bundle(object):
    """ A bundle is a standard directory structure, a signable, installable set of files.
        Apps are Bundles, but so are some kinds of Frameworks (libraries) """
    helpers = []
    signable_class = None
    entitlements_path = None  # Not set for every bundle type

    def __init__(self, path):
        self.path = path
        self.info_path = join(self.path, 'Info.plist')
        if not exists(self.info_path):
            raise NotMatched("no Info.plist found; probably not a bundle")
        self.info = biplist.readPlist(self.info_path)
        self.orig_info = None
        if not is_info_plist_native(self.info):
            # while we should probably not allow this *or* add it ourselves, it appears to work without it
            log.debug(u"Missing/invalid CFBundleSupportedPlatforms value in {}".format(self.info_path))
        # set later, when the seal is created during signing
        self.seal_path = None

    def get_entitlements_path(self):
        """ Path to the Entitlements.plist, or None for bundle types without one. """
        return self.entitlements_path

    def get_executable_path(self):
        """ Path to the main executable. For an app, this is app itself. For
            a Framework, this is the main framework """
        executable_name = None
        if 'CFBundleExecutable' in self.info:
            executable_name = self.info['CFBundleExecutable']
        else:
            # fall back to the bundle's own name, minus extension
            executable_name, _ = splitext(basename(self.path))
        executable = join(self.path, executable_name)
        if not exists(executable):
            raise Exception(
                'could not find executable for {0}'.format(self.path))
        return executable

    def update_info_props(self, new_props):
        """ Merge new_props into Info.plist, remembering the original values in
            self.orig_info so callers can later ask what changed. The plist is
            rewritten on disk only when something actually changed. """
        if self.orig_info is None:
            self.orig_info = copy.deepcopy(self.info)

        changed = False
        if ('CFBundleIdentifier' in new_props and
                'CFBundleURLTypes' in self.info and
                'CFBundleURLTypes' not in new_props):
            # The bundle identifier changed. Check CFBundleURLTypes for
            # CFBundleURLName values matching the old bundle
            # id if it's not being set explicitly
            old_bundle_id = self.info['CFBundleIdentifier']
            new_bundle_id = new_props['CFBundleIdentifier']
            for url_type in self.info['CFBundleURLTypes']:
                if 'CFBundleURLName' not in url_type:
                    continue
                if url_type['CFBundleURLName'] == old_bundle_id:
                    url_type['CFBundleURLName'] = new_bundle_id
                    changed = True

        # iteritems(): this module is Python 2 code
        for key, val in new_props.iteritems():
            is_new_key = key not in self.info
            if is_new_key or self.info[key] != val:
                if is_new_key:
                    log.warn("Adding new Info.plist key: {}".format(key))
                self.info[key] = val
                changed = True

        if changed:
            biplist.writePlist(self.info, self.info_path, binary=True)
        else:
            self.orig_info = None

    def info_props_changed(self):
        """ True if update_info_props() changed any property. """
        return self.orig_info is not None

    def info_prop_changed(self, key):
        """ True if update_info_props() changed (or added) this particular key. """
        if not self.orig_info:
            # No props have been changed
            return False
        if key in self.info and key in self.orig_info and self.info[key] == self.orig_info[key]:
            return False
        return True

    def get_info_prop(self, key):
        """ Current value of an Info.plist property. """
        return self.info[key]

    def sign_dylibs(self, signer, path):
        """ Sign all the dylibs in this directory """
        for dylib_path in glob.glob(join(path, '*.dylib')):
            dylib = signable.Dylib(self, dylib_path, signer)
            dylib.sign(self, signer)

    def sign(self, signer):
        """ Sign everything in this bundle, recursively with sub-bundles """
        frameworks_path = join(self.path, 'Frameworks')
        if exists(frameworks_path):
            # re-sign each sub-framework first; non-frameworks are skipped
            for framework_name in os.listdir(frameworks_path):
                framework_path = join(frameworks_path, framework_name)
                try:
                    framework = Framework(framework_path)
                    framework.resign(signer)
                except NotMatched:
                    continue
            # sign all the dylibs under Frameworks
            self.sign_dylibs(signer, frameworks_path)
        # sign any dylibs in the main directory (rare, but it happens)
        self.sign_dylibs(signer, self.path)
        plugins_path = join(self.path, 'PlugIns')
        if exists(plugins_path):
            # sign the appex executables
            appex_paths = glob.glob(join(plugins_path, '*.appex'))
            for appex_path in appex_paths:
                plist_path = join(appex_path, 'Info.plist')
                if not exists(plist_path):
                    continue
                plist = biplist.readPlist(plist_path)
                appex_exec_path = join(appex_path, plist['CFBundleExecutable'])
                # bug fix: this previously read `singer` (undefined name),
                # raising NameError for any bundle containing an appex
                appex = signable.Appex(self, appex_exec_path, signer)
                appex.sign(self, signer)
        # then create the seal
        # TODO maybe the app should know what its seal path should be...
        self.seal_path = code_resources.make_seal(self.get_executable_path(),
                                                  self.path)
        # then sign the app
        executable = self.signable_class(self, self.get_executable_path(), signer)
        executable.sign(self, signer)

    def resign(self, signer):
        """ signs bundle, modifies in place """
        self.sign(signer)
        log.debug("Resigned bundle at <%s>", self.path)
class Framework(Bundle):
    """ Bundle of reusable code shipped with its own resources and metadata.
        Unlike an App: the main executable has no Entitlements and no
        Application hash, and the framework carries no provisioning profile
        of its own. """

    # the main executable of this bundle is signed as a signable.Framework
    signable_class = signable.Framework

    def __init__(self, path):
        super(Framework, self).__init__(path)
class App(Bundle):
    """ The kind of bundle that is visible as an app to the user.
        Contains the provisioning profile, entitlements, etc. """

    # the executable in this bundle will be an Executable (i.e. the main
    # executable of an app)
    signable_class = signable.Executable

    def __init__(self, path):
        super(App, self).__init__(path)
        # where entitlements (extracted from the provisioning profile, or
        # supplied by the caller) are written during resign()
        self.entitlements_path = join(self.path,
                                      'Entitlements.plist')
        # where the provisioning profile is copied inside the app
        self.provision_path = join(self.path,
                                   'embedded.mobileprovision')

    def provision(self, provision_path):
        # Copy the given provisioning profile into the bundle as
        # embedded.mobileprovision.
        shutil.copyfile(provision_path, self.provision_path)

    @staticmethod
    def extract_entitlements(provision_path):
        """ Given a path to a provisioning profile, return the entitlements
            encoded therein """
        cmd = [
            'smime',
            '-inform', 'der',
            '-verify',  # verifies content, prints verification status to STDERR,
                        # outputs content to STDOUT. In our case, will be an XML plist
            '-noverify',  # accept self-signed certs. Not the opposite of -verify!
            '-in', provision_path
        ]
        # this command always prints 'Verification successful' to stderr.
        (profile_text, err) = openssl_command(cmd, data=None, expect_err=True)
        if err and err.strip() != 'Verification successful':
            # unexpected, but not necessarily fatal - log and keep going
            log.error('Received unexpected error from openssl: {}'.format(err))
        plist_dict = biplist.readPlistFromString(profile_text)
        if 'Entitlements' not in plist_dict:
            log.debug('failed to get entitlements in provisioning profile')
            raise Exception('could not find Entitlements in {}'.format(provision_path))
        return plist_dict['Entitlements']

    def write_entitlements(self, entitlements):
        """ Write entitlements to self.entitlements_path. This actually doesn't matter
            to the app, it's just used later on by other parts of the signing process. """
        biplist.writePlist(entitlements, self.entitlements_path, binary=False)
        log.debug("wrote Entitlements to {0}".format(self.entitlements_path))

    def resign(self, signer, provisioning_profile, alternate_entitlements_path=None):
        """ signs app in place """
        # TODO all this mucking about with entitlements feels wrong. The entitlements_path is
        # not actually functional, it's just a way of passing it to later stages of signing.
        # Maybe we should determine entitlements data in isign/archive.py or even isign/isign.py,
        # and then embed it into Signer?
        # In the typical case, we add entitlements from the pprof into the app's signature
        if alternate_entitlements_path is None:
            # copy the provisioning profile in
            self.provision(provisioning_profile)
            entitlements = self.extract_entitlements(provisioning_profile)
        else:
            log.info("signing with alternative entitlements: {}".format(alternate_entitlements_path))
            entitlements = biplist.readPlist(alternate_entitlements_path)
        self.write_entitlements(entitlements)
        # actually resign this bundle now
        super(App, self).resign(signer)
| 42.359375 | 106 | 0.635835 |
import biplist
import code_resources
from exceptions import NotMatched
import copy
import glob
import logging
import os
from os.path import basename, exists, join, splitext
from signer import openssl_command
import signable
import shutil
log = logging.getLogger(__name__)
def is_info_plist_native(plist):
    """ True when the Info.plist declares iPhoneOS among its supported
        platforms, i.e. the bundle targets native iOS. """
    if 'CFBundleSupportedPlatforms' not in plist:
        return False
    return 'iPhoneOS' in plist['CFBundleSupportedPlatforms']
class Bundle(object):
    """ A bundle is a standard directory structure, a signable, installable set of files.
        Apps are Bundles, but so are some kinds of Frameworks (libraries) """
    helpers = []
    signable_class = None
    entitlements_path = None  # Not set for every bundle type

    def __init__(self, path):
        self.path = path
        self.info_path = join(self.path, 'Info.plist')
        if not exists(self.info_path):
            raise NotMatched("no Info.plist found; probably not a bundle")
        self.info = biplist.readPlist(self.info_path)
        self.orig_info = None
        if not is_info_plist_native(self.info):
            # appears to work without this key, so only log it
            log.debug(u"Missing/invalid CFBundleSupportedPlatforms value in {}".format(self.info_path))
        # set later, when the seal is created during signing
        self.seal_path = None

    def get_entitlements_path(self):
        """ Path to the Entitlements.plist, or None for bundle types without one. """
        return self.entitlements_path

    def get_executable_path(self):
        """ Path to the main executable. For an app, this is app itself. For
            a Framework, this is the main framework """
        executable_name = None
        if 'CFBundleExecutable' in self.info:
            executable_name = self.info['CFBundleExecutable']
        else:
            # fall back to the bundle's own name, minus extension
            executable_name, _ = splitext(basename(self.path))
        executable = join(self.path, executable_name)
        if not exists(executable):
            raise Exception(
                'could not find executable for {0}'.format(self.path))
        return executable

    def update_info_props(self, new_props):
        """ Merge new_props into Info.plist, remembering the original values in
            self.orig_info so callers can later ask what changed. The plist is
            rewritten on disk only when something actually changed. """
        if self.orig_info is None:
            self.orig_info = copy.deepcopy(self.info)

        changed = False
        if ('CFBundleIdentifier' in new_props and
                'CFBundleURLTypes' in self.info and
                'CFBundleURLTypes' not in new_props):
            # The bundle identifier changed: update any CFBundleURLName
            # entries that matched the old bundle id, unless the caller is
            # setting CFBundleURLTypes explicitly.
            old_bundle_id = self.info['CFBundleIdentifier']
            new_bundle_id = new_props['CFBundleIdentifier']
            for url_type in self.info['CFBundleURLTypes']:
                if 'CFBundleURLName' not in url_type:
                    continue
                if url_type['CFBundleURLName'] == old_bundle_id:
                    url_type['CFBundleURLName'] = new_bundle_id
                    changed = True

        # iteritems(): this module is Python 2 code
        for key, val in new_props.iteritems():
            is_new_key = key not in self.info
            if is_new_key or self.info[key] != val:
                if is_new_key:
                    log.warn("Adding new Info.plist key: {}".format(key))
                self.info[key] = val
                changed = True

        if changed:
            biplist.writePlist(self.info, self.info_path, binary=True)
        else:
            self.orig_info = None

    def info_props_changed(self):
        """ True if update_info_props() changed any property. """
        return self.orig_info is not None

    def info_prop_changed(self, key):
        """ True if update_info_props() changed (or added) this particular key. """
        if not self.orig_info:
            # No props have been changed
            return False
        if key in self.info and key in self.orig_info and self.info[key] == self.orig_info[key]:
            return False
        return True

    def get_info_prop(self, key):
        """ Current value of an Info.plist property. """
        return self.info[key]

    def sign_dylibs(self, signer, path):
        """ Sign all the dylibs in this directory. """
        for dylib_path in glob.glob(join(path, '*.dylib')):
            dylib = signable.Dylib(self, dylib_path, signer)
            dylib.sign(self, signer)

    def sign(self, signer):
        """ Sign everything in this bundle, recursively with sub-bundles. """
        frameworks_path = join(self.path, 'Frameworks')
        if exists(frameworks_path):
            # re-sign each sub-framework first; non-frameworks are skipped
            for framework_name in os.listdir(frameworks_path):
                framework_path = join(frameworks_path, framework_name)
                try:
                    framework = Framework(framework_path)
                    framework.resign(signer)
                except NotMatched:
                    continue
            # sign all the dylibs under Frameworks
            self.sign_dylibs(signer, frameworks_path)
        # sign any dylibs in the main directory (rare, but it happens)
        self.sign_dylibs(signer, self.path)
        plugins_path = join(self.path, 'PlugIns')
        if exists(plugins_path):
            # sign the appex executables
            appex_paths = glob.glob(join(plugins_path, '*.appex'))
            for appex_path in appex_paths:
                plist_path = join(appex_path, 'Info.plist')
                if not exists(plist_path):
                    continue
                plist = biplist.readPlist(plist_path)
                appex_exec_path = join(appex_path, plist['CFBundleExecutable'])
                # bug fix: this previously read `singer` (undefined name),
                # raising NameError for any bundle containing an appex
                appex = signable.Appex(self, appex_exec_path, signer)
                appex.sign(self, signer)
        # then create the seal
        # TODO maybe the app should know what its seal path should be...
        self.seal_path = code_resources.make_seal(self.get_executable_path(),
                                                  self.path)
        # then sign the app
        executable = self.signable_class(self, self.get_executable_path(), signer)
        executable.sign(self, signer)

    def resign(self, signer):
        """ signs bundle, modifies in place """
        self.sign(signer)
        log.debug("Resigned bundle at <%s>", self.path)
class Framework(Bundle):
    """ Bundle of reusable code shipped with its own resources and metadata.
        Unlike an App: the main executable has no Entitlements and no
        Application hash, and the framework carries no provisioning profile
        of its own. """

    # the main executable of this bundle is signed as a signable.Framework
    signable_class = signable.Framework

    def __init__(self, path):
        super(Framework, self).__init__(path)
class App(Bundle):
    """ The kind of bundle that is visible as an app to the user.
        Contains the provisioning profile, entitlements, etc. """

    # the executable in this bundle will be an Executable (i.e. the main
    # executable of an app)
    signable_class = signable.Executable

    def __init__(self, path):
        super(App, self).__init__(path)
        # where entitlements (extracted from the provisioning profile, or
        # supplied by the caller) are written during resign()
        self.entitlements_path = join(self.path,
                                      'Entitlements.plist')
        # where the provisioning profile is copied inside the app
        self.provision_path = join(self.path,
                                   'embedded.mobileprovision')

    def provision(self, provision_path):
        # Copy the given provisioning profile into the bundle as
        # embedded.mobileprovision.
        shutil.copyfile(provision_path, self.provision_path)

    @staticmethod
    def extract_entitlements(provision_path):
        """ Given a path to a provisioning profile, return the entitlements
            encoded therein. """
        cmd = [
            'smime',
            '-inform', 'der',
            '-verify',  # verifies content, prints verification status to STDERR,
                        # outputs content to STDOUT. In our case, will be an XML plist
            '-noverify',  # accept self-signed certs. Not the opposite of -verify!
            '-in', provision_path
        ]
        # this command always prints 'Verification successful' to stderr.
        (profile_text, err) = openssl_command(cmd, data=None, expect_err=True)
        if err and err.strip() != 'Verification successful':
            # unexpected, but not necessarily fatal - log and keep going
            log.error('Received unexpected error from openssl: {}'.format(err))
        plist_dict = biplist.readPlistFromString(profile_text)
        if 'Entitlements' not in plist_dict:
            log.debug('failed to get entitlements in provisioning profile')
            raise Exception('could not find Entitlements in {}'.format(provision_path))
        return plist_dict['Entitlements']

    def write_entitlements(self, entitlements):
        """ Write entitlements to self.entitlements_path. This doesn't matter
            to the app itself; it is consumed by later signing stages. """
        biplist.writePlist(entitlements, self.entitlements_path, binary=False)
        log.debug("wrote Entitlements to {0}".format(self.entitlements_path))

    def resign(self, signer, provisioning_profile, alternate_entitlements_path=None):
        """ signs app in place """
        # TODO all this mucking about with entitlements feels wrong. The entitlements_path is
        # not actually functional, it's just a way of passing it to later stages of signing.
        # In the typical case, we add entitlements from the provisioning profile
        # into the app's signature:
        if alternate_entitlements_path is None:
            # copy the provisioning profile in
            self.provision(provisioning_profile)
            entitlements = self.extract_entitlements(provisioning_profile)
        else:
            log.info("signing with alternative entitlements: {}".format(alternate_entitlements_path))
            entitlements = biplist.readPlist(alternate_entitlements_path)
        self.write_entitlements(entitlements)
        # actually resign this bundle now
        super(App, self).resign(signer)
| true | true |
f729a0a1a201637f8cee6c4cdabae3ca28bf5f7c | 2,036 | py | Python | models/servicedefinition_tests.py | elementechemlyn/CareConnectBuilder | c004fa94c1af64d636ee25de8f13e34fe723b5f3 | [
"MIT"
] | 1 | 2021-12-24T11:14:38.000Z | 2021-12-24T11:14:38.000Z | models/servicedefinition_tests.py | elementechemlyn/CareConnectBuilder | c004fa94c1af64d636ee25de8f13e34fe723b5f3 | [
"MIT"
] | null | null | null | models/servicedefinition_tests.py | elementechemlyn/CareConnectBuilder | c004fa94c1af64d636ee25de8f13e34fe723b5f3 | [
"MIT"
] | 1 | 2020-09-16T14:47:26.000Z | 2020-09-16T14:47:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import os
import io
import unittest
import json
from . import servicedefinition
from .fhirdate import FHIRDate
class ServiceDefinitionTests(unittest.TestCase):
    """ Round-trip (parse -> serialize -> re-parse) tests for the FHIR
        ServiceDefinition resource, driven by the bundled example JSON. """

    def instantiate_from(self, filename):
        """ Parse the named example JSON file into a ServiceDefinition. """
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        filepath = os.path.join(datadir, filename)
        with io.open(filepath, 'r', encoding='utf-8') as handle:
            parsed = json.load(handle)
        self.assertEqual("ServiceDefinition", parsed["resourceType"])
        return servicedefinition.ServiceDefinition(parsed)

    def testServiceDefinition1(self):
        """ Parse the example, check its fields, serialize, re-parse, re-check. """
        inst = self.instantiate_from("servicedefinition-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a ServiceDefinition instance")
        self.implServiceDefinition1(inst)

        serialized = inst.as_json()
        self.assertEqual("ServiceDefinition", serialized["resourceType"])
        reparsed = servicedefinition.ServiceDefinition(serialized)
        self.implServiceDefinition1(reparsed)

    def implServiceDefinition1(self, inst):
        """ Field-level assertions shared by both parse passes. """
        expected_date = FHIRDate("2015-07-22")
        self.assertEqual(inst.date.date, expected_date.date)
        self.assertEqual(inst.date.as_json(), "2015-07-22")
        self.assertEqual(inst.description, "Guideline appropriate ordering is used to assess appropriateness of an order given a patient, a proposed order, and a set of clinical indications.")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.identifier[0].use, "official")
        # NB: the "guildeline" typo below is present in the upstream fixture itself.
        self.assertEqual(inst.identifier[0].value, "guildeline-appropriate-ordering")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "Guideline Appropriate Ordering Module")
        self.assertEqual(inst.topic[0].text, "Guideline Appropriate Ordering")
        self.assertEqual(inst.topic[1].text, "Appropriate Use Criteria")
        self.assertEqual(inst.version, "1.0.0")
| 42.416667 | 192 | 0.698428 |
import os
import io
import unittest
import json
from . import servicedefinition
from .fhirdate import FHIRDate
class ServiceDefinitionTests(unittest.TestCase):
    """ Round-trip serialization tests for the FHIR ServiceDefinition resource. """

    def instantiate_from(self, filename):
        """ Load the named example JSON file (from FHIR_UNITTEST_DATADIR, or the
            current directory if unset) and build a ServiceDefinition from it. """
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("ServiceDefinition", js["resourceType"])
        return servicedefinition.ServiceDefinition(js)

    def testServiceDefinition1(self):
        """ Parse the example resource, check it, serialize it back to JSON,
            re-parse, and check again to prove the round trip is lossless. """
        inst = self.instantiate_from("servicedefinition-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a ServiceDefinition instance")
        self.implServiceDefinition1(inst)

        js = inst.as_json()
        self.assertEqual("ServiceDefinition", js["resourceType"])
        inst2 = servicedefinition.ServiceDefinition(js)
        self.implServiceDefinition1(inst2)

    def implServiceDefinition1(self, inst):
        """ Field-level assertions shared by both parse passes.
            (The "guildeline" typo matches the upstream example fixture.) """
        self.assertEqual(inst.date.date, FHIRDate("2015-07-22").date)
        self.assertEqual(inst.date.as_json(), "2015-07-22")
        self.assertEqual(inst.description, "Guideline appropriate ordering is used to assess appropriateness of an order given a patient, a proposed order, and a set of clinical indications.")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.identifier[0].use, "official")
        self.assertEqual(inst.identifier[0].value, "guildeline-appropriate-ordering")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "Guideline Appropriate Ordering Module")
        self.assertEqual(inst.topic[0].text, "Guideline Appropriate Ordering")
        self.assertEqual(inst.topic[1].text, "Appropriate Use Criteria")
        self.assertEqual(inst.version, "1.0.0")
| true | true |
f729a1238c99c70155e183fce2609f5d29e9072a | 6,509 | py | Python | src/common/receiptmanager/receiptmanager.py | catarinaacsilva/security_auction | f0b76ad47ca8cc211fd90712c2090b8e5ff934a5 | [
"MIT"
] | null | null | null | src/common/receiptmanager/receiptmanager.py | catarinaacsilva/security_auction | f0b76ad47ca8cc211fd90712c2090b8e5ff934a5 | [
"MIT"
] | 1 | 2021-06-01T23:30:44.000Z | 2021-06-01T23:30:44.000Z | src/common/receiptmanager/receiptmanager.py | catarinaacsilva/security_auction | f0b76ad47ca8cc211fd90712c2090b8e5ff934a5 | [
"MIT"
] | null | null | null | # coding: utf-8
from ..cryptmanager import *
from ..utils import *
from ..cartaodecidadao import CartaoDeCidadao
from ..certmanager import CertManager
from Crypto.Hash import SHA256
from hmac import compare_digest
import hashlib
import json
import os
import getpass
import sys
class ReceiptManager:
	'''
		Stores, retrieves and validates encrypted auction-bid receipts for the
		holder of a Citizen Card (CC). Receipts live on disk under
		src/common/receiptmanager/receipts/<cc_number>/ and are encrypted with
		a key derived from a CC signature; a SHA-256 digest prefix provides an
		integrity check on decryption.
	'''
	def __init__(self, cc):
		# cc: CartaoDeCidadao wrapper used for identity, signing and public key access
		self.cc = cc
		self.cc_number = str(self.cc.get_identity()[1])
		# lazily-derived symmetric key, cached by get_key()
		self.pw = None
	def validate_receipt(self, receipt):
		'''
			Verify the three nested signatures of the receipt "onion":
			repository over ONION_2, manager over ONION_1, and this client
			over ONION_0. Returns True only if all three are valid.
		'''
		repository_onion = json.dumps(receipt["ONION_2"]).encode('UTF-8')
		repository_onion_sig = fromBase64(receipt["SIGNATURE"])
		repository_cert = CertManager.get_cert_by_name('repository.crt')
		manager_onion = json.dumps(receipt["ONION_2"]["ONION_1"]).encode('UTF-8')
		manager_onion_sig = fromBase64(receipt["ONION_2"]["SIGNATURE"])
		manager_cert = CertManager.get_cert_by_name('manager.crt')
		client_onion = json.dumps(receipt["ONION_2"]["ONION_1"]["ONION_0"]).encode('UTF-8')
		client_onion_sig = fromBase64(receipt["ONION_2"]["ONION_1"]["SIGNATURE"])
		# the innermost signature is checked against this card's own certificate
		client_cert = self.cc.get_certificate_raw()
		cm = CertManager(cert = repository_cert)
		valid_repo = cm.verify_signature( repository_onion_sig , repository_onion )
		cm = CertManager(cert = manager_cert)
		valid_mana = cm.verify_signature( manager_onion_sig , manager_onion )
		cm = CertManager(cert = client_cert)
		valid_client = cm.verify_signature( client_onion_sig , client_onion )
		return valid_repo and valid_mana and valid_client
	def save_receipt(self, auction_id, receipt, prev_hash):
		'''
			Encrypt and persist a receipt as <auction_id>-<prev_hash>.
			On-disk layout is SHA256(receipt) || receipt, encrypted with the
			user's derived key, so tampering is detectable on read.
		'''
		# Checking for Permissions on Folder
		self.check_perm()
		# Checking existence of user dir
		self.check_dir()
		# Opening File Where Receipt Will Be Stored
		file = open('src/common/receiptmanager/receipts/'+self.cc_number+'/'+auction_id+'-'+prev_hash, 'wb')
		# Getting User Password Key
		pw = self.get_key()
		# Building integrity digest for receipt
		# NOTE(review): despite the variable name, this is a plain SHA-256
		# digest, not a keyed HMAC; integrity protection relies on the outer
		# encryption layer.
		hmac = SHA256.new(receipt)
		hmac = hmac.digest()
		# Encrypting receipt (digest prefix + payload) with key
		result = encrypt(pw, (hmac+receipt))
		# Writing to file
		file.write(result)
		file.close()
	def get_receipt(self, receipt_name, pw = None):
		'''
			Decrypt and return the raw receipt bytes for the given file name,
			or None when the file is missing or fails the integrity check.
			An explicit key may be passed via `pw`; otherwise it is derived.
		'''
		# Checking for Permissions on Folder
		self.check_perm()
		# Checking existence of user dir
		self.check_dir()
		# Checking if such receipt exists
		if os.path.isfile('src/common/receiptmanager/receipts/'+self.cc_number+'/'+receipt_name):
			# Opening receipt file
			file = open('src/common/receiptmanager/receipts/'+self.cc_number+'/'+receipt_name, 'rb')
			# Getting the key
			if not pw:
				pw = self.get_key()
			# Decrypting Receipt
			result = decrypt(pw, file.read())
			file.close()
			# First 32 bytes are the stored SHA-256 of the remainder
			if(compare_digest(result[:32], SHA256.new(result[32:]).digest())):
				return result[32:]
			else:
				print( colorize("ERROR: Corrupted File Or Unauthorized Access", 'red') )
				input("Press any key to continue...")
				return None
		else:
			print( colorize("ERROR: Receipt Not Found", 'red') )
			input("Press any key to continue...")
			return None
	def get_participated_auctions(self):
		'''
			Get list of participated auctions ids
		'''
		# Checking for Permissions on Folder
		self.check_perm()
		# Checking existence of user dir
		self.check_dir()
		auctions = []
		# For Each Receipt (file names are <auction_id>-<prev_hash>)
		for filename in os.listdir('src/common/receiptmanager/receipts/'+self.cc_number):
			# Ignore pwd file
			if filename.startswith('.'): continue
			# Add receipt to receipts list
			auctions.append(int(filename.split("-")[0]))
		return auctions
	def get_receipt_value(self, auction_id, hidden_value):
		'''
			Return a list of (bid_value, prev_hash) tuples, one per receipt of
			the given auction. When hidden_value is True the bid value is also
			decrypted with the per-receipt KEY carried inside the receipt.
		'''
		# Checking for Permissions on Folder
		self.check_perm()
		# Checking existence of user dir
		self.check_dir()
		receipts = []
		for filename in os.listdir('src/common/receiptmanager/receipts/'+self.cc_number):
			if filename.startswith(auction_id+'-'):
				# Opening receipt file
				file = open('src/common/receiptmanager/receipts/'+self.cc_number+'/'+filename, 'rb')
				# Getting the key (cached after the first derivation)
				pw = self.get_key()
				# Decrypting Receipt
				result = decrypt(pw, file.read())
				file.close()
				# Checking integrity of the receipt (32-byte SHA-256 prefix)
				if(compare_digest(result[:32], SHA256.new(result[32:]).digest())):
					receipt = json.loads(result[32:])
					value = receipt["ONION_2"]["ONION_1"]["ONION_0"]["VALUE"]
					if hidden_value:
						# hidden bids: VALUE is additionally encrypted with
						# the receipt's own KEY
						secret = fromBase64(receipt["KEY"])
						receipts.append((decrypt(secret, fromBase64(value)).decode(), receipt["ONION_2"]["PREV_HASH"]))
					else:
						receipts.append((fromBase64(value).decode(), receipt["ONION_2"]["PREV_HASH"]))
				else:
					print( colorize("ERROR: Corrupted File Or Unauthorized Access", 'red') )
					input("Press any key to continue...")
		return receipts
	def get_key(self):
		'''
			Derive (and cache) the user's symmetric key. A random 128-byte
			seed is stored in .pwd on first use; the key is the PBKDF2 hash
			of the CC signature of that seed, salted with a slice of the
			card's public key.
		'''
		if not self.pw is None:
			return self.pw
		# Checking if there is a password seed already set
		if os.path.isfile("src/common/receiptmanager/receipts/"+self.cc_number+"/.pwd"):
			# Getting .pwd contents and sign it
			file = open("src/common/receiptmanager/receipts/"+self.cc_number+"/.pwd", "rb")
			key = self.cc.sign(file.read())
			file.close()
		else:
			# Building new random seed for password
			new = os.urandom(128)
			file = open("src/common/receiptmanager/receipts/"+self.cc_number+"/.pwd", "wb")
			file.write(new)
			file.close()
			key = self.cc.sign(new)
		self.pw = self.password_builder(key, self.cc.get_public_key()[10:26])
		# Return hashed (derived) password
		return self.pw
	def password_builder(self, password, salt):
		'''
			PBKDF2-HMAC-SHA256 derivation of a 16-byte key (1000 iterations).
		'''
		password_hash = hashlib.pbkdf2_hmac('sha256', password, salt, 1000, 16)
		return password_hash
	def check_dir(self):
		'''
			Check if the per-user receipts dir exists; create it if missing.
		'''
		if os.path.isdir("src/common/receiptmanager/receipts/"+self.cc_number): return
		else: os.mkdir("src/common/receiptmanager/receipts/"+self.cc_number)
	def check_perm(self):
		'''
			Block (prompting the user) until the receipts folder is both
			readable and writable.
		'''
		while(not os.access('src/common/receiptmanager/receipts', os.R_OK)):
			print( colorize("I have no READ permissions, please allow READ permissions at src/common/receiptmanager/receipts", 'red') )
			input("Press any key to try again...")
			clean(lines = 2)
		while(not os.access('src/common/receiptmanager/receipts', os.W_OK)):
			print( colorize("I have no WRITE permissions, please allow WRITE permissions at src/common/receiptmanager/receipts", 'red') )
			input("Press any key to try again...")
			clean(lines = 2)
| 31.75122 | 128 | 0.703027 |
from ..cryptmanager import *
from ..utils import *
from ..cartaodecidadao import CartaoDeCidadao
from ..certmanager import CertManager
from Crypto.Hash import SHA256
from hmac import compare_digest
import hashlib
import json
import os
import getpass
import sys
class ReceiptManager:
def __init__(self, cc):
self.cc = cc
self.cc_number = str(self.cc.get_identity()[1])
self.pw = None
def validate_receipt(self, receipt):
repository_onion = json.dumps(receipt["ONION_2"]).encode('UTF-8')
repository_onion_sig = fromBase64(receipt["SIGNATURE"])
repository_cert = CertManager.get_cert_by_name('repository.crt')
manager_onion = json.dumps(receipt["ONION_2"]["ONION_1"]).encode('UTF-8')
manager_onion_sig = fromBase64(receipt["ONION_2"]["SIGNATURE"])
manager_cert = CertManager.get_cert_by_name('manager.crt')
client_onion = json.dumps(receipt["ONION_2"]["ONION_1"]["ONION_0"]).encode('UTF-8')
client_onion_sig = fromBase64(receipt["ONION_2"]["ONION_1"]["SIGNATURE"])
client_cert = self.cc.get_certificate_raw()
cm = CertManager(cert = repository_cert)
valid_repo = cm.verify_signature( repository_onion_sig , repository_onion )
cm = CertManager(cert = manager_cert)
valid_mana = cm.verify_signature( manager_onion_sig , manager_onion )
cm = CertManager(cert = client_cert)
valid_client = cm.verify_signature( client_onion_sig , client_onion )
return valid_repo and valid_mana and valid_client
def save_receipt(self, auction_id, receipt, prev_hash):
self.check_perm()
self.check_dir()
file = open('src/common/receiptmanager/receipts/'+self.cc_number+'/'+auction_id+'-'+prev_hash, 'wb')
pw = self.get_key()
hmac = SHA256.new(receipt)
hmac = hmac.digest()
result = encrypt(pw, (hmac+receipt))
file.write(result)
file.close()
def get_receipt(self, receipt_name, pw = None):
self.check_perm()
self.check_dir()
if os.path.isfile('src/common/receiptmanager/receipts/'+self.cc_number+'/'+receipt_name):
file = open('src/common/receiptmanager/receipts/'+self.cc_number+'/'+receipt_name, 'rb')
if not pw:
pw = self.get_key()
result = decrypt(pw, file.read())
file.close()
if(compare_digest(result[:32], SHA256.new(result[32:]).digest())):
return result[32:]
else:
print( colorize("ERROR: Corrupted File Or Unauthorized Access", 'red') )
input("Press any key to continue...")
return None
else:
print( colorize("ERROR: Receipt Not Found", 'red') )
input("Press any key to continue...")
return None
def get_participated_auctions(self):
self.check_perm()
self.check_dir()
auctions = []
for filename in os.listdir('src/common/receiptmanager/receipts/'+self.cc_number):
if filename.startswith('.'): continue
auctions.append(int(filename.split("-")[0]))
return auctions
def get_receipt_value(self, auction_id, hidden_value):
self.check_perm()
self.check_dir()
receipts = []
for filename in os.listdir('src/common/receiptmanager/receipts/'+self.cc_number):
if filename.startswith(auction_id+'-'):
file = open('src/common/receiptmanager/receipts/'+self.cc_number+'/'+filename, 'rb')
pw = self.get_key()
result = decrypt(pw, file.read())
file.close()
if(compare_digest(result[:32], SHA256.new(result[32:]).digest())):
receipt = json.loads(result[32:])
value = receipt["ONION_2"]["ONION_1"]["ONION_0"]["VALUE"]
if hidden_value:
secret = fromBase64(receipt["KEY"])
receipts.append((decrypt(secret, fromBase64(value)).decode(), receipt["ONION_2"]["PREV_HASH"]))
else:
receipts.append((fromBase64(value).decode(), receipt["ONION_2"]["PREV_HASH"]))
else:
print( colorize("ERROR: Corrupted File Or Unauthorized Access", 'red') )
input("Press any key to continue...")
return receipts
def get_key(self):
if not self.pw is None:
return self.pw
if os.path.isfile("src/common/receiptmanager/receipts/"+self.cc_number+"/.pwd"):
file = open("src/common/receiptmanager/receipts/"+self.cc_number+"/.pwd", "rb")
key = self.cc.sign(file.read())
file.close()
else:
new = os.urandom(128)
file = open("src/common/receiptmanager/receipts/"+self.cc_number+"/.pwd", "wb")
file.write(new)
file.close()
key = self.cc.sign(new)
self.pw = self.password_builder(key, self.cc.get_public_key()[10:26])
return self.pw
def password_builder(self, password, salt):
password_hash = hashlib.pbkdf2_hmac('sha256', password, salt, 1000, 16)
return password_hash
def check_dir(self):
if os.path.isdir("src/common/receiptmanager/receipts/"+self.cc_number): return
else: os.mkdir("src/common/receiptmanager/receipts/"+self.cc_number)
def check_perm(self):
while(not os.access('src/common/receiptmanager/receipts', os.R_OK)):
print( colorize("I have no READ permissions, please allow READ permissions at src/common/receiptmanager/receipts", 'red') )
input("Press any key to try again...")
clean(lines = 2)
while(not os.access('src/common/receiptmanager/receipts', os.W_OK)):
print( colorize("I have no WRITE permissions, please allow WRITE permissions at src/common/receiptmanager/receipts", 'red') )
input("Press any key to try again...")
clean(lines = 2)
| true | true |
f729a24a66e616326b404dc1b3f9fa74a4985595 | 245 | py | Python | happybase/hbase/constants.py | BeeGroup-cimne/happybase | 2dacd3045baaaf39c6328b5172eef1dc302ea307 | [
"MIT"
] | null | null | null | happybase/hbase/constants.py | BeeGroup-cimne/happybase | 2dacd3045baaaf39c6328b5172eef1dc302ea307 | [
"MIT"
] | null | null | null | happybase/hbase/constants.py | BeeGroup-cimne/happybase | 2dacd3045baaaf39c6328b5172eef1dc302ea307 | [
"MIT"
] | null | null | null | #
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from .ttypes import *
| 20.416667 | 80 | 0.75102 |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from .ttypes import *
| true | true |
f729a2d781b6d90d31a6e8106f80366f8380d7fc | 1,962 | py | Python | tests/test.py | alexd-conf/coinmarketcap-scraper | 06c60f7c2deba14d876e812d6a94c30f3a8091cb | [
"MIT"
] | null | null | null | tests/test.py | alexd-conf/coinmarketcap-scraper | 06c60f7c2deba14d876e812d6a94c30f3a8091cb | [
"MIT"
] | null | null | null | tests/test.py | alexd-conf/coinmarketcap-scraper | 06c60f7c2deba14d876e812d6a94c30f3a8091cb | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import MagicMock
from scraper.scraper import get_table_with_data, row_not_loaded, \
reload_table_rows, get_coin_name, get_coin_symbol, \
get_coin_price, get_coin_change24h, get_coin_change7d, \
get_coin_market_cap, get_coin_volume24h, get_coin_circulating_supply
class TestStringMethods(unittest.TestCase):
    """Unit tests for the scraper helpers, driven by lightweight mocks."""

    def test_get_table_with_data_raises_error(self):
        # An empty page source cannot be parsed into a table.
        with self.assertRaises(AttributeError):
            get_table_with_data("")

    def test_row_not_loaded_true(self):
        fake_row = MagicMock()
        fake_row.has_attr = lambda name: True
        self.assertTrue(row_not_loaded(fake_row))

    def test_row_not_loaded_false(self):
        fake_row = MagicMock()
        fake_row.has_attr = lambda name: False
        self.assertFalse(row_not_loaded(fake_row))

    def test_reload_table_rows_raises_error(self):
        # A driver with an empty page source yields nothing parseable.
        fake_driver = MagicMock(page_source="")
        with self.assertRaises(AttributeError):
            reload_table_rows(fake_driver)

    def test_get_coin_name_result_none(self):
        self.assertIsNone(get_coin_name([]))

    def test_get_coin_symbol_result_none(self):
        self.assertIsNone(get_coin_symbol([]))

    def test_get_coin_price_result_none(self):
        self.assertIsNone(get_coin_price([]))

    def test_get_coin_change24h_result_none(self):
        self.assertIsNone(get_coin_change24h([]))

    def test_get_coin_change7d_result_none(self):
        self.assertIsNone(get_coin_change7d([]))

    def test_get_coin_market_cap_result_none(self):
        self.assertIsNone(get_coin_market_cap([]))

    def test_get_coin_volume24h_result_none(self):
        self.assertIsNone(get_coin_volume24h([]))

    def test_get_coin_circulating_supply_result_none(self):
        self.assertIsNone(get_coin_circulating_supply([]))
| 34.421053 | 96 | 0.70897 | import unittest
from unittest.mock import MagicMock
from scraper.scraper import get_table_with_data, row_not_loaded, \
reload_table_rows, get_coin_name, get_coin_symbol, \
get_coin_price, get_coin_change24h, get_coin_change7d, \
get_coin_market_cap, get_coin_volume24h, get_coin_circulating_supply
class TestStringMethods(unittest.TestCase):
def test_get_table_with_data_raises_error(self):
self.assertRaises(AttributeError, get_table_with_data, "")
def test_row_not_loaded_true(self):
def has_attr(arg):
return True
row_mock = MagicMock()
row_mock.has_attr = has_attr
self.assertTrue(row_not_loaded(row_mock))
def test_row_not_loaded_false(self):
def has_attr(arg):
return False
row_mock = MagicMock()
row_mock.has_attr = has_attr
self.assertFalse(row_not_loaded(row_mock))
def test_reload_table_rows_raises_error(self):
driver_mock = MagicMock(page_source="")
self.assertRaises(AttributeError, reload_table_rows, driver_mock)
def test_get_coin_name_result_none(self):
self.assertIsNone(get_coin_name([]))
def test_get_coin_symbol_result_none(self):
self.assertIsNone(get_coin_symbol([]))
def test_get_coin_price_result_none(self):
self.assertIsNone(get_coin_price([]))
def test_get_coin_change24h_result_none(self):
self.assertIsNone(get_coin_change24h([]))
def test_get_coin_change7d_result_none(self):
self.assertIsNone(get_coin_change7d([]))
def test_get_coin_market_cap_result_none(self):
self.assertIsNone(get_coin_market_cap([]))
def test_get_coin_volume24h_result_none(self):
self.assertIsNone(get_coin_volume24h([]))
def test_get_coin_circulating_supply_result_none(self):
self.assertIsNone(get_coin_circulating_supply([]))
| true | true |
f729a2d9f63ddeb8e9182a8ef1ee556c40834496 | 78 | py | Python | orionxapi/__init__.py | tomymacmillan/orionx-api-client | 7a9f0dc8b86ec451c8482451eba356c5d840c66e | [
"MIT"
] | 19 | 2017-12-27T17:23:08.000Z | 2021-08-02T01:13:10.000Z | orionxapi/__init__.py | tomymacmillan/orionx-api-client | 7a9f0dc8b86ec451c8482451eba356c5d840c66e | [
"MIT"
] | 12 | 2018-01-02T22:36:56.000Z | 2018-07-23T15:52:23.000Z | orionxapi/__init__.py | tomymacmillan/orionx-api-client | 7a9f0dc8b86ec451c8482451eba356c5d840c66e | [
"MIT"
] | 9 | 2017-12-27T08:10:46.000Z | 2021-05-12T17:03:35.000Z | from .client import client, as_completed
__all__ = ['client', 'as_completed'] | 26 | 40 | 0.75641 | from .client import client, as_completed
__all__ = ['client', 'as_completed'] | true | true |
f729a3f83be9c31ba99f040897298ef106279325 | 1,944 | py | Python | src/modules/indel_primer/main.py | AndersenLab/CAENDR | ce4cdb74db736db8226ffc90988959b71b0d5ff5 | [
"MIT"
] | 3 | 2022-02-09T07:04:37.000Z | 2022-03-11T02:46:35.000Z | src/modules/indel_primer/main.py | AndersenLab/CAENDR | ce4cdb74db736db8226ffc90988959b71b0d5ff5 | [
"MIT"
] | 4 | 2022-01-28T22:28:08.000Z | 2022-02-11T21:47:15.000Z | src/modules/indel_primer/main.py | AndersenLab/CAENDR | ce4cdb74db736db8226ffc90988959b71b0d5ff5 | [
"MIT"
] | 1 | 2022-01-11T03:39:02.000Z | 2022-01-11T03:39:02.000Z | import os
import sys
from subprocess import Popen, PIPE, STDOUT
from logzero import logger
from dotenv import load_dotenv
from caendr.utils import monitor
from caendr.models.error import EnvVarError
from caendr.services.cloud.storage import upload_blob_from_file
# Load runtime configuration from the local .env file and enable Sentry
# error monitoring for this batch job.
dotenv_file = '.env'
load_dotenv(dotenv_file)
monitor.init_sentry("indel_primer")
# All job parameters arrive via environment variables.
MODULE_SITE_BUCKET_PRIVATE_NAME = os.environ.get('MODULE_SITE_BUCKET_PRIVATE_NAME')
INDEL_SITE = os.environ.get('INDEL_SITE')  # genomic region to design primers for
INDEL_STRAIN_1 = os.environ.get('INDEL_STRAIN_1')
INDEL_STRAIN_2 = os.environ.get('INDEL_STRAIN_2')
RESULT_BUCKET = os.environ.get('RESULT_BUCKET')  # destination bucket for the results file
RESULT_BLOB = os.environ.get('RESULT_BLOB')  # destination blob name for the results file
WORMBASE_VERSION = os.environ.get('WORMBASE_VERSION')  # reference passed via --ref
INDEL_VCF_VERSION = os.environ.get('INDEL_VCF_VERSION')  # VCF file argument for `vk primer indel`
logger.info(f'Indel Primer: INDEL_SITE:{INDEL_SITE} INDEL_STRAIN_1:{INDEL_STRAIN_1} INDEL_STRAIN_2:{INDEL_STRAIN_2} WORMBASE_VERSION:{WORMBASE_VERSION} INDEL_VCF_VERSION:{INDEL_VCF_VERSION} RESULT_BUCKET:{RESULT_BUCKET} RESULT_BLOB:{RESULT_BLOB}')
# Fail fast if any required parameter is missing.
if not INDEL_SITE or not INDEL_STRAIN_1 or not INDEL_STRAIN_2 or not RESULT_BLOB or not RESULT_BUCKET or not WORMBASE_VERSION or not INDEL_VCF_VERSION:
  raise EnvVarError()
# Run VCF-kit's indel primer design inside the `indel-primer` conda environment.
cmd = ('conda',
       'run',
       '-n',
       'indel-primer',
       'vk',
       'primer',
       'indel',
       '--region',
       INDEL_SITE,
       '--nprimers',
       '10',
       '--polymorphic',
       '--ref',
       WORMBASE_VERSION,
       '--samples',
       f'{INDEL_STRAIN_1},{INDEL_STRAIN_2}', INDEL_VCF_VERSION)
# Stream the tool's stdout into results.tsv while mirroring it to the log.
with Popen(cmd, stdout=PIPE, stderr=PIPE, bufsize=1) as p, open('results.tsv', 'ab') as file:
  for line in p.stdout: # b'\n'-separated lines, passed through as raw bytes
    logger.info(line)
    file.write(line)
  for line in p.stderr: # b'\n'-separated lines
    # NOTE(review): this logs the int returned by write() (the byte count),
    # not the stderr line itself — confirm whether logging the line was intended.
    logger.error(sys.stdout.buffer.write(line))
# Publish the finished results file to cloud storage.
upload_blob_from_file(RESULT_BUCKET, 'results.tsv', RESULT_BLOB)
| 31.354839 | 247 | 0.727366 | import os
import sys
from subprocess import Popen, PIPE, STDOUT
from logzero import logger
from dotenv import load_dotenv
from caendr.utils import monitor
from caendr.models.error import EnvVarError
from caendr.services.cloud.storage import upload_blob_from_file
dotenv_file = '.env'
load_dotenv(dotenv_file)
monitor.init_sentry("indel_primer")
MODULE_SITE_BUCKET_PRIVATE_NAME = os.environ.get('MODULE_SITE_BUCKET_PRIVATE_NAME')
INDEL_SITE = os.environ.get('INDEL_SITE')
INDEL_STRAIN_1 = os.environ.get('INDEL_STRAIN_1')
INDEL_STRAIN_2 = os.environ.get('INDEL_STRAIN_2')
RESULT_BUCKET = os.environ.get('RESULT_BUCKET')
RESULT_BLOB = os.environ.get('RESULT_BLOB')
WORMBASE_VERSION = os.environ.get('WORMBASE_VERSION')
INDEL_VCF_VERSION = os.environ.get('INDEL_VCF_VERSION')
logger.info(f'Indel Primer: INDEL_SITE:{INDEL_SITE} INDEL_STRAIN_1:{INDEL_STRAIN_1} INDEL_STRAIN_2:{INDEL_STRAIN_2} WORMBASE_VERSION:{WORMBASE_VERSION} INDEL_VCF_VERSION:{INDEL_VCF_VERSION} RESULT_BUCKET:{RESULT_BUCKET} RESULT_BLOB:{RESULT_BLOB}')
if not INDEL_SITE or not INDEL_STRAIN_1 or not INDEL_STRAIN_2 or not RESULT_BLOB or not RESULT_BUCKET or not WORMBASE_VERSION or not INDEL_VCF_VERSION:
raise EnvVarError()
cmd = ('conda',
'run',
'-n',
'indel-primer',
'vk',
'primer',
'indel',
'--region',
INDEL_SITE,
'--nprimers',
'10',
'--polymorphic',
'--ref',
WORMBASE_VERSION,
'--samples',
f'{INDEL_STRAIN_1},{INDEL_STRAIN_2}', INDEL_VCF_VERSION)
with Popen(cmd, stdout=PIPE, stderr=PIPE, bufsize=1) as p, open('results.tsv', 'ab') as file:
for line in p.stdout:
logger.info(line)
file.write(line)
for line in p.stderr:
logger.error(sys.stdout.buffer.write(line))
upload_blob_from_file(RESULT_BUCKET, 'results.tsv', RESULT_BLOB)
| true | true |
f729a4340682dd093b68dbd33d955ab4cb402955 | 800 | py | Python | corehq/apps/domain/management/commands/fill_last_modified_date.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/apps/domain/management/commands/fill_last_modified_date.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/apps/domain/management/commands/fill_last_modified_date.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
    """Re-save every domain whose document lacks a truthy ``last_modified``."""

    def _get_domains_without_last_modified_date(self):
        # Collect all domain ids first, then stream the full docs in batches.
        domain_ids = [
            row['id']
            for row in Domain.view(
                "domain/domains",
                reduce=False,
                include_docs=False
            )
        ]
        missing = []
        for doc in iter_docs(Domain.get_db(), domain_ids):
            # Treat both an absent key and a falsy value as "not set".
            if not doc.get('last_modified'):
                missing.append(doc)
        return missing

    def handle(self, **options):
        # Saving through the model layer stamps a fresh last_modified value.
        for doc in self._get_domains_without_last_modified_date():
            print("Updating domain {}".format(doc['name']))
            Domain.wrap(doc).save()
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
def _get_domains_without_last_modified_date(self):
docs = iter_docs(Domain.get_db(), [
domain['id']
for domain in Domain.view(
"domain/domains",
reduce=False,
include_docs=False
)
])
return [x for x in docs if 'last_modified' not in x or not x['last_modified']]
def handle(self, **options):
for domain_doc in self._get_domains_without_last_modified_date():
print("Updating domain {}".format(domain_doc['name']))
domain = Domain.wrap(domain_doc)
domain.save()
| true | true |
f729a4ab9057a7ce46593c5c5e7bee598b7c3f0f | 32,947 | py | Python | src/module/DeepFM.py | uchida-takumi/recommender_system_verification | a079e0c8764926e5dc66da01a809c6ba4fde7fb7 | [
"MIT"
] | null | null | null | src/module/DeepFM.py | uchida-takumi/recommender_system_verification | a079e0c8764926e5dc66da01a809c6ba4fde7fb7 | [
"MIT"
] | null | null | null | src/module/DeepFM.py | uchida-takumi/recommender_system_verification | a079e0c8764926e5dc66da01a809c6ba4fde7fb7 | [
"MIT"
] | null | null | null | """
# install the package
pip install deepctr
# tutorial
https://deepctr-doc.readthedocs.io/en/latest/Quick-Start.html#getting-started-4-steps-to-deepctr
# github
https://github.com/shenweichen/DeepCTR
しかし、これは binary しか出来ないので適応不可能。
binary を無理矢理適応させるばあいは、非クリックデータを何らかの方法で生成する必要がある。
# ---- 次のアイデア ----
# github
https://github.com/ChenglongChen/tensorflow-DeepFM
"""
import tensorflow as tf
import os
import pickle
import pandas as pd
import numpy as np
import copy
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_absolute_error
from src.module.tensorflow_DeepFM.DeepFM import DeepFM as DeepFM_
# インターフェース
class DeepFM:
    """Recommender wrapper around tensorflow-DeepFM (ChenglongChen's implementation).

    Raw user/item ids are translated into DeepFM feature indices by a
    Data_manager; the model then either regresses ratings (loss "mse") or
    predicts clicks as a {0,1} problem (loss "logloss", the default via
    ``ctr_prediction=True``).
    """
    def __init__(self, set_train_test_users, set_train_test_items, dict_genre=None, first_half_fit_only_fm=False, ctr_prediction=True):
        """Set up the id translator and the default DeepFM hyper-parameters.

        Parameters
        ----------
        set_train_test_users : iterable
            All user ids that appear in the train or test set.
        set_train_test_items : iterable
            All item ids that appear in the train or test set.
        dict_genre : dict, optional
            {item_id: [genre_id, ...]}; when given, genre one-hot fields
            are added to the model input.
        first_half_fit_only_fm : bool
            Forwarded to DeepFM_; when True the first half of training
            updates only the FM component.
        ctr_prediction : bool
            When True, loss_type is switched to "logloss" and ``fit``
            generates random negative (y=0) samples.

        Example (translated from the original Japanese notes)
        -----------------------------------------------------
        import pandas as pd
        DIR_DATA = 'src/module/knowledge_graph_attention_network/Data/ml'
        df_train = pd.read_csv(os.path.join(DIR_DATA, 'train_rating.csv'))
        df_test = pd.read_csv(os.path.join(DIR_DATA, 'test_rating.csv'))
        set_train_test_users = set(np.concatenate([df_train['UserID'], df_test['UserID']]))
        set_train_test_items = set(np.concatenate([df_train['MovieID'], df_test['MovieID']]))
        dict_genre = pickle.load(open(os.path.join(DIR_DATA, 'genre.pickle'), 'rb'))
        model = DeepFM(set_train_test_users, set_train_test_items, dict_genre)
        model.dfm_params['epoch'] = 10
        model.dfm_params['batch_size'] = 64
        model.fit(df_train['UserID'].values, df_train['UserID'].values, df_train['Rating'].values)
        predicted = model.predict(df_test['UserID'].values, df_test['UserID'].values)
        # MAE of the test set vs. MAE of always predicting the mean:
        print( np.mean(np.abs(predicted - df_test['Rating'])) )
        print( np.mean(np.abs(df_test['Rating'].mean() - df_test['Rating'])) )
        """
        # Reference hyper-parameters validated on MovieLens-1M, kept verbatim below.
        """
        参考として、Movielens1Mデータで検証されたハイパーパラメータは以下の通り
        Deep Matrix Factorization Approach for
        Collaborative Filtering Recommender Systems
        k(hidden-factor) = 8, γ(learning-rate) = 0.01, λ(regularization) = 0.045
        K = [9, 3, 3]; Γ= [0.01, 0.01, 0.01]; Λ = [0.1, 0.01, 0.1]
        """
        self.set_train_test_users = set(set_train_test_users)
        self.set_train_test_items = set(set_train_test_items)
        self.dict_genre = dict_genre
        self.first_half_fit_only_fm = first_half_fit_only_fm
        self.data_manager = Data_manager(set_train_test_users, set_train_test_items, dict_genre)
        feature_size, field_size = self.data_manager.get_feature_size_field_size()
        self.dfm_params = {
            "feature_size" : feature_size,
            "field_size" : field_size,
            "loss_type" : "mse", # "logloss" for {0,1} classification; "mse" for regression
            "use_fm": True, # use the FM layer
            "use_deep": True, # use the deep layer
            "embedding_size": 8,
            "dropout_fm": [1.0, 1.0],
            "deep_layers": [32, 32],
            "dropout_deep": [0.5, 0.5, 0.5],
            "deep_layers_activation": tf.nn.relu,
            "epoch": 30,
            "batch_size": 64,
            "learning_rate": 0.001,
            "optimizer_type": "adam",
            "batch_norm": 1,
            "batch_norm_decay": 0.995,
            "l2_reg": 0.0001,
            "l2_reg_embedding": 0.0001,
            "l2_reg_bias": 0.0001,
            "verbose": True,
            "eval_metric": mean_absolute_error,
            "greater_is_better": False, # whether a larger training loss score is better
            "random_seed": 2017,
        }
        self.ctr_prediction = ctr_prediction
        if self.ctr_prediction:
            self.dfm_params["loss_type"] = "logloss"
    def fit(self, users, items, ratings, test_users=[], test_items=[], test_ratings=[], **kargs):
        """Train the model on parallel (user, item, rating) arrays.

        When the test_* arrays are non-empty they are used as the
        early-stopping evaluation set.  NOTE(review): the mutable default
        arguments ([]) are read-only here, so they are harmless, but they
        are an anti-pattern worth cleaning up.

        Example
        -------
        users = [0,0,1]
        items = [0,3,3]
        ratings = [3.,4.,5.]
        """
        global_mean_bias_init = np.float32(np.mean(ratings))
        # NOTE(review): the data-driven bias init above is immediately
        # overridden by the constant below, making it dead code — confirm
        # which initialization is actually intended.
        global_mean_bias_init = 0.01
        self.model = DeepFM_(**self.dfm_params, global_mean_bias_init=global_mean_bias_init, first_half_fit_only_fm=self.first_half_fit_only_fm)
        # For CTR prediction, append randomly drawn (user, item) pairs as
        # negative (y=0) samples and binarize the positive ratings.
        if self.ctr_prediction:
            users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))
            items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))
            ratings = list((np.array(ratings)>0).astype(int)) + [0]*len(ratings)
            test_ratings = list((np.array(test_ratings)>0).astype(int))
        Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)
        if len(test_users)>0:
            test_Xi, test_Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(test_users, test_items)
            self.model.fit(Xi, Xv, ratings, test_Xi, test_Xv, test_ratings, early_stopping=True)
        else:
            self.model.fit(Xi, Xv, ratings, early_stopping=True, **kargs)
        # Remember the training vocabulary and cache the mean prediction.
        self.trained_users = list(set(users))
        self.trained_items = list(set(items))
        self.global_mean = self.model.predict(Xi, Xv).mean()
    def predict(self, users, items, *args, **kargs):
        """Return model predictions for parallel user/item id arrays."""
        Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)
        predicted = self.model.predict(Xi, Xv)
        return predicted
# prepare training and validation data in the required format
class Data_manager:
    """Translate raw user/item/genre ids into the (Xi, Xv) format DeepFM expects."""

    def __init__(self, users, items, dict_genre=None):
        """
        Parameters
        ----------
        users : array-like
            All user ids appearing in the train/test sets.
        items : array-like
            All item ids appearing in the train/test sets.
        dict_genre : dict, optional
            Mapping {item_id: [genre_id, ...]}.

        Notes (derived from tensorflow_DeepFM/example/DataReader.py)
        ------------------------------------------------------------
        Feature indices are assigned by one shared increment counter:
          1) users keep their own increment ids, e.g. {0,1,2} -> {0,1,2}
          2) items continue the increment, e.g. {0,1} -> {3,4}
          3) genres continue after items, e.g. {0,1} -> {5,6}
          4) a sample [u,i,g] = [0,1,0] therefore becomes [0,4,5]
          5) Xi is the list of such index vectors; Xv holds the value 1
             for each (all features here are categorical)
          6) y is the raw rating vector, unchanged
        """
        self.dict_genre = dict_genre
        if dict_genre:
            # Membership tests against a set are O(1); `items` may be a
            # large list/array, so build the set once.
            item_set = set(items)
            dict_genre = {i: gs for i, gs in dict_genre.items() if i in item_set}
            # Genre ids are assumed to be 0..n_genre-1.  Guard against every
            # remaining genre list being empty: max() of an empty sequence
            # raised ValueError in the original code.
            genre_lists = [gs for gs in dict_genre.values() if gs]
            n_genre = max(max(gs) for gs in genre_lists) + 1 if genre_lists else 0
            genres = list(range(n_genre))
        else:
            dict_genre = {}
            n_genre = 0
            genres = []
        # Shared increment-index over users, items and genres (in that order).
        self.inclement_index = inclement_index(users, items, genres)
        # Re-key the genre dict by the items' increment ids.
        dict_genre = {self.inclement_index.transform([i], field='item')[0]: gs
                      for i, gs in dict_genre.items()}
        # user and item occupy one field each; every genre label gets its own
        # field, so each item carries a multi-hot genre vector.
        self.re_dict_genre = {}
        for i, gs in dict_genre.items():
            genre_one_hot_vec = [0] * n_genre
            for g in gs:
                genre_one_hot_vec[g] = 1  # categorical features always carry the value 1
            self.re_dict_genre[i] = genre_one_hot_vec
        # All-zero vector used for items that have no genre entry (the
        # original code raised KeyError for such items at transform time).
        self._empty_genre_vec = [0] * n_genre
        self.genre_indexes = self.inclement_index.transform(genres, field='genre')
        self.feature_size = self.inclement_index.get_feature_size()
        self.field_size = 2 + n_genre

    def get_feature_size_field_size(self):
        """Return (feature_size, field_size) for the DeepFM constructor."""
        return self.feature_size, self.field_size

    def transform_users_and_items_to_Xi_Xv(self, users, items):
        """Convert parallel user/item id lists into DeepFM's (Xi, Xv) inputs.

        Example
        -------
        users = [0, 0, 1]
        items = [1, 5, 5]
        """
        Xi, Xv = [], []
        users = self.inclement_index.transform(users, field='user')
        items = self.inclement_index.transform(items, field='item')
        for u, i in zip(users, items):
            if self.dict_genre:
                Xi.append([u, i] + self.genre_indexes)
                Xv.append([1, 1] + self.re_dict_genre.get(i, self._empty_genre_vec))
            else:
                Xi.append([u, i])
                Xv.append([1, 1])
        return Xi, Xv
class inclement_index:
    """Assign consecutive integer ids to users, items and genres (in that order).

    Users are numbered first, then items, then genres, so every raw id maps
    to a unique feature index across the three fields.

    Example
    -------
    ii = inclement_index(['u0', 'u1', 3], ['i0', 3], ['pop', 'sf'])
    encoded = ii.transform(['u0', 'u1', 3], field='user')
    ii.transform(encoded, field='user', inverse=True)  # -> ['u0', 'u1', 3]
    """

    def __init__(self, users, items, genres=[]):
        # NOTE: the mutable default for `genres` is read-only here.
        self.increment_cnt = 0
        # Forward and inverse lookup tables, one pair per field.
        self.user_dict = {u: self.get_incremate_index() for u in set(users)}
        self.user_inverse_dict = {v: k for k, v in self.user_dict.items()}
        self.item_dict = {i: self.get_incremate_index() for i in set(items)}
        self.item_inverse_dict = {v: k for k, v in self.item_dict.items()}
        self.genre_dict = {g: self.get_incremate_index() for g in set(genres)}
        self.genre_inverse_dict = {v: k for k, v in self.genre_dict.items()}

    def transform(self, xs, field='user', inverse=False):
        """Map raw ids to increment ids, or back when ``inverse`` is True.

        Parameters
        ----------
        xs : iterable
            Ids to convert.
        field : str
            One of 'user', 'item' or 'genre'.
        inverse : bool
            When True, map increment ids back to the raw ids.

        Raises
        ------
        ValueError
            If ``field`` is not a known field name (the original code
            failed with UnboundLocalError here).
        KeyError
            If an id in ``xs`` was not registered at construction time.
        """
        if inverse:
            tables = {'user': self.user_inverse_dict,
                      'item': self.item_inverse_dict,
                      'genre': self.genre_inverse_dict}
        else:
            tables = {'user': self.user_dict,
                      'item': self.item_dict,
                      'genre': self.genre_dict}
        if field not in tables:
            raise ValueError(
                "field must be 'user', 'item' or 'genre', got %r" % (field,))
        lookup = tables[field]
        return [lookup[x] for x in xs]

    def get_incremate_index(self):
        """Return the current counter value and advance it by one."""
        # ints are immutable, so no copy is needed (the original used deepcopy).
        now_index = self.increment_cnt
        self.increment_cnt += 1
        return now_index

    def get_feature_size(self):
        """Total number of ids handed out so far (users + items + genres)."""
        return self.increment_cnt
if __name__ == 'how to use it.':
###########################
# --- かなりシンプルなテスト ---
sample_size = 1000
users = np.random.choice(range(100), size=sample_size)
items = np.random.choice(range(100), size=sample_size)
genre_dict = None
ratings = users - items
self = DeepFM(set(users), set(items))
self.dfm_params['batch_size'] = 64
self.dfm_params['epoch'] = 100
self.fit(users, items, ratings)
self.predict([10, 5, 10], [10, 10, 2]) # 正解は [0, -5, 8] である
# 十分に小さなbatch_sizeかどうかは非常に重要
# これは学習テストのロス減少によって確認できる。
###########################
# --- シンプルなテスト1 ---
sample_size = 1000
n_user = 500
n_item = 20
users = np.random.choice(range(n_user), size=sample_size)
items = np.random.choice(range(n_item), size=sample_size)
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + 3
ratings = [rating(u, i) for u,i in zip(users, items)]
self = DeepFM(list(range(n_user)), list(range(n_item)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 200
self.dfm_params['l2_reg'] = 0.0045
self.fit(users, items, ratings)
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
# scaler を導入すると改善されるか? → 特に改善はされていない。
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit([[r] for r in ratings])
s_ratings = scaler.transform([[r] for r in ratings])[:,0]
self.fit(users, items, s_ratings)
predicted = self.predict(test_users, test_items)
predicted = scaler.inverse_transform(predicted[:,None])
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
###########################
# --- シンプルなテスト2 bias とembedding あり ---
sample_size = 1000
n_user = 500
n_item = 20
users = np.random.choice(range(n_user), size=sample_size)
items = np.random.choice(range(n_item), size=sample_size)
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
user_bias = {u:u/10 for u in range(n_user)} # 単純にidが大きいほどバイアスが大きい
item_bias = {i:i for i in range(n_item)} # 単純にidが大きいほどバイアスが大きい
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]
ratings = [rating(u, i) for u,i in zip(users, items)]
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
self = DeepFM(list(range(n_user)), list(range(n_item)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 200
self.fit(users, items, ratings, test_users, test_items, test_ratings)
# 平均性能との比較
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
# オラクルとの比較
predicted = self.predict([200]*n_item, list(range(n_item)))
answer = [rating(200,i) for i in range(n_item)]
print(predicted)
print(answer)
print(predicted - answer)
## 内部の embedding を確認する。
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
###########################
# --- シンプルなテスト3 head-tail-new ID ---
sample_size = 1000
n_user = 200
n_item = 50
## id が後半になるほど学習セット中の出現率が低くなる。
p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()
p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()
users = np.random.choice(range(n_user), size=sample_size, p=p_user)
items = np.random.choice(range(n_item), size=sample_size, p=p_item)
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
user_bias = {u:u/10 for u in range(n_user)} # 単純にidが大きいほどバイアスが大きい
item_bias = {i:i for i in range(n_item)} # 単純にidが大きいほどバイアスが大きい
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]
ratings = [rating(u, i) for u,i in zip(users, items)]
## user=500 と item=20 はそれぞれ新規IDとなる
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 300
self.dfm_params['embedding_size'] = 4
self.fit(users, items, ratings, test_users, test_items, test_ratings)
# 平均値予測との比較
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
## 内部の embedding を確認する。
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
## 可視化する(ID=500 まではユーザーで、それ以降はアイテム)
import pandas as pd
# [正常] 一部のembeddingがIDの増加に合わせて線形に変化している。これらはバイアス効果を一部学習している。
pd.DataFrame(feature_embeddings).plot()
# [成功] DeepFM のバイアスの初期値を0付近にすることで、userのバイアスはオラクルに近くなった。
# [?] itemのバイアスはオラクルと逆にidが増加するほど減少している → おそらくembeddingがバイアスを学習してしまったゆえか?
pd.DataFrame(feature_bias).plot()
# 新規IDを確認する → ほぼ、初期値の0付近か?
## 新規ユーザー
feature_embeddings[200]
feature_bias[200]
## 新規アイテム
feature_embeddings[-1]
feature_bias[-1]
##############################################
# --- IDとは無関係なランダムなバイアスで学習してみる ---
sample_size = 1000
n_user = 200
n_item = 50
## id が後半になるほど学習セット中の出現率が低くなる。
p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()
p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()
users = np.random.choice(range(n_user), size=sample_size, p=p_user)
items = np.random.choice(range(n_item), size=sample_size, p=p_item)
user_bias = {u:np.random.rand() for u in range(n_user)}
item_bias = {i:np.random.rand() for i in range(n_item)}
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
def rating(u, i):
    """Oracle rating for user *u* and item *i*: 3 * (embedding dot product + biases)."""
    total = sum(user_embedding[u] * item_embedding[i])
    total = total + user_bias[u]
    total = total + item_bias[i]
    return 3 * total
ratings = [rating(u, i) for u,i in zip(users, items)]
## user=500 と item=20 はそれぞれ新規IDとなる
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
# ------------------------------
##############################################
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.001
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
""" デバック
self.predict([1]*n_item, range(n_item))
self.predict([0]*n_item, range(n_item))
[rating(1, i) for i in range(n_item)]
"""
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
"""
本テストは想定どおりの結果となり、成功したといえる。
その成功要因は、以下の変更を加えたことによる。
[1] 各id の embedding, bias の初期値を0付近のものに変更した。
[2] l2_reg の対象として embedding, bias を追加した。(おそらく、マイナーIDのweightが抑制されると思われるが、詳細は不明)
"""
# --- パラメータごとの影響を確認する。
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.001
self.dfm_params['learning_rate'] = 0.001
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.100
self.dfm_params['learning_rate'] = 0.001
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.001
self.dfm_params['learning_rate'] = 0.010
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.010
self.dfm_params['learning_rate'] = 0.010
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
# --- only fm
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.010
self.dfm_params['learning_rate'] = 0.010
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
# ---- high l2-reg
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.100
self.dfm_params['learning_rate'] = 0.010
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
# ---- high learning_rate
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.0100
self.dfm_params['l2_reg_embedding'] = 0.0100
self.dfm_params['l2_reg_bias'] = 0.0100
self.dfm_params['learning_rate'] = 0.0100
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
## 結論、頻度の違いがバイアスに影響を与えることはない。
# ---- high learning_rate
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.0100
#self.dfm_params['l2_reg_embedding'] = 0.0100
#self.dfm_params['l2_reg_bias'] = 0.0100
self.dfm_params['learning_rate'] = 0.0020
self.dfm_params['use_deep'] = False
self.dfm_params['batch_size'] = 32
self.dfm_params['loss_type'] = 'mse'
self.dfm_params['optimizer_type'] = 'sgd'
#self.dfm_params['optimizer_type'] = 'adam'
self.fit(users, items, ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self.predict([0,0,150,150],[0,10,0,10])
##########################
# MovieLensのCTR問題として定義し直して、性能を比較する
import numpy as np
ctr_users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))
ctr_items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))
ctrs = [1]*len(users) + [0]*len(users)
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.0010
#self.dfm_params['l2_reg_embedding'] = 0.0020
#self.dfm_params['l2_reg_bias'] = 0.0020
self.dfm_params['learning_rate'] = 0.00010
#self.dfm_params['use_deep'] = False
self.dfm_params['batch_size'] = 16
self.dfm_params['loss_type'] = 'logloss'
self.dfm_params['greater_is_better'] = True
self.fit(ctr_users, ctr_items, ctrs)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self.predict([0,0,150,150],[0,10,0,10])
########################
# CTR 対応型のテスト
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False, ctr_prediction=True)
self.dfm_params['epoch'] = 30
self.dfm_params['embedding_size'] = 4
self.dfm_params['batch_size'] = 32
self.dfm_params['dropout_fm'] = [0.5, 0.5]
self.dfm_params['l2_reg'] = 0.0
self.dfm_params['l2_reg_embedding'] = 0.0
self.dfm_params['l2_reg_bias'] = 0.0
self.fit(users, items, ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
pd.DataFrame(self.predict([200]*50,list(range(50)))).plot() # 新規ユーザーだけ常に一定になる。
self.predict([0,0,150,150],[0,10,0,10])
self.predict([50]*50,list(range(50)))
self.predict([100]*50,list(range(50)))
self.predict([150]*50,list(range(50)))
self.predict([200]*50,list(range(50))) # 新規ユーザーだけ常に一定になる。
self.predict([199]*50,list(range(50))) # 新規ユーザーだけ常に一定になる。
self.predict([198]*50,list(range(50))) # 新規ユーザーだけ常に一定になる。
self.predict([197]*50,list(range(50))) # 新規ユーザーだけ常に一定になる。
self.predict(list(range(200)),[50]*200) # 新規ユーザーだけ常に一定になる。
feature_embeddings[200]
feature_bias[200]
feature_embeddings[150]
feature_bias[150]
feature_embeddings[220]
feature_embeddings[222]
feature_embeddings[223]
########################
# tensorflow の動作テスト
weight = tf.Variable(initial_value=[[0,1,2,3], [0,10,20,30], [0,100,200,300]], trainable=True, name='test', dtype=tf.float32)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(weight)
op = weight[1,3].assign(9999.)
sess.run(op)
sess.run(weight)
########################
# 上手く行かなかったので、テスト
# 実際のデータで確認する
##############################################
# --- 疑似データの生成 ---
sample_size = 10000
n_user = 2000
n_item = 500
## id が後半になるほど学習セット中の出現率が低くなる。
p_user = 1/np.array(range(1, n_user+1)); p_user /= p_user.sum()
p_item = 1/np.array(range(1, n_item+1)); p_item /= p_item.sum()
users = np.random.choice(range(n_user), size=sample_size, p=p_user)
items = np.random.choice(range(n_item), size=sample_size, p=p_item)
user_bias = {u:np.random.rand() for u in range(n_user)}
item_bias = {i:np.random.rand() for i in range(n_item)}
user_embedding = {u:np.random.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
def rating(u, i):
    """Ground-truth rating oracle: scaled sum of the embedding interaction and both biases."""
    score = sum(user_embedding[u] * item_embedding[i])
    score = score + user_bias[u]
    score = score + item_bias[i]
    return 3 * score
ratings = [rating(u, i) for u,i in zip(users, items)]
## user=500 と item=20 はそれぞれ新規IDとなる
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
# ------------------------------
##############################################
for i in range(5):
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False, ctr_prediction=False)
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['deep_layers'] = [16, 16]
self.dfm_params['l2_reg'] = 0.0100 #0.0040
self.dfm_params['l2_reg_embedding'] = 0.0000 #0.001
self.dfm_params['l2_reg_bias'] = 0.000 #0.001
self.dfm_params['learning_rate'] = 0.00100 #0.001
self.dfm_params['use_deep'] = False
self.dfm_params['batch_size'] = 128
self.dfm_params['loss_type'] = 'mse'
#self.dfm_params['optimizer_type'] = 'sgd'
self.fit(users, items, ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
concat_projection = self.model.sess.run(self.model.weights["concat_projection"]) # [0,1]がuser,itemのbiasに対する重み
#pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
pd.DataFrame(concat_projection).plot()
#pd.DataFrame(self.predict([200]*50,list(range(50)))).plot() # 新規ユーザーだけ常に一定になる。
df_result = pd.DataFrame()
df_result['u=10'] = self.predict([10]*n_item,list(range(n_item)))
df_result['u=100'] = self.predict([100]*n_item,list(range(n_item)))
df_result['u=1000'] = self.predict([1000]*n_item,list(range(n_item)))
df_result['u=2000'] = self.predict([2000]*n_item,list(range(n_item)))
df_result.plot()
""" Best setting?
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False, ctr_prediction=False)
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['deep_layers'] = [16, 16]
self.dfm_params['l2_reg'] = 0.04 #0.0040
self.dfm_params['l2_reg_embedding'] = 0.001 #0.001
self.dfm_params['l2_reg_bias'] = 0.001 #0.001
self.dfm_params['learning_rate'] = 0.0010 #0.001
self.dfm_params['use_deep'] = True
self.dfm_params['batch_size'] = 64
self.dfm_params['loss_type'] = 'mse'
#self.dfm_params['optimizer_type'] = 'sgd'
self.fit(users, items, ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
#pd.DataFrame(feature_embeddings).plot()
#pd.DataFrame(feature_bias).plot()
pd.DataFrame(self.predict([200]*50,list(range(50)))).plot() # 新規ユーザーだけ常に一定になる。
""" | 40.47543 | 144 | 0.63241 |
import tensorflow as tf
import os
import pickle
import pandas as pd
import numpy as np
import copy
from sklearn.metrics import roc_auc_score
from sklearn.metrics import mean_absolute_error
from src.module.tensorflow_DeepFM.DeepFM import DeepFM as DeepFM_
class DeepFM:
    """Thin wrapper around tensorflow_DeepFM's DeepFM for user/item rating
    or CTR prediction.

    Maps raw user/item ids onto a shared consecutive feature index space
    via Data_manager, holds the hyper-parameter dict (``dfm_params``) that
    callers mutate before ``fit``, and delegates training/inference to the
    underlying DeepFM_ model.
    """
    def __init__(self, set_train_test_users, set_train_test_items, dict_genre=None, first_half_fit_only_fm=False, ctr_prediction=True):
        # All user/item ids that may appear in train or test; used to size
        # the feature index space up front.
        self.set_train_test_users = set(set_train_test_users)
        self.set_train_test_items = set(set_train_test_items)
        self.dict_genre = dict_genre
        self.first_half_fit_only_fm = first_half_fit_only_fm
        self.data_manager = Data_manager(set_train_test_users, set_train_test_items, dict_genre)
        feature_size, field_size = self.data_manager.get_feature_size_field_size()
        # Default hyper-parameters; callers tweak entries in-place before fit().
        self.dfm_params = {
            "feature_size" : feature_size,
            "field_size" : field_size,
            "loss_type" : "mse",
            "use_fm": True,
            "use_deep": True,
            "embedding_size": 8,
            "dropout_fm": [1.0, 1.0],
            "deep_layers": [32, 32],
            "dropout_deep": [0.5, 0.5, 0.5],
            "deep_layers_activation": tf.nn.relu,
            "epoch": 30,
            "batch_size": 64,
            "learning_rate": 0.001,
            "optimizer_type": "adam",
            "batch_norm": 1,
            "batch_norm_decay": 0.995,
            "l2_reg": 0.0001,
            "l2_reg_embedding": 0.0001,
            "l2_reg_bias": 0.0001,
            "verbose": True,
            "eval_metric": mean_absolute_error,
            "greater_is_better": False,
            "random_seed": 2017,
        }
        self.ctr_prediction = ctr_prediction
        if self.ctr_prediction:
            # CTR mode is a binary classification problem.
            self.dfm_params["loss_type"] = "logloss"
    def fit(self, users, items, ratings, test_users=[], test_items=[], test_ratings=[], **kargs):
        # NOTE(review): mutable default args ([]) are shared across calls;
        # they are never mutated here, but this is fragile.
        global_mean_bias_init = np.float32(np.mean(ratings))
        # NOTE(review): the mean-based init above is immediately overridden,
        # making it dead code — confirm which initialization is intended.
        global_mean_bias_init = 0.01
        self.model = DeepFM_(**self.dfm_params, global_mean_bias_init=global_mean_bias_init, first_half_fit_only_fm=self.first_half_fit_only_fm)
        if self.ctr_prediction:
            # CTR mode: treat observed interactions as positives (label 1)
            # and append an equal number of randomly sampled user-item pairs
            # as negatives (label 0).
            users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))
            items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))
            ratings = list((np.array(ratings)>0).astype(int)) + [0]*len(ratings)
            test_ratings = list((np.array(test_ratings)>0).astype(int))
        # Convert raw ids into the (feature-index, feature-value) format
        # expected by the underlying DeepFM_ implementation.
        Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)
        if len(test_users)>0:
            test_Xi, test_Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(test_users, test_items)
            self.model.fit(Xi, Xv, ratings, test_Xi, test_Xv, test_ratings, early_stopping=True)
        else:
            self.model.fit(Xi, Xv, ratings, early_stopping=True, **kargs)
        self.trained_users = list(set(users))
        self.trained_items = list(set(items))
        # Mean prediction over the training set, kept as a fallback statistic.
        self.global_mean = self.model.predict(Xi, Xv).mean()
    def predict(self, users, items, *args, **kargs):
        """Predict scores for parallel lists of user and item ids.

        Extra *args/**kargs are accepted for interface compatibility but ignored.
        """
        Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)
        predicted = self.model.predict(Xi, Xv)
        return predicted
class Data_manager:
    """Builds the DeepFM feature space from raw user/item ids and optional
    item-genre assignments.

    Fields are: user index, item index, plus one field per genre when
    ``dict_genre`` is given (one-hot valued). All indices come from a single
    shared inclement_index counter, so feature ids are globally unique.
    """
    def __init__(self, users, items, dict_genre=None):
        # dict_genre: {raw item id: [genre int, ...]} — presumably genre ints
        # are 0-based and dense; n_genre is inferred from the max observed.
        self.dict_genre = dict_genre
        if dict_genre:
            # Keep only genre info for items we actually index.
            dict_genre = {i:gs for i,gs in dict_genre.items() if i in items}
            n_genre = max([max(gs) for i,gs in dict_genre.items() if gs]) + 1
            genres = list(range(n_genre))
        else:
            dict_genre = {}
            n_genre = 0
            genres = []
        self.inclement_index = inclement_index(users, items, genres)
        # Re-key genre info by the *internal* item index rather than raw id.
        dict_genre = {self.inclement_index.transform([i], field='item')[0]:gs for i,gs in dict_genre.items()}
        # re_dict_genre: internal item index -> one-hot genre vector.
        self.re_dict_genre = {}
        for i,gs in dict_genre.items():
            genre_one_hot_vec = [0] * n_genre
            for g in gs:
                genre_one_hot_vec[g] = 1
            self.re_dict_genre[i] = genre_one_hot_vec
        self.genre_indexes = self.inclement_index.transform(genres, field='genre')
        self.feature_size = self.inclement_index.get_feature_size()
        # user field + item field + one field per genre.
        self.field_size = 2 + n_genre
    def get_feature_size_field_size(self):
        """Return (total feature count, number of fields per sample)."""
        return self.feature_size, self.field_size
    def transform_users_and_items_to_Xi_Xv(self, users, items):
        """Convert parallel id lists into DeepFM's (Xi, Xv) representation.

        Xi holds feature indices per field; Xv holds the matching feature
        values (1 for the id fields, 0/1 one-hot for genre fields).
        """
        Xi, Xv = [], []
        users = self.inclement_index.transform(users, field='user')
        items = self.inclement_index.transform(items, field='item')
        for u,i in zip(users, items):
            if self.dict_genre:
                Xi.append([u, i] + self.genre_indexes)
                Xv.append([1, 1] + self.re_dict_genre[i])
            else:
                Xi.append([u, i])
                Xv.append([1, 1])
        return Xi, Xv
class inclement_index:
    """Assigns every user, item, and genre id a unique consecutive integer
    index drawn from one shared counter (users first, then items, then
    genres), and supports forward and inverse lookups per field.

    Note: ids are deduplicated through ``set``, so which raw id receives
    which index depends on set iteration order.
    """
    def __init__(self, users, items, genres=()):
        # genres default was a mutable [] — replaced with an immutable tuple
        # (read-only use, so behavior is unchanged).
        users = set(users)
        items = set(items)
        genres = set(genres)
        self.increment_cnt = 0  # next index to hand out; doubles as feature count
        self.user_dict = {u:self.get_incremate_index() for u in users}
        self.user_inverse_dict = {v:k for k,v in self.user_dict.items()}
        self.item_dict = {i:self.get_incremate_index() for i in items}
        self.item_inverse_dict = {v:k for k,v in self.item_dict.items()}
        self.genre_dict = {g:self.get_incremate_index() for g in genres}
        self.genre_inverse_dict = {v:k for k,v in self.genre_dict.items()}
    def transform(self, xs, field='user', inverse=False):
        """Map raw ids to internal indices for *field* ('user'/'item'/'genre');
        with ``inverse=True`` map internal indices back to raw ids.

        Raises ValueError for an unknown field (previously this fell through
        to an UnboundLocalError on ``_dict``).
        """
        mappings = {
            'user': (self.user_dict, self.user_inverse_dict),
            'item': (self.item_dict, self.item_inverse_dict),
            'genre': (self.genre_dict, self.genre_inverse_dict),
        }
        try:
            forward, backward = mappings[field]
        except KeyError:
            raise ValueError(f"unknown field: {field!r}") from None
        _dict = backward if inverse else forward
        return [_dict[x] for x in xs]
    def get_incremate_index(self):
        """Return the current counter value, then advance it by one."""
        # ints are immutable; the original copy.deepcopy here was redundant.
        now_index = self.increment_cnt
        self.increment_cnt += 1
        return now_index
    def get_feature_size(self):
        """Total number of indices allocated so far."""
        return self.increment_cnt
if __name__ == 'how to use it.':
ings)
self.predict([10, 5, 10], [10, 10, 2])
ng(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + 3
ratings = [rating(u, i) for u,i in zip(users, items)]
self = DeepFM(list(range(n_user)), list(range(n_item)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 200
self.dfm_params['l2_reg'] = 0.0045
self.fit(users, items, ratings)
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit([[r] for r in ratings])
s_ratings = scaler.transform([[r] for r in ratings])[:,0]
self.fit(users, items, s_ratings)
predicted = self.predict(test_users, test_items)
predicted = scaler.inverse_transform(predicted[:,None])
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
u:u/10 for u in range(n_user)}
item_bias = {i:i for i in range(n_item)}
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]
ratings = [rating(u, i) for u,i in zip(users, items)]
test_users = np.random.choice(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
self = DeepFM(list(range(n_user)), list(range(n_item)))
self.dfm_params['epoch'] = 100
self.dfm_params['embedding_size'] = 200
self.fit(users, items, ratings, test_users, test_items, test_ratings)
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
predicted = self.predict([200]*n_item, list(range(n_item)))
answer = [rating(200,i) for i in range(n_item)]
print(predicted)
print(answer)
print(predicted - answer)
s = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
ndom.rand(5)-0.5 for u in range(n_user)}
item_embedding = {i:np.random.rand(5)-0.5 for i in range(n_item)}
user_bias = {u:u/10 for u in range(n_user)}
item_bias = {i:i for i in range(n_item)}
def rating(u, i):
return 10*sum(user_embedding[u] * item_embedding[i]) + user_bias[u] + item_bias[i]
ratings = [rating(u, i) for u,i in zip(users, items)]
e(range(n_user), size=sample_size)
test_items = np.random.choice(range(n_item), size=sample_size)
test_ratings = [rating(u, i) for u,i in zip(users, items)]
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 300
self.dfm_params['embedding_size'] = 4
self.fit(users, items, ratings, test_users, test_items, test_ratings)
predicted = self.predict(test_users, test_items)
print( np.mean(np.abs(test_ratings - predicted)) )
print( np.mean(np.abs(test_ratings - np.mean(ratings))) )
s = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
ture_embeddings[200]
feature_bias[200]
ture_embeddings[-1]
feature_bias[-1]
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.100
self.dfm_params['learning_rate'] = 0.001
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)))
self.dfm_params['epoch'] = 10
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.001
self.dfm_params['learning_rate'] = 0.010
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.010
self.dfm_params['learning_rate'] = 0.010
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.010
self.dfm_params['learning_rate'] = 0.010
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.100
self.dfm_params['learning_rate'] = 0.010
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self = DeepFM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.0100
self.dfm_params['l2_reg_embedding'] = 0.0100
self.dfm_params['l2_reg_bias'] = 0.0100
self.dfm_params['learning_rate'] = 0.0100
self.dfm_params['use_deep'] = False
self.fit(users, items, ratings, test_users, test_items, test_ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
FM(list(range(n_user+1)), list(range(n_item+1)), first_half_fit_only_fm=False)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.0100
self.dfm_params['learning_rate'] = 0.0020
self.dfm_params['use_deep'] = False
self.dfm_params['batch_size'] = 32
self.dfm_params['loss_type'] = 'mse'
self.dfm_params['optimizer_type'] = 'sgd'
self.fit(users, items, ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self.predict([0,0,150,150],[0,10,0,10])
rst_half_fit_only_fm=True)
self.dfm_params['epoch'] = 20
self.dfm_params['embedding_size'] = 4
self.dfm_params['l2_reg'] = 0.0010
self.dfm_params['learning_rate'] = 0.00010
self.dfm_params['batch_size'] = 16
self.dfm_params['loss_type'] = 'logloss'
self.dfm_params['greater_is_better'] = True
self.fit(ctr_users, ctr_items, ctrs)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
self.predict([0,0,150,150],[0,10,0,10])
, 0.5]
self.dfm_params['l2_reg'] = 0.0
self.dfm_params['l2_reg_embedding'] = 0.0
self.dfm_params['l2_reg_bias'] = 0.0
self.fit(users, items, ratings)
feature_embeddings = self.model.sess.run(self.model.weights["feature_embeddings"])
feature_bias = self.model.sess.run(self.model.weights["feature_bias"])
concat_bias = self.model.sess.run(self.model.weights["concat_bias"])
pd.DataFrame(feature_embeddings).plot()
pd.DataFrame(feature_bias).plot()
pd.DataFrame(self.predict([200]*50,list(range(50)))).plot()
self.predict([0,0,150,150],[0,10,0,10])
self.predict([50]*50,list(range(50)))
self.predict([100]*50,list(range(50)))
self.predict([150]*50,list(range(50)))
self.predict([200]*50,list(range(50)))
self.predict([199]*50,list(range(50)))
self.predict([198]*50,list(range(50)))
self.predict([197]*50,list(range(50)))
self.predict(list(range(200)),[50]*200)
feature_embeddings[200]
feature_bias[200]
feature_embeddings[150]
feature_bias[150]
feature_embeddings[220]
feature_embeddings[222]
feature_embeddings[223]
(op)
sess.run(weight)
| true | true |
f729a5f7dadfdd5df237186e52fbdba21151b043 | 1,185 | py | Python | scripts/quest/q20894s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/quest/q20894s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/quest/q20894s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # 20893 - [Job Adv] (Lv.100) The Empress' Chief Knight
# Job-advancement quest dialog. Drives the script manager (sm) through an
# NPC conversation; on acceptance, advances the player's job, grants a medal
# item (1142069) and 3 AP, and completes the parent quest.
sm.setSpeakerID(1101000)  # NPC 1101000 speaks — presumably Empress Cygnus; confirm against NPC data
sm.sendNext("#h #... what is this?")
sm.setPlayerAsSpeaker()
sm.sendNext("This, milady, is the report from Neinheart about the activities of the Cygnus Knights.")
sm.setSpeakerID(1101000)
# NOTE(review): "wat" below looks like a typo for "what" in the dialog text.
sm.sendNext("Haha, is that wat Neinheart said? It is a recommendation about you. It's all about the process of you getting stronger and the activities done by you...")
sm.setPlayerAsSpeaker()
sm.sendNext("What did Neinheart write about me?")
sm.setSpeakerID(1101000)
if sm.sendAskYesNo("I would like to appoint a title to you for your activities and effort. will you accept this?"):
    sm.sendSay("#h #, with your braveness and courage, from now on you are a new captain of the knights. Please use your power to protect the Maple World.")
    if sm.canHold(1142069):  # ensure there is room in the Equip inventory for the reward
        chrJobID = sm.getChr().getJob()
        sm.jobAdvance(chrJobID+1)  # advance to the next job id in sequence
        sm.giveItem(1142069)
        sm.addAP(3)
        sm.completeQuest(parentID)
        # NOTE(review): no sm.dispose() on this success path, unlike the other
        # branches — confirm whether the script manager requires it here.
    else:
        sm.sendSay("Please make space in your Equip inventory.")
        sm.dispose()
else:
    sm.sendSay("Please speak to me again when you change your mind.")
    sm.dispose()
sm.setSpeakerID(1101000)
sm.sendNext("#h #... what is this?")
sm.setPlayerAsSpeaker()
sm.sendNext("This, milady, is the report from Neinheart about the activities of the Cygnus Knights.")
sm.setSpeakerID(1101000)
sm.sendNext("Haha, is that wat Neinheart said? It is a recommendation about you. It's all about the process of you getting stronger and the activities done by you...")
sm.setPlayerAsSpeaker()
sm.sendNext("What did Neinheart write about me?")
sm.setSpeakerID(1101000)
if sm.sendAskYesNo("I would like to appoint a title to you for your activities and effort. will you accept this?"):
sm.sendSay("#h #, with your braveness and courage, from now on you are a new captain of the knights. Please use your power to protect the Maple World.")
if sm.canHold(1142069):
chrJobID = sm.getChr().getJob()
sm.jobAdvance(chrJobID+1)
sm.giveItem(1142069)
sm.addAP(3)
sm.completeQuest(parentID)
else:
sm.sendSay("Please make space in your Equip inventory.")
sm.dispose()
else:
sm.sendSay("Please speak to me again when you change your mind.")
sm.dispose() | true | true |
f729a757c28a8effadf260089ab2ae44c11f245e | 2,029 | py | Python | esphome/components/esp32/gpio_esp32_s3.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 249 | 2018-04-07T12:04:11.000Z | 2019-01-25T01:11:34.000Z | esphome/components/esp32/gpio_esp32_s3.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 243 | 2018-04-11T16:37:11.000Z | 2019-01-25T16:50:37.000Z | esphome/components/esp32/gpio_esp32_s3.py | OttoWinter/esphomeyaml | 6a85259e4d6d1b0a0f819688b8e555efcb99ecb0 | [
"MIT"
] | 40 | 2018-04-10T05:50:14.000Z | 2019-01-25T15:20:36.000Z | import logging
from esphome.const import (
CONF_INPUT,
CONF_MODE,
CONF_NUMBER,
)
import esphome.config_validation as cv
# GPIOs claimed by the SPI flash / PSRAM interface on all ESP32-S3 parts;
# using them as general-purpose pins is always an error. Values name the
# SPI bus function each pin carries.
_ESP_32S3_SPI_PSRAM_PINS = {
    26: "SPICS1",
    27: "SPIHD",
    28: "SPIWP",
    29: "SPICS0",
    30: "SPICLK",
    31: "SPIQ",
    32: "SPID",
}
# GPIOs additionally used by the octal PSRAM on ESP32-S3R8 / -S3R8V
# variants; only a warning, since other variants can use them freely.
_ESP_32_ESP32_S3R8_PSRAM_PINS = {
    33: "SPIIO4",
    34: "SPIIO5",
    35: "SPIIO6",
    36: "SPIIO7",
    37: "SPIDQS",
}
# Strapping pins sampled at reset; external pull resistors here can change
# the boot mode, so their use is warned about rather than rejected.
_ESP_32S3_STRAPPING_PINS = {0, 3, 45, 46}
_LOGGER = logging.getLogger(__name__)
def esp32_s3_validate_gpio_pin(value):
    """Validate that ``value`` is a GPIO number usable on the ESP32-S3.

    Raises ``cv.Invalid`` for pins that do not exist or are hard-wired to
    the SPI flash/PSRAM interface, and logs a warning for pins that are
    merely risky (strapping pins; octal-PSRAM pins on R8 variants).
    Returns the validated pin number unchanged.
    """
    if value < 0 or value > 48:
        # Bug fix: the message previously said "(must be 0-46)", which
        # contradicted the 0-48 range actually enforced by this check.
        raise cv.Invalid(f"Invalid pin number: {value} (must be 0-48)")
    if value in _ESP_32S3_SPI_PSRAM_PINS:
        raise cv.Invalid(
            f"This pin cannot be used on ESP32-S3s and is already used by the SPI/PSRAM interface(function: {_ESP_32S3_SPI_PSRAM_PINS[value]})"
        )
    if value in _ESP_32_ESP32_S3R8_PSRAM_PINS:
        # Only a warning: these pins are free on non-R8 ESP32-S3 variants.
        _LOGGER.warning(
            "GPIO%d is used by the PSRAM interface on ESP32-S3R8 / ESP32-S3R8V and should be avoided on these models",
            value,
        )
    if value in _ESP_32S3_STRAPPING_PINS:
        _LOGGER.warning(
            "GPIO%d is a Strapping PIN and should be avoided.\n"
            "Attaching external pullup/down resistors to strapping pins can cause unexpected failures.\n"
            "See https://esphome.io/guides/faq.html#why-am-i-getting-a-warning-about-strapping-pins",
            value,
        )
    if value in (22, 23, 24, 25):
        # These pins are not exposed in GPIO mux (reason unknown)
        # but they're missing from IO_MUX list in datasheet
        raise cv.Invalid(f"The pin GPIO{value} is not usable on ESP32-S3s.")
    return value
def esp32_s3_validate_supports(value):
    """Validate that the requested pin mode is supported on the ESP32-S3.

    ``value`` is a config dict carrying the pin number and mode flags.
    Returns it unchanged when valid; raises ``cv.Invalid`` otherwise.
    """
    num = value[CONF_NUMBER]
    mode = value[CONF_MODE]
    is_input = mode[CONF_INPUT]
    if num < 0 or num > 48:
        # Bug fix: message previously said "(must be 0-46)", contradicting
        # the 0-48 range enforced by this check.
        raise cv.Invalid(f"Invalid pin number: {num} (must be 0-48)")
    if is_input:
        # All ESP32 pins support input mode
        pass
    return value
| 27.053333 | 143 | 0.637753 | import logging
from esphome.const import (
CONF_INPUT,
CONF_MODE,
CONF_NUMBER,
)
import esphome.config_validation as cv
# GPIO 26-32 are wired to the SPI flash / PSRAM interface on the ESP32-S3
# and are rejected outright by the validator below; values are the SPI
# bus function names used in the error message.
_ESP_32S3_SPI_PSRAM_PINS = {
    26: "SPICS1",
    27: "SPIHD",
    28: "SPIWP",
    29: "SPICS0",
    30: "SPICLK",
    31: "SPIQ",
    32: "SPID",
}
# GPIO 33-37 are additionally used by the PSRAM interface on the
# ESP32-S3R8 / ESP32-S3R8V variants; the validator only warns for these,
# since they are usable on other ESP32-S3 variants.
_ESP_32_ESP32_S3R8_PSRAM_PINS = {
    33: "SPIIO4",
    34: "SPIIO5",
    35: "SPIIO6",
    36: "SPIIO7",
    37: "SPIDQS",
}
# Strapping pins; usable, but the validator emits a warning because
# external pull resistors on them can cause boot failures.
_ESP_32S3_STRAPPING_PINS = {0, 3, 45, 46}
# Module-level logger for the validation warnings below.
_LOGGER = logging.getLogger(__name__)
def esp32_s3_validate_gpio_pin(value):
    """Validate that ``value`` is a GPIO number usable on the ESP32-S3.

    Raises ``cv.Invalid`` for pins that do not exist or are hard-wired to
    the SPI flash/PSRAM interface, and logs a warning for pins that are
    merely risky (strapping pins; octal-PSRAM pins on R8 variants).
    Returns the validated pin number unchanged.
    """
    if value < 0 or value > 48:
        # Bug fix: the message previously said "(must be 0-46)", which
        # contradicted the 0-48 range actually enforced by this check.
        raise cv.Invalid(f"Invalid pin number: {value} (must be 0-48)")
    if value in _ESP_32S3_SPI_PSRAM_PINS:
        raise cv.Invalid(
            f"This pin cannot be used on ESP32-S3s and is already used by the SPI/PSRAM interface(function: {_ESP_32S3_SPI_PSRAM_PINS[value]})"
        )
    if value in _ESP_32_ESP32_S3R8_PSRAM_PINS:
        # Only a warning: these pins are free on non-R8 ESP32-S3 variants.
        _LOGGER.warning(
            "GPIO%d is used by the PSRAM interface on ESP32-S3R8 / ESP32-S3R8V and should be avoided on these models",
            value,
        )
    if value in _ESP_32S3_STRAPPING_PINS:
        _LOGGER.warning(
            "GPIO%d is a Strapping PIN and should be avoided.\n"
            "Attaching external pullup/down resistors to strapping pins can cause unexpected failures.\n"
            "See https://esphome.io/guides/faq.html#why-am-i-getting-a-warning-about-strapping-pins",
            value,
        )
    if value in (22, 23, 24, 25):
        # GPIO 22-25 are absent from the ESP32-S3 IO mux and cannot be used.
        raise cv.Invalid(f"The pin GPIO{value} is not usable on ESP32-S3s.")
    return value
def esp32_s3_validate_supports(value):
    """Validate that the requested pin mode is supported on the ESP32-S3.

    ``value`` is a config dict carrying the pin number and mode flags.
    Returns it unchanged when valid; raises ``cv.Invalid`` otherwise.
    """
    num = value[CONF_NUMBER]
    mode = value[CONF_MODE]
    is_input = mode[CONF_INPUT]
    if num < 0 or num > 48:
        # Bug fix: message previously said "(must be 0-46)", contradicting
        # the 0-48 range enforced by this check.
        raise cv.Invalid(f"Invalid pin number: {num} (must be 0-48)")
    if is_input:
        # All ESP32 pins support input mode
        pass
    return value
| true | true |
f729a7d37d4edca3f86ab33a43e1680548398202 | 810 | py | Python | setup.py | ghuvrons/Jati | b4d056cf38d4770f3bef0f3db93c4b982f4e3da0 | [
"MIT"
] | null | null | null | setup.py | ghuvrons/Jati | b4d056cf38d4770f3bef0f3db93c4b982f4e3da0 | [
"MIT"
] | null | null | null | setup.py | ghuvrons/Jati | b4d056cf38d4770f3bef0f3db93c4b982f4e3da0 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="Jati",
version="0.0.2",
author="Janoko",
author_email="janoko@sandhika.com",
description="Jati merupakan modul python untuk restAPI dan websocket",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ghuvrons/Jati",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points = {
'console_scripts': ['jati=Jati.CLI:main'],
},
python_requires='>=3.7',
install_requires=[
"click>=5.1",
"PyMySQL==1.0.2"
]
)
| 26.129032 | 74 | 0.62963 | import setuptools
# Read the README so it can be published as the long description on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="Jati",
    version="0.0.2",
    author="Janoko",
    author_email="janoko@sandhika.com",
    description="Jati merupakan modul python untuk restAPI dan websocket",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ghuvrons/Jati",
    packages=setuptools.find_packages(),  # auto-discover all project packages
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Installs the `jati` console command pointing at Jati.CLI:main.
    entry_points = {
        'console_scripts': ['jati=Jati.CLI:main'],
    },
    python_requires='>=3.7',
    install_requires=[
        "click>=5.1",
        "PyMySQL==1.0.2"
    ]
)
| true | true |
f729a80b6998f64cb1816f1f898fafce7f1fc291 | 6,848 | py | Python | kubernetes/client/models/v1_endpoints_list.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 1 | 2019-10-07T13:54:36.000Z | 2019-10-07T13:54:36.000Z | kubernetes/client/models/v1_endpoints_list.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 8 | 2020-12-21T03:18:50.000Z | 2022-03-02T03:06:30.000Z | kubernetes/client/models/v1_endpoints_list.py | pllsxyc/python | 442ebc019056c2dc246be94f85cf61f1e1d26a88 | [
"Apache-2.0"
] | 1 | 2021-03-16T16:05:33.000Z | 2021-03-16T16:05:33.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1EndpointsList(object):
    """Auto-generated OpenAPI model for a list of v1 Endpoints objects.

    Produced by the OpenAPI Generator (https://openapi-generator.tech);
    do not edit this class by hand.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Endpoints]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """Construct a V1EndpointsList; ``items`` goes through its validating setter."""
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below.
        self._api_version = self._items = self._kind = self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        # Required field: always run through the setter so validation applies.
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """str: versioned schema identifier (JSON key ``apiVersion``).

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return self._api_version

    @api_version.setter
    def api_version(self, value):
        """Set the apiVersion string of this list."""
        self._api_version = value

    @property
    def items(self):
        """list[V1Endpoints]: the endpoints contained in this list."""
        return self._items

    @items.setter
    def items(self, value):
        """Set the endpoints list; rejects ``None`` when client-side validation is on."""
        if value is None and self.local_vars_configuration.client_side_validation:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = value

    @property
    def kind(self):
        """str: REST resource kind, in CamelCase (JSON key ``kind``).

        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return self._kind

    @kind.setter
    def kind(self, value):
        """Set the kind string of this list."""
        self._kind = value

    @property
    def metadata(self):
        """V1ListMeta: standard list metadata."""
        return self._metadata

    @metadata.setter
    def metadata(self, value):
        """Set the list metadata."""
        self._metadata = value

    def to_dict(self):
        """Serialize the model into a plain ``dict``."""
        def _plain(element):
            # Nested models expose to_dict(); primitives pass through untouched.
            return element.to_dict() if hasattr(element, "to_dict") else element

        serialized = {}
        for name, _type in six.iteritems(self.openapi_types):
            current = getattr(self, name)
            if isinstance(current, list):
                serialized[name] = [_plain(entry) for entry in current]
            elif hasattr(current, "to_dict"):
                serialized[name] = current.to_dict()
            elif isinstance(current, dict):
                serialized[name] = {key: _plain(val) for key, val in current.items()}
            else:
                serialized[name] = current
        return serialized

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two lists are equal when their serialized dicts match."""
        if isinstance(other, V1EndpointsList):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__`` (kept explicit for Python 2 compatibility)."""
        if isinstance(other, V1EndpointsList):
            return self.to_dict() != other.to_dict()
        return True
| 33.242718 | 312 | 0.622079 |
import pprint
import re
import six
from kubernetes.client.configuration import Configuration
class V1EndpointsList(object):
    """OpenAPI client model holding a list of ``V1Endpoints`` objects.

    Generated-style model: ``openapi_types`` declares attribute types and
    ``attribute_map`` maps attribute names to their JSON wire keys.
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Endpoints]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):
        """Construct the model.

        ``items`` is assigned through its property setter, which rejects
        ``None`` when client-side validation is enabled in the configuration.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        # Backing fields for the properties below.
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Return the apiVersion string of this V1EndpointsList."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Set the apiVersion string of this V1EndpointsList."""
        self._api_version = api_version

    @property
    def items(self):
        """Return the list of endpoints (``list[V1Endpoints]``)."""
        return self._items

    @items.setter
    def items(self, items):
        """Set the list of endpoints; ``None`` is rejected when validation is on."""
        if self.local_vars_configuration.client_side_validation and items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = items

    @property
    def kind(self):
        """Return the kind string of this V1EndpointsList."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Set the kind string of this V1EndpointsList."""
        self._kind = kind

    @property
    def metadata(self):
        """Return the list metadata (``V1ListMeta``)."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Set the list metadata."""
        self._metadata = metadata

    def to_dict(self):
        """Serialize the model properties into a plain dict.

        Nested values exposing ``to_dict()`` are converted one level deep,
        inside lists and dict values as well.
        """
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two instances are equal when their serialized dicts match."""
        if not isinstance(other, V1EndpointsList):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        if not isinstance(other, V1EndpointsList):
            return True
        return self.to_dict() != other.to_dict()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.