hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bc1bb8524481d0a5248f75317737be37aaced6a0 | 758 | py | Python | Projects/FindPiToTheNthDigit.py | pycoder2000/Fun-Python-Projects | bfd6e5f696b7887e9f99af45cbb35f3d10ee807e | [
"MIT"
] | null | null | null | Projects/FindPiToTheNthDigit.py | pycoder2000/Fun-Python-Projects | bfd6e5f696b7887e9f99af45cbb35f3d10ee807e | [
"MIT"
] | null | null | null | Projects/FindPiToTheNthDigit.py | pycoder2000/Fun-Python-Projects | bfd6e5f696b7887e9f99af45cbb35f3d10ee807e | [
"MIT"
] | null | null | null | __author__ = ['[Parth Desai](https://github.com/pycoder2000)']
# Module authorship/licensing metadata (informational only; not used at runtime).
__copyright__ = "Copyright (C) 2020 Parth Desai"
__credits__ = ["Parth Desai"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Parth Desai"
__email__ = "desaiparth2000@gmail.com"
__status__ = "Production"
"""
Enter a number and have the program generate PI up to that many decimal places.
"""
import math
def Calculate_Pi(digits):
    """Return pi rounded to `digits` decimal places, as a string.

    The original version computed the value but discarded it (assigned to a
    local and returned None), so callers could never see the result.

    Note: precision is bounded by float `math.pi` (about 15 decimal digits).
    """
    return str(round(math.pi, ndigits=digits))
def main(): # Wrapper Function
    """Prompt for a decimal count and print pi rounded to that precision."""
    try:
        no_of_digits = int(input("Enter the number of decimals to calculate to : "))
    except ValueError:
        # Only catch the parse failure; a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit.
        print("You did not enter an integer : ")
        return
    # NOTE(review): rounds inline because Calculate_Pi as written returns None;
    # once that helper returns its value this can delegate to it.
    print(str(round(math.pi, ndigits=no_of_digits)))
if __name__ == '__main__':
    main()
a896be417c50edd33e02a79fd337b7e803a3a536 | 90 | py | Python | hdp-doc-gen/__init__.py | nabeelhdp/hdp-doc-gen | f283035b94edf677d0d4424415bae4c2944658b2 | [
"Apache-2.0"
] | null | null | null | hdp-doc-gen/__init__.py | nabeelhdp/hdp-doc-gen | f283035b94edf677d0d4424415bae4c2944658b2 | [
"Apache-2.0"
] | null | null | null | hdp-doc-gen/__init__.py | nabeelhdp/hdp-doc-gen | f283035b94edf677d0d4424415bae4c2944658b2 | [
"Apache-2.0"
] | null | null | null | __all__ = [
'configmapping',
'connectionchecks',
'docgen',
'readconfig'
]
| 12.857143 | 23 | 0.577778 |
7bd7f4ab0431a48b5f5fe54fe047c7a664a5b508 | 2,265 | py | Python | numpoly/array_function/subtract.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 8 | 2019-12-13T23:54:33.000Z | 2021-11-08T22:44:25.000Z | numpoly/array_function/subtract.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 54 | 2019-08-25T20:03:10.000Z | 2021-08-09T08:59:27.000Z | numpoly/array_function/subtract.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 2 | 2020-03-05T12:03:28.000Z | 2021-03-07T16:56:09.000Z | """Subtract arguments, element-wise."""
from __future__ import annotations
from typing import Any, Optional
import numpy
import numpy.typing
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements, simple_dispatch
@implements(numpy.subtract)
def subtract(
    x1: PolyLike,
    x2: PolyLike,
    out: Optional[ndpoly] = None,
    where: numpy.typing.ArrayLike = True,
    **kwargs: Any,
) -> ndpoly:
    """
    Subtract arguments, element-wise.

    Args:
        x1, x2:
            Arrays to subtract from one another. When
            ``x1.shape != x2.shape``, the two must be broadcastable to a
            common shape, which becomes the shape of the result.
        out:
            Optional location for the result. When given, it must have a
            shape the inputs broadcast to; when omitted (or `None`) a
            freshly allocated array is returned. A tuple (only possible as
            a keyword argument) must match the number of outputs in length.
        where:
            Condition broadcast over the input. Where it is True, the
            `out` array is set to the ufunc result; elsewhere `out` keeps
            its original value. With the default ``out=None``, positions
            where the condition is False remain uninitialized.
        kwargs:
            Keyword arguments forwarded to numpy.ufunc.

    Returns:
        The difference of `x1` and `x2`, element-wise; a scalar when both
        inputs are scalars.

    Examples:
        >>> q0, q1 = numpoly.variable(2)
        >>> numpoly.subtract(q0, 4)
        polynomial(q0-4)
        >>> poly1 = q0**numpy.arange(9).reshape((3, 3))
        >>> poly2 = q1**numpy.arange(3)
        >>> numpoly.subtract(poly1, poly2)
        polynomial([[0, -q1+q0, -q1**2+q0**2],
                    [q0**3-1, q0**4-q1, q0**5-q1**2],
                    [q0**6-1, q0**7-q1, q0**8-q1**2]])

    """
    # simple_dispatch expects `out` wrapped in a 1-tuple when provided.
    wrapped_out = None if out is None else (out,)
    return simple_dispatch(
        numpy_func=numpy.subtract,
        inputs=(x1, x2),
        out=wrapped_out,
        where=where,
        **kwargs
    )
| 33.80597 | 79 | 0.596026 |
9680a52828c55c9a5642ffa6c7e0b6d94ef0f9d3 | 45,047 | py | Python | vnpy/api/xspeed/pyscript/xspeed_struct.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 77 | 2017-02-28T08:18:53.000Z | 2021-12-27T09:47:49.000Z | vnpy/api/xspeed/pyscript/xspeed_struct.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 1 | 2016-08-18T14:00:25.000Z | 2016-08-18T14:00:25.000Z | vnpy/api/xspeed/pyscript/xspeed_struct.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 41 | 2017-03-27T12:42:04.000Z | 2022-03-20T01:37:20.000Z | # encoding: UTF-8
# Registry mapping struct names to their field dictionaries.
structDict = {}

# Heartbeat packet
DFITCTimeOutField = {
    "lRequestID": "long",
}
structDict['DFITCTimeOutField'] = DFITCTimeOutField

# Order insertion request data type (basic order)
DFITCInsertOrderField = {
    "accountID": "string",
    "localOrderID": "long",
    "instrumentID": "string",
    "insertPrice": "float",
    "orderAmount": "long",
    "buySellType": "short",
    "openCloseType": "int",
    "speculator": "int",
    "insertType": "int",
    "orderType": "int",
    "orderProperty": "char",
    "instrumentType": "int",
    "minMatchAmount": "long",
    "reservedType2": "int",
    "lRequestID": "long",
    "customCategory": "string",
    "profitLossPrice": "float",
}
structDict['DFITCInsertOrderField'] = DFITCInsertOrderField

# Order cancellation data type
DFITCCancelOrderField = {
    "accountID": "string",
    "spdOrderID": "long",
    "localOrderID": "long",
    "instrumentID": "string",
    "lRequestID": "long",
}
structDict['DFITCCancelOrderField'] = DFITCCancelOrderField
# Order response type
DFITCOrderRspDataRtnField = {
    "localOrderID": "long",
    "spdOrderID": "long",
    "orderStatus": "short",
    "lRequestID": "long",
    "fee": "float",
    "margin": "float",
    "customCategory": "string",
    "accountID": "string",
    "instrumentID": "string",
    "sessionID": "long",
    "exchangeID": "string",
    "buySellType": "short",
    "openCloseType": "int",
    "instrumentType": "int",
    "speculator": "int",
    "insertPrice": "float",
    "profitLossPrice": "float",
    "minMatchAmount": "long",
    "orderAmount": "long",
    "insertType": "int",
    "orderType": "int",
    "orderProperty": "char",
    "clientID": "string",
}
structDict['DFITCOrderRspDataRtnField'] = DFITCOrderRspDataRtnField

# Capital query data type
DFITCCapitalField = {
    "lRequestID": "long",
    "accountID": "string",
}
structDict['DFITCCapitalField'] = DFITCCapitalField

# Position query data type
DFITCPositionField = {
    "lRequestID": "long",
    "accountID": "string",
    "instrumentID": "string",
    "instrumentType": "int",
}
structDict['DFITCPositionField'] = DFITCPositionField

# Exchange instrument query
DFITCExchangeInstrumentField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeID": "string",
    "instrumentType": "int",
}
structDict['DFITCExchangeInstrumentField'] = DFITCExchangeInstrumentField

# User login data type
DFITCUserLoginField = {
    "lRequestID": "long",
    "accountID": "string",
    "passwd": "string",
    "companyID": "short",
}
structDict['DFITCUserLoginField'] = DFITCUserLoginField

# User logout data type
DFITCUserLogoutField = {
    "lRequestID": "long",
    "accountID": "string",
    "sessionID": "long",
}
structDict['DFITCUserLogoutField'] = DFITCUserLogoutField
# Order report (return) type
DFITCOrderRtnField = {
    "localOrderID": "long",
    "spdOrderID": "long",
    "OrderSysID": "string",
    "orderStatus": "short",
    "sessionID": "long",
    "SuspendTime": "string",
    "instrumentID": "string",
    "exchangeID": "string",
    "buySellType": "short",
    "openCloseType": "int",
    "instrumentType": "int",
    "speculator": "int",
    "insertPrice": "float",
    "profitLossPrice": "float",
    "accountID": "string",
    "cancelAmount": "long",
    "orderAmount": "long",
    "insertType": "int",
    "orderType": "int",
    "extSpdOrderID": "long",
    "reservedType2": "int",
    "customCategory": "string",
    "orderProperty": "char",
    "minMatchAmount": "long",
    "clientID": "string",
    "statusMsg": "string",
}
structDict['DFITCOrderRtnField'] = DFITCOrderRtnField

# Trade (match) report type
DFITCMatchRtnField = {
    "localOrderID": "long",
    "OrderSysID": "string",
    "matchID": "string",
    "instrumentID": "string",
    "buySellType": "short",
    "openCloseType": "int",
    "matchedPrice": "float",
    "orderAmount": "long",
    "matchedAmount": "long",
    "matchedTime": "string",
    "insertPrice": "float",
    "spdOrderID": "long",
    "matchType": "long",
    "speculator": "int",
    "exchangeID": "string",
    "fee": "float",
    "sessionID": "long",
    "instrumentType": "int",
    "accountID": "string",
    "orderStatus": "short",
    "margin": "float",
    "frozenCapita": "float",
    "adjustmentInfo": "string",
    "customCategory": "string",
    "turnover": "float",
    "orderType": "int",
    "insertType": "int",
    "clientID": "string",
}
structDict['DFITCMatchRtnField'] = DFITCMatchRtnField

# Order cancellation report type
DFITCOrderCanceledRtnField = {
    "localOrderID": "long",
    "OrderSysID": "string",
    "instrumentID": "string",
    "insertPrice": "float",
    "buySellType": "short",
    "openCloseType": "int",
    "cancelAmount": "long",
    "spdOrderID": "long",
    "speculator": "int",
    "exchangeID": "string",
    "canceledTime": "string",
    "sessionID": "long",
    "orderStatus": "short",
    "instrumentType": "int",
    "accountID": "string",
    "orderAmount": "long",
    "margin": "float",
    "fee": "float",
    "customCategory": "string",
    "profitLossPrice": "float",
    "minMatchAmount": "long",
    "insertType": "int",
    "clientID": "string",
    "statusMsg": "string",
    "orderProperty": "char",
}
structDict['DFITCOrderCanceledRtnField'] = DFITCOrderCanceledRtnField
# Error information
DFITCErrorRtnField = {
    "requestID": "long",
    "sessionID": "long",
    "accountID": "string",
    "nErrorID": "int",
    "spdOrderID": "long",
    "localOrderID": "long",
    "errorMsg": "string",
    "instrumentID": "string",
}
structDict['DFITCErrorRtnField'] = DFITCErrorRtnField

# Capital information response
DFITCCapitalInfoRtnField = {
    "requestID": "long",
    "accountID": "string",
    "preEquity": "float",
    "todayEquity": "float",
    "closeProfitLoss": "float",
    "positionProfitLoss": "float",
    "frozenMargin": "float",
    "margin": "float",
    "fee": "float",
    "available": "float",
    "withdraw": "float",
    "riskDegree": "float",
    "todayPremiumIncome": "float",
    "todayPremiumPay": "float",
    "yesterdayPremium": "float",
    "optMarketValue": "float",
    "floatProfitLoss": "float",
    "totFundOut": "float",
    "totFundIn": "float",
}
structDict['DFITCCapitalInfoRtnField'] = DFITCCapitalInfoRtnField

# Position information response
DFITCPositionInfoRtnField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeID": "string",
    "instrumentID": "string",
    "buySellType": "short",
    "openAvgPrice": "float",
    "positionAvgPrice": "float",
    "positionAmount": "long",
    "totalAvaiAmount": "long",
    "todayAvaiAmount": "long",
    "lastAvaiAmount": "long",
    "todayAmount": "long",
    "lastAmount": "long",
    "tradingAmount": "long",
    "datePositionProfitLoss": "float",
    "dateCloseProfitLoss": "float",
    "dPremium": "float",
    "floatProfitLoss": "float",
    "dMargin": "float",
    "speculator": "int",
    "clientID": "string",
    "preSettlementPrice": "float",
    "instrumentType": "int",
    "yesterdayTradingAmount": "long",
}
structDict['DFITCPositionInfoRtnField'] = DFITCPositionInfoRtnField
# User login response information
DFITCUserLoginInfoRtnField = {
    "lRequestID": "long",
    "accountID": "string",
    "loginResult": "int",
    "initLocalOrderID": "long",
    "sessionID": "long",
    "nErrorID": "int",
    "errorMsg": "string",
    "DCEtime": "string",
    "SHFETime": "string",
    "CFFEXTime": "string",
    "CZCETime": "string",
    "INETime": "string",
}
structDict['DFITCUserLoginInfoRtnField'] = DFITCUserLoginInfoRtnField

# User logout response information
DFITCUserLogoutInfoRtnField = {
    "lRequestID": "long",
    "accountID": "string",
    "logoutResult": "int",
    "nErrorID": "int",
    "errorMsg": "string",
}
structDict['DFITCUserLogoutInfoRtnField'] = DFITCUserLogoutInfoRtnField

# Arbitrage instrument query
DFITCAbiInstrumentField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeID": "string",
}
structDict['DFITCAbiInstrumentField'] = DFITCAbiInstrumentField

# Arbitrage instrument response information
DFITCAbiInstrumentRtnField = {
    "lRequestID": "long",
    "exchangeID": "string",
    "InstrumentID": "string",
    "instrumentName": "string",
}
structDict['DFITCAbiInstrumentRtnField'] = DFITCAbiInstrumentRtnField

# Specified instrument
DFITCSpecificInstrumentField = {
    "lRequestID": "long",
    "accountID": "string",
    "InstrumentID": "string",
    "exchangeID": "string",
    "instrumentType": "int",
}
structDict['DFITCSpecificInstrumentField'] = DFITCSpecificInstrumentField

# Specified instrument information (extended)
DFITCSpecificInstrumentFieldEX = {
    "lRequestID": "long",
    "accountID": "string",
    "FunctionID": "string",
    "InstrumentID": "string",
    "exchangeID": "string",
    "instrumentType": "int",
}
structDict['DFITCSpecificInstrumentFieldEX'] = DFITCSpecificInstrumentFieldEX

# Market data subscription response information
DFITCActiveContractField = {
    "lRequestID": "long",
    "activeContract": "string",
}
structDict['DFITCActiveContractField'] = DFITCActiveContractField

# Exchange instrument response information
DFITCExchangeInstrumentRtnField = {
    "lRequestID": "long",
    "exchangeID": "string",
    "instrumentID": "string",
    "VarietyName": "string",
    "instrumentType": "int",
    "orderTopLimit": "long",
    "mktOrderTopLimit": "long",
    "contractMultiplier": "float",
    "minPriceFluctuation": "float",
    "instrumentMaturity": "string",
    "upperLimitPrice": "float",
    "lowerLimitPrice": "float",
    "preClosePrice": "float",
    "preSettlementPrice": "float",
    "settlementPrice": "float",
    "preOpenInterest": "long",
    "instrumentPrefix": "string",
    "instrumentExpiration": "string",
    "underlying": "string",
    "optionType": "int",
    "strikePrice": "float",
    "exchangeRiskDegree": "float",
    "minMargin": "float",
    "tradeSize": "long",
}
structDict['DFITCExchangeInstrumentRtnField'] = DFITCExchangeInstrumentRtnField
# Order query data structure
DFITCOrderField = {
    "lRequestID": "long",
    "accountID": "string",
    "instrumentType": "int",
    "customCategory": "string",
    "orderStatus": "short",
    "orderType": "int",
    "spdOrderID": "long",
    "localOrderID": "long",
    "instrumentID": "string",
}
structDict['DFITCOrderField'] = DFITCOrderField

# Match query data structure
DFITCMatchField = {
    "lRequestID": "long",
    "accountID": "string",
    "instrumentType": "int",
    "customCategory": "string",
    "orderType": "int",
    "spdOrderID": "long",
    "instrumentID": "string",
}
structDict['DFITCMatchField'] = DFITCMatchField

# Order query response data structure
DFITCOrderCommRtnField = {
    "lRequestID": "long",
    "spdOrderID": "long",
    "orderStatus": "short",
    "instrumentID": "string",
    "buySellType": "short",
    "openClose": "int",
    "insertPrice": "float",
    "orderAmount": "long",
    "matchedPrice": "float",
    "matchedAmount": "long",
    "cancelAmount": "long",
    "insertType": "int",
    "speculator": "int",
    "commTime": "string",
    "submitTime": "string",
    "clientID": "string",
    "exchangeID": "string",
    "operStation": "string",
    "accountID": "string",
    "instrumentType": "int",
    "sessionId": "long",
    "reservedType2": "int",
    "OrderSysID": "string",
    "customCategory": "string",
    "margin": "float",
    "fee": "float",
    "localOrderID": "long",
    "profitLossPrice": "float",
    "orderType": "int",
    "orderProperty": "char",
}
structDict['DFITCOrderCommRtnField'] = DFITCOrderCommRtnField

# Match query response data structure
DFITCMatchedRtnField = {
    "lRequestID": "long",
    "spdOrderID": "long",
    "accountID": "string",
    "exchangeID": "string",
    "instrumentID": "string",
    "buySellType": "short",
    "openClose": "int",
    "matchedPrice": "float",
    "matchedAmount": "long",
    "matchedMort": "float",
    "speculator": "int",
    "matchedTime": "string",
    "matchedID": "string",
    "localOrderID": "long",
    "clientID": "string",
    "matchType": "long",
    "instrumentType": "int",
    "sessionId": "long",
    "reservedType2": "int",
    "customCategory": "string",
    "fee": "float",
    "orderType": "int",
    "OrderSysID": "string",
}
structDict['DFITCMatchedRtnField'] = DFITCMatchedRtnField
# Instrument information response data structure
DFITCInstrumentRtnField = {
    "lRequestID": "long",
    "instrumentID": "string",
    "longMarginRatio": "float",
    "shortMarginRatio": "float",
    "longMarginRatioByVolume": "float",
    "shortMarginRatioByVolume": "float",
    "openFeeVolRatio": "float",
    "closeFeeVolRatio": "float",
    "closeTodayFeeVolRatio": "float",
    "openFeeAmtRatio": "float",
    "closeFeeAmtRatio": "float",
    "closeTodayFeeAmtRatio": "float",
    "orderTopLimit": "long",
    "contractMultiplier": "float",
    "minimumPriceChange": "float",
    "instrumentType": "int",
    "instrumentMaturity": "string",
    "computeMode": "int",
    "atMoneyNorm": "float",
    "upperLimitPrice": "float",
    "lowerLimitPrice": "float",
    "preClosePrice": "float",
    "preSettlementPrice": "float",
    "settlementPrice": "float",
    "preOpenInterest": "long",
    "optExecRatio": "float",
    "optExecRatioPerVol": "float",
}
structDict['DFITCInstrumentRtnField'] = DFITCInstrumentRtnField

# Depth market data (5-level order book snapshot)
DFITCDepthMarketDataField = {
    "tradingDay": "string",
    "instrumentID": "string",
    "exchangeID": "string",
    "exchangeInstID": "string",
    "lastPrice": "float",
    "preSettlementPrice": "float",
    "preClosePrice": "float",
    "preOpenInterest": "long",
    "openPrice": "float",
    "highestPrice": "float",
    "lowestPrice": "float",
    "Volume": "long",
    "turnover": "float",
    "openInterest": "long",
    "closePrice": "float",
    "settlementPrice": "float",
    "upperLimitPrice": "float",
    "lowerLimitPrice": "float",
    "preDelta": "float",
    "currDelta": "float",
    "UpdateTime": "string",
    "UpdateMillisec": "int",
    "BidPrice1": "float",
    "BidVolume1": "int",
    "AskPrice1": "float",
    "AskVolume1": "int",
    "BidPrice2": "float",
    "BidVolume2": "int",
    "AskPrice2": "float",
    "AskVolume2": "int",
    "BidPrice3": "float",
    "BidVolume3": "int",
    "AskPrice3": "float",
    "AskVolume3": "int",
    "BidPrice4": "float",
    "BidVolume4": "int",
    "AskPrice4": "float",
    "AskVolume4": "int",
    "BidPrice5": "float",
    "BidVolume5": "int",
    "AskPrice5": "float",
    "AskVolume5": "int",
    "AveragePrice": "float",
    "XSpeedTime": "string",
}
structDict['DFITCDepthMarketDataField'] = DFITCDepthMarketDataField
#******************** Option extended market data ********************
DFITCMarketDataFieldEx = {
    "FunctionID": "string",
    "tradingDay": "string",
    "UpdateTime": "string",
    "UpdateMillisec": "int",
    "instrumentID": "string",
    "ExtMarketData": "string",
}
structDict['DFITCMarketDataFieldEx'] = DFITCMarketDataFieldEx

#**********************************************************************
# Custom (condensed) market data
DFITCCustomMarketDataField = {
    "instrumentID": "string",
    "exchangeID": "string",
    "bidVolume1": "int",
    "bidPrice1": "float",
    "askVolume1": "int",
    "askPrice1": "float",
    "lastPrice": "float",
}
structDict['DFITCCustomMarketDataField'] = DFITCCustomMarketDataField

# Position detail query
DFITCPositionDetailField = {
    "lRequestID": "long",
    "accountID": "string",
    "instrumentID": "string",
    "instrumentType": "int",
}
structDict['DFITCPositionDetailField'] = DFITCPositionDetailField

# Position detail query response
DFITCPositionDetailRtnField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeID": "string",
    "instrumentID": "string",
    "buySellType": "short",
    "openPrice": "float",
    "volume": "long",
    "matchID": "string",
    "matchedDate": "string",
    "datePositionProfitLoss": "float",
    "dateCloseProfitLoss": "float",
    "floatProfitLoss": "float",
    "dMargin": "float",
    "speculator": "int",
    "clientID": "string",
    "preSettlementPrice": "float",
    "instrumentType": "int",
    "spdOrderID": "long",
    "customCategory": "string",
    "closeOrderVol": "long",
    "closeMatchVol": "long",
    "positionDateType": "int",
}
structDict['DFITCPositionDetailRtnField'] = DFITCPositionDetailRtnField

# User event notification information
DFITCTradingNoticeInfoField = {
    "accountID": "string",
    "SendTime": "string",
    "FieldContent": "string",
    "noticeType": "short",
}
structDict['DFITCTradingNoticeInfoField'] = DFITCTradingNoticeInfoField

# Instrument trading-status notification information
DFITCInstrumentStatusField = {
    "ExchangeID": "string",
    "InstrumentID": "string",
    "InstrumentStatus": "int",
    "TradingSegmentSN": "int",
    "EnterTime": "string",
    "EnterReason": "short",
}
structDict['DFITCInstrumentStatusField'] = DFITCInstrumentStatusField
#用户密码修改
DFITCResetPwdField = {}
DFITCResetPwdField["lRequestID"] = "long"
DFITCResetPwdField["accountID"] = "string"
DFITCResetPwdField["oldpasswd"] = "string"
DFITCResetPwdField["newpasswd"] = "string"
structDict['DFITCResetPwdField'] = DFITCResetPwdField
#用户密码修改返回信息
DFITCResetPwdRspField = {}
DFITCResetPwdRspField["lRequestID"] = "long"
DFITCResetPwdRspField["accountID"] = "string"
DFITCResetPwdRspField["execState"] = "int"
structDict['DFITCResetPwdRspField'] = DFITCResetPwdRspField
#账单确认
DFITCBillConfirmField = {}
DFITCBillConfirmField["lRequestID"] = "long"
DFITCBillConfirmField["accountID"] = "string"
DFITCBillConfirmField["date"] = "string"
DFITCBillConfirmField["confirmFlag"] = "int"
structDict['DFITCBillConfirmField'] = DFITCBillConfirmField
#账单确认响应
DFITCBillConfirmRspField = {}
DFITCBillConfirmRspField["lRequestID"] = "long"
DFITCBillConfirmRspField["accountID"] = "string"
DFITCBillConfirmRspField["execState"] = "int"
structDict['DFITCBillConfirmRspField'] = DFITCBillConfirmRspField
#交易编码查询
DFITCQryTradeCodeField = {}
DFITCQryTradeCodeField["lRequestID"] = "long"
DFITCQryTradeCodeField["accountID"] = "string"
structDict['DFITCQryTradeCodeField'] = DFITCQryTradeCodeField
# Trade code query response
DFITCQryTradeCodeRtnField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeCode": "string",
    "clientID": "string",
    "clientStatus": "int",
    "clientIDType": "int",
}
structDict['DFITCQryTradeCodeRtnField'] = DFITCQryTradeCodeRtnField

# Whether floating profit/loss is counted into equity
DFITCEquityComputModeRtnField = {
    "capConMode": "long",
    "priceNote": "int",
}
structDict['DFITCEquityComputModeRtnField'] = DFITCEquityComputModeRtnField

# Bill query request
DFITCQryBillField = {
    "lRequestID": "long",
    "accountID": "string",
    "date": "string",
}
structDict['DFITCQryBillField'] = DFITCQryBillField

# Bill query response
DFITCQryBillRtnField = {
    "lRequestID": "long",
    "accountID": "string",
    "message": "string",
}
structDict['DFITCQryBillRtnField'] = DFITCQryBillRtnField

# Vendor/product ID confirmation request
DFITCProductField = {
    "productID": "string",
    "vendorID": "string",
}
structDict['DFITCProductField'] = DFITCProductField

# Vendor/product ID confirmation response
DFITCProductRtnField = {
    "productID": "string",
    "vendorID": "string",
    "productOnlineCount": "long",
    "brokerInfoName": "string",
    "frontID": "int",
}
structDict['DFITCProductRtnField'] = DFITCProductRtnField

# Trading day query request
DFITCTradingDayField = {
    "lRequestID": "long",
}
structDict['DFITCTradingDayField'] = DFITCTradingDayField

# Trading day query response
DFITCTradingDayRtnField = {
    "lRequestID": "long",
    "date": "string",
}
structDict['DFITCTradingDayRtnField'] = DFITCTradingDayRtnField
# Quote-notice subscription request
DFITCQuoteSubscribeField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeID": "string",
}
structDict['DFITCQuoteSubscribeField'] = DFITCQuoteSubscribeField

# Quote-notice subscription response
DFITCQuoteSubscribeRspField = {
    "lRequestID": "long",
    "subscribeFlag": "int",
    "exchangeID": "string",
}
structDict['DFITCQuoteSubscribeRspField'] = DFITCQuoteSubscribeRspField

# Quote-notice unsubscription request
DFITCQuoteUnSubscribeField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeID": "string",
}
structDict['DFITCQuoteUnSubscribeField'] = DFITCQuoteUnSubscribeField

# Quote-notice unsubscription response
DFITCQuoteUnSubscribeRspField = {
    "lRequestID": "long",
    "subscribeFlag": "int",
    "exchangeID": "string",
}
structDict['DFITCQuoteUnSubscribeRspField'] = DFITCQuoteUnSubscribeRspField

# Quote-notice subscription return (push)
DFITCQuoteSubscribeRtnField = {
    "quoteID": "string",
    "exchangeID": "string",
    "instrumentID": "string",
    "source": "short",
    "quoteTime": "string",
}
structDict['DFITCQuoteSubscribeRtnField'] = DFITCQuoteSubscribeRtnField

# Quote-notice query request
DFITCQryQuoteNoticeField = {
    "accountID": "string",
    "lRequestID": "long",
    "exchangeID": "string",
    "instrumentID": "string",
}
structDict['DFITCQryQuoteNoticeField'] = DFITCQryQuoteNoticeField

# Quote-notice query response
DFITCQryQuoteNoticeRtnField = {
    "lRequestID": "long",
    "quoteID": "string",
    "exchangeID": "string",
    "instrumentID": "string",
    "source": "short",
    "quoteTime": "string",
}
structDict['DFITCQryQuoteNoticeRtnField'] = DFITCQryQuoteNoticeRtnField
# Market-maker quote insert request
DFITCQuoteInsertField = {
    "accountID": "string",
    "lRequestID": "long",
    "localOrderID": "long",
    "insertType": "int",
    "instrumentID": "string",
    "quoteID": "string",
    "instrumentType": "int",
    "bOrderAmount": "long",
    "sOrderAmount": "long",
    "bInsertPrice": "float",
    "sInsertPrice": "float",
    "bOpenCloseType": "int",
    "sOpenCloseType": "int",
    "bSpeculator": "int",
    "sSpeculator": "int",
    "stayTime": "int",
    "customCategory": "string",
}
structDict['DFITCQuoteInsertField'] = DFITCQuoteInsertField

# Market-maker quote insert response
DFITCQuoteRspField = {
    "localOrderID": "long",
    "spdOrderID": "long",
    "lRequestID": "long",
    "fee": "float",
    "margin": "float",
    "orderTime": "string",
    "orderStatus": "short",
    "customCategory": "string",
    "instrumentID": "string",
    "accountID": "string",
    "quoteID": "string",
    "sessionID": "long",
    "clientID": "string",
}
structDict['DFITCQuoteRspField'] = DFITCQuoteRspField

# Market-maker quote order return (push)
DFITCQuoteRtnField = {
    "exchangeID": "string",
    "clientID": "string",
    "orderSysID": "string",
    "instrumentID": "string",
    "localOrderID": "long",
    "seatCode": "string",
    "bOpenCloseType": "int",
    "sOpenCloseType": "int",
    "speculator": "int",
    "bOrderAmount": "long",
    "sOrderAmount": "long",
    "bInsertPrice": "float",
    "sInsertPrice": "float",
    "spdOrderID": "long",
    "accountID": "string",
    "instrumentType": "int",
    "suspendTime": "string",
    "entrusTeller": "string",
    "orderStatus": "short",
    "sessionID": "long",
    "quoteID": "string",
    "errorMsg": "string",
    "customCategory": "string",
}
structDict['DFITCQuoteRtnField'] = DFITCQuoteRtnField
# Market-maker quote cancellation return (push)
DFITCQuoteCanceledRtnField = {
    "exchangeID": "string",
    "clientID": "string",
    "orderSysID": "string",
    "instrumentID": "string",
    "localOrderID": "long",
    "seatCode": "string",
    "bOpenCloseType": "int",
    "sOpenCloseType": "int",
    "speculator": "int",
    "spdOrderID": "long",
    "accountID": "string",
    "entrusTeller": "string",
    "orderStatus": "short",
    "cancelAmount": "long",
    "fee": "float",
    "margin": "float",
    "sessionID": "long",
    "buySellType": "short",
    "quoteID": "string",
    "canceledTime": "string",
    "customCategory": "string",
}
structDict['DFITCQuoteCanceledRtnField'] = DFITCQuoteCanceledRtnField

# Market-maker match (fill) return (push)
DFITCQuoteMatchRtnField = {
    "exchangeID": "string",
    "clientID": "string",
    "instrumentID": "string",
    "seatCode": "string",
    "localOrderID": "long",
    "openCloseType": "int",
    "speculator": "int",
    "spdOrderID": "long",
    "OrderSysID": "string",
    "matchID": "string",
    "matchedAmount": "long",
    "matchedPrice": "float",
    "accountID": "string",
    "turnover": "float",
    "entrusTeller": "string",
    "matchedTime": "string",
    "fee": "float",
    "insertPrice": "float",
    "orderAmount": "long",
    "orderStatus": "short",
    "margin": "float",
    "buySellType": "short",
    "closeTodayAmount": "long",
    "closePrice": "float",
    "closeTodayPrice": "float",
    "adjustmentInfo": "string",
    "frozenCapita": "float",
    "dateCloseProfitLoss": "float",
    "instrumentType": "int",
    "sessionID": "long",
    "largeMarginDirect": "string",
    "quoteID": "string",
    "customCategory": "string",
}
structDict['DFITCQuoteMatchRtnField'] = DFITCQuoteMatchRtnField
# Cancel-all-orders request
DFITCCancelAllOrderField = {
    "lRequestID": "long",
    "accountID": "string",
    "exchangeID": "string",
}
structDict['DFITCCancelAllOrderField'] = DFITCCancelAllOrderField

# Cancel-all-orders response
DFITCCancelAllOrderRspField = {
    "lRequestID": "long",
    "accountID": "string",
    "orderStatus": "short",
}
structDict['DFITCCancelAllOrderRspField'] = DFITCCancelAllOrderRspField

# For-quote (RFQ) request
DFITCForQuoteField = {
    "lRequestID": "long",
    "accountID": "string",
    "instrumentID": "string",
}
structDict['DFITCForQuoteField'] = DFITCForQuoteField

# For-quote request response
DFITCForQuoteRspField = {
    "lRequestID": "long",
    "spdOrderID": "long",
    "commTime": "string",
}
structDict['DFITCForQuoteRspField'] = DFITCForQuoteRspField

# For-quote return (push)
DFITCForQuoteRtnField = {
    "spdOrderID": "long",
    "sessionID": "long",
    "instrumentID": "string",
    "exchangeID": "string",
    "accountID": "string",
    "orderStatus": "short",
}
structDict['DFITCForQuoteRtnField'] = DFITCForQuoteRtnField

# Market-maker quote order query request
DFITCQuoteOrderField = {
    "lRequestID": "long",
    "exchangeID": "string",
    "accountID": "string",
    "instrumentID": "string",
    "localOrderID": "long",
    "spdOrderID": "long",
    "orderStatus": "short",
}
structDict['DFITCQuoteOrderField'] = DFITCQuoteOrderField
# Market-maker quote order query response
DFITCQuoteOrderRtnField = {
    "lRequestID": "long",
    "spdOrderID": "long",
    "orderStatus": "short",
    "instrumentID": "string",
    "margin": "float",
    "fee": "float",
    "localOrderID": "long",
    "accountID": "string",
    "commTime": "string",
    "submitTime": "string",
    "exchangeID": "string",
    "bOrderAmount": "long",
    "bMatchedAmount": "long",
    "bCancelAmount": "long",
    "bInsertPrice": "float",
    "bMatchedPrice": "float",
    "bOpenCloseType": "int",
    "sOrderAmount": "long",
    "sMatchedAmount": "long",
    "sCancelAmount": "long",
    "sInsertPrice": "float",
    "sMatchedPrice": "float",
    "sOpenCloseType": "int",
    "operStation": "string",
    "sessionID": "long",
    "quoteID": "string",
    "customCategory": "string",
}
structDict['DFITCQuoteOrderRtnField'] = DFITCQuoteOrderRtnField

# Transfer bank query request
DFITCQryTransferBankField = {
    "accountID": "string",
    "bankID": "string",
    "lRequestID": "long",
}
structDict['DFITCQryTransferBankField'] = DFITCQryTransferBankField

# Transfer bank response
DFITCTransferBankRspField = {
    "accountID": "string",
    "bankID": "string",
    "bankAccount": "string",
    "currency": "string",
    "registDate": "string",
    "lRequestID": "long",
}
structDict['DFITCTransferBankRspField'] = DFITCTransferBankRspField
# Transfer serial (history) query request
DFITCQryTransferSerialField = {
    "accountID": "string",
    "bankID": "string",
    "bankAccount": "string",
    "lRequestID": "long",
}
structDict['DFITCQryTransferSerialField'] = DFITCQryTransferSerialField

# Transfer serial response
DFITCTransferSerialRspField = {
    "accountID": "string",
    "bankID": "string",
    "bankAccount": "string",
    "currency": "string",
    "applyNum": "int",
    "type": "int",
    "tradeAmount": "float",
    "curFutAccountFund": "float",
    "bankSerialNum": "int",
    "reqTransferTime": "string",
    "dealTransferTime": "string",
    "procResult": "int",
    "lRequestID": "long",
}
structDict['DFITCTransferSerialRspField'] = DFITCTransferSerialRspField

# Fund transfer request
DFITCReqTransferField = {
    "bankID": "string",
    "bankAccount": "string",
    "bankPassword": "string",
    "accountID": "string",
    "password": "string",
    "currency": "string",
    "tradeAmount": "float",
    "lRequestID": "long",
}
structDict['DFITCReqTransferField'] = DFITCReqTransferField

# Fund transfer response
DFITCTransferRspField = {
    "bankID": "string",
    "bankAccount": "string",
    "accountID": "string",
    "tradeAmount": "float",
    "applyNumber": "int",
    "lRequestID": "long",
}
structDict['DFITCTransferRspField'] = DFITCTransferRspField

# Fund transfer notification (push)
DFITCTransferRtnField = {
    "accountID": "string",
    "bankID": "string",
    "bankAccount": "string",
    "type": "int",
    "tradeAmount": "float",
    "bankSerialNum": "int",
    "applyNumber": "int",
    "sessionID": "long",
}
structDict['DFITCTransferRtnField'] = DFITCTransferRtnField
# Withdrawal reversal notice initiated by the bank or the host system
DFITCRepealRtnField = {
    "accountID": "string",
    "bankID": "string",
    "bankAccount": "string",
    "type": "int",
    "tradeAmount": "float",
    "bankSerialNum": "int",
    "repealSerial": "int",
}
structDict['DFITCRepealRtnField'] = DFITCRepealRtnField

# Exchange status query request
DFITCQryExchangeStatusField = {
    "lRequestID": "long",
    "exchangeID": "string",
}
structDict['DFITCQryExchangeStatusField'] = DFITCQryExchangeStatusField

# Exchange status query response
DFITCExchangeStatusRspField = {
    "lRequestID": "long",
    "exchangeStatus": "int",
    "exchangeID": "string",
}
structDict['DFITCExchangeStatusRspField'] = DFITCExchangeStatusRspField

# Exchange status notification (push)
DFITCExchangeStatusRtnField = {
    "exchangeID": "string",
    "instrumentID": "string",
    "exchangeStatus": "int",
}
structDict['DFITCExchangeStatusRtnField'] = DFITCExchangeStatusRtnField

# Market data query request
DFITCQryDepthMarketDataField = {
    "lRequestID": "long",
    "instrumentID": "string",
    "exchangeID": "string",
}
structDict['DFITCQryDepthMarketDataField'] = DFITCQryDepthMarketDataField

# For-quote query request
DFITCQryForQuoteField = {
    "lRequestID": "long",
    "accountID": "string",
    "instrumentID": "string",
    "exchangeID": "string",
}
structDict['DFITCQryForQuoteField'] = DFITCQryForQuoteField

# For-quote query response
DFITCQryForQuoteRtnField = {
    "lRequestID": "long",
    "accountID": "string",
    "spdOrderID": "long",
    "instrumentID": "string",
    "exchangeID": "string",
    "SuspendTime": "string",
    "orderStatus": "short",
}
structDict['DFITCQryForQuoteRtnField'] = DFITCQryForQuoteRtnField
| 36.593826 | 81 | 0.796723 |
4d4c6f7de86c9bf7f971e735202d70651ad55252 | 2,034 | py | Python | pastepwn/analyzers/tests/adobekeyanalyzer_test.py | DaRuudii/pastepwn | f21e82f703c6c5e9bd8b3dbc5d75d4d90583fa63 | [
"MIT"
] | null | null | null | pastepwn/analyzers/tests/adobekeyanalyzer_test.py | DaRuudii/pastepwn | f21e82f703c6c5e9bd8b3dbc5d75d4d90583fa63 | [
"MIT"
] | null | null | null | pastepwn/analyzers/tests/adobekeyanalyzer_test.py | DaRuudii/pastepwn | f21e82f703c6c5e9bd8b3dbc5d75d4d90583fa63 | [
"MIT"
] | 1 | 2019-10-09T13:09:17.000Z | 2019-10-09T13:09:17.000Z | # -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.adobekeyanalyzer import AdobeKeyAnalyzer
class TestAdobeKeyAnalyzer(unittest.TestCase):
    """Unit tests for AdobeKeyAnalyzer paste matching."""

    def setUp(self):
        self.analyzer = AdobeKeyAnalyzer(None)
        self.paste = mock.Mock()

    def test_match_positive(self):
        """Bodies containing a well-formed Adobe key must match."""
        positive_bodies = [
            # Plain keys from adobe key dumps
            "1118-1993-2045-6322-6067-9110",
            "1118-1551-7298-8490-8910-4435",
            "1118-1088-9818-3636-2479-0297",
            "1118-1194-1581-4556-8113-6593",
            # Key embedded in a sentence
            "Hey, I have your key right here: 1118-1470-8779-0264-4009-3244!",
            # Newline-separated keys
            "1118-1993-2046-6322-6067-9110\n1118-1470-8779-0264-4009-3244",
        ]
        for body in positive_bodies:
            self.paste.body = body
            self.assertTrue(self.analyzer.match(self.paste))

    def test_match_negative(self):
        """Empty, missing, or malformed bodies must not match."""
        negative_bodies = [
            "",
            None,
            "1118-1470-8779-0264-4009-32445",  # last group too long
            "1118-1470-8779-0264-4009-324",    # last group too short
            "1118-1194-1581-4556-8113-659A",   # non-digit character
        ]
        for body in negative_bodies:
            self.paste.body = body
            self.assertFalse(self.analyzer.match(self.paste))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
0734ef44fc85fef593531157b7a608cfbf9ee937 | 4,092 | py | Python | setup.py | vidartf/ipydatawidgets | da6db7a9f9ace74c87583e633b22c3634526520e | [
"BSD-3-Clause"
] | 29 | 2017-08-15T11:56:06.000Z | 2022-03-18T19:24:47.000Z | setup.py | vidartf/ipydatawidgets | da6db7a9f9ace74c87583e633b22c3634526520e | [
"BSD-3-Clause"
] | 21 | 2017-08-15T13:00:07.000Z | 2021-12-27T21:38:01.000Z | setup.py | vidartf/ipydatawidgets | da6db7a9f9ace74c87583e633b22c3634526520e | [
"BSD-3-Clause"
] | 8 | 2017-09-18T08:14:22.000Z | 2021-08-05T20:21:14.000Z | #!/usr/bin/env python
# coding: utf-8

# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

from __future__ import print_function

import sys

# The name of the project.
name = 'ipydatawidgets'

#-----------------------------------------------------------------------------
# Minimal Python version sanity check
#-----------------------------------------------------------------------------

# Require 2.7+, or 3.3+ on the Python 3 line.
# Note: 3.3 is untested, but we'll still allow it.
if sys.version_info[:2] < (2, 7) or \
        (sys.version_info[0] >= 3 and sys.version_info[:2] < (3, 3)):
    print("ERROR: %s requires Python version 2.7 or 3.3 or above." % name,
          file=sys.stderr)
    sys.exit(1)
#-----------------------------------------------------------------------------
# get on with it
#-----------------------------------------------------------------------------

import io
import os
from glob import glob

from setuptools import setup, find_packages

from jupyter_packaging import (create_cmdclass, install_npm, ensure_targets,
                               combine_commands)

pjoin = os.path.join

here = os.path.abspath(os.path.dirname(__file__))

# Representative files that should exist after a successful build
jstargets = [
    os.path.join(here, name, 'nbextension', 'static', 'extension.js'),
    os.path.join(here, name, 'nbextension', 'static', 'index.js'),
    os.path.join(here, 'packages', 'jlabextension', 'build', 'index.js'),
]

# Read __version__ out of <name>/_version.py without importing the package.
version_ns = {}
with io.open(pjoin(here, name, '_version.py'), encoding="utf8") as f:
    exec(f.read(), {}, version_ns)

# Create the 'js' command and register the built JS assets / config files to
# be installed as data files (nbextension, labextension, notebook config).
cmdclass = create_cmdclass(
    'js',
    data_files_spec=[
        ('share/jupyter/nbextensions/jupyter-datawidgets',
         name + '/nbextension/static',
         '*.js'),
        ('share/jupyter/nbextensions/jupyter-datawidgets',
         name + '/nbextension/static',
         '*.js.map'),
        ('share/jupyter/lab/extensions',
         'packages/jlabextension/dist',
         'jupyterlab-datawidgets-*.tgz'),
        ('share/jupyter/labextensions/jupyterlab-datawidgets',
         'packages/jlabextension/dist/jupyterlab-datawidgets',
         '**/*.*'),
        ('etc/jupyter/nbconfig',
         'jupyter-config',
         '**/*.json'),
    ],)

# The 'js' command runs the npm build, then verifies the expected outputs.
cmdclass['js'] = combine_commands(
    install_npm(here),
    ensure_targets(jstargets),
)
# Core package metadata handed to setup(); setuptools-only arguments are
# merged in further below.
setup_args = {
    'name': name,
    'description': "A set of widgets to help facilitate reuse of large datasets across widgets",
    'version': version_ns['__version__'],
    'scripts': glob(pjoin('scripts', '*')),
    'cmdclass': cmdclass,
    'packages': find_packages(here),
    'include_package_data': True,
    'author': 'Jupyter Development Team',
    'author_email': 'jupyter@googlegroups.com',
    'url': 'https://github.com/vidartf/ipydatawidgets',
    'license': 'BSD',
    'platforms': "Linux, Mac OS X, Windows",
    'keywords': ['Jupyter', 'Widgets', 'IPython'],
    'classifiers': [
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Framework :: Jupyter',
    ],
}
# Runtime dependencies of the package.
install_requires = [
    'ipywidgets>=7.0.0',
    'numpy',
    'six',
    'traittypes>=0.2.0',
]

# Optional dependency groups (pip install ipydatawidgets[test] etc.).
extras_require = {
    'test': [
        'pytest>=4',
        'pytest-cov',
        'nbval>=0.9.2',
    ],
    'docs': [
        'sphinx',
        'recommonmark',
        'sphinx_rtd_theme'
    ],
}

# Arguments that only apply when setuptools is driving the build.
setuptools_args = {
    'install_requires': install_requires,
    'extras_require': extras_require,
}
# Merge the setuptools-only arguments into setup_args.  The original code
# called setup_args.update(setuptools_args) both inside the branch and again
# unconditionally afterwards; a single unconditional update has the same
# net effect, so the duplicate call is removed.
if 'setuptools' in sys.modules:
    # setuptools generates entry points itself; drop plain distutils scripts.
    setup_args.pop('scripts', None)

setup_args.update(setuptools_args)

if __name__ == '__main__':
    setup(**setup_args)
288654c964997f557d1e537e6ac02c0bb3bc46a7 | 755 | py | Python | tpdatasrc/tpgamefiles/scr/Spell391 - Remove Curse.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 69 | 2015-05-05T14:09:25.000Z | 2022-02-15T06:13:04.000Z | tpdatasrc/tpgamefiles/scr/Spell391 - Remove Curse.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 457 | 2015-05-01T22:07:45.000Z | 2022-03-31T02:19:10.000Z | tpdatasrc/tpgamefiles/scr/Spell391 - Remove Curse.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 25 | 2016-02-04T21:19:53.000Z | 2021-11-15T23:14:51.000Z | from toee import *
def OnBeginSpellCast( spell ):
	# Debug logging, then the pre-cast abjuration visual on the caster.
	print "Remove Curse OnBeginSpellCast"
	print "spell.target_list=", spell.target_list
	print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
	game.particles( "sp-abjuration-conjure", spell.caster )
def OnSpellEffect ( spell ):
	# Apply the Remove Curse condition to the first (only) target.
	print "Remove Curse OnSpellEffect"
	# Instantaneous effect: zero duration; the condition handles the removal.
	spell.duration = 0
	target = spell.target_list[0]
	# NOTE(review): game.particles is called twice with the same effect and
	# the first handle is discarded -- looks redundant; confirm whether two
	# particle systems are intended before removing either call.
	game.particles( 'sp-Remove Curse', target.obj )
	target.partsys_id = game.particles( 'sp-Remove Curse', target.obj )
	target.obj.condition_add_with_args( 'sp-Remove Curse', spell.id, spell.duration, 0 )
	# End the spell immediately (instantaneous).
	spell.spell_end(spell.id, 1)
def OnBeginRound( spell ):
	# No per-round behavior; the spell is instantaneous.
	print "Remove Curse OnBeginRound"
def OnEndSpellCast( spell ):
	# No cleanup needed; logging only.
	print "Remove Curse OnEndSpellCast"
90f6bfaef038e84b133cc8c61e3ec0e3bcfa43c2 | 17,628 | py | Python | webStorm-APICloud/python_tools/Lib/ntpath.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/ntpath.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/ntpath.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).

def normcase(s):
    """Normalize case of pathname.

    Makes all characters lowercase and all slashes into backslashes.
    """
    backslashed = s.replace("/", "\\")
    return backslashed.lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.

def isabs(s):
    """Test whether a path is absolute"""
    # Drop any drive letter; what remains decides absoluteness.
    tail = splitdrive(s)[1]
    return tail.startswith(('/', '\\'))
# Join two (or more) paths.

def join(a, *p):
    """Join two or more pathname components, inserting "\\" as needed.

    If any component is an absolute path, all previous path components
    will be discarded."""
    path = a
    for b in p:
        b_wins = 0  # set to 1 iff b makes path irrelevant
        if path == "":
            b_wins = 1
        elif isabs(b):
            # This probably wipes out path so far. However, it's more
            # complicated if path begins with a drive letter:
            # 1. join('c:', '/a') == 'c:/a'
            # 2. join('c:/', '/a') == 'c:/a'
            # But
            # 3. join('c:/a', '/b') == '/b'
            # 4. join('c:', 'd:/') = 'd:/'
            # 5. join('c:/', 'd:/') = 'd:/'
            if path[1:2] != ":" or b[1:2] == ":":
                # Path doesn't start with a drive letter, or cases 4 and 5.
                b_wins = 1
            # Else path has a drive letter, and b doesn't but is absolute.
            elif len(path) > 3 or (len(path) == 3 and
                                   path[-1] not in "/\\"):
                # case 3
                b_wins = 1

        if b_wins:
            path = b
        else:
            # Join, and ensure there's a separator.
            assert len(path) > 0
            if path[-1] in "/\\":
                if b and b[0] in "/\\":
                    path += b[1:]
                else:
                    path += b
            elif path[-1] == ":":
                # Drive-relative: 'c:' + 'a' -> 'c:a' (no separator added).
                path += b
            elif b:
                if b[0] in "/\\":
                    path += b
                else:
                    path += "\\" + b
            else:
                # path is not empty and does not end with a backslash,
                # but b is empty; since, e.g., split('a/') produces
                # ('a', ''), it's best if join() adds a backslash in
                # this case.
                path += '\\'

    return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p

def splitdrive(p):
    """Split a pathname into drive and path specifiers.

    Returns a 2-tuple "(drive,path)"; either part may be empty.
    """
    # A drive is exactly one character followed by a colon.
    has_drive = p[1:2] == ':'
    return (p[:2], p[2:]) if has_drive else ('', p)
# Parse UNC paths

def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.

    Return a 2-tuple (unc, rest); either part may be empty.
    If unc is not empty, it has the form '//host/mount' (or similar
    using backslashes). unc+rest is always the input path.
    Paths containing drive letters never have an UNC part.
    """
    if p[1:2] == ':':
        # Drive letter present -- never a UNC path.
        return '', p
    if p[:2] not in ('//', '\\\\'):
        return '', p
    # UNC syntax: \\machine\mountpoint\directories...
    # '\\machine\mountpoint' plays the role of the drive letter.
    normp = normcase(p)
    host_end = normp.find('\\', 2)
    if host_end == -1:
        # Illegal UNC path: no separator after the host part.
        return ("", p)
    mount_end = normp.find('\\', host_end + 1)
    if mount_end == -1:
        mount_end = len(p)
    return p[:mount_end], p[mount_end:]
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname.

    Return tuple (head, tail) where tail is everything after the final
    slash.  Either part may be empty.
    """
    drive, rest = splitdrive(p)
    # Locate the start of the last component: walk backwards until a
    # separator of either flavour is found.
    cut = len(rest)
    while cut and rest[cut - 1] not in '/\\':
        cut = cut - 1
    head, tail = rest[:cut], rest[cut:]  # tail now contains no slashes
    # Drop trailing separators from head, unless head consists entirely
    # of separators (i.e. it denotes the root).
    stripped = head.rstrip('/\\')
    if stripped:
        head = stripped
    return drive + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared implementation in genericpath, passing this
    # module's separator conventions (primary sep, alternate sep, and the
    # extension separator).
    return genericpath._splitext(p, sep, altsep, extsep)
# Reuse genericpath's docstring so help(splitext) stays in sync with the
# actual implementation.
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Returns the final component of a pathname"""
    _, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Returns the directory component of a pathname"""
    head, _ = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for symbolic link.

    On WindowsNT/95 and OS/2 always returns false.
    """
    # These platforms have no symlink support, so every path -- whatever
    # its value -- is reported as a non-link.
    return False
# alias exists to lexists
# Without symlink support there is no difference between exists() and
# lexists() (no dangling links are possible), so one is an alias of the
# other.
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Test whether a path is a mount point (defined as root of drive)"""
    unc, rest = splitunc(path)
    if unc:
        # A UNC mount point may be followed by at most one separator.
        return rest in ("", "/", "\\")
    # Otherwise strip any drive letter; a root is exactly one separator.
    rest = splitdrive(path)[1]
    return len(rest) == 1 and rest[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the tree rooted at top (including top itself,
    but excluding '.' and '..'), call func(arg, dirname, fnames), where
    fnames is the list of names of the entries in dirname.  func may
    modify fnames in place (e.g. via del or slice assignment) to prune
    or reorder the recursion.  arg is passed through untouched; it can
    be used, e.g., to carry a filename pattern or accumulate statistics.
    Passing None for arg is common.
    """
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.")
    try:
        entries = os.listdir(top)
    except os.error:
        # Unreadable/vanished directory: skip it silently, matching the
        # historical os.path.walk behaviour.
        return
    func(arg, top, entries)
    for entry in entries:
        child = join(top, entry)
        if isdir(child):
            walk(child, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    # Only paths that literally begin with '~' are candidates.
    if path[:1] != '~':
        return path
    # Scan past the optional user name: everything up to the first
    # separator (of either flavour) belongs to the '~'/'~user' prefix.
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # Resolve the current user's home directory, trying environment
    # variables in order: HOME, then USERPROFILE, then the
    # HOMEDRIVE + HOMEPATH pair.
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        # No usable environment variable: return the path unchanged.
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1: #~user
        # '~user': swap the last component of the current user's home
        # for the requested user name.  This assumes all home
        # directories are siblings under one parent.
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verifed in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.
    Unknown variables are left unchanged."""
    # Fast path: nothing to expand.
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    # NOTE: the loop repeatedly reassigns `path`/`pathlen`/`index` to the
    # still-unprocessed suffix, so `index` is always relative to the
    # *current* value of `path`, not the original argument.
    while index < pathlen:
        c = path[index]
        if c == '\'': # no expansion within single quotes
            # Copy everything up to (and including) the closing quote
            # verbatim; an unterminated quote copies the rest unchanged.
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                res = res + path
                index = pathlen - 1
        elif c == '%': # variable or '%'
            if path[index + 1:index + 2] == '%':
                # '%%' is an escaped literal '%'.
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # No closing '%': keep the remainder verbatim.
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        # Unknown variable: leave '%var%' unchanged.
                        res = res + '%' + var + '%'
        elif c == '$': # variable or '$$'
            if path[index + 1:index + 2] == '$':
                # '$$' is an escaped literal '$'.
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                # ${varname} form.
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # No closing '}': keep the remainder verbatim.
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # Bare $varname form: consume the longest run of
                # characters from `varchars`.
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    # Back up one so the terminating character is
                    # re-examined by the main loop.
                    index = index - 1
        else:
            # Ordinary character: copy through.
            res = res + c
        index = index + 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    # Canonicalise separators first so only backslashes remain.
    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # We need to be careful here. If the prefix is empty, and the path starts
    # with a backslash, it could either be an absolute path on the current
    # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
    # is therefore imperative NOT to collapse multiple backslashes blindly in
    # that case.
    # The code below preserves multiple backslashes when there is no drive
    # letter. This means that the invalid filename \\\a\b is preserved
    # unchanged, where a\\\b is normalised to a\b. It's not clear that there
    # is any better behaviour for such edge cases.
    if prefix == '':
        # No drive letter - preserve initial backslashes
        while path[:1] == "\\":
            prefix = prefix + "\\"
            path = path[1:]
    else:
        # We have a drive letter - collapse initial backslashes
        if path.startswith("\\"):
            prefix = prefix + "\\"
            path = path.lstrip("\\")
    comps = path.split("\\")
    i = 0
    # Collapse '.' and empty components; fold 'dir\..' pairs; a leading
    # '..' is dropped only when the prefix is absolute (it cannot go
    # above the root), otherwise it is kept for relative paths.
    while i < len(comps):
        if comps[i] in ('.', ''):
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                del comps[i]
            else:
                i += 1
        else:
            i += 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append('.')
    return prefix + "\\".join(comps)
# Return an absolute path.
# Pick an abspath implementation: the native Windows one when the `nt`
# module is importable, otherwise a portable emulation.
try:
    from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
    def abspath(path):
        """Return the absolute version of a path."""
        # Emulation: anchor relative paths at the current directory,
        # then normalise.
        if not isabs(path):
            path = join(os.getcwd(), path)
        return normpath(path)
else: # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path: # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass # Bad path - return unchanged.
        else:
            path = os.getcwd()
        return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
# getwindowsversion()[3] is the platform field; >= 2 means the NT line.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
                              sys.getwindowsversion()[3] >= 2)
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    # Compare the absolute forms, component by component.
    start_list = abspath(start).split(sep)
    path_list = abspath(path).split(sep)
    # The first component holds the drive (or UNC prefix); a relative
    # path cannot span two drives.  Comparison is case-insensitive,
    # as Windows filenames are.
    if start_list[0].lower() != path_list[0].lower():
        unc_path, rest = splitunc(path)
        unc_start, rest = splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                                                % (path_list[0], start_list[0]))
    # Work out how much of the filepath is shared by start and path.
    for i in range(min(len(start_list), len(path_list))):
        if start_list[i].lower() != path_list[i].lower():
            break
    else:
        # Loop ran to completion: one list is a prefix of the other, so
        # the whole shorter list (i+1 components) is shared.
        i += 1
    # Climb out of the unshared part of start, then descend into path.
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| 35.39759 | 81 | 0.537157 |
b899d52606c290a2a8d0b48a7c2519873deba04f | 10,722 | py | Python | main_pretrain.py | jkhu29/SoCo | 1cef465ce5bdc975a72d3d869147ebeb6031781d | [
"MIT"
] | null | null | null | main_pretrain.py | jkhu29/SoCo | 1cef465ce5bdc975a72d3d869147ebeb6031781d | [
"MIT"
] | null | null | null | main_pretrain.py | jkhu29/SoCo | 1cef465ce5bdc975a72d3d869147ebeb6031781d | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# SoCo
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Yue Gao
# --------------------------------------------------------
import json
import math
import os
import time
from shutil import copyfile
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.backends import cudnn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from contrast import models, resnet
from contrast.data import get_loader
from contrast.lars import LARS, add_weight_decay
from contrast.logger import setup_logger
from contrast.lr_scheduler import get_scheduler
from contrast.option import parse_option
from contrast.util import AverageMeter
from converter_detectron2.convert_detectron2_C4 import convert_detectron2_C4
from converter_detectron2.convert_detectron2_Head import convert_detectron2_Head
from converter_mmdetection.convert_mmdetection_Head import convert_mmdetection_Head
try:
from apex import amp # type: ignore
except ImportError:
amp = None
def set_random_seed(seed, deterministic=False):
    """Seed every random number generator used during training.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    import random

    import numpy as np

    # Seed Python, NumPy, and both CPU and (all) CUDA torch generators.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        # Reproducible (possibly slower) CUDNN kernel selection.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def build_model(args):
    """Build the contrastive model and its optimizer from the parsed args.

    Returns a (model, optimizer) pair; the model is moved to the current
    CUDA device, and optionally wrapped by apex AMP.
    """
    # Look up the backbone and the contrastive framework by name.
    encoder = resnet.__dict__[args.arch]
    model = models.__dict__[args.model](encoder, args).cuda()
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(
            model.parameters(),
            # Linear LR scaling rule: lr = base_lr * total_batch / 256.
            lr=args.batch_size * dist.get_world_size() / 256 * args.base_learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay
        )
    elif args.optimizer == 'lars':
        # Weight decay is applied selectively (bias/norm params excluded).
        params = add_weight_decay(model, args.weight_decay)
        optimizer = torch.optim.SGD(
            params,
            # lr=args.batch_size * dist.get_world_size() / 256 * args.base_learning_rate,
            # NOTE(review): unlike the 'sgd' branch, the LR here is NOT
            # scaled by world size -- presumably intentional for LARS.
            lr=args.base_learning_rate,
            momentum=args.momentum
        )
        optimizer = LARS(optimizer)
    else:
        raise NotImplementedError
    if args.amp_opt_level != "O0":
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
    # model = DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False, find_unused_parameters=True)
    return model, optimizer
def load_pretrained(model, pretrained_model):
    """Load pretrained weights into `model` from a checkpoint file.

    Missing keys keep the model's current values because the checkpoint's
    state dict is merged into (rather than replacing) the model's.
    """
    ckpt = torch.load(pretrained_model, map_location='cpu')
    state_dict = ckpt['model']
    model_dict = model.state_dict()
    # Merge: checkpoint entries override, everything else is preserved.
    model_dict.update(state_dict)
    model.load_state_dict(model_dict)
    logger.info(f"==> loaded checkpoint '{pretrained_model}' (epoch {ckpt['epoch']})")
def load_checkpoint(args, model, optimizer, scheduler, sampler=None):
    """Restore full training state (model/optimizer/scheduler/amp/sampler)
    from `args.resume`, and set `args.start_epoch` to resume training.
    """
    logger.info(f"=> loading checkpoint '{args.resume}'")
    checkpoint = torch.load(args.resume, map_location='cpu')
    # Resume from the epoch after the one that was saved.
    args.start_epoch = checkpoint['epoch'] + 1
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    # AMP state exists only if both the current run and the saved run
    # used mixed precision.
    if args.amp_opt_level != "O0" and checkpoint['opt'].amp_opt_level != "O0":
        amp.load_state_dict(checkpoint['amp'])
    if args.use_sliding_window_sampler:
        sampler.load_state_dict(checkpoint['sampler'])
    logger.info(f"=> loaded successfully '{args.resume}' (epoch {checkpoint['epoch']})")
    # Free the checkpoint tensors eagerly to lower peak GPU memory.
    del checkpoint
    torch.cuda.empty_cache()
def save_checkpoint(args, epoch, model, optimizer, scheduler, sampler=None):
    """Save full training state for `epoch` and refresh 'current.pth'.

    Writes ckpt_epoch_<epoch>.pth and copies it over current.pth so that
    auto-resume always finds the latest checkpoint.
    """
    logger.info('==> Saving...')
    state = {
        'opt': args,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
        'epoch': epoch,
    }
    if args.amp_opt_level != "O0":
        state['amp'] = amp.state_dict()
    if args.use_sliding_window_sampler:
        state['sampler'] = sampler.state_dict()
    file_name = os.path.join(args.output_dir, f'ckpt_epoch_{epoch}.pth')
    torch.save(state, file_name)
    copyfile(file_name, os.path.join(args.output_dir, 'current.pth'))
def convert_checkpoint(args):
    """Convert the final checkpoint into detectron2 and mmdetection formats
    so the pretrained backbone can be used directly for detection finetuning.
    """
    file_name = os.path.join(args.output_dir, 'current.pth')
    output_file_name_C4 = os.path.join(args.output_dir, 'current_detectron2_C4.pkl')
    output_file_name_Head = os.path.join(args.output_dir, 'current_detectron2_Head.pkl')
    output_file_name_mmdet_Head = os.path.join(args.output_dir, 'current_mmdetection_Head.pth')
    convert_detectron2_C4(file_name, output_file_name_C4)
    # start/num_outs select which FPN levels are exported -- TODO confirm.
    convert_detectron2_Head(file_name, output_file_name_Head, start=2, num_outs=4)
    convert_mmdetection_Head(file_name, output_file_name_mmdet_Head)
def main(args):
    """Top-level pretraining driver: build data/model, resume if needed,
    then run the epoch loop with periodic checkpointing.
    """
    train_prefix = 'train2017' if args.dataset == 'COCO' else 'train'
    train_loader = get_loader(args.aug, args, prefix=train_prefix, return_coord=True)
    args.num_instances = len(train_loader.dataset)
    logger.info(f"length of training dataset: {args.num_instances}")
    model, optimizer = build_model(args)
    if dist.get_rank() == 0:
        print(model)
    scheduler = get_scheduler(optimizer, len(train_loader), args)
    # optionally resume from a checkpoint
    if args.pretrained_model:
        assert os.path.isfile(args.pretrained_model)
        load_pretrained(model, args.pretrained_model)
    if args.auto_resume:
        resume_file = os.path.join(args.output_dir, "current.pth")
        if os.path.exists(resume_file):
            logger.info(f'auto resume from {resume_file}')
            args.resume = resume_file
        else:
            logger.info(f'no checkpoint found in {args.output_dir}, ignoring auto resume')
    if args.resume:
        assert os.path.isfile(args.resume)
        load_checkpoint(args, model, optimizer, scheduler, sampler=train_loader.sampler)
    # tensorboard
    # Only rank 0 writes summaries; other ranks pass None to train().
    if dist.get_rank() == 0:
        summary_writer = SummaryWriter(log_dir=args.output_dir)
    else:
        summary_writer = None
    if args.use_sliding_window_sampler:
        # Rescale the epoch count so the total number of seen samples is
        # preserved when each "epoch" only covers window_size samples.
        args.epochs = math.ceil(args.epochs * len(train_loader.dataset) / args.window_size)
    for epoch in range(args.start_epoch, args.epochs + 1):
        if isinstance(train_loader.sampler, DistributedSampler):
            # Reshuffle per epoch so every rank sees a new permutation.
            train_loader.sampler.set_epoch(epoch)
        train(epoch, train_loader, model, optimizer, scheduler, args, summary_writer)
        if dist.get_rank() == 0 and (epoch % args.save_freq == 0 or epoch == args.epochs):
            save_checkpoint(args, epoch, model, optimizer, scheduler, sampler=train_loader.sampler)
        if dist.get_rank() == 0 and epoch == args.epochs:
            convert_checkpoint(args)
def train(epoch, train_loader, model, optimizer, scheduler, args, summary_writer):
    """
    one epoch training
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    end = time.time()
    for idx, data in enumerate(train_loader):
        # NOTE: what is data? see ./test.py
        data = [item.cuda(non_blocking=True) for item in data]
        data_time.update(time.time() - end)
        """
        for SoCo_C4:
            inputs:
                - img1, img2
                - bbox1, bbox2
                - correspondence
        for SoCo_FPN:
            inputs:
                -
        """
        # Dispatch on the model variant: each forward takes a different
        # number of tensors and returns the contrastive loss directly.
        if args.model in ['SoCo_C4']:
            loss = model(data[0], data[1], data[2], data[3], data[4])
        elif args.model in ['SoCo_FPN',]:
            loss = model(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8])
        elif args.model in ['SoCo_FPN_Star']:
            loss = model(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12])
        else:
            # Fallback: model returns logits/labels for a CE objective.
            logit, label = model(data[0], data[1])
            loss = F.cross_entropy(logit, label)
        # backward
        optimizer.zero_grad()
        if args.amp_opt_level != "O0":
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        # Per-iteration (not per-epoch) LR schedule.
        scheduler.step()
        # update meters and print info
        loss_meter.update(loss.item(), data[0].size(0))
        batch_time.update(time.time() - end)
        end = time.time()
        train_len = len(train_loader)
        if args.use_sliding_window_sampler:
            # With the sliding window, an "epoch" is window_size samples.
            train_len = int(args.window_size / args.batch_size / dist.get_world_size())
        if idx % args.print_freq == 0:
            lr = optimizer.param_groups[0]['lr']
            logger.info(
                f'Train: [{epoch}/{args.epochs}][{idx}/{train_len}] '
                f'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                f'Data Time {data_time.val:.3f} ({data_time.avg:.3f}) '
                f'lr {lr:.3f} '
                f'loss {loss_meter.val:.3f} ({loss_meter.avg:.3f})')
            # tensorboard logger
            if summary_writer is not None:
                step = (epoch - 1) * len(train_loader) + idx
                summary_writer.add_scalar('lr', lr, step)
                summary_writer.add_scalar('loss', loss_meter.val, step)
if __name__ == '__main__':
    opt = parse_option(stage='pre_train')
    if opt.amp_opt_level != "O0":
        assert amp is not None, "amp not installed!"
    # One process per GPU; rank/device come from the launcher's env vars.
    torch.cuda.set_device(opt.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    cudnn.benchmark = True
    # setup logger
    os.makedirs(opt.output_dir, exist_ok=True)
    logger = setup_logger(output=opt.output_dir, distributed_rank=dist.get_rank(), name="SoCo")
    # Rank 0 persists the full config for reproducibility.
    if dist.get_rank() == 0:
        path = os.path.join(opt.output_dir, "config.json")
        with open(path, 'w') as f:
            json.dump(vars(opt), f, indent=2)
        logger.info("Full config saved to {}".format(path))
    # print args
    logger.info(
        "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(opt)).items()))
    )
    if opt.debug:
        logger.info('enable debug mode, set seed to 0')
        set_random_seed(0)
    main(opt)
| 35.979866 | 144 | 0.649879 |
601cb4c5fafc34037963802c974d4e75b39002ec | 117 | py | Python | learn-to-code-with-python/04-Variables/multiple-variable-assignments.py | MaciejZurek/python_practicing | 0a426f2aed151573e1f8678e0239ff596d92bbde | [
"MIT"
] | null | null | null | learn-to-code-with-python/04-Variables/multiple-variable-assignments.py | MaciejZurek/python_practicing | 0a426f2aed151573e1f8678e0239ff596d92bbde | [
"MIT"
] | null | null | null | learn-to-code-with-python/04-Variables/multiple-variable-assignments.py | MaciejZurek/python_practicing | 0a426f2aed151573e1f8678e0239ff596d92bbde | [
"MIT"
] | null | null | null | a = 5
b = 5
# Chained assignment: both names end up bound to the same value.
a = b = 5
# Rebinding b afterwards does not affect a.
b = 1
print(b)
print(a)
a = 5
b = 10
# Tuple unpacking: assign several variables in a single statement.
a, b = 5, 10
print(a,b)
# Works for any matching number of targets and values.
a, b, c = 5, 10, 11
print(c) | 6.5 | 19 | 0.444444 |
9cd890100bbcb49a43f1569312ab1ec089afd1f5 | 13,599 | py | Python | discretezoo/discretezoo_main.py | googleinterns/adversarial-0th-order-optimization | aacdc8c88cc11f57456b989da0832f2e0ad89178 | [
"Apache-2.0"
] | null | null | null | discretezoo/discretezoo_main.py | googleinterns/adversarial-0th-order-optimization | aacdc8c88cc11f57456b989da0832f2e0ad89178 | [
"Apache-2.0"
] | null | null | null | discretezoo/discretezoo_main.py | googleinterns/adversarial-0th-order-optimization | aacdc8c88cc11f57456b989da0832f2e0ad89178 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import datetime
import itertools
import os
from absl import app
from absl import flags
from absl import logging
import more_itertools
from nltk.tokenize import treebank
import tensorflow as tf
import tensorflow_datasets as tfds
import tqdm
from discretezoo import attack_setup
from discretezoo import metrics
from discretezoo.attack import importance
from discretezoo.attack import attack_loop
from discretezoo.attack import estimation
from discretezoo.attack import sampling
from discretezoo.loss import semantic_similarity
FLAGS = flags.FLAGS
# Target model settings.
flags.DEFINE_string('model', None, 'The directory of the model to attack.')
flags.DEFINE_integer('padding_index',
0,
'Which index to use as the padding.',
lower_bound=0)
flags.DEFINE_integer('oov_index',
0,
'Which index to use for unknown tokens.',
lower_bound=0)
flags.DEFINE_boolean(
'include_tokenizer', True,
'Include the pretrained model\'s tokenizer in the call function.')
# Optimizer settings.
flags.DEFINE_integer(
'token_changes_per_sentence',
0, 'This controls how many tokens per sentence can be changed.\n'
'If this is set to 0, all tokens in a sentence may be changed.',
lower_bound=0)
flags.DEFINE_integer(
'changes_per_token',
3,
'This controls how many times a token can be changed by the optimizer.',
lower_bound=1)
flags.DEFINE_string(
'embeddings_file', None, 'The path to a tsv file containing embeddings.\n'
'Vectors have a corresponding token in vocab_file on the same line number.')
flags.DEFINE_string(
'vocab_file', None,
'The path to a text file containing an individual vocab item on each line.')
flags.DEFINE_enum(
'sampling_strategy', 'uniform', ['uniform', 'knn_euclidean', 'knn_cosine'],
'Which sampling method to use to replace tokens in sentences.')
flags.DEFINE_integer('num_to_sample',
1,
'How many tokens to sample while estimating the gradient.',
lower_bound=0)
flags.DEFINE_bool('normalize_embeddings', False,
'Normalize embeddings used by the optimizer.')
flags.DEFINE_bool(
'reduce_mean', True,
'Controls whether sentences and gradients are reduced using mean or sum.')
flags.DEFINE_list(
'special_tokens', [],
'The index of vocabulary items that should not be generated. '
'Must be integers.')
flags.DEFINE_bool(
'discretize_by_cosine', False,
'This controls whether the optimizer uses the maximum inner product or '
'maximum cosine similarity for discretization.')
flags.DEFINE_bool(
'add_displacement_to_embedding', False,
'This controls if we directly multiply the displacement with the '
'embeddings or if we add it to the original embedding first.')
# Attack settings.
flags.DEFINE_string(
'dataset', None,
'The name of the dataset you would like to make adversarial.\n'
'It must be the name of a valid dataset in tensorflow_datasets.')
flags.DEFINE_string('split', 'test', 'Which split of the dataset to use.')
flags.DEFINE_integer(
'num_examples',
0, 'The number of sentences in the dataset to make adversarial. \n'
'0 means all sentences. The attack will start with the first sentence in '
'the dataset and attack this many sentences.',
lower_bound=0)
flags.DEFINE_integer('batch_size',
8,
'How many sentences to attack simultaneously.',
lower_bound=1)
flags.DEFINE_enum(
'semantic_similarity', 'cosine', ['euclidean', 'cosine', 'use'],
'This controls how similarity between two sentences is computed. '
'"use" stands for Universal Sentence Encoder, a sentence embedding method '
'and the resulting embeddings will be compared with cosine distance.')
flags.DEFINE_float(
'interpolation',
1.0,
'Interpolation factor between adversarial loss and semantic similarity.',
lower_bound=0.0)
flags.DEFINE_float(
'kappa',
0.0,
'Controls how confident the model should be about the adversarial label.',
lower_bound=0.0)
# Logging
flags.DEFINE_string('output_file', None,
'The output file to write adversarial examples to.')
flags.DEFINE_string('tensorboard_logdir', None,
'The output directory to write tensorboard logs to.')
flags.DEFINE_string('tensorboard_profiling_dir', None,
'The directory to write profiling data to.')
flags.mark_flags_as_required(
['model', 'embeddings_file', 'vocab_file', 'dataset', 'output_file'])
TSV_HEADER = [
'true_label', 'predicted_label', 'label_flipped', 'query_count',
'changed_token_count', 'bleu_score', 'semantic_similarity',
'original_sentence', 'adversarial_sentence'
]
def main(argv):
  """Run the DiscreteZOO attack over the configured dataset.

  Builds the target model wrapper, semantic-distance function, sampler and
  optimizer from FLAGS, then attacks the dataset batch by batch, writing
  one TSV row of metrics per example to FLAGS.output_file.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  logging.get_absl_handler().use_absl_log_file()
  if FLAGS.tensorboard_profiling_dir is not None:
    tf.profiler.experimental.start(FLAGS.tensorboard_profiling_dir)
  logging.info('Writing output to: %s', FLAGS.output_file)
  detokenizer = treebank.TreebankWordDetokenizer()
  if FLAGS.tensorboard_logdir:
    # Time-stamped subdirectory so repeated runs do not collide.
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_path = os.path.join(FLAGS.tensorboard_logdir, current_time)
    summary_writer = tf.summary.create_file_writer(tensorboard_path)
    logging.info('Writing tensorboard logs to %s', tensorboard_path)
  else:
    summary_writer = tf.summary.create_noop_writer()
  embeddings, token_to_id, vocab = attack_setup.load_embeddings(
      FLAGS.embeddings_file, FLAGS.vocab_file)
  tensorboard_logging = FLAGS.tensorboard_logdir is not None
  model_fun = attack_setup.ModelCallable(
      FLAGS.model,
      vocab,
      detokenizer.detokenize,
      include_tokenizer=FLAGS.include_tokenizer,
      padding_index=FLAGS.padding_index)
  # Semantic-similarity term of the attack objective.
  if FLAGS.semantic_similarity == 'cosine':
    distance_fun = semantic_similarity.EmbeddedCosineDistance(embeddings)
  elif FLAGS.semantic_similarity == 'euclidean':
    distance_fun = semantic_similarity.EmbeddedEuclideanDistance(embeddings)
  else:
    distance_fun = semantic_similarity.UniversalSentenceEncoderDistance(
        detokenizer.detokenize, vocab, padding_index=FLAGS.padding_index)
  adversarial_loss = attack_setup.AdversarialLoss(
      model_fun=model_fun,
      distance_fun=distance_fun,
      embeddings=embeddings,
      interpolation=FLAGS.interpolation,
      kappa=FLAGS.kappa,
      tensorboard_logging=tensorboard_logging)
  output_difference = attack_setup.OutputDifference(model_fun)
  early_stopping_criterion = attack_setup.EarlyStopping(model_fun)
  dataset = tfds.load(FLAGS.dataset, split=FLAGS.split)
  sorted_dataset = attack_setup.sort_dataset(dataset)
  batched_dataset = sorted_dataset.batch(FLAGS.batch_size)
  if FLAGS.sampling_strategy == 'uniform':
    sampling_strategy = sampling.uniform_sampling
  elif FLAGS.sampling_strategy == 'knn_euclidean':
    sampling_strategy = sampling.knn_sampling_euclidean
  else:
    sampling_strategy = sampling.knn_sampling_cosine
  command_line_special_tokens = [int(index) for index in FLAGS.special_tokens]
  # This is to de-deduplicate any possible copies.
  special_tokens = ({FLAGS.padding_index,
                     FLAGS.oov_index}.union(command_line_special_tokens))
  special_tokens = list(special_tokens)
  optimizer = estimation.DiscreteZOO(
      sampling_strategy=sampling_strategy,
      embeddings=embeddings,
      adversarial_loss=adversarial_loss,
      num_to_sample=FLAGS.num_to_sample,
      reduce_mean=FLAGS.reduce_mean,
      descent=True,
      norm_embeddings=FLAGS.normalize_embeddings,
      vocab=vocab,
      padding_index=FLAGS.padding_index,
      special_tokens=special_tokens,
      discretize_by_cosine=FLAGS.discretize_by_cosine,
      add_displacement_to_embedding=FLAGS.add_displacement_to_embedding)
  with tf.io.gfile.GFile(FLAGS.output_file,
                         'w') as output_file, summary_writer.as_default():
    tsv_output = csv.writer(output_file,
                            delimiter='\t',
                            quoting=csv.QUOTE_NONE,
                            escapechar='\\')
    tsv_output.writerow(TSV_HEADER)
    examples_attacked = 0
    total_successes = 0
    for step, batch in enumerate(
        tqdm.tqdm(batched_dataset, desc='Attack Progress')):
      tf.summary.experimental.set_step(step)
      # num_examples == 0 means "attack the whole dataset".
      if examples_attacked >= FLAGS.num_examples and FLAGS.num_examples != 0:
        break
      text_batch = batch['sentence'].numpy().tolist()
      original_labels = batch['label']
      # Tensorflow saves text as bytes.
      decoded_text_batch = []
      for text in text_batch:
        decoded_text_batch.append([token.decode('utf-8') for token in text])
      # Log original tokenized texts.
      logging.debug("Original sentences: \n%s", decoded_text_batch)
      # Pre-process the batch of sentences into a numerical tensor.
      numericalized_batch = []
      for tokenized_text in decoded_text_batch:
        numericalized_text = [
            token_to_id.get(token.lower(), FLAGS.oov_index)
            for token in tokenized_text
        ]
        numericalized_batch.append(numericalized_text)
      ragged_tensor_batch = tf.ragged.constant(numericalized_batch,
                                               dtype=tf.int32)
      tensor_batch = ragged_tensor_batch.to_tensor(FLAGS.padding_index)
      model_fun.reset_query_tracking(tensor_batch)
      model_predicted_probabilities = model_fun(tensor_batch)
      # Attack the model's own predictions, not the gold labels.
      model_predicted_labels = tf.argmax(model_predicted_probabilities,
                                         axis=-1,
                                         output_type=tf.int32)
      importance_scores = importance.scorer(tensor_batch,
                                            model_predicted_probabilities,
                                            output_difference,
                                            importance.drop_position)
      adversarial_sentences, is_finished_attacks = attack_loop.loop(
          sentences=tensor_batch,
          labels=model_predicted_labels,
          optimizer=optimizer,
          token_importance_scores=importance_scores,
          early_stopping_criterion=early_stopping_criterion,
          iterations_per_token=FLAGS.changes_per_token,
          max_changes=FLAGS.token_changes_per_sentence)
      # Post-process the adversarial sentences back into strings.
      adversarial_sentences_list = adversarial_sentences.numpy().tolist()
      adversarial_sentences_list = [
          more_itertools.rstrip(tokens,
                                lambda token: token == FLAGS.padding_index)
          for tokens in adversarial_sentences_list
      ]
      adversarial_sentences_tokens = []
      for sentence in adversarial_sentences_list:
        sentence_tokens = [vocab[index] for index in sentence]
        adversarial_sentences_tokens.append(sentence_tokens)
      original_sentence_strings = [
          ' '.join(tokens) for tokens in decoded_text_batch
      ]
      adversarial_sentence_strings = [
          ' '.join(tokens) for tokens in adversarial_sentences_tokens
      ]
      changed_token_counts = metrics.changed_token_count(
          decoded_text_batch, adversarial_sentences_tokens)
      bleu_scores = metrics.sentence_bleu_scores(original_sentence_strings,
                                                 adversarial_sentence_strings)
      semantic_differences = (tf.reshape(
          distance_fun(tensor_batch, adversarial_sentences),
          -1).numpy().tolist())
      is_padding = adversarial_sentences == FLAGS.padding_index
      padding_per_sentence = tf.reduce_sum(tf.cast(is_padding, tf.int32),
                                           axis=-1)
      # TODO: Come up with a less hack-y way to ignore queries in importance
      # scoring for padding tokens.
      query_count = model_fun.query_count - padding_per_sentence
      query_count = query_count.numpy().tolist()
      is_finished_attacks = tf.reshape(is_finished_attacks, -1).numpy().tolist()
      # One TSV row per example, columns matching TSV_HEADER.
      tsv_data = zip(original_labels.numpy().tolist(),
                     model_predicted_labels.numpy().tolist(),
                     is_finished_attacks, query_count, changed_token_counts,
                     bleu_scores, semantic_differences,
                     original_sentence_strings, adversarial_sentence_strings)
      tsv_output.writerows(tsv_data)
      total_successes += tf.reduce_sum(tf.cast(is_finished_attacks,
                                               tf.int32)).numpy()
      examples_attacked = examples_attacked + tensor_batch.shape[0]
      success_rate = total_successes / examples_attacked
      logging.info('Success Rate: %f', success_rate)
  if FLAGS.tensorboard_profiling_dir is not None:
    tf.profiler.experimental.stop()
if __name__ == '__main__':
    # absl's app.run parses FLAGS before dispatching to main(argv).
    app.run(main)
| 41.587156 | 80 | 0.697698 |
8d74fde922023b2a83f22e8cdc5c7ce91157bd77 | 353 | py | Python | wafw00f/plugins/cachefly.py | 84KaliPleXon3/EnableSecurity-wafw00f | 7fb9d5c9ddd222377a935724c66977e2e9c97c78 | [
"BSD-3-Clause"
] | 3,069 | 2016-05-23T16:08:31.000Z | 2022-03-31T16:27:40.000Z | wafw00f/plugins/cachefly.py | marcelovff/wafw00f | e3e6147d5d0dbb89030a9902e47170f15e95dd31 | [
"BSD-3-Clause"
] | 98 | 2016-06-06T16:43:54.000Z | 2022-03-18T12:30:19.000Z | wafw00f/plugins/cachefly.py | marcelovff/wafw00f | e3e6147d5d0dbb89030a9902e47170f15e95dd31 | [
"BSD-3-Clause"
] | 734 | 2016-05-25T02:17:46.000Z | 2022-03-31T11:43:37.000Z | #!/usr/bin/env python
'''
Copyright (C) 2020, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'CacheFly CDN (CacheFly)'
def is_waf(self):
    """Return True if the response carries CacheFly CDN signatures.

    Matches either the vendor header ``BestCDN: Cachefly`` or a cookie
    whose name starts with ``cfly_req``.
    """
    schemes = [
        self.matchHeader(('BestCDN', r'Cachefly')),
        self.matchCookie(r'^cfly_req.*=')
    ]
    # any() already yields a bool, so the explicit True/False branch
    # (and the redundant generator around `schemes`) is unnecessary.
    return any(schemes)
4491c1daee94d8d8c149175d69931c3055d3ca65 | 5,393 | py | Python | docs/source/conf.py | Shynixn/BlockBall | 696e51c71c4c84a1d1974fc13243d67705e6ae57 | [
"Apache-2.0"
] | 50 | 2017-01-13T17:17:46.000Z | 2022-03-08T19:26:50.000Z | docs/source/conf.py | Shynixn/BlockBall | 696e51c71c4c84a1d1974fc13243d67705e6ae57 | [
"Apache-2.0"
] | 316 | 2017-01-26T09:42:37.000Z | 2022-03-15T08:46:34.000Z | docs/source/conf.py | Shynixn/BlockBall | 696e51c71c4c84a1d1974fc13243d67705e6ae57 | [
"Apache-2.0"
] | 32 | 2017-01-14T19:57:40.000Z | 2022-03-15T13:10:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# BlockBall documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 25 12:11:16 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'BlockBall'
copyright = '2015 - 2021, Shynixn'
author = 'Shynixn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '6.28.0'
# The full version, including alpha/beta/rc tags.
# NOTE: `version` and `release` are duplicated — bump both together.
release = '6.28.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Logo and favicon shown in the rendered HTML docs (paths under _static).
html_logo = "_static/images/BlockBall-small.png"
html_favicon = "_static/images/favicon.png"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {"relbarbgcolor": "black", "rightsidebar": "true"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'BlockBalldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'BlockBall.tex', 'BlockBall Documentation',
     'Shynixn', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'blockball', 'BlockBall Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'BlockBall', 'BlockBall Documentation',
     author, 'BlockBall', 'One line description of project.',
     'Miscellaneous'),
]
# Context passed to the theme; display_github enables the per-page
# "Edit on GitHub" link pointing at the repository below.
html_context = {
    "display_github": True,
    "github_user": "Shynixn",
    "github_repo": "BlockBall",
    "github_version": "tree/master",
    "conf_py_path": "/docs/source/",
}
| 30.128492 | 79 | 0.681624 |
83dd71f7292f62544550388116c5d2c608a0e768 | 104 | py | Python | app/main/__init__.py | sboerwinkle/SocialDeduction | fc86ffe779ae5ec3641012b664c67aba748821b0 | [
"MIT"
] | null | null | null | app/main/__init__.py | sboerwinkle/SocialDeduction | fc86ffe779ae5ec3641012b664c67aba748821b0 | [
"MIT"
] | null | null | null | app/main/__init__.py | sboerwinkle/SocialDeduction | fc86ffe779ae5ec3641012b664c67aba748821b0 | [
"MIT"
] | null | null | null | from flask import Blueprint
main = Blueprint('main', __name__)
from . import routes, events, games_db
| 17.333333 | 38 | 0.759615 |
dfbfa0bce949cf7d412fbd1ec7fa699b775ee1ed | 499 | py | Python | notebooks/solutions/load_iris.py | lampsonnguyen/ml-training-intro | e9d01104853c2b35932fbcc8f00738af2a0f2cb7 | [
"MIT"
] | 218 | 2017-05-01T15:01:28.000Z | 2021-11-19T10:25:56.000Z | notebooks/solutions/load_iris.py | skirmer/ml-training-intro | 2259a9ea4a3a7bc85ad328b3190c1676e283f5d3 | [
"MIT"
] | 3 | 2017-05-03T10:26:20.000Z | 2019-03-27T14:51:52.000Z | notebooks/solutions/load_iris.py | skirmer/ml-training-intro | 2259a9ea4a3a7bc85ad328b3190c1676e283f5d3 | [
"MIT"
] | 152 | 2017-05-02T13:50:58.000Z | 2022-03-30T09:15:36.000Z | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
iris = load_iris()
X, y = iris.data, iris.target
print("Dataset size: %d number of features: %d number of classes: %d"
% (X.shape[0], X.shape[1], len(np.unique(y))))
X_train, X_test, y_train, y_test = train_test_split(X, y)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
plt.figure()
plt.scatter(X_train[:, 2], X_train[:, 3], c=y_train)
| 27.722222 | 71 | 0.709419 |
9b9fc24d341505c96b5292fac28d046b75ba321f | 6,392 | py | Python | mmdet/apis/train.py | surajkothawade/mmdetection | a148d8a9079c6d88ad6eddee929d577388c8182c | [
"Apache-2.0"
] | null | null | null | mmdet/apis/train.py | surajkothawade/mmdetection | a148d8a9079c6d88ad6eddee929d577388c8182c | [
"Apache-2.0"
] | null | null | null | mmdet/apis/train.py | surajkothawade/mmdetection | a148d8a9079c6d88ad6eddee929d577388c8182c | [
"Apache-2.0"
] | null | null | null | import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
Fp16OptimizerHook, OptimizerHook, build_optimizer,
build_runner)
from mmcv.utils import build_from_cfg
from mmdet.core import DistEvalHook, EvalHook
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.utils import get_root_logger
def set_random_seed(seed, deterministic=False):
    """Seed every random number generator used during training.

    Covers Python's ``random``, NumPy, and PyTorch (CPU and all CUDA
    devices).

    Args:
        seed (int): Seed applied to all generators.
        deterministic (bool): When True, force deterministic cuDNN
            kernels (``torch.backends.cudnn.deterministic = True``) and
            disable the cuDNN autotuner
            (``torch.backends.cudnn.benchmark = False``). Default: False.
    """
    # Apply the same seed to each library-level generator.
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    if not deterministic:
        return
    # Trade throughput for bit-exact reproducibility on the cuDNN backend.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   meta=None):
    """Train a detector end to end from an mmdet/mmcv config.

    Builds one data loader per training dataset, wraps the model for
    (distributed) data parallelism, constructs the optimizer and runner,
    registers training/eval/custom hooks, optionally resumes or loads a
    checkpoint, and finally launches ``runner.run``.

    Args:
        model: Detector module to train (moved to GPU here).
        dataset: A dataset or list of datasets; one loader is built each.
        cfg: Full training config (data, optimizer, runner, hooks, ...).
        distributed (bool): Use MMDistributedDataParallel when True.
        validate (bool): Register an evaluation hook over ``cfg.data.val``.
        timestamp: Timestamp string used to align log file names.
        meta: Extra metadata dict forwarded to the runner.
    """
    logger = get_root_logger(cfg.log_level)
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    # Backwards compatibility: pre-2.0 configs used "imgs_per_gpu".
    if 'imgs_per_gpu' in cfg.data:
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
                       'Please use "samples_per_gpu" instead')
        if 'samples_per_gpu' in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(
                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f'{cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,#delta change
            # NOTE(review): indices_file is a fork-specific addition (not
            # upstream mmdet) — presumably restricts training to a subset
            # of sample indices; confirm against build_dataloader.
            indices_file=cfg.indices_file) for ds in dataset
    ]
    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    if 'runner' not in cfg:
        # Legacy configs only set total_epochs; synthesize a runner section.
        cfg.runner = {
            'type': 'EpochBasedRunner',
            'max_epochs': cfg.total_epochs
        }
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)
    else:
        if 'total_epochs' in cfg:
            assert cfg.total_epochs == cfg.runner.max_epochs
    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))
    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp
    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    # register hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))
    if distributed:
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())
    # register eval hooks
    if validate:
        # Support batch_size > 1 in validation
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if val_samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(
                cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
    # user-defined hooks
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), \
            f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), \
                'Each item in custom_hooks expects dict type, but got ' \
                f'{type(hook_cfg)}'
            # Copy before popping so the config object is not mutated.
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
| 37.162791 | 79 | 0.62281 |
0860482c02bcea94e13e55009a5d14bad9540ee7 | 2,191 | py | Python | generator.py | JiroShimaya/RomajiDict | 586b6a6d6848d603a52b7fe826df2f22642e51fa | [
"MIT"
] | null | null | null | generator.py | JiroShimaya/RomajiDict | 586b6a6d6848d603a52b7fe826df2f22642e51fa | [
"MIT"
] | null | null | null | generator.py | JiroShimaya/RomajiDict | 586b6a6d6848d603a52b7fe826df2f22642e51fa | [
"MIT"
] | null | null | null | import json
def generate():
    """Build and return the romaji -> katakana lookup dictionary.

    Plain vowels are registered first, then every consonant prefix is
    combined with the vowels a/i/u/e/o, and finally the small-tsu
    spellings (ltu/xtu and ltsu/xtsu) are added.
    """
    def row(base, tails="ャ/ィ/ュ/ェ/ョ"):
        # Expand e.g. ("キ", "ャ/ィ/ュ/ェ/ョ") -> ["キャ", "キィ", ...].
        # An empty segment between slashes yields the bare base kana.
        return [base + tail for tail in tails.split("/")]

    table = {
        "k": "カキクケコ",
        "ky": row("キ"),
        "kw": row("ク", "ヮ/ィ/ゥ/ェ/ォ"),
        "s": "サシスセソ",
        "sy": row("シ", "ャ//ュ/ェ/ョ"),
        "sh": row("シ", "ャ//ュ/ェ/ョ"),
        "sw": row("ス", "ヮ/ィ/ゥ/ェ/ォ"),
        "t": "タチツテト",
        "ty": row("チ"),
        "th": row("テ"),
        "ts": row("ツ", "ァ/ィ//ェ/ォ"),
        "c": "カシクセコ",
        "ch": row("チ", "ャ//ュ/ェ/ョ"),
        "cy": row("チ"),
        "q": row("ク", "ァ/ィ//ェ/ォ"),
        "n": "ナニヌネノ",
        "ny": row("ニ"),
        "nw": row("ヌ", "ヮ/ィ/ゥ/ェ/ォ"),
        "h": "ハヒフヘホ",
        "hy": row("ヒ"),
        "f": row("フ", "ァ/ィ//ェ/ォ"),
        "fy": row("フ"),
        "m": "マミムメモ",
        "my": row("ミ"),
        "y": "ヤ/イ/ユ/イェ/ヨ".split("/"),
        "r": "ラリルレロ",
        "ry": row("リ"),
        "w": "ワ/ウィ/ウ/ウェ/ウォ".split("/"),  # "wo" may also be written ヲ
        "g": "ガギグゲゴ",
        "gy": row("ギ"),
        "z": "ザジズゼゾ",
        "zy": row("ジ"),
        "j": row("ジ", "ャ//ュ/ェ/ョ"),
        "jy": row("ジ"),
        "d": "ダヂヅデド",
        "dh": row("デ"),
        "dy": row("ヂ"),
        "b": "バビブベボ",
        "by": row("ビ"),
        "v": "ヴァ/ヴィ/ヴ/ヴェ/ヴォ".split("/"),
        "vy": row("ヴ"),
        "p": "パピプペポ",
        "py": row("ピ"),
        "x": "ァィゥェォ",
        "xy": "ャィュェョ",
        "l": "ァィゥェォ",
        "ly": "ャィュェョ",
    }

    mapping = dict(zip("aiueo", "アイウエオ"))
    for prefix, kana in table.items():
        mapping.update(zip([prefix + vowel for vowel in "aiueo"], kana))
    # Small tsu (ッ): both "tu" and "tsu" spellings behind l/x prefixes.
    for small in "lx":
        mapping[small + "tu"] = "ッ"
    for small in "lx":
        mapping[small + "tsu"] = "ッ"
    return mapping
if __name__ == "__main__":
    PATH = "./ramaji_dict.json"
    obj = generate()
    # ensure_ascii=False writes raw katakana, so the file must be opened
    # as UTF-8 explicitly: relying on the platform default encoding
    # (e.g. cp1252 on Windows) would raise UnicodeEncodeError.
    with open(PATH, "w", encoding="utf-8") as f:
        json.dump(obj, f, ensure_ascii=False, indent=2)
aff79e656f5a5cb2ceb716638a8793851a82553c | 3,175 | py | Python | firstu_rewards/firstu_rewards/doctype/fuel_payment/fuel_payment.py | fadilsiddique/firstu_rewards | 3df54cfc9f43ef6f4df97aa09320a096c534f446 | [
"MIT"
] | null | null | null | firstu_rewards/firstu_rewards/doctype/fuel_payment/fuel_payment.py | fadilsiddique/firstu_rewards | 3df54cfc9f43ef6f4df97aa09320a096c534f446 | [
"MIT"
] | null | null | null | firstu_rewards/firstu_rewards/doctype/fuel_payment/fuel_payment.py | fadilsiddique/firstu_rewards | 3df54cfc9f43ef6f4df97aa09320a096c534f446 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Tridz and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class FuelPayment(Document):
	"""Fuel payment entry.

	On validate it computes litres filled and the membership cashback and
	drafts a Cashback Ledger entry; on submit it settles the customer's
	cashback balance and awards trophies per the Trophy Settings schedule.
	"""

	def validate(self):
		"""Compute litres/cashback and prepare the Cashback Ledger draft.

		The market price comes from the 'Fuel Price' doctype; the member
		pays a discounted rate for their membership tier, and the
		difference is credited back as cashback.
		"""
		self.customer_doc = frappe.get_doc('Customer', self.customer)
		fuel_doc = frappe.get_doc('Fuel Price')
		fuel_type = self.customer_doc.fuel_type
		membership_type = self.customer_doc.membership_type
		# Market price plus per-membership discounted prices per fuel type.
		rates = {
			"Petrol": (fuel_doc.petrol,
				{"Status": fuel_doc.petrol_status,
				 "Privilege": fuel_doc.petrol_privilege}),
			"Diesel": (fuel_doc.diesel,
				{"Status": fuel_doc.diesel_status,
				 "Privilege": fuel_doc.diesel_privilege}),
		}
		# Previously an unknown fuel/membership type crashed later with an
		# opaque NameError on an unbound local; fail early and clearly.
		if fuel_type not in rates:
			raise ValueError("Unsupported fuel type: %s" % fuel_type)
		fuel_today, member_rates = rates[fuel_type]
		if membership_type not in member_rates:
			raise ValueError("Unsupported membership type: %s" % membership_type)
		fuel = member_rates[membership_type]
		# NOTE(review): litres are rounded to 2 dp here but truncated to
		# int in the cashback formula below, so fractional litres earn no
		# cashback. Preserved as-is — confirm whether that is intended.
		litre_filled = round(int(self.amount) / int(fuel_today), 2)
		self.litres = litre_filled
		self.market_value = fuel_today
		self.customer_value = fuel
		cashback = int(fuel_today) * int(litre_filled) - int(fuel) * int(litre_filled)
		self.cashback = cashback
		# Draft ledger entry against the customer; submitted in on_submit().
		self.cashback_doc = frappe.get_doc({
			'doctype': 'Cashback Ledger',
			'customer': self.customer,
			'amount': cashback,
			'fuel_payment': self.name,
			'fuel_paid_amount': self.amount,
			# Typo fix: was 'Cashbhack received for fuel refill'.
			'note': 'Cashback received for fuel refill'
		})

	def on_submit(self):
		"""Settle balances and award trophies when the counter hits zero.

		Trophy count and award frequency come from 'Trophy Settings';
		`refuel_left` counts down the refuels remaining before the next
		trophy award.
		"""
		settings = frappe.get_doc('Trophy Settings')
		self.customer_doc.cashback_balance = int(self.customer_doc.cashback_balance) + int(self.cashback)
		self.customer_doc.lifetime = int(self.customer_doc.lifetime) + int(self.cashback)
		if int(self.customer_doc.refuel_left) == 0:
			self.customer_doc.total_trophies_collected = int(self.customer_doc.total_trophies_collected) + int(settings.trophies)
			self.customer_doc.refuel_left = int(settings.frequency)
			# Credit entry for the awarded trophies (submitted immediately
			# via docstatus=1).
			trophy_ledger = frappe.get_doc({
				'doctype': 'Trophy Ledger',
				'trophy_count': settings.trophies,
				'creditdebit': "Credit",
				'note': 'Trophy Earned from refuel',
				'customer': self.customer,
				'docstatus': 1
			})
			trophy_ledger.insert()
		else:
			self.customer_doc.refuel_left = int(self.customer_doc.refuel_left) - 1
		self.customer_doc.save()
		self.cashback_doc.submit()
| 39.197531 | 143 | 0.747402 |
29acbde45481e099ba802a6195f5d5a3f43323fc | 4,276 | py | Python | beemgraphenebase/objects.py | abitmore/beem | 2026833a836007e45f16395a9ca3b31d02e98f87 | [
"MIT"
] | 118 | 2018-03-06T07:26:19.000Z | 2022-03-21T20:16:04.000Z | beemgraphenebase/objects.py | abitmore/beem | 2026833a836007e45f16395a9ca3b31d02e98f87 | [
"MIT"
] | 248 | 2018-03-20T18:03:39.000Z | 2022-03-28T16:38:09.000Z | beemgraphenebase/objects.py | abitmore/beem | 2026833a836007e45f16395a9ca3b31d02e98f87 | [
"MIT"
] | 81 | 2018-04-27T15:27:52.000Z | 2021-10-31T06:14:25.000Z | # -*- coding: utf-8 -*-
from collections import OrderedDict
import json
from beemgraphenebase.types import (
Uint8, Int16, Uint16, Uint32, Uint64,
Varint32, Int64, String, Bytes, Void,
Array, PointInTime, Signature, Bool,
Set, Fixed_array, Optional, Static_variant,
Map, Id, JsonObj
)
from .py23 import py23_bytes, bytes_types, integer_types, string_types
from .objecttypes import object_type
from .operationids import operations
class Operation(object):
    """Wraps a single blockchain operation and resolves its numeric id,
    class name and payload class.

    Accepts the legacy ``[op_id_or_name, payload]`` list form or the
    appbase ``{"type": ..., "value": ...}`` dict form; any other input is
    assumed to be an already-constructed operation instance.
    """

    def __init__(self, op):
        if isinstance(op, list) and len(op) == 2:
            # Legacy form: [id-or-name, payload-dict].
            if isinstance(op[0], integer_types):
                self.opId = op[0]
                name = self.getOperationNameForId(self.opId)
            else:
                self.opId = self.operations().get(op[0], None)
                name = op[0]
            if self.opId is None:
                raise ValueError("Unknown operation")
            self.name = name[0].upper() + name[1:]  # klassname
            try:
                klass = self._getklass(self.name)
            except Exception:
                raise NotImplementedError("Unimplemented Operation %s" % self.name)
            self.op = klass(op[1])
            self.appbase = False
        elif isinstance(op, dict):
            # Appbase form: {"type": "<name>_operation", "value": {...}};
            # strip the "_operation" suffix (10 chars) when present.
            if len(op["type"]) > 10 and op["type"][-9:] == "operation":
                name = op["type"][:-10]
            else:
                name = op["type"]
            self.opId = self.operations().get(name, None)
            if self.opId is None:
                raise ValueError("Unknown operation")
            self.name = name[0].upper() + name[1:]  # klassname
            try:
                klass = self._getklass(self.name)
            except Exception:
                raise NotImplementedError("Unimplemented Operation %s" % self.name)
            self.op = klass(op["value"])
            self.appbase = True
        else:
            # Already an operation instance: derive id from its class name.
            self.op = op
            self.name = type(self.op).__name__.lower()  # also store name
            self.opId = self.operations()[self.name]

    def operations(self):
        """Return the operation-name -> id mapping."""
        return operations

    def getOperationNameForId(self, i):
        """ Convert an operation id into the corresponding string
        """
        for key in self.operations():
            # BUGFIX: compare with `==` instead of `is`. Identity only
            # worked by accident for small ids via CPython's small-int
            # cache and silently fails for ids outside [-5, 256].
            if int(self.operations()[key]) == int(i):
                return key
        return "Unknown Operation ID %d" % i

    def _getklass(self, name):
        """Resolve the operation payload class by name."""
        module = __import__("graphenebase.operations", fromlist=["operations"])
        class_ = getattr(module, name)
        return class_

    def __bytes__(self):
        """Wire format: varint operation id followed by the payload bytes."""
        return py23_bytes(Id(self.opId)) + py23_bytes(self.op)

    def __str__(self):
        """Legacy JSON form: [opId, payload]."""
        return json.dumps([self.opId, self.op.toJson()])
class GrapheneObject(object):
    """ Core abstraction class
    This class is used for any JSON reflected object in Graphene.
    * ``instance.__json__()``: encodes data into json format
    * ``bytes(instance)``: encodes data into wire format
    * ``str(instances)``: dumps json object as string
    """
    def __init__(self, data=None):
        # `data` is an ordered mapping of field name -> graphene type
        # instance, or None for an empty object.
        self.data = data
    def __bytes__(self):
        """Serialize all fields, in insertion order, to the wire format."""
        if self.data is None:
            return py23_bytes()
        b = b""
        for name, value in list(self.data.items()):
            if isinstance(value, string_types):
                b += py23_bytes(value, 'utf-8')
            else:
                b += py23_bytes(value)
        return b
    def __json__(self):
        """Return a plain dict representation, skipping empty Optionals."""
        if self.data is None:
            return {}
        d = {}  # JSON output is *not* ordered
        for name, value in list(self.data.items()):
            if isinstance(value, Optional) and value.isempty():
                continue
            if isinstance(value, String):
                d.update({name: str(value)})
            else:
                # Fall back to the string form for values JsonObj cannot
                # handle; the broad except is deliberate best-effort.
                try:
                    d.update({name: JsonObj(value)})
                except Exception:
                    d.update({name: value.__str__()})
        return d
    def __str__(self):
        """JSON string of :meth:`__json__`."""
        return json.dumps(self.__json__())
    def toJson(self):
        # Alias kept for API compatibility with callers expecting toJson().
        return self.__json__()
    def json(self):
        # Second alias for the same dict representation.
        return self.__json__()
    def isArgsThisClass(self, args):
        """True if `args` is a single instance of this (sub)class by name."""
        return (len(args) == 1 and type(args[0]).__name__ == type(self).__name__)
| 32.892308 | 83 | 0.55753 |
4a7f9fcf80c7052639639ca510ac98a7dccee15d | 4,063 | py | Python | data_utils/speech.py | WinderWL/PaddlePaddle-DeepSpeech | 3611de2d7c5dd9ece304f1694a77b2fae6f69ccd | [
"Apache-2.0"
] | 314 | 2020-05-28T09:44:45.000Z | 2022-03-31T08:48:59.000Z | data_utils/speech.py | WinderWL/PaddlePaddle-DeepSpeech | 3611de2d7c5dd9ece304f1694a77b2fae6f69ccd | [
"Apache-2.0"
] | 135 | 2020-05-24T05:56:20.000Z | 2022-03-24T05:54:10.000Z | data_utils/speech.py | WinderWL/PaddlePaddle-DeepSpeech | 3611de2d7c5dd9ece304f1694a77b2fae6f69ccd | [
"Apache-2.0"
] | 79 | 2020-06-27T05:58:53.000Z | 2022-03-30T14:24:37.000Z | """Contains the speech segment class."""
import numpy as np
from data_utils.audio import AudioSegment
class SpeechSegment(AudioSegment):
    """Speech segment abstraction: a subclass of AudioSegment that also
    carries the transcript text for the audio.

    :param samples: Audio samples [num_samples x num_channels].
    :type samples: ndarray.float32
    :param sample_rate: Sample rate of the training data.
    :type sample_rate: int
    :param transcript: Transcript text corresponding to the audio file.
    :type transcript: str
    :raises TypeError: If the sample data type is not float or int.
    """
    def __init__(self, samples, sample_rate, transcript):
        AudioSegment.__init__(self, samples, sample_rate)
        self._transcript = transcript
    def __eq__(self, other):
        """Return whether two objects are equal (audio and transcript).
        """
        if not AudioSegment.__eq__(self, other):
            return False
        if self._transcript != other._transcript:
            return False
        return True
    def __ne__(self, other):
        """Return whether two objects are unequal."""
        return not self.__eq__(other)
    @classmethod
    def from_file(cls, filepath, transcript):
        """Create a speech segment from an audio file and its transcript.

        :param filepath: Path of the audio file.
        :type filepath: str|file
        :param transcript: Transcript text corresponding to the audio file.
        :type transcript: str
        :return: Speech segment instance.
        :rtype: SpeechSegment
        """
        audio = AudioSegment.from_file(filepath)
        return cls(audio.samples, audio.sample_rate, transcript)
    @classmethod
    def from_bytes(cls, bytes, transcript):
        """Create a speech segment from a byte string and its transcript.

        :param bytes: Byte string containing the audio samples.
            (NOTE: parameter shadows the builtin ``bytes``.)
        :type bytes: str
        :param transcript: Transcript text corresponding to the audio.
        :type transcript: str
        :return: Speech segment instance.
        :rtype: Speech Segment
        """
        audio = AudioSegment.from_bytes(bytes)
        return cls(audio.samples, audio.sample_rate, transcript)
    @classmethod
    def concatenate(cls, *segments):
        """Concatenate any number of speech segments; both the audio and
        the transcripts are joined.

        :param *segments: Input speech segments to concatenate.
        :type *segments: tuple of SpeechSegment
        :return: A SpeechSegment instance.
        :rtype: SpeechSegment
        :raises ValueError: If no segments are given, or if segments have
            different sample rates.
        :raises TypeError: If any segment is not a SpeechSegment of the
            same type.
        """
        if len(segments) == 0:
            raise ValueError("音频片段为空")
        sample_rate = segments[0]._sample_rate
        transcripts = ""
        for seg in segments:
            if sample_rate != seg._sample_rate:
                raise ValueError("不能用不同的抽样率连接片段")
            if type(seg) is not cls:
                raise TypeError("只有相同类型SpeechSegment实例的语音片段可以连接")
            transcripts += seg._transcript
        samples = np.concatenate([seg.samples for seg in segments])
        return cls(samples, sample_rate, transcripts)
    @classmethod
    def slice_from_file(cls, filepath, transcript, start=None, end=None):
        """Load only a slice of a SpeechSegment, without wastefully
        reading the whole file into memory.

        :param filepath: Path or file object of the audio file.
        :type filepath: str|file
        :param start: Start time in seconds. A negative value counts from
            the end. Defaults to reading from the very beginning.
        :type start: float
        :param end: End time in seconds. A negative value counts from the
            end. Defaults to reading to the end of the file.
        :type end: float
        :param transcript: Transcript text for the audio; defaults to an
            empty string when not provided.
        :type transcript: str
        :return: SpeechSegment instance.
        :rtype: SpeechSegment
        """
        audio = AudioSegment.slice_from_file(filepath, start, end)
        return cls(audio.samples, audio.sample_rate, transcript)
    @classmethod
    def make_silence(cls, duration, sample_rate):
        """Create a silent SpeechSegment of the given duration and sample
        rate; the transcript is an empty string.

        :param duration: Length of the silence, in seconds.
        :type duration: float
        :param sample_rate: Sample rate of the audio.
        :type sample_rate: float
        :return: Silent SpeechSegment instance.
        :rtype: SpeechSegment
        """
        audio = AudioSegment.make_silence(duration, sample_rate)
        return cls(audio.samples, audio.sample_rate, "")
    @property
    def transcript(self):
        """Return the transcript text for the audio.

        :return: Transcript text.
        :rtype: str
        """
        return self._transcript
| 31.742188 | 73 | 0.639675 |
47b45e6d6f04b30bdb3536b9e7a1c2756dba2318 | 2,318 | py | Python | src/game.py | jadmz/pygame-box2d-template | cd5ef75940b1c919aade5acb11924cbfba8e7c60 | [
"MIT"
] | null | null | null | src/game.py | jadmz/pygame-box2d-template | cd5ef75940b1c919aade5acb11924cbfba8e7c60 | [
"MIT"
] | null | null | null | src/game.py | jadmz/pygame-box2d-template | cd5ef75940b1c919aade5acb11924cbfba8e7c60 | [
"MIT"
] | 1 | 2020-03-22T18:20:54.000Z | 2020-03-22T18:20:54.000Z | import sys
import warnings
try:
import pygame_sdl2
except ImportError:
if sys.platform in ('darwin', ):
warnings.warn('OSX has major issues with pygame/SDL 1.2 when used '
'inside a virtualenv. If this affects you, try '
'installing the updated pygame_sdl2 library.')
else:
# pygame_sdl2 is backward-compatible with pygame:
pygame_sdl2.import_as_pygame()
import pygame
from pygame.locals import (QUIT, KEYDOWN, KEYUP, MOUSEBUTTONDOWN,
MOUSEBUTTONUP, MOUSEMOTION, KMOD_LSHIFT)
from Box2D import (b2World, b2PolygonShape)
from renderer import Renderer
from game_object import GameObject
class Game:
    """Main game loop: owns the pygame window, the Box2D world and all
    game objects, and drives events/physics/update/render at a fixed
    frame rate."""

    FRAMES_PER_SECOND = 60

    def __init__(self, title):
        pygame.init()
        pygame.display.set_caption(title)
        self.running = False
        self.clock = pygame.time.Clock()
        self.renderer = Renderer()
        # Box2D world with downward gravity; doSleep lets idle bodies
        # skip simulation work.
        self.world = b2World(gravity=(0, -10), doSleep=True)
        self.renderer.setupDebugDraw(self.world)
        self.gameObjects = []
        body = self.world.CreateDynamicBody(position=(10, -10))
        box = body.CreatePolygonFixture(box=(1, 1), density=1, friction=0.3)
        self.player = self.createObject(physics=box)

    def run(self):
        """Run the main loop until quit is requested, then tear down."""
        self.running = True
        while self.running:
            self.loop()
        pygame.quit()
        sys.exit()

    def loop(self):
        """One frame: cap FPS, pump events, step physics, update, render."""
        # tick() returns elapsed ms, which was previously stored unused.
        self.clock.tick(self.FRAMES_PER_SECOND)
        # BUGFIX: was pygame_sdl2.event.get(). When the guarded
        # pygame_sdl2 import at the top of the file fails (e.g. the OSX
        # branch), that name is undefined and this raised NameError; after
        # import_as_pygame(), `pygame` refers to the same module anyway.
        self.processEvents(pygame.event.get())
        self.world.Step(1.0 / self.FRAMES_PER_SECOND, 6, 2)
        self.update()
        self.renderer.gameWillRender()
        self.render()
        self.world.DrawDebugData()
        self.renderer.gameDidRender()

    def processEvents(self, events):
        """Handle window events; QUIT stops the main loop."""
        for event in events:
            if event.type == QUIT:
                self.running = False

    def update(self):
        """Advance every registered game object by one tick."""
        for obj in self.gameObjects:
            obj.update()

    def render(self):
        """Draw every registered game object with the shared renderer."""
        for obj in self.gameObjects:
            obj.render(self.renderer)

    def createObject(self, physics=None, renderable=None):
        """Create a GameObject, register it for update/render, return it."""
        gameObject = GameObject(game=self, physics=physics, renderable=renderable)
        self.gameObjects.append(gameObject)
        return gameObject
| 28.268293 | 82 | 0.622951 |
ac415b33b47725235143094128da879a648bffaf | 44 | py | Python | c_cpp_with_python/use_cython/src/cpython_module/test_primes.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 2 | 2019-01-24T15:06:59.000Z | 2019-01-25T07:34:45.000Z | c_cpp_with_python/use_cython/src/cpython_module/test_primes.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-12-23T09:45:11.000Z | 2019-12-23T09:45:11.000Z | c_cpp_with_python/use_cython/src/cpython_module/test_primes.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-07-18T14:21:35.000Z | 2019-07-18T14:21:35.000Z | import primes
print(primes.primes(100000))
| 11 | 28 | 0.795455 |
09cb8496e6d97876cc6627238db737f4234d3340 | 3,222 | py | Python | tests/test_transport.py | blueyed/core-api-python-client | e0eca9d075f1af376e004e5625f1002763046bb0 | [
"Unlicense"
] | null | null | null | tests/test_transport.py | blueyed/core-api-python-client | e0eca9d075f1af376e004e5625f1002763046bb0 | [
"Unlicense"
] | null | null | null | tests/test_transport.py | blueyed/core-api-python-client | e0eca9d075f1af376e004e5625f1002763046bb0 | [
"Unlicense"
] | null | null | null | # coding: utf-8
from coreapi import Document, Link, Field
from coreapi.codecs import CoreJSONCodec
from coreapi.compat import force_text
from coreapi.exceptions import NetworkError
from coreapi.transports import HTTPTransport
from coreapi.utils import determine_transport
import pytest
import requests
import json
# Shared codec/transport instances used by the tests below.
decoders = [CoreJSONCodec()]
transports = [HTTPTransport()]
@pytest.fixture
def http():
    """Provide a fresh HTTPTransport instance per test."""
    return HTTPTransport()
class MockResponse(object):
def __init__(self, content):
self.content = content
self.headers = {}
self.url = 'http://example.org'
self.status_code = 200
# Test transport errors.
def test_unknown_scheme():
    """An unsupported URL scheme (ftp) must raise NetworkError."""
    with pytest.raises(NetworkError):
        determine_transport(transports, 'ftp://example.org')
def test_missing_scheme():
    """A URL without any scheme must raise NetworkError."""
    with pytest.raises(NetworkError):
        determine_transport(transports, 'example.org')
def test_missing_hostname():
    """A scheme-only URL with no hostname must raise NetworkError."""
    with pytest.raises(NetworkError):
        determine_transport(transports, 'http://')
# Test basic transition types.
def test_get(monkeypatch, http):
    """GET decodes a corejson document body into its content dict."""
    def mockreturn(self, request):
        return MockResponse(b'{"_type": "document", "example": 123}')
    monkeypatch.setattr(requests.Session, 'send', mockreturn)
    link = Link(url='http://example.org', action='get')
    doc = http.transition(link, decoders)
    assert doc == {'example': 123}
def test_get_with_parameters(monkeypatch, http):
    """GET params are encoded into the request query string."""
    def mockreturn(self, request):
        # Echo the request path+query back inside the document body.
        insert = request.path_url.encode('utf-8')
        return MockResponse(
            b'{"_type": "document", "url": "' + insert + b'"}'
        )
    monkeypatch.setattr(requests.Session, 'send', mockreturn)
    link = Link(url='http://example.org', action='get')
    doc = http.transition(link, decoders, params={'example': 'abc'})
    assert doc == {'url': '/?example=abc'}
def test_get_with_path_parameter(monkeypatch, http):
    """Path parameters should be substituted into the templated URL."""
    def fake_send(self, request):
        # Echo the fully expanded URL back inside the document.
        full_url = request.url.encode('utf-8')
        body = b'{"_type": "document", "example": "' + full_url + b'"}'
        return MockResponse(body)

    monkeypatch.setattr(requests.Session, 'send', fake_send)
    user_field = Field(name='user_id', location='path')
    link = Link(
        url='http://example.org/{user_id}/',
        action='get',
        fields=[user_field]
    )
    result = http.transition(link, decoders, params={'user_id': 123})
    assert result == {'example': 'http://example.org/123/'}
def test_post(monkeypatch, http):
    """A POST transition should echo the JSON-encoded body back as data."""
    def fake_send(self, request):
        decoded_body = json.loads(force_text(request.body))
        document = Document(content={'data': decoded_body})
        return MockResponse(CoreJSONCodec().encode(document))

    monkeypatch.setattr(requests.Session, 'send', fake_send)
    link = Link(url='http://example.org', action='post')
    result = http.transition(link, decoders, params={'example': 'abc'})
    assert result == {'data': {'example': 'abc'}}
def test_delete(monkeypatch, http):
    """A DELETE transition with an empty body should yield ``None``."""
    def fake_send(self, request):
        return MockResponse(b'')

    monkeypatch.setattr(requests.Session, 'send', fake_send)
    link = Link(url='http://example.org', action='delete')
    assert http.transition(link, decoders) is None
| 28.017391 | 76 | 0.665115 |
b75ce399fa50138c427710dd55f8e57a5a578109 | 6,052 | py | Python | acs/acs/Device/Model/AndroidDevice/Agent/AcsAgentLegacy.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | [
"Apache-2.0"
] | null | null | null | acs/acs/Device/Model/AndroidDevice/Agent/AcsAgentLegacy.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | [
"Apache-2.0"
] | null | null | null | acs/acs/Device/Model/AndroidDevice/Agent/AcsAgentLegacy.py | wangji1/test-framework-and-suites-for-android | 59564f826f205fe7fab64f45b88b1a6dde6900af | [
"Apache-2.0"
] | null | null | null | """
:copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described here in and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
:organization: INTEL MCG PSI
:summary: This file implements the class that handles the ACS agent for legacy Android versions (below KitKat)
:since: 09/07/2013
:author: vdechefd
"""
import time
from acs.Device.Common.Common import Global
from acs.UtilitiesFWK.Utilities import AcsConstants
from acs.ErrorHandling.DeviceException import DeviceException
from acs.Device.Model.AndroidDevice.Agent.IAgent import IAgent
class AcsAgentLegacy(IAgent):
    """
    Handle the ACS embedded agent (v2) lifecycle on legacy Android devices
    (pre-KitKat): version lookup, start/stop and run-state checks, all
    driven through adb commands issued on the device object.
    """
    def __init__(self, device):
        """
        Constructor
        :type device: object
        :param device: device instance this agent is attached to; its logger,
            adb command runner and configuration accessors are reused here
        """
        self._logger = device._logger
        self._device = device
        # Package / service names of the ACS agent v2 APK on the device
        self._agentv2_pkg = "com.intel.acs.agentv2"
        self._agentv2_service = "{0}.service".format(self._agentv2_pkg)
        # Lazily resolved by update_version(); None means "not queried yet"
        self._agentv2_version = None
        self.is_started = False
    @property
    def version(self):
        """
        Get the ACS Agent version, querying the device on first access.
        :rtype: str
        :return: ACS agent v2 version, or AcsConstants.NOT_INSTALLED
        """
        if self._agentv2_version is None:
            self.update_version()
        # return V2, as agent V1 will soon be removed
        return self._agentv2_version
    def update_version(self):
        """
        Refresh the cached ACS agent version from the APK deployed on device.
        :return: None
        """
        # Get agent version (None is returned when the APK is absent)
        agentv2_version = self._device.get_apk_version(self._agentv2_pkg)
        self._agentv2_version = agentv2_version if agentv2_version is not None else AcsConstants.NOT_INSTALLED
    def wait_for_agent_started(self, timeout=None):
        """
        Wait for acs agent to start before timeout.
        If no timeout is set, it will get value of device parameter **acsAgentStartTimeout**.
        :type timeout: float
        :param timeout: Value before which agent shall be started before.
        :rtype: bool
        :return: True if agent is started, False otherwise
        """
        is_started = False
        if not timeout:
            # 0/None both fall back to the configured default (60 s)
            timeout = self._device.get_config("acsAgentStartTimeout", 60.0, float)
        # check that service is ready
        uecmd_phonesystem = self._device.get_uecmd("PhoneSystem")
        end_time = time.time() + timeout
        while time.time() < end_time and not is_started:
            # wait before checking service start
            time.sleep(self._device.get_config("waitBetweenCmd", 5.0, float))
            # check that service is ready
            try:
                is_started = uecmd_phonesystem.is_acs_agent_ready()
            except DeviceException:
                # No answer from the device: keep polling until timeout
                is_started = False
        # Update is_started attribute
        self.is_started = is_started
        return self.is_started
    def start(self):
        """
        Try to start the Android embedded agent.
        :rtype: boolean
        :return: True if agent is started, False otherwise
        """
        self._logger.debug("Trying to start ACS agent V2 ...")
        cmd = "adb shell am start -n com.intel.acs.agentv2/.common.framework.ServiceStarterActivity"
        output = self._device.run_cmd(cmd, self._device.get_uecmd_timeout(), force_execution=True)
        # NOTE(review): identity check assumes Global.SUCCESS is a singleton — confirm
        return output[0] is Global.SUCCESS
    def stop(self):
        """
        Try to stop the Android embedded Service.
        :rtype: boolean
        :return: True if AcsAgentService is stopped, False otherwise
        """
        # stop service first via broadcast, then kill the process
        cmd = "adb shell am broadcast -a intel.intent.action.acs.stop_service"
        self._device.run_cmd(cmd, self._device.get_uecmd_timeout(), force_execution=True)
        time.sleep(0.5)
        # kill agent process
        cmd = "adb shell am force-stop com.intel.acs.agentv2"
        output = self._device.run_cmd(cmd, 2, force_execution=True)
        return output[0] is Global.SUCCESS
    def is_running(self):
        """
        Check if agent is running
        :rtype: boolean
        :return: True if Acs agent is running, False otherwise
        """
        # grep the device process list for the agent service
        cmd = "adb shell ps | grep {0}".format(self._agentv2_pkg)
        result, output_msg = self._device.run_cmd(cmd,
                                                  self._device.get_uecmd_timeout(),
                                                  force_execution=True,
                                                  wait_for_response=True)
        return result == 0 and self._agentv2_service in str(output_msg)
    def get_intent_action_cmd(self, is_system=False):
        """
        Get intent action command line
        :param is_system: boolean to notify that the command is system or user.
        By default we consider that the command is user
        :return: string containing the intent action command line
        """
        # The same intent is used regardless of is_system on legacy agents
        return "intel.intent.action.acs.cmd"
| 39.298701 | 110 | 0.660773 |
4be28f9e06e90b0f72437d76997f6cf59ec018e5 | 842 | py | Python | Python/798.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | Python/798.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | Python/798.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# @Date : 2022/3/9
# @Filename : 798.py
# @Tag :
# @Autor : LI YAO
# @Difficulty :
from heapq import *
from typing import List, Optional
from collections import defaultdict, deque, Counter
from itertools import product,combinations,permutations,accumulate
from random import choice, randrange,randint
class ListNode:
    """Singly linked-list node (scaffolding kept from the LeetCode template)."""

    def __init__(self, val=0, next=None):
        """Store the node payload and the reference to the following node."""
        self.val = val
        self.next = next
# -------------------------
class Solution:
    def bestRotation(self, nums: List[int]) -> int:
        """Return the rotation index k maximising the score of nums (LC 798).

        Rotating by k moves nums[i] to position (i - k) % n; an element
        scores one point when its value is <= its new index.  Each element
        therefore scores on one circular interval of k values, so the best
        k can be found with a difference array in O(n) instead of the
        original O(n^2) brute force (which TLEs on large inputs).

        :param nums: list with 0 <= nums[i] < len(nums)
        :return: smallest k achieving the maximum score
        """
        n = len(nums)
        diff = [0] * (n + 1)
        for i, v in enumerate(nums):
            # nums[i] scores for k in the circular range [lo, hi]: its new
            # index (i - k) % n must be >= v, i.e. n - v values of k.
            lo = (i + 1) % n
            hi = (i - v) % n
            if lo <= hi:
                diff[lo] += 1
                diff[hi + 1] -= 1
            else:
                # interval wraps past 0: split into [0, hi] and [lo, n-1]
                diff[0] += 1
                diff[hi + 1] -= 1
                diff[lo] += 1
                diff[n] -= 1
        best_k, best_score, score = 0, -1, 0
        for k in range(n):
            score += diff[k]
            if score > best_score:  # strict '>' keeps the smallest k on ties
                best_score, best_k = score, k
        return best_k
# -------------------------
if __name__ == '__main__':
    # Ad-hoc check against the LeetCode 798 example (expected output: 3);
    # guarded so importing this module does not trigger the print.
    a = Solution()
    n = [2, 3, 1, 4, 0]
    print(a.bestRotation(n))
4b096ebd49d103e7c9a20f49b06f8519c14b1a8e | 3,515 | py | Python | test/functional/feature_loadblock.py | ruvcoindev/ruvchain | 9c7c17823b1efb612e0adeb92f0cb1c62859f840 | [
"MIT"
] | 1 | 2022-03-18T17:26:58.000Z | 2022-03-18T17:26:58.000Z | test/functional/feature_loadblock.py | ruvcoindev/ruvchain | 9c7c17823b1efb612e0adeb92f0cb1c62859f840 | [
"MIT"
] | null | null | null | test/functional/feature_loadblock.py | ruvcoindev/ruvchain | 9c7c17823b1efb612e0adeb92f0cb1c62859f840 | [
"MIT"
] | 1 | 2022-01-17T09:17:52.000Z | 2022-01-17T09:17:52.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test loadblock option
Test the option to start a node with the option loadblock which loads
a serialized blockchain from a file (usually called bootstrap.dat).
To generate that file this test uses the helper scripts available
in contrib/linearize.
"""
import os
import subprocess
import sys
import tempfile
import urllib
from test_framework.test_framework import RuvchainTestFramework
from test_framework.util import assert_equal
class LoadblockTest(RuvchainTestFramework):
    """Functional test for the -loadblock start-up option.

    Node 0 mines 100 blocks, which are serialized into a bootstrap.dat file
    using the contrib/linearize helper scripts; node 1 (kept offline so it
    cannot sync over the network) is then restarted with -loadblock and must
    import the full chain from that file.
    """
    def set_test_params(self):
        # Two nodes from a clean chain: one to mine/export, one to import.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.supports_cli = False
    def run_test(self):
        # Disable node 1's networking so the only sync path is the bootstrap file.
        self.nodes[1].setnetworkactive(state=False)
        self.nodes[0].generate(100)
        # Parsing the url of our node to get settings for config file
        data_dir = self.nodes[0].datadir
        node_url = urllib.parse.urlparse(self.nodes[0].url)
        cfg_file = os.path.join(data_dir, "linearize.cfg")
        bootstrap_file = os.path.join(self.options.tmpdir, "bootstrap.dat")
        genesis_block = self.nodes[0].getblockhash(0)
        blocks_dir = os.path.join(data_dir, self.chain, "blocks")
        # delete=False: the file's path is handed to the linearize scripts and
        # must outlive this handle.
        hash_list = tempfile.NamedTemporaryFile(dir=data_dir,
                                                mode='w',
                                                delete=False,
                                                encoding="utf-8")
        self.log.info("Create linearization config file")
        with open(cfg_file, "a", encoding="utf-8") as cfg:
            cfg.write("datadir={}\n".format(data_dir))
            cfg.write("rpcuser={}\n".format(node_url.username))
            cfg.write("rpcpassword={}\n".format(node_url.password))
            cfg.write("port={}\n".format(node_url.port))
            cfg.write("host={}\n".format(node_url.hostname))
            cfg.write("output_file={}\n".format(bootstrap_file))
            cfg.write("max_height=100\n")
            cfg.write("netmagic=fabfb5da\n")
            cfg.write("input={}\n".format(blocks_dir))
            cfg.write("genesis={}\n".format(genesis_block))
            cfg.write("hashlist={}\n".format(hash_list.name))
        base_dir = self.config["environment"]["SRCDIR"]
        linearize_dir = os.path.join(base_dir, "contrib", "linearize")
        self.log.info("Run linearization of block hashes")
        linearize_hashes_file = os.path.join(linearize_dir, "linearize-hashes.py")
        # First pass writes the ordered block-hash list consumed by the data pass.
        subprocess.run([sys.executable, linearize_hashes_file, cfg_file],
                       stdout=hash_list,
                       check=True)
        self.log.info("Run linearization of block data")
        linearize_data_file = os.path.join(linearize_dir, "linearize-data.py")
        subprocess.run([sys.executable, linearize_data_file, cfg_file],
                       check=True)
        self.log.info("Restart second, unsynced node with bootstrap file")
        self.restart_node(1, extra_args=["-loadblock=" + bootstrap_file])
        assert_equal(self.nodes[1].getblockcount(), 100)  # start_node is blocking on all block files being imported
        assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100)
        assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
if __name__ == '__main__':
    # Entry point: run the functional test directly via the framework runner.
    LoadblockTest().main()
| 42.349398 | 116 | 0.646373 |
8a3c1d8f23c2707a21e7cc3245b95e791ce75491 | 35,402 | py | Python | tensorflow2_implementations/CIFAR_crossentropy/federated_learning_keras_consensus_FL_threads_CIFAR_crossentropy_gradients_exchange.py | KoalaYan/federated | f69bd74c164172da24a6b5866c3f0687604d5501 | [
"MIT"
] | 12 | 2019-12-01T13:17:18.000Z | 2021-08-23T07:25:59.000Z | tensorflow2_implementations/CIFAR_crossentropy/federated_learning_keras_consensus_FL_threads_CIFAR_crossentropy_gradients_exchange.py | KoalaYan/federated | f69bd74c164172da24a6b5866c3f0687604d5501 | [
"MIT"
] | 3 | 2020-03-30T06:04:15.000Z | 2020-06-19T10:18:33.000Z | tensorflow2_implementations/CIFAR_crossentropy/federated_learning_keras_consensus_FL_threads_CIFAR_crossentropy_gradients_exchange.py | KoalaYan/federated | f69bd74c164172da24a6b5866c3f0687604d5501 | [
"MIT"
] | 6 | 2020-05-06T15:24:26.000Z | 2022-03-28T10:02:46.000Z | from DataSets import CIFARData
from DataSets_task import CIFARData_task
from consensus.consensus_v4 import CFA_process
from consensus.parameter_server_v2 import Parameter_Server
# use only for consensus , PS only for energy efficiency
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=0, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=1, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.001, help="sets the learning rate for all setups", type=float)
parser.add_argument('-mu2', default=0.01, help="sets the gradient update rate", type=float)
parser.add_argument('-eps', default=0.5, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-eps_grads', default=0.5, help="sets the mixing parameters for gradient combining (CFA-GE)", type=float)
parser.add_argument('-target', default=0.2, help="sets the target loss to stop federation", type=float)
parser.add_argument('-K', default=30, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-Ka_consensus', default=30, help="sets the number of active devices for consensus", type=int)
parser.add_argument('-samp', default=500, help="sets the number samples per device", type=int)
parser.add_argument('-noniid_assignment', default=1, help=" set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-gradients', default=1, help=" set 0 to disable gradient exchange, 1 to enable", type=int)
parser.add_argument('-run', default=0, help=" set the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help=" set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=5, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=100, help="sets the batch size per learning round", type=int)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for default graph, >0 uses the input graph in vGraph.mat, and choose one graph from the available adjacency matrices", type=int)
parser.add_argument('-modelselection', default=0, help="sets the model: 0 for lenet-1", type=int)
args = parser.parse_args()
devices = args.K # NUMBER OF DEVICES
active_devices_per_round = args.Ka
max_epochs = 1000
condition = args.modelselection
# Mutually exclusive learning modes: consensus (CFA) takes precedence over
# the parameter server (FedAvg); with both disabled, learning is centralized.
if args.consensus == 1:
    federated = True
    parameter_server = False
elif args.PS == 1:
    federated = False
    parameter_server = True
else: # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
    federated = False
    parameter_server = False
################# consensus, create the scheduling function ################
# scheduling_tx[d, k] == 1 marks device d as active in round k;
# indexes_tx[:, k] lists the active device ids for round k.
scheduling_tx = np.zeros((devices, max_epochs*2), dtype=int)
if parameter_server and not federated:
    indexes_tx = np.zeros((args.Ka, max_epochs*2), dtype=int)
    for k in range(max_epochs*2):
        # inds = np.random.choice(devices, args.Ka, replace=False)
        # Deterministic sliding window of Ka consecutive device ids per round.
        sr = devices - args.Ka + 1
        sr2 = k % sr
        inds = np.arange(sr2, args.Ka + sr2)
        scheduling_tx[inds, k] = 1
        indexes_tx[:,k] = inds
elif not parameter_server and federated:
    indexes_tx = np.zeros((args.Ka_consensus, max_epochs*2), dtype=int)
    for k in range(max_epochs*2):
        # inds = np.random.choice(devices, args.Ka_consensus, replace=False)
        # Same sliding-window policy, sized by the consensus-active count.
        sr = devices - args.Ka_consensus + 1
        sr2 = k % sr
        inds = np.arange(sr2, args.Ka_consensus + sr2)
        scheduling_tx[inds, k] = 1
        indexes_tx[:, k] = inds
###########################################################################
active_devices_per_round = devices
target_loss = args.target
# Configuration parameters for the whole setup
seed = 42
# batch_size = 5 # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE
validation_train = 50000 # VALIDATION and training DATASET size
validation_test = 10000
if (training_set_per_device > validation_train/args.K):
training_set_per_device = math.floor(validation_train/args.K)
print(training_set_per_device)
if batch_size > training_set_per_device:
batch_size = training_set_per_device
# if batch_size*number_of_batches > training_set_per_device:
# number_of_batches = math.floor(training_set_per_device/batch_size)
# number_of_batches = int(training_set_per_device/batch_size)
# number_of_batches = args.batches
number_of_batches_for_validation = int(validation_test/batch_size)
print("Number of batches for learning {}".format(number_of_batches))
max_lag = 1 # max consensus delay, in epochs (e.g. 2 = at most 2 epochs stale)
refresh_server = 1 # refresh server updates (in sec)
n_outputs = 10 # 6 classes
validation_start = 1 # start validation in epochs
# Using huber loss for stability
loss_function = keras.losses.Huber()
# save scheduling format
# dict_0 = {"scheduling": scheduling_tx, "devices_scheduling": indexes_tx}
# sio.savemat("results/matlab/CFA_scheduling_devices_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}.mat".format(devices, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run), dict_0)
# def get_noniid_data(total_training_size, devices, batch_size):
# samples = np.random.random_integers(batch_size, total_training_size - batch_size * (devices - 1),
# devices) # create random numbers
# samples = samples / np.sum(samples, axis=0) * total_training_size # force them to sum to totals
# # Ignore the following if you don't need integers
# samples = np.round(samples) # transform them into integers
# remainings = total_training_size - np.sum(samples, axis=0) # check if there are corrections to be done
# step = 1 if remainings > 0 else -1
# while remainings != 0:
# i = np.random.randint(devices)
# if samples[i] + step >= 0:
# samples[i] += step
# remainings -= step
# return samples
####
def preprocess_observation(obs, batch_size):
    """Cast a raw CIFAR observation batch to float and shape it for the CNN.

    :param obs: raw image data, array-like with batch_size*32*32*3 elements
    :param batch_size: number of images in the batch
    :return: ``np.ndarray`` of shape (batch_size, 32, 32, 3), dtype float64
    """
    # np.float was removed in NumPy 1.24; the builtin ``float`` is the
    # documented replacement and yields the same float64 dtype.
    img = obs.astype(float)
    return img.reshape(batch_size, 32, 32, 3)
def create_q_model():
    """Build the CIFAR-10 classifier selected by the module-level ``condition``.

    condition == 0: VGG-style 1-block CNN with batch normalization
    condition == 1: VGG-style 2-block CNN (he_uniform init, no batch norm)
    otherwise:      VGG-style 3-block CNN (he_uniform init, no batch norm)

    :return: an uncompiled ``keras.Model`` mapping (32, 32, 3) images to
        ``n_outputs`` softmax class probabilities
    """
    # Network defined by the Deepmind paper
    # NOTE(review): the comment above looks inherited from a DQN example —
    # the branches below are VGG-style image classifiers, not Q-networks.
    inputs = layers.Input(shape=(32, 32, 3,))
    if condition == 0:
        # VGG 1 BLOCK
        layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu",
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(inputs)
        layer11 = layers.BatchNormalization()(layer1)
        layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu",
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(layer11)
        layer21 = layers.BatchNormalization()(layer2)
        layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer21)
        layer4 = layers.Flatten()(layer3)
        layer5 = layers.Dense(128, activation="relu")(layer4)
        layer51 = layers.BatchNormalization()(layer5)
        # activation given as a callable here vs. the string "softmax" below;
        # both resolve to the same softmax activation
        classification = layers.Dense(n_outputs, activation=tf.keras.activations.softmax)(layer51)
    elif condition == 1:
        # VGG 2 BLOCK
        layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            inputs)
        layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer1)
        layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
        layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer3)
        layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer4)
        layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5)
        layer7 = layers.Flatten()(layer6)
        layer8 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer7)
        classification = layers.Dense(n_outputs, activation="softmax")(layer8)
    else:
        # VGG 3 BLOCK
        layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            inputs)
        layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer1)
        layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
        layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer3)
        layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer4)
        layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5)
        layer7 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer6)
        layer8 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
                               padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
            layer7)
        layer9 = layers.MaxPooling2D(pool_size=(2, 2))(layer8)
        layer10 = layers.Flatten()(layer9)
        layer11 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer10)
        classification = layers.Dense(n_outputs, activation="softmax")(layer11)
    return keras.Model(inputs=inputs, outputs=classification)
def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1):
    """Run the FedAvg parameter-server loop (intended for its own thread).

    Publishes the global model to 'results/model_global.npy' and the global
    epoch counter to 'results/epoch_global.npy'; every ``refresh_server``
    seconds it aggregates the scheduled devices' models, stopping once every
    device has dropped its final .mat result file.

    :param devices: total number of devices in the federation
    :param active_devices_per_round: number of devices aggregated per round
    :param federated: unused in this body; kept for a signature parallel to
        the device worker
    :param refresh_server: seconds between global-model refreshes
    """
    model_global = create_q_model()
    model_parameters_initial = np.asarray(model_global.get_weights())
    parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round, indexes_tx)
    global_target_model = 'results/model_global.npy'
    global_epoch = 'results/epoch_global.npy'
    epoch_count = 0
    # Seed the shared files with the initial model so workers can start.
    np.save(global_target_model, model_parameters_initial)
    np.save(global_epoch, epoch_count)
    pause(2) # wait for neighbors
    while True:
        pause(refresh_server) # refresh global model on every xx seconds
        # Each finished device writes one .mat file: a full set means the
        # whole federation is done and the server can stop.
        fileList = glob.glob('*.mat', recursive=False)
        if len(fileList) == devices:
            # stop the server
            break
        else:
            # aggregation_type=0: plain averaging over the scheduled devices
            np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch_count, aggregation_type=0))
            epoch_count += 1
            np.save(global_epoch, epoch_count)
# execute for each deployed device
def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution):
pause(5) # PS server (if any) starts first
checkpointpath1 = 'results/model{}.h5'.format(device_index)
outfile = 'results/dump_train_variables{}.npz'.format(device_index)
outfile_models = 'results/dump_train_model{}.npy'.format(device_index)
outfile_models_grad = 'results/dump_train_grad{}.npy'.format(device_index)
global_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
#np.random.seed(1)
#tf.random.set_seed(1) # common initialization
learning_rate = args.mu
learning_rate_local = learning_rate
B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices)
Probabilities = B[device_index, :]/(devices - 1)
training_signal = False
# check for backup variables on start
if os.path.isfile(checkpointpath1):
train_start = False
# backup the model and the model target
model = models.load_model(checkpointpath1)
model_transmitted = create_q_model()
data_history = []
label_history = []
local_model_parameters = np.load(outfile_models, allow_pickle=True)
model.set_weights(local_model_parameters.tolist())
dump_vars = np.load(outfile, allow_pickle=True)
frame_count = dump_vars['frame_count']
epoch_loss_history = dump_vars['epoch_loss_history'].tolist()
running_loss = np.mean(epoch_loss_history[-5:])
epoch_count = dump_vars['epoch_count']
else:
train_start = True
model = create_q_model()
model_transmitted = create_q_model()
data_history = []
label_history = []
frame_count = 0
# Experience replay buffers
epoch_loss_history = []
epoch_count = 0
running_loss = math.inf
if parameter_server:
epoch_global = 0
training_end = False
#a = model.get_weights()
# set an arbitrary optimizer, here Adam is used
optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
#optimizer2 = keras.optimizers.SGD(learning_rate=args.mu2)
optimizer2 = keras.optimizers.Adam(learning_rate=args.mu2, clipnorm=1.0)
# create a data object (here radar data)
# start = time.time()
if args.noniid_assignment == 1:
data_handle = CIFARData_task(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
else:
data_handle = CIFARData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
# end = time.time()
# time_count = (end - start)
# print(Training time"time_count)
# create a consensus object
cfa_consensus = CFA_process(devices, device_index, args.N)
while True: # Run until solved
# collect 1 batch
frame_count += 1
obs, labels = data_handle.getTrainingData(batch_size)
data_batch = preprocess_observation(obs, batch_size)
# Save data and labels in the current learning session
data_history.append(data_batch)
label_history.append(labels)
if frame_count % number_of_batches == 0:
if not parameter_server:
epoch_count += 1
# check scheduling for federated
if federated:
if epoch_count == 1 or scheduling_tx[device_index, epoch_count] == 1:
training_signal = False
else:
# stop all computing, just save the previous model
training_signal = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# Local learning update every "number of batches" batches
# time_count = 0
if frame_count % number_of_batches == 0 and not training_signal:
# run local batches
for i in range(number_of_batches):
start = time.time()
data_sample = np.array(data_history[i])
label_sample = np.array(label_history[i])
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
with tf.GradientTape() as tape:
# Train the model on data samples
classes = model(data_sample, training=False)
# Calculate loss
loss = tf.reduce_mean(-tf.reduce_sum(masks * tf.math.log(classes), axis=1))
# Backpropagation
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
#end = time.time()
#time_count = time_count + (end-start)/number_of_batches
del data_history
del label_history
data_history = []
label_history = []
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
cfa_consensus.update_local_model(model_weights)
grads_v = []
for d in range(len(grads)):
grads_v.append(grads[d].numpy())
grads_v = np.asarray(grads_v)
cfa_consensus.update_local_gradient(grads_v)
# compute gradients for selected neighbors in get_tx_connectvity, obtain a new test observation from local database
obs_t, labels_t = data_handle.getTrainingData(batch_size)
data_batch_t = preprocess_observation(obs_t, batch_size)
masks_t = tf.one_hot(labels_t, n_outputs)
gradient_neighbor = cfa_consensus.get_tx_connectivity(device_index, args.N, devices)
outfile_n = 'results/dump_train_variables{}.npz'.format(gradient_neighbor)
outfile_models_n = 'results/dump_train_model{}.npy'.format(gradient_neighbor)
neighbor_model_for_gradient, success = cfa_consensus.get_neighbor_weights(epoch_count, outfile_n, outfile_models_n, epoch=0, max_lag=1)
if success:
model_transmitted.set_weights(neighbor_model_for_gradient.tolist())
else:
print("failed retrieving the model for gradient computation")
with tf.GradientTape() as tape2:
# Train the model on data samples
classes = model_transmitted(data_batch_t, training=False)
# Calculate loss
# loss = loss_function(labels_t, class_v)
loss = tf.reduce_mean(-tf.reduce_sum(masks_t * tf.math.log(classes), axis=1))
# getting and save neighbor gradients
grads_t = tape2.gradient(loss, model_transmitted.trainable_variables)
grads_v = []
for d in range(len(grads_t)):
grads_v.append(grads_t[d].numpy())
grads_v = np.asarray(grads_v)
np.save(outfile_models_grad, grads_v)
np.random.seed(1)
tf.random.set_seed(1) # common initialization
if not train_start:
if federated and not training_signal:
eps_c = args.eps
# apply consensus for model parameter
neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
#if args.gradients == 0 or running_loss < 0.5:
if args.gradients == 0:
# random selection of neighor
# neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N, replace=False) # choose neighbor
# while neighbor == device_index:
# neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N,
# replace=False) # choose neighbor
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
if cfa_consensus.getTrainingStatusFromNeightbor():
training_signal = True # stop local learning, just do validation
else:
# compute gradients as usual
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
print("Applying gradient updates...")
# model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
model_averaging = cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag)
model.set_weights(model_averaging)
if cfa_consensus.getTrainingStatusFromNeightbor():
# model.set_weights(model_averaging)
training_signal = True # stop local learning, just do validation
else:
grads = cfa_consensus.federated_grads_computing(neighbor, args.N, epoch_count, args.eps_grads, max_lag)
optimizer2.apply_gradients(zip(grads, model.trainable_variables))
else:
print("Warm up")
train_start = False
del model_weights
#start = time.time()
# validation tool for device 'device_index'
if epoch_count > validation_start and frame_count % number_of_batches == 0:
avg_cost = 0.
for i in range(number_of_batches_for_validation):
obs_valid, labels_valid = data_handle.getTestData(batch_size, i)
# obs_valid, labels_valid = data_handle.getRandomTestData(batch_size)
data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size)
data_sample = np.array(data_valid)
label_sample = np.array(labels_valid)
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
classes = model(data_sample, training=False)
# Calculate loss
loss = tf.reduce_mean(-tf.reduce_sum(masks * tf.math.log(classes), axis=1)).numpy()
avg_cost += loss / number_of_batches_for_validation # Training loss
epoch_loss_history.append(avg_cost)
print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count,
avg_cost))
# mean loss for last 5 epochs
running_loss = np.mean(epoch_loss_history[-1:])
#end = time.time()
#time_count = (end - start)
#print(time_count)
if running_loss < target_loss: # Condition to consider the task solved
print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}_gradients{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution, args.gradients), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size, args.noniid_assignment,args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size), dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if epoch_count > max_epochs: # stop simulation
print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}_gradients{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution, args.gradients), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if __name__ == "__main__":
    if args.resume == 0: # clear all files
        # DELETE TEMPORARY CACHE FILES
        # Remove cached model weights (*.npy), saved Keras models (*.h5),
        # checkpoint archives (*.npz) and exported MATLAB results (*.mat)
        # so a fresh (non-resumed) run starts from a clean slate.
        fileList = glob.glob('results/*.npy', recursive=False)
        print(fileList)
        for filePath in fileList:
            try:
                os.remove(filePath)
            except OSError:
                print("Error while deleting file")
        fileList = glob.glob('results/*.h5', recursive=False)
        print(fileList)
        for filePath in fileList:
            try:
                os.remove(filePath)
            except OSError:
                print("Error while deleting file")
        fileList = glob.glob('results/*.npz', recursive=False)
        print(fileList)
        for filePath in fileList:
            try:
                os.remove(filePath)
            except OSError:
                print("Error while deleting file")
        fileList = glob.glob('*.mat', recursive=False)
        print(fileList)
        for filePath in fileList:
            try:
                os.remove(filePath)
            except OSError:
                print("Error while deleting file")
    # main loop for multiprocessing
    t = []
    ############# enable consensus based federation #######################
    # federated = False
    # federated = True
    ########################################################
    ##################### enable parameter server ##############
    # parameter_server = False
    server_index = devices
    # parameter_server = True
    #########################################################
    samples = np.zeros(devices) # training samples per device
    # Uniform data assignment: every device gets training_set_per_device
    # samples.  The commented-out code below implements alternative
    # balanced / non-iid assignments.
    # NOTE(review): loop variable 'id' shadows the builtin; harmless here
    # but worth renaming.
    for id in range(devices):
        # samples[id]=math.floor(w[id]*validation_train)
        # samples[id] = math.floor(balancing_vect[id]*fraction_training)
        samples[id] = training_set_per_device
    # samples = int(fraction_training/devices) # training samples per device
    # ######################### Create a non-iid assignment ##########################
    # if args.noniid_assignment == 1:
    #     total_training_size = training_set_per_device * devices
    #     samples = get_noniid_data(total_training_size, devices, batch_size)
    #     while np.min(samples) < batch_size:
    #         samples = get_noniid_data(total_training_size, devices, batch_size)
    #############################################################################
    print(samples)
    #################################### code testing CL learning (0: data center)
    # federated = False
    # parameter_server = False
    # processData(0, validation_train, federated, validation_train, number_of_batches, parameter_server)
    ######################################################################################
    # Fixed seeds so every device starts from the same model initialization.
    np.random.seed(1)
    tf.random.set_seed(1) # common initialization
    if federated or parameter_server:
        # One worker thread per device; each trains on a disjoint slice of
        # the dataset, beginning at start_index (cumulative sum of the
        # previous devices' sample counts).
        for ii in range(devices):
            # position start
            if ii == 0:
                start_index = 0
            else:
                start_index = start_index + int(samples[ii-1])
            t.append(threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples)))
            t[ii].start()
        # last process is for the target server
        if parameter_server:
            print("Target server starting with active devices {}".format(active_devices_per_round))
            t.append(threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated)))
            t[devices].start()
    else: # run centralized learning on device 0 (data center)
        processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples)
    exit(0)
| 52.447407 | 204 | 0.630077 |
a72c29bad345cfac120ac6f04f850d968023e88f | 2,462 | py | Python | macros/util.py | pedromorgan/mkdocs_macros_plugin | 7615f29fccf1f85daa22c4000e5ffaa584ea3a08 | [
"MIT"
] | null | null | null | macros/util.py | pedromorgan/mkdocs_macros_plugin | 7615f29fccf1f85daa22c4000e5ffaa584ea3a08 | [
"MIT"
] | null | null | null | macros/util.py | pedromorgan/mkdocs_macros_plugin | 7615f29fccf1f85daa22c4000e5ffaa584ea3a08 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# ------------------------------------------
# Utilities
# ------------------------------------------
from termcolor import colored
from copy import deepcopy
def trace(*args, **kwargs):
    """Print *args* like print(), highlighting the first argument.

    The first positional argument is prefixed with "[macros] " and
    rendered in green; remaining positional and keyword arguments are
    forwarded to print() unchanged.
    """
    banner = colored("[macros] " + args[0], 'green')
    print(banner, *args[1:], **kwargs)
def update(d1, d2):
    """
    Recursively update object ``d1`` with object ``d2``, in place.

    Behaviour:
      - If both arguments are dictionaries, merge key by key:
        keys whose values are both dictionaries are merged recursively;
        any other key is overwritten with a deep copy of the new value.
      - If either argument is not a dictionary, nothing can be replaced
        in place for the caller, so the call is a no-op.  (Bug fix: the
        old code rebound the local name ``d1 = deepcopy(d2)`` here,
        which never reached the caller — e.g. merging two lists under
        the same key silently did nothing.  Replacement is now handled
        one level up, at the key assignment.)
    """
    if isinstance(d1, dict) and isinstance(d2, dict):
        for key, value in d2.items():
            if key in d1 and isinstance(d1[key], dict) and isinstance(value, dict):
                # Both sides are dicts: merge recursively.
                update(d1[key], value)
            else:
                # New key, or non-dict on either side: replace with a
                # deep copy so d1 never aliases d2's substructures.
                d1[key] = deepcopy(value)
class SuperDict(dict):
    """
    A dict whose items can also be read and written with attribute
    (dot) notation:  ``a.foo`` is equivalent to ``a['foo']``.

    Standard dict methods keep their normal meaning, since
    ``__getattr__`` is only consulted when regular attribute lookup
    fails.
    """

    def __getattr__(self, name):
        "Map attribute reads onto item lookups (a.foo -> a['foo'])."
        try:
            return self[name]
        except KeyError:
            # Bug fix: the original message was missing the closing quote.
            raise AttributeError("Cannot find attribute '%s'" % name)

    def __setattr__(self, name, value):
        "Map attribute writes onto item assignments (a.foo = v)."
        self[name] = value
if __name__ == '__main__':
    # Ad-hoc self-test for update(): exercise each merge case in turn.

    # Overlapping scalar keys are overwritten, new keys are added.
    merged = {'foo': 4, 'bar': 5}
    update(merged, {'foo': 5, 'baz': 6})
    print(merged)
    assert merged['foo'] == 5
    assert merged['baz'] == 6

    # A list value is attached for a previously missing key.
    merged = {'foo': 4, 'bar': 5}
    update(merged, {'foo': 5, 'baz': ['hello', 'world']})
    print(merged)
    assert merged['baz'] == ['hello', 'world']

    # Nested dictionaries are merged key by key.
    merged = {'foo': 4, 'bar': {'first': 1, 'second': 2}}
    update(merged, {'foo': 5, 'bar': {'first': 2, 'third': 3}})
    print(merged)
    assert merged['bar'] == {'first': 2, 'second': 2, 'third': 3}

    # A dict may replace a scalar leaf deep inside the tree.
    expected = {'hello': 5}
    update(merged, {'bar': {'third': expected}})
    print(merged)
    assert merged['bar']['third'] == expected

    # A whole sub-dict is attached for a missing key.
    expected = {'first': 2, 'third': 3}
    merged = {'foo': 4}
    update(merged, {'bar': expected})
    print(merged)
    assert merged['bar'] == expected
| 25.122449 | 72 | 0.506093 |
7cbbfd0801d6f6cf7848d7359812f162d6d984d5 | 282 | py | Python | blog/migrations/0009_delete_profile.py | ShagiakhmedovA/OilCalculator | 66c658428916c4b76dd30a685f82c529d877190d | [
"bzip2-1.0.6"
] | null | null | null | blog/migrations/0009_delete_profile.py | ShagiakhmedovA/OilCalculator | 66c658428916c4b76dd30a685f82c529d877190d | [
"bzip2-1.0.6"
] | 7 | 2021-03-19T09:54:35.000Z | 2022-03-12T00:23:26.000Z | blog/migrations/0009_delete_profile.py | ShagiakhmedovA/demoblog | 66c658428916c4b76dd30a685f82c529d877190d | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.2.7 on 2020-03-16 19:57
from django.db import migrations
# Auto-generated schema migration: removes the Profile model that was
# introduced in migration 0008.
class Migration(migrations.Migration):

    # Must run after the migration that created Profile.
    dependencies = [
        ('blog', '0008_profile'),
    ]

    # Drop the Profile model (and its database table).
    operations = [
        migrations.DeleteModel(
            name='Profile',
        ),
    ]
| 16.588235 | 47 | 0.585106 |
d7fa3e43501d8cb595378d3cbf9187f4270b3425 | 11,635 | py | Python | plugins/Config/plugin.py | ircpuzzles/competitionbot | 19333dd93a1394dc6dce329f947cabe6697b4a99 | [
"BSD-3-Clause"
] | 1 | 2020-04-01T21:53:47.000Z | 2020-04-01T21:53:47.000Z | plugins/Config/plugin.py | ircpuzzles/competitionbot | 19333dd93a1394dc6dce329f947cabe6697b4a99 | [
"BSD-3-Clause"
] | null | null | null | plugins/Config/plugin.py | ircpuzzles/competitionbot | 19333dd93a1394dc6dce329f947cabe6697b4a99 | [
"BSD-3-Clause"
] | null | null | null | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import signal
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
import supybot.ircdb as ircdb
from supybot.commands import *
from supybot.utils.iter import all
import supybot.ircutils as ircutils
import supybot.registry as registry
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Config')
###
# Now, to setup the registry.
###
def getWrapper(name):
    """Return the registry group/value addressed by the dotted *name*.

    Only names rooted at 'supybot' or 'users' are accepted; any other
    root, or a path component that does not resolve, raises
    registry.InvalidRegistryName.
    """
    parts = registry.split(name)
    if not parts or parts[0] not in ('supybot', 'users'):
        raise registry.InvalidRegistryName(name)
    group = getattr(conf, parts[0])
    for part in parts[1:]:
        try:
            group = group.get(part)
        except (registry.NonExistentRegistryEntry,
                registry.InvalidRegistryName):
            # Re-raise under a single exception type so the caller can
            # report one useful error message with the full dotted name.
            raise registry.InvalidRegistryName(name)
    return group
def getCapability(name):
    """Return the capability required to modify configuration variable *name*.

    A registry name containing a channel component is assumed to be a
    channel-specific value and therefore requires that channel's 'op'
    capability; every other name requires 'owner'.
    """
    for part in registry.split(name):
        if ircutils.isChannel(part):
            return ircdb.makeChannelCapability(part, 'op')
    ### Do more later, for specific capabilities/sections.
    return 'owner'
def _reload():
    """Re-read users, ignores, channels and the registry from disk."""
    ircdb.users.reload()
    ircdb.ignores.reload()
    ircdb.channels.reload()
    registry.open_registry(world.registryFilename)
def _hupHandler(sig, frame):
    """SIGHUP handler: re-read all configuration files from disk."""
    log.info('Received SIGHUP, reloading configuration.')
    _reload()

# SIGHUP only exists on POSIX platforms (not on Windows).
if os.name == 'posix':
    signal.signal(signal.SIGHUP, _hupHandler)
def getConfigVar(irc, msg, args, state):
    """Converter: consume args[0] as a configuration variable name.

    Normalizes the name (strips a leading 'conf.', prepends 'supybot.'
    when no known root is given), resolves it to a registry group and
    appends that group to state.args.
    """
    varname = args[0]
    if varname.startswith('conf.'):
        varname = varname[len('conf.'):]
    if not varname.startswith(('supybot', 'users')):
        varname = 'supybot.' + varname
    try:
        wrapper = getWrapper(varname)
    except registry.InvalidRegistryName as e:
        state.errorInvalid(_('configuration variable'), str(e))
    else:
        state.args.append(wrapper)
        del args[0]
addConverter('configVar', getConfigVar)
def getSettableConfigVar(irc, msg, args, state):
    """Converter: like 'configVar', but the group must be writable (have .set)."""
    getConfigVar(irc, msg, args, state)
    group = state.args[-1]
    if not hasattr(group, 'set'):
        state.errorInvalid(_('settable configuration variable'), group._name)
addConverter('settableConfigVar', getSettableConfigVar)
class Config(callbacks.Plugin):
    # IRC front-end to supybot's registry: lets sufficiently privileged
    # users list, search, read, write, reset and export configuration
    # variables at runtime.  Command docstrings below double as the
    # user-visible help text, so they are kept verbatim.

    def callCommand(self, command, irc, msg, *args, **kwargs):
        # Wrap normal command dispatch so a badly-typed value entered by
        # the user surfaces as an IRC error instead of a traceback.
        try:
            super(Config, self).callCommand(command, irc, msg, *args, **kwargs)
        except registry.InvalidRegistryValue as e:
            irc.error(str(e))

    def _list(self, group):
        # Return the sorted child names of *group*, annotated for display:
        #   '#' -> the variable can hold per-channel values;
        #   '@' -> the variable has runtime-added children that are not
        #          all channel entries.
        # Channel-specific leaf entries themselves are skipped.
        # NOTE(review): dict.iteritems() exists only on Python 2 --
        # confirm the supported interpreter before porting this file.
        L = []
        for (vname, v) in group._children.iteritems():
            if hasattr(group, 'channelValue') and group.channelValue and \
               ircutils.isChannel(vname) and not v._children:
                continue
            if hasattr(v, 'channelValue') and v.channelValue:
                vname = '#' + vname
            if v._added and not all(ircutils.isChannel, v._added):
                vname = '@' + vname
            L.append(vname)
        utils.sortBy(str.lower, L)
        return L

    @internationalizeDocstring
    def list(self, irc, msg, args, group):
        """<group>

        Returns the configuration variables available under the given
        configuration <group>. If a variable has values under it, it is
        preceded by an '@' sign. If a variable is a 'ChannelValue', that is,
        it can be separately configured for each channel using the 'channel'
        command in this plugin, it is preceded by an '#' sign.
        """
        L = self._list(group)
        if L:
            irc.reply(format('%L', L))
        else:
            irc.error(_('There don\'t seem to be any values in %s.') %
                      group._name)
    list = wrap(list, ['configVar'])

    @internationalizeDocstring
    def search(self, irc, msg, args, word):
        """<word>

        Searches for <word> in the current configuration variables.
        """
        L = []
        for (name, x) in conf.supybot.getValues(getChildren=True):
            if word in name.lower():
                # Skip channel-specific entries; only report the
                # underlying variables themselves.
                possibleChannel = registry.split(name)[-1]
                if not ircutils.isChannel(possibleChannel):
                    L.append(name)
        if L:
            irc.reply(format('%L', L))
        else:
            irc.reply(_('There were no matching configuration variables.'))
    search = wrap(search, ['lowered']) # XXX compose with withoutSpaces?

    def _getValue(self, irc, msg, group, addChannel=False):
        # Reply with the value of *group*.  When addChannel is set and
        # the message was sent in a channel, show both the global and
        # the channel-specific value.  Private values are only whispered
        # to callers holding the guarding capability.
        value = str(group) or ' '
        if addChannel and irc.isChannel(msg.args[0]) and not irc.nested:
            s = str(group.get(msg.args[0]))
            value = _('Global: %s; %s: %s') % (value, msg.args[0], s)
        if hasattr(group, 'value'):
            if not group._private:
                irc.reply(value)
            else:
                capability = getCapability(group._name)
                if ircdb.checkCapability(msg.prefix, capability):
                    irc.reply(value, private=True)
                else:
                    irc.errorNoCapability(capability)
        else:
            irc.error(_('That registry variable has no value. Use the list '
                        'command in this plugin to see what variables are '
                        'available in this group.'))

    def _setValue(self, irc, msg, group, value):
        # Set *group* to *value*, but only when the caller holds the
        # capability guarding that variable.
        capability = getCapability(group._name)
        if ircdb.checkCapability(msg.prefix, capability):
            # I think callCommand catches exceptions here. Should it?
            group.set(value)
            irc.replySuccess()
        else:
            irc.errorNoCapability(capability)

    @internationalizeDocstring
    def channel(self, irc, msg, args, channel, group, value):
        """[<channel>] <name> [<value>]

        If <value> is given, sets the channel configuration variable for <name>
        to <value> for <channel>. Otherwise, returns the current channel
        configuration value of <name>. <channel> is only necessary if the
        message isn't sent in the channel itself."""
        if not group.channelValue:
            irc.error(_('That configuration variable is not a channel-specific '
                        'configuration variable.'))
            return
        # Narrow the group down to the channel-specific entry.
        group = group.get(channel)
        if value is not None:
            self._setValue(irc, msg, group, value)
        else:
            self._getValue(irc, msg, group)
    channel = wrap(channel, ['channel', 'settableConfigVar',
                             additional('text')])

    @internationalizeDocstring
    def config(self, irc, msg, args, group, value):
        """<name> [<value>]

        If <value> is given, sets the value of <name> to <value>. Otherwise,
        returns the current value of <name>. You may omit the leading
        "supybot." in the name if you so choose.
        """
        if value is not None:
            self._setValue(irc, msg, group, value)
        else:
            self._getValue(irc, msg, group, addChannel=group.channelValue)
    config = wrap(config, ['settableConfigVar', additional('text')])

    @internationalizeDocstring
    def help(self, irc, msg, args, group):
        """<name>

        Returns the description of the configuration variable <name>.
        """
        if hasattr(group, '_help'):
            s = group.help()
            if s:
                if hasattr(group, 'value') and not group._private:
                    s += _(' (Current value: %s)') % group
                irc.reply(s)
            else:
                irc.reply(_('That configuration group exists, but seems to '
                            'have no help. Try "config list %s" to see if it '
                            'has any children values.') % group._name)
        else:
            irc.error(_('%s has no help.') % group._name)
    help = wrap(help, ['configVar'])

    @internationalizeDocstring
    def default(self, irc, msg, args, group):
        """<name>

        Returns the default value of the configuration variable <name>.
        """
        # Instantiate a fresh value of the same type to render the default.
        v = group.__class__(group._default, '')
        irc.reply(str(v))
    default = wrap(default, ['settableConfigVar'])

    @internationalizeDocstring
    def reload(self, irc, msg, args):
        """takes no arguments

        Reloads the various configuration files (user database, channel
        database, registry, etc.).
        """
        _reload() # This was factored out for SIGHUP handling.
        irc.replySuccess()
    reload = wrap(reload, [('checkCapability', 'owner')])

    @internationalizeDocstring
    def export(self, irc, msg, args, filename):
        """<filename>

        Exports the public variables of your configuration to <filename>.
        If you want to show someone your configuration file, but you don't
        want that person to be able to see things like passwords, etc., this
        command will export a "sanitized" configuration file suitable for
        showing publicly.
        """
        registry.close(conf.supybot, filename, private=False)
        irc.replySuccess()
    export = wrap(export, [('checkCapability', 'owner'), 'filename'])

    @internationalizeDocstring
    def setdefault(self, irc, msg, args, group):
        """<name>

        Resets the configuration variable <name> to its default value.
        """
        v = str(group.__class__(group._default, ''))
        self._setValue(irc, msg, group, v)
    setdefault = wrap(setdefault, ['settableConfigVar'])
Class = Config  # presumably picked up by the plugin loader -- standard supybot convention

# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| 38.783333 | 80 | 0.629136 |
350368ea01e9833164637f37b5811a40dc3b6162 | 231 | py | Python | iCTRL/var/mobile/pentest/exploits/iCTRL/IOSPLOIT/modules/commands/macOS/macos_cd.py | CydiaRepoArchiver/noahlittle.github.io | 3ff35ad0d49ccdd9c3691beb43c393e06d1e309a | [
"Apache-2.0"
] | null | null | null | iCTRL/var/mobile/pentest/exploits/iCTRL/IOSPLOIT/modules/commands/macOS/macos_cd.py | CydiaRepoArchiver/noahlittle.github.io | 3ff35ad0d49ccdd9c3691beb43c393e06d1e309a | [
"Apache-2.0"
] | null | null | null | iCTRL/var/mobile/pentest/exploits/iCTRL/IOSPLOIT/modules/commands/macOS/macos_cd.py | CydiaRepoArchiver/noahlittle.github.io | 3ff35ad0d49ccdd9c3691beb43c393e06d1e309a | [
"Apache-2.0"
] | 1 | 2021-01-29T21:46:40.000Z | 2021-01-29T21:46:40.000Z | class payload:
def __init__(self):
self.name = "cd"
self.description = "change directories"
self.type = "native"
self.id = 100
def run(self,session,server,command):
return self.name
| 23.1 | 47 | 0.588745 |
df7ff7a5d7553d1e21afe14daf2fbc0a94332b07 | 186 | py | Python | server/app/settings.py | josenava/meal-calendar | d5182f9b9ee30c02efc8bd22e79bb7a53e778919 | [
"MIT"
] | null | null | null | server/app/settings.py | josenava/meal-calendar | d5182f9b9ee30c02efc8bd22e79bb7a53e778919 | [
"MIT"
] | 5 | 2020-07-24T14:45:31.000Z | 2022-02-27T09:49:55.000Z | server/app/settings.py | josenava/meal-calendar | d5182f9b9ee30c02efc8bd22e79bb7a53e778919 | [
"MIT"
] | null | null | null | import os
# Database connection string; empty default forces explicit configuration.
SQLALCHEMY_DATABASE_URL = os.getenv("POSTGRES_URL", "")
# auth
# JWT signing key -- the fallback is for local development only; never
# deploy with the default value.
SECRET_KEY = os.getenv("SECRET_KEY", "not_super_secret")
# Signature algorithm used when encoding/decoding auth tokens.
AUTH_ALGORITHM = os.getenv("AUTH_ALGORITHM", "HS256")
| 23.25 | 56 | 0.752688 |
27bf3173b563b47a3e23b2514dfad68ec66d5fab | 11,322 | py | Python | ahrs/filters/oleq.py | FedericoCeratto/ahrs | 3a03638841741fb4a13f92f81532ac394e422b67 | [
"MIT"
] | 184 | 2019-09-06T07:58:52.000Z | 2022-03-31T04:27:09.000Z | ahrs/filters/oleq.py | geoKinga/ahrs | 87f9210cfcf6c545d86ae8588a93f012020164ee | [
"MIT"
] | 48 | 2019-11-13T15:42:46.000Z | 2022-03-31T23:53:53.000Z | ahrs/filters/oleq.py | geoKinga/ahrs | 87f9210cfcf6c545d86ae8588a93f012020164ee | [
"MIT"
] | 34 | 2019-12-19T16:22:00.000Z | 2022-03-14T09:51:50.000Z | # -*- coding: utf-8 -*-
"""
Optimal Linear Estimator of Quaternion
======================================
Considering an attitude determination model from a pair of vector observations:
.. math::
\\mathbf{D}^b = \\mathbf{CD}^r
where :math:`\\mathbf{D}_i^b=\\begin{bmatrix}D_{x,i}^b & D_{y,i}^b & D_{z,i}^b\\end{bmatrix}^T`
and :math:`\\mathbf{D}_i^r=\\begin{bmatrix}D_{x,i}^r & D_{y,i}^r & D_{z,i}^r\\end{bmatrix}^T`
are the *i*-th pair of normalized vector observations from the body frame :math:`b`
and the reference frame :math:`r`.
The goal is to find the optimal attitude :math:`\\mathbf{C}\\in\\mathbb{R}^{3\\times 3}`
relating both vectors. The famous `Wahba's problem
<https://en.wikipedia.org/wiki/Wahba%27s_problem>`_ can help us to find
:math:`\\mathbf{C}` from a set of observations and a least-squares method of
the form:
.. math::
L(\\mathbf{C}) = \\sum_{i=1}^n a_i \\|\\mathbf{D}_i^b - \\mathbf{CD}_i^r \\|^2
being :math:`a_i` the weight of the *i*-th sensor output. The goal of **OLEQ**
is to find this optimal attitude, but in the form of a quaternion [Zhou2018]_.
First, notice that the attitude matrix is related to quaternion
:math:`\\mathbf{q}=\\begin{bmatrix}q_w & q_x & q_y & q_z\\end{bmatrix}^T` via:
.. math::
\\mathbf{C} = \\begin{bmatrix}\\mathbf{P}_1\\mathbf{q} & \\mathbf{P}_2\\mathbf{q} & \\mathbf{P}_3\\mathbf{q}\\end{bmatrix}
where the decomposition matrices are:
.. math::
\\begin{array}{rcl}
\\mathbf{P}_1 &=&
\\begin{bmatrix}q_w & q_x & -q_y & -q_z \\\\ -q_z & q_y & q_x & -q_w \\\\ q_y & q_z & q_w & q_x \\end{bmatrix} \\\\
\\mathbf{P}_2 &=&
\\begin{bmatrix}q_z & q_y & q_x & q_w \\\\ q_w & -q_x & q_y & -q_z \\\\ -q_x & -q_w & q_z & q_y \\end{bmatrix} \\\\
\\mathbf{P}_3 &=&
\\begin{bmatrix}-q_y & q_z & -q_w & q_x \\\\ q_x & q_w & q_z & q_y \\\\ q_w & -q_x & -q_y & q_z \\end{bmatrix}
\\end{array}
It is accepted that :math:`\\mathbf{P}_1^T=\\mathbf{P}_1^\\dagger`,
:math:`\\mathbf{P}_2^T=\\mathbf{P}_2^\\dagger`, and :math:`\\mathbf{P}_3^T=\\mathbf{P}_3^\\dagger`,
where the notation :math:`^\\dagger` stands for the `Moore-Penrose pseudo-
inverse <https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse>`_. So,
the reference and observation vectors can be related to the quaternion with a
:math:`4\\times 4` matrix of the form:
.. math::
\\begin{array}{rcl}
\\mathbf{D}^b &=& \\mathbf{K}(\\mathbf{q}) \\mathbf{q} \\\\
\\mathbf{D}^b &=& \\big(D_x^r\\mathbf{P}_1 + D_y^r\\mathbf{P}_2 + D_z^r\\mathbf{P}_3\\big) \\mathbf{q}
\\end{array}
Knowing that :math:`\\mathbf{K}^T(\\mathbf{q})=\\mathbf{K}^\\dagger(\\mathbf{q})`,
the expression can be expanded to:
.. math::
\\begin{array}{rcl}
\\mathbf{K}^T(\\mathbf{q})\\mathbf{D}^b &=&
D_x^r\\mathbf{P}_1^T\\mathbf{D}^b + D_y^r\\mathbf{P}_2^T\\mathbf{D}^b + D_z^r\\mathbf{P}_3^T\\mathbf{D}^b \\\\
\\mathbf{Wq} &=& D_x^r\\mathbf{M}_1\\mathbf{q} + D_y^r\\mathbf{M}_2\\mathbf{q} + D_z^r\\mathbf{M}_3\\mathbf{q}
\\end{array}
where :math:`\\mathbf{W}` is built with:
.. math::
\\begin{array}{rcl}
\\mathbf{W} &=& D_x^r\\mathbf{M}_1 + D_y^r\\mathbf{M}_2 + D_z^r\\mathbf{M}_3 \\\\ && \\\\
\\mathbf{M}_1 &=&
\\begin{bmatrix}
D_x^b & 0 & D_z^b & -D_y^b \\\\
0 & D_x^b & D_y^b & D_z^b \\\\
D_z^b & D_y^b & -D_x^b & 0 \\\\
-D_y^b & D_z^b & 0 & -D_x^b
\\end{bmatrix} \\\\
\\mathbf{M}_2 &=&
\\begin{bmatrix}
D_y^b & -D_z^b & 0 & D_x^b \\\\
-D_z^b & -D_y^b & D_x^b & 0 \\\\
0 & D_x^b & D_y^b & D_z^b \\\\
D_x^b & 0 & D_z^b & -D_y^b
\\end{bmatrix} \\\\
\\mathbf{M}_3 &=&
\\begin{bmatrix}
D_z^b & D_y^b & -D_x^b & 0 \\\\
D_y^b & -D_z^b & 0 & D_x^b \\\\
-D_x^b & 0 & -D_z^b & D_y^b \\\\
0 & D_x^b & D_y^b & D_z^b
\\end{bmatrix}
\\end{array}
Now the attitude estimation is shifted to :math:`\\mathbf{Wq}=\\mathbf{q}`. If
treated as an iterative dynamical system, the quaternion at the *n*-th iteration
is calculated as:
.. math::
\\mathbf{q}(n) = \\mathbf{Wq}(n-1)
It is possible to list all rotation equations as:
.. math::
\\begin{bmatrix}
\\sqrt{a_1}\\mathbf{I}_4 \\\\ \\vdots \\\\ \\sqrt{a_n}\\mathbf{I}_4
\\end{bmatrix} \\mathbf{q} =
\\begin{bmatrix}
\\sqrt{a_1}\\mathbf{W}_1 \\\\ \\vdots \\\\ \\sqrt{a_n}\\mathbf{W}_n
\\end{bmatrix} \\mathbf{q}
Leading to a pre-multiplication of the form:
.. math::
\\mathbf{q} = \\Big(\\sum_{i=1}^na_i\\mathbf{W}_i\\Big)\\mathbf{q}
A stable and continuous solution to each equation is done by pre-multiplying
:math:`\\frac{1}{2}(\\mathbf{W}_i+\\mathbf{I}_4)`.
.. math::
\\begin{bmatrix}
\\sqrt{a_1}\\mathbf{I}_4 \\\\ \\vdots \\\\ \\sqrt{a_n}\\mathbf{I}_4
\\end{bmatrix} \\mathbf{q} =
\\begin{bmatrix}
\\frac{1}{2}\\sqrt{a_1}(\\mathbf{W}_1+\\mathbf{I}_4) \\\\ \\vdots \\\\ \\frac{1}{2}\\sqrt{a_n}(\\mathbf{W}_n+\\mathbf{I}_4)
\\end{bmatrix} \\mathbf{q}
Based on `Brouwer's fixed-point theorem <https://en.wikipedia.org/wiki/Brouwer_fixed-point_theorem>`_,
it is possible to recursively obtain the normalized optimal quaternion by
rotating a randomly given initial quaternion, :math:`\\mathbf{q}_\\mathrm{rand}`,
over and over again indefinitely.
.. math::
\\mathbf{q} = \\frac{\\mathbf{W} + \\mathbf{I}}{2} \\mathbf{q}_\\mathrm{rand}
This equals the least-square of the set of pre-computed single rotated
quaternions.
References
----------
.. [Zhou2018] Zhou, Z.; Wu, J.; Wang, J.; Fourati, H. Optimal, Recursive and
Sub-Optimal Linear Solutions to Attitude Determination from Vector
Observations for GNSS/Accelerometer/Magnetometer Orientation Measurement.
Remote Sens. 2018, 10, 377.
(https://www.mdpi.com/2072-4292/10/3/377)
"""
import numpy as np
from ..common.mathfuncs import cosd, sind
class OLEQ:
"""
Optimal Linear Estimator of Quaternion
Parameters
----------
acc : numpy.ndarray, default: None
N-by-3 array with measurements of acceleration in in m/s^2
mag : numpy.ndarray, default: None
N-by-3 array with measurements of magnetic field in mT
magnetic_ref : float or numpy.ndarray
Local magnetic reference.
frame : str, default: 'NED'
Local tangent plane coordinate frame. Valid options are right-handed
``'NED'`` for North-East-Down and ``'ENU'`` for East-North-Up.
Raises
------
ValueError
When dimension of input arrays ``acc`` and ``mag`` are not equal.
Examples
--------
>>> acc_data.shape, mag_data.shape # NumPy arrays with sensor data
((1000, 3), (1000, 3))
>>> from ahrs.filters import OLEQ
>>> orientation = OLEQ(acc=acc_data, mag=mag_data)
>>> orientation.Q.shape # Estimated attitude
(1000, 4)
"""
    def __init__(self,
        acc: np.ndarray = None,
        mag: np.ndarray = None,
        weights: np.ndarray = None,
        magnetic_ref: np.ndarray = None,
        frame: str = 'NED'
        ):
        # Raw sensor streams; normalization is handled per sample later.
        self.acc = acc
        self.mag = mag
        # Per-sensor weights [accelerometer, magnetometer]; equal by default.
        self.a = weights if weights is not None else np.ones(2)
        self.frame = frame
        # Reference measurements
        self._set_reference_frames(magnetic_ref, self.frame)
        # Batch-process immediately when both sensor arrays were supplied.
        if self.acc is not None and self.mag is not None:
            self.Q = self._compute_all()
def _set_reference_frames(self, mref: float, frame: str = 'NED') -> None:
if frame.upper() not in ['NED', 'ENU']:
raise ValueError(f"Invalid frame '{frame}'. Try 'NED' or 'ENU'")
# Magnetic Reference Vector
if mref is None:
# Local magnetic reference of Munich, Germany
from ..common.mathfuncs import MUNICH_LATITUDE, MUNICH_LONGITUDE, MUNICH_HEIGHT
from ..utils.wmm import WMM
wmm = WMM(latitude=MUNICH_LATITUDE, longitude=MUNICH_LONGITUDE, height=MUNICH_HEIGHT)
self.m_ref = np.array([wmm.X, wmm.Y, wmm.Z]) if frame.upper() == 'NED' else np.array([wmm.Y, wmm.X, -wmm.Z])
elif isinstance(mref, (int, float)):
cd, sd = cosd(mref), sind(mref)
self.m_ref = np.array([cd, 0.0, sd]) if frame.upper() == 'NED' else np.array([0.0, cd, -sd])
else:
self.m_ref = np.copy(mref)
self.m_ref /= np.linalg.norm(self.m_ref)
# Gravitational Reference Vector
self.a_ref = np.array([0.0, 0.0, -1.0]) if frame.upper() == 'NED' else np.array([0.0, 0.0, 1.0])
def _compute_all(self) -> np.ndarray:
"""Estimate the quaternions given all data.
Attributes ``acc`` and ``mag`` must contain data.
Returns
-------
Q : array
M-by-4 Array with all estimated quaternions, where M is the number
of samples.
"""
if self.acc.shape != self.mag.shape:
raise ValueError("acc and mag are not the same size")
num_samples = len(self.acc)
Q = np.zeros((num_samples, 4))
for t in range(num_samples):
Q[t] = self.estimate(self.acc[t], self.mag[t])
return Q
def WW(self, Db: np.ndarray, Dr: np.ndarray) -> np.ndarray:
"""W Matrix
.. math::
\\mathbf{W} = D_x^r\\mathbf{M}_1 + D_y^r\\mathbf{M}_2 + D_z^r\\mathbf{M}_3
Parameters
----------
Db : numpy.ndarray
Normalized tri-axial observations vector.
Dr : numpy.ndarray
Normalized tri-axial reference vector.
Returns
-------
W_matrix : numpy.ndarray
W Matrix.
"""
bx, by, bz = Db
rx, ry, rz = Dr
M1 = np.array([
[bx, 0.0, bz, -by],
[0.0, bx, by, bz],
[bz, by, -bx, 0.0],
[-by, bz, 0.0, -bx]]) # (eq. 18a)
M2 = np.array([
[by, -bz, 0.0, bx],
[-bz, -by, bx, 0.0],
[0.0, bx, by, bz],
[bx, 0.0, bz, -by]]) # (eq. 18b)
M3 = np.array([
[bz, by, -bx, 0.0],
[by, -bz, 0.0, bx],
[-bx, 0.0, -bz, by],
[0.0, bx, by, bz]]) # (eq. 18c)
return rx*M1 + ry*M2 + rz*M3 # (eq. 20)
def estimate(self, acc: np.ndarray, mag: np.ndarray) -> np.ndarray:
"""Attitude Estimation
Parameters
----------
acc : numpy.ndarray
Sample of tri-axial Accelerometer.
mag : numpy.ndarray
Sample of tri-axial Magnetometer.
Returns
-------
q : numpy.ndarray
Estimated quaternion.
"""
# Normalize measurements (eq. 1)
a_norm = np.linalg.norm(acc)
m_norm = np.linalg.norm(mag)
if not a_norm > 0 or not m_norm > 0: # handle NaN
return None
acc = np.copy(acc)/a_norm
mag = np.copy(mag)/m_norm
sum_aW = self.a[0]*self.WW(acc, self.a_ref) + self.a[1]*self.WW(mag, self.m_ref) # (eq. 31)
R = 0.5*(np.identity(4) + sum_aW) # (eq. 33)
q = np.array([0., 0., 0., 1.]) # "random" quaternion
last_q = np.array([1., 0., 0., 0.])
i = 0
while np.linalg.norm(q-last_q) > 1e-8 and i <= 20:
last_q = q
q = R @ last_q # (eq. 24)
q /= np.linalg.norm(q)
i += 1
return q/np.linalg.norm(q)
| 36.405145 | 127 | 0.561208 |
1965d64ee2bffd1af2fd92233c46021e18e31e05 | 913 | py | Python | setup.py | raileanu/playground | 5c46b9f1823d4ef7c64b5128328ce1c717b0e043 | [
"Apache-2.0"
] | null | null | null | setup.py | raileanu/playground | 5c46b9f1823d4ef7c64b5128328ce1c717b0e043 | [
"Apache-2.0"
] | null | null | null | setup.py | raileanu/playground | 5c46b9f1823d4ef7c64b5128328ce1c717b0e043 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Collect the pinned runtime and optional dependencies from their
# respective requirement files (one requirement per line).
with open('requirements.txt', 'r') as requirements_file:
    install_requires = requirements_file.readlines()
with open('requirements_extra.txt', 'r') as extras_file:
    extras_require = extras_file.readlines()
# The package version is maintained in a standalone VERSION file.
with open('VERSION') as version_file:
    version = version_file.read().strip()
setup(
    name='pommerman',
    version=version,
    description='PlayGround: AI Research into Multi-Agent Learning',
    url='https://www.pommerman.com',
    author='Pommerman',
    author_email='support@pommerman.com',
    license='Apache 2.0',
    packages=find_packages(),
    install_requires=install_requires,
    extras_require={
        'extras': extras_require  # @TODO this might need refinement
    },
    entry_points={
        'console_scripts': [
            'pom_battle=pommerman.cli.run_battle:main',
            'pom_tf_battle=pommerman.cli.train_with_tensorforce:main',
        ],
    },
    zip_safe=False,
)
| 29.451613 | 70 | 0.649507 |
d8dd8bed551a2da26ed0c3ad64e8e4c5aa330f15 | 4,181 | py | Python | examples/mechanics/JointsTests/spring_damper_pivot.py | siconos/siconos-tutorials | 821365a6ce679fc3d606b272ff069134e3c6aa4b | [
"Apache-2.0"
] | 6 | 2017-01-12T23:09:28.000Z | 2021-03-20T17:03:58.000Z | examples/mechanics/JointsTests/spring_damper_pivot.py | siconos/siconos-tutorials | 821365a6ce679fc3d606b272ff069134e3c6aa4b | [
"Apache-2.0"
] | 3 | 2019-01-14T13:44:51.000Z | 2021-05-17T13:57:27.000Z | examples/mechanics/JointsTests/spring_damper_pivot.py | siconos/siconos-tutorials | 821365a6ce679fc3d606b272ff069134e3c6aa4b | [
"Apache-2.0"
] | 2 | 2019-10-22T13:30:39.000Z | 2020-10-06T10:19:57.000Z |
from __future__ import print_function
import os,sys
import numpy as np
import math
import siconos.kernel as Kernel
np.set_printoptions(precision=3)
from siconos.mechanics.collision.tools import Contactor
from siconos.mechanics.joints import cast_PivotJointR
from siconos.io.mechanics_run import MechanicsHdf5Runner
from siconos.kernel import SiconosVector, BlockVector, changeFrameAbsToBody
import siconos.numerics as sn
import siconos.kernel as sk
# An example of applying force to the axis of a joint, and applying
# spring and virtual damping by measuring position and velocity along
# the same axis.
# Note: This example is to demonstrate external measurement of joint
# positions and application of forces to dynamical systems attached to
# joints. In practice it is better to use internal forces (fInt,
# mInt) to model joint spring-dampers, see folder
# JointsTestsWithInternalForces, and extra Relations with associated
# Non-Smooth Laws to model non-linearities such as joint stops and
# friction, see JointsTestsWithContactDetection.
# Creation of the hdf5 file for input/output
with MechanicsHdf5Runner() as io:
    # Definition of two bars connected by a prismatic joint
    # NOTE(review): the joint added below is a PivotJointR, not prismatic —
    # the comment above is kept from the original example; confirm intent.
    io.add_primitive_shape('Bar', 'Box', (1, 0.1, 0.1))
    # bar1 starts with an initial angular velocity about its joint axis.
    io.add_object('bar1', [Contactor('Bar')], [0,0,2],
                  orientation=[(0,0,1),np.pi/2], mass=1.0, velocity=[0,0,0,0,0,1])
    # bar2 is a fork made of two parallel bars straddling bar1.
    io.add_object('bar2', [Contactor('Bar',relative_translation=[0, 0.1,0]),
                           Contactor('Bar',relative_translation=[0,-0.1,0])],
                  [0,0,2],
                  orientation=[(0,0,1),np.pi/2], mass=1.0)
    # Pivot joint between the two bars, axis [0,1,0] in body coordinates.
    io.add_joint('joint1', 'bar1', 'bar2', [[0,0,0]], [[0,1,0]], 'PivotJointR',
                 absolute=False)
    # Definition of the ground
    io.add_primitive_shape('Ground', 'Box', (5, 5, 0.1))
    io.add_object('ground', [Contactor('Ground')], [0,0,-0.05])
    # Contact law between the bars and the ground: friction 0.3, no restitution.
    io.add_Newton_impact_friction_nsl('contact', mu=0.3, e=0.0)
class Ctrl(object):
    """Controller that applies a virtual spring-damper torque pair across
    the pivot joint by measuring its angle and relative velocity."""
    def initialize(self, io):
        """Cache the dynamical systems and joint, then apply initial torques."""
        self.count = 0
        self.topo = io._nsds.topology()
        self.ds1 = Kernel.cast_NewtonEulerDS(self.topo.getDynamicalSystem('bar1'))
        self.ds2 = Kernel.cast_NewtonEulerDS(self.topo.getDynamicalSystem('bar2'))
        self.joint1 = cast_PivotJointR(
            self.topo.getInteraction('joint1').relation())
        # External moments set below are expressed in the inertial frame.
        self.ds1.setIsMextExpressedInInertialFrame(True)
        self.ds2.setIsMextExpressedInInertialFrame(True)
        # Apply initial forces
        self.step()
    def step(self):
        """Compute and apply equal-and-opposite spring-damper torques."""
        self.count += 1
        # Make a temporary BlockVector containing both qs
        bv = BlockVector(self.ds1.q(), self.ds2.q())
        torque1 = np.zeros(3)
        torque2 = np.zeros(3)
        # Get the position and use it to project a torque vector
        # onto the DoF (spring torque)
        angle = SiconosVector(1)
        self.joint1.computehDoF(0, bv, angle, 0)
        # Spring pulls the joint angle towards the pi/4 setpoint with
        # stiffness 500 (units as per the siconos model).
        setpoint = np.pi/4
        ang_diff = setpoint - angle.getValue(0)
        spring_torque = np.array(self.joint1.normalDoF(bv, 0)) * ang_diff * 500.0
        # Get the velocity of each body projected onto the DoF and
        # calculate their difference (damping torque, coefficient 5.0)
        vel1 = self.joint1.projectVectorDoF(self.ds1.angularVelocity(True), bv, 0)
        vel2 = self.joint1.projectVectorDoF(self.ds2.angularVelocity(True), bv, 0)
        vel_diff = vel1 - vel2
        damping_torque = vel_diff * 5.0
        # Calculate total torques for each body: split evenly with opposite
        # signs so the pair acts as action-reaction across the joint.
        torque1 += - (spring_torque + damping_torque)/2
        torque2 += + (spring_torque + damping_torque)/2
        print('applying spring-damper torques', torque1, torque2)
        self.ds1.setMExtPtr(torque1)
        self.ds2.setMExtPtr(torque2)
# Nonsmooth solver configuration: generic mechanical Gauss-Seidel with a
# high iteration cap and tight tolerance.
options = sk.solver_options_create(sn.SICONOS_GENERIC_MECHANICAL_NSGS)
options.iparam[sn.SICONOS_IPARAM_MAX_ITER] = 1000
options.dparam[sn.SICONOS_DPARAM_TOL] = 1e-12
# Load and run the simulation
# (re-opens the hdf5 file created above; 5 s of simulated time, 1 ms steps)
with MechanicsHdf5Runner(mode='r+') as io:
    io.run(t0=0,
           T=5,
           h=0.001,
           theta=0.5,
           Newton_max_iter=1,
           controller=Ctrl(),
           solver_options=options,
           output_frequency=1)
| 37.666667 | 82 | 0.67137 |
22718d84211b58aa472bca265f2b965f96be188d | 6,869 | py | Python | week02/week02_project.py | AnaRita93/spiced_projects | 64f0caec4008cc9ccb528e71ec16afba78728b8e | [
"MIT"
] | null | null | null | week02/week02_project.py | AnaRita93/spiced_projects | 64f0caec4008cc9ccb528e71ec16afba78728b8e | [
"MIT"
] | null | null | null | week02/week02_project.py | AnaRita93/spiced_projects | 64f0caec4008cc9ccb528e71ec16afba78728b8e | [
"MIT"
] | null | null | null |
"""
WEEK 02: Classification Project with Titanic Data
This program will run a set of steps to calculate the survival predictions of Titanic passengers using Classification Models
For more details on EDA and model selection, please check the jupyter notebook version of this program (week02_project.ipynb).
###Step 1: Load Data
###Step 2: Train-Test Split (df_train and df_test)
###Step 3: Feature Engineering on df_train and df_test
###Step 4: Train Models (Logistic Reg, Random Forest) + Cross validation
###Step 5: Make predictions for Titanic Kaggle challenge and save results in a csv file
"""
# Packages
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline
from sklearn.pipeline import Pipeline
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.api import add_constant
from statsmodels.api import OLS, add_constant
from sklearn.feature_selection import RFE
import statsmodels.api as sm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import plot_roc_curve, auc, roc_curve
from sklearn.model_selection import cross_val_score
pd.options.mode.chained_assignment = None
# Functions
def clean_data(df):
    """Clean a Titanic dataframe in place.

    Coerces Age to numeric and imputes missing ages with the column mean,
    removes rows with a missing Embarked value, and drops identifier-like
    columns that carry no predictive signal.

    BUG FIXES vs. the original:
    - Age was filled with the mean twice (before and after coercion).
    - ``df['Embarked'].dropna(inplace=True)`` operated on a temporary
      Series and never removed any rows from the frame; the rows are now
      actually dropped. (Note: this also removes such rows from a test
      set — fill instead if submission row counts must be preserved.)
    - Filling 'Cabin' was pointless since the column is dropped below.
    """
    # Coerce first so the mean is computed on clean numeric values;
    # unparsable entries become NaN instead of raising.
    df['Age'] = pd.to_numeric(df['Age'], errors='coerce')
    df['Age'] = df['Age'].fillna(df['Age'].mean())
    # Drop rows where the port of embarkation is unknown (for real this time).
    df.dropna(subset=['Embarked'], inplace=True)
    # Identifier-like / sparse columns are not useful features here.
    df.drop(['PassengerId', 'Ticket', 'Cabin'], inplace=True, axis=1)
def format_family_name(df):
    """Split 'Name' ("Surname, Prefix. FirstName") into Surname/PrefixName
    columns and return the enriched dataframe.

    BUG FIX: the original merged into a local variable and returned None,
    so callers never saw the new columns. The merged frame is now returned
    (callers that ignored the previous None return are unaffected).
    """
    # Work on an explicit copy to avoid chained-assignment warnings.
    family = df[['PassengerId', 'Name', 'SibSp', 'Parch']].copy()
    family.set_index('PassengerId', inplace=True)
    # "Smith, Mr. John" -> Surname "Smith", PrefixName " Mr", FirstName " John"
    family['Surname'] = family['Name'].str.split(expand=True, n=2, pat=',')[0]
    family['FirstName'] = family['Name'].str.split(expand=True, n=2, pat=',')[1]
    family['PrefixName'] = family['FirstName'].str.split(expand=True, n=2, pat='.')[0]
    family['FirstName'] = family['FirstName'].str.split(expand=True, n=2, pat='.')[1]
    family.sort_values(by='Surname', inplace=True)
    # Merge on the shared columns (Name, SibSp, Parch), then drop the raw
    # name fields that are no longer needed as features.
    df = df.merge(family)
    df.drop(['Name', 'FirstName'], inplace=True, axis=1)
    return df
def add_family_size_variable(df):
    """Add 'family_size' and coarse 'family_size_cat' columns in place.

    Fixes vs. the original: removes a dead ``value_counts()`` statement,
    and closes the gaps in the lookup table (sizes 9 and 10 previously
    mapped to NaN) — any size >= 7 now counts as 'large'.
    """
    # Family size = siblings/spouses + parents/children + the passenger.
    df['family_size'] = df['Parch'] + df['SibSp'] + 1
    family_size_map = {1: 'alone', 2: 'small', 3: 'small', 4: 'small',
                       5: 'medium', 6: 'medium'}
    df['family_size_cat'] = df['family_size'].map(
        lambda size: family_size_map.get(size, 'large')
    )
def create_age_groups(df):
    """Append one-hot age-quantile columns to *df* and return the result.

    BUG FIXES vs. the original:
    - The merged frame was assigned to a local and the function returned
      None, so the caller never received the new columns.
    - Merging on 'Age' duplicated rows whenever ages repeat; the bin
      columns are now aligned by index instead.
    """
    kbins = KBinsDiscretizer(n_bins=5, encode='onehot-dense', strategy='quantile')
    columns = df[['Age']]
    kbins.fit(columns)
    encoded = kbins.transform(columns)
    # Human-readable labels from the learnt bin edges, e.g. "0.4_to_19.0".
    edges = kbins.bin_edges_[0].round(1)
    labels = ['{}_to_{}'.format(edges[i], edges[i + 1])
              for i in range(len(edges) - 1)]
    df_bins = pd.DataFrame(encoded, columns=labels, index=df.index)
    return df.join(df_bins)
def create_pipeline_for_logistic_regression(Xtrain, ytrain):
    """Fit a preprocessing + LogisticRegression pipeline and return it.

    Categorical features are one-hot encoded and numeric features are
    standardized before the classifier.

    BUG FIX: the original discarded both the fitted pipeline and the
    transformed frame and returned None; the fitted pipeline is now
    returned so callers can actually predict with it.
    """
    categorical_pipe = make_pipeline(OneHotEncoder())
    numeric_pipe = make_pipeline(StandardScaler())
    feature_transform = make_column_transformer(
        (categorical_pipe, ['Sex', 'Embarked', 'family_size_cat']),
        (numeric_pipe, ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'family_size']))
    pipeline = make_pipeline(feature_transform, LogisticRegression())
    pipeline.fit(Xtrain, ytrain)
    return pipeline
def train_decision_tree(Xtrain,ytrain,Xtest, ytest,tree_depth):
    """Train a DecisionTreeClassifier with the given max depth and print
    its classification report, cross-validation scores and train/test
    accuracy. Expects pandas objects (uses ``.values`` internally).
    """
    m_tree = DecisionTreeClassifier(max_depth=tree_depth)
    m_tree.fit(Xtrain.values, ytrain.values)
    # Report is computed on the TRAINING data; the test set is only used
    # for the final accuracy comparison below.
    ypred_tree = m_tree.predict(Xtrain.values)
    print(classification_report(ytrain.values, ypred_tree))
    print(f'crossvalidatin for DecisionTree is {cross_val_score(m_tree, Xtrain, ytrain)}')
    print('')
    print('Comparison with test data')
    print("training DecTree accuracy = ", round(m_tree.score(Xtrain.values, ytrain.values),2))
    print("validation DecTree accuracy = ", round(m_tree.score(Xtest.values, ytest.values), 2))
def train_logistic_regression(Xtrain,ytrain,Xtest, ytest):
    """Train a balanced multinomial LogisticRegression and print its
    classification report, cross-validation scores and train/test
    accuracy. Expects pandas objects (uses ``.values`` internally).
    """
    m = LogisticRegression(class_weight='balanced', multi_class='multinomial', solver='newton-cg')
    m.fit(Xtrain.values, ytrain.values)
    # Report is computed on the TRAINING data; the test set is only used
    # for the final accuracy comparison below.
    ypred_log = m.predict(Xtrain.values)
    print(classification_report(ytrain.values, ypred_log))
    print(f'crossvalidatin for LogReg is {cross_val_score(m, Xtrain, ytrain)}')
    print('')
    print('Comparison with test data')
    print("training LogReg accuracy = ", round(m.score(Xtrain.values, ytrain.values),2))
    print("validation LogReg accuracy = ", round(m.score(Xtest.values, ytest.values), 2))
def train_randomforest(Xtrain, ytrain, Xtest, ytest, tree_depth=None):
    """Train a RandomForestClassifier and print its classification report,
    cross-validation scores and train/test accuracy.

    Parameters mirror ``train_decision_tree``; ``tree_depth=None`` keeps
    scikit-learn's default of fully grown trees.

    BUG FIX: the original referenced an undefined global ``tree_dept`` and
    raised NameError on every call; the depth is now an explicit keyword
    argument (backward compatible, since the old code always crashed).
    """
    r = RandomForestClassifier(max_depth=tree_depth)
    r.fit(Xtrain.values, ytrain.values)
    # Report is computed on the TRAINING data; the test set is only used
    # for the final accuracy comparison below.
    ypred_rf = r.predict(Xtrain.values)
    print(classification_report(ytrain.values, ypred_rf))
    print(f'cross-validation for RF is {cross_val_score(r, Xtrain, ytrain)}')
    print('')
    print('Comparison with test data:')
    print("training RF accuracy = ", round(r.score(Xtrain.values, ytrain.values),2))
    print("validation RF accuracy = ", round(r.score(Xtest.values, ytest.values),2))
# Program execution
# Load and Inspect Data. Fill Missing values.
df = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
clean_data(df)
clean_data(test)
# Train-test split the data
df_train, df_test = train_test_split(df, test_size = 0.2)
# Step 3: Feature Engineering of df train and df_test
# NOTE(review): Step 3 is missing entirely — `Xtrain`/`ytrain` used below
# are never defined, so this script raises NameError as written.
# Step 4: Train models -Logistic Regression, Random Forest and print out evaluation and cross validation metrics
# NOTE(review): `train_logReg` and `train_randomForest` are undefined; the
# functions defined above are `train_logistic_regression` and
# `train_randomforest`, and they expect DataFrames (they call `.values`
# internally), not the `.values` arrays passed here.
train_logReg(Xtrain.values, ytrain.values)
train_randomForest(Xtrain.values, ytrain.values)
# Step 6: Make predictions and save it into a csv file in Kaggle format
| 39.477011 | 128 | 0.727035 |
b5792f140bbd09840ce3765f0f7fc9e5dbdc3d30 | 1,712 | py | Python | app/user/serializers.py | joaofranca13/recipe-app-api | 2216447939e7283fcfd9a4679659985a390777f1 | [
"MIT"
] | null | null | null | app/user/serializers.py | joaofranca13/recipe-app-api | 2216447939e7283fcfd9a4679659985a390777f1 | [
"MIT"
] | null | null | null | app/user/serializers.py | joaofranca13/recipe-app-api | 2216447939e7283fcfd9a4679659985a390777f1 | [
"MIT"
] | null | null | null | from django.contrib.auth import authenticate, get_user_model
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializes user accounts (email, password, name)."""

    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # Passwords are accepted on input only and must be >= 8 characters.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 8}}

    def create(self, validated_data):
        """Create and return a user with a properly hashed password."""
        user_model = get_user_model()
        return user_model.objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        """Update a user; hash any new password instead of storing it raw."""
        new_password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if new_password:
            user.set_password(new_password)
            user.save()
        return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for user authentication-token requests (email + password)."""
    email = serializers.CharField()
    # trim_whitespace is disabled so passwords containing leading or
    # trailing spaces are preserved verbatim.
    password = serializers.CharField(
        style={'input_type': 'password'},
        trim_whitespace=False
    )
    def validate(self, attrs):
        """Validate and authenticate the user."""
        email = attrs.get('email')
        password = attrs.get('password')
        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password
        )
        if not user:
            msg = _('Unable to authenticate with provided credentials')
            raise serializers.ValidationError(msg, code='authentication')
        # Expose the authenticated user to the view via validated_data.
        attrs['user'] = user
        return attrs
| 32.301887 | 74 | 0.64486 |
f67c14a0073772ba1196f961dd3d9a97e28d3c6e | 2,639 | py | Python | cliente_GUI.py | Solid-Red/Pychat-GUI-version | d5caf9befe7686c6304c1f6555e67c25d7afa26f | [
"MIT"
] | null | null | null | cliente_GUI.py | Solid-Red/Pychat-GUI-version | d5caf9befe7686c6304c1f6555e67c25d7afa26f | [
"MIT"
] | null | null | null | cliente_GUI.py | Solid-Red/Pychat-GUI-version | d5caf9befe7686c6304c1f6555e67c25d7afa26f | [
"MIT"
] | null | null | null | import tkinter
from tkinter import *
import socket
import sys
import threading
import time
# Create the main window
ventana=tkinter.Tk()
ventana.geometry("563x432")
# Window icon
#ventana.iconbitmap("otacon.ico")
#------------ does not work for some reason --------------
# Window title
ventana.title("Pychat GUI V.0.2")
# Otacon background image
fondo=PhotoImage(file="base_real.png")
imagen=Label(ventana,image=fondo).place(x=0,y=0)
# On-screen heading label
nombre=tkinter.Label(ventana,text="Pychat GUI V.0.2").place(x=200,y=10)
# Variable used to read messages out of the input box
my_msj=tkinter.StringVar()
# Input box
escribe_tu_msg=tkinter.Label(ventana,text="escribe tu mensaje aca :",bg="gray").place(x=22,y=280)
caja_entrada=tkinter.Entry(ventana,borderwidth=0,width=62,textvariable=my_msj)
caja_entrada.place(x=22,y=300)
# Default user name
Name="usuario"
def name_change():
    """Update the global display name from the dialog's entry field."""
    global Name
    entered = Names.get()
    # Fall back to the default handle when the field was left empty.
    Name = entered if entered != "" else "usuario"
    print(Name)
def quien_eres():
    """Open a username dialog attached to the main window.

    Bug fixes vs. the original:
    - Uses a ``Toplevel`` child window instead of a second ``tkinter.Tk()``
      root: a ``StringVar`` belongs to the first root and cannot drive an
      Entry living under a different root.
    - Drops the nested ``mainloop()`` call (the main event loop is already
      running) and the leftover debug ``print`` statements.
    """
    global Names
    Names = tkinter.StringVar()
    quien = tkinter.Toplevel(ventana)
    quien.title("usuario")
    tkinter.Label(quien, text="usuario").place(x=50, y=10)
    tkinter.Entry(quien, textvariable=Names).place(x=20, y=60)
    tkinter.Button(quien, text="cambia el nombre",
                   command=name_change).place(x=20, y=90)
    # Keep interaction on this dialog until the user closes it.
    quien.grab_set()
# quien.geometry("250,200")
# Extract the text from the input box
def texto_salida():
    """Read the typed message, echo it into the local chat box and clear
    the input field.

    NOTE(review): the message is only appended to the local Listbox — the
    socket send code is commented out at the bottom of the file, so
    nothing actually goes over the network.
    """
    print("chat-cliente GUI con base a python \n by red_wolf 2020 ")
    texto=Name+" >>>"+my_msj.get()
    print(texto)
    my_msj.set("")
    caja_salida.insert(tkinter.END,texto)
# Scrollbar to move through the messages
barra=tkinter.Scrollbar()
barra.place(x=310,y=230)
# Output box (chat history)
caja_salida=tkinter.Listbox(height=12,width=43,bg="gray",borderwidth=0,yscrollcommand=barra.set)
caja_salida.place(x=18,y=57)
# Send button
envio=tkinter.Button(ventana,text="Enviar",padx=10,pady=14,bg="red",command=lambda:texto_salida())
envio.place(x=465,y=280)
# Otacon sprite
otacon=PhotoImage(file="otacon.png")
otakon=Label(ventana,image=otacon).place(x=400,y=50)
# User-name button (opens the username dialog)
user=tkinter.Button(ventana,text="usuario",padx=8,pady=15,bg="blue",command=lambda:quien_eres())
user.place(x=465,y=335)
# Networking is not wired up yet — the socket / receiver-thread code
# below is intentionally commented out.
#sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#sock.connect((str(host), int(port)))
#msg_recv = threading.Thread(msg_recv)
#msg_recv.daemon = True
#msg_recv.start()
# Infinite loop that keeps the window open
ventana.mainloop()
| 19.123188 | 101 | 0.731717 |
c122afaec29516461af8d05ff7ede080ed94e117 | 14,548 | py | Python | tools/upgrade/errors.py | GreyElaina/pyre-check | abcb5daa64c38a25aed9ab238bb61290444ab06c | [
"MIT"
] | null | null | null | tools/upgrade/errors.py | GreyElaina/pyre-check | abcb5daa64c38a25aed9ab238bb61290444ab06c | [
"MIT"
] | null | null | null | tools/upgrade/errors.py | GreyElaina/pyre-check | abcb5daa64c38a25aed9ab238bb61290444ab06c | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import logging
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union, cast
import libcst
from . import UserError, ast
LOG: logging.Logger = logging.getLogger(__name__)
MAX_LINES_PER_FIXME: int = 4
class LineBreakTransformer(libcst.CSTTransformer):
    """Rewrites backslash line-continuations into parenthesized line breaks
    so that fixme comments can be inserted between the broken lines."""
    def leave_SimpleWhitespace(
        self,
        original_node: libcst.SimpleWhitespace,
        updated_node: libcst.SimpleWhitespace,
    ) -> Union[libcst.SimpleWhitespace, libcst.ParenthesizedWhitespace]:
        # Drop the backslash; if the whitespace spanned a newline, turn it
        # into a parenthesized (implicit) continuation instead.
        whitespace = original_node.value.replace("\\", "")
        if "\n" in whitespace:
            first_line = libcst.TrailingWhitespace(
                whitespace=libcst.SimpleWhitespace(
                    value=whitespace.split("\n")[0].rstrip()
                ),
                comment=None,
                newline=libcst.Newline(),
            )
            last_line = libcst.SimpleWhitespace(value=whitespace.split("\n")[1])
            return libcst.ParenthesizedWhitespace(
                first_line=first_line, empty_lines=[], indent=True, last_line=last_line
            )
        return updated_node
    def leave_Assign(
        self, original_node: libcst.Assign, updated_node: libcst.Assign
    ) -> libcst.Assign:
        # Wrap assignment values in parentheses so the implicit line
        # continuations produced above remain syntactically valid.
        assign_value = updated_node.value
        if hasattr(assign_value, "lpar"):
            parenthesized_value = assign_value.with_changes(
                lpar=[libcst.LeftParen()], rpar=[libcst.RightParen()]
            )
            return updated_node.with_changes(value=parenthesized_value)
        return updated_node
class PartialErrorSuppression(Exception):
    """Raised when some files could not have all their errors suppressed."""

    def __init__(self, message: str, unsuppressed_paths: List[str]) -> None:
        super().__init__(message)
        # Paths whose errors remain unsuppressed, for caller follow-up.
        self.unsuppressed_paths: List[str] = unsuppressed_paths
def error_path(error: Dict[str, Any]) -> str:
    # Sort/group key used below: the file path the error was reported in.
    return error["path"]
class Errors:
    """A collection of pyre errors, iterable as (path, errors) groups."""
    @classmethod
    def empty(cls) -> "Errors":
        return cls([])
    @staticmethod
    def from_json(
        json_string: str,
        only_fix_error_code: Optional[int] = None,
        from_stdin: bool = False,
    ) -> "Errors":
        """Parse a JSON error list, optionally keeping one error code only."""
        try:
            errors = json.loads(json_string)
            return Errors(_filter_errors(errors, only_fix_error_code))
        except json.decoder.JSONDecodeError:
            if from_stdin:
                raise UserError(
                    "Received invalid JSON as input. "
                    "If piping from `pyre check` be sure to use `--output=json`."
                )
            else:
                raise UserError(
                    f"Encountered invalid output when checking for pyre errors: `{json_string}`."
                )
    @staticmethod
    def from_stdin(only_fix_error_code: Optional[int] = None) -> "Errors":
        input = sys.stdin.read()
        return Errors.from_json(input, only_fix_error_code, from_stdin=True)
    def __init__(self, errors: List[Dict[str, Any]]) -> None:
        self.errors: List[Dict[str, Any]] = errors
        # Errors grouped by file path via itertools.groupby.
        # NOTE(review): this iterator is single-use, so an Errors instance
        # can only be iterated over once.
        self.error_iterator: Iterator[
            Tuple[str, Iterator[Dict[str, Any]]]
        ] = itertools.groupby(sorted(errors, key=error_path), error_path)
        self.length: int = len(errors)
    def __iter__(self) -> Iterator[Tuple[str, Iterator[Dict[str, Any]]]]:
        return self.error_iterator.__iter__()
    def __next__(self) -> Tuple[str, Iterator[Dict[str, Any]]]:
        return self.error_iterator.__next__()
    def __len__(self) -> int:
        return self.length
    def __eq__(self, other: "Errors") -> bool:
        # Assumes *other* is an Errors instance; anything without an
        # `errors` attribute raises AttributeError here.
        return self.errors == other.errors
    def suppress(
        self,
        comment: Optional[str] = None,
        max_line_length: Optional[int] = None,
        truncate: bool = False,
        unsafe: bool = False,
    ) -> None:
        """Insert `# pyre-fixme` suppressions into every file with errors.

        Raises PartialErrorSuppression listing the files whose rewrite
        failed (unstable AST or syntax errors); generated files are skipped
        with a warning.
        """
        unsuppressed_paths = []
        for path_to_suppress, errors in self:
            LOG.info("Processing `%s`", path_to_suppress)
            try:
                path = Path(path_to_suppress)
                input = path.read_text()
                output = _suppress_errors(
                    input,
                    _build_error_map(errors),
                    comment,
                    # Non-positive max lengths are treated as "no limit".
                    max_line_length
                    if max_line_length and max_line_length > 0
                    else None,
                    truncate,
                    unsafe,
                )
                path.write_text(output)
            except SkippingGeneratedFileException:
                LOG.warning(f"Skipping generated file at {path_to_suppress}")
            except (ast.UnstableAST, SyntaxError):
                unsuppressed_paths.append(path_to_suppress)
        if unsuppressed_paths:
            paths_string = ", ".join(unsuppressed_paths)
            raise PartialErrorSuppression(
                f"Could not fully suppress errors in: {paths_string}",
                unsuppressed_paths,
            )
def _filter_errors(
errors: List[Dict[str, Any]], only_fix_error_code: Optional[int] = None
) -> List[Dict[str, Any]]:
if only_fix_error_code is not None:
errors = [error for error in errors if error["code"] == only_fix_error_code]
return errors
def errors_from_targets(
    project_directory: Path,
    path: str,
    targets: List[str],
    check_alternate_names: bool = True,
) -> Errors:
    """Run `buck test` on *targets* and scrape pyre errors from its output.

    Return code 0 means no errors; 32 means test failures whose output is
    parsed for pyre error lines; 5 (missing targets) triggers, when
    *check_alternate_names* is set, a `buck query` for similar
    `-pyre-typecheck` targets and a single retry.
    """
    buck_test_command = (
        ["buck", "test", "--show-full-json-output"] + targets + ["--", "--run-disabled"]
    )
    buck_test = subprocess.run(
        buck_test_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    errors = Errors.empty()
    if buck_test.returncode == 0:
        # Successful run with no type errors
        LOG.info("No errors in %s...", path)
    elif buck_test.returncode == 32:
        # Scrape "<file>.py:<line>:<col> <message> [<code>]: ..." lines.
        buck_test_output = buck_test.stdout.decode().split("\n")
        pyre_error_pattern = re.compile(r"\W*(.*\.pyi?):(\d*):(\d*) (.* \[(\d*)\]: .*)")
        errors = {}
        for output_line in buck_test_output:
            matched = pyre_error_pattern.match(output_line)
            if matched:
                # NOTE(review): this rebinds the `path` parameter; any later
                # use of the parameter sees the last matched file path.
                path = matched.group(1)
                line = int(matched.group(2))
                column = int(matched.group(3))
                description = matched.group(4)
                code = matched.group(5)
                error = {
                    "line": line,
                    "column": column,
                    "path": project_directory / path,
                    "code": code,
                    "description": description,
                    "concise_description": description,
                }
                # Keyed by position/path/code to de-duplicate repeated reports.
                errors[(line, column, path, code)] = error
        errors = Errors(list(errors.values()))
    elif check_alternate_names and buck_test.returncode == 5:
        # Generated type check target was not named as expected.
        LOG.warning("Could not find buck test targets: %s", targets)
        LOG.info("Looking for similar targets...")
        targets_to_retry = []
        for target in targets:
            query_command = ["buck", "query", target]
            similar_targets = subprocess.run(
                query_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            output = similar_targets.stdout.decode()
            error_output = similar_targets.stderr.decode()
            if output:
                targets_to_retry.append(output.strip())
            elif error_output:
                # stderr may suggest candidates; keep only typecheck targets.
                typecheck_targets = [
                    target.strip()
                    for target in error_output.split("\n")
                    if target.strip().endswith("-pyre-typecheck")
                ]
                targets_to_retry += typecheck_targets
        if targets_to_retry:
            LOG.info("Retrying similar targets: %s", targets_to_retry)
            # Retry at most once: alternate-name lookup is disabled below.
            errors = errors_from_targets(
                project_directory, path, targets_to_retry, check_alternate_names=False
            )
        else:
            LOG.error("No similar targets to retry.")
    else:
        LOG.error(
            "Failed to run buck test command:\n\t%s\n\n%s",
            " ".join(buck_test_command),
            buck_test.stderr.decode(),
        )
    return errors
def _remove_comment_preamble(lines: List[str]) -> None:
# Deprecated: leaving remove logic until live old-style comments are cleaned up.
while lines:
old_line = lines.pop()
new_line = re.sub(r"# pyre: .*$", "", old_line).rstrip()
if old_line == "" or new_line != "":
# The preamble has ended.
lines.append(new_line)
return
def _add_error_to_line_break_block(lines: List[str], errors: List[List[str]]) -> None:
    """Re-emit a backslash-continued statement with fixme comments interleaved.

    *lines* ends with the collected (still backslash-broken) block;
    *errors* holds one list of comment lines per physical line of the
    block. The block is reparsed with libcst and rewritten to use
    parenthesized continuations so the comments can legally sit between
    its lines.
    """
    # Gather unbroken lines.
    line_break_block = [lines.pop() for _ in range(0, len(errors))]
    line_break_block.reverse()
    # Transform line break block to use parenthesis.
    indent = len(line_break_block[0]) - len(line_break_block[0].lstrip())
    line_break_block = [line[indent:] for line in line_break_block]
    statement = "\n".join(line_break_block)
    transformed_statement = libcst.Module([]).code_for_node(
        cast(
            libcst.CSTNode,
            libcst.parse_statement(statement).visit(LineBreakTransformer()),
        )
    )
    transformed_lines = transformed_statement.split("\n")
    # Restore the original indentation on every produced line.
    transformed_lines = [" " * indent + line for line in transformed_lines]
    # Add to lines: each line's comments go immediately before the line.
    for line, comment in zip(transformed_lines, errors):
        lines.extend(comment)
        lines.append(line)
def _split_across_lines(
comment: str, indent: int, max_line_length: Optional[int]
) -> List[str]:
if not max_line_length or len(comment) <= max_line_length:
return [comment]
comment = comment.lstrip()
available_columns = max_line_length - indent - len("# ")
buffered_line = ""
result = []
prefix = " " * indent
for token in comment.split():
if buffered_line and (
len(buffered_line) + len(token) + len(" ") > available_columns
):
# This new token would make the line exceed the limit,
# hence terminate what we have accumulated.
result.append(("{}{}".format(prefix, buffered_line)).rstrip())
# The first line already has a comment token on it, so don't prefix #. For
# the rest, we need to add the comment symbol manually.
prefix = "{}# ".format(" " * indent)
buffered_line = ""
buffered_line = buffered_line + token + " "
result.append(("{}{}".format(prefix, buffered_line)).rstrip())
return result
class SkippingGeneratedFileException(Exception):
    """Signals that a file carrying the generated-file marker was skipped."""
def _suppress_errors(
    input: str,
    errors: Dict[int, List[Dict[str, str]]],
    custom_comment: Optional[str] = None,
    max_line_length: Optional[int] = None,
    truncate: bool = False,
    unsafe: bool = False,
) -> str:
    """Return *input* with `# pyre-fixme` comments inserted for *errors*.

    *errors* maps 1-based line numbers to the errors on that line; entries
    with code "0" mean an existing suppression is unused and gets removed.
    Raises SkippingGeneratedFileException for generated files; unless
    *unsafe*, verifies afterwards that only comments changed.
    """
    # The marker is written split in two so this very file does not flag
    # itself as generated.
    if "@" "generated" in input:
        raise SkippingGeneratedFileException()
    lines = input.split("\n")  # type: List[str]
    # Replace lines in file.
    new_lines = []
    removing_pyre_comments = False
    # Per-line comment lists collected for an in-progress backslash block.
    line_break_block_errors: List[List[str]] = []
    for index, line in enumerate(lines):
        if removing_pyre_comments:
            # Still deleting the continuation lines of a removed suppression.
            stripped = line.lstrip()
            if stripped.startswith("#") and not re.match(
                r"# *pyre-(ignore|fixme).*$", stripped
            ):
                continue
            else:
                removing_pyre_comments = False
        number = index + 1
        relevant_errors = errors[number] if number in errors else []
        if any(error["code"] == "0" for error in relevant_errors):
            # Handle unused ignores.
            replacement = re.sub(r"# pyre-(ignore|fixme).*$", "", line).rstrip()
            if replacement == "":
                # The whole line was a suppression: drop it and any
                # old-style comment preamble already emitted above it.
                removing_pyre_comments = True
                _remove_comment_preamble(new_lines)
                continue
            else:
                line = replacement
        comments = []
        for error in relevant_errors:
            if error["code"] == "0":
                continue
            indent = len(line) - len(line.lstrip(" "))
            description = custom_comment if custom_comment else error["description"]
            comment = "{}# pyre-fixme[{}]: {}".format(
                " " * indent, error["code"], description
            )
            if not max_line_length or len(comment) <= max_line_length:
                comments.append(comment)
            else:
                # Over-long comments are word-wrapped, unless wrapping would
                # exceed MAX_LINES_PER_FIXME or truncation was requested.
                truncated_comment = comment[: (max_line_length - 3)] + "..."
                split_comment_lines = _split_across_lines(
                    comment, indent, max_line_length
                )
                if truncate or len(split_comment_lines) > MAX_LINES_PER_FIXME:
                    comments.append(truncated_comment)
                else:
                    comments.extend(split_comment_lines)
        if len(line_break_block_errors) > 0 and not line.endswith("\\"):
            # Handle error suppressions in line break block
            # (this line closes the block; rewrite the whole block at once).
            line_break_block_errors.append(comments)
            new_lines.append(line)
            _add_error_to_line_break_block(new_lines, line_break_block_errors)
            line_break_block_errors = []
            continue
        if line.endswith("\\"):
            # Comments cannot precede a backslash continuation; defer them
            # until the entire block has been collected.
            line_break_block_errors.append(comments)
            comments = []
        LOG.info(
            "Adding comment%s on line %d: %s",
            "s" if len(comments) > 1 else "",
            number,
            " \n".join(comments),
        )
        new_lines.extend(comments)
        new_lines.append(line)
    output = "\n".join(new_lines)
    if not unsafe:
        # Guard: the rewritten file must differ from the input only in
        # comments; otherwise ast.check_stable raises UnstableAST.
        ast.check_stable(input, output)
    return output
def _build_error_map(
errors: Iterator[Dict[str, Any]]
) -> Dict[int, List[Dict[str, str]]]:
error_map = defaultdict(lambda: [])
for error in errors:
if error["concise_description"]:
description = error["concise_description"]
else:
description = error["description"]
match = re.search(r"\[(\d+)\]: (.*)", description)
if match:
error_map[error["line"]].append(
{"code": match.group(1), "description": match.group(2)}
)
return error_map
| 35.744472 | 97 | 0.584479 |
51ba14dd096049c58930f392d9e8c4da8f56c8e8 | 9,993 | py | Python | scrapy/tests/test_utils_url.py | dominikszabo/scrapy | e7de00a8f043f710d7dda38f0ba803bb89f55ad9 | [
"BSD-3-Clause"
] | 1 | 2022-03-04T06:18:22.000Z | 2022-03-04T06:18:22.000Z | scrapy/tests/test_utils_url.py | dominikszabo/scrapy | e7de00a8f043f710d7dda38f0ba803bb89f55ad9 | [
"BSD-3-Clause"
] | null | null | null | scrapy/tests/test_utils_url.py | dominikszabo/scrapy | e7de00a8f043f710d7dda38f0ba803bb89f55ad9 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from scrapy.spider import BaseSpider
from scrapy.utils.url import url_is_from_any_domain, url_is_from_spider, canonicalize_url, url_has_any_extension
from scrapy.linkextractor import IGNORED_EXTENSIONS
__doctests__ = ['scrapy.utils.url']
class UrlUtilsTest(unittest.TestCase):
def test_url_is_from_any_domain(self):
url = 'http://www.wheele-bin-art.co.uk/get/product/123'
self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))
url = 'http://wheele-bin-art.co.uk/get/product/123'
self.assertTrue(url_is_from_any_domain(url, ['wheele-bin-art.co.uk']))
self.assertFalse(url_is_from_any_domain(url, ['art.co.uk']))
url = 'http://192.169.0.15:8080/mypage.html'
self.assertTrue(url_is_from_any_domain(url, ['192.169.0.15:8080']))
self.assertFalse(url_is_from_any_domain(url, ['192.169.0.15']))
url = 'javascript:%20document.orderform_2581_1190810811.mode.value=%27add%27;%20javascript:%20document.orderform_2581_1190810811.submit%28%29'
self.assertFalse(url_is_from_any_domain(url, ['testdomain.com']))
self.assertFalse(url_is_from_any_domain(url+'.testdomain.com', ['testdomain.com']))
def test_url_is_from_spider(self):
spider = BaseSpider(name='example.com')
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', spider))
self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', spider))
def test_url_is_from_spider_class_attributes(self):
class MySpider(BaseSpider):
name = 'example.com'
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
self.assertFalse(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
self.assertFalse(url_is_from_spider('http://www.example.net/some/page.html', MySpider))
def test_url_is_from_spider_with_allowed_domains(self):
spider = BaseSpider(name='example.com', allowed_domains=['example.org', 'example.net'])
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://example.com/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', spider))
self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', spider))
self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', spider))
def test_url_is_from_spider_with_allowed_domains_class_attributes(self):
class MySpider(BaseSpider):
name = 'example.com'
allowed_domains = ['example.org', 'example.net']
self.assertTrue(url_is_from_spider('http://www.example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://sub.example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://example.com/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://www.example.org/some/page.html', MySpider))
self.assertTrue(url_is_from_spider('http://www.example.net/some/page.html', MySpider))
self.assertFalse(url_is_from_spider('http://www.example.us/some/page.html', MySpider))
def test_canonicalize_url(self):
# simplest case
self.assertEqual(canonicalize_url("http://www.example.com/"),
"http://www.example.com/")
# always return a str
assert isinstance(canonicalize_url(u"http://www.example.com"), str)
# append missing path
self.assertEqual(canonicalize_url("http://www.example.com"),
"http://www.example.com/")
# typical usage
self.assertEqual(canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1")
self.assertEqual(canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1")
# sorting by argument values
self.assertEqual(canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3")
# using keep_blank_values
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=")
self.assertEqual(canonicalize_url(u'http://www.example.com/do?1750,4'),
'http://www.example.com/do?1750%2C4=')
# spaces
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
# normalize percent-encoding case (in paths)
self.assertEqual(canonicalize_url("http://www.example.com/a%a3do"),
"http://www.example.com/a%A3do"),
# normalize percent-encoding case (in query arguments)
self.assertEqual(canonicalize_url("http://www.example.com/do?k=b%a3"),
"http://www.example.com/do?k=b%A3")
# non-ASCII percent-encoding in paths
self.assertEqual(canonicalize_url("http://www.example.com/a do?a=1"),
"http://www.example.com/a%20do?a=1"),
self.assertEqual(canonicalize_url("http://www.example.com/a %20do?a=1"),
"http://www.example.com/a%20%20do?a=1"),
self.assertEqual(canonicalize_url("http://www.example.com/a do\xc2\xa3.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1")
# non-ASCII percent-encoding in query arguments
self.assertEqual(canonicalize_url(u"http://www.example.com/do?price=\xa3500&a=5&z=3"),
u"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
"http://www.example.com/do?a=1&price%28%C2%A3%29=500")
# urls containing auth and ports
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com:81/do?now=1"),
u"http://user:pass@www.example.com:81/do?now=1")
# remove fragments
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag"),
u"http://user:pass@www.example.com/do?a=1")
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag", keep_fragments=True),
u"http://user:pass@www.example.com/do?a=1#frag")
# dont convert safe characters to percent encoding representation
self.assertEqual(canonicalize_url(
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"),
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html")
# urllib.quote uses a mapping cache of encoded characters. when parsing
# an already percent-encoded url, it will fail if that url was not
# percent-encoded as utf-8, that's why canonicalize_url must always
# convert the urls to string. the following test asserts that
# functionality.
self.assertEqual(canonicalize_url(u'http://www.example.com/caf%E9-con-leche.htm'),
'http://www.example.com/caf%E9-con-leche.htm')
# domains are case insensitive
self.assertEqual(canonicalize_url("http://www.EXAMPLE.com/"),
"http://www.example.com/")
# quoted slash and question sign
self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
"http://foo.com/AC%2FDC+rocks%3F/?yeah=1")
self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC/"),
"http://foo.com/AC%2FDC/")
if __name__ == "__main__":
    # Allow running this test module directly, outside the scrapy test runner.
    unittest.main()
| 60.563636 | 150 | 0.611528 |
17dd50bc53a5334b1532c03ce93981d9020ccaf2 | 6,629 | py | Python | docker/test/integration/minifi/core/SingleNodeDockerCluster.py | tomhollingworth/nifi-minifi-cpp | 65fad0d02d04463181470b21c8ea290ddd61c52a | [
"Apache-2.0"
] | null | null | null | docker/test/integration/minifi/core/SingleNodeDockerCluster.py | tomhollingworth/nifi-minifi-cpp | 65fad0d02d04463181470b21c8ea290ddd61c52a | [
"Apache-2.0"
] | null | null | null | docker/test/integration/minifi/core/SingleNodeDockerCluster.py | tomhollingworth/nifi-minifi-cpp | 65fad0d02d04463181470b21c8ea290ddd61c52a | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import docker
import logging
import uuid
from .Cluster import Cluster
from .MinifiContainer import MinifiContainer
from .TransientMinifiContainer import TransientMinifiContainer
from .MinifiWithProvenanceRepoContainer import MinifiWithProvenanceRepoContainer
from .NifiContainer import NifiContainer
from .ZookeeperContainer import ZookeeperContainer
from .KafkaBrokerContainer import KafkaBrokerContainer
from .S3ServerContainer import S3ServerContainer
from .AzureStorageServerContainer import AzureStorageServerContainer
from .FakeGcsServerContainer import FakeGcsServerContainer
from .HttpProxyContainer import HttpProxyContainer
from .PostgreSQLServerContainer import PostgreSQLServerContainer
from .MqttBrokerContainer import MqttBrokerContainer
from .OPCUAServerContainer import OPCUAServerContainer
from .SplunkContainer import SplunkContainer
from .MinifiAsPodInKubernetesCluster import MinifiAsPodInKubernetesCluster
class SingleNodeDockerCluster(Cluster):
    """
    A "cluster" which consists of a single docker node. Useful for
    testing or use-cases which do not span multiple compute nodes.
    """

    # Flow engines run a dataflow and need a configuration directory.
    # Maps engine name -> (container class, key into self.data_directories
    # that locates that engine's config dir).
    FLOW_ENGINES = {
        'nifi': (NifiContainer, 'nifi_config_dir'),
        'minifi-cpp': (MinifiContainer, 'minifi_config_dir'),
        'kubernetes': (MinifiAsPodInKubernetesCluster, 'minifi_config_dir'),
        'transient-minifi': (TransientMinifiContainer, 'minifi_config_dir'),
        'minifi-cpp-with-provenance-repo': (MinifiWithProvenanceRepoContainer, 'minifi_config_dir'),
    }

    # Auxiliary service engines: constructed without a config directory.
    SERVICE_ENGINES = {
        'kafka-broker': KafkaBrokerContainer,
        'http-proxy': HttpProxyContainer,
        's3-server': S3ServerContainer,
        'azure-storage-server': AzureStorageServerContainer,
        'fake-gcs-server': FakeGcsServerContainer,
        'postgresql-server': PostgreSQLServerContainer,
        'mqtt-broker': MqttBrokerContainer,
        'opcua-server': OPCUAServerContainer,
        'splunk': SplunkContainer,
    }

    def __init__(self, image_store):
        self.vols = {}
        self.network = self.create_docker_network()
        self.containers = {}
        self.image_store = image_store
        self.data_directories = {}
        # Get docker client
        self.client = docker.from_env()

    def __del__(self):
        self.cleanup()

    def cleanup(self):
        """Tear down every container and remove the docker network (idempotent)."""
        for container in self.containers.values():
            container.cleanup()
        self.containers = {}
        if self.network:
            logging.info('Cleaning up network: %s', self.network.name)
            self.network.remove()
            self.network = None

    def set_directory_bindings(self, volumes, data_directories):
        """Record volume bindings/data dirs and propagate volumes to existing containers."""
        self.vols = volumes
        self.data_directories = data_directories
        for container in self.containers.values():
            container.vols = self.vols

    @staticmethod
    def create_docker_network():
        """Create a uniquely-named docker network for this test cluster."""
        net_name = 'minifi_integration_test_network-' + str(uuid.uuid4())
        logging.debug('Creating network: %s', net_name)
        return docker.from_env().networks.create(net_name)

    def acquire_container(self, name, engine='minifi-cpp', command=None):
        """Return the container registered under ``name``, creating it on first use.

        For the 'nifi' and 'minifi-cpp' engines a unique name is generated when
        none is supplied.
        """
        if name is not None and name in self.containers:
            return self.containers[name]
        if name is None and (engine == 'nifi' or engine == 'minifi-cpp'):
            name = engine + '-' + str(uuid.uuid4())
            logging.info('Container name was not provided; using generated name \'%s\'', name)
        return self.containers.setdefault(name, self._create_container(name, engine, command))

    def _create_container(self, name, engine, command):
        """Instantiate (but do not register) a container for ``engine``.

        Raises Exception for an unknown engine name.
        """
        if engine in self.FLOW_ENGINES:
            container_class, config_dir_key = self.FLOW_ENGINES[engine]
            return container_class(self.data_directories[config_dir_key], name, self.vols,
                                   self.network, self.image_store, command)
        if engine == 'kafka-broker' and 'zookeeper' not in self.containers:
            # A kafka broker requires a zookeeper instance; create one implicitly.
            self.containers.setdefault(
                'zookeeper',
                ZookeeperContainer('zookeeper', self.vols, self.network, self.image_store, command))
        if engine in self.SERVICE_ENGINES:
            return self.SERVICE_ENGINES[engine](name, self.vols, self.network, self.image_store, command)
        raise Exception('invalid flow engine: \'%s\'' % engine)

    def deploy(self, name):
        """Deploy a single named container; raises for unknown names."""
        if name is None or name not in self.containers:
            raise Exception('Invalid container to deploy: \'%s\'' % name)
        self.containers[name].deploy()

    def deploy_flow(self):
        """Deploy every container acquired so far."""
        for container in self.containers.values():
            container.deploy()
| 51.387597 | 188 | 0.718208 |
3033d8ab776e44e4dea8feddd44ba42587b6c539 | 1,867 | py | Python | include/aavm/cli/commands/stop.py | afdaniele/aavm | b67106c15b68f98366c88a5e4a44f2287a37bac4 | [
"MIT"
] | null | null | null | include/aavm/cli/commands/stop.py | afdaniele/aavm | b67106c15b68f98366c88a5e4a44f2287a37bac4 | [
"MIT"
] | null | null | null | include/aavm/cli/commands/stop.py | afdaniele/aavm | b67106c15b68f98366c88a5e4a44f2287a37bac4 | [
"MIT"
] | null | null | null | import argparse
import time
from typing import Optional
from cpk.types import Machine
from .. import AbstractCLICommand
from ..logger import aavmlogger
from ... import aavmconfig
from ...types import Arguments
class CLIStopCommand(AbstractCLICommand):
KEY = 'stop'
@staticmethod
def parser(parent: Optional[argparse.ArgumentParser] = None,
args: Optional[Arguments] = None) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(parents=[parent])
parser.add_argument(
"name",
type=str,
nargs=1,
help="Name of the machine to stop"
)
return parser
@staticmethod
def execute(cpk_machine: Machine, parsed: argparse.Namespace) -> bool:
parsed.machine = parsed.name[0].strip()
# check if the machine exists
if parsed.machine not in aavmconfig.machines:
aavmlogger.error(f"The machine '{parsed.machine}' does not exist.")
return False
# get the machine
machine = aavmconfig.machines[parsed.machine]
# try to get an existing container for this machine
container = machine.container
if container is None or container.status != "running":
aavmlogger.info(f"The machine '{machine.name}' does not appear to be running right "
f"now. Nothing to do.")
return True
# attempt to stop the container
aavmlogger.info("Stopping machine...")
container.stop()
# wait for container to be stopped
t = 0
while t < 15:
container.reload()
if container.status in ["stopped", "exited", "created"]:
break
time.sleep(1)
t += 1
# ---
aavmlogger.info("Machine stopped.")
# ---
return True
| 32.189655 | 96 | 0.598286 |
1255c91699c45f17edf704615cc6d2b2950237b4 | 3,062 | py | Python | categories/admin.py | s1n4/django-categories | 6af6d815e214bddbaac572c19e9c738ef1f752d6 | [
"Apache-2.0"
] | 1 | 2019-02-06T14:23:55.000Z | 2019-02-06T14:23:55.000Z | categories/admin.py | s1n4/django-categories | 6af6d815e214bddbaac572c19e9c738ef1f752d6 | [
"Apache-2.0"
] | null | null | null | categories/admin.py | s1n4/django-categories | 6af6d815e214bddbaac572c19e9c738ef1f752d6 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from .genericcollection import GenericCollectionTabularInline
from .settings import RELATION_MODELS, JAVASCRIPT_URL, REGISTER_ADMIN
from .models import Category
from .base import CategoryBaseAdminForm, CategoryBaseAdmin
from .settings import MODEL_REGISTRY
class NullTreeNodeChoiceField(forms.ModelChoiceField):
    """A ModelChoiceField whose option labels reflect each node's tree depth."""

    def __init__(self, level_indicator=u'---', *args, **kwargs):
        # Store the indicator before delegating: the parent may build choices
        # (and thus call label_from_instance) during initialization.
        self.level_indicator = level_indicator
        super(NullTreeNodeChoiceField, self).__init__(*args, **kwargs)

    def label_from_instance(self, obj):
        """Prefix the node's label with one indicator per MPTT tree level."""
        depth = getattr(obj, obj._mptt_meta.level_attr)
        indent = self.level_indicator * depth
        return u'%s %s' % (indent, obj)
if RELATION_MODELS:
    # Only define the relation inline when relation models are configured,
    # since CategoryRelation is only importable in that case.
    from .models import CategoryRelation

    class InlineCategoryRelation(GenericCollectionTabularInline):
        # Inline admin rows editing generic category relations.
        model = CategoryRelation
class CategoryAdminForm(CategoryBaseAdminForm):
    """Admin form for Category that defaults the alternate title to the name."""

    class Meta:
        model = Category

    def clean_alternate_title(self):
        """Return the alternate title, falling back to the name when blank
        or when there is no bound instance."""
        data = self.cleaned_data
        alternate = data['alternate_title']
        if self.instance is not None and alternate:
            return alternate
        return data['name']
class CategoryAdmin(CategoryBaseAdmin):
    """Django admin configuration for the Category model."""
    form = CategoryAdminForm
    list_display = ('name', 'alternate_title', 'active')
    # Primary fields open by default; meta-data and advanced sections collapsed.
    fieldsets = (
        (None, {
            'fields': ('parent', 'name', 'thumbnail', 'active')
        }),
        (_('Meta Data'), {
            'fields': ('alternate_title', 'alternate_url', 'description',
                'meta_keywords', 'meta_extra'),
            'classes': ('collapse',),
        }),
        (_('Advanced'), {
            'fields': ('order', 'slug'),
            'classes': ('collapse',),
        }),
    )
    # Relation inline exists only when RELATION_MODELS is configured
    # (see the conditional class definition above in this module).
    if RELATION_MODELS:
        inlines = [InlineCategoryRelation, ]

    class Media:
        # JS needed by the generic-collection inline widgets.
        js = (JAVASCRIPT_URL + 'genericcollections.js',)
if REGISTER_ADMIN:
    admin.site.register(Category, CategoryAdmin)

# For every registered ModelAdmin whose model participates in MODEL_REGISTRY,
# re-register it with an extra 'Categories' fieldset containing the category
# fields that its fieldsets do not already declare.
# NOTE: snapshot the registry with list() because unregister()/register()
# mutate admin.site._registry while we iterate it.
for model, modeladmin in list(admin.site._registry.items()):
    if model in MODEL_REGISTRY.values() and modeladmin.fieldsets:
        fieldsets = getattr(modeladmin, 'fieldsets', ())
        fields = [cat.split('.')[2] for cat in MODEL_REGISTRY if MODEL_REGISTRY[cat] == model]
        # BUG FIX: the original removed entries from `fields` while iterating
        # the same list, which silently skips the element following each
        # removal. Filter into a new list instead.
        fields = [field for field in fields
                  if not any(field in opts['fields'] for _, opts in fieldsets)]
        # if there are any fields left, add them under the categories fieldset
        if fields:
            admin.site.unregister(model)
            admin.site.register(model, type('newadmin', (modeladmin.__class__,), {
                'fieldsets': fieldsets + (('Categories', {
                    'fields': fields
                }),)
            }))
6a808b6f1a1df40904bd7090b9d3a4749abee511 | 762 | py | Python | test/runner.py | firodj/ascigram | 83bf7cab0a3cd27f5e8d8843b3df62f857db437d | [
"MIT"
] | null | null | null | test/runner.py | firodj/ascigram | 83bf7cab0a3cd27f5e8d8843b3df62f857db437d | [
"MIT"
] | null | null | null | test/runner.py | firodj/ascigram | 83bf7cab0a3cd27f5e8d8843b3df62f857db437d | [
"MIT"
] | null | null | null | import os
import subprocess
TEST_ROOT = os.path.dirname(__file__)
PROJECT_ROOT = os.path.dirname(TEST_ROOT)
ASCIGRAM = [os.path.abspath(os.path.join(PROJECT_ROOT, 'ascigram'))]
tests = []
def collect_tests():
for file in os.listdir(TEST_ROOT):
if '.svg' in file:
name = file.rsplit('.', 1)[0]
tests.append(name)
print("Collecting {0} tests.".format(len(tests)))
def main():
collect_tests()
flags = ['-T']
for test in tests:
cmd = ASCIGRAM + flags + [os.path.join(TEST_ROOT, test + '.txt')]
print("Execute: {0}".format(cmd))
ascigram_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdoutdata = ascigram_proc.communicate()[0]
if __name__ == '__main__':
main()
| 23.090909 | 73 | 0.624672 |
1fa182f907fadc10bd41c5765a758a9aee685765 | 6,628 | py | Python | django_harmonization/HeartData/report_extraction.py | chrisroederucdenver/Kao-Harmonization-Release | 1a90db58cd378244a8aba138e27f049376045729 | [
"Apache-2.0"
] | null | null | null | django_harmonization/HeartData/report_extraction.py | chrisroederucdenver/Kao-Harmonization-Release | 1a90db58cd378244a8aba138e27f049376045729 | [
"Apache-2.0"
] | null | null | null | django_harmonization/HeartData/report_extraction.py | chrisroederucdenver/Kao-Harmonization-Release | 1a90db58cd378244a8aba138e27f049376045729 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
Copyright 2017 The Regents of the University of Colorado
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
report_extraction.py <db> <user> <study_id> <extraction_id>
db - is the name of the database within postgresql
user - is the user with permissions in posgresql
study_id - is the study id we're importing to OHDSI, see the study table
extraction_id - is the extraction configuration used to query the categorization_* tables
Python Version: 3.6.3
a report that shows the mapping from a study to ohdsi and on to an extraction matrix.
TODO: I believe it doesn't show the path between migration and concepts used as input to
calculation functions. I think that's because it works backwards from the
categorization/extraction end.
It doesn't read through the calcuations to include their input.s
This is research code for demonstration purposes only.
croeder 6/2017 chris.roeder@ucdenver.edu
'''
import logging
import sys
import psycopg2
import argh
from psycopg2.extras import RealDictCursor
from study import get_study_details
log = sys.stdout.write
def get_concept_id_and_name(con, vocabulary, concept_code):
    """Look up (concept_id, concept_name) for a vocabulary/code pair in the
    OHDSI CDM concept table; returns (None, None) when the pair is unknown."""
    query = ("SELECT concept_id, concept_name "
             "FROM concept "
             "WHERE concept_code = %s and vocabulary_id = %s")
    cursor = con.cursor(cursor_factory=RealDictCursor)
    cursor.execute(query, (concept_code, vocabulary))
    result = cursor.fetchall()
    cursor.close()
    if not result:
        return (None, None)
    first = result[0]
    return (first['concept_id'], first['concept_name'])
def extract_function_parameters(con, function_name, long_name, rule_id, extraction_id):
    ''' fetch the parameters to go with an extraction function

        Prints (via the module-level log writer) a one-line summary of the
        categorization parameters for one rule, e.g.
        " CAT: fn(up to:10 INTO: 1, down to:5 INTO: 2)".
        Prints a "no categorization" note when no parameters exist.
    '''
    stmt = ("SELECT value_limit, from_string, from_concept_id, rank "
            "FROM categorization_function_parameters "
            "WHERE function_name = %s "
            "AND long_name = %s "
            "AND rule_id = %s "
            "AND extract_study_id = %s "
            "ORDER BY rank")
    cur = con.cursor(cursor_factory=RealDictCursor)
    cur.execute(stmt, (function_name, long_name, rule_id, extraction_id))
    rows = cur.fetchall()
    cur.close()
    # `prefix` latches once per call: set from the function-name suffix on the
    # first row and only reset to "" when a row has no limit/string/concept
    # (the "remainder" bucket).
    prefix = ""
    if rows:
        log(" CAT: " + function_name + "(")
    row_count = 0
    for row in rows:
        row_count += 1
        thing = ""
        # suffix of the categorization function name implies direction
        if prefix == "" and function_name[-3:] == 'asc':
            prefix = "up to"
        elif prefix == "" and function_name[-4:] == 'desc':
            prefix = "down to"
        # pick whichever parameter column is populated (at most one expected);
        # `!= None` kept as-is (is not None would be the idiomatic test)
        if row['value_limit'] != None:
            thing = row['value_limit']
        elif row['from_string'] != None:
            thing = row['from_string']
        elif row['from_concept_id'] != None:
            thing = row['from_concept_id']
        else:
            prefix = ""
            thing = 'remainder'
        log("{}:{} ".format(prefix, thing))
        log("INTO: {}".format(row['rank']))
        # comma-separate all but the last parameter
        if row_count < len(rows):
            log(", ")
    if rows:
        log(")")
    else:
        log(" CAT: (no categorization, value passed on as-is)")
    # log(" missing? {} {} {}".format(function_name, long_name, rule_id))
    sys.stdout.flush()
def report_narrow_extraction(con, study_id, extraction_id):
    ''' Shows categorization for results of calculations.
        This is for concepts that don't come directly from input tables, rather
        ones that are calculated.

        NOTE: study_id is currently unused (the query filters only on the
        extraction id); the parameter is kept for interface compatibility.
    '''
    cur = con.cursor(cursor_factory=RealDictCursor)
    stmt = ("SELECT "
            "c.from_vocabulary_id, "
            "c.from_concept_code, "
            "c.from_table, "
            "c.function_name, "
            "c.long_name, "
            "c.short_name, "
            "c.rule_id "
            "FROM categorization_function_metadata c "
            "WHERE c.extract_study_id = %s "
            "ORDER BY c.extract_study_id;")
    cur.execute(stmt, (extraction_id,))
    rows = cur.fetchall()
    for row in rows:
        # resolve the source concept's human-readable name for the report line
        (_, name) = get_concept_id_and_name(con, row['from_vocabulary_id'], row['from_concept_code'])
        log("{}() {}:{} \"{}\" --> \"{}\" / \"{}\" \n".format(row['function_name'], row['from_vocabulary_id'], row['from_concept_code'], name, row['long_name'], row['short_name']))
        extract_function_parameters(con, row['function_name'], row['long_name'], row['rule_id'], extraction_id)
        log("\n\n")
    cur.close()
    sys.stdout.flush()
def report_wide_extraction(con, extraction_id):
    """Print one report line per row of categorization_function_table, i.e. the
    values extracted directly from source tables ("wide" extraction)."""
    query = ("SELECT from_table, from_column, from_vocabulary_id, from_concept_code, function_name, long_name "
             " FROM categorization_function_table "
             " WHERE extract_study_id = %s")
    cursor = con.cursor(cursor_factory=RealDictCursor)
    cursor.execute(query, (extraction_id,))
    for row in cursor.fetchall():
        # resolve the human-readable concept name for the report line
        (_, name) = get_concept_id_and_name(con, row['from_vocabulary_id'], row['from_concept_code'])
        log("\"{}\" <-- {}() {}:{} where: {}:{} \"{}\" \n".format(
            row['long_name'],
            row['function_name'],
            row['from_table'], row['from_column'],
            row['from_vocabulary_id'], row['from_concept_code'], name
        ))
        log("\n")
    cursor.close()
    sys.stdout.flush()
def main(db_name, user_name, study_name, extraction_id):
    """Print the narrow (calculated) and wide (table-driven) extraction reports.

    db_name/user_name identify the PostgreSQL connection; study_name selects
    the study; extraction_id selects the extraction configuration.
    """
    conn = psycopg2.connect(database=db_name, user=user_name)
    conn.autocommit = True
    try:
        # Only the study id is needed here; the observation range and the
        # remaining study details are unused by these reports.
        # (Removed an unused module logger that was created and never used.)
        (study_id, _, _, _, _) = get_study_details(conn, study_name)
        print("\n-- calculated extraction ----------\n")
        report_narrow_extraction(conn, study_id, extraction_id)
        print("\n-- wide extraction ----------\n")
        report_wide_extraction(conn, extraction_id)
    finally:
        # FIX: close the connection even when a report raises.
        conn.close()
if __name__ == '__main__':
    # Command-line entry point: argh maps main()'s parameters to CLI arguments.
    parser = argh.ArghParser()
    argh.set_default_command(parser, main)
    argh.dispatch(parser)
| 36.822222 | 181 | 0.638654 |
51bde9f4a06b63b5538bfd95bb7e880e7955f149 | 4,579 | py | Python | salt/utils/atomicfile.py | mimianddaniel/booksalt | 248c2349fd9a6edc30d48d11673ab72bed4583ce | [
"Apache-2.0"
] | 1 | 2022-02-18T05:30:18.000Z | 2022-02-18T05:30:18.000Z | salt/utils/atomicfile.py | mimianddaniel/booksalt | 248c2349fd9a6edc30d48d11673ab72bed4583ce | [
"Apache-2.0"
] | null | null | null | salt/utils/atomicfile.py | mimianddaniel/booksalt | 248c2349fd9a6edc30d48d11673ab72bed4583ce | [
"Apache-2.0"
] | null | null | null | '''
A module written originally by Armin Ronacher to manage file transfers in an
atomic way
'''
# Import python libs
import os
import tempfile
import sys
import errno
import time
import random
# True when the platform can rename a file that is still open (POSIX yes;
# Windows only via the transactional APIs probed below).
can_rename_open_file = False
if os.name == 'nt': # pragma: no cover
    # Windows: os.rename cannot replace an existing file, so fall back to
    # Win32 MoveFileEx / transactional MoveFileTransacted via ctypes.
    # NOTE: this is Python 2 code (`unicode`, `sys.maxint`, `except E, e`).
    _rename = lambda src, dst: False
    _rename_atomic = lambda src, dst: False
    try:
        import ctypes
        _MOVEFILE_REPLACE_EXISTING = 0x1
        _MOVEFILE_WRITE_THROUGH = 0x8
        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW
        def _rename(src, dst):
            # Pseudo-atomic rename: try the transactional variant first,
            # then retry MoveFileEx for up to ~100ms (sharing violations).
            if not isinstance(src, unicode):
                src = unicode(src, sys.getfilesystemencoding())
            if not isinstance(dst, unicode):
                dst = unicode(dst, sys.getfilesystemencoding())
            if _rename_atomic(src, dst):
                return True
            retry = 0
            rv = False
            while not rv and retry < 100:
                rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
                                 _MOVEFILE_WRITE_THROUGH)
                if not rv:
                    time.sleep(0.001)
                    retry += 1
            return rv
        # new in Vista and Windows Server 2008
        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
        _CloseHandle = ctypes.windll.kernel32.CloseHandle
        can_rename_open_file = True
        def _rename_atomic(src, dst):
            # Truly atomic rename inside a kernel transaction; returns False
            # when the transaction cannot be created (pre-Vista).
            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Atomic rename')
            if ta == -1:
                return False
            try:
                retry = 0
                rv = False
                while not rv and retry < 100:
                    rv = _MoveFileTransacted(src, dst, None, None,
                                             _MOVEFILE_REPLACE_EXISTING |
                                             _MOVEFILE_WRITE_THROUGH, ta)
                    if rv:
                        rv = _CommitTransaction(ta)
                        break
                    else:
                        time.sleep(0.001)
                        retry += 1
                return rv
            finally:
                _CloseHandle(ta)
    except Exception:
        # ctypes or the required Win32 APIs are unavailable; keep the
        # always-False stubs so atomic_rename falls through below.
        pass
    def atomic_rename(src, dst):
        # Try atomic or pseudo-atomic rename
        if _rename(src, dst):
            return
        # Fall back to "move away and replace"
        try:
            os.rename(src, dst)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise
            # Destination exists: move it aside under a random suffix,
            # rename into place, then best-effort delete the old copy.
            old = '{0}-{1:08x}'.format(dst, random.randint(0, sys.maxint))
            os.rename(dst, old)
            os.rename(src, dst)
            try:
                os.unlink(old)
            except Exception:
                pass
else:
    # POSIX rename is already atomic and replaces existing destinations.
    atomic_rename = os.rename
    can_rename_open_file = True
class _AtomicWFile(object):
    '''
    Helper class for :func:`atomic_open`.

    Proxies all file operations to a wrapped temporary file and, on a clean
    close, renames the temporary file over the destination path.
    '''
    def __init__(self, f, tmp_filename, filename):
        self._f = f
        self._tmp_filename = tmp_filename
        self._filename = filename

    def __getattr__(self, attr):
        # Delegate everything else (write, flush, mode, ...) to the wrapped file.
        return getattr(self._f, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None:
            # The with-block failed: discard the temp file, keep the original.
            self._f.close()
            try:
                os.remove(self._tmp_filename)
            except OSError:
                pass
        else:
            self.close()

    def close(self):
        if not self._f.closed:
            self._f.close()
            # Publish the finished temp file over the real destination.
            atomic_rename(self._tmp_filename, self._filename)

    def __repr__(self):
        state = 'closed ' if self._f.closed else ''
        return '<{0} {1}{2}, mode {3}>'.format(
            type(self).__name__, state, self._filename, self._f.mode
        )
def atomic_open(filename, mode='w'):
    '''
    Open ``filename`` for writing through a temporary file.

    Behaves like the builtin ``open()``, except that all writes go to a
    temporary file created in the same directory; when the returned file is
    closed, the temporary file atomically replaces ``filename``.
    Read and append modes are rejected with a TypeError.
    '''
    disallowed_modes = ('r', 'rb', 'r+', 'rb+', 'a', 'ab')
    if mode in disallowed_modes:
        raise TypeError('Read or append modes don\'t work with atomic_open')
    tmp = tempfile.NamedTemporaryFile(
        mode, prefix='.___atomic_write',
        dir=os.path.dirname(filename),
        delete=False)
    return _AtomicWFile(tmp, tmp.name, filename)
| 30.939189 | 76 | 0.533741 |
9b3350c35439e691527728ec10f7d533aac29aff | 13,270 | py | Python | sagemaker/01-train/source/test.py | nwcd-samples/NWCD-Industry-Customized-OCR-Solution | 5d995bb9297a0d125df541da4c9a594f8ca23f3f | [
"MIT"
] | 17 | 2020-08-07T15:38:37.000Z | 2021-11-18T00:25:31.000Z | sagemaker/01-train/source/test.py | nwcd-samples/sagemaker-ocr-chinese | 3f9c25d89e012e0376466ece7493c50b2ff727e4 | [
"MIT"
] | 2 | 2020-09-09T09:39:04.000Z | 2020-12-03T09:27:52.000Z | sagemaker/01-train/source/test.py | nwcd-samples/sagemaker-ocr-chinese | 3f9c25d89e012e0376466ece7493c50b2ff727e4 | [
"MIT"
] | 3 | 2020-08-28T07:22:07.000Z | 2021-08-25T08:36:21.000Z | import os
import time
import string
import argparse
import re
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
from nltk.metrics.distance import edit_distance
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate
from model import Model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = torch.device('cpu')
def benchmark_all_eval(model, criterion, converter, opt, calculate_infer_time=False):
    """ evaluation with 10 benchmark evaluation datasets """
    # The evaluation datasets, dataset order is same with Table 1 in our paper.
    eval_data_list = ['IIIT5k_3000', 'SVT', 'IC03_860', 'IC03_867', 'IC13_857',
                      'IC13_1015', 'IC15_1811', 'IC15_2077', 'SVTP', 'CUTE80']
    if calculate_infer_time:
        evaluation_batch_size = 1  # batch_size should be 1 to calculate the GPU inference time per image.
    else:
        evaluation_batch_size = opt.batch_size
    list_accuracy = []
    total_forward_time = 0
    total_evaluation_data_number = 0
    total_correct_number = 0
    # NOTE(review): opened without `with` — the handle leaks if an
    # exception is raised before log.close() below.
    log = open(f'./result/{opt.exp_name}/log_all_evaluation.txt', 'a')
    dashed_line = '-' * 80
    print(dashed_line)
    log.write(dashed_line + '\n')
    for eval_data in eval_data_list:
        eval_data_path = os.path.join(opt.eval_data, eval_data)
        AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
        # NOTE(review): `eval_data` (the dataset *name* string) is rebound
        # here to the dataset object — confusing but harmless, since the
        # name is not used again within this iteration.
        eval_data, eval_data_log = hierarchical_dataset(root=eval_data_path, opt=opt)
        evaluation_loader = torch.utils.data.DataLoader(
            eval_data, batch_size=evaluation_batch_size,
            shuffle=False,
            num_workers=int(opt.workers),
            collate_fn=AlignCollate_evaluation, pin_memory=True)
        _, accuracy_by_best_model, norm_ED_by_best_model, _, _, _, infer_time, length_of_data = validation(
            model, criterion, evaluation_loader, converter, opt)
        list_accuracy.append(f'{accuracy_by_best_model:0.3f}')
        total_forward_time += infer_time
        total_evaluation_data_number += len(eval_data)
        # `accuracy_by_best_model` is a percentage (see validation()), so
        # this accumulates 100 * n_correct for the weighted average below.
        total_correct_number += accuracy_by_best_model * length_of_data
        log.write(eval_data_log)
        print(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}')
        log.write(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}\n')
        print(dashed_line)
        log.write(dashed_line + '\n')
    # Mean forward time per image, in milliseconds.
    averaged_forward_time = total_forward_time / total_evaluation_data_number * 1000
    total_accuracy = total_correct_number / total_evaluation_data_number
    params_num = sum([np.prod(p.size()) for p in model.parameters()])
    evaluation_log = 'accuracy: '
    for name, accuracy in zip(eval_data_list, list_accuracy):
        evaluation_log += f'{name}: {accuracy}\t'
    evaluation_log += f'total_accuracy: {total_accuracy:0.3f}\t'
    evaluation_log += f'averaged_infer_time: {averaged_forward_time:0.3f}\t# parameters: {params_num/1e6:0.3f}'
    print(evaluation_log)
    log.write(evaluation_log + '\n')
    log.close()
    return None
""" validation or evaluation """
n_correct = 0
norm_ED = 0
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
for i, (image_tensors, labels) in enumerate(evaluation_loader):
batch_size = image_tensors.size(0)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)
start_time = time.time()
if 'CTC' in opt.Prediction:
preds = model(image, text_for_pred)
forward_time = time.time() - start_time
# Calculate evaluation loss for CTC deocder.
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
cost = criterion(preds.log_softmax(2).permute(1, 0, 2), text_for_loss, preds_size, length_for_loss)
# Select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index.data, preds_size.data)
else:
preds = model(image, text_for_pred, is_train=False)
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probabilty (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
infer_time += forward_time
valid_loss_avg.add(cost)
# calculate accuracy & confidence score
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
confidence_score_list = []
for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):
# add by dikers: 去掉两端的空格
pred = pred.replace('.', '.').lstrip().rstrip()
gt = gt.replace('.', '.').lstrip().rstrip()
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate 'case sensitive model' with alphanumeric and case insensitve setting.
if opt.sensitive and opt.data_filtering_off:
pred = pred.lower()
gt = gt.lower()
alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred == gt:
n_correct += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt) == 0 or len(pred) == 0:
norm_ED += 0
elif len(gt) > len(pred):
norm_ED += 1 - edit_distance(pred, gt) / len(gt)
else:
norm_ED += 1 - edit_distance(pred, gt) / len(pred)
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
except:
confidence_score = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list.append(confidence_score)
# print(pred, gt, pred==gt, confidence_score)
accuracy = n_correct / float(length_of_data) * 100
norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return valid_loss_avg.val(), accuracy, norm_ED, strip_str(preds_str), confidence_score_list, strip_str(labels), infer_time, length_of_data
# add by dikers: strip whitespace from both ends
def strip_str(str_list):
    """Return a new list with surrounding whitespace removed from every
    string in *str_list*.

    :param str_list: iterable of strings (predictions or labels).
    :return: list of stripped strings, in the original order.
    """
    # str.strip() is equivalent to the old .lstrip().rstrip() chain.
    return [s.strip() for s in str_list]
def test(opt):
    """ model configuration

    Load a trained model from opt.saved_model and evaluate it, either on
    the 10-benchmark suite (opt.benchmark_all_eval) or on a single
    dataset rooted at opt.eval_data.
    """
    if 'CTC' in opt.Prediction:
        converter = CTCLabelConverter(opt.character)
    else:
        converter = AttnLabelConverter(opt.character)
    opt.num_class = len(converter.character)
    if opt.rgb:
        opt.input_channel = 3
    model = Model(opt)
    print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
          opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
          opt.SequenceModeling, opt.Prediction)
    model = torch.nn.DataParallel(model).to(device)
    # load model
    print('loading pretrained model from %s' % opt.saved_model)
    model.load_state_dict(torch.load(opt.saved_model, map_location=device))
    # Experiment name is derived from the checkpoint path (without the
    # leading directory component).
    opt.exp_name = '_'.join(opt.saved_model.split('/')[1:])
    # print(model)
    """ keep evaluation model and result logs """
    os.makedirs(f'./result/{opt.exp_name}', exist_ok=True)
    # NOTE(review): opt.saved_model is interpolated into a shell command;
    # paths with spaces/metacharacters break this — prefer shutil.copy.
    os.system(f'cp {opt.saved_model} ./result/{opt.exp_name}/')
    """ setup loss """
    if 'CTC' in opt.Prediction:
        criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)  # ignore [GO] token = ignore index 0
    """ evaluation """
    model.eval()
    with torch.no_grad():
        if opt.benchmark_all_eval:  # evaluation with 10 benchmark evaluation datasets
            benchmark_all_eval(model, criterion, converter, opt)
        else:
            log = open(f'./result/{opt.exp_name}/log_evaluation.txt', 'a')
            AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
            eval_data, eval_data_log = hierarchical_dataset(root=opt.eval_data, opt=opt)
            evaluation_loader = torch.utils.data.DataLoader(
                eval_data, batch_size=opt.batch_size,
                shuffle=False,
                num_workers=int(opt.workers),
                collate_fn=AlignCollate_evaluation, pin_memory=True)
            _, accuracy_by_best_model, _, _, _, _, _, _ = validation(
                model, criterion, evaluation_loader, converter, opt)
            log.write(eval_data_log)
            print(f'{accuracy_by_best_model:0.3f}')
            log.write(f'{accuracy_by_best_model:0.3f}\n')
            log.close()
# CLI entry point: parse evaluation options and run test().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval_data', required=True, help='path to evaluation dataset')
    parser.add_argument('--benchmark_all_eval', action='store_true', help='evaluate 10 benchmark evaluation datasets')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
    parser.add_argument('--saved_model', required=True, help="path to saved_model to evaluation")
    """ Data processing """
    parser.add_argument('--batch_max_length', type=int, default=40, help='maximum-label-length')
    parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
    parser.add_argument('--imgW', type=int, default=280, help='the width of the input image')
    parser.add_argument('--rgb', action='store_true', help='use rgb input')
    parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
    parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
    parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
    parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
    """ Model Architecture """
    parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
    parser.add_argument('--FeatureExtraction', type=str, required=True, help='FeatureExtraction stage. VGG|RCNN|ResNet')
    parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
    parser.add_argument('--Prediction', type=str, required=True, help='Prediction stage. CTC|Attn')
    parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
    parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
    parser.add_argument('--output_channel', type=int, default=512,
                        help='the number of output channel of Feature extractor')
    parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
    opt = parser.parse_args()
    """ vocab / character number configuration """
    if opt.sensitive:
        opt.character = string.printable[:-6]  # same with ASTER setting (use 94 char).
    # cudnn.benchmark picks the fastest conv algorithms for fixed shapes.
    cudnn.benchmark = True
    cudnn.deterministic = True
    opt.num_gpu = torch.cuda.device_count()
    test(opt)
| 46.890459 | 142 | 0.663527 |
f272e40bb0c6bd3f551d244905460bd5570305ae | 1,441 | py | Python | python_modules/libraries/dagster-aws/dagster_aws_tests/s3_tests/test_s3_file_cache.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 2 | 2021-06-21T17:50:26.000Z | 2021-06-21T19:14:23.000Z | python_modules/libraries/dagster-aws/dagster_aws_tests/s3_tests/test_s3_file_cache.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 7 | 2022-03-16T06:55:04.000Z | 2022-03-18T07:03:25.000Z | python_modules/libraries/dagster-aws/dagster_aws_tests/s3_tests/test_s3_file_cache.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 1 | 2021-08-18T17:21:57.000Z | 2021-08-18T17:21:57.000Z | import io
import boto3
from dagster_aws.s3 import S3FileCache, S3FileHandle
from moto import mock_s3
@mock_s3
def test_s3_file_cache_file_not_present():
    """A fresh cache has no entry for a key that was never written."""
    client = boto3.client("s3")
    client.create_bucket(Bucket="some-bucket")
    cache = S3FileCache(
        s3_bucket="some-bucket",
        s3_key="some-key",
        s3_session=client,
        overwrite=False,
    )
    assert not cache.has_file_object("foo")
@mock_s3
def test_s3_file_cache_file_present():
    """Writing binary data makes has_file_object flip from False to True."""
    client = boto3.client("s3")
    client.create_bucket(Bucket="some-bucket")
    cache = S3FileCache(
        s3_bucket="some-bucket",
        s3_key="some-key",
        s3_session=client,
        overwrite=False,
    )
    # Absent before the write, present afterwards.
    assert not cache.has_file_object("foo")
    cache.write_binary_data("foo", "bar".encode())
    assert cache.has_file_object("foo")
@mock_s3
def test_s3_file_cache_correct_handle():
    """get_file_handle returns the S3-specific handle type."""
    client = boto3.client("s3")
    client.create_bucket(Bucket="some-bucket")
    cache = S3FileCache(
        s3_bucket="some-bucket",
        s3_key="some-key",
        s3_session=client,
        overwrite=False,
    )
    handle = cache.get_file_handle("foo")
    assert isinstance(handle, S3FileHandle)
@mock_s3
def test_s3_file_cache_write_file_object():
    """write_file_object accepts any readable binary stream."""
    client = boto3.client("s3")
    client.create_bucket(Bucket="some-bucket")
    cache = S3FileCache(
        s3_bucket="some-bucket",
        s3_key="some-key",
        s3_session=client,
        overwrite=False,
    )
    payload = io.BytesIO("content".encode())
    cache.write_file_object("foo", payload)
| 26.2 | 82 | 0.713393 |
8c5a00b638836bede662af571beae2b0b7b9bf06 | 9,683 | py | Python | tests/snuba/api/endpoints/test_organization_event_details.py | viteksafronov/sentry | 37dc9190737f4a754b3c58447497a57b8a82ca55 | [
"BSD-3-Clause"
] | null | null | null | tests/snuba/api/endpoints/test_organization_event_details.py | viteksafronov/sentry | 37dc9190737f4a754b3c58447497a57b8a82ca55 | [
"BSD-3-Clause"
] | null | null | null | tests/snuba/api/endpoints/test_organization_event_details.py | viteksafronov/sentry | 37dc9190737f4a754b3c58447497a57b8a82ca55 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
from sentry.models import Group
class OrganizationEventDetailsEndpointTest(APITestCase, SnubaTestCase):
    """API tests for the organization event-details endpoint.

    setUp seeds three events (IDs "a"*32, "b"*32, "c"*32) at 3/2/1
    minutes ago; "a" and "b" share fingerprint group-1, "c" is group-2.
    The prev/next/oldest/latest link tests add further events around
    these fixtures.
    """

    def setUp(self):
        super(OrganizationEventDetailsEndpointTest, self).setUp()
        min_ago = iso_format(before_now(minutes=1))
        two_min_ago = iso_format(before_now(minutes=2))
        three_min_ago = iso_format(before_now(minutes=3))
        self.login_as(user=self.user)
        self.project = self.create_project()
        # Oldest event: "oh no", group-1.
        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "oh no",
                "timestamp": three_min_ago,
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        )
        # Middle event: "very bad", same group as "a".
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad",
                "timestamp": two_min_ago,
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        )
        # Newest event: "very bad", separate group.
        self.store_event(
            data={
                "event_id": "c" * 32,
                "message": "very bad",
                "timestamp": min_ago,
                "fingerprint": ["group-2"],
            },
            project_id=self.project.id,
        )
        self.groups = list(Group.objects.all().order_by("id"))

    def test_simple(self):
        # The oldest event has no previous link and links forward to "b".
        url = reverse(
            "sentry-api-0-organization-event-details",
            kwargs={
                "organization_slug": self.project.organization.slug,
                "project_slug": self.project.slug,
                "event_id": "a" * 32,
            },
        )
        with self.feature("organizations:events-v2"):
            response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["id"] == "a" * 32
        assert response.data["previousEventID"] is None
        assert response.data["nextEventID"] == "b" * 32
        assert response.data["projectSlug"] == self.project.slug

    def test_simple_transaction(self):
        # Transaction-type events are retrievable through the same endpoint.
        min_ago = iso_format(before_now(minutes=1))
        event = self.store_event(
            data={
                "event_id": "d" * 32,
                "type": "transaction",
                "transaction": "api.issue.delete",
                "spans": [],
                "contexts": {"trace": {"trace_id": "a" * 32, "span_id": "a" * 16}},
                "start_timestamp": iso_format(before_now(minutes=1, seconds=5)),
                "timestamp": min_ago,
            },
            project_id=self.project.id,
        )
        url = reverse(
            "sentry-api-0-organization-event-details",
            kwargs={
                "organization_slug": self.project.organization.slug,
                "project_slug": self.project.slug,
                "event_id": event.event_id,
            },
        )
        with self.feature("organizations:events-v2"):
            response = self.client.get(url, format="json")
        assert response.status_code == 200

    def test_no_access(self):
        # Without the events-v2 feature flag the endpoint is hidden (404).
        url = reverse(
            "sentry-api-0-organization-event-details",
            kwargs={
                "organization_slug": self.project.organization.slug,
                "project_slug": self.project.slug,
                "event_id": "a" * 32,
            },
        )
        response = self.client.get(url, format="json")
        assert response.status_code == 404, response.content

    def test_no_event(self):
        # Unknown event id yields a 404 even with the feature enabled.
        url = reverse(
            "sentry-api-0-organization-event-details",
            kwargs={
                "organization_slug": self.project.organization.slug,
                "project_slug": self.project.slug,
                "event_id": "d" * 32,
            },
        )
        with self.feature("organizations:events-v2"):
            response = self.client.get(url, format="json")
        assert response.status_code == 404, response.content

    def test_event_links_with_field_parameter(self):
        # prev/next links are computed among events matching the queried
        # fields (here: same "message") only.
        # Create older and newer events
        ten_sec_ago = iso_format(before_now(seconds=10))
        self.store_event(
            data={"event_id": "2" * 32, "message": "no match", "timestamp": ten_sec_ago},
            project_id=self.project.id,
        )
        thirty_sec_ago = iso_format(before_now(seconds=30))
        self.store_event(
            data={"event_id": "1" * 32, "message": "very bad", "timestamp": thirty_sec_ago},
            project_id=self.project.id,
        )
        five_min_ago = iso_format(before_now(minutes=5))
        self.store_event(
            data={"event_id": "d" * 32, "message": "very bad", "timestamp": five_min_ago},
            project_id=self.project.id,
        )
        seven_min_ago = iso_format(before_now(minutes=7))
        self.store_event(
            data={"event_id": "e" * 32, "message": "very bad", "timestamp": seven_min_ago},
            project_id=self.project.id,
        )
        eight_min_ago = iso_format(before_now(minutes=8))
        self.store_event(
            data={"event_id": "f" * 32, "message": "no match", "timestamp": eight_min_ago},
            project_id=self.project.id,
        )
        url = reverse(
            "sentry-api-0-organization-event-details",
            kwargs={
                "organization_slug": self.project.organization.slug,
                "project_slug": self.project.slug,
                "event_id": "b" * 32,
            },
        )
        with self.feature("organizations:events-v2"):
            response = self.client.get(url, format="json", data={"field": ["message", "count()"]})
        assert response.data["eventID"] == "b" * 32
        assert response.data["nextEventID"] == "c" * 32, "c is newer & matches message"
        assert response.data["previousEventID"] == "d" * 32, "d is older & matches message"
        assert response.data["oldestEventID"] == "e" * 32, "e is oldest matching message"
        assert response.data["latestEventID"] == "1" * 32, "1 is newest matching message"

    def test_event_links_with_date_range(self):
        # Events outside statsPeriod must be excluded from the links.
        # Create older in and out of range events
        ten_day_ago = iso_format(before_now(days=14))
        self.store_event(
            data={"event_id": "3" * 32, "message": "very bad", "timestamp": ten_day_ago},
            project_id=self.project.id,
        )
        seven_min_ago = iso_format(before_now(minutes=7))
        self.store_event(
            data={"event_id": "2" * 32, "message": "very bad", "timestamp": seven_min_ago},
            project_id=self.project.id,
        )
        url = reverse(
            "sentry-api-0-organization-event-details",
            kwargs={
                "organization_slug": self.project.organization.slug,
                "project_slug": self.project.slug,
                "event_id": "b" * 32,
            },
        )
        with self.feature("organizations:events-v2"):
            response = self.client.get(
                url, format="json", data={"field": ["message", "count()"], "statsPeriod": "7d"}
            )
        assert response.data["eventID"] == "b" * 32
        assert response.data["nextEventID"] == "c" * 32, "c is newer & matches message + range"
        assert response.data["previousEventID"] == "2" * 32, "d is older & matches message + range"
        assert response.data["oldestEventID"] == "2" * 32, "3 is outside range, no match"
        assert response.data["latestEventID"] == "c" * 32, "c is newest matching message"

    def test_event_links_with_tag_fields(self):
        # Querying on a tag restricts links to events sharing that tag value.
        # Create events that overlap with other event messages but
        # with different tags
        ten_sec_ago = iso_format(before_now(seconds=10))
        self.store_event(
            data={
                "event_id": "2" * 32,
                "message": "very bad",
                "timestamp": ten_sec_ago,
                "tags": {"important": "yes"},
            },
            project_id=self.project.id,
        )
        thirty_sec_ago = iso_format(before_now(seconds=30))
        self.store_event(
            data={
                "event_id": "1" * 32,
                "message": "very bad",
                "timestamp": thirty_sec_ago,
                "tags": {"important": "yes"},
            },
            project_id=self.project.id,
        )
        five_min_ago = iso_format(before_now(minutes=5))
        self.store_event(
            data={
                "event_id": "d" * 32,
                "message": "very bad",
                "timestamp": five_min_ago,
                "tags": {"important": "no"},
            },
            project_id=self.project.id,
        )
        url = reverse(
            "sentry-api-0-organization-event-details",
            kwargs={
                "organization_slug": self.project.organization.slug,
                "project_slug": self.project.slug,
                "event_id": "1" * 32,
            },
        )
        with self.feature("organizations:events-v2"):
            response = self.client.get(url, format="json", data={"field": ["important", "count()"]})
        assert response.data["eventID"] == "1" * 32
        assert response.data["previousEventID"] is None, "no matching tags"
        assert response.data["oldestEventID"] is None, "no older matching events"
        assert response.data["nextEventID"] == "2" * 32, "2 is older and has matching tags "
        assert response.data["latestEventID"] == "2" * 32, "2 is oldest matching message"
| 39.361789 | 100 | 0.546938 |
b8246522287e610e78d8a0d141e21a075d4a4246 | 5,403 | py | Python | manage.py | eaudeweb/natura2000db | cdf40495a708d0dc080913198d41b592609c17ba | [
"BSD-3-Clause"
] | 1 | 2021-09-22T01:13:12.000Z | 2021-09-22T01:13:12.000Z | manage.py | eaudeweb/natura2000db | cdf40495a708d0dc080913198d41b592609c17ba | [
"BSD-3-Clause"
] | null | null | null | manage.py | eaudeweb/natura2000db | cdf40495a708d0dc080913198d41b592609c17ba | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import logging
from path import path
import flask
import flaskext.script
import jinja2
import babel.support
import naturasites.schema
import naturasites.views
from naturasites.storage import get_db
import tinygis.views
import auth
# Built-in fallback settings; the instance folder's settings.py (loaded
# in create_app) can override any of these.
default_config = {
    'DEBUG': False,
    'ERROR_LOG_FILE': None,
    'HTTP_LISTEN_HOST': '127.0.0.1',
    'HTTP_LISTEN_PORT': 5000,
    'HTTP_PROXIED': False,
    'HTTP_CHERRYPY': False,
    'STORAGE_ENGINE': 'solr',
    'ZOPE_TEMPLATE_CACHE': False,
    'ZOPE_TEMPLATE_PATH': None,
    'ZOPE_TEMPLATE_LIST': ['frame.html'],
}
# Message catalogs live in ./i18n next to this file; only the Romanian
# ('ro') translation is loaded.
_i18n_path = path(__file__).parent/'i18n'
translations = babel.support.Translations.load(_i18n_path, ['ro'])
def create_app():
    """Build and configure the Flask application.

    Config precedence: `default_config`, then the instance folder's
    settings.py (silently skipped when absent).
    """
    app = flask.Flask(__name__, instance_relative_config=True)
    app.config.update(default_config)
    app.config.from_pyfile("settings.py", silent=True)
    # Enable the jinja2 i18n and `do` extensions.
    # NOTE(review): .copy() is shallow, so the `+=` below still mutates
    # the shared 'extensions' list object — confirm this is intended.
    app.jinja_options = app.jinja_options.copy()
    app.jinja_options['extensions'] += ['jinja2.ext.i18n', 'jinja2.ext.do']
    template_loader = app.create_global_jinja_loader()
    if app.config["ZOPE_TEMPLATE_PATH"]:
        # Optionally wrap the default loader so the configured templates
        # are served from a Zope template tree instead.
        from naturasites.loader import ZopeTemplateLoader
        template_loader = ZopeTemplateLoader(template_loader,
                                             app.config["ZOPE_TEMPLATE_PATH"],
                                             app.config["ZOPE_TEMPLATE_CACHE"],
                                             app.config["ZOPE_TEMPLATE_LIST"])
    app.jinja_options['loader'] = template_loader
    if 'STATIC_URL_MAP' in app.config:
        # Serve extra static paths directly from the WSGI layer.
        from werkzeug.wsgi import SharedDataMiddleware
        app.wsgi_app = SharedDataMiddleware(app.wsgi_app,
                                            app.config['STATIC_URL_MAP'])
    # Register the application's blueprints/views.
    naturasites.views.register(app)
    tinygis.views.register(app)
    auth.register(app)
    app.jinja_env.install_gettext_translations(translations)
    return app
# Flask-Script manager: every @manager.command / @manager.option function
# below becomes a CLI sub-command of this script.
manager = flaskext.script.Manager(create_app)
@manager.option('--indent', '-i', default=False, action='store_true')
@manager.option('--mysql-login', default='root:',
                help="MySQL login (username:password)")
def accessdb_mjson(indent=False, mysql_login='root:'):
    """Export legacy Access-derived MySQL data as JSON, one document per
    line on stdout (optionally pretty-printed with --indent)."""
    logging.getLogger('migrations.from_access').setLevel(logging.INFO)
    from migrations.from_access import load_from_sql, verify_data
    kwargs = {'indent': 2} if indent else {}
    # NOTE(review): a password containing ':' breaks this split — confirm
    # whether that case matters.
    [mysql_user, mysql_pw] = mysql_login.split(':')
    for doc in verify_data(load_from_sql(mysql_user, mysql_pw)):
        flask.json.dump(doc, sys.stdout, **kwargs)
        sys.stdout.write('\n')
@manager.command
def import_mjson():
    """Read JSON documents (one per line) from stdin, validate each as a
    SpaDoc and save them to the storage backend in batches of 10.

    Note: this module is Python 2 (see the `print` statement below).
    """
    logging.getLogger('storage').setLevel(logging.INFO)

    def batched(iterator, count=10):
        # Group an iterator into lists of at most `count` items.
        batch = []
        for value in iterator:
            batch.append(value)
            if len(batch) >= count:
                yield batch
                batch = []
        if batch:
            yield batch

    def read_json_lines(stream):
        # One JSON document per input line.
        for line in stream:
            yield flask.json.loads(line)

    def load_document(data):
        doc = naturasites.schema.SpaDoc(data)
        # Sanity checks: the document validates and round-trips exactly.
        assert doc.validate(), '%s does not validate' % data['section1']['code']
        assert doc.value == data, 'failed round-tripping the json data'
        return doc

    db = get_db(create_app())
    for batch in batched(load_document(d) for d in read_json_lines(sys.stdin)):
        db.save_document_batch(batch)
        # Progress indicator: one dot per saved batch.
        sys.stdout.write('.')
        sys.stdout.flush()
    print ' done'
@manager.command
def species_to_json():
values = set()
db = get_db()
search = naturasites.views._db_search(naturasites.schema.Search.from_flat({}), facets=True)
for d in search["docs"]:
doc = db.load_document(d["id"])
codes_and_labels = []
for specie in ("species_bird", "species_bird_extra", "species_mammal",
"species_reptile", "species_fish", "species_invertebrate",
"species_plant"):
for s in doc["section3"][specie]:
values.add((s["code"].value, s["name"].value.strip().lower()))
for s in doc["section3"]["species_other"]:
values.add((s["code"].value,
s["scientific_name"].value.strip().lower()))
print flask.json.dumps(dict(values), indent=4)
@manager.command
def runserver(verbose=False):
    """Run the HTTP server — CherryPy when HTTP_CHERRYPY is set, Flask's
    built-in server otherwise.

    :param verbose: also echo storage-layer logging at DEBUG level.

    Fixes:
      * `logging.basicConfig` takes `level=`, not `loglevel=` (the old
        keyword raises ValueError / is ignored depending on version).
      * the Flask branch now honours HTTP_LISTEN_PORT; previously only
        the CherryPy branch used it.
    """
    app = create_app()
    if verbose:
        storage_logger = logging.getLogger('storage')
        storage_logger.setLevel(logging.DEBUG)
        storage_handler = logging.StreamHandler()
        storage_handler.setLevel(logging.DEBUG)
        storage_logger.addHandler(storage_handler)
    if app.config['ERROR_LOG_FILE'] is not None:
        logging.basicConfig(filename=app.config['ERROR_LOG_FILE'],
                            level=logging.ERROR)
    if app.config['HTTP_PROXIED']:
        # Respect X-Forwarded-* headers when behind a reverse proxy.
        from revproxy import ReverseProxied
        app.wsgi_app = ReverseProxied(app.wsgi_app)
    if app.config['HTTP_CHERRYPY']:
        from cherrypy import wsgiserver
        listen = (app.config['HTTP_LISTEN_HOST'], app.config['HTTP_LISTEN_PORT'])
        server = wsgiserver.CherryPyWSGIServer(listen, app.wsgi_app)
        try:
            server.start()
        except KeyboardInterrupt:
            server.stop()
    else:
        app.run(app.config['HTTP_LISTEN_HOST'], app.config['HTTP_LISTEN_PORT'])
# Dispatch to the Flask-Script sub-command named on the command line.
if __name__ == '__main__':
    manager.run()
| 30.525424 | 96 | 0.6402 |
9cd649fc77d3f309047d80422a075edf39d6883a | 1,011 | py | Python | Day_59/singly_link_list_search.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_59/singly_link_list_search.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_59/singly_link_list_search.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | # Title : Linked list implementation in python with search
# Author : Kiran raj R.
# Date : 30:10:2020
class Node:
    """One element of a singly linked list.

    ``value`` holds the payload; ``next`` references the following node
    and is initialised to ``None``.
    """
    def __init__(self, value):
        self.next = None
        self.value = value
class Simply_linked_list:
    """A minimal singly linked list; ``head`` is None while empty."""
    def __init__(self):
        self.head = None

    def printList(self):
        """Print every node value on one line, separated by spaces."""
        temp = self.head
        while temp:
            print(temp.value, end=" ")
            temp = temp.next
        print()

    def search_list(self, item):
        """Linear search for *item*; print a message on a hit.

        Bug fix: the original advanced to ``temp.next`` before comparing,
        so the head node's value could never be found (and an empty-check
        happened one node late).

        :return: True if *item* was found, else False. (New, backward-
            compatible: the old code always returned None, which callers
            ignored.)
        """
        temp = self.head
        while temp is not None:
            if temp.value == item:
                print(f"Found {temp.value}")
                return True
            temp = temp.next
        return False
# Demo: build the list 1 -> 2 -> 3 by hand, print it, then search for a
# missing value (20) and two present values (2 and 3).
sl_list = Simply_linked_list()
sl_list.head = Node(1)
node2 = Node(2)
node3 = Node(3)
sl_list.head.next = node2
node2.next = node3
sl_list.printList()
sl_list.search_list(20)
sl_list.search_list(2)
sl_list.search_list(3)
| 21.0625 | 79 | 0.598417 |
86d3eae112c9bbf5c6d1cec371c2694ceacf088f | 25,329 | py | Python | sympy/core/tests/test_sympify.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/tests/test_sympify.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/tests/test_sympify.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | null | null | null | from sympy import (Symbol, exp, Integer, Float, sin, cos, Poly, Lambda,
Function, I, S, sqrt, srepr, Rational, Tuple, Matrix, Interval, Add, Mul,
Pow, Or, true, false, Abs, pi, Range, Xor)
from sympy.abc import x, y
from sympy.core.sympify import (sympify, _sympify, SympifyError, kernS,
CantSympify)
from sympy.core.decorators import _sympifyit
from sympy.external import import_module
from sympy.testing.pytest import raises, XFAIL, skip, warns_deprecated_sympy
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.geometry import Point, Line
from sympy.functions.combinatorial.factorials import factorial, factorial2
from sympy.abc import _clash, _clash1, _clash2
from sympy.core.compatibility import HAS_GMPY
from sympy.sets import FiniteSet, EmptySet
from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray
import mpmath
from collections import defaultdict, OrderedDict
from mpmath.rational import mpq
numpy = import_module('numpy')
def test_issue_3538():
    """sympify("exp(x)") must produce exactly the same kind of object as exp(x)."""
    parsed = sympify("exp(x)")
    expected = exp(x)
    assert parsed == expected
    assert type(parsed) == type(expected)
    assert str(type(parsed)) == str(type(expected))
def test_sympify1():
    """Basic string sympification: whitespace, signs, repeating decimals
    and the rational=True conversion flag."""
    # Surrounding whitespace is ignored.
    assert sympify("x") == Symbol("x")
    assert sympify(" x") == Symbol("x")
    assert sympify(" x ") == Symbol("x")
    # issue 4877
    n1 = S.Half
    assert sympify('--.5') == n1
    assert sympify('-1/2') == -n1
    assert sympify('-+--.5') == -n1
    # Bracketed digits denote a repeating decimal, e.g. '.[3]' == 1/3.
    assert sympify('-.[3]') == Rational(-1, 3)
    assert sympify('.[3]') == Rational(1, 3)
    assert sympify('+.[3]') == Rational(1, 3)
    assert sympify('+0.[3]*10**-2') == Rational(1, 300)
    assert sympify('.[052631578947368421]') == Rational(1, 19)
    assert sympify('.0[526315789473684210]') == Rational(1, 19)
    assert sympify('.034[56]') == Rational(1711, 49500)
    # options to make reals into rationals
    assert sympify('1.22[345]', rational=True) == \
        1 + Rational(22, 100) + Rational(345, 99900)
    assert sympify('2/2.6', rational=True) == Rational(10, 13)
    assert sympify('2.6/2', rational=True) == Rational(13, 10)
    assert sympify('2.6e2/17', rational=True) == Rational(260, 17)
    assert sympify('2.6e+2/17', rational=True) == Rational(260, 17)
    assert sympify('2.6e-2/17', rational=True) == Rational(26, 17000)
    assert sympify('2.1+3/4', rational=True) == \
        Rational(21, 10) + Rational(3, 4)
    assert sympify('2.234456', rational=True) == Rational(279307, 125000)
    assert sympify('2.234456e23', rational=True) == 223445600000000000000000
    assert sympify('2.234456e-23', rational=True) == \
        Rational(279307, 12500000000000000000000000000)
    assert sympify('-2.234456e-23', rational=True) == \
        Rational(-279307, 12500000000000000000000000000)
    assert sympify('12345678901/17', rational=True) == \
        Rational(12345678901, 17)
    assert sympify('1/.3 + x', rational=True) == Rational(10, 3) + x
    # make sure longs in fractions work
    assert sympify('222222222222/11111111111') == \
        Rational(222222222222, 11111111111)
    # ... even if they come from repetend notation
    assert sympify('1/.2[123456789012]') == Rational(333333333333, 70781892967)
    # ... or from high precision reals
    assert sympify('.1234567890123456', rational=True) == \
        Rational(19290123283179, 156250000000000)
def test_sympify_Fraction():
    """stdlib fractions.Fraction converts to an exact Rational."""
    try:
        import fractions
    except ImportError:
        # Environments without the fractions module simply skip the check.
        pass
    else:
        value = sympify(fractions.Fraction(101, 127))
        assert value == Rational(101, 127) and type(value) is Rational
def test_sympify_gmpy():
    """gmpy/gmpy2 integers and rationals convert to Integer/Rational.

    Runs only when a gmpy backend is installed (HAS_GMPY is 1 or 2).
    """
    if HAS_GMPY:
        if HAS_GMPY == 2:
            import gmpy2 as gmpy
        elif HAS_GMPY == 1:
            import gmpy
        value = sympify(gmpy.mpz(1000001))
        assert value == Integer(1000001) and type(value) is Integer
        value = sympify(gmpy.mpq(101, 127))
        assert value == Rational(101, 127) and type(value) is Rational
@conserve_mpmath_dps
def test_sympify_mpmath():
    """mpmath values sympify to Float (precision tracks mpmath.mp.dps),
    mpc to a complex expression and mpq to Rational.

    Idiom fix: `== True` / `== False` comparisons replaced with direct
    truthiness asserts (flake8 E712); `epsilon_eq` is assumed to return a
    plain bool, so pass/fail behavior is unchanged.
    """
    value = sympify(mpmath.mpf(1.0))
    assert value == Float(1.0) and type(value) is Float
    mpmath.mp.dps = 12
    # At 12 digits the conversion is accurate to 1e-12 but not 1e-13.
    assert sympify(mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-12"))
    assert not sympify(mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-13"))
    mpmath.mp.dps = 6
    assert sympify(mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-5"))
    assert not sympify(mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-6"))
    assert sympify(mpmath.mpc(1.0 + 2.0j)) == Float(1.0) + Float(2.0)*I
    assert sympify(mpq(1, 2)) == S.Half
def test_sympify2():
    """Objects exposing a _sympy_ hook are converted through it."""
    class HasSympyHook:
        def _sympy_(self):
            return Symbol("x")**3

    obj = HasSympyHook()
    expected = x**3
    assert _sympify(obj) == expected
    assert sympify(obj) == expected
    # Equality with a SymPy expression also goes through the hook.
    assert obj == expected
def test_sympify3():
assert sympify("x**3") == x**3
assert sympify("x^3") == x**3
assert sympify("1/2") == Integer(1)/2
raises(SympifyError, lambda: _sympify('x**3'))
raises(SympifyError, lambda: _sympify('1/2'))
def test_sympify_keywords():
raises(SympifyError, lambda: sympify('if'))
raises(SympifyError, lambda: sympify('for'))
raises(SympifyError, lambda: sympify('while'))
raises(SympifyError, lambda: sympify('lambda'))
def test_sympify_float():
assert sympify("1e-64") != 0
assert sympify("1e-20000") != 0
def test_sympify_bool():
assert sympify(True) is true
assert sympify(False) is false
def test_sympyify_iterables():
ans = [Rational(3, 10), Rational(1, 5)]
assert sympify(['.3', '.2'], rational=True) == ans
assert sympify(dict(x=0, y=1)) == {x: 0, y: 1}
assert sympify(['1', '2', ['3', '4']]) == [S(1), S(2), [S(3), S(4)]]
@XFAIL
def test_issue_16772():
    # because there is a converter for tuple, the
    # args are only sympified without the flags being passed
    # along; list, on the other hand, is not converted
    # with a converter so its args are traversed later
    ans = [Rational(3, 10), Rational(1, 5)]
    assert sympify(tuple(['.3', '.2']), rational=True) == Tuple(*ans)
def test_issue_16859():
    """The CantSympify mixin blocks conversion even for float subclasses."""
    class no(float, CantSympify):
        pass
    raises(SympifyError, lambda: sympify(no(1.2)))
def test_sympify4():
    """_sympy_() conversion composes with arithmetic on the converted value."""
    class A:
        def _sympy_(self):
            return Symbol("x")
    a = A()
    assert _sympify(a)**3 == x**3
    assert sympify(a)**3 == x**3
    assert a == x
def test_sympify_text():
    """Unknown name strings become Symbols; known SymPy names resolve to the objects."""
    assert sympify('some') == Symbol('some')
    assert sympify('core') == Symbol('core')
    # 'True'/'False' parse to the Python booleans here.
    assert sympify('True') is True
    assert sympify('False') is False
    assert sympify('Poly') == Poly
    assert sympify('sin') == sin
def test_sympify_function():
    """Function-call strings are evaluated during sympification."""
    assert sympify('factor(x**2-1, x)') == -(1 - x)*(x + 1)
    assert sympify('sin(pi/2)*cos(pi)') == -Integer(1)
def test_sympify_poly():
    """Poly instances pass through sympify/_sympify unchanged (identical object)."""
    p = Poly(x**2 + x + 1, x)
    assert _sympify(p) is p
    assert sympify(p) is p
def test_sympify_factorial():
    """Postfix ! / !! notation parses to factorial/factorial2; malformed uses raise."""
    assert sympify('x!') == factorial(x)
    assert sympify('(x+1)!') == factorial(x + 1)
    assert sympify('(1 + y*(x + 1))!') == factorial(1 + y*(x + 1))
    assert sympify('(1 + y*(x + 1)!)^2') == (1 + y*factorial(x + 1))**2
    assert sympify('y*x!') == y*factorial(x)
    assert sympify('x!!') == factorial2(x)
    assert sympify('(x+1)!!') == factorial2(x + 1)
    assert sympify('(1 + y*(x + 1))!!') == factorial2(1 + y*(x + 1))
    assert sympify('(1 + y*(x + 1)!!)^2') == (1 + y*factorial2(x + 1))**2
    assert sympify('y*x!!') == y*factorial2(x)
    assert sympify('factorial2(x)!') == factorial(factorial2(x))
    # A bare or dangling factorial mark has no operand and must fail to parse.
    raises(SympifyError, lambda: sympify("+!!"))
    raises(SympifyError, lambda: sympify(")!!"))
    raises(SympifyError, lambda: sympify("!"))
    raises(SympifyError, lambda: sympify("(!)"))
    raises(SympifyError, lambda: sympify("x!!!"))
def test_issue_3595():
    """Leading/trailing underscores are legal Symbol names."""
    assert sympify("a_") == Symbol("a_")
    assert sympify("_a") == Symbol("_a")
def test_lambda():
    """lambda strings sympify to Lambda objects with matching signatures."""
    x = Symbol('x')
    assert sympify('lambda: 1') == Lambda((), 1)
    assert sympify('lambda x: x') == Lambda(x, x)
    assert sympify('lambda x: 2*x') == Lambda(x, 2*x)
    assert sympify('lambda x, y: 2*x+y') == Lambda((x, y), 2*x + y)
def test_lambda_raises():
    """lambda strings with *args/**kwargs/defaults are unsupported; strict mode rejects all lambdas."""
    raises(SympifyError, lambda: sympify("lambda *args: args")) # args argument error
    raises(SympifyError, lambda: sympify("lambda **kwargs: kwargs[0]")) # kwargs argument error
    raises(SympifyError, lambda: sympify("lambda x = 1: x")) # Keyword argument error
    with raises(SympifyError):
        _sympify('lambda: 1')
def test_sympify_raises():
    """Malformed strings raise; falling back to str() conversion is deprecated."""
    raises(SympifyError, lambda: sympify("fx)"))
    class A:
        def __str__(self):
            return 'x'
    with warns_deprecated_sympy():
        assert sympify(A()) == Symbol('x')
def test__sympify():
    """_sympify is the strict converter: Basic/numbers/_sympy_ pass; strings and containers fail."""
    x = Symbol('x')
    f = Function('f')
    # positive _sympify
    assert _sympify(x) is x
    assert _sympify(1) == Integer(1)
    assert _sympify(0.5) == Float("0.5")
    assert _sympify(1 + 1j) == 1.0 + I*1.0
    # Function f is not Basic and can't sympify to Basic. We allow it to pass
    # with sympify but not with _sympify.
    # https://github.com/sympy/sympy/issues/20124
    assert sympify(f) is f
    raises(SympifyError, lambda: _sympify(f))
    class A:
        def _sympy_(self):
            return Integer(5)
    a = A()
    assert _sympify(a) == Integer(5)
    # negative _sympify
    raises(SympifyError, lambda: _sympify('1'))
    raises(SympifyError, lambda: _sympify([1, 2, 3]))
def test_sympifyit():
    """@_sympifyit coerces the named argument; on failure it returns the default or raises."""
    x = Symbol('x')
    y = Symbol('y')
    @_sympifyit('b', NotImplemented)
    def add(a, b):
        return a + b
    assert add(x, 1) == x + 1
    assert add(x, 0.5) == x + Float('0.5')
    assert add(x, y) == x + y
    # With a default supplied, a failed coercion returns it instead of raising.
    assert add(x, '1') == NotImplemented
    @_sympifyit('b')
    def add_raises(a, b):
        return a + b
    assert add_raises(x, 1) == x + 1
    assert add_raises(x, 0.5) == x + Float('0.5')
    assert add_raises(x, y) == x + y
    raises(SympifyError, lambda: add_raises(x, '1'))
def test_int_float():
    """Conversion precedence for plain objects: _sympy_() wins; else __float__ beats __int__."""
    class F1_1:
        def __float__(self):
            return 1.1
    class F1_1b:
        """
        This class is still a float, even though it also implements __int__().
        """
        def __float__(self):
            return 1.1
        def __int__(self):
            return 1
    class F1_1c:
        """
        This class is still a float, because it implements _sympy_()
        """
        def __float__(self):
            return 1.1
        def __int__(self):
            return 1
        def _sympy_(self):
            return Float(1.1)
    class I5:
        def __int__(self):
            return 5
    class I5b:
        """
        This class implements both __int__() and __float__(), so it will be
        treated as Float in SymPy. One could change this behavior, by using
        float(a) == int(a), but deciding that integer-valued floats represent
        exact numbers is arbitrary and often not correct, so we do not do it.
        If, in the future, we decide to do it anyway, the tests for I5b need to
        be changed.
        """
        def __float__(self):
            return 5.0
        def __int__(self):
            return 5
    class I5c:
        """
        This class implements both __int__() and __float__(), but also
        a _sympy_() method, so it will be Integer.
        """
        def __float__(self):
            return 5.0
        def __int__(self):
            return 5
        def _sympy_(self):
            return Integer(5)
    i5 = I5()
    i5b = I5b()
    i5c = I5c()
    f1_1 = F1_1()
    f1_1b = F1_1b()
    f1_1c = F1_1c()
    # The loose converter (sympify) ...
    assert sympify(i5) == 5
    assert isinstance(sympify(i5), Integer)
    assert sympify(i5b) == 5
    assert isinstance(sympify(i5b), Float)
    assert sympify(i5c) == 5
    assert isinstance(sympify(i5c), Integer)
    assert abs(sympify(f1_1) - 1.1) < 1e-5
    assert abs(sympify(f1_1b) - 1.1) < 1e-5
    assert abs(sympify(f1_1c) - 1.1) < 1e-5
    # ... and the strict converter (_sympify) agree on all of these inputs.
    assert _sympify(i5) == 5
    assert isinstance(_sympify(i5), Integer)
    assert _sympify(i5b) == 5
    assert isinstance(_sympify(i5b), Float)
    assert _sympify(i5c) == 5
    assert isinstance(_sympify(i5c), Integer)
    assert abs(_sympify(f1_1) - 1.1) < 1e-5
    assert abs(_sympify(f1_1b) - 1.1) < 1e-5
    assert abs(_sympify(f1_1c) - 1.1) < 1e-5
def test_evaluate_false():
    """evaluate=False preserves the literal structure of parsed arithmetic/boolean expressions."""
    cases = {
        '2 + 3': Add(2, 3, evaluate=False),
        '2**2 / 3': Mul(Pow(2, 2, evaluate=False), Pow(3, -1, evaluate=False), evaluate=False),
        '2 + 3 * 5': Add(2, Mul(3, 5, evaluate=False), evaluate=False),
        '2 - 3 * 5': Add(2, Mul(-1, Mul(3, 5,evaluate=False), evaluate=False), evaluate=False),
        '1 / 3': Mul(1, Pow(3, -1, evaluate=False), evaluate=False),
        'True | False': Or(True, False, evaluate=False),
        '1 + 2 + 3 + 5*3 + integrate(x)': Add(1, 2, 3, Mul(5, 3, evaluate=False), x**2/2, evaluate=False),
        '2 * 4 * 6 + 8': Add(Mul(2, 4, 6, evaluate=False), 8, evaluate=False),
        '2 - 8 / 4': Add(2, Mul(-1, Mul(8, Pow(4, -1, evaluate=False), evaluate=False), evaluate=False), evaluate=False),
        '2 - 2**2': Add(2, Mul(-1, Pow(2, 2, evaluate=False), evaluate=False), evaluate=False),
    }
    for case, result in cases.items():
        assert sympify(case, evaluate=False) == result
def test_issue_4133():
    """String constructor calls like 'Integer(4)' evaluate to the object."""
    a = sympify('Integer(4)')
    assert a == Integer(4)
    assert a.is_Integer
def test_issue_3982():
    """list/tuple/set inputs convert element-wise to list/Tuple/FiniteSet."""
    a = [3, 2.0]
    assert sympify(a) == [Integer(3), Float(2.0)]
    assert sympify(tuple(a)) == Tuple(Integer(3), Float(2.0))
    assert sympify(set(a)) == FiniteSet(Integer(3), Float(2.0))
def test_S_sympify():
    """S(...) behaves like sympify in arithmetic contexts."""
    assert S(1)/2 == sympify(1)/2 == S.Half
    assert (-2)**(S(1)/2) == sqrt(2)*I
def test_issue_4788():
    """A complex float with zero imaginary part sympifies like the plain float."""
    assert srepr(S(1.0 + 0J)) == srepr(S(1.0)) == srepr(Float(1.0))
def test_issue_4798_None():
    """None passes through S/sympify unchanged."""
    assert S(None) is None
def test_issue_3218():
    """Embedded newlines inside expression strings parse fine."""
    assert sympify("x+\ny") == x + y
def test_issue_4988_builtins():
    """A user Symbol named like a SymPy export ('C') is not shadowed during parsing."""
    C = Symbol('C')
    vars = {'C': C}
    exp1 = sympify('C')
    assert exp1 == C  # Make sure it did not get mixed up with sympy.C
    exp2 = sympify('C', vars)
    assert exp2 == C  # Make sure it did not get mixed up with sympy.C
def test_geometry():
    """Geometry entities survive sympify with both value and type preserved."""
    p = sympify(Point(0, 1))
    assert p == Point(0, 1) and isinstance(p, Point)
    L = sympify(Line(p, (1, 0)))
    assert L == Line((0, 1), (1, 0)) and isinstance(L, Line)
def test_kernS():
    """kernS sympifies without letting autosimplification collapse the input's structure."""
    s = '-1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x)))'
    # when 1497 is fixed, this no longer should pass: the expression
    # should be unchanged
    assert -1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) == -1
    # sympification should not allow the constant to enter a Mul
    # or else the structure can change dramatically
    ss = kernS(s)
    assert ss != -1 and ss.simplify() == -1
    # The same expression with user symbols named '_kern' must not collide
    # with the internal kern placeholder.
    s = '-1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x)))'.replace(
        'x', '_kern')
    ss = kernS(s)
    assert ss != -1 and ss.simplify() == -1
    # issue 6687
    assert (kernS('Interval(-1,-2 - 4*(-3))')
        == Interval(-1, Add(-2, Mul(12, 1, evaluate=False), evaluate=False)))
    assert kernS('_kern') == Symbol('_kern')
    assert kernS('E**-(x)') == exp(-x)
    e = 2*(x + y)*y
    assert kernS(['2*(x + y)*y', ('2*(x + y)*y',)]) == [e, (e,)]
    assert kernS('-(2*sin(x)**2 + 2*sin(x)*cos(x))*y/2') == \
        -y*(2*sin(x)**2 + 2*sin(x)*cos(x))/2
    # issue 15132
    assert kernS('(1 - x)/(1 - x*(1-y))') == kernS('(1-x)/(1-(1-y)*x)')
    assert kernS('(1-2**-(4+1)*(1-y)*x)') == (1 - x*(1 - y)/32)
    assert kernS('(1-2**(4+1)*(1-y)*x)') == (1 - 32*x*(1 - y))
    assert kernS('(1-2.*(1-y)*x)') == 1 - 2.*x*(1 - y)
    one = kernS('x - (x - 1)')
    assert one != 1 and one.expand() == 1
    assert kernS("(2*x)/(x-1)") == 2*x/(x-1)
def test_issue_6540_6552():
    """Nested container strings sympify recursively, reducing fractions."""
    assert S('[[1/3,2], (2/5,)]') == [[Rational(1, 3), 2], (Rational(2, 5),)]
    assert S('[[2/6,2], (2/4,)]') == [[Rational(1, 3), 2], (S.Half,)]
    assert S('[[[2*(1)]]]') == [[[2]]]
    assert S('Matrix([2*(1)])') == Matrix([2])
def test_issue_6046():
    """The _clash locals dicts let a clashing name act as Symbol or Function, but not both."""
    assert str(S("Q & C", locals=_clash1)) == 'C & Q'
    assert str(S('pi(x)', locals=_clash2)) == 'pi(x)'
    locals = {}
    exec("from sympy.abc import Q, C", locals)
    assert str(S('C&Q', locals)) == 'C & Q'
    # clash can act as Symbol or Function
    assert str(S('pi(C, Q)', locals=_clash)) == 'pi(C, Q)'
    assert len(S('pi + x', locals=_clash2).free_symbols) == 2
    # but not both
    raises(TypeError, lambda: S('pi + pi(x)', locals=_clash2))
    # All three clash dicts map every key to None.
    assert all(set(i.values()) == {None} for i in (
        _clash, _clash1, _clash2))
def test_issue_8821_highprec_from_str():
    """High-precision decimal strings keep their full precision through sympify."""
    s = str(pi.evalf(128))
    p = sympify(s)
    assert Abs(sin(p)) < 1e-127
def test_issue_10295():
    """numpy arrays sympify to immutable N-dim arrays with matching shapes and entries."""
    if not numpy:
        skip("numpy not installed.")
    A = numpy.array([[1, 3, -1],
                     [0, 1, 7]])
    sA = S(A)
    assert sA.shape == (2, 3)
    for (ri, ci), val in numpy.ndenumerate(A):
        assert sA[ri, ci] == val
    # Object-dtype arrays holding expressions also convert entry-wise.
    B = numpy.array([-7, x, 3*y**2])
    sB = S(B)
    assert sB.shape == (3,)
    assert B[0] == sB[0] == -7
    assert B[1] == sB[1] == x
    assert B[2] == sB[2] == 3*y**2
    C = numpy.arange(0, 24)
    C.resize(2,3,4)
    sC = S(C)
    assert sC[0, 0, 0].is_integer
    assert sC[0, 0, 0] == 0
    a1 = numpy.array([1, 2, 3])
    a2 = numpy.array([i for i in range(24)])
    a2.resize(2, 4, 3)
    assert sympify(a1) == ImmutableDenseNDimArray([1, 2, 3])
    assert sympify(a2) == ImmutableDenseNDimArray([i for i in range(24)], (2, 4, 3))
def test_Range():
    # Only works in Python 3 where range returns a range type
    assert sympify(range(10)) == Range(10)
    assert _sympify(range(10)) == Range(10)
def test_sympify_set():
    """Python sets map to FiniteSet; the empty set maps to EmptySet."""
    n = Symbol('n')
    assert sympify({n}) == FiniteSet(n)
    assert sympify(set()) == EmptySet
def test_sympify_numpy():
    """numpy scalar types sympify to SymPy types of matching value and binary precision."""
    if not numpy:
        skip('numpy not installed. Abort numpy tests.')
    np = numpy
    def equal(x, y):
        # Require identical value AND identical SymPy type.
        return x == y and type(x) == type(y)
    assert sympify(np.bool_(1)) is S(True)
    try:
        assert equal(
            sympify(np.int_(1234567891234567891)), S(1234567891234567891))
        assert equal(
            sympify(np.intp(1234567891234567891)), S(1234567891234567891))
    except OverflowError:
        # May fail on 32-bit systems: Python int too large to convert to C long
        pass
    assert equal(sympify(np.intc(1234567891)), S(1234567891))
    assert equal(sympify(np.int8(-123)), S(-123))
    assert equal(sympify(np.int16(-12345)), S(-12345))
    assert equal(sympify(np.int32(-1234567891)), S(-1234567891))
    assert equal(
        sympify(np.int64(-1234567891234567891)), S(-1234567891234567891))
    assert equal(sympify(np.uint8(123)), S(123))
    assert equal(sympify(np.uint16(12345)), S(12345))
    assert equal(sympify(np.uint32(1234567891)), S(1234567891))
    assert equal(
        sympify(np.uint64(1234567891234567891)), S(1234567891234567891))
    # Float precisions track the numpy dtype's mantissa width.
    assert equal(sympify(np.float32(1.123456)), Float(1.123456, precision=24))
    assert equal(sympify(np.float64(1.1234567891234)),
                 Float(1.1234567891234, precision=53))
    assert equal(sympify(np.longdouble(1.123456789)),
                 Float(1.123456789, precision=80))
    assert equal(sympify(np.complex64(1 + 2j)), S(1.0 + 2.0*I))
    assert equal(sympify(np.complex128(1 + 2j)), S(1.0 + 2.0*I))
    assert equal(sympify(np.longcomplex(1 + 2j)), S(1.0 + 2.0*I))
    #float96 does not exist on all platforms
    if hasattr(np, 'float96'):
        assert equal(sympify(np.float96(1.123456789)),
                     Float(1.123456789, precision=80))
    #float128 does not exist on all platforms
    if hasattr(np, 'float128'):
        assert equal(sympify(np.float128(1.123456789123)),
                     Float(1.123456789123, precision=80))
@XFAIL
def test_sympify_rational_numbers_set():
    # rational=True is not yet propagated through set conversion.
    ans = [Rational(3, 10), Rational(1, 5)]
    assert sympify({'.3', '.2'}, rational=True) == FiniteSet(*ans)
def test_issue_13924():
    """A one-element numpy array becomes an ImmutableDenseNDimArray."""
    if not numpy:
        skip("numpy not installed.")
    a = sympify(numpy.array([1]))
    assert isinstance(a, ImmutableDenseNDimArray)
    assert a[0] == 1
def test_numpy_sympify_args():
    # Issue 15098. Make sure sympify args work with numpy types (like numpy.str_)
    if not numpy:
        skip("numpy not installed.")
    a = sympify(numpy.str_('a'))
    assert type(a) is Symbol
    assert a == Symbol('a')
    class CustomSymbol(Symbol):
        pass
    # locals override: 'Symbol' resolves to the custom class during parsing.
    a = sympify(numpy.str_('a'), {"Symbol": CustomSymbol})
    assert isinstance(a, CustomSymbol)
    a = sympify(numpy.str_('x^y'))
    assert a == x**y
    a = sympify(numpy.str_('x^y'), convert_xor=False)
    assert a == Xor(x, y)
    raises(SympifyError, lambda: sympify(numpy.str_('x'), strict=True))
    a = sympify(numpy.str_('1.1'))
    assert isinstance(a, Float)
    assert a == 1.1
    a = sympify(numpy.str_('1.1'), rational=True)
    assert isinstance(a, Rational)
    assert a == Rational(11, 10)
    a = sympify(numpy.str_('x + x'))
    assert isinstance(a, Mul)
    assert a == 2*x
    a = sympify(numpy.str_('x + x'), evaluate=False)
    assert isinstance(a, Add)
    assert a == Add(x, x, evaluate=False)
def test_issue_5939():
    """Strings containing newlines between terms still parse."""
    a = Symbol('a')
    b = Symbol('b')
    assert sympify('''a+\nb''') == a + b
def test_issue_16759():
    """Float dict keys stay Float (not Rational) through sympify, for all dict types."""
    d = sympify({.5: 1})
    assert S.Half not in d
    assert Float(.5) in d
    assert d[.5] is S.One
    d = sympify(OrderedDict({.5: 1}))
    assert S.Half not in d
    assert Float(.5) in d
    assert d[.5] is S.One
    d = sympify(defaultdict(int, {.5: 1}))
    assert S.Half not in d
    assert Float(.5) in d
    assert d[.5] is S.One
def test_issue_17811():
    """evaluate=False is honored for applied undefined functions in products."""
    a = Function('a')
    assert sympify('a(x)*5', evaluate=False) == Mul(a(x), 5, evaluate=False)
def test_issue_14706():
    """Symbols interoperate with numpy scalars/arrays; zero-d arrays sympify to scalars only strictly."""
    if not numpy:
        skip("numpy not installed.")
    z1 = numpy.zeros((1, 1), dtype=numpy.float64)
    z2 = numpy.zeros((2, 2), dtype=numpy.float64)
    z3 = numpy.zeros((), dtype=numpy.float64)
    y1 = numpy.ones((1, 1), dtype=numpy.float64)
    y2 = numpy.ones((2, 2), dtype=numpy.float64)
    y3 = numpy.ones((), dtype=numpy.float64)
    # Adding a zero array broadcasts the Symbol over the array shape.
    assert numpy.all(x + z1 == numpy.full((1, 1), x))
    assert numpy.all(x + z2 == numpy.full((2, 2), x))
    assert numpy.all(z1 + x == numpy.full((1, 1), x))
    assert numpy.all(z2 + x == numpy.full((2, 2), x))
    for z in [z3,
              numpy.int64(0),
              numpy.float64(0),
              numpy.complex64(0)]:
        # Scalar zeros are absorbed: the result is the bare Symbol.
        assert x + z == x
        assert z + x == x
        assert isinstance(x + z, Symbol)
        assert isinstance(z + x, Symbol)
    # If these tests fail, then it means that numpy has finally
    # fixed the issue of scalar conversion for rank>0 arrays
    # which is mentioned in numpy/numpy#10404. In that case,
    # some changes have to be made in sympify.py.
    # Note: For future reference, for anyone who takes up this
    # issue when numpy has finally fixed their side of the problem,
    # the changes for this temporary fix were introduced in PR 18651
    assert numpy.all(x + y1 == numpy.full((1, 1), x + 1.0))
    assert numpy.all(x + y2 == numpy.full((2, 2), x + 1.0))
    assert numpy.all(y1 + x == numpy.full((1, 1), x + 1.0))
    assert numpy.all(y2 + x == numpy.full((2, 2), x + 1.0))
    for y_ in [y3,
              numpy.int64(1),
              numpy.float64(1),
              numpy.complex64(1)]:
        assert x + y_ == y_ + x
        assert isinstance(x + y_, Add)
        assert isinstance(y_ + x, Add)
    assert x + numpy.array(x) == 2 * x
    assert x + numpy.array([x]) == numpy.array([2*x], dtype=object)
    assert sympify(numpy.array([1])) == ImmutableDenseNDimArray([1], 1)
    assert sympify(numpy.array([[[1]]])) == ImmutableDenseNDimArray([1], (1, 1, 1))
    assert sympify(z1) == ImmutableDenseNDimArray([0], (1, 1))
    assert sympify(z2) == ImmutableDenseNDimArray([0, 0, 0, 0], (2, 2))
    assert sympify(z3) == ImmutableDenseNDimArray([0], ())
    # strict mode accepts a zero-d array (as a scalar) but rejects any rank>0 array.
    assert sympify(z3, strict=True) == 0.0
    raises(SympifyError, lambda: sympify(numpy.array([1]), strict=True))
    raises(SympifyError, lambda: sympify(z1, strict=True))
    raises(SympifyError, lambda: sympify(z2, strict=True))
def test_issue_21536():
    #test to check evaluate=False in case of iterable input
    u = sympify("x+3*x+2", evaluate=False)
    v = sympify("2*x+4*x+2+4", evaluate=False)
    assert u.is_Add and set(u.args) == {x, 3*x, 2}
    assert v.is_Add and set(v.args) == {2*x, 4*x, 2, 4}
    assert sympify(["x+3*x+2", "2*x+4*x+2+4"], evaluate=False) == [u, v]
    #test to check evaluate=True in case of iterable input
    u = sympify("x+3*x+2", evaluate=True)
    v = sympify("2*x+4*x+2+4", evaluate=True)
    assert u.is_Add and set(u.args) == {4*x, 2}
    assert v.is_Add and set(v.args) == {6*x, 6}
    assert sympify(["x+3*x+2", "2*x+4*x+2+4"], evaluate=True) == [u, v]
    #test to check evaluate with no input in case of iterable input
    u = sympify("x+3*x+2")
    v = sympify("2*x+4*x+2+4")
    assert u.is_Add and set(u.args) == {4*x, 2}
    assert v.is_Add and set(v.args) == {6*x, 6}
    assert sympify(["x+3*x+2", "2*x+4*x+2+4"]) == [u, v]
| 32.021492 | 121 | 0.593746 |
a2e67f15e6ebc7dbd20c73ec5e716cce9917d3ab | 10,777 | py | Python | autoarray/mock/mock_grid.py | caoxiaoyue/PyAutoArray | e10d3d6a5b8dd031f2ad277486bd539bd5858b2a | [
"MIT"
] | null | null | null | autoarray/mock/mock_grid.py | caoxiaoyue/PyAutoArray | e10d3d6a5b8dd031f2ad277486bd539bd5858b2a | [
"MIT"
] | null | null | null | autoarray/mock/mock_grid.py | caoxiaoyue/PyAutoArray | e10d3d6a5b8dd031f2ad277486bd539bd5858b2a | [
"MIT"
] | null | null | null | import numpy as np
from autoarray.inversion.mappers.abstract import AbstractMapper
from autoarray.structures.grids import grid_decorators
from autoarray.structures.grids.two_d.grid_2d_pixelization import PixelNeighbors
### Grids ###
def grid_to_grid_radii(grid):
    """Return the radial distance of every (y, x) coordinate from the origin."""
    y_coords = grid[:, 0]
    x_coords = grid[:, 1]
    return np.sqrt(y_coords ** 2 + x_coords ** 2)
def ndarray_1d_from(profile, grid):
    """Evaluate a Sersic-like radial profile (index 2, scale radius 0.2) on a (y, x) grid.

    ``profile`` is accepted to mirror the real signature but is unused.
    """
    # Truncated series expansion of the Sersic normalisation constant for n = 2.
    n = 2.0
    sersic_constant = (
        (2 * n)
        - (1.0 / 3.0)
        + (4.0 / (405.0 * n))
        + (46.0 / (25515.0 * n ** 2))
        + (131.0 / (1148175.0 * n ** 3))
        - (2194697.0 / (30690717750.0 * n ** 4))
    )
    radii = grid_to_grid_radii(grid=grid)
    return np.exp(-sersic_constant * ((radii / 0.2) ** (1.0 / 2.0) - 1))
def grid_angle_to_profile(grid_thetas):
    """Return ``(cos(theta), sin(theta))`` for every angle in *grid_thetas*.

    Parameters
    ----------
    grid_thetas
        The angle theta counter-clockwise from the positive x-axis to each
        coordinate, in radians.
    """
    cos_thetas = np.cos(grid_thetas)
    sin_thetas = np.sin(grid_thetas)
    return cos_thetas, sin_thetas
def grid_to_grid_cartesian(grid, radius):
    """
    Map each (y, x) coordinate to the Cartesian point at *radius* along its angle.

    Parameters
    ----------
    grid
        The (y, x) coordinates in the reference frame of the profile.
    radius
        The circular radius of each coordinate from the profile center.
    """
    thetas = np.arctan2(grid[:, 0], grid[:, 1])
    cos_thetas, sin_thetas = grid_angle_to_profile(grid_thetas=thetas)
    # Unit (y, x) direction vectors, scaled per-row by the requested radius.
    unit_yx = np.stack((sin_thetas, cos_thetas), axis=-1)
    return radius[:, None] * unit_yx
def ndarray_2d_from(profile, grid):
    """Return (y, x) vectors of fixed magnitude 2.0 directed along each grid point's angle.

    ``profile`` is accepted to mirror the real signature but is unused.
    """
    radii = np.full(grid.shape[0], 2.0)
    return grid_to_grid_cartesian(grid=grid, radius=radii)
class MockGridLikeIteratorObj:
    """Mock object whose methods exercise every ``grid_decorators`` return
    signature: 1D/2D ndarrays, lists of them, and (y, x) vector fields."""
    def __init__(self):
        pass
    @property
    def sersic_constant(self):
        # Truncated series expansion of the Sersic normalisation constant
        # for a Sersic index of 2.0.
        return (
            (2 * 2.0)
            - (1.0 / 3.0)
            + (4.0 / (405.0 * 2.0))
            + (46.0 / (25515.0 * 2.0 ** 2))
            + (131.0 / (1148175.0 * 2.0 ** 3))
            - (2194697.0 / (30690717750.0 * 2.0 ** 4))
        )
    def grid_to_grid_radii(self, grid):
        # Radial distance of every (y, x) coordinate from the origin.
        return np.sqrt(np.add(np.square(grid[:, 0]), np.square(grid[:, 1])))
    def grid_angle_to_profile(self, grid_thetas):
        """The angle between each (y,x) coordinate on the grid and the profile, in radians.
        Parameters
        -----------
        grid_thetas
            The angle theta counter-clockwise from the positive x-axis to each coordinate in radians.
        """
        return np.cos(grid_thetas), np.sin(grid_thetas)
    def grid_to_grid_cartesian(self, grid, radius):
        """
        Convert a grid of (y,x) coordinates with their specified circular radii to their original (y,x) Cartesian
        coordinates.
        Parameters
        ----------
        grid
            The (y, x) coordinates in the reference frame of the profile.
        radius
            The circular radius of each coordinate from the profile center.
        """
        grid_thetas = np.arctan2(grid[:, 0], grid[:, 1])
        cos_theta, sin_theta = self.grid_angle_to_profile(grid_thetas=grid_thetas)
        return np.multiply(radius[:, None], np.vstack((sin_theta, cos_theta)).T)
    @grid_decorators.grid_2d_to_structure
    def ndarray_1d_from(self, grid) -> np.ndarray:
        """
        Mock function mimicking the behaviour of a class function which given an input 1D grid, returns a 1D ndarray
        of shape [total_masked_grid_pixels].
        Such functions are common in **PyAutoGalaxy** for light and mass profile objects.
        """
        grid_radii = self.grid_to_grid_radii(grid=grid)
        return np.exp(
            np.multiply(
                -self.sersic_constant,
                np.add(np.power(np.divide(grid_radii, 0.2), 1.0 / 2.0), -1),
            )
        )
    @grid_decorators.grid_2d_to_structure
    def ndarray_2d_from(self, grid):
        """
        Mock function mimicking the behaviour of a class function which given an input grid, returns a 2D ndarray
        of shape [total_masked_grid_pixels, 2].
        Such functions are common in **PyAutoGalaxy** for light and mass profile objects.
        """
        return self.grid_to_grid_cartesian(
            grid=grid, radius=np.full(grid.shape[0], 2.0)
        )
    @grid_decorators.grid_2d_to_vector_yx
    @grid_decorators.grid_2d_to_structure
    def ndarray_yx_2d_from(self, grid):
        """
        Mock function mimicking the behaviour of a class function which given an input grid, returns a 2D ndarray
        of shape [total_masked_grid_pixels] which represents a vector field.
        Such functions are common in **PyAutoGalaxy** for light and mass profile objects.
        """
        return self.grid_to_grid_cartesian(
            grid=grid, radius=np.full(grid.shape[0], 2.0)
        )
    @grid_decorators.grid_2d_to_structure_list
    def ndarray_1d_list_from(self, grid):
        """
        Mock function mimicking the behaviour of a class function which given an input 1D grid, returns a list of 1D
        ndarrays of shape [total_masked_grid_pixels].
        Such functions are common in **PyAutoGalaxy** for light and mass profile objects.
        """
        grid_radii = self.grid_to_grid_radii(grid=grid)
        return [
            np.exp(
                np.multiply(
                    -self.sersic_constant,
                    np.add(np.power(np.divide(grid_radii, 0.2), 1.0 / 2.0), -1),
                )
            )
        ]
    @grid_decorators.grid_2d_to_structure_list
    def ndarray_2d_list_from(self, grid):
        """
        Mock function mimicking the behaviour of a class function which given an input grid, returns a 2D list of
        ndarrays of shape [total_masked_grid_pixels, 2].
        Such functions are common in **PyAutoGalaxy** for light and mass profile objects.
        """
        return [
            self.grid_to_grid_cartesian(grid=grid, radius=np.full(grid.shape[0], 2.0))
        ]
    @grid_decorators.grid_2d_to_vector_yx
    @grid_decorators.grid_2d_to_structure_list
    def ndarray_yx_2d_list_from(self, grid):
        """
        Mock function mimicking the behaviour of a class function which given an input grid, returns a list of 2D
        ndarrays of shape [total_masked_grid_pixels] which represents a vector field.
        Such functions are common in **PyAutoGalaxy** for light and mass profile objects.
        """
        return [
            self.grid_to_grid_cartesian(grid=grid, radius=np.full(grid.shape[0], 2.0))
        ]
class MockGrid1DLikeObj:
    """Mock 1D grid-like object; the commented-out methods below are retained
    variants of the ``grid_1d`` decorator interface (kept deliberately)."""
    def __init__(self, centre=(0.0, 0.0), angle=0.0):
        # NOTE(review): presumably read by the grid_1d decorators when
        # projecting the grid — confirm against grid_decorators.
        self.centre = centre
        self.angle = angle
    @grid_decorators.grid_1d_to_structure
    def ndarray_1d_from(self, grid):
        # One unit value per grid pixel.
        return np.ones(shape=grid.shape[0])
    # @grid_decorators.grid_1d_to_structure
    # def ndarray_2d_from(self, grid):
    #     return np.multiply(2.0, grid)
    # @grid_decorators.grid_1d_to_structure_list
    # def ndarray_1d_list_from(self, grid):
    #     return [np.ones(shape=grid.shape[0]), 2.0 * np.ones(shape=grid.shape[0])]
    #
    # @grid_decorators.grid_1d_to_structure_list
    # def ndarray_2d_list_from(self, grid):
    #     return [np.multiply(1.0, grid), np.multiply(2.0, grid)]
class MockGrid2DLikeObj:
    """Mock 2D grid-like object returning simple constant / scaled structures
    so the ``grid_2d`` decorators can be tested for every return signature."""

    def __init__(self):
        pass

    @grid_decorators.grid_2d_to_structure
    def ndarray_1d_from(self, grid):
        """Return one unit value per masked grid pixel (1D ndarray signature)."""
        return np.ones(grid.shape[0])

    @grid_decorators.grid_2d_to_structure
    def ndarray_2d_from(self, grid):
        """Return the grid doubled (2D ndarray signature, shape [pixels, 2])."""
        return grid * 2.0

    @grid_decorators.grid_2d_to_vector_yx
    @grid_decorators.grid_2d_to_structure
    def ndarray_yx_2d_from(self, grid):
        """Return the grid doubled, interpreted as a (y, x) vector field."""
        return np.multiply(2.0, grid)

    @grid_decorators.grid_2d_to_structure_list
    def ndarray_1d_list_from(self, grid):
        """Return two constant 1D ndarrays (ones and twos) per grid pixel."""
        first = np.ones(grid.shape[0])
        second = first * 2.0
        return [first, second]

    @grid_decorators.grid_2d_to_structure_list
    def ndarray_2d_list_from(self, grid):
        """Return the grid scaled by 1 and by 2 as a list of 2D ndarrays."""
        return [grid * 1.0, grid * 2.0]

    @grid_decorators.grid_2d_to_vector_yx
    @grid_decorators.grid_2d_to_structure_list
    def ndarray_yx_2d_list_from(self, grid):
        """Return the grid scaled by 1 and by 2, interpreted as vector fields."""
        return [grid * 1.0, grid * 2.0]
class MockGridRadialMinimum:
    """Mock profile used to exercise the ``relocate_to_radial_minimum`` decorator."""

    def __init__(self):
        pass

    def grid_to_grid_radii(self, grid):
        """Radial distance of each (y, x) coordinate from the origin."""
        return np.sqrt(grid[:, 0] ** 2 + grid[:, 1] ** 2)

    @grid_decorators.relocate_to_radial_minimum
    def deflections_yx_2d_from(self, grid):
        """Identity pass-through; the decorator's relocation is what is under test."""
        return grid
| 36.16443 | 117 | 0.628097 |
d62a466b95a610f12a05baee299e341a34d152c1 | 4,221 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/iaa_svm_container.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/iaa_svm_container.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/iaa_svm_container.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protection_container import ProtectionContainer
class IaaSVMContainer(ProtectionContainer):
    """IaaS VM workload-specific container.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param friendly_name: Friendly name of the container.
    :type friendly_name: str
    :param backup_management_type: Type of backup managemenent for the
     container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
     'DPM', 'AzureBackupServer', 'AzureSql'
    :type backup_management_type: str or :class:`BackupManagementType
     <azure.mgmt.recoveryservicesbackup.models.BackupManagementType>`
    :param registration_status: Status of registration of the container with
     the Recovery Services Vault.
    :type registration_status: str
    :param health_status: Status of health of the container.
    :type health_status: str
    :ivar container_type: Type of the container. The value of this property
     for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2. Classic
     Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows
     machines (like MAB, DPM etc) is Windows 4. Azure SQL instance is
     AzureSqlContainer. Possible values include: 'Invalid', 'Unknown',
     'IaasVMContainer', 'IaasVMServiceContainer', 'DPMContainer',
     'AzureBackupServerContainer', 'MABContainer', 'Cluster',
     'AzureSqlContainer', 'Windows', 'VCenter'
    :vartype container_type: str or :class:`ContainerType
     <azure.mgmt.recoveryservicesbackup.models.ContainerType>`
    :param protectable_object_type: Polymorphic Discriminator
    :type protectable_object_type: str
    :param virtual_machine_id: Fully qualified ARM url of the virtual machine
     represented by this Azure IaaS VM container.
    :type virtual_machine_id: str
    :param virtual_machine_version: Specifies whether the container represents
     a Classic or an Azure Resource Manager VM.
    :type virtual_machine_version: str
    :param resource_group: Resource group name of Recovery Services Vault.
    :type resource_group: str
    """
    # AutoRest-generated constraints: container_type is server-populated only,
    # and the polymorphic discriminator must always be present.
    _validation = {
        'container_type': {'readonly': True},
        'protectable_object_type': {'required': True},
    }
    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
        'registration_status': {'key': 'registrationStatus', 'type': 'str'},
        'health_status': {'key': 'healthStatus', 'type': 'str'},
        'container_type': {'key': 'containerType', 'type': 'str'},
        'protectable_object_type': {'key': 'protectableObjectType', 'type': 'str'},
        'virtual_machine_id': {'key': 'virtualMachineId', 'type': 'str'},
        'virtual_machine_version': {'key': 'virtualMachineVersion', 'type': 'str'},
        'resource_group': {'key': 'resourceGroup', 'type': 'str'},
    }
    # Maps discriminator values to concrete subclasses for deserialization.
    _subtype_map = {
        'protectable_object_type': {'Microsoft.ClassicCompute/virtualMachines': 'AzureIaaSClassicComputeVMContainer', 'Microsoft.Compute/virtualMachines': 'AzureIaaSComputeVMContainer'}
    }
    def __init__(self, friendly_name=None, backup_management_type=None, registration_status=None, health_status=None, virtual_machine_id=None, virtual_machine_version=None, resource_group=None):
        super(IaaSVMContainer, self).__init__(friendly_name=friendly_name, backup_management_type=backup_management_type, registration_status=registration_status, health_status=health_status)
        self.virtual_machine_id = virtual_machine_id
        self.virtual_machine_version = virtual_machine_version
        self.resource_group = resource_group
        # Discriminator value identifying this type to the serializer.
        self.protectable_object_type = 'IaaSVMContainer'
| 51.47561 | 194 | 0.706705 |
46273000fd5aa170d3687ffe04060656d7f77c90 | 16,005 | py | Python | examples/prostate/pt/learners/supervised_learner.py | ArnovanHilten/NVFlare | bb45e7d606849c6bc8f7542347459c6ba1be00c4 | [
"Apache-2.0"
] | 2 | 2021-12-01T15:56:19.000Z | 2021-12-02T09:00:07.000Z | examples/prostate/pt/learners/supervised_learner.py | ArnovanHilten/NVFlare | bb45e7d606849c6bc8f7542347459c6ba1be00c4 | [
"Apache-2.0"
] | null | null | null | examples/prostate/pt/learners/supervised_learner.py | ArnovanHilten/NVFlare | bb45e7d606849c6bc8f7542347459c6ba1be00c4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import ReservedHeaderKey, Shareable, make_reply
from nvflare.apis.signal import Signal
from nvflare.app_common.abstract.learner_spec import Learner
from nvflare.app_common.app_constant import AppConstants, ModelName, ValidateType
class SupervisedLearner(Learner):
    def __init__(
        self,
        train_config_filename,
        aggregation_epochs: int = 1,
        train_task_name: str = AppConstants.TASK_TRAIN,
        submit_model_task_name: str = AppConstants.TASK_SUBMIT_MODEL,
    ):
        """Simple supervised trainer for federated learning.

        Only lightweight bookkeeping happens here; the heavy setup (paths,
        model, loaders) is deferred to initialize() once the run starts.

        Args:
            train_config_filename: name of the training config file, joined
                with the app config directory in initialize().
            aggregation_epochs: the number of training epochs for a round. Defaults to 1.
            train_task_name: name of the task to train the model.
            submit_model_task_name: name of the task to submit the best local model.

        Returns:
            a Shareable with the updated local model after running `execute()`
            or the best local model depending on the specified task.
        """
        super().__init__()

        # Static configuration captured at construction time.
        self.train_config_filename = train_config_filename
        self.aggregation_epochs = aggregation_epochs
        self.train_task_name = train_task_name
        self.submit_model_task_name = submit_model_task_name

        # Mutable training state.
        self.best_acc = 0.0
        self.initialized = False  # NOTE(review): not updated elsewhere in this class - confirm intended use.

        # Epoch counters: epoch_of_start_time is the offset of the current
        # round, epoch_global the absolute epoch across rounds.
        self.epoch_of_start_time = 0
        self.epoch_global = 0

        # FedProx settings (mu == 0 disables the proximal term).
        self.fedproxloss_mu = 0.0
        self.criterion_prox = None
    def initialize(self, parts: dict, fl_ctx: FLContext):
        """One-time setup when the FL run starts.

        Resolves workspace paths for checkpoints, records the client identity,
        creates the TensorBoard writer, and loads the training configuration.

        Args:
            parts: framework-provided components (unused here).
            fl_ctx: FL context giving access to engine, workspace and args.
        """
        # when the run starts, this is where the actual settings get initialized for trainer
        # Set the paths according to fl_ctx
        engine = fl_ctx.get_engine()
        ws = engine.get_workspace()
        app_config_dir = ws.get_app_config_dir(fl_ctx.get_run_number())
        app_dir = ws.get_app_dir(fl_ctx.get_run_number())
        # Checkpoint locations: rolling local model and the best-so-far model.
        self.local_model_file = os.path.join(app_dir, "local_model.pt")
        self.best_local_model_file = os.path.join(app_dir, "best_local_model.pt")
        train_config_file_path = os.path.join(app_config_dir, self.train_config_filename)
        # get and print the args
        fl_args = fl_ctx.get_prop(FLContextKey.ARGS)
        self.client_id = fl_ctx.get_identity_name()
        self.log_info(
            fl_ctx,
            f"Client {self.client_id} initialized with args: \n {fl_args}",
        )
        # Set local tensorboard writer - to be replaced by event
        self.writer = SummaryWriter(app_dir)
        # Set the training-related contexts (delegates to _extra_train_config)
        self.train_config(fl_ctx, train_config_file_path=train_config_file_path)
    def _extra_train_config(self, fl_ctx: FLContext, config_info: dict):
        """Additional monai training configuration customized to individual tasks.

        Subclass hook called from train_config() with the parsed JSON config.
        Implementations must populate the following attributes, which the
        training and validation code in this base class relies on:
        self.model
        self.device
        self.optimizer
        self.criterion
        self.transform_post
        self.train_loader
        self.train_for_valid_loader
        self.valid_loader
        self.inferer
        self.valid_metric
        """
        # Intentionally a no-op in the base class.
        pass
def train_config(self, fl_ctx: FLContext, train_config_file_path: str):
"""Common monai traning configuration
Individual training tasks can be customized by implementing `_extra_train_config`
"""
# Load training configurations
if not os.path.isfile(train_config_file_path):
self.log_error(
fl_ctx,
f"Training configuration file does not exist at {train_config_file_path}",
)
with open(train_config_file_path) as file:
config_info = json.load(file)
self._extra_train_config(fl_ctx, config_info)
    def finalize(self, fl_ctx: FLContext):
        """Cleanup hook called at the end of the run; currently a no-op.

        NOTE(review): self.writer (SummaryWriter) created in initialize() is
        never closed - consider closing it here.
        """
        # collect threads, close files here
        pass
def local_train(
self,
fl_ctx,
train_loader,
model_global,
abort_signal: Signal,
val_freq: int = 0,
):
"""
val_freq: the validation interval for local training
"""
for epoch in range(self.aggregation_epochs):
if abort_signal.triggered:
return
self.model.train()
epoch_len = len(train_loader)
self.epoch_global = self.epoch_of_start_time + epoch
self.log_info(
fl_ctx,
f"Local epoch {self.client_id}: {epoch + 1}/{self.aggregation_epochs} (lr={self.lr})",
)
for i, batch_data in enumerate(train_loader):
if abort_signal.triggered:
return
inputs = batch_data["image"].to(self.device)
labels = batch_data["label"].to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
# FedProx loss term
if self.fedproxloss_mu > 0:
fed_prox_loss = self.criterion_prox(self.model, model_global)
loss += fed_prox_loss
loss.backward()
self.optimizer.step()
current_step = epoch_len * self.epoch_global + i
self.writer.add_scalar("train_loss", loss.item(), current_step)
if val_freq > 0 and epoch % val_freq == 0:
acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model")
if acc > self.best_acc:
self.save_model(is_best=True)
    def local_valid(self, valid_loader, abort_signal: Signal, tb_id=None):
        """Compute classification accuracy of self.model on valid_loader.

        Args:
            valid_loader: loader yielding (inputs, labels) tuples.
                NOTE(review): this differs from the dict-style batches
                ("image"/"label") consumed in local_train - confirm the
                validation loader really yields tuples.
            abort_signal: cooperative abort flag.
            tb_id: optional TensorBoard scalar tag; when set, the metric is
                logged at step self.epoch_global.

        Returns:
            Accuracy in [0, 1], or None if aborted mid-validation.
        """
        self.model.eval()
        with torch.no_grad():
            correct, total = 0, 0
            for i, (inputs, labels) in enumerate(valid_loader):
                if abort_signal.triggered:
                    return None
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                outputs = self.model(inputs)
                # Predicted class = argmax over the class dimension.
                _, pred_label = torch.max(outputs.data, 1)
                total += inputs.data.size()[0]
                correct += (pred_label == labels.data).sum().item()
            # NOTE(review): raises ZeroDivisionError on an empty loader.
            metric = correct / float(total)
            if tb_id:
                self.writer.add_scalar(tb_id, metric, self.epoch_global)
        return metric
def save_model(self, is_best=False):
# save model
model_weights = self.model.state_dict()
save_dict = {"model_weights": model_weights, "epoch": self.epoch_global}
if is_best:
save_dict.update({"best_acc": self.best_acc})
torch.save(save_dict, self.best_local_model_file)
else:
torch.save(save_dict, self.local_model_file)
def train(
self,
shareable: Shareable,
fl_ctx: FLContext,
abort_signal: Signal,
) -> Shareable:
# Check abort signal
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
# get round information
current_round = shareable.get_header(AppConstants.CURRENT_ROUND)
total_rounds = shareable.get_header(AppConstants.NUM_ROUNDS)
self.log_info(fl_ctx, f"Current/Total Round: {current_round + 1}/{total_rounds}")
self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
# update local model weights with received weights
dxo = from_shareable(shareable)
global_weights = dxo.data
# Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
local_var_dict = self.model.state_dict()
model_keys = global_weights.keys()
for var_name in local_var_dict:
if var_name in model_keys:
weights = global_weights[var_name]
try:
# reshape global weights to compute difference later on
global_weights[var_name] = np.reshape(weights, local_var_dict[var_name].shape)
# update the local dict
local_var_dict[var_name] = torch.as_tensor(global_weights[var_name])
except Exception as e:
raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
self.model.load_state_dict(local_var_dict)
# local steps
epoch_len = len(self.train_loader)
self.log_info(fl_ctx, f"Local steps per epoch: {epoch_len}")
# make a copy of model_global as reference for potential FedProx loss
if self.fedproxloss_mu > 0:
model_global = copy.deepcopy(self.model)
for param in model_global.parameters():
param.requires_grad = False
else:
model_global = None
# local train
self.local_train(
fl_ctx=fl_ctx,
train_loader=self.train_loader,
model_global=model_global,
abort_signal=abort_signal,
val_freq=0,
)
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.epoch_of_start_time += self.aggregation_epochs
# perform valid after local train
acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_local_model")
if abort_signal.triggered:
return make_reply(ReturnCode.TASK_ABORTED)
self.log_info(fl_ctx, f"val_acc_local_model: {acc:.4f}")
# save model
self.save_model(is_best=False)
if acc > self.best_acc:
self.save_model(is_best=True)
# compute delta model, global model has the primary key set
local_weights = self.model.state_dict()
model_diff = {}
for name in global_weights:
if name not in local_weights:
continue
model_diff[name] = local_weights[name].cpu().numpy() - global_weights[name]
if np.any(np.isnan(model_diff[name])):
self.system_panic(f"{name} weights became NaN...", fl_ctx)
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
# build the shareable
dxo = DXO(data_kind=DataKind.WEIGHT_DIFF, data=model_diff)
dxo.set_meta_prop(MetaKey.NUM_STEPS_CURRENT_ROUND, epoch_len)
self.log_info(fl_ctx, "Local epochs finished. Returning shareable")
return dxo.to_shareable()
def get_model_for_validation(self, model_name: str, fl_ctx: FLContext) -> Shareable:
# Retrieve the best local model saved during training.
if model_name == ModelName.BEST_MODEL:
model_data = None
try:
# load model to cpu as server might or might not have a GPU
model_data = torch.load(self.best_local_model_file, map_location="cpu")
except Exception as e:
self.log_error(fl_ctx, f"Unable to load best model: {e}")
# Create DXO and shareable from model data.
if model_data:
dxo = DXO(data_kind=DataKind.WEIGHTS, data=model_data["model_weights"])
return dxo.to_shareable()
else:
# Set return code.
self.log_error(fl_ctx, f"best local model not found at {self.best_local_model_file}.")
return make_reply(ReturnCode.EXECUTION_RESULT_ERROR)
else:
raise ValueError(f"Unknown model_type: {model_name}") # Raised errors are caught in LearnerExecutor class.
    def validate(self, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable:
        """Evaluate a received model locally.

        Supports two modes selected by the VALIDATE_TYPE header:
        BEFORE_TRAIN_VALIDATE (accuracy of the incoming global model) and
        MODEL_VALIDATE (train + validation accuracy, e.g. for cross-site
        validation of another client's model).

        Returns:
            Shareable with a METRICS DXO, or an abort/unknown-type reply.
        """
        # Check abort signal
        if abort_signal.triggered:
            return make_reply(ReturnCode.TASK_ABORTED)
        # get validation information
        self.log_info(fl_ctx, f"Client identity: {fl_ctx.get_identity_name()}")
        model_owner = shareable.get(ReservedHeaderKey.HEADERS).get(AppConstants.MODEL_OWNER)
        if model_owner:
            self.log_info(fl_ctx, f"Evaluating model from {model_owner} on {fl_ctx.get_identity_name()}")
        else:
            # No owner header: this is the server's global model.
            model_owner = "global_model"
        # update local model weights with received weights
        dxo = from_shareable(shareable)
        global_weights = dxo.data
        # Before loading weights, tensors might need to be reshaped to support HE for secure aggregation.
        local_var_dict = self.model.state_dict()
        model_keys = global_weights.keys()
        # n_loaded guards against a silently empty/mismatched weight dict.
        n_loaded = 0
        for var_name in local_var_dict:
            if var_name in model_keys:
                weights = torch.as_tensor(global_weights[var_name], device=self.device)
                try:
                    # update the local dict
                    local_var_dict[var_name] = torch.as_tensor(torch.reshape(weights, local_var_dict[var_name].shape))
                    n_loaded += 1
                except Exception as e:
                    raise ValueError("Convert weight from {} failed with error: {}".format(var_name, str(e)))
        self.model.load_state_dict(local_var_dict)
        if n_loaded == 0:
            raise ValueError(f"No weights loaded for validation! Received weight dict is {global_weights}")
        validate_type = shareable.get_header(AppConstants.VALIDATE_TYPE)
        if validate_type == ValidateType.BEFORE_TRAIN_VALIDATE:
            # perform valid before local train
            global_acc = self.local_valid(self.valid_loader, abort_signal, tb_id="val_acc_global_model")
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"val_acc_global_model ({model_owner}): {global_acc:.4f}")
            return DXO(data_kind=DataKind.METRICS, data={MetaKey.INITIAL_METRICS: global_acc}, meta={}).to_shareable()
        elif validate_type == ValidateType.MODEL_VALIDATE:
            # perform valid: accuracy on the training split (via the
            # validation-style loader) and on the validation split
            train_acc = self.local_valid(self.train_for_valid_loader, abort_signal)
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"training acc ({model_owner}): {train_acc:.4f}")
            val_acc = self.local_valid(self.valid_loader, abort_signal)
            if abort_signal.triggered:
                return make_reply(ReturnCode.TASK_ABORTED)
            self.log_info(fl_ctx, f"validation acc ({model_owner}): {val_acc:.4f}")
            self.log_info(fl_ctx, "Evaluation finished. Returning shareable")
            val_results = {"train_accuracy": train_acc, "val_accuracy": val_acc}
            metric_dxo = DXO(data_kind=DataKind.METRICS, data=val_results)
            return metric_dxo.to_shareable()
        else:
            return make_reply(ReturnCode.VALIDATE_TYPE_UNKNOWN)
| 42.007874 | 119 | 0.639238 |
0e4805c546f30242d12ec71e8fb1b7eb151e3c1e | 270 | py | Python | main.py | fivecountry/QupyRibbon | c5ed71ca94e07a243e758f3980aedb5fa4f3e3b1 | [
"MIT"
] | 18 | 2018-04-24T13:31:57.000Z | 2021-07-15T16:58:55.000Z | main.py | fivecountry/QupyRibbon | c5ed71ca94e07a243e758f3980aedb5fa4f3e3b1 | [
"MIT"
] | 1 | 2019-04-20T06:16:09.000Z | 2019-04-20T06:16:09.000Z | main.py | fivecountry/QupyRibbon | c5ed71ca94e07a243e758f3980aedb5fa4f3e3b1 | [
"MIT"
] | 12 | 2017-11-03T13:21:15.000Z | 2021-12-09T20:09:59.000Z | import sys
from PyQt5.QtWidgets import *
from GUI.MainWindow import MainWindow
__author__ = 'mamj'
def main():
a = QApplication(sys.argv)
a.setQuitOnLastWindowClosed(True)
main_window = MainWindow()
main_window.show()
sys.exit(a.exec_())
main()
| 15.882353 | 37 | 0.703704 |
0cdddd8e4ebcd0a10cefb4c5b01a8b8906835adb | 10,286 | py | Python | research/object_detection/core/standard_fields.py | matsavage/models | 634309ac537bbfc5198197b92096a59b52b0bb45 | [
"Apache-2.0"
] | 3 | 2018-10-31T02:16:47.000Z | 2018-11-06T09:11:37.000Z | research/object_detection/core/standard_fields.py | John618/models | 42f98218d7b0ee54077d4e07658442bc7ae0e661 | [
"Apache-2.0"
] | 1 | 2019-01-16T08:23:08.000Z | 2019-01-16T08:23:08.000Z | research/object_detection/core/standard_fields.py | John618/models | 42f98218d7b0ee54077d4e07658442bc7ae0e661 | [
"Apache-2.0"
] | 6 | 2019-07-01T00:03:08.000Z | 2022-03-26T19:00:32.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard field used by BoxList
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
  """Names for the input tensors.
  Holds the standard data field names to use for identifying input tensors. This
  should be used by the decoder to identify keys for the returned tensor_dict
  containing input tensors. And it should be used by the model to identify the
  tensors it needs.
  Attributes:
    image: image.
    image_additional_channels: additional channels.
    original_image: image in the original input size.
    original_image_spatial_shape: spatial shape of the original input image.
    key: unique key corresponding to image.
    source_id: source of the original image.
    filename: original filename of the dataset (without common path).
    groundtruth_image_classes: image-level class labels.
    groundtruth_image_confidences: image-level class confidences.
    groundtruth_boxes: coordinates of the ground truth boxes in the image.
    groundtruth_classes: box-level class labels.
    groundtruth_confidences: box-level class confidences.
    groundtruth_label_types: box-level label types (e.g. explicit negative).
    groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
      is the groundtruth a single object or a crowd.
    groundtruth_area: area of a groundtruth segment.
    groundtruth_difficult: is a `difficult` object
    groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
      same class, forming a connected group, where instances are heavily
      occluding each other.
    proposal_boxes: coordinates of object proposal boxes.
    proposal_objectness: objectness score of each proposal.
    groundtruth_instance_masks: ground truth instance masks.
    groundtruth_instance_boundaries: ground truth instance boundaries.
    groundtruth_instance_classes: instance mask-level class labels.
    groundtruth_keypoints: ground truth keypoints.
    groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
    groundtruth_label_scores: groundtruth label scores.
    groundtruth_weights: groundtruth weight factor for bounding boxes.
    num_groundtruth_boxes: number of groundtruth boxes.
    is_annotated: whether an image has been labeled or not.
    true_image_shape: true shapes of images in the resized images, as resized
      images can be padded with zeros.
    multiclass_scores: the label score per class for each box.
  """
  # Attribute values double as string keys in the decoder's tensor_dict.
  image = 'image'
  image_additional_channels = 'image_additional_channels'
  original_image = 'original_image'
  original_image_spatial_shape = 'original_image_spatial_shape'
  key = 'key'
  source_id = 'source_id'
  filename = 'filename'
  groundtruth_image_classes = 'groundtruth_image_classes'
  groundtruth_image_confidences = 'groundtruth_image_confidences'
  groundtruth_boxes = 'groundtruth_boxes'
  groundtruth_classes = 'groundtruth_classes'
  groundtruth_confidences = 'groundtruth_confidences'
  groundtruth_label_types = 'groundtruth_label_types'
  groundtruth_is_crowd = 'groundtruth_is_crowd'
  groundtruth_area = 'groundtruth_area'
  groundtruth_difficult = 'groundtruth_difficult'
  groundtruth_group_of = 'groundtruth_group_of'
  proposal_boxes = 'proposal_boxes'
  proposal_objectness = 'proposal_objectness'
  groundtruth_instance_masks = 'groundtruth_instance_masks'
  groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
  groundtruth_instance_classes = 'groundtruth_instance_classes'
  groundtruth_keypoints = 'groundtruth_keypoints'
  groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
  groundtruth_label_scores = 'groundtruth_label_scores'
  groundtruth_weights = 'groundtruth_weights'
  num_groundtruth_boxes = 'num_groundtruth_boxes'
  is_annotated = 'is_annotated'
  true_image_shape = 'true_image_shape'
  multiclass_scores = 'multiclass_scores'
class DetectionResultFields(object):
  """Naming conventions for storing the output of the detector.
  Attribute values double as string keys in the detector's output dictionary.
  Attributes:
    source_id: source of the original image.
    key: unique key corresponding to image.
    detection_boxes: coordinates of the detection boxes in the image.
    detection_scores: detection scores for the detection boxes in the image.
    detection_classes: detection-level class labels.
    detection_masks: contains a segmentation mask for each detection box.
    detection_boundaries: contains an object boundary for each detection box.
    detection_keypoints: contains detection keypoints for each detection box.
    num_detections: number of detections in the batch.
  """
  source_id = 'source_id'
  key = 'key'
  detection_boxes = 'detection_boxes'
  detection_scores = 'detection_scores'
  detection_classes = 'detection_classes'
  detection_masks = 'detection_masks'
  detection_boundaries = 'detection_boundaries'
  detection_keypoints = 'detection_keypoints'
  num_detections = 'num_detections'
class BoxListFields(object):
  """Naming conventions for BoxLists.
  Attribute values double as string keys for per-box fields on a BoxList.
  Attributes:
    boxes: bounding box coordinates.
    classes: classes per bounding box.
    scores: scores per bounding box.
    weights: sample weights per bounding box.
    objectness: objectness score per bounding box.
    masks: masks per bounding box.
    boundaries: boundaries per bounding box.
    keypoints: keypoints per bounding box.
    keypoint_heatmaps: keypoint heatmaps per bounding box.
    is_crowd: is_crowd annotation per bounding box.
  """
  boxes = 'boxes'
  classes = 'classes'
  scores = 'scores'
  weights = 'weights'
  objectness = 'objectness'
  masks = 'masks'
  boundaries = 'boundaries'
  keypoints = 'keypoints'
  keypoint_heatmaps = 'keypoint_heatmaps'
  is_crowd = 'is_crowd'
class TfExampleFields(object):
  """TF-example proto feature names for object detection.
  Holds the standard feature names to load from an Example proto for object
  detection. Attribute values double as the feature keys inside the proto.
  Attributes:
    image_encoded: JPEG encoded string
    image_format: image format, e.g. "JPEG"
    filename: filename
    channels: number of channels of image
    colorspace: colorspace, e.g. "RGB"
    height: height of image in pixels, e.g. 462
    width: width of image in pixels, e.g. 581
    source_id: original source of the image
    image_class_text: image-level label in text format
    image_class_label: image-level label in numerical format
    object_class_text: labels in text format, e.g. ["person", "cat"]
    object_class_label: labels in numbers, e.g. [16, 8]
    object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
    object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
    object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
    object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
    object_view: viewpoint of object, e.g. ["frontal", "left"]
    object_truncated: is object truncated, e.g. [true, false]
    object_occluded: is object occluded, e.g. [true, false]
    object_difficult: is object difficult, e.g. [true, false]
    object_group_of: is object a single object or a group of objects
    object_depiction: is object a depiction
    object_is_crowd: [DEPRECATED, use object_group_of instead]
      is the object a single object or a crowd
    object_segment_area: the area of the segment.
    object_weight: a weight factor for the object's bounding box.
    instance_masks: instance segmentation masks.
    instance_boundaries: instance boundaries.
    instance_classes: Classes for each instance segmentation mask.
    detection_class_label: class label in numbers.
    detection_bbox_ymin: ymin coordinates of a detection box.
    detection_bbox_xmin: xmin coordinates of a detection box.
    detection_bbox_ymax: ymax coordinates of a detection box.
    detection_bbox_xmax: xmax coordinates of a detection box.
    detection_score: detection score for the class label and box.
  """
  image_encoded = 'image/encoded'
  image_format = 'image/format'  # format is reserved keyword
  filename = 'image/filename'
  channels = 'image/channels'
  colorspace = 'image/colorspace'
  height = 'image/height'
  width = 'image/width'
  source_id = 'image/source_id'
  image_class_text = 'image/class/text'
  image_class_label = 'image/class/label'
  object_class_text = 'image/object/class/text'
  object_class_label = 'image/object/class/label'
  object_bbox_ymin = 'image/object/bbox/ymin'
  object_bbox_xmin = 'image/object/bbox/xmin'
  object_bbox_ymax = 'image/object/bbox/ymax'
  object_bbox_xmax = 'image/object/bbox/xmax'
  object_view = 'image/object/view'
  object_truncated = 'image/object/truncated'
  object_occluded = 'image/object/occluded'
  object_difficult = 'image/object/difficult'
  object_group_of = 'image/object/group_of'
  object_depiction = 'image/object/depiction'
  object_is_crowd = 'image/object/is_crowd'
  object_segment_area = 'image/object/segment/area'
  object_weight = 'image/object/weight'
  instance_masks = 'image/segmentation/object'
  instance_boundaries = 'image/boundaries/object'
  instance_classes = 'image/segmentation/object/class'
  # Detection (model output) features, parallel to the groundtruth keys above.
  detection_class_label = 'image/detection/label'
  detection_bbox_ymin = 'image/detection/bbox/ymin'
  detection_bbox_xmin = 'image/detection/bbox/xmin'
  detection_bbox_ymax = 'image/detection/bbox/ymax'
  detection_bbox_xmax = 'image/detection/bbox/xmax'
  detection_score = 'image/detection/score'
9a54e07c38ce57de6b9a0435f60c4354b432c26a | 19 | py | Python | realtime_redis_backup/__init__.py | cr0hn/realtime-redis-backup | fb6237f0dfc7f8add0f378fb9002383d4895fa3e | [
"BSD-3-Clause"
] | 10 | 2020-05-13T13:05:38.000Z | 2020-07-01T06:46:06.000Z | realtime_redis_backup/__init__.py | cr0hn/realtime-redis-backup | fb6237f0dfc7f8add0f378fb9002383d4895fa3e | [
"BSD-3-Clause"
] | null | null | null | realtime_redis_backup/__init__.py | cr0hn/realtime-redis-backup | fb6237f0dfc7f8add0f378fb9002383d4895fa3e | [
"BSD-3-Clause"
] | 1 | 2021-03-30T14:55:02.000Z | 2021-03-30T14:55:02.000Z | from .rrb import *
| 9.5 | 18 | 0.684211 |
21259ec90e23baf720d2e6508b32d1394de31356 | 366 | py | Python | cursopython/pythonteste/aula19c.py | AtilaCosta87/Python | b4eea7885d16df80feecc4c699a8348ca13a80c2 | [
"MIT"
] | null | null | null | cursopython/pythonteste/aula19c.py | AtilaCosta87/Python | b4eea7885d16df80feecc4c699a8348ca13a80c2 | [
"MIT"
] | null | null | null | cursopython/pythonteste/aula19c.py | AtilaCosta87/Python | b4eea7885d16df80feecc4c699a8348ca13a80c2 | [
"MIT"
] | null | null | null | estado = dict()
brasil = list()
for c in range(0, 3):
estado['uf'] = str(input('Unidade Federativa: '))
estado['sigla'] = str(input('Sigla do Estado: '))
brasil.append(estado.copy())
#for e in brasil:
# for k, v in e.items():
# print(f'O campo {k} tem valor {v}')
for e in brasil:
for v in e.values():
print(v, end=' ')
print()
| 26.142857 | 53 | 0.562842 |
a7ff954785941d3e7598f19cf599dde13a923e8b | 5,340 | py | Python | src/pyronn_torch/parallel.py | maxrohleder/pyronn-torch | 16874724258557c670cb815d3a85d6d9ce7aeabf | [
"MIT"
] | null | null | null | src/pyronn_torch/parallel.py | maxrohleder/pyronn-torch | 16874724258557c670cb815d3a85d6d9ce7aeabf | [
"MIT"
] | 1 | 2021-06-12T16:00:06.000Z | 2021-06-12T16:00:06.000Z | src/pyronn_torch/parallel.py | maxrohleder/pyronn-torch | 16874724258557c670cb815d3a85d6d9ce7aeabf | [
"MIT"
] | null | null | null | #
# Copyright © 2021 Mayank Patwari <mayank.patwari@fau.de>
#
# Distributed under terms of the GPLv3 license.
"""
"""
import numpy as np
import pyronn_torch
import torch
class State:
    """Plain value object bundling the geometry of one projection setup."""

    def __init__(self, detector_origin, detector_spacing, projection_shape, ray_vectors,
                 volume_origin, volume_shape, volume_spacing):
        # Detector geometry.
        self._detector_origin = detector_origin
        self._detector_spacing = detector_spacing
        # Projection stack layout and per-view ray directions.
        self._projection_shape = projection_shape
        self._ray_vectors = ray_vectors
        # Reconstruction volume geometry.
        self._volume_origin = volume_origin
        self._volume_shape = volume_shape
        self._volume_spacing = volume_spacing
class _ForwardProjection(torch.autograd.Function):
    # Custom autograd op wrapping the pyronn CUDA projection kernels.
    # The first argument of forward/backward is the autograd context object
    # (conventionally named `ctx`); it is called `self` here but is NOT an
    # instance of this class. The geometry State is stashed on the context.
    @staticmethod
    def forward(self, volume, state=None):
        # state is None when autograd itself invokes this function (as the
        # backward pass of _BackwardProjection); the geometry is then read
        # from the context, and a (tensor, None) pair is returned - one
        # gradient entry per input (data, state) of the paired forward call.
        if state is None:
            state = self.state
            return_none = True
        else:
            return_none = False
        # Output sinogram is allocated directly on the GPU; CUDA is required.
        projection = torch.zeros(state._projection_shape,
                                 requires_grad=volume.requires_grad,
                                 device='cuda').float().contiguous()
        pyronn_torch.cpp_extension.call_Parallel_Projection2D_Kernel_Launcher(
            state._detector_origin,
            state._detector_spacing,
            projection,
            state._ray_vectors,
            state._volume_origin[0],
            state._volume_origin[1],
            volume,
            state._volume_spacing[0],
            state._volume_spacing[1]
        )
        self.state = state
        if return_none:
            return projection, None
        else:
            return projection
    @staticmethod
    def backward(self, projection_grad, state=None, *args):
        # Mirror image of forward(): back-projects the incoming gradient.
        if state is None:
            state = self.state
            return_none = True
        else:
            return_none = False
        volume_grad = torch.zeros(state._volume_shape, requires_grad=projection_grad.requires_grad).cuda()
        pyronn_torch.cpp_extension.call_Parallel_Backprojection2D_Kernel_Launcher(
            state._detector_origin,
            state._detector_spacing,
            projection_grad,
            state._ray_vectors,
            state._volume_origin[0],
            state._volume_origin[1],
            volume_grad,
            state._volume_spacing[0],
            state._volume_spacing[1]
        )
        self.state = state
        if return_none:
            return volume_grad, None
        else:
            return volume_grad
class _BackwardProjection(torch.autograd.Function):
    # Mirror of _ForwardProjection: back-projection is the forward pass here,
    # and forward projection serves as its gradient.
    backward = staticmethod(_ForwardProjection.forward)
    forward = staticmethod(_ForwardProjection.backward)
class ParallelProjector:
    """Differentiable 2-D parallel-beam projector built on pyronn CUDA kernels."""

    def __init__(self, detector_origin=None, detector_spacing=1, angles=None,
                 volume_origin=None, volume_shape=None, volume_spacing=None):
        """Set up the projection geometry.

        Args:
            detector_origin: detector origin; defaults to volume_shape[0] / 2.
                (BUGFIX: a legitimate falsy value such as 0 is no longer
                silently replaced by the default, which the previous
                `x or default` idiom did.)
            detector_spacing: detector pixel spacing.
            angles: 1-D tensor of projection angles; defaults to
                torch.linspace(0, 360, 359).
                NOTE(review): torch.cos/torch.sin in _calc_ray_vectors expect
                radians, but this default looks like degrees - confirm units.
            volume_origin: volume origin; defaults to the centered origin.
            volume_shape: [rows, cols] of the volume; defaults to [256, 256].
            volume_spacing: voxel spacing; defaults to [1, 1].
        """
        # BUGFIX: None sentinels replace mutable list defaults (shared across
        # instances) and an eagerly-evaluated tensor default (built at import).
        if volume_shape is None:
            volume_shape = [256, 256]
        if volume_spacing is None:
            volume_spacing = [1, 1]
        if angles is None:
            angles = torch.linspace(0, 360, 360 - 1)
        self._volume_shape = volume_shape
        self._volume_origin = volume_origin if volume_origin is not None \
            else [-v / 2 for v in reversed(volume_shape)]
        self._volume_spacing = volume_spacing
        # One projection row per angle, one column per volume column.
        self._projection_shape = [np.shape(angles)[0], volume_shape[1]]
        self._detector_origin = detector_origin if detector_origin is not None \
            else volume_shape[0] / 2
        self._detector_spacing = detector_spacing
        self._calc_ray_vectors(angles)

    def _calc_ray_vectors(self, angles):
        # One unit direction vector (cos, sin) per projection angle.
        self._ray_vectors = torch.zeros(angles.shape[0], 2)
        self._ray_vectors[:, 0] = torch.cos(angles)
        self._ray_vectors[:, 1] = torch.sin(angles)

    def project_forward(self, volume):
        """Forward-project a batch of volumes into sinograms (CUDA required)."""
        volume = volume.float().cuda().contiguous()
        if len(volume.shape) == 3:
            # Insert a singleton channel dimension.
            volume = volume[:, None, :, :]
        if len(volume.shape) != 4:
            raise ValueError('4D input expected! [batch, channel (only 1), dim1, dim2]')
        elif volume.shape[1] != 1:
            raise ValueError('Only channel dimension of 1 is currently supported!')
        projs = []
        for i, slice in enumerate(volume):
            projs.append(_ForwardProjection().apply(slice[0], State(
                self._detector_origin,
                self._detector_spacing,
                self._projection_shape,
                self._ray_vectors,
                self._volume_origin,
                self._volume_shape,
                self._volume_spacing
            )))
        return torch.stack(projs, axis=0)

    def project_backward(self, projection):
        """Back-project a batch of sinograms into volumes (CUDA required)."""
        projection = projection.float().contiguous().cuda()
        if len(projection.shape) == 2:
            # Promote a single sinogram to a batch of one.
            projection = projection[None, ...]
        if len(projection.shape) != 3:
            raise ValueError('3D input expected! [batch, number_of_views, image_dim]')
        volume = torch.zeros(projection.shape[0],
                             1,
                             self._volume_shape[0],
                             self._volume_shape[1],
                             requires_grad=projection.requires_grad).cuda()
        for i, proj in enumerate(projection):
            volume[i] = _BackwardProjection().apply(proj, State(
                self._detector_origin,
                self._detector_spacing,
                self._projection_shape,
                self._ray_vectors,
                self._volume_origin,
                self._volume_shape,
                self._volume_spacing
            ))
        return volume
| 34.230769 | 106 | 0.602809 |
b95c940baf4f3add10c6d14429354082c6000b6b | 964 | py | Python | freshdesk/v1/tests/test_api_class.py | xtof2all/python-freshdesk | 7753aec9408f1d48542497d0cf192a82758caee6 | [
"BSD-2-Clause"
] | 62 | 2016-03-13T16:05:23.000Z | 2021-12-22T00:35:01.000Z | freshdesk/v1/tests/test_api_class.py | xtof2all/python-freshdesk | 7753aec9408f1d48542497d0cf192a82758caee6 | [
"BSD-2-Clause"
] | 74 | 2015-01-08T09:20:23.000Z | 2022-01-07T15:34:25.000Z | freshdesk/v1/tests/test_api_class.py | xtof2all/python-freshdesk | 7753aec9408f1d48542497d0cf192a82758caee6 | [
"BSD-2-Clause"
] | 65 | 2015-07-06T07:13:41.000Z | 2022-02-16T09:54:07.000Z | import pytest
import responses
from freshdesk.v1.api import API
from freshdesk.v1.tests.conftest import DOMAIN
def test_api_prefix():
    """The computed API prefix is normalized to exactly one trailing slash."""
    # A bare domain and one with a trailing slash yield the same prefix.
    for domain in ("test_domain", "test_domain/"):
        api = API(domain, "test_key")
        assert api._api_prefix == "https://test_domain/"
@responses.activate
def test_403_error():
    """A 403 (bad credentials) response must surface as requests.HTTPError."""
    # Mock the ticket endpoint to always answer 403.
    responses.add(responses.GET, "https://{}/helpdesk/tickets/1.json".format(DOMAIN), status=403)
    api = API(DOMAIN, "invalid_api_key")
    from requests.exceptions import HTTPError
    with pytest.raises(HTTPError):
        api.tickets.get_ticket(1)
@responses.activate
def test_404_error():
    """A 404 (unknown resource/domain) response must surface as requests.HTTPError."""
    DOMAIN_404 = "google.com"
    # Mock the ticket endpoint on the wrong domain to answer 404.
    responses.add(responses.GET, "https://{}/helpdesk/tickets/1.json".format(DOMAIN_404), status=404)
    api = API(DOMAIN_404, "invalid_api_key")
    from requests.exceptions import HTTPError
    with pytest.raises(HTTPError):
        api.tickets.get_ticket(1)
c8b90b50c55faa5bed482de1b1ba0782bc9410b4 | 4,854 | py | Python | samples/RoadStress/sample_3.py | KossBoii/Object-Detection-Metrics | 0536dc6ef2ae65a757962fe68931ddf7199d5f67 | [
"MIT"
] | null | null | null | samples/RoadStress/sample_3.py | KossBoii/Object-Detection-Metrics | 0536dc6ef2ae65a757962fe68931ddf7199d5f67 | [
"MIT"
] | null | null | null | samples/RoadStress/sample_3.py | KossBoii/Object-Detection-Metrics | 0536dc6ef2ae65a757962fe68931ddf7199d5f67 | [
"MIT"
] | null | null | null | import _init_paths
from BoundingBox import BoundingBox
from BoundingBoxes import BoundingBoxes
from Evaluator import *
from utils import *
import argparse
import numpy as np
import cv2
import glob
import os
def get_boxes(dir, allBoundingBoxes, dataset_type, bb_type):
    """Parse annotation text files from *dir* into a BoundingBoxes collection.

    Each ``*.txt`` file holds one box per line:
      - ground truth (``bb_type == "gt"``):  "class x1 y1 x2 y2"
      - detections  (``bb_type == "dt"``):  "class confidence x1 y1 x2 y2"
    Image size is chosen by *dataset_type*: "new" -> (4864, 3648),
    "old" -> (4000, 3000).

    Returns a clone of *allBoundingBoxes* with all parsed boxes added;
    the input collection itself is not modified.
    """
    result = allBoundingBoxes.clone()
    for file in glob.iglob(dir + "/*.txt"):
        # NOTE(review): assumes the image name is always exactly the last
        # 8 characters before ".txt" — verify against the dataset layout.
        img_name = file[-12:-4]
        with open(file, "r") as f:
            for line in f:
                line = line.replace("\n", "")
                vals = line.split(" ")
                assert(bb_type=="gt" or bb_type=="dt")
                if(bb_type == "gt"):
                    assert(len(vals) == 5)
                    idClass = vals[0] #class
                    x1 = float(vals[1])
                    y1 = float(vals[2])
                    x2 = float(vals[3])
                    y2 = float(vals[4])
                    # NOTE(review): if dataset_type is neither "new" nor "old",
                    # `bb` is left unbound (or stale) before addBoundingBox.
                    if(dataset_type == "new"):
                        bb = BoundingBox(img_name, idClass, x1, y1, x2, y2, CoordinatesType.Absolute, (4864, 3648), BBType.GroundTruth, format=BBFormat.XYX2Y2)
                    elif(dataset_type == "old"):
                        bb = BoundingBox(img_name, idClass, x1, y1, x2, y2, CoordinatesType.Absolute, (4000, 3000), BBType.GroundTruth, format=BBFormat.XYX2Y2)
                elif(bb_type == "dt"):
                    assert(len(vals) == 6)
                    idClass = vals[0] # class
                    confidence = float(vals[1]) # confidence
                    x1 = float(vals[2])
                    y1 = float(vals[3])
                    x2 = float(vals[4])
                    y2 = float(vals[5])
                    if(dataset_type == "new"):
                        bb = BoundingBox(img_name, idClass, x1, y1, x2, y2, CoordinatesType.Absolute, (4864, 3648), BBType.Detected, confidence, format=BBFormat.XYX2Y2)
                    elif(dataset_type == "old"):
                        bb = BoundingBox(img_name, idClass, x1, y1, x2, y2, CoordinatesType.Absolute, (4000, 3000), BBType.Detected, confidence, format=BBFormat.XYX2Y2)
                result.addBoundingBox(bb)
    return result
def get_parser():
    """Build the command-line parser for the road-stress evaluation script.

    Defines two mandatory options: ``--gt`` (ground-truth directory) and
    ``--dt`` (detection directory).
    """
    arg_parser = argparse.ArgumentParser(description="Detectron2 evaluation for road stress")
    # Both directory options share the same shape; register them in one pass.
    for flag, help_text in (
        ("--gt", "path to groundtruth directory"),
        ("--dt", "path to detection directory"),
    ):
        arg_parser.add_argument(flag, required=True, help=help_text)
    return arg_parser
if __name__ == "__main__":
args = get_parser().parse_args()
print("Arguments: " + str(args))
for d in ["old", "new"]:
dataset_name = "roadstress_" + d + "_val"
print("Dataset: " + dataset_name)
bbox_gt = BoundingBoxes()
bbox_gt = get_boxes(args.gt + dataset_name, bbox_gt, dataset_type=d, bb_type="gt")
for i in np.arange(0.05, 0.95, 0.05):
i = i.round(decimals=2)
print("Threshold: " + str(i))
all_bbox = get_boxes(args.dt + dataset_name + "/threshold_%.2f" % i,
bbox_gt, dataset_type=d, bb_type="dt")
save_path = "./plot/%s/%s/threshold_%.2f/" % (dataset_name, args.dt[13:-1], i)
os.makedirs(save_path, exist_ok=True)
evaluator = Evaluator()
evaluator.PlotPrecisionRecallCurve(
all_bbox, # Object containing all bounding boxes (ground truths and detections)
IOUThreshold=i, # IOU threshold
method=MethodAveragePrecision.ElevenPointInterpolation, # As the official matlab code
showAP=True, # Show Average Precision in the title of the plot
showInterpolatedPrecision=True,
savePath=save_path) # Plot the interpolated precision curve
# Get metrics with PASCAL VOC metrics
metricsPerClass = evaluator.GetPascalVOCMetrics(
all_bbox, # Object containing all bounding boxes (ground truths and detections)
IOUThreshold=i, # IOU threshold
method=MethodAveragePrecision.ElevenPointInterpolation) # As the official matlab code
print("Average precision values per class:")
# Loop through classes to obtain their metrics
for mc in metricsPerClass:
# Get metric values per each class
c = mc['class']
precision = mc['precision']
recall = mc['recall']
average_precision = mc['AP']
ipre = mc['interpolated precision']
irec = mc['interpolated recall']
# Print AP per class
# print("%s: %s" % ("precision", str(precision)))
# print("%s: %s" % ("recall", str(recall)))
print("%s: %f" % ("AP", average_precision))
# print('%s: %f' % (c, average_precision))
| 44.53211 | 168 | 0.555624 |
f2ae161af10c9b62f524f0d6bf1f4beb833047d8 | 1,163 | py | Python | test.py | 3KUdelta/Orange-Pi-H2-H3-Hardware-PWM-Fan | ee497b8bb7517afc1a7e5278594a3d550b532635 | [
"MIT"
] | null | null | null | test.py | 3KUdelta/Orange-Pi-H2-H3-Hardware-PWM-Fan | ee497b8bb7517afc1a7e5278594a3d550b532635 | [
"MIT"
] | null | null | null | test.py | 3KUdelta/Orange-Pi-H2-H3-Hardware-PWM-Fan | ee497b8bb7517afc1a7e5278594a3d550b532635 | [
"MIT"
] | null | null | null | import OPi.GPIO as GPIO
if __name__ == "__main__":
PWM_chip = 0
PWM_pin = 0
frequency_Hz = 3800
Duty_Cycle_Percent = 100
p = GPIO.PWM(PWM_chip, PWM_pin, frequency_Hz, Duty_Cycle_Percent) # new PWM on channel=LED_gpio frequency=38KHz
print("turn on pwm by pressing button")
input()
p.start_pwm()
print("dimm pwm by pressing button")
input()
p.duty_cycle(50)
print("change pwm frequency by pressing button")
input()
p.change_frequency(500)
print("stop pwm by reducing duty cycle to 0 by pressing button")
input()
p.stop_pwm()
print("change polarity by pressing button")
input()
p.pwm_polarity()
print("increase duty cycle but inverted so light will dim. press button to contunue")
input()
p.duty_cycle(75)
print("duty cycle reduced press button to contunue")
input()
p.duty_cycle(25)
print("stop pwm (it was inverted so it shoudl be full brightness), press button to contunue")
input()
p.stop_pwm()
print("remove object and deactivate pwm pin, press button to contunue")
input()
p.pwm_close()
del p # delete the class | 24.744681 | 118 | 0.66638 |
bc327ea74e770ba02da97556f3d1991368fda00f | 1,155 | py | Python | talk_resources/example_scansion.py | krimkus/pyambic-pentameter | f2355f78a9c073a27acaff23398da2511ac95a42 | [
"MIT"
] | 14 | 2020-01-09T03:19:18.000Z | 2021-10-02T00:34:14.000Z | talk_resources/example_scansion.py | krimkus/pyambic-pentameter | f2355f78a9c073a27acaff23398da2511ac95a42 | [
"MIT"
] | 7 | 2020-01-15T21:24:24.000Z | 2021-09-19T10:05:55.000Z | talk_resources/example_scansion.py | krimkus/pyambic-pentameter | f2355f78a9c073a27acaff23398da2511ac95a42 | [
"MIT"
] | 7 | 2020-01-15T18:25:35.000Z | 2020-05-25T17:04:43.000Z | from generate.syllables import syllable_fingerprint, scansion_matches
from generate.generator import PoemMaker
import random
pm = PoemMaker()
pm.setup()
d, rev_d, seeds = pm.text_sources['poem']
def valid(words, desired_meter):
    """Return True if the stress fingerprint of *words* is still a prefix-match
    for *desired_meter* (i.e. the partial line can still be extended)."""
    fps = ''.join([syllable_fingerprint(word) for word in words])
    # Must not overshoot the meter, and must scan against the prefix it covers.
    return len(fps) <= len(desired_meter) and scansion_matches(fps, desired_meter[:len(fps)])
def all_done(words, pattern):
    """Return True if the words' combined stress fingerprint scans against
    the *entire* meter pattern (the line is complete)."""
    fps = [syllable_fingerprint(word) for word in words]
    return scansion_matches(''.join(fps), pattern)
def generate_scansion_with_backtrack(d, pattern, words):
    """Depth-first search with backtracking for a word chain that scans.

    Args:
        d: Markov-chain dict mapping a word to its possible successors.
        pattern: target meter pattern (stress fingerprint string).
        words: the partial line built so far (list of words).

    Returns the completed word list, or None if this branch is a dead end.
    Prints each attempted partial line ("X" marks a pruned branch).
    """
    if all_done(words, pattern):
        print(' '.join(words))
        return words
    # Prune: partial line can no longer be extended to match the meter.
    if not valid(words, pattern):
        print(' '.join(words), 'X')
        return None
    print(' '.join(words))
    # First word: any chain start; otherwise: successors of the last word.
    if not words:
        options = list(d.keys())
    else:
        last_word = words[-1]
        options = d[last_word]
    # Dedupe, then shuffle so repeated runs produce different lines.
    options = list(set(options))
    random.shuffle(options)
    for option in options:
        result = generate_scansion_with_backtrack(d, pattern, words + [option])
        if result is not None:
            return result
import random
| 27.5 | 93 | 0.669264 |
d4d27a6be51a16c9885ccaa13be5ef9dbaa568c8 | 4,702 | py | Python | onmt/ape/utils/report_manager.py | aeceou/OpenNAPE-py | 9ee9126f85be40b3e83c58ebd58ea56aeaa06a4d | [
"MIT"
] | 1 | 2021-09-17T05:33:37.000Z | 2021-09-17T05:33:37.000Z | onmt/ape/utils/report_manager.py | aeceou/OpenNAPE-py | 9ee9126f85be40b3e83c58ebd58ea56aeaa06a4d | [
"MIT"
] | null | null | null | onmt/ape/utils/report_manager.py | aeceou/OpenNAPE-py | 9ee9126f85be40b3e83c58ebd58ea56aeaa06a4d | [
"MIT"
] | null | null | null | """ Report manager utility """
from __future__ import print_function
import time
from datetime import datetime
import onmt
from onmt.utils.logging import logger
from onmt.ape.utils.statistics import StatisticsForAPE
from onmt.utils.report_manager import ReportMgrBase
def build_report_manager(opt, gpu_rank):
    """Build a ReportMgr, with a TensorBoard writer only on the main GPU.

    The SummaryWriter is created only when ``opt.tensorboard`` is set and
    ``gpu_rank == 0`` (avoids duplicate logs in multi-GPU training). For a
    fresh run (no ``opt.train_from``) a timestamp is appended to the log dir
    so restarts do not overwrite earlier event files.
    """
    if opt.tensorboard and gpu_rank == 0:
        from torch.utils.tensorboard import SummaryWriter
        tensorboard_log_dir = opt.tensorboard_log_dir
        if not opt.train_from:
            tensorboard_log_dir += datetime.now().strftime("/%b-%d_%H-%M-%S")
        writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
    else:
        writer = None
    report_mgr = ReportMgr(opt.report_every, start_time=-1,
                           tensorboard_writer=writer)
    return report_mgr
class ReportMgrBaseForAPE(ReportMgrBase):
    """
    Report Manager base class for APE (automatic post-editing) statistics.
    Inherited classes should override:
        * `_report_training`
        * `_report_step`
    """
    def __init__(self, report_every, start_time=-1.):
        """
        Args:
            report_every(int): Report status every this many sentences
            start_time(float): manually set report start time. Negative values
                mean that you will need to set it later or use `start()`
        """
        super().__init__(report_every, start_time)
    def report_training(self, step, num_steps, learning_rate,
                        report_stats, multigpu=False):
        """
        This is the user-defined batch-level training progress
        report function.
        Args:
            step(int): current step count.
            num_steps(int): total number of batches.
            learning_rate(float): current learning rate.
            report_stats(StatisticsForAPE): old StatisticsForAPE instance.
        Returns:
            report_stats(StatisticsForAPE): updated StatisticsForAPE instance;
                a fresh instance is returned after a report is emitted.
        """
        if self.start_time < 0:
            raise ValueError("""ReportMgr needs to be started
                                (set 'start_time' or use 'start()'""")
        if step % self.report_every == 0:
            # Aggregate stats across workers before reporting.
            if multigpu:
                report_stats = \
                    StatisticsForAPE.all_gather_stats(report_stats)
            self._report_training(
                step, num_steps, learning_rate, report_stats)
            # Reset accumulation for the next reporting window.
            return StatisticsForAPE()
        else:
            return report_stats
class ReportMgr(ReportMgrBaseForAPE):
    def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
        """
        A report manager that writes statistics on standard output as well as
        (optionally) TensorBoard
        Args:
            report_every(int): Report status every this many sentences
            tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
                The TensorBoard Summary writer to use or None
        """
        super(ReportMgr, self).__init__(report_every, start_time)
        self.tensorboard_writer = tensorboard_writer
    def maybe_log_tensorboard(self, stats, prefix, learning_rate, step):
        """Forward *stats* to TensorBoard if a writer was configured; no-op otherwise."""
        if self.tensorboard_writer is not None:
            stats.log_tensorboard(
                prefix, self.tensorboard_writer, learning_rate, step)
    def _report_training(self, step, num_steps, learning_rate,
                         report_stats):
        """
        See base class method `ReportMgrBase.report_training`.
        """
        # Console output first, then the optional TensorBoard mirror.
        report_stats.output(step, num_steps,
                            learning_rate, self.start_time)
        self.maybe_log_tensorboard(report_stats,
                                   "progress",
                                   learning_rate,
                                   step)
        # Return a fresh accumulator for the next window.
        report_stats = StatisticsForAPE()
        return report_stats
    def _report_step(self, lr, step, train_stats=None, valid_stats=None):
        """
        See base class method `ReportMgrBase.report_step`.
        """
        if train_stats is not None:
            self.log('Train perplexity: %g' % train_stats.ppl())
            self.log('Train accuracy: %g' % train_stats.accuracy())
            self.maybe_log_tensorboard(train_stats,
                                       "train",
                                       lr,
                                       step)
        if valid_stats is not None:
            self.log('Validation perplexity: %g' % valid_stats.ppl())
            self.log('Validation accuracy: %g' % valid_stats.accuracy())
            self.maybe_log_tensorboard(valid_stats,
                                       "valid",
                                       lr,
                                       step)
33772da7966801ce75bdeabde9e7f4a53635cd25 | 743 | py | Python | blog/migrations/0001_initial.py | eadpearce/blog-archiver | a99a24dd7d5c2dcaf08f7b3973853a2049f5c991 | [
"CNRI-Python"
] | null | null | null | blog/migrations/0001_initial.py | eadpearce/blog-archiver | a99a24dd7d5c2dcaf08f7b3973853a2049f5c991 | [
"CNRI-Python"
] | null | null | null | blog/migrations/0001_initial.py | eadpearce/blog-archiver | a99a24dd7d5c2dcaf08f7b3973853a2049f5c991 | [
"CNRI-Python"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-03 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the Blog table from scratch.
    initial = True
    dependencies = [
    ]
    operations = [
        # `uuid` is the primary key (supplied externally, never editable);
        # remaining fields mirror the blog metadata returned by the API.
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('avatar', models.JSONField()),
                ('description', models.TextField()),
                ('name', models.CharField(max_length=100)),
                ('title', models.TextField()),
                ('total_posts', models.IntegerField()),
                ('url', models.CharField(max_length=100)),
                ('uuid', models.CharField(editable=False, max_length=100, primary_key=True, serialize=False)),
            ],
        ),
    ]
b3779909f5a04cd092e6f028decd1ea1bd5c9ebe | 2,075 | py | Python | api/itchatemail.py | strongit/NewhostInit | 7684878186cec6063e96fe2bf8caabbd85b518ee | [
"Apache-2.0"
] | 2 | 2017-03-02T06:40:39.000Z | 2019-02-15T11:28:31.000Z | api/itchatemail.py | strongit/NewhostInit | 7684878186cec6063e96fe2bf8caabbd85b518ee | [
"Apache-2.0"
] | 1 | 2016-07-25T02:19:50.000Z | 2016-07-25T02:55:17.000Z | api/itchatemail.py | strongit/NewhostInit | 7684878186cec6063e96fe2bf8caabbd85b518ee | [
"Apache-2.0"
] | 2 | 2017-02-28T13:06:56.000Z | 2018-07-02T12:40:51.000Z | # -*- coding:utf-8 -*-
"""
https://github.com/littlecodersh/ItChat
根据itchat编写微信报警机器人
"""
import itchat
import email
import imaplib
from email.parser import Parser
from HTMLParser import HTMLParser
reload(sys)
sys.setdefaultencoding('utf8')
imapserver = "your imap and port"
username = "your username"
password = "your passwd"
itchat.auto_login(hotReload=True)
admin = itchat.search_friends(name=u'大叔')
admin = admin[0]['UserName']
def getEmail():
    """Fetch the body of the newest message in the INBOX via IMAP.

    Connects with the module-level ``imapserver``/``username``/``password``
    (plain IMAP on port 143, no TLS) and returns the decoded payload of the
    last message returned by SEARCH ALL.

    NOTE(review): Python-2 era code — ``email.message_from_string`` is fed
    raw bytes and the connection is never logged out/closed.
    """
    conn = imaplib.IMAP4(imapserver,143)
    conn.login(username, password)
    conn.select("INBOX")
    state, data = conn.search(None, 'ALL')
    elist = data[0].split()
    # Fetch the last (most recent) message id from the search result.
    state,data=conn.fetch(elist[len(elist)-1],'(RFC822)')
    msg=email.message_from_string(data[0][1])
    msg_content=msg.get_payload(decode=True)
    return msg_content
class MyHTMLParser(HTMLParser):
    """Collect character data once a styled ``<td>`` cell has been seen.

    The first ``<td>`` tag carrying a ``style`` attribute arms the
    collector; from then on every run of character data is appended to
    ``self.list``. The flag is deliberately never cleared, so text after
    the table cells is captured as well.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.flag = False
        self.list = []

    def handle_starttag(self, tag, attrs):
        # Arm the collector on any <td ... style="..."> start tag.
        if tag == 'td' and any(attr_name == 'style' for attr_name, _ in attrs):
            self.flag = True

    def handle_data(self, data):
        # Record all character data once armed.
        if self.flag:
            self.list.append(data)
def alarmInfo():
    """Fetch the latest alert e-mail, strip its HTML, and reformat it.

    Pulls the newest message via ``getEmail()``, extracts the styled-cell
    text with ``MyHTMLParser``, removes whitespace, then inserts newlines
    before the known report section headers for readable chat output.

    NOTE(review): ``.decode('utf-8')`` only works on a bytes-like value —
    Python-2 era code; under Python 3 the join would already be ``str``.
    """
    getemail = getEmail()
    parser = MyHTMLParser()
    parser.feed(getemail)
    parlist = parser.list
    # Collapse all whitespace first, then re-insert section breaks below.
    emailtext = ''.join(parlist).replace("\t","").replace("\r\n","").replace(" ","")
    emailtext = emailtext.decode('utf-8')\
        .replace('攻击事件报告',' 攻击事件报告如下:')\
        .replace('事件概况:','\n事件概况:\n')\
        .replace('事件解释:', '\n事件解释:\n')\
        .replace('安全处置建议', '\n安全处置建议')\
        .replace('1、临时处理方案:', '\n1、临时处理方案')\
        .replace('2、漏洞处理', '\n2、漏洞处理')\
        .replace('3、事件动态追踪', '\n3、事件动态追踪')
    return emailtext
@itchat.msg_register(itchat.content.TEXT)
def reply_text(msg):
    """WeChat text handler: on the trigger phrase, send the alarm report to admin.

    NOTE(review): at module bottom ``alarmInfo`` is rebound to the string
    result of ``alarmInfo()`` before ``itchat.run()``, so by the time this
    handler fires the global holds the cached report text — confirm that the
    rebinding always happens first, otherwise a function object is sent.
    """
    if u'报警邮件' in msg['Text']:
        itchat.send_msg(alarmInfo, toUserName=admin)
if __name__ == "__main__":
alarmInfo = alarmInfo()
while 1:
time.sleep(60*60*60)
itchat.send_msg(alarmInfo, toUserName=admin)
itchat.run()
| 27.302632 | 84 | 0.631325 |
fce68563ebf79188dcdced2458866b1760856efa | 66,036 | py | Python | sabnzbd/api.py | smilers/sabnzbd | ed0e5bbf9b963113f4962e7aec297266f19d6615 | [
"MIT",
"PSF-2.0",
"0BSD"
] | 2 | 2022-01-04T18:26:12.000Z | 2022-01-04T18:26:13.000Z | sabnzbd/api.py | smilers/sabnzbd | ed0e5bbf9b963113f4962e7aec297266f19d6615 | [
"MIT",
"PSF-2.0",
"0BSD"
] | 19 | 2022-02-08T12:23:06.000Z | 2022-03-27T00:21:00.000Z | sabnzbd/api.py | smilers/sabnzbd | ed0e5bbf9b963113f4962e7aec297266f19d6615 | [
"MIT",
"PSF-2.0",
"0BSD"
] | null | null | null | #!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.api - api
"""
import os
import logging
import re
import gc
import datetime
import time
import json
import getpass
import cherrypy
from threading import Thread
from typing import Tuple, Optional, List, Dict, Any
import sabnzbd
from sabnzbd.constants import (
VALID_ARCHIVES,
VALID_NZB_FILES,
Status,
FORCE_PRIORITY,
NORMAL_PRIORITY,
INTERFACE_PRIORITIES,
KIBI,
MEBI,
GIGI,
)
import sabnzbd.config as config
import sabnzbd.cfg as cfg
from sabnzbd.skintext import SKIN_TEXT
from sabnzbd.utils.diskspeed import diskspeedmeasure
from sabnzbd.utils.internetspeed import internetspeed
from sabnzbd.utils.pathbrowser import folders_at_path
from sabnzbd.utils.getperformance import getcpu, getpystone
from sabnzbd.misc import (
loadavg,
to_units,
int_conv,
time_format,
cat_convert,
create_https_certificates,
calc_age,
opts_to_pp,
)
from sabnzbd.filesystem import diskspace, get_ext, clip_path, remove_all, list_scripts
from sabnzbd.encoding import xml_name, utob
from sabnzbd.utils.servertests import test_nntp_server_dict
from sabnzbd.getipaddress import localipv4, publicipv4, ipv6, addresslookup, active_socks5_proxy
from sabnzbd.database import build_history_info, unpack_history_info, HistoryDB
from sabnzbd.lang import is_rtl
import sabnzbd.notifier
import sabnzbd.emailer
import sabnzbd.sorting
##############################################################################
# API error messages
##############################################################################
_MSG_NO_VALUE = "expects one parameter"
_MSG_NO_VALUE2 = "expects two parameters"
_MSG_INT_VALUE = "expects integer value"
_MSG_NO_ITEM = "item does not exist"
_MSG_NOT_IMPLEMENTED = "not implemented"
_MSG_NO_FILE = "no file given"
_MSG_NO_PATH = "file does not exist"
_MSG_OUTPUT_FORMAT = "Format not supported"
_MSG_NO_SUCH_CONFIG = "Config item does not exist"
_MSG_CONFIG_LOCKED = "Configuration locked"
_MSG_BAD_SERVER_PARMS = "Incorrect server settings"
def api_handler(kwargs: Dict[str, Any]) -> Any:
    """API Dispatcher.

    Normalizes the common request parameters (a repeated query parameter
    arrives as a list — only the first value is kept), then dispatches on
    ``mode`` via ``_api_table``; unknown modes fall through to
    ``_api_undefined``.
    """
    # Clean-up the arguments
    for vr in ("mode", "name", "value", "value2", "value3", "start", "limit", "search"):
        if vr in kwargs and isinstance(kwargs[vr], list):
            kwargs[vr] = kwargs[vr][0]
    mode = kwargs.get("mode", "")
    name = kwargs.get("name", "")
    # Each table entry is (handler, auth-level); only the handler is called here.
    response = _api_table.get(mode, (_api_undefined, 2))[0](name, kwargs)
    return response
def _api_get_config(name, kwargs):
"""API: accepts keyword, section"""
_, data = config.get_dconfig(kwargs.get("section"), kwargs.get("keyword"))
return report(keyword="config", data=data)
def _api_set_config(name, kwargs):
"""API: accepts keyword, section"""
if cfg.configlock():
return report(_MSG_CONFIG_LOCKED)
if kwargs.get("section") == "servers":
kwargs["keyword"] = handle_server_api(kwargs)
elif kwargs.get("section") == "rss":
kwargs["keyword"] = handle_rss_api(kwargs)
elif kwargs.get("section") == "categories":
kwargs["keyword"] = handle_cat_api(kwargs)
else:
res = config.set_config(kwargs)
if not res:
return report(_MSG_NO_SUCH_CONFIG)
config.save_config()
res, data = config.get_dconfig(kwargs.get("section"), kwargs.get("keyword"))
return report(keyword="config", data=data)
def _api_set_config_default(name, kwargs):
"""API: Reset requested config variables back to defaults. Currently only for misc-section"""
if cfg.configlock():
return report(_MSG_CONFIG_LOCKED)
keywords = kwargs.get("keyword", [])
if not isinstance(keywords, list):
keywords = [keywords]
for keyword in keywords:
item = config.get_config("misc", keyword)
if item:
item.set(item.default())
config.save_config()
return report()
def _api_del_config(name, kwargs):
    """API: accepts keyword, section — remove the addressed config item."""
    # Refuse any modification while the configuration is locked.
    if cfg.configlock():
        return report(_MSG_CONFIG_LOCKED)
    # del_from_section returns truthy on a successful delete.
    return report() if del_from_section(kwargs) else report(_MSG_NOT_IMPLEMENTED)
def _api_queue(name, kwargs):
"""API: Dispatcher for mode=queue"""
value = kwargs.get("value", "")
return _api_queue_table.get(name, (_api_queue_default, 2))[0](value, kwargs)
def _api_queue_delete(value, kwargs):
    """API: accepts value(="all" or comma-separated nzo_ids).

    With "all", removes every job matching the optional ``search`` filter;
    otherwise removes the listed jobs, deleting their data when
    ``del_files`` is truthy. Reports the removed nzo_ids.
    """
    if value.lower() == "all":
        removed = sabnzbd.NzbQueue.remove_all(kwargs.get("search"))
        return report(keyword="", data={"status": bool(removed), "nzo_ids": removed})
    elif value:
        items = value.split(",")
        delete_all_data = int_conv(kwargs.get("del_files"))
        removed = sabnzbd.NzbQueue.remove_multiple(items, delete_all_data=delete_all_data)
        return report(keyword="", data={"status": bool(removed), "nzo_ids": removed})
    else:
        return report(_MSG_NO_VALUE)
def _api_queue_delete_nzf(value, kwargs):
"""API: accepts value(=nzo_id), value2(=nzf_ids)"""
nzf_ids = kwargs.get("value2")
if value and nzf_ids:
nzf_ids = nzf_ids.split(",")
removed = sabnzbd.NzbQueue.remove_nzfs(value, nzf_ids)
return report(keyword="", data={"status": bool(removed), "nzf_ids": removed})
else:
return report(_MSG_NO_VALUE2)
def _api_queue_rename(value, kwargs):
"""API: accepts value(=old name), value2(=new name), value3(=password)"""
value2 = kwargs.get("value2")
value3 = kwargs.get("value3")
if value and value2:
ret = sabnzbd.NzbQueue.change_name(value, value2, value3)
return report(keyword="", data={"status": ret})
else:
return report(_MSG_NO_VALUE2)
def _api_queue_change_complete_action(value, kwargs):
"""API: accepts value(=action)"""
sabnzbd.change_queue_complete_action(value)
return report()
def _api_queue_purge(value, kwargs):
removed = sabnzbd.NzbQueue.remove_all(kwargs.get("search"))
return report(keyword="", data={"status": bool(removed), "nzo_ids": removed})
def _api_queue_pause(value, kwargs):
    """API: accepts value(=comma-separated list of nzo_id).

    Pauses the listed jobs; reports which ids were actually handled.
    """
    if value:
        items = value.split(",")
        handled = sabnzbd.NzbQueue.pause_multiple_nzo(items)
    else:
        # No ids supplied: nothing to pause, report failure status.
        handled = False
    return report(keyword="", data={"status": bool(handled), "nzo_ids": handled})
def _api_queue_resume(value, kwargs):
    """API: accepts value(=comma-separated list of nzo_id) and resumes those jobs."""
    # Without any ids there is nothing to resume.
    handled = sabnzbd.NzbQueue.resume_multiple_nzo(value.split(",")) if value else False
    return report(keyword="", data={"status": bool(handled), "nzo_ids": handled})
def _api_queue_priority(value, kwargs):
    """API: accepts value(=nzo_id), value2(=priority).

    Sets the job's priority and reports its new position in the queue
    (-1 signals an unknown job-id). A non-integer priority is rejected
    with _MSG_INT_VALUE.
    """
    value2 = kwargs.get("value2")
    if value and value2:
        # Only guard the integer conversion; the original nested broad
        # `except` also swallowed real errors from set_priority() and
        # mis-reported them as "expects two parameters".
        try:
            priority = int(value2)
        except ValueError:
            return report(_MSG_INT_VALUE)
        # Returns the position in the queue, -1 is incorrect job-id
        pos = sabnzbd.NzbQueue.set_priority(value, priority)
        return report(keyword="position", data=pos)
    else:
        return report(_MSG_NO_VALUE2)
def _api_queue_sort(value, kwargs):
"""API: accepts sort, dir"""
sort = kwargs.get("sort", "")
direction = kwargs.get("dir", "")
if sort:
sabnzbd.NzbQueue.sort_queue(sort, direction)
return report()
else:
return report(_MSG_NO_VALUE2)
def _api_queue_default(value, kwargs):
"""API: accepts sort, dir, start, limit"""
start = int_conv(kwargs.get("start"))
limit = int_conv(kwargs.get("limit"))
search = kwargs.get("search")
nzo_ids = kwargs.get("nzo_ids")
if nzo_ids and not isinstance(nzo_ids, list):
nzo_ids = nzo_ids.split(",")
return report(keyword="queue", data=build_queue(start=start, limit=limit, search=search, nzo_ids=nzo_ids))
def _api_queue_rating(value, kwargs):
"""API: accepts value(=nzo_id), type, setting, detail"""
vote_map = {"up": sabnzbd.Rating.VOTE_UP, "down": sabnzbd.Rating.VOTE_DOWN}
flag_map = {
"spam": sabnzbd.Rating.FLAG_SPAM,
"encrypted": sabnzbd.Rating.FLAG_ENCRYPTED,
"expired": sabnzbd.Rating.FLAG_EXPIRED,
"other": sabnzbd.Rating.FLAG_OTHER,
"comment": sabnzbd.Rating.FLAG_COMMENT,
}
content_type = kwargs.get("type")
setting = kwargs.get("setting")
if value:
try:
video = audio = vote = flag = None
if content_type == "video" and setting != "-":
video = setting
if content_type == "audio" and setting != "-":
audio = setting
if content_type == "vote":
vote = vote_map[setting]
if content_type == "flag":
flag = flag_map[setting]
if cfg.rating_enable():
sabnzbd.Rating.update_user_rating(value, video, audio, vote, flag, kwargs.get("detail"))
return report()
except:
return report(_MSG_BAD_SERVER_PARMS)
else:
return report(_MSG_NO_VALUE)
def _api_options(name, kwargs):
return report(
keyword="options",
data={
"sabyenc": sabnzbd.decoder.SABYENC_ENABLED,
"par2": sabnzbd.newsunpack.PAR2_COMMAND,
"multipar": sabnzbd.newsunpack.MULTIPAR_COMMAND,
"rar": sabnzbd.newsunpack.RAR_COMMAND,
"zip": sabnzbd.newsunpack.ZIP_COMMAND,
"7zip": sabnzbd.newsunpack.SEVEN_COMMAND,
"nice": sabnzbd.newsunpack.NICE_COMMAND,
"ionice": sabnzbd.newsunpack.IONICE_COMMAND,
},
)
def _api_translate(name, kwargs):
"""API: accepts value(=acronym)"""
return report(keyword="value", data=T(kwargs.get("value", "")))
def _api_addfile(name, kwargs):
    """API: accepts name, pp, script, cat, priority, nzbname.

    Adds an uploaded NZB file to the queue. The upload object must expose
    ``file`` and ``filename`` attributes (cherrypy file-upload part);
    reports the resulting nzo_ids.
    """
    # Normal upload will send the nzb in a kw arg called name or nzbfile
    if not name or isinstance(name, str):
        name = kwargs.get("nzbfile", None)
    if hasattr(name, "file") and hasattr(name, "filename") and name.filename:
        cat = kwargs.get("cat")
        xcat = kwargs.get("xcat")
        if not cat and xcat:
            # Indexer category, so do mapping
            cat = cat_convert(xcat)
        # Add the NZB-file
        res, nzo_ids = sabnzbd.add_nzbfile(
            name,
            pp=kwargs.get("pp"),
            script=kwargs.get("script"),
            cat=cat,
            priority=kwargs.get("priority"),
            nzbname=kwargs.get("nzbname"),
            password=kwargs.get("password"),
        )
        # res == 0 signals a successful add
        return report(keyword="", data={"status": res == 0, "nzo_ids": nzo_ids})
    else:
        return report(_MSG_NO_VALUE)
def _api_retry(name, kwargs):
"""API: accepts name, value(=nzo_id), nzbfile(=optional NZB), password (optional)"""
value = kwargs.get("value")
# Normal upload will send the nzb in a kw arg called nzbfile
if name is None or isinstance(name, str):
name = kwargs.get("nzbfile")
password = kwargs.get("password")
password = password[0] if isinstance(password, list) else password
nzo_id = retry_job(value, name, password)
if nzo_id:
return report(keyword="", data={"status": True, "nzo_id": nzo_id})
else:
return report(_MSG_NO_ITEM)
def _api_cancel_pp(name, kwargs):
"""API: accepts name, value(=nzo_id)"""
nzo_id = kwargs.get("value")
if sabnzbd.PostProcessor.cancel_pp(nzo_id):
return report(keyword="", data={"status": True, "nzo_id": nzo_id})
else:
return report(_MSG_NO_ITEM)
def _api_addlocalfile(name, kwargs):
"""API: accepts name, pp, script, cat, priority, nzbname"""
if name:
if os.path.exists(name):
pp = kwargs.get("pp")
script = kwargs.get("script")
cat = kwargs.get("cat")
xcat = kwargs.get("xcat")
if not cat and xcat:
# Indexer category, so do mapping
cat = cat_convert(xcat)
priority = kwargs.get("priority")
nzbname = kwargs.get("nzbname")
password = kwargs.get("password")
if get_ext(name) in VALID_ARCHIVES + VALID_NZB_FILES:
res, nzo_ids = sabnzbd.add_nzbfile(
name,
pp=pp,
script=script,
cat=cat,
priority=priority,
keep=True,
nzbname=nzbname,
password=password,
)
return report(keyword="", data={"status": res == 0, "nzo_ids": nzo_ids})
else:
logging.info('API-call addlocalfile: "%s" is not a supported file', name)
return report(_MSG_NO_FILE)
else:
logging.info('API-call addlocalfile: file "%s" not found', name)
return report(_MSG_NO_PATH)
else:
logging.info("API-call addlocalfile: no file name given")
return report(_MSG_NO_VALUE)
def _api_switch(name, kwargs):
    """API: accepts value(=first id), value2(=second id).

    Swaps two queue entries and reports the first job's new position and
    (possibly changed) priority.
    """
    value = kwargs.get("value")
    value2 = kwargs.get("value2")
    if value and value2:
        pos, prio = sabnzbd.NzbQueue.switch(value, value2)
        # Returns the new position and new priority (if different)
        return report(keyword="result", data={"position": pos, "priority": prio})
    else:
        return report(_MSG_NO_VALUE2)
def _api_change_cat(name, kwargs):
"""API: accepts value(=nzo_id), value2(=category)"""
value = kwargs.get("value")
value2 = kwargs.get("value2")
if value and value2:
nzo_id = value
cat = value2
if cat == "None":
cat = None
result = sabnzbd.NzbQueue.change_cat(nzo_id, cat)
return report(keyword="status", data=bool(result > 0))
else:
return report(_MSG_NO_VALUE)
def _api_change_script(name, kwargs):
"""API: accepts value(=nzo_id), value2(=script)"""
value = kwargs.get("value")
value2 = kwargs.get("value2")
if value and value2:
nzo_id = value
script = value2
if script.lower() == "none":
script = None
result = sabnzbd.NzbQueue.change_script(nzo_id, script)
return report(keyword="status", data=bool(result > 0))
else:
return report(_MSG_NO_VALUE)
def _api_change_opts(name, kwargs):
    """API: accepts value(=nzo_id), value2(=pp) — change a job's post-processing option."""
    nzo_id = kwargs.get("value")
    pp = kwargs.get("value2")
    # Only act when a job id is present and pp is a plain non-negative integer string.
    if nzo_id and pp and pp.isdigit():
        changed = sabnzbd.NzbQueue.change_opts(nzo_id, int(pp))
    else:
        changed = 0
    return report(keyword="status", data=bool(changed > 0))
def _api_fullstatus(name, kwargs):
"""API: full history status"""
status = build_status(
calculate_performance=kwargs.get("calculate_performance", 0), skip_dashboard=kwargs.get("skip_dashboard", 1)
)
return report(keyword="status", data=status)
def _api_status(name, kwargs):
"""API: Dispatcher for mode=status, passing on the value"""
value = kwargs.get("value", "")
return _api_status_table.get(name, (_api_fullstatus, 2))[0](value, kwargs)
def _api_unblock_server(value, kwargs):
"""Unblock a blocked server"""
sabnzbd.Downloader.unblock(value)
return report()
def _api_delete_orphan(path, kwargs):
"""Remove orphaned job"""
if path:
path = os.path.join(cfg.download_dir.get_path(), path)
logging.info("Removing orphaned job %s", path)
remove_all(path, recursive=True)
return report()
else:
return report(_MSG_NO_ITEM)
def _api_delete_all_orphan(value, kwargs):
"""Remove all orphaned jobs"""
paths = sabnzbd.NzbQueue.scan_jobs(all_jobs=False, action=False)
for path in paths:
_api_delete_orphan(path, kwargs)
return report()
def _api_add_orphan(path, kwargs):
"""Add orphaned job"""
if path:
path = os.path.join(cfg.download_dir.get_path(), path)
logging.info("Re-adding orphaned job %s", path)
sabnzbd.NzbQueue.repair_job(path, None, None)
return report()
else:
return report(_MSG_NO_ITEM)
def _api_add_all_orphan(value, kwargs):
"""Add all orphaned jobs"""
paths = sabnzbd.NzbQueue.scan_jobs(all_jobs=False, action=False)
for path in paths:
_api_add_orphan(path, kwargs)
return report()
def _api_history(name, kwargs):
    """API: accepts value(=nzo_id), start, limit, search, nzo_ids.

    Three behaviors:
      * short-circuit: if the client's ``last_history_update`` matches the
        server's, report ``False`` (nothing changed);
      * ``name == "delete"``: remove history entries ("all"/"failed"/
        "completed" specials, or a comma-separated id list), optionally
        deleting files on disk;
      * no name: build and report the history listing with size totals.
    """
    value = kwargs.get("value", "")
    start = int_conv(kwargs.get("start"))
    limit = int_conv(kwargs.get("limit"))
    last_history_update = int_conv(kwargs.get("last_history_update", 0))
    search = kwargs.get("search")
    failed_only = int_conv(kwargs.get("failed_only"))
    categories = kwargs.get("category")
    nzo_ids = kwargs.get("nzo_ids")
    # Do we need to send anything?
    if last_history_update == sabnzbd.LAST_HISTORY_UPDATE:
        return report(keyword="history", data=False)
    # Single category / id may arrive as a bare string; normalize to lists.
    if categories and not isinstance(categories, list):
        categories = [categories]
    if nzo_ids and not isinstance(nzo_ids, list):
        nzo_ids = nzo_ids.split(",")
    if not limit:
        limit = cfg.history_limit()
    if name == "delete":
        special = value.lower()
        del_files = bool(int_conv(kwargs.get("del_files")))
        if special in ("all", "failed", "completed"):
            history_db = sabnzbd.get_db_connection()
            if special in ("all", "failed"):
                if del_files:
                    del_job_files(history_db.get_failed_paths(search))
                history_db.remove_failed(search)
            if special in ("all", "completed"):
                history_db.remove_completed(search)
            sabnzbd.history_updated()
            return report()
        elif value:
            jobs = value.split(",")
            for job in jobs:
                # Jobs still in post-processing are deleted there; finished
                # jobs are removed from the database (and disk).
                path = sabnzbd.PostProcessor.get_path(job)
                if path:
                    sabnzbd.PostProcessor.delete(job, del_files=del_files)
                else:
                    history_db = sabnzbd.get_db_connection()
                    remove_all(history_db.get_path(job), recursive=True)
                    history_db.remove_history(job)
            sabnzbd.history_updated()
            return report()
        else:
            return report(_MSG_NO_VALUE)
    elif not name:
        history = {}
        grand, month, week, day = sabnzbd.BPSMeter.get_sums()
        history["total_size"], history["month_size"], history["week_size"], history["day_size"] = (
            to_units(grand),
            to_units(month),
            to_units(week),
            to_units(day),
        )
        history["slots"], history["noofslots"] = build_history(
            start=start, limit=limit, search=search, failed_only=failed_only, categories=categories, nzo_ids=nzo_ids
        )
        history["last_history_update"] = sabnzbd.LAST_HISTORY_UPDATE
        history["version"] = sabnzbd.__version__
        return report(keyword="history", data=history)
    else:
        return report(_MSG_NOT_IMPLEMENTED)
def _api_get_files(name, kwargs):
    """API: accepts value(=nzo_id)

    Return the per-file breakdown of the given queue job.
    """
    nzo_id = kwargs.get("value")
    if not nzo_id:
        return report(_MSG_NO_VALUE)
    return report(keyword="files", data=build_file_list(nzo_id))
def _api_move_nzf_bulk(name, kwargs):
    """API: accepts name(=top/up/down/bottom), value=(=nzo_id), nzf_ids, size (optional)

    Bulk-move files inside a single job; "up"/"down" additionally require
    a non-zero step size.
    """
    nzo_id = kwargs.get("value")
    nzf_ids = kwargs.get("nzf_ids")
    step = int_conv(kwargs.get("size"))

    if nzo_id and nzf_ids and name:
        direction = name.lower()
        id_list = nzf_ids.split(",")
        moved = False

        if direction == "top":
            sabnzbd.NzbQueue.move_nzf_top_bulk(nzo_id, id_list)
            moved = True
        elif direction == "bottom":
            sabnzbd.NzbQueue.move_nzf_bottom_bulk(nzo_id, id_list)
            moved = True
        elif direction == "up" and step:
            sabnzbd.NzbQueue.move_nzf_up_bulk(nzo_id, id_list, step)
            moved = True
        elif direction == "down" and step:
            sabnzbd.NzbQueue.move_nzf_down_bulk(nzo_id, id_list, step)
            moved = True

        if moved:
            return report(keyword="", data={"status": True, "nzf_ids": id_list})
    return report(_MSG_NO_VALUE)
def _api_addurl(name, kwargs):
    """API: accepts name, output, pp, script, cat, priority, nzbname

    Queue a download from a URL/ID. `name` carries the URL; remaining job
    options come from the keyword arguments.
    """
    pp = kwargs.get("pp")
    script = kwargs.get("script")
    cat = kwargs.get("cat")
    priority = kwargs.get("priority")
    nzbname = kwargs.get("nzbname", "")
    password = kwargs.get("password", "")

    if name:
        nzo_id = sabnzbd.add_url(name, pp, script, cat, priority, nzbname, password)
        # Reporting a list of NZO's, for compatibility with other add-methods
        return report(keyword="", data={"status": True, "nzo_ids": [nzo_id]})
    else:
        # Fixed typo in log message: "recieved" -> "received"
        logging.info("API-call addurl: no URLs received")
        return report(_MSG_NO_VALUE)
def _api_pause(name, kwargs):
    """API: pause downloading; cancels any scheduled resume first."""
    sabnzbd.Scheduler.plan_resume(0)
    sabnzbd.Downloader.pause()
    return report()
def _api_resume(name, kwargs):
    """API: resume downloading; also clears any scheduled resume."""
    sabnzbd.Scheduler.plan_resume(0)
    sabnzbd.unpause_all()
    return report()
def _api_shutdown(name, kwargs):
    """API: shut down the whole program."""
    sabnzbd.shutdown_program()
    return report()
def _api_warnings(name, kwargs):
    """API: accepts name, output

    name == "clear" empties the warning buffer (and returns its content),
    name == "show" or no name returns the current warnings; anything else
    is not implemented.
    """
    if name == "clear":
        return report(keyword="warnings", data=sabnzbd.GUIHANDLER.clear())
    if name and name != "show":
        return report(_MSG_NOT_IMPLEMENTED)
    return report(keyword="warnings", data=sabnzbd.GUIHANDLER.content())
# Byte-string patterns used by _api_showlog to redact sensitive data
# from the downloadable log bundle (log file + sabnzbd.ini).
LOG_API_RE = re.compile(rb"(apikey|api)([=:])[\w]+", re.I)  # apikey=... / api:...
LOG_API_JSON_RE = re.compile(rb"'(apikey|api)': '[\w]+'", re.I)  # 'apikey': '...'
LOG_USER_RE = re.compile(rb"(user|username)\s?=\s?[\S]+", re.I)
LOG_PASS_RE = re.compile(rb"(password)\s?=\s?[\S]+", re.I)
# INI keys whose values must never leave the machine
LOG_INI_HIDE_RE = re.compile(
    rb"(email_pwd|email_account|email_to|rating_api_key|pushover_token|pushover_userkey|pushbullet_apikey|prowl_apikey|growl_password|growl_server|IPv[4|6] address)\s?=\s?[\S]+",
    re.I,
)
# NOTE(review): matches runs of exactly 25 hex chars (api-key-like hashes);
# longer runs are redacted piecewise — confirm 25 is the intended length
LOG_HASH_RE = re.compile(rb"([a-fA-F\d]{25})", re.I)
def _api_showlog(name, kwargs):
    """Fetch the INI and the log-data and add a message at the top

    Returns the sanitized bytes and sets response headers so the browser
    downloads the result as "sabnzbd.log".
    """
    log_data = b"--------------------------------\n\n"
    log_data += b"The log includes a copy of your sabnzbd.ini with\nall usernames, passwords and API-keys removed."
    log_data += b"\n\n--------------------------------\n"

    with open(sabnzbd.LOGFILE, "rb") as f:
        log_data += f.read()

    with open(config.get_filename(), "rb") as f:
        log_data += f.read()

    # We need to remove all passwords/usernames/api-keys
    log_data = LOG_API_RE.sub(b"apikey=<APIKEY>", log_data)
    # Fixed: replacement now mirrors the quoting of the matched
    # "'apikey': '...'" pattern (the opening quote was missing before)
    log_data = LOG_API_JSON_RE.sub(b"'apikey': '<APIKEY>'", log_data)
    log_data = LOG_USER_RE.sub(b"\\g<1>=<USER>", log_data)
    log_data = LOG_PASS_RE.sub(b"password=<PASSWORD>", log_data)
    log_data = LOG_INI_HIDE_RE.sub(b"\\1 = <REMOVED>", log_data)
    log_data = LOG_HASH_RE.sub(b"<HASH>", log_data)

    # Try to replace the username
    try:
        cur_user = getpass.getuser()
        if cur_user:
            log_data = log_data.replace(utob(cur_user), b"<USERNAME>")
    except:
        pass

    # Set headers
    cherrypy.response.headers["Content-Type"] = "application/x-download;charset=utf-8"
    cherrypy.response.headers["Content-Disposition"] = 'attachment;filename="sabnzbd.log"'
    return log_data
def _api_get_cats(name, kwargs):
    """API: return the list of configured categories (without "*")."""
    return report(keyword="categories", data=list_cats(False))
def _api_get_scripts(name, kwargs):
    """API: return the list of available post-processing scripts."""
    return report(keyword="scripts", data=list_scripts())
def _api_version(name, kwargs):
    """API: return the SABnzbd version string."""
    return report(keyword="version", data=sabnzbd.__version__)
def _api_auth(name, kwargs):
    """API: report which authentication the supplied key grants.

    Answers: "None" (keys disabled), "apikey" (none/invalid key supplied,
    key required), "nzbkey", "badkey", or "login".
    """
    auth = "None"
    if not cfg.disable_key():
        auth = "badkey"
        key = kwargs.get("key", "")
        if not key:
            # No key supplied at all: tell caller an API key is required
            auth = "apikey"
        else:
            if key == cfg.nzb_key():
                auth = "nzbkey"
            if key == cfg.api_key():
                auth = "apikey"
            elif cfg.username() and cfg.password():
                # NOTE(review): this elif pairs with the api-key test only,
                # so a valid nzb-key plus configured credentials reports
                # "login", not "nzbkey" — confirm this precedence is intended
                auth = "login"
    return report(keyword="auth", data=auth)
def _api_restart(name, kwargs):
    """API: restart the program."""
    logging.info("Restart requested by API")
    # Do the shutdown async to still send goodbye to browser
    Thread(target=sabnzbd.trigger_restart, kwargs={"timeout": 1}).start()
    return report()
def _api_restart_repair(name, kwargs):
    """API: restart the program and run a queue repair on startup."""
    logging.info("Queue repair requested by API")
    sabnzbd.request_repair()
    # Do the shutdown async to still send goodbye to browser
    Thread(target=sabnzbd.trigger_restart, kwargs={"timeout": 1}).start()
    return report()
def _api_disconnect(name, kwargs):
    """API: drop all news-server connections."""
    sabnzbd.Downloader.disconnect()
    return report()
def _api_osx_icon(name, kwargs):
    """API: accepts value

    Toggle the macOS menu icon: any value other than "0" enables it.
    """
    value = kwargs.get("value", "1").strip()
    cfg.osx_menu.set(value != "0")
    return report()
def _api_rescan(name, kwargs):
    """API: rescan the incomplete folder and add found jobs to the queue."""
    sabnzbd.NzbQueue.scan_jobs(all_jobs=False, action=True)
    return report()
def _api_eval_sort(name, kwargs):
    """API: evaluate sorting expression

    Runs the user-supplied sort expression against a sample job name/title
    and returns the resulting path, or "not implemented" on failure.
    """
    job_name = kwargs.get("name", "")
    expression = kwargs.get("value", "")
    title = kwargs.get("title")
    multipart = kwargs.get("movieextra", "")
    result = sabnzbd.sorting.eval_sort(expression, title, job_name, multipart)
    if result is None:
        return report(_MSG_NOT_IMPLEMENTED)
    return report(keyword="result", data=result)
def _api_watched_now(name, kwargs):
    """API: scan the watched folder immediately."""
    sabnzbd.DirScanner.scan()
    return report()
def _api_resume_pp(name, kwargs):
    """API: resume post-processing."""
    sabnzbd.PostProcessor.paused = False
    return report()
def _api_pause_pp(name, kwargs):
    """API: pause post-processing."""
    sabnzbd.PostProcessor.paused = True
    return report()
def _api_rss_now(name, kwargs):
    """API: trigger an RSS scan of all feeds."""
    # Run RSS scan async, because it can take a long time
    sabnzbd.Scheduler.force_rss()
    return report()
def _api_retry_all(name, kwargs):
    """API: Retry all failed items in History

    Returns the list of nzo_ids created by the retries.
    """
    history_items = sabnzbd.api.build_history()[0]
    retried_ids = [retry_job(item["nzo_id"]) for item in history_items if item["retry"]]
    return report(keyword="status", data=retried_ids)
def _api_reset_quota(name, kwargs):
    """Reset quota left"""
    sabnzbd.BPSMeter.reset_quota(force=True)
    return report()
def _api_test_email(name, kwargs):
    """API: send a test email, return result"""
    logging.info("Sending test email")
    # Dummy post-processing actions shown in the test message body
    pack = {"download": ["action 1", "action 2"], "unpack": ["action 1", "action 2"]}
    # NOTE(review): positional call into emailer.endjob — argument order
    # must match its signature exactly; verify when that signature changes
    res = sabnzbd.emailer.endjob(
        "I had a d\xe8ja vu",
        "unknown",
        True,
        os.path.normpath(os.path.join(cfg.complete_dir.get_path(), "/unknown/I had a d\xe8ja vu")),
        123 * MEBI,
        None,
        pack,
        "my_script",
        "Line 1\nLine 2\nLine 3\nd\xe8ja vu\n",
        0,
        test=kwargs,
    )
    if res == T("Email succeeded"):
        return report()
    return report(error=res)
def _api_test_windows(name, kwargs):
    """API: send a test to Windows, return result"""
    logging.info("Sending test notification")
    res = sabnzbd.notifier.send_windows("SABnzbd", T("Test Notification"), "other")
    return report(error=res)
def _api_test_notif(name, kwargs):
    """API: send a test to Notification Center, return result"""
    logging.info("Sending test notification")
    res = sabnzbd.notifier.send_notification_center("SABnzbd", T("Test Notification"), "other")
    return report(error=res)
def _api_test_osd(name, kwargs):
    """API: send a test OSD notification, return result"""
    logging.info("Sending OSD notification")
    res = sabnzbd.notifier.send_notify_osd("SABnzbd", T("Test Notification"))
    return report(error=res)
def _api_test_prowl(name, kwargs):
    """API: send a test Prowl notification, return result"""
    logging.info("Sending Prowl notification")
    res = sabnzbd.notifier.send_prowl("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
    return report(error=res)
def _api_test_pushover(name, kwargs):
    """API: send a test Pushover notification, return result"""
    logging.info("Sending Pushover notification")
    res = sabnzbd.notifier.send_pushover("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
    return report(error=res)
def _api_test_pushbullet(name, kwargs):
    """API: send a test Pushbullet notification, return result"""
    logging.info("Sending Pushbullet notification")
    res = sabnzbd.notifier.send_pushbullet("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
    return report(error=res)
def _api_test_nscript(name, kwargs):
    """API: execute a test notification script, return result"""
    logging.info("Executing notification script")
    res = sabnzbd.notifier.send_nscript("SABnzbd", T("Test Notification"), "other", force=True, test=kwargs)
    return report(error=res)
def _api_undefined(name, kwargs):
    """Fallback handler for unknown API modes."""
    return report(_MSG_NOT_IMPLEMENTED)
def _api_browse(name, kwargs):
    """Return tree of local path

    compact=1 returns only the path strings (for autocomplete); otherwise
    the full folder entries are returned.
    """
    if kwargs.get("compact") == "1":
        term = kwargs.get("term", "")
        path_list = [entry["path"] for entry in folders_at_path(os.path.dirname(term)) if "path" in entry]
        return report(keyword="", data=path_list)
    show_hidden = kwargs.get("show_hidden_folders")
    return report(keyword="paths", data=folders_at_path(name, True, show_hidden))
def _api_config(name, kwargs):
    """API: Dispatcher for "config" """
    if cfg.configlock():
        return report(_MSG_CONFIG_LOCKED)
    # Unknown sub-commands fall through to _api_config_undefined
    return _api_config_table.get(name, (_api_config_undefined, 2))[0](kwargs)
def _api_config_speedlimit(kwargs):
    """API: accepts value(=speed)

    An empty or missing value removes the limit (treated as "0").
    """
    sabnzbd.Downloader.limit_speed(kwargs.get("value") or "0")
    return report()
def _api_config_get_speedlimit(kwargs):
    """API: return the current download speed limit."""
    return report(keyword="speedlimit", data=sabnzbd.Downloader.get_limit())
def _api_config_set_colorscheme(kwargs):
    """API: set the web interface color scheme; value is required."""
    scheme = kwargs.get("value")
    if not scheme:
        return report(_MSG_NO_VALUE)
    cfg.web_color.set(scheme)
    return report()
def _api_config_set_pause(kwargs):
    """API: accepts value(=pause interval)

    Pause, with automatic resume after the given number of minutes.
    """
    value = kwargs.get("value")
    sabnzbd.Scheduler.plan_resume(int_conv(value))
    return report()
def _api_config_set_apikey(kwargs):
    """API: generate and persist a fresh API key, return the new key."""
    cfg.api_key.set(config.create_api_key())
    config.save_config()
    return report(keyword="apikey", data=cfg.api_key())
def _api_config_set_nzbkey(kwargs):
    """API: generate and persist a fresh NZB key, return the new key."""
    cfg.nzb_key.set(config.create_api_key())
    config.save_config()
    return report(keyword="nzbkey", data=cfg.nzb_key())
def _api_config_regenerate_certs(kwargs):
    """Recreate the self-signed HTTPS certificate/key pair.

    Only acts when both configured paths are still the defaults, so
    user-supplied certificates are never overwritten. Returns the
    True/False result and flags that a restart is required.
    """
    # Make sure we only over-write default locations
    result = False
    # Use equality, not identity: the configured values are strings, and
    # "is" would only compare equal by accident of string interning.
    if (
        sabnzbd.cfg.https_cert() == sabnzbd.cfg.https_cert.default()
        and sabnzbd.cfg.https_key() == sabnzbd.cfg.https_key.default()
    ):
        https_cert = sabnzbd.cfg.https_cert.get_path()
        https_key = sabnzbd.cfg.https_key.get_path()
        result = create_https_certificates(https_cert, https_key)
        sabnzbd.RESTART_REQ = True
    return report(data=result)
def _api_config_test_server(kwargs):
    """API: accepts server-params

    Test connectivity/credentials of an NNTP server definition.
    """
    result, msg = test_nntp_server_dict(kwargs)
    return report(data={"result": result, "message": msg})
def _api_config_undefined(kwargs):
    """Fallback handler for unknown "config" sub-commands."""
    return report(_MSG_NOT_IMPLEMENTED)
def _api_server_stats(name, kwargs):
    """API: return byte totals (overall and per server)."""
    total, month, week, day = sabnzbd.BPSMeter.get_sums()
    stats = {"total": total, "month": month, "week": week, "day": day, "servers": {}}

    for server_id in config.get_servers():
        t, m, w, d, daily, tried, success = sabnzbd.BPSMeter.amounts(server_id)
        stats["servers"][server_id] = {
            "total": t,
            "month": m,
            "week": w,
            "day": d,
            "daily": daily,
            "articles_tried": tried,
            "articles_success": success,
        }

    return report(keyword="", data=stats)
def _api_gc_stats(name, kwargs):
    """Function only intended for internal testing of the memory handling"""
    # Collect before we check
    gc.collect()
    # We cannot create any lists/dicts, as they would create a reference
    return report(data=[str(obj) for obj in gc.get_objects() if isinstance(obj, sabnzbd.nzbstuff.TryList)])
##############################################################################
# Dispatch table for top-level API modes.
# Value is (handler, minimum API access level); see api_level().
_api_table = {
    "server_stats": (_api_server_stats, 2),
    "get_config": (_api_get_config, 3),
    "set_config": (_api_set_config, 3),
    "set_config_default": (_api_set_config_default, 3),
    "del_config": (_api_del_config, 3),
    "queue": (_api_queue, 2),
    "options": (_api_options, 2),
    "translate": (_api_translate, 2),
    "addfile": (_api_addfile, 1),
    "retry": (_api_retry, 2),
    "cancel_pp": (_api_cancel_pp, 2),
    "addlocalfile": (_api_addlocalfile, 1),
    "switch": (_api_switch, 2),
    "change_cat": (_api_change_cat, 2),
    "change_script": (_api_change_script, 2),
    "change_opts": (_api_change_opts, 2),
    "fullstatus": (_api_fullstatus, 2),
    "status": (_api_status, 2),
    "history": (_api_history, 2),
    "get_files": (_api_get_files, 2),
    "move_nzf_bulk": (_api_move_nzf_bulk, 2),
    "addurl": (_api_addurl, 1),
    "addid": (_api_addurl, 1),
    "pause": (_api_pause, 2),
    "resume": (_api_resume, 2),
    "shutdown": (_api_shutdown, 3),
    "warnings": (_api_warnings, 2),
    "showlog": (_api_showlog, 3),
    "config": (_api_config, 2),
    "get_cats": (_api_get_cats, 2),
    "get_scripts": (_api_get_scripts, 2),
    "version": (_api_version, 1),
    "auth": (_api_auth, 1),
    "restart": (_api_restart, 3),
    "restart_repair": (_api_restart_repair, 3),
    "disconnect": (_api_disconnect, 2),
    "osx_icon": (_api_osx_icon, 3),
    "gc_stats": (_api_gc_stats, 3),
    "rescan": (_api_rescan, 2),
    "eval_sort": (_api_eval_sort, 3),
    "watched_now": (_api_watched_now, 2),
    "resume_pp": (_api_resume_pp, 2),
    "pause_pp": (_api_pause_pp, 2),
    "rss_now": (_api_rss_now, 2),
    "browse": (_api_browse, 3),
    "retry_all": (_api_retry_all, 2),
    "reset_quota": (_api_reset_quota, 3),
    "test_email": (_api_test_email, 3),
    "test_windows": (_api_test_windows, 3),
    "test_notif": (_api_test_notif, 3),
    "test_osd": (_api_test_osd, 3),
    "test_pushover": (_api_test_pushover, 3),
    "test_pushbullet": (_api_test_pushbullet, 3),
    "test_prowl": (_api_test_prowl, 3),
    "test_nscript": (_api_test_nscript, 3),
}
# Sub-dispatch table for mode=queue; value is (handler, access level)
_api_queue_table = {
    "delete": (_api_queue_delete, 2),
    "delete_nzf": (_api_queue_delete_nzf, 2),
    "rename": (_api_queue_rename, 2),
    "change_complete_action": (_api_queue_change_complete_action, 2),
    "purge": (_api_queue_purge, 2),
    "pause": (_api_queue_pause, 2),
    "resume": (_api_queue_resume, 2),
    "priority": (_api_queue_priority, 2),
    "sort": (_api_queue_sort, 2),
    "rating": (_api_queue_rating, 2),
}
# Sub-dispatch table for mode=status; value is (handler, access level)
_api_status_table = {
    "unblock_server": (_api_unblock_server, 2),
    "delete_orphan": (_api_delete_orphan, 2),
    "delete_all_orphan": (_api_delete_all_orphan, 2),
    "add_orphan": (_api_add_orphan, 2),
    "add_all_orphan": (_api_add_all_orphan, 2),
}
# Sub-dispatch table for mode=config; value is (handler, access level)
_api_config_table = {
    "speedlimit": (_api_config_speedlimit, 2),
    "set_speedlimit": (_api_config_speedlimit, 2),
    "get_speedlimit": (_api_config_get_speedlimit, 2),
    "set_pause": (_api_config_set_pause, 2),
    "set_colorscheme": (_api_config_set_colorscheme, 3),
    "set_apikey": (_api_config_set_apikey, 3),
    "set_nzbkey": (_api_config_set_nzbkey, 3),
    "regenerate_certs": (_api_config_regenerate_certs, 3),
    "test_server": (_api_config_test_server, 3),
}
def api_level(mode: str, name: str) -> int:
    """Return access level required for this API call"""
    # Sub-commands have their own tables; check those first
    for sub_mode, table in (
        ("queue", _api_queue_table),
        ("status", _api_status_table),
        ("config", _api_config_table),
    ):
        if mode == sub_mode and name in table:
            return table[name][1]
    if mode in _api_table:
        return _api_table[mode][1]
    # It is invalid if it's none of these, but that is handled somewhere
    # else — require the highest level here
    return 4
def report(error: Optional[str] = None, keyword: str = "value", data: Any = None) -> bytes:
    """Report message in json, xml or plain text
    If error is set, only an status/error report is made.
    If no error and no data, only a status report is made.
    Else, a data report is made (optional 'keyword' for outer XML section).

    NOTE(review): the annotation says bytes, but only the json branch
    encodes via utob(); the xml and plain-text branches return str —
    presumably cherrypy accepts both, confirm before tightening.
    """
    # Output format chosen by the "output" request parameter
    output = cherrypy.request.params.get("output")
    if output == "json":
        content = "application/json;charset=UTF-8"
        if error:
            info = {"status": False, "error": error}
        elif data is None:
            info = {"status": True}
        else:
            # An empty keyword means: emit the data as the whole document
            if hasattr(data, "__iter__") and not keyword:
                info = data
            else:
                info = {keyword: data}
        response = utob(json.dumps(info))
    elif output == "xml":
        if not keyword:
            # xml always needs an outer keyword, even when json doesn't
            keyword = "result"
        content = "text/xml"
        xmlmaker = XmlOutputFactory()
        if error:
            status_str = xmlmaker.run("result", {"status": False, "error": error})
        elif data is None:
            status_str = xmlmaker.run("result", {"status": True})
        else:
            status_str = xmlmaker.run(keyword, data)
        response = '<?xml version="1.0" encoding="UTF-8" ?>\n%s\n' % status_str
    else:
        # Plain-text fallback for any other (or missing) output value
        content = "text/plain"
        if error:
            response = "error: %s\n" % error
        elif not data:
            response = "ok\n"
        else:
            response = "%s\n" % str(data)
    cherrypy.response.headers["Content-Type"] = content
    cherrypy.response.headers["Pragma"] = "no-cache"
    return response
class XmlOutputFactory:
    """Recursive xml string maker. Feed it a mixed tuple/dict/item object and will output into an xml string
    Current limitations:
    In Two tiered lists hard-coded name of "item": <cat_list><item> </item></cat_list>
    In Three tiered lists hard-coded name of "slot": <tier1><slot><tier2> </tier2></slot></tier1>
    """

    def __init__(self):
        # Kept for compatibility; the builder itself carries no state per call
        self.__text = ""

    def _tuple(self, keyw, lst):
        # Every tuple element is rendered under the same tag name
        return "".join(self.run(keyw, item) for item in lst)

    def _dict(self, keyw, lst):
        inner = "".join(self.run(key, value) for key, value in lst.items())
        if not keyw:
            return ""
        return "<%s>%s</%s>\n" % (keyw, inner, keyw)

    def _list(self, keyw, lst):
        parts = []
        for element in lst:
            if isinstance(element, dict):
                parts.append(self._dict(plural_to_single(keyw, "slot"), element))
            elif isinstance(element, list):
                parts.append(self._list(plural_to_single(keyw, "list"), element))
            elif isinstance(element, tuple):
                parts.append(self._tuple(plural_to_single(keyw, "tuple"), element))
            else:
                # Scalar leaf: stringify and escape
                tag = plural_to_single(keyw, "item")
                parts.append("<%s>%s</%s>\n" % (tag, xml_name(str(element)), tag))
        if not keyw:
            return ""
        return "<%s>%s</%s>\n" % (keyw, "".join(parts), keyw)

    def run(self, keyw, lst):
        # Dispatch on container type; scalars become a single tagged value
        if isinstance(lst, dict):
            return self._dict(keyw, lst)
        if isinstance(lst, list):
            return self._list(keyw, lst)
        if isinstance(lst, tuple):
            return self._tuple(keyw, lst)
        if keyw:
            return "<%s>%s</%s>\n" % (keyw, xml_name(lst), keyw)
        return ""
def handle_server_api(kwargs):
    """Special handler for API-call 'set_config' [servers]

    Create a new server definition or update an existing one; the server
    is identified by "keyword" (or, failing that, "name").
    """
    name = kwargs.get("keyword") or kwargs.get("name")
    if name:
        server = config.get_config("servers", name)
        if server:
            server.set_dict(kwargs)
            old_name = name
        else:
            config.ConfigServer(name, kwargs)
            old_name = None
        sabnzbd.Downloader.update_server(old_name, name)
    return name
def handle_rss_api(kwargs):
    """Special handler for API-call 'set_config' [rss]

    Create/update an RSS feed definition, then optionally apply an
    add/update/delete filter action via the interface handlers.
    """
    name = kwargs.get("keyword") or kwargs.get("name")
    if not name:
        return None

    feed = config.get_config("rss", name)
    if feed:
        feed.set_dict(kwargs)
    else:
        config.ConfigRSS(name, kwargs)

    action = kwargs.get("filter_action")
    if action in ("add", "update"):
        method_name = "internal_upd_rss_filter"
    elif action == "delete":
        method_name = "internal_del_rss_filter"
    else:
        method_name = None

    if method_name:
        # Use the general function, but catch the redirect-raise
        try:
            kwargs["feed"] = name
            getattr(sabnzbd.interface.ConfigRss("/"), method_name)(**kwargs)
        except cherrypy.HTTPRedirect:
            pass

    return name
def handle_cat_api(kwargs):
    """Special handler for API-call 'set_config' [categories]

    Create or update a category; the name is taken from "keyword"
    (or "name") and lower-cased.
    """
    name = kwargs.get("keyword") or kwargs.get("name")
    if not name:
        return None
    name = name.lower()

    existing = config.get_config("categories", name)
    if existing:
        existing.set_dict(kwargs)
    else:
        config.ConfigCat(name, kwargs)
    return name
def build_status(calculate_performance: bool = False, skip_dashboard: bool = False) -> Dict[str, Any]:
    """Build the data for the (full)status API/page: header info, optional
    performance measurements, dashboard connectivity info and the
    per-server connection state."""
    # build up header full of basic information
    info = build_header(trans_functions=False)

    info["logfile"] = sabnzbd.LOGFILE
    info["weblogfile"] = sabnzbd.WEBLOGFILE
    info["loglevel"] = str(cfg.log_level())
    info["folders"] = sabnzbd.NzbQueue.scan_jobs(all_jobs=False, action=False)
    info["configfn"] = config.get_filename()
    info["warnings"] = sabnzbd.GUIHANDLER.content()

    # Calculate performance measures, if requested
    if int_conv(calculate_performance):
        # PyStone
        sabnzbd.PYSTONE_SCORE = getpystone()

        # Diskspeed of download (aka incomplete) and complete directory:
        sabnzbd.DOWNLOAD_DIR_SPEED = round(diskspeedmeasure(sabnzbd.cfg.download_dir.get_path()), 1)
        sabnzbd.COMPLETE_DIR_SPEED = round(diskspeedmeasure(sabnzbd.cfg.complete_dir.get_path()), 1)

        # Internet bandwidth
        sabnzbd.INTERNET_BANDWIDTH = round(internetspeed(), 1)

    # Dashboard: Speed of System
    info["cpumodel"] = getcpu()
    info["pystone"] = sabnzbd.PYSTONE_SCORE

    # Dashboard: Speed of Download directory:
    info["downloaddir"] = cfg.download_dir.get_clipped_path()
    info["downloaddirspeed"] = sabnzbd.DOWNLOAD_DIR_SPEED

    # Dashboard: Speed of Complete directory:
    info["completedir"] = cfg.complete_dir.get_clipped_path()
    info["completedirspeed"] = sabnzbd.COMPLETE_DIR_SPEED

    # Dashboard: Measured download-speed
    info["internetbandwidth"] = sabnzbd.INTERNET_BANDWIDTH

    # Dashboard: Connection information
    if not int_conv(skip_dashboard):
        info["active_socks5_proxy"] = active_socks5_proxy()
        info["localipv4"] = localipv4()
        info["publicipv4"] = publicipv4()
        info["ipv6"] = ipv6()
        # Dashboard: DNS-check
        try:
            addresslookup(cfg.selftest_host())
            info["dnslookup"] = "OK"
        except:
            info["dnslookup"] = None

    info["servers"] = []
    # Servers-list could be modified during iteration, so we need a copy
    for server in sabnzbd.Downloader.servers[:]:
        connected = sum(nw.connected for nw in server.idle_threads[:])
        serverconnections = []
        for nw in server.busy_threads[:]:
            if nw.connected:
                connected += 1
            if nw.article:
                serverconnections.append(
                    {
                        "thrdnum": nw.thrdnum,
                        "art_name": nw.article.article,
                        "nzf_name": nw.article.nzf.filename,
                        "nzo_name": nw.article.nzf.nzo.final_name,
                    }
                )

        # Show the warning or "resolving" text instead of the count
        # when the server is not usable yet
        if server.warning and not (connected or server.errormsg):
            connected = server.warning

        if server.request and not server.info:
            connected = T(" Resolving address").replace(" ", "")

        server_info = {
            "servername": server.displayname,
            "serveractiveconn": connected,
            "servertotalconn": server.threads,
            "serverconnections": serverconnections,
            "serverssl": server.ssl,
            "serversslinfo": server.ssl_info,
            "serveractive": server.active,
            "servererror": server.errormsg,
            "serverpriority": server.priority,
            "serveroptional": server.optional,
            "serverbps": to_units(sabnzbd.BPSMeter.server_bps.get(server.id, 0)),
        }
        info["servers"].append(server_info)

    return info
def build_queue(start: int = 0, limit: int = 0, search: Optional[str] = None, nzo_ids: Optional[List[str]] = None):
    """Build the queue API output: header info plus one "slot" dict per
    queued job, including progress, ETA and rating data."""
    info = build_header(for_template=False)
    qnfo = sabnzbd.NzbQueue.queue_info(search=search, nzo_ids=nzo_ids, start=start, limit=limit)

    info["kbpersec"] = "%.2f" % (sabnzbd.BPSMeter.bps / KIBI)
    info["speed"] = to_units(sabnzbd.BPSMeter.bps)
    info["mbleft"] = "%.2f" % (qnfo.bytes_left / MEBI)
    info["mb"] = "%.2f" % (qnfo.bytes / MEBI)
    info["sizeleft"] = to_units(qnfo.bytes_left, "B")
    info["size"] = to_units(qnfo.bytes, "B")
    info["noofslots_total"] = qnfo.q_fullsize

    # Overall queue state
    if sabnzbd.Downloader.paused or sabnzbd.Downloader.paused_for_postproc:
        status = Status.PAUSED
    elif sabnzbd.BPSMeter.bps > 0:
        status = Status.DOWNLOADING
    else:
        status = Status.IDLE
    info["status"] = status
    info["timeleft"] = calc_timeleft(qnfo.bytes_left, sabnzbd.BPSMeter.bps)

    datestart = datetime.datetime.now()
    try:
        # Division by bps raises when bps is 0 -> "unknown"
        datefinish = datestart + datetime.timedelta(seconds=qnfo.bytes_left / sabnzbd.BPSMeter.bps)
        # new eta format: 16:00 Fri 07 Feb
        info["eta"] = datefinish.strftime(time_format("%H:%M %a %d %b"))
    except:
        info["eta"] = T("unknown")

    info["refresh_rate"] = str(cfg.refresh_rate()) if cfg.refresh_rate() > 0 else ""
    info["interface_settings"] = cfg.interface_settings()
    info["scripts"] = list_scripts()
    info["categories"] = list_cats()
    info["rating_enable"] = bool(cfg.rating_enable())
    info["noofslots"] = qnfo.q_fullsize
    info["start"] = start
    info["limit"] = limit
    info["finish"] = info["start"] + info["limit"]

    n = start
    # Running total used to compute the cumulative per-job ETA
    running_bytes = qnfo.bytes_left_previous_page
    slotinfo = []
    for pnfo in qnfo.list:
        nzo_id = pnfo.nzo_id
        bytesleft = pnfo.bytes_left
        bytes_total = pnfo.bytes
        average_date = pnfo.avg_date
        # Job still inside the configured propagation delay window?
        is_propagating = (pnfo.avg_stamp + float(cfg.propagation_delay() * 60)) > time.time()
        status = pnfo.status
        priority = pnfo.priority
        mbleft = bytesleft / MEBI
        mb = bytes_total / MEBI

        slot = {}
        slot["index"] = n
        slot["nzo_id"] = str(nzo_id)
        slot["unpackopts"] = str(opts_to_pp(pnfo.repair, pnfo.unpack, pnfo.delete))
        slot["priority"] = INTERFACE_PRIORITIES.get(priority, NORMAL_PRIORITY)
        slot["script"] = pnfo.script if pnfo.script else "None"
        slot["filename"] = pnfo.filename
        slot["labels"] = pnfo.labels
        slot["password"] = pnfo.password if pnfo.password else ""
        slot["cat"] = pnfo.category if pnfo.category else "None"
        slot["mbleft"] = "%.2f" % mbleft
        slot["mb"] = "%.2f" % mb
        slot["size"] = to_units(bytes_total, "B")
        slot["sizeleft"] = to_units(bytesleft, "B")
        slot["percentage"] = "%s" % (int(((mb - mbleft) / mb) * 100)) if mb != mbleft else "0"
        slot["mbmissing"] = "%.2f" % (pnfo.bytes_missing / MEBI)
        slot["direct_unpack"] = pnfo.direct_unpack

        if not sabnzbd.Downloader.paused and status not in (Status.PAUSED, Status.FETCHING, Status.GRABBING):
            if is_propagating:
                slot["status"] = Status.PROP
            elif status == Status.CHECKING:
                slot["status"] = Status.CHECKING
            else:
                slot["status"] = Status.DOWNLOADING
        else:
            # Ensure compatibility of API status
            if status == Status.DELETED or priority == FORCE_PRIORITY:
                status = Status.DOWNLOADING
            slot["status"] = "%s" % status

        # Jobs that are not actively downloading get no ETA,
        # except forced jobs which always download
        if (
            sabnzbd.Downloader.paused
            or sabnzbd.Downloader.paused_for_postproc
            or is_propagating
            or status not in (Status.DOWNLOADING, Status.FETCHING, Status.QUEUED)
        ) and priority != FORCE_PRIORITY:
            slot["timeleft"] = "0:00:00"
            slot["eta"] = "unknown"
        else:
            running_bytes += bytesleft
            slot["timeleft"] = calc_timeleft(running_bytes, sabnzbd.BPSMeter.bps)
            try:
                datestart = datestart + datetime.timedelta(seconds=bytesleft / sabnzbd.BPSMeter.bps)
                # new eta format: 16:00 Fri 07 Feb
                slot["eta"] = datestart.strftime(time_format("%H:%M %a %d %b"))
            except:
                # Division by zero bps: keep datestart unchanged
                datestart = datestart
                slot["eta"] = "unknown"

        # Do not show age when it's not known
        if average_date.year < 2000:
            slot["avg_age"] = "-"
        else:
            slot["avg_age"] = calc_age(average_date)

        rating = sabnzbd.Rating.get_rating_by_nzo(nzo_id)
        slot["has_rating"] = rating is not None
        if rating:
            slot["rating_avg_video"] = rating.avg_video
            slot["rating_avg_audio"] = rating.avg_audio

        slotinfo.append(slot)
        n += 1

    if slotinfo:
        info["slots"] = slotinfo
    else:
        info["slots"] = []

    return info
def fast_queue() -> Tuple[bool, int, float, str]:
    """Return paused, bytes_left, bpsnow, time_left

    Lightweight queue summary without building the full slot list.
    """
    # Consistency fix: use sabnzbd.NzbQueue like the rest of this module
    # (the doubled "sabnzbd.sabnzbd" only resolved via an accidental
    # self-import attribute on the package)
    bytes_left = sabnzbd.NzbQueue.remaining()
    paused = sabnzbd.Downloader.paused
    bpsnow = sabnzbd.BPSMeter.bps
    time_left = calc_timeleft(bytes_left, bpsnow)
    return paused, bytes_left, bpsnow, time_left
def build_file_list(nzo_id: str):
    """Build file lists for specified job

    Returns one dict per file in the job, grouped as finished, active and
    queued (queued entries additionally carry the par-set name in "set").
    Returns an empty list when the job is unknown.
    """

    def _file_entry(nzf, status):
        # One API dict per file; "bytes" keeps the historical "%.2f" format
        return {
            "filename": nzf.filename,
            "mbleft": "%.2f" % (nzf.bytes_left / MEBI),
            "mb": "%.2f" % (nzf.bytes / MEBI),
            "bytes": "%.2f" % nzf.bytes,
            "age": calc_age(nzf.date),
            "nzf_id": nzf.nzf_id,
            "status": status,
        }

    jobs = []
    # Consistency fix: sabnzbd.NzbQueue, not the accidental sabnzbd.sabnzbd
    nzo = sabnzbd.NzbQueue.get_nzo(nzo_id)
    if nzo:
        pnfo = nzo.gather_info(full=True)
        jobs.extend(_file_entry(nzf, "finished") for nzf in pnfo.finished_files)
        jobs.extend(_file_entry(nzf, "active") for nzf in pnfo.active_files)
        for nzf in pnfo.queued_files:
            entry = _file_entry(nzf, "queued")
            entry["set"] = nzf.setname
            jobs.append(entry)
    return jobs
def retry_job(job, new_nzb=None, password=None):
    """Re enter failed job in the download queue

    Returns the new nzo_id, or None when nothing could be re-queued.
    """
    if not job:
        return None

    history_db = sabnzbd.get_db_connection()
    futuretype, url, pp, script, cat = history_db.get_other(job)
    if futuretype:
        # URL-based job: fetch it again
        nzo_id = sabnzbd.add_url(url, pp, script, cat)
    else:
        # Disk-based job: repair from the stored path
        nzo_id = sabnzbd.NzbQueue.repair_job(history_db.get_path(job), new_nzb, password)

    if nzo_id:
        # Only remove from history if we repaired something
        history_db.remove_history(job)
        return nzo_id
    return None
def del_job_files(job_paths):
    """Remove files of each path in the list"""
    for path in job_paths:
        # Safety check: only delete paths that live inside the download dir
        if path and clip_path(path).lower().startswith(cfg.download_dir.get_clipped_path().lower()):
            remove_all(path, recursive=True)
def Tspec(txt):
    """Translate special terms

    Only "None" and "Default"/"*" are translated; any other value is
    passed through unchanged.
    """
    if txt == "None":
        return T("None")
    if txt in ("Default", "*"):
        return T("Default")
    return txt
# Stores pre-translated, quote-escaped skin texts (filled by Ttemplate,
# cleared by clear_trans_cache)
_SKIN_CACHE = {}
def Ttemplate(txt):
    """Translation function for Skin texts
    This special is to be used in interface.py for template processing
    to be passed for the $T function: so { ..., 'T' : Ttemplate, ...}
    """
    global _SKIN_CACHE
    if txt in _SKIN_CACHE:
        return _SKIN_CACHE[txt]
    # Escape the quote characters as HTML entities so the result is
    # JS/JSON-string-safe (the replacement literals had been mangled).
    # Saving it in dictionary is 20x faster on next look-up
    tra = T(SKIN_TEXT.get(txt, txt)).replace('"', "&quot;").replace("'", "&apos;")
    _SKIN_CACHE[txt] = tra
    return tra
def clear_trans_cache():
    """Clean cache for skin translations"""
    global _SKIN_CACHE
    _SKIN_CACHE = {}
    # Marks the web UI usable again after a language/skin change
    sabnzbd.WEBUI_READY = True
def build_header(webdir: str = "", for_template: bool = True, trans_functions: bool = True) -> Dict:
"""Build the basic header"""
speed_limit = sabnzbd.Downloader.get_limit()
if speed_limit <= 0:
speed_limit = 100
speed_limit_abs = sabnzbd.Downloader.get_limit_abs()
if speed_limit_abs <= 0:
speed_limit_abs = ""
diskspace_info = diskspace()
header = {}
# We don't output everything for API
if for_template:
# These are functions, and cause problems for JSON
if trans_functions:
header["T"] = Ttemplate
header["Tspec"] = Tspec
header["uptime"] = calc_age(sabnzbd.START)
header["color_scheme"] = sabnzbd.WEB_COLOR or ""
header["helpuri"] = "https://sabnzbd.org/wiki/"
header["pid"] = os.getpid()
header["active_lang"] = cfg.language()
header["rtl"] = is_rtl(header["active_lang"])
header["my_lcldata"] = clip_path(sabnzbd.DIR_LCLDATA)
header["my_home"] = clip_path(sabnzbd.DIR_HOME)
header["webdir"] = webdir or sabnzbd.WEB_DIR
header["url_base"] = cfg.url_base()
header["nt"] = sabnzbd.WIN32
header["darwin"] = sabnzbd.DARWIN
header["power_options"] = sabnzbd.WIN32 or sabnzbd.DARWIN or sabnzbd.LINUX_POWER
header["pp_pause_event"] = sabnzbd.Scheduler.pp_pause_event
header["apikey"] = cfg.api_key()
header["new_release"], header["new_rel_url"] = sabnzbd.NEW_VERSION
header["version"] = sabnzbd.__version__
header["paused"] = bool(sabnzbd.Downloader.paused or sabnzbd.Downloader.paused_for_postproc)
header["pause_int"] = sabnzbd.Scheduler.pause_int()
header["paused_all"] = sabnzbd.PAUSED_ALL
header["diskspace1"] = "%.2f" % diskspace_info["download_dir"][1]
header["diskspace2"] = "%.2f" % diskspace_info["complete_dir"][1]
header["diskspace1_norm"] = to_units(diskspace_info["download_dir"][1] * GIGI)
header["diskspace2_norm"] = to_units(diskspace_info["complete_dir"][1] * GIGI)
header["diskspacetotal1"] = "%.2f" % diskspace_info["download_dir"][0]
header["diskspacetotal2"] = "%.2f" % diskspace_info["complete_dir"][0]
header["loadavg"] = loadavg()
header["speedlimit"] = "{1:0.{0}f}".format(int(speed_limit % 1 > 0), speed_limit)
header["speedlimit_abs"] = "%s" % speed_limit_abs
header["have_warnings"] = str(sabnzbd.GUIHANDLER.count())
header["finishaction"] = sabnzbd.QUEUECOMPLETE
header["quota"] = to_units(sabnzbd.BPSMeter.quota)
header["have_quota"] = bool(sabnzbd.BPSMeter.quota > 0.0)
header["left_quota"] = to_units(sabnzbd.BPSMeter.left)
anfo = sabnzbd.ArticleCache.cache_info()
header["cache_art"] = str(anfo.article_sum)
header["cache_size"] = to_units(anfo.cache_size, "B")
header["cache_max"] = str(anfo.cache_limit)
return header
def build_history(
    start: int = 0,
    limit: int = 0,
    search: Optional[str] = None,
    failed_only: int = 0,
    categories: Optional[List[str]] = None,
    nzo_ids: Optional[List[str]] = None,
):
    """Combine the jobs still in post-processing and the database history.

    Post-processing items are shown first; ``start``/``limit`` paginate over
    the combined list, so the database request is shrunk by however many
    post-processing items land on the current page.

    Returns a tuple ``(items, total_items)`` where ``total_items`` counts the
    database matches plus the whole post-processing queue.
    """
    if not limit:
        limit = 1000000
    # Grab any items that are active or queued in postproc
    postproc_queue = sabnzbd.PostProcessor.get_queue()
    # Filter out any items that don't match the search term or category
    if postproc_queue:
        # It would be more efficient to iterate only once, but we accept the penalty for code clarity
        if isinstance(categories, list):
            postproc_queue = [nzo for nzo in postproc_queue if nzo.cat in categories]
        if isinstance(search, str):
            # Replace * with .* and ' ' with .
            search_text = search.strip().replace("*", ".*").replace(" ", ".*") + ".*?"
            try:
                re_search = re.compile(search_text, re.I)
                postproc_queue = [nzo for nzo in postproc_queue if re_search.search(nzo.final_name)]
            except:
                # Bad user pattern: keep the unfiltered queue rather than fail the page
                logging.error(T("Failed to compile regex for search term: %s"), search_text)
        if nzo_ids:
            postproc_queue = [nzo for nzo in postproc_queue if nzo.nzo_id in nzo_ids]
    # Multi-page support for postproc items
    postproc_queue_size = len(postproc_queue)
    if start > postproc_queue_size:
        # On a page where we shouldn't show postproc items
        postproc_queue = []
        database_history_limit = limit
    else:
        try:
            if limit:
                postproc_queue = postproc_queue[start : start + limit]
            else:
                postproc_queue = postproc_queue[start:]
        except:
            pass
        # Remove the amount of postproc items from the db request for history items
        database_history_limit = max(limit - len(postproc_queue), 0)
    database_history_start = max(start - postproc_queue_size, 0)
    # Acquire the db instance
    try:
        history_db = sabnzbd.get_db_connection()
        close_db = False
    except:
        # Required for repairs at startup because Cherrypy isn't active yet
        history_db = HistoryDB()
        close_db = True
    # Fetch history items
    if not database_history_limit:
        # The page is entirely filled by postproc items: fetch a single row
        # only to obtain the matching total, then discard the row itself.
        items, total_items = history_db.fetch_history(
            database_history_start, 1, search, failed_only, categories, nzo_ids
        )
        items = []
    else:
        items, total_items = history_db.fetch_history(
            database_history_start, database_history_limit, search, failed_only, categories, nzo_ids
        )
    # Reverse the queue to add items to the top (faster than insert)
    items.reverse()
    # Add the postproc items to the top of the history
    items = get_active_history(postproc_queue, items)
    # Un-reverse the queue
    items.reverse()
    # Global check if rating is enabled
    rating_enabled = cfg.rating_enable()
    for item in items:
        item["size"] = to_units(item["bytes"], "B")
        if "loaded" not in item:
            item["loaded"] = False
        path = item.get("path", "")
        # Retry only makes sense for failed jobs whose files still exist on disk
        item["retry"] = int_conv(item.get("status") == Status.FAILED and path and os.path.exists(path))
        # Retry of failed URL-fetch
        if item["report"] == "future":
            item["retry"] = True
        if rating_enabled:
            rating = sabnzbd.Rating.get_rating_by_nzo(item["nzo_id"])
            item["has_rating"] = rating is not None
            if rating:
                item["rating_avg_video"] = rating.avg_video
                item["rating_avg_audio"] = rating.avg_audio
                item["rating_avg_vote_up"] = rating.avg_vote_up
                item["rating_avg_vote_down"] = rating.avg_vote_down
                item["rating_user_video"] = rating.user_video
                item["rating_user_audio"] = rating.user_audio
                item["rating_user_vote"] = rating.user_vote
    total_items += postproc_queue_size
    if close_db:
        history_db.close()
    return items, total_items
def get_active_history(queue, items):
    """Get the currently in progress and active history queue.

    Converts each NZO in the post-processing ``queue`` into a history-item
    dict shaped like the database rows and appends it to ``items``, which is
    also returned.
    """
    for nzo in queue:
        item = {}
        # NOTE: this unpack must match the exact field order produced by
        # build_history_info(); the two `_` placeholders skip unused fields.
        (
            item["completed"],
            item["name"],
            item["nzb_name"],
            item["category"],
            item["pp"],
            item["script"],
            item["report"],
            item["url"],
            item["status"],
            item["nzo_id"],
            item["storage"],
            item["path"],
            item["script_log"],
            item["script_line"],
            item["download_time"],
            item["postproc_time"],
            item["stage_log"],
            item["downloaded"],
            item["fail_message"],
            item["url_info"],
            item["bytes"],
            _,
            _,
            item["password"],
        ) = build_history_info(nzo)
        item["action_line"] = nzo.action_line
        item = unpack_history_info(item)
        # Mark whether post-processing is currently running for this job
        item["loaded"] = nzo.pp_active
        if item["bytes"]:
            item["size"] = to_units(item["bytes"], "B")
        else:
            item["size"] = ""
        items.append(item)
    return items
def calc_timeleft(bytesleft, bps):
    """Calculate the time left in the format H:MM:SS (or D:HH:MM:SS for >1 day).

    Args:
        bytesleft: remaining bytes to download.
        bps: current speed in bytes per second.

    Returns:
        "0:00:00" for non-positive input (including zero/negative speed,
        which previously only worked by catching ZeroDivisionError), or the
        formatted remaining time otherwise.
    """
    try:
        # Explicitly guard bps <= 0 instead of relying on an exception
        if bytesleft <= 0 or bps <= 0:
            return "0:00:00"
        totalseconds = int(bytesleft / bps)
        minutes, seconds = divmod(totalseconds, 60)
        hours, minutes = divmod(minutes, 60)
        days, hours = divmod(hours, 24)
        if days > 0:
            # Hours are zero-padded only when a day component is shown
            return "%d:%02d:%02d:%02d" % (days, hours, minutes, seconds)
        return "%d:%02d:%02d" % (hours, minutes, seconds)
    except (TypeError, ValueError):
        # Non-numeric input: return the safe default instead of crashing
        return "0:00:00"
def list_cats(default=True):
    """Return list of (ordered) categories,
    when default==False use '*' for Default category
    """
    lst = [cat["name"] for cat in config.get_ordered_categories()]
    if default:
        # Guard the remove: previously a missing '*' entry raised ValueError
        try:
            lst.remove("*")
        except ValueError:
            pass
        else:
            # Only substitute "Default" when '*' was actually present
            lst.insert(0, "Default")
    return lst
# Maps plural API section names to the singular keyword used for single items.
_PLURAL_TO_SINGLE = {
    "categories": "category",
    "servers": "server",
    "rss": "feed",
    "scripts": "script",
    "warnings": "warning",
    "files": "file",
    "jobs": "job",
}


def plural_to_single(kw, def_kw=""):
    """Return the singular form of plural keyword `kw`, or `def_kw` if unknown."""
    # dict.get replaces the previous try/except KeyError with the same semantics
    return _PLURAL_TO_SINGLE.get(kw, def_kw)
def del_from_section(kwargs):
    """Remove keyword in section.

    Expects ``kwargs`` with "section" and "keyword" entries. Only the
    "servers", "rss" and "categories" sections may be modified; returns True
    for those sections (even when the keyword was missing), False otherwise.
    """
    section = kwargs.get("section", "")
    if section in ("servers", "rss", "categories"):
        keyword = kwargs.get("keyword")
        if keyword:
            item = config.get_config(section, keyword)
            if item:
                item.delete()
                del item
                config.save_config()
                if section == "servers":
                    # Tell the downloader this server is gone
                    sabnzbd.Downloader.update_server(keyword, None)
        return True
    else:
        return False
def history_remove_failed():
    """Remove all failed jobs from history, including files"""
    logging.info("Scheduled removal of all failed jobs")
    with HistoryDB() as history_db:
        # Delete the on-disk job files first, then the database rows
        del_job_files(history_db.get_failed_paths())
        history_db.remove_failed()


def history_remove_completed():
    """Remove all completed jobs from history"""
    logging.info("Scheduled removal of all completed jobs")
    with HistoryDB() as history_db:
        history_db.remove_completed()
| 33.864615 | 178 | 0.616527 |
08dc95f8957eb3c547db756c792694967388e8a6 | 2,178 | py | Python | passbook/providers/saml/forms.py | fossabot/passbook | cba17f6659404445ac3025f11657d89368cc8b4f | [
"MIT"
] | null | null | null | passbook/providers/saml/forms.py | fossabot/passbook | cba17f6659404445ac3025f11657d89368cc8b4f | [
"MIT"
] | null | null | null | passbook/providers/saml/forms.py | fossabot/passbook | cba17f6659404445ac3025f11657d89368cc8b4f | [
"MIT"
] | null | null | null | """passbook SAML IDP Forms"""
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.utils.translation import gettext as _
from passbook.providers.saml.models import (
SAMLPropertyMapping,
SAMLProvider,
get_provider_choices,
)
from passbook.providers.saml.utils.cert import CertificateBuilder
class SAMLProviderForm(forms.ModelForm):
    """SAML Provider form"""

    # Dropdown of available processor classes, resolved at import time
    processor_path = forms.ChoiceField(
        choices=get_provider_choices(), label="Processor"
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pre-fill the signing fields with a freshly generated
        # certificate/key pair so a new provider can sign out of the box.
        builder = CertificateBuilder()
        builder.build()
        self.fields["signing_cert"].initial = builder.certificate
        self.fields["signing_key"].initial = builder.private_key

    class Meta:
        model = SAMLProvider
        fields = [
            "name",
            "processor_path",
            "acs_url",
            "audience",
            "issuer",
            "assertion_valid_not_before",
            "assertion_valid_not_on_or_after",
            "session_valid_not_on_or_after",
            "property_mappings",
            "digest_algorithm",
            "signature_algorithm",
            "signing",
            "signing_cert",
            "signing_key",
        ]
        widgets = {
            "name": forms.TextInput(),
            "audience": forms.TextInput(),
            "issuer": forms.TextInput(),
            "assertion_valid_not_before": forms.TextInput(),
            "assertion_valid_not_on_or_after": forms.TextInput(),
            "session_valid_not_on_or_after": forms.TextInput(),
            "property_mappings": FilteredSelectMultiple(_("Property Mappings"), False),
        }
class SAMLPropertyMappingForm(forms.ModelForm):
    """SAML Property Mapping form"""

    # Custom template used to render this form
    template_name = "saml/idp/property_mapping_form.html"

    class Meta:
        model = SAMLPropertyMapping
        fields = ["name", "saml_name", "friendly_name", "expression"]
        widgets = {
            "name": forms.TextInput(),
            "saml_name": forms.TextInput(),
            "friendly_name": forms.TextInput(),
        }
| 29.835616 | 87 | 0.615702 |
1af38fdb37df0ad2366a990f6a76aaa8c7772228 | 1,358 | py | Python | remote/simulator.py | Ganben/iot-bi | f85c2402a4222ee37c6eb24632f7979b00f0ac24 | [
"Apache-2.0"
] | null | null | null | remote/simulator.py | Ganben/iot-bi | f85c2402a4222ee37c6eb24632f7979b00f0ac24 | [
"Apache-2.0"
] | 9 | 2020-09-05T04:55:53.000Z | 2022-03-02T02:50:02.000Z | remote/simulator.py | Ganben/iot-bi | f85c2402a4222ee37c6eb24632f7979b00f0ac24 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# for simulation of the iot remote unit
# ganben
import sys
import struct
from paho.mqtt import (
client,
publish,
)
# Callback wiring for the local websocket MQTT broker.
def on_connect(client, userdata, flag_dict, rc):
    """paho-mqtt connect callback: report the broker's result code."""
    message = "connected with result code: %s" % str(rc)
    print(message)
    # client.subscribe("shop1")
def on_message(client, userdata, msg):
    """paho-mqtt message callback: echo topic and payload to stdout."""
    line = "%s:%s" % (msg.topic, msg.payload)
    print(line)
# Module-level MQTT client using websocket transport; the callbacks defined
# above are attached here.  The connect/loop calls below are left disabled —
# the publishing helpers in this script use one-shot publish.single() instead.
c = client.Client(transport="websockets")
c.on_connect = on_connect
c.on_message = on_message
# c.ws_set_options(path="/ws")
# c.connect("127.0.0.1", 15675, 60)
# c.loop_forever()
def send_bh():
    # One-shot publish to the "remote" topic; payload prefix "hb." suggests a
    # heartbeat message — TODO confirm against the receiving side.
    publish.single("remote", "hb.ecfabc9c3610.1.1100", hostname="aishe.org.cn", port=1883)


def send_dv():
    # One-shot publish of a device-status payload to the "dev" topic.
    publish.single("dev", "ecfabc9c3610.4.110", hostname="aishe.org.cn", port=1883)
if __name__ == "__main__":
# read sys.argv[1]: id, [2] count
if len(sys.argv) < 3:
raise Exception('require device and counts')
# pl = struct.pack("", int(sys.argv[1]), int(sys.argv[2]))
# pl = "%2d%2d%2d%2d" % (int(sys.argv[1]), 0, int(sys.argv[2]), 0)
# real wlan addr: 192.168.31.122
ho = sys.argv[2]
# assert(ho, str)
# pl = f"{int(sys.argv[1]):x}"
pl = "%s.%s" % (sys.argv[1], 4)
publish.single("dev", pl, hostname=ho, port=9883, transport="websockets", retain=False, qos=2)
print("--sent--") | 26.627451 | 98 | 0.623711 |
8a73b8905b7395c837f3b3aff904f33a540bf8a0 | 411 | py | Python | products/migrations/0010_auto_20210711_0608.py | kevin-ci/janeric2 | df07508a76ad8194a63be7786d8b84a905b3d2e9 | [
"MIT"
] | 1 | 2021-02-28T11:54:14.000Z | 2021-02-28T11:54:14.000Z | products/migrations/0010_auto_20210711_0608.py | kevin-ci/janeric2 | df07508a76ad8194a63be7786d8b84a905b3d2e9 | [
"MIT"
] | 6 | 2021-12-01T11:39:03.000Z | 2021-12-04T22:28:57.000Z | products/migrations/0010_auto_20210711_0608.py | kevin-ci/janeric2 | df07508a76ad8194a63be7786d8b84a905b3d2e9 | [
"MIT"
] | 1 | 2021-12-21T16:20:49.000Z | 2021-12-21T16:20:49.000Z | # Generated by Django 3.1.5 on 2021-07-11 06:08
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: changes the default ordering of the Product model only;
    # no schema/data changes are involved.

    dependencies = [
        ('products', '0009_auto_20210704_1252'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='product',
            options={'ordering': ['category__division', 'category__name', 'product_family__name', 'size']},
        ),
    ]
| 22.833333 | 107 | 0.625304 |
5f04df00c954a047738ceab8f896434374919d94 | 5,827 | py | Python | rnnt/transforms.py | entn-at/Online-Speech-Recognition | 75680cef38c57d0ac60f5e23c90d24bb3046e4e7 | [
"Apache-2.0"
] | null | null | null | rnnt/transforms.py | entn-at/Online-Speech-Recognition | 75680cef38c57d0ac60f5e23c90d24bb3046e4e7 | [
"Apache-2.0"
] | null | null | null | rnnt/transforms.py | entn-at/Online-Speech-Recognition | 75680cef38c57d0ac60f5e23c90d24bb3046e4e7 | [
"Apache-2.0"
] | 1 | 2021-07-23T07:23:04.000Z | 2021-07-23T07:23:04.000Z | import random
import torch
import torchaudio
from torchaudio.transforms import MFCC, MelSpectrogram
from rnnt.features import FilterbankFeatures
class CatDeltas(torch.nn.Module):
    """Append first- and second-order deltas along the feature axis (dim 1)."""
    @torch.no_grad()
    def forward(self, feat):
        # First-order delta, then the delta-of-delta (acceleration)
        d1 = torchaudio.functional.compute_deltas(feat)
        d2 = torchaudio.functional.compute_deltas(d1)
        # Concatenate on dim 1: output has 3x the input channels
        feat = torch.cat([feat, d1, d2], dim=1)
        return feat
class CMVN(torch.nn.Module):
    """Cepstral mean/variance normalisation over dim 2 of the input tensor."""
    # Small constant added to the standard deviation to avoid division by zero
    eps = 1e-5
    @torch.no_grad()
    def forward(self, feat):
        """Normalise `feat` to zero mean and (approximately) unit variance along dim 2."""
        mu = feat.mean(dim=2, keepdim=True)
        sigma = feat.std(dim=2, keepdim=True)
        return feat.sub(mu).div(sigma + CMVN.eps)
class Downsample(torch.nn.Module):
    """Stack every `n_frame` consecutive time frames into one wider frame.

    Input and output layout is (batch, feature, time); the time axis shrinks
    by a factor of `n_frame` while the feature axis grows by the same factor.
    """
    def __init__(self, n_frame, pad_to_divisible=True):
        super().__init__()
        self.n_frame = n_frame
        # True: zero-pad the tail so every frame is kept;
        # False: drop trailing frames that do not fill a full group.
        self.pad_to_divisible = pad_to_divisible
    @torch.no_grad()
    def forward(self, feat):
        frames = feat.transpose(1, 2)  # -> (batch, time, feature)
        n_batch, n_time, n_feat = frames.shape
        remainder = n_time % self.n_frame
        if self.pad_to_divisible:
            if remainder:
                tail = self.n_frame - remainder
                frames = torch.nn.functional.pad(frames, [0, 0, 0, tail, 0, 0])
        else:
            frames = frames[:, :n_time - remainder, :]
        stacked = frames.reshape(n_batch, -1, n_feat * self.n_frame)
        return stacked.transpose(1, 2)  # back to (batch, feature, time)
class FrequencyMasking(torch.nn.Module):
    """SpecAugment frequency masking (https://arxiv.org/abs/1904.08779).

    Masks `num_masks` random bands along dim 1 of each batch element, each
    band up to `max_width` channels wide.  The masked region is filled with
    zero, or with the whole-tensor mean when `use_mean` is True.
    """
    def __init__(self, max_width, num_masks, use_mean=False):
        super().__init__()
        self.max_width = max_width
        self.num_masks = num_masks
        self.use_mean = use_mean

    def forward(self, x):
        """Apply the random frequency masks to `x` and return the result."""
        fill = x.mean() if self.use_mean else 0
        masked = x.new_zeros(x.shape).bool()
        for batch_idx in range(x.shape[0]):
            for _ in range(self.num_masks):
                # Random band start, then random width (may be zero = no-op)
                lo = random.randrange(0, x.shape[1])
                hi = lo + random.randrange(0, self.max_width)
                masked[batch_idx, lo:hi, :] = 1
        return x.masked_fill(masked, value=fill)

    def __repr__(self):
        return "%s(max_width=%d,num_masks=%d,use_mean=%s)" % (
            type(self).__name__, self.max_width, self.num_masks, self.use_mean)
class TimeMasking(torch.nn.Module):
    """SpecAugment time masking (https://arxiv.org/abs/1904.08779).

    Masks `num_masks` random spans along dim 2 of each batch element, each
    span up to `max_width` frames long.  The masked region is filled with
    zero, or with the whole-tensor mean when `use_mean` is True.
    """
    def __init__(self, max_width, num_masks, use_mean=False):
        super().__init__()
        self.max_width = max_width
        self.num_masks = num_masks
        self.use_mean = use_mean

    def forward(self, x):
        """Apply the random time masks to `x` and return the result."""
        fill = x.mean() if self.use_mean else 0
        masked = x.new_zeros(x.shape).bool()
        for batch_idx in range(x.shape[0]):
            for _ in range(self.num_masks):
                # Random span start, then random length (may be zero = no-op)
                lo = random.randrange(0, x.shape[2])
                hi = lo + random.randrange(0, self.max_width)
                masked[batch_idx, :, lo:hi] = 1
        return x.masked_fill(masked, value=fill)

    def __repr__(self):
        return "%s(max_width=%s)" % (type(self).__name__, self.max_width)
def build_transform(feature_type, feature_size, n_fft=512, win_length=400,
                    hop_length=200, delta=False, cmvn=False, downsample=1,
                    T_mask=0, T_num_mask=0, F_mask=0, F_num_mask=0,
                    pad_to_divisible=True):
    """Build the audio feature pipelines for training and evaluation.

    Returns (transform_train, transform_test, input_size): two
    torch.nn.Sequential pipelines (train additionally applies SpecAugment
    masking) and the resulting per-frame feature dimension.

    NOTE(review): the `cmvn` flag is currently ignored — the CMVN step is
    commented out below; confirm whether that is intentional.
    """
    feature_args = {
        'n_fft': n_fft,
        'win_length': win_length,
        'hop_length': hop_length,
        # 'f_min': 20,
        # 'f_max': 5800,
    }
    transform = []
    input_size = feature_size
    # Exactly one of the three feature extractors is expected to match
    if feature_type == 'mfcc':
        transform.append(MFCC(
            n_mfcc=feature_size, log_mels=True, melkwargs=feature_args))
    if feature_type == 'melspec':
        transform.append(MelSpectrogram(
            n_mels=feature_size, **feature_args))
    if feature_type == 'logfbank':
        transform.append(FilterbankFeatures(
            n_filt=feature_size, **feature_args))
    if delta:
        # Delta + delta-delta triples the feature dimension
        transform.append(CatDeltas())
        input_size = input_size * 3
    # if cmvn:
    #     transform.append(CMVN())
    if downsample > 1:
        # Frame stacking multiplies the feature dimension by the factor
        transform.append(Downsample(downsample, pad_to_divisible))
        input_size = input_size * downsample
    # Test pipeline is snapshotted before the augmentation steps are added
    transform_test = torch.nn.Sequential(*transform)
    if T_mask > 0 and T_num_mask > 0:
        transform.append(TimeMasking(T_mask, T_num_mask))
    if F_mask > 0 and F_num_mask > 0:
        transform.append(FrequencyMasking(F_mask, F_num_mask))
    transform_train = torch.nn.Sequential(*transform)
    return transform_train, transform_test, input_size
| 30.994681 | 77 | 0.593616 |
6b193ffcc4f2c358a2661635ac0fb6ee51d40118 | 5,970 | py | Python | scripts/main.py | Cyprus-hy/TRSOSNet | 9466b21ec6b82f31777fe0ed680f14fd87de2f43 | [
"MIT"
] | null | null | null | scripts/main.py | Cyprus-hy/TRSOSNet | 9466b21ec6b82f31777fe0ed680f14fd87de2f43 | [
"MIT"
] | null | null | null | scripts/main.py | Cyprus-hy/TRSOSNet | 9466b21ec6b82f31777fe0ed680f14fd87de2f43 | [
"MIT"
] | null | null | null | import sys
import time
import os.path as osp
import argparse
import torch
import torch.nn as nn
import torchreid
from torchreid.utils import (
Logger, check_isfile, set_random_seed, collect_env_info,
resume_from_checkpoint, load_pretrained_weights, compute_model_complexity
)
from default_config import (
imagedata_kwargs, optimizer_kwargs, videodata_kwargs, engine_run_kwargs,
get_default_config, lr_scheduler_kwargs
)
def build_datamanager(cfg):
    """Instantiate the image or video data manager selected by cfg.data.type."""
    if cfg.data.type == 'image':
        return torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))
    return torchreid.data.VideoDataManager(**videodata_kwargs(cfg))
def build_engine(cfg, datamanager, model, optimizer, scheduler):
    """Select and construct the torchreid engine matching the config.

    Dispatch is on cfg.data.type ('image' vs video) and cfg.loss.name
    ('softmax' vs triplet); triplet engines additionally receive the margin
    and the triplet/softmax loss weights.
    """
    if cfg.data.type == 'image':
        if cfg.loss.name == 'softmax':
            engine = torchreid.engine.ImageSoftmaxEngine(
                datamanager,
                model,
                optimizer=optimizer,
                scheduler=scheduler,
                use_gpu=cfg.use_gpu,
                label_smooth=cfg.loss.softmax.label_smooth
            )
        else:
            engine = torchreid.engine.ImageTripletEngine(
                datamanager,
                model,
                optimizer=optimizer,
                margin=cfg.loss.triplet.margin,
                weight_t=cfg.loss.triplet.weight_t,
                weight_x=cfg.loss.triplet.weight_x,
                scheduler=scheduler,
                use_gpu=cfg.use_gpu,
                label_smooth=cfg.loss.softmax.label_smooth
            )
    else:
        if cfg.loss.name == 'softmax':
            engine = torchreid.engine.VideoSoftmaxEngine(
                datamanager,
                model,
                optimizer=optimizer,
                scheduler=scheduler,
                use_gpu=cfg.use_gpu,
                label_smooth=cfg.loss.softmax.label_smooth,
                # Video engines also need how frames are pooled per tracklet
                pooling_method=cfg.video.pooling_method
            )
        else:
            engine = torchreid.engine.VideoTripletEngine(
                datamanager,
                model,
                optimizer=optimizer,
                margin=cfg.loss.triplet.margin,
                weight_t=cfg.loss.triplet.weight_t,
                weight_x=cfg.loss.triplet.weight_x,
                scheduler=scheduler,
                use_gpu=cfg.use_gpu,
                label_smooth=cfg.loss.softmax.label_smooth
            )
    return engine
def reset_config(cfg, args):
    """Overwrite data-related config entries with any CLI values that were given."""
    overrides = (
        ("root", args.root),
        ("sources", args.sources),
        ("targets", args.targets),
        ("transforms", args.transforms),
    )
    for attr, value in overrides:
        # Only truthy CLI values override the config (same as the if-chain form)
        if value:
            setattr(cfg.data, attr, value)
def check_cfg(cfg):
    """Sanity-check the config: pure-triplet training cannot use a fixbase phase."""
    pure_triplet = cfg.loss.name == 'triplet' and cfg.loss.triplet.weight_x == 0
    if pure_triplet:
        assert cfg.train.fixbase_epoch == 0, \
            'The output of classifier is not included in the computational graph'
def main():
    """CLI entry point: parse args, build data/model/engine, then train or test."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '--config-file', type=str, default='', help='path to config file'
    )
    parser.add_argument(
        '-s',
        '--sources',
        type=str,
        nargs='+',
        help='source datasets (delimited by space)'
    )
    parser.add_argument(
        '-t',
        '--targets',
        type=str,
        nargs='+',
        help='target datasets (delimited by space)'
    )
    parser.add_argument(
        '--transforms', type=str, nargs='+', help='data augmentation'
    )
    parser.add_argument(
        '--root', type=str, default='', help='path to data root'
    )
    parser.add_argument(
        '--save_dir', type=str, default='', help='path to save log'
    )
    parser.add_argument(
        'opts',
        default=None,
        nargs=argparse.REMAINDER,
        help='Modify config options using the command-line'
    )
    args = parser.parse_args()
    # Config resolution order: defaults < config file < CLI flags < opts list
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)
    check_cfg(cfg)
    # Tee stdout to a timestamped log file under the save dir
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True
    datamanager = build_datamanager(cfg)
    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        name=cfg.model.name,
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu
    )
    num_params, flops = compute_model_complexity(
        model, (1, 3, cfg.data.height, cfg.data.width)
    )
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))
    # Optionally load pretrained weights before wrapping in DataParallel
    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)
    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, **lr_scheduler_kwargs(cfg)
    )
    # Resuming restores model/optimizer/scheduler state and the start epoch
    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
        )
    print(
        'Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type)
    )
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))


if __name__ == '__main__':
    main()
| 30.615385 | 81 | 0.618425 |
0fd2beb6eea886ea0370de9ecff8ba8d0b319b97 | 4,395 | py | Python | tests/test_structures.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | 3 | 2016-12-15T15:54:37.000Z | 2021-08-10T03:16:18.000Z | tests/test_structures.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | 44 | 2016-04-13T08:19:45.000Z | 2022-01-14T12:58:44.000Z | tests/test_structures.py | jframos/sdklib | 0cc1126e94b823fad6cc47e6a00549cad6d2f771 | [
"BSD-2-Clause"
] | 5 | 2016-11-22T11:23:28.000Z | 2020-01-28T12:26:10.000Z | import unittest
from sdklib.util.structures import get_dict_from_list, to_key_val_dict, to_key_val_list, CaseInsensitiveDict
class TestStructures(unittest.TestCase):
    """Tests for sdklib.util.structures helpers and CaseInsensitiveDict."""

    def test_to_key_val_list(self):
        test_list = {'key1': 'val1', 'key0': 'val0'}
        res = to_key_val_list(test_list)
        # Dict iteration order is interpreter-dependent here: accept either.
        self.assertIn(res, ([('key1', 'val1'), ('key0', 'val0')],
                            [('key0', 'val0'), ('key1', 'val1')]))

    def test_to_key_val_list_sorted_insensitive(self):
        test_list = {'Key1': 'val1', 'key0': 'val0'}
        res = to_key_val_list(test_list, sort=True, insensitive=True)
        self.assertEqual(res, [('key0', 'val0'), ('Key1', 'val1')])

    def test_to_key_val_list_sorted_sensitive(self):
        # Case-sensitive sort puts uppercase 'Key1' before lowercase 'key0'
        test_list = {'Key1': 'val1', 'key0': 'val0'}
        res = to_key_val_list(test_list, sort=True, insensitive=False)
        self.assertEqual(res, [('Key1', 'val1'), ('key0', 'val0')])

    def test_to_key_val_list_none(self):
        self.assertIsNone(to_key_val_list(None))

    def test_to_key_val_list_exception(self):
        # Non-iterable input must raise (assertRaises replaces try/except + assertTrue(False))
        with self.assertRaises(ValueError):
            to_key_val_list(1)

    def test_get_dict_from_list(self):
        test_list = [{"Id": 0, "key2": "", "key3": ""}, {"Id": 1, "key2": ""}, {"Id": 2, "key2": "", "key7": 4}]
        res = get_dict_from_list(test_list, Id=1)
        self.assertEqual(res, {"Id": 1, "key2": ""})

    def test_to_key_val_dict_json(self):
        res = to_key_val_dict({"Id": 0, "key2": "", "key3": ""})
        self.assertEqual(res, {"Id": 0, "key2": "", "key3": ""})

    def test_to_key_val_dict_tuple_list(self):
        res = to_key_val_dict([("Id", 0), ("key2", ""), ("key3", "")])
        self.assertEqual(res, {"Id": 0, "key2": "", "key3": ""})

    def test_to_key_val_dict_tuple_list_array(self):
        # Repeated keys collapse into a list of values
        res = to_key_val_dict([("Id", 0), ("key2", ""), ("key2", "val")])
        self.assertEqual(res, {"Id": 0, "key2": ["", "val"]})

    def test_to_key_val_dict_tuple_list_double_array(self):
        # List values for the same key are concatenated
        res = to_key_val_dict([("Id", 0), ("key2", [""]), ("key2", ["val", "val2"])])
        self.assertEqual(res, {"Id": 0, "key2": ["", "val", "val2"]})

    def test_to_key_val_dict_tuple_list_array_append(self):
        # A scalar value is appended to an existing list value
        res = to_key_val_dict([("Id", 0), ("key2", [""]), ("key2", "val")])
        self.assertEqual(res, {"Id": 0, "key2": ["", "val"]})

    def test_to_key_val_dict_tuple_list_three_elements(self):
        res = to_key_val_dict([("Id", 0), ("key2", ""), ("key2", "val"), ("key2", "val2")])
        self.assertEqual(res, {"Id": 0, "key2": ["", "val", "val2"]})

    def test_to_key_val_dict_none(self):
        self.assertEqual(to_key_val_dict(None), {})

    def test_to_key_val_dict_exception(self):
        with self.assertRaises(ValueError):
            to_key_val_dict(1)

    def test_to_key_val_dict_invalid_array_of_dicts(self):
        with self.assertRaises(ValueError):
            to_key_val_dict([{"a": 1}, {"b": 2}])

    def test_to_key_val_dict_invalid_number_of_items(self):
        with self.assertRaises(ValueError):
            to_key_val_dict([(1, 2, 3), (1, 2, 3)])

    def test_case_insensitive_dict_basic_key_value_name(self):
        d = CaseInsensitiveDict({"X-key": "X-value"})
        self.assertEqual(1, len(d))
        # Original key casing is preserved; lookup is case-insensitive
        self.assertEqual("X-key", list(d.keys())[0])
        self.assertEqual("X-value", list(d.values())[0])
        self.assertEqual("X-value", d["x-key"])

    def test_case_insensitive_dict_key_value_name_duplicated_keys(self):
        # Keys differing only in case collapse into a single entry
        d = CaseInsensitiveDict({"X-key": "X-value", "x-key": "x-value"})
        self.assertEqual(1, len(d))
        self.assertEqual("X-key".lower(), list(d.keys())[0].lower())

    def test_case_insensitive_dict_key_value_update(self):
        d = CaseInsensitiveDict({"X-key": "X-value"})
        d["x-key"] = "x-value"
        # The update replaces both the stored casing and the value
        self.assertEqual(1, len(d))
        self.assertEqual("x-key", list(d.keys())[0])
        self.assertEqual("x-value", list(d.values())[0])
        self.assertEqual("x-value", d["x-key"])
| 38.893805 | 119 | 0.584755 |
4ea9b99f6c1ac10ba81df42ac2c13aec9b3b26ad | 789 | py | Python | virtual/lib/python3.6/site-packages/registration/backends/default/views.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 33 | 2018-10-07T21:50:44.000Z | 2022-02-16T18:16:56.000Z | virtual/lib/python3.6/site-packages/registration/backends/default/views.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 13 | 2020-01-12T13:03:07.000Z | 2022-02-10T13:44:28.000Z | virtual/lib/python3.6/site-packages/registration/backends/default/views.py | amiinegal/Awwards | dd667a1ffbd3fa9b90c8282d44d497b3a9d0c1ed | [
"MIT",
"Unlicense"
] | 12 | 2018-11-24T16:39:12.000Z | 2022-03-02T21:05:59.000Z | """
Backwards-compatible support for importing the model-based activation
workflow's views.
The new location for those views is
registration.backends.model_activation.views. Importing from
registration.backends.default will raise deprecation warnings, and
support for it will be removed in a future release.
"""
import textwrap
import warnings
from registration.backends.model_activation import views as new_location
warnings.warn(
textwrap.dedent("""
registration.backends.default.views is deprecated and
will be removed in django-registration 3.0. Import from
registration.backends.model_activation.views
instead.
"""),
DeprecationWarning
)
ActivationView = new_location.ActivationView
RegistrationView = new_location.RegistrationView
| 25.451613 | 72 | 0.78327 |
5df2984f2f0b2e8fe277fb3b190ab59c5588e7a9 | 2,329 | py | Python | model_zoo/official/cv/inceptionv4/src/callback.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | model_zoo/official/cv/inceptionv4/src/callback.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | model_zoo/official/cv/inceptionv4/src/callback.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""callback function"""
from mindspore.train.callback import Callback
class EvaluateCallBack(Callback):
    """Callback that evaluates the model during training.

    Evaluation runs every `per_print_time` steps and at the end of every
    epoch, printing top-1/top-5 accuracy each time.
    """
    def __init__(self, model, eval_dataset, per_print_time=1000):
        super(EvaluateCallBack, self).__init__()
        self.model = model                    # Model providing .eval()
        self.per_print_time = per_print_time  # evaluation interval in steps
        self.eval_dataset = eval_dataset      # dataset used for evaluation

    def _evaluate_and_report(self, cb_params):
        """Run one evaluation pass and print the accuracy metrics.

        Extracted so step_end/epoch_end no longer duplicate the same
        eval-and-print code.
        """
        result = self.model.eval(self.eval_dataset, dataset_sink_mode=False)
        print('cur epoch {}, cur_step {}, top1 accuracy {}, top5 accuracy {}.'.format(
            cb_params.cur_epoch_num,
            cb_params.cur_step_num,
            result['top_1_accuracy'],
            result['top_5_accuracy']))

    def step_end(self, run_context):
        """Evaluate every `per_print_time` training steps."""
        cb_params = run_context.original_args()
        if cb_params.cur_step_num % self.per_print_time == 0:
            self._evaluate_and_report(cb_params)

    def epoch_end(self, run_context):
        """Evaluate at the end of every epoch."""
        cb_params = run_context.original_args()
        self._evaluate_and_report(cb_params)
| 54.162791 | 116 | 0.537999 |
c4874e6956df2a57913b467591881760162c5d74 | 3,619 | py | Python | plugin/core/registry.py | kaste/LSP | aaab8145652821e45838b199b9e1eb2a95b7ad15 | [
"MIT"
] | null | null | null | plugin/core/registry.py | kaste/LSP | aaab8145652821e45838b199b9e1eb2a95b7ad15 | [
"MIT"
] | null | null | null | plugin/core/registry.py | kaste/LSP | aaab8145652821e45838b199b9e1eb2a95b7ad15 | [
"MIT"
] | null | null | null | from .configurations import ConfigManager
from .sessions import Session
from .settings import client_configs
from .typing import Optional, Any, Generator, Iterable
from .windows import WindowRegistry
import sublime
import sublime_plugin
def sessions_for_view(view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:
    """
    Returns all sessions for this view, optionally matching the capability path.
    """
    window = view.window()
    # A view detached from any window has no sessions: yield nothing
    if window:
        manager = windows.lookup(window)
        yield from manager.sessions(view, capability)
def best_session(view: sublime.View, sessions: Iterable[Session], point: Optional[int] = None) -> Optional[Session]:
    """Pick the session whose config scores highest for the scope at `point`.

    Defaults `point` to the first selection's cursor; returns None when there
    is no selection to derive a point from, or when `sessions` is empty.
    Ties keep the first session encountered.
    """
    if point is None:
        try:
            point = view.sel()[0].b
        except IndexError:
            return None
    scope = view.scope_name(point)
    winner = None
    winner_score = None
    for session in sessions:
        score = session.config.score_feature(scope)
        if winner is None or score > winner_score:
            winner, winner_score = session, score
    return winner
# Global config manager seeded with all client configurations; kept in sync
# with future client-config changes via the listener registered below.
configs = ConfigManager(client_configs.all)
client_configs.set_listener(configs.update)
# Global registry mapping each Sublime window to its window manager.
windows = WindowRegistry(configs)
def get_position(view: sublime.View, event: Optional[dict] = None) -> int:
    """Return the text point for a mouse event, or the first cursor otherwise."""
    if not event:
        return view.sel()[0].begin()
    # Mouse events carry window coordinates; convert them to a text point
    return view.window_to_text((event["x"], event["y"]))
class LspTextCommand(sublime_plugin.TextCommand):
    """
    Base class for LSP text commands triggered via the command palette and/or
    a keybinding.  Subclasses may set `capability` or `session_name` to gate
    when the command is enabled.
    """

    # If a derived class sets this, the command is enabled iff some session
    # attached to the view has the capability.  When both `capability` and
    # `session_name` are set, `capability` wins.
    capability = ''

    # If a derived class sets this, the command is enabled iff a session with
    # this config name is attached.  `capability` takes precedence (above).
    session_name = ''

    def is_enabled(self, event: Optional[dict] = None) -> bool:
        if self.capability:
            # A session providing the capability must exist at the point.
            point = get_position(self.view, event)
            return bool(self.best_session(self.capability, point))
        if self.session_name:
            # A session with the configured name must be attached.
            return bool(self.session_by_name(self.session_name))
        # Otherwise any attached session will do.
        return any(self.sessions())

    def want_event(self) -> bool:
        """Ask Sublime to pass mouse-event data to run/is_enabled."""
        return True

    def best_session(self, capability: str, point: Optional[int] = None) -> Optional[Session]:
        """Highest-scoring attached session that provides `capability`."""
        candidates = self.sessions(capability)
        return best_session(self.view, candidates, point)

    def session_by_name(self, name: Optional[str] = None) -> Optional[Session]:
        """First attached session whose config name matches (defaults to self.session_name)."""
        target = name or self.session_name
        return next((s for s in self.sessions() if s.config.name == target), None)

    def sessions(self, capability: Optional[str] = None) -> Generator[Session, None, None]:
        """All sessions attached to this view, optionally filtered by capability."""
        yield from sessions_for_view(self.view, capability)
class LspRestartClientCommand(sublime_plugin.TextCommand):
    """Restart every language server session managed for this view's window."""

    def run(self, edit: Any) -> None:
        window = self.view.window()
        if not window:
            return
        manager = windows.lookup(window)
        manager.restart_sessions_async()
class LspRecheckSessionsCommand(sublime_plugin.WindowCommand):
    """Asynchronously restart the sessions managed for this window."""

    def run(self) -> None:
        def _restart() -> None:
            windows.lookup(self.window).restart_sessions_async()

        sublime.set_timeout_async(_restart)
| 36.928571 | 116 | 0.68693 |
0ca3d9ccfac049fc7beb14758a0e79cfd51b7053 | 21,499 | py | Python | django/contrib/auth/tests/test_forms.py | dnozay/django | 5dcdbe95c749d36072f527e120a8cb463199ae0d | [
"BSD-3-Clause"
] | 1 | 2019-03-24T18:05:43.000Z | 2019-03-24T18:05:43.000Z | django/contrib/auth/tests/test_forms.py | dnozay/django | 5dcdbe95c749d36072f527e120a8cb463199ae0d | [
"BSD-3-Clause"
] | null | null | null | django/contrib/auth/tests/test_forms.py | dnozay/django | 5dcdbe95c749d36072f527e120a8cb463199ae0d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import os
import re
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import Field, CharField
from django.test import TestCase, override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserCreationFormTest(TestCase):
    """Validation and save behaviour of UserCreationForm."""

    fixtures = ['authtestdata.json']

    def test_user_already_exists(self):
        # 'testclient' exists in the fixture, so re-creating it must surface
        # the model field's "unique" error message on the username field.
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_text(User._meta.get_field('username').error_messages['unique'])])

    def test_invalid_data(self):
        # Usernames are restricted by the model field's "invalid" validator.
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])

    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [force_text(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)

        # Supplying only the second password still leaves password1 required.
        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])

    def test_success(self):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<User: jsmith@example.com>')
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AuthenticationFormTest(TestCase):
    """Login validation and labelling behaviour of AuthenticationForm."""

    fixtures = ['authtestdata.json']

    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['invalid_login'] % {
                             'username': User._meta.get_field('username').verbose_name
                         })])

    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['inactive'])])

    def test_inactive_user_i18n(self):
        # Same as above, but under a non-default locale.
        with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
            # The user is inactive.
            data = {
                'username': 'inactive',
                'password': 'password',
            }
            form = AuthenticationForm(None, data)
            self.assertFalse(form.is_valid())
            self.assertEqual(form.non_field_errors(),
                             [force_text(form.error_messages['inactive'])])

    def test_custom_login_allowed_policy(self):
        # The user is inactive, but our custom form policy allows them to log in.
        data = {
            'username': 'inactive',
            'password': 'password',
        }

        class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
            def confirm_login_allowed(self, user):
                pass

        form = AuthenticationFormWithInactiveUsersOkay(None, data)
        self.assertTrue(form.is_valid())

        # If we want to disallow some logins according to custom logic,
        # we should raise a django.forms.ValidationError in the form.
        class PickyAuthenticationForm(AuthenticationForm):
            def confirm_login_allowed(self, user):
                if user.username == "inactive":
                    raise forms.ValidationError("This user is disallowed.")
                raise forms.ValidationError("Sorry, nobody's allowed in.")

        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])

        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])

    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

    def test_username_field_label(self):
        # An explicit label on a custom username field is preserved.

        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="Name", max_length=75)

        form = CustomAuthenticationForm()
        self.assertEqual(form['username'].label, "Name")

    def test_username_field_label_not_set(self):
        # Without an explicit label the form falls back to the capitalized
        # verbose_name of the model's USERNAME_FIELD.

        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField()

        form = CustomAuthenticationForm()
        username_field = User._meta.get_field(User.USERNAME_FIELD)
        self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))

    def test_username_field_label_empty_string(self):
        # An explicitly empty label must not be replaced by the fallback.

        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label='')

        form = CustomAuthenticationForm()
        self.assertEqual(form.fields['username'].label, "")
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SetPasswordFormTest(TestCase):
    """Password-confirmation behaviour of SetPasswordForm."""

    fixtures = ['authtestdata.json']

    def test_password_verification(self):
        # A mismatching confirmation surfaces the password_mismatch error on
        # the second password field.
        user = User.objects.get(username='testclient')
        form = SetPasswordForm(user, {'new_password1': 'abc123', 'new_password2': 'abc'})
        self.assertFalse(form.is_valid())
        expected = [force_text(form.error_messages['password_mismatch'])]
        self.assertEqual(form["new_password2"].errors, expected)

    def test_success(self):
        # Matching passwords validate cleanly.
        user = User.objects.get(username='testclient')
        form = SetPasswordForm(user, {'new_password1': 'abc123', 'new_password2': 'abc123'})
        self.assertTrue(form.is_valid())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordChangeFormTest(TestCase):
    """Old-password verification and field ordering of PasswordChangeForm."""

    fixtures = ['authtestdata.json']

    def test_incorrect_password(self):
        # A wrong old password must raise password_incorrect on that field.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [force_text(form.error_messages['password_incorrect'])])

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_success(self):
        # The success case.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())

    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username='testclient')
        self.assertEqual(list(PasswordChangeForm(user, {}).fields),
                         ['old_password', 'new_password1', 'new_password2'])
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
    """Validation, password rendering and regression tests for UserChangeForm."""

    fixtures = ['authtestdata.json']

    def test_username_validity(self):
        # Invalid usernames surface the model validator's "invalid" message.
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])

    def test_bug_14242(self):
        # A regression test, introduce by adding an optimization for the
        # UserChangeForm.

        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'

            class Meta(UserChangeForm.Meta):
                fields = ('groups',)

        # Just check we can create it
        MyUserForm({})

    def test_unusable_password(self):
        # Renamed from test_unsuable_password (typo). A user with an unusable
        # password must render the "No password set." message.
        user = User.objects.get(username='empty_password')
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_empty_password(self):
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_unmanageable_password(self):
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_17944_unknown_password_algorithm(self):
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial

        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = 'new password'
        form = UserChangeForm(instance=user, data=post_data)

        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')

    def test_bug_19349_bound_password_field(self):
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())
@skipIfCustomUser
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
    TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    USE_TZ=False,
)
class PasswordResetFormTest(TestCase):
    """Email validation and delivery behaviour of PasswordResetForm."""

    fixtures = ['authtestdata.json']

    def create_dummy_user(self):
        """
        Create a user and return a tuple (user_object, username, email).
        """
        username = 'jsmith'
        email = 'jsmith@example.com'
        user = User.objects.create_user(username, email, 'test123')
        return (user, username, email)

    def test_invalid_email(self):
        # A malformed address fails field validation before any lookup.
        data = {'email': 'not valid'}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])

    def test_nonexistant_email(self):
        """
        Test nonexistent email address. This should not fail because it would
        expose information about registered users.
        """
        data = {'email': 'foo@bar.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(len(mail.outbox), 0)

    def test_cleaned_data(self):
        # A known address validates, sends one message and survives cleaning.
        (user, username, email) = self.create_dummy_user()
        data = {'email': email}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save(domain_override='example.com')
        self.assertEqual(form.cleaned_data['email'], email)
        self.assertEqual(len(mail.outbox), 1)

    def test_custom_email_subject(self):
        data = {'email': 'testclient@example.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')

    def test_custom_email_constructor(self):
        # A subclass may build the message itself; here it adds a bcc and an
        # HTML alternative via EmailMultiAlternatives.
        template_path = os.path.join(os.path.dirname(__file__), 'templates')
        with self.settings(TEMPLATE_DIRS=(template_path,)):
            data = {'email': 'testclient@example.com'}

            class CustomEmailPasswordResetForm(PasswordResetForm):
                def send_mail(self, subject_template_name, email_template_name,
                              context, from_email, to_email,
                              html_email_template_name=None):
                    EmailMultiAlternatives(
                        "Forgot your password?",
                        "Sorry to hear you forgot your password.",
                        None, [to_email],
                        ['site_monitor@example.com'],
                        headers={'Reply-To': 'webmaster@example.com'},
                        alternatives=[("Really sorry to hear you forgot your password.",
                                       "text/html")]).send()

            form = CustomEmailPasswordResetForm(data)
            self.assertTrue(form.is_valid())
            # Since we're not providing a request object, we must provide a
            # domain_override to prevent the save operation from failing in the
            # potential case where contrib.sites is not installed. Refs #16412.
            form.save(domain_override='example.com')
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
            self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
            self.assertEqual(mail.outbox[0].content_subtype, "plain")

    def test_preserve_username_case(self):
        """
        Preserve the case of the user name (before the @ in the email address)
        when creating a user (#5605).
        """
        user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
        self.assertEqual(user.email, 'tesT@example.com')
        user = User.objects.create_user('forms_test3', 'tesT', 'test')
        self.assertEqual(user.email, 'tesT')

    def test_inactive_user(self):
        """
        Test that inactive user cannot receive password reset email.
        """
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({'email': email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_unusable_password(self):
        user = User.objects.create_user('testuser', 'test@example.com', 'test')
        data = {"email": "test@example.com"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        user.set_unusable_password()
        user.save()
        form = PasswordResetForm(data)
        # The form itself is valid, but no email is sent
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_save_plaintext_email(self):
        """
        Test the PasswordResetForm.save() method with no html_email_template_name
        parameter passed in.
        Test to ensure original behavior is unchanged after the parameter was added.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertFalse(message.is_multipart())
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(mail.outbox[0].alternatives), 0)
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))

    def test_save_html_email_template_name(self):
        """
        Test the PasswordResetForm.save() method with html_email_template_name
        parameter specified.
        Test to ensure that a multipart email is sent with both text/plain
        and text/html parts.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save(html_email_template_name='registration/html_password_reset_email.html')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(len(mail.outbox[0].alternatives), 1)
        message = mail.outbox[0].message()
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
        self.assertTrue(re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$', message.get_payload(1).get_payload()))
class ReadOnlyPasswordHashTest(TestCase):
    """Tests for the read-only password hash widget and field."""

    def test_bug_19349_render_with_none_value(self):
        # Regression for #19349: rendering with a None value must not raise
        # and should fall back to the "no password" message.
        rendered = ReadOnlyPasswordHashWidget().render(name='password', value=None, attrs={})
        self.assertIn(_("No password set."), rendered)

    def test_readonly_field_has_changed(self):
        # The read-only field never reports a change.
        self.assertFalse(ReadOnlyPasswordHashField()._has_changed('aaa', 'bbb'))
| 40.035382 | 143 | 0.638402 |
0f07d0ce320b1d3c01e5e9f132d08927edec7df5 | 2,485 | py | Python | graph.py | ucsb-cs48-w19/6pm-stock-trading | daf70b684c15182753d8ca9b820238cf9cd5b75c | [
"MIT"
] | 1 | 2019-04-06T15:44:07.000Z | 2019-04-06T15:44:07.000Z | graph.py | ucsb-cs48-w19/6pm-stock-trading | daf70b684c15182753d8ca9b820238cf9cd5b75c | [
"MIT"
] | 35 | 2019-03-07T22:29:04.000Z | 2021-12-13T19:55:51.000Z | graph.py | ucsb-cs48-w19/6pm-stock-trading | daf70b684c15182753d8ca9b820238cf9cd5b75c | [
"MIT"
] | 1 | 2019-12-18T23:06:37.000Z | 2019-12-18T23:06:37.000Z | from pandas_datareader import data
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import io
import base64
#from datetime import datetime
tickers = ['^NDX']
def getTempPath():
    """Return the relative path where the rendered plot image is stored."""
    return 'static/plot.png'
# In[31]:
def graphMaker():
    """Plot the recent price history for ``tickers`` and return it as a data URI.

    Downloads roughly the last 40 calendar days of daily data (about 30
    trading days) from Yahoo Finance, plots the Open and Adj Close columns,
    and returns the rendered PNG as a ``data:image/png;base64,...`` string
    ready for embedding in an ``<img>`` tag.

    Fixes over the previous version: the code after the first ``return``
    (a loop building ``closing_prices`` and a second ``return filePath``)
    was unreachable and has been removed, along with large blocks of dead
    commented-out code and unused locals.
    """
    today = dt.date.today()
    # 40 calendar days back leaves ~30 trading days once weekends and
    # holidays are dropped by the data source.
    start = today - dt.timedelta(days=40)

    # pandas-datareader accepts ISO "YYYY-MM-DD" date strings.
    panel_data = data.DataReader(tickers, 'yahoo', start.isoformat(), today.isoformat())

    panel_data[['Open', 'Adj Close']].plot(figsize=(15, 5))
    plt.title('Recent 30 Days Stock Trading')

    # Render the figure into an in-memory PNG and base64-encode it so the
    # caller can inline the image without touching the filesystem.
    img = io.BytesIO()
    plt.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return 'data:image/png;base64,{}'.format(graph_url)
# Allow running this module directly as a script to render the chart once.
if __name__ == "__main__":
    graphMaker()
| 27.01087 | 103 | 0.632998 |
a5b5be4fce0a85329a0e2edacde64dbd211ad1e1 | 89 | py | Python | src/France/apps.py | IFB-ElixirFr/nCOV_genome_stats | b6cd7264722439492602bc824bb13cc012e26bb8 | [
"BSD-3-Clause"
] | null | null | null | src/France/apps.py | IFB-ElixirFr/nCOV_genome_stats | b6cd7264722439492602bc824bb13cc012e26bb8 | [
"BSD-3-Clause"
] | null | null | null | src/France/apps.py | IFB-ElixirFr/nCOV_genome_stats | b6cd7264722439492602bc824bb13cc012e26bb8 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
class VaccinsConfig(AppConfig):
name = 'vaccins'
| 14.833333 | 33 | 0.752809 |
b89fdb6c8a9d19b7e3cf73051d77e7ddc81023e8 | 13,413 | py | Python | sdk/python/pulumi_azure_native/network/v20200801/packet_capture.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200801/packet_capture.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200801/packet_capture.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PacketCapture']
class PacketCapture(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bytes_to_capture_per_packet: Optional[pulumi.Input[float]] = None,
filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PacketCaptureFilterArgs']]]]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
packet_capture_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_location: Optional[pulumi.Input[pulumi.InputType['PacketCaptureStorageLocationArgs']]] = None,
target: Optional[pulumi.Input[str]] = None,
time_limit_in_seconds: Optional[pulumi.Input[int]] = None,
total_bytes_per_session: Optional[pulumi.Input[float]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Information about packet capture session.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[float] bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes are truncated.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PacketCaptureFilterArgs']]]] filters: A list of packet capture filters.
:param pulumi.Input[str] network_watcher_name: The name of the network watcher.
:param pulumi.Input[str] packet_capture_name: The name of the packet capture session.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['PacketCaptureStorageLocationArgs']] storage_location: The storage location for a packet capture session.
:param pulumi.Input[str] target: The ID of the targeted resource, only VM is currently supported.
:param pulumi.Input[int] time_limit_in_seconds: Maximum duration of the capture session in seconds.
:param pulumi.Input[float] total_bytes_per_session: Maximum size of the capture output.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bytes_to_capture_per_packet is None:
bytes_to_capture_per_packet = 0
__props__['bytes_to_capture_per_packet'] = bytes_to_capture_per_packet
__props__['filters'] = filters
if network_watcher_name is None and not opts.urn:
raise TypeError("Missing required property 'network_watcher_name'")
__props__['network_watcher_name'] = network_watcher_name
__props__['packet_capture_name'] = packet_capture_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if storage_location is None and not opts.urn:
raise TypeError("Missing required property 'storage_location'")
__props__['storage_location'] = storage_location
if target is None and not opts.urn:
raise TypeError("Missing required property 'target'")
__props__['target'] = target
if time_limit_in_seconds is None:
time_limit_in_seconds = 18000
__props__['time_limit_in_seconds'] = time_limit_in_seconds
if total_bytes_per_session is None:
total_bytes_per_session = 1073741824
__props__['total_bytes_per_session'] = total_bytes_per_session
__props__['etag'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200801:PacketCapture"), pulumi.Alias(type_="azure-native:network:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network:PacketCapture"), pulumi.Alias(type_="azure-native:network/latest:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/latest:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20160901:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20160901:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20161201:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20161201:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20170301:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20170301:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20170601:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20170601:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20170801:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20170801:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20170901:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20170901:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20171001:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20171001:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20171101:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20171101:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20180101:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20180101:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20180201:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20180201:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20180401:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20180401:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20180601:PacketCapture"), 
pulumi.Alias(type_="azure-nextgen:network/v20180601:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20180701:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20180701:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20180801:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20180801:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20181001:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20181001:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20181101:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20181101:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20181201:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20181201:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20190201:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20190201:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20190401:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20190401:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20190601:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20190601:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20190701:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20190701:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20190801:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20190801:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20190901:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20190901:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20191101:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20191101:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20191201:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20191201:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20200301:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20200301:PacketCapture"), 
pulumi.Alias(type_="azure-native:network/v20200401:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20200401:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20200501:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20200501:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20200601:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20200601:PacketCapture"), pulumi.Alias(type_="azure-native:network/v20200701:PacketCapture"), pulumi.Alias(type_="azure-nextgen:network/v20200701:PacketCapture")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PacketCapture, __self__).__init__(
'azure-native:network/v20200801:PacketCapture',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PacketCapture':
        """
        Get an existing PacketCapture resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Binding by id: the engine reads the live resource instead of creating one.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        # All properties start as None; the provider populates them from the
        # live resource's state during the read.
        __props__["bytes_to_capture_per_packet"] = None
        __props__["etag"] = None
        __props__["filters"] = None
        __props__["name"] = None
        __props__["provisioning_state"] = None
        __props__["storage_location"] = None
        __props__["target"] = None
        __props__["time_limit_in_seconds"] = None
        __props__["total_bytes_per_session"] = None
        return PacketCapture(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="bytesToCapturePerPacket")
    def bytes_to_capture_per_packet(self) -> pulumi.Output[Optional[float]]:
        """
        Number of bytes captured per packet, the remaining bytes are truncated.
        """
        # Read-only output resolved from the camelCase API field "bytesToCapturePerPacket".
        return pulumi.get(self, "bytes_to_capture_per_packet")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        # Useful for optimistic-concurrency checks against the Azure API.
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def filters(self) -> pulumi.Output[Optional[Sequence['outputs.PacketCaptureFilterResponse']]]:
        """
        A list of packet capture filters.
        """
        # May be None when the capture session was created without filters.
        return pulumi.get(self, "filters")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the packet capture session.
        """
        # Assigned by the provider; distinct from the Pulumi resource_name.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the packet capture session.
        """
        # Mirrors the camelCase API field "provisioningState".
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="storageLocation")
    def storage_location(self) -> pulumi.Output['outputs.PacketCaptureStorageLocationResponse']:
        """
        The storage location for a packet capture session.
        """
        # Mirrors the camelCase API field "storageLocation".
        return pulumi.get(self, "storage_location")
    @property
    @pulumi.getter
    def target(self) -> pulumi.Output[str]:
        """
        The ID of the targeted resource, only VM is currently supported.
        """
        # Azure resource ID string of the capture target.
        return pulumi.get(self, "target")
    @property
    @pulumi.getter(name="timeLimitInSeconds")
    def time_limit_in_seconds(self) -> pulumi.Output[Optional[int]]:
        """
        Maximum duration of the capture session in seconds.
        """
        # None when no explicit time limit was configured.
        return pulumi.get(self, "time_limit_in_seconds")
    @property
    @pulumi.getter(name="totalBytesPerSession")
    def total_bytes_per_session(self) -> pulumi.Output[Optional[float]]:
        """
        Maximum size of the capture output.
        """
        # None when no explicit size cap was configured.
        return pulumi.get(self, "total_bytes_per_session")
    def translate_output_property(self, prop):
        # Map a camelCase provider property name to its snake_case Python name;
        # names absent from the table pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map a snake_case Python property name back to the provider's
        # camelCase name; names absent from the table pass through unchanged.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 65.75 | 4,480 | 0.711996 |
c44a2e4fa6eeae05ee17f248f63adbc24b761eee | 19,068 | py | Python | models/rank/bst/net.py | windstamp/PaddleRec | 0267e5b25703519429d160525df2fa5633d6a3b9 | [
"Apache-2.0"
] | null | null | null | models/rank/bst/net.py | windstamp/PaddleRec | 0267e5b25703519429d160525df2fa5633d6a3b9 | [
"Apache-2.0"
] | null | null | null | models/rank/bst/net.py | windstamp/PaddleRec | 0267e5b25703519429d160525df2fa5633d6a3b9 | [
"Apache-2.0"
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import math
from functools import partial
import numpy as np
class BSTLayer(paddle.nn.Layer):
    """Top-level Behavior Sequence Transformer model.

    Wraps a :class:`BST` backbone and adds a learnable scalar bias; the
    forward pass applies a sigmoid to produce a click probability.
    """

    def __init__(self, user_count, item_emb_size, cat_emb_size,
                 position_emb_size, act, is_sparse, use_DataLoader, item_count,
                 cat_count, position_count, n_encoder_layers, d_model, d_key,
                 d_value, n_head, dropout_rate, postprocess_cmd,
                 preprocess_cmd, prepostprocess_dropout, d_inner_hid,
                 relu_dropout, layer_sizes):
        super(BSTLayer, self).__init__()
        # Hyperparameters are stored on self for introspection; the BST
        # submodule receives its own copies below.
        self.item_emb_size = item_emb_size
        self.cat_emb_size = cat_emb_size
        self.position_emb_size = position_emb_size
        self.act = act
        self.is_sparse = is_sparse
        # significant for speeding up the training process
        self.use_DataLoader = use_DataLoader
        self.item_count = item_count
        self.cat_count = cat_count
        self.position_count = position_count
        self.user_count = user_count
        self.n_encoder_layers = n_encoder_layers
        self.d_model = d_model
        self.d_key = d_key
        self.d_value = d_value
        self.n_head = n_head
        self.dropout_rate = dropout_rate
        self.postprocess_cmd = postprocess_cmd
        self.preprocess_cmd = preprocess_cmd
        self.prepostprocess_dropout = prepostprocess_dropout
        self.d_inner_hid = d_inner_hid
        self.relu_dropout = relu_dropout
        self.layer_sizes = layer_sizes
        # Transformer backbone producing a per-example logit.
        self.bst = BST(user_count, item_emb_size, cat_emb_size,
                       position_emb_size, act, is_sparse, use_DataLoader,
                       item_count, cat_count, position_count, n_encoder_layers,
                       d_model, d_key, d_value, n_head, dropout_rate,
                       postprocess_cmd, preprocess_cmd, prepostprocess_dropout,
                       d_inner_hid, relu_dropout, layer_sizes)
        # Global scalar bias added to the logit before the sigmoid.
        self.bias = paddle.create_parameter(
            shape=[1],
            dtype='float32',
            default_initializer=paddle.nn.initializer.Constant(value=0.0))

    def forward(self, userid, hist_item_seq, hist_cat_seq, position_seq,
                target_item, target_cat, target_position):
        """Return the predicted click probability (sigmoid of logit + bias)."""
        y_bst = self.bst.forward(userid, hist_item_seq, hist_cat_seq,
                                 position_seq, target_item, target_cat,
                                 target_position)
        predict = paddle.nn.functional.sigmoid(y_bst + self.bias)
        return predict
class BST(paddle.nn.Layer):
    """Behavior Sequence Transformer backbone.

    Embeds (item, category, position) triples for the user's history and the
    candidate target, runs them through a single-head-group transformer
    encoder layer, and reduces the result with an MLP to one logit per
    example.

    NOTE(review): self.is_sparse is stored but every Embedding below is built
    with sparse=False — confirm whether the config flag should be honored.
    """

    def __init__(self, user_count, item_emb_size, cat_emb_size,
                 position_emb_size, act, is_sparse, use_DataLoader, item_count,
                 cat_count, position_count, n_encoder_layers, d_model, d_key,
                 d_value, n_head, dropout_rate, postprocess_cmd,
                 preprocess_cmd, prepostprocess_dropout, d_inner_hid,
                 relu_dropout, layer_sizes):
        super(BST, self).__init__()
        self.item_emb_size = item_emb_size
        self.cat_emb_size = cat_emb_size
        self.position_emb_size = position_emb_size
        self.act = act
        self.is_sparse = is_sparse
        # significant for speeding up the training process
        self.use_DataLoader = use_DataLoader
        self.item_count = item_count
        self.cat_count = cat_count
        self.user_count = user_count
        self.position_count = position_count
        self.n_encoder_layers = n_encoder_layers
        self.d_model = d_model
        self.d_key = d_key
        self.d_value = d_value
        self.n_head = n_head
        self.dropout_rate = dropout_rate
        self.postprocess_cmd = postprocess_cmd
        self.preprocess_cmd = preprocess_cmd
        self.prepostprocess_dropout = prepostprocess_dropout
        self.d_inner_hid = d_inner_hid
        self.relu_dropout = relu_dropout
        self.layer_sizes = layer_sizes
        # Embedding tables: all initialized with truncated normal, std scaled
        # by 1/sqrt(emb_size) around init_value_.
        init_value_ = 0.1
        self.hist_item_emb_attr = paddle.nn.Embedding(
            self.item_count,
            self.item_emb_size,
            sparse=False,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=init_value_ / math.sqrt(float(self.item_emb_size)))))
        self.hist_cat_emb_attr = paddle.nn.Embedding(
            self.cat_count,
            self.cat_emb_size,
            sparse=False,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=init_value_ / math.sqrt(float(self.cat_emb_size)))))
        self.hist_position_emb_attr = paddle.nn.Embedding(
            self.position_count,
            self.position_emb_size,
            sparse=False,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=init_value_ /
                    math.sqrt(float(self.position_emb_size)))))
        # Separate tables for the candidate target; history and target do NOT
        # share embedding weights.
        self.target_item_emb_attr = paddle.nn.Embedding(
            self.item_count,
            self.item_emb_size,
            sparse=False,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=init_value_ / math.sqrt(float(self.item_emb_size)))))
        self.target_cat_emb_attr = paddle.nn.Embedding(
            self.cat_count,
            self.cat_emb_size,
            sparse=False,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=init_value_ / math.sqrt(float(self.cat_emb_size)))))
        self.target_position_emb_attr = paddle.nn.Embedding(
            self.position_count,
            self.position_emb_size,
            sparse=False,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=init_value_ /
                    math.sqrt(float(self.position_emb_size)))))
        # User embedding has width d_model so it can be concatenated with the
        # encoder output along the sequence axis in forward().
        self.userid_attr = paddle.nn.Embedding(
            self.user_count,
            self.d_model,
            sparse=False,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.TruncatedNormal(
                    mean=0.0,
                    std=init_value_ / math.sqrt(float(self.d_model)))))
        # Final MLP: d_model -> layer_sizes... -> 1, LeakyReLU between layers.
        self._dnn_layers = []
        sizes = [d_model] + layer_sizes + [1]
        acts = ["relu" for _ in range(len(layer_sizes))] + [None]
        for i in range(len(layer_sizes) + 1):
            linear = paddle.nn.Linear(
                in_features=sizes[i],
                out_features=sizes[i + 1],
                weight_attr=paddle.ParamAttr(
                    initializer=paddle.nn.initializer.Normal(
                        std=0.1 / math.sqrt(sizes[i]))))
            self.add_sublayer('dnn_linear_%d' % i, linear)
            self._dnn_layers.append(linear)
            if acts[i] == 'relu':
                act = paddle.nn.LeakyReLU()
                self.add_sublayer('dnn_act_%d' % i, act)
                self._dnn_layers.append(act)
        self.drop_out = paddle.nn.Dropout(p=dropout_rate)
        # Position-wise feed-forward: d_model -> d_inner_hid -> d_model.
        self.pff_layer = []
        hid_linear = paddle.nn.Linear(
            in_features=self.d_model,
            out_features=self.d_inner_hid,
            weight_attr=paddle.ParamAttr(
                #regularizer=L2Decay(coeff=0.0001),
                initializer=paddle.nn.initializer.Normal(
                    std=0.1 / math.sqrt(self.d_inner_hid))))
        self.add_sublayer('hid_l', hid_linear)
        self.pff_layer.append(hid_linear)
        m = paddle.nn.LeakyReLU()
        self.pff_layer.append(m)
        hid2_linear = paddle.nn.Linear(
            in_features=self.d_inner_hid,
            out_features=self.d_model,
            weight_attr=paddle.ParamAttr(
                #regularizer=L2Decay(coeff=0.0001),
                initializer=paddle.nn.initializer.Normal(
                    std=0.1 / math.sqrt(self.d_model))))
        self.add_sublayer('hid2_l', hid2_linear)
        self.pff_layer.append(hid2_linear)
        # Attention projections: [0]=Q, [1]=K, [2]=V, [3]=output projection.
        self.compute_qkv_layer = []
        q_linear = paddle.nn.Linear(
            in_features=self.d_model,
            out_features=d_key * n_head,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(std=0.1 /
                                                         math.sqrt(d_model))))
        self.add_sublayer("q_liner", q_linear)
        self.compute_qkv_layer.append(q_linear)
        k_linear = paddle.nn.Linear(
            in_features=self.d_model,
            out_features=d_key * n_head,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(std=0.1 /
                                                         math.sqrt(d_key))))
        self.add_sublayer("k_liner", k_linear)
        self.compute_qkv_layer.append(k_linear)
        v_linear = paddle.nn.Linear(
            in_features=self.d_model,
            out_features=d_value * n_head,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(std=0.1 /
                                                         math.sqrt(d_value))))
        self.add_sublayer("v_liner", v_linear)
        self.compute_qkv_layer.append(v_linear)
        po_linear = paddle.nn.Linear(
            in_features=d_model,
            out_features=d_model,
            weight_attr=paddle.ParamAttr(
                #regularizer=L2Decay(coeff=0.0001),
                initializer=paddle.nn.initializer.Normal(std=0.1 /
                                                         math.sqrt(d_model))))
        self.add_sublayer("po_liner", po_linear)
        self.compute_qkv_layer.append(po_linear)

    def positionwise_feed_forward(self, x, dropout_rate):
        """
        Position-wise Feed-Forward Networks.
        This module consists of two linear transformations with a ReLU activation
        in between, which is applied to each position separately and identically.
        """
        pff_input = x
        for _layer in self.pff_layer:
            pff_input = _layer(pff_input)
        # Dropout applied after the second linear, only when rate is nonzero.
        if dropout_rate:
            pff_input = self.drop_out(pff_input)
        return pff_input

    def pre_post_process_layer_(self,
                                prev_out,
                                out,
                                process_cmd,
                                dropout_rate=0.5):
        """
        Add residual connection, layer normalization and droput to the out tensor
        optionally according to the value of process_cmd.
        This will be used before or after multi-head attention and position-wise
        feed-forward networks.
        """
        # Residual connection is always applied here (unlike the non-underscore
        # variant, which only normalizes/drops out).
        out = paddle.add(out, prev_out)
        for cmd in process_cmd:
            if cmd == "n":  # add layer normalization
                # NOTE(review): paddle.static.nn.layer_norm is a static-graph
                # API invoked from a dygraph Layer — confirm this runs under
                # the intended execution mode.
                out = paddle.static.nn.layer_norm(
                    out,
                    begin_norm_axis=len(out.shape) - 1,
                    param_attr=paddle.nn.initializer.Constant(value=1.0),
                    bias_attr=paddle.nn.initializer.Constant(value=0.0))
            elif cmd == "d":  # add dropout
                if dropout_rate:
                    out = self.drop_out(out)
        return out

    def pre_post_process_layer(self, out, process_cmd, dropout_rate=0.5):
        """
        Add residual connection, layer normalization and droput to the out tensor
        optionally according to the value of process_cmd.
        This will be used before or after multi-head attention and position-wise
        feed-forward networks.
        """
        for cmd in process_cmd:
            if cmd == "a":  # add residual connection
                # NOTE(review): this branch is a no-op (no prev_out available
                # in this variant) — the "a" command does not actually add a
                # residual here.
                out = out
            elif cmd == "n":  # add layer normalization
                out = paddle.static.nn.layer_norm(
                    out,
                    begin_norm_axis=len(out.shape) - 1,
                    param_attr=paddle.nn.initializer.Constant(value=1.0),
                    bias_attr=paddle.nn.initializer.Constant(value=0.0))
            elif cmd == "d":  # add dropout
                if dropout_rate:
                    out = self.drop_out(out)
        return out

    def multi_head_attention(self, queries, keys, values, d_key, d_value,
                             d_model, n_head, dropout_rate):
        """Multi-head (self-)attention over [batch, seq, d_model] tensors.

        When keys/values are None this degenerates to self-attention on
        ``queries``.
        """
        keys = queries if keys is None else keys
        values = keys if values is None else values
        #print(keys.shape)
        if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3
                ):
            raise ValueError(
                "Inputs: quries, keys and values should all be 3-D tensors.")

        def __compute_qkv(queries, keys, values):
            """
            Add linear projection to queries, keys, and values.
            """
            q = self.compute_qkv_layer[0](queries)
            k = self.compute_qkv_layer[1](keys)
            v = self.compute_qkv_layer[2](values)
            return q, k, v

        def __split_heads_qkv(queries, keys, values, n_head, d_key, d_value):
            """
            Reshape input tensors at the last dimension to split multi-heads
            and then transpose. Specifically, transform the input tensor with shape
            [bs, max_sequence_length, n_head * hidden_dim] to the output tensor
            with shape [bs, n_head, max_sequence_length, hidden_dim].
            """
            # The value 0 in shape attr means copying the corresponding dimension
            # size of the input as the output dimension size.
            reshaped_q = paddle.reshape(x=queries, shape=[0, 0, n_head, d_key])
            # permuate the dimensions into:
            # [batch_size, n_head, max_sequence_len, hidden_size_per_head]
            q = paddle.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
            # For encoder-decoder attention in inference, insert the ops and vars
            # into global block to use as cache among beam search.
            reshaped_k = paddle.reshape(x=keys, shape=[0, 0, n_head, d_key])
            k = paddle.transpose(x=reshaped_k, perm=[0, 2, 1, 3])
            reshaped_v = paddle.reshape(
                x=values, shape=[0, 0, n_head, d_value])
            v = paddle.transpose(x=reshaped_v, perm=[0, 2, 1, 3])
            return q, k, v

        def scaled_dot_product_attention(q, k, v, d_key, dropout_rate):
            """
            Scaled Dot-Product Attention
            """
            # NOTE(review): despite the name, the product is NOT scaled by
            # 1/sqrt(d_key) here (and d_key is unused) — confirm whether the
            # omission is intentional.
            product = paddle.matmul(x=q, y=k, transpose_y=True)
            weights = paddle.nn.functional.softmax(x=product)
            if dropout_rate:
                weights = self.drop_out(weights)
            out = paddle.matmul(x=weights, y=v)
            return out

        def __combine_heads(x):
            """
            Transpose and then reshape the last two dimensions of inpunt tensor x
            so that it becomes one dimension, which is reverse to __split_heads.
            """
            if len(x.shape) != 4:
                raise ValueError("Input(x) should be a 4-D Tensor.")
            trans_x = paddle.transpose(x, perm=[0, 2, 1, 3])
            # The value 0 in shape attr means copying the corresponding dimension
            # size of the input as the output dimension size.
            return paddle.reshape(
                x=trans_x, shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]])

        q, k, v = __compute_qkv(queries, keys, values)
        q, k, v = __split_heads_qkv(q, k, v, n_head, d_key, d_value)
        # NOTE(review): d_model is passed where the helper declares d_key; the
        # argument is unused inside, so behavior is unaffected today.
        ctx_multiheads = scaled_dot_product_attention(q, k, v, d_model,
                                                      dropout_rate)
        out = __combine_heads(ctx_multiheads)
        # Final output projection back to d_model.
        proj_out = self.compute_qkv_layer[3](out)
        return proj_out

    def encoder_layer(self, x):
        """One transformer encoder layer: self-attention + feed-forward,
        each wrapped with the configured pre/post-processing commands."""
        attention_out = self.multi_head_attention(
            self.pre_post_process_layer(x, self.preprocess_cmd,
                                        self.prepostprocess_dropout), None,
            None, self.d_key, self.d_value, self.d_model, self.n_head,
            self.dropout_rate)
        attn_output = self.pre_post_process_layer_(x, attention_out,
                                                   self.postprocess_cmd,
                                                   self.prepostprocess_dropout)
        ffd_output = self.positionwise_feed_forward(attn_output,
                                                    self.dropout_rate)
        # NOTE(review): preprocess_cmd is used for the final post-processing
        # step here; confirm whether postprocess_cmd was intended.
        return self.pre_post_process_layer_(attn_output, ffd_output,
                                            self.preprocess_cmd,
                                            self.prepostprocess_dropout)

    def forward(self, userid, hist_item_seq, hist_cat_seq, position_seq,
                target_item, target_cat, target_position):
        """Return one logit per example.

        History and target embeddings are concatenated feature-wise (axis=2),
        then stacked sequence-wise (axis=1), encoded, prefixed with the user
        embedding, pushed through the MLP and summed over the sequence axis.
        """
        user_emb = self.userid_attr(userid)
        hist_item_emb = self.hist_item_emb_attr(hist_item_seq)
        hist_cat_emb = self.hist_cat_emb_attr(hist_cat_seq)
        hist_position_emb = self.hist_position_emb_attr(position_seq)
        target_item_emb = self.target_item_emb_attr(target_item)
        target_cat_emb = self.target_cat_emb_attr(target_cat)
        target_position_emb = self.target_position_emb_attr(target_position)
        item_sequence = paddle.concat(
            [hist_item_emb, hist_cat_emb, hist_position_emb], axis=2)
        target_sequence = paddle.concat(
            [target_item_emb, target_cat_emb, target_position_emb], axis=2)
        #print(position_sequence_target.shape)
        whole_embedding = paddle.concat(
            [item_sequence, target_sequence], axis=1)
        #print(whole_embedding)
        enc_output = whole_embedding
        '''
        for _ in range(self.n_encoder_layers):
            enc_output = self.encoder_layer(enc_output)
        '''
        # NOTE(review): only ONE encoder layer is applied; the
        # n_encoder_layers loop above is commented out.
        enc_output = self.encoder_layer(enc_output)
        enc_output = self.pre_post_process_layer(
            enc_output, self.preprocess_cmd, self.prepostprocess_dropout)
        _concat = paddle.concat([user_emb, enc_output], axis=1)
        dnn_input = _concat
        for n_layer in self._dnn_layers:
            dnn_input = n_layer(dnn_input)
        # Collapse the sequence axis to a single logit per example.
        dnn_input = paddle.sum(x=dnn_input, axis=1)
        return dnn_input
| 41.633188 | 83 | 0.5912 |
7d5e2794ee572ecc4d01dd5e16732f08e5e84624 | 5,765 | py | Python | setup.py | gemsouls/scalene | 15722c9c3e101bce9bc7c1fb2cd7727f0d7d7662 | [
"Apache-2.0"
] | null | null | null | setup.py | gemsouls/scalene | 15722c9c3e101bce9bc7c1fb2cd7727f0d7d7662 | [
"Apache-2.0"
] | null | null | null | setup.py | gemsouls/scalene | 15722c9c3e101bce9bc7c1fb2cd7727f0d7d7662 | [
"Apache-2.0"
from setuptools import setup, find_packages
from setuptools.extension import Extension
from scalene.scalene_version import scalene_version
from os import path, environ
import sys
# On macOS, ensure the deployment target is at least 10.9 before building;
# otherwise re-exec this setup.py with MACOSX_DEPLOYMENT_TARGET=10.9.
if sys.platform == 'darwin':
    import sysconfig
    mdt = 'MACOSX_DEPLOYMENT_TARGET'
    # Environment override wins; otherwise fall back to the interpreter's
    # build-time value.
    target = environ[mdt] if mdt in environ else sysconfig.get_config_var(mdt)
    # target >= 10.9 is required for gcc/clang to find libstdc++ headers
    if [int(n) for n in target.split('.')] < [10, 9]:
        from os import execve
        newenv = environ.copy()
        newenv[mdt] = '10.9'
        # Replace the current process: re-run setup.py under the fixed env.
        execve(sys.executable, [sys.executable] + sys.argv, newenv)
def multiarch_args():
    """Return compiler flags requesting multi-architecture support.

    On macOS we build "universal2" binaries covering both x86_64 and
    arm64/M1; every other platform needs no extra flags.
    """
    if sys.platform != 'darwin':
        return []
    return ['-arch', 'x86_64', '-arch', 'arm64']
def extra_compile_args():
    """Return per-platform extra compiler args (C++14 everywhere)."""
    if sys.platform == 'win32':
        # Visual Studio C++ spells the standard-version flag differently.
        return ['/std:c++14']
    # GCC/Clang flag plus any macOS universal-binary arch flags.
    return ['-std=c++14'] + multiarch_args()
def make_command():
    """Return the build tool to invoke.

    Deliberately always 'make': 'nmake' is not found on GitHub Actions'
    Windows VMs, so we do not special-case win32 here.
    """
    return 'make'
def dll_suffix():
    """Return the file suffix ("extension") of a DLL on this platform."""
    # Windows -> .dll, macOS -> .dylib, everything else (Linux, BSD) -> .so
    return {'win32': '.dll', 'darwin': '.dylib'}.get(sys.platform, '.so')
def read_file(name):
    """Return the contents of *name*, resolved relative to this file."""
    full_path = path.join(path.dirname(__file__), name)
    with open(full_path, encoding="utf-8") as handle:
        return handle.read()
import setuptools.command.egg_info
class EggInfoCommand(setuptools.command.egg_info.egg_info):
    """Custom command to download vendor libs before creating the egg_info."""
    def run(self):
        # Fetch vendored third-party sources first so they are present when
        # the metadata (and later the sdist) is assembled.
        self.spawn([make_command(), 'vendor-deps'])
        super().run()
import setuptools.command.build_ext
class BuildExtCommand(setuptools.command.build_ext.build_ext):
    """Custom command that runs 'make' to generate libscalene."""
    def run(self):
        super().run()
        # No build of DLL for Windows currently.
        if sys.platform != 'win32':
            self.build_libscalene()
    def build_libscalene(self):
        # Build the shared library into build_temp, then copy it into the
        # package directory inside build_lib (and the source tree for
        # editable/in-place builds).
        scalene_temp = path.join(self.build_temp, 'scalene')
        scalene_lib = path.join(self.build_lib, 'scalene')
        libscalene = 'libscalene' + dll_suffix()
        self.mkpath(scalene_temp)
        self.mkpath(scalene_lib)
        # ARCH carries the macOS universal-binary flags (empty elsewhere).
        self.spawn([make_command(), 'OUTDIR=' + scalene_temp,
                    'ARCH=' + ' '.join(multiarch_args())])
        self.copy_file(path.join(scalene_temp, libscalene),
                       path.join(scalene_lib, libscalene))
        if self.inplace:
            self.copy_file(path.join(scalene_lib, libscalene),
                           path.join('scalene', libscalene))
# C++ extension module built against the vendored Heap-Layers headers.
# py_limited_api keeps the wheel binary-compatible across CPython versions.
get_line_atomic = Extension('scalene.get_line_atomic',
                            include_dirs=['.', 'vendor/Heap-Layers', 'vendor/Heap-Layers/utility'],
                            sources=['src/source/get_line_atomic.cpp'],
                            extra_compile_args=extra_compile_args(),
                            extra_link_args=multiarch_args(),
                            py_limited_api=True,  # for binary compatibility
                            language="c++"
                            )
# if TWINE_REPOSITORY=testpypi, we're testing packaging. Build using a ".devN"
# (monotonically increasing, not too big) suffix in the version number, so that
# we can upload new files (as testpypi/pypi don't allow re-uploading files with
# the same name as previously uploaded).
# Idiomatic single lookup instead of `in` + indexing.
testing = environ.get('TWINE_REPOSITORY') == 'testpypi'
if testing:
    import subprocess
    import time
    # Commit time (epoch seconds) of the last change to the version file;
    # minutes since then drive the monotonically increasing ".devN" suffix.
    version_timestamp = int(subprocess.check_output(["git", "log", "-1", "--format=%ct",
                                                     "scalene/scalene_version.py"]))
    mins_since_version = (time.time() - version_timestamp)/60
# Package metadata and build wiring. Note the dev-suffix trick for testpypi
# uploads and the Python 3.7 floor on non-Windows platforms.
setup(
    name="scalene",
    version=scalene_version + (f'.dev{int(mins_since_version/5)}' if testing else ''),
    description="Scalene: A high-resolution, low-overhead CPU, GPU, and memory profiler for Python",
    keywords="performance memory profiler",
    long_description=read_file("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/emeryberger/scalene",
    author="Emery Berger",
    author_email="emery@cs.umass.edu",
    license="Apache License 2.0",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: IPython",
        "Framework :: Jupyter",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Software Development",
        "Topic :: Software Development :: Debuggers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3"
    # Python 3.7 is only supported off-Windows (matches python_requires below).
    ] + (["Programming Language :: Python :: 3.7"] if sys.platform != 'win32' else []) +
    [
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows :: Windows 10"
    ],
    packages=find_packages(),
    # Hook the custom vendor-deps fetch and libscalene build into the
    # standard setuptools pipeline.
    cmdclass={
        'egg_info': EggInfoCommand,
        'build_ext': BuildExtCommand,
    },
    install_requires=[
        "rich>=9.2.10",
        "cloudpickle>=1.5.0",
        "nvidia-ml-py==11.450.51",
        "numpy"
    ],
    ext_modules=[get_line_atomic],
    setup_requires=['setuptools_scm'],
    include_package_data=True,
    entry_points={"console_scripts": ["scalene = scalene.__main__:main"]},
    python_requires=">=3.7" if sys.platform != 'win32' else ">=3.8",
)
| 39.486301 | 103 | 0.644926 |
1f2ef02debc24584ee9badc1f20840af28b26341 | 1,235 | py | Python | Bugscan_exploits-master/exp_list/exp-2629.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-2629.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-2629.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/evn python
#-*-:coding:utf-8 -*-
#Author:404
#Name:璐华通用企业版OA系统SQL注入4处(完美绕过感谢林)4
#Refer:http://www.wooyun.org/bugs/wooyun-2010-0104430
def assign(service, arg):
    """Accept only tasks for the 'ruvar_oa' service (else return None)."""
    if service != "ruvar_oa":
        return None
    return True, arg
def audit(arg):
    """Probe four endpoints for error-based SQL injection.

    Each path injects an expression concatenating a marker with @@version;
    a 500 response echoing the marker confirms the vulnerability.
    """
    injected_paths = (
        "OnlineChat/chat_show.aspx?id=char(71)%2Bchar(65)%2Bchar(79)%2Bchar(32)%2Bchar(74)%2Bchar(73)%2Bchar(64)%2B@@version",
        "WorkFlow/wf_work_print.aspx?idlist=char(71)%2Bchar(65)%2Bchar(79)%2Bchar(32)%2Bchar(74)%2Bchar(73)%2Bchar(64)%2B@@version",
        "OnlineChat/chatroom_show.aspx?id=char(71)%2Bchar(65)%2Bchar(79)%2Bchar(32)%2Bchar(74)%2Bchar(73)%2Bchar(64)%2B@@version",
        "OnlineReport/get_condiction.aspx?t_id=char(71)%2Bchar(65)%2Bchar(79)%2Bchar(32)%2Bchar(74)%2Bchar(73)%2Bchar(64)%2B@@version",
    )
    for injected in injected_paths:
        target = arg + injected
        code, head, res, errcode, _ = curl.curl2(target)
        # char(71..64) decodes to "GAO JI@"; seeing it in a 500 page means the
        # injected expression was evaluated by SQL Server.
        if code == 500 and "GAO JI@Microsoft" in res:
            security_hole(target)
if __name__=="__main__":
from dummy import *
audit(assign('ruvar_oa','http://oa.gdjierong.com:8090/')[1])
audit(assign('ruvar_oa','http://oa.mingshiedu.com:801/')[1])
audit(assign('ruvar_oa','http://oa.pku-ioe.cn/')[1]) | 39.83871 | 136 | 0.634008 |
742a4e2f83a3c81dc674061192644b1130dd6b34 | 1,798 | py | Python | zabbix-proxy/molecule/default/tests/test_default.py | danielcastropalomares/ansible | f2e93f24841f12f3c7ec9d2e812a752000809f80 | [
"MIT"
] | null | null | null | zabbix-proxy/molecule/default/tests/test_default.py | danielcastropalomares/ansible | f2e93f24841f12f3c7ec9d2e812a752000809f80 | [
"MIT"
] | null | null | null | zabbix-proxy/molecule/default/tests/test_default.py | danielcastropalomares/ansible | f2e93f24841f12f3c7ec9d2e812a752000809f80 | [
"MIT"
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_zabbixproxy_running_and_enabled(Service, SystemInfo):
    """The zabbix-proxy service must be running (enablement check disabled)."""
    proxy_service = Service("zabbix-proxy")
    # assert proxy_service.is_enabled
    # Ubuntu hosts are excluded from the running check.
    if SystemInfo.distribution != 'ubuntu':
        assert proxy_service.is_running
@pytest.mark.parametrize("proxy", [
    ("zabbix-proxy-pgsql"),
    ("zabbix-proxy-mysql"),
])
def test_zabbix_package(Package, TestinfraBackend, proxy, SystemInfo):
    """The DB-flavored proxy package matching the hostname is installed at 3.4."""
    host = TestinfraBackend.get_hostname()
    for distro_suffix in ("-centos", "-debian"):
        host = host.replace(distro_suffix, "")
    # Only check the package that corresponds to this host's flavor.
    if host != proxy:
        return
    zabbix_proxy = Package(proxy)
    assert zabbix_proxy.is_installed
    if SystemInfo.distribution in ['debian', 'ubuntu']:
        # Debian-family versions carry an epoch prefix.
        assert zabbix_proxy.version.startswith("1:3.4")
    elif SystemInfo.distribution == 'centos':
        assert zabbix_proxy.version.startswith("3.4")
def test_socket(Socket):
    # The proxy must listen on the default trapper port on all interfaces.
    assert Socket("tcp://0.0.0.0:10051").is_listening
def test_zabbix_proxy_dot_conf(File):
    """Ownership, mode and key settings of /etc/zabbix/zabbix_proxy.conf."""
    conf_file = File("/etc/zabbix/zabbix_proxy.conf")
    assert conf_file.user == "zabbix"
    assert conf_file.group == "zabbix"
    assert conf_file.mode == 0o644
    for expected_setting in ("ListenPort=10051",
                             "DBHost=localhost",
                             "DebugLevel=3"):
        assert conf_file.contains(expected_setting)
def test_zabbix_include_dir(File):
    """The include directory exists with zabbix ownership and mode 0755."""
    include_dir = File("/etc/zabbix/zabbix_proxy.conf.d")
    assert include_dir.is_directory
    assert include_dir.user == "zabbix"
    assert include_dir.group == "zabbix"
    assert include_dir.mode == 0o755
5a5fb91e4a1c825c2059594622e275f20eed09ed | 8,564 | py | Python | replicator-schema-translation/docs/conf.py | mailnavn/examples | 01932d208287c8fc2594402c844ee6b311ac6632 | [
"Apache-2.0"
] | null | null | null | replicator-schema-translation/docs/conf.py | mailnavn/examples | 01932d208287c8fc2594402c844ee6b311ac6632 | [
"Apache-2.0"
] | 1 | 2021-05-11T11:07:00.000Z | 2021-05-11T11:07:00.000Z | replicator-schema-translation/docs/conf.py | mailnavn/examples | 01932d208287c8fc2594402c844ee6b311ac6632 | [
"Apache-2.0"
# -*- coding: utf-8 -*-
#
# CP Demo documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 17 14:17:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.ifconfig', 'sphinxcontrib.httpdomain']
def setup(app):
    # Register the 'platform_docs' config flag so `.. ifconfig::` blocks can
    # branch on it; 'env' rebuilds affected documents when the value changes.
    app.add_config_value('platform_docs', True, 'env')
# Even if it has a default, these options need to be specified
platform_docs = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Schema Translation Demo'
copyright = u'2020, Confluent, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '6.1'
# The full version, including alpha/beta/rc tags.
# Keep version/release in sync with the Confluent Platform release line.
release = '6.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']  # never scan build output as source
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'  # default Sphinx code-highlighting palette
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# Resolve the Read the Docs theme from its installed package location.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SchemaRegistryDoc'
# -- Options for LaTeX output ---------------------------------------------
# (Sphinx template boilerplate; only latex_documents below is customized.)
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'SchemaRegistry.tex', u'Schema Translation Demo Documentation',
   u'Confluent, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Single man page generated from the master doc; section 1 = user commands.
man_pages = [
    ('index', 'schemaregistry', u'Schema Translation Demo Documentation',
     [u'Confluent, Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Single Texinfo document generated from the master doc.
texinfo_documents = [
  ('index', 'SchemaRegistry', u'Schema Translation Demo Documentation',
   u'Confluent, Inc.', 'SchemaRegistry', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.074906 | 79 | 0.71894 |
33b8a287a3d0fb95f16a6582da1ca4310ae65948 | 22,419 | py | Python | scripts/reader/train.py | tyzoh/DrQA-SHINRA | b9bac9e2c4b1d4481c4ecad8ac11d8ae5354269f | [
"BSD-3-Clause"
] | 5 | 2019-06-24T06:35:54.000Z | 2021-06-07T04:38:38.000Z | scripts/reader/train.py | tyzoh/DrQA-SHINRA | b9bac9e2c4b1d4481c4ecad8ac11d8ae5354269f | [
"BSD-3-Clause"
] | 1 | 2020-06-02T08:15:49.000Z | 2020-07-27T07:05:49.000Z | scripts/reader/train.py | tyzoh/DrQA-SHINRA | b9bac9e2c4b1d4481c4ecad8ac11d8ae5354269f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
# Copyright 2019 Nihon Unisys, Ltd.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Main DrQA reader training script."""
import argparse
import torch
import numpy as np
import json
import os
import sys
import subprocess
import logging
from drqa.reader import utils, vector, config, data
from drqa.reader import DocReader
from drqa import DATA_DIR as DRQA_DATA
# Root logger; handlers and level are configured in the __main__ block below.
logger = logging.getLogger()
# ------------------------------------------------------------------------------
# Training arguments.
# ------------------------------------------------------------------------------
# Defaults
DATA_DIR = os.path.join(DRQA_DATA, 'datasets')
MODEL_DIR = '/tmp/drqa-models/'
EMBED_DIR = os.path.join(DRQA_DATA, 'embeddings')
def str2bool(v):
    """Interpret a commandline string as a boolean (truthy spellings only)."""
    truthy = {'yes', 'true', 't', '1', 'y'}
    return v.lower() in truthy
def add_train_args(parser):
    """Adds commandline arguments pertaining to training a model. These
    are different from the arguments dictating the model architecture.
    """
    # Allow type='bool' in the add_argument calls below.
    parser.register('type', 'bool', str2bool)

    # Runtime environment
    runtime = parser.add_argument_group('Environment')
    runtime.add_argument('--no-cuda', type='bool', default=False,
                         help='Train on CPU, even if GPUs are available.')
    runtime.add_argument('--gpu', type=int, default=-1,
                         help='Run on a specific GPU')
    runtime.add_argument('--data-workers', type=int, default=5,
                         help='Number of subprocesses for data loading')
    runtime.add_argument('--parallel', type='bool', default=False,
                         help='Use DataParallel on all available GPUs')
    runtime.add_argument('--random-seed', type=int, default=1013,
                         help=('Random seed for all numpy/torch/cuda '
                               'operations (for reproducibility)'))
    runtime.add_argument('--num-epochs', type=int, default=40,
                         help='Train data iterations')
    runtime.add_argument('--batch-size', type=int, default=32,
                         help='Batch size for training')
    runtime.add_argument('--test-batch-size', type=int, default=128,
                         help='Batch size during validation/testing')

    # Files
    files = parser.add_argument_group('Filesystem')
    files.add_argument('--model-dir', type=str, default=MODEL_DIR,
                       help='Directory for saved models/checkpoints/logs')
    files.add_argument('--model-name', type=str, default='',
                       help='Unique model identifier (.mdl, .txt, .checkpoint)')
    files.add_argument('--data-dir', type=str, default=DATA_DIR,
                       help='Directory of training/validation data')
    files.add_argument('--train-file', type=str,
                       default='SQuAD-v1.1-train-processed-corenlp.txt',
                       help='Preprocessed train file')
    files.add_argument('--dev-file', type=str,
                       default='SQuAD-v1.1-dev-processed-corenlp.txt',
                       help='Preprocessed dev file')
    files.add_argument('--dev-json', type=str, default='SQuAD-v1.1-dev.json',
                       help=('Unprocessed dev file to run validation '
                             'while training on'))
    files.add_argument('--embed-dir', type=str, default=EMBED_DIR,
                       help='Directory of pre-trained embedding files')
    files.add_argument('--embedding-file', type=str,
                       default='glove.840B.300d.txt',
                       help='Space-separated pretrained embeddings file')

    # Saving + loading
    save_load = parser.add_argument_group('Saving/Loading')
    save_load.add_argument('--checkpoint', type='bool', default=False,
                           help='Save model + optimizer state after each epoch')
    save_load.add_argument('--pretrained', type=str, default='',
                           help='Path to a pretrained model to warm-start with')
    save_load.add_argument('--expand-dictionary', type='bool', default=False,
                           help='Expand dictionary of pretrained model to ' +
                                'include training/dev words of new data')

    # Data preprocessing
    preprocess = parser.add_argument_group('Preprocessing')
    preprocess.add_argument('--uncased-question', type='bool', default=False,
                            help='Question words will be lower-cased')
    preprocess.add_argument('--uncased-doc', type='bool', default=False,
                            help='Document words will be lower-cased')
    preprocess.add_argument('--restrict-vocab', type='bool', default=True,
                            help='Only use pre-trained words in embedding_file')

    # General
    general = parser.add_argument_group('General')
    general.add_argument('--official-eval', type='bool', default=True,
                         help='Validate with official SQuAD eval')
    general.add_argument('--valid-metric', type=str, default='f1',
                         help='The evaluation metric used for model selection')
    general.add_argument('--display-iter', type=int, default=25,
                         help='Log state after every <display_iter> epochs')
    general.add_argument('--sort-by-len', type='bool', default=True,
                         help='Sort batches by length for speed')
    general.add_argument('--shinra-eval', type='bool', default=False,
                         help='Validate for SHINRA dataset')
def set_defaults(args):
    """Make sure the commandline arguments are initialized properly.

    Resolves data paths relative to ``args.data_dir``/``args.embed_dir``,
    validates that all required input files exist, creates the model
    directory, derives the log/model file names, infers the embedding
    dimension from the embedding file, and reconciles the embedding
    tuning flags.

    Returns:
        The same ``args`` namespace, mutated in place.

    Raises:
        IOError: if any required input file is missing.
        RuntimeError: if neither embedding_file nor embedding_dim is set.
    """
    # Check critical files exist
    args.dev_json = os.path.join(args.data_dir, args.dev_json)
    if not os.path.isfile(args.dev_json):
        raise IOError('No such file: %s' % args.dev_json)
    args.train_file = os.path.join(args.data_dir, args.train_file)
    if not os.path.isfile(args.train_file):
        raise IOError('No such file: %s' % args.train_file)
    args.dev_file = os.path.join(args.data_dir, args.dev_file)
    if not os.path.isfile(args.dev_file):
        raise IOError('No such file: %s' % args.dev_file)
    if args.embedding_file:
        args.embedding_file = os.path.join(args.embed_dir, args.embedding_file)
        if not os.path.isfile(args.embedding_file):
            raise IOError('No such file: %s' % args.embedding_file)

    # Set model directory. Portable, race-free replacement for the old
    # `subprocess.call(['mkdir', '-p', ...])` shell-out.
    os.makedirs(args.model_dir, exist_ok=True)

    # Set model name: timestamp + short uuid if the user gave none.
    if not args.model_name:
        import uuid
        import time
        args.model_name = time.strftime("%Y%m%d-") + str(uuid.uuid4())[:8]

    # Set log + model file names
    args.log_file = os.path.join(args.model_dir, args.model_name + '.txt')
    args.model_file = os.path.join(args.model_dir, args.model_name + '.mdl')

    # Embeddings options: infer the dimension from the embedding file.
    if args.embedding_file:
        with open(args.embedding_file) as f:
            line = f.readline().rstrip().split(' ')
            if len(line) == 2:
                # word2vec-style "<vocab_size> <dim>" header line.
                dim = int(line[1])
            else:
                # No header: the first line is already a vector row, so use
                # it directly. (The old code read a *second* line here and
                # produced a bogus dim of 0 for single-vector files.)
                dim = len(line) - 1
        args.embedding_dim = dim
    elif not args.embedding_dim:
        raise RuntimeError('Either embedding_file or embedding_dim '
                           'needs to be specified.')

    # Make sure tune_partial and fix_embeddings are consistent.
    if args.tune_partial > 0 and args.fix_embeddings:
        logger.warning('WARN: fix_embeddings set to False as tune_partial > 0.')
        args.fix_embeddings = False

    # Make sure fix_embeddings and embedding_file are consistent
    if args.fix_embeddings:
        if not (args.embedding_file or args.pretrained):
            logger.warning('WARN: fix_embeddings set to False '
                           'as embeddings are random.')
            args.fix_embeddings = False

    return args
# ------------------------------------------------------------------------------
# Initalization from scratch.
# ------------------------------------------------------------------------------
def init_from_scratch(args, train_exs, dev_exs):
    """New model, new data, new dictionary.

    Builds the feature dictionary and word dictionary from the loaded
    train/dev examples, constructs a fresh DocReader, and optionally
    initializes its embedding table from ``args.embedding_file``.

    Returns:
        The newly constructed DocReader model.
    """
    # Create a feature dict out of the annotations in the data
    logger.info('-' * 100)
    logger.info('Generate features')
    feature_dict = utils.build_feature_dict(args, train_exs)
    logger.info('Num features = %d' % len(feature_dict))
    logger.info(feature_dict)

    # Build a dictionary from the data questions + words (train/dev splits)
    logger.info('-' * 100)
    logger.info('Build dictionary')
    word_dict = utils.build_word_dict(args, train_exs + dev_exs)
    logger.info('Num words = %d' % len(word_dict))

    # Initialize model
    model = DocReader(config.get_model_args(args), word_dict, feature_dict)

    # Load pretrained embeddings for words in dictionary
    if args.embedding_file:
        model.load_embeddings(word_dict.tokens(), args.embedding_file)

    return model
# ------------------------------------------------------------------------------
# Train loop.
# ------------------------------------------------------------------------------
def train(args, data_loader, model, global_stats):
    """Run through one epoch of model training with the provided data loader.

    Args:
        args: parsed commandline arguments.
        data_loader: torch DataLoader over batched training examples.
        model: DocReader instance to update.
        global_stats: dict with at least 'epoch' and a running 'timer'.
    """
    # Initialize meters + timers
    train_loss = utils.AverageMeter()
    epoch_time = utils.Timer()

    # Run one epoch
    for idx, ex in enumerate(data_loader):
        train_loss.update(*model.update(ex))

        if idx % args.display_iter == 0:
            logger.info('train: Epoch = %d | iter = %d/%d | ' %
                        (global_stats['epoch'], idx, len(data_loader)) +
                        'loss = %.6f | elapsed time = %.2f (s)' %
                        (train_loss.avg, global_stats['timer'].time()))
            # Reset so the next report shows the average since this report.
            train_loss.reset()

    logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %
                (global_stats['epoch'], epoch_time.time()))

    # Checkpoint (model + optimizer state) so training can be resumed.
    if args.checkpoint:
        model.checkpoint(args.model_file + '.checkpoint',
                         global_stats['epoch'] + 1)
# ------------------------------------------------------------------------------
# Validation loops. Includes both "unofficial" and "official" functions that
# use different metrics and implementations.
# ------------------------------------------------------------------------------
def validate_unofficial(args, data_loader, model, global_stats, mode):
    """Run one full unofficial validation.
    Unofficial = doesn't use SQuAD script.

    Computes independent start/end accuracies plus joint exact match over
    token-level span predictions.
    """
    eval_time = utils.Timer()
    start_acc = utils.AverageMeter()
    end_acc = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Make predictions
    examples = 0
    for ex in data_loader:
        batch_size = ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)
        # NOTE(review): assumes each batch tuple ends with
        # (..., target_s, target_e, ids) -- confirm against vector.batchify.
        target_s, target_e = ex[-3:-1]

        # We get metrics for independent start/end and joint start/end
        accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e)
        start_acc.update(accuracies[0], batch_size)
        end_acc.update(accuracies[1], batch_size)
        exact_match.update(accuracies[2], batch_size)

        # If getting train accuracies, sample max 10k
        examples += batch_size
        if mode == 'train' and examples >= 1e4:
            break

    logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' %
                (mode, global_stats['epoch'], start_acc.avg) +
                'end = %.2f | exact = %.2f | examples = %d | ' %
                (end_acc.avg, exact_match.avg, examples) +
                'valid time = %.2f (s)' % eval_time.time())

    return {'exact_match': exact_match.avg}
def validate_official(args, data_loader, model, global_stats,
                      offsets, texts, answers):
    """Run one full official validation. Uses exact spans and same
    exact match/F1 score computation as in the SQuAD script.
    Extra arguments:
        offsets: The character start/end indices for the tokens in each context.
        texts: Map of qid --> raw text of examples context (matches offsets).
        answers: Map of qid --> list of accepted answers.
    """
    eval_time = utils.Timer()
    f1 = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Run through examples
    examples = 0
    for ex in data_loader:
        ex_id, batch_size = ex[-1], ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)

        for i in range(batch_size):
            # Map the top-1 token span back to character offsets in the raw
            # context, then slice out the predicted answer string.
            s_offset = offsets[ex_id[i]][pred_s[i][0]][0]
            e_offset = offsets[ex_id[i]][pred_e[i][0]][1]
            prediction = texts[ex_id[i]][s_offset:e_offset]

            # Compute metrics (max over all accepted gold answers)
            ground_truths = answers[ex_id[i]]
            exact_match.update(utils.metric_max_over_ground_truths(
                utils.exact_match_score, prediction, ground_truths))
            f1.update(utils.metric_max_over_ground_truths(
                utils.f1_score, prediction, ground_truths))

        examples += batch_size

    logger.info('dev valid official: Epoch = %d | EM = %.2f | ' %
                (global_stats['epoch'], exact_match.avg * 100) +
                'F1 = %.2f | examples = %d | valid time = %.2f (s)' %
                (f1.avg * 100, examples, eval_time.time()))

    return {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100}
def eval_accuracies(pred_s, target_s, pred_e, target_e):
    """An unofficial evaluation helper.

    Compute exact start/end/complete match accuracies for a batch.

    Args:
        pred_s: predicted start positions, one per example.
        target_s: gold start positions; 1D tensor or list of lists.
        pred_e: predicted end positions, one per example.
        target_e: gold end positions; 1D tensor or list of lists.

    Returns:
        Tuple ``(start, end, exact_match)`` of accuracies in percent.
    """
    # Convert 1D tensors to lists of lists (compatibility)
    if torch.is_tensor(target_s):
        target_s = [[e.item()] for e in target_s]
        target_e = [[e.item()] for e in target_e]

    batch_size = len(pred_s)
    if batch_size == 0:
        # Guard against division by zero on an empty batch.
        return 0.0, 0.0, 0.0

    # Count matches directly: the running-average meters the original used
    # reduce to plain hit ratios over a single batch.
    start_hits = 0
    end_hits = 0
    em_hits = 0
    for i in range(batch_size):
        # Start matches any accepted gold start position.
        if pred_s[i] in target_s[i]:
            start_hits += 1
        # End matches any accepted gold end position.
        if pred_e[i] in target_e[i]:
            end_hits += 1
        # Exact match requires start AND end to come from the same gold span.
        if any(_s == pred_s[i] and _e == pred_e[i]
               for _s, _e in zip(target_s[i], target_e[i])):
            em_hits += 1

    return (start_hits * 100.0 / batch_size,
            end_hits * 100.0 / batch_size,
            em_hits * 100.0 / batch_size)
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
def main(args):
    """End-to-end training driver.

    Loads data, builds or restores the model, constructs the data loaders,
    then alternates train/validate epochs while saving the best model
    according to ``args.valid_metric``.
    """
    # --------------------------------------------------------------------------
    # DATA
    logger.info('-' * 100)
    logger.info('Load data files')
    if args.multiple_answer:
        train_exs = utils.load_data(args, args.train_file, skip_no_answer=False)
    else:
        train_exs = utils.load_data(args, args.train_file, skip_no_answer=True)
    logger.info('Num train examples = %d' % len(train_exs))
    dev_exs = utils.load_data(args, args.dev_file)
    logger.info('Num dev examples = %d' % len(dev_exs))

    # If we are doing official evals then we need to:
    # 1) Load the original text to retrieve spans from offsets.
    # 2) Load the (multiple) text answers for each question.
    if args.official_eval:
        dev_texts = utils.load_text(args.dev_json)
        dev_offsets = {ex['id']: ex['offsets'] for ex in dev_exs}
        dev_answers = utils.load_answers(args.dev_json)
    if args.shinra_eval:
        # Imported lazily: only needed for the SHINRA-specific validation.
        import validate_shinra

    # --------------------------------------------------------------------------
    # MODEL
    logger.info('-' * 100)
    start_epoch = 0
    if args.checkpoint and os.path.isfile(args.model_file + '.checkpoint'):
        # Just resume training, no modifications.
        logger.info('Found a checkpoint...')
        checkpoint_file = args.model_file + '.checkpoint'
        model, start_epoch = DocReader.load_checkpoint(checkpoint_file, args)
    else:
        # Training starts fresh. But the model state is either pretrained or
        # newly (randomly) initialized.
        if args.pretrained:
            logger.info('Using pretrained model...')
            model = DocReader.load(args.pretrained, args)
            if args.expand_dictionary:
                logger.info('Expanding dictionary for new data...')
                # Add words in training + dev examples
                words = utils.load_words(args, train_exs + dev_exs)
                added = model.expand_dictionary(words)
                # Load pretrained embeddings for added words
                if args.embedding_file:
                    model.load_embeddings(added, args.embedding_file)
        else:
            logger.info('Training model from scratch...')
            model = init_from_scratch(args, train_exs, dev_exs)

        # Set up partial tuning of embeddings
        if args.tune_partial > 0:
            logger.info('-' * 100)
            logger.info('Counting %d most frequent question words' %
                        args.tune_partial)
            top_words = utils.top_question_words(
                args, train_exs, model.word_dict
            )
            for word in top_words[:5]:
                logger.info(word)
            logger.info('...')
            for word in top_words[-6:-1]:
                logger.info(word)
            model.tune_embeddings([w[0] for w in top_words])

        # Set up optimizer (skipped when resuming: state came from checkpoint)
        model.init_optimizer()

    # Use the GPU?
    if args.cuda:
        model.cuda()

    # Use multiple GPUs?
    if args.parallel:
        model.parallelize()

    # --------------------------------------------------------------------------
    # DATA ITERATORS
    # Two datasets: train and dev. If we sort by length it's faster.
    logger.info('-' * 100)
    logger.info('Make data loaders')
    if args.multiple_answer:
        train_dataset = data.ReaderDataset(train_exs, model, single_answer=False)
    else:
        train_dataset = data.ReaderDataset(train_exs, model, single_answer=True)
    if args.sort_by_len:
        train_sampler = data.SortedBatchSampler(train_dataset.lengths(),
                                                args.batch_size,
                                                shuffle=True)
    else:
        train_sampler = torch.utils.data.sampler.RandomSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.data_workers,
        collate_fn=vector.batchify,
        pin_memory=args.cuda,
    )
    dev_dataset = data.ReaderDataset(dev_exs, model, single_answer=False)
    if args.sort_by_len:
        dev_sampler = data.SortedBatchSampler(dev_dataset.lengths(),
                                              args.test_batch_size,
                                              shuffle=False)
    else:
        dev_sampler = torch.utils.data.sampler.SequentialSampler(dev_dataset)
    dev_loader = torch.utils.data.DataLoader(
        dev_dataset,
        batch_size=args.test_batch_size,
        sampler=dev_sampler,
        num_workers=args.data_workers,
        collate_fn=vector.batchify,
        pin_memory=args.cuda,
    )

    # -------------------------------------------------------------------------
    # PRINT CONFIG
    logger.info('-' * 100)
    logger.info('CONFIG:\n%s' %
                json.dumps(vars(args), indent=4, sort_keys=True))

    # --------------------------------------------------------------------------
    # TRAIN/VALID LOOP
    logger.info('-' * 100)
    logger.info('Starting training...')
    stats = {'timer': utils.Timer(), 'epoch': 0, 'best_valid': 0}
    for epoch in range(start_epoch, args.num_epochs):
        stats['epoch'] = epoch

        # Train
        train(args, train_loader, model, stats)

        if args.shinra_eval:
            validate_shinra.validate(args, train_loader, model, stats, mode='train')
            result = validate_shinra.validate(args, dev_loader, model, stats, mode='dev')
        else:
            # Validate unofficial (train)
            validate_unofficial(args, train_loader, model, stats, mode='train')

            # Validate unofficial (dev)
            result = validate_unofficial(args, dev_loader, model, stats, mode='dev')

            # Validate official (overrides the unofficial dev result)
            if args.official_eval:
                result = validate_official(args, dev_loader, model, stats,
                                           dev_offsets, dev_texts, dev_answers)

        # Save best valid
        if result[args.valid_metric] > stats['best_valid']:
            logger.info('Best valid: %s = %.2f (epoch %d, %d updates)' %
                        (args.valid_metric, result[args.valid_metric],
                         stats['epoch'], model.updates))
            model.save(args.model_file)
            stats['best_valid'] = result[args.valid_metric]
if __name__ == '__main__':
    # Parse cmdline args and setup environment
    parser = argparse.ArgumentParser(
        'DrQA Document Reader',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    add_train_args(parser)
    config.add_model_args(parser)
    args = parser.parse_args()
    set_defaults(args)

    # Set cuda
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        torch.cuda.set_device(args.gpu)

    # Set random state (numpy + torch + cuda, for reproducibility)
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    if args.cuda:
        torch.cuda.manual_seed(args.random_seed)

    # Set logging
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',
                            '%m/%d/%Y %I:%M:%S %p')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(console)
    if args.log_file:
        # Append when resuming from a checkpoint, otherwise start fresh.
        if args.checkpoint:
            logfile = logging.FileHandler(args.log_file, 'a')
        else:
            logfile = logging.FileHandler(args.log_file, 'w')
        logfile.setFormatter(fmt)
        logger.addHandler(logfile)
    logger.info('COMMAND: %s' % ' '.join(sys.argv))

    # Run!
    main(args)
942dbaff2c5f82057471a36878dbecf2f128b5b2 | 2,285 | py | Python | shelf/shelf/model.py | bitsandsalsa/shelf | b39ab893ccb42890121ce6820934e3e1e80e83d7 | [
"BSD-3-Clause"
] | null | null | null | shelf/shelf/model.py | bitsandsalsa/shelf | b39ab893ccb42890121ce6820934e3e1e80e83d7 | [
"BSD-3-Clause"
] | null | null | null | shelf/shelf/model.py | bitsandsalsa/shelf | b39ab893ccb42890121ce6820934e3e1e80e83d7 | [
"BSD-3-Clause"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
from shelf import app
db = SQLAlchemy(app)
def init_db():
    """Initializes the database.

    Creates all tables and seeds the Status and Format lookup tables with
    their initial values. Intended to run once against an empty database;
    rerunning it inserts the lookup rows again (no uniqueness constraint).
    """
    db.create_all()
    db.session.add_all([
        Status('done'),
        Status('pending'),
        Status('unread'),
        Format('conference paper'),
        Format('conference presentation'),
        Format('vendor whitepaper'),
    ])
    db.session.commit()
class Status(db.Model):
    """Lookup table for the reading status of an entry (done/pending/unread)."""

    id = db.Column(db.Integer, primary_key=True)
    status = db.Column(db.Text, nullable=False)

    def __init__(self, status):
        self.status = status

    def __repr__(self):
        return '<Status "%s">' % self.status
class Format(db.Model):
    """Lookup table for the publication format of an entry."""

    id = db.Column(db.Integer, primary_key=True)
    format = db.Column(db.Text, nullable=False)

    def __init__(self, format):
        self.format = format

    def __repr__(self):
        return '<Format "%s">' % self.format
class Document(db.Model):
    """A stored file on disk, identified by an external document id."""

    id = db.Column(db.Integer, primary_key=True)
    # External identifier used by Entry to reference this document.
    document_id = db.Column(db.Text, nullable=False, unique=True)
    # Name of the file as stored on the filesystem.
    fs_name = db.Column(db.Text, nullable=False)
    # Optional human-readable display name.
    friendly_name = db.Column(db.Text)

    def __init__(self, document_id, fs_name, friendly_name):
        self.document_id = document_id
        self.fs_name = fs_name
        self.friendly_name = friendly_name

    def __repr__(self):
        return ('<Document (document_id="%s", fs_name="%s", friendly_name="%s")>'
                % (self.document_id, self.fs_name, self.friendly_name))
class Entry(db.Model):
    """A bibliography entry: citation text plus status/format/document links."""

    id = db.Column(db.Integer, primary_key=True)
    status_id = db.Column(db.Integer, db.ForeignKey('status.id'))
    status = db.relationship('Status')
    format_id = db.Column(db.Integer, db.ForeignKey('format.id'))
    format = db.relationship('Format')
    citation = db.Column(db.Text, nullable=False)
    # BUG FIX: document.document_id is declared db.Text (see Document above),
    # but this foreign key column was declared db.Integer -- a type mismatch
    # that breaks the join on backends with strict typing. Match the referent.
    document_id = db.Column(db.Text, db.ForeignKey('document.document_id'))
    document = db.relationship('Document')
    summary = db.Column(db.Text)

    def __init__(self, status_id, format_id, citation, document_id, summary):
        self.status_id = status_id
        self.format_id = format_id
        self.citation = citation
        self.document_id = document_id
        self.summary = summary

    def __repr__(self):
        # Added for consistency with the sibling models.
        return '<Entry (id="{}", citation="{}")>'.format(self.id, self.citation)
| 29.294872 | 88 | 0.647265 |
76690576c0a4c8334a2286961461d784413092bd | 10,162 | py | Python | src/complementary_filter.py | 30sectomars/psas_testbot | 06954927c1d11be2e49359515c0b8f57f6960fd5 | [
"MIT"
] | 1 | 2020-02-26T07:29:17.000Z | 2020-02-26T07:29:17.000Z | src/complementary_filter.py | 30sectomars/psas_testbot | 06954927c1d11be2e49359515c0b8f57f6960fd5 | [
"MIT"
] | null | null | null | src/complementary_filter.py | 30sectomars/psas_testbot | 06954927c1d11be2e49359515c0b8f57f6960fd5 | [
"MIT"
] | 1 | 2020-02-26T07:25:46.000Z | 2020-02-26T07:25:46.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 2 08:20:41 2018
@author: nschweizer
"""
# Python libs
import math
import numpy as np
from quaternion import quaternion
# Ros libraries
import rospy
# Ros Messages
from sensor_msgs.msg import Imu
from std_msgs.msg import Float64
from std_msgs.msg import Float32MultiArray
# Read the simulation flag once at import time; it selects both the
# subscriber message type and the axis conventions used in Filter.
if rospy.has_param('/use_simulation'):
    SIMULATION = rospy.get_param('/use_simulation')
else:
    SIMULATION = False
class Filter:
    """Quaternion-based complementary attitude filter.

    Integrates gyroscope rates into an orientation quaternion and corrects
    the resulting drift toward the accelerometer's gravity direction with a
    small, dynamically adjusted weight (``gamma_accel``). Publishes
    roll/pitch/yaw angles and the fused IMU message.
    """

    def __init__(self):
        # Raw IMU input: the simulation publishes sensor_msgs/Imu, the real
        # robot publishes a flat Float32MultiArray (gyro xyz, accel xyz).
        if SIMULATION:
            self.sub = rospy.Subscriber('/testbot/imu', Imu, self.imu_callback)
        else:
            self.sub = rospy.Subscriber('/testbot/imu', Float32MultiArray, self.imu_callback)

        self.pub_roll = rospy.Publisher('/roll', Float64, queue_size=10)
        self.pub_pitch = rospy.Publisher('/pitch', Float64, queue_size=10)
        self.pub_yaw = rospy.Publisher('/yaw', Float64, queue_size=10)
        self.pub_setpoint = rospy.Publisher('/setpoint', Float64, queue_size=10)
        self.pub_imu = rospy.Publisher('/imu', Imu, queue_size=10)
        self.pub_delta = rospy.Publisher('/testbot/delta1', Float64, queue_size=10)

        # Latest raw sensor readings (accel in m/s^2, gyro in rad/s).
        self.accel_x = 0.0
        self.accel_y = 0.0
        self.accel_z = 0.0
        self.gyro_x = 0.0
        self.gyro_y = 0.0
        self.gyro_z = 0.0

        # Attitude estimate as a unit quaternion (w, x, y, z).
        self.q = quaternion(1, 0, 0, 0)

        self.gamma_accel = 0.002  # static weighting of the accelerometer
        self.time_old = rospy.get_rostime()

    def imu_callback(self, msg):
        """Unpack a raw IMU message, run one filter step and publish."""
        if SIMULATION:
            # Simulation axes: y and z are mirrored w.r.t. the real robot.
            self.gyro_x = msg.angular_velocity.x       # rad/s
            self.gyro_y = -msg.angular_velocity.y
            self.gyro_z = -msg.angular_velocity.z
            self.accel_x = msg.linear_acceleration.x   # m/s^2
            self.accel_y = -msg.linear_acceleration.y
            self.accel_z = -msg.linear_acceleration.z
        else:
            self.gyro_x = msg.data[0]
            self.gyro_y = msg.data[1]
            self.gyro_z = msg.data[2]
            self.accel_x = msg.data[3]
            self.accel_y = msg.data[4]
            self.accel_z = msg.data[5]

        w_x = self.gyro_x
        w_y = self.gyro_y
        w_z = self.gyro_z

        # Time step since the previous sample.
        time_new = rospy.get_rostime()
        dt = (time_new - self.time_old).to_sec()
        self.time_old = time_new

        # Predict with the gyro, then correct with the accelerometer.
        self.integration_gyro(dt, w_x, w_y, w_z)
        self.accelerometer()

        (roll, pitch, yaw) = self.euler_from_quat()
        self.pub_roll.publish(roll)
        self.pub_pitch.publish(pitch)
        self.pub_yaw.publish(yaw)
        self.pub_setpoint.publish(0.0)

        # Republish the fused orientation together with the raw readings.
        imu = Imu()
        imu.header.frame_id = "imu_frame"
        imu.orientation.x = self.q.x
        imu.orientation.y = self.q.y
        imu.orientation.z = self.q.z
        imu.orientation.w = self.q.w
        imu.angular_velocity.x = self.gyro_x
        imu.angular_velocity.y = self.gyro_y
        imu.angular_velocity.z = self.gyro_z
        imu.linear_acceleration.x = self.accel_x
        imu.linear_acceleration.y = self.accel_y
        imu.linear_acceleration.z = self.accel_z
        self.pub_imu.publish(imu)

    def integration_gyro(self, dt, w_x, w_y, w_z):
        """Integrate body rates over dt into the attitude quaternion.

        Uses the closed-form quaternion exponential for a constant rate
        over the step.
        """
        omega_norm = math.sqrt(w_x * w_x + w_y * w_y + w_z * w_z)
        alpha = math.cos(omega_norm * dt / 2.0)
        beta = math.sin(omega_norm * dt / 2.0)

        # Safe division: for very small rates fall back to the small-angle
        # limit sin(|w| dt / 2) / |w| -> dt / 2.
        if omega_norm >= 0.001:
            beta = beta / omega_norm
        else:
            beta = dt / 2.0

        # Components of the delta quaternion.
        beta1 = beta * w_x
        beta2 = beta * w_y
        beta3 = beta * w_z

        # Integration step.
        # BUG FIX: the previous code assigned self.q.w first and then used
        # the *updated* value when computing x/y/z. All four components must
        # be derived from the quaternion of the previous step.
        qw, qx, qy, qz = self.q.w, self.q.x, self.q.y, self.q.z
        self.q.w = alpha * qw - beta1 * qx - beta2 * qy - beta3 * qz
        self.q.x = beta1 * qw + alpha * qx + beta3 * qy - beta2 * qz
        self.q.y = beta2 * qw - beta3 * qx + alpha * qy + beta1 * qz
        self.q.z = beta3 * qw + beta2 * qx - beta1 * qy + alpha * qz

        # Re-normalize to counter numerical drift.
        q_norm_inv = 1.0 / math.sqrt(self.q.w * self.q.w + self.q.x * self.q.x +
                                     self.q.y * self.q.y + self.q.z * self.q.z)
        self.q = self.q * q_norm_inv

        # Keep w >= 0 to resolve the quaternion double-cover (unwinding).
        if self.q.w < 0:
            self.q = -self.q

    def accelerometer(self):
        """Tilt-correct the attitude toward the measured gravity direction.

        The correction is skipped entirely when the acceleration magnitude
        is implausible (sensor fault or free fall), so it never acts on
        data that is not gravity-dominated and the bias estimate is not
        affected.
        """
        acc_norm = math.sqrt(self.accel_x * self.accel_x +
                             self.accel_y * self.accel_y +
                             self.accel_z * self.accel_z)

        # Only correct if the magnitude is at least near 1 g (>= 8 m/s^2).
        if acc_norm < 8:
            return

        # Measured gravity direction, assuming acc = -g.
        acc_normalized_x = -self.accel_x / acc_norm
        acc_normalized_y = -self.accel_y / acc_norm
        acc_normalized_z = -self.accel_z / acc_norm

        # Relative deviation from 1 g -> external acceleration present?
        magnitude_error_acc = math.fabs(acc_norm - 9.81) / 9.81

        # Fade the correction weight out between 10% and 20% deviation.
        if magnitude_error_acc <= 0.1:
            gamma_accel_cor = 1.0
        elif magnitude_error_acc >= 0.2:
            gamma_accel_cor = 0.0
        else:
            gamma_accel_cor = 2.0 - 10.0 * magnitude_error_acc
        gamma_accel_cor = gamma_accel_cor * self.gamma_accel

        # Expected gravity direction given the current attitude estimate.
        g_exp_x = 2.0 * (self.q.x * self.q.z - self.q.w * self.q.y)
        g_exp_y = 2.0 * (self.q.y * self.q.z + self.q.w * self.q.x)
        g_exp_z = (self.q.w * self.q.w - self.q.x * self.q.x -
                   self.q.y * self.q.y + self.q.z * self.q.z)

        # Halfway vector between expected and measured gravity.
        v_mid_x = g_exp_x + acc_normalized_x
        v_mid_y = g_exp_y + acc_normalized_y
        v_mid_z = g_exp_z + acc_normalized_z

        v_mid_sq = v_mid_x * v_mid_x + v_mid_y * v_mid_y + v_mid_z * v_mid_z
        if v_mid_sq <= 1e-12:
            # Expected and measured direction are (nearly) opposite: the
            # rotation axis is undefined, so skip this correction step.
            return

        # BUG FIX: normalization previously divided by the *squared* norm
        # (missing sqrt). Also, 1.0/0.0 raises ZeroDivisionError in Python,
        # so the old isnan/isinf guard could never trigger.
        v_mid_inorm = 1.0 / math.sqrt(v_mid_sq)
        v_mid_x *= v_mid_inorm
        v_mid_y *= v_mid_inorm
        v_mid_z *= v_mid_inorm

        # Delta rotation from g_exp toward the measurement: with both
        # vectors normalized, dot = cos(theta/2), -cross = axis * sin(theta/2).
        q_delta_accel = quaternion(1, 0, 0, 0)
        q_delta_accel.w = g_exp_x * v_mid_x + g_exp_y * v_mid_y + g_exp_z * v_mid_z
        q_delta_accel.x = -(g_exp_y * v_mid_z - v_mid_y * g_exp_z)
        q_delta_accel.y = -(g_exp_z * v_mid_x - v_mid_z * g_exp_x)
        q_delta_accel.z = -(g_exp_x * v_mid_y - v_mid_x * g_exp_y)

        # Fully corrected attitude: q_final = q * q_delta.
        q_final_accel = quaternion(1, 0, 0, 0)
        q_final_accel.w = (self.q.w * q_delta_accel.w - self.q.x * q_delta_accel.x -
                           self.q.y * q_delta_accel.y - self.q.z * q_delta_accel.z)
        q_final_accel.x = (self.q.w * q_delta_accel.x + self.q.x * q_delta_accel.w +
                           self.q.y * q_delta_accel.z - self.q.z * q_delta_accel.y)
        q_final_accel.y = (self.q.w * q_delta_accel.y - self.q.x * q_delta_accel.z +
                           self.q.y * q_delta_accel.w + self.q.z * q_delta_accel.x)
        q_final_accel.z = (self.q.w * q_delta_accel.z + self.q.x * q_delta_accel.y -
                           self.q.y * q_delta_accel.x + self.q.z * q_delta_accel.w)

        # Protect from unwinding.
        if q_final_accel.w < 0:
            q_final_accel = -q_final_accel

        if math.fabs(q_delta_accel.w) > 0.95:
            # Small correction angle: LERP is accurate enough and cheap.
            self.q = self.q * (1.0 - gamma_accel_cor) + q_final_accel * gamma_accel_cor
        else:
            # Larger angle: SLERP between current and corrected attitude.
            # BUG FIX: the second weight used '+' where '*' is required.
            beta = math.acos(q_delta_accel.w)
            beta1 = 1.0 / beta
            beta2 = beta1 * math.sin(beta * (1.0 - gamma_accel_cor))
            beta3 = beta1 * math.sin(beta * gamma_accel_cor)
            self.q = self.q * beta2 + q_final_accel * beta3

        # Re-normalize and protect from unwinding.
        q_norm_inv = 1.0 / math.sqrt(self.q.w * self.q.w + self.q.x * self.q.x +
                                     self.q.y * self.q.y + self.q.z * self.q.z)
        self.q = self.q * q_norm_inv
        if self.q.w < 0:
            self.q = -self.q

    def euler_from_quat(self):
        """Return (roll, pitch, yaw) in radians from the current quaternion."""
        roll = math.atan2(2.0 * self.q.y * self.q.z + 2.0 * self.q.w * self.q.x,
                          self.q.z * self.q.z - self.q.y * self.q.y -
                          self.q.x * self.q.x + self.q.w * self.q.w)
        pitch = -math.asin(2.0 * self.q.x * self.q.z - 2.0 * self.q.w * self.q.y)
        yaw = math.atan2(2.0 * self.q.x * self.q.y + 2.0 * self.q.w * self.q.z,
                         self.q.x * self.q.x + self.q.w * self.q.w -
                         self.q.z * self.q.z - self.q.y * self.q.y)
        return (roll, pitch, yaw)
if __name__ == '__main__':
    # Entry point: register the ROS node hosting the complementary filter
    # and block until shutdown.  A Ctrl-C / node shutdown raises
    # ROSInterruptException, which is deliberately swallowed so the script
    # exits quietly.
    try:
        rospy.init_node('complementary_filter')
        filter = Filter()
        rospy.spin()
    except rospy.ROSInterruptException: pass
| 38.059925 | 152 | 0.546841 |
e0c3e77355fc87d38fa7fa2bfe3a4c0f8f5442a7 | 128 | py | Python | thinkpad_tools_assets/__main__.py | devksingh4/thinkpad-tools | 437585f8fbff3ef1deeb75f5de42c83ee8c6d984 | [
"BSD-3-Clause"
] | 207 | 2019-04-15T01:28:10.000Z | 2022-02-08T13:25:52.000Z | thinkpad_tools_assets/__main__.py | devksingh4/thinkpad-tools | 437585f8fbff3ef1deeb75f5de42c83ee8c6d984 | [
"BSD-3-Clause"
] | 11 | 2019-04-15T17:35:20.000Z | 2020-07-30T16:16:44.000Z | thinkpad_tools_assets/__main__.py | devksingh4/thinkpad-tools | 437585f8fbff3ef1deeb75f5de42c83ee8c6d984 | [
"BSD-3-Clause"
] | 7 | 2019-04-15T04:07:49.000Z | 2020-05-24T04:17:28.000Z | # __main__.py
import sys
from .cmd import commandline_parser
if __name__ == '__main__':
    # Forward the command-line arguments (minus the program name) to the
    # package's CLI parser.
    commandline_parser(sys.argv[1:])
| 14.222222 | 36 | 0.734375 |
6e4cf91377befb098b2b3ed07cb827f8ef4fc205 | 2,576 | py | Python | donkeycar/parts/cv.py | danielhj07/donkey | 54bf793b6f112c0bf38ddfefc53b007790593157 | [
"MIT"
] | 7 | 2018-01-17T15:39:58.000Z | 2020-01-23T10:13:55.000Z | donkeycar/parts/cv.py | danielhj07/donkey | 54bf793b6f112c0bf38ddfefc53b007790593157 | [
"MIT"
] | 1 | 2018-11-25T01:07:28.000Z | 2018-11-26T14:29:57.000Z | donkeycar/parts/cv.py | danielhj07/donkey | 54bf793b6f112c0bf38ddfefc53b007790593157 | [
"MIT"
] | 9 | 2018-01-15T13:09:51.000Z | 2020-12-02T07:46:53.000Z | import cv2
import numpy as np
class ImgGreyscale():
    """Donkeycar part that converts an RGB frame to greyscale."""

    def run(self, img_arr):
        """Return a single-channel greyscale copy of ``img_arr``."""
        grey = cv2.cvtColor(img_arr, cv2.COLOR_RGB2GRAY)
        return grey
class ImgCanny():
    """Donkeycar part wrapping ``cv2.Canny`` edge detection with fixed
    hysteresis thresholds."""

    def __init__(self, low_threshold=60, high_threshold=110):
        self.low_threshold = low_threshold
        self.high_threshold = high_threshold

    def run(self, img_arr):
        """Return the Canny edge map of ``img_arr``."""
        return cv2.Canny(img_arr, self.low_threshold, self.high_threshold)
class ImgGaussianBlur():
    """Apply a square Gaussian blur to an image.

    Parameters
    ----------
    kernal_size : int, optional
        Side length of the square Gaussian kernel (OpenCV requires it to
        be odd and positive).  Default is 5.  The misspelled name is kept
        for backward compatibility with existing callers.
    """

    def __init__(self, kernal_size=5):
        self.kernal_size = kernal_size

    def run(self, img_arr):
        # BUG FIX: previously read ``self.kernel_size`` (note the
        # spelling), which was never assigned in ``__init__`` and raised
        # AttributeError on every call.
        return cv2.GaussianBlur(img_arr,
                                (self.kernal_size, self.kernal_size), 0)
class ImgCrop:
    """
    Crop an image to an area of interest.

    Parameters
    ----------
    top, bottom, left, right : int, optional
        Number of pixels to trim from the corresponding edge.
        Defaults are 0 (no cropping).
    """

    def __init__(self, top=0, bottom=0, left=0, right=0):
        self.top = top
        self.bottom = bottom
        self.left = left
        self.right = right

    def run(self, img_arr):
        """Return ``img_arr`` with the configured margins removed."""
        # BUG FIX: numpy image arrays are (height, width, channels); the
        # original unpacked the shape as (width, height, _), so top/bottom
        # and left/right margins were computed against the wrong axis on
        # non-square images.
        height, width, _ = img_arr.shape
        img_arr = img_arr[self.top:height - self.bottom,
                          self.left:width - self.right]
        return img_arr
class ImgStack:
    """
    Stack N previous images into a single N channel image, after converting
    each to grayscale.  The most recent image occupies the last channel and
    pushes older images towards the front of the stack.
    """

    def __init__(self, num_channels=3):
        self.img_arr = None  # rolling uint8 buffer, allocated lazily
        self.num_channels = num_channels

    def rgb2gray(self, rgb):
        """Collapse an RGB array to one grayscale channel using the
        standard luma weights."""
        weights = [0.299, 0.587, 0.114]
        return np.dot(rgb[..., :3], weights)

    def run(self, img_arr):
        dim0, dim1, _ = img_arr.shape
        gray = self.rgb2gray(img_arr)
        if self.img_arr is None:
            # First frame: allocate the channel stack.
            self.img_arr = np.zeros([dim0, dim1, self.num_channels],
                                    dtype=np.dtype('B'))
        # Shift every channel one slot towards the front...
        for ch in range(self.num_channels - 1):
            self.img_arr[..., ch] = self.img_arr[..., ch + 1]
        # ...and write the newest grayscale frame into the last slot.
        self.img_arr[..., self.num_channels - 1:] = np.reshape(
            gray, (dim0, dim1, 1))
        return self.img_arr
class Pipeline():
    """Run a value through an ordered list of processing steps.

    Each step is a dict with keys ``'f'`` (callable), ``'args'`` (tuple)
    and ``'kwargs'`` (dict); the output of one step feeds the next.
    """

    def __init__(self, steps):
        self.steps = steps

    def run(self, val):
        """Thread ``val`` through every configured step and return the
        final result."""
        for step in self.steps:
            val = step['f'](val, *step['args'], **step['kwargs'])
        return val
| 26.285714 | 94 | 0.561335 |
3287d78ad97674c6e864b854068ccbcb7c2899c2 | 1,859 | py | Python | machine_learning/methods/k_nearest_neighbors/kdtree.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | [
"MIT"
] | 1 | 2022-01-06T04:50:09.000Z | 2022-01-06T04:50:09.000Z | machine_learning/methods/k_nearest_neighbors/kdtree.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | [
"MIT"
] | 1 | 2022-01-23T06:09:21.000Z | 2022-01-23T06:14:17.000Z | machine_learning/methods/k_nearest_neighbors/kdtree.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | [
"MIT"
] | null | null | null | import operator
import pprint
import unittest
from typing import Optional, NamedTuple, Protocol, Tuple
Vector = Tuple[int, ...]
# Somewhat strange technique to handle circular references in types.
# See: https://www.youtube.com/watch?v=QjFChmQHJxk.
class _BinaryTreeNode(Protocol):
    """Structural (duck) type of a binary-tree node: a point plus two
    optional subtrees.  Exists so `BinaryTreeNode` below can reference
    itself in its own field annotations without a circular definition."""

    @property
    def location(self) -> Vector:
        ...

    @property
    def left(self) -> Optional["_BinaryTreeNode"]:
        ...

    @property
    def right(self) -> Optional["_BinaryTreeNode"]:
        ...
class BinaryTreeNode(NamedTuple):
    """
    A Binary Tree (BT) with a node value, and left- and
    right-subtrees.
    """

    # The k-dimensional point stored at this node.
    location: Vector
    # Subtrees; None marks an empty subtree.
    left: Optional[_BinaryTreeNode]
    right: Optional[_BinaryTreeNode]

    def __repr__(self):
        # Render the node as a nested tuple for compact, readable output.
        return pprint.pformat(tuple(self))
def kdtree(points, depth: int = 0) -> Optional[BinaryTreeNode]:
    """
    Recursively construct a k-d tree from a list of points.

    The splitting axis cycles with ``depth``; the median point along that
    axis becomes the node and the two halves become the subtrees.  Note
    the list is sorted in place at each level.
    """
    if not points:
        return None

    ndim = len(points[0])
    axis = depth % ndim
    points.sort(key=operator.itemgetter(axis))
    mid = len(points) // 2

    return BinaryTreeNode(
        location=points[mid],
        left=kdtree(points=points[:mid], depth=depth + 1),
        right=kdtree(points=points[mid + 1:], depth=depth + 1),
    )
class TestKDTree(unittest.TestCase):

    def test_construction(self) -> None:
        # Classic six-point example; splitting on axis 0 first makes
        # (7, 2) the median and hence the root.
        point_list = [(7, 2), (5, 4), (9, 6), (4, 7), (8, 1), (2, 3)]
        tree = kdtree(point_list)
        if tree is None:
            self.assertIsNotNone(tree)
        else:
            self.assertEqual(tree.location, (7, 2))
            # The bare `assert`s double as type narrowing for the checker.
            assert tree.left is not None
            self.assertEqual(tree.left.location, (5, 4))
            assert tree.right is not None
            self.assertEqual(tree.right.location, (9, 6))
if __name__ == "__main__":
raise SystemExit(unittest.main())
| 25.819444 | 69 | 0.618612 |
e4f80fc8c190db84907ecdd2030e9ef1229e9dba | 913 | py | Python | helpers/event_handling.py | bdemin/M113_Visualization | bf863af9dfc2902ae9123afeae8d5bd413a4bedb | [
"MIT"
] | null | null | null | helpers/event_handling.py | bdemin/M113_Visualization | bf863af9dfc2902ae9123afeae8d5bd413a4bedb | [
"MIT"
] | null | null | null | helpers/event_handling.py | bdemin/M113_Visualization | bf863af9dfc2902ae9123afeae8d5bd413a4bedb | [
"MIT"
] | null | null | null | def keyboard_events(obj, pause, camera_flag, camera_distance, view, timer):
key = obj.GetKeySym()
# print(key)
if key == 'o' and pause == False:
pause = True
elif key == 'o':
pause = False
if key == 'i' and camera_flag:
camera_flag = False
elif key == 'i':
camera_flag = True
if key == 'u' and camera_flag:
camera_distance += 1
if key == 'y' and camera_flag:
camera_distance -= 1
if key == 'v':
if view == 5:
view = 1
else:
view += 1
if key == 'bracketright':
timer += 10
elif key == 'bracketleft':
if timer > 10:
timer -= 10
if key == 'backslash':
timer = 0
# Add camera zoom
if key == 'equal':
pass
if key == 'minus':
pass
return pause, camera_flag, camera_distance, view, timer
| 21.738095 | 75 | 0.496166 |
3549b26c3cea438bd1416734adc4d3ce0a3fd599 | 864 | py | Python | _run_scripts/run_intkmeans.py | simonharris/pykmeans | 256e0c6c7284182aae9c10783cf50778af120514 | [
"MIT"
] | 1 | 2021-12-30T01:25:03.000Z | 2021-12-30T01:25:03.000Z | _run_scripts/run_intkmeans.py | simonharris/pycluster | 4d47eb12a2bbaf1b05d7ccfd0cfc9ccf78ddf86d | [
"MIT"
] | 3 | 2020-11-12T12:36:00.000Z | 2021-06-18T12:46:59.000Z | _run_scripts/run_intkmeans.py | simonharris/pycluster | 4d47eb12a2bbaf1b05d7ccfd0cfc9ccf78ddf86d | [
"MIT"
] | 1 | 2021-12-30T01:32:32.000Z | 2021-12-30T01:32:32.000Z | """Temp bootstrap file to run Intelligent K-Means"""
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from datasets import testloader
from initialisations import ikmeans_card as alg
# Finds 91 centroids
# dataset = testloader._load_local('20_1000_1000_r_1.5_025')
# num_clusters = 20
# Finds 383 centroids
# dataset = testloader._load_local('5_50_1000_r_1_025')
# num_clusters = 5
# This is one that didn't used to complete on Ceres
# dataset = testloader._load_local('20_2_1000_r_1_024')
# num_clusters = 20
# This one fails with < 20 centroids (19)
# dataset = testloader._load_local('20_2_1000_r_1.5_035')
# num_clusters = 20
# This is Iris, obviously
dataset = testloader.load_iris()  # Iris: 150 samples, 4 features
num_clusters = 3  # Iris has three known species/classes
# Run the Intelligent K-Means initialisation and report what it found.
centroids = alg.generate(dataset.data, num_clusters)
print(centroids)
print("There were", len(centroids), "centroids found")
| 24.685714 | 60 | 0.756944 |
ccf0e9d8b7a8e46a0d1a7ca2a106ed1dc3d709aa | 54,897 | py | Python | py/dynesty/bounding.py | vishalbelsare/dynesty | 2c5f1bbe5a0745c6625876f23ec6aa710c845fd4 | [
"MIT"
] | null | null | null | py/dynesty/bounding.py | vishalbelsare/dynesty | 2c5f1bbe5a0745c6625876f23ec6aa710c845fd4 | [
"MIT"
] | null | null | null | py/dynesty/bounding.py | vishalbelsare/dynesty | 2c5f1bbe5a0745c6625876f23ec6aa710c845fd4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bounding classes used when proposing new live points, along with a number of
useful helper functions. Bounding objects include:
UnitCube:
The unit N-cube (unconstrained draws from the prior).
Ellipsoid:
Bounding ellipsoid.
MultiEllipsoid:
A set of (possibly overlapping) bounding ellipsoids.
RadFriends:
A set of (possibly overlapping) balls centered on each live point.
SupFriends:
A set of (possibly overlapping) cubes centered on each live point.
"""
import warnings
import math
import numpy as np
from numpy import linalg
from numpy import cov as mle_cov
from scipy import spatial
from scipy import cluster
from scipy import linalg as lalg
from scipy.special import logsumexp, gammaln
from .utils import unitcheck, get_seed_sequence, get_random_generator
__all__ = [
"UnitCube", "Ellipsoid", "MultiEllipsoid", "RadFriends", "SupFriends",
"logvol_prefactor", "randsphere", "bounding_ellipsoid",
"bounding_ellipsoids", "_bounding_ellipsoids",
"_ellipsoid_bootstrap_expand", "_friends_bootstrap_radius",
"_friends_leaveoneout_radius"
]
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
from scipy.cluster.vq import kmeans2
class UnitCube:
    """
    An N-dimensional unit cube.

    Parameters
    ----------
    ndim : int
        The number of dimensions of the unit cube.

    """

    def __init__(self, ndim):
        self.n = ndim  # dimensionality
        self.vol = 1.  # the unit cube has unit volume
        self.funit = 1.  # overlap with the unit cube (itself, so 1)

    def contains(self, x):
        """Checks if unit cube contains the point `x`."""
        return unitcheck(x)

    def randoffset(self, rstate=None):
        """Draw a random offset from the center of the unit cube."""
        offset = self.sample(rstate=rstate) - 0.5
        return offset

    def sample(self, rstate=None):
        """
        Draw a sample uniformly distributed within the unit cube.

        Returns
        -------
        x : `~numpy.ndarray` with shape (ndim,)
            A coordinate within the unit cube.

        """
        return rstate.uniform(size=self.n)

    def samples(self, nsamples, rstate=None):
        """
        Draw `nsamples` samples randomly distributed within the unit cube.

        Returns
        -------
        x : `~numpy.ndarray` with shape (nsamples, ndim)
            A collection of coordinates within the unit cube.

        """
        draws = [self.sample(rstate=rstate) for _ in range(nsamples)]
        return np.array(draws)

    def update(self, points, rstate=None, bootstrap=0, pool=None):
        """Filler function; the unit cube never needs updating."""
        pass
class Ellipsoid:
    """
    An N-dimensional ellipsoid defined by::

        (x - v)^T A (x - v) = 1

    where the vector `v` is the center of the ellipsoid and `A` is a
    symmetric, positive-definite `N x N` matrix.

    Parameters
    ----------
    ctr : `~numpy.ndarray` with shape (N,)
        Coordinates of ellipsoid center.

    cov : `~numpy.ndarray` with shape (N, N)
        Covariance matrix describing the axes.

    am : `~numpy.ndarray` with shape (N, N), optional
        Precomputed precision matrix (inverse of `cov`); recomputed from
        the eigen-decomposition of `cov` when not given.

    axes : `~numpy.ndarray` with shape (N, N), optional
        Precomputed lower Cholesky factor of `cov`; recomputed when not
        given.

    """

    def __init__(self, ctr, cov, am=None, axes=None):
        self.n = len(ctr)  # dimension
        self.ctr = np.asarray(ctr)  # center coordinates
        self.cov = np.asarray(cov)  # covariance matrix
        if axes is None:
            self.axes = lalg.cholesky(cov, lower=True)  # transformation axes
        else:
            self.axes = axes
        # The eigenvalues (l) of `a` are (a^-2, b^-2, ...) where
        # (a, b, ...) are the lengths of principle axes.
        # The eigenvectors (v) are the normalized principle axes.
        l, v = lalg.eigh(self.cov)
        if np.all((l > 0.) & (np.isfinite(l))):
            self.axlens = np.sqrt(l)
            # Volume of ellipsoid is the volume of an n-sphere
            # is a product of squares of eigen values
            self.logvol = logvol_prefactor(self.n) + 0.5 * np.log(l).sum()
        else:
            raise ValueError("The input precision matrix defining the "
                             "ellipsoid {0} is apparently singular with "
                             "l={1} and v={2}.".format(self.cov, l, v))
        if am is None:
            self.am = v @ np.diag(1. / l) @ v.T
            # precision matrix (inverse of covariance)
        else:
            self.am = am

        # Scaled eigenvectors are the principle axes, where `paxes[:,i]` is the
        # i-th axis. Multiplying this matrix by a vector will transform a
        # point in the unit n-sphere to a point in the ellipsoid.
        self.paxes = np.dot(v, np.diag(self.axlens))

        # Amount by which volume was increased after initialization (i.e.
        # cumulative factor from `scale_to_vol`).
        self.expand = 1.
        self.funit = 1

    def scale_to_logvol(self, logvol):
        """Scale ellipsoid to a target volume (given as a log-volume).

        Inflation is isotropic when possible; otherwise each axis is
        inflated anisotropically (largest first) while keeping every
        semi-axis within the unit cube's circumscribing radius."""
        logf = (logvol - self.logvol)
        # log of the maxium axis length of the ellipsoid
        max_log_axlen = np.log(np.sqrt(self.n) / 2)
        log_axlen = np.log(self.axlens)
        if log_axlen.max() < max_log_axlen - logf / self.n:
            # we are safe to inflate the ellipsoid isothropically
            # without hitting boundaries
            f = np.exp(logf / self.n)
            self.cov *= f**2
            self.am *= 1. / f**2
            self.axlens *= f
            self.axes *= f
        else:
            logfax = np.zeros(self.n)
            curlogf = logf  # how much we have left to inflate
            curn = self.n  # how many dimensions left
            l, v = lalg.eigh(self.cov)
            # here we start from largest and go to smallest
            for curi in np.argsort(l)[::-1]:
                # Inflate this axis by the remaining budget split evenly
                # over the dimensions left, capped at the maximum length.
                delta = max(
                    min(max_log_axlen - log_axlen[curi], curlogf / curn), 0)
                logfax[curi] = delta
                curlogf -= delta
                curn -= 1
            fax = np.exp(logfax)  # linear inflation of each dimension
            l1 = l * fax**2  # eigen values are squares of axes
            self.cov = v @ np.diag(l1) @ v.T
            self.am = v @ np.diag(1 / l1) @ v.T
            self.axlens *= fax
            self.axes = lalg.cholesky(self.cov, lower=True)
            # I don't quite know how to scale axes, so I rerun cholesky
        self.logvol = logvol

    def major_axis_endpoints(self):
        """Return the endpoints of the major axis."""
        i = np.argmax(self.axlens)  # find the major axis
        v = self.paxes[:, i]  # vector from center to major axis endpoint
        return self.ctr - v, self.ctr + v

    def distance(self, x):
        """Compute the normalized (Mahalanobis) distance to `x` from the
        center of the ellipsoid; values <= 1 lie inside."""
        d = x - self.ctr
        return np.sqrt(np.dot(np.dot(d, self.am), d))

    def distance_many(self, x):
        """Compute the normalized distance to each row of `x` from the
        center of the ellipsoid."""
        d = x - self.ctr[None, :]
        return np.sqrt(np.einsum('ij,jk,ik->i', d, self.am, d))

    def contains(self, x):
        """Checks if ellipsoid contains `x`."""
        return self.distance(x) <= 1.0

    def randoffset(self, rstate=None):
        """Return a random offset from the center of the ellipsoid."""
        return np.dot(self.axes, randsphere(self.n, rstate=rstate))

    def sample(self, rstate=None):
        """
        Draw a sample uniformly distributed within the ellipsoid.

        Returns
        -------
        x : `~numpy.ndarray` with shape (ndim,)
            A coordinate within the ellipsoid.

        """
        return self.ctr + self.randoffset(rstate=rstate)

    def samples(self, nsamples, rstate=None):
        """
        Draw `nsamples` samples uniformly distributed within the ellipsoid.

        Returns
        -------
        x : `~numpy.ndarray` with shape (nsamples, ndim)
            A collection of coordinates within the ellipsoid.

        """
        xs = np.array([self.sample(rstate=rstate) for i in range(nsamples)])
        return xs

    def unitcube_overlap(self, ndraws=10000, rstate=None):
        """Using `ndraws` Monte Carlo draws, estimate the fraction of
        overlap between the ellipsoid and the unit cube."""
        samples = [self.sample(rstate=rstate) for i in range(ndraws)]
        nin = sum([unitcheck(x) for x in samples])
        return 1. * nin / ndraws

    def update(self,
               points,
               rstate=None,
               bootstrap=0,
               pool=None,
               mc_integrate=False):
        """
        Update the ellipsoid to bound the collection of points.

        Parameters
        ----------
        points : `~numpy.ndarray` with shape (npoints, ndim)
            The set of points to bound.

        rstate : `~numpy.random.Generator`, optional
            `~numpy.random.Generator` instance.

        bootstrap : int, optional
            The number of bootstrapped realizations of the ellipsoid. The
            maximum distance to the set of points "left out" during each
            iteration is used to enlarge the resulting volumes.
            Default is `0`.

        pool : user-provided pool, optional
            Use this pool of workers to execute operations in parallel.

        mc_integrate : bool, optional
            Whether to use Monte Carlo methods to compute the effective
            overlap of the final ellipsoid with the unit cube.
            Default is `False`.

        """

        # Compute new bounding ellipsoid.
        ell = bounding_ellipsoid(points)
        # Copy every derived quantity over from the freshly-fit ellipsoid.
        self.n = ell.n
        self.ctr = ell.ctr
        self.cov = ell.cov
        self.am = ell.am
        self.logvol = ell.logvol
        self.axlens = ell.axlens
        self.axes = ell.axes
        self.paxes = ell.paxes
        self.expand = ell.expand

        # Use bootstrapping to determine the volume expansion factor.
        if bootstrap > 0:

            # If provided, compute bootstraps in parallel using a pool.
            if pool is None:
                M = map
            else:
                M = pool.map
            multis = [False for it in range(bootstrap)]
            ps = [points for it in range(bootstrap)]
            seeds = get_seed_sequence(rstate, bootstrap)
            args = zip(multis, ps, seeds)
            expands = list(M(_ellipsoid_bootstrap_expand, args))

            # Conservatively set the expansion factor to be the maximum
            # factor derived from our set of bootstraps.
            expand = max(expands)

            # If our ellipsoid is over-constrained, expand it.
            if expand > 1.:
                lv = self.logvol + self.n * np.log(expand)
                self.scale_to_logvol(lv)

        # Estimate the fractional overlap with the unit cube using
        # Monte Carlo integration.
        if mc_integrate:
            self.funit = self.unitcube_overlap(rstate=rstate)
class MultiEllipsoid:
    """
    A collection of M N-dimensional ellipsoids.

    Parameters
    ----------
    ells : list of `Ellipsoid` objects with length M, optional
        A set of `Ellipsoid` objects that make up the collection of
        N-ellipsoids. Used to initialize :class:`MultiEllipsoid` if
        provided.

    ctrs : `~numpy.ndarray` with shape (M, N), optional
        Collection of coordinates of ellipsoid centers. Used to initialize
        :class:`MultiEllipsoid` if :data:`ams` is also provided.

    covs : `~numpy.ndarray` with shape (M, N, N), optional
        Collection of matrices describing the axes of the ellipsoids. Used to
        initialize :class:`MultiEllipsoid` if :data:`ctrs` also provided.

    """

    def __init__(self, ells=None, ctrs=None, covs=None):
        if ells is not None:
            # Try to initialize quantities using provided `Ellipsoid` objects.
            if (ctrs is None) and (covs is None):
                self.nells = len(ells)
                self.ells = ells
            else:
                raise ValueError("You cannot specific both `ells` and "
                                 "(`ctrs`, `covs`)!")
        else:
            # Try to initialize quantities using provided `ctrs` and `covs`.
            if (ctrs is None) and (covs is None):
                raise ValueError("You must specify either `ells` or "
                                 "(`ctrs`, `covs`).")
            else:
                self.nells = len(ctrs)
                self.ells = [
                    Ellipsoid(ctrs[i], covs[i]) for i in range(self.nells)
                ]
        self.__update_arrays()

        # Compute quantities.
        self.expands = np.ones(self.nells)
        self.logvol_tot = logsumexp(self.logvols)
        self.expand_tot = 1.
        self.funit = 1

    def __update_arrays(self):
        """
        Update internal arrays to ensure that in sync with ells
        """
        # Cached stacked views of the per-ellipsoid quantities; these must
        # be refreshed whenever `self.ells` changes.
        self.ctrs = np.array([ell.ctr for ell in self.ells])
        self.covs = np.array([ell.cov for ell in self.ells])
        self.ams = np.array([ell.am for ell in self.ells])
        self.logvols = np.array([ell.logvol for ell in self.ells])

    def scale_to_logvol(self, logvols):
        """Scale ellipoids to a corresponding set of
        target volumes (one log-volume per ellipsoid).
        """
        for i in range(self.nells):
            self.ells[i].scale_to_logvol(logvols[i])
        # IMPORTANT We must also update arrays ams, covs
        self.__update_arrays()
        self.expands = np.array(
            [self.ells[i].expand for i in range(self.nells)])
        logvol_tot = logsumexp(logvols)
        self.expand_tot *= np.exp(logvol_tot - self.logvol_tot)
        self.logvol_tot = logvol_tot

    def major_axis_endpoints(self):
        """Return the endpoints of the major axis of each ellipsoid."""
        return np.array([ell.major_axis_endpoints() for ell in self.ells])

    def within(self, x, j=None):
        """Checks which ellipsoid(s) `x` falls within, skipping the `j`-th
        ellipsoid if need be."""

        # Loop over distance calculations if there aren't too many.
        delt = x[None, :] - self.ctrs
        # Mahalanobis distance < 1 for every ellipsoid simultaneously.
        mask = np.einsum('ai,aij,aj->a', delt, self.ams, delt) < 1
        if j is not None:
            mask[j] = False
        return np.nonzero(mask)[0]

    def overlap(self, x, j=None):
        """Checks how many ellipsoid(s) `x` falls within, skipping the `j`-th
        ellipsoid."""

        q = len(self.within(x, j=j))
        return q

    def contains(self, x):
        """Checks if the set of ellipsoids contains `x`."""
        delt = x[None, :] - self.ctrs
        return np.any(np.einsum('ai,aij,aj->a', delt, self.ams, delt) < 1)

    def sample(self, rstate=None, return_q=False):
        """
        Sample a point uniformly distributed within the *union* of ellipsoids.

        Returns
        -------
        x : `~numpy.ndarray` with shape (ndim,)
            A coordinate within the set of ellipsoids.

        idx : int
            The index of the ellipsoid `x` was sampled from.

        q : int, optional
            The number of ellipsoids `x` falls within.

        """

        # If there is only one ellipsoid, sample from it.
        if self.nells == 1:
            x = self.ells[0].sample(rstate=rstate)
            idx = 0
            q = 1
            if return_q:
                return x, idx, q
            else:
                return x, idx

        probs = np.exp(self.logvols - self.logvol_tot)
        while True:
            # Select an ellipsoid at random proportional to its volume.
            # NOTE(review): `rand_choice` is a helper presumably defined
            # elsewhere in this module (not visible in this chunk).
            idx = rand_choice(probs, rstate)

            # Select a point from the chosen ellipsoid.
            x = self.ells[idx].sample(rstate=rstate)

            # Check how many ellipsoids the point lies within
            delts = (x[None, :] - self.ctrs)
            q = (np.einsum('ai,aij,aj->a', delts, self.ams, delts) < 1).sum()
            assert q > 0  # Should never fail

            if return_q:
                # If `q` is being returned, assume the user wants to
                # explicitly apply the `1. / q` acceptance criterion to
                # properly sample from the union of ellipsoids.
                return x, idx, q
            else:
                # If `q` is not being returned, assume the user wants this
                # done internally so we repeat the loop if needed
                if q == 1 or rstate.uniform() < (1. / q):
                    return x, idx

    def samples(self, nsamples, rstate=None):
        """
        Draw `nsamples` samples uniformly distributed within the *union* of
        ellipsoids.

        Returns
        -------
        xs : `~numpy.ndarray` with shape (nsamples, ndim)
            A collection of coordinates within the set of ellipsoids.

        """

        xs = np.array([self.sample(rstate=rstate)[0] for i in range(nsamples)])

        return xs

    def monte_carlo_logvol(self,
                           ndraws=10000,
                           rstate=None,
                           return_overlap=True):
        """Using `ndraws` Monte Carlo draws, estimate the log volume of the
        *union* of ellipsoids. If `return_overlap=True`, also returns the
        estimated fractional overlap with the unit cube."""

        # Estimate volume using Monte Carlo integration.
        samples = [
            self.sample(rstate=rstate, return_q=True) for i in range(ndraws)
        ]
        # Each sample is down-weighted by its multiplicity `q` so that
        # overlapping regions are not double counted.
        qsum = sum([q for (x, idx, q) in samples])
        logvol = np.log(ndraws * 1. / qsum) + self.logvol_tot

        if return_overlap:
            # Estimate the fractional amount of overlap with the
            # unit cube using the same set of samples.
            qin = sum([q * unitcheck(x) for (x, idx, q) in samples])
            overlap = 1. * qin / qsum
            return logvol, overlap
        else:
            return logvol

    def update(self,
               points,
               rstate=None,
               bootstrap=0,
               pool=None,
               mc_integrate=False):
        """
        Update the set of ellipsoids to bound the collection of points.

        Parameters
        ----------
        points : `~numpy.ndarray` with shape (npoints, ndim)
            The set of points to bound.

        rstate : `~numpy.random.Generator`, optional
            `~numpy.random.Generator` instance.

        bootstrap : int, optional
            The number of bootstrapped realizations of the ellipsoids. The
            maximum distance to the set of points "left out" during each
            iteration is used to enlarge the resulting volumes.
            Default is `0`.

        pool : user-provided pool, optional
            Use this pool of workers to execute operations in parallel.

        mc_integrate : bool, optional
            Whether to use Monte Carlo methods to compute the effective
            volume and fractional overlap of the final union of ellipsoids
            with the unit cube. Default is `False`.

        """

        npoints, ndim = points.shape
        if npoints == 1:
            raise RuntimeError('Cannot compute the bounding ellipsoid of '
                               'a single point.')
        # Calculate the bounding ellipsoid for the points, possibly
        # enlarged to a minimum volume.
        firstell = bounding_ellipsoid(points)

        # Recursively split the bounding ellipsoid
        ells = _bounding_ellipsoids(points, firstell)

        # Update the set of ellipsoids.
        self.nells = len(ells)
        self.ells = ells
        self.__update_arrays()

        # Sanity check: all points must be contained in some ellipsoid
        if not all(self.contains(p) for p in points):
            # refuse to update
            raise RuntimeError('Rejecting invalid MultiEllipsoid region')

        self.logvol_tot = logsumexp(self.logvols)

        # Compute expansion factor.
        expands = np.array([ell.expand for ell in self.ells])
        logvols_orig = self.logvols - np.log(expands)
        logvol_tot_orig = logsumexp(logvols_orig)
        self.expand_tot = np.exp(self.logvol_tot - logvol_tot_orig)

        # Use bootstrapping to determine the volume expansion factor.
        if bootstrap > 0:

            # If provided, compute bootstraps in parallel using a pool.
            if pool is None:
                M = map
            else:
                M = pool.map
            multis = [True for it in range(bootstrap)]
            ps = [points for it in range(bootstrap)]
            seeds = get_seed_sequence(rstate, bootstrap)
            args = zip(multis, ps, seeds)
            expands = list(M(_ellipsoid_bootstrap_expand, args))

            # Conservatively set the expansion factor to be the maximum
            # factor derived from our set of bootstraps.
            expand = max(expands)

            # If our ellipsoids are overly constrained, expand them.
            if expand > 1.:
                lvs = self.logvols + ndim * np.log(expand)
                self.scale_to_logvol(lvs)

        # Estimate the volume and fractional overlap with the unit cube
        # using Monte Carlo integration.
        if mc_integrate:
            self.logvol_tot, self.funit = self.monte_carlo_logvol(
                rstate=rstate, return_overlap=True)
class RadFriends:
"""
A collection of N-balls of identical size centered on each live point.
Parameters
----------
ndim : int
The number of dimensions of each ball.
cov : `~numpy.ndarray` with shape `(ndim, ndim)`, optional
Covariance structure (correlation and size) of each ball.
"""
def __init__(self, ndim, cov=None):
self.n = ndim
if cov is None:
cov = np.identity(self.n)
self.cov = cov
self.am = lalg.pinvh(self.cov)
self.axes = lalg.sqrtm(self.cov)
self.axes_inv = lalg.pinvh(self.axes)
detsign, detln = linalg.slogdet(self.am)
assert detsign > 0
self.logvol_ball = (logvol_prefactor(self.n) - 0.5 * detln)
self.expand = 1.
self.funit = 1
def scale_to_logvol(self, logvol):
"""Scale ball to encompass a target volume."""
f = np.exp((logvol - self.logvol_ball) * (1.0 / self.n))
# linear factor
self.cov *= f**2
self.am /= f**2
self.axes *= f
self.axes_inv /= f
self.logvol_ball = logvol
def within(self, x, ctrs):
"""Check which balls `x` falls within."""
# Execute a brute-force search over all balls.
idxs = np.where(
lalg.norm(np.dot(ctrs - x, self.axes_inv), axis=1) <= 1.)[0]
return idxs
def overlap(self, x, ctrs):
"""Check how many balls `x` falls within."""
q = len(self.within(x, ctrs))
return q
def contains(self, x, ctrs):
"""Check if the set of balls contains `x`."""
return self.overlap(x, ctrs) > 0
def sample(self, ctrs, rstate=None, return_q=False):
"""
Sample a point uniformly distributed within the *union* of balls.
Returns
-------
x : `~numpy.ndarray` with shape (ndim,)
A coordinate within the set of balls.
q : int, optional
The number of balls `x` falls within.
"""
nctrs = len(ctrs) # number of balls
# If there is only one ball, sample from it.
if nctrs == 1:
ds = randsphere(self.n, rstate=rstate)
dx = np.dot(ds, self.axes)
x = ctrs[0] + dx
if return_q:
return x, 1
else:
return x
# Select a ball at random.
idx = rstate.integers(nctrs)
# Select a point from the chosen ball.
ds = randsphere(self.n, rstate=rstate)
dx = np.dot(ds, self.axes)
x = ctrs[idx] + dx
# Check how many balls the point lies within, passing over
# the `idx`-th ball `x` was sampled from.
q = self.overlap(x, ctrs)
if return_q:
# If `q` is being returned, assume the user wants to
# explicitly apply the `1. / q` acceptance criterion to
# properly sample from the union of balls.
return x, q
else:
# If `q` is not being returned, assume the user wants this
# done internally.
while rstate.uniform() > (1. / q):
idx = rstate.integers(nctrs)
ds = randsphere(self.n, rstate=rstate)
dx = np.dot(ds, self.axes)
x = ctrs[idx] + dx
q = self.overlap(x, ctrs)
return x
def samples(self, nsamples, ctrs, rstate=None):
"""
Draw `nsamples` samples uniformly distributed within the *union* of
balls.
Returns
-------
xs : `~numpy.ndarray` with shape (nsamples, ndim)
A collection of coordinates within the set of balls.
"""
xs = np.array(
[self.sample(ctrs, rstate=rstate) for i in range(nsamples)])
return xs
def monte_carlo_logvol(self,
ctrs,
ndraws=10000,
rstate=None,
return_overlap=True):
"""Using `ndraws` Monte Carlo draws, estimate the log volume of the
*union* of balls. If `return_overlap=True`, also returns the
estimated fractional overlap with the unit cube."""
# Estimate volume using Monte Carlo integration.
samples = [
self.sample(ctrs, rstate=rstate, return_q=True)
for i in range(ndraws)
]
qsum = sum([q for (x, q) in samples])
logvol = np.log(1. * ndraws / qsum * len(ctrs)) + self.logvol_ball
if return_overlap:
# Estimate the fractional amount of overlap with the
# unit cube using the same set of samples.
qin = sum([q * unitcheck(x) for (x, q) in samples])
overlap = 1. * qin / qsum
return logvol, overlap
else:
return logvol
def update(self,
points,
rstate=None,
bootstrap=0,
pool=None,
mc_integrate=False,
use_clustering=True):
"""
Update the radii of our balls.
Parameters
----------
points : `~numpy.ndarray` with shape (npoints, ndim)
The set of points to bound.
rstate : `~numpy.random.Generator`, optional
`~numpy.random.Generator` instance.
bootstrap : int, optional
The number of bootstrapped realizations of the ellipsoids. The
maximum distance to the set of points "left out" during each
iteration is used to enlarge the resulting volumes.
Default is `0`.
pool : user-provided pool, optional
Use this pool of workers to execute operations in parallel.
mc_integrate : bool, optional
Whether to use Monte Carlo methods to compute the effective
volume and fractional overlap of the final union of balls
with the unit cube. Default is `False`.
use_clustering : bool, optional
Whether to use clustering to avoid issues with widely-seperated
modes. Default is `True`.
"""
# If possible, compute bootstraps in parallel using a pool.
if pool is None:
M = map
else:
M = pool.map
# Get new covariance.
if use_clustering:
self.cov = self._get_covariance_from_clusters(points)
else:
self.cov = self._get_covariance_from_all_points(points)
self.am = lalg.pinvh(self.cov)
self.axes = lalg.sqrtm(self.cov)
self.axes_inv = lalg.pinvh(self.axes)
# Decorrelate and re-scale points.
points_t = np.dot(points, self.axes_inv)
if bootstrap == 0.:
# Construct radius using leave-one-out if no bootstraps used.
radii = _friends_leaveoneout_radius(points_t, 'balls')
else:
# Bootstrap radius using the set of live points.
ps = [points_t for it in range(bootstrap)]
ftypes = ['balls' for it in range(bootstrap)]
seeds = get_seed_sequence(rstate, bootstrap)
args = zip(ps, ftypes, seeds)
radii = list(M(_friends_bootstrap_radius, args))
# Conservatively set radius to be maximum of the set.
rmax = max(radii)
# Re-scale axes.
self.cov *= rmax**2
self.am /= rmax**2
self.axes *= rmax
self.axes_inv /= rmax
# Compute volume.
detsign, detln = linalg.slogdet(self.am)
assert detsign > 0
self.logvol_ball = (logvol_prefactor(self.n) - 0.5 * detln)
self.expand = 1.
# Estimate the volume and fractional overlap with the unit cube
# using Monte Carlo integration.
if mc_integrate:
self.funit = self.monte_carlo_logvol(points,
return_overlap=True,
rstate=rstate)[1]
def _get_covariance_from_all_points(self, points):
"""Compute covariance using all points."""
return np.cov(points, rowvar=False)
    def _get_covariance_from_clusters(self, points):
        """Compute covariance from re-centered clusters.

        Points are grouped with single-linkage hierarchical clustering
        using the Mahalanobis metric defined by the current inverse
        covariance `self.am`. Each cluster is shifted onto a common
        (zero) mean before computing a single covariance, so widely
        separated modes do not inflate the estimate.
        """
        # Compute pairwise distances.
        distances = spatial.distance.pdist(points,
                                           metric='mahalanobis',
                                           VI=self.am)
        # Identify conglomerates of points by constructing a linkage matrix.
        linkages = cluster.hierarchy.single(distances)
        # Cut when linkage between clusters exceed the radius.
        # Threshold 1.0 corresponds to one Mahalanobis "radius".
        clusteridxs = cluster.hierarchy.fcluster(linkages,
                                                 1.0,
                                                 criterion='distance')
        nclusters = np.max(clusteridxs)
        if nclusters == 1:
            # Single cluster: plain covariance over everything.
            return self._get_covariance_from_all_points(points)
        else:
            # Subtract each cluster's mean so all clusters overlap at the
            # origin, then compute one covariance over the combined set.
            i = 0
            overlapped_points = np.empty_like(points)
            for idx in np.unique(clusteridxs):
                group_points = points[clusteridxs == idx, :]
                group_mean = group_points.mean(axis=0).reshape((1, -1))
                j = i + len(group_points)
                overlapped_points[i:j, :] = group_points - group_mean
                i = j
            return self._get_covariance_from_all_points(overlapped_points)
class SupFriends:
    """
    A collection of N-cubes of identical size centered on each live point.

    Parameters
    ----------
    ndim : int
        The number of dimensions of the cube.

    cov : `~numpy.ndarray` with shape `(ndim, ndim)`, optional
        Covariance structure (correlation and size) of each cube.
        Defaults to the identity matrix (unit axis-aligned cubes).
    """

    def __init__(self, ndim, cov=None):
        self.n = ndim
        if cov is None:
            cov = np.identity(self.n)
        self.cov = cov
        self.am = lalg.pinvh(self.cov)  # inverse covariance
        self.axes = lalg.sqrtm(self.cov)  # maps unit cube -> scaled cube
        self.axes_inv = lalg.pinvh(self.axes)
        detsign, detln = linalg.slogdet(self.am)
        assert detsign > 0
        # ln-volume of one cube: ln(2^n) plus the Jacobian of `axes`.
        self.logvol_cube = self.n * np.log(2.) - 0.5 * detln
        self.expand = 1.
        self.funit = 1

    def scale_to_logvol(self, logvol):
        """Scale cube to encompass a target volume."""
        # Linear factor per dimension needed to reach the target volume.
        f = np.exp((logvol - self.logvol_cube) * (1.0 / self.n))
        # linear factor
        self.cov *= f**2
        self.am /= f**2
        self.axes *= f
        self.axes_inv /= f
        self.logvol_cube = logvol

    def within(self, x, ctrs):
        """Checks which cubes `x` falls within.

        Returns the indices of all centers whose cube contains `x`
        (infinity-norm distance <= 1 in the decorrelated frame).
        """
        # Execute a brute-force search over all cubes.
        idxs = np.where(
            np.max(np.abs(np.dot(ctrs - x, self.axes_inv)), axis=1) <= 1.)[0]
        return idxs

    def overlap(self, x, ctrs):
        """Checks how many cubes `x` falls within, skipping the `j`-th
        cube."""
        q = len(self.within(x, ctrs))
        return q

    def contains(self, x, ctrs):
        """Checks if the set of cubes contains `x`."""
        return self.overlap(x, ctrs) > 0

    def sample(self, ctrs, rstate=None, return_q=False):
        """
        Sample a point uniformly distributed within the *union* of cubes.

        Returns
        -------
        x : `~numpy.ndarray` with shape (ndim,)
            A coordinate within the set of cubes.

        q : int, optional
            The number of cubes `x` falls within.
        """
        nctrs = len(ctrs)  # number of cubes
        # If there is only one cube, sample from it.
        if nctrs == 1:
            ds = (2. * rstate.uniform(size=self.n) - 1.)
            dx = np.dot(ds, self.axes)
            x = ctrs[0] + dx
            if return_q:
                return x, 1
            else:
                return x
        # Select a cube at random.
        idx = rstate.integers(nctrs)
        # Select a point from the chosen cube.
        ds = (2. * rstate.uniform(size=self.n) - 1.)
        dx = np.dot(ds, self.axes)
        x = ctrs[idx] + dx
        # Check how many cubes the point lies within, passing over
        # the `idx`-th cube `x` was sampled from.
        q = self.overlap(x, ctrs)
        if return_q:
            # If `q` is being returned, assume the user wants to
            # explicitly apply the `1. / q` acceptance criterion to
            # properly sample from the union of cubes.
            return x, q
        else:
            # If `q` is not being returned, assume the user wants this
            # done internally. Rejection-sample until a draw is accepted
            # with probability 1/q, which un-biases overlap regions.
            while rstate.uniform() > (1. / q):
                idx = rstate.integers(nctrs)
                ds = (2. * rstate.uniform(size=self.n) - 1.)
                dx = np.dot(ds, self.axes)
                x = ctrs[idx] + dx
                q = self.overlap(x, ctrs)
            return x

    def samples(self, nsamples, ctrs, rstate=None):
        """
        Draw `nsamples` samples uniformly distributed within the *union* of
        cubes.

        Returns
        -------
        xs : `~numpy.ndarray` with shape (nsamples, ndim)
            A collection of coordinates within the set of cubes.
        """
        xs = np.array(
            [self.sample(ctrs, rstate=rstate) for i in range(nsamples)])
        return xs

    def monte_carlo_logvol(self,
                           ctrs,
                           ndraws=10000,
                           rstate=None,
                           return_overlap=False):
        """Using `ndraws` Monte Carlo draws, estimate the log volume of the
        *union* of cubes. If `return_overlap=True`, also returns the
        estimated fractional overlap with the unit cube."""
        # Estimate the volume using Monte Carlo integration.
        samples = [
            self.sample(ctrs, rstate=rstate, return_q=True)
            for i in range(ndraws)
        ]
        # Each draw is counted with weight 1/q; the mean of 1/q times
        # nctrs * vol(cube) estimates the union volume.
        qsum = sum([q for (x, q) in samples])
        logvol = np.log(1. * ndraws / qsum * len(ctrs)) + self.logvol_cube
        if return_overlap:
            # Estimate the fractional overlap with the unit cube using
            # the same set of samples.
            qin = sum([q * unitcheck(x) for (x, q) in samples])
            overlap = 1. * qin / qsum
            return logvol, overlap
        else:
            return logvol

    def update(self,
               points,
               rstate=None,
               bootstrap=0,
               pool=None,
               mc_integrate=False,
               use_clustering=True):
        """
        Update the half-side-lengths of our cubes.

        Parameters
        ----------
        points : `~numpy.ndarray` with shape (npoints, ndim)
            The set of points to bound.

        rstate : `~numpy.random.Generator`, optional
            `~numpy.random.Generator` instance.

        bootstrap : int, optional
            The number of bootstrapped realizations of the ellipsoids. The
            maximum distance to the set of points "left out" during each
            iteration is used to enlarge the resulting volumes.
            Default is `0`.

        pool : user-provided pool, optional
            Use this pool of workers to execute operations in parallel.

        mc_integrate : bool, optional
            Whether to use Monte Carlo methods to compute the effective
            volume and fractional overlap of the final union of cubes
            with the unit cube. Default is `False`.

        use_clustering : bool, optional
            Whether to use clustering to avoid issues with widely-separated
            modes. Default is `True`.
        """
        # If possible, compute bootstraps in parallel using a pool.
        if pool is None:
            M = map
        else:
            M = pool.map
        # Get new covariance.
        if use_clustering:
            self.cov = self._get_covariance_from_clusters(points)
        else:
            self.cov = self._get_covariance_from_all_points(points)
        self.am = lalg.pinvh(self.cov)
        self.axes = lalg.sqrtm(self.cov)
        self.axes_inv = lalg.pinvh(self.axes)
        # Decorrelate and re-scale points.
        points_t = np.dot(points, self.axes_inv)
        if bootstrap == 0.:
            # Construct radius using leave-one-out if no bootstraps used.
            hsides = _friends_leaveoneout_radius(points_t, 'cubes')
        else:
            # Bootstrap radius using the set of live points.
            ps = [points_t for it in range(bootstrap)]
            ftypes = ['cubes' for it in range(bootstrap)]
            seeds = get_seed_sequence(rstate, bootstrap)
            args = zip(ps, ftypes, seeds)
            hsides = list(M(_friends_bootstrap_radius, args))
        # Conservatively set half-side-length to be maximum of the set.
        hsmax = max(hsides)
        # Re-scale axes.
        self.cov *= hsmax**2
        self.am /= hsmax**2
        self.axes *= hsmax
        self.axes_inv /= hsmax
        detsign, detln = linalg.slogdet(self.am)
        # Sanity check for a positive-definite covariance (mirrors the
        # identical check in __init__ and in RadFriends.update).
        assert detsign > 0
        self.logvol_cube = (self.n * np.log(2.) - 0.5 * detln)
        self.expand = 1.
        # Estimate the volume and fractional overlap with the unit cube
        # using Monte Carlo integration.
        if mc_integrate:
            self.funit = self.monte_carlo_logvol(points,
                                                 return_overlap=True,
                                                 rstate=rstate)[1]

    def _get_covariance_from_all_points(self, points):
        """Compute covariance using all points."""
        return np.cov(points, rowvar=False)

    def _get_covariance_from_clusters(self, points):
        """Compute covariance from re-centered clusters."""
        # Compute pairwise distances.
        distances = spatial.distance.pdist(points,
                                           metric='mahalanobis',
                                           VI=self.am)
        # Identify conglomerates of points by constructing a linkage matrix.
        linkages = cluster.hierarchy.single(distances)
        # Cut when linkage between clusters exceed the radius.
        clusteridxs = cluster.hierarchy.fcluster(linkages,
                                                 1.0,
                                                 criterion='distance')
        nclusters = np.max(clusteridxs)
        if nclusters == 1:
            return self._get_covariance_from_all_points(points)
        else:
            i = 0
            overlapped_points = np.empty_like(points)
            for idx in np.unique(clusteridxs):
                group_points = points[clusteridxs == idx, :]
                group_mean = group_points.mean(axis=0).reshape((1, -1))
                j = i + len(group_points)
                overlapped_points[i:j, :] = group_points - group_mean
                i = j
            return self._get_covariance_from_all_points(overlapped_points)
##################
# HELPER FUNCTIONS
##################
def logvol_prefactor(n, p=2.):
    """
    Returns the ln(volume constant) for an `n`-dimensional sphere with an
    :math:`L^p` norm. The constant is defined as::

        lnf = n * ln(2.) + n * LogGamma(1./p + 1) - LogGamma(n/p + 1.)

    By default the `p=2.` norm is used (i.e. the standard Euclidean norm).
    """
    p = float(p)  # guard against integer input
    per_dim = np.log(2.) + gammaln(1. / p + 1.)
    return n * per_dim - gammaln(n / p + 1.)
def randsphere(n, rstate=None):
    """Draw a point uniformly within an `n`-dimensional unit sphere."""
    # Draw an isotropic direction, then push it to a radius whose CDF is
    # r^n so the interior of the ball is sampled uniformly.
    direction = rstate.standard_normal(size=n)
    radius = rstate.uniform()**(1. / n)
    return direction * (radius / lalg.norm(direction))
def rand_choice(pb, rstate):
    """ Optimized version of numpy's random.choice
    Return an index of a point selected with the probability pb
    The pb must sum to 1
    """
    # Invert the CDF at a single uniform draw; clamp to the last index in
    # case floating-point round-off pushes the draw past cumsum[-1].
    cdf = np.cumsum(pb)
    draw = rstate.uniform()
    idx = np.searchsorted(cdf, draw)
    return min(idx, len(pb) - 1)
def improve_covar_mat(covar0, ntries=100, max_condition_number=1e12):
    """
    Given the covariance matrix improve it, if it is not invertable
    or eigen values are negative or condition number that is above the limit

    Parameters
    ----------
    covar0 : `~numpy.ndarray` with shape (ndim, ndim)
        Candidate covariance matrix (not modified in place).
    ntries : int, optional
        Maximum number of repair iterations. Default `100`.
    max_condition_number : float, optional
        Largest acceptable eigenvalue ratio. Default `1e12`.

    Returns:
    a tuple with four elements
    1) a boolean flag if a matrix is 'good', so it didn't need adjustments
    2) updated matrix
    3) its inverse
    4) its lower-triangular Cholesky factor (the "axes")
    """
    ndim = covar0.shape[0]
    covar = np.array(covar0)
    coeffmin = 1e-10
    # this will be a starting point for the modification
    # of the form (1-coeff)*M + (coeff)*E
    eig_mult = 10  # we want the condition number to be at least that much
    # smaller than the max_condition_number
    # here we are trying to check if we compute cholesky transformation
    # and all eigenvals > 0 and condition number is good
    # if the only problem are the eigenvalues we just increase the lowest ones
    # if we are getting linalg exceptions we use add diag matrices
    for trial in range(ntries):
        # failed == 0: matrix is fine; 1: eigenvalues too small (fixable
        # by clipping); 2: severe failure (blend towards the identity).
        failed = 0
        try:
            # Check if matrix is invertible.
            eigval, eigvec = lalg.eigh(covar, check_finite=False)
            # compute eigenvalues/vectors
            maxval = eigval.max()
            minval = eigval.min()
            # Check if eigen values are good
            if np.isfinite(eigval).all():
                if maxval <= 0:
                    # no positive eigvalues
                    # not much to fix
                    failed = 2
                else:
                    if minval < maxval / max_condition_number:
                        # some eigen values are too small
                        failed = 1
                    else:
                        # eigen values are all right
                        # checking if cholesky works
                        axes = lalg.cholesky(covar,
                                             lower=True,
                                             check_finite=False)
                        # if we are here we are done
                        break
            else:
                # complete failure
                failed = 2
        except lalg.LinAlgError:
            # There is some kind of massive failure
            # we suppress the off-diagonal elements
            failed = 2
        if failed > 0:
            if failed == 1:
                # Clip the smallest eigenvalues well above the condition
                # limit (by eig_mult) and rebuild the matrix.
                eigval_fix = np.maximum(
                    eigval, eig_mult * maxval / max_condition_number)
                covar = eigvec @ np.diag(eigval_fix) @ eigvec.T
            else:
                coeff = coeffmin * (1. / coeffmin)**(trial * 1. / (ntries - 1))
                # this starts at coeffmin when trial=0 and ends at 1
                # when trial == ntries-1
                covar = (1. - coeff) * covar + coeff * np.eye(ndim)
    if failed > 0:
        # Loop exhausted without a clean Cholesky factorization.
        warnings.warn("Failed to guarantee the ellipsoid axes will be "
                      "non-singular. Defaulting to a sphere.")
        covar = np.eye(ndim)  # default to identity
        am = lalg.pinvh(covar)
        axes = lalg.cholesky(covar, lower=True)
    else:
        # invert the matrix using eigen decomposition
        am = eigvec @ np.diag(1. / eigval) @ eigvec.T
    good_mat = trial == 0
    # if True it means no adjustments were necessary
    return good_mat, covar, am, axes
def bounding_ellipsoid(points):
    """
    Calculate the bounding ellipsoid containing a collection of points.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        A set of coordinates.

    Returns
    -------
    ellipsoid : :class:`Ellipsoid`
        The bounding :class:`Ellipsoid` object.

    Raises
    ------
    ValueError
        If only a single point is provided.
    RuntimeError
        If the ellipsoid cannot be adjusted to contain all the points.
    """
    npoints, ndim = points.shape
    if npoints == 1:
        raise ValueError("Cannot compute a bounding ellipsoid of a "
                         "single point.")
    # Calculate covariance of points.
    ctr = np.mean(points, axis=0)
    covar = mle_cov(points, rowvar=False)
    # Offsets of every point from the centroid; reused each loop iteration
    # to measure how far outside the candidate ellipsoid the points lie.
    delta = points - ctr
    # When ndim = 1, `np.cov` returns a 0-d array. Make it a 1x1 2-d array.
    if ndim == 1:
        covar = np.atleast_2d(covar)
    ROUND_DELTA = 1e-3
    # numerical experiments show that round off errors can reach large
    # values if the matrix is poorly conditioned
    # Note that likely the delta here must be related to maximum
    # condition number parameter in improve_covar_mat()
    #
    one_minus_a_bit = 1. - ROUND_DELTA
    for i in range(2):
        # If the matrix needs improvement
        # we improve the matrix twice, first before rescaling
        # and second after rescaling. If matrix is okay, we do
        # the loop once
        good_mat, covar, am, axes = improve_covar_mat(covar)
        # Calculate expansion factor necessary to bound each point.
        # Points should obey `(x-v)^T A (x-v) <= 1`, so we calculate this for
        # each point and then scale A up or down to make the
        # "outermost" point obey `(x-v)^T A (x-v) = 1`.
        fmax = np.einsum('ij,jk,ik->i', delta, am, delta).max()
        # Due to round-off errors, we actually scale the ellipsoid so the
        # outermost point obeys `(x-v)^T A (x-v) < 1 - (a bit) < 1`.
        # in the first iteration we just try to adjust the matrix
        # if it didn't work again, we bail out
        if i == 0 and fmax > one_minus_a_bit:
            mult = fmax / one_minus_a_bit
            # IMPORTANT that we need to update the cov, its inverse and axes
            # as those are used directly
            covar *= mult
            am /= mult
            axes *= np.sqrt(mult)
        if i == 1 and fmax >= 1:
            raise RuntimeError(
                "Failed to initialize the ellipsoid to contain all the points")
        if good_mat:
            # I only need to run through the loop twice if the matrix
            # is problematic
            break
    # Initialize our ellipsoid with *safe* covariance matrix.
    ell = Ellipsoid(ctr, covar, am=am, axes=axes)
    return ell
def _bounding_ellipsoids(points, ell):
    """
    Internal method used to compute a set of bounding ellipsoids when a
    bounding ellipsoid for the entire set has already been calculated.

    Recursively splits the point set in two (k-means, k=2) and keeps the
    split only when the total bounded volume shrinks enough to justify
    the extra ellipsoid (a BIC-style criterion; see comments below).

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        A set of coordinates.

    ell : Ellipsoid
        The bounding ellipsoid containing :data:`points`.

    Returns
    -------
    ells : list of :class:`Ellipsoid` objects
        List of :class:`Ellipsoid` objects used to bound the
        collection of points. Used to initialize the :class:`MultiEllipsoid`
        object returned in :meth:`bounding_ellipsoids`.
    """
    npoints, ndim = points.shape
    # Starting cluster centers are initialized using the major-axis
    # endpoints of the original bounding ellipsoid.
    p1, p2 = ell.major_axis_endpoints()
    start_ctrs = np.vstack((p1, p2))  # shape is (k, ndim) = (2, ndim)
    # Split points into two clusters using k-means clustering with k=2.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        k2_res = kmeans2(points,
                         k=start_ctrs,
                         iter=10,
                         minit='matrix',
                         check_finite=False)
    labels = k2_res[1]  # cluster identifier ; shape is (npoints,)
    # Get points in each cluster.
    points_k = [points[labels == k, :] for k in (0, 1)]
    # If either cluster has less than 2*ndim points, the bounding ellipsoid
    # will be poorly-constrained. Reject the split and simply return the
    # original ellipsoid bounding all the points.
    if points_k[0].shape[0] < 2 * ndim or points_k[1].shape[0] < 2 * ndim:
        return [ell]
    # Bounding ellipsoid for each cluster, possibly enlarged
    # to a minimum volume.
    ells = [bounding_ellipsoid(points_j) for points_j in points_k]
    # If the total volume decreased significantly, we accept
    # the split into subsets. We then recursively split each subset.
    # The condition for the volume decrease is motivated by the BIC values
    # assuming that the number of parameter of the ellipsoid is X (it is
    # Ndim*(Ndim+3)/2, the number of points is N
    # then the BIC of the bounding ellipsoid model is
    # N * log(V0) + X * ln(N)
    # where V0 is the volume of the ellipsoid
    # if we have many (k) ellipsoids with total volume V1
    # then BIC is N*log(V1) + X *k *ln(N)
    # from that we can get the volume decrease condition
    # V1/V0 < exp(-(k-1)*X*ln(N)/N)
    # The choice of BIC is motivated by Xmeans algo from Pelleg 2000
    # See also Feroz2008
    nparam = (ndim * (ndim + 3)) // 2
    vol_dec1 = np.exp(-nparam * np.log(npoints) / npoints)
    if np.logaddexp(ells[0].logvol,
                    ells[1].logvol) < np.log(vol_dec1) + ell.logvol:
        return (_bounding_ellipsoids(points_k[0], ells[0]) +
                _bounding_ellipsoids(points_k[1], ells[1]))
    # here if the split didn't succeed, we still try to split
    # recursively; the deeper decomposition may still beat the single
    # ellipsoid once its full size (len(out)) is accounted for.
    out = (_bounding_ellipsoids(points_k[0], ells[0]) +
           _bounding_ellipsoids(points_k[1], ells[1]))
    vol_dec2 = np.exp(-nparam * (len(out) - 1) * np.log(npoints) / npoints)
    # Only accept the split if the volume decreased significantly
    if logsumexp([e.logvol for e in out]) < np.log(vol_dec2) + ell.logvol:
        return out
    # Otherwise, we are happy with the single bounding ellipsoid.
    return [ell]
def bounding_ellipsoids(points):
    """
    Calculate a set of ellipsoids that bound the collection of points.

    Parameters
    ----------
    points : `~numpy.ndarray` with shape (npoints, ndim)
        A set of coordinates.

    Returns
    -------
    mell : :class:`MultiEllipsoid` object
        The :class:`MultiEllipsoid` object used to bound the
        collection of points.
    """
    # Start from a single (possibly enlarged) bounding ellipsoid, then
    # recursively split it into a collection of smaller ellipsoids.
    outer_ell = bounding_ellipsoid(points)
    return MultiEllipsoid(ells=_bounding_ellipsoids(points, outer_ell))
def _bootstrap_points(points, rseed):
    """
    Select the bootstrap set from points.
    Return:
    Tuple with selected, and not-selected points
    """
    rstate = get_random_generator(rseed)
    npoints = len(points)
    # Sample with replacement; the distinct draws form the "in" set.
    drawn = rstate.integers(npoints, size=npoints)
    mask_in = np.zeros(npoints, dtype=bool)
    mask_in[np.unique(drawn)] = True
    # Degenerate draws are patched up so that the "in" set has at least
    # two points and the "out" set has at least one (both thresholds are
    # judged against the original selection count).
    n_selected = mask_in.sum()
    if n_selected < 2:
        mask_in[:2] = True
    if n_selected > npoints - 1:
        mask_in[0] = False
    return points[mask_in], points[~mask_in]
def _ellipsoid_bootstrap_expand(args):
    """Internal method used to compute the expansion factor for a bounding
    ellipsoid or ellipsoids based on bootstrapping.

    The argument is a tuple:
    multi: boolean flag if we are doing multiell or single ell decomposition
    points: 2d array of points
    rseed: seed to initialize the random generator
    """
    multi, points, rseed = args
    points_in, points_out = _bootstrap_points(points, rseed)
    # Bound the resampled points, then see how far outside the bound the
    # left-out points fall.
    ell = bounding_ellipsoid(points_in)
    if multi:
        ells = _bounding_ellipsoids(points_in, ell)
        # Distance of each missing point to its nearest ellipsoid.
        dists = np.min(np.array([e.distance_many(points_out) for e in ells]),
                       axis=0)
    else:
        dists = ell.distance_many(points_out)
    # Expansion factor: never shrink below 1.
    return max(1., np.max(dists))
def _friends_bootstrap_radius(args):
    """Internal method used to compute the radius (half-side-length) for each
    ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
    bootstrapping.

    `args` is a tuple ``(points, ftype, rseed)`` where `ftype` is either
    ``'balls'`` (Euclidean norm) or ``'cubes'`` (infinity norm).

    Raises
    ------
    ValueError
        If `ftype` is not ``'balls'`` or ``'cubes'``.
    """
    # Unzipping.
    points, ftype, rseed = args
    points_in, points_out = _bootstrap_points(points, rseed)
    # Construct KDTree to enable quick nearest-neighbor lookup for
    # our resampled objects.
    kdtree = spatial.KDTree(points_in)
    if ftype == 'balls':
        # Compute distances from our "missing" points its closest neighbor
        # among the resampled points using the Euclidean norm
        # (i.e. "radius" of n-sphere).
        dists, ids = kdtree.query(points_out, k=1, eps=0, p=2)
    elif ftype == 'cubes':
        # Compute distances from our "missing" points its closest neighbor
        # among the resampled points using the Euclidean norm
        # (i.e. "half-side-length" of n-cube).
        dists, ids = kdtree.query(points_out, k=1, eps=0, p=np.inf)
    else:
        # Previously an unknown `ftype` fell through and crashed below
        # with a NameError on `dists`; fail fast with a clear message.
        raise ValueError("Unknown ftype: {0!r}".format(ftype))
    # Conservative upper-bound on radius.
    dist = max(dists)
    return dist
def _friends_leaveoneout_radius(points, ftype):
"""Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
leave-one-out (LOO) cross-validation."""
# Construct KDTree to enable quick nearest-neighbor lookup for
# our resampled objects.
kdtree = spatial.KDTree(points)
if ftype == 'balls':
# Compute radius to two nearest neighbors (self + neighbor).
dists, ids = kdtree.query(points, k=2, eps=0, p=2)
elif ftype == 'cubes':
# Compute half-side-length to two nearest neighbors (self + neighbor).
dists, ids = kdtree.query(points, k=2, eps=0, p=np.inf)
dist = dists[:, 1] # distances to LOO nearest neighbor
return dist
| 34.07635 | 79 | 0.575441 |
3d0baf36499475546ff1f631eb01d23560e1fc15 | 3,033 | py | Python | cfgov/ask_cfpb/tests/test_hooks.py | higs4281/cfgov-refresh | a02b193fb2373d443265c21845adf8a196e05675 | [
"CC0-1.0"
] | null | null | null | cfgov/ask_cfpb/tests/test_hooks.py | higs4281/cfgov-refresh | a02b193fb2373d443265c21845adf8a196e05675 | [
"CC0-1.0"
] | null | null | null | cfgov/ask_cfpb/tests/test_hooks.py | higs4281/cfgov-refresh | a02b193fb2373d443265c21845adf8a196e05675 | [
"CC0-1.0"
] | null | null | null | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
from wagtail.wagtailcore.models import Page, Site
from ask_cfpb.models import (
Answer, AnswerLandingPage, AnswerPage, Category, SubCategory
)
from ask_cfpb.wagtail_hooks import (
CategoryModelAdmin, SubCategoryModelAdmin, create_answer_id, editor_css
)
class TestAskHooks(TestCase):
    """Tests for the Ask CFPB wagtail hooks (admin models, editor CSS,
    and the `create_answer_id` page-creation hook)."""

    # Preloaded DB state shared by all tests in this class.
    fixtures = ['ask_tests.json']

    def setUp(self):
        """Create English and Spanish Ask landing pages under the root."""
        self.user = User.objects.get(pk=1)
        self.default_site = Site.objects.get(is_default_site=True)
        self.site_root = Page.objects.get(slug='root')
        self.spanish_landing_page = AnswerLandingPage(
            title="Obtener respuestas",
            slug='obtener-respuestas',
            language='es')
        self.site_root.add_child(instance=self.spanish_landing_page)
        self.spanish_landing_page.save()
        # Publish so the page is live for the hook under test.
        self.spanish_landing_page.save_revision(user=self.user).publish()
        self.english_landing_page = AnswerLandingPage(
            title="Ask CFPB",
            slug='ask-cfpb',
            language='en')
        self.site_root.add_child(instance=self.english_landing_page)
        self.english_landing_page.save()
        self.english_landing_page.save_revision(user=self.user).publish()

    def test_ask_hooks(self):
        # The model-admin classes must expose the expected models.
        self.assertEqual(SubCategoryModelAdmin.model, SubCategory)
        self.assertEqual(CategoryModelAdmin.model, Category)

    def test_js_functions(self):
        # The editor CSS hook should pull in the question-tips stylesheet.
        self.assertIn("css/question-tips.css", editor_css())

    def test_create_answer_id_english(self):
        """Test that English page creation generates an Ask ID and pages."""
        request = HttpRequest
        request.user = self.user
        test_page = AnswerPage(
            slug='test-page', title='Test page')
        self.english_landing_page.add_child(instance=test_page)
        test_page.save()
        create_answer_id(request, test_page)
        # The hook appends '-en-<answer id>' to the slug and wires up the
        # Answer base plus both language pages.
        self.assertEqual(test_page.slug, 'test-page-en-{}'.format(
            Answer.objects.order_by('pk').last().pk))
        self.assertIsNotNone(test_page.answer_base)
        self.assertIsNotNone(test_page.answer_base.english_page)
        self.assertIsNotNone(test_page.answer_base.spanish_page)

    def test_create_answer_id_spanish(self):
        """Test that Spanish page creation generates an Ask ID and pages."""
        request = HttpRequest
        request.user = self.user
        test_page = AnswerPage(
            slug='spanish-page-1', title='Spanish page 1', language='es')
        self.spanish_landing_page.add_child(instance=test_page)
        test_page.save()
        create_answer_id(request, test_page)
        # Spanish pages get the '-es-<answer id>' suffix instead.
        self.assertEqual(test_page.slug, 'spanish-page-1-es-{}'.format(
            Answer.objects.order_by('pk').last().pk))
        self.assertIsNotNone(test_page.answer_base)
        self.assertIsNotNone(test_page.answer_base.english_page)
        self.assertIsNotNone(test_page.answer_base.spanish_page)
| 40.44 | 76 | 0.697659 |
b14835594714544fb38e339bfad4ebdcda654a8d | 1,445 | py | Python | src/downloader.py | stevenlio88/Bank_Marketing_Prediction | d4c567cfe262fb3eb4e68a4c4629cd8fc1343c2e | [
"MIT"
] | null | null | null | src/downloader.py | stevenlio88/Bank_Marketing_Prediction | d4c567cfe262fb3eb4e68a4c4629cd8fc1343c2e | [
"MIT"
] | 12 | 2021-11-20T08:34:51.000Z | 2021-12-18T19:03:53.000Z | src/downloader.py | stevenlio88/Bank_Marketing_Prediction | d4c567cfe262fb3eb4e68a4c4629cd8fc1343c2e | [
"MIT"
] | 3 | 2021-11-18T01:01:53.000Z | 2021-11-18T22:31:03.000Z | # author: Melisa Maidana, Steven Lio, Zheren Xu
# date: 2021-11-19
'''This script downloads zip data from given URL, writes both zipped and
unzipped it to local directory.
This script takes a URL arg and an optional local directory path arg.
Usage: downloader.py <url> [--path=PATH]
Options:
<url> data set URL (Required)
--path=PATH Write path (Optional) [default: Current]
'''
import os, os.path
import errno
import requests
import zipfile
from docopt import docopt
opt = docopt(__doc__)
def main(url, path):
    """Download the zip archive at ``url``, save it, and extract it.

    Parameters
    ----------
    url : str
        Location of the zip file to download.
    path : str
        Destination directory, or the sentinel string "Current"
        (the docopt default) meaning the current working directory.

    Returns
    -------
    None
    """
    # "Current" means "use the working directory". Previously it was
    # passed verbatim to mkdir_p() and extractall(), which created a
    # literal './Current' directory instead.
    target_dir = None if path == "Current" else path
    if target_dir is not None:
        mkdir_p(target_dir)
        filename = target_dir + "/" + url.split('/')[-1]
    else:
        filename = url.split('/')[-1]
    print(filename)
    r = requests.get(url)
    with open(filename, 'wb') as output_file:
        output_file.write(r.content)
    print('Download Completed!!!')
    with zipfile.ZipFile(filename, 'r') as zip_ref:
        # extractall(None) extracts into the current working directory.
        zip_ref.extractall(target_dir)
    print('Unzip Completed!!!')
def mkdir_p(path):
    """
    Creates a new directory in the given path. If the directory already exists it does nothing.

    Parameters
    ----------
    path : pd.string
        the path of the new directory

    Returns
    -------
    None

    Raises
    ------
    OSError
        If `path` exists but is not a directory (FileExistsError), or
        creation fails for another reason.
    """
    # os.makedirs(exist_ok=True) replaces the old Python-2-era
    # errno.EEXIST dance: it is a no-op for an existing directory and
    # still raises if `path` exists as a non-directory.
    os.makedirs(path, exist_ok=True)
# Script entry point: forward the docopt-parsed CLI arguments to main().
if __name__ == "__main__":
    main(opt["<url>"], opt["--path"])
| 23.688525 | 95 | 0.617301 |
fa0f41d812423c856e81b486e0fef6dbae4d80dc | 1,972 | py | Python | data/level/level10401.py | levelupai/match3-level-similarity | cc9b28b8741b41bea1273c8bc9b4d265d79a1dca | [
"Apache-2.0"
] | null | null | null | data/level/level10401.py | levelupai/match3-level-similarity | cc9b28b8741b41bea1273c8bc9b4d265d79a1dca | [
"Apache-2.0"
] | 6 | 2020-07-04T02:53:08.000Z | 2022-03-11T23:53:14.000Z | data/level/level10401.py | levelupai/match3-level-similarity | cc9b28b8741b41bea1273c8bc9b4d265d79a1dca | [
"Apache-2.0"
] | 3 | 2019-12-31T11:42:59.000Z | 2021-03-28T20:06:13.000Z | data = {'level_index': 10401, 'move_count': '26',
'board_info': {(1, 5): {'base': (6, 1)}, (1, 4): {'base': (5, 1)}, (1, 3): {'base': (1, 1)},
(2, 6): {'base': (6, 1)}, (2, 5): {'base': (2, 1)}, (2, 4): {'base': (6, 1)},
(2, 3): {'base': (4, 1)}, (2, 2): {'base': (2, 1)}, (3, 7): {'base': (4, 1)}, (3, 6): {},
(3, 5): {'base': (1, 1)}, (3, 4): {'base': (6, 1)}, (3, 3): {'base': (2, 1)},
(3, 2): {'base': (1, 1)}, (3, 1): {'base': (1, 1), 'fall_point': (0, -1)},
(4, 7): {'base': (2, 1)}, (4, 6): {'base': (1, 1)}, (4, 5): {'base': (6, 1)},
(4, 4): {'base': (4, 1)}, (4, 3): {'base': (5, 1)}, (4, 2): {'base': (6, 1)},
(4, 1): {'base': (4, 1), 'fall_point': (0, -1)}, (5, 7): {'base': (5, 1)},
(5, 6): {'base': (4, 1)}, (5, 5): {'base': (6, 1)}, (5, 4): {'base': (1, 1)},
(5, 3): {'base': (6, 1)}, (5, 2): {'base': (5, 1)},
(5, 1): {'base': (1, 1), 'fall_point': (0, -1)}, (6, 7): {'base': (6, 1)},
(6, 6): {'base': (1, 1)}, (6, 5): {'base': (5, 1)}, (6, 4): {'base': (5, 1)},
(6, 3): {'base': (6, 1)}, (6, 2): {'base': (2, 1)},
(6, 1): {'base': (4, 1), 'fall_point': (0, -1)}, (7, 7): {'base': (4, 1)},
(7, 6): {'base': (6, 1)}, (7, 5): {'base': (6, 1)}, (7, 4): {'base': (1, 1)},
(7, 3): {'base': (4, 1)}, (7, 2): {'base': (1, 1)},
(7, 1): {'base': (6, 1), 'fall_point': (0, -1)}, (8, 6): {'base': (6, 1)},
(8, 5): {'base': (5, 1)}, (8, 4): {'base': (2, 1)}, (8, 3): {'base': (6, 1)},
(8, 2): {'base': (1, 1)}, (9, 5): {'base': (6, 1)}, (9, 4): {'base': (6, 1)},
(9, 3): {'base': (5, 1)}}, 'trans_info': {(0, 0): {6: 100}}}
| 89.636364 | 112 | 0.263692 |
ae53dd2b37f35b45c52e3c59703c40a276398ca6 | 675 | bzl | Python | source/bazel/deps/xkbcommon/get.bzl | luxe/CodeLang-compiler | 78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a | [
"MIT"
] | 1 | 2019-01-06T08:45:46.000Z | 2019-01-06T08:45:46.000Z | source/bazel/deps/xkbcommon/get.bzl | luxe/CodeLang-compiler | 78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a | [
"MIT"
] | 264 | 2015-11-30T08:34:00.000Z | 2018-06-26T02:28:41.000Z | source/bazel/deps/xkbcommon/get.bzl | UniLang/compiler | c338ee92994600af801033a37dfb2f1a0c9ca897 | [
"MIT"
] | null | null | null | # Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
def xkbcommon():
    # Register the libxkbcommon sources as an external Bazel repository,
    # pinned to a specific commit and integrity-checked via sha256.
    # The BUILD file is supplied locally since upstream is not a Bazel
    # project.
    http_archive(
        name = "xkbcommon",
        build_file = "//bazel/deps/xkbcommon:build.BUILD",
        sha256 = "bb9b5784267331ac5bb1eed7c38d42ce85e545d7bf63168094c71fbb32093681",
        strip_prefix = "libxkbcommon-bdb009bb0cd925b062ec922bf031042d6209eb29",
        urls = [
            "https://github.com/Unilang/libxkbcommon/archive/bdb009bb0cd925b062ec922bf031042d6209eb29.tar.gz",
        ],
    )
| 39.705882 | 110 | 0.72 |
e54b3e3ba3590d4eeb2048adbc9e7ab154fe0877 | 31,216 | py | Python | qiskit/mapper/_mapping.py | NickyBar/QIP | 11747b40beb38d41faa297fb2b53f28c6519c753 | [
"Apache-2.0"
] | 1 | 2017-07-12T02:04:53.000Z | 2017-07-12T02:04:53.000Z | qiskit/mapper/_mapping.py | NickyBar/QIP | 11747b40beb38d41faa297fb2b53f28c6519c753 | [
"Apache-2.0"
] | null | null | null | qiskit/mapper/_mapping.py | NickyBar/QIP | 11747b40beb38d41faa297fb2b53f28c6519c753 | [
"Apache-2.0"
] | 6 | 2018-05-27T10:52:02.000Z | 2021-04-02T19:20:11.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Layout module to assist with mapping circuit qubits onto physical qubits.
Author: Andrew Cross
"""
import sys
import copy
import math
import numpy as np
import networkx as nx
from qiskit import QISKitException
from qiskit.qasm import Qasm
import qiskit.unroll as unroll
# Notes:
# Measurements may occur and be followed by swaps that result in repeated
# measurement of the same qubit. Near-term experiments cannot implement
# these circuits, so we may need to modify the algorithm.
# It can happen that a swap in a deeper layer can be removed by permuting
# qubits in the layout. We don't do this.
# It can happen that initial swaps can be removed or partly simplified
# because the initial state is zero. We don't do this.
def layer_permutation(layer_partition, layout, qubit_subset, coupling, trials):
    """Find a swap circuit that implements a permutation for this layer.
    The goal is to swap qubits such that qubits in the same two-qubit gates
    are adjacent.
    Based on Sergey Bravyi's algorithm.
    The layer_partition is a list of (qu)bit lists and each qubit is a
    tuple (qreg, index).
    The layout is a dict mapping qubits in the circuit to qubits in the
    coupling graph and represents the current positions of the data.
    The qubit_subset is the subset of qubits in the coupling graph that
    we have chosen to map into.
    The coupling is a CouplingGraph.
    TRIALS is the number of attempts the randomized algorithm makes.
    Returns: success_flag, best_circ, best_d, best_layout, trivial_flag
    If success_flag is True, then best_circ contains an OPENQASM string with
    the swap circuit, best_d contains the depth of the swap circuit, and
    best_layout contains the new positions of the data qubits after the
    swap circuit has been applied. The trivial_flag is set if the layer
    has no multi-qubit gates.
    """
    # Inverse map: coupling-graph qubit -> circuit qubit.
    rev_layout = {b: a for a, b in layout.items()}
    # Collect the two-qubit gates of this layer; single-qubit gates need
    # no routing and are ignored here.
    gates = []
    for layer in layer_partition:
        if len(layer) > 2:
            raise QISKitException("Layer contains >2 qubit gates")
        elif len(layer) == 2:
            gates.append(tuple(layer))
    # Can we already apply the gates?
    # Each adjacent pair contributes distance 1, so a total of len(gates)
    # means every two-qubit gate already acts on coupled qubits.
    dist = sum([coupling.distance(layout[g[0]],
                layout[g[1]]) for g in gates])
    if dist == len(gates):
        return True, "", 0, layout, len(gates) == 0
    # Begin loop over trials of randomized algorithm
    n = coupling.size()
    best_d = sys.maxsize  # initialize best depth
    best_circ = None  # initialize best swap circuit
    best_layout = None  # initialize best final layout
    for trial in range(trials):
        trial_layout = copy.deepcopy(layout)
        rev_trial_layout = copy.deepcopy(rev_layout)
        trial_circ = ""  # circuit produced in this trial
        # Compute Sergey's randomized distance: squared coupling distance
        # perturbed by Gaussian noise so different trials explore
        # different greedy descent paths.
        xi = {}
        for i in coupling.get_qubits():
            xi[i] = {}
        for i in coupling.get_qubits():
            for j in coupling.get_qubits():
                scale = 1.0 + np.random.normal(0.0, 1.0 / n)
                xi[i][j] = scale * coupling.distance(i, j)**2
                # Keep the randomized metric symmetric.
                xi[j][i] = xi[i][j]
        # Loop over depths d up to a max depth of 2n+1
        d = 1
        circ = ""  # circuit for this swap slice
        while d < 2*n+1:
            # Set of available qubits
            qubit_set = set(qubit_subset)
            # While there are still qubits available
            while qubit_set:
                # Compute the objective function
                min_cost = sum([xi[trial_layout[g[0]]][trial_layout[g[1]]]
                                for g in gates])
                # Try to decrease objective function
                progress_made = False
                # Loop over edges of coupling graph
                for e in coupling.get_edges():
                    # Are the qubits available?
                    if e[0] in qubit_set and e[1] in qubit_set:
                        # Try this edge to reduce the cost
                        new_layout = copy.deepcopy(trial_layout)
                        new_layout[rev_trial_layout[e[0]]] = e[1]
                        new_layout[rev_trial_layout[e[1]]] = e[0]
                        rev_new_layout = copy.deepcopy(rev_trial_layout)
                        rev_new_layout[e[0]] = rev_trial_layout[e[1]]
                        rev_new_layout[e[1]] = rev_trial_layout[e[0]]
                        # Compute the objective function
                        new_cost = sum([xi[new_layout[g[0]]][new_layout[g[1]]]
                                        for g in gates])
                        # Record progress if we succeed
                        if new_cost < min_cost:
                            progress_made = True
                            min_cost = new_cost
                            opt_layout = new_layout
                            rev_opt_layout = rev_new_layout
                            opt_edge = e
                # Were there any good choices?
                if progress_made:
                    # Greedily commit the best swap found in this pass and
                    # retire both endpoints so each qubit is swapped at most
                    # once per depth slice.
                    qubit_set.remove(opt_edge[0])
                    qubit_set.remove(opt_edge[1])
                    trial_layout = opt_layout
                    rev_trial_layout = rev_opt_layout
                    circ += "swap %s[%d],%s[%d]; " % (opt_edge[0][0],
                                                      opt_edge[0][1],
                                                      opt_edge[1][0],
                                                      opt_edge[1][1])
                else:
                    break
            # We have either run out of qubits or failed to improve
            # Compute the coupling graph distance
            dist = sum([coupling.distance(trial_layout[g[0]],
                        trial_layout[g[1]]) for g in gates])
            # If all gates can be applied now, we are finished
            # Otherwise we need to consider a deeper swap circuit
            if dist == len(gates):
                # NOTE: circ accumulates swaps across all depth slices of
                # this trial, so trial_circ receives the whole swap circuit.
                trial_circ += circ
                break
            # Increment the depth
            d += 1
        # Either we have succeeded at some depth d < dmax or failed
        dist = sum([coupling.distance(trial_layout[g[0]],
                    trial_layout[g[1]]) for g in gates])
        if dist == len(gates):
            # Keep the shallowest successful trial seen so far.
            if d < best_d:
                best_circ = trial_circ
                best_layout = trial_layout
            best_d = min(best_d, d)
    if best_circ is None:
        return False, None, None, None, False
    else:
        return True, best_circ, best_d, best_layout, False
def direction_mapper(circuit_graph, coupling_graph, verbose=False):
    """Change the direction of CNOT gates to conform to CouplingGraph.
    circuit_graph = input Circuit
    coupling_graph = corresponding CouplingGraph
    verbose = optional flag to print more information
    Adds "h" to the circuit basis.
    Returns a Circuit object containing a circuit equivalent to
    circuit_graph but with CNOT gate directions matching the edges
    of coupling_graph. Raises an exception if the circuit_graph
    does not conform to the coupling_graph.
    """
    # Nothing to do if the circuit contains no cx gates.
    if "cx" not in circuit_graph.basis:
        return circuit_graph
    # Sanity check: cx must be a plain 2-qubit gate with no parameters.
    if circuit_graph.basis["cx"] != (2, 0, 0):
        raise QISKitException("cx gate has unexpected signature %s"
                              % circuit_graph.basis["cx"])
    # Build a 2-qubit template circuit implementing a reversed CNOT via
    # the identity cx(a,b) = H(a) H(b) cx(b,a) H(a) H(b); it is spliced
    # in wherever a cx points against the coupling direction.
    flipped_qasm = "OPENQASM 2.0;\n" + \
                   "gate cx c,t { CX c,t; }\n" + \
                   "gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }\n" + \
                   "gate h a { u2(0,pi) a; }\n" + \
                   "gate cx_flipped a,b { h a; h b; cx b, a; h a; h b; }\n" + \
                   "qreg q[2];\n" + \
                   "cx_flipped q[0],q[1];\n"
    u = unroll.Unroller(Qasm(data=flipped_qasm).parse(),
                        unroll.CircuitBackend(["cx", "h"]))
    u.execute()
    flipped_cx_circuit = u.backend.circuit
    cx_node_list = circuit_graph.get_named_nodes("cx")
    cg_edges = coupling_graph.get_edges()
    for cx_node in cx_node_list:
        nd = circuit_graph.multi_graph.node[cx_node]
        cxedge = tuple(nd["qargs"])
        if cxedge in cg_edges:
            # Gate already points along a coupling edge; leave it alone.
            if verbose:
                print("cx %s[%d], %s[%d] -- OK" % (cxedge[0][0], cxedge[0][1],
                                                   cxedge[1][0], cxedge[1][1]))
            continue
        elif (cxedge[1], cxedge[0]) in cg_edges:
            # Reverse edge exists: substitute the H-conjugated template.
            circuit_graph.substitute_circuit_one(cx_node,
                                                 flipped_cx_circuit,
                                                 wires=[("q", 0), ("q", 1)])
            if verbose:
                print("cx %s[%d], %s[%d] -FLIP" % (cxedge[0][0], cxedge[0][1],
                                                   cxedge[1][0], cxedge[1][1]))
        else:
            # Neither direction is coupled: the circuit was not mapped
            # onto this coupling graph.
            raise QISKitException("circuit incompatible with CouplingGraph: "
                                  + "cx on %s" % cxedge)
    return circuit_graph
def swap_mapper(circuit_graph, coupling_graph,
                initial_layout=None,
                basis="cx,u1,u2,u3,id", verbose=False):
    """Map a Circuit onto a CouplingGraph using swap gates.
    circuit_graph = input Circuit
    coupling_graph = CouplingGraph to map onto
    initial_layout = dict from qubits of circuit_graph to qubits
    of coupling_graph (optional)
    basis = basis string specifying basis of output Circuit
    verbose = optional flag to print more information
    Returns a Circuit object containing a circuit equivalent to
    circuit_graph that respects couplings in coupling_graph, and
    a layout dict mapping qubits of circuit_graph into qubits
    of coupling_graph. The layout may differ from the initial_layout
    if the first layer of gates cannot be executed on the
    initial_layout.
    Raises QISKitException if the circuit needs more qubits than the
    coupling graph provides, if initial_layout is inconsistent with
    the circuit or coupling graph, or if no swap circuit can be found
    for some layer.
    """
    if circuit_graph.width() > coupling_graph.size():
        raise QISKitException("Not enough qubits in CouplingGraph")
    # Schedule the input circuit
    layerlist = circuit_graph.layers()
    if verbose:
        print("schedule:")
        for i in range(len(layerlist)):
            print("    %d: %s" % (i, layerlist[i]["partition"]))
    # Check input layout and create default layout if necessary
    if initial_layout is not None:
        circ_qubits = circuit_graph.get_qubits()
        coup_qubits = coupling_graph.get_qubits()
        qubit_subset = []
        # BUG FIX: iterate (circuit qubit, coupling qubit) pairs with
        # .items(); the previous .values() iteration unpacked each coupling
        # qubit tuple into (register, index), so the validation below
        # checked the wrong objects and qubit_subset collected indices.
        for k, v in initial_layout.items():
            qubit_subset.append(v)
            if k not in circ_qubits:
                # BUG FIX: parenthesize the concatenated format string;
                # previously "%" bound only to the second literal (which has
                # no placeholders), raising TypeError instead of this error.
                raise QISKitException(("initial_layout qubit %s[%d] not "
                                      "in input Circuit") % (k[0], k[1]))
            if v not in coup_qubits:
                raise QISKitException(("initial_layout qubit %s[%d] not "
                                      " in input CouplingGraph")
                                      % (v[0], v[1]))
    else:
        # Supply a default layout: map circuit qubits onto the first
        # coupling-graph qubits in order.
        qubit_subset = coupling_graph.get_qubits()
        qubit_subset = qubit_subset[0:circuit_graph.width()]
        initial_layout = {a: b for a, b in
                          zip(circuit_graph.get_qubits(), qubit_subset)}
    # Find swap circuit to preceed to each layer of input circuit
    layout = copy.deepcopy(initial_layout)
    openqasm_output = ""
    first_layer = True  # True until first layer is output
    first_swapping_layer = True  # True until first swap layer is output
    # Iterate over layers
    for i in range(len(layerlist)):
        # Attempt to find a permutation for this layer
        success_flag, best_circ, best_d, best_layout, trivial_flag \
            = layer_permutation(layerlist[i]["partition"], layout,
                                qubit_subset, coupling_graph, 20)
        # If this fails, try one gate at a time in this layer
        if not success_flag:
            if verbose:
                print("swap_mapper: failed, layer %d, " % i,
                      " retrying sequentially")
            serial_layerlist = layerlist[i]["graph"].serial_layers()
            # Go through each gate in the layer
            for j in range(len(serial_layerlist)):
                success_flag, best_circ, best_d, best_layout, trivial_flag \
                    = layer_permutation(serial_layerlist[j]["partition"],
                                        layout, qubit_subset, coupling_graph,
                                        20)
                # Give up if we fail again
                if not success_flag:
                    raise QISKitException("swap_mapper failed: " +
                                          "layer %d, sublayer %d" % (i, j) +
                                          ", \"%s\"" %
                                          serial_layerlist[j]["graph"].qasm(
                                              no_decls=True,
                                              aliases=layout))
                else:
                    # Update the qubit positions each iteration
                    layout = best_layout
                if best_d == 0:
                    # Output qasm without swaps
                    if first_layer:
                        openqasm_output += circuit_graph.qasm(
                            add_swap=True,
                            decls_only=True,
                            aliases=layout)
                        first_layer = False
                        if not trivial_flag and first_swapping_layer:
                            initial_layout = layout
                            first_swapping_layer = False
                else:
                    # Output qasm with swaps
                    if first_layer:
                        openqasm_output += circuit_graph.qasm(
                            add_swap=True,
                            decls_only=True,
                            aliases=layout)
                        first_layer = False
                        initial_layout = layout
                        first_swapping_layer = False
                    else:
                        if not first_swapping_layer:
                            if verbose:
                                print("swap_mapper: layer %d (%d), depth %d"
                                      % (i, j, best_d))
                            openqasm_output += best_circ
                        else:
                            initial_layout = layout
                            first_swapping_layer = False
                openqasm_output += serial_layerlist[j]["graph"].qasm(
                    no_decls=True,
                    aliases=layout)
        else:
            # Update the qubit positions each iteration
            layout = best_layout
            if best_d == 0:
                # Output qasm without swaps
                if first_layer:
                    openqasm_output += circuit_graph.qasm(
                        add_swap=True,
                        decls_only=True,
                        aliases=layout)
                    first_layer = False
                    if not trivial_flag and first_swapping_layer:
                        initial_layout = layout
                        first_swapping_layer = False
            else:
                # Output qasm with swaps
                if first_layer:
                    openqasm_output += circuit_graph.qasm(
                        add_swap=True,
                        decls_only=True,
                        aliases=layout)
                    first_layer = False
                    initial_layout = layout
                    first_swapping_layer = False
                else:
                    if not first_swapping_layer:
                        if verbose:
                            print("swap_mapper: layer %d, depth %d"
                                  % (i, best_d))
                        openqasm_output += best_circ
                    else:
                        initial_layout = layout
                        first_swapping_layer = False
            openqasm_output += layerlist[i]["graph"].qasm(
                no_decls=True,
                aliases=layout)
    # Parse openqasm_output into Circuit object
    basis += ",swap"
    ast = Qasm(data=openqasm_output).parse()
    u = unroll.Unroller(ast, unroll.CircuitBackend(basis.split(",")))
    u.execute()
    return u.backend.circuit, initial_layout
def test_trig_solution(theta, phi, lamb, xi, theta1, theta2):
"""Test if arguments are a solution to a system of equations.
Cos[phi+lamb] * Cos[theta] = Cos[xi] * Cos[theta1+theta2]
Sin[phi+lamb] * Cos[theta] = Sin[xi] * Cos[theta1-theta2]
Cos[phi-lamb] * Sin[theta] = Cos[xi] * Sin[theta1+theta2]
Sin[phi-lamb] * Sin[theta] = Sin[xi] * Sin[-theta1+theta2]
Returns the maximum absolute difference between right and left hand sides.
"""
delta1 = math.cos(phi + lamb) * math.cos(theta) - \
math.cos(xi) * math.cos(theta1 + theta2)
delta2 = math.sin(phi + lamb) * math.cos(theta) - \
math.sin(xi) * math.cos(theta1 - theta2)
delta3 = math.cos(phi - lamb) * math.sin(theta) - \
math.cos(xi) * math.sin(theta1 + theta2)
delta4 = math.sin(phi - lamb) * math.sin(theta) - \
math.sin(xi) * math.sin(-theta1 + theta2)
return max(map(abs, [delta1, delta2, delta3, delta4]))
def yzy_to_zyz(xi, theta1, theta2, eps=1e-9):
    """Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
    Solve the equation
    Ry(2*theta1).Rz(2*xi).Ry(2*theta2) = Rz(2*phi).Ry(2*theta).Rz(2*lambda)
    for theta, phi, and lambda. This is equivalent to solving the system
    given in the comment for test_solution. Use eps for comparisons with zero.
    Return a solution theta, phi, and lambda.
    """
    solutions = []  # list of potential solutions
    # Four cases to avoid singularities
    if abs(math.cos(xi)) < eps / 10:
        # cos(xi) ~ 0: the Z rotation is (close to) pi/2 and the Y
        # rotations combine directly.
        solutions.append((theta2 - theta1, xi, 0.0))
    elif abs(math.sin(theta1 + theta2)) < eps / 10:
        # sin(theta1+theta2) ~ 0: candidate phi-lambda values are fixed
        # and four candidate thetas are derived from the 4th equation.
        phi_minus_lambda = [
            math.pi / 2,
            3 * math.pi / 2,
            math.pi / 2,
            3 * math.pi / 2]
        stheta_1 = math.asin(math.sin(xi) * math.sin(-theta1 + theta2))
        stheta_2 = math.asin(-math.sin(xi) * math.sin(-theta1 + theta2))
        stheta_3 = math.pi - stheta_1
        stheta_4 = math.pi - stheta_2
        stheta = [stheta_1, stheta_2, stheta_3, stheta_4]
        phi_plus_lambda = list(map(lambda x:
                                   math.acos(math.cos(theta1 + theta2) *
                                             math.cos(xi) / math.cos(x)),
                                   stheta))
        sphi = [(term[0] + term[1]) / 2 for term in
                zip(phi_plus_lambda, phi_minus_lambda)]
        slam = [(term[0] - term[1]) / 2 for term in
                zip(phi_plus_lambda, phi_minus_lambda)]
        solutions = list(zip(stheta, sphi, slam))
    elif abs(math.cos(theta1 + theta2)) < eps / 10:
        # cos(theta1+theta2) ~ 0: symmetric to the previous case with the
        # roles of phi+lambda and phi-lambda exchanged.
        phi_plus_lambda = [
            math.pi / 2,
            3 * math.pi / 2,
            math.pi / 2,
            3 * math.pi / 2]
        stheta_1 = math.acos(math.sin(xi) * math.cos(theta1 - theta2))
        stheta_2 = math.acos(-math.sin(xi) * math.cos(theta1 - theta2))
        stheta_3 = -stheta_1
        stheta_4 = -stheta_2
        stheta = [stheta_1, stheta_2, stheta_3, stheta_4]
        phi_minus_lambda = list(map(lambda x:
                                    math.acos(math.sin(theta1 + theta2) *
                                              math.cos(xi) / math.sin(x)),
                                    stheta))
        sphi = [(term[0] + term[1]) / 2 for term in
                zip(phi_plus_lambda, phi_minus_lambda)]
        slam = [(term[0] - term[1]) / 2 for term in
                zip(phi_plus_lambda, phi_minus_lambda)]
        solutions = list(zip(stheta, sphi, slam))
    else:
        # Generic case: recover phi+/-lambda from ratios of the equations,
        # then try four theta branches differing by quadrant/phase.
        # NOTE(review): math.atan loses quadrant information relative to
        # atan2; the four candidate branches below appear intended to
        # compensate -- verify against test_trig_solution if modifying.
        phi_plus_lambda = math.atan(math.sin(xi) * math.cos(theta1 - theta2) /
                                    (math.cos(xi) * math.cos(theta1 + theta2)))
        phi_minus_lambda = math.atan(math.sin(xi) * math.sin(-theta1 +
                                                             theta2) /
                                     (math.cos(xi) * math.sin(theta1 +
                                                              theta2)))
        sphi = (phi_plus_lambda + phi_minus_lambda) / 2
        slam = (phi_plus_lambda - phi_minus_lambda) / 2
        solutions.append((math.acos(math.cos(xi) * math.cos(theta1 + theta2) /
                                    math.cos(sphi + slam)), sphi, slam))
        solutions.append((math.acos(math.cos(xi) * math.cos(theta1 + theta2) /
                                    math.cos(sphi + slam + math.pi)),
                          sphi + math.pi / 2,
                          slam + math.pi / 2))
        solutions.append((math.acos(math.cos(xi) * math.cos(theta1 + theta2) /
                                    math.cos(sphi + slam)),
                          sphi + math.pi / 2, slam - math.pi / 2))
        solutions.append((math.acos(math.cos(xi) * math.cos(theta1 + theta2) /
                                    math.cos(sphi + slam + math.pi)),
                          sphi + math.pi, slam))
    # Select the first solution with the required accuracy
    deltas = list(map(lambda x: test_trig_solution(x[0], x[1], x[2],
                                                   xi, theta1, theta2),
                      solutions))
    for delta_sol in zip(deltas, solutions):
        if delta_sol[0] < eps:
            return delta_sol[1]
    # No candidate met the tolerance: dump diagnostics and abort.
    # NOTE(review): this path uses assert, which is stripped under -O;
    # a raised exception would be more robust -- left as-is to preserve
    # caller-visible behavior.
    print("xi=", xi)
    print("theta1=", theta1)
    print("theta2=", theta2)
    print("solutions=", solutions)
    print("deltas=", deltas)
    assert False, "Error! No solution found. This should not happen."
def compose_u3(theta1, phi1, lambda1, theta2, phi2, lambda2):
    """Return a triple theta, phi, lambda for the product.
    u3(theta, phi, lambda)
    = u3(theta1, phi1, lambda1).u3(theta2, phi2, lambda2)
    = Rz(phi1).Ry(theta1).Rz(lambda1+phi2).Ry(theta2).Rz(lambda2)
    = Rz(phi1).Rz(phi').Ry(theta').Rz(lambda').Rz(lambda2)
    = u3(theta', phi1 + phi', lambda2 + lambda')
    Return theta, phi, lambda.
    """
    # yzy_to_zyz works with half angles, so halve on the way in and
    # double on the way out.
    half_theta, half_phi, half_lambda = yzy_to_zyz(
        (lambda1 + phi2) / 2.0, theta1 / 2.0, theta2 / 2.0)
    theta = 2.0 * half_theta
    phi = phi1 + 2.0 * half_phi
    lam = lambda2 + 2.0 * half_lambda
    return (theta, phi, lam)
def cx_cancellation(circuit):
    """Cancel back-to-back "cx" gates in circuit.

    Adjacent cx gates on the same ordered qubit pair square to the
    identity, so an even-length group vanishes entirely and an
    odd-length group collapses to a single cx.
    """
    for run in circuit.collect_runs(["cx"]):
        # Group the run into maximal chunks of consecutive gates that
        # act on the same ordered qubit pair.
        chunks = [[run[0]]]
        for node in run[1:]:
            prev = chunks[-1][-1]
            if circuit.multi_graph.node[node]["qargs"] == \
                    circuit.multi_graph.node[prev]["qargs"]:
                chunks[-1].append(node)
            else:
                chunks.append([node])
        # Even chunk: remove every gate. Odd chunk: keep only the first.
        for chunk in chunks:
            doomed = chunk if len(chunk) % 2 == 0 else chunk[1:]
            for node in doomed:
                circuit._remove_op_node(node)
def optimize_1q_gates(circuit):
    """Simplify runs of single qubit gates in the QX basis.

    Unrolls circuit into the basis ["u1", "u2", "u3", "cx", "id"],
    collapses each maximal run of single-qubit gates into at most one
    u1/u2/u3 (or nothing), and returns the optimized Circuit.
    """
    qx_basis = ["u1", "u2", "u3", "cx", "id"]
    urlr = unroll.Unroller(Qasm(data=circuit.qasm(qeflag=True)).parse(),
                           unroll.CircuitBackend(qx_basis))
    urlr.execute()
    unrolled = urlr.backend.circuit
    runs = unrolled.collect_runs(["u1", "u2", "u3", "id"])
    for run in runs:
        qname = unrolled.multi_graph.node[run[0]]["qargs"][0]
        # Accumulated gate for the run; start with the identity u1(0).
        right_name = "u1"
        right_parameters = (0.0, 0.0, 0.0)  # (theta, phi, lambda)
        for node in run:
            nd = unrolled.multi_graph.node[node]
            assert nd["condition"] is None, "internal error"
            assert len(nd["qargs"]) == 1, "internal error"
            assert nd["qargs"][0] == qname, "internal error"
            left_name = nd["name"]
            assert left_name in ["u1", "u2", "u3", "id"], "internal error"
            # Normalize the incoming gate to (theta, phi, lambda) form.
            if left_name == "u1":
                left_parameters = (0.0, 0.0, float(nd["params"][0]))
            elif left_name == "u2":
                left_parameters = (math.pi / 2, float(nd["params"][0]),
                                   float(nd["params"][1]))
            elif left_name == "u3":
                left_parameters = tuple(map(float, nd["params"]))
            else:
                left_name = "u1"  # replace id with u1
                left_parameters = (0.0, 0.0, 0.0)
            # Compose gates
            name_tuple = (left_name, right_name)
            if name_tuple == ("u1", "u1"):
                # u1(lambda1) * u1(lambda2) = u1(lambda1 + lambda2)
                right_parameters = (0.0, 0.0, right_parameters[2] +
                                    left_parameters[2])
            elif name_tuple == ("u1", "u2"):
                # u1(lambda1) * u2(phi2, lambda2) = u2(phi2 + lambda1, lambda2)
                right_parameters = (math.pi / 2, right_parameters[1] +
                                    left_parameters[2], right_parameters[2])
            elif name_tuple == ("u2", "u1"):
                # u2(phi1, lambda1) * u1(lambda2) = u2(phi1, lambda1 + lambda2)
                right_name = "u2"
                right_parameters = (math.pi / 2, left_parameters[1],
                                    right_parameters[2] + left_parameters[2])
            elif name_tuple == ("u1", "u3"):
                # u1(lambda1) * u3(theta2, phi2, lambda2) =
                # u3(theta2, phi2 + lambda1, lambda2)
                right_parameters = (right_parameters[0], right_parameters[1] +
                                    left_parameters[2], right_parameters[2])
            elif name_tuple == ("u3", "u1"):
                # u3(theta1, phi1, lambda1) * u1(lambda2) =
                # u3(theta1, phi1, lambda1 + lambda2)
                right_name = "u3"
                right_parameters = (left_parameters[0], left_parameters[1],
                                    right_parameters[2] + left_parameters[2])
            elif name_tuple == ("u2", "u2"):
                # Using Ry(pi/2).Rz(2*lambda).Ry(pi/2) =
                # Rz(pi/2).Ry(pi-2*lambda).Rz(pi/2),
                # u2(phi1, lambda1) * u2(phi2, lambda2) =
                # u3(pi - lambda1 - phi2, phi1 + pi/2, lambda2 + pi/2)
                right_name = "u3"
                right_parameters = (math.pi - left_parameters[2] -
                                    right_parameters[1], left_parameters[1] +
                                    math.pi / 2, right_parameters[2] +
                                    math.pi / 2)
            else:
                # For composing u3's or u2's with u3's, use
                # u2(phi, lambda) = u3(pi/2, phi, lambda)
                # together with the qiskit.mapper.compose_u3 method.
                right_name = "u3"
                right_parameters = compose_u3(left_parameters[0],
                                              left_parameters[1],
                                              left_parameters[2],
                                              right_parameters[0],
                                              right_parameters[1],
                                              right_parameters[2])
            # Here down, when we simplify, we add f(theta) to lambda to correct
            # the global phase when f(theta) is 2*pi. This isn't necessary but
            # the other steps preserve the global phase, so we continue.
            epsilon = 1e-9  # for comparison with zero
            # Y rotation is 0 mod 2*pi, so the gate is a u1
            # BUG FIX: parenthesize the modulus. "x % 2.0 * math.pi" parses
            # as "(x % 2.0) * math.pi", so this reduction essentially never
            # fired as intended. Same fix applied to the checks below.
            if abs(right_parameters[0] % (2.0 * math.pi)) < epsilon \
                    and right_name != "u1":
                right_name = "u1"
                right_parameters = (0.0, 0.0, right_parameters[1] +
                                    right_parameters[2] +
                                    right_parameters[0])
            # Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2
            if right_name == "u3":
                # theta = pi/2 + 2*k*pi
                if abs((right_parameters[0] - math.pi / 2) %
                       (2.0 * math.pi)) < epsilon:
                    right_name = "u2"
                    right_parameters = (math.pi / 2, right_parameters[1],
                                        right_parameters[2] +
                                        (right_parameters[0] - math.pi / 2))
                # theta = -pi/2 + 2*k*pi
                if abs((right_parameters[0] + math.pi / 2) %
                       (2.0 * math.pi)) < epsilon:
                    right_name = "u2"
                    right_parameters = (math.pi / 2, right_parameters[1] +
                                        math.pi, right_parameters[2] -
                                        math.pi + (right_parameters[0] +
                                                   math.pi / 2))
            # u1 and lambda is 0 mod 4*pi so gate is nop
            if right_name == "u1" and \
                    abs(right_parameters[2] % (4.0 * math.pi)) < epsilon:
                right_name = "nop"
        # Replace the data of the first node in the run
        new_params = []
        if right_name == "u1":
            new_params.append(right_parameters[2])
        if right_name == "u2":
            new_params = [right_parameters[1], right_parameters[2]]
        if right_name == "u3":
            new_params = list(right_parameters)
        # NOTE: uses the networkx 1.x set_node_attributes(G, name, values)
        # call signature, matching the version this file targets.
        nx.set_node_attributes(unrolled.multi_graph, 'name',
                               {run[0]: right_name})
        nx.set_node_attributes(unrolled.multi_graph, 'params',
                               {run[0]: tuple(map(str, new_params))})
        # Delete the other nodes in the run
        for node in run[1:]:
            unrolled._remove_op_node(node)
        if right_name == "nop":
            unrolled._remove_op_node(run[0])
    return unrolled
| 46.245926 | 80 | 0.515377 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.