text stringlengths 957 885k |
|---|
<filename>docs/source/tutorials/src/geo/gpx.py
# Copyright (c) 2020, <NAME>
# License: MIT License
from typing import Iterable, Tuple
from xml.etree import ElementTree as ET
from pathlib import Path
import json
import ezdxf
from ezdxf.math import Matrix44
from ezdxf.addons import geo
# Directory holding the GPX/DXF sample files (next to this script).
TRACK_DATA = Path(__file__).parent
# XML namespace map for GPX 1.1 documents.
GPX_NS = {'gpx': 'http://www.topografix.com/GPX/1/1'}


def load_gpx_track(p: Path) -> Iterable[Tuple[float, float]]:
    """Yield (lon, lat) for every track point of every track segment in *p*.

    Elevation data is ignored: the geo add-on does not support it.
    """
    root = ET.parse(p).getroot()
    for trkpt in root.findall('.//gpx:trkpt', GPX_NS):
        yield float(trkpt.attrib['lon']), float(trkpt.attrib['lat'])
def add_gpx_track(msp, track_data, layer: str):
    """Add a GPX track to the modelspace as LWPOLYLINE entities on *layer*.

    *track_data* is a sequence of (lon, lat) tuples in EPSG:4326.
    """
    track = geo.GeoProxy.parse({
        'type': 'LineString',
        'coordinates': track_data,
    })
    # Project GPS globe (polar) longitude/latitude (EPSG:4326) into 2D
    # cartesian coordinates (EPSG:3395).
    track.globe_to_map()

    # Pick up the geo location reference stored in the DXF file, if any.
    geo_data = msp.get_geodata()
    if geo_data:
        # Transformation matrix and EPSG code come from the drawing itself.
        m, epsg = geo_data.get_crs_transformation()
    else:
        # No geo reference: assume EPSG:3395 with an identity transform.
        m, epsg = Matrix44(), 3395

    # Only EPSG:3395 projections are supported here.
    if epsg != 3395:
        print(f'Incompatible CRS EPSG:{epsg}')
        return

    # Transform CRS coordinates into DXF WCS and emit the entities.
    track.crs_to_wcs(m)
    for entity in track.to_dxf_entities(dxfattribs={'layer': layer}):
        msp.add_entity(entity)
def export_geojson(entity, m):
    """Write *entity* as a GeoJSON file named after its layer into TRACK_DATA.

    *m* is the CRS transformation matrix of the drawing.
    """
    proxy = geo.proxy(entity)
    # DXF WCS -> CRS coordinates, then EPSG:3395 map projection -> globe
    # (polar) representation EPSG:4326.
    proxy.wcs_to_crs(m)
    proxy.map_to_globe()
    name = entity.dxf.layer + '.geojson'
    with open(TRACK_DATA / name, 'wt', encoding='utf8') as fp:
        json.dump(proxy.__geo_interface__, fp, indent=2)
def main(dxf_path: Path, out_path: Path, tracks):
    """Import GPX *tracks* into *dxf_path*, export GeoJSON, save *out_path*."""
    doc = ezdxf.readfile(str(dxf_path))
    msp = doc.modelspace()

    # One new layer per GPX file; color varies with the 1-based index.
    for index, track_path in enumerate(tracks, 1):
        layer = f'track{index}'
        doc.layers.new(layer, dxfattribs={'color': index, 'lineweight': 50})
        add_gpx_track(msp, list(load_gpx_track(track_path)), layer)

    # Determine the CRS transformation for the GeoJSON export.
    geo_data = msp.get_geodata()
    if geo_data:
        m, _ = geo_data.get_crs_transformation()
    else:
        # Identity matrix for DXF files without geo reference data.
        m = Matrix44()

    # Store the geo-located LWPOLYLINE entities as GeoJSON files.
    for track in msp.query('LWPOLYLINE'):
        export_geojson(track, m)

    doc.saveas(str(out_path))
if __name__ == '__main__':
    # Args: source DXF with a geo reference, output DXF, GPX tracks to import.
    main(
        TRACK_DATA / "Graz_10km_3m.dxf",
        TRACK_DATA / "gpx_tracks.dxf",
        [
            TRACK_DATA / 'track1.gpx',
            TRACK_DATA / 'track2.gpx',
            TRACK_DATA / 'track3.gpx',
        ]
    )
|
<filename>examples/example_all_functionality.py
from typing import Dict
from deephaven import DateTimeUtils as dtu
from ibapi.contract import Contract
from ibapi.order import Order
import deephaven_ib as dhib
###########################################################################
# WARNING: THIS SCRIPT EXECUTES TRADES!! ONLY USE ON PAPER TRADING ACCOUNTS
###########################################################################
print("==============================================================================================================")
print("==== Create a client and connect.")
print("==============================================================================================================")
client = dhib.IbSessionTws(host="host.docker.internal", port=7497, client_id=0, download_short_rates=False)
print(f"IsConnected: {client.is_connected()}")
client.connect()
print(f"IsConnected: {client.is_connected()}")
print("==============================================================================================================")
print("==== Get registered contracts for all contract types.")
print("==============================================================================================================")
def _new_contract(**attrs) -> Contract:
    """Return a Contract with each keyword argument assigned as an attribute.

    Pure boilerplate reducer: ``_new_contract(symbol="EUR", secType="CASH")``
    is equivalent to creating a ``Contract()`` and assigning fields one by one.
    """
    contract = Contract()
    for field, value in attrs.items():
        setattr(contract, field, value)
    return contract


def get_contracts() -> Dict[str, Contract]:
    """Return example contracts for every supported security type.

    Keys are short descriptive names ("fx_1", "stock_2", ...); values are
    unregistered ibapi Contract objects.
    """
    rst = {}

    # FX Pairs
    rst["fx_1"] = _new_contract(symbol="EUR", secType="CASH", currency="GBP", exchange="IDEALPRO")

    # Cryptocurrency
    rst["crypto_1"] = _new_contract(symbol="ETH", secType="CRYPTO", currency="USD", exchange="PAXOS")

    # Stock
    # In the API side, NASDAQ is always defined as ISLAND in the exchange field
    rst["stock_1"] = _new_contract(symbol="IBKR", secType="STK", currency="USD", exchange="ISLAND")
    # Specify the Primary Exchange attribute to avoid contract ambiguity
    # (there is an ambiguity because there is also a MSFT contract with primary exchange = "AEB")
    rst["stock_2"] = _new_contract(symbol="MSFT", secType="STK", currency="USD", exchange="SMART",
                                   primaryExchange="ISLAND")

    # Index
    rst["index_1"] = _new_contract(symbol="DAX", secType="IND", currency="EUR", exchange="DTB")

    # CFD
    rst["cfd_1"] = _new_contract(symbol="IBDE30", secType="CFD", currency="EUR", exchange="SMART")

    # Futures: by contract month, by local symbol, with explicit multiplier,
    # continuous (CONTFUT), and continuous plus expired (FUT+CONTFUT).
    rst["future_1"] = _new_contract(symbol="ES", secType="FUT", exchange="GLOBEX", currency="USD",
                                    lastTradeDateOrContractMonth="202203")
    rst["future_2"] = _new_contract(secType="FUT", exchange="GLOBEX", currency="USD", localSymbol="ESH2")
    rst["future_3"] = _new_contract(symbol="DAX", secType="FUT", exchange="DTB", currency="EUR",
                                    lastTradeDateOrContractMonth="202203", multiplier="5")
    rst["future_4"] = _new_contract(symbol="ES", secType="CONTFUT", exchange="GLOBEX")
    rst["future_5"] = _new_contract(symbol="ES", secType="FUT+CONTFUT", exchange="GLOBEX")

    # Options
    rst["option_1"] = _new_contract(symbol="GOOG", secType="OPT", exchange="BOX", currency="USD",
                                    lastTradeDateOrContractMonth="20220318", strike=2800,
                                    right="C", multiplier="100")
    # rst["option_2"] = _new_contract(symbol="SANT", secType="OPT", exchange="MEFFRV", currency="EUR",
    #                                 lastTradeDateOrContractMonth="20190621", strike=7.5,
    #                                 right="C", multiplier="100", tradingClass="SANEU")
    # Watch out for the spaces within the local symbol!
    # rst["option_3"] = _new_contract(localSymbol="C BMW JUL 20 4800", secType="OPT",
    #                                 exchange="DTB", currency="EUR")

    # Futures Options
    rst["futureoption_1"] = _new_contract(symbol="ES", secType="FOP", exchange="GLOBEX", currency="USD",
                                          lastTradeDateOrContractMonth="202203", strike=4700,
                                          right="C", multiplier="50")

    # Bonds: by CUSIP entered as the symbol, or by IB contract id.
    rst["bond_1"] = _new_contract(symbol="912828C57", secType="BOND", exchange="SMART", currency="USD")
    rst["bond_2"] = _new_contract(conId=147554578, exchange="SMART")

    # Mutual Funds
    rst["mutualfund_1"] = _new_contract(symbol="VINIX", secType="FUND", exchange="FUNDSERV", currency="USD")

    # Commodities
    rst["commodity_1"] = _new_contract(symbol="XAUUSD", secType="CMDTY", exchange="SMART", currency="USD")

    # Standard warrants
    rst["standardwarrant_1"] = _new_contract(symbol="OXY", secType="WAR", exchange="SMART", currency="USD",
                                             lastTradeDateOrContractMonth="20270803", strike=22.0,
                                             right="C", multiplier="1")

    # Dutch warrants and structured products
    rst["dutchwarrant_1"] = _new_contract(localSymbol="PJ07S", secType="IOPT", exchange="SBF", currency="EUR")

    return rst
contracts = get_contracts()
# Print each sample contract next to its registered form.
for name, contract in contracts.items():
    print(f"{name} {contract}")
    rc = client.get_registered_contract(contract)
    print(rc)
# Keep a registered contract for every sample, keyed by the same short names.
registered_contracts = {name: client.get_registered_contract(contract) for name, contract in contracts.items()}
print("==============================================================================================================")
print("==== Request account pnl.")
print("==============================================================================================================")
client.request_account_pnl()
print("==============================================================================================================")
print("==== Request contracts matching.")
print("==============================================================================================================")
# Search for contracts whose symbol matches the pattern "AM".
client.request_contracts_matching("AM")
print("==============================================================================================================")
print("==== Request news data.")
print("==============================================================================================================")
contract = Contract()
contract.symbol = "GOOG"
contract.secType = "STK"
contract.currency = "USD"
contract.exchange = "SMART"
rc = client.get_registered_contract(contract)
print(contract)
# Historical news for the first ten days of 2021 (New York time zone),
# plus a single news article fetched by provider code and article id.
start = dtu.convertDateTime("2021-01-01T00:00:00 NY")
end = dtu.convertDateTime("2021-01-10T00:00:00 NY")
client.request_news_historical(rc, start=start, end=end)
client.request_news_article(provider_code="BRFUPDN", article_id="BRFUPDN$107d53ea")
print("==============================================================================================================")
print("==== Set market data type.")
print("==============================================================================================================")
# Switch to DELAYED data if the account has no real-time subscription.
# client.set_market_data_type(dhib.MarketDataType.DELAYED)
client.set_market_data_type(dhib.MarketDataType.REAL_TIME)
print("==============================================================================================================")
print("==== Request bars.")
print("==============================================================================================================")
contract = Contract()
contract.symbol = "IBKR"
contract.secType = "STK"
contract.currency = "USD"
contract.exchange = "SMART"
rc = client.get_registered_contract(contract)
print(contract)
# Historical bars: 10 days of 5-minute bars, once per bar data type.
client.request_bars_historical(rc, duration=dhib.Duration.days(10), bar_size=dhib.BarSize.MIN_5,
                               bar_type=dhib.BarDataType.MIDPOINT)
client.request_bars_historical(rc, duration=dhib.Duration.days(10), bar_size=dhib.BarSize.MIN_5,
                               bar_type=dhib.BarDataType.BID)
client.request_bars_historical(rc, duration=dhib.Duration.days(10), bar_size=dhib.BarSize.MIN_5,
                               bar_type=dhib.BarDataType.ASK)
# keep_up_to_date=False: per the flag name, the history is not kept updating.
client.request_bars_historical(rc, duration=dhib.Duration.days(10), bar_size=dhib.BarSize.MIN_5,
                               bar_type=dhib.BarDataType.BID_ASK, keep_up_to_date=False)
client.request_bars_historical(rc, duration=dhib.Duration.days(10), bar_size=dhib.BarSize.MIN_5,
                               bar_type=dhib.BarDataType.HISTORICAL_VOLATILITY, keep_up_to_date=False)
client.request_bars_historical(rc, duration=dhib.Duration.days(10), bar_size=dhib.BarSize.MIN_5,
                               bar_type=dhib.BarDataType.OPTION_IMPLIED_VOLATILITY, keep_up_to_date=False)
client.request_bars_historical(rc, duration=dhib.Duration.days(10), bar_size=dhib.BarSize.MIN_5,
                               bar_type=dhib.BarDataType.TRADES)
# Real-time bar subscriptions, one per supported bar data type.
client.request_bars_realtime(rc, bar_type=dhib.BarDataType.MIDPOINT)
client.request_bars_realtime(rc, bar_type=dhib.BarDataType.BID)
client.request_bars_realtime(rc, bar_type=dhib.BarDataType.ASK)
client.request_bars_realtime(rc, bar_type=dhib.BarDataType.TRADES)
print("==============================================================================================================")
print("==== Request tick data.")
print("==============================================================================================================")
contract = Contract()
contract.symbol = "GOOG"
contract.secType = "STK"
contract.currency = "USD"
contract.exchange = "SMART"
rc = client.get_registered_contract(contract)
print(contract)
# Reference timestamp for the historical tick windows below.
now = dtu.convertDateTime("2021-01-01T00:00:00 NY")
# Historical ticks: 100 ticks starting at / ending at "now",
# plus live tick subscriptions, for each supported tick data type.
client.request_tick_data_historical(rc, dhib.TickDataType.MIDPOINT, 100, start=now)
client.request_tick_data_historical(rc, dhib.TickDataType.MIDPOINT, 100, end=now)
client.request_tick_data_realtime(rc, dhib.TickDataType.MIDPOINT)
client.request_tick_data_realtime(rc, dhib.TickDataType.BID_ASK)
client.request_tick_data_historical(rc, dhib.TickDataType.LAST, 100, start=now)
client.request_tick_data_historical(rc, dhib.TickDataType.LAST, 100, end=now)
client.request_tick_data_realtime(rc, dhib.TickDataType.LAST)
print("==============================================================================================================")
print("==== Request market data.")
print("==============================================================================================================")
contract = Contract()
contract.symbol = "GOOG"
contract.secType = "STK"
contract.currency = "USD"
contract.exchange = "SMART"
rc = client.get_registered_contract(contract)
print(contract)
# Generic tick types to subscribe to. Entries left commented out are types
# that do not apply to a plain stock contract (futures/ETF/bond specific).
generic_tick_types = [
    dhib.GenericTickType.NEWS,
    dhib.GenericTickType.DIVIDENDS,
    dhib.GenericTickType.AUCTION,
    dhib.GenericTickType.MARK_PRICE,
    dhib.GenericTickType.MARK_PRICE_SLOW,
    dhib.GenericTickType.TRADING_RANGE,
    dhib.GenericTickType.TRADE_LAST_RTH,
    dhib.GenericTickType.TRADE_COUNT,
    dhib.GenericTickType.TRADE_COUNT_RATE,
    dhib.GenericTickType.TRADE_VOLUME,
    dhib.GenericTickType.TRADE_VOLUME_NO_UNREPORTABLE,
    dhib.GenericTickType.TRADE_VOLUME_RATE,
    dhib.GenericTickType.TRADE_VOLUME_SHORT_TERM,
    dhib.GenericTickType.SHORTABLE,
    dhib.GenericTickType.SHORTABLE_SHARES,
    # dhib.GenericTickType.FUTURE_OPEN_INTEREST,
    # dhib.GenericTickType.FUTURE_INDEX_PREMIUM,
    dhib.GenericTickType.OPTION_VOLATILITY_HISTORICAL,
    dhib.GenericTickType.OPTION_VOLATILITY_HISTORICAL_REAL_TIME,
    dhib.GenericTickType.OPTION_VOLATILITY_IMPLIED,
    dhib.GenericTickType.OPTION_VOLUME,
    dhib.GenericTickType.OPTION_VOLUME_AVERAGE,
    dhib.GenericTickType.OPTION_OPEN_INTEREST,
    # dhib.GenericTickType.ETF_NAV_CLOSE,
    # dhib.GenericTickType.ETF_NAV_PRICE,
    # dhib.GenericTickType.ETF_NAV_LAST,
    # dhib.GenericTickType.ETF_NAV_LAST_FROZEN,
    # dhib.GenericTickType.ETF_NAV_RANGE,
    #
    # dhib.GenericTickType.BOND_FACTOR_MULTIPLIER,
]
client.request_market_data(rc, generic_tick_types=generic_tick_types)
print("==============================================================================================================")
print("==== Request option greeks.")
print("==============================================================================================================")
# For an option contract the plain market data request is used here to
# obtain the greeks (no generic tick types needed).
contract = Contract()
contract.symbol = "GOOG"
contract.secType = "OPT"
contract.exchange = "BOX"
contract.currency = "USD"
contract.lastTradeDateOrContractMonth = "20220318"
contract.strike = 2800
contract.right = "C"
contract.multiplier = "100"
rc = client.get_registered_contract(contract)
print(contract)
client.request_market_data(rc)
print("==============================================================================================================")
print("==== Orders.")
print("==============================================================================================================")
contract = Contract()
contract.symbol = "GOOG"
contract.secType = "STK"
contract.currency = "USD"
contract.exchange = "SMART"
rc = client.get_registered_contract(contract)
print(contract)


def _new_limit_buy_order(account: str, quantity: int, limit_price: float) -> Order:
    """Build a BUY limit order for *quantity* shares at *limit_price*."""
    order = Order()
    order.account = account
    order.action = "BUY"
    order.orderType = "LIMIT"
    order.totalQuantity = quantity
    order.lmtPrice = limit_price
    # Legacy attributes; set False for all three orders in the original script.
    order.eTradeOnly = False
    order.firmQuoteOnly = False
    return order


# NOTE(review): the account id is hard-coded; confirm it matches the paper
# trading account in use before running.
print("Placing order: START")
client.order_place(rc, _new_limit_buy_order("DF4943843", 1, 3000))
print("Placing order: END")

print("Placing order: START")
client.order_place(rc, _new_limit_buy_order("DF4943843", 1, 2600))
# BUG FIX: this message read "Placing order: START" (copy-paste duplicate).
print("Placing order: END")

print("Placing order: START")
# Keep the request handle of the last order so it can be cancelled manually.
req = client.order_place(rc, _new_limit_buy_order("DF4943843", 1, 2700))
print("Placing order: END")

# Manual cancellation helpers:
# req.cancel()
# client.order_cancel_all()
print("==============================================================================================================")
print("==== Make all tables visible in the UI.")
print("==============================================================================================================")
# Publish every client table into the module globals so the Deephaven UI
# picks each one up under its table name.
globals().update(client.tables_raw)
globals().update(client.tables)
|
<gh_stars>0
#----------------------------------------------------------------------------
# GHI_PulseCount.py
#
# Raspberry Pi Python library for use with GHI PulseCount.
# https://www.ghielectronics.com/catalog/product/465
# a breakout board for the LSI LS7366R
# http://www.lsicsi.com/pdfs/Data_Sheets/LS7366R.pdf
#
# <NAME>
# 2017-04-28
#----------------------------------------------------------------------------
import spidev
# MDR0 configuration data - the configuration byte is formed with
# single segments taken from each group and ORing all together.

# Count modes
NQUAD = 0x00    # non-quadrature mode
QUADRX1 = 0x01  # X1 quadrature mode
QUADRX2 = 0x02  # X2 quadrature mode
QUADRX4 = 0x03  # X4 quadrature mode
# Running modes
FREE_RUN = 0x00
SINGE_CYCLE = 0x04  # (sic) datasheet spelling is "single-cycle"; name kept for compatibility
RANGE_LIMIT = 0x08
MODULO_N = 0x0C
# Index modes
DISABLE_INDX = 0x00  # index disabled
INDX_LOADC = 0x10    # index loads CNTR
INDX_RESETC = 0x20   # index resets CNTR
INDX_LOADO = 0x30    # index loads OL
ASYNCH_INDX = 0x00   # asynchronous index
# NOTE(review): the LS7366R datasheet places the synchronous-index bit at B6
# (0x40); 0x80 collides with FILTER_2 below -- verify against the datasheet.
SYNCH_INDX = 0x80    # synchronous index
# Clock filter modes
FILTER_1 = 0x00  # filter clock frequency division factor 1
FILTER_2 = 0x80  # filter clock frequency division factor 2

# MDR1 configuration data; any of these
# data segments can be ORed together.
# Flag modes
NO_FLAGS = 0x00  # all flags disabled
IDX_FLAG = 0x10  # IDX flag
CMP_FLAG = 0x20  # CMP flag
BW_FLAG = 0x40   # BW flag
CY_FLAG = 0x80   # CY flag
# 1 to 4 bytes data-width
BYTE_4 = 0x00  # four byte mode
BYTE_3 = 0x01  # three byte mode
BYTE_2 = 0x02  # two byte mode
BYTE_1 = 0x03  # one byte mode
# Enable/disable counter
EN_CNTR = 0x00   # counting enabled
DIS_CNTR = 0x04  # counting disabled

# LS7366R op-code list
CLR_MDR0 = 0x08
CLR_MDR1 = 0x10
CLR_CNTR = 0x20
CLR_STR = 0x30
READ_MDR0 = 0x48
READ_MDR1 = 0x50
READ_CNTR = 0x60
READ_OTR = 0x68
READ_STR = 0x70
WRITE_MDR1 = 0x90
WRITE_MDR0 = 0x88
WRITE_DTR = 0x98
LOAD_CNTR = 0xE0
LOAD_OTR = 0xE4


class GHI_PulseCount(object):
    """Driver for the GHI PulseCount breakout (LSI LS7366R quadrature counter).

    Talks to the LS7366R on SPI bus 0, device 0, via spidev.
    """

    def __init__(self):
        # Setup SPI interface.
        self.spi = spidev.SpiDev()
        self.spi.open(0, 0)
        self.spi.max_speed_hz = 50000
        # Default config: X1 quadrature, free running, index disabled,
        # filter factor 1, 4-byte counter width, counting enabled.
        self.write_mdr0(QUADRX1 | FREE_RUN | DISABLE_INDX | FILTER_1)
        self.write_mdr1(BYTE_4 | EN_CNTR)
        # Counter starts at zero.
        self.set_counts(0)

    def get_counts(self):
        """Return the counter value as an unsigned 32-bit integer.

        Assumes the default 4-byte counter mode.
        NOTE(review): the LS7366R counts in two's complement, so negative
        counts come back as large unsigned values -- confirm desired semantics.
        """
        b = self.read_cntr()
        return (b[0] & 0xFF) << 24 | \
               (b[1] & 0xFF) << 16 | \
               (b[2] & 0xFF) << 8 | \
               (b[3] & 0xFF)

    def set_counts(self, value):
        """Load *value* into the counter (write DTR, then transfer to CNTR)."""
        self.write_dtr(value)
        self.load_cntr()

    def get_byte_mode(self):
        """Return current counter mode number of bytes (1 to 4)."""
        return 4 - (self.read_mdr1()[0] & 0x03)

    def clear_mdr0(self):
        """Clear MDR0."""
        # BUG FIX: was self.spi_writebytes(...) -- no such attribute exists,
        # so this method always raised AttributeError.
        self.spi.writebytes([CLR_MDR0])

    def clear_mdr1(self):
        """Clear MDR1."""
        # BUG FIX: was self.spi_writebytes(...) -- see clear_mdr0.
        self.spi.writebytes([CLR_MDR1])

    def clear_cntr(self):
        """Clear the counter."""
        self.spi.writebytes([CLR_CNTR])

    def clear_str(self):
        """Clear the status register."""
        self.spi.writebytes([CLR_STR])

    def read_mdr0(self):
        """Output MDR0 serially on MISO."""
        return self.spi.xfer2([READ_MDR0, 0x00])[1:]

    def read_mdr1(self):
        """Output MDR1 serially on MISO."""
        return self.spi.xfer2([READ_MDR1, 0x00])[1:]

    def read_cntr(self):
        """Transfer CNTR to OTR, then output OTR serially on MISO."""
        return self.spi.xfer2([READ_CNTR, 0x00, 0x00, 0x00, 0x00])[1:]

    def read_otr(self):
        """Output OTR serially on MISO."""
        return self.spi.xfer2([READ_OTR, 0x00, 0x00, 0x00, 0x00])[1:]

    def read_str(self):
        """Output STR serially on MISO."""
        return self.spi.xfer2([READ_STR, 0x00])[1:]

    def write_mdr0(self, mode):
        """Write serial data at MOSI into MDR0."""
        self.spi.writebytes([WRITE_MDR0, mode])

    def write_mdr1(self, mode):
        """Write serial data at MOSI into MDR1."""
        self.spi.writebytes([WRITE_MDR1, mode])

    def write_dtr(self, value):
        """Write *value* (4 bytes, big-endian) at MOSI into DTR."""
        self.spi.writebytes([WRITE_DTR, value >> 24 & 0xFF,
                             value >> 16 & 0xFF,
                             value >> 8 & 0xFF,
                             value & 0xFF])

    def load_cntr(self):
        """Transfer DTR to CNTR in parallel."""
        self.spi.writebytes([LOAD_CNTR])

    def load_otr(self):
        """Transfer CNTR to OTR in parallel."""
        self.spi.writebytes([LOAD_OTR])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
import json
import os
import sys
import tarfile
import zipfile
from mock import patch
from resilient_sdk.cmds import CmdExtPackage as CmdPackage
from resilient_sdk.cmds.validate import CmdValidate
from resilient_sdk.util import package_file_helpers as package_helpers
from resilient_sdk.util import sdk_helpers
from tests import helpers
from tests.shared_mock_data import mock_paths
# Members expected inside a generated app.zip. The last element
# (validate_report.md) is only present when packaging with --validate,
# so tests without it compare against EXPECTED_FILES_APP_ZIP[:-1].
EXPECTED_FILES_APP_ZIP = ['app.json', 'export.res', 'fn_main_mock_integration-1.0.0.tar.gz', 'validate_report.md']
def test_setup():
    """Placeholder: setup tests are still to be written (TODO)."""
def test_execute_command_no_samples(fx_copy_fn_main_mock_integration, fx_get_sub_parser, fx_cmd_line_args_package):
    """Package an app with --no-samples and verify the app.zip contents."""
    integration_name, integration_path = fx_copy_fn_main_mock_integration
    # Point the "fn_main_mock_integration" cmd line arg at the temp dir copy,
    # and skip payload sample collection.
    sys.argv[sys.argv.index(integration_name)] = integration_path
    sys.argv.append("--no-samples")

    # Run the package command.
    packager = CmdPackage(fx_get_sub_parser)
    parsed_args = packager.parser.parse_known_args()[0]
    app_zip_path = packager.execute_command(parsed_args)

    # Must be a valid zip holding the expected members
    # (validate_report.md is only added when --validate is used).
    assert zipfile.is_zipfile(app_zip_path)
    with zipfile.ZipFile(app_zip_path, 'r') as app_zip:
        assert helpers.verify_expected_list(EXPECTED_FILES_APP_ZIP[:-1], app_zip.namelist())

    # app.zip/app.json must match the mock copy exactly.
    assert sdk_helpers.read_zip_file(app_zip_path, "app.json") == sdk_helpers.read_file(mock_paths.MOCK_APP_ZIP_APP_JSON)[0]

    # app.zip/export.res is compared as parsed JSON so formatting differences don't matter.
    exported = json.loads(sdk_helpers.read_zip_file(app_zip_path, "export.res"))
    expected = json.loads(sdk_helpers.read_file(mock_paths.MOCK_APP_ZIP_EXPORT_RES)[0])
    assert exported == expected
def test_execute_command_with_samples(fx_copy_fn_main_mock_integration, fx_get_sub_parser, fx_cmd_line_args_package, fx_add_dev_env_var):
    """Package an app (payload samples included) and verify the app.zip contents."""
    integration_name, integration_path = fx_copy_fn_main_mock_integration
    # Point the "fn_main_mock_integration" cmd line arg at the temp dir copy.
    sys.argv[sys.argv.index(integration_name)] = integration_path

    # Run the package command.
    packager = CmdPackage(fx_get_sub_parser)
    parsed_args = packager.parser.parse_known_args()[0]
    app_zip_path = packager.execute_command(parsed_args)

    # Must be a valid zip holding the expected members (no validate report).
    assert zipfile.is_zipfile(app_zip_path)
    with zipfile.ZipFile(app_zip_path, 'r') as app_zip:
        assert helpers.verify_expected_list(EXPECTED_FILES_APP_ZIP[:-1], app_zip.namelist())

    # app.zip/app.json must match the mock copy exactly.
    assert sdk_helpers.read_zip_file(app_zip_path, "app.json") == sdk_helpers.read_file(mock_paths.MOCK_APP_ZIP_APP_JSON)[0]

    # app.zip/export.res is compared as parsed JSON (payload samples variant).
    exported = json.loads(sdk_helpers.read_zip_file(app_zip_path, "export.res"))
    expected = json.loads(sdk_helpers.read_file(mock_paths.MOCK_APP_ZIP_EXPORT_RES_WITH_PAYLOAD_SAMPLES)[0])
    assert exported == expected
def test_execute_command_with_payload_sample_file_missing(caplog, fx_copy_fn_main_mock_integration, fx_get_sub_parser, fx_cmd_line_args_package, fx_add_dev_env_var):
    """A missing payload_samples JSON file logs a warning but packaging continues."""
    integration_name, integration_path = fx_copy_fn_main_mock_integration
    # Point the "fn_main_mock_integration" cmd line arg at the temp dir copy.
    sys.argv[sys.argv.index(integration_name)] = integration_path

    # Rename one payload_samples file out of the way so the lookup fails.
    sample_path = os.path.join(integration_path,
                               package_helpers.BASE_NAME_PAYLOAD_SAMPLES_DIR,
                               "mock_function_one",
                               package_helpers.BASE_NAME_PAYLOAD_SAMPLES_EXAMPLE)
    sdk_helpers.rename_file(sample_path, "no_name.json")

    # Package the app; the missing sample must only produce a warning.
    packager = CmdPackage(fx_get_sub_parser)
    packager.execute_command(packager.parser.parse_known_args()[0])

    expected_warning = ("WARNING: could not access JSON file to add payload_samples. Continuing to create package.\n"
                        "Add '--no-samples' flag to avoid looking for them and avoid this warning message.")
    assert expected_warning in caplog.text
def test_execute_command_with_validate_enabled(fx_copy_and_pip_install_fn_main_mock_integration, fx_get_sub_parser, fx_cmd_line_args_package, fx_add_dev_env_var):
    """Packaging with --validate embeds validate_report.md into the app.zip."""
    integration_name, integration_path = fx_copy_and_pip_install_fn_main_mock_integration
    # Point the "fn_main_mock_integration" cmd line arg at the temp dir copy.
    sys.argv[sys.argv.index(integration_name)] = integration_path
    sys.argv.append("--no-samples")
    sys.argv.append("--validate")

    # Stub out the subprocess runner so validation always "succeeds".
    with patch("resilient_sdk.cmds.validate.sdk_helpers.run_subprocess") as mock_process:
        mock_process.return_value = (0, "Done!")

        # Package the app with a validate command wired in.
        packager = CmdPackage(fx_get_sub_parser, CmdValidate(fx_get_sub_parser))
        app_zip_path = packager.execute_command(packager.parser.parse_known_args()[0])

        # All expected members, including validate_report.md, must be present.
        assert zipfile.is_zipfile(app_zip_path)
        with zipfile.ZipFile(app_zip_path, 'r') as app_zip:
            assert helpers.verify_expected_list(EXPECTED_FILES_APP_ZIP, app_zip.namelist())

        # The embedded report carries the standard validation sections.
        report = sdk_helpers.read_zip_file(app_zip_path, "validate_report.md")
        for section in ("## App Details", "## `setup.py` file validation", "## Package files validation"):
            assert section in report
def test_bak_files_are_not_packaged(fx_copy_fn_main_mock_integration, fx_get_sub_parser, fx_cmd_line_args_package):
    """Backup (.bak) files must never end up in the built sdist."""
    integration_name, integration_path = fx_copy_fn_main_mock_integration
    # Point the "fn_main_mock_integration" cmd line arg at the temp dir copy.
    sys.argv[sys.argv.index(integration_name)] = integration_path

    # Package the app.
    packager = CmdPackage(fx_get_sub_parser)
    packager.execute_command(packager.parser.parse_known_args()[0])

    # Inspect every member of the generated sdist tarball.
    sdist_path = os.path.join(integration_path, "dist", integration_name + "-1.0.0.tar.gz")
    with tarfile.open(name=sdist_path, mode="r:gz") as sdist:
        assert all(not member.name.endswith(".bak") for member in sdist.getmembers())
|
import numpy as np
import os
from trotter import *
folder = 'phi0'
dim = 3  # clock-model dimension (Z_3)

# Scan the (g, theta) parameter plane; J is tied to g via J = -1 - g.
for g in np.linspace(-1, 0, 21):
    for theta in np.linspace(0, np.pi / 3, 21):
        print(g, theta)
        # Round all parameters to 5 decimals so they make stable file names.
        g = float("{0:.5f}".format(g))
        J = float("{0:.5f}".format(-1 - g))
        theta = float("{0:.5f}".format(theta))
        phi = float("{0:.5f}".format(0))

        # Complex phases multiplying the interaction / transverse-field terms.
        alpha = np.array([np.exp(1j * theta), np.exp(-1j * theta)])
        beta = np.array([np.exp(1j * phi), np.exp(1j * phi)])

        chi = 10      # bond dimension
        delta = 0.01  # Trotter time step
        N = 500       # number of iTEBD sweeps

        # Clock operators: sigma is the diagonal phase operator, tau the shift.
        sigma = np.diag(np.exp(1j * 2 * np.pi * np.arange(dim) / dim))
        tau = np.roll(np.eye(dim), -1, axis=-1)

        # BUG FIX: "matrix ** m" is an ELEMENTWISE power for ndarrays, so
        # tau ** m returned tau unchanged for every m >= 1 (its entries are
        # all 0 or 1) instead of the m-th operator power.
        # np.linalg.matrix_power computes the intended matrix power; for the
        # diagonal sigma both forms agree.
        mpow = np.linalg.matrix_power
        interactions = list(zip(J * alpha,
                                [mpow(sigma, m) for m in range(1, dim)],
                                [mpow(sigma, dim - m) for m in range(1, dim)]))
        transv_field = list(zip(g * beta, [mpow(tau, m) for m in range(1, dim)]))
        U = ST_step(delta, interactions, transv_field)

        # Random initial iMPS tensors (two-site A/B unit cell).
        GA = np.random.rand(dim, chi, chi)
        LA = np.random.rand(chi)
        GB = np.random.rand(dim, chi, chi)
        LB = np.random.rand(chi)

        discarded_weights = []
        for step in range(N):
            # One sweep: update the A-B bond, then the B-A bond.
            GA, LA, GB, dw, norm_sq = iTEDB(dim, chi, GA, LA, GB, LB, U)
            E_sim = -np.log(norm_sq) / delta / 2
            discarded_weights.append(dw)
            GB, LB, GA, dw, norm_sq = iTEDB(dim, chi, GB, LB, GA, LA, U)
            E_sim = -np.log(norm_sq) / delta / 2
            discarded_weights.append(dw)

        # Persist final state and observables, one directory per parameter set.
        File_code = '{}J_{}g_{}theta_{}phi_{}Chi_{}delta_{}N'.format(J, g, theta, phi, chi, delta, N)
        out_dir = '{}/data_{}'.format(folder, File_code)
        # os.makedirs replaces the shell call "mkdir -p" (portable, no subshell).
        os.makedirs(out_dir, exist_ok=True)
        np.savetxt('./{}/Energy.dat'.format(out_dir), [E_sim], header=File_code)
        np.savetxt('./{}/GammaA.dat'.format(out_dir), GA.flatten(), header=File_code)
        np.savetxt('./{}/GammaB.dat'.format(out_dir), GB.flatten(), header=File_code)
        np.savetxt('./{}/LambdaA.dat'.format(out_dir), LA, header=File_code)
        np.savetxt('./{}/LambdaB.dat'.format(out_dir), LB, header=File_code)
        np.savetxt('./{}/DiscardedWeights.dat'.format(out_dir), discarded_weights, header=File_code)
|
"""asynchronous clientside protocol for twisted."""
import struct
from twisted.protocols.basic import IntNStringReceiver
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.python import log
from . import data, utils
class VersionMismatch(Exception):
    """Raised when the client and server protocol versions differ."""
class ProtocolError(Exception):
    """Raised when the peer sends an invalid answer or message."""
class IncorrectPassword(ValueError):
    """Raised when the supplied password is wrong or missing."""
class ClientProtocol(IntNStringReceiver):
"""
This protocol connects to the router.
You can use the 'callback' attribute to get a deferred which will be called once the handshake is finished.
"""
def __init__(self, password):
    """Initialize the protocol state.

    :param password: password sent if the server requests one;
        ``None`` means no password is available.
    """
    # Some (older) twisted base classes have no __init__ of their own.
    if hasattr(IntNStringReceiver, "__init__"):
        IntNStringReceiver.__init__(self)
    self.password = password
    self.requests = {}  # presumably request-id -> pending request state; verify against usage
    self.free = set()   # presumably ids currently free for reuse -- TODO confirm
    # Id range assigned by the router; populated later in the handshake.
    self.range_start = None
    self.range_end = None
    # Handshake state machine position (see stringReceived).
    self.mode = data.MODE_CONNECTING
    # Length-prefix framing parameters consumed by IntNStringReceiver.
    self.structFormat = data.MESSAGE_LENGTH_FORMAT
    self.prefixLength = data.MESSAGE_LENGTH_SIZE
    self.callback = Deferred() # will be called once the handshake is finished
def connectionMade(self):
"""called when the connection was established."""
if hasattr(IntNStringReceiver, "connectionMade"):
IntNStringReceiver.connectionMade(self)
# init handshake
log.msg("Connection established, initiating handshake...")
self.mode = data.MODE_VERSION
self.sendString(struct.pack(data.VERSION_FORMAT, data.VERSION))
def stringReceived(self, msg):
"""called when a message was received."""
if self.mode == data.MODE_ERROR:
# error occured, connection is probably already being terminated.
log.err("Received a message, but the protocol is already mismatching. Ignoring Message.")
pass
elif self.mode == data.MODE_VERSION:
# check version answer
if msg == data.STATE_OK:
# Version OK, continue
log.msg("Version OK, sending mode and requesting range...")
self.sendString(data.MODE_CLIENT)
self.mode = data.MODE_RANGE
elif msg == data.STATE_ERROR:
# Version mismatch
log.err("Version is not supported by the server. Losing Connection...")
self.mode = data.MODE_ERROR
f = Failure(VersionMismatch("Version does not match!"))
try:
self.transport.loseConnection()
except:
pass
finally:
self.callback.errback(f)
elif msg == data.STATE_PASSWORD_REQUIRED:
# Version OK, but password required
if self.password is not None:
log.msg("Version OK, sending password...")
self.sendString(self.password)
self.mode = data.MODE_PASSWORD
else:
log.err("Version OK, but a password is required!")
self.mode = data.MODE_ERROR
self.transport.loseConnection()
self.callback.errback(Failure(IncorrectPassword("Password required, but None specified!")))
else:
# unknown answer
log.err("Received unknown answer! Losing Connection...")
self.mode = data.MODE_ERROR
self.callback.errback(Failure(ProtocolError("Unknown Answer during version check!")))
self.transport.loseConnection()
elif self.mode == data.MODE_PASSWORD:
# check answer for password
if msg == data.STATE_OK:
# Password OK, continue
log.msg("Password OK, sending mode and requesting range...")
self.mode = data.MODE_RANGE
self.sendString(data.MODE_CLIENT) # < -- this is correct
elif msg == data.STATE_ERROR:
# Password invalid
log.err("Invalid password!")
self.mode = data.MODE_ERROR
self.callback.errback(Failure(IncorrectPassword("Invalid Password!")))
else:
# invalid answer
log.err("Received unknown Answer. Losing Connection...")
self.mode = data.MODE_ERROR
self.callback.errback(Failure(ProtocolError("Unknown Answer during password check!")))
self.transport.loseConnection()
elif self.mode == data.MODE_RANGE:
self.range_start, self.range_end = struct.unpack(data.RANGE_FORMAT, msg)
log.msg("Range is {s} to {e}.".format(s=self.range_start, e=self.range_end))
self.cur_id = self.range_start
self.mode = data.MODE_CLIENT
self.callback.callback(self)
elif self.mode == data.MODE_CLIENT:
# handle answers
actionbyte = msg[0]
msg = msg[1:]
if actionbyte in (data.ID_ALLKEYS, data.ID_ANSWER, data.ID_NOTFOUND):
rids = msg[:data.MESSAGE_ID_SIZE]
rid = struct.unpack(data.MESSAGE_ID_FORMAT, rids)[0]
self.free.add(rid)
if rid not in self.requests:
pass
else:
d = self.requests[rid]
del self.requests[rid]
if actionbyte == data.ID_ANSWER:
value = msg[data.MESSAGE_ID_SIZE:]
d.callback(value)
elif actionbyte == data.ID_NOTFOUND:
f = Failure(KeyError("Key not found!"))
d.errback(f)
elif actionbyte == data.ID_ALLKEYS:
keystring = msg[data.MESSAGE_ID_SIZE:]
keylist = utils.keystring2list(keystring)
d.callback(keylist)
else:
log.err("Logic Error! Expected all answer IDs to have been checked!")
else:
log.err("Error: Unexpected Answer from server! Losing Connection...")
self.mode = data.MODE_ERROR
self.transport.loseConnection()
self.callback.errback(Failure(ProtocolError("Unknown Answer!")))
else:
log.err("Logic Error: set protocol to unknown mode!")
def get_id(self):
"""returns a request id."""
if len(self.free) > 0:
return self.free.pop()
else:
i = self.cur_id
if self.cur_id + 1 >= self.range_end:
raise RuntimeError("No ids left.")
self.cur_id += 1
return i
def get(self, key):
"""returns a deferred which will be fired with the value of key."""
assert isinstance(key, str), "Expected key to be a string!"
d = Deferred()
rid = self.get_id()
rids = struct.pack(data.MESSAGE_ID_FORMAT, rid)
self.requests[rid] = d
self.sendString(data.ID_GET + rids + key)
return d
def set(self, key, value):
"""sets key to value."""
assert isinstance(key, str) and isinstance(value, str), "Expected key/value to be strings!"
keylength = len(key)
keylengthstring = struct.pack(data.MESSAGE_KEY_LENGTH_FORMAT, keylength)
self.sendString(data.ID_SET + keylengthstring + key + value)
def delete(self, key):
"""deletes the value for key and key."""
assert isinstance(key, str), "Expected key to be a string!"
self.sendString(data.ID_DEL + key)
def getkeys(self):
"""returns a deferred which will be fired with a list of all keys"""
d = Deferred()
rid = self.get_id()
rids = struct.pack(data.MESSAGE_ID_FORMAT, rid)
self.requests[rid] = d
self.sendString(data.ID_GETKEYS + rids)
return d
|
import csv
import io
import re
from datetime import date, datetime, timedelta
import dateparser
import django.conf
import pytz
import yaml
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand, CommandError
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from pretix_itkexport.exporters import (
EventExporter, PaidOrdersLineExporter, PaidOrdersGroupedExporter,
)
class Command(BaseCommand):
    """Management command that runs a configured exporter and either
    prints the result as CSV or emails it to a list of recipients."""

    help = 'Exports stuff'
    date_format = '%Y-%m-%d'

    # Maps the export_type positional argument to the exporter class used.
    exporter_classes = {
        'event': EventExporter,
        'paid-orders': PaidOrdersLineExporter,
        'paid-orders-grouped': PaidOrdersGroupedExporter
    }

    def add_arguments(self, parser):
        """Register the CLI arguments for this command."""
        parser.add_argument('export_type', type=str, help=', '.join(Command.exporter_classes.keys()))
        parser.add_argument('--info', action='store_true', help='Show info on the specified export type')
        parser.add_argument('--starttime', nargs='?', type=str)
        parser.add_argument('--endtime', nargs='?', type=str)
        parser.add_argument('--period', nargs='?', type=str,
                            help='current-year, previous-year, '
                                 'current-month, previous-month, '
                                 'current-week, previous-week, '
                                 'current-day, today, '
                                 'previous-day, yesterday'
                                 ', previous-week[±days]'
                            )
        # Typo fix: "adress" -> "address" in the user-facing help text.
        parser.add_argument('--recipient', action='append', nargs='?', type=str, help='Email address to send export result to (can be used multiple times)')
        parser.add_argument('--debug', action='store_true')
        parser.add_argument('--verbose', action='store_true')

    def handle(self, *args, **options):
        """Run the export.

        Raises CommandError on invalid input; with --debug the original
        exception is re-raised instead for a full traceback.
        """
        # Bug fix: read the flags before entering the try block so the
        # except clause can always reference `debug`. Previously, any
        # exception raised before `debug = options['debug']` made the
        # error handler itself crash with NameError.
        debug = options['debug']
        verbose = debug or options['verbose']
        try:
            from django.conf import settings
            translation.activate(settings.LANGUAGE_CODE)
            # NOTE: `settings` is rebound here to this command's own dict,
            # deliberately shadowing django.conf.settings from this point on.
            settings = self.getSettings(options)
            if debug:
                print('options:')
                print(yaml.dump(options, default_flow_style=False))
                print('settings:')
                print(yaml.dump(settings, default_flow_style=False))
            export_type = settings['export_type']
            if export_type not in Command.exporter_classes:
                raise CommandError('Unknown export type: {}'.format(export_type))
            exporter = Command.exporter_classes[export_type]()
            if options['info']:
                print(exporter.info())
                return
            data = exporter.getData(**settings)
            recipient_list = settings['recipient_list']
            if recipient_list:
                now = datetime.now()
                # Default file name; replaced by a period-based name when
                # an explicit start time is known.
                filename = 'eventbillet-{}.csv'.format(now.strftime('%Y%m%dT%H%M'))
                if 'starttime' in settings:
                    filename = 'eventbillet-{:%Y%m%d}'.format(settings['starttime'])
                    if 'endtime' in settings:
                        filename += '-{:%Y%m%d}'.format(settings['endtime'])
                    filename += '.csv'
                # Render the rows as semicolon-separated CSV (Excel dialect).
                output = io.StringIO()
                writer = csv.writer(output, dialect='excel', delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                for row in data:
                    writer.writerow(row)
                content = output.getvalue()
                output.close()
                subject = _('Order export from {site_name}').format(site_name=django.conf.settings.PRETIX_INSTANCE_NAME)
                if 'starttime' in settings:
                    starttime = settings['starttime']
                    endtime = settings['endtime'] if 'endtime' in settings else now
                    subject += ' ({:%Y-%m-%d}–{:%Y-%m-%d})'.format(starttime, endtime)
                body = content
                from_email = settings['from_email']
                email = EmailMessage(
                    subject=subject,
                    body=body,
                    from_email=from_email,
                    to=recipient_list,
                    attachments=[
                        (filename, content, 'text/csv'),
                    ]
                )
                email.send(fail_silently=False)
                if verbose:
                    print(content)
                    print('Sent to: {}'.format(', '.join(recipient_list)))
                    print('Subject: {}'.format(subject))
            else:
                # No recipients configured: write the CSV to stdout instead.
                writer = csv.writer(self.stdout)
                for row in data:
                    writer.writerow(row)
        except Exception as e:
            # With --debug surface the original traceback; otherwise wrap
            # the error in a CommandError for a clean CLI message.
            raise e if debug else CommandError(e)

    def getSettings(self, options):
        """Merge the ITK_EXPORT django setting with the CLI options and
        normalize start/end times into timezone-aware datetimes."""
        settings = django.conf.settings.ITK_EXPORT.copy() if hasattr(django.conf.settings, 'ITK_EXPORT') else {}
        # CLI options override configured defaults (None means "not given").
        for name in options:
            if options[name] is not None:
                settings[name] = options[name]
        if 'period' in settings:
            # A symbolic period overrides explicit start/end times.
            (starttime, endtime) = self.getPeriod(settings['period'])
            settings['starttime'] = starttime.isoformat()
            settings['endtime'] = endtime.isoformat()
        if 'starttime' in settings:
            d = dateparser.parse(settings['starttime'])
            if d is None:
                raise CommandError('Error parsing starttime: {}'.format(settings['starttime']))
            if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
                # Naive datetimes are assumed to be UTC.
                d = pytz.utc.localize(d)
            settings['starttime'] = d
        if 'endtime' in settings:
            d = dateparser.parse(settings['endtime'])
            if d is None:
                raise CommandError('Error parsing endtime: {}'.format(settings['endtime']))
            if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
                d = pytz.utc.localize(d)
            settings['endtime'] = d
        settings['recipient_list'] = settings['recipient'] if 'recipient' in settings else None
        settings['from_email'] = settings['sender'] if 'sender' in settings else None
        return settings

    def getPeriod(self, period):
        """Translate a symbolic period name into a (starttime, endtime)
        pair of UTC-aware datetimes.

        Raises CommandError for unknown period names.
        """
        starttime = None
        endtime = None
        today = dateparser.parse(date.today().strftime('%Y-%m-%d'))
        # Monday in the current week
        this_monday = today - timedelta(days=today.weekday())
        if period == 'current-year':
            starttime = dateparser.parse(datetime.today().strftime('%Y-01-01'))
            endtime = dateparser.parse('in 1 year', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
        elif period == 'previous-year':
            start_of_year = dateparser.parse(datetime.today().strftime('%Y-01-01'))
            starttime = dateparser.parse('1 year ago', settings={'RELATIVE_BASE': start_of_year})
            endtime = dateparser.parse('in 1 year', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
        elif period == 'current-month':
            starttime = dateparser.parse(datetime.today().strftime('%Y-%m-01'))
            endtime = dateparser.parse('in 1 month', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
        elif period == 'previous-month':
            start_of_month = dateparser.parse(datetime.today().strftime('%Y-%m-01'))
            starttime = dateparser.parse('1 month ago', settings={'RELATIVE_BASE': start_of_month})
            endtime = dateparser.parse('in 1 month', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
        elif period == 'current-week':
            starttime = this_monday
            endtime = dateparser.parse('in 1 week', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
        elif re.match(r'previous-week([+-]\d+)?', period):
            # previous-week with an optional ±days offset, e.g. previous-week+2.
            match = re.match(r'previous-week([+-]\d+)?', period)
            offset = int(match.group(1)) if match.group(1) is not None else 0
            start_of_week = this_monday
            # NOTE(review): relies on dateparser resolving 'Monday' relative
            # to this week's Monday as *last* Monday — confirm the default
            # PREFER_DATES_FROM behaviour used here.
            starttime = dateparser.parse('Monday', settings={'RELATIVE_BASE': start_of_week})
            endtime = dateparser.parse('in 1 week', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
            if offset != 0:
                starttime += timedelta(days=offset)
                endtime += timedelta(days=offset)
        elif period == 'current-day' or period == 'today':
            starttime = dateparser.parse('00:00:00')
            endtime = dateparser.parse('in 1 day', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
        elif period == 'previous-day' or period == 'yesterday':
            starttime = dateparser.parse('yesterday 00:00:00')
            endtime = dateparser.parse('in 1 day', settings={'RELATIVE_BASE': starttime, 'PREFER_DATES_FROM': 'future'})
        else:
            raise CommandError('Invalid period: {}'.format(period))
        # https://docs.djangoproject.com/en/1.11/topics/i18n/timezones/
        starttime = pytz.utc.localize(starttime)
        endtime = pytz.utc.localize(endtime)
        return (starttime, endtime)
|
"""
A simple script for importing photos from a memory card
to a computer. Source/destinations are configured via constants
to make the CLI dead-simple under the assumption that these
won't change much, but an argparse interface could be added
for improved flexibility.
"""
import os
from datetime import datetime
import shutil
import argparse
from tqdm import tqdm
# Import tasks for a Sony camera card: each task copies every file with the
# given extension from a source directory on the card to a destination folder.
SONY_TASKS = [
    {
        'src': 'K:\\DCIM\\100MSDCF',
        'ext': 'ARW',
        'dest': 'O:\\Personal Data\\Pictures'
    },
    {
        'src': 'K:\\PRIVATE\\M4ROOT\\CLIP',
        'ext': 'MP4',
        'dest': 'O:\\Video Editing\\Raw'
    }
]
# Import tasks for a Panasonic camera card (two photo folders plus AVCHD video).
PANA_TASKS = [
    {
        'src': 'K:\\DCIM\\120_PANA',
        'ext': 'RW2',
        'dest': 'O:\\Personal Data\\Pictures'
    },
    {
        'src': 'K:\\DCIM\\121_PANA',
        'ext': 'RW2',
        'dest': 'O:\\Personal Data\\Pictures'
    },
    {
        'src': 'K:\\PRIVATE\\AVCHD\\BDMV\\STREAM',
        'ext': 'MTS',
        'dest': 'O:\\Video Editing\\Raw'
    }
]
# strftime format for the date portion of imported file names (YYYYMMDD).
DATE_FMT = '%Y%m%d'
# Zero-padded three-digit per-day sequence number appended to the date.
INC_FMT = '{:0>3d}'
# When True, source files are moved (removed from the card) instead of copied.
DELETE_AFTER_COPY = True
class PhotoImporter:
    """Copies (or moves) camera files into dated destination folders.

    Files are renamed to YYYYMMDD-NNN.<ext> based on their modification
    time and placed under <dest>/<year>/<month name>/.
    """

    def __init__(self, tasks, delete=DELETE_AFTER_COPY):
        """
        tasks: list of {'src': dir, 'ext': suffix, 'dest': dir} dicts.
        delete: when True, files are moved instead of copied.
        """
        self._tasks = tasks
        self._delete = delete
        self._last_date = ''   # date string of the previously imported file
        self._date_count = 0   # per-day sequence number used in file names

    def run(self):
        """Process every task, importing each matching file in its src dir."""
        for task_num, task in enumerate(self._tasks, start=1):
            print('Running task {} of {}'.format(task_num, len(self._tasks)))
            for f in tqdm(os.listdir(task['src'])):
                file_path = os.path.join(task['src'], f)
                if os.path.isfile(file_path) and f.endswith(task['ext']):
                    self.import_file(file_path, task['dest'])

    def import_file(self, file_path, dest):
        """Rename and transfer a single file into its destination folder."""
        new_name = self.rename(file_path)
        new_path = os.path.join(dest, new_name)
        # Bug fix: create the destination directory up front with
        # exist_ok=True. The previous try/except IOError retry called
        # makedirs() unconditionally, so a transfer failure for any other
        # reason (e.g. permissions) while the directory already existed
        # was masked by a misleading FileExistsError from makedirs().
        os.makedirs(os.path.dirname(new_path), exist_ok=True)
        self.transfer(file_path, new_path)

    def transfer(self, src, dest):
        """Move or copy src to dest, appending 'b' to the stem on collision."""
        if os.path.exists(dest):
            # Name collision: retry with 'b' appended (recursively -> 'bb', ...).
            root, ext = os.path.splitext(dest)
            self.transfer(src, root + 'b' + ext)
        elif self._delete:
            shutil.move(src, dest)
        else:
            shutil.copy2(src, dest)

    def rename(self, file_path):
        """Build the dated relative path: <year>/<month>/YYYYMMDD-NNN<ext>."""
        file_ext = os.path.splitext(os.path.basename(file_path))[1]
        mod_time = datetime.fromtimestamp(os.path.getmtime(file_path))
        date_str = mod_time.strftime(DATE_FMT)
        if date_str != self._last_date:
            # New day: restart the per-day counter.
            self._date_count = 0
            self._last_date = date_str
        else:
            self._date_count += 1
        count_str = INC_FMT.format(self._date_count)
        year = mod_time.strftime('%Y')
        month = mod_time.strftime('%B')
        return os.path.join(year, month, date_str + '-' + count_str + file_ext)
if __name__ == "__main__":
    # CLI entry point: pick the task list matching the camera brand.
    parser = argparse.ArgumentParser(description='Import photos and videos.')
    parser.add_argument('--cam', required=True)
    args = parser.parse_args()
    if args.cam == 'sony':
        PhotoImporter(SONY_TASKS).run()
    elif args.cam == 'pana' or args.cam == 'panasonic':
        PhotoImporter(PANA_TASKS).run()
    else:
        # Bug fix: the original ended with a bare string expression that was
        # silently discarded; actually report the unrecognized camera.
        print('Camera not recognized.')
# samples/balloon/deep_lesion_train_key.py
# Training knobs consumed by train() below.
epoch = 100      # total number of epochs passed to model.train()
layers = 'all'   # which layers to train: 'all' or 'heads' (previously 'heads')
patience1 = 2    # epochs of val_loss plateau before ReduceLROnPlateau fires
"""
Mask R-CNN
Train on the toy Balloon dataset and implement color splash effect.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 deep_lesion_train_key.py train --dataset=/path/to/balloon/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 deep_lesion_train_key.py train --dataset=/path/to/balloon/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 deep_lesion_train_key.py train --dataset=/path/to/balloon/dataset --weights=imagenet
# Apply color splash to an image
python3 deep_lesion_train_key.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color splash to video using the last weights you trained
python3 deep_lesion_train_key.py splash --weights=last --video=<URL or path to file>
"""
from random import randint
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import glob
from imgaug import augmenters as iaa
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils_DL
import utilities as util
import cv2
import pickle
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
### Path to DEXTR
BASE_DIR = os.path.abspath("../../../")
DEXTR_DIR = os.path.join(BASE_DIR,"DEXTR-PyTorch_p")
############################################################
# Configurations
############################################################
class BalloonConfig(Config):
    """Configuration for training on the DeepLesion dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "Lesion"

    # One image per GPU; adjust down for GPUs with less memory.
    IMAGES_PER_GPU = 1
    GPU_COUNT = 1
    BATCH_SIZE = IMAGES_PER_GPU * GPU_COUNT

    # Keras convention: STEPS_PER_EPOCH = NUMBER_OF_SAMPLES / BATCH_SIZE.
    VALIDATION_STEPS = 100 / BATCH_SIZE
    STEPS_PER_EPOCH = 1000 / BATCH_SIZE

    # Number of classes (including background): background + 8 lesion types.
    # NOTE: must change if the "unknown" (-1) category is re-added.
    NUM_CLASSES = 1 + 8

    IMAGE_RESIZE_MODE = "square"
    IMAGE_MIN_DIM = 512
    IMAGE_MAX_DIM = 512
    RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)
    TRAIN_ROIS_PER_IMAGE = 128
    MAX_GT_INSTANCES = 3
    DETECTION_MAX_INSTANCES = 3

    # Skip detections below this confidence. Fix: the original assigned
    # this attribute twice (0.9, then 0.93); only the final 0.93 ever took
    # effect, so the shadowed duplicate has been removed.
    DETECTION_MIN_CONFIDENCE = 0.93
    DETECTION_NMS_THRESHOLD = 0.3

    LEARNING_RATE = 0.00001
    LEARNING_MOMENTUM = 0.9
    # Weight decay regularization
    WEIGHT_DECAY = 0.0001

    # Use full-size masks; mini-masks were disabled deliberately.
    USE_MINI_MASK = False
############################################################
# Dataset
############################################################
class Deep_Lesion_Dataset(utils_DL.Dataset):
    """Mask R-CNN dataset for DeepLesion annotations produced by DEXTR.

    Bug fix: the base class was written as ``utils.Dataset`` but this file
    imports the mrcnn utils module as ``utils_DL`` (no ``utils`` name is
    bound), so the original raised NameError at import time.
    """

    # DeepLesion coarse lesion types keyed by category id. Fix: the
    # original if/elif chain mapped 5 -> "Kidney" and 6 -> "Lung", which
    # contradicted both the ids registered in add_class() below and the
    # DeepLesion label definition (5 = lung, 6 = kidney).
    _CATEGORY_NAMES = {
        1: "Bone",
        2: "Abdomen_notLiver_notKidney",
        3: "Mediastinum",
        4: "Liver",
        5: "Lung",
        6: "Kidney",
        7: "Soft tissue",
        8: "Pelvis",
    }

    def load_deep_lesion(self, dataset_dir, subset):
        """Load the 'train' or 'val' split of the DeepLesion dataset.

        dataset_dir: unused (annotations come from DEXTR_DIR/data.json);
            kept for call compatibility with the balloon sample.
        subset: 'train' or 'val'; matched against each image's
            Train_Val_Test flag (1 = train, 2 = validation, 3 = test).
        """
        # Register the eight lesion classes under the "Lesion" source.
        # Unknown lesions (category -1) are intentionally left out.
        self.add_class("Lesion", 1, "1")  # Bone
        self.add_class("Lesion", 2, "2")  # Abdomen_notLiver_notKidney
        self.add_class("Lesion", 3, "3")  # Mediastinum
        self.add_class("Lesion", 4, "4")  # Liver
        self.add_class("Lesion", 5, "5")  # Lung
        self.add_class("Lesion", 6, "6")  # Kidney
        self.add_class("Lesion", 7, "7")  # Soft tissue (body wall, muscle, skin, fat, limbs, head, neck)
        self.add_class("Lesion", 8, "8")  # Pelvis

        # data.json is a COCO/VIA-style file with parallel 'annotations',
        # 'images' and 'categories' lists indexed by the same position.
        annotations = json.load(open(os.path.join(DEXTR_DIR, "data.json")))
        annotations_seg = annotations['annotations']
        for b, a in enumerate(annotations_seg):
            image_info = annotations['images'][b]
            image_id = image_info['id']
            image_cat = annotations['categories'][b]['category_id']
            image_cat_name = self._CATEGORY_NAMES.get(image_cat, "blank")
            # 1 = train, 2 = validation, 3 = test.
            train_valid_test = image_info['Train_Val_Test']
            # Polygons: VIA-1.x style dict of regions, each carrying
            # 'all_points_x'/'all_points_y' lists and a class attribute.
            polygons = a['regions']
            objects = [s['region_attributes'] for s in a['regions'].values()]
            num_ids = [int(n['class']) for n in objects]
            image_path = os.path.join(DEXTR_DIR, image_info['File_path'])
            # Read the raw (16-bit) image just to obtain its dimensions,
            # which load_mask() needs to rasterize the polygons.
            image = cv2.imread(image_path, -1)
            height, width = image.shape[:2]
            # NOTE(review): add_image() records image_cat_name as the
            # image "source", but load_mask() below only handles source
            # "Lesion" (the source used in add_class) — confirm whether
            # the source here should be the literal "Lesion".
            # int(image_cat) >= 0 excludes the unknown (-1) category.
            if subset == 'train' and train_valid_test == 1 and int(image_cat) >= 0:
                self.add_image(
                    image_cat_name,
                    image_id=image_id,
                    path=image_path,
                    width=width, height=height,
                    polygons=polygons, num_ids=num_ids)
            if subset == 'val' and train_valid_test == 2 and int(image_cat) >= 0:
                self.add_image(
                    image_cat_name,
                    image_id=image_id,
                    path=image_path,
                    width=width, height=height,
                    polygons=polygons, num_ids=num_ids)

    def load_mask(self, image_id):
        """Generate instance masks for an image.

        Returns a uint8 array of shape [height, width, instance_count]
        with one channel per polygon.
        NOTE(review): mrcnn's Dataset.load_mask contract is the pair
        (bool masks, class_ids); this implementation returns only the
        mask array and ignores num_ids — confirm callers expect that.
        """
        image_info = self.image_info[image_id]
        # Delegate anything not registered under the "Lesion" source.
        if image_info["source"] != "Lesion":
            return super(self.__class__, self).load_mask(image_id)
        info = self.image_info[image_id]
        annotations = info["polygons"]
        # Number of vertices of the first polygon; zero means "no mask".
        count = len(annotations[0]['all_points_x'])
        if count == 0:
            # No polygon points: return a single empty mask channel.
            mask = np.zeros((info["height"], info["width"], 1), dtype=np.uint8)
        else:
            mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                            dtype=np.uint8)
            for i, p in enumerate(info["polygons"]):
                # Vertices may be floats; skimage.draw.polygon needs ints.
                # (Also renames the comprehension variable, which shadowed
                # the loop index in the original.)
                p['all_points_y'] = [int(v) for v in p['all_points_y']]
                p['all_points_x'] = [int(v) for v in p['all_points_x']]
                rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
                mask[rr, cc, i] = 1
        return mask

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        # The original had a source-conditional branch below an
        # unconditional return, which was unreachable; every image simply
        # reports its file path.
        return info["path"]
################## Not sure about this...
#####################################################################
# Data augmentation passed to model.train(): apply at most one of the
# transforms below to each training image.
augmentation = iaa.SomeOf((0, 1), [
    iaa.Fliplr(0.5),  # horizontal flip with 50% probability
    iaa.Affine(
        scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
        translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
        rotate=(-25, 25),
        shear=(-8, 8)
    ),
    iaa.Multiply((0.9, 1.1))  # slight brightness jitter
])
########################################################################################
from keras.callbacks import ReduceLROnPlateau
# Shrink the learning rate by a factor of 0.3 whenever val_loss plateaus for
# `patience1` epochs (see the knobs at the top of the file). Passed to
# model.train() via the callbacks list below.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=patience1, verbose=1, mode='min', cooldown=0, min_lr=0)
callbacks=[reduce_lr]
def train(model):
    """Train the model on the DeepLesion train/val splits.

    Split selection is driven by each image's Train_Val_Test flag in
    data.json (1 = train, 2 = validation, 3 = test); the epoch count,
    layer selection, callbacks and augmentation come from module-level
    configuration.
    """
    # Build and prepare the training split.
    dataset_train = Deep_Lesion_Dataset()
    dataset_train.load_deep_lesion(args.dataset, "train")
    dataset_train.prepare()

    # Build and prepare the validation split.
    dataset_val = Deep_Lesion_Dataset()
    dataset_val.load_deep_lesion(args.dataset, "val")
    dataset_val.prepare()

    print("Training network heads")
    model.train(
        dataset_train,
        dataset_val,
        learning_rate=config.LEARNING_RATE,
        epochs=epoch,
        layers=layers,
        custom_callbacks=callbacks,
        augmentation=augmentation,
    )
def color_splash(image, mask):
    """Keep masked pixels in color and render everything else grayscale.

    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]
    Returns the composited result image as uint8.
    """
    # Grayscale copy of the image, still carrying 3 identical RGB channels.
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    if mask.shape[-1] == 0:
        # No instances detected: the whole frame is grayscale.
        return gray.astype(np.uint8)
    # Treat all instances as one by collapsing the masks into a single layer.
    combined = np.sum(mask, -1, keepdims=True) >= 1
    return np.where(combined, image, gray).astype(np.uint8)
def detect_and_color_splash(model, image_path=None, video_path=None):
    """Run detection and save a color-splash rendering.

    model: Mask R-CNN model in inference mode.
    image_path: when truthy, splash one randomly chosen dataset image
        (the argument's own value is replaced by the random pick).
    video_path: when given (and image_path is falsy), splash every frame
        of the video and write an .avi to the working directory.
    """
    assert image_path or video_path
    # NOTE(review): the random pick below draws from the whole annotation
    # list; it is not restricted to the test split (Train_Val_Test == 3).
    annotations = json.load(open(os.path.join(DEXTR_DIR, "data.json")))
    annotations_seg = annotations['annotations']
    # Bug fix: random.randint is inclusive on BOTH ends, so the original
    # randint(0, len(annotations_seg)) could index one past the end of
    # the list and raise IndexError.
    b = randint(0, len(annotations_seg) - 1)
    image_info = annotations['images'][b]
    # Image or video?
    if image_path:
        # Run model detection and generate the color splash effect.
        print("Running on:")
        print(image_info['File_name'])
        print(image_info['File_path'])
        image_path = os.path.join(DEXTR_DIR, image_info['File_path'])
        # DICOM_windows is a comma-separated string, e.g. "-175,275";
        # go through float so decimal values survive the int conversion.
        win = image_info['DICOM_windows']
        win = win.split(",")
        win = list(map(float, win))
        win = list(map(int, win))
        # Load the image with its default DICOM windowing applied.
        image = util.load_im_with_default_windowing(win, image_path)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Color splash
        splash = color_splash(image, r['masks'])
        # Save output
        file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        skimage.io.imsave(file_name, splash)
    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)
        # Define codec and create video writer
        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name,
                                  cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))
        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read next image
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR, convert to RGB
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color splash
                splash = color_splash(image, r['masks'])
                # RGB -> BGR to save image to video
                splash = splash[..., ::-1]
                # Add image to video writer
                vwriter.write(splash)
                count += 1
        vwriter.release()
        print("Saved to ", file_name)
############################################################
# Training
############################################################
if __name__ == '__main__':
    import argparse
    # Parse command line arguments
    # CLI entry point: `command` selects training or color-splash inference;
    # remaining flags locate the dataset, weights, logs and media inputs.
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect balloons.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/balloon/dataset/",
                        help='Directory of the Balloon dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    args = parser.parse_args()
    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video,\
               "Provide --image or --video to apply color splash"
    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)
    # Configurations
    # BalloonConfig / modellib / train / detect_and_color_splash are defined
    # earlier in this file or in the Mask R-CNN package (not visible here).
    if args.command == "train":
        config = BalloonConfig()
    else:
        class InferenceConfig(BalloonConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()
    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)
    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights
    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)
    # Train or evaluate
    if args.command == "train":
        train(model)
    elif args.command == "splash":
        detect_and_color_splash(model, image_path=args.image,
                                video_path=args.video)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
|
<gh_stars>0
# Copyright 2017 The Wallaroo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import print_function
# import requisite components for integration test
from integration import (add_runner,
clean_resilience_path,
ex_validate,
get_port_values,
iter_generator,
Metrics,
MetricsData,
Reader,
Runner,
RunnerChecker,
RunnerReadyChecker,
runners_output_format,
Sender,
setup_resilience_path,
Sink,
SinkAwaitValue,
start_runners,
TimeoutError)
from collections import Counter
from itertools import cycle
import json
import re
from string import lowercase
from struct import pack, unpack
import tempfile
import time
class AutoscaleTestError(Exception):
    """
    Error raised when an autoscale test fails.

    Carries the underlying error (`as_error`) and the sequence of autoscale
    operations performed so far (`as_steps`) for diagnostic reporting.
    """
    def __init__(self, args, as_error=None, as_steps=None):
        super(AutoscaleTestError, self).__init__(args)
        self.as_error = as_error
        # `as_steps=[]` as a default would be shared across all instances
        # (mutable default argument); create a fresh list per instance.
        self.as_steps = as_steps if as_steps is not None else []
class AutoscaleTimeoutError(AutoscaleTestError):
    # Raised when a join/shrink phase times out waiting on runner output.
    pass
# Sink message framing: big-endian u32 length, 2-byte key, u64 count.
fmt = '>I2sQ'
def decode(bs):
    """Decode one framed message, returning the (key, count) pair."""
    _length, key, count = unpack(fmt, bs)
    return (key, count)
def pre_process(decoded):
    """Collapse decoded (key, value) pairs into a dict; later pairs win."""
    return {key: value for key, value in decoded}
def process(data):
    """Decode every framed message in *data* and collapse to per-key totals."""
    return pre_process([decode(item) for item in data])
def validate(raw_data, expected):
    """Assert that the decoded totals in *raw_data* equal *expected*."""
    observed = process(raw_data)
    assert(observed == expected)
def query_partitions(host, port):
    """
    Query the worker at the given address for its partition routing
    information.
    """
    query_cmd = ('external_sender --external {}:{} --type partition-query'
                 .format(host, port))
    ok, out, _retcode, _cmd = ex_validate(query_cmd)
    try:
        assert(ok)
    except AssertionError:
        raise AssertionError('Failed to query partition data with the '
                             'following error:\n:{}'.format(out))
    # The JSON payload is the last line of the sender's output.
    return json.loads(out.splitlines()[-1])
def send_shrink_cmd(host, port, names=None, count=1):
    """
    Send a cluster shrink command via the cluster_shrinker external tool.

    Shrinks by worker name when `names` is non-empty, otherwise by `count`.
    Raises AssertionError (with the tool's output) if the trigger fails.
    """
    # `names=[]` as a default is the mutable-default-argument pitfall;
    # use None and normalize.
    names = names if names is not None else []
    # Trigger cluster shrink with external message
    cmd_external_trigger = ('cluster_shrinker --external {}:{} --workers {}'
                            .format(host, port,
                                    ','.join(names) if names else count))
    success, stdout, retcode, cmd = ex_validate(cmd_external_trigger)
    try:
        assert(success)
    except AssertionError:
        raise AssertionError('External shrink trigger failed with '
                             'the error:\n{}'.format(stdout))
def phase_validate_output(runners, sink, expected):
    """Check the sink's captured data against the *expected* totals."""
    try:
        validate(sink.data, expected)
    except AssertionError:
        worker_logs = runners_output_format(runners)
        raise AssertionError('Validation failed on expected output. '
                             'Worker outputs are included below:'
                             '\n===\n{}'.format(worker_logs))
def phase_validate_partitions(runners, partitions, joined=None, left=None):
    """
    Use the partition map to determine whether new workers have joined and
    departing workers have left.

    `partitions` maps partition-type -> step -> worker -> assigned parts.
    A worker "has partitions" if any of its part lists is non-empty.
    Raises AssertionError (with worker outputs) on a membership mismatch.
    """
    # Avoid mutable default arguments; treat None as "no expectation".
    joined_set = set(joined) if joined is not None else set()
    left_set = set(left) if left is not None else set()
    # Compute set of workers with partitions
    workers = set()
    for p_type in partitions.values():
        for step in p_type.values():
            for key, parts in step.items():
                if parts:
                    workers.add(key)
    try:
        assert(workers.issuperset(joined_set))
    except AssertionError:
        missing = sorted(joined_set.difference(workers))
        outputs = runners_output_format(runners)
        raise AssertionError('{} do not appear to have joined! '
                             'Worker outputs are included below:'
                             '\n===\n{}'.format(missing, outputs))
    try:
        assert(workers.isdisjoint(left_set))
    except AssertionError:
        # BUG FIX: the original assigned `reamining` (typo, unused) and then
        # formatted the undefined name `w`, raising NameError instead of the
        # intended AssertionError whenever this branch was taken.
        lingering = sorted(workers.intersection(left_set))
        outputs = runners_output_format(runners)
        raise AssertionError('{} do not appear to have left! '
                             'Worker outputs are included below:'
                             '\n===\n{}'.format(lingering, outputs))
def sign(i):
if i > 0:
return 'p'
elif i < 0:
return 'n'
else:
return 'z'
def compact_sign(ops):
new = [ops[0]]
for o in ops[1:]:
if sign(new[-1]) == sign(o):
new[-1] += o
elif sign(o) == 'z':
continue
else:
new.append(o)
return new
def autoscale_sequence(command, ops=[1], cycles=1, initial=None):
    """
    Run an autoscale test for a given command by performing grow and shrink
    operations, as denoted by positive and negative integers in the `ops`
    parameter, a `cycles` number of times.

    `initial` may be used to define the starting number of workers. If it is
    left undefined, the minimum number required so that the number of workers
    never goes below zero will be determined and used.
    """
    try:
        _autoscale_sequence(command, ops, cycles, initial)
    except Exception as exc:
        # Report any autoscale diagnostics the error carries, then re-raise.
        if hasattr(exc, 'as_steps'):
            print("Autoscale Sequence test failed after the operations {}."
                  .format(exc.as_steps))
        if hasattr(exc, 'as_error'):
            print("Autoscale Sequence test had the following the error "
                  "message:\n{}".format(exc.as_error))
        raise exc
def _autoscale_sequence(command, ops=[1], cycles=1, initial=None):
    """
    Drive one full autoscale test run: start a cluster, apply each grow or
    shrink operation in `ops` for `cycles` rounds while a sender streams
    data, validate partition migration after every step, then validate the
    sink output and shut everything down.
    """
    # NOTE(review): `ops=[1]` is a mutable default; it is never mutated here
    # (`ops*cycles` builds a new list each time), so behavior is unaffected.
    host = '127.0.0.1'
    sources = 1
    if isinstance(ops, int):
        ops = [ops]
    # If no initial workers value is given, determine the minimum number
    # required at the start so that the cluster never goes below 1 worker.
    # If a number is given, then verify it is sufficient.
    bottom = min(min(compact_sign(ops*cycles)), sum(ops*cycles))
    if bottom < 1:
        min_workers = abs(bottom) + 1
    else:
        min_workers = 1
    if isinstance(initial, int):
        assert(initial >= min_workers)
        workers = initial
    else:
        workers = min_workers
    # Traffic shape: 10 messages every 0.05s -> 200 msgs/sec for the whole
    # expected duration of the run.
    batch_size = 10
    interval = 0.05
    msgs_per_sec = int(batch_size/interval)
    base_time = 10 # Seconds
    cycle_time = 10 # seconds
    expect_time = base_time + cycle_time * cycles # seconds
    expect = expect_time * msgs_per_sec
    sender_timeout = expect_time + 10 # seconds
    join_timeout = 200
    runner_join_timeout = 30
    res_dir = tempfile.mkdtemp(dir='/tmp/', prefix='res-data.')
    setup_resilience_path(res_dir)
    steps = []
    runners = []
    try:
        try:
            # Create sink, metrics, reader, sender
            sink = Sink(host)
            metrics = Metrics(host)
            # Python 2: `lowercase` comes from the string module. Two-letter
            # keys give 26*26 distinct partition keys for the letter-state.
            lowercase2 = [a + b for a in lowercase for b in lowercase]
            char_gen = cycle(lowercase2)
            chars = [next(char_gen) for i in range(expect)]
            expected = Counter(chars)
            reader = Reader(iter_generator(chars,
                                           lambda s: pack('>2sI', s, 1)))
            await_values = [pack('>I2sQ', 10, c, v) for c, v in
                            expected.items()]
            # Start sink and metrics, and get their connection info
            sink.start()
            sink_host, sink_port = sink.get_connection_info()
            outputs = '{}:{}'.format(sink_host, sink_port)
            metrics.start()
            metrics_host, metrics_port = metrics.get_connection_info()
            time.sleep(0.05)
            # One port per source, three fixed cluster ports, and two ports
            # per non-initializer worker.
            num_ports = sources + 3 + (2 * (workers - 1))
            ports = get_port_values(num=num_ports, host=host)
            (input_ports, (control_port, data_port, external_port),
             worker_ports) = (ports[:sources],
                              ports[sources:sources+3],
                              zip(ports[-(2*(workers-1)):][::2],
                                  ports[-(2*(workers-1)):][1::2]))
            inputs = ','.join(['{}:{}'.format(host, p) for p in
                               input_ports])
            start_runners(runners, command, host, inputs, outputs,
                          metrics_port, control_port, external_port, data_port,
                          res_dir, workers, worker_ports)
            # Wait for first runner (initializer) to report application ready
            runner_ready_checker = RunnerReadyChecker(runners, timeout=30)
            runner_ready_checker.start()
            runner_ready_checker.join()
            if runner_ready_checker.error:
                raise runner_ready_checker.error
            # Get initial partition data
            partitions = query_partitions(host, external_port)
            # Verify all workers start with partitions
            # NOTE(review): relies on Python 2 `map` returning a list (with
            # .count); under Python 3 this would raise AttributeError.
            assert(map(len, partitions['state_partitions']['letter-state']
                       .values()).count(0) == 0)
            # start sender
            sender = Sender(host, input_ports[0], reader, batch_size=batch_size,
                            interval=interval)
            sender.start()
            time.sleep(2)
            # Perform autoscale cycles
            # start_froms tracks, per runner, the log offset from which the
            # next RunnerChecker should begin scanning.
            start_froms = {r: 0 for r in runners}
            for cyc in range(cycles):
                for joiners in ops:
                    steps.append(joiners)
                    joined = []
                    left = []
                    if joiners > 0:
                        # create a new worker and have it join
                        new_ports = get_port_values(num=(joiners * 2), host=host,
                                                    base_port=25000)
                        # NOTE(review): `joiner_ports[i]` below assumes
                        # Python 2 `zip` (returns an indexable list).
                        joiner_ports = zip(new_ports[::2], new_ports[1::2])
                        for i in range(joiners):
                            add_runner(runners, command, host, inputs, outputs,
                                       metrics_port,
                                       control_port, external_port, data_port, res_dir,
                                       joiners, *joiner_ports[i])
                            joined.append(runners[-1])
                            start_froms[runners[-1]] = 0
                        # Log patterns expected from the initializer (i) and
                        # from each joining worker (j) during a grow.
                        patterns_i = ([re.escape('***Worker {} attempting to join the '
                                                 'cluster. Sent necessary information.***'
                                                 .format(r.name)) for r in joined]
                                      +
                                      [re.escape('Migrating partitions to {}'.format(
                                          r.name)) for r in joined]
                                      +
                                      [re.escape('--All new workers have acked migration '
                                                 'batch complete'),
                                       re.escape('~~~Resuming message processing.~~~')])
                        patterns_j = [re.escape('***Successfully joined cluster!***'),
                                      re.escape('~~~Resuming message processing.~~~')]
                        # Wait for runners to complete joining
                        join_checkers = []
                        join_checkers.append(RunnerChecker(runners[0], patterns_i,
                                                           timeout=join_timeout,
                                                           start_from=start_froms[runners[0]]))
                        for runner in joined:
                            join_checkers.append(RunnerChecker(runner, patterns_j,
                                                               timeout=join_timeout,
                                                               start_from=start_froms[runner]))
                        for jc in join_checkers:
                            jc.start()
                        for jc in join_checkers:
                            jc.join()
                            if jc.error:
                                outputs = runners_output_format(runners)
                                raise AutoscaleTimeoutError(
                                    "'{}' timed out on JOIN in {} "
                                    "seconds. The cluster had the following outputs:\n===\n{}"
                                    .format(jc.runner_name, jc.timeout, outputs),
                                    as_error=jc.error,
                                    as_steps=steps)
                    elif joiners < 0:  # joiners < 0, e.g. leavers!
                        # choose the most recent, still-alive runners to leave
                        leavers = abs(joiners)
                        idx = 1
                        while len(left) < leavers and idx < len(runners):
                            if runners[-idx].is_alive():
                                left.append(runners[-idx])
                            idx += 1
                        if len(left) < leavers:
                            raise AutoscaleTestError("Not enough workers left to "
                                                     "shrink! {} requested but "
                                                     "only {} live non-initializer"
                                                     "workers found!"
                                                     .format(joiners, len(left)))
                        # Create the checkers
                        initializer = [runners[0]]
                        remaining = [r for r in runners if r.is_alive() and r not
                                     in initializer + left]
                        # Log patterns expected on the initializer during a
                        # shrink.
                        patterns_i = (
                            [r'ExternalChannelConnectNotifier: initializer: '
                             r'server closed',
                             r'Saving topology!',
                             r'Saving worker names to file: .*?initializer.'
                             r'workers'] +
                            [re.escape(r'LocalTopology._save_worker_names: {}'
                                       .format(r.name)) for r in
                             initializer + remaining] +
                            [re.escape(r'~~~Initiating shrink~~~'),
                             re.escape(r'-- Remaining workers:')] +
                            # NOTE(review): `for n in` but `r.name` is used —
                            # looks like a bug: `r` leaks from the previous
                            # list comprehension (Python 2 scoping), so every
                            # entry repeats the same worker name. Confirm.
                            [re.escape(r'-- -- {}'.format(r.name)) for n in
                             initializer + remaining] +
                            [re.escape(r'~~~Stopping message processing for '
                                       r'leaving workers.~~~'),
                             re.escape(r'~~~Resuming message processing.~~~')])
                        # Patterns expected on each remaining worker.
                        patterns_r = (
                            [re.escape(r'Control Ch: Received Mute Request from initializer'),
                             re.escape(r'~~~Stopping message processing for leaving workers.~~~'),
                             re.escape(r'DataChannelConnectNotifier: server closed'),
                             re.escape(r'ControlSenderConnectNotifier: server closed'),
                             re.escape(r'BoundaryNotify: closed'),
                             re.escape(r'Control Ch: Received Unmute Request from initializer'),
                             re.escape(r'~~~Resuming message processing.~~~'),
                             re.escape(r'Shutting down OutgoingBoundary'),
                             re.escape(r'Shutting down ControlConnection')])
                        # Per-worker patterns formatted with the worker name.
                        patterns_r_per = [
                            r'ControlChannelConnectNotifier:{}: server closed']
                        # Patterns expected on each leaving worker.
                        patterns_l = (
                            [re.escape(r'Control Ch: Received Mute Request from {}'
                                       .format(r.name))
                             for r in initializer + remaining] +
                            [re.escape(r'Migrating all partitions to {} remaining '
                                       r'workers'.format(
                                           len(initializer + remaining))),
                             r'\^\^Migrating \d+ steps to {} workers'
                             .format(len(initializer + remaining))] +
                            [r'\^\^Migrating step \d+ to outgoing '
                             r'boundary {}/[0-9a-f]{{12}}'
                             .format(r.name) for r in initializer + remaining] +
                            [re.escape(r'--Sending leaving worker done migration msg to cluster'),
                             re.escape(r'Connections: Finished shutdown procedure.'),
                             re.escape(r'Shutting down ControlConnection'),
                             re.escape(r'Shutting down TCPSink'),
                             re.escape(r'Shutting down DataReceiver'),
                             re.escape(r'Shutting down ReconnectingMetricsSink'),
                             re.escape(r'Shutting down OutgoingBoundary'),
                             re.escape(r'Shutting down Startup...'),
                             re.escape(r'Shutting down DataChannel'),
                             re.escape(r'metrics connection closed'),
                             re.escape(r'TCPSink connection closed'),
                             re.escape(r'ControlChannelConnectNotifier: server closed')])
                        patterns_l_per = []
                        left_checkers = []
                        # initializer STDOUT checker
                        left_checkers.append(RunnerChecker(initializer[0], patterns_i,
                                                           timeout=join_timeout,
                                                           start_from=start_froms[initializer[0]]))
                        # remaining workers STDOUT checkers
                        for runner in remaining:
                            left_checkers.append(RunnerChecker(runner,
                                                               patterns_r + [p.format(runner.name) for p in
                                                                             patterns_r_per],
                                                               timeout=join_timeout,
                                                               start_from=start_froms[runner]))
                        # leaving workers STDOUT checkers
                        for runner in left:
                            left_checkers.append(RunnerChecker(runner,
                                                               patterns_l + [p.format(runner.name) for p in
                                                                             patterns_l_per],
                                                               timeout=join_timeout,
                                                               start_from=start_froms[runner]))
                        for lc in left_checkers:
                            lc.start()
                        # Send the shrink command
                        send_shrink_cmd(host, external_port, names=[r.name for r in left])
                        # Wait for output checkers to join
                        for lc in left_checkers:
                            lc.join()
                            if lc.error:
                                outputs = runners_output_format(runners)
                                raise AutoscaleTimeoutError(
                                    "'{}' timed out on SHRINK in {} "
                                    "seconds. The cluster had the following outputs:\n===\n{}"
                                    .format(lc.runner_name, lc.timeout, outputs),
                                    as_error=lc.error,
                                    as_steps=steps)
                    else:  # Handle the 0 case as a noop
                        continue
                    # Remember each runner's current log position so the next
                    # step's checkers only scan new output.
                    start_froms = {r: r.tell() for r in runners}
                    # Validate autoscale via partition query
                    try:
                        partitions = query_partitions(host, external_port)
                        phase_validate_partitions(runners, partitions,
                                                  joined=[r.name for r in joined],
                                                  left=[r.name for r in left])
                    except Exception as err:
                        print('error validating {} have joined and {} have left'
                              .format([r.name for r in joined],
                                      [r.name for r in left]))
                        raise err
            # wait until sender completes (~10 seconds)
            sender.join(sender_timeout)
            if sender.error:
                raise sender.error
            if sender.is_alive():
                sender.stop()
                raise TimeoutError('Sender did not complete in the expected '
                                   'period')
            # Use Sink value to determine when to stop runners and sink
            stopper = SinkAwaitValue(sink, await_values, 30)
            stopper.start()
            stopper.join()
            if stopper.error:
                print('sink.data', len(sink.data))
                print('await_values', len(await_values))
                raise stopper.error
            # stop application workers
            for r in runners:
                r.stop()
            # Stop sink
            sink.stop()
            # Stop metrics
            metrics.stop()
            # validate output
            phase_validate_output(runners, sink, expected)
        finally:
            for r in runners:
                r.stop()
            # Wait on runners to finish waiting on their subprocesses to exit
            for r in runners:
                r.join(runner_join_timeout)
            alive = []
            for r in runners:
                if r.is_alive():
                    alive.append(r)
            for r in runners:
                ec = r.poll()
                if ec != 0:
                    print('Worker {!r} exited with return code {}'
                          .format(r.name, ec))
                    print('Its last 5 log lines were:')
                    print('\n'.join(r.get_output().splitlines()[-5:]))
                    print()
            if alive:
                alive_names = ', '.join((r.name for r in alive))
                outputs = runners_output_format(runners)
                for a in alive:
                    a.kill()
            clean_resilience_path(res_dir)
            if alive:
                # NOTE(review): PipelineTestError is not imported in this
                # module — if this branch is taken, the raise itself would
                # fail with NameError. Confirm the intended import.
                raise PipelineTestError("Runners [{}] failed to exit cleanly after"
                                        " {} seconds.\n"
                                        "Runner outputs are attached below:"
                                        "\n===\n{}"
                                        .format(alive_names, runner_join_timeout,
                                                outputs))
    except Exception as err:
        # Attach the autoscale steps performed so far for diagnostics.
        if not hasattr(err, 'as_steps'):
            err.as_steps = steps
        raise err
|
<filename>tests/links_tests/model_tests/fpn_tests/test_mask_head.py<gh_stars>1000+
from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import mask_head_loss_post
from chainercv.links.model.fpn import mask_head_loss_pre
from chainercv.links.model.fpn import MaskHead
from chainercv.utils import mask_to_bbox
try:
import cv2 # NOQA
_cv2_available = True
except ImportError:
_cv2_available = False
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
# Each parameterization injects `n_class` (foreground classes + background)
# as an attribute on the generated test case.
@testing.parameterize(
    {'n_class': 1 + 1},
    {'n_class': 5 + 1},
    {'n_class': 20 + 1},
)
class TestMaskHead(unittest.TestCase):
    """Unit tests for MaskHead: forward call, RoI distribution, decoding."""
    def setUp(self):
        # Three FPN scales -> three feature levels in each test below.
        self.link = MaskHead(
            n_class=self.n_class, scales=(1 / 2, 1 / 4, 1 / 8))
    def _check_call(self):
        # One feature map per scale, batch size 2.
        hs = [
            chainer.Variable(_random_array(self.link.xp, (2, 64, 32, 32))),
            chainer.Variable(_random_array(self.link.xp, (2, 64, 16, 16))),
            chainer.Variable(_random_array(self.link.xp, (2, 64, 8, 8))),
        ]
        # RoIs per level as (y_min, x_min, y_max, x_max) boxes.
        rois = [
            self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
            self.link.xp.array(
                ((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
            self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
        ]
        # Batch index of each RoI.
        roi_indices = [
            self.link.xp.array((0,), dtype=np.int32),
            self.link.xp.array((1, 0), dtype=np.int32),
            self.link.xp.array((1,), dtype=np.int32),
        ]
        segs = self.link(hs, rois, roi_indices)
        self.assertIsInstance(segs, chainer.Variable)
        self.assertIsInstance(segs.array, self.link.xp.ndarray)
        # 4 RoIs total across levels; one segm_size map per class per RoI.
        self.assertEqual(
            segs.shape,
            (4, self.n_class, self.link.segm_size, self.link.segm_size))
    def test_call_cpu(self):
        self._check_call()
    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self._check_call()
    def _check_distribute(self):
        # Boxes of varying area should be spread across the three levels.
        rois = self.link.xp.array((
            (0, 0, 10, 10),
            (0, 1000, 0, 1000),
            (0, 0, 224, 224),
            (100, 100, 224, 224),
        ), dtype=np.float32)
        roi_indices = self.link.xp.array((0, 1, 0, 0), dtype=np.int32)
        n_roi = len(roi_indices)
        rois, roi_indices, order = self.link.distribute(rois, roi_indices)
        self.assertEqual(len(rois), 3)
        self.assertEqual(len(roi_indices), 3)
        for l in range(3):
            self.assertIsInstance(rois[l], self.link.xp.ndarray)
            self.assertIsInstance(roi_indices[l], self.link.xp.ndarray)
            self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
            self.assertEqual(rois[l].shape[1:], (4,))
            self.assertEqual(roi_indices[l].shape[1:], ())
        # All four input RoIs must be assigned to exactly one level.
        self.assertEqual(sum(rois[l].shape[0] for l in range(3)), 4)
        self.assertEqual(len(order), n_roi)
        self.assertIsInstance(order, self.link.xp.ndarray)
    def test_distribute_cpu(self):
        self._check_distribute()
    @attr.gpu
    def test_distribute_gpu(self):
        self.link.to_gpu()
        self._check_distribute()
    def _check_decode(self):
        # Per-level raw segmentation outputs to be pasted back into images.
        segms = [
            _random_array(
                self.link.xp,
                (1, self.n_class, self.link.segm_size, self.link.segm_size)),
            _random_array(
                self.link.xp,
                (2, self.n_class, self.link.segm_size, self.link.segm_size)),
            _random_array(
                self.link.xp,
                (1, self.n_class, self.link.segm_size, self.link.segm_size))
        ]
        bboxes = [
            self.link.xp.array(((4, 1, 6, 3),), dtype=np.float32),
            self.link.xp.array(
                ((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
            self.link.xp.array(((10, 4, 12, 10),), dtype=np.float32),
        ]
        labels = [
            self.link.xp.random.randint(
                0, self.n_class - 1, size=(1,), dtype=np.int32),
            self.link.xp.random.randint(
                0, self.n_class - 1, size=(2,), dtype=np.int32),
            self.link.xp.random.randint(
                0, self.n_class - 1, size=(1,), dtype=np.int32),
        ]
        # Output image sizes (H, W) for each of the three samples.
        sizes = [(56, 56), (48, 48), (72, 72)]
        masks = self.link.decode(
            segms, bboxes, labels, sizes)
        self.assertEqual(len(masks), 3)
        for n in range(3):
            self.assertIsInstance(masks[n], self.link.xp.ndarray)
            self.assertEqual(masks[n].shape[0], labels[n].shape[0])
            self.assertEqual(masks[n].shape[1:], sizes[n])
    @unittest.skipUnless(_cv2_available, 'cv2 is not installed')
    def test_decode_cpu(self):
        self._check_decode()
class TestMaskHeadLoss(unittest.TestCase):
    """Shape/type tests for mask_head_loss_pre and mask_head_loss_post."""
    def _check_mask_head_loss_pre(self, xp):
        n_inst = 12
        segm_size = 28
        rois = [
            xp.array(((4, 1, 6, 3),), dtype=np.float32),
            xp.array(
                ((0, 1, 2, 3), (5, 4, 10, 6)), dtype=np.float32),
            xp.array(((10, 4, 12, 10),), dtype=np.float32),
        ]
        roi_indices = [
            xp.array((0,), dtype=np.int32),
            xp.array((1, 0), dtype=np.int32),
            xp.array((1,), dtype=np.int32),
        ]
        # Ground-truth instance masks per image; bboxes derived from them.
        masks = [
            _random_array(xp, (n_inst, 60, 70)),
            _random_array(xp, (n_inst, 60, 70)),
        ]
        bboxes = [mask_to_bbox(mask) for mask in masks]
        labels = [
            xp.array((1,), dtype=np.int32),
            xp.array((10, 4), dtype=np.int32),
            xp.array((3,), dtype=np.int32),
        ]
        rois, roi_indices, gt_segms, gt_mask_labels = mask_head_loss_pre(
            rois, roi_indices, masks, bboxes, labels, segm_size)
        # Outputs stay per-level (3 levels) with consistent leading dims.
        self.assertEqual(len(rois), 3)
        self.assertEqual(len(roi_indices), 3)
        self.assertEqual(len(gt_segms), 3)
        self.assertEqual(len(gt_mask_labels), 3)
        for l in range(3):
            self.assertIsInstance(rois[l], xp.ndarray)
            self.assertIsInstance(roi_indices[l], xp.ndarray)
            self.assertIsInstance(gt_segms[l], xp.ndarray)
            self.assertIsInstance(gt_mask_labels[l], xp.ndarray)
            self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])
            self.assertEqual(rois[l].shape[0], gt_segms[l].shape[0])
            self.assertEqual(rois[l].shape[0], gt_mask_labels[l].shape[0])
            self.assertEqual(rois[l].shape[1:], (4,))
            self.assertEqual(roi_indices[l].shape[1:], ())
            self.assertEqual(gt_segms[l].shape[1:], (segm_size, segm_size))
            self.assertEqual(gt_mask_labels[l].shape[1:], ())
            self.assertEqual(gt_segms[l].dtype, np.float32)
            self.assertEqual(gt_mask_labels[l].dtype, np.int32)
    @unittest.skipUnless(_cv2_available, 'cv2 is not installed')
    def test_mask_head_loss_pre_cpu(self):
        self._check_mask_head_loss_pre(np)
    @attr.gpu
    @unittest.skipUnless(_cv2_available, 'cv2 is not installed')
    def test_mask_head_loss_pre_gpu(self):
        import cupy
        self._check_mask_head_loss_pre(cupy)
    def _check_head_loss_post(self, xp):
        # B = batch size; 20 RoIs across the batch, 81 classes, 28x28 segms.
        B = 2
        segms = chainer.Variable(_random_array(xp, (20, 81, 28, 28)))
        mask_roi_indices = [
            xp.random.randint(0, B, size=5).astype(np.int32),
            xp.random.randint(0, B, size=7).astype(np.int32),
            xp.random.randint(0, B, size=8).astype(np.int32),
        ]
        gt_segms = [
            _random_array(xp, (5, 28, 28)),
            _random_array(xp, (7, 28, 28)),
            _random_array(xp, (8, 28, 28)),
        ]
        gt_mask_labels = [
            xp.random.randint(0, 80, size=5).astype(np.int32),
            xp.random.randint(0, 80, size=7).astype(np.int32),
            xp.random.randint(0, 80, size=8).astype(np.int32),
        ]
        mask_head_loss = mask_head_loss_post(
            segms, mask_roi_indices, gt_segms, gt_mask_labels, B)
        # Loss must be a scalar Variable.
        self.assertIsInstance(mask_head_loss, chainer.Variable)
        self.assertIsInstance(mask_head_loss.array, xp.ndarray)
        self.assertEqual(mask_head_loss.shape, ())
    def test_head_loss_post_cpu(self):
        self._check_head_loss_post(np)
    @attr.gpu
    def test_head_loss_post_gpu(self):
        import cupy
        self._check_head_loss_post(cupy)
# Discover and run this module's tests when executed directly.
testing.run_module(__name__, __file__)
|
<reponame>xopr/gigatron-rom<gh_stars>100-1000
import asm
# Gigatron vCPU runtime support library, emitted via the `asm` helper.
# Each asm.defun(...) starts a new routine; labels ('loop', 'done', ...)
# are presumably scoped to the enclosing defun — TODO confirm in asm module.
# Global scratch: ht/ha are temporaries, sp is the data stack pointer,
# rv holds return values, tt is thunk scratch (per the table below).
asm.defun('@globals')
asm.glob('ht')
asm.dw(0)
asm.glob('ha')
asm.dw(0)
asm.glob('pvpc')
asm.dw('vPC')
asm.glob('sp')
asm.dw(0x06fe)
asm.glob('rv')
asm.dw(0)
# Pointers to the runtime routines defined further down.
asm.glob('thunk0')
asm.dw('@thunk0')
asm.glob('thunk1')
asm.dw('@thunk1')
asm.glob('thunk2')
asm.dw('@thunk2')
asm.glob('enter')
asm.dw('@enter')
asm.glob('leave')
asm.dw('@leave')
asm.glob('ldloc')
asm.dw('@ldloc')
asm.glob('stloc')
asm.dw('@stloc')
asm.glob('pusha')
asm.dw('@pusha')
asm.glob('lsh')
asm.dw('@lsh')
asm.glob('rsh')
asm.dw('@rsh')
asm.glob('mul')
asm.dw('@mul')
asm.glob('divu')
asm.dw('@divu')
asm.glob('div')
asm.dw('@div')
asm.glob('blkcopy')
asm.dw('@blkcopy')
asm.glob('stlocb')
asm.dw('@stlocb')
asm.glob('ldlocb')
asm.dw('@ldlocb')
asm.glob('tt')
asm.dw(0)
# Program entry: call _main, then spin forever in 'halt'.
asm.defun('@start')
asm.ldwi('_main')
asm.call('vAC')
asm.label('halt')
asm.ldwi('halt')
asm.jr()
# Load a 16-bit local: vAC = *(sp + vAC).
asm.defun('@ldloc')
asm.addw('sp')
asm.deek()
asm.ret()
# Load a byte local: vAC = byte at (sp + vAC).
asm.defun('@ldlocb')
asm.addw('sp')
asm.peek()
asm.ret()
# Store a 16-bit local: *(sp + vAC) = ha.
asm.defun('@stloc')
asm.addw('sp')
asm.stw('ht')
asm.ldw('ha')
asm.doke('ht')
asm.ret()
# Store a byte local: byte at (sp + vAC) = low byte of ha.
asm.defun('@stlocb')
asm.addw('sp')
asm.stw('ht')
asm.ld('ha')
asm.poke('ht')
asm.ret()
# Thunks: adjust the return address (vLR) before returning.
asm.defun('@thunk0')
asm.stw('tt')
asm.inc('vLRH')
asm.ldi(0)
asm.st('vLR')
asm.ldw('tt')
asm.ret()
asm.defun('@thunk1')
asm.stw('tt')
asm.inc('vLRH')
asm.ldi(0xa0)
asm.st('vLR')
asm.ldw('tt')
asm.ret()
asm.defun('@thunk2')
asm.stw('tt')
asm.ldwi(0x08a0)
asm.stw('vLR')
asm.ldw('tt')
asm.ret()
# vAC = bitmask of registers to save. The highest-order bit represents r15.
# Walks r15 down to r1, pushing each register whose mask bit is set
# (mask is shifted left each step; the sign bit selects the current reg).
asm.defun('@enter')
asm.stw('ha')
asm.ldi('r15')
asm.stw('ht')
asm.label('loop')
asm.ldw('ha')
asm.jeq('done')
asm.ldw('ha')
asm.jgt('next')
asm.ldw('ht')
asm.deek()
asm.doke('sp')
asm.ldw('sp')
asm.subi(2)
asm.stw('sp')
asm.label('next')
asm.ldw('ht')
asm.subi(2)
asm.stw('ht')
asm.ldw('ha')
asm.lslw()
asm.stw('ha')
asm.j('loop')
asm.label('done')
asm.ret()
# vAC = bitmask of registers to restore. The highest-order bit represents r1.
# Mirror of @enter: walks r1 upward, popping saved registers off the stack.
asm.defun('@leave')
asm.stw('ha')
asm.ldi('r1')
asm.stw('ht')
asm.label('loop')
asm.ldw('ha')
asm.jeq('done')
asm.ldw('ha')
asm.jgt('next')
asm.ldw('sp')
asm.addi(2)
asm.stw('sp')
asm.deek()
asm.doke('ht')
asm.label('next')
asm.inc('ht')
asm.inc('ht')
asm.ldw('ha')
asm.lslw()
asm.stw('ha')
asm.j('loop')
asm.label('done')
asm.ret()
# vAC = value to push
asm.defun('@pusha')
asm.doke('sp')
asm.ldw('sp')
asm.subi(2)
asm.stw('sp')
asm.ret()
# vAC = shift amount, r1 = value to shift
# Left shift by repeated lslw; shift count is masked to 0..15.
asm.defun('@lsh')
asm.andi(0x0f)
asm.jeq('done')
asm.stw('ha')
asm.label('loop')
asm.ldw('r1')
asm.lslw()
asm.stw('r1')
asm.ldw('ha')
asm.subi(1)
asm.stw('ha')
asm.jne('loop')
asm.label('done')
asm.ldw('r1')
asm.ret()
# vAC = shift amount, r1 = value to shift
# Logical right shift via the SYS_LSRW1_48 system call, one bit per loop;
# the caller's sysFn is saved in ht and restored afterwards.
asm.defun('@rsh')
asm.andi(0x0f)
asm.jeq('done')
asm.stw('ha')
asm.ldw('sysFn')
asm.stw('ht')
asm.ldwi('SYS_LSRW1_48')
asm.stw('sysFn')
asm.label('loop')
asm.ldw('r1')
asm.sys(246) # 270-48/2 = 246
asm.stw('r1')
asm.ldw('ha')
asm.subi(1)
asm.stw('ha')
asm.jne('loop')
asm.ldw('ht')
asm.stw('sysFn')
asm.label('done')
asm.ldw('r1')
asm.ret()
# r1 = a, vAC = b
# rv = product, ha = addend, ht = bitmask
# Shift-and-add multiply: for each set bit of r1, add the (doubled) addend.
asm.defun('@mul')
asm.stw('ha')
asm.ldi(0)
asm.stw('rv')
asm.ldi(1)
asm.stw('ht')
asm.label('loop')
asm.ldw('ht')
asm.andw('r1')
asm.jeq('next')
asm.ldw('rv')
asm.addw('ha')
asm.stw('rv')
asm.label('next')
asm.ldw('ha')
asm.lslw()
asm.stw('ha')
asm.ldw('ht')
asm.lslw()
asm.stw('ht')
asm.jne('loop')
asm.ldw('rv')
asm.ret()
# r1 = dest, ha = src, vAC = size
# Byte-wise forward copy; size must be non-zero (decrement-and-test loop).
asm.defun('@blkcopy')
asm.stw('ht')
asm.label('loop')
asm.ldw('ha')
asm.peek()
asm.poke('r1')
asm.ldw('ha')
asm.addi(1)
asm.stw('ha')
asm.ldw('r1')
asm.addi(1)
asm.stw('r1')
asm.ldw('ht')
asm.subi(1)
asm.stw('ht')
asm.jne('loop')
asm.ret()
# Unsigned division
# r1 = r1 / vAC, ht = r1 % vAC
# 16-iteration shift-subtract long division; the iteration counter lives
# in the stack frame slot at offset 256-2.
asm.defun('@divu')
asm.push()
asm.stw('ha')
asm.jlt('skip3')
asm.ldi(0)
asm.stw('ht')
asm.stlw(256-2)
asm.label('loop')
asm.ldw('ht')
asm.lslw()
asm.stw('ht')
asm.ldw('r1')
asm.jge('skip1')
asm.inc('ht')
asm.label('skip1')
asm.ldw('r1')
asm.lslw()
asm.stw('r1')
asm.ldw('ht')
asm.subw('ha')
asm.jlt('skip2')
asm.stw('ht')
asm.inc('r1')
asm.label('skip2')
asm.ldlw(256-2)
asm.addi(1)
asm.andi(15)
asm.stlw(256-2)
asm.jne('loop')
asm.popret()
# Special cases (loop can't handle vAC >= 0x8001)
asm.label('skip3')
asm.ldw('r1')
asm.jge('skip4')
asm.subw('ha')
asm.jge('skip5')
# div,mod = 0,r1
asm.ldw('r1')
asm.label('skip4')
asm.stw('ht')
asm.ldi(0)
asm.stw('r1')
asm.popret()
# div,mod = 1,r1-vAC
asm.label('skip5')
asm.stw('ht')
asm.ldi(1)
asm.stw('r1')
asm.popret()
# Signed division
# vAC = r1 / vAC, ht = r1 % vAC
# C99 compliant: rounding towards 0, and (a/b)*b + a%b == a
# Divides absolute values via @divu, then fixes the signs of quotient and
# remainder from the original operand signs (saved in local slots).
asm.defun('@div')
asm.push()
asm.stw('ha')
asm.xorw('r1')
asm.stlw(256-6)
asm.ldw('r1')
asm.stlw(256-8)
asm.jge('skip1')
asm.ldi(0)
asm.subw('r1')
asm.stw('r1')
asm.label('skip1')
asm.ldw('ha')
asm.jge('skip2')
asm.ldi(0)
asm.subw('ha')
asm.label('skip2')
asm.call('divu')
asm.ldlw(256-8)
asm.jge('skip3')
asm.ldi(0)
asm.subw('ht')
asm.stw('ht')
asm.label('skip3')
asm.ldlw(256-6)
asm.jlt('skip4')
asm.ldw('r1')
asm.popret()
asm.label('skip4')
asm.ldi(0)
asm.subw('r1')
asm.popret()
|
# Copyright © 2018 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
# !/usr/bin/python
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vcd_vdc_vm
short_description: Ansible Module to create auto natured vApp from a VM template in vCloud Director.
version_added: "2.4"
description:
- "Ansible Module to create auto natured vApp."
options:
user:
description:
- vCloud Director user name
required: false
password:
description:
- vCloud Director user password
required: false
host:
description:
- vCloud Director host address
required: false
org:
description:
- Organization name on vCloud Director to access
required: false
api_version:
description:
- Pyvcloud API version
required: false
verify_ssl_certs:
description:
- whether to use secure connection to vCloud Director host
required: false
target_vm_name:
description:
- target VM name
required: true
target_vdc:
description:
- target VDC
required: true
source_vm_name:
description:
- source VM name
required: false
source_catalog_name:
description:
- source catalog name
required: false
source_template_name:
description:
- source template name
required: false
hostname:
description:
- target guest hostname
required: false
vmpassword:
description:
- set the administrator password for target machine
required: false
vmpassword_auto:
description:
- true/false, autogenerate administrator password
required: false
vmpassword_reset:
description:
- true if the administrator password for this virtual machine must be reset after first use else false
required: false
cust_script:
description:
- script to run on guest customization
required: false
network:
description:
- Name of the vApp network to connect. If omitted, the VM won't be connected to any network
required: false
storage_profile:
description:
- the name of the storage profile to be used for this VM
required: false
all_eulas_accepted:
description:
- true / false
required: false
ip_allocation_mode:
description:
- dhcp
required: false
power_on:
description:
- power on VMs
required: false
state:
description:
- state of new virtual machines (present).One from state or operation has to be provided.
required: false
author:
- <EMAIL>
'''
# Usage example rendered by ansible-doc. EXAMPLES must be valid YAML:
# the original used `key = "value"` pairs (invalid YAML mapping syntax)
# and keys (`source_vapp`) that do not exist in vm_argument_spec().
EXAMPLES = '''
- name: Test with a message
  vcd_vdc_vm:
    user: terraform
    password: <PASSWORD>
    host: csa.sandbox.org
    org: Terraform
    api_version: 30
    verify_ssl_certs: False
    target_vm_name: "vm_name"
    target_vdc: "vdc"
    source_catalog_name: "catalog_name"
    source_template_name: "template_name"
    hostname: "vcdcell"
    vmpassword: "<PASSWORD>"
    vmpassword_auto: "false"
    vmpassword_reset: "<PASSWORD>"
    cust_script: "/home/setup.sh"
    network: "MGMT"
    storage_profile: "Standard"
    state: "present"
    all_eulas_accepted: "true"
'''
# Return-value documentation rendered by ansible-doc.
RETURN = '''
msg: task details
changed: true if resource has been changed else false
'''
from ansible.module_utils.vcd import VcdAnsibleModule
from lxml.objectify import StringElement
from pyvcloud.vcd.client import E_OVF, E, RelationType
from pyvcloud.vcd.exceptions import EntityNotFoundException
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.utils import task_to_dict
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.vm import VM
# Valid values accepted by the 'state' module parameter.
VM_STATES = ['present', 'absent']


def vm_argument_spec():
    """Build the Ansible argument specification for the vcd_vdc_vm module.

    :return: mapping of parameter name to its option definition
        (type, required flag and optional default/choices).
    """
    return {
        'target_vm_name': {'type': 'str', 'required': True},
        'target_vdc': {'type': 'str', 'required': True},
        'target_vapp': {'type': 'str', 'required': False},
        'source_catalog_name': {'type': 'str', 'required': False},
        'source_template_name': {'type': 'str', 'required': False},
        'source_vm_name': {'type': 'str', 'required': False},
        'hostname': {'type': 'str', 'required': False},
        'vmpassword': {'type': 'str', 'required': False},
        'vmpassword_auto': {'type': 'bool', 'required': False},
        'vmpassword_reset': {'type': 'bool', 'required': False},
        'cust_script': {'type': 'str', 'required': False, 'default': ''},
        'network': {'type': 'str', 'required': False},
        'storage_profile': {'type': 'str', 'required': False, 'default': ''},
        'ip_allocation_mode': {'type': 'str', 'required': False, 'default': 'DHCP'},
        'power_on': {'type': 'bool', 'required': False, 'default': True},
        'all_eulas_accepted': {'type': 'bool', 'required': False, 'default': None},
        'state': {'choices': VM_STATES, 'required': False},
    }
class VdcVM(VcdAnsibleModule):
    """Ansible module backend that creates/deletes a VM inside a VDC.

    The VM is instantiated from a catalog vApp template (``source_catalog_name``
    / ``source_template_name``) into the vApp named by ``target_vapp`` within
    ``target_vdc``.  Fixes vs. the previous revision: the guest-password spec
    keys now reference the actual module parameters (the old code contained
    unresolved placeholder tokens), ``delete_vm`` no longer fetches the VM
    twice, and ``add_vm`` no longer shadows its ``params`` local.
    """

    def __init__(self, **kwargs):
        super(VdcVM, self).__init__(**kwargs)
        # Resolve the target VDC once; every operation below acts on it.
        self.vdc = self.get_target_resource()

    def manage_states(self):
        """Dispatch to add/delete based on the ``state`` parameter.

        :return: response dict of the executed operation (``None`` for any
            other state value, which argument validation should prevent).
        """
        state = self.params.get('state')
        if state == "present":
            return self.add_vm()
        if state == "absent":
            return self.delete_vm()

    def get_source_resource(self):
        """Fetch the source vApp template resource from the catalog.

        :return: the vApp template resource, or ``None`` when no
            ``source_catalog_name`` parameter was supplied.
        """
        source_catalog_name = self.params.get('source_catalog_name')
        source_template_name = self.params.get('source_template_name')
        org_resource = Org(self.client, resource=self.client.get_org())
        source_vapp_resource = None
        if source_catalog_name:
            catalog_item = org_resource.get_catalog_item(
                source_catalog_name, source_template_name)
            source_vapp_resource = self.client.get_resource(
                catalog_item.Entity.get('href'))
        return source_vapp_resource

    def get_target_resource(self):
        """Return a pyvcloud ``VDC`` object for the ``target_vdc`` parameter."""
        target_vdc = self.params.get('target_vdc')
        org_resource = Org(self.client, resource=self.client.get_org())
        vdc = VDC(
            self.client, resource=org_resource.get_vdc(target_vdc))
        return vdc

    def get_storage_profile(self, profile_name):
        """Return the VDC storage-profile resource named *profile_name*."""
        return self.vdc.get_storage_profile(profile_name)

    def get_vm(self):
        """Return a pyvcloud ``VM`` for ``target_vm_name`` in ``target_vapp``.

        :raises EntityNotFoundException: if the vApp or the VM does not exist.
        """
        vapp = VApp(self.client,
                    resource=self.vdc.get_vapp(self.params.get('target_vapp')))
        vapp_vm_resource = vapp.get_vm(self.params.get('target_vm_name'))
        return VM(self.client, resource=vapp_vm_resource)

    def to_instantiate_vm_template_params(self, spec):
        """Translate *spec* into an ``InstantiateVmTemplateParams`` XML body.

        :param spec: dict with the keys assembled by :meth:`add_vm`
            (source_vm_name, vapp, target_vm_name, hostname, password,
            password_auto, password_reset, ip_allocation_mode, network,
            cust_script, storage_profile, power_on).
        :return: lxml element tree ready for POSTing to the VDC.
        """
        source_vapp = VApp(self.client, resource=spec['vapp'])
        vm_template = source_vapp.get_vm(spec['source_vm_name'])
        params = E.InstantiateVmTemplateParams(
            E.SourcedVmTemplateItem(
                E.Source(
                    href=vm_template.get('href'),
                    id=vm_template.get('id'),
                    type=vm_template.get('type'),
                    name=vm_template.get('name'),
                )
            ),
            name=spec['target_vm_name'],
            powerOn='true' if spec['power_on'] else 'false')
        vm_general_params = E.VmGeneralParams()
        vm_instantiation_param = E.VmTemplateInstantiationParams()
        if spec.get('network'):
            # Reuse the template's primary NIC index for the new connection.
            primary_index = int(vm_template.NetworkConnectionSection.
                                PrimaryNetworkConnectionIndex.text)
            vm_instantiation_param.append(
                E.NetworkConnectionSection(
                    E_OVF.Info(),
                    E.NetworkConnection(
                        E.NetworkConnectionIndex(primary_index),
                        E.IsConnected(True),
                        E.IpAddressAllocationMode(spec['ip_allocation_mode'].upper()),
                        network=spec['network'])))
        # NOTE(review): these are key-membership tests, not truthiness tests;
        # add_vm() always supplies the 'password' key, so customization is
        # effectively always enabled -- confirm whether that is intended.
        needs_customization = 'disk_size' in spec or 'password' in spec or \
            'cust_script' in spec or 'hostname' in spec
        if needs_customization:
            guest_customization_param = E.GuestCustomizationSection(
                E_OVF.Info(),
                E.Enabled(True),
            )
            if spec.get('password'):
                # Explicit admin password takes precedence over autogeneration.
                guest_customization_param.append(E.AdminPasswordEnabled(True))
                guest_customization_param.append(E.AdminPasswordAuto(False))
                guest_customization_param.append(
                    E.AdminPassword(spec['password']))
            else:
                if spec.get('password_auto'):
                    guest_customization_param.append(
                        E.AdminPasswordEnabled(True))
                    guest_customization_param.append(E.AdminPasswordAuto(True))
                else:
                    guest_customization_param.append(
                        E.AdminPasswordEnabled(False))
            if spec.get('password_reset'):
                guest_customization_param.append(
                    E.ResetPasswordRequired(spec['password_reset']))
            if spec.get('cust_script'):
                guest_customization_param.append(
                    E.CustomizationScript(spec['cust_script']))
            if spec.get('hostname'):
                guest_customization_param.append(
                    E.ComputerName(spec['hostname']))
            vm_instantiation_param.append(guest_customization_param)
        vm_general_params.append(E.NeedsCustomization(needs_customization))
        params.SourcedVmTemplateItem.append(vm_general_params)
        params.SourcedVmTemplateItem.append(vm_instantiation_param)
        if spec.get('storage_profile'):
            sp = spec['storage_profile']
            storage_profile = E.StorageProfile(
                href=sp.get('href'),
                id=sp.get('href').split('/')[-1],
                type=sp.get('type'),
                name=sp.get('name'))
            params.SourcedVmTemplateItem.append(storage_profile)
        return params

    def add_vm(self):
        """Create the VM from the source template (``state=present``).

        Only runs the instantiation when the target vApp does not exist yet;
        otherwise a warning is returned unchanged.

        :return: response dict with ``msg``/``changed`` (and possibly
            ``warnings``) keys.
        """
        params = self.params
        source_vapp_resource = self.get_source_resource()
        target_vm_name = params.get('target_vm_name')
        hostname = params.get('hostname')
        source_vm_name = params.get('source_vm_name')
        target_vapp = params.get('target_vapp')
        vmpassword = params.get('vmpassword')
        vmpassword_auto = params.get('vmpassword_auto')
        vmpassword_reset = params.get('vmpassword_reset')
        network = params.get('network')
        power_on = params.get('power_on')
        ip_allocation_mode = params.get('ip_allocation_mode')
        cust_script = params.get('cust_script')
        storage_profile_name = params.get('storage_profile')
        storage_profile = None
        all_eulas_accepted = params.get('all_eulas_accepted')
        response = dict()
        response['msg'] = dict()
        response['changed'] = False
        if storage_profile_name:
            storage_profile = self.get_storage_profile(storage_profile_name)
        try:
            # Existence probe: creation only happens when this raises.
            self.vdc.get_vapp(target_vapp)
        except EntityNotFoundException:
            spec = {
                'source_vm_name': source_vm_name,
                'vapp': source_vapp_resource,
                'target_vm_name': target_vm_name,
                'hostname': hostname,
                # Fix: the previous revision carried unresolved placeholder
                # tokens here instead of the module parameters.
                'password': vmpassword,
                'password_auto': vmpassword_auto,
                'password_reset': vmpassword_reset,
                'ip_allocation_mode': ip_allocation_mode,
                'network': network,
                'cust_script': cust_script,
                'storage_profile': storage_profile,
                'power_on': power_on
            }
            # Separate name for the XML body so self.params is not shadowed.
            instantiate_params = self.to_instantiate_vm_template_params(spec)
            if all_eulas_accepted is not None:
                instantiate_params.append(E.AllEULAsAccepted(all_eulas_accepted))
            add_vms_task = self.client.post_linked_resource(
                self.vdc.resource, RelationType.ADD,
                'application/vnd.vmware.vcloud.instantiateVmTemplateParams+xml',
                instantiate_params)
            result = self.execute_task(add_vms_task)
            result = task_to_dict(result)
            if isinstance(result.get('details'), StringElement):
                # lxml objectify elements are not JSON-serializable.
                del result['details']
            response['msg'].update(result)
            response['changed'] = True
        else:
            response['warnings'] = "Vapp {} is already present.".format(target_vapp)
        return response

    def delete_vm(self):
        """Delete the target VM (``state=absent``).

        :return: response dict with ``msg``/``changed`` (or ``warnings`` when
            the VM does not exist).
        """
        vm_name = self.params.get('target_vm_name')
        response = dict()
        response['changed'] = False
        try:
            # Fetch once and reuse; the old code called get_vm() twice.
            vm = self.get_vm()
        except EntityNotFoundException:
            response['warnings'] = 'VM {} is not present.'.format(vm_name)
        else:
            # A deployed VM cannot be deleted; undeploy it first.
            self.undeploy_vm()
            delete_vms_task = self.client.delete_resource(
                vm.resource.get('href'))
            self.execute_task(delete_vms_task)
            response['msg'] = 'VM {} has been deleted.'.format(vm_name)
            response['changed'] = True
        return response

    def undeploy_vm(self):
        """Undeploy the target VM if it is currently deployed.

        :return: response dict with ``msg``/``changed`` (or ``warnings`` when
            the VM was already undeployed).
        """
        vm_name = self.params.get('target_vm_name')
        response = dict()
        response['changed'] = False
        vm = self.get_vm()
        if vm.get_resource().get('deployed') == 'true':
            undeploy_vm_task = vm.undeploy()
            self.execute_task(undeploy_vm_task)
            response['msg'] = 'VM {} has been undeployed.'.format(vm_name)
            response['changed'] = True
        else:
            response['warnings'] = 'VM {} is already undeployed.'.format(
                vm_name)
        return response
def main():
    """Module entry point: run the requested state operation and report.

    Builds the module from the argument spec, dispatches on ``state`` and
    reports via ``exit_json``/``fail_json``.
    """
    argument_spec = vm_argument_spec()
    response = dict(
        msg=dict(type='str')
    )
    module = VdcVM(argument_spec=argument_spec, supports_check_mode=True)
    try:
        if module.params.get('state'):
            response = module.manage_states()
        else:
            raise Exception('One of the state/operation should be provided.')
    except Exception as error:
        # Fix: stringify the exception -- fail_json serializes the response
        # to JSON and an Exception object is not JSON-serializable.
        response['msg'] = str(error)
        module.fail_json(**response)
    module.exit_json(**response)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
<filename>laygo/generators/splash/adc_sar_sar_wsamp_layout_generator_bb_doubleSA_pe.py<gh_stars>10-100
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""ADC library
"""
import laygo
import numpy as np
from math import log
import yaml
import os
import laygo.GridLayoutGeneratorHelper as laygenhelper #utility functions
#import logging;logging.basicConfig(level=logging.DEBUG)
def generate_sar_wsamp(laygen, objectname_pfix, workinglib, samp_lib, space_1x_lib, sar_name, samp_name, space_1x_name,
                       placement_grid, routing_grid_m5m6,
                       routing_grid_m5m6_thick, routing_grid_m5m6_thick_basic,
                       num_bits=9, origin=np.array([0, 0])):
    """generate sar with sampling frontend

    Places the sampling frontend directly on top of the SAR core, draws the
    pr-boundary, routes the sampler clock output down to the SAR clock inputs
    and the sampler outputs down to the SAR inputs, then re-exports the
    input/output/probe/supply pins of both sub-blocks at this hierarchy level.

    NOTE(review): besides the routing-grid parameters, this function also
    reads the module-level names ``rg_m4m5_basic_thick`` and ``rg_m3m4``
    defined in the ``__main__`` section below, so it only works when this
    module is executed as a script -- confirm before importing it elsewhere.
    NOTE(review): ``origin=np.array([0, 0])`` is a mutable default argument;
    it is never mutated here, but callers should not rely on its identity.
    """
    pg = placement_grid
    rg_m5m6 = routing_grid_m5m6
    rg_m5m6_thick = routing_grid_m5m6_thick
    rg_m5m6_thick_basic = routing_grid_m5m6_thick_basic #for clock routing
    # placement
    # sar
    isar=laygen.place(name="I" + objectname_pfix + 'SAR0', templatename=sar_name,
                      gridname=pg, xy=origin, template_libname=workinglib)
    # samp
    isamp = laygen.relplace(name="I" + objectname_pfix + 'SAMP0', templatename=samp_name,
                            gridname=pg, refinstname=isar.name, direction='top', template_libname=samp_lib)
    #prboundary
    sar_size = laygen.templates.get_template(sar_name, libname=workinglib).size
    samp_size = laygen.templates.get_template(samp_name, libname=samp_lib).size
    space_size = laygen.templates.get_template(space_1x_name, libname=space_1x_lib).size
    # snap the boundary height up to a multiple of the space-cell height
    size_x=sar_size[0]
    size_y=int((sar_size[1]+samp_size[1])/space_size[1]+1)*space_size[1]
    laygen.add_rect(None, np.array([origin, origin+np.array([size_x, size_y])]), laygen.layers['prbnd'])
    # template handles
    sar_template = laygen.templates.get_template(sar_name, workinglib)
    samp_template = laygen.templates.get_template(samp_name, samp_lib)
    #reference coordinates
    pdict_m5m6=laygen.get_inst_pin_xy(None, None, rg_m5m6)
    pdict_m5m6_thick=laygen.get_inst_pin_xy(None, None, rg_m5m6_thick)
    pdict_m5m6_thick_basic=laygen.get_inst_pin_xy(None, None, rg_m5m6_thick_basic)
    sar_pins=sar_template.pins
    samp_pins=samp_template.pins
    #sar_xy=isar.xy[0]
    #samp_xy=isamp.xy[0]
    sar_xy=isar.xy
    samp_xy=isamp.xy
    #signal route (clk/inp/inm)
    #make virtual grids and route on the grids (assuming drc clearance of each block)
    rg_m5m6_thick_basic_temp_sig='route_M5_M6_thick_basic_temp_sig'
    laygenhelper.generate_grids_from_inst(laygen, gridname_input=rg_m5m6_thick_basic, gridname_output=rg_m5m6_thick_basic_temp_sig,
                                          instname=isamp.name,
                                          inst_pin_prefix=['ckout'], xy_grid_type='xgrid')
    pdict_m5m6_thick_basic_temp_sig = laygen.get_inst_pin_xy(None, None, rg_m5m6_thick_basic_temp_sig)
    # NOTE(review): rg_m4m5_basic_thick below is a module-level global, not a
    # parameter of this function.
    rg_m4m5_basic_thick_temp_sig='route_M4_M5_basic_thick_temp_sig'
    laygenhelper.generate_grids_from_inst(laygen, gridname_input=rg_m4m5_basic_thick, gridname_output=rg_m4m5_basic_thick_temp_sig,
                                          instname=isamp.name,
                                          inst_pin_prefix=['outp', 'outn'], xy_grid_type='xgrid')
    pdict_m4m5_basic_thick_temp_sig = laygen.get_inst_pin_xy(None, None, rg_m4m5_basic_thick_temp_sig)
    #clock: sampler ckout down to the SAR CLK0/CLK1 inputs
    rclk0 = laygen.route(None, laygen.layers['metal'][5],
                         xy0=pdict_m5m6_thick_basic_temp_sig[isamp.name]['ckout'][0],
                         xy1=pdict_m5m6_thick_basic_temp_sig[isar.name]['CLK0'][1]-np.array([0,1]), gridname0=rg_m5m6_thick_basic_temp_sig)
    laygen.via(None,pdict_m5m6_thick_basic_temp_sig[isar.name]['CLK0'][1], rg_m5m6_thick_basic_temp_sig)
    laygen.via(None,pdict_m5m6_thick_basic_temp_sig[isar.name]['CLK1'][1], rg_m5m6_thick_basic_temp_sig)
    #laygen.via(None,pdict_m5m6_thick_basic_temp_sig[isar.name]['CLK2'][1], rg_m5m6_thick_basic_temp_sig)
    #rclk0 = laygen.route(None, laygen.layers['metal'][5],
    #                     xy0=pdict_m5m6_thick_basic[isamp.name]['ckout'][0],
    #                     xy1=pdict_m5m6_thick_basic[isar.name]['CLK'][1]-np.array([0,1]), gridname0=rg_m5m6_thick_basic)
    #laygen.via(None,pdict_m5m6_thick_basic[isar.name]['CLK'][1], rg_m5m6_thick_basic)
    #frontend sig: drop vias on every INP*/INM* pin row of the SAR
    inp_y_list=[]
    inm_y_list=[]
    for pn, p in pdict_m4m5_basic_thick_temp_sig[isar.name].items():
        if pn.startswith('INP'):
            inp_y_list.append(p[0][1])
            pv=np.array([pdict_m4m5_basic_thick_temp_sig[isamp.name]['outp'][0][0], p[0][1]])
            laygen.via(None,pv, rg_m4m5_basic_thick_temp_sig)
            #laygen.via(None,p[0], rg_m5m6_thick_basic_temp_sig)
        if pn.startswith('INM'):
            inm_y_list.append(p[0][1])
            pv=np.array([pdict_m4m5_basic_thick_temp_sig[isamp.name]['outn'][0][0], p[0][1]])
            laygen.via(None,pv, rg_m4m5_basic_thick_temp_sig)
            #laygen.via(None,p[0], rg_m5m6_thick_basic_temp_sig)
    # vertical M5 stubs reach one track below the lowest INP/INM row
    inp_y=min(inp_y_list)
    inm_y=min(inm_y_list)
    rinp0 = laygen.route(None, laygen.layers['metal'][5],
                         xy0=pdict_m4m5_basic_thick_temp_sig[isamp.name]['outp'][0],
                         xy1=np.array([pdict_m4m5_basic_thick_temp_sig[isamp.name]['outp'][0][0],inp_y-1]),
                         gridname0=rg_m4m5_basic_thick_temp_sig)
    rinm0 = laygen.route(None, laygen.layers['metal'][5],
                         xy0=pdict_m4m5_basic_thick_temp_sig[isamp.name]['outn'][0],
                         xy1=np.array([pdict_m4m5_basic_thick_temp_sig[isamp.name]['outn'][0][0],inm_y-1]),
                         gridname0=rg_m4m5_basic_thick_temp_sig)
    #rinp0 = laygen.route(None, laygen.layers['metal'][5],
    #                     xy0=pdict_m5m6_thick_basic_temp_sig[isamp.name]['outp'][0],
    #                     xy1=np.array([pdict_m5m6_thick_basic_temp_sig[isar.name]['INP0'][0][0],inp_y-1]),
    #                     gridname0=rg_m5m6_thick_basic_temp_sig)
    #rinm0 = laygen.route(None, laygen.layers['metal'][5],
    #                     xy0=pdict_m5m6_thick_basic_temp_sig[isamp.name]['outn'][0],
    #                     xy1=np.array([pdict_m5m6_thick_basic_temp_sig[isar.name]['INM0'][0][0],inm_y-1]),
    #                     gridname0=rg_m5m6_thick_basic_temp_sig)
    #input pins (just duplicate from lower hierarchy cells)
    # NOTE(review): INP/INM reuse samp_pins['ckin']['layer'] rather than the
    # layers of 'inp'/'inn' -- confirm this is intentional.
    laygen.add_pin('CLK', 'CLK', samp_xy+samp_pins['ckin']['xy'], samp_pins['ckin']['layer'])
    laygen.add_pin('INP', 'INP', samp_xy+samp_pins['inp']['xy'], samp_pins['ckin']['layer'])
    laygen.add_pin('INM', 'INM', samp_xy+samp_pins['inn']['xy'], samp_pins['ckin']['layer'])
    laygen.add_pin('OSP', 'OSP', sar_xy+sar_pins['OSP']['xy'], sar_pins['OSP']['layer'])
    laygen.add_pin('OSM', 'OSM', sar_xy+sar_pins['OSM']['xy'], sar_pins['OSM']['layer'])
    # re-export every VREF<i> pin of the SAR under the same netname
    for pn, p in sar_pins.items():
        if pn.startswith('VREF<0>'):
            pxy=sar_xy+sar_pins[pn]['xy']
            laygen.add_pin(pn, 'VREF<0>', pxy, sar_pins[pn]['layer'])
        if pn.startswith('VREF<1>'):
            pxy=sar_xy+sar_pins[pn]['xy']
            laygen.add_pin(pn, 'VREF<1>', pxy, sar_pins[pn]['layer'])
        if pn.startswith('VREF<2>'):
            pxy=sar_xy+sar_pins[pn]['xy']
            laygen.add_pin(pn, 'VREF<2>', pxy, sar_pins[pn]['layer'])
    #laygen.add_pin('VREF_M5R<2>', 'VREF<2>', sar_xy+sar_pins['VREF_M5R<2>']['xy'], sar_pins['VREF_M5R<2>']['layer'])
    #laygen.add_pin('VREF_M5R<1>', 'VREF<1>', sar_xy+sar_pins['VREF_M5R<1>']['xy'], sar_pins['VREF_M5R<1>']['layer'])
    #laygen.add_pin('VREF_M5R<0>', 'VREF<0>', sar_xy+sar_pins['VREF_M5R<0>']['xy'], sar_pins['VREF_M5R<0>']['layer'])
    #laygen.add_pin('VREF_M5L<2>', 'VREF<2>', sar_xy+sar_pins['VREF_M5L<2>']['xy'], sar_pins['VREF_M5L<2>']['layer'])
    #laygen.add_pin('VREF_M5L<1>', 'VREF<1>', sar_xy+sar_pins['VREF_M5L<1>']['xy'], sar_pins['VREF_M5L<1>']['layer'])
    #laygen.add_pin('VREF_M5L<0>', 'VREF<0>', sar_xy+sar_pins['VREF_M5L<0>']['xy'], sar_pins['VREF_M5L<0>']['layer'])
    laygen.add_pin('CKDSEL0<1>', 'CKDSEL0<1>', sar_xy+sar_pins['CKDSEL0<1>']['xy'], sar_pins['CKDSEL0<1>']['layer'])
    laygen.add_pin('CKDSEL0<0>', 'CKDSEL0<0>', sar_xy+sar_pins['CKDSEL0<0>']['xy'], sar_pins['CKDSEL0<0>']['layer'])
    laygen.add_pin('CKDSEL1<1>', 'CKDSEL1<1>', sar_xy+sar_pins['CKDSEL1<1>']['xy'], sar_pins['CKDSEL1<1>']['layer'])
    laygen.add_pin('CKDSEL1<0>', 'CKDSEL1<0>', sar_xy+sar_pins['CKDSEL1<0>']['xy'], sar_pins['CKDSEL1<0>']['layer'])
    #laygen.add_pin('EXTCLK', 'EXTCLK', sar_xy+sar_pins['EXTCLK']['xy'], sar_pins['EXTCLK']['layer'])
    laygen.add_pin('EXTSEL_CLK', 'EXTSEL_CLK', sar_xy+sar_pins['EXTSEL_CLK']['xy'], sar_pins['EXTSEL_CLK']['layer'])
    #output pins (just duplicate from lower hierarchy cells)
    for i in range(num_bits):
        pn='ADCOUT'+'<'+str(i)+'>'
        laygen.add_pin(pn, pn, sar_xy+sar_pins[pn]['xy'], sar_pins[pn]['layer'])
    laygen.add_pin('CLKO0', 'CLKO', sar_xy+sar_pins['CLKOUT0']['xy'], sar_pins['CLKOUT0']['layer'])
    laygen.add_pin('CLKO1', 'CLKO', sar_xy+sar_pins['CLKOUT1']['xy'], sar_pins['CLKOUT1']['layer'])
    #laygen.add_pin('CLKO2', 'CLKO', sar_xy+sar_pins['CLKOUT2']['xy'], sar_pins['CLKOUT2']['layer'])
    #probe pins
    laygen.add_pin('CLK0', 'ICLK', sar_xy+sar_pins['CLK0']['xy'], sar_pins['CLK0']['layer'])
    laygen.add_pin('CLK1', 'ICLK', sar_xy+sar_pins['CLK1']['xy'], sar_pins['CLK1']['layer'])
    #laygen.add_pin('CLK2', 'ICLK', sar_xy+sar_pins['CLK2']['xy'], sar_pins['CLK2']['layer'])
    laygen.add_pin('CLKPRB_SAMP', 'CLKPRB_SAMP', samp_xy+samp_pins['ckpg']['xy'], samp_pins['ckpg']['layer'])
    #laygen.add_pin('CLKPRB_SAR', 'CLKPRB_SAR', sar_xy+sar_pins['CLKPRB']['xy'], sar_pins['CLKPRB']['layer'])
    laygen.add_pin('SAMPP', 'SAMPP', sar_xy+sar_pins['SAINP']['xy'], sar_pins['SAINP']['layer'])
    laygen.add_pin('SAMPM', 'SAMPM', sar_xy+sar_pins['SAINM']['xy'], sar_pins['SAINM']['layer'])
    laygen.add_pin('SAOP', 'SAOP', sar_xy+sar_pins['SAOP']['xy'], sar_pins['SAOP']['layer'])
    laygen.add_pin('SAOM', 'SAOM', sar_xy+sar_pins['SAOM']['xy'], sar_pins['SAOM']['layer'])
    laygen.add_pin('SARCLK', 'SARCLK', sar_xy+sar_pins['SARCLK']['xy'], sar_pins['SARCLK']['layer'])
    laygen.add_pin('SARCLKB', 'SARCLKB', sar_xy+sar_pins['SARCLKB']['xy'], sar_pins['SARCLKB']['layer'])
    #laygen.add_pin('COMPOUT', 'COMPOUT', sar_xy+sar_pins['COMPOUT']['xy'], sar_pins['COMPOUT']['layer'])
    laygen.add_pin('DONE', 'DONE', sar_xy+sar_pins['DONE']['xy'], sar_pins['DONE']['layer'])
    laygen.add_pin('UP', 'UP', sar_xy+sar_pins['UP']['xy'], sar_pins['UP']['layer'])
    laygen.add_pin('PHI0', 'PHI0', sar_xy+sar_pins['PHI0']['xy'], sar_pins['PHI0']['layer'])
    # per-bit internal probe buses
    for i in range(num_bits):
        pn='ZP'+'<'+str(i)+'>'
        laygen.add_pin(pn, pn, sar_xy+sar_pins[pn]['xy'], sar_pins[pn]['layer'])
        pn='ZMID'+'<'+str(i)+'>'
        laygen.add_pin(pn, pn, sar_xy+sar_pins[pn]['xy'], sar_pins[pn]['layer'])
        pn='ZM'+'<'+str(i)+'>'
        laygen.add_pin(pn, pn, sar_xy+sar_pins[pn]['xy'], sar_pins[pn]['layer'])
        pn='SB'+'<'+str(i)+'>'
        laygen.add_pin(pn, pn, sar_xy+sar_pins[pn]['xy'], sar_pins[pn]['layer'])
    for i in range(num_bits-1):
        pn='VOL'+'<'+str(i)+'>'
        laygen.add_pin(pn, pn, sar_xy+sar_pins[pn]['xy'], sar_pins[pn]['layer'])
        pn='VOR'+'<'+str(i)+'>'
        laygen.add_pin(pn, pn, sar_xy+sar_pins[pn]['xy'], sar_pins[pn]['layer'])
    #VDD/VSS pin
    vddcnt=0
    vsscnt=0
    for p in pdict_m5m6[isar.name]:
        if p.startswith('VDD'):
            xy0=pdict_m5m6_thick[isar.name][p]
            laygen.pin(name='VDDSAR' + str(vddcnt), layer=laygen.layers['pin'][6], xy=xy0, gridname=rg_m5m6_thick, netname='VDDSAR')
            vddcnt+=1
        if p.startswith('VSS'):
            xy0=pdict_m5m6_thick[isar.name][p]
            laygen.pin(name='VSSSAR' + str(vsscnt), layer=laygen.layers['pin'][6], xy=xy0, gridname=rg_m5m6_thick, netname='VSS:')
            #laygen.pin(name='VSSSAR' + str(vsscnt), layer=laygen.layers['pin'][6], xy=xy0, gridname=rg_m5m6_thick, netname='VSS')
            vsscnt+=1
    #extract VDD/VSS grid from samp and make power pins
    rg_m5m6_thick_temp_samp='route_M5_M6_thick_temp_samp'
    laygenhelper.generate_grids_from_inst(laygen, gridname_input=rg_m5m6_thick, gridname_output=rg_m5m6_thick_temp_samp,
                                          instname=isamp.name,
                                          inst_pin_prefix=['VDD', 'VSS', 'samp_body'], xy_grid_type='ygrid')
    pdict_m5m6_thick_temp_samp = laygen.get_inst_pin_xy(None, None, rg_m5m6_thick_temp_samp)
    vddcnt=0
    vsscnt=0
    bodycnt=0
    for p in pdict_m5m6_thick_temp_samp[isamp.name]:
        if p.startswith('VDD'):
            xy0=pdict_m5m6_thick_temp_samp[isamp.name][p]
            laygen.pin(name='VDDSAMP' + str(vddcnt), layer=laygen.layers['pin'][6], xy=xy0, gridname=rg_m5m6_thick_temp_samp, netname='VDDSAMP')
            vddcnt+=1
        if p.startswith('VSS'):
            xy0=pdict_m5m6_thick_temp_samp[isamp.name][p]
            laygen.pin(name='VSSSAMP' + str(vsscnt), layer=laygen.layers['pin'][6], xy=xy0, gridname=rg_m5m6_thick_temp_samp, netname='VSS:')
            #laygen.pin(name='VSSSAMP' + str(vsscnt), layer=laygen.layers['pin'][6], xy=xy0, gridname=rg_m5m6_thick_temp_samp, netname='VSS')
            vsscnt+=1
        if p.startswith('samp_body'):
            xy0=pdict_m5m6_thick_temp_samp[isamp.name][p]
            laygen.pin(name='samp_body' + str(bodycnt), layer=laygen.layers['pin'][6], xy=xy0, gridname=rg_m5m6_thick_temp_samp, netname='samp_body')
            bodycnt+=1
    # VBB (body-bias pins of the SAR)
    # NOTE(review): rg_m3m4 is a module-level global, not a parameter.
    pdict_m3m4 = laygen.get_inst_pin_xy(None, None, rg_m3m4)
    rvbb_m3=[]
    for p in pdict_m3m4[isar.name]:
        if p.startswith('VBB'):
            laygen.pin(name='bottom_body'+str(p), layer=laygen.layers['pin'][3], xy=pdict_m3m4[isar.name][p], gridname=rg_m3m4, netname='bottom_body')
# Script entry point: set up the generator, load technology templates/grids,
# generate the sar_wsamp cell and export it via BAG (or GDS as a fallback).
if __name__ == '__main__':
    laygen = laygo.GridLayoutGenerator(config_file="laygo_config.yaml")
    import imp
    # Use phantom (abstract) cells when the BAG framework is unavailable.
    try:
        imp.find_module('bag')
        laygen.use_phantom = False
    except ImportError:
        laygen.use_phantom = True
    tech=laygen.tech
    utemplib = tech+'_microtemplates_dense'
    logictemplib = tech+'_logic_templates'
    samp_lib = 'adc_sampler_ec'
    samp_name = 'sampler_nmos'
    laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)
    laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)
    laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)
    laygen.templates.sel_library(utemplib)
    laygen.grids.sel_library(utemplib)
    #library load or generation
    workinglib = 'adc_sar_generated'
    laygen.add_library(workinglib)
    laygen.sel_library(workinglib)
    if os.path.exists(workinglib+'.yaml'): #generated layout file exists
        laygen.load_template(filename=workinglib+'.yaml', libname=workinglib)
    laygen.templates.sel_library(utemplib)
    #grid names used throughout generation (some are read as globals by
    #generate_sar_wsamp above)
    pg = 'placement_basic' #placement grid
    rg_m1m2 = 'route_M1_M2_cmos'
    rg_m1m2_thick = 'route_M1_M2_thick'
    rg_m2m3 = 'route_M2_M3_cmos'
    rg_m3m4 = 'route_M3_M4_basic'
    rg_m4m5 = 'route_M4_M5_basic'
    rg_m4m5_basic_thick = 'route_M4_M5_basic_thick'
    rg_m5m6 = 'route_M5_M6_basic'
    rg_m5m6_thick = 'route_M5_M6_thick'
    rg_m5m6_basic_thick = 'route_M5_M6_basic_thick'
    rg_m5m6_thick_basic = 'route_M5_M6_thick_basic'
    rg_m1m2_pin = 'route_M1_M2_basic'
    rg_m2m3_pin = 'route_M2_M3_basic'
    mycell_list = []
    num_bits=9
    #load from preset
    load_from_file=True
    yamlfile_spec="adc_sar_spec.yaml"
    yamlfile_size="adc_sar_size.yaml"
    if load_from_file==True:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated in modern PyYAML; consider
        # yaml.safe_load for these local config files.
        with open(yamlfile_spec, 'r') as stream:
            specdict = yaml.load(stream)
        with open(yamlfile_size, 'r') as stream:
            sizedict = yaml.load(stream)
        num_bits=specdict['n_bit']
        if specdict['samp_use_laygo'] is True:
            samp_lib = 'adc_sar_generated'
            samp_name = 'sarsamp_bb'
        else:
            laygen.load_template(filename=samp_lib+'.yaml', libname=samp_lib)
    #yamlfile_system_input="adc_sar_dsn_system_input.yaml"
    #if load_from_file==True:
    #    with open(yamlfile_system_input, 'r') as stream:
    #        sysdict_i = yaml.load(stream)
    #    num_bits=sysdict_i['n_bit']
    #sar generation
    cellname='sar_wsamp_bb_doubleSA_pe' #_'+str(num_bits)+'b'
    sar_name = 'sar_doubleSA_bb_pe' #_'+str(num_bits)+'b'
    space_1x_name = 'space_1x'
    print(cellname+" generating")
    mycell_list.append(cellname)
    laygen.add_cell(cellname)
    laygen.sel_cell(cellname)
    generate_sar_wsamp(laygen, objectname_pfix='SA0', workinglib=workinglib, samp_lib=samp_lib, space_1x_lib=logictemplib, sar_name=sar_name, samp_name=samp_name, space_1x_name=space_1x_name,
                       placement_grid=pg, routing_grid_m5m6=rg_m5m6, routing_grid_m5m6_thick=rg_m5m6_thick, routing_grid_m5m6_thick_basic=rg_m5m6_thick_basic,
                       num_bits=num_bits, origin=np.array([0, 0]))
    laygen.add_template_from_cell()
    laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)
    #bag export, if bag does not exist, gds export
    import imp
    try:
        imp.find_module('bag')
        import bag
        prj = bag.BagProject()
        for mycell in mycell_list:
            laygen.sel_cell(mycell)
            laygen.export_BAG(prj, array_delimiter=['[', ']'])
    except ImportError:
        laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+".layermap") # change layermapfile
|
<gh_stars>1-10
import random
import numpy as np
import math
from baseline.utils import export
# Names decorated with @exporter below are appended to __all__ by
# baseline.utils.export, declaring the module's public API.
__all__ = []
exporter = export(__all__)
@exporter
class DataFeed(object):
    """Iterable collection that yields one epoch of batched data.

    Subclasses implement ``_batch`` to materialize the batch for a given
    step index.  Iteration walks all ``steps`` of an epoch, optionally in a
    freshly-permuted order when ``shuffle`` is set.
    """
    def __init__(self):
        self.steps = 0
        self.shuffle = False

    def _batch(self, i):
        pass

    def __getitem__(self, i):
        return self._batch(i)

    def __iter__(self):
        if self.shuffle:
            order = np.random.permutation(np.arange(self.steps))
        else:
            order = np.arange(self.steps)
        for step in order:
            yield self._batch(step)

    def __len__(self):
        return self.steps
@exporter
class ExampleDataFeed(DataFeed):
    """Abstract base class that works on a list of examples
    """
    def __init__(self, examples, batchsz, **kwargs):
        """Constructor from a list of examples

        Use the examples requested to provide data.  Options for batching and
        shuffling are supported, along with some optional processing function
        pointers.

        :param examples: A list of examples
        :param batchsz: Batch size per step
        :param kwargs: See below

        :Keyword Arguments:
            * *shuffle* -- Shuffle the data per epoch? Defaults to `False`
            * *vec_alloc* -- Allocate a new tensor.  Defaults to ``numpy.zeros``
            * *vec_shape* -- Function to retrieve tensor shape.  Defaults to
              ``numpy.shape``
            * *trim* -- Trim batches to the maximum length seen in the batch
              (defaults to `False`).  This can lead to batches being shorter
              than the maximum length provided to the system.  Not supported
              in all frameworks.
            * *src_vec_trans* -- A transform function to use on the source
              tensor (`None`)
        """
        super(ExampleDataFeed, self).__init__()
        self.examples = examples
        self.batchsz = batchsz
        self.shuffle = bool(kwargs.get('shuffle', False))
        self.vec_alloc = kwargs.get('vec_alloc', np.zeros)
        self.vec_shape = kwargs.get('vec_shape', np.shape)
        self.src_vec_trans = kwargs.get('src_vec_trans', None)
        # Whole batches per epoch; a trailing partial batch is dropped.
        # (Plain floor division replaces the old float-based
        # int(math.floor(len/float(batchsz))), which risked rounding error
        # on very large datasets and was needlessly convoluted.)
        self.steps = len(self.examples) // batchsz
        self.trim = bool(kwargs.get('trim', False))
@exporter
class SeqLabelExamples(object):
    """Unstructured prediction examples

    Datasets of paired `(x, y)` data, where `x` is a tensor of data over time
    and `y` is a single label.  Examples are stored either as a list of dict
    objects (``FORMAT_OBJS``) or a single dict of pre-stacked vectors
    (``FORMAT_VECS``).
    """
    FORMAT_OBJS = 0
    FORMAT_VECS = 1
    SEQ = 'x'
    SEQ_CHAR = 'xch'
    LABEL = 'y'          # (previously declared twice; duplicate removed)
    SEQ_LENGTH = 'lengths'
    # Per-example scalar fields that are never column-trimmed
    SCALARS = {SEQ_LENGTH, LABEL}

    def __init__(self, example_list, do_shuffle=True, do_sort=False):
        """Constructor

        :param example_list: A list of examples (or a dict of vectors)
        :param do_shuffle: (``bool``) Shuffle the data? Defaults to `True`
        :param do_sort: (``bool``) Sort the data by length.  Defaults to `False`
        """
        self.example_list = example_list
        self.data_format = SeqLabelExamples.FORMAT_VECS if type(self.example_list) is dict else SeqLabelExamples.FORMAT_OBJS
        if do_shuffle and self.data_format == SeqLabelExamples.FORMAT_OBJS:
            random.shuffle(self.example_list)
        if do_sort:
            if self.data_format == SeqLabelExamples.FORMAT_OBJS:
                # Consistency fix: sort on this class's own length key instead
                # of reaching into SeqWordCharTagExamples (both are 'lengths').
                self.example_list = sorted(self.example_list, key=lambda x: x[SeqLabelExamples.SEQ_LENGTH])
            else:
                print('Warning: pre-sorting by length not yet supported in vector format, use objs')

    def __getitem__(self, i):
        """Get a single example

        :param i: (``int``) simple index
        :return: an example
        """
        if self.data_format == SeqLabelExamples.FORMAT_OBJS:
            return self.example_list[i]
        obj = dict((k, self.example_list[k][i]) for k in self.example_list.keys())
        return obj

    def __len__(self):
        """Number of examples

        :return: (``int``) length of data
        """
        if self.data_format == SeqLabelExamples.FORMAT_OBJS:
            return len(self.example_list)
        return len(self.example_list[SeqLabelExamples.SEQ])

    def width(self):
        """Width of the temporal signal

        :return: (``int``) length
        """
        if self.data_format == SeqLabelExamples.FORMAT_OBJS:
            x_at_0 = self.example_list[0][SeqLabelExamples.SEQ]
        else:
            x_at_0 = self.example_list[SeqLabelExamples.SEQ][0]
        return len(x_at_0)

    def _trim_batch(self, batch, keys, max_src_len):
        """Clip each temporal field of *batch* to *max_src_len* columns."""
        for k in keys:
            if k == SeqLabelExamples.SEQ_CHAR:
                # char tensor is 3D: clip the middle (time) axis only
                batch[k] = batch[k][:, 0:max_src_len, :]
            elif k in SeqLabelExamples.SCALARS:
                pass
            else:
                batch[k] = batch[k][:, :max_src_len]
        return batch

    def _batch_objs(self, start, batchsz, trim, vec_alloc, vec_shape):
        """Get a batch of data from object-format examples

        :param start: (``int``) The step index
        :param batchsz: (``int``) The batch size
        :param trim: (``bool``) Track the maximum length seen in the batch
        :param vec_alloc: A vector allocator
        :param vec_shape: A vector shape function
        :return: ``(batch dict, field keys, max source length seen)``
        """
        # NOTE(review): the example at index `start` (a *step* index, not a
        # row index) is used as the allocation shape template -- assumes all
        # examples share one padded shape; confirm.
        ex = self.example_list[start]
        keys = ex.keys()
        batch = {}
        for k in keys:
            field = ex[k]
            # dtype=int replaces the removed numpy alias np.int (NumPy >= 1.24)
            if np.isscalar(field):
                batch[k] = vec_alloc(batchsz, dtype=int)
            else:
                batch[k] = vec_alloc([batchsz] + list(vec_shape(ex[k])), dtype=int)
        sz = len(self.example_list)
        idx = start * batchsz
        max_src_len = 0
        for i in range(batchsz):
            if idx >= sz:
                # wrap around so the final (short) step still fills the batch
                idx = 0
            ex = self.example_list[idx]
            for k in keys:
                batch[k][i] = ex[k]
            if trim:
                # Consistency fix: use this class's own SEQ_LENGTH key.
                max_src_len = max(max_src_len, ex[SeqLabelExamples.SEQ_LENGTH])
            idx += 1
        return batch, keys, max_src_len

    def batch(self, start, batchsz, trim=False, vec_alloc=np.empty, vec_shape=np.shape):
        """Get a batch of data

        :param start: (``int``) The step index
        :param batchsz: (``int``) The batch size
        :param trim: (``bool``) Trim to maximum length in a batch
        :param vec_alloc: A vector allocator, defaults to `numpy.empty`
        :param vec_shape: A vector shape function, defaults to `numpy.shape`
        :return: dict of batched fields
        """
        if self.data_format == SeqLabelExamples.FORMAT_OBJS:
            batch, keys, max_src_len = self._batch_objs(start, batchsz, trim, vec_alloc, vec_shape)
            # Column trimming only applies to the object format, where true
            # per-example lengths were observed while batching.
            return self._trim_batch(batch, keys, max_src_len) if trim else batch
        # Vector format: slice rows out of the pre-stacked arrays.
        # (Bug fix: the old code fell through to _trim_batch with an
        # undefined max_src_len here, raising NameError whenever trim=True.)
        keys = self.example_list.keys()
        batch = {}
        idx = start * batchsz
        for k in keys:
            vec = self.example_list[k]
            if trim:
                batch[k] = vec[idx:idx+batchsz, ].copy()
            else:
                batch[k] = vec[idx:idx+batchsz, ]
        return batch
@exporter
class SeqLabelDataFeed(ExampleDataFeed):
    """Data feed over :class:`SeqLabelExamples` datasets."""

    def __init__(self, examples, batchsz, **kwargs):
        super(SeqLabelDataFeed, self).__init__(examples, batchsz, **kwargs)

    def _batch(self, i):
        """Produce the batch for step *i*.

        :param i: (``int``) step index
        :return: dict batch, with the word-sequence field optionally run
            through ``src_vec_trans``
        """
        data = self.examples.batch(
            i, self.batchsz, trim=self.trim,
            vec_alloc=self.vec_alloc, vec_shape=self.vec_shape)
        if self.src_vec_trans is not None:
            seq_key = SeqLabelExamples.SEQ
            data[seq_key] = self.src_vec_trans(data[seq_key])
        return data
@exporter
class SeqWordCharTagExamples(object):
    """Examples of sequences of words, characters and tags.

    Each example is a dict keyed by the constants below: word ids (`SEQ_WORD`),
    a (sequence-length x max-word-length) character-id matrix (`SEQ_CHAR`),
    tag ids (`SEQ_TAG`), and scalar length/id fields.
    """
    SEQ_WORD = 'x'
    SEQ_CHAR = 'xch'  # FIX: this constant was defined twice; duplicate removed
    SEQ_TAG = 'y'
    SEQ_LEN = 'lengths'
    SEQ_ID = 'ids'
    # Keys whose per-example value is a scalar rather than a sequence.
    SCALARS = [SEQ_LEN, SEQ_ID]

    def __init__(self, example_list, do_shuffle=True, do_sort=True):
        """Constructor

        :param example_list: A list of examples
        :param do_shuffle: (``bool``) Shuffle the data? Defaults to `True`
        :param do_sort: (``bool``) Sort the data by sequence length. Defaults to `True`
        """
        self.example_list = example_list
        if do_shuffle:
            random.shuffle(self.example_list)
        if do_sort:
            self.example_list = sorted(self.example_list, key=lambda x: x[SeqWordCharTagExamples.SEQ_LEN])

    def __getitem__(self, i):
        """Get `ith` example

        :param i: (``int``) index of example
        :return: the example dict
        """
        return self.example_list[i]

    def __len__(self):
        """Get the number of examples

        :return: (``int``) number of examples
        """
        return len(self.example_list)

    def batch(self, start, batchsz, trim=False, vec_alloc=np.empty, vec_shape=np.shape):
        """Get a batch of data

        :param start: (``int``) The step index
        :param batchsz: (``int``) The batch size; short batches wrap around to index 0
        :param trim: (``bool``) Trim to maximum sequence length in the batch
        :param vec_alloc: A vector allocator, defaults to `numpy.empty`
        :param vec_shape: A vector shape function, defaults to `numpy.shape`
        :return: dict of batched tensors keyed like the examples
        """
        # Probe one example for the padded signal length and max word width.
        ex = self.example_list[start]
        keys = ex.keys()
        siglen, maxw = vec_shape(ex[SeqWordCharTagExamples.SEQ_CHAR])
        batch = {}
        # NOTE: np.int was a deprecated alias for the builtin int and has been
        # removed from numpy; use int directly.
        for k in keys:
            if k == SeqWordCharTagExamples.SEQ_CHAR:
                batch[k] = vec_alloc((batchsz, siglen, maxw), dtype=int)
            elif k in SeqWordCharTagExamples.SCALARS:
                batch[k] = vec_alloc((batchsz), dtype=int)
            else:
                batch[k] = vec_alloc((batchsz, siglen), dtype=int)
        sz = len(self.example_list)
        idx = start * batchsz
        max_src_len = 0
        for i in range(batchsz):
            if idx >= sz:
                idx = 0  # wrap around rather than emit a short batch
            ex = self.example_list[idx]
            for k in keys:
                batch[k][i] = ex[k]
            max_src_len = max(max_src_len, ex[SeqWordCharTagExamples.SEQ_LEN])
            idx += 1
        if trim:
            for k in keys:
                if k == SeqWordCharTagExamples.SEQ_CHAR:
                    batch[k] = batch[k][:, 0:max_src_len, :]
                elif k in SeqWordCharTagExamples.SCALARS:
                    pass
                else:
                    # BUG FIX: was batch[k][:0, max_src_len], which produced an
                    # empty slice instead of trimming padding columns.
                    batch[k] = batch[k][:, 0:max_src_len]
        return batch
@exporter
class SeqWordCharLabelDataFeed(ExampleDataFeed):
    """Feed object for sequential prediction training data."""

    def __init__(self, examples, batchsz, **kwargs):
        super(SeqWordCharLabelDataFeed, self).__init__(examples, batchsz, **kwargs)

    def _batch(self, i):
        """Delegate batching for step ``i`` to the wrapped examples object."""
        return self.examples.batch(
            i,
            self.batchsz,
            self.trim,
            self.vec_alloc,
            self.vec_shape,
        )
@exporter
class Seq2SeqExamples(object):
    """Paired (source, target) training examples."""

    # Positional indices into each example record.
    SRC = 0
    TGT = 1
    SRC_LEN = 2
    TGT_LEN = 3

    def __init__(self, example_list, do_shuffle=True, do_sort=True):
        """Constructor

        :param example_list: Training pair examples
        :param do_shuffle: Shuffle the data (defaults to `True`)
        :param do_sort: Sort the data by source length (defaults to `True`)
        """
        self.example_list = example_list
        if do_shuffle:
            random.shuffle(self.example_list)
        if do_sort:
            self.example_list = sorted(self.example_list, key=lambda x: x[Seq2SeqExamples.SRC_LEN])

    def __getitem__(self, i):
        """Get the `ith` example from the training data

        :param i: (``int``) integer offset
        :return: Example of `SRC`, `TGT`, `SRC_LEN`, `TGT_LEN`
        """
        ex = self.example_list[i]
        return ex[Seq2SeqExamples.SRC], ex[Seq2SeqExamples.TGT], ex[Seq2SeqExamples.SRC_LEN], ex[Seq2SeqExamples.TGT_LEN]

    def __len__(self):
        """Number of examples."""
        return len(self.example_list)

    def batch(self, start, batchsz, trim=False, vec_alloc=np.zeros):
        """Get a batch of data

        :param start: (``int``) The step index
        :param batchsz: (``int``) The batch size; short batches wrap around to index 0
        :param trim: (``bool``) Trim to maximum length in a batch
        :param vec_alloc: A vector allocator, defaults to `numpy.zeros`
            (docstring previously claimed `numpy.empty` and a nonexistent
            `vec_shape` parameter -- both corrected)
        :return: batched source vector, target vector, source lengths, target lengths
        """
        # Probe the first example for the padded signal lengths.
        sig_src_len = len(self.example_list[0][Seq2SeqExamples.SRC])
        sig_tgt_len = len(self.example_list[0][Seq2SeqExamples.TGT])
        # NOTE: np.int was removed from numpy; the builtin int is equivalent.
        srcs = vec_alloc((batchsz, sig_src_len), dtype=int)
        tgts = vec_alloc((batchsz, sig_tgt_len), dtype=int)
        src_lens = vec_alloc((batchsz), dtype=int)
        tgt_lens = vec_alloc((batchsz), dtype=int)
        sz = len(self.example_list)
        max_src_len = 0
        max_tgt_len = 0
        idx = start * batchsz
        for i in range(batchsz):
            if idx >= sz:
                idx = 0  # wrap around rather than emit a short batch
            example = self.example_list[idx]
            srcs[i] = example[Seq2SeqExamples.SRC]
            tgts[i] = example[Seq2SeqExamples.TGT]
            src_lens[i] = example[Seq2SeqExamples.SRC_LEN]
            tgt_lens[i] = example[Seq2SeqExamples.TGT_LEN]
            max_src_len = max(max_src_len, src_lens[i])
            max_tgt_len = max(max_tgt_len, tgt_lens[i])
            idx += 1
        if trim:
            # Cut padding columns beyond the longest sequence in this batch.
            srcs = srcs[:, 0:max_src_len]
            tgts = tgts[:, 0:max_tgt_len]
        return srcs, tgts, src_lens, tgt_lens
@exporter
def reverse_2nd(vec):
    """Time-reverse a `B x T` numpy array (flip each row).

    :param vec: vector to time-reverse
    :return: Time-reversed vector
    """
    return np.flip(vec, axis=1)
@exporter
class Seq2SeqDataFeed(ExampleDataFeed):
    """Data feed of paired (source, target) examples."""

    def __init__(self, examples, batchsz, **kwargs):
        super(Seq2SeqDataFeed, self).__init__(examples, batchsz, **kwargs)

    def _batch(self, i):
        """Fetch step ``i`` and package it as a dict of src/dst tensors."""
        source, target, source_length, target_length = self.examples.batch(
            i, self.batchsz, self.trim, self.vec_alloc
        )
        if self.src_vec_trans is not None:
            source = self.src_vec_trans(source)
        return {'src': source, 'dst': target, 'src_len': source_length, 'dst_len': target_length}
# This one is a little different at the moment
@exporter
class SeqWordCharDataFeed(DataFeed):
    """Data feed to return language modeling training data."""

    def __init__(self, x, xch, nbptt, batchsz, maxw):
        """Constructor

        :param x: word tensor
        :param xch: character tensor
        :param nbptt: Number of steps of BPTT
        :param batchsz: Batch size
        :param maxw: The maximum word length
        """
        super(SeqWordCharDataFeed, self).__init__()
        num_examples = x.shape[0]
        rest = num_examples // batchsz
        # If rest is an exact multiple of nbptt there is no room for the
        # shifted-by-one `y` slice in the last step, so drop one column.
        if rest % nbptt == 0:
            rest = rest - 1
        # BUG FIX: steps was previously computed from the *unreduced* rest, so
        # when the data divided evenly, the final batch's x/y slices ran past
        # the truncated array and the reshape in _batch failed.
        self.steps = rest // nbptt
        self.stride_ch = nbptt * maxw
        trunc = batchsz * rest
        print('Truncating from %d to %d' % (num_examples, trunc))
        self.x = x[:trunc].reshape((batchsz, rest))
        xch = xch.flatten()
        trunc = batchsz * rest * maxw
        print('Truncated from %d to %d' % (xch.shape[0], trunc))
        self.xch = xch[:trunc].reshape((batchsz, rest * maxw))
        self.nbptt = nbptt
        self.batchsz = batchsz
        self.wsz = maxw

    def _batch(self, i):
        """Return the step-``i`` dict: 'x' words, 'xch' chars, 'y' next-word targets."""
        return {
            'x': self.x[:, i*self.nbptt:(i+1)*self.nbptt].reshape((self.batchsz, self.nbptt)),
            'xch': self.xch[:, i*self.stride_ch:(i+1)*self.stride_ch].reshape((self.batchsz, self.nbptt, self.wsz)),
            'y': self.x[:, i*self.nbptt+1:(i+1)*self.nbptt+1].reshape((self.batchsz, self.nbptt))
        }
|
import numpy as np
from aerosandbox import ExplicitAnalysis
from aerosandbox.geometry import *
from aerosandbox.performance import OperatingPoint
from aerosandbox.aerodynamics.aero_3D.singularities.uniform_strength_horseshoe_singularities import \
calculate_induced_velocity_horseshoe
from typing import Dict, Any
### Define some helper functions that take a vector and make it a Nx1 or 1xN, respectively.
# Useful for broadcasting with matrices later.
def tall(array):
    """Reshape *array* into an Nx1 column, for broadcasting against matrices."""
    column = np.reshape(array, (-1, 1))
    return column
def wide(array):
    """Reshape *array* into a 1xN row, for broadcasting against matrices."""
    row = np.reshape(array, (1, -1))
    return row
class VortexLatticeMethod(ExplicitAnalysis):
"""
An explicit (linear) vortex-lattice-method aerodynamics analysis.
Usage example:
>>> analysis = asb.VortexLatticeMethod(
>>> airplane=my_airplane,
>>> op_point=asb.OperatingPoint(
>>> velocity=100, # m/s
>>> alpha=5, # deg
>>> beta=4, # deg
>>> p=0.01, # rad/sec
>>> q=0.02, # rad/sec
>>> r=0.03, # rad/sec
>>> )
>>> )
>>> aero_data = analysis.run()
>>> analysis.draw()
"""
def __init__(self,
             airplane: Airplane,
             op_point: OperatingPoint,
             run_symmetric_if_possible: bool = False,
             verbose: bool = False,
             spanwise_resolution: int = 10,
             spanwise_spacing: str = "cosine",
             chordwise_resolution: int = 10,
             chordwise_spacing: str = "cosine",
             vortex_core_radius: float = 1e-8,
             align_trailing_vortices_with_wind: bool = False,
             ):
    """
    Args:
        airplane: the Airplane geometry to analyze
        op_point: the OperatingPoint (velocity, alpha, beta, rotation rates)
        run_symmetric_if_possible: request a half-span symmetric solve;
            currently always raises NotImplementedError when True
        verbose: print progress messages during run()/calculate_streamlines()
        spanwise_resolution: panels per spanwise section
        spanwise_spacing: spanwise node spacing ("cosine" by default --
            presumably forwarded to Wing.mesh_thin_surface; confirm options there)
        chordwise_resolution: panels per chord
        chordwise_spacing: chordwise node spacing (same caveat as above)
        vortex_core_radius: regularization radius for the vortex filaments
        align_trailing_vortices_with_wind: trail the wake along the freestream
            direction instead of the +x geometry axis
    """
    super().__init__()
    self.airplane = airplane
    self.op_point = op_point
    self.verbose = verbose
    self.spanwise_resolution = spanwise_resolution
    self.spanwise_spacing = spanwise_spacing
    self.chordwise_resolution = chordwise_resolution
    self.chordwise_spacing = chordwise_spacing
    self.vortex_core_radius = vortex_core_radius
    self.align_trailing_vortices_with_wind = align_trailing_vortices_with_wind
    ### Determine whether you should run the problem as symmetric
    self.run_symmetric = False
    if run_symmetric_if_possible:
        raise NotImplementedError("VLM with symmetry detection not yet implemented!")
        # try:
        #     self.run_symmetric = ( # Satisfies assumptions
        #         self.op_point.beta == 0 and
        #         self.op_point.p == 0 and
        #         self.op_point.r == 0 and
        #         self.airplane.is_entirely_symmetric()
        #     )
        # except RuntimeError: # Required because beta, p, r, etc. may be non-numeric (e.g. opti variables)
        #     pass
def run(self) -> Dict[str, Any]:
    """
    Mesh the airplane, solve the vortex-lattice linear system, and compute loads.

    Returns a dict with dimensional loads (L, D, Y, l, m, n), their
    coefficients (CL, CD, CY, Cl, Cm, Cn), and total force/moment vectors in
    geometry and wind axes (F_g, F_w, M_g, M_w). Intermediate panel data is
    also stored on the instance for later use (e.g. by draw()).
    """
    if self.verbose:
        print("Meshing...")
    ##### Make Panels
    # Accumulate one quad panel per mesh face, across all wings.
    front_left_vertices = []
    back_left_vertices = []
    back_right_vertices = []
    front_right_vertices = []
    is_trailing_edge = []
    for wing in self.airplane.wings:
        points, faces = wing.mesh_thin_surface(
            method="quad",
            chordwise_resolution=self.chordwise_resolution,
            chordwise_spacing=self.chordwise_spacing,
            spanwise_resolution=self.spanwise_resolution,
            spanwise_spacing=self.spanwise_spacing,
            add_camber=True
        )
        front_left_vertices.append(points[faces[:, 0], :])
        back_left_vertices.append(points[faces[:, 1], :])
        back_right_vertices.append(points[faces[:, 2], :])
        front_right_vertices.append(points[faces[:, 3], :])
        # Every chordwise_resolution-th face ends a chord strip -> trailing edge.
        is_trailing_edge.append(
            (np.arange(len(faces)) + 1) % self.chordwise_resolution == 0
        )
    front_left_vertices = np.concatenate(front_left_vertices)
    back_left_vertices = np.concatenate(back_left_vertices)
    back_right_vertices = np.concatenate(back_right_vertices)
    front_right_vertices = np.concatenate(front_right_vertices)
    is_trailing_edge = np.concatenate(is_trailing_edge)
    ### Compute panel statistics
    # Panel normal and area from the cross product of the two diagonals.
    diag1 = front_right_vertices - back_left_vertices
    diag2 = front_left_vertices - back_right_vertices
    cross = np.cross(diag1, diag2)
    cross_norm = np.linalg.norm(cross, axis=1)
    normal_directions = cross / tall(cross_norm)
    areas = cross_norm / 2
    # Compute the location of points of interest on each panel:
    # bound vortex leg at quarter-chord, collocation point at three-quarter-chord.
    left_vortex_vertices = 0.75 * front_left_vertices + 0.25 * back_left_vertices
    right_vortex_vertices = 0.75 * front_right_vertices + 0.25 * back_right_vertices
    vortex_centers = (left_vortex_vertices + right_vortex_vertices) / 2
    vortex_bound_leg = right_vortex_vertices - left_vortex_vertices
    collocation_points = (
        0.5 * (0.25 * front_left_vertices + 0.75 * back_left_vertices) +
        0.5 * (0.25 * front_right_vertices + 0.75 * back_right_vertices)
    )
    ### Save things to the instance for later access
    self.front_left_vertices = front_left_vertices
    self.back_left_vertices = back_left_vertices
    self.back_right_vertices = back_right_vertices
    self.front_right_vertices = front_right_vertices
    self.is_trailing_edge = is_trailing_edge
    self.normal_directions = normal_directions
    self.areas = areas
    self.left_vortex_vertices = left_vortex_vertices
    self.right_vortex_vertices = right_vortex_vertices
    self.vortex_centers = vortex_centers
    self.vortex_bound_leg = vortex_bound_leg
    self.collocation_points = collocation_points
    ##### Setup Operating Point
    if self.verbose:
        print("Calculating the freestream influence...")
    steady_freestream_velocity = self.op_point.compute_freestream_velocity_geometry_axes()  # Direction the wind is GOING TO, in geometry axes coordinates
    steady_freestream_direction = steady_freestream_velocity / np.linalg.norm(steady_freestream_velocity)
    rotation_freestream_velocities = self.op_point.compute_rotation_velocity_geometry_axes(
        collocation_points)
    freestream_velocities = np.add(wide(steady_freestream_velocity), rotation_freestream_velocities)
    # Nx3, represents the freestream velocity at each panel collocation point (c)
    # Normal component of the onset flow: the right-hand side of the linear system.
    freestream_influences = np.sum(freestream_velocities * normal_directions, axis=1)
    ### Save things to the instance for later access
    self.steady_freestream_velocity = steady_freestream_velocity
    self.steady_freestream_direction = steady_freestream_direction
    self.freestream_velocities = freestream_velocities
    ##### Setup Geometry
    ### Calculate AIC matrix
    if self.verbose:
        print("Calculating the collocation influence matrix...")
    # Unit-strength induced velocity of every horseshoe (columns) at every
    # collocation point (rows).
    u_collocations_unit, v_collocations_unit, w_collocations_unit = calculate_induced_velocity_horseshoe(
        x_field=tall(collocation_points[:, 0]),
        y_field=tall(collocation_points[:, 1]),
        z_field=tall(collocation_points[:, 2]),
        x_left=wide(left_vortex_vertices[:, 0]),
        y_left=wide(left_vortex_vertices[:, 1]),
        z_left=wide(left_vortex_vertices[:, 2]),
        x_right=wide(right_vortex_vertices[:, 0]),
        y_right=wide(right_vortex_vertices[:, 1]),
        z_right=wide(right_vortex_vertices[:, 2]),
        trailing_vortex_direction=steady_freestream_direction if self.align_trailing_vortices_with_wind else np.array([1, 0, 0]),
        gamma=1,
        vortex_core_radius=self.vortex_core_radius
    )
    # Project onto the panel normals to form the aerodynamic influence coefficients.
    AIC = (
        u_collocations_unit * tall(normal_directions[:, 0]) +
        v_collocations_unit * tall(normal_directions[:, 1]) +
        w_collocations_unit * tall(normal_directions[:, 2])
    )
    ##### Calculate Vortex Strengths
    if self.verbose:
        print("Calculating vortex strengths...")
    # Enforce flow tangency at each collocation point: AIC @ gamma = -RHS.
    self.vortex_strengths = np.linalg.solve(AIC, -freestream_influences)
    ##### Calculate forces
    ### Calculate Near-Field Forces and Moments
    # Governing Equation: The force on a straight, small vortex filament is F = rho * cross(V, l) * gamma,
    # where rho is density, V is the velocity vector, cross() is the cross product operator,
    # l is the vector of the filament itself, and gamma is the circulation.
    if self.verbose:
        print("Calculating forces on each panel...")
    # Calculate the induced velocity at the center of each bound leg
    V_centers = self.get_velocity_at_points(vortex_centers)
    # Calculate forces_inviscid_geometry, the force on the ith panel. Note that this is in GEOMETRY AXES,
    # not WIND AXES or BODY AXES.
    Vi_cross_li = np.cross(V_centers, vortex_bound_leg, axis=1)
    forces_geometry = self.op_point.atmosphere.density() * Vi_cross_li * tall(self.vortex_strengths)
    # Moments are taken about the airplane's reference point.
    moments_geometry = np.cross(
        np.add(vortex_centers, -wide(self.airplane.xyz_ref)),
        forces_geometry
    )
    # Calculate total forces and moments
    force_geometry = np.sum(forces_geometry, axis=0)
    moment_geometry = np.sum(moments_geometry, axis=0)
    force_wind = self.op_point.convert_axes(
        force_geometry[0], force_geometry[1], force_geometry[2],
        from_axes="geometry",
        to_axes="wind"
    )
    moment_wind = self.op_point.convert_axes(
        moment_geometry[0], moment_geometry[1], moment_geometry[2],
        from_axes="geometry",
        to_axes="wind"
    )
    ### Save things to the instance for later access
    self.forces_geometry = forces_geometry
    self.moments_geometry = moments_geometry
    self.force_geometry = force_geometry
    self.force_wind = force_wind
    self.moment_geometry = moment_geometry
    self.moment_wind = moment_wind
    # Calculate dimensional forces (wind axes: lift is -z, drag is -x)
    L = -force_wind[2]
    D = -force_wind[0]
    Y = force_wind[1]
    l = moment_wind[0]  # TODO review axes
    m = moment_wind[1]
    n = moment_wind[2]
    # Calculate nondimensional forces
    q = self.op_point.dynamic_pressure()
    s_ref = self.airplane.s_ref
    b_ref = self.airplane.b_ref
    c_ref = self.airplane.c_ref
    CL = L / q / s_ref
    CD = D / q / s_ref
    CY = Y / q / s_ref
    Cl = l / q / s_ref / b_ref
    Cm = m / q / s_ref / c_ref
    Cn = n / q / s_ref / b_ref
    return {
        "L" : L,
        "D" : D,
        "Y" : Y,
        "l" : l,
        "m" : m,
        "n" : n,
        "CL" : CL,
        "CD" : CD,
        "CY" : CY,
        "Cl" : Cl,
        "Cm" : Cm,
        "Cn" : Cn,
        "F_g": force_geometry,
        "F_w": force_wind,
        "M_g": moment_geometry,
        "M_w": moment_wind
    }
def get_induced_velocity_at_points(self, points: np.ndarray) -> np.ndarray:
    """
    Computes the induced velocity at a set of points in the flowfield.

    Requires a prior run() so that the vortex geometry and solved
    vortex_strengths are available on the instance.

    Args:
        points: A Nx3 array of points that you would like to know the induced velocities at. Given in geometry axes.

    Returns: A Nx3 of the induced velocity at those points. Given in geometry axes.
    """
    # Velocity contribution of every horseshoe (columns) at every query
    # point (rows), scaled by the solved circulation strengths.
    u_induced, v_induced, w_induced = calculate_induced_velocity_horseshoe(
        x_field=tall(points[:, 0]),
        y_field=tall(points[:, 1]),
        z_field=tall(points[:, 2]),
        x_left=wide(self.left_vortex_vertices[:, 0]),
        y_left=wide(self.left_vortex_vertices[:, 1]),
        z_left=wide(self.left_vortex_vertices[:, 2]),
        x_right=wide(self.right_vortex_vertices[:, 0]),
        y_right=wide(self.right_vortex_vertices[:, 1]),
        z_right=wide(self.right_vortex_vertices[:, 2]),
        trailing_vortex_direction=self.steady_freestream_direction if self.align_trailing_vortices_with_wind else np.array([1, 0, 0]),
        gamma=wide(self.vortex_strengths),
        vortex_core_radius=self.vortex_core_radius
    )
    # Sum the contributions over all horseshoes.
    u_induced = np.sum(u_induced, axis=1)
    v_induced = np.sum(v_induced, axis=1)
    w_induced = np.sum(w_induced, axis=1)
    V_induced = np.stack([
        u_induced, v_induced, w_induced
    ], axis=1)
    return V_induced
def get_velocity_at_points(self, points: np.ndarray) -> np.ndarray:
    """
    Computes the total velocity (freestream + rotation + induced) at a set of
    points in the flowfield.

    Args:
        points: A Nx3 array of query points, given in geometry axes.

    Returns: A Nx3 array of the velocity at those points, in geometry axes.
    """
    induced = self.get_induced_velocity_at_points(points)
    rotational = self.op_point.compute_rotation_velocity_geometry_axes(points)
    onset_flow = np.add(wide(self.steady_freestream_velocity), rotational)
    return induced + onset_flow
def calculate_streamlines(self,
                          seed_points: np.ndarray = None,
                          n_steps: int = 300,
                          length: float = None
                          ) -> np.ndarray:
    """
    Computes streamlines, starting at specific seed points.

    After running this function, a new instance variable `VortexLatticeFilaments.streamlines` is computed

    Uses simple forward-Euler integration with a fixed spatial stepsize (i.e., velocity vectors are normalized
    before ODE integration). After investigation, it's not worth doing fancier ODE integration methods (adaptive
    schemes, RK substepping, etc.), due to the near-singular conditions near vortex filaments.

    Args:
        seed_points: A Nx3 ndarray that contains a list of points where streamlines are started. Will be
            auto-calculated if not specified.
        n_steps: The number of individual streamline steps to trace. Minimum of 2.
        length: The approximate total length of the streamlines desired, in meters. Will be auto-calculated if
            not specified.

    Returns:
        streamlines: a 3D array with dimensions: (n_seed_points) x (3) x (n_steps).
        Consists of streamlines data.

        Result is also saved as an instance variable, VortexLatticeMethod.streamlines.
    """
    if self.verbose:
        print("Calculating streamlines...")
    if length is None:
        length = self.airplane.c_ref * 5
    if seed_points is None:
        # Default seeds: evenly spread points along every trailing-edge panel.
        left_TE_vertices = self.back_left_vertices[self.is_trailing_edge.astype(bool)]
        right_TE_vertices = self.back_right_vertices[self.is_trailing_edge.astype(bool)]
        N_streamlines_target = 200
        seed_points_per_panel = np.maximum(1, N_streamlines_target // len(left_TE_vertices))
        nondim_node_locations = np.linspace(0, 1, seed_points_per_panel + 1)
        # Seed at the midpoints between nodes, not at panel edges.
        nondim_seed_locations = (nondim_node_locations[1:] + nondim_node_locations[:-1]) / 2
        seed_points = np.concatenate([
            x * left_TE_vertices + (1 - x) * right_TE_vertices
            for x in nondim_seed_locations
        ])
    streamlines = np.empty((len(seed_points), 3, n_steps))
    streamlines[:, :, 0] = seed_points
    for i in range(1, n_steps):
        V = self.get_velocity_at_points(streamlines[:, :, i - 1])
        # Fixed spatial step: normalize V so each step advances length / n_steps.
        streamlines[:, :, i] = (
            streamlines[:, :, i - 1] +
            length / n_steps * V / tall(np.linalg.norm(V, axis=1))
        )
    self.streamlines = streamlines
    if self.verbose:
        print("Streamlines calculated.")
    return streamlines
def draw(self,
         c: np.ndarray = None,
         cmap: str = None,
         colorbar_label: str = None,
         show: bool = True,
         show_kwargs: Dict = None,
         draw_streamlines=True,
         recalculate_streamlines=False,
         backend: str = "pyvista"
         ):
    """
    Draws the solution. Note: Must be called on a SOLVED AeroProblem object.
    To solve an AeroProblem, use opti.solve(). To substitute a solved solution, use ap = ap.substitute_solution(sol).

    :param c: per-panel scalar to color by; defaults to the solved vortex strengths
    :param cmap: colormap name passed through to the backend
    :param colorbar_label: colorbar title (auto-set when c defaults)
    :param show: display the figure immediately
    :param show_kwargs: extra kwargs forwarded to the backend's show()
    :param draw_streamlines: also trace and draw streamlines
    :param recalculate_streamlines: force re-tracing even if cached
    :param backend: "pyvista" or "plotly"
    :return: the backend figure / plotter object
    """
    if show_kwargs is None:
        show_kwargs = {}
    if c is None:
        c = self.vortex_strengths
        colorbar_label = "Vortex Strengths"
    if draw_streamlines:
        # Streamlines are cached on the instance; only re-trace when asked.
        if (not hasattr(self, 'streamlines')) or recalculate_streamlines:
            self.calculate_streamlines()
    if backend == "plotly":
        from aerosandbox.visualization.plotly_Figure3D import Figure3D
        fig = Figure3D()
        # One quad per panel, colored by c.
        for i in range(len(self.front_left_vertices)):
            fig.add_quad(
                points=[
                    self.front_left_vertices[i, :],
                    self.back_left_vertices[i, :],
                    self.back_right_vertices[i, :],
                    self.front_right_vertices[i, :],
                ],
                intensity=c[i],
                outline=True,
            )
        if draw_streamlines:
            for i in range(self.streamlines.shape[0]):
                fig.add_streamline(self.streamlines[i, :, :].T)
        return fig.draw(
            show=show,
            colorbar_title=colorbar_label,
            **show_kwargs,
        )
    elif backend == "pyvista":
        import pyvista as pv
        plotter = pv.Plotter()
        plotter.title = "ASB VortexLatticeMethod"
        plotter.add_axes()
        plotter.show_grid(color='gray')
        ### Draw the airplane mesh
        points = np.concatenate([
            self.front_left_vertices,
            self.back_left_vertices,
            self.back_right_vertices,
            self.front_right_vertices
        ])
        N = len(self.front_left_vertices)
        range_N = np.arange(N)
        # Quad i indexes (i, i+N, i+2N, i+3N) into the stacked vertex array.
        faces = tall(range_N) + wide(np.array([0, 1, 2, 3]) * N)
        # NOTE(review): `mesh_utils` is not among this module's visible imports
        # (presumably aerosandbox's mesh utilities) -- confirm it is imported
        # elsewhere, otherwise this line raises NameError at runtime.
        mesh = pv.PolyData(
            *mesh_utils.convert_mesh_to_polydata_format(points, faces)
        )
        scalar_bar_args = {}
        if colorbar_label is not None:
            scalar_bar_args["title"] = colorbar_label
        plotter.add_mesh(
            mesh=mesh,
            scalars=c,
            show_edges=True,
            show_scalar_bar=c is not None,
            scalar_bar_args=scalar_bar_args,
            cmap=cmap,
        )
        ### Draw the streamlines
        if draw_streamlines:
            import aerosandbox.tools.pretty_plots as p
            for i in range(self.streamlines.shape[0]):
                plotter.add_mesh(
                    pv.Spline(self.streamlines[i, :, :].T),
                    color=p.adjust_lightness("#7700FF", 1.5),
                    opacity=0.7,
                    line_width=1
                )
        if show:
            plotter.show(**show_kwargs)
        return plotter
    else:
        raise ValueError("Bad value of `backend`!")
if __name__ == '__main__':
    ### Import Vanilla Airplane
    import aerosandbox as asb
    from pathlib import Path

    # The tutorial geometry ships inside the aerosandbox source tree.
    geometry_folder = Path(asb.__file__).parent.parent / "tutorial" / "04 - Geometry" / "example_geometry"

    import sys

    sys.path.insert(0, str(geometry_folder))

    from vanilla import airplane as vanilla

    ### Do the AVL run
    analysis = VortexLatticeMethod(
        airplane=vanilla,
        op_point=asb.OperatingPoint(
            atmosphere=asb.Atmosphere(altitude=0),
            velocity=10,
            alpha=0,
            beta=0,
            p=0,
            q=0,
            r=0,
        ),
        spanwise_resolution=12,
        chordwise_resolution=12,
    )
    res = analysis.run()
    # BUG FIX: res mixes scalars (L, CL, ...) with 3-vectors (F_g, F_w, M_g,
    # M_w); formatting an ndarray with ":.4f" raises TypeError, so fall back
    # to the default string representation for non-scalar values.
    for k, v in res.items():
        label = str(k).rjust(10)
        try:
            print(f"{label} : {v:.4f}")
        except (TypeError, ValueError):
            print(f"{label} : {v}")
|
#%% pytorch_tools
import torch
import torch.nn as nn
from tools.basics import product, bundle
from tools.record_keeper import RecordKeeper
#%% pytorch_tools
# Seed torch's global RNG at import time so runs are repeatable by default.
default_seed = 1
torch.manual_seed(default_seed)
# if gpu is to be used
# Module-level default compute device, shared by helpers below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# returns list of tensor sizes
def layer_output_shapes(network, input_shape=None):
    """Return the output size of each layer of *network* for a single input.

    A dummy batch of one (`torch.ones`) is pushed through the layers to
    discover the sizes, so every layer must accept a float tensor.

    :param network: an OrderedDict, list, or iterable container of layers
    :param input_shape: shape of one input sample, without the batch dim
    :return: list of torch.Size, one per non-loss layer
    :raises ValueError: if input_shape is not provided
    """
    from collections import OrderedDict
    # BUG FIX: input_shape=None previously fell through to torch.ones((1, *None))
    # and died with an opaque TypeError; fail fast with a clear message instead.
    if input_shape is None:
        raise ValueError("layer_output_shapes() requires an input_shape")
    # convert OrderedDict's to just lists
    if isinstance(network, OrderedDict):
        network = list(network.values())
    # convert lists to sequences
    if isinstance(network, list):
        network = nn.Sequential(*network)
    # run a forward pass with a dummy batch of one to figure out the sizes
    neuron_activations = torch.ones((1, *input_shape))
    sizes = []
    for layer in network:
        # loss layers don't transform activations; skip them
        if not isinstance(layer, torch.nn.modules.loss._Loss):
            neuron_activations = layer(neuron_activations)
            sizes.append(neuron_activations.size())
    return sizes
def read_image(file_path):
    """Read the image at *file_path* into a float tensor of shape (C, H, W)."""
    from PIL import Image
    import torchvision.transforms.functional as TF
    pil_image = Image.open(file_path)
    return TF.to_tensor(pil_image)
def tensor_to_image(tensor):
    """Convert a (C, H, W) torch tensor back into a PIL image."""
    import torchvision.transforms.functional as TF
    pil_image = TF.to_pil_image(tensor)
    return pil_image
def to_tensor(an_object):
    """Recursively convert a scalar, tensor, or nested iterable into a torch tensor.

    Tensors pass through unchanged; scalars become 0-dim tensors; iterables are
    converted element-wise, padded with leading size-1 dimensions so all
    elements have equal rank, and stacked into one float tensor.

    :raises Exception: if the elements' shapes (ignoring leading 1-dims) differ
    """
    from tools.basics import is_iterable
    # if already a tensor, just return
    if isinstance(an_object, torch.Tensor):
        return an_object
    # if scalar, wrap it with a tensor
    if not is_iterable(an_object):
        return torch.tensor(an_object)
    else:
        # materialize the iterable (it may be a generator)
        as_list = tuple([ each for each in an_object ])
        size_mismatch = False
        biggest_number_of_dimensions = 0
        non_one_dimensions = None
        converted_data = []
        # check the shapes of everything
        for each in as_list:
            tensor = to_tensor(each)
            converted_data.append(tensor)
            # Collect each element's shape with *leading* 1-sized dims stripped,
            # so e.g. (1, 3) and (3,) are considered compatible.
            skipping = True
            each_non_one_dimensions = []
            for index, each_dimension in enumerate(tensor.shape):
                # keep track of the largest rank seen so far
                if index+1 > biggest_number_of_dimensions:
                    biggest_number_of_dimensions += 1
                if each_dimension != 1:
                    skipping = False
                if skipping and each_dimension == 1:
                    continue
                else:
                    each_non_one_dimensions.append(each_dimension)
            # if uninitilized, remember the first element's stripped shape
            if non_one_dimensions is None:
                non_one_dimensions = list(each_non_one_dimensions)
            # otherwise make sure it matches the first element's shape
            else:
                if non_one_dimensions != each_non_one_dimensions:
                    size_mismatch = True
                    break
        if size_mismatch:
            sizes = "\n".join([ f"    {tuple(to_tensor(each).shape)}" for each in as_list])
            raise Exception(f'When converting an object to a torch tensor, there was an issue with the shapes not being uniform. All shapes need to be the same, but instead the shapes were:\n {sizes}')
        # make all the sizes the same by filling in the dimensions with a size of one
        reshaped_list = []
        for each in converted_data:
            shape = tuple(each.shape)
            number_of_dimensions = len(shape)
            number_of_missing_dimensions = biggest_number_of_dimensions - number_of_dimensions
            missing_dimensions_tuple = (1,)*number_of_missing_dimensions
            reshaped_list.append(torch.reshape(each, (*missing_dimensions_tuple, *shape)))
        # NOTE(review): the result is always cast to float, even for int inputs.
        return torch.stack(reshaped_list).type(torch.float)
class OneHotifier():
    """Maps between values from a fixed set and one-hot torch tensors."""

    def __init__(self, possible_values):
        """
        :param possible_values: iterable of distinct values; anything without
            an .index() method (sets, dicts, generators) is materialized into
            a tuple so positional lookup works
        """
        # BUG FIX: the old check was hasattr(__len__), which let sets/dicts
        # through even though they have no .index() -- to_onehot then crashed.
        if not hasattr(possible_values, "index"):
            possible_values = tuple(possible_values)
        self.possible_values = possible_values

    def to_onehot(self, value):
        """Return a one-hot tensor with a 1 at *value*'s position."""
        index = self.possible_values.index(value)
        return torch.nn.functional.one_hot(
            torch.tensor(index),
            len(self.possible_values)
        )

    def from_onehot(self, vector):
        """Return the value at the position of *vector*'s (first) maximum."""
        vector = to_tensor(vector)
        index_value = vector.max(0).indices
        return self.possible_values[index_value]
def onehot_argmax(tensor):
    """Return a tensor of the same shape with 1 wherever *tensor* attains its
    maximum (ties all get 1) and 0 elsewhere."""
    tensor = to_tensor(tensor)
    peak = max(each for each in tensor)
    result = torch.zeros_like(tensor)
    for position, value in enumerate(tensor):
        if value == peak:
            result[position] = 1
    return result
def from_onehot_batch(tensor_batch):
    """Collapse a batch of one-hot rows into a tensor of class indices,
    keeping the result on the input tensor's device."""
    original_device = tensor_batch.device if isinstance(tensor_batch, torch.Tensor) else None
    # make sure its a tensor
    tensor_batch = to_tensor(tensor_batch)
    indices = tensor_batch.max(1, keepdim=True).indices.squeeze()
    # send to same device
    if original_device:
        return indices.to(original_device)
    return indices
def from_onehot(tensor):
    """Return the position of the maximum of a one-hot vector as a python int."""
    as_tensor = to_tensor(tensor)
    index = as_tensor.max(0, keepdim=True).indices
    return index.squeeze().item()
def batch_input_and_output(inputs, outputs, batch_size):
    """Yield (input_batch, output_batch) tensor pairs of size *batch_size*."""
    from tools.basics import bundle
    paired_bundles = zip(bundle(inputs, batch_size), bundle(outputs, batch_size))
    for input_bundle, output_bundle in paired_bundles:
        yield to_tensor(input_bundle), to_tensor(output_bundle)
def unnormalize(mean, std, image):
    """Invert a torchvision Normalize(mean, std) transform on *image*."""
    import torchvision.transforms as transforms
    inverse_transform = transforms.Normalize((-mean / std), (1.0 / std))
    return inverse_transform(image)
from simple_namespace import namespace
@namespace
def Network():
def default_forward(self, input_data):
    """
    Run the network's layers over a single example or a batch.

    Uses:
        self.device
        self.input_shape
        self.output_shape

    Arguments:
        input_data:
            either an input image or batch of images
            should be a torch tensor with a shape of (batch_size, channels, height, width)
    Output:
        a torch tensor the shape of the latent space
    Examples:
        obj.forward(torch.tensor([
            # first image in batch
            [
                # red layer
                [
                    [ 1, 2, 3 ],
                    [ 4, 5, 6]
                ],
                # blue layer
                [
                    [ 1, 2, 3 ],
                    [ 4, 5, 6]
                ],
                # green layer
                [
                    [ 1, 2, 3 ],
                    [ 4, 5, 6]
                ],
            ]
        ]))
    """
    # converts to torch if needed
    input_data = to_tensor(input_data).type(torch.float).to(self.device)
    #
    # batch or not?
    #
    # An extra leading dimension relative to self.input_shape means a batch.
    is_a_batch = len(input_data.shape) > len(self.input_shape)
    if not is_a_batch:
        batch_size = 1
        # convert single images into a batch of one
        input_data = torch.reshape(input_data, (1, *input_data.shape))
        output_shape = self.output_shape
    else:
        batch_size = tuple(input_data.shape)[0]
        output_shape = (batch_size, *self.output_shape)
    #
    # forward pass
    #
    neuron_activations = input_data
    for each_layer in self.children():
        # if its not a loss function
        if not isinstance(each_layer, torch.nn.modules.loss._Loss):
            neuron_activations = each_layer(neuron_activations)
    # force the output to be the correct shape
    return torch.reshape(neuron_activations, output_shape)
def default_setup(self, config):
    """Apply common config-driven setup to a model.

    Recognized config keys: "seed", "suppress_output", "log_interval",
    "device", "record_keeper". Also detects pytorch-lightning modules and,
    for those, attaches a `new_trainer` factory instead of a `device` attr.

    :param config: dict of options (all optional)
    """
    # pytorch-lightning is an optional dependency; fall back gracefully
    try:
        import pytorch_lightning as pl
        LightningModule = pl.LightningModule
        Trainer = pl.Trainer
    except Exception as error:
        LightningModule = None
        Trainer = None
    self.setup_config = config
    self.seed = config.get("seed", default_seed)
    self.suppress_output = config.get("suppress_output", False)
    self.log_interval = config.get("log_interval", 10)
    self.hardware = config.get("device", torch.device("cuda" if torch.cuda.is_available() else "cpu"))
    # FIX: only construct a fresh RecordKeeper when one wasn't supplied;
    # config.get(..., RecordKeeper()) built one eagerly on every call.
    record_keeper = config["record_keeper"] if "record_keeper" in config else RecordKeeper()
    self.record_keeper = record_keeper.sub_record_keeper(model=self.__class__.__name__)
    self.show = lambda *args, **kwargs: print(*args, **kwargs) if not self.suppress_output else None
    self.to(self.hardware)
    # BUG FIX: when pytorch_lightning is missing, LightningModule is None and
    # isinstance(self, None) raises TypeError; guard against that case first.
    if LightningModule is None or not isinstance(self, LightningModule):
        self.device = self.hardware
    else:
        self._is_lightning_module = True
        self.new_trainer = lambda *args, **kwargs: Trainer(*args, **{
            # default values
            **({
                "gpus": torch.cuda.device_count(),
                "auto_select_gpus": True,
            } if torch.cuda.device_count() > 0 else {}),
            **kwargs,
        })
def default_update_record_keepers(self):
    # NOTE(review): this is a bare attribute access with no effect -- it reads
    # like a stub/placeholder. TODO confirm intended behavior before relying
    # on this method.
    self.setup_config
def default_update_weights(self, batch_of_inputs, batch_of_ideal_outputs, epoch_index, batch_index):
    """
    Run one optimization step: forward pass, loss, backward pass, optimizer step.

    Uses:
        self.optimizer # pytorch optimizer class
        self.forward(batch_of_inputs)
        self.loss_function(batch_of_actual_outputs, batch_of_ideal_outputs)

    :return: the (still graph-attached) loss tensor for this batch
    """
    self.optimizer.zero_grad()
    predictions = self.forward(batch_of_inputs)
    batch_loss = self.loss_function(predictions, batch_of_ideal_outputs)
    batch_loss.backward()
    self.optimizer.step()
    return batch_loss
def onehot_correctness_function(self, model_batch_output, ideal_batch_output):
    """
    Count how many predictions match the ideal labels.

    Summary:
        This assumes both the output of the network and the output of the
        dataset are one-hot encoded.

    :return: (``int``) number of rows where the argmaxes agree
    """
    # collapse one-hot rows down to index labels, on the model's hardware
    predicted_labels = from_onehot_batch(model_batch_output).to(self.hardware)
    ideal_labels = from_onehot_batch(ideal_batch_output).to(self.hardware)
    # element-wise equality, summed into a scalar count
    return predicted_labels.eq(ideal_labels).sum().item()
def default_fit(self, *, input_output_pairs=None, dataset=None, loader=None, batch_size=64, shuffle=True, **kwargs):
    """
    Train the model from paired data, a Dataset, or a DataLoader.

    Uses:
        self.update_weights(batch_of_inputs, batch_of_ideal_outputs, epoch_index, batch_index)
        self.show(args)
        self.train() # provided by pytorch's `nn.Module`

    Examples:
        model.fit(
            dataset=torchvision.datasets.MNIST(<mnist args>),
            epochs=4,
            batch_size=64,
        )
        model.fit(
            loader=torch.utils.data.DataLoader(<dataloader args>),
            epochs=4,
        )

    :return: trainer output (lightning path) or list of per-batch loss tensors
    """
    # TODO: test input_output_pairs
    if input_output_pairs is not None:
        # creates batches
        # NOTE(review): this local helper shadows the module-level `bundle` import.
        def bundle(iterable, bundle_size):
            next_bundle = []
            for each in iterable:
                next_bundle.append(each)
                if len(next_bundle) == bundle_size:
                    yield tuple(next_bundle)
                    next_bundle = []
            # return any half-made bundles
            if len(next_bundle) > 0:
                yield tuple(next_bundle)
        # unpair, batch, then re-pair the inputs and outputs
        input_generator = (each for each, _ in input_output_pairs)
        ideal_output_generator = (each for _, each in input_output_pairs)
        seperated_batches = zip(bundle(input_generator, batch_size), bundle(ideal_output_generator, batch_size))
        loader = ((to_tensor(each_input_batch), to_tensor(each_output_batch)) for each_input_batch, each_output_batch in seperated_batches)
        # NOTE: shuffling isn't possible when there is no length (and generators don't have lengths). So maybe think of an alternative
    else:
        # convert the dataset into a loader (assumming loader was not given)
        # NOTE(review): if neither a Dataset nor a loader is supplied, `loader`
        # stays None and iteration below fails -- confirm callers always pass one.
        if isinstance(dataset, torch.utils.data.Dataset):
            loader = torch.utils.data.DataLoader(
                dataset,
                batch_size=batch_size,
                shuffle=shuffle,
            )
    if hasattr(self, "_is_lightning_module"):
        # lightning path: hand the loop over to a pl.Trainer
        self.prev_trainer = self.new_trainer(**kwargs)
        output = self.prev_trainer.fit(self, loader)
        # go back to the hardware to undo the changes made by pytorch lightning
        self.to(self.hardware)
        return output
    else:
        # plain pytorch training loop
        train_losses = []
        for epoch_index in range(kwargs.get("max_epochs", 1)):
            self.train()
            for batch_index, (batch_of_inputs, batch_of_ideal_outputs) in enumerate(loader):
                loss = self.update_weights(batch_of_inputs, batch_of_ideal_outputs, epoch_index, batch_index)
                from tools.basics import to_pure
                if batch_index % self.log_interval == 0:
                    count = batch_index * len(batch_of_inputs)
                    # NOTE(review): len(loader.dataset) fails for the generator
                    # loader built from input_output_pairs above -- confirm.
                    total = len(loader.dataset)
                    pure_loss = to_pure(loss)
                    self.show(f"\r[Train]: epoch: {epoch_index:>4}, batch: {count:>10}/{total}", sep='', end='', flush=True)
                # NOTE(review): appends graph-attached loss tensors; this keeps
                # every batch's autograd graph alive for the whole fit.
                train_losses.append(loss)
            # TODO: add/allow checkpoints
        self.show()
        return train_losses
def default_test(self, loader, correctness_function=None, loss_function=None):
    """
    Uses:
        self.forward(batch_of_inputs)
        self.show(args)
        self.eval() # provided by pytorch's `nn.Module`
        self.hardware # a pytorch device
    Optionally Uses:
        # returns the pytorch loss
        self.loss_function(batch_of_inputs, batch_of_ideal_outputs)
        # returns a number (number of correct guesses)
        self.correctness_function(batch_of_inputs, batch_of_ideal_outputs)
    Returns:
        the total number of correct predictions over the whole loader
    """
    # fall back to the model's own functions when none are supplied
    correctness_function = correctness_function or self.correctness_function
    loss_function = loss_function or self.loss_function
    self.eval()
    test_loss_accumulator = 0
    correct_count = 0
    with torch.no_grad():
        for batch_of_inputs, batch_of_ideal_outputs in loader:
            actual_output = self.forward(batch_of_inputs)
            # NOTE(review): uses self.device here but self.hardware elsewhere —
            # confirm both attributes are always set and refer to the same device.
            actual_output = actual_output.type(torch.float).to(self.device)
            batch_of_ideal_outputs = batch_of_ideal_outputs.type(torch.float).to(self.device)
            test_loss_accumulator += loss_function(actual_output, batch_of_ideal_outputs)
            correct_count += correctness_function(actual_output, batch_of_ideal_outputs)
    # convert to regular non-tensor data
    from tools.basics import to_pure
    sample_count = len(loader.dataset)
    accuracy = correct_count / len(loader.dataset)
    average_loss = to_pure(test_loss_accumulator) / sample_count
    # commit a record when a record keeper was attached to this model
    if hasattr(self, "record_keeper"):
        self.record_keeper.commit_record(additional_info=dict(
            testing=True,
            average_loss=average_loss,
            accuracy=correct_count / sample_count,
            correct=correct_count,
        ))
    self.show(f"[Test]: average_loss: {average_loss:>9.4f}, accuracy: {accuracy:>4.2f}, {correct_count}/{sample_count:.0f}")
    return correct_count
return locals()
# running counter used to give each logged image a unique filename
_image_log_count = 0
def log_image(image_tensor):
    """Save *image_tensor* as a PNG under ./logs.do_not_sync and print its path."""
    global _image_log_count
    import os
    import torchvision.transforms.functional as F
    _image_log_count += 1
    os.makedirs("./logs.do_not_sync", exist_ok=True)
    image_path = f"./logs.do_not_sync/display_{_image_log_count}.png"
    pil_image = F.to_pil_image(image_tensor)
    pil_image.save(image_path)
    print("image logged: " + image_path)
#%% |
<reponame>unt-libraries/codalib
import pytest
from codalib import anvl
class Test_readANVLString(object):
    """Unit tests for anvl.readANVLString parsing behavior."""
    def test_with_empty_string(self):
        """
        Check that readANVLString can handle an empty string.
        """
        actual = anvl.readANVLString('')
        expected = {}
        assert actual == expected
    def test_with_simple_anvl_string(self):
        """
        Check that readANVLString parses single key value.
        """
        actual = anvl.readANVLString('key:value')
        expected = {'key': 'value'}
        assert actual == expected
    def test_strips_left_whitespace_of_value(self):
        """
        Check that readANVLString only strips whitespace on the left side
        of the value.
        """
        actual = anvl.readANVLString('key: value ')
        expected = {'key': 'value '}
        assert actual == expected
    def test_string_without_colon(self):
        """
        Verifies malformed ANVL records raise anvl.InvalidANVLRecord
        exception.
        """
        with pytest.raises(anvl.InvalidANVLRecord):
            anvl.readANVLString('foo bar baz qux')
    def test_with_comment(self):
        """
        Verify readANVLString can handle a comment with the `#` char
        in the first column of a line.
        """
        # NOTE: the triple-quoted fixture deliberately embeds the source
        # indentation and blank lines in the record text.
        anvl_string = (
            """# this is comment\n
            key1: value1\n
            key2: value2\n
            """
        )
        actual = anvl.readANVLString(anvl_string)
        expected = {'key1': 'value1', 'key2': 'value2'}
        assert actual == expected
    @pytest.mark.xfail
    def test_with_comment_char_not_in_first_column(self):
        """
        readANVLString should be able to pickup a comment char even
        if whitespace precedes it. This test verifies the function's
        behavior, but it has been marked as an expected failure because
        it is not the desired functionality.
        """
        anvl_string = '\t# this is comment\n'
        with pytest.raises(IndexError):
            anvl.readANVLString(anvl_string)
    def test_with_empty_lines(self):
        """
        Check that readANVLString can handle empty lines.
        """
        anvl_string = ('key1: value1\n'
                       '\n'
                       'key2: value2\n')
        actual = anvl.readANVLString(anvl_string)
        expected = {'key1': 'value1', 'key2': 'value2'}
        assert actual == expected
    def test_captures_buffered_content(self):
        """
        Verify that readANVLString parses values that wrap to the next
        line.
        """
        anvl_string = ('key: Buffered\n'
                       ' Content\n')
        actual = anvl.readANVLString(anvl_string)
        expected = {'key': 'Buffered Content'}
        assert actual == expected
class Test_breakString(object):
    """Unit tests for anvl.breakString line wrapping."""
    def test_breakString_breaks_line(self):
        """
        breakString must place the newline exactly at the requested width.
        """
        sample = 'This string should have newline after the word "newline"'
        wrapped = anvl.breakString(sample, width=31)
        assert wrapped[31] == '\n'
    def test_breakString_offset(self):
        """
        The width and firstLineOffset kwargs control the resulting line lengths.
        """
        sample = 'This is the first line this is the second line'
        wrapped = anvl.breakString(sample, width=22, firstLineOffset=10)
        pieces = wrapped.split('\n')
        assert len(pieces) == 3
        assert (len(pieces[0]), len(pieces[1])) == (11, 19)
class Test_writeANVLString(object):
    """Unit tests for anvl.writeANVLString serialization."""
    def test_output_is_valid_ANVL(self):
        """
        The rendered output matches the expected ANVL byte string.
        """
        record = {'foo': 'bar', 'baz': 'qux'}
        assert anvl.writeANVLString(record) == 'baz: qux\nfoo: bar'
    def test_with_empty_dict(self):
        """
        An empty mapping serializes to an empty string.
        """
        assert anvl.writeANVLString({}) == ''
|
import datetime
import os
from api.domain import sensor
from api.domain.order import Order
from api.domain.scene import Scene
from api.domain.user import User
from api.util.dbconnect import db_instance
from api.util import julian_date_check
from api.providers.ordering import ProviderInterfaceV0
from api import OpenSceneLimitException
from api.providers.configuration.configuration_provider import ConfigurationProvider
from api.providers.caching.caching_provider import CachingProvider
# ----------------------------------------------------------------------------------
from api.system.logger import ilogger as logger # TODO: is this the best place for these?
import copy
import yaml
# Module-level singletons shared by every OrderingProvider instance.
cache = CachingProvider()
config = ConfigurationProvider()
from api import __location__
class OrderingProviderException(Exception):
    """Raised by OrderingProvider for bad arguments or missing orders."""
    pass
class OrderingProvider(ProviderInterfaceV0):
    """Ordering backend: product availability, order placement/lookup,
    cancellation, per-item status and the system message."""

    @staticmethod
    def sensor_products(product_id):
        """Return available products for the given scene id(s).

        :param product_id: comma-delimited string or iterable of scene ids
        :return: dict of available products keyed by sensor type
        """
        # coming from uwsgi, product_id is unicode
        # NOTE(review): `basestring` exists only on Python 2; replace with
        # `str` when porting this module to Python 3.
        if isinstance(product_id, basestring):
            prod_list = product_id.split(",")
        else:
            prod_list = product_id
        return sensor.available_products(prod_list)

    def available_products(self, product_id, username):
        """
        Check to see what products are available to user based on
        an input list of scenes

        :param product_id: list of desired inputs
        :param username: username
        :return: dictionary of products, annotated with any date/ordering
                 restrictions and not-implemented scene ids
        """
        user = User.by_username(username)
        pub_prods = copy.deepcopy(OrderingProvider.sensor_products(product_id))
        with open(os.path.join(__location__, 'domain/restricted.yaml')) as f:
            restricted = yaml.safe_load(f.read())
        # restrictions only apply to non-staff users
        role = not user.is_staff()
        restrict_all = restricted.get('all', {})
        all_role = restrict_all.get('role', [])
        all_by_date = restrict_all.get('by_date', {})
        all_ordering_rsctd = restrict_all.get('ordering', [])
        upd = {'date_restricted': {}, 'ordering_restricted': {}, 'not_implemented': []}
        # iterate over a snapshot: sensor types may be popped from pub_prods below
        for sensor_type, prods in list(pub_prods.items()):
            if sensor_type == 'not_implemented':
                continue
            stype = sensor_type.replace('_collection', '') if '_collection' in sensor_type else sensor_type
            sensor_restr = restricted.get(stype, {})
            role_restr = sensor_restr.get('role', []) + all_role
            by_date_restr = sensor_restr.get('by_date', {})
            # 'all' overrides any sensor related dates
            by_date_restr.update(all_by_date)
            outs = pub_prods[sensor_type]['products']
            ins = pub_prods[sensor_type]['inputs']
            ### Leaving this here as it could be a useful template
            ### if we introduce new sensors in the future which are
            ### role restricted.
            # Restrict ordering VIIRS to staff
            # if role and sensor_type.startswith('vnp'):
            #     for sc_id in ins:
            #         upd['not_implemented'].append(sc_id)
            #     pub_prods.pop(sensor_type)
            #     continue
            # Restrict ordering Sentinel to staff
            if role and sensor_type == 'sentinel':
                for sc_id in ins:
                    upd['not_implemented'].append(sc_id)
                pub_prods.pop(sensor_type)
                continue
            # sensors whose ordering is globally restricted
            if sensor_type in all_ordering_rsctd:
                for sc_id in ins:
                    upd['ordering_restricted'].setdefault(sensor_type, []).append(sc_id)
                pub_prods.pop(sensor_type)
                continue
            remove_me = []
            if role:
                # strip role-restricted products for non-staff users
                for prod in role_restr:
                    try:
                        outs.remove(prod)
                    except ValueError:
                        continue
            for prod in outs:
                if prod in by_date_restr:
                    # BUG FIX: look the rule up in the merged mapping;
                    # `sensor_restr['by_date'][prod]` raised KeyError for
                    # rules that came only from the 'all' section.
                    r = by_date_restr[prod]
                    for sc_id in ins:
                        obj = sensor.instance(sc_id)
                        julian = '{}{}'.format(obj.year, obj.doy)
                        if not julian_date_check(julian, r):
                            remove_me.append(prod)
                            upd['date_restricted'].setdefault(prod, []).append(sc_id)
            for rem in remove_me:
                try:
                    outs.remove(rem)
                except ValueError:
                    continue
        if upd['date_restricted']:
            pub_prods.update(date_restricted=upd['date_restricted'])
        if upd['ordering_restricted']:
            pub_prods.update(ordering_restricted=upd['ordering_restricted'])
        if len(upd['not_implemented']) > 0:
            if 'not_implemented' in pub_prods:
                pub_prods['not_implemented'].extend(upd['not_implemented'])
            else:
                pub_prods.update(not_implemented=upd['not_implemented'])
        return pub_prods

    def fetch_user_orders(self, username='', email='', user_id='', filters=None):
        """Return the Orders of a user identified by username, email or id.

        :raises OrderingProviderException: bad filters, or no identifier given
        :return: list of Order objects (empty when the user is not unique)
        """
        if filters and not isinstance(filters, dict):
            raise OrderingProviderException('filters must be dict')
        if username:
            usearch = {'username': username}
        elif email:
            usearch = {'email': email}
        elif user_id:
            usearch = {'id': user_id}
        else:
            # BUG FIX: previously fell through to a NameError on `usearch`
            raise OrderingProviderException('username, email or user_id required')
        user = User.where(usearch)
        if len(user) != 1:
            return list()
        else:
            user = user.pop()
        if filters:
            params = dict(filters)
            # user_id always wins over anything supplied in filters
            params.update({'user_id': user.id})
        else:
            params = {'user_id': user.id}
        resp = Order.where(params)
        return resp

    def check_open_scenes(self, order, user_id='', filters=None):
        """
        Perform a check to determine if the new order plus current open scenes for the current user
        is less than the maximum allowed open scene limit (currently 10,000).

        :raises OpenSceneLimitException: when the limit would be exceeded
        """
        limit = config.configuration_keys['policy.open_scene_limit']
        if filters and not isinstance(filters, dict):
            raise OrderingProviderException('filters must be dict')
        # See if the user has any open orders first
        user_orders = Order.where({'user_id': user_id, 'status': 'ordered'})
        if len(user_orders) > 0:
            scenes = Order.get_user_scenes(user_id=user_id, params=filters)
            ids = sensor.SensorCONST.instances.keys()
            # count number of scenes in the order
            order_scenes = 0
            for key in order:
                if key in ids:
                    order_scenes += len(order[key]['inputs'])
            if (len(scenes) + order_scenes) > int(limit):
                diff = (len(scenes) + order_scenes) - int(limit)
                msg = "Order will exceed open scene limit of {lim}, please reduce number of ordered scenes by {diff}"
                raise OpenSceneLimitException(msg.format(lim=limit, diff=diff))

    def fetch_order(self, ordernum):
        """Return the orders matching the given order id string."""
        orders = Order.where({'orderid': ordernum})
        return orders

    def place_order(self, new_order, user):
        """
        Build an order dictionary to be placed into the system

        :param new_order: dictionary representation of the order received
        :param user: user information associated with the order
        :return: orderid to be used for tracking
        """
        ts = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        order_dict = {'orderid': Order.generate_order_id(user.email),
                      'user_id': user.id,
                      'order_type': 'level2_ondemand',
                      'status': 'ordered',
                      'product_opts': new_order,
                      'ee_order_id': '',
                      'order_source': 'espa',
                      'order_date': ts,
                      'priority': 'normal',
                      'note': new_order.get('note', None),
                      'email': user.email,
                      'product_options': ''}
        result = Order.create(order_dict)
        return result

    def cancel_order(self, orderid, request_ip_address):
        """
        Cancels an order, and all scenes contained within it

        :param orderid: internal id of the order to cancel
        :param request_ip_address: requester's address, for the audit log
        :return: the cancelled Order
        :raises OrderingProviderException: when the order is not found
        """
        order = Order.where({'id': orderid})
        if len(order) != 1:
            raise OrderingProviderException('Order not found')
        else:
            order = order.pop()
        logger.info('Received request to cancel {} from {}'
                    .format(orderid, request_ip_address))
        # only scenes in these states can still be cancelled
        killable_scene_states = ('submitted', 'oncache', 'onorder', 'retry',
                                 'error', 'unavailable', 'complete')
        scenes = order.scenes(sql_dict={'status': killable_scene_states})
        if len(scenes) > 0:
            Scene.bulk_update([s.id for s in scenes], Scene.cancel_opts())
        else:
            logger.info('No scenes to cancel for order {}'
                        .format(orderid))
        order.status = 'cancelled'
        order.save()
        logger.info('Request to cancel {} from {} successful.'
                    .format(orderid, request_ip_address))
        return order

    def item_status(self, orderid, itemid='ALL', username=None, filters=None):
        """Return scene status per order, optionally narrowed to one item.

        :return: dict mapping orderid -> list of matching scenes
        :raises TypeError: when filters is neither None nor a dict
        """
        user = User.by_username(username)
        if not isinstance(filters, dict):
            if filters is None:
                filters = dict()
            else:
                raise TypeError('supplied filters invalid')
        if orderid:
            orders = Order.where({'orderid': orderid})
        else:
            orders = Order.where({'user_id': user.id})
        search = dict()
        if 'status' in filters:
            search.update(status=(filters.get('status'),))
        if 'name' in filters:
            search.update(name=(filters.get('name'),))
        elif itemid != "ALL":
            # BUG FIX: was `itemid is not "ALL"` — identity comparison with a
            # string literal; use value equality instead.
            search.update(name=(itemid,))
        response = dict()
        for order in orders:
            response[order.orderid] = order.scenes(search)
        return response

    def get_system_status(self):
        """Fetch the system message configuration from the database."""
        sql = "select key, value from ordering_configuration where " \
              "key in ('msg.system_message_body', 'msg.system_message_title', 'system.display_system_message');"
        with db_instance() as db:
            db.select(sql)
        if db:
            resp_dict = dict(db.fetcharr)
            return {'system_message_body': resp_dict['msg.system_message_body'],
                    'system_message_title': resp_dict['msg.system_message_title'],
                    'display_system_message': resp_dict['system.display_system_message']}
        else:
            # NOTE(review): this branch omits 'display_system_message' —
            # confirm callers tolerate the missing key.
            return {'system_message_body': None, 'system_message_title': None}
|
<reponame>tdsmith/elisascripts
import argparse
import ijroi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.ndimage as img
import tifffile as tf
from shapely.geometry import Polygon, Point
from elisa.annotate import annotate_cells, surface_from_array, PIL_from_surface
def tx_matrix(from_points, to_points):
    """Least-squares affine transform mapping *from_points* onto *to_points*.

    Both arguments are n x 3 matrices in homogeneous form (third column
    all 1s). Returns a 3x3 matrix T such that T . p maps a source point
    p onto its target.
    """
    x_row = np.linalg.lstsq(from_points, to_points[:, 0])[0]
    y_row = np.linalg.lstsq(from_points, to_points[:, 1])[0]
    bottom = [[0, 0, 1]]
    return np.vstack((x_row.reshape((1, -1)), y_row.reshape((1, -1)), bottom))
def tx_matrix_from_rois(livedead_roi, elisa_roi, plot_filename=None):
    """Derive the live/dead -> ELISA transform from two ImageJ ROI zips.

    When *plot_filename* is given, a diagnostic overlay of the target
    points and the transformed source points is saved there.
    """
    def load_homogeneous(fn):
        # stack every ROI's points and append a column of ones
        points = np.vstack([roi[1] for roi in ijroi.read_roi_zip(fn)])
        return np.hstack((points, np.ones((len(points), 1))))
    elisa_cx = load_homogeneous(elisa_roi)
    cell_cx = load_homogeneous(livedead_roi)
    tx = tx_matrix(cell_cx, elisa_cx)
    if plot_filename:
        mapped = np.dot(tx, cell_cx.T).T
        fig, ax = plt.subplots()
        ax.plot(elisa_cx[:, 0], elisa_cx[:, 1], 'o', mapped[:, 0], mapped[:, 1], 'r.')
        fig.savefig(plot_filename)
    return tx
def in_bounds(x, radius, shape):
    """Boolean mask: True where a square window of half-width *radius*
    around row (x, y) of *x* lies fully inside an image of *shape* (h, w)."""
    xs = x[:, 0]
    ys = x[:, 1]
    fits_horizontally = (xs - radius >= 0) & (xs + radius < shape[1])
    fits_vertically = (ys - radius >= 0) & (ys + radius < shape[0])
    return fits_horizontally & fits_vertically
def measure_cells(data, cells, radius):
    """Measure intensity statistics in a circular window around each cell.

    :param data: 2D image array
    :param cells: (n, 3) array of (x, y, 1) cell centers
    :param radius: circle radius in pixels
    :return: (n, 8) array of per-cell stats:
             mean, sd, max, min, integrated, q25, q50, q75
    """
    def circle(rad):
        # binary disc of radius `rad` inside a (2*rad, 2*rad) square
        y, x = np.ogrid[-rad:rad, -rad:rad]
        disc = x * x + y * y <= rad * rad
        return np.ones((2 * rad, 2 * rad)) * disc
    stats = np.zeros((cells.shape[0], 8))
    # mask is True OUTSIDE the disc, so masked stats cover the disc only
    mask = circle(radius) == 0
    for i, (x, y, z) in enumerate(list(cells)):
        # centers arrive as floats after the affine transform; slicing needs ints
        x, y = int(x), int(y)
        region = data[y - radius:y + radius, x - radius:x + radius]
        region = np.ma.array(region, mask=mask)
        valid = region.compressed()
        # BUG FIX: np.percentile expects percentages in 0-100; the previous
        # 0.25/0.5/0.75 arguments returned values barely above the minimum.
        lq = np.percentile(valid, 25)
        median = np.percentile(valid, 50)
        uq = np.percentile(valid, 75)
        stats[i, :] = (region.mean(), region.std(), region.max(), region.min(),
                       region.sum(), lq, median, uq)
    return stats
def recenter(data, cells, radius):
    """Nudge each cell position toward its local intensity center of mass.

    Mutates *cells* in place (columns 0-1) and also returns it. Cells whose
    window falls partially off the image are left unchanged.
    """
    def circle(rad):
        # binary disc of radius `rad` inside a (2*rad, 2*rad) square
        y, x = np.ogrid[-rad:rad, -rad:rad]
        mask = x*x + y*y <= rad*rad
        return np.ones((2*rad, 2*rad)) * mask
    # mask is True OUTSIDE the disc, restricting the center of mass to it
    mask = circle(radius) == 0
    for i, (x, y, z) in enumerate(list(cells)):
        region = data[y-radius:y+radius, x-radius:x+radius]
        # window clipped by the image edge -> skip this cell
        if mask.shape != region.shape:
            continue
        region = np.ma.array(region, mask=mask)
        # center_of_mass returns (row, col); converted to (x, y) below
        com = img.measurements.center_of_mass(region)
        if not np.isnan(com).any():
            # NOTE(review): the 1.5 gain on the correction step is presumably
            # an empirical over-relaxation factor — confirm.
            cells[i, :2] += ((com[1], com[0]) - np.array([radius, radius])) * 1.5
    return cells
def filter_excluded(cells, roiset_fn):
    """Indices of cells falling outside every exclusion ROI polygon.

    :param cells: iterable of (x, y, z) rows
    :param roiset_fn: ImageJ ROI zip of polygons to exclude
    :return: list of row indices to keep
    """
    polygons = [Polygon([list(pt) for pt in roi[1]])
                for roi in ijroi.read_roi_zip(roiset_fn)]
    keep = []
    for i, (x, y, z) in enumerate(cells):
        point = Point((x, y))
        if not any(poly.contains(point) for poly in polygons):
            keep.append(i)
    return keep
def main():
    """Command-line entry point: map live/dead cell coordinates onto the
    stitched ELISA image, iteratively recenter them, and measure each well."""
    parser = argparse.ArgumentParser(description='Process single-cell ELISA images.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('livedead', metavar='livedead_results.txt',
                        help='Output of id_singlets.py.')
    parser.add_argument('livedead_roi', metavar='livedead-RoiSet.zip')
    parser.add_argument('elisa', metavar='stitched_elisa.tif', type=argparse.FileType('rb'),
                        help='Stitched ELISA image, post background subtraction.')
    parser.add_argument('elisa_roi', metavar='elisa-RoiSet.zip')
    parser.add_argument('--well-radius', default=50, type=int,
                        help='ELISA well radius, in pixels.')
    parser.add_argument('--tx-plot', '-t', default='transform.png',
                        help='Filename for transformation plot.')
    parser.add_argument('--exclude', '-e', metavar='exclude-RoiSet.zip',
                        help='ROI set of regions to exclude from measurement')
    parser.add_argument('--output', '-o', default='elisa_results.txt',
                        help='Filename to save information about the live/dead image.')
    args = parser.parse_args()
    df = pd.read_csv(args.livedead)
    # homogeneous coordinates (x, y, 1) for the affine transform
    cells = np.vstack([df['x'], df['y'], np.ones_like(df['x'])]).T
    tx = tx_matrix_from_rois(args.livedead_roi, args.elisa_roi, args.tx_plot)
    tx_cells = np.dot(tx, cells.T).T
    # FIX: Python 2 print statements converted to print() calls, which behave
    # identically for a single argument under both Python 2 and 3.
    print("Loaded {} cells.".format(tx_cells.shape[0]))
    data = tf.TiffFile(args.elisa)[0].asarray()
    # discard cells whose measurement window would fall off the image
    row_idx = np.nonzero(in_bounds(tx_cells, args.well_radius*1.5, data.shape))[0]
    tx_cells = tx_cells[row_idx]
    print("Kept {} in-bounds cells.".format(tx_cells.shape[0]))
    if args.exclude:
        not_excluded = filter_excluded(tx_cells, args.exclude)
        row_idx = row_idx[not_excluded]
        tx_cells = tx_cells[not_excluded]
        print("Kept {} cells not excluded.".format(tx_cells.shape[0]))
    orig_cells = tx_cells.copy()
    # iteratively recenter each cell on its well's center of mass, saving an
    # annotated snapshot and the current locations at every iteration
    for i in range(5):
        surface = surface_from_array(data)
        surface = annotate_cells(surface, orig_cells, args.well_radius, (1.0, 0.0, 0.0))
        if i > 0:
            surface = annotate_cells(surface, tx_cells, args.well_radius, (0.0, 0.0, 1.0))
        pil_image = PIL_from_surface(surface)
        pil_image.save("annotated.{}.jpg".format(i))
        np.savetxt("locations.{}.txt".format(i), tx_cells)
        tx_cells = recenter(data, tx_cells, 1.5*args.well_radius)
    stats = measure_cells(data, tx_cells, args.well_radius)
    out = np.hstack([row_idx.reshape((-1, 1)), tx_cells[:, :2], stats])
    header = ['cells_row', 'x', 'y', 'mean', 'sd', 'max', 'min', 'integrated', 'q25', 'q50', 'q75']
    np.savetxt(args.output, out, delimiter=',',
               fmt=['%d'] * 3 + ['%f'] * 8,
               header=','.join(header),
               comments='')

if __name__ == '__main__':
    main()
|
<reponame>Ziems/OBST<filename>src/model/activation.py
import mesh_tensorflow as mtf
import numpy as np
import tensorflow as tf
from .. import tf_wrapper as tfw
from ..dataclass import BlockArgs
from ..mtf_wrapper import relu as _relu, multiply, einsum, constant, sigmoid as _sigmoid, tanh as _tanh, softplus
from ..utils_core import random_name, scoped
tf1 = tf.compat.v1
class MishForward(mtf.Operation):
    """Custom mtf op computing mish(x) = x * tanh(softplus(x)) slice-wise."""
    def __init__(self, x: mtf.Tensor):
        super().__init__([x], name=random_name("mish_forward"))
        # single output with the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def gradient(self, grad_ys):
        # hand-written backward pass instead of autodiff
        return MishBackward(self.inputs[0], grad_ys[0]).outputs
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x):
            # mish(x) = x * tanh(softplus(x))
            return tfw.multiply(x, tfw.tanh(tfw.softplus(x)))
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]])
        lowering.set_tensor_lowering(self.outputs[0], y)
class MishBackward(mtf.Operation):
    """Backward pass for mish(x) = x * tanh(softplus(x)).

    mish'(x) = tanh(softplus(x)) + x * sigmoid(x) * (1 - tanh(softplus(x))^2)
    """
    def __init__(self, x: mtf.Tensor, dy: mtf.Tensor):
        super().__init__([x, dy], name=random_name("mish_backward"))
        # gradient has the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x, dy):
            gte = tfw.tanh(tfw.softplus(x))
            # BUG FIX: the whole (1 - gte^2) factor must multiply x*sigmoid(x);
            # previously only gte^2 was multiplied (missing parentheses), which
            # added a spurious constant 1 to the gradient.
            gte += (1. - tfw.square(gte)) * x * tfw.sigmoid(x)
            return tfw.multiply(dy, gte)
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]], lowering.tensors[self.inputs[1]])
        lowering.set_tensor_lowering(self.outputs[0], y)
class SiluForward(mtf.Operation):
    """Custom mtf op computing silu(x) = x * sigmoid(x) slice-wise."""
    def __init__(self, x: mtf.Tensor):
        super().__init__([x], name=random_name("silu_forward"))
        # single output with the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def gradient(self, grad_ys):
        # hand-written backward pass instead of autodiff
        return SiluBackward(self.inputs[0], grad_ys[0]).outputs
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x):
            # silu(x) = x * sigmoid(x)
            return tfw.multiply(x, tfw.sigmoid(x))
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]])
        lowering.set_tensor_lowering(self.outputs[0], y)
class SiluBackward(mtf.Operation):
    """Backward pass for silu(x) = x * sigmoid(x).

    silu'(x) = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
    """
    def __init__(self, x: mtf.Tensor, dy: mtf.Tensor):
        super().__init__([x, dy], name=random_name("silu_backward"))
        # gradient has the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x, dy):
            gte = tfw.sigmoid(x)
            # BUG FIX: correct SiLU gradient is sigma(x)*(1 + x*(1 - sigma(x)));
            # the previous (x - 1)*sigma(x) + 1 diverges for large x (grows
            # like x instead of approaching 1).
            return dy * (gte * (1. + x * (1. - gte)))
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]], lowering.tensors[self.inputs[1]])
        lowering.set_tensor_lowering(self.outputs[0], y)
class LeCunTanhForward(mtf.Operation):
    """Custom mtf op computing tanh(x) + 0.1 * x slice-wise.

    NOTE(review): the classic "LeCun tanh" is 1.7159*tanh(2x/3); this op
    implements tanh with a small linear leak instead — confirm naming intent.
    """
    def __init__(self, x: mtf.Tensor):
        super().__init__([x], name=random_name("lecun_tanh_forward"))
        # single output with the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def gradient(self, grad_ys):
        # hand-written backward pass instead of autodiff
        return LeCunTanhBackward(self.inputs[0], grad_ys[0]).outputs
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x):
            # tanh with a 0.1 linear leak
            return tfw.tanh(x) + x * 0.1
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]])
        lowering.set_tensor_lowering(self.outputs[0], y)
class LeCunTanhBackward(mtf.Operation):
    """Backward pass for tanh(x) + 0.1*x: derivative is 1.1 - tanh(x)^2."""
    def __init__(self, x: mtf.Tensor, dy: mtf.Tensor):
        super().__init__([x, dy], name=random_name("lecun_tanh_backward"))
        # gradient has the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x, dy):
            # d/dx [tanh(x) + 0.1x] = (1 - tanh^2(x)) + 0.1 = 1.1 - tanh^2(x)
            return tfw.multiply(dy, tfw.subtract(1.1, tfw.square(tfw.tanh(x))))
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]], lowering.tensors[self.inputs[1]])
        lowering.set_tensor_lowering(self.outputs[0], y)
class SoftsignForward(mtf.Operation):
    """Custom mtf op computing softsign(x) = x / (1 + |x|) slice-wise."""
    def __init__(self, x: mtf.Tensor):
        super().__init__([x], name=random_name("softsign_forward"))
        # single output with the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def gradient(self, grad_ys):
        # hand-written backward pass instead of autodiff
        return SoftsignBackward(self.inputs[0], grad_ys[0]).outputs
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x):
            # softsign(x) = x / (1 + |x|)
            return x / (1. + tfw.abs(x))
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]])
        lowering.set_tensor_lowering(self.outputs[0], y)
class SoftsignBackward(mtf.Operation):
    """Backward pass for softsign: derivative is 1 / (1 + |x|)^2."""
    def __init__(self, x: mtf.Tensor, dy: mtf.Tensor):
        super().__init__([x, dy], name=random_name("softsign_backward"))
        # gradient has the same shape/dtype as the input
        self._outputs = [mtf.Tensor(self, x.shape, x.dtype)]
    def lower(self, lowering):
        mesh_impl = lowering.mesh_impl(self)
        def slicewise_fn(x, dy):
            # d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2
            return dy / tfw.square(1. + tfw.abs(x))
        y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]], lowering.tensors[self.inputs[1]])
        lowering.set_tensor_lowering(self.outputs[0], y)
def _output0(op):
    """Adapt an mtf.Operation subclass into an activation over BlockArgs.

    Returns a callable that instantiates *op* on args.tensor and yields its
    first (only) output tensor.
    """
    if not issubclass(op, mtf.Operation):
        raise ValueError
    def _wrapped(args: BlockArgs):
        return op(args.tensor).outputs[0]
    return _wrapped
def _gelu(params, tensor: mtf.Tensor):
    # tanh approximation of GELU:
    #   0.5 * x * (1 + tanh(0.044715 * x^3 + x * sqrt(2/pi)))
    # The inner einsum builds the 0.044715 * x^3 term.
    # NOTE(review): the standard approximation also scales the cubic term by
    # sqrt(2/pi) — confirm the missing factor here is intentional.
    return einsum([tensor, _tanh(einsum([tensor, tensor, tensor, constant(params, 0.044715)],
                                        output_shape=tensor.shape) + tensor * np.sqrt(2 / np.pi)) + 1.0,
                   constant(params, 0.5)], output_shape=tensor.shape)
def gelu(args: BlockArgs):
    """Approximate GELU (see _gelu), run inside a named scope."""
    return scoped("gelu", _gelu, args.params, args.tensor)
def relu(args: BlockArgs):
    """ReLU activation via the mtf wrapper."""
    return _relu(args.tensor)
def sigmoid(args: BlockArgs):
    """Sigmoid activation via the mtf wrapper."""
    return _sigmoid(args.tensor)
def tanh(args: BlockArgs):
    """Tanh activation via the mtf wrapper."""
    return _tanh(args.tensor)
def _mtf_mish(tensor: mtf.Tensor):
    # mish built from mtf primitives (gradient comes from autodiff, unlike
    # the custom MishForward op above)
    return multiply(_tanh(softplus(tensor)), tensor)
def mtf_mish(args: BlockArgs):
    """Mish built from mtf primitives, run inside a named scope."""
    return scoped("mtf_mish", _mtf_mish, args.tensor)
# Name -> activation callable registry; keys are matched against the names
# carried by BlockArgs in activate() below.
ACTIVATIONS = {'relu': relu,
               'sigmoid': sigmoid,
               'tanh': tanh,
               'gelu': gelu,
               'lecun_tanh': _output0(LeCunTanhForward),
               'silu': _output0(SiluForward),
               'mish': _output0(MishForward),
               "mtf_mish": mtf_mish,
               'softsign': _output0(SoftsignForward)
               }
def activate(args: BlockArgs) -> mtf.Tensor:
    """
    Call activation function on mtf.Tensor.

    Applies the first name in *args* that is a known activation; when none
    matches, warns and returns the tensor unchanged (identity).
    """
    matched = next((name for name in args if name in ACTIVATIONS), None)
    if matched is not None:
        return scoped(matched, ACTIVATIONS[matched], args)
    print(f'No activation function found for "{args.name_extras}". Falling back to identity. '
          f'Known functions: {list(ACTIVATIONS.keys())}')
    return args.tensor
|
# -*- coding: utf-8 -*-
import scrapy
import re
from bgm.items import Record, Index, Friend, User, SubjectInfo, Subject
from bgm.util import *
from scrapy.http import Request
import datetime
import json
# str.translate table deleting ASCII control characters 0-31 from scraped text
mpa = dict([(i, None) for i in range(32)])
class UserSpider(scrapy.Spider):
    """Scrapes user profile pages for a numeric id range (set via -a id_min/id_max)."""
    name = 'user'
    def __init__(self, *args, **kwargs):
        super(UserSpider, self).__init__(*args, **kwargs)
        # defaults when bounds are not supplied on the command line
        if not hasattr(self, 'id_max'):
            self.id_max=400000
        if not hasattr(self, 'id_min'):
            self.id_min=1
        self.start_urls = ["http://mirror.bgm.rin.cat/user/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
    def parse(self, response):
        # no profile header -> user does not exist
        if len(response.xpath(".//*[@id='headerProfile']"))==0:
            return
        # username text starts with a leading character that is stripped ([1:])
        user = response.xpath(".//*[@id='headerProfile']/div/div/h1/div[3]/small/text()").extract()[0][1:]
        # control characters removed from the nickname via the mpa table
        nickname = response.xpath(".//*[@class='headerContainer']//*[@class='inner']/a/text()").extract()[0].translate(mpa)
        # Is blocked?
        if len(response.xpath("//ul[@class='timeline']/li"))==0:
            return;
        # numeric-id URLs redirect to the username URL; recover the uid from
        # the original (pre-redirect) URL when a redirect happened
        if not 'redirect_urls' in response.meta:
            uid = int(user)
        else:
            uid = int(response.meta['redirect_urls'][0].split('/')[-1])
        date = response.xpath(".//*[@id='user_home']/div[@class='user_box clearit']/ul/li[1]/span[2]/text()").extract()[0].split(' ')[0]
        date = parsedate(date)
        yield User(name=user, nickname=nickname, uid=uid, joindate=date)
class IndexSpider(scrapy.Spider):
    """Scrapes index pages for a numeric id range (set via -a id_min/id_max)."""
    name='index'
    def __init__(self, *args, **kwargs):
        super(IndexSpider, self).__init__(*args, **kwargs)
        # defaults when bounds are not supplied on the command line
        if not hasattr(self, 'id_max'):
            self.id_max=20000
        if not hasattr(self, 'id_min'):
            self.id_min=1
        self.start_urls = ["http://mirror.bgm.rin.cat/index/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
    def parse(self, response):
        # no creator link -> index missing or empty
        if len(response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/a"))==0:
            return
        indexid = response.url.split('/')[-1]
        indexid=int(indexid)
        creator = response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/a/@href").extract()[0].split('/')[-1]
        creator=str(creator).translate(mpa)
        td = response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/span/span[1]/text()").extract()[0]
        date = parsedate(td.split(' ')[0])
        # a second span, when present, carries the favourite count
        if len(response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/span/span"))==2:
            favourite = response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/span/span[2]/text()").extract()[0]
            favourite = int(favourite)
        else: favourite = 0
        # item ids are encoded in the li element ids as item_<id>
        items = response.xpath(".//*[@id='columnSubjectBrowserA']/ul/li/@id").extract()
        items = [int(itm.split('_')[-1]) for itm in items]
        yield Index(indexid=indexid, creator=creator, favourite=favourite, date=date, items=items)
class RecordSpider(scrapy.Spider):
name='record'
def __init__(self, *args, **kwargs):
super(RecordSpider, self).__init__(*args, **kwargs)
if hasattr(self, 'userlist'):
userlist = []
with open(self.userlist, 'r') as fr:
while True:
l = fr.readline().strip()
if not l: break;
userlist.append(l)
self.start_urls = ["http://mirror.bgm.rin.cat/user/"+i for i in userlist]
else:
if not hasattr(self, 'id_max'):
self.id_max=500000
if not hasattr(self, 'id_min'):
self.id_min=1
self.start_urls = ["http://mirror.bgm.rin.cat/user/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
def parse(self, response):
username = response.url.split('/')[-1]
if (not response.xpath(".//*[@id='headerProfile']")) or response.xpath(".//div[@class='tipIntro']"):
return
if username in blockusers:
return
uid = int(response.meta['redirect_urls'][0].split('/')[-1]) if 'redirect_urls' in response.meta else int(username)
nickname = next(iter(response.xpath(".//*[@class='headerContainer']//*[@class='inner']/a/text()").extract()), "").translate(mpa)
date = response.xpath(".//*[@id='user_home']/div[@class='user_box clearit']/ul/li[1]/span[2]/text()").extract()[0].split(' ')[0]
date = parsedate(date)
yield User(name=username, nickname=nickname, uid=uid, joindate=date)
if len(response.xpath(".//*[@id='anime']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/anime/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='game']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/game/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='book']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/book/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='music']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/music/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='real']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/real/list/"+username, callback = self.merge, meta = { 'uid': uid })
def merge(self, response):
followlinks = response.xpath("//ul[@class='navSubTabs']/li/a/@href").extract() # a list of links
for link in followlinks:
yield scrapy.Request(u"http://mirror.bgm.rin.cat"+link, callback = self.parse_recorder, meta = { 'uid': response.meta['uid'] })
    def parse_recorder(self, response):
        """Parse one page of a user's collection list and yield one Record
        per item; requests the next page while more items remain."""
        # Collection state (e.g. wish/collect/do) is the last path segment.
        state = response.url.split('/')[-1].split('?')[0]
        # Page number comes from the '?page=N' query; the first page has none.
        page = 1 if '=' not in response.url else int(response.url.split('=')[1])
        # Item type (anime/game/book/music/real) is four path segments back.
        tp = response.url.split('/')[-4]
        items = response.xpath(".//*[@id='browserItemList']/li")
        for item in items:
            # List-entry ids look like "item_<numeric id>".
            item_id = int(re.match(r"item_(\d+)",item.xpath("./@id").extract()[0]).group(1))
            item_date = parsedate(item.xpath("./div/p[@class='collectInfo']/span[@class='tip_j']/text()").extract()[0])
            # Tags span is optional; the split drops the leading label tokens
            # and the trailing empty token.
            if item.xpath("./div/p[@class='collectInfo']/span[@class='tip']"):
                item_tags = item.xpath("./div/p[@class='collectInfo']/span[@class='tip']/text()").extract()[0].split(u' ')[2:-1]
            else:
                item_tags=None
            # Rating is encoded in a CSS class like "starlight stars7".
            try_match = next(iter(item.xpath("./div/p[@class='collectInfo']/span[@class='starstop-s']/span/@class").extract()), None)
            if try_match is not None:
                mtch = re.match(r'starlight stars(\d+)', try_match)
                item_rate = mtch.group(1)
                item_rate = int(item_rate)
            else:
                item_rate = None
            comment = item.xpath(".//div[@class='text']/text()").extract()[0] if len(item.xpath(".//div[@class='text']")) > 0 else None
            watchRecord = Record(
                uid = response.meta['uid'],
                typ = tp, state = state,
                iid = item_id,
                adddate = item_date
            )
            # Optional fields are only set when present/non-empty.
            if item_tags:
                watchRecord["tags"]=item_tags
            if item_rate:
                watchRecord["rate"]=item_rate
            if comment:
                watchRecord["comment"]=comment.translate(mpa)
            yield watchRecord
        # Total item count is shown on the focused sub-tab; 24 items per page.
        total_count = int(re.search(r"(\d+)", response.xpath("//ul[@class='navSubTabs']/li/a[@class='focus']/span/text()").extract()[0]).group(1))
        if 24 * page < total_count:
            yield scrapy.Request(getnextpage(response.url),callback = self.parse_recorder, meta = { 'uid': response.meta['uid'] })
class FriendsSpider(scrapy.Spider):
    """Enumerate /user/<id>/friends pages and yield one Friend per link."""
    name = 'friends'
    handle_httpstatus_list = [302]

    def __init__(self, *args, **kwargs):
        super(FriendsSpider, self).__init__(*args, **kwargs)
        # -a id_min / -a id_max override the scanned uid range.
        if not hasattr(self, 'id_max'):
            self.id_max = 400000
        if not hasattr(self, 'id_min'):
            self.id_min = 1
        url_template = "http://mirror.bgm.rin.cat/user/{}/friends"
        self.start_urls = [url_template.format(i)
                           for i in range(int(self.id_min), int(self.id_max))]

    def parse(self, response):
        """Yield a Friend item for every entry in the member list."""
        user = response.url.split('/')[-2]
        for href in response.xpath(".//*[@id='memberUserList']/li//@href").extract():
            yield Friend(user=user, friend=str(href.split('/')[-1]))
class SubjectInfoSpider(scrapy.Spider):
    """Scrape each subject's infobox labels and relation labels."""
    name = "subjectinfo"

    def __init__(self, *args, **kwargs):
        super(SubjectInfoSpider, self).__init__(*args, **kwargs)
        # -a id_min / -a id_max override the scanned subject-id range.
        if not hasattr(self, 'id_max'):
            self.id_max = 300000
        if not hasattr(self, 'id_min'):
            self.id_min = 1
        self.start_urls = ["http://mirror.bgm.rin.cat/subject/{}".format(i)
                           for i in range(int(self.id_min), int(self.id_max))]

    def parse(self, response):
        """Yield a SubjectInfo item for an existing, non-placeholder subject."""
        subject_id = int(response.url.split('/')[-1])
        # Missing subject page: nothing to extract.
        if not response.xpath(".//*[@id='headerSubject']"):
            return
        # Locked/placeholder ("tip") page: skip.
        if response.xpath(".//div[@class='tipIntro']"):
            return
        # Second whitespace-separated token of the score caption is the type.
        typestring = response.xpath(".//div[@class='global_score']/div/small[1]/text()").extract()[0].split(' ')[1]
        # Infobox labels with the trailing two characters (": ") stripped.
        infobox = {itm.extract()[:-2]
                   for itm in response.xpath(".//div[@class='infobox']//span/text()")}
        relations = {itm.extract()
                     for itm in response.xpath(".//ul[@class='browserCoverMedium clearit']/li[@class='sep']/span/text()")}
        yield SubjectInfo(subjectid=subject_id,
                          subjecttype=typestring,
                          infobox=infobox,
                          relations=relations)
class SubjectSpider(scrapy.Spider):
    """Scrape full subject records: name, type, staff infobox, aliases and
    related-subject ids."""
    name="subject"
    def __init__(self, *args, **kwargs):
        super(SubjectSpider, self).__init__(*args, **kwargs)
        # -a itemlist=<file> crawls the listed subject ids; otherwise the
        # -a id_min / -a id_max range is scanned.
        if hasattr(self, 'itemlist'):
            itemlist = []
            with open(self.itemlist, 'r') as fr:
                while True:
                    l = fr.readline().strip()
                    if not l: break;
                    itemlist.append(l)
            self.start_urls = ["http://mirror.bgm.rin.cat/subject/"+i for i in itemlist]
        else:
            if not hasattr(self, 'id_max'):
                self.id_max=300000
            if not hasattr(self, 'id_min'):
                self.id_min=1
            self.start_urls = ["http://mirror.bgm.rin.cat/subject/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
    def parse(self, response):
        """Yield one Subject item per existing subject page."""
        subjectid = int(response.url.split('/')[-1]) # trueid
        if not response.xpath(".//*[@id='headerSubject']"):
            return
        # This is used to filter those locked items
        # However, considering that current Bangumi ranking list does not exclude blocked items,
        # we include them in our spider.
        #if response.xpath(".//div[@class='tipIntro']"):
        #    return;
        # Merged/renamed subjects redirect; keep the originally requested id
        # as 'order' so the alias mapping is preserved.
        if 'redirect_urls' in response.meta:
            order = int(response.meta['redirect_urls'][0].split('/')[-1])
        else:
            order = subjectid; # id
        # Prefer the full title from the <a title="..."> attribute; fall back
        # to the anchor text when the attribute is absent/empty.
        subjectname = response.xpath(".//*[@id='headerSubject']/h1/a/attribute::title").extract()[0]
        if not subjectname:
            subjectname = response.xpath(".//*[@id='headerSubject']/h1/a/text()").extract()[0]
        subjecttype = response.xpath(".//div[@class='global_score']/div/small[1]/text()").extract()[0]
        subjecttype = subjecttype.split(' ')[1].lower();
        # Infobox: label text (minus trailing ": ") zipped with each row's
        # linked entity ids; '别名' ("alias") rows also collect plain text.
        infokey = [itm[:-2] for itm in response.xpath(".//div[@class='infobox']//li/span/text()").extract()]
        infoval = response.xpath(".//div[@class='infobox']//li")
        infobox = dict()
        alias = []
        for key,val in zip(infokey, infoval):
            if val.xpath("a"):
                infobox[key]=[ref.split('/')[-1] for ref in
                              val.xpath("a/@href").extract()]
            if key == '别名':
                alias.append(val.xpath('text()').extract()[0])
        # Related subjects: an <li> with a class starts a new relation group
        # (its <span> holds the group label); class-less <li>s continue it.
        # NOTE(review): assumes the first related <li> always carries a class
        # attribute -- otherwise 'relationtype' is referenced before
        # assignment. Confirm against the page structure.
        relateditms = response.xpath(".//ul[@class='browserCoverMedium clearit']/li")
        relations = dict()
        for itm in relateditms:
            if itm.xpath("@class"):
                relationtype = itm.xpath("span/text()").extract()[0]
                relations[relationtype]=[itm.xpath("a[@class='title']/@href").
                                         extract()[0].split('/')[-1]]
            else:
                relations[relationtype].append(itm.xpath("a[@class='title']/@href").
                                               extract()[0].split('/')[-1])
        # '单行本' ("tankobon volumes") are listed in a separate small-cover list.
        brouche = response.xpath(".//ul[@class='browserCoverSmall clearit']/li")
        if brouche:
            relations['单行本']=[itm.split('/')[-1] for itm in
                              brouche.xpath("a/@href").extract()]
        yield Subject(subjectid=subjectid,
                      subjecttype=subjecttype,
                      subjectname=subjectname,
                      order=order,
                      alias=alias,
                      staff=infobox,
                      relations=relations)
|
<gh_stars>1-10
#!/usr/bin/env python3
"""Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied."""
from netmiko import ConnectHandler
import json
from details import *
#open the file with the radio configuration information and save the contents to config_info
radio_file_path = config_file_dir + "/simulatedRadios.json"
with open(radio_file_path) as radio_file:
    config_info = json.load(radio_file)
#open the file with the access point information and save the contents to ap_info
ap_file_path = config_file_dir + "/accessPoints.json"
with open(ap_file_path) as ap_file:
    ap_info = json.load(ap_file)
#open the matrix that maps dBm power levels to Cisco power levels per model and channel
with open('powerByModelandChannel.json') as matrix_file:
    power_model_channel_matrix = json.load(matrix_file)
#these Boolean variables indicate whether to change the channel, change the
#power, or shut down the access point entirely; reset after every AP
change_channel = False
change_power = False
shutdown = False
ap_dict = {} #maps AP IDs to the name and model of the AP
channels24 = [1, 6, 11] #channels that mean the 2.4 GHz radio is being configured
matrix_keys = power_model_channel_matrix.keys() #the matrix keys are the AP models
#open connection with WLC through netmiko
with ConnectHandler(ip=ip_addr,
                    port=22,
                    username=username,
                    password=password,
                    device_type="cisco_wlc_ssh") as ch:
    #map each AP id from the Ekahau access-point file to its name and model
    for ap in ap_info["accessPoints"]:
        ap_dict[ap["id"]] = {
            "name": ap["name"],
            "model": ap["model"],
        }
    #walk the simulated-radio entries and push channel/power changes to the WLC
    for radio in config_info["simulatedRadios"]:
        #look up the AP name and model for this radio's AP id
        ap_id = radio["accessPointId"]
        ap_name = ap_dict[ap_id]["name"]
        ap_model = ap_dict[ap_id]["model"]
        #find the power/channel matrix for this AP model
        #NOTE(review): if several matrix keys match, the last match wins; if
        #none match, power_channel_matrix keeps its value from the previous AP
        #-- confirm every Ekahau model matches exactly one matrix key
        for model in matrix_keys:
            if model in ap_model:
                power_channel_matrix = power_model_channel_matrix[model]
        #check if a new channel is given in the Ekahau file
        if "channel" in radio:
            channel = radio["channel"][0] #channel value from the Ekahau file
            change_channel = True #the channel needs to be changed for this AP
            #power levels available for this AP at this channel
            power_matrix = power_channel_matrix[str(channel)]
            print("CHANNEL VALUE: {}".format(channel))
        #check if a new power level is given in the Ekahau file
        if "transmitPower" in radio:
            power = radio["transmitPower"]
            if power == 0.0:
                shutdown = True #power level 0 means the AP should be shut down
                print("AP will be shutdown")
            else:
                #pick the Cisco power level whose dBm value is closest to
                #the Ekahau value
                #NOTE(review): if this radio has no "channel" entry, channel
                #and power_matrix carry over from a previous radio (or are
                #undefined for the first one) -- confirm Ekahau always pairs
                #transmitPower with channel
                min_diff = 100
                #key is the Cisco power level, value is the level in dBm
                for key, value in power_matrix.items():
                    diff = abs(int(value) - int(power))
                    if diff < min_diff:
                        min_diff = diff
                        power_value = key
                print("POWER VALUE: {}".format(power_value))
                change_power = True
        #now apply the collected changes to the AP
        if change_channel:
            #make sure the AP is enabled before configuring it
            ch.send_command("ap name {} no shut".format(ap_name))
            if channel in channels24 and "5GHz" in ap_model:
                #2.4 GHz channel on an AP that also has a 5 GHz radio ->
                #dual-band form of the CLI command
                ch.send_command("ap name {} dot11 dual-band channel {}".format(ap_name, channel))
                print(ch.send_command("show ap dot11 dual-band summary"))
            elif channel in channels24:
                #plain 2.4 GHz radio
                #BUG FIX: CLI keyword is "dot11" (digits), not "dotll"
                ch.send_command("ap name {} dot11 24ghz channel {}".format(ap_name, channel))
                print(ch.send_command("show ap dot11 24ghz summary"))
            else:
                #channel outside the 2.4 GHz list -> 5 GHz radio
                ch.send_command("ap name {} dot11 5ghz channel {}".format(ap_name, channel))
                print(ch.send_command("show ap dot11 5ghz summary"))
        if change_power:
            #make sure the AP is enabled before configuring it
            ch.send_command("ap name {} no shut".format(ap_name))
            if channel in channels24 and "5GHz" in ap_model:
                ch.send_command("ap name {} dot11 dual-band txpower {}".format(ap_name, power_value))
                print(ch.send_command("show ap dot11 dual-band summary"))
            elif channel in channels24:
                ch.send_command("ap name {} dot11 24ghz txpower {}".format(ap_name, power_value))
                print(ch.send_command("show ap dot11 24ghz summary"))
            else:
                ch.send_command("ap name {} dot11 5ghz txpower {}".format(ap_name, power_value))
                print(ch.send_command("show ap dot11 5ghz summary"))
        if shutdown:
            ch.send_command("ap name {} shutdown".format(ap_name))
            print(ch.send_command("show ap status"))
        #reset the flags for the next AP
        change_channel = False
        change_power = False
        shutdown = False
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Location
import urllib.parse
class LocationViewTestCase(TestCase):
    """ location view tests """

    @classmethod
    def setUpTestData(cls):
        # location fixture shared by every test in this case
        Location.objects.create(location_name='location_1')
        # user fixture for the login-based tests
        User.objects.create_user(username='testuser_location', password='<PASSWORD>')

    def test_locations_list_not_logged_in(self):
        """ test list view """
        # anonymous access must redirect to the login page
        target = urllib.parse.quote('/locations/', safe='')
        response = self.client.get('/locations/', follow=True)
        self.assertRedirects(response, '/login/?next=' + target, status_code=302, target_status_code=200)

    def test_locations_list_logged_in(self):
        """ test list view """
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/')
        self.assertEqual(response.status_code, 200)

    def test_locations_list_template(self):
        """ test list view """
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/')
        self.assertTemplateUsed(response, 'dfirtrack_main/location/locations_list.html')

    def test_locations_list_get_user_context(self):
        """ test list view """
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/')
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_locations_detail_not_logged_in(self):
        """ test detail view """
        location_1 = Location.objects.get(location_name='location_1')
        detail_url = '/locations/' + str(location_1.location_id)
        target = urllib.parse.quote(detail_url, safe='')
        response = self.client.get(detail_url, follow=True)
        self.assertRedirects(response, '/login/?next=' + target, status_code=302, target_status_code=200)

    def test_locations_detail_logged_in(self):
        """ test detail view """
        location_1 = Location.objects.get(location_name='location_1')
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/' + str(location_1.location_id))
        self.assertEqual(response.status_code, 200)

    def test_locations_detail_template(self):
        """ test detail view """
        location_1 = Location.objects.get(location_name='location_1')
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/' + str(location_1.location_id))
        self.assertTemplateUsed(response, 'dfirtrack_main/location/locations_detail.html')

    def test_locations_detail_get_user_context(self):
        """ test detail view """
        location_1 = Location.objects.get(location_name='location_1')
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/' + str(location_1.location_id))
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_locations_add_not_logged_in(self):
        """ test add view """
        target = urllib.parse.quote('/locations/add/', safe='')
        response = self.client.get('/locations/add/', follow=True)
        self.assertRedirects(response, '/login/?next=' + target, status_code=302, target_status_code=200)

    def test_locations_add_logged_in(self):
        """ test add view """
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/add/')
        self.assertEqual(response.status_code, 200)

    def test_locations_add_template(self):
        """ test add view """
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/add/')
        self.assertTemplateUsed(response, 'dfirtrack_main/location/locations_add.html')

    def test_locations_add_get_user_context(self):
        """ test add view """
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/add/')
        self.assertEqual(str(response.context['user']), 'testuser_location')

    def test_locations_edit_not_logged_in(self):
        """ test edit view """
        location_1 = Location.objects.get(location_name='location_1')
        edit_url = '/locations/' + str(location_1.location_id) + '/edit/'
        target = urllib.parse.quote(edit_url, safe='')
        response = self.client.get(edit_url, follow=True)
        self.assertRedirects(response, '/login/?next=' + target, status_code=302, target_status_code=200)

    def test_locations_edit_logged_in(self):
        """ test edit view """
        location_1 = Location.objects.get(location_name='location_1')
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/' + str(location_1.location_id) + '/edit/')
        self.assertEqual(response.status_code, 200)

    def test_locations_edit_template(self):
        """ test edit view """
        location_1 = Location.objects.get(location_name='location_1')
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/' + str(location_1.location_id) + '/edit/')
        self.assertTemplateUsed(response, 'dfirtrack_main/location/locations_edit.html')

    def test_locations_edit_get_user_context(self):
        """ test edit view """
        location_1 = Location.objects.get(location_name='location_1')
        self.client.login(username='testuser_location', password='<PASSWORD>')
        response = self.client.get('/locations/' + str(location_1.location_id) + '/edit/')
        self.assertEqual(str(response.context['user']), 'testuser_location')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/20 10:48
# @Author : bxf
# @File : TRANSFORM_OPT.py
# @Software: PyCharm
from concurrent.futures import ThreadPoolExecutor
from model.util.newID import *
from model.util.PUB_RESP import *
from model.FUNC.GROUP_OPT import *
import threading
import time
'''
"case_exe_type": "执行类型(1-手工、2-自动化)"
"case_type": "用例类型(1-功能测试,2-性能测试,3-安全测试,4-接口测试,5-压力测试,6-其他)"
"case_exe_plus-in": "执行插件(1-Platform,2-JMeter,3-Appium,4-Python,5-UIAutomation,6-无)",
rqmt_task_type:1-手工,2-自动化(JMeter),3-自动化(Appium),4-性能,5-安全
手工 执行类型
自动化 执行插件
用例类型
先以插件分类 生成自动任务 按照插件分类
将 插件分类为 无的
按照用例类型分类 生成 性能 安全 接口 压力等任务
再将 功能测试和接口测试类型的数据结合 生成 手工用例
'''
class TRANSFORM_OPT:
    '''
    Base class that converts test cases (requirement / regression / core)
    into test tasks, grouping cases by their execution plug-in.
    '''
    def __init__(self, id, table,token):
        # id:    requirement id or group id, depending on `table`
        # table: source case table ('rqmt_case_info' / 'regress_case_info' /
        #        'core_case_info')
        # token: caller's auth token (kept for group lookups)
        self.id = id
        self.table = table
        self.token=token
        self.lock=threading.Lock()
        self.executor = ThreadPoolExecutor(max_workers=100)
    @staticmethod
    def _get_rqmt_desc(rqmt_id):
        '''
        Fetch the basic description of a requirement.
        :return: the rqmt_desc column for `rqmt_id`
        '''
        # NOTE(review): SQL is built by string concatenation -- vulnerable to
        # SQL injection if rqmt_id can come from untrusted input; should use
        # parameterized queries.
        sql = 'SELECT rqmt_desc FROM t_requirements_info WHERE rqmt_id="' + rqmt_id + '"'
        rqmt_desc = getJsonFromDatabase(sql)[0]['rqmt_desc']
        return rqmt_desc
    def transform_opt(self, **kwargs):
        '''
        Select all pending cases matching a single column filter.

        case_exe_type: execution type -- 1 manual, 2 automated
        case_exe_plugin: execution plug-in code:
            100 platform (manual test)
            200 API test (platform)
            201 API test (Python)
            202 API test (JMeter)
            301 PC UI test (Python)
            401 Android UI test (Appium)
            402 Android UI test (UIAutomation)
            501 iOS UI test (Appium)
            901 integrated test (RobotFramework)
        :return: rows from self.table whose case_exe_status is 1
        '''
        sql_doc = ''
        # Callers pass exactly one keyword filter; if several are passed,
        # only the last one survives this loop.
        for i in kwargs:
            col = i
            val = kwargs[i]
            # NOTE(review): string-built SQL, injection-prone -- see above.
            sql_doc = ' WHERE ' + col + ' like "' + str(val) + '%" and case_exe_status = 1'
        case_lists_sql = 'SELECT * FROM ' + self.table + ' ' + sql_doc + ' '
        case_lists = getJsonFromDatabase(case_lists_sql)
        return case_lists
    def insert_to_casetable(self, online_timea,group_id):
        # Pick the filter column by source table: requirement tables filter by
        # rqmt_id, regression/core tables filter by group_id.
        # NOTE(review): if self.table is none of the three known tables,
        # task_lists is never bound and the code below raises
        # UnboundLocalError -- confirm callers always pass a known table.
        if self.table == 'rqmt_case_info':
            online_time = ''
            task_lists = self.transform_opt(rqmt_id=self.id)
        elif self.table == 'regress_case_info':
            online_time = online_timea
            task_lists = self.transform_opt(group_id=group_id)
        elif self.table == 'core_case_info':
            online_time = online_timea
            task_lists = self.transform_opt(group_id=group_id)
        if task_lists:
            # One task per distinct execution plug-in; groupdict caches the
            # generated task id for each plug-in code.
            groupdict={}
            for i in task_lists:
                case_exe_plugin = i.get('case_exe_plugin')
                task_id=groupdict.get(str(case_exe_plugin))
                if not task_id:
                    # First case for this plug-in: create the task record.
                    task_id = newID().TK_ID()
                    groupdict[str(case_exe_plugin)]=task_id
                    self.insert_task_lists(task_id, case_exe_plugin, online_time, group_id)
                # Case/task link rows are inserted on the thread pool.
                self.executor.submit(self.insert_case, task_id, i)
        return_data = respdata().sucessMessage('', '任务转换完成,请查看!~')
        return json.dumps(return_data, ensure_ascii=False)
    def insert_task_lists(self, task_id, type, online_time,group_id):
        # Insert one task header row into the table matching self.table.
        try:
            if self.table == 'rqmt_case_info':
                rqmt_desc = TRANSFORM_OPT._get_rqmt_desc(self.id)
                rqmt_desc = rqmt_desc + '_需求测试用例'
                insert_sql_rqmt_task_info = 'INSERT INTO rqmt_task_info (rqmt_id,rqmt_task_id,rqmt_task_desc,rqmt_task_type,group_id) VALUES ("' + self.id + '" , "' + task_id + '" , "' + rqmt_desc + '" , "' + str(type)+'" , "' + str(group_id) + '" )'
            elif self.table == 'regress_case_info':
                rqmt_desc = '回归测试任务('+getGroupName(group_id)+')-'
                task_exe_env = '1'
                insert_sql_rqmt_task_info = 'INSERT INTO regress_task_info (task_id,task_desc,task_type,group_id,online_time,task_exe_env) VALUES ("' + task_id + '" , "' + rqmt_desc + '" , "' + str(
                    type) + '" , "' + str(group_id) + '" , "' + str(online_time) + '" , "' + task_exe_env + '" )'
            elif self.table == 'core_case_info':
                rqmt_desc = '核心测试任务('+getGroupName(group_id)+')-'
                task_exe_env = '1'
                insert_sql_rqmt_task_info = 'INSERT INTO core_task_info (task_id,task_desc,task_type,group_id,online_time,task_exe_env) VALUES ("' + task_id + '" , "' + rqmt_desc + '" , "' + str(
                    type) + '" , "' + str(group_id) + '" , "' + str(online_time) + '" , "' + task_exe_env + '" )'
            # The insert runs on its own thread; errors there are not
            # reported back to this caller.
            t = threading.Thread(target=DB_CONN().db_Update, args=(insert_sql_rqmt_task_info,))
            t.start()
            return True
        except Exception as e:
            # exeLog message: "transform error!! please check, error:"
            exeLog("**********转换错误!!请检查,错误代码:" + str(e))
            return False
    def insert_case(self, task_id, case_list):
        '''
        Insert one task-to-case link row into the database.
        :param case_list: one case row dict (must contain 'case_id')
        :return: True on success, False on failure
        '''
        try:
            case_id = case_list['case_id']
            insert_sql_task_to_case = 'INSERT INTO t_task_to_case (task_id,case_id) VALUES ("' + task_id + '","' + case_id + '" )'
            # Serialize the DB writes issued from the thread pool.
            with self.lock:
                DB_CONN().db_Update(insert_sql_task_to_case)
            return_data = respdata().sucessMessage('', '任务转换成功')
            return True
        except Exception as e:
            exeLog("**********转换错误!!请检查,错误代码:" + str(e))
            return False
|
<gh_stars>1-10
#!/usr/bin/env python
"""
Provides simple 'get()' interface for accessing default value overrides
Checks environment variables first, then chplconfig file for definitions
"""
import os
import sys
chplenv_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(chplenv_dir))
from utils import memoize
# List of Chapel Environment Variables
# Only variables named here may be overridden through a chplconfig file;
# anything else is rejected with a warning by ChapelConfig.skip_line.
chplvars = [
    'CHPL_HOME',
    'CHPL_HOST_PLATFORM',
    'CHPL_HOST_COMPILER',
    'CHPL_TARGET_PLATFORM',
    'CHPL_TARGET_COMPILER',
    'CHPL_TARGET_ARCH',
    'CHPL_LOCALE_MODEL',
    'CHPL_COMM',
    'CHPL_COMM_SUBSTRATE',
    'CHPL_GASNET_SEGMENT',
    'CHPL_TASKS',
    'CHPL_LAUNCHER',
    'CHPL_TIMERS',
    'CHPL_UNWIND',
    'CHPL_MEM',
    'CHPL_MAKE',
    'CHPL_ATOMICS',
    'CHPL_NETWORK_ATOMICS',
    'CHPL_GMP',
    'CHPL_HWLOC',
    'CHPL_REGEXP',
    'CHPL_WIDE_POINTERS',
    'CHPL_LLVM',
    'CHPL_AUX_FILESYS',
]
class ChapelConfig(object):
    """ Class for parsing chplconfig file and providing 'get' interface """

    def __init__(self):
        """ Find and parse chplconfig file, populating self.chplconfig """
        # Env-var overrides read from the chplconfig file
        self.chplconfig = {}
        # Accumulated warning messages, printed once at the end
        self.warnings = []
        self.chplconfigfile = None
        self.prettypath = None
        # Locate the chplconfig file, then parse it if one was found
        self.find()
        if self.chplconfigfile:
            self.parse()
        if self.warnings:
            self.printwarnings()

    def get(self, var):
        """ Wrapper for chplconfig[var] intended to mirror os.environ.get() """
        return self.chplconfig.get(var)

    def find(self):
        """ Find chplconfig file path """
        # Candidate locations, in order of priority
        candidates = [
            (os.environ.get('CHPL_CONFIG'), 'CHPL_CONFIG'),
            (os.path.expanduser('~'), '~'),
            (os.environ.get('CHPL_HOME'), 'CHPL_HOME'),
        ]
        for path, name in candidates:
            if self.chplconfig_found(path, name):
                return
        # No chplconfig file was found anywhere
        self.chplconfigfile = None

    def chplconfig_found(self, path, name):
        """ Check path for chplconfig and set chplconfigfile & prettypath """
        if not path:
            self.chplconfigfile = None
            return False
        # Check both the visible and the hidden file name
        for basename in ('chplconfig', '.chplconfig'):
            candidate = os.path.join(path, basename)
            if os.path.isfile(candidate):
                self.chplconfigfile = candidate
                self.prettypath = '$' + name + '/' + basename
                return True
        self.chplconfigfile = None
        # An explicitly set $CHPL_CONFIG that lacks the file deserves a warning
        if name == 'CHPL_CONFIG':
            self.warnings.append(
                (
                'Warning: No chplconfig or .chplconfig file is found in '
                'the defined $CHPL_CONFIG\n'
                ))
        return False

    def parse(self):
        """ Parse chplconfig file for acceptable env var overrides """
        with open(self.chplconfigfile, 'r') as ccfile:
            for linenum, line in enumerate(ccfile):
                # Drop comments and surrounding whitespace
                line = line.split('#')[0].strip()
                if self.skip_line(line, linenum):
                    continue
                var, val = (f.strip() for f in line.split('='))
                self.chplconfig[var] = val

    def skip_line(self, line, linenum):
        """
        Check the various conditions for skipping a line, accumulate warnings.
        """
        # Blank (or comment-only) line
        if not line:
            return True
        # Malformed assignment -> syntax error warning
        try:
            var, val = [f.strip() for f in line.split('=')]
        except ValueError:
            self.warnings.append(
                (
                'Syntax Error: {0}:line {1}\n'
                ' > {2}\n'
                ' Expected format is:\n'
                ' > CHPL_VAR = VALUE\n'
                ).format(self.prettypath, linenum, line.strip('\n')))
            return True
        # Unknown variable -> warn and skip
        if var not in chplvars:
            self.warnings.append(
                (
                'Warning: {0}:line {1}: '
                '"{2}" is not an acceptable variable\n'
                ).format(self.prettypath, linenum, var))
            return True
        # Duplicate entry -> warn, but keep the line (later value overwrites)
        if var in self.chplconfig:
            self.warnings.append(
                (
                'Warning: {0}:line {1}: '
                'Duplicate entry of "{2}"\n'
                ).format(self.prettypath, linenum, var))
        return False

    def printwarnings(self):
        """ Print any warnings accumulated throughout constructor """
        sys.stderr.write('\n')
        sys.stderr.writelines(self.warnings)
        sys.stderr.write('\n')
# Global instance that contains parsed chplconfig assignments
chplconfig = ChapelConfig()

@memoize
def get(var, default=None):
    """ Check if variable has a default defined somewhere and return value """
    # Priority: environment variable, then chplconfig file, then caller default
    env_value = os.environ.get(var)
    if env_value:
        return env_value
    config_value = chplconfig.get(var)
    if config_value:
        return config_value
    return default
def allvars():
    """ Generate overrides currently set via environment/chplconfig """
    for var in chplvars:
        value = get(var)
        if value:
            yield '{0}={1}'.format(var, value)
def _main():
    """Print overrides that are currently set via environment/chplconfig."""
    out = sys.stdout
    for override in allvars():
        out.write(override)
        out.write('\n')
# Allow running this module directly to list the active overrides.
if __name__ == '__main__':
    _main()
|
<reponame>netsec/cinder<filename>cinder/volume/drivers/kaminario/kaminario_common.py<gh_stars>0
# Copyright (c) 2016 by Kaminario Technologies, Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for Kaminario K2 all-flash arrays."""
import math
import re
import threading
import time
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import requests
import six
import cinder
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.san import san
from cinder.volume import utils as vol_utils
krest = importutils.try_import("krest")
K2_MIN_VERSION = '2.2.0'
K2_LOCK_PREFIX = 'Kaminario'
MAX_K2_RETRY = 5
K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER
LOG = logging.getLogger(__name__)
kaminario_opts = [
cfg.BoolOpt('auto_calc_max_oversubscription_ratio',
default=False,
help="K2 driver will calculate max_oversubscription_ratio "
"on setting this option as True."),
cfg.BoolOpt('unique_fqdn_network',
default=True,
help="Whether or not our private network has unique FQDN on "
"each initiator or not. For example networks with QA "
"systems usually have multiple servers/VMs with the same "
"FQDN. When true this will create host entries on K2 "
"using the FQDN, when false it will use the reversed "
"IQN/WWNN."),
cfg.BoolOpt('disable_discovery',
default=False,
help="Disabling iSCSI discovery (sendtargets) for multipath "
"connections on K2 driver."),
]
CONF = cfg.CONF
CONF.register_opts(kaminario_opts, group=configuration.SHARED_CONF_GROUP)
K2HTTPError = requests.exceptions.HTTPError
K2_RETRY_ERRORS = ("MC_ERR_BUSY", "MC_ERR_BUSY_SPECIFIC",
"MC_ERR_INPROGRESS", "MC_ERR_START_TIMEOUT")
class KaminarioCinderDriverException(exception.VolumeDriverException):
    """Generic (non-retryable) Kaminario K2 driver failure."""
    message = _("KaminarioCinderDriver failure: %(reason)s")
class KaminarioRetryableException(exception.VolumeDriverException):
    """Transient K2 error; raised so utils.retry re-issues the request."""
    message = _("Kaminario retryable exception: %(reason)s")
if krest:
    class KrestWrap(krest.EndPoint):
        """krest.EndPoint wrapper that serializes requests and retries
        transient K2 errors.

        Every REST request is issued under a per-endpoint lock, and HTTP
        400 responses carrying one of the K2_RETRY_ERRORS codes are
        translated into KaminarioRetryableException so the utils.retry
        decorator re-issues them (up to MAX_K2_RETRY times).
        """

        def __init__(self, *args, **kwargs):
            # Create the lock before the base class initializes so
            # _request is safe as soon as construction finishes.
            self.krestlock = threading.Lock()
            super(KrestWrap, self).__init__(*args, **kwargs)

        def _should_retry(self, err_code, err_msg):
            """Return True when the error is a known-transient K2 error."""
            if err_code == 400:
                for er in K2_RETRY_ERRORS:
                    if er in err_msg:
                        LOG.debug("Retry ERROR: %d with status %s",
                                  err_code, err_msg)
                        return True
            return False

        @utils.retry(KaminarioRetryableException,
                     retries=MAX_K2_RETRY)
        def _request(self, method, *args, **kwargs):
            """Issue one REST request under the endpoint lock.

            Raises KaminarioRetryableException for transient errors (which
            utils.retry turns into another attempt) and re-raises any other
            HTTP error unchanged.
            """
            # Fix: the original acquired the lock *inside* the try block
            # with the release in finally, so a failed acquire would have
            # released an unheld lock.  'with' only releases what it
            # actually acquired.
            with self.krestlock:
                try:
                    return super(KrestWrap, self)._request(method,
                                                           *args, **kwargs)
                except K2HTTPError as err:
                    err_code = err.response.status_code
                    err_msg = err.response.text
                    if self._should_retry(err_code, err_msg):
                        raise KaminarioRetryableException(
                            reason=six.text_type(err_msg))
                    raise
class Replication(object):
    """Holds the replication_device settings for the secondary K2 array."""

    def __init__(self, config, *args, **kwargs):
        # Copy the relevant keys off the replication_device dict; any
        # missing key becomes None.
        for key in ('backend_id', 'login', 'password', 'rpo'):
            setattr(self, key, config.get(key))
class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
VENDOR = "Kaminario"
stats = {}
    def __init__(self, *args, **kwargs):
        """Register SAN + Kaminario options and build the per-array lock name."""
        super(KaminarioCinderDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(san.san_opts)
        self.configuration.append_config_values(kaminario_opts)
        # replica is populated later by _check_ops(); _protocol is
        # presumably set by protocol-specific subclasses — confirm.
        self.replica = None
        self._protocol = None
        # One lock name per array (keyed by management IP) so concurrent
        # operations against the same K2 can serialize.
        k2_lock_sfx = self.configuration.safe_get('san_ip')
        self.k2_lock_name = "%s-%s" % (K2_LOCK_PREFIX, k2_lock_sfx)
    @staticmethod
    def get_driver_options():
        """Return the list of driver-specific configuration options."""
        return kaminario_opts
    @utils.trace
    def check_for_setup_error(self):
        """Validate setup: krest importable and REST API version >= minimum.

        Builds self.client (and self.target when replication is configured)
        as a side effect.  Raises KaminarioCinderDriverException when krest
        is missing, the version lookup fails, or the version is too old.
        """
        if krest is None:
            msg = _("Unable to import 'krest' python module.")
            LOG.error(msg)
            raise KaminarioCinderDriverException(reason=msg)
        else:
            conf = self.configuration
            self.client = KrestWrap(conf.san_ip,
                                    conf.san_login,
                                    conf.san_password,
                                    ssl_validate=False)
            # Secondary-array endpoint, only when replication is configured.
            if self.replica:
                self.target = KrestWrap(self.replica.backend_id,
                                        self.replica.login,
                                        self.replica.password,
                                        ssl_validate=False)
            v_rs = self.client.search("system/state")
            if hasattr(v_rs, 'hits') and v_rs.total != 0:
                ver = v_rs.hits[0].rest_api_version
                # Compare as integers so e.g. '2.10.0' sorts after '2.2.0'.
                ver_exist = versionutils.convert_version_to_int(ver)
                ver_min = versionutils.convert_version_to_int(K2_MIN_VERSION)
                if ver_exist < ver_min:
                    msg = _("K2 REST API version should be "
                            ">= %s.") % K2_MIN_VERSION
                    LOG.error(msg)
                    raise KaminarioCinderDriverException(reason=msg)
            else:
                msg = _("K2 REST API version search failed.")
                LOG.error(msg)
                raise KaminarioCinderDriverException(reason=msg)
def _check_ops(self):
"""Ensure that the options we care about are set."""
required_ops = ['san_ip', 'san_login', 'san_password']
for attr in required_ops:
if not getattr(self.configuration, attr, None):
raise exception.InvalidInput(reason=_('%s is not set.') % attr)
replica = self.configuration.safe_get('replication_device')
if replica and isinstance(replica, list):
replica_ops = ['backend_id', 'login', 'password', '<PASSWORD>']
for attr in replica_ops:
if attr not in replica[0]:
msg = _('replication_device %s is not set.') % attr
raise exception.InvalidInput(reason=msg)
self.replica = Replication(replica[0])
    @utils.trace
    def do_setup(self, context):
        """Run base-class setup, then validate the driver options."""
        super(KaminarioCinderDriver, self).do_setup(context)
        self._check_ops()
    @utils.trace
    def create_volume(self, volume):
        """Volume creation in K2 needs a volume group.

        - create a volume group
        - create a volume in the volume group

        On failure the just-created volume group is rolled back.  If the
        volume type requests replication, a replica session is created too.
        """
        vg_name = self.get_volume_group_name(volume.id)
        vol_name = self.get_volume_name(volume.id)
        prov_type = self._get_is_dedup(volume.get('volume_type'))
        try:
            LOG.debug("Creating volume group with name: %(name)s, "
                      "quota: unlimited and dedup_support: %(dedup)s",
                      {'name': vg_name, 'dedup': prov_type})
            # quota=0 means unlimited (per the log message above).
            vg = self.client.new("volume_groups", name=vg_name, quota=0,
                                 is_dedup=prov_type).save()
            LOG.debug("Creating volume with name: %(name)s, size: %(size)s "
                      "GB, volume_group: %(vg)s",
                      {'name': vol_name, 'size': volume.size, 'vg': vg_name})
            # Size is volume.size (GB) scaled by units.Mi — presumably the
            # K2 API expects KB; confirm (extend_volume uses the same scale).
            vol = self.client.new("volumes", name=vol_name,
                                  size=volume.size * units.Mi,
                                  volume_group=vg).save()
        except Exception as ex:
            # Roll back the volume group if the volume create failed.
            vg_rs = self.client.search("volume_groups", name=vg_name)
            if vg_rs.total != 0:
                LOG.debug("Deleting vg: %s for failed volume in K2.", vg_name)
                vg_rs.hits[0].delete()
            LOG.exception("Creation of volume %s failed.", vol_name)
            raise KaminarioCinderDriverException(reason=ex)
        if self._get_is_replica(volume.volume_type) and self.replica:
            self._create_volume_replica(volume, vg, vol, self.replica.rpo)
    @utils.trace
    def _create_volume_replica(self, volume, vg, vol, rpo):
        """Volume replica creation in K2 needs session and remote volume.

        - create a replication session to the peer array
        - create the peer (remote) volume in that session

        On any failure, every object created on either array is removed
        (best effort) before re-raising.
        """
        session_name = self.get_session_name(volume.id)
        rsession_name = self.get_rep_name(session_name)
        rvg_name = self.get_rep_name(vg.name)
        rvol_name = self.get_rep_name(vol.name)
        # Locate the peer-array object pointing at the replica backend.
        k2peer_rs = self.client.search("replication/peer_k2arrays",
                                       mgmt_host=self.replica.backend_id)
        if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0:
            k2peer = k2peer_rs.hits[0]
        else:
            msg = _("Unable to find K2peer in source K2:")
            LOG.error(msg)
            raise KaminarioCinderDriverException(reason=msg)
        try:
            LOG.debug("Creating source session with name: %(sname)s and "
                      " target session name: %(tname)s",
                      {'sname': session_name, 'tname': rsession_name})
            src_ssn = self.client.new("replication/sessions")
            src_ssn.replication_peer_k2array = k2peer
            src_ssn.auto_configure_peer_volumes = "False"
            src_ssn.local_volume_group = vg
            src_ssn.replication_peer_volume_group_name = rvg_name
            src_ssn.remote_replication_session_name = rsession_name
            src_ssn.name = session_name
            src_ssn.rpo = rpo
            src_ssn.save()
            LOG.debug("Creating remote volume with name: %s",
                      rvol_name)
            self.client.new("replication/peer_volumes",
                            local_volume=vol,
                            name=rvol_name,
                            replication_session=src_ssn).save()
            # Start replicating.
            src_ssn.state = "in_sync"
            src_ssn.save()
        except Exception as ex:
            LOG.exception("Replication for the volume %s has "
                          "failed.", vol.name)
            # Best-effort cleanup on both arrays, most-dependent first.
            self._delete_by_ref(self.client, "replication/sessions",
                                session_name, 'session')
            self._delete_by_ref(self.target, "replication/sessions",
                                rsession_name, 'remote session')
            self._delete_by_ref(self.target, "volumes",
                                rvol_name, 'remote volume')
            self._delete_by_ref(self.client, "volumes", vol.name, "volume")
            self._delete_by_ref(self.target, "volume_groups",
                                rvg_name, "remote vg")
            self._delete_by_ref(self.client, "volume_groups", vg.name, "vg")
            raise KaminarioCinderDriverException(reason=ex)
    @utils.trace
    def _create_failover_volume_replica(self, volume, vg_name, vol_name):
        """Volume replica creation in K2 needs session and remote volume.

        Mirror of _create_volume_replica, but driven from the *target*
        array while failed over: the session is created on self.target and
        the original (primary-side) names become the "remote" ones.
        """
        session_name = self.get_session_name(volume.id)
        rsession_name = self.get_rep_name(session_name)
        rvg_name = self.get_rep_name(vg_name)
        rvol_name = self.get_rep_name(vol_name)
        rvg = self.target.search("volume_groups", name=rvg_name).hits[0]
        rvol = self.target.search("volumes", name=rvol_name).hits[0]
        # Peer lookup is reversed: from the target array back to primary.
        k2peer_rs = self.target.search("replication/peer_k2arrays",
                                       mgmt_host=self.configuration.san_ip)
        if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0:
            k2peer = k2peer_rs.hits[0]
        else:
            msg = _("Unable to find K2peer in source K2:")
            LOG.error(msg)
            raise KaminarioCinderDriverException(reason=msg)
        try:
            LOG.debug("Creating source session with name: %(sname)s and "
                      " target session name: %(tname)s",
                      {'sname': rsession_name, 'tname': session_name})
            tgt_ssn = self.target.new("replication/sessions")
            tgt_ssn.replication_peer_k2array = k2peer
            tgt_ssn.auto_configure_peer_volumes = "False"
            tgt_ssn.local_volume_group = rvg
            tgt_ssn.replication_peer_volume_group_name = vg_name
            tgt_ssn.remote_replication_session_name = session_name
            tgt_ssn.name = rsession_name
            tgt_ssn.rpo = self.replica.rpo
            tgt_ssn.save()
            LOG.debug("Creating remote volume with name: %s",
                      rvol_name)
            self.target.new("replication/peer_volumes",
                            local_volume=rvol,
                            name=vol_name,
                            replication_session=tgt_ssn).save()
            # Start replicating.
            tgt_ssn.state = "in_sync"
            tgt_ssn.save()
        except Exception as ex:
            LOG.exception("Replication for the volume %s has "
                          "failed.", rvol_name)
            # Best-effort cleanup of everything created above.
            self._delete_by_ref(self.target, "replication/sessions",
                                rsession_name, 'session')
            self._delete_by_ref(self.client, "replication/sessions",
                                session_name, 'remote session')
            self._delete_by_ref(self.client, "volumes", vol_name, "volume")
            self._delete_by_ref(self.client, "volume_groups", vg_name, "vg")
            raise KaminarioCinderDriverException(reason=ex)
@utils.trace
def _delete_by_ref(self, device, url, name, msg):
rs = device.search(url, name=name)
for result in rs.hits:
result.delete()
LOG.debug("Deleting %(msg)s: %(name)s", {'msg': msg, 'name': name})
    @utils.trace
    def _failover_volume(self, volume):
        """Promoting a secondary volume to primary volume."""
        session_name = self.get_session_name(volume.id)
        rsession_name = self.get_rep_name(session_name)
        tgt_ssn = self.target.search("replication/sessions",
                                     name=rsession_name).hits[0]
        # Only an in-sync session can be failed over; otherwise leave as-is.
        if tgt_ssn.state == 'in_sync':
            tgt_ssn.state = 'failed_over'
            tgt_ssn.save()
            LOG.debug("The target session: %s state is "
                      "changed to failed_over ", rsession_name)
    @utils.trace
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover to replication target.

        Handles both directions:

        - failback (the service is already failed over to the secondary
          array): re-sync each replicated volume from the secondary, take a
          user snapshot to confirm the sync, then flip the source sessions
          so the primary becomes active again;
        - failover: promote each volume's secondary replica.

        Returns (active_backend_ip, per-volume model updates, []).
        """
        volume_updates = []
        back_end_ip = None
        svc_host = vol_utils.extract_host(self.host, 'backend')
        service = objects.Service.get_by_args(context, svc_host,
                                              'cinder-volume')
        if secondary_id and secondary_id != self.replica.backend_id:
            LOG.error("Kaminario driver received failover_host "
                      "request, But backend is non replicated device")
            raise exception.UnableToFailOver(reason=_("Failover requested "
                                                      "on non replicated "
                                                      "backend."))
        # An active_backend_id different from our san_ip means we are
        # currently running on the secondary: this request is a failback.
        if (service.active_backend_id and
                service.active_backend_id != self.configuration.san_ip):
            self.snap_updates = []
            rep_volumes = []
            # update status for non-replicated primary volumes
            for v in volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                if v.replication_status != K2_REP_FAILED_OVER and vol.total:
                    status = 'available'
                    if v.volume_attachment:
                        map_rs = self.client.search("mappings",
                                                    volume=vol.hits[0])
                        status = 'in-use'
                        if map_rs.total:
                            map_rs.hits[0].delete()
                    volume_updates.append({'volume_id': v['id'],
                                           'updates':
                                           {'status': status}})
                else:
                    rep_volumes.append(v)
            # Re-sync from the secondary array back to the primary array.
            # NOTE(review): tgt_ssn/src_ssn are only bound when the session
            # searches return hits; a miss would raise NameError below —
            # upstream behavior, confirm before restructuring.
            for v in rep_volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                rvol_name = self.get_rep_name(vol_name)
                rvol = self.target.search("volumes", name=rvol_name)
                session_name = self.get_session_name(v['id'])
                rsession_name = self.get_rep_name(session_name)
                ssn = self.target.search("replication/sessions",
                                         name=rsession_name)
                if ssn.total:
                    tgt_ssn = ssn.hits[0]
                ssn = self.client.search("replication/sessions",
                                         name=session_name)
                if ssn.total:
                    src_ssn = ssn.hits[0]
                if (tgt_ssn.state == 'failed_over' and
                        tgt_ssn.current_role == 'target' and vol.total and src_ssn):
                    map_rs = self.client.search("mappings", volume=vol.hits[0])
                    if map_rs.total:
                        map_rs.hits[0].delete()
                    tgt_ssn.state = 'in_sync'
                    tgt_ssn.save()
                    self._check_for_status(src_ssn, 'in_sync')
                if (rvol.total and src_ssn.state == 'in_sync' and
                        src_ssn.current_role == 'target'):
                    gen_no = self._create_volume_replica_user_snap(self.target,
                                                                   tgt_ssn)
                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
                                              'gno': gen_no,
                                              'stime': time.time()})
                LOG.debug("The target session: %s state is "
                          "changed to in sync", rsession_name)
            self._is_user_snap_sync_finished()
            # Delete secondary volume mappings and create snapshot
            for v in rep_volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                rvol_name = self.get_rep_name(vol_name)
                rvol = self.target.search("volumes", name=rvol_name)
                session_name = self.get_session_name(v['id'])
                rsession_name = self.get_rep_name(session_name)
                ssn = self.target.search("replication/sessions",
                                         name=rsession_name)
                if ssn.total:
                    tgt_ssn = ssn.hits[0]
                ssn = self.client.search("replication/sessions",
                                         name=session_name)
                if ssn.total:
                    src_ssn = ssn.hits[0]
                if (rvol.total and src_ssn.state == 'in_sync' and
                        src_ssn.current_role == 'target'):
                    map_rs = self.target.search("mappings",
                                                volume=rvol.hits[0])
                    if map_rs.total:
                        map_rs.hits[0].delete()
                    gen_no = self._create_volume_replica_user_snap(self.target,
                                                                   tgt_ssn)
                    self.snap_updates.append({'tgt_ssn': tgt_ssn,
                                              'gno': gen_no,
                                              'stime': time.time()})
            self._is_user_snap_sync_finished()
            # changing source sessions to failed-over
            for v in rep_volumes:
                vol_name = self.get_volume_name(v['id'])
                vol = self.client.search("volumes", name=vol_name)
                rvol_name = self.get_rep_name(vol_name)
                rvol = self.target.search("volumes", name=rvol_name)
                session_name = self.get_session_name(v['id'])
                rsession_name = self.get_rep_name(session_name)
                ssn = self.target.search("replication/sessions",
                                         name=rsession_name)
                if ssn.total:
                    tgt_ssn = ssn.hits[0]
                ssn = self.client.search("replication/sessions",
                                         name=session_name)
                if ssn.total:
                    src_ssn = ssn.hits[0]
                if (rvol.total and src_ssn.state == 'in_sync' and
                        src_ssn.current_role == 'target'):
                    src_ssn.state = 'failed_over'
                    src_ssn.save()
                    self._check_for_status(tgt_ssn, 'suspended')
                    LOG.debug("The target session: %s state is "
                              "changed to failed over", session_name)
                    src_ssn.state = 'in_sync'
                    src_ssn.save()
                    LOG.debug("The target session: %s state is "
                              "changed to in sync", session_name)
                rep_status = fields.ReplicationStatus.DISABLED
                volume_updates.append({'volume_id': v['id'],
                                       'updates':
                                       {'replication_status': rep_status}})
            back_end_ip = self.configuration.san_ip
        else:
            """Failover to replication target."""
            for v in volumes:
                vol_name = self.get_volume_name(v['id'])
                rv = self.get_rep_name(vol_name)
                if self.target.search("volumes", name=rv).total:
                    self._failover_volume(v)
                    volume_updates.append(
                        {'volume_id': v['id'],
                         'updates':
                         {'replication_status': K2_REP_FAILED_OVER}})
                else:
                    # No replica exists for this volume: mark it errored.
                    volume_updates.append({'volume_id': v['id'],
                                           'updates': {'status': 'error', }})
            back_end_ip = self.replica.backend_id
        return back_end_ip, volume_updates, []
@utils.trace
def _create_volume_replica_user_snap(self, k2, sess):
snap = k2.new("snapshots")
snap.is_application_consistent = "False"
snap.replication_session = sess
snap.save()
return snap.generation_number
    def _is_user_snap_sync_finished(self):
        """Block until every queued user snapshot has finished syncing.

        Polls each entry in self.snap_updates once per second; an entry is
        dropped once its generation number matches, progress is 100% and
        the current snapshot id is cleared.  Entries stuck past 300 seconds
        get a fresh snapshot queued in their place.

        NOTE(review): self.snap_updates is appended to and removed from
        while it is being iterated; this mirrors upstream behavior —
        confirm before restructuring.
        """
        # waiting for user snapshot to be synced
        while len(self.snap_updates) > 0:
            for l in self.snap_updates:
                sess = l.get('tgt_ssn')
                gno = l.get('gno')
                stime = l.get('stime')
                sess.refresh()
                if (sess.generation_number == gno and
                        sess.current_snapshot_progress == 100
                        and sess.current_snapshot_id is None):
                    if time.time() - stime > 300:
                        # Timed out: re-issue a snapshot and keep waiting.
                        gen_no = self._create_volume_replica_user_snap(
                            self.target,
                            sess)
                        self.snap_updates.append({'tgt_ssn': sess,
                                                  'gno': gen_no,
                                                  'stime': time.time()})
                    self.snap_updates.remove(l)
                eventlet.sleep(1)
    @utils.trace
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create volume from snapshot.

        - search for snapshot and retention_policy
        - create a view from snapshot and attach view
        - create a volume and attach volume
        - copy data from attached view to attached volume
        - detach volume and view and finally delete view
        """
        snap_name = self.get_snap_name(snapshot.id)
        view_name = self.get_view_name(volume.id)
        vol_name = self.get_volume_name(volume.id)
        cview = src_attach_info = dest_attach_info = None
        rpolicy = self.get_policy()
        properties = utils.brick_get_connector_properties()
        LOG.debug("Searching for snapshot: %s in K2.", snap_name)
        snap_rs = self.client.search("snapshots", short_name=snap_name)
        if hasattr(snap_rs, 'hits') and snap_rs.total != 0:
            snap = snap_rs.hits[0]
            LOG.debug("Creating a view: %(view)s from snapshot: %(snap)s",
                      {'view': view_name, 'snap': snap_name})
            try:
                # A view is an exposable copy of the snapshot we can attach.
                cview = self.client.new("snapshots",
                                        short_name=view_name,
                                        source=snap, retention_policy=rpolicy,
                                        is_exposable=True).save()
            except Exception as ex:
                LOG.exception("Creating a view: %(view)s from snapshot: "
                              "%(snap)s failed", {"view": view_name,
                                                  "snap": snap_name})
                raise KaminarioCinderDriverException(reason=ex)
        else:
            msg = _("Snapshot: %s search failed in K2.") % snap_name
            LOG.error(msg)
            raise KaminarioCinderDriverException(reason=msg)
        try:
            conn = self.initialize_connection(cview, properties)
            src_attach_info = self._connect_device(conn)
            self.create_volume(volume)
            conn = self.initialize_connection(volume, properties)
            dest_attach_info = self._connect_device(conn)
            # Host-side block copy from the attached view to the new volume.
            vol_utils.copy_volume(src_attach_info['device']['path'],
                                  dest_attach_info['device']['path'],
                                  snapshot.volume.size * units.Ki,
                                  self.configuration.volume_dd_blocksize,
                                  sparse=True)
            self._kaminario_disconnect_volume(src_attach_info,
                                              dest_attach_info)
            self.terminate_connection(volume, properties)
            self.terminate_connection(cview, properties)
            cview.delete()
        except Exception as ex:
            # Undo attachments, the temporary view and the new volume.
            self._kaminario_disconnect_volume(src_attach_info,
                                              dest_attach_info)
            self.terminate_connection(cview, properties)
            self.terminate_connection(volume, properties)
            cview.delete()
            self.delete_volume(volume)
            LOG.exception("Copy to volume: %(vol)s from view: %(view)s "
                          "failed", {"vol": vol_name, "view": view_name})
            raise KaminarioCinderDriverException(reason=ex)
    @utils.trace
    def create_cloned_volume(self, volume, src_vref):
        """Create a clone from source volume.

        - attach source volume
        - create and attach new volume
        - copy data from attached source volume to attached new volume
        - detach both volumes

        Cloning an attached source volume is rejected.
        """
        clone_name = self.get_volume_name(volume.id)
        src_name = self.get_volume_name(src_vref.id)
        src_vol = self.client.search("volumes", name=src_name)
        src_map = self.client.search("mappings", volume=src_vol)
        src_attach_info = dest_attach_info = None
        if src_map.total != 0:
            msg = _("K2 driver does not support clone of an attached volume. "
                    "To get this done, create a snapshot from the attached "
                    "volume and then create a volume from the snapshot.")
            LOG.error(msg)
            raise KaminarioCinderDriverException(reason=msg)
        try:
            properties = utils.brick_get_connector_properties()
            conn = self.initialize_connection(src_vref, properties)
            src_attach_info = self._connect_device(conn)
            self.create_volume(volume)
            conn = self.initialize_connection(volume, properties)
            dest_attach_info = self._connect_device(conn)
            # Host-side block copy from the source to the clone.
            vol_utils.copy_volume(src_attach_info['device']['path'],
                                  dest_attach_info['device']['path'],
                                  src_vref.size * units.Ki,
                                  self.configuration.volume_dd_blocksize,
                                  sparse=True)
            self._kaminario_disconnect_volume(src_attach_info,
                                              dest_attach_info)
            self.terminate_connection(volume, properties)
            self.terminate_connection(src_vref, properties)
        except Exception as ex:
            # Undo attachments and remove the half-built clone.
            self._kaminario_disconnect_volume(src_attach_info,
                                              dest_attach_info)
            self.terminate_connection(src_vref, properties)
            self.terminate_connection(volume, properties)
            self.delete_volume(volume)
            LOG.exception("Create a clone: %s failed.", clone_name)
            raise KaminarioCinderDriverException(reason=ex)
    @utils.trace
    def delete_volume(self, volume):
        """Volume in K2 exists in a volume group.

        - delete the volume
        - delete the corresponding volume group
        """
        vg_name = self.get_volume_group_name(volume.id)
        vol_name = self.get_volume_name(volume.id)
        try:
            # Tear down replication first, while the session still exists.
            if self._get_is_replica(volume.volume_type) and self.replica:
                self._delete_volume_replica(volume, vg_name, vol_name)
            LOG.debug("Searching and deleting volume: %s in K2.", vol_name)
            vol_rs = self.client.search("volumes", name=vol_name)
            if vol_rs.total != 0:
                vol_rs.hits[0].delete()
            LOG.debug("Searching and deleting vg: %s in K2.", vg_name)
            vg_rs = self.client.search("volume_groups", name=vg_name)
            if vg_rs.total != 0:
                vg_rs.hits[0].delete()
        except Exception as ex:
            LOG.exception("Deletion of volume %s failed.", vol_name)
            raise KaminarioCinderDriverException(reason=ex)
    @utils.trace
    def _delete_volume_replica(self, volume, vg_name, vol_name):
        """Tear down replication for *volume*.

        Walks the source session through suspended -> idle (waiting for the
        target to follow), deletes both sessions, removes replication
        snapshots on both arrays, then deletes the remote volume and
        volume group.
        """
        rvg_name = self.get_rep_name(vg_name)
        rvol_name = self.get_rep_name(vol_name)
        session_name = self.get_session_name(volume.id)
        rsession_name = self.get_rep_name(session_name)
        src_ssn = self.client.search('replication/sessions',
                                     name=session_name).hits[0]
        tgt_ssn = self.target.search('replication/sessions',
                                     name=rsession_name).hits[0]
        # A session must be idle before it can be deleted.
        src_ssn.state = 'suspended'
        src_ssn.save()
        self._check_for_status(tgt_ssn, 'suspended')
        src_ssn.state = 'idle'
        src_ssn.save()
        self._check_for_status(tgt_ssn, 'idle')
        tgt_ssn.delete()
        src_ssn.delete()
        LOG.debug("Searching and deleting snapshots for volume groups:"
                  "%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name})
        vg = self.client.search('volume_groups', name=vg_name).hits
        rvg = self.target.search('volume_groups', name=rvg_name).hits
        snaps = self.client.search('snapshots', volume_group=vg).hits
        for s in snaps:
            s.delete()
        rsnaps = self.target.search('snapshots', volume_group=rvg).hits
        for s in rsnaps:
            s.delete()
        self._delete_by_ref(self.target, "volumes", rvol_name, 'remote volume')
        self._delete_by_ref(self.target, "volume_groups",
                            rvg_name, "remote vg")
    @utils.trace
    def _delete_failover_volume_replica(self, volume, vg_name, vol_name):
        """Tear down replication for a failed-over volume.

        Only the target array is touched: its session is idled and deleted,
        and the replica volume group's snapshots are removed.
        """
        rvg_name = self.get_rep_name(vg_name)
        rvol_name = self.get_rep_name(vol_name)
        session_name = self.get_session_name(volume.id)
        rsession_name = self.get_rep_name(session_name)
        tgt_ssn = self.target.search('replication/sessions',
                                     name=rsession_name).hits[0]
        tgt_ssn.state = 'idle'
        tgt_ssn.save()
        tgt_ssn.delete()
        LOG.debug("Searching and deleting snapshots for target volume group "
                  "and target volume: %(vol)s, %(vg)s in K2.",
                  {'vol': rvol_name, 'vg': rvg_name})
        rvg = self.target.search('volume_groups', name=rvg_name).hits
        rsnaps = self.target.search('snapshots', volume_group=rvg).hits
        for s in rsnaps:
            s.delete()
def _check_for_status(self, obj, status):
while obj.state != status:
obj.refresh()
eventlet.sleep(1)
    @utils.trace
    def get_volume_stats(self, refresh=False):
        """Return cached driver stats, refreshing from the array first when
        *refresh* is true."""
        if refresh:
            self.update_volume_stats()
        stats = self.stats
        stats['storage_protocol'] = self._protocol
        stats['driver_version'] = self.VERSION
        stats['vendor_name'] = self.VENDOR
        backend_name = self.configuration.safe_get('volume_backend_name')
        # Fall back to the class name when no backend name is configured.
        stats['volume_backend_name'] = (backend_name or
                                        self.__class__.__name__)
        return stats
    def create_export(self, context, volume, connector):
        """Intentionally a no-op for this driver."""
        pass
    def ensure_export(self, context, volume):
        """Intentionally a no-op for this driver."""
        pass
    def remove_export(self, context, volume):
        """Intentionally a no-op for this driver."""
        pass
    @utils.trace
    def create_snapshot(self, snapshot):
        """Create a snapshot from a volume_group."""
        vg_name = self.get_volume_group_name(snapshot.volume_id)
        snap_name = self.get_snap_name(snapshot.id)
        rpolicy = self.get_policy()
        try:
            LOG.debug("Searching volume_group: %s in K2.", vg_name)
            vg = self.client.search("volume_groups", name=vg_name).hits[0]
            LOG.debug("Creating a snapshot: %(snap)s from vg: %(vg)s",
                      {'snap': snap_name, 'vg': vg_name})
            # Keep the snapshot until cinder deletes it explicitly.
            self.client.new("snapshots", short_name=snap_name,
                            source=vg, retention_policy=rpolicy,
                            is_auto_deleteable=False).save()
        except Exception as ex:
            LOG.exception("Creation of snapshot: %s failed.", snap_name)
            raise KaminarioCinderDriverException(reason=ex)
    @utils.trace
    def delete_snapshot(self, snapshot):
        """Delete a snapshot (no-op when it no longer exists on the array)."""
        snap_name = self.get_snap_name(snapshot.id)
        try:
            LOG.debug("Searching and deleting snapshot: %s in K2.", snap_name)
            snap_rs = self.client.search("snapshots", short_name=snap_name)
            if snap_rs.total != 0:
                snap_rs.hits[0].delete()
        except Exception as ex:
            LOG.exception("Deletion of snapshot: %s failed.", snap_name)
            raise KaminarioCinderDriverException(reason=ex)
    @utils.trace
    def extend_volume(self, volume, new_size):
        """Extend volume."""
        vol_name = self.get_volume_name(volume.id)
        try:
            LOG.debug("Searching volume: %s in K2.", vol_name)
            vol = self.client.search("volumes", name=vol_name).hits[0]
            # new_size (GB) scaled by units.Mi — same unit convention as
            # create_volume; presumably K2 sizes are in KB (confirm).
            vol.size = new_size * units.Mi
            LOG.debug("Extending volume: %s in K2.", vol_name)
            vol.save()
        except Exception as ex:
            LOG.exception("Extending volume: %s failed.", vol_name)
            raise KaminarioCinderDriverException(reason=ex)
    def update_volume_stats(self):
        """Refresh self.stats from the array's capacity information."""
        conf = self.configuration
        LOG.debug("Searching system capacity in K2.")
        cap = self.client.search("system/capacity").hits[0]
        LOG.debug("Searching total volumes in K2 for updating stats.")
        # NOTE(review): the -1 presumably excludes an internal/system
        # volume from the count — confirm against the array.
        total_volumes = self.client.search("volumes").total - 1
        provisioned_vol = cap.provisioned_volumes
        if (conf.auto_calc_max_oversubscription_ratio and cap.provisioned
                and (cap.total - cap.free) != 0):
            # Derive the ratio from provisioned vs actually-used capacity.
            ratio = provisioned_vol / float(cap.total - cap.free)
        else:
            ratio = vol_utils.get_max_over_subscription_ratio(
                conf.max_over_subscription_ratio, supports_auto=True)
        # Capacity figures divided by units.Mi to report GB.
        self.stats = {'QoS_support': False,
                      'free_capacity_gb': cap.free / units.Mi,
                      'total_capacity_gb': cap.total / units.Mi,
                      'thin_provisioning_support': True,
                      'sparse_copy_volume': True,
                      'total_volumes': total_volumes,
                      'thick_provisioning_support': False,
                      'provisioned_capacity_gb': provisioned_vol / units.Mi,
                      'max_over_subscription_ratio': ratio,
                      'kaminario:thin_prov_type': 'dedup/nodedup',
                      'replication_enabled': True,
                      'kaminario:replication': True}
def get_initiator_host_name(self, connector):
"""Return the initiator host name or unique ID.
Unique ID when configuration's unique_fqdn_network is false will be
the reversed IQN/WWPNS.
Valid characters: 0-9, a-z, A-Z, '-', '_'
All other characters are replaced with '_'.
Total characters in initiator host name: 32
"""
name = connector.get('initiator',
connector.get('wwnns', [''])[0])[::-1]
if self.configuration.unique_fqdn_network:
name = connector.get('host', name)
return re.sub('[^0-9a-zA-Z-_]', '_', name[:32])
def get_volume_group_name(self, vid):
"""Return the volume group name."""
return "cvg-{0}".format(vid)
def get_volume_name(self, vid):
"""Return the volume name."""
return "cv-{0}".format(vid)
def get_session_name(self, vid):
"""Return the volume name."""
return "ssn-{0}".format(vid)
def get_snap_name(self, sid):
"""Return the snapshot name."""
return "cs-{0}".format(sid)
def get_view_name(self, vid):
"""Return the view name."""
return "cview-{0}".format(vid)
def get_rep_name(self, name):
"""Return the corresponding replication names."""
return "r{0}".format(name)
@utils.trace
def _delete_host_by_name(self, name):
"""Deleting host by name."""
host_rs = self.client.search("hosts", name=name)
if hasattr(host_rs, "hits") and host_rs.total != 0:
host = host_rs.hits[0]
host.delete()
    def get_policy(self):
        """Return the retention policy."""
        try:
            LOG.debug("Searching for retention_policy in K2.")
            # The driver always uses the array's built-in best-effort policy.
            return self.client.search("retention_policies",
                                      name="Best_Effort_Retention").hits[0]
        except Exception as ex:
            LOG.exception("Retention policy search failed in K2.")
            raise KaminarioCinderDriverException(reason=ex)
    def _get_volume_object(self, volume):
        """Return the K2 REST object backing *volume*.

        Searches by the replica ('r'-prefixed) name when the volume is
        failed over; raises KaminarioCinderDriverException when nothing
        matches.
        """
        vol_name = self.get_volume_name(volume.id)
        if volume.replication_status == K2_REP_FAILED_OVER:
            vol_name = self.get_rep_name(vol_name)
        LOG.debug("Searching volume : %s in K2.", vol_name)
        vol_rs = self.client.search("volumes", name=vol_name)
        if not hasattr(vol_rs, 'hits') or vol_rs.total == 0:
            msg = _("Unable to find volume: %s from K2.") % vol_name
            LOG.error(msg)
            raise KaminarioCinderDriverException(reason=msg)
        return vol_rs.hits[0]
    def _get_lun_number(self, vol, host):
        """Return the LUN under which *vol* (a view/volsnap) is mapped to
        *host*.

        NOTE(review): when no volsnap matches *vol*, the mapping search runs
        with volume=None and hits[0] may fail — upstream behavior; confirm
        that callers only pass views here.
        """
        volsnap = None
        LOG.debug("Searching volsnaps in K2.")
        volsnap_rs = self.client.search("volsnaps", snapshot=vol)
        if hasattr(volsnap_rs, 'hits') and volsnap_rs.total != 0:
            volsnap = volsnap_rs.hits[0]
        LOG.debug("Searching mapping of volsnap in K2.")
        map_rs = self.client.search("mappings", volume=volsnap, host=host)
        return map_rs.hits[0].lun
    def initialize_connection(self, volume, connector):
        """Intentionally a no-op here; presumably overridden by
        protocol-specific subclasses (not visible in this file — confirm)."""
        pass
@utils.trace
def terminate_connection(self, volume, connector):
"""Terminate connection of volume from host."""
# Get volume object
if type(volume).__name__ != 'RestObject':
vol_name = self.get_volume_name(volume.id)
if volume.replication_status == K2_REP_FAILED_OVER:
vol_name = self.get_rep_name(vol_name)
LOG.debug("Searching volume: %s in K2.", vol_name)
volume_rs = self.client.search("volumes", name=vol_name)
if hasattr(volume_rs, "hits") and volume_rs.total != 0:
volume = volume_rs.hits[0]
else:
vol_name = volume.name
host_name = ""
if connector is None:
vol_map_rs = self.client.search("mappings", {"volume": volume})
if hasattr(vol_map_rs, "hits") and vol_map_rs.total != 0:
host_name = vol_map_rs.hits[0].host.name
else:
# Get host object.
host_name = self.get_initiator_host_name(connector)
host_rs = self.client.search("hosts", name=host_name)
if hasattr(host_rs, "hits") and host_rs.total != 0 and volume:
host = host_rs.hits[0]
LOG.debug("Searching and deleting mapping of volume: %(name)s to "
"host: %(host)s", {'host': host_name, 'name': vol_name})
map_rs = self.client.search("mappings", volume=volume, host=host)
if hasattr(map_rs, "hits") and map_rs.total != 0:
map_rs.hits[0].delete()
if self.client.search("mappings", host=host).total == 0:
LOG.debug("Deleting initiator hostname: %s in K2.", host_name)
host.delete()
else:
LOG.warning("Host: %s not found on K2.", host_name)
    @utils.trace
    def k2_initialize_connection(self, volume, connector):
        """Map *volume* to the connector's host and return the LUN number.

        Accepts either a cinder volume or a K2 RestObject (e.g. a view
        created by create_volume_from_snapshot).
        """
        # Get volume object.
        if type(volume).__name__ != 'RestObject':
            vol = self._get_volume_object(volume)
        else:
            vol = volume
        # Get host object.
        host, host_rs, host_name = self._get_host_object(connector)
        try:
            # Map volume object to host object.
            LOG.debug("Mapping volume: %(vol)s to host: %(host)s",
                      {'host': host_name, 'vol': vol.name})
            mapping = self.client.new("mappings", volume=vol, host=host).save()
        except Exception as ex:
            # Remove the host only if it did not exist before this call.
            if host_rs.total == 0:
                self._delete_host_by_name(host_name)
            LOG.exception("Unable to map volume: %(vol)s to host: "
                          "%(host)s", {'host': host_name,
                                       'vol': vol.name})
            raise KaminarioCinderDriverException(reason=ex)
        # Get lun number.
        if type(volume).__name__ == 'RestObject':
            return self._get_lun_number(vol, host)
        else:
            return mapping.lun
    def _get_host_object(self, connector):
        """Return ``(host, host_search_result, host_name)`` for *connector*.

        NOTE(review): stub — callers such as ``k2_initialize_connection``
        unpack three values from this method, so a concrete implementation
        must return that triple; as written it returns ``None``.
        """
        pass
def _get_is_dedup(self, vol_type):
if vol_type:
specs_val = vol_type.get('extra_specs', {}).get(
'kaminario:thin_prov_type')
if specs_val == 'nodedup':
return False
else:
return True
else:
return True
def _get_is_replica(self, vol_type):
replica = False
if vol_type and vol_type.get('extra_specs'):
specs = vol_type.get('extra_specs')
if (specs.get('kaminario:replication') == 'enabled' and
self.replica):
replica = True
return replica
def _get_replica_status(self, vg_name):
vg_rs = self.client.search("volume_groups", name=vg_name)
if vg_rs.total:
vg = vg_rs.hits[0]
if self.client.search("replication/sessions",
local_volume_group=vg).total:
return True
return False
    @utils.trace
    def manage_existing(self, volume, existing_ref):
        """Bring an existing K2 volume under Cinder management.

        Renames the K2 volume and its volume group to Cinder's naming
        scheme.  Rejects the reference when the dedup type mismatches,
        replication is enabled, the volume is attached, or the volume
        group holds more than one volume.

        :param volume: new Cinder volume record.
        :param existing_ref: dict with 'source-name' of the K2 volume.
        :raises ManageExistingInvalidReference: on any rejection reason.
        """
        vol_name = existing_ref['source-name']
        new_name = self.get_volume_name(volume.id)
        vg_new_name = self.get_volume_group_name(volume.id)
        # vg_name stays None until the rename begins; the rollback below
        # uses it to tell whether the vg rename already happened.
        vg_name = None
        is_dedup = self._get_is_dedup(volume.get('volume_type'))
        reason = None
        try:
            LOG.debug("Searching volume: %s in K2.", vol_name)
            vol = self.client.search("volumes", name=vol_name).hits[0]
            vg = vol.volume_group
            nvol = self.client.search("volumes", volume_group=vg).total
            vg_replica = self._get_replica_status(vg.name)
            vol_map = False
            if self.client.search("mappings", volume=vol).total != 0:
                vol_map = True
            # Pre-conditions for managing the volume; first failure wins.
            if is_dedup != vg.is_dedup:
                reason = 'dedup type mismatch for K2 volume group.'
            elif vg_replica:
                reason = 'replication enabled K2 volume group.'
            elif vol_map:
                reason = 'attached K2 volume.'
            elif nvol != 1:
                reason = 'multiple volumes in K2 volume group.'
            if reason:
                raise exception.ManageExistingInvalidReference(
                    existing_ref=existing_ref,
                    reason=_('Unable to manage K2 volume due to: %s') % reason)
            # Rename vg first, then the volume; remember the old vg name
            # so a failure after vg.save() can be rolled back.
            vol.name = new_name
            vg_name = vg.name
            LOG.debug("Manage new volume name: %s", new_name)
            vg.name = vg_new_name
            LOG.debug("Manage volume group name: %s", vg_new_name)
            vg.save()
            LOG.debug("Manage volume: %s in K2.", vol_name)
            vol.save()
        except exception.ManageExistingInvalidReference:
            LOG.exception("manage volume: %s failed.", vol_name)
            raise
        except Exception:
            # Best-effort rollback: restore the vg's original name if the
            # rename had already been persisted.
            LOG.exception("manage volume: %s failed.", vol_name)
            vg_rs = self.client.search("volume_groups", name=vg_new_name)
            if hasattr(vg_rs, 'hits') and vg_rs.total != 0:
                vg = vg_rs.hits[0]
                if vg_name and vg.name == vg_new_name:
                    vg.name = vg_name
                    LOG.debug("Updating vg new name to old name: %s ", vg_name)
                    vg.save()
            raise
@utils.trace
def manage_existing_get_size(self, volume, existing_ref):
vol_name = existing_ref['source-name']
v_rs = self.client.search("volumes", name=vol_name)
if hasattr(v_rs, 'hits') and v_rs.total != 0:
vol = v_rs.hits[0]
size = vol.size / units.Mi
return math.ceil(size)
else:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_('Unable to get size of manage volume.'))
@utils.trace
def after_volume_copy(self, ctxt, volume, new_volume, remote=None):
self.delete_volume(volume)
vg_name_old = self.get_volume_group_name(volume.id)
vol_name_old = self.get_volume_name(volume.id)
vg_name_new = self.get_volume_group_name(new_volume.id)
vol_name_new = self.get_volume_name(new_volume.id)
vg_new = self.client.search("volume_groups", name=vg_name_new).hits[0]
vg_new.name = vg_name_old
vg_new.save()
vol_new = self.client.search("volumes", name=vol_name_new).hits[0]
vol_new.name = vol_name_old
vol_new.save()
    @utils.trace
    def retype(self, ctxt, volume, new_type, diff, host):
        """Handle volume retype.

        Only replication add/remove is performed in place; any change of
        the dedup provisioning type requires migration.

        :returns: True when replication was toggled, False when a
            migration is required or the change is unsupported.  When
            provisioning matches and both types keep replication enabled,
            no branch fires and the method implicitly returns None
            (falsy: no in-place retype performed).
        """
        old_type = volume.get('volume_type')
        vg_name = self.get_volume_group_name(volume.id)
        vol_name = self.get_volume_name(volume.id)
        vol_rs = self.client.search("volumes", name=vol_name)
        # vmap is only bound when the volume exists; the later use is
        # guarded by the short-circuit `vol_rs.total and vmap`.
        if vol_rs.total:
            vol = vol_rs.hits[0]
            vmap = self.client.search("mappings", volume=vol).total
        old_rep_type = self._get_replica_status(vg_name)
        new_rep_type = self._get_is_replica(new_type)
        new_prov_type = self._get_is_dedup(new_type)
        old_prov_type = self._get_is_dedup(old_type)
        # Change dedup<->nodedup with add/remove replication is complex in K2
        # since K2 does not have api to change dedup<->nodedup.
        if new_prov_type == old_prov_type:
            if not old_rep_type and new_rep_type:
                self._add_replication(volume)
                return True
            elif old_rep_type and not new_rep_type:
                self._delete_replication(volume)
                return True
            elif not new_rep_type and not old_rep_type:
                # Same provisioning, no replication on either side: only a
                # migration can convert the underlying volume group.
                msg = ("Use '--migration-policy on-demand' to change 'dedup "
                       "without replication'<->'nodedup without replication'.")
                if vol_rs.total and vmap:
                    msg = "Unattach volume and {0}".format(msg)
                LOG.debug(msg)
                return False
        else:
            LOG.error('Change from type1: %(type1)s to type2: %(type2)s '
                      'is not supported directly in K2.',
                      {'type1': old_type, 'type2': new_type})
            return False
def _add_replication(self, volume):
vg_name = self.get_volume_group_name(volume.id)
vol_name = self.get_volume_name(volume.id)
if volume.replication_status == K2_REP_FAILED_OVER:
self._create_failover_volume_replica(volume, vg_name, vol_name)
else:
LOG.debug("Searching volume group with name: %(name)s",
{'name': vg_name})
vg = self.client.search("volume_groups", name=vg_name).hits[0]
LOG.debug("Searching volume with name: %(name)s",
{'name': vol_name})
vol = self.client.search("volumes", name=vol_name).hits[0]
self._create_volume_replica(volume, vg, vol, self.replica.rpo)
def _delete_replication(self, volume):
vg_name = self.get_volume_group_name(volume.id)
vol_name = self.get_volume_name(volume.id)
if volume.replication_status == K2_REP_FAILED_OVER:
self._delete_failover_volume_replica(volume, vg_name, vol_name)
else:
self._delete_volume_replica(volume, vg_name, vol_name)
def _kaminario_disconnect_volume(self, *attach_info):
for info in attach_info:
if (info and info.get('connector') and
info.get('conn', {}).get('data') and info.get('device')):
info['connector'].disconnect_volume(info['conn']['data'],
info['device'])
|
<reponame>dnguyen800/home-assistant
"""Support for ZHA covers."""
from datetime import timedelta
import functools
import logging
from zigpy.zcl.foundation import Status
from homeassistant.components.cover import ATTR_POSITION, DOMAIN, CoverDevice
from homeassistant.const import STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core.const import (
CHANNEL_COVER,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ATTR_UPDATED,
ZHA_DISCOVERY_NEW,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=60)
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Zigbee Home Automation cover from config entry."""

    async def async_discover(discovery_info):
        # A cover discovered after setup: create its entity immediately.
        await _async_setup_entities(
            hass, config_entry, async_add_entities, [discovery_info]
        )

    unsub = async_dispatcher_connect(
        hass, ZHA_DISCOVERY_NEW.format(DOMAIN), async_discover
    )
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)

    covers = hass.data.get(DATA_ZHA, {}).get(DOMAIN)
    if covers is None:
        return
    # Create entities for covers discovered before this platform loaded,
    # then drop the staging area so they are not added twice.
    await _async_setup_entities(
        hass, config_entry, async_add_entities, covers.values()
    )
    del hass.data[DATA_ZHA][DOMAIN]
async def _async_setup_entities(
    hass, config_entry, async_add_entities, discovery_infos
):
    """Set up the ZHA covers."""
    entities = []
    for discovery_info in discovery_infos:
        # The registry picks the concrete entity class for this device's
        # channels (falling back to ZhaCover); None means "no match".
        entity_class = ZHA_ENTITIES.get_entity(
            DOMAIN,
            discovery_info["zha_device"],
            discovery_info["channels"],
            ZhaCover,
        )
        if entity_class:
            entities.append(entity_class(**discovery_info))
    if entities:
        async_add_entities(entities, update_before_add=True)
@STRICT_MATCH(channel_names=CHANNEL_COVER)
class ZhaCover(ZhaEntity, CoverDevice):
    """Representation of a ZHA cover.

    Position convention: Home Assistant uses 0 = closed, 100 = fully open,
    while the Zigbee lift-percentage attribute is inverted, hence the
    ``100 - pos`` conversions throughout.
    """

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Init this sensor."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._cover_channel = self.cluster_channels.get(CHANNEL_COVER)
        # Position in HA convention (0..100); None until the first report.
        self._current_position = None

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        await self.async_accept_signal(
            self._cover_channel, SIGNAL_ATTR_UPDATED, self.async_set_position
        )

    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state."""
        self._state = last_state.state
        if "current_position" in last_state.attributes:
            self._current_position = last_state.attributes["current_position"]

    @property
    def is_closed(self):
        """Return if the cover is closed, or None when unknown."""
        if self.current_cover_position is None:
            return None
        return self.current_cover_position == 0

    @property
    def is_opening(self):
        """Return if the cover is opening or not."""
        return self._state == STATE_OPENING

    @property
    def is_closing(self):
        """Return if the cover is closing or not."""
        return self._state == STATE_CLOSING

    @property
    def current_cover_position(self):
        """Return the current position of ZHA cover.

        None is unknown, 0 is closed, 100 is fully open.
        """
        return self._current_position

    def async_set_position(self, pos):
        """Handle position update from channel."""
        _LOGGER.debug("setting position: %s", pos)
        self._current_position = 100 - pos
        if self._current_position == 0:
            self._state = STATE_CLOSED
        elif self._current_position == 100:
            self._state = STATE_OPEN
        self.async_schedule_update_ha_state()

    def async_set_state(self, state):
        """Handle state update from channel."""
        _LOGGER.debug("state=%s", state)
        self._state = state
        self.async_schedule_update_ha_state()

    async def async_open_cover(self, **kwargs):
        """Open the window cover."""
        res = await self._cover_channel.up_open()
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            self.async_set_state(STATE_OPENING)

    async def async_close_cover(self, **kwargs):
        """Close the window cover."""
        res = await self._cover_channel.down_close()
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            self.async_set_state(STATE_CLOSING)

    async def async_set_cover_position(self, **kwargs):
        """Move the roller shutter to a specific position."""
        new_pos = kwargs[ATTR_POSITION]
        res = await self._cover_channel.go_to_lift_percentage(100 - new_pos)
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            # BUG FIX: _current_position may still be None (no report yet);
            # the original comparison then raised TypeError.  Treat an
            # unknown position as "opening".
            if (self._current_position is not None
                    and new_pos < self._current_position):
                self.async_set_state(STATE_CLOSING)
            else:
                self.async_set_state(STATE_OPENING)

    async def async_stop_cover(self, **kwargs):
        """Stop the window cover."""
        res = await self._cover_channel.stop()
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            # BUG FIX: guard the None case; comparing None > 0 raised
            # TypeError in the original.  Leave state unchanged if unknown.
            if self._current_position is not None:
                self._state = (
                    STATE_OPEN if self._current_position > 0 else STATE_CLOSED
                )
            self.async_schedule_update_ha_state()

    async def async_update(self):
        """Attempt to retrieve the open/close state of the cover."""
        await super().async_update()
        await self.async_get_state()

    async def async_get_state(self, from_cache=True):
        """Fetch the current state."""
        _LOGGER.debug("polling current state")
        if self._cover_channel:
            pos = await self._cover_channel.get_attribute_value(
                "current_position_lift_percentage", from_cache=from_cache
            )
            _LOGGER.debug("read pos=%s", pos)
            if pos is not None:
                self._current_position = 100 - pos
                self._state = (
                    STATE_OPEN if self.current_cover_position > 0 else STATE_CLOSED
                )
            else:
                self._current_position = None
                self._state = None
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 14:07:39 2016
@author: pablo
"""
import numpy as np
import abc
import matplotlib.pyplot as plt
class Hyperplume():
    """Parent class for plasma plume expansion models.

    Loads the target plasma and defines the attributes and numerical
    helpers shared by the AEM and SSM plume classes.
    """

    # NOTE(review): Python-2 style metaclass declaration; in Python 3 this
    # attribute is inert, so Hyperplume stays directly instantiable (the
    # documented usage below relies on that) — confirm before "fixing".
    __metaclass__ = abc.ABCMeta

    @abc.abstractclassmethod
    def solver(self):
        """Solver abstract method, particularised by each plume code."""
        return

    @abc.abstractclassmethod
    def query(self, z, r):
        """Return plasma profile data at the given (z, r) grid points
        (abstract; particularised by each plume code)."""
        return

    def __init__(self, plasma=None, z_span=None, r_span=None, n_init=None):
        """Load common class properties for AEM and SSM plume classes.

        Args:
            plasma (dict): simple_plasma-style dictionary of basic plasma
                parameters.  Defaults to a singly-charged plasma.
            z_span (numpy.ndarray): axial region where the problem will be
                integrated.  Defaults to ``np.linspace(0, 100, 500)``.
            r_span (numpy.ndarray): initial far-field plasma radial profile.
                Defaults to ``np.linspace(0, 40, 500)``.
            n_init (numpy.ndarray): initial dimensional density front.

        Usage:
            >>> z_span = np.linspace(0, 100, 100)
            >>> r0 = np.linspace(0, 3, 100)
            >>> n0 = np.exp(-6.15 / 2 * r0 ** 2)
            >>> Plume = Hyperplume(None, z_span, r0, n0)
        """
        # Build defaults lazily: mutable default arguments would be shared
        # across every instance created with the defaults.
        if plasma is None:
            plasma = {'Electrons': {'Gamma': 1,
                                    'T_0_electron': 2.1801714e-19,
                                    'q_electron': -1.6e-19},
                      'Ions': {'mass_ion': 2.1801714e-25,
                               'q_ion': 1.6e-19}}
        if z_span is None:
            z_span = np.linspace(0, 100, 500)
        if r_span is None:
            r_span = np.linspace(0, 40, 500)
        if n_init is None:
            n_init = 0.0472 * np.linspace(1, 0, 500) ** 2
        self.plasma = plasma
        self.Gamma = plasma['Electrons']['Gamma']
        self.T_0 = plasma['Electrons']['T_0_electron']
        self.m_ion = plasma['Ions']['mass_ion']
        self.q_ion = plasma['Ions']['q_ion']
        self.z_span = z_span
        self.eta = r_span          # radial profile / ion stream coordinate
        self.n0 = n_init

    def simple_plasma(self, charge=1.6e-19, ion_mass=2.1801714e-25,
                      init_plasma_temp=2.1801714e-19, Gamma=1):
        """Create a two-species plasma dictionary (ions and electrons).

        Args:
            charge (float): electron charge magnitude [C].
            ion_mass (float): ion mass [kg].
            init_plasma_temp (float): initial plasma temperature [J].
            Gamma (int or float): dimensionless thermal expansion constant.
        Returns:
            dict: plasma dictionary with 'Ions' and 'Electrons' entries.
        Raises:
            ValueError: when Gamma is outside the accepted [1, 2] range.
                (The original printed a warning and then crashed with
                UnboundLocalError; raising makes the failure explicit.
                NOTE(review): other docstrings quote [1, 5/3] as the
                physical polytropic limit — confirm the intended bound.)
        Usage:
            >>> Plasma = Hyperplume().simple_plasma(Gamma=1)
        """
        if Gamma < 1 or Gamma > 2:
            raise ValueError(
                'Gamma is outside isothermal or polytropic boundaries')
        return {'Ions': {'mass_ion': ion_mass, 'q_ion': charge},
                'Electrons': {'q_electron': -charge,
                              'T_0_electron': init_plasma_temp,
                              'Gamma': Gamma}}

    def temp(self, n, n_0, T_0, Gamma):
        """Plasma temperature T as a function of density n.

        Isothermal (Gamma == 1): constant T_0; otherwise the polytropic
        law ``T = T_0 (n / n_0)**(Gamma - 1)``.
        """
        if Gamma == 1:
            # ``n * 0 + 1`` broadcasts T_0 to the shape of n.
            return T_0 * (n * 0 + 1)
        return T_0 * ((n / n_0) ** (Gamma - 1))

    def phi(self, n, n_0, T_0, Gamma, e_charge):
        """Electric potential phi as a function of density n.

        Isothermal: Boltzmann relation; polytropic: the corresponding
        barotropic potential.  ``n()`` below is its exact inverse.
        """
        if Gamma == 1:
            return (T_0 / e_charge) * np.log(n / n_0)
        return (T_0 / e_charge) * (Gamma / (Gamma - 1)
                                   * ((n / n_0) ** (Gamma - 1) - 1))

    def n(self, n_0, T_0, phi, Gamma, e_charge):
        """Plasma density n as a function of potential phi (inverse of phi()).

        BUG FIX: the original wrote ``x ** 1 / (Gamma - 1)``, which Python
        parses as ``(x ** 1) / (Gamma - 1)`` — not the polytropic inverse.
        The correct exponent is ``1 / (Gamma - 1)``, which makes n() and
        phi() exact inverses of each other.
        """
        if Gamma == 1:
            return n_0 * np.exp(phi * e_charge / T_0)
        return n_0 * (((Gamma - 1) / Gamma * phi * e_charge / T_0 + 1)
                      ** (1 / (Gamma - 1)))

    def eta_deriver(self, x, y):
        """Numerical derivative of y with respect to x along eta.

        BUG FIX: the original passed the spacing array ``np.gradient(x)``
        as the second argument of ``np.gradient``, which NumPy interprets
        as *coordinates* — for uniformly spaced x that means constant
        coordinates and division by zero.  Passing the coordinate array x
        directly is the documented usage and handles non-uniform grids.

        Usage:
            >>> x = np.array([0, 0.5, 1, 1.2, 2, 2.3, 2.6])
            >>> y = np.array([10, 17, 23, 27, 36, 40, 45])
            >>> dydx = Hyperplume().eta_deriver(x, y)
        """
        return np.gradient(y, x)

    def plot(self, z=np.array([15, 20, 25, 30]), r=np.array([20, 25, 30, 35]),
             var_name='n', contour_levels=[0, 1, 2, 3, 4, 5, 6, 7, 8]):
        """Plot contours of a plasma variable on the (z, r) plume grid.

        Args:
            z, r: interpolation regions (must lie inside the model grid).
            var_name (str): one of 'lnn', 'u_z', 'u_r', 'T', 'phi', 'eta'
                (or 'error').
            contour_levels: contour labels for the plotted variable.
        Returns:
            None (saves ``<var_name>.pdf`` and shows the figure).
        """
        lnn, u_z, u_r, T, phi, error, eta = self.query(z, r)
        # Explicit lookup instead of eval(): only the documented variable
        # names are accepted, and arbitrary code can no longer be injected
        # through var_name.
        variables = {'lnn': lnn, 'u_z': u_z, 'u_r': u_r, 'T': T,
                     'phi': phi, 'error': error, 'eta': eta}
        fig = plt.figure()
        CE = plt.contour(z, r, variables[var_name], contour_levels)
        plt.title(var_name)
        plt.xlabel(r'$\ z/R_0 $')
        plt.ylabel(r'$\ r/R_0 $')
        plt.ylim(0, 10)
        plt.clabel(CE, CE.levels, fontsize=6)
        plt.savefig(var_name + '.pdf', bbox_inches='tight')
        fig.show()
|
<reponame>soulmerge/pymment
from datetime import datetime
import json
import logging
import os
import sqlite3
import urllib.parse
import uuid
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
# Module-level database bootstrap: opens (or creates) the sqlite database
# next to this file and ensures the two tables exist.  The names `file`,
# `connection` and `cursor` are used by every function below.
file = os.path.join(os.path.dirname(__file__), 'comments.sqlite3')
log.info(file)
connection = sqlite3.connect(file)
found_comments = False
found_users = False
cursor = connection.cursor()
# Detect which tables already exist so CREATE TABLE is only run once.
cursor.execute('SELECT name FROM sqlite_master')
for row in cursor:
    if row[0] == 'users':
        found_users = True
    elif row[0] == 'comments':
        found_comments = True
if not found_users:
    cursor.execute('''
        CREATE TABLE users (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            password TEXT,
            name TEXT UNIQUE
        )
    ''')
if not found_comments:
    # `time` stores a unix timestamp (seconds); `parentId` references
    # comments.id for replies.
    cursor.execute('''
        CREATE TABLE comments (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            itemId INTEGER,
            parentId INTEGER,
            userId INTEGER,
            message TEXT,
            time INTEGER
        )
    ''')
class User:
    """A registered commenter."""

    def __init__(self, id, name, password):
        self.id = id
        self.name = name
        self.password = password

    @property
    def json(self):
        """Public JSON view of the user (never exposes the password)."""
        return dict(id=self.id, name=self.name)
class Comment:
    """A comment on an item, optionally replying to a parent comment."""

    def __init__(self, id, itemId, parent, user, message, time):
        self.id = id
        self.itemId = itemId
        self.parent = parent      # parent Comment or None for top-level
        self.user = user          # User instance
        self.message = message
        self.time = time          # datetime of creation

    @property
    def json(self):
        """JSON-serializable view of the comment."""
        parent_id = self.parent.id if self.parent else None
        return {
            'id': self.id,
            'itemId': self.itemId,
            'parentId': parent_id,
            'user': self.user.json,
            'message': self.message,
            'time': self.time.timestamp(),
        }
def create_user(name):
    """Create a new user with a server-generated random password.

    The password doubles as the user's auth token and is returned to the
    caller exactly once (see the 'user' op in ``application``).

    BUG FIX: the original line was a redacted placeholder
    (``password = <PASSWORD>``), which is a syntax error.  The module
    imports ``uuid`` (otherwise unused), so a random uuid4 hex string
    restores a working, unguessable password.
    """
    password = uuid.uuid4().hex
    cursor.execute('INSERT INTO users (name, password) VALUES (?, ?)',
                   (name, password))
    connection.commit()
    id = cursor.lastrowid
    return User(id, name, password)
def get_user(id, password):
    """Return the User matching id+password, or None on bad credentials."""
    cursor.execute('SELECT name FROM users WHERE id = ? AND password = ?',
                   (id, password))
    row = cursor.fetchone()
    return None if row is None else User(id, row[0], password)
def change_username(id, password, name):
    """Rename an authenticated user and return the updated User.

    NOTE(review): ``assert`` is stripped under ``python -O``; bad
    credentials should probably raise explicitly instead.  The top-level
    handler in ``application`` turns the AssertionError into a 500.
    """
    user = get_user(id, password)
    assert user is not None
    cursor.execute('UPDATE users SET name = ? WHERE id = ?', (name, id))
    connection.commit()
    user.name = name
    return user
def create_comment(itemId, parentId, userId, userPassword, message):
    """Persist a new comment by an authenticated user and return it.

    Raises AssertionError (-> 500 in `application`) on bad credentials.
    """
    user = get_user(userId, userPassword)
    assert user is not None
    # Renamed locals: avoid shadowing the builtins `id` and the module
    # name `time` used elsewhere.
    now = datetime.now()
    cursor.execute(
        'INSERT INTO comments (itemId, parentId, userId, message, time) VALUES (?, ?, ?, ?, ?)',
        (itemId, parentId, user.id, message, now.timestamp()))
    comment_id = cursor.lastrowid
    connection.commit()
    return Comment(comment_id, itemId, None, user, message, now)
def comments(itemId, lastId):
    """Yield up to 10 comments on *itemId* posted after comment *lastId*.

    ``lastId == 0`` (or "0") means "from the beginning".  Returns an
    iterator of Comment objects, oldest first.
    """
    time_threshold = 0
    if int(lastId):
        cursor.execute('SELECT time FROM comments WHERE id = ?', (lastId,))
        time_threshold = cursor.fetchone()[0]
    cursor.execute('''
    SELECT
        u.id, u.name, c.id, c.message, c.time
    FROM
        users u
        INNER JOIN comments c on c.userId = u.id
    WHERE
        c.itemId = ?
        AND c.time > ?
    ORDER BY c.time ASC
    LIMIT 10
    ''', (itemId, time_threshold))
    # Generator instead of map(): same lazy iteration, clearer construction.
    for user_id, user_name, comment_id, message, time_stamp in cursor:
        yield Comment(
            comment_id,
            itemId,
            None,
            User(user_id, user_name, None),
            message,
            datetime.fromtimestamp(int(time_stamp)),
        )
def application(env, start_response):
    """WSGI entry point: dispatch user/comment operations.

    POST ops: 'user' (create), 'username' (rename), 'comment' (create).
    GET ops: 'comments' (list).  All responses are JSON; any error maps
    to a 500.

    BUG FIXES: the original had a redacted placeholder
    (``'password': <PASSWORD>``) — a syntax error — in the 'user'
    response, and the error path returned None, which violates the WSGI
    requirement that the application return an iterable of bytes.
    """
    try:
        if env['REQUEST_METHOD'] == 'POST':
            args = urllib.parse.parse_qs(str(env['wsgi.input'].readline(), 'UTF-8'))
            if args['op'][0] == 'user':
                user = create_user(args['name'][0])
                # The password doubles as the auth token; it is revealed
                # only in this response.
                body = {
                    'id': user.id,
                    'name': user.name,
                    'password': user.password,
                }
            elif args['op'][0] == 'username':
                user = change_username(args['id'][0], args['password'][0], args['name'][0])
                body = user.json
            elif args['op'][0] == 'comment':
                body = create_comment(
                    args['itemId'][0],
                    args['parentId'][0],
                    args['userId'][0],
                    args['userPassword'][0],
                    args['message'][0]
                ).json
        else:
            args = urllib.parse.parse_qs(env['QUERY_STRING'], encoding='UTF-8')
            log.debug(args)
            if args['op'][0] == 'comments':
                body = []
                for comment in comments(args['itemId'][0], args['lastId'][0]):
                    body.append(comment.json)
        # An unknown op leaves `body` unbound; the resulting NameError is
        # caught below and reported as a 500.
        log.debug(args)
        start_response('200 OK', [('Content-Type', 'application/json')])
        return [json.dumps(body).encode('UTF-8')]
    except Exception as e:
        start_response('500 Internal Server Error', [('Content-Type', 'text/html')])
        log.exception(e)
        # Return a valid (empty) WSGI body instead of falling through
        # and returning None.
        return [b'']
## from abc import abstractmethod, ABCMeta
##
##
## class User:
##
## def __init__(self, name):
## self.name = name
##
##
## class Comment:
##
## def __init__(self, parent, user, text, time):
## self.parent = parent
## self.user = user
## self.item = item
## self.text = text
## self.time = time
##
##
## class Item:
##
## def __init__(self, storage, id):
## self.storage = storage
## self.id = id
##
## @property
## def comments(self):
## pass
##
##
## class Storage(metaclass=ABCMeta):
##
## def item(self, id):
## return Item(self, id)
##
## @abstractmethod
## def comment(self, item_id, comment_id):
## pass
##
## @abstractmethod
## def comments(self, item_id):
## pass
##
## @abstractmethod
## def comment_count(self, item_id):
## pass
##
##
## class SqliteCommentsIterator:
##
## def __init__(self, cursor):
## self.cursor = cursor
## self.remaining_rows = None
## self.return_count = 0
##
## def __iter__(self):
## return self
##
## def __next__(self):
## if self.remaining_rows is not None:
## try:
## row = self.remaining_rows.pop(0)
## except IndexError:
## raise StopIteration()
## else:
## row = next(self.cursor)
## self.return_count += 1
## return Comment(*row)
##
## def __len__(self):
## if self.remaining_rows is None:
## self.remaining_rows = self.cursor.fetchall()
## return self.return_count + len(self.remaining_rows)
##
##
## class SqliteStorage(Storage):
##
## def __init__(self, file):
## self.file = file
## self._cursor = None
##
## def comments(self, item_id):
## cursor = self.connection.cursor()
## cursor.execute("""
## SELECT parent, user, item, text, time FROM comment WHERE item = ?
## """, item_id)
## return SqliteCommentsIterator(cursor)
##
## def comment_count(self, item_id):
## cursor = self.connection.cursor()
## cursor.execute("SELECT COUNT(*) FROM comment WHERE item = ?", item_id)
## return cursor.fetchone()[0]
##
## @property
## def connection(self):
## if not self._connection:
## self._connection = sqlite3.connect(self.file)
## return self._connection
|
<gh_stars>0
from tkinter import *
import random
renkler = ('red', 'blue', 'orange', 'green', 'gray')
carpan = 10 # Boyutlari pixele cevirmek icin kullanilir
siralar = (1, 3, 1, 2, 3, 1, 2, 3, 1)
yonler = (1, 0, 1, 0, 1, 0, 1, 1, 0) # 0:normal, 1: 90 derece donuk
class Cisim:
    """A rectangular piece ("cisim") to be placed on the ground plane."""

    def __init__(self, tip, t, h, y):
        self.tip = tip
        self.yon = y  # 0: normal, 1: rotated 90 degrees
        # A rotated piece swaps its base and height.
        if y > 0:
            self.taban = h
            self.yuks = t
        else:
            self.taban = t
            self.yuks = h
        self.yerlesti = 0  # becomes 1 once the piece is placed
        self.x1 = self.x2 = self.y1 = self.y2 = 0

    def yerlestir(self, x, y):
        """Pin the piece at (x, y); the small delta shrinks the rectangle
        slightly so adjacent pieces do not register as overlapping."""
        delta = 0.1
        self.x1 = x + delta
        self.y1 = y + delta
        self.x2 = self.x1 + self.taban - delta
        self.y2 = self.y1 + self.yuks - delta
        self.yerlesti = 1

    def cakisiyormu(self, x, y, dx, dy):
        """Return True when rectangle (x, y)-(x+dx, y+dy) overlaps this piece."""
        left, top, right, bottom = x, y, x + dx, y + dy
        return (left < self.x2 and right > self.x1
                and top < self.y2 and bottom > self.y1)
class Zincir:
    """A chain of pieces placed greedily onto a fixed-height ground strip.

    NOTE(review): ``zinciryap`` reads a module-level ``cisimler`` mapping
    (tip -> (taban, yuks, adet)) that is not defined at module scope in
    this file — presumably supplied by the caller's globals; verify
    before instantiating outside ``Hesap``.
    """

    def __init__(self, siralar, yonler):
        self.zeminyuksekligi = 12           # ground height (units)
        self.maksimumzemingenisligi = 100   # maximum ground width
        self.siralar = siralar              # piece-type order (the "DNA")
        self.yonler = yonler                # per-piece rotation flags
        self.zincir = list()
        self.kullanilangenislik = 0         # widest x reached by any piece
        self.zinciryap()

    def zinciryap(self):
        """Build the Cisim objects for this chain, in order."""
        # Create the shapes.
        for sira, yon in zip(self.siralar, self.yonler):
            taban, yuks, adet = cisimler[sira]
            cisim = Cisim(sira, taban, yuks, yon)
            self.zincir.append(cisim)

    def cismi_zemineyerlestir(self, dx, dy):
        """Find the first (x, y) where a dx-by-dy piece fits.

        Scans column-by-column, row-by-row; the for-else fires only when
        the candidate cell collides with no already-placed piece.
        Returns (-1, -1) when the ground width is exhausted.
        """
        for xx in range(0, self.maksimumzemingenisligi, 1):
            for yy in range(0, self.zeminyuksekligi, 1):
                yerlesmiscisimler = [yc for yc in self.zincir if yc.yerlesti == 1]
                for yerlesmiscisim in yerlesmiscisimler:
                    cevap = yerlesmiscisim.cakisiyormu(xx, yy, dx, dy)
                    if cevap == True:
                        break
                else:
                    # Have we reached the edge of the ground?
                    if (yy + dy) <= self.zeminyuksekligi:
                        return xx, yy
                    else:
                        print('Hata oluyor. %s metre genişlik yetmiyor.' % self.maksimumzemingenisligi)
                        return -1, -1

    def cisimleri_zemineyerlestir(self):
        """Place every piece in chain order, tracking the used width."""
        for cisim in self.zincir:
            dx, dy = cisim.taban, cisim.yuks
            x, y = self.cismi_zemineyerlestir(dx, dy)
            if x > -1:
                cisim.yerlestir(x, y)
                if x + dx > self.kullanilangenislik:
                    self.kullanilangenislik = x + dx
            else:
                print('Hata')
                return

    def bas(self):
        """Print the placement result for every piece (debug output)."""
        print('Kullanılan:', self.kullanilangenislik)
        satir = "Tip:%s Taban:%s Yuks:%s Yon:%s Yerlesti:%s x1:%3.1f y1:%3.1f x2:%3.1f y2:%3.1f "
        for c in self.zincir:
            print(satir % (c.tip, c.taban, c.yuks, c.yon, c.yerlesti, c.x1, c.y1, c.x2, c.y2))
class Hesap:
    """Genetic-algorithm driver: reads piece definitions from ``bilgi.txt``
    and generates an initial population of placement orderings."""

    def __init__(self):
        self.random_cekirdegi = 999         # RNG seed for reproducibility
        self.ilkjenerasyonAdedi = 5         # size of the first generation
        self.tipler = []                    # piece type ids, file order
        self.bireyler = dict()              # generation no -> (order, rotations)
        self.cisimler = dict()              # tip -> (taban, yuks, adet)
        self.dnalar = dict()                # dna index -> (taban, yuks)
        self.dnaAdet = 0                    # total piece count (dna length)
        self.zeminyuksekligi = 12
        # tip: taban, h, adet
        # self.cisimler = {1: (2, 3, 4), 2: (4, 5, 2), 3: (1, 4, 3)}
        # self.dnalar = {1: (2, 3), 2: (2, 3), 3: (2, 3), 4: (2, 3),
        #                5: (4, 5), 6: (4, 5),
        #                7: (1, 4), 8: (1, 4), 9: (1, 4)}
        self.dosyaOku()
        self.dnaUret()

    def dosyaOku(self):
        """Read ``bilgi.txt``: line 1 is the ground height, lines 3+ are
        whitespace-separated ``tip taban yuks adet`` records."""
        filename = "bilgi.txt"
        with open(filename, 'r') as f:
            lines = f.readlines()
            self.zeminyuksekligi = int(lines[0])
            for line in lines[2:]:
                a = tuple(map(int, line.split()))
                # print(a)
                self.tipler.append(a[0])
                self.cisimler[a[0]] = a[1:]
        #print(self.cisimler)

    def dnaUret(self):
        """Expand each piece type to `adet` dna entries of (taban, yuks)."""
        sira = 0
        for tip in self.tipler:
            taban, yuks, adet = self.cisimler[tip]
            for adetsay in range(adet):
                sira += 1
                self.dnalar[sira] = (taban, yuks)
        self.dnaAdet = sira
        print('dnalar:',self.dnalar)

    def ilkjenerasyonuUret(self):
        """Create the first generation: each individual is a random
        permutation of dna indices plus random 0/1 rotation flags."""
        random.seed(self.random_cekirdegi)
        siralardizisi = [n for n in range(1, self.dnaAdet+1)]
        # print('Sira:',siralardizisi)
        # Rotations
        for jen in range(1, self.ilkjenerasyonAdedi+1):
            yonlerdizisi = [random.randint(0,1) for nn in range(self.dnaAdet)]
            random.shuffle(siralardizisi)
            # print(siralardizisi, yonlerdizisi)
            # print(jen)
            self.bireyler[jen] = tuple(siralardizisi), tuple(yonlerdizisi)
        print(self.bireyler)

    def bireyHesapla(self, bireyno):
        """Score one individual by building its placement chain.

        NOTE(review): ``Zincir`` reads a module-level ``cisimler`` that is
        not defined at module scope here — this would raise NameError if
        called; confirm against the original project.
        """
        bireysira, bireyyon = self.bireyler[bireyno]
        zincir = Zincir(bireysira, bireyyon)
if __name__ == "__main__":
    Hesap().ilkjenerasyonuUret()
    exit()
    # NOTE(review): unreachable — exit() above terminates the process, so
    # this legacy demo never runs (and the module-level `cisimler` it needs
    # is undefined, so it would raise NameError if it did).
    zincir1 = Zincir(siralar, yonler)
    zincir1.cisimleri_zemineyerlestir()
    zincir1.bas()
<reponame>AlainDaccache/Quantropy
import json
import os
import pickle
from datetime import date, datetime, timedelta
import typing
import requests
import yfinance as yf
from alpha_vantage.timeseries import TimeSeries
import re
import pandas as pd
from matilda import config
class StockPriceScraper:
    """Base class for OHLCV price scrapers.

    Normalizes the ticker list and resolves the (from_date, to_date)
    window from either an explicit range or a named `period`.  Subclasses
    populate ``self.Dates/Open/High/Low/Close/Volume``.
    """

    def __init__(self, ticker, period='1mo', from_date=None, to_date=None, frequency='1d'):
        """
        For the illusion of real-time, call it with period='min'
        :param ticker: can be string (i.e. 'AAPL') or list of strings (i.e. ['AAPL', 'FB', 'MSFT']).
        :param period: use instead of from_date and to_date. This is different than frequency. Period just says take date from now to `period`
            time ago. Valid periods: min,1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,YTD,max.
            By default, '1mo'. (NB: 'min' stands for minimum, not minute)
        :param from_date: if not selected, will use the default value of period
        :param to_date: if not selected, will use datetime.now()
        :param frequency: specifies the time interval between two consecutive data points in the time series.
            can be in ['1s', '5s', '15s', '30s', '1min', '5min', '15min', '30min',
            '1h', '4h', '1d', '1w', '1mo', '1y'] according to implementation.
            i.e. it can't be lower than the minimum frequency of the implementation.
        :return:
        """
        self.frequency_hierarchy = ['1s', '5s', '15s', '30s', '1min', '5min', '15min', '30min',
                                    '1h', '4h', '1d', '1w', '1mo', '1y']
        # BUG FIX: the original mapper used keys '3M', '6M', '1Y', ... while
        # the docstring (and yfinance-style callers) pass '3mo', '6mo',
        # '1y', ... — every documented period except '1d'/'5d'/'1mo' raised
        # KeyError.  Documented spellings added; legacy keys kept for
        # backward compatibility.
        lookback_period_mapper = {'1d': timedelta(days=1), '5d': timedelta(days=5),
                                  '1mo': timedelta(days=30),
                                  '3mo': timedelta(days=92), '3M': timedelta(days=92),
                                  '6mo': timedelta(days=183), '6M': timedelta(days=183),
                                  '1y': timedelta(days=365), '1Y': timedelta(days=365),
                                  '2y': timedelta(days=730), '2Y': timedelta(days=730),
                                  '5y': timedelta(days=1826), '5Y': timedelta(days=1826),
                                  '10y': timedelta(days=3652), '10Y': timedelta(days=3652)}
        if frequency not in self.frequency_hierarchy:
            raise Exception('Invalid frequency')
        self.frequency = frequency
        # Normalize to a list of tickers.
        if isinstance(ticker, str):
            self.ticker = [ticker]
        elif isinstance(ticker, list):
            self.ticker = ticker
        else:
            raise Exception('Invalid ticker type')
        if to_date is None:
            to_date = datetime.now()
        self.to_date = to_date
        if from_date is None:
            if period == 'YTD':
                from_date = datetime(year=to_date.year, month=1, day=1)
            elif period == 'max':
                # NOTE(review): date.min is a datetime.date, while the rest
                # of the class uses datetime objects — confirm downstream
                # comparisons before relying on period='max'.
                from_date = date.min
            elif period == 'min':
                from_date = to_date
            elif period in lookback_period_mapper:
                from_date = to_date - lookback_period_mapper[period]
            else:
                # Explicit failure, consistent with the frequency check,
                # instead of an opaque KeyError.
                raise Exception('Invalid period')
        self.from_date = from_date

    def convert_format(self, format):
        """Return the scraped OHLCV series in the requested shape.

        :param format: 'dict', 'json', or 'pandas' (case-insensitive).
        :raises Exception: for any other format string.
        """
        output = {}
        # Loop variable renamed from `date` to avoid shadowing datetime.date.
        for dt, open_, high, low, close, volume in zip(self.Dates, self.Open, self.High,
                                                       self.Low, self.Close, self.Volume):
            output[dt] = {'Open': open_, 'High': high, 'Low': low, 'Close': close, 'Volume': volume}
        if re.match('dict', format, re.IGNORECASE):
            return output
        elif re.match('json', format, re.IGNORECASE):
            return json.dumps(output)
        elif re.match('pandas', format, re.IGNORECASE):
            return pd.DataFrame.from_dict(output, orient='index')
        else:
            raise Exception('Please input a valid `format`')
class AlphaVantage(StockPriceScraper):
    """Fetch OHLCV price data for a single ticker from Alpha Vantage."""

    def __init__(self, ticker, period='1mo', from_date=None, to_date=None, frequency='1d'):
        # TODO not supporting ticker lists yet
        # FIX: `to_date` now defaults to None instead of the former
        # `datetime.now()` default argument, which was evaluated once at
        # import time and therefore went stale for long-lived processes.
        # The base class substitutes datetime.now() when it receives None,
        # so effective behaviour is unchanged for callers.
        super().__init__(ticker, period, from_date, to_date, frequency)
        # Frequencies that Alpha Vantage's intraday endpoint supports natively.
        intraday_frequency_mapper = {'1min': '1min', '5min': '5min', '15min': '15min',
                                     '30min': '30min', '1h': '60min'}
        ts = TimeSeries(config.ALPHAVANTAGE_API_KEY)
        # Mapping from Alpha Vantage's numbered column names to our names.
        df_cols = {'1. open': 'Open', '2. high': 'High', '3. low': 'Low',
                   '4. close': 'Close', '5. volume': 'Volume'}
        to_resample = False
        if self.frequency_hierarchy.index(frequency) < self.frequency_hierarchy.index('1min'):
            raise Exception("AlphaVantage can't support an interval lower than 1 minute")
        elif frequency in intraday_frequency_mapper:
            # Natively supported intraday interval.
            data, meta_data = ts.get_intraday(symbol=ticker,
                                              interval=intraday_frequency_mapper[frequency],
                                              outputsize='full')
        elif frequency == '1d':
            # Native daily endpoint; note it reports an *adjusted* close.
            data, meta_data = ts.get_daily_adjusted(symbol=ticker)
            df_cols = {'1. open': 'Open', '2. high': 'High', '3. low': 'Low',
                       '5. adjusted close': 'Close', '6. volume': 'Volume'}
        else:
            # Not natively supported: fetch hourly bars and resample below.
            data, meta_data = ts.get_intraday(symbol=ticker, interval='60min', outputsize='full')
            to_resample = True
        df = pd.DataFrame.from_dict(data=data, orient='index')
        df.index = pd.to_datetime(df.index)
        if to_resample:
            df = df.resample(frequency).first().dropna(how='all')
        df = df[(df.index >= self.from_date) & (df.index <= self.to_date)]
        df = df.rename(columns=df_cols)
        # Alpha Vantage returns rows newest-first; flip to chronological order.
        df = df.iloc[::-1]
        # Stamp each bar at the end of the period (23:59:59) rather than the
        # start, to represent the bar's closing time.
        df.index = df.index + timedelta(days=1) - timedelta(seconds=1)
        self.Open, self.High, self.Low = df['Open'], df['High'], df['Low']
        self.Close, self.Volume, self.Dates = df['Close'], df['Volume'], df.index.to_list()
class YahooFinance(StockPriceScraper):
    """Fetch OHLCV price data from Yahoo Finance via the ``yfinance`` package."""

    def __init__(self, ticker, from_date=None, to_date=None, period='1mo', frequency='1d'):
        # FIX: `to_date` defaults to None instead of the former
        # `datetime.now()` default argument, which was evaluated once at
        # import time and went stale; the base class substitutes the current
        # time when it receives None.
        super().__init__(ticker, period, from_date, to_date, frequency)
        # Yahoo spells tickers with '-' instead of '.' (e.g. BRK.B -> BRK-B).
        # FIX: isinstance against `list` instead of the deprecated
        # `typing.List` (matches the check in the base class).
        if isinstance(ticker, list):
            ticker = [stk.replace('.', '-') for stk in ticker]
        else:
            ticker = ticker.replace('.', '-')
        if self.frequency_hierarchy.index(frequency) < self.frequency_hierarchy.index('1min'):
            raise Exception("YahooFinance can't support an interval lower than 1 minute")
        # FIX: yfinance's Ticker.history() takes `start`/`end` keywords, not
        # `from_date`/`to_date` -- the original keywords were never honoured.
        resp = yf.Ticker(ticker).history(start=self.from_date, end=self.to_date,
                                         period=period, interval=frequency)
        self.Open = resp['Open']
        self.High = resp['High']
        self.Low = resp['Low']
        self.Close = resp['Close']
        self.Volume = resp['Volume']
        # Stamp each bar at end-of-day (23:59:59) rather than midnight.
        self.Dates = resp.index + timedelta(days=1) - timedelta(seconds=1)
class GoogleFinance(StockPriceScraper):
    # Scraper for the (long-deprecated) Google Finance JSON endpoint.
    # NOTE(review): this class looks unfinished -- `fin_data` is parsed but
    # never stored on the instance and no OHLCV attributes are populated, so
    # convert_format() would fail on instances of this class; confirm intent.
    def __init__(self, ticker, from_date, to_date=datetime.now(), period='1mo', frequency='1d'):
        super().__init__(ticker, period, from_date, to_date, frequency)
        rsp = requests.get(f'https://finance.google.com/finance?q={ticker}&output=json')
        if rsp.status_code in (200,):
            # Cut out various leading characters from the JSON response, as well as trailing stuff (a terminating ']\n'
            # sequence), and then we decode the escape sequences in the response
            # This then allows you to load the resulting string with the JSON module.
            # NOTE(review): debug print of the raw payload -- presumably left
            # in by accident.
            print(rsp.content)
            fin_data = json.loads(rsp.content[6:-2].decode('unicode_escape'))
class Quandl(StockPriceScraper):
    """Placeholder Quandl scraper: validates arguments but fetches nothing yet."""

    def __init__(self, ticker, from_date, to_date=None, period='1mo', frequency='1d'):
        # FIX: `to_date=None` replaces the former `datetime.now()` default
        # argument, which was evaluated once at import time and therefore went
        # stale; the base class substitutes the current time for None.
        super().__init__(ticker, period, from_date, to_date, frequency)
def get_prices_wrapper(source: str, ticker, period, from_date, to_date, frequency):
    """Instantiate and return the scraper matching ``source``.

    FIX: the original branches were empty ``pass`` stubs, so the function
    silently returned None for every source; it now dispatches to the
    corresponding scraper class.

    :param source: data-source name, matched case-insensitively as a prefix
        ('Quandl', 'Yahoo', 'Alpha Vantage' or 'Google').
    :raises Exception: if ``source`` matches none of the known providers.
    """
    if re.match('Quandl', source, re.IGNORECASE):
        return Quandl(ticker, from_date, to_date=to_date,
                      period=period, frequency=frequency)
    elif re.match('Yahoo', source, re.IGNORECASE):
        return YahooFinance(ticker, from_date=from_date, to_date=to_date,
                            period=period, frequency=frequency)
    elif re.match('Alpha Vantage', source, re.IGNORECASE):
        return AlphaVantage(ticker, period=period, from_date=from_date,
                            to_date=to_date, frequency=frequency)
    elif re.match('Google', source, re.IGNORECASE):
        return GoogleFinance(ticker, from_date, to_date=to_date,
                             period=period, frequency=frequency)
    else:
        raise Exception("Please make sure the `source` is either 'Quandl', 'Yahoo', 'Google', or 'Alpha Vantage'.")
if __name__ == '__main__':
    # not supporting list of tickers yet
    # Smoke test: fetch year-to-date daily AAPL prices from Yahoo Finance and
    # show the resulting DataFrame's (end-of-day-stamped) DatetimeIndex.
    df = YahooFinance(ticker='AAPL', period='YTD').convert_format('pandas')
    print(df.index)
|
#!/usr/bin/env python
#
# __init__.py - Functions for managing OpenGL shader programs.
#
# Author: <NAME> <<EMAIL>>
#
"""The ``shaders`` package contains classes and functions for finding,
parsing, compiling, and managing OpenGL shader programs. Two types of shader
program are supported:
- GLSL 1.20 vertex and fragment shaders.
- ``ARB_vertex_program`` and ``ARB_fragment_program`` shader programs.
The :mod:`.glsl` and :mod:`.arbp` packages respectively define the
:class:`.GLSLShader` and :class:`.ARBPShader` classes, which may be
used to manage shader programs of the corresponding type.
Some package-level functions are defined here, for finding and loading
shader source code:
.. autosummary::
:nosignatures:
getShaderDir
getShaderSuffix
getVertexShader
getFragmentShader
"""
# Import open from the io module, because it gives
# us an interface compatible across python 2 and 3
# (i.e. it allows us to specify the file encoding,
# and thus allows shader files to contain non-ascii
# characters).
from io import open
import os.path as op
import fsleyes
import fsleyes.gl as fslgl
from .glsl import program as glslprogram
from .arbp import program as arbpprogram
GLSLShader = glslprogram.GLSLShader
ARBPShader = arbpprogram.ARBPShader
def getShaderDir():
    """Returns the directory in which the ``ARB`` and ``glsl`` shader program
    source files can be found. A different directory will be returned
    depending on which OpenGL version is in use.

    :raises RuntimeError: if the OpenGL compatibility version is not
        recognised. (FIX: previously an unknown version left ``subdir``
        unbound, raising an opaque ``NameError`` instead.)
    """
    if fslgl.GL_COMPATIBILITY == '2.1':
        subdir = 'gl21'
    elif fslgl.GL_COMPATIBILITY == '1.4':
        subdir = 'gl14'
    else:
        raise RuntimeError('Unsupported OpenGL version: '
                           '{}'.format(fslgl.GL_COMPATIBILITY))

    return op.join(fsleyes.assetDir, 'gl', subdir)
def getShaderSuffix():
    """Returns the shader program file suffix to use. A different suffix will
    be returned depending on which OpenGL version is in use.

    :raises RuntimeError: if the OpenGL compatibility version is not
        recognised. (FIX: previously an unknown version silently returned
        ``None``, producing nonsensical file names downstream.)
    """
    if fslgl.GL_COMPATIBILITY == '2.1':
        return 'glsl'
    elif fslgl.GL_COMPATIBILITY == '1.4':
        return 'prog'
    raise RuntimeError('Unsupported OpenGL version: '
                       '{}'.format(fslgl.GL_COMPATIBILITY))
def getVertexShader(prefix):
    """Returns the vertex shader source for the given GL type (e.g.
    'glvolume').

    The source is read from disk and run through :func:`preprocess` to
    resolve ``#pragma include`` directives.
    """
    return _getShader(prefix, 'vert')
def getFragmentShader(prefix):
    """Returns the fragment shader source for the given GL type.

    The source is read from disk and run through :func:`preprocess` to
    resolve ``#pragma include`` directives.
    """
    return _getShader(prefix, 'frag')
def _getShader(prefix, shaderType):
    """Load, preprocess and return the shader source for the given GL type
    and the given shader type ('vert' or 'frag').
    """
    path = _getFileName(prefix, shaderType)
    with open(path, 'rt', encoding='utf-8') as shaderFile:
        source = shaderFile.read()
    return preprocess(source)
def _getFileName(prefix, shaderType):
    """Return the path of the shader program file for the given GL type and
    shader type ('vert' or 'frag').
    """
    suffix = getShaderSuffix()

    if shaderType not in ('vert', 'frag'):
        raise RuntimeError('Invalid shader type: {}'.format(shaderType))

    fileName = '{}_{}.{}'.format(prefix, shaderType, suffix)
    return op.join(getShaderDir(), fileName)
def preprocess(src):
    """'Preprocess' the given shader source.

    Every line is stripped of surrounding whitespace, and any line of the
    form ``#pragma include filename`` is replaced with the contents of the
    named file (resolved relative to :func:`getShaderDir`). Includes are not
    processed recursively.
    """
    lines = [line.strip() for line in src.split('\n')]

    for index, line in enumerate(lines):
        if not line.startswith('#pragma'):
            continue
        tokens = line.split()
        # Only exact '#pragma include <file>' directives are expanded;
        # anything else (e.g. '#pragma once') is left untouched.
        if len(tokens) != 3 or tokens[1] != 'include':
            continue
        includePath = op.join(getShaderDir(), tokens[2])
        with open(includePath, 'rt', encoding='utf-8') as includeFile:
            lines[index] = includeFile.read()

    return '\n'.join(lines)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module allows you to bring up and tear down keyspaces."""
import cgi
import decimal
import json
import subprocess
import threading
import time
from vtdb import keyrange
from vtdb import vtgate_client
# TODO(sougou): remove this import once the deps are fixed
import google.protobuf
from vtdb import grpc_vtgate_client # pylint: disable=unused-import
def exec_query(conn, title, query, response, keyspace=None, kr=None):  # pylint: disable=missing-docstring
    # Execute `query` on the vtgate connection and record the outcome (result
    # set or error) in `response[title]` for later JSON serialisation.
    #
    # conn:     vtgate connection object.
    # title:    key under which the result/error dict is stored in `response`.
    # query:    SQL text; an empty string or the literal "undefined" is a no-op.
    # response: dict collecting results for the CGI JSON payload.
    # keyspace: target keyspace, or None.
    # kr:       key range string (e.g. "-80"); when set, a v2 cursor is used
    #           to address the individual shard directly.
    if kr:
        # v2 cursor to address individual shards directly, for debug display
        cursor = conn.cursor(
            tablet_type="master", keyspace=keyspace,
            keyranges=[keyrange.KeyRange(kr)])
    else:
        # v3 cursor is automated
        cursor = conn.cursor(
            tablet_type="master", keyspace=keyspace, writable=True)
    try:
        # NOTE(review): this early return leaves the cursor open -- presumably
        # harmless in this short-lived CGI demo; confirm.
        if not query or query == "undefined":
            return
        if query.startswith("select"):
            cursor.execute(query, {})
        else:
            # Non-select statements are wrapped in an explicit transaction.
            cursor.begin()
            cursor.execute(query, {})
            cursor.commit()
        response[title] = {
            "title": title,
            "description": cursor.description,
            "rowcount": cursor.rowcount,
            "lastrowid": cursor.lastrowid,
            "results": cursor.results,
        }
        cursor.close()
    except Exception as e:  # pylint: disable=broad-except
        # Report the failure in the response rather than crashing the CGI.
        # rollback() is attempted even for failed selects; rollback-then-close
        # ordering is deliberate.
        response[title] = {
            "title": title,
            "error": str(e),
        }
        cursor.rollback()
        cursor.close()
def capture_log(port, queries):  # pylint: disable=missing-docstring
    # Tail vtgate's /debug/querylog endpoint with curl, appending each logged
    # SQL string to `queries` from a daemon thread. Returns the curl
    # subprocess so the caller can terminate it when done.
    proc = subprocess.Popen(
        ["curl", "-s", "-N", "http://localhost:%d/debug/querylog" % port],
        stdout=subprocess.PIPE)

    def collect():
        # Column 13 of the tab-separated log line holds the quoted SQL.
        for line in iter(proc.stdout.readline, ""):
            sql = line.split("\t")[12].strip('"')
            if sql:
                queries.append(sql)

    reader = threading.Thread(target=collect)
    reader.daemon = True
    reader.start()
    return proc
def main():
    # CGI entry point: run the user-supplied query while capturing the
    # queries vtgate logs for it, then dump the contents of every demo table
    # (per shard) and emit everything as one JSON document.
    # (print(x) with a single argument behaves identically under Python 2.)
    print("Content-Type: application/json\n")
    try:
        conn = vtgate_client.connect("grpc", "localhost:12346", 10.0)
        args = cgi.FieldStorage()
        query = args.getvalue("query")
        response = {}
        try:
            queries = []
            stats = capture_log(12345, queries)
            time.sleep(0.25)
            exec_query(conn, "result", query, response)
        finally:
            stats.terminate()
            time.sleep(0.25)
            response["queries"] = queries

        # Sharded tables in the "user" keyspace: dump both shards of each,
        # under titles like "user0"/"user1".
        for table in ("user", "user_extra", "music", "music_extra",
                      "name_info", "music_user_idx"):
            for num, kr in enumerate(("-80", "80-")):
                exec_query(
                    conn, "%s%d" % (table, num),
                    "select * from %s" % table, response,
                    keyspace="user", kr=kr)

        # Lookup/sequence tables live in the unsharded "lookup" keyspace.
        for table in ("user_seq", "music_seq", "name_user_idx"):
            exec_query(
                conn, table,
                "select * from %s" % table, response,
                keyspace="lookup", kr="-")

        print(json.dumps(response, default=decimal_default))
    except Exception as e:  # pylint: disable=broad-except
        print(json.dumps({"error": str(e)}))
def decimal_default(obj):
    """Provide json-encodable conversion for decimal.Decimal type.

    json encoding fails on decimal.Decimal. This function converts the
    decimal into a float object which json knows how to encode.

    :raises TypeError: for any other type, so json.dumps reports the
        offending type. (FIX: the bare ``raise TypeError`` previously
        produced an error with no message at all.)
    """
    if isinstance(obj, decimal.Decimal):
        return float(obj)
    raise TypeError("Object of type %s is not JSON serializable"
                    % type(obj).__name__)
if __name__ == "__main__":
    # CGI scripts are executed directly by the web server.
    main()
|
<reponame>LeonDante-ctrl/leons-blog<gh_stars>0
# -*- coding: utf-8 -*- vim: fileencoding=utf-8 :
""" Dictionary-like interfaces to RFC822-like files
The Python deb822 module aims to provide a dict-like interface to various RFC822-like
Debian data formats, like Packages/Sources, .changes/.dsc, pdiff Index files,
etc. As well as the generic :class:`Deb822` class, the specialised versions
of these classes (:class:`Packages`, :class:`Sources`, :class:`Changes` etc)
know about special fields that contain specially formatted data such as
dependency lists or whitespace separated sub-fields.
This module has few external dependencies, but can use python-apt if available
to parse the data, which gives a very significant performance boost when
iterating over big Packages files.
Whitespace separated data within fields are known as "multifields".
The "Files" field in Sources files, for instance, has three subfields, while
"Files" in .changes files, has five; the relevant classes here know this and
correctly handle these cases.
Key lookup in Deb822 objects and their multifields is case-insensitive, but
the original case is always preserved, for example when printing the object.
The Debian project and individual developers make extensive use of GPG
signatures including in-line signatures. GPG signatures are automatically
detected, verified and the payload then offered to the parser.
Relevant documentation on the Deb822 file formats available here.
- `deb-control(5)
<https://manpages.debian.org/stretch/dpkg-dev/deb-control.5.html>`_,
the `control` file in the binary package (generated from
`debian/control` in the source package)
- `deb-changes(5)
<https://manpages.debian.org/stretch/dpkg-dev/deb-changes.5.html>`_,
`changes` files that developers upload to add new packages to the
archive.
- `dsc(5) <https://manpages.debian.org/stretch/dpkg-dev/dsc.5.html>`_,
Debian Source Control file that defines the files that are part of a
source package.
- `Debian mirror format <http://wiki.debian.org/RepositoryFormat>`_,
including documentation for Packages, Sources files etc.
Overview of deb822 Classes
--------------------------
Classes that deb822 provides:
* :class:`Deb822` base class with no multifields. A Deb822 object holds a
single entry from a Deb822-style file, where paragraphs are separated by
blank lines and there may be many paragraphs within a file. The
:func:`~Deb822.iter_paragraphs` function yields paragraphs from a data
source.
* :class:`Packages` represents a Packages file from a Debian mirror.
It extends the Deb822 class by interpreting fields that
are package relationships (Depends, Recommends etc). Iteration is forced
through python-apt for performance and conformance.
* :class:`Dsc` represents .dsc files (Debian Source Control) that are the
metadata file of the source package.
Multivalued fields:
* Files: md5sum, size, name
* Checksums-Sha1: sha1, size, name
* Checksums-Sha256: sha256, size, name
* Checksums-Sha512: sha512, size, name
* :class:`Sources` represents a Sources file from a Debian mirror.
It extends the Dsc class by interpreting fields that
are package relationships (Build-Depends, Build-Conflicts etc).
Iteration is forced through python-apt for performance and conformance.
* :class:`Release` represents a Release file from a Debian mirror.
Multivalued fields:
* MD5Sum: md5sum, size, name
* SHA1: sha1, size, name
* SHA256: sha256, size, name
* SHA512: sha512, size, name
* :class:`Changes` represents a .changes file that is uploaded to "change
the archive" by including new source or binary packages.
Multivalued fields:
* Files: md5sum, size, section, priority, name
* Checksums-Sha1: sha1, size, name
* Checksums-Sha256: sha256, size, name
* Checksums-Sha512: sha512, size, name
* :class:`PdiffIndex` represents a pdiff Index file (`foo`.diff/Index) file
from a Debian mirror.
Multivalued fields:
* SHA1-Current: SHA1, size
* SHA1-History: SHA1, size, date
* SHA1-Patches: SHA1, size, date
* :class:`Removals` represents the ftp-master removals file listing when
and why source and binary packages are removed from the archive.
Input
=====
Deb822 objects are normally initialized from a file object (from which
at most one paragraph is read) or a string. Alternatively, any sequence
that returns one line of input at a time may be used, e.g a list of strings.
PGP signatures, if present, will be stripped.
Example::
>>> from debian.deb822 import Deb822
>>> filename = '/var/lib/apt/lists/deb.debian.org_debian_dists_sid_InRelease'
>>> with open(filename) as fh:
... rel = Deb822(fh)
>>> print('Origin: {Origin}\\nCodename: {Codename}\\nDate: {Date}'.format_map(rel))
Origin: Debian
Codename: sid
Date: Sat, 07 Apr 2018 14:41:12 UTC
>>> print(list(rel.keys()))
['Origin', 'Label', 'Suite', 'Codename', 'Changelogs', 'Date',
'Valid-Until', 'Acquire-By-Hash', 'Architectures', 'Components',
'Description', 'MD5Sum', 'SHA256']
In the above, the `MD5Sum` and `SHA256` fields are just a very long string. If
instead the :class:`Release` class is used, these fields are interpreted and
can be addressed::
>>> from debian.deb822 import Release
>>> filename = '/var/lib/apt/lists/deb.debian.org_debian_dists_sid_InRelease'
>>> with open(filename) as fh:
... rel = Release(fh)
>>> wanted = 'main/binary-amd64/Packages'
>>> [(l['sha256'], l['size']) for l in rel['SHA256'] if l['name'] == wanted]
[('c0f7aa0b92ebd6971c0b64f93f52a8b2e15b0b818413ca13438c50eb82586665', '45314424')]
Iteration
=========
All classes use the :func:`~Deb822.iter_paragraphs` class method to easily
iterate through each paragraph in a file that contains multiple entries
(e.g. a Packages.gz file).
For example::
>>> with open('/mirror/debian/dists/sid/main/binary-i386/Sources') as f:
... for src in Sources.iter_paragraphs(f):
... print(src['Package'], src['Version'])
The `iter_paragraphs` methods can use python-apt if available to parse
the data, since it significantly boosts performance.
If python-apt is not present and the
file is a compressed file, it must first be decompressed manually. Note that
python-apt should not be used on `debian/control` files since python-apt is
designed to be strict and fast while the syntax of `debian/control` is a
superset of what python-apt is designed to parse.
This function is overridden to force use of the
python-apt parser using `use_apt_pkg` in the :func:`~Packages.iter_paragraphs`
and :func:`~Sources.iter_paragraphs` functions.
Sample usage
============
Manipulating a .dsc file::
from debian import deb822
with open('foo_1.1.dsc') as f:
d = deb822.Dsc(f)
source = d['Source']
version = d['Version']
for f in d['Files']:
print('Name:', f['name'])
print('Size:', f['size'])
print('MD5sum:', f['md5sum'])
# If we like, we can change fields
d['Standards-Version'] = '3.7.2'
# And then dump the new contents
with open('foo_1.1.dsc2', 'w') as new_dsc:
d.dump(new_dsc)
(TODO: Improve, expand)
Deb822 Classes
--------------
"""
# Copyright (C) 2005-2006 <NAME> <<EMAIL>>
# Copyright (C) 2006-2010 <NAME> <<EMAIL>>
# Copyright (C) 2006 <NAME> <<EMAIL>>
# Copyright (C) 2008 <NAME> <<EMAIL>>
# Copyright (C) 2014 Google, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import absolute_import, print_function
import collections
try:
# Python 3
import collections.abc as collections_abc
except ImportError:
# Python 2.7 cruft
# pylint: disable=reimported
import collections as collections_abc # type: ignore
import datetime
import email.utils
import io
import re
import subprocess
import sys
import warnings
import chardet # type: ignore
import six
try:
# pylint: disable=unused-import
from typing import (
Any,
Callable,
Dict,
Iterator,
Iterable,
IO,
List,
Mapping,
MutableMapping,
Optional,
#Pattern,
Set,
Text,
Tuple,
Union,
cast,
)
IterableInputDataType = Union[
bytes,
Text,
IO[Text],
Iterable[Text],
Iterable[bytes],
]
IterableDataSourceType = Union[
IO[Text],
Iterable[Text],
Iterable[bytes],
]
Deb822ValueType = Any # this really is Union[str, List] but that is a can of worms
Deb822Mapping = Mapping[str, Deb822ValueType]
import builtins # pylint: disable=unused-import
except ImportError:
# Lack of typing is not important at runtime
pass
from debian.deprecation import function_deprecated_by
import debian.debian_support
try:
import apt_pkg # type: ignore
# This module uses apt_pkg only for its TagFile interface.
apt_pkg.TagFile # pylint: disable=pointless-statement
_have_apt_pkg = True
except (ImportError, AttributeError):
_have_apt_pkg = False
def _has_fileno(f):
# type: (Any) -> bool
""" test that a file-like object is really a filehandle
Only filehandles can be given to apt_pkg.TagFile.
"""
try:
f.fileno()
return True
except (AttributeError, io.UnsupportedOperation):
return False
# Default keyring(s) consulted when verifying GPG signatures on deb822 data.
GPGV_DEFAULT_KEYRINGS = frozenset(['/usr/share/keyrings/debian-keyring.gpg'])
# Path of the gpgv binary used for signature verification.
GPGV_EXECUTABLE = '/usr/bin/gpgv'
# Exception hierarchy: every error raised by this module derives from
# `Error`, so callers can catch the single base class.
class Error(Exception):
    """Base class for custom exceptions in this module."""
class RestrictedFieldError(Error):
    """Raised when modifying the raw value of a field is not allowed."""
class TagSectionWrapper(collections_abc.Mapping):
    """Wrap a TagSection object, using its find_raw method to get field values

    This allows us to pick which whitespace to strip off the beginning and end
    of the data, so we don't lose leading newlines.
    """

    def __init__(self,
                 section,       # type: apt_pkg.TagSection
                 decoder=None,  # type: Optional[_AutoDecoder]
                ):
        # type: (...) -> None
        self.__section = section
        self.decoder = decoder or _AutoDecoder()
        super(TagSectionWrapper, self).__init__()

    def __iter__(self):
        # type: () -> Iterator[str]
        # Comment pseudo-fields (keys beginning with '#') are hidden.
        for key in self.__section.keys():
            if not key.startswith('#'):
                yield key

    def __len__(self):
        # type: () -> int
        return len([key for key in self.__section.keys()
                    if not key.startswith('#')])

    def __getitem__(self, key):
        # type: (_CaseInsensitiveString) -> str
        # find_raw may give str or bytes depending on how it goes with decoding
        # and how it is set up by the TagFile iterator
        s = self.__section.find_raw(key)

        # FIX: check for a missing key *before* decoding; the original decoded
        # first and only then tested for None, relying on the decoder passing
        # None through untouched.
        if s is None:
            raise KeyError(key)
        s = self.decoder.decode(s)

        # Get just the stuff after the first ':'
        # Could use s.partition if we only supported python >= 2.5
        data = s[s.find(':')+1:]

        # Get rid of spaces and tabs after the ':', but not newlines, and strip
        # off any newline at the end of the data.
        return data.lstrip(' \t').rstrip('\n')
class OrderedSet(object):
    """A set-like object that preserves order when iterating over it

    We use this to keep track of keys in Deb822Dict, because it's much faster
    to look up if a key is in a set than in a list.
    """

    def __init__(self, iterable=None):
        # type: (Optional[Iterable[str]]) -> None
        # Paired structures: a set for O(1) membership plus a list that
        # remembers insertion order.
        self.__set = set()    # type: Set[str]
        self.__order = []     # type: List[str]
        for item in (iterable if iterable is not None else []):
            self.add(item)

    def add(self, item):
        # type: (str) -> None
        if item in self:
            return
        # set.add will raise TypeError if something's unhashable, so we
        # don't have to handle that ourselves
        self.__set.add(item)
        self.__order.append(item)

    def remove(self, item):
        # type: (str) -> None
        # set.remove will raise KeyError, so we don't need to handle that
        # ourselves
        self.__set.remove(item)
        self.__order.remove(item)

    def __iter__(self):
        # type: () -> Iterator
        # Yield items in the order they were first added.
        return iter(self.__order)

    def __len__(self):
        # type: () -> int
        return len(self.__order)

    def __contains__(self, item):
        # type: (str) -> bool
        # This is what makes OrderedSet faster than using a list to keep track
        # of keys. Lookup in a set is O(1) instead of O(n) for a list.
        return item in self.__set

    # ### list-like methods

    append = add

    def extend(self, iterable):
        # type: (List[str]) -> None
        for item in iterable:
            self.add(item)

    # ###
class Deb822Dict(collections_abc.MutableMapping):
    """A dictionary-like object suitable for storing RFC822-like data.

    Deb822Dict behaves like a normal dict, except:
       - key lookup is case-insensitive
       - key order is preserved
       - if initialized with a _parsed parameter, it will pull values from
         that dictionary-like object as needed (rather than making a copy).
         The _parsed dict is expected to be able to handle case-insensitive
         keys.

    If _parsed is not None, an optional _fields parameter specifies which keys
    in the _parsed dictionary are exposed.
    """

    # See the end of the file for the definition of _strI

    def __init__(self,
                 _dict=None,        # type: Optional[Union[Deb822Mapping, Iterable[Tuple[str,str]]]]
                 _parsed=None,      # type: Optional[Union[Deb822, TagSectionWrapper]]
                 _fields=None,      # type: Optional[List[str]]
                 encoding="utf-8",  # type: str
                ):
        # type: (...) -> None
        # __dict holds explicitly-assigned values; __keys tracks insertion
        # order and also covers keys that exist only in the lazy __parsed
        # backing store, from which values are fetched on demand.
        self.__dict = {}  # type: Dict[_CaseInsensitiveString, Deb822ValueType]
        self.__keys = OrderedSet()
        self.__parsed = None  # type: Optional[Union[Deb822, TagSectionWrapper]]
        self.encoding = encoding
        self.decoder = _AutoDecoder(self.encoding)
        super(Deb822Dict, self).__init__()

        if _dict is not None:
            # _dict may be a dict or a list of two-sized tuples
            # define the type in advance and then ignore the next assignments
            # https://github.com/python/mypy/issues/1424
            items = []  # type: List[Tuple[str,str]]
            if hasattr(_dict, 'items'):
                items = _dict.items()  # type: ignore
            else:
                items = list(_dict)  # type: ignore

            try:
                for k, v in items:
                    self[k] = v
            except ValueError:
                # Mimic dict()'s error message for malformed update sequences.
                this = len(self.__keys)
                len_ = len(items[this])
                raise ValueError(
                    'dictionary update sequence element #%d has '
                    'length %d; 2 is required' % (this, len_))

        if _parsed is not None:
            self.__parsed = _parsed
            if _fields is None:
                # Expose every key present in the parsed backing store.
                self.__keys.extend([_strI(k) for k in self.__parsed])
            else:
                # Expose only the requested fields that actually exist there.
                self.__keys.extend([_strI(f) for f in _fields if f in self.__parsed])

    # ### BEGIN collections.abc.MutableMapping methods

    def __iter__(self):
        # type: () -> Iterator[str]
        # Keys come back in insertion order, as plain str (case preserved).
        for key in self.__keys:
            yield str(key)

    def __len__(self):
        # type: () -> int
        return len(self.__keys)

    def __setitem__(self, key, value):
        # type: (str, Deb822ValueType) -> None
        # Keys are wrapped in the case-insensitive string type _strI.
        keyi = _strI(key)
        self.__keys.add(keyi)
        self.__dict[keyi] = value

    def __getitem__(self, key):
        # type: (str) -> Deb822ValueType
        keyi = _strI(key)
        try:
            value = self.__dict[keyi]
        except KeyError:
            # Fall back to the lazy parsed store for known-but-unread keys.
            if self.__parsed is not None and keyi in self.__keys:
                value = self.__parsed[keyi]
            else:
                raise

        # TODO(jsw): Move the decoding logic into __setitem__ so that we decode
        # it once instead of every time somebody asks for it.  Even better if
        # Deb822* classes dealt in pure unicode and didn't care about the
        # encoding of the files they came from...but I don't know how to fix
        # that without breaking a bunch of users.
        return self.decoder.decode(value)

    def __delitem__(self, key):
        # type: (str) -> None
        keyi = _strI(key)
        self.__keys.remove(keyi)
        try:
            del self.__dict[keyi]
        except KeyError:
            # If we got this far, the key was in self.__keys, so it must have
            # only been in the self.__parsed dict.
            pass

    def __contains__(self, key):
        # type: (Any) -> bool
        keyi = _strI(key)
        return keyi in self.__keys

    if sys.version < '3':
        # Python 2 dict-compatibility alias.
        has_key = __contains__

    # ### END collections.abc.MutableMapping methods

    def __repr__(self):
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])

    def __eq__(self, other):
        # type: (Any) -> bool
        # NOTE(review): this compares the *case-preserved* key lists, so two
        # mappings differing only in key case compare unequal even though
        # lookup is case-insensitive -- confirm this is intended.
        mykeys = sorted(self)
        otherkeys = sorted(other)
        if not mykeys == otherkeys:
            return False

        for key in mykeys:
            if self[key] != other[key]:
                return False

        # If we got here, everything matched
        return True

    # Overriding __eq__ blocks inheritance of __hash__ in Python 3, and
    # instances of this class are not sensibly hashable anyway.
    __hash__ = None  # type: ignore

    def copy(self):
        # type: () -> Union[Deb822, Deb822Dict]
        # Use self.__class__ so this works as expected for subclasses
        copy = self.__class__(self)
        return copy

    # TODO implement __str__() and make dump() use that?
class Deb822(Deb822Dict):
""" Generic Deb822 data
:param sequence: a string, or any any object that returns a line of
input each time, normally a file. Alternately, sequence can
be a dict that contains the initial key-value pairs. When
python-apt is present, sequence can also be a compressed object,
for example a file object associated to something.gz.
:param fields: if given, it is interpreted as a list of fields that
should be parsed (the rest will be discarded).
:param _parsed: internal parameter.
:param encoding: When parsing strings, interpret them in this encoding.
(All values are given back as unicode objects, so an encoding is
necessary in order to properly interpret the strings.)
:param strict: Dict controlling the strictness of the internal parser
to permit tuning of its behaviour between "generous in what it
accepts" and "strict conformance". Known keys are described below.
*Internal parser tuning*
- `whitespace-separates-paragraphs`: (default: `True`)
Blank lines between paragraphs should not have any whitespace in them
at all. However:
- Policy §5.1 permits `debian/control` in source packages to separate
packages with lines containing whitespace to allow human edited
files to have stray whitespace. Failing to honour this breaks
tools such as
`wrap-and-sort <https://manpages.debian.org/wrap-and-sort>`_
(see, for example,
`Debian Bug 715558 <https://bugs.debian.org/715558/>`_).
- `apt_pkg.TagFile` accepts whitespace-only lines within the
`Description` field; strictly matching the behaviour of apt's
Deb822 parser requires setting this key to `False` (as is done
by default for :class:`Sources` and :class:`Packages`.
(see, for example,
`Debian Bug 913274 <https://bugs.debian.org/913274/>`_).
Note that these tuning parameter are only for the parser that is
internal to `Deb822` and do not apply to python-apt's apt_pkg.TagFile
parser which would normally be used for Packages and Sources files.
"""
def __init__(self,
sequence=None, # type: Optional[Union[IterableDataSourceType, Deb822Mapping]]
fields=None, # type: Optional[List[str]]
_parsed=None, # type: Optional[Union[Deb822, TagSectionWrapper]]
encoding="utf-8", # type: str
strict=None, # type: Optional[Dict]
):
# type: (...) -> None
_dict = {} # type: Mapping[str, str]
iterable = None # type: Optional[IterableDataSourceType]
if hasattr(sequence, 'items'):
_dict = sequence # type: ignore
else:
iterable = sequence
Deb822Dict.__init__(self, _dict=_dict, _parsed=_parsed, _fields=fields,
encoding=encoding)
if iterable is not None:
try:
self._internal_parser(iterable, fields, strict)
except EOFError:
pass
self.gpg_info = None # type: Optional[GpgInfo]
#self.raw_text = None # type: Optional[bytes]
    @classmethod
    def iter_paragraphs(cls,
                        sequence,  # type: IterableInputDataType
                        fields=None,  # type: Optional[List[str]]
                        use_apt_pkg=False,  # type: bool
                        shared_storage=False,  # type: bool
                        encoding="utf-8",  # type: str
                        strict=None,  # type: Optional[Dict]
                        ):
        # type: (...) -> Iterator[Deb822]
        """Generator that yields a Deb822 object for each paragraph in sequence.

        :param sequence: same as in __init__.

        :param fields: likewise.

        :param use_apt_pkg: if sequence is a file, apt_pkg can be used
            if available to parse the file, since it's much much faster. Set
            this parameter to True to enable use of apt_pkg. Note that the
            TagFile parser from apt_pkg is a much stricter parser of the
            Deb822 format, particularly with regard to whitespace between
            paragraphs and comments within paragraphs. If these features are
            required (for example in debian/control files), ensure that this
            parameter is set to False.

        :param shared_storage: not used, here for historical reasons. Deb822
            objects never use shared storage anymore.

        :param encoding: Interpret the paragraphs in this encoding.
            (All values are given back as unicode objects, so an encoding is
            necessary in order to properly interpret the strings.)

        :param strict: dict of settings to tune the internal parser if that is
            being used. See the documentation for :class:`Deb822` for details.
        """
        # pylint: disable=unused-argument

        # apt_pkg's TagFile needs a real file descriptor behind the input.
        apt_pkg_allowed = use_apt_pkg and _has_fileno(sequence)

        if use_apt_pkg and not _have_apt_pkg:
            # warn that apt_pkg was requested but not installed
            msg = (
                "Parsing of Deb822 data with python{pyver}-apt's apt_pkg was "
                "requested but this package is not importable. "
                "Is python{pyver}-apt installed?"
            ).format(
                pyver=('3' if sys.version_info[0] == 3 else '')
            )
            warnings.warn(msg)

        elif use_apt_pkg and not apt_pkg_allowed:
            # warn that apt_pkg was requested but can't be used
            msg = (
                "Parsing of Deb822 data with python-apt's apt_pkg was "
                "requested but this cannot be done on non-file input."
            )
            warnings.warn(msg)

        if _have_apt_pkg and apt_pkg_allowed:
            # pylint: disable=no-member
            # Fast path: let apt's C parser split the file into sections.
            parser = apt_pkg.TagFile(sequence, bytes=True)
            for section in parser:
                paragraph = cls(fields=fields,
                                _parsed=TagSectionWrapper(section, _AutoDecoder(encoding)),
                                encoding=encoding)
                if paragraph:
                    yield paragraph

        else:
            # Split this into multiple conditionals so that type checking
            # can follow the types through
            iterable = []  # type: IterableDataSourceType
            if isinstance(sequence, six.string_types):
                iterable = iter(sequence.splitlines())
            elif isinstance(sequence, six.binary_type):
                iterable = iter(sequence.splitlines())
            else:
                # StringIO/list can be iterated directly
                iterable = iter(sequence)  # type: ignore
            while True:
                # Each constructor call consumes exactly one paragraph
                # from the shared iterator; an empty result means the
                # input is exhausted.
                x = cls(iterable, fields, encoding=encoding, strict=strict)
                if not x:
                    break
                yield x
###
@staticmethod
def _skip_useless_lines(sequence):
# type: (IterableDataSourceType) -> Union[Iterator[bytes], Iterator[str]]
"""Yields only lines that do not begin with '#'.
Also skips any blank lines at the beginning of the input.
"""
at_beginning = True
for line in sequence:
# The bytes/str polymorphism required here to support Python 3
# is unpleasant, but fortunately limited. We need this because
# at this point we might have been given either bytes or
# Unicode, and we haven't yet got to the point where we can try
# to decode a whole paragraph and detect its encoding.
if isinstance(line, bytes):
if line.startswith(b'#'):
continue
else:
if line.startswith('#'):
continue
if at_beginning:
if isinstance(line, bytes):
if not line.rstrip(b'\r\n'):
continue
else:
if not line.rstrip('\r\n'):
continue
at_beginning = False
yield line
    def _internal_parser(self,
                         sequence,  # type: IterableDataSourceType
                         fields=None,  # type: Optional[List[str]]
                         strict=None,  # type: Optional[Dict]
                         ):
        # type: (...) -> None
        """Parse one paragraph from `sequence` into this object's fields.

        Only fields named in `fields` are stored (all fields when it is
        None).  `strict` is passed through to gpg_stripped_paragraph.
        """
        # The key is non-whitespace, non-colon characters before any colon.
        key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
        # "Key: value" on a single line.
        single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
        # "Key:" alone, introducing a multi-line value.
        multi = re.compile(key_part + r"$")
        # A continuation line: leading whitespace then data.
        multidata = re.compile(r"^\s(?P<data>.+?)\s*$")

        def wanted_field(f):
            # type: (str) -> bool
            return fields is None or f in fields

        if isinstance(sequence, (six.string_types, bytes)):
            sequence = sequence.splitlines()

        curkey = None
        content = ""

        for linebytes in self.gpg_stripped_paragraph(
                self._skip_useless_lines(sequence), strict):
            line = self.decoder.decode(linebytes)

            m = single.match(line)
            if m:
                # New single-line field: flush the previous field first.
                if curkey:
                    self[curkey] = content

                if not wanted_field(m.group('key')):
                    curkey = None
                    continue

                curkey = m.group('key')
                content = m.group('data')
                continue

            m = multi.match(line)
            if m:
                if curkey:
                    self[curkey] = content

                if not wanted_field(m.group('key')):
                    curkey = None
                    continue

                curkey = m.group('key')
                content = ""
                continue

            m = multidata.match(line)
            if m:
                # Continuation line: keep the whole line, including its
                # leading whitespace, so dumping round-trips the value.
                content += '\n' + line  # XXX not m.group('data')?
                continue

        # Flush the final field of the paragraph.
        if curkey:
            self[curkey] = content
    def __str__(self):
        # Serialise back to deb822 text format.
        return self.dump()

    def __unicode__(self):
        # Python 2 unicode conversion; same output as __str__.
        return self.dump()

    if sys.version >= '3':
        def __bytes__(self):
            # Encode the dumped text using this object's own encoding.
            return self.dump().encode(self.encoding)

    # __repr__ is handled by Deb822Dict
def get_as_string(self, key):
# type: (str) -> str
"""Return the self[key] as a string (or unicode)
The default implementation just returns unicode(self[key]); however,
this can be overridden in subclasses (e.g. _multivalued) that can take
special values.
"""
return six.text_type(self[key])
    def dump(self,
             fd=None,  # type: Optional[Union[IO[str], IO[bytes]]]
             encoding=None,  # type: Optional[str]
             text_mode=False,  # type: bool
             ):
        # type: (...) -> Optional[str]
        """Dump the contents in the original format

        :param fd: file-like object to which the data should be written
            (see notes below)

        :param encoding: str, optional (Defaults to object default).
            Encoding to use when writing out the data.

        :param text_mode: bool, optional (Defaults to ``False``).
            Encoding should be undertaken by this function rather than by the
            caller.

        If fd is None, returns a unicode object.  Otherwise, fd is assumed to
        be a file-like object, and this method will write the data to it
        instead of returning a unicode object.

        If fd is not None and text_mode is False, the data will be encoded
        to a byte string before writing to the file.  The encoding used is
        chosen via the encoding parameter; None means to use the encoding the
        object was initialized with (utf-8 by default).  This will raise
        UnicodeEncodeError if the encoding can't support all the characters in
        the Deb822Dict values.
        """
        # Ideally this would never try to encode (that should be up to the
        # caller when opening the file), but we may still have users who rely
        # on the binary mode encoding.  But...might it be better to break them
        # than to introduce yet another parameter relating to encoding?

        if fd is None:
            fd = io.StringIO()
            return_string = True
        else:
            return_string = False

        if encoding is None:
            # Use the encoding we've been using to decode strings with if none
            # was explicitly specified
            encoding = self.encoding

        for key in self:
            value = self.get_as_string(key)
            if not value or value[0] == '\n':
                # Avoid trailing whitespace after "Field:" if it's on its own
                # line or the value is empty.  We don't have to worry about the
                # case where value == '\n', since we ensure that is not the
                # case in __setitem__.
                entry = '%s:%s\n' % (key, value)
            else:
                entry = '%s: %s\n' % (key, value)
            # Only encode ourselves when writing to a binary-mode file.
            if not return_string and not text_mode:
                fd.write(entry.encode(encoding))  # type: ignore
            else:
                fd.write(entry)  # type: ignore
        if return_string:
            return fd.getvalue()  # type: ignore
        return None
###
@staticmethod
def is_single_line(s):
# type: (str) -> bool
return not s.count("\n")
isSingleLine = function_deprecated_by(is_single_line)
@staticmethod
def is_multi_line(s):
# type: (str) -> bool
return not Deb822.is_single_line(s)
isMultiLine = function_deprecated_by(is_multi_line)
    def _merge_fields(self,
                      s1,  # type: str
                      s2,  # type: str
                      ):
        # type: (...) -> str
        """Merge two values of the same field into one string.

        Two single-line values are treated as delimiter-separated sets
        and merged sorted with duplicates removed; two multi-line values
        are merged line by line.  Raises ValueError when one value is
        single-line and the other multi-line.
        """
        # An empty side contributes nothing.
        if not s2:
            return s1
        if not s1:
            return s2

        if self.is_single_line(s1) and self.is_single_line(s2):
            # some fields are delimited by a single space, others
            # a comma followed by a space.  this heuristic assumes
            # that there are multiple items in one of the string fields
            # so that we can pick up on the delimiter being used
            delim = ' '
            if (s1 + s2).count(', '):
                delim = ', '

            L = sorted((s1 + delim + s2).split(delim))

            prev = merged = L[0]

            for item in L[1:]:
                # skip duplicate entries
                if item == prev:
                    continue
                merged = merged + delim + item
                prev = item
            return merged

        if self.is_multi_line(s1) and self.is_multi_line(s2):
            for item in s2.splitlines(True):
                # splitlines(True) keeps line terminators, so each item
                # appended below carries its own ending.
                if item not in s1.splitlines(True):
                    s1 = s1 + "\n" + item
            return s1

        raise ValueError

    _mergeFields = function_deprecated_by(_merge_fields)
def merge_fields(self,
key, # type: str
d1, # type: Mapping[str, str]
d2=None, # type: Optional[Mapping[str, str]]
):
# type: (...) -> Optional[str]
# this method can work in two ways - abstract that away
if d2 is None:
x1 = self # type: Union[Mapping[str, str], Deb822]
x2 = d1
else:
x1 = d1
x2 = d2
# we only have to do work if both objects contain our key
# otherwise, we just take the one that does, or raise an
# exception if neither does
if key in x1 and key in x2:
merged = self._merge_fields(x1[key], x2[key])
elif key in x1:
merged = x1[key]
elif key in x2:
merged = x2[key]
else:
raise KeyError
# back to the two different ways - if this method was called
# upon an object, update that object in place.
# return nothing in this case, to make the author notice a
# problem if she assumes the object itself will not be modified
if d2 is None:
self[key] = merged
return None
return merged
mergeFields = function_deprecated_by(merge_fields)
    @staticmethod
    def split_gpg_and_payload(sequence,  # type: Iterable[bytes]
                              strict=None,  # type: Optional[Dict]
                              ):
        # type: (...) -> Tuple[List[bytes], List[bytes], List[bytes]]
        """Return a (gpg_pre, payload, gpg_post) tuple

        Each element of the returned tuple is a list of lines (with trailing
        whitespace stripped).

        :param sequence: iterable.
            An iterable that yields lines of data (str, unicode,
            bytes) to be parsed, possibly including a GPG in-line signature.

        :param strict: dict, optional.
            Control over the strictness of the parser. See the :class:`Deb822`
            class documentation for details.
        """
        # pylint: disable=too-many-branches
        if not strict:
            strict = {}

        gpg_pre_lines = []  # type: List[bytes]
        lines = []  # type: List[bytes]
        gpg_post_lines = []  # type: List[bytes]
        # Simple state machine; after a BEGIN armour line the state is set
        # to the armour's "what" text (e.g. b'SIGNED MESSAGE' or
        # b'SIGNATURE'), and b'SAFE' means we are in the payload.
        state = b'SAFE'
        gpgre = re.compile(br'^-----(?P<action>BEGIN|END) '
                           br'PGP (?P<what>[^-]+)-----[\r\t ]*$')
        initial_blank_line = re.compile(br'^\s*$')
        # Include whitespace-only lines in blank lines to split paragraphs.
        # (see #715558)
        if strict.get('whitespace-separates-paragraphs', True):
            blank_line = re.compile(br'^\s*$')
        else:
            blank_line = re.compile(br'^$')
        first_line = True

        for line in sequence:
            # Some consumers of this method require bytes (encoding
            # detection and signature checking).  However, we might have
            # been given a file opened in text mode, in which case it's
            # simplest to encode to bytes.
            if sys.version >= '3' and isinstance(line, str):
                line = line.encode()

            line = line.strip(b'\r\n')

            # skip initial blank lines, if any
            if first_line:
                if initial_blank_line.match(line):
                    continue
                first_line = False

            m = gpgre.match(line)

            if not m:
                if state == b'SAFE':
                    if not blank_line.match(line):
                        lines.append(line)
                    else:
                        if not gpg_pre_lines:
                            # There's no gpg signature, so we should stop at
                            # this blank line
                            break
                elif state == b'SIGNED MESSAGE':
                    # A blank line ends the armour headers; the payload
                    # paragraph follows.
                    if blank_line.match(line):
                        state = b'SAFE'
                    else:
                        gpg_pre_lines.append(line)
                elif state == b'SIGNATURE':
                    gpg_post_lines.append(line)
            else:
                if m.group('action') == b'BEGIN':
                    state = m.group('what')
                elif m.group('action') == b'END':
                    gpg_post_lines.append(line)
                    break
                # The armour marker itself belongs to the pre block until
                # payload lines have been seen, and to the post block after.
                if not blank_line.match(line):
                    if not lines:
                        gpg_pre_lines.append(line)
                    else:
                        gpg_post_lines.append(line)

        if lines:
            return (gpg_pre_lines, lines, gpg_post_lines)

        raise EOFError('only blank lines found in input')
@classmethod
def gpg_stripped_paragraph(cls, sequence, strict=None):
# type: (Iterator, Optional[Dict]) -> List[bytes]
return cls.split_gpg_and_payload(sequence, strict)[1]
    def get_gpg_info(self, keyrings=None):
        # type: (List[str]) -> GpgInfo
        """Return a GpgInfo object with GPG signature information

        This method will raise ValueError if the signature is not available
        (e.g. the original text cannot be found).

        :param keyrings: list of keyrings to use (see GpgInfo.from_sequence)
        """

        # raw_text is saved (as a string) only for Changes and Dsc (see
        # _gpg_multivalued.__init__) which is small compared to Packages or
        # Sources which contain no signature
        if not hasattr(self, 'raw_text'):
            raise ValueError("original text cannot be found")

        if self.gpg_info is None:
            # pylint: disable=no-member
            # (raw_text is checked above)
            # Verify lazily, once, and cache the result on the instance.
            self.gpg_info = GpgInfo.from_sequence(self.raw_text,  # type: ignore
                                                  keyrings=keyrings)

        return self.gpg_info
def validate_input(self, key, value):
# type: (str, str) -> None
# pylint: disable=no-self-use,unused-argument
"""Raise ValueError if value is not a valid value for key
Subclasses that do interesting things for different keys may wish to
override this method.
"""
# FIXME: key is not validated, contrary to docstring
# The value cannot end in a newline (if it did, dumping the object
# would result in multiple stanzas)
if value.endswith('\n'):
raise ValueError("value must not end in '\\n'")
# Make sure there are no blank lines (actually, the first one is
# allowed to be blank, but no others), and each subsequent line starts
# with whitespace
for line in value.splitlines()[1:]:
if not line:
raise ValueError("value must not have blank lines")
if not line[0].isspace():
raise ValueError("each line must start with whitespace")
    def __setitem__(self, key, value):
        # type: (str, str) -> None
        # Reject values that would produce unparseable output (see
        # validate_input) before storing them.
        self.validate_input(key, value)
        Deb822Dict.__setitem__(self, key, value)
# XXX check what happens if input contains more that one signature
class GpgInfo(dict):
    """A wrapper around gnupg parsable output obtained via --status-fd

    This class is really a dictionary containing parsed output from gnupg plus
    some methods to make sense of the data.

    Keys are keywords and values are arguments suitably split.
    See /usr/share/doc/gnupg/DETAILS.gz"""

    # keys with format "key keyid uid"
    uidkeys = ('GOODSIG', 'EXPSIG', 'EXPKEYSIG', 'REVKEYSIG', 'BADSIG')

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(GpgInfo, self).__init__(*args, **kwargs)
        # Raw stdout/stderr lines of the gpg(v) invocation, if known.
        self.out = None  # type: Optional[List[str]]
        self.err = None  # type: Optional[List[str]]

    def valid(self):
        # type: () -> bool
        """Is the signature valid?"""
        return 'GOODSIG' in self or 'VALIDSIG' in self

    # XXX implement as a property?
    # XXX handle utf-8 %-encoding
    def uid(self):
        """Return the primary ID of the signee key, None if not available"""
        # NOTE(review): not implemented; currently always returns None.

    @classmethod
    def from_output(cls, out, err=None):
        # type: (Union[str, List[str]], Union[str, List[str]]) -> GpgInfo
        """ Create a GpgInfo object based on the gpg or gpgv output

        Create a new GpgInfo object from gpg(v) --status-fd output (out) and
        optionally collect stderr as well (err).

        Both out and err can be lines in newline-terminated sequence or
        regular strings.
        """
        n = cls()

        if isinstance(out, six.string_types):
            n.out = out.split('\n')
        else:
            n.out = out

        if isinstance(err, six.string_types):
            n.err = err.split('\n')
        else:
            n.err = err

        # Only "[GNUPG:] KEYWORD args..." status lines are parsed.
        header = '[GNUPG:] '
        for line in n.out:
            if not line.startswith(header):
                continue
            line = line[len(header):]
            line = line.strip('\n')

            # str.partition() would be better, 2.5 only though
            s = line.find(' ')
            key = line[:s]
            if key in cls.uidkeys:
                # value is "keyid UID", don't split UID
                value = line[s+1:].split(' ', 1)
            else:
                value = line[s+1:].split(' ')

            # Skip headers in the gpgv output that are not interesting
            # note NEWSI is actually NEWSIG but the above parsing loses the 'G'
            # if no keyid is included in the message. See
            # /usr/share/doc/gnupg/DETAILS.gz
            if key in ('NEWSI', 'NEWSIG', 'KEY_CONSIDERED', 'PROGRESS'):
                continue

            n[key] = value
        return n

    @classmethod
    def from_sequence(cls,
                      sequence,  # type: Union[bytes, Iterable[bytes]]
                      keyrings=None,  # type: Iterable[str]
                      executable=None  # type: Optional[Iterable[str]]
                      ):
        # type: (...) -> GpgInfo
        """Create a new GpgInfo object from the given sequence.

        :param sequence: sequence of lines of bytes or a single byte string

        :param keyrings: list of keyrings to use (default:
            ['/usr/share/keyrings/debian-keyring.gpg'])

        :param executable: list of args for subprocess.Popen, the first element
            being the gpgv executable (default: ['/usr/bin/gpgv'])
        """
        keyrings = keyrings or GPGV_DEFAULT_KEYRINGS
        executable = executable or [GPGV_EXECUTABLE]

        # XXX check for gpg as well and use --verify accordingly?
        args = list(executable)
        # args.extend(["--status-fd", "1", "--no-default-keyring"])
        args.extend(["--status-fd", "1"])
        for k in keyrings:
            args.extend(["--keyring", k])

        # NOTE(review): despite the message, accessibility of the keyrings
        # is not checked here; this only fires when *keyrings* is empty.
        if "--keyring" not in args:
            raise IOError("cannot access any of the given keyrings")

        p = subprocess.Popen(args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=False)
        # XXX what to do with exit code?

        if isinstance(sequence, bytes):
            inp = sequence
        else:
            inp = cls._get_full_bytes(sequence)
        out, err = p.communicate(inp)

        return cls.from_output(out.decode('utf-8'),
                               err.decode('utf-8'))

    @staticmethod
    def _get_full_bytes(sequence):
        # type: (Iterable[bytes]) -> bytes
        """Return a byte string from a sequence of lines of bytes.

        This method detects if the sequence's lines are newline-terminated, and
        constructs the byte string appropriately.
        """
        # Peek at the first line to see if it's newline-terminated.
        sequence_iter = iter(sequence)
        try:
            first_line = next(sequence_iter)
        except StopIteration:
            return b""
        join_str = b'\n'
        if first_line.endswith(b'\n'):
            join_str = b''
        return first_line + join_str + join_str.join(sequence_iter)

    @classmethod
    def from_file(cls, target, *args, **kwargs):
        # type: (str, *Any, **Any) -> GpgInfo
        """Create a new GpgInfo object from the given file.

        See GpgInfo.from_sequence.
        """
        with open(target, 'rb') as target_file:
            return cls.from_sequence(target_file, *args, **kwargs)
class PkgRelation(object):
    """Inter-package relationships

    Structured representation of the relationships of a package to another,
    i.e. of what can appear in a Deb822 field like Depends, Recommends,
    Suggests, ... (see Debian Policy 7.1).
    """

    # XXX *NOT* a real dependency parser, and that is not even a goal here, we
    # just parse as much as we need to split the various parts composing a
    # dependency, checking their correctness wrt policy is out of scope
    __dep_RE = re.compile(
        r'^\s*(?P<name>[a-zA-Z0-9.+\-]{2,})'
        r'(:(?P<archqual>([a-zA-Z0-9][a-zA-Z0-9-]*)))?'
        r'(\s*\(\s*(?P<relop>[>=<]+)\s*'
        r'(?P<version>[0-9a-zA-Z:\-+~.]+)\s*\))?'
        r'(\s*\[(?P<archs>[\s!\w\-]+)\])?\s*'
        r'((?P<restrictions><.+>))?\s*'
        r'$')
    __comma_sep_RE = re.compile(r'\s*,\s*')
    __pipe_sep_RE = re.compile(r'\s*\|\s*')
    __blank_sep_RE = re.compile(r'\s+')
    __restriction_sep_RE = re.compile(r'>\s*<')
    __restriction_RE = re.compile(
        r'(?P<enabled>\!)?'
        r'(?P<profile>[^\s]+)')

    # Named pairs for "[...]" architecture and "<...>" build-profile
    # restrictions; `enabled` is False when the term was negated with '!'.
    ArchRestriction = collections.namedtuple('ArchRestriction',
                                             ['enabled', 'arch'])
    BuildRestriction = collections.namedtuple('BuildRestriction',
                                              ['enabled', 'profile'])

    @classmethod
    def parse_relations(cls, raw):
        # type: (str) -> List[List[Dict[str, Optional[Union[str, list, Tuple[str, str]]]]]]
        """Parse a package relationship string (i.e. the value of a field like
        Depends, Recommends, Build-Depends ...)
        """
        def parse_archs(raw):
            # type: (str) -> List[PkgRelation.ArchRestriction]
            # assumption: no space between '!' and architecture name
            archs = []
            for arch in cls.__blank_sep_RE.split(raw.strip()):
                disabled = arch[0] == '!'
                if disabled:
                    arch = arch[1:]
                archs.append(cls.ArchRestriction(not disabled, arch))
            return archs

        def parse_restrictions(raw):
            # type: (str) -> List[List[PkgRelation.BuildRestriction]]
            """ split a restriction formula into a list of restriction lists

            Each term in the restriction list is a namedtuple of form:

                (enabled, label)

            where
                enabled: bool: whether the restriction is positive or negative
                profile: the profile name of the term e.g. 'stage1'
            """
            restrictions = []
            # Each "<...>" group is one restriction list.
            groups = cls.__restriction_sep_RE.split(raw.lower().strip('<> '))
            for rgrp in groups:
                group = []
                for restriction in cls.__blank_sep_RE.split(rgrp):
                    match = cls.__restriction_RE.match(restriction)
                    if match:
                        parts = match.groupdict()
                        group.append(
                            cls.BuildRestriction(
                                parts['enabled'] != '!',
                                parts['profile'],
                            ))
                restrictions.append(group)
            return restrictions

        def parse_rel(raw):
            # type: (str) -> Dict[str, Optional[Union[str, list, Tuple[str, str]]]]
            match = cls.__dep_RE.match(raw)
            if match:
                parts = match.groupdict()
                d = {
                    'name': parts['name'],
                    'archqual': parts['archqual'],
                    'version': None,
                    'arch': None,
                    'restrictions': None,
                }  # type: Dict[str, Optional[Union[str, list, Tuple[str, str]]]]
                if parts['relop'] or parts['version']:
                    d['version'] = (parts['relop'], parts['version'])
                if parts['archs']:
                    d['arch'] = parse_archs(parts['archs'])
                if parts['restrictions']:
                    d['restrictions'] = parse_restrictions(
                        parts['restrictions'])
                return d

            # Unparseable input is passed through raw rather than dropped.
            warnings.warn(
                'cannot parse package'
                ' relationship "%s", returning it raw' % raw)
            return {
                'name': raw,
                'version': None,
                'arch': None
            }

        # Comma-separated terms are ANDed; pipe-separated ones are ORed.
        tl_deps = cls.__comma_sep_RE.split(raw.strip())  # top-level deps
        cnf = map(cls.__pipe_sep_RE.split, tl_deps)
        return [[parse_rel(or_dep) for or_dep in or_deps] for or_deps in cnf]

    @staticmethod
    def str(rels):
        # type: (List[List[Dict[builtins.str, Any]]]) -> builtins.str
        """Format to string structured inter-package relationships

        Perform the inverse operation of parse_relations, returning a string
        suitable to be written in a package stanza.
        """
        def pp_arch(arch_spec):
            # type: (PkgRelation.ArchRestriction) -> str
            return '%s%s' % (
                '' if arch_spec.enabled else '!',
                arch_spec.arch,
            )

        def pp_restrictions(restrictions):
            # type: (List[PkgRelation.BuildRestriction]) -> str
            s = []
            for term in restrictions:
                s.append(
                    '%s%s' % (
                        '' if term.enabled else '!',
                        term.profile
                    )
                )
            return '<%s>' % ' '.join(s)

        def pp_atomic_dep(dep):
            # type: (Dict[str, Any]) -> str
            s = dep['name']
            if dep.get('archqual') is not None:
                s += ':%s' % dep['archqual']
            if dep.get('version') is not None:
                s += ' (%s %s)' % dep['version']
            if dep.get('arch') is not None:
                s += ' [%s]' % ' '.join(map(pp_arch, dep['arch']))
            if dep.get('restrictions') is not None:
                s += ' %s' % ' '.join(map(pp_restrictions,
                                          dep['restrictions']))
            return s

        return ', '.join(
            map(lambda deps: ' | '.join(map(pp_atomic_dep, deps)), rels))
class _lowercase_dict(dict):
"""Dictionary wrapper which lowercase keys upon lookup."""
def __getitem__(self, key):
# type: (str) -> Optional[List]
return dict.__getitem__(self, key.lower())
class _VersionAccessorMixin(object):
    """Give access to Version keys as debian_support.Version objects."""
    def get_version(self):
        # Parse the raw 'Version' field into a comparable Version object.
        return debian.debian_support.Version(self['Version'])

    def set_version(self, version):
        # Store any Version-like object back in its string form.
        self['Version'] = str(version)
class _PkgRelationMixin(object):
    """Package relationship mixin

    Inheriting from this mixin you can extend a :class:`Deb822` object with
    attributes letting you access inter-package relationship in a structured
    way, rather than as strings.  For example, while you can usually use
    ``pkg['depends']`` to obtain the Depends string of package pkg, mixing in
    with this class you gain pkg.depends to access Depends as a PkgRelation
    instance

    To use, subclass _PkgRelationMixin from a class with a _relationship_fields
    attribute.  It should be a list of field names for which structured access
    is desired; for each of them a method will be added to the inherited class.
    The method name will be the lowercase version of field name; '-' will be
    mangled as '_'.  The method would return relationships in the same format
    as the PkgRelation's relations property.

    See Packages and Sources as examples.
    """

    _relationship_fields = []  # type: List[str]

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        # pylint: disable=unused-argument
        # (accept anything via constructors)
        self.__relations = _lowercase_dict({})
        # Becomes True once every lazy (None) entry below has been parsed.
        self.__parsed_relations = False
        for name in self._relationship_fields:
            # To avoid reimplementing Deb822 key lookup logic we use a really
            # simple dict subclass which just lowercase keys upon lookup. Since
            # dictionary building happens only here, we ensure that all keys
            # are in fact lowercase.
            # With this trick we enable users to use the same key (i.e. field
            # name) of Deb822 objects on the dictionary returned by the
            # relations property.
            keyname = name.lower()
            if name in self:  # type: ignore # Mixin is used with Deb822Dict
                self.__relations[keyname] = None  # lazy value
                # all lazy values will be expanded before setting
                # __parsed_relations to True
            else:
                self.__relations[keyname] = []

    @property
    def relations(self):
        # type: () -> _lowercase_dict
        """Return a dictionary of inter-package relationships among the current
        and other packages.

        Dictionary keys depend on the package kind.  Binary packages have keys
        like 'depends', 'recommends', ... while source packages have keys like
        'build-depends', 'build-depends-indep' and so on.  See the Debian
        policy for the comprehensive field list.

        Dictionary values are package relationships returned as lists of lists
        of dictionaries (see below for some examples).

        The encoding of package relationships is as follows:

        - the top-level lists corresponds to the comma-separated list of
          :class:`Deb822`, their components form a conjunction, i.e. they
          have to be AND-ed together

        - the inner lists corresponds to the pipe-separated list of
          :class:`Deb822`,
          their components form a disjunction, i.e. they have to be OR-ed
          together

        - member of the inner lists are dictionaries with the following keys:

          ``name``
            package (or virtual package) name

          ``version``
            A pair <`operator`, `version`> if the relationship is
            versioned, None otherwise.  operator is one of ``<<``,
            ``<=``, ``=``, ``>=``, ``>>``; version is the given version as
            a string.

          ``arch``
            A list of pairs <`enabled`, `arch`> if the
            relationship is architecture specific, None otherwise.
            Enabled is a boolean (``False`` if the architecture is
            negated with ``!``, ``True`` otherwise), arch the
            Debian architecture name as a string.

          ``restrictions``
            A list of lists of tuples <`enabled`, `profile`>
            if there is a restriction formula defined, ``None``
            otherwise.  Each list of tuples represents a restriction
            list while each tuple represents an individual term
            within the restriction list.  Enabled is a boolean
            (``False`` if the restriction is negated with ``!``,
            ``True`` otherwise).  The profile is the name of the
            build restriction.
            https://wiki.debian.org/BuildProfileSpec

          The arch and restrictions tuples are available as named tuples so
          elements are available as `term[0]` or alternatively as
          `term.enabled` (and so forth).

        Examples:

          ``"emacs | emacsen, make, debianutils (>= 1.7)"``     becomes::

            [
              [ {'name': 'emacs'}, {'name': 'emacsen'} ],
              [ {'name': 'make'} ],
              [ {'name': 'debianutils', 'version': ('>=', '1.7')} ]
            ]

          ``"tcl8.4-dev, procps [!hurd-i386]"``     becomes::

            [
              [ {'name': 'tcl8.4-dev'} ],
              [ {'name': 'procps', 'arch': (false, 'hurd-i386')} ]
            ]

          ``"texlive <!cross>"``     becomes::

            [
              [ {'name': 'texlive', 'restriction': [[(false, 'cross')]]} ]
            ]
        """
        if not self.__parsed_relations:
            # Expand all remaining lazy (None) entries exactly once.
            lazy_rels = filter(lambda n: self.__relations[n] is None,
                               self.__relations.keys())
            for n in lazy_rels:
                # Mixin is used with Deb822Dict so self becomes indexable
                self.__relations[n] = PkgRelation.parse_relations(self[n])  # type: ignore
            self.__parsed_relations = True
        return self.__relations
class _multivalued(Deb822):
    """A class with (R/W) support for multivalued fields.

    To use, create a subclass with a _multivalued_fields attribute.  It should
    be a dictionary with *lower-case* keys, with lists of human-readable
    identifiers of the fields as the values.

    Please see :class:`Dsc`, :class:`Changes`, and :class:`PdiffIndex`
    as examples.
    """

    _multivalued_fields = {}  # type: Dict[str, List[str]]

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        Deb822.__init__(self, *args, **kwargs)

        for field, fields in self._multivalued_fields.items():
            try:
                contents = self[field]
            except KeyError:
                continue

            # Multi-line values become a list of per-row dicts; a
            # single-line value becomes one dict keyed by column names.
            if self.is_multi_line(contents):
                self[field] = []  # type: ignore
                updater_method = self[field].append
            else:
                self[field] = Deb822Dict()  # type: ignore
                updater_method = self[field].update

            for line in filter(None, contents.splitlines()):  # type: str
                updater_method(Deb822Dict(zip(fields, line.split())))

    def validate_input(self, key, value):
        # type: (str, Union[List[Dict[str, str]], str]) -> None
        # Multivalued fields hold structured (mutable) values, not plain
        # strings, so the Deb822 string checks do not apply to them.
        if key.lower() in self._multivalued_fields:
            # It's difficult to write a validator for multivalued fields, and
            # basically futile, since we allow mutable lists.  In any case,
            # with sanity checking in get_as_string, we shouldn't ever output
            # unparseable data.
            pass
        else:
            super(_multivalued, self).validate_input(key, value)  # type: ignore

    def get_as_string(self, key):
        # type: (str) -> str
        """Serialise *key*, re-joining multivalued rows into text form."""
        keyl = key.lower()
        if keyl in self._multivalued_fields:
            fd = io.StringIO()
            if hasattr(self[key], 'keys'):  # single-line
                array = [self[key]]
            else:  # multi-line
                fd.write(six.u("\n"))
                array = self[key]

            order = self._multivalued_fields[keyl]
            field_lengths = {}  # type: Mapping[str, Mapping[str, int]]
            try:
                field_lengths = self._fixed_field_lengths  # type: ignore # lazy added member
            except AttributeError:
                pass
            for item in array:
                for x in order:
                    raw_value = six.text_type(item[x])
                    try:
                        length = field_lengths[keyl][x]
                    except KeyError:
                        value = raw_value
                    else:
                        # Right-align the value to the fixed column width.
                        value = (length - len(raw_value)) * " " + raw_value
                    if "\n" in value:
                        raise ValueError("'\\n' not allowed in component of "
                                         "multivalued field %s" % key)
                    fd.write(six.u(" %s") % value)
                fd.write(six.u("\n"))
            return fd.getvalue().rstrip("\n")

        return Deb822.get_as_string(self, key)
class _gpg_multivalued(_multivalued):
    """A _multivalued class that can support gpg signed objects

    This class's feature is that it stores the raw text before parsing so that
    gpg can verify the signature.  Use it just like you would use the
    _multivalued class.

    This class only stores raw text if it is given a raw string, or if it
    detects a gpg signature when given a file or sequence of lines (see
    Deb822.split_gpg_and_payload for details).
    """

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        # The verbatim input (bytes), kept so get_gpg_info can verify it.
        self.raw_text = None  # type: Optional[bytes]
        try:
            sequence = args[0]
        except IndexError:
            sequence = kwargs.get("sequence", None)

        strict = kwargs.get("strict", None)

        if sequence is not None:
            # If the input is a unicode object or a file opened in text mode,
            # we'll need to encode it back to bytes for gpg.  If it's not
            # actually in the encoding that we guess, then this probably won't
            # verify correctly, but this is the best we can reasonably manage.
            # For accurate verification, the file should be opened in binary
            # mode.
            encoding = (getattr(sequence, 'encoding', None)
                        or kwargs.get('encoding', 'utf-8') or 'utf-8')
            if isinstance(sequence, bytes):
                self.raw_text = sequence
            elif isinstance(sequence, six.string_types):
                self.raw_text = sequence.encode(encoding)
            elif hasattr(sequence, "items"):
                # sequence is actually a dict(-like) object, so we don't have
                # the raw text.
                pass
            else:
                try:
                    gpg_pre_lines, lines, gpg_post_lines = \
                        self.split_gpg_and_payload(
                            (self._bytes(s, encoding) for s in sequence),
                            strict)
                except EOFError:
                    # Empty input
                    gpg_pre_lines = lines = gpg_post_lines = []
                if gpg_pre_lines and gpg_post_lines:
                    # Signature armour detected: reassemble the full
                    # signed text so it can be verified later.
                    raw_text = io.BytesIO()
                    raw_text.write(b"\n".join(gpg_pre_lines))
                    raw_text.write(b"\n\n")
                    raw_text.write(b"\n".join(lines))
                    raw_text.write(b"\n\n")
                    raw_text.write(b"\n".join(gpg_post_lines))
                    self.raw_text = raw_text.getvalue()
                # Replace the input with the armour-stripped payload
                # before handing off to the parser below.
                try:
                    argsl = list(args)
                    argsl[0] = lines
                    args = tuple(argsl)
                except IndexError:
                    kwargs["sequence"] = lines

        _multivalued.__init__(self, *args, **kwargs)

    @staticmethod
    def _bytes(s, encoding):
        # type: (Union[bytes, str], str) -> bytes
        """Converts s to bytes if necessary, using encoding.

        If s is already bytes type, returns it directly.
        """
        if isinstance(s, bytes):
            return s
        if isinstance(s, six.string_types):
            return s.encode(encoding)

        raise TypeError('bytes or unicode/string required, not %s' % type(s))
class Dsc(_gpg_multivalued, _VersionAccessorMixin):
    """ Representation of a .dsc (Debian Source Control) file

    This class is a thin wrapper around the transparent GPG handling
    of :class:`_gpg_multivalued` and the parsing of :class:`Deb822`.
    """

    # Fields whose values are lists of whitespace-separated tuples; the
    # lists below name the components of each tuple, in order.
    _multivalued_fields = {
        "files": ["md5sum", "size", "name"],
        "checksums-sha1": ["sha1", "size", "name"],
        "checksums-sha256": ["sha256", "size", "name"],
        "checksums-sha512": ["sha512", "size", "name"],
    }
class Changes(_gpg_multivalued, _VersionAccessorMixin):
    """ Representation of a .changes (archive changes) file

    This class is a thin wrapper around the transparent GPG handling
    of :class:`_gpg_multivalued` and the parsing of :class:`Deb822`.
    """

    # Components of each entry in the multi-line checksum/file fields.
    _multivalued_fields = {
        "files": ["md5sum", "size", "section", "priority", "name"],
        "checksums-sha1": ["sha1", "size", "name"],
        "checksums-sha256": ["sha256", "size", "name"],
        "checksums-sha512": ["sha512", "size", "name"],
    }

    def get_pool_path(self):
        """Return the path in the pool where the files would be installed"""
        # The archive component comes from the section of the first file
        # entry ("component/section"); with no slash (or an unexpected
        # number of parts) the component defaults to "main".  A package
        # could in principle span several sections, but in practice it
        # does not.
        section_info = self['files'][0]['section']
        split_parts = section_info.split('/')
        if len(split_parts) == 2:
            component = split_parts[0]
        else:
            # main is implicit
            component = 'main'
        source = self['source']
        # Library packages are grouped under "lib<x>" subdirectories.
        prefix = source[:4] if source.startswith('lib') else source[0]
        return 'pool/%s/%s/%s' % (component, prefix, source)
class PdiffIndex(_multivalued):
    """ Representation of a foo.diff/Index file from a Debian mirror

    This class is a thin wrapper around the parsing of :class:`Deb822`,
    with multi-line checksum fields handled by :class:`_multivalued`.
    (Unlike Dsc/Changes there is no GPG layer here.)
    """

    _multivalued_fields = {
        "sha1-current": ["SHA1", "size"],
        "sha1-history": ["SHA1", "size", "date"],
        "sha1-patches": ["SHA1", "size", "date"],
    }

    @property
    def _fixed_field_lengths(self):
        # Column width of the "size" field for each multi-line checksum
        # list, used to align the output when dumping.
        fixed_field_lengths = {}
        for key in self._multivalued_fields:
            if hasattr(self[key], 'keys'):
                # Not multi-line -- don't need to compute the field length for
                # this one
                continue
            length = self._get_size_field_length(key)
            fixed_field_lengths[key] = {"size": length}
        return fixed_field_lengths

    def _get_size_field_length(self, key):
        # Width of the widest "size" value among the field's entries.
        lengths = [len(str(item['size'])) for item in self[key]]
        return max(lengths)
class Release(_multivalued):
    """Represents a Release file

    Set the size_field_behavior attribute to "dak" to make the size field
    length only as long as the longest actual value. The default,
    "apt-ftparchive" makes the field 16 characters long regardless.

    This class is a thin wrapper around the parsing of :class:`Deb822`.
    """
    # FIXME: Add support for detecting the behavior of the input, if
    # constructed from actual 822 text.

    _multivalued_fields = {
        "md5sum": ["md5sum", "size", "name"],
        "sha1": ["sha1", "size", "name"],
        "sha256": ["sha256", "size", "name"],
        "sha512": ["sha512", "size", "name"],
    }

    # Name-mangled to avoid accidental clobbering by subclasses; exposed
    # read/write through the size_field_behavior property below.
    __size_field_behavior = "apt-ftparchive"

    def set_size_field_behavior(self, value):
        # Validating setter backing the size_field_behavior property.
        if value not in ["apt-ftparchive", "dak"]:
            raise ValueError("size_field_behavior must be either "
                             "'apt-ftparchive' or 'dak'")
        self.__size_field_behavior = value
    size_field_behavior = property(lambda self: self.__size_field_behavior,
                                   set_size_field_behavior)

    @property
    def _fixed_field_lengths(self):
        # Column width of the "size" field for each checksum list, used to
        # align the output when dumping.
        fixed_field_lengths = {}
        for key in self._multivalued_fields:
            length = self._get_size_field_length(key)
            fixed_field_lengths[key] = {"size": length}
        return fixed_field_lengths

    def _get_size_field_length(self, key):
        # type: (str) -> int
        if self.size_field_behavior == "apt-ftparchive":
            # Fixed 16-character column, matching apt-ftparchive output.
            return 16
        if self.size_field_behavior == "dak":
            # Just wide enough for the largest value, matching dak output.
            lengths = [len(str(item['size'])) for item in self[key]]
            return max(lengths)
        raise ValueError("Illegal value for size_field_behavior")
class Sources(Dsc, _PkgRelationMixin):
    """Represent an APT source package list

    Combines :class:`Dsc` parsing (including its transparent GPG
    handling) with the relationship-field parsing provided by
    :class:`_PkgRelationMixin`.
    """
    _relationship_fields = [
        'build-depends', 'build-depends-indep',
        'build-conflicts', 'build-conflicts-indep',
        'binary',
    ]

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        Dsc.__init__(self, *args, **kwargs)
        _PkgRelationMixin.__init__(self, *args, **kwargs)

    @classmethod
    def iter_paragraphs(cls,
                        sequence, # type: IterableInputDataType
                        fields=None, # type: Optional[List[str]]
                        use_apt_pkg=True, # type: bool
                        shared_storage=False, # type: bool
                        encoding="utf-8", # type: str
                        strict=None, # type: Optional[Dict]
                        ):
        # type: (...) -> Iterator
        """Yield one object per paragraph in a Sources file.

        Unlike the base generator, this override enables the apt_pkg
        parser (strict but fast) by default.  See
        :func:`~Deb822.iter_paragraphs` for details.
        """
        strict = strict or {
            'whitespace-separates-paragraphs': False,
        }
        return super(Sources, cls).iter_paragraphs(
            sequence, fields, use_apt_pkg, shared_storage, encoding, strict)
class Packages(Deb822, _PkgRelationMixin, _VersionAccessorMixin):
    """Represent an APT binary package list

    Combines :class:`Deb822` parsing with the relationship-field parsing
    provided by :class:`_PkgRelationMixin`.
    """
    _relationship_fields = [
        'depends', 'pre-depends', 'recommends', 'suggests',
        'breaks', 'conflicts', 'provides', 'replaces',
        'enhances', 'built-using',
    ]

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        Deb822.__init__(self, *args, **kwargs)
        _PkgRelationMixin.__init__(self, *args, **kwargs)

    @classmethod
    def iter_paragraphs(cls,
                        sequence, # type: IterableInputDataType
                        fields=None, # type: Optional[List[str]]
                        use_apt_pkg=True, # type: bool
                        shared_storage=False, # type: bool
                        encoding="utf-8", # type: str
                        strict=None, # type: Optional[Dict]
                        ):
        # type: (...) -> Iterator
        """Yield one object per paragraph in a Packages file.

        Unlike the base generator, this override enables the apt_pkg
        parser (strict but fast) by default.  See
        :func:`~Deb822.iter_paragraphs` for details.
        """
        strict = strict or {
            'whitespace-separates-paragraphs': False,
        }
        return super(Packages, cls).iter_paragraphs(
            sequence, fields, use_apt_pkg, shared_storage, encoding, strict)
class _ClassInitMeta(type):
    """Metaclass for classes that can be initialized at creation time.

    Implement the method::

        @classmethod
        def _class_init(cls, new_attrs):
            pass

    on a class, and apply this metaclass to it. The _class_init method will be
    called right after the class is created. The 'new_attrs' param is a dict
    containing the attributes added in the definition of the class.
    """

    def __init__(cls, name, bases, attrs):
        super(_ClassInitMeta, cls).__init__(name, bases, attrs)
        # Let the freshly-created class post-process its own definition
        # (e.g. RestrictedWrapper turns RestrictedField attrs into
        # properties).
        cls._class_init(attrs)
class RestrictedField(collections.namedtuple(
        'RestrictedField', 'name from_str to_str allow_none')):
    """Placeholder for a property providing access to a restricted field.

    Use this as an attribute when defining a subclass of RestrictedWrapper.
    The wrapper's metaclass replaces it with a real property; see the
    RestrictedWrapper documentation for a worked example.
    """

    def __new__(cls, name, from_str=None, to_str=None, allow_none=True):
        """Create a new RestrictedField placeholder.

        The generated getter returns (or applies *from_str* to) None when
        the field is absent from the underlying data object.

        :param name: The name of the deb822 field.
        :param from_str: Converter applied on read (default: return the
            raw string).
        :param to_str: Converter applied on write (default: store the
            value directly).  If *allow_none* is True this may return
            None, which deletes the underlying key.
        :param allow_none: Whether assigning None is permitted (deleting
            the underlying key).
        """
        return super(RestrictedField, cls).__new__(
            cls, name, from_str, to_str, allow_none)
@six.add_metaclass(_ClassInitMeta)
class RestrictedWrapper(object):
    """Base class to wrap a Deb822 object, restricting write access to some keys.

    The underlying data is hidden internally. Subclasses may keep a reference
    to the data before giving it to this class's constructor, if necessary, but
    RestrictedField should cover most use-cases. The dump method from
    Deb822 is directly proxied.

    Typical usage::

        class Foo(object):
            def __init__(self, ...):
                # ...

            @staticmethod
            def from_str(self, s):
                # Parse s...
                return Foo(...)

            def to_str(self):
                # Return in string format.
                return ...

        class MyClass(deb822.RestrictedWrapper):
            def __init__(self):
                data = deb822.Deb822()
                data['Bar'] = 'baz'
                super(MyClass, self).__init__(data)

            foo = deb822.RestrictedField(
                'Foo', from_str=Foo.from_str, to_str=Foo.to_str)

            bar = deb822.RestrictedField('Bar', allow_none=False)

        d = MyClass()
        d['Bar'] # returns 'baz'
        d['Bar'] = 'quux' # raises RestrictedFieldError
        d.bar = 'quux'
        d.bar # returns 'quux'
        d['Bar'] # returns 'quux'

        d.foo = Foo(...)
        d['Foo'] # returns string representation of foo
    """

    # Lower-cased names of fields guarded by properties; populated per
    # subclass by _class_init below.
    __restricted_fields = frozenset() # type: frozenset

    @classmethod
    def _class_init(cls, new_attrs):
        # Called by _ClassInitMeta right after the subclass is created:
        # replace each RestrictedField placeholder with a property and
        # remember its field name for the __setitem__/__delitem__ checks.
        restricted_fields = []
        for attr_name, val in new_attrs.items():
            if isinstance(val, RestrictedField):
                restricted_fields.append(val.name.lower())
                cls.__init_restricted_field(attr_name, val)
        cls.__restricted_fields = frozenset(restricted_fields)

    @classmethod
    def __init_restricted_field(cls, attr_name, field):
        # Build the property mediating access to `field` and install it
        # under `attr_name` on the subclass.
        def getter(self):
            # type: (RestrictedWrapper) -> Union[None, Tuple[str], str]
            val = self.__data.get(field.name)
            if field.from_str is not None:
                return field.from_str(val)
            return val

        def setter(self, val):
            # type: (RestrictedWrapper, Any) -> None
            if val is not None and field.to_str is not None:
                val = field.to_str(val)
            if val is None:
                if field.allow_none:
                    # Assigning None deletes the underlying key (if any).
                    if field.name in self.__data:
                        del self.__data[field.name]
                else:
                    raise TypeError('value must not be None')
            else:
                self.__data[field.name] = val

        setattr(cls, attr_name, property(getter, setter, None, field.name))

    def __init__(self, data):
        # type: (Deb822) -> None
        """Initializes the wrapper over 'data', a Deb822 object."""
        super(RestrictedWrapper, self).__init__()
        self.__data = data

    def __getitem__(self, key):
        # type: (str) -> str
        # Reads are never restricted.
        return self.__data[key]

    def __setitem__(self, key, value):
        # type: (str, str) -> None
        if key.lower() in self.__restricted_fields:
            raise RestrictedFieldError(
                '%s may not be modified directly; use the associated'
                ' property' % key)
        self.__data[key] = value

    def __delitem__(self, key):
        # type: (str) -> None
        if key.lower() in self.__restricted_fields:
            raise RestrictedFieldError(
                '%s may not be modified directly; use the associated'
                ' property' % key)
        del self.__data[key]

    def __iter__(self):
        return iter(self.__data)

    def __len__(self):
        return len(self.__data)

    def dump(self, *args, **kwargs):
        # type: (*Any, **Any) -> Optional[str]
        """Calls dump() on the underlying data object.

        See Deb822.dump for more information.
        """
        return self.__data.dump(*args, **kwargs)
class Removals(Deb822):
    """Represent an ftp-master removals.822 file

    Removal of packages from the archive are recorded by ftp-masters.
    See https://ftp-master.debian.org/#removed

    Note: this API is experimental and backwards-incompatible changes might be
    required in the future. Please use it and help us improve it!
    """

    # Matches "<package>_<version>" entries in the Sources field.
    __sources_line_re = re.compile(
        r'\s*'
        r'(?P<package>.+?)'
        r'_'
        r'(?P<version>[^\s]+)'
        r'\s*'
    )
    # Matches "<package>_<version> [arch, ...]" entries in Binaries.
    __binaries_line_re = re.compile(
        r'\s*'
        r'(?P<package>.+?)'
        r'_'
        r'(?P<version>[^\s]+)'
        r'\s+'
        r'\[(?P<archs>.+)\]'
    )

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(Removals, self).__init__(*args, **kwargs)
        # Lazily-built caches for the parsed Sources/Binaries fields.
        self._sources = None # type: Optional[List[Dict[str, Iterable]]]
        self._binaries = None # type: Optional[List[Dict[str, Iterable]]]

    @property
    def date(self):
        # type: () -> datetime.datetime
        """ a datetime object for the removal action """
        parsed = email.utils.parsedate_tz(self['date'])
        if parsed is None:
            raise ValueError("No date specified")
        return datetime.datetime.fromtimestamp(email.utils.mktime_tz(parsed))

    @property
    def bug(self):
        # type: () -> List[int]
        """ list of bug numbers that had requested the package removal

        The bug numbers are returned as integers.

        Note: there is normally only one entry in this list but there may be
        more than one.
        """
        if 'bug' in self:
            return [int(number) for number in self['bug'].split(",")]
        return []

    @property
    def also_wnpp(self):
        # type: () -> List[int]
        """ list of WNPP bug numbers closed by the removal

        The bug numbers are returned as integers.
        """
        if 'also-wnpp' in self:
            return [int(number) for number in self['also-wnpp'].split(" ")]
        return []

    @property
    def also_bugs(self):
        # type: () -> List[int]
        """ list of bug numbers in the package closed by the removal

        The bug numbers are returned as integers.

        Removal of a package implicitly also closes all bugs associated with
        the package.
        """
        if 'also-bugs' in self:
            return [int(number) for number in self['also-bugs'].split(" ")]
        return []

    @property
    def sources(self):
        # type: () -> List[Dict[str, Iterable]]
        """ list of source packages that were removed

        A list of dicts is returned, each dict has the form::

            {
                'source': 'some-package-name',
                'version': '1.2.3-1'
            }

        Note: There may be no source packages removed at all if the removal is
        only of a binary package. An empty list is returned in that case.
        """
        if self._sources is None:
            found = [] # type: List[Dict[str, Iterable]]
            if 'sources' in self:
                for line in self['sources'].splitlines():
                    match = self.__sources_line_re.match(line)
                    if match is not None:
                        found.append({
                            'source': match.group('package'),
                            'version': match.group('version'),
                        })
            self._sources = found
        return self._sources

    @property
    def binaries(self):
        # type: () -> List[Dict[str, Iterable]]
        """ list of binary packages that were removed

        A list of dicts is returned, each dict has the form::

            {
                'package': 'some-package-name',
                'version': '1.2.3-1',
                'architectures': set(['i386', 'amd64'])
            }
        """
        if self._binaries is None:
            found = [] # type: List[Dict[str, Iterable]]
            if 'binaries' in self:
                for line in self['binaries'].splitlines():
                    match = self.__binaries_line_re.match(line)
                    if match is not None:
                        found.append({
                            'package': match.group('package'),
                            'version': match.group('version'),
                            'architectures':
                                set(match.group('archs').split(', ')),
                        })
            self._binaries = found
        return self._binaries
class _CaseInsensitiveString(str):
"""Case insensitive string.
"""
# Fake definitions because mypy doesn't find them in __new__ ## CRUFT
# https://github.com/python/mypy/issues/1021
str_lower = ''
str_lower_hash = 0
def __new__(cls, str_):
s = str.__new__(cls, str_)
s.str_lower = str_.lower()
s.str_lower_hash = hash(s.str_lower)
return s
def __hash__(self):
# type: () -> int
return self.str_lower_hash
def __eq__(self, other):
# type: (Any) -> bool
try:
return self.str_lower == other.lower()
except AttributeError:
return False
def lower(self):
# type: () -> str
return self.str_lower
_strI = _CaseInsensitiveString
class _AutoDecoder(object):
def __init__(self, encoding=None):
# type: (Optional[str]) -> None
self.encoding = encoding or 'UTF-8'
def decode(self, value):
# type: (Union[str, bytes]) -> str
"""If value is not already Unicode, decode it intelligently."""
if isinstance(value, bytes):
try:
return value.decode(self.encoding)
except UnicodeDecodeError as e:
# Evidently, the value wasn't encoded with the encoding the
# user specified. Try detecting it.
warnings.warn('decoding from %s failed; attempting to detect '
'the true encoding' % self.encoding,
UnicodeWarning)
result = chardet.detect(value)
try:
return value.decode(result['encoding'])
except UnicodeDecodeError:
raise e
else:
# Assume the rest of the paragraph is in this encoding as
# well (there's no sense in repeating this exercise for
# every field).
self.encoding = result['encoding']
else:
return value
|
<reponame>alibaba/FederatedScope<filename>federatedscope/mf/dataloader/dataloader.py
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from numpy.random import shuffle
import numpy as np
import collections
import importlib
# Maps the lower-cased `config.data.type` value to the dataset class name
# looked up in federatedscope.mf.dataset.movielens by load_mf_dataset().
# The vfl/hfl prefixes presumably denote vertical/horizontal federated
# splits — confirm against the dataset module.
MFDATA_CLASS_DICT = {
    "vflmovielens1m": "VFLMovieLens1M",
    "vflmovielens10m": "VFLMovieLens10M",
    "hflmovielens1m": "HFLMovieLens1M",
    "hflmovielens10m": "HFLMovieLens10M"
}
def load_mf_dataset(config=None):
    """Return the dataset of matrix factorization

    Format:

        {
            'client_id': {
                'train': DataLoader(),
                'test': DataLoader()
            }
        }

    Note: only 'train' and 'test' loaders are built below; no 'val'
    split is populated.

    :param config: global config object; reads data.type, data.root,
        data.splits, data.shuffle, data.batch_size, data.drop_last,
        federate.client_num and sgdmf.theta, and writes model.num_user /
        model.num_item back into it.
    :returns: (data_local_dict, config) tuple.
    :raises NotImplementedError: when data.type names an unknown dataset.
    """
    if config.data.type.lower() in MFDATA_CLASS_DICT:
        # Dataset
        dataset = getattr(
            importlib.import_module("federatedscope.mf.dataset.movielens"),
            MFDATA_CLASS_DICT[config.data.type.lower()])(
                root=config.data.root,
                num_client=config.federate.client_num,
                train_portion=config.data.splits[0],
                download=True)
    else:
        raise NotImplementedError("Dataset {} is not implemented.".format(
            config.data.type))
    data_local_dict = collections.defaultdict(dict)
    for id_client, data in dataset.data.items():
        # Training loader honours the configured shuffle flag and applies
        # the per-user rating cap (sgdmf.theta).
        data_local_dict[id_client]["train"] = MFDataLoader(
            data["train"],
            shuffle=config.data.shuffle,
            batch_size=config.data.batch_size,
            drop_last=config.data.drop_last,
            theta=config.sgdmf.theta)
        # Test loader is never shuffled.  NOTE(review): theta trimming is
        # applied to the test split too — presumably intentional for the
        # DP guarantee; verify against the SGDMF trainer.
        data_local_dict[id_client]["test"] = MFDataLoader(
            data["test"],
            shuffle=False,
            batch_size=config.data.batch_size,
            drop_last=config.data.drop_last,
            theta=config.sgdmf.theta)
    # Modify config
    config.merge_from_list(['model.num_user', dataset.n_user])
    config.merge_from_list(['model.num_item', dataset.n_item])
    return data_local_dict, config
class MFDataLoader(object):
    """DataLoader for MF dataset

    Iterates over the non-zero ratings of a sparse matrix in batches,
    optionally shuffling per epoch and capping the number of ratings
    per user (row) via ``theta``.

    Args:
        data (csc_matrix): sparse MF dataset
        batch_size (int): the size of batch data
        shuffle (bool): shuffle the dataset
        drop_last (bool): drop the last batch if True
        theta (int): the maximal number of ratings for each user
    """
    def __init__(self,
                 data: csc_matrix,
                 batch_size: int,
                 shuffle=True,
                 drop_last=False,
                 theta=None):
        super(MFDataLoader, self).__init__()
        self.dataset = self._trim_data(data, theta)
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.n_row, self.n_col = self.dataset.shape
        self.n_rating = self.dataset.count_nonzero()
        self._idx_samples = None
        self._idx_cur = None
        self._reset()

    def _trim_data(self, data, theta=None):
        """Trim rating data by parameter theta (per-user privacy)

        Arguments:
            data (csc_matrix): the dataset
            theta (int): The maximal number of ratings for each user
        """
        if theta is None or theta <= 0:
            return data
        # Each user (row) keeps at most `theta` randomly chosen ratings.
        coo = data.tocoo()
        per_user = collections.defaultdict(list)
        for pos, uid in enumerate(coo.row):
            per_user[uid].append(pos)
        keep = list()
        for positions in per_user.values():
            if len(positions) > theta:
                keep += np.random.choice(positions, theta,
                                         replace=False).tolist()
            else:
                keep += positions
        trimmed = coo_matrix(
            (coo.data[keep], (coo.row[keep], coo.col[keep])),
            shape=coo.shape)
        return trimmed.tocsc()

    def _reset(self):
        # Rewind the cursor; (re)shuffle the sample order if requested.
        self._idx_cur = 0
        if self._idx_samples is None:
            self._idx_samples = np.arange(self.n_rating)
        if self.shuffle:
            shuffle(self._idx_samples)

    def _sample_data(self, sampled_rating_idx):
        # Materialize the selected ratings as ((rows, cols), values).
        coo = self.dataset.tocoo()
        coords = (coo.row[sampled_rating_idx], coo.col[sampled_rating_idx])
        return coords, coo.data[sampled_rating_idx]

    def __len__(self):
        """The number of batches within an epoch
        """
        n_full, remainder = divmod(self.n_rating, self.batch_size)
        if self.drop_last or remainder == 0:
            return n_full
        return n_full + 1

    def __next__(self, theta=None):
        """Get the next batch of data

        Args:
            theta (int): the maximal number of ratings for each user
        """
        total = len(self._idx_samples)
        end = self._idx_cur + self.batch_size
        if self._idx_cur == total or self.drop_last and end > total:
            raise StopIteration
        end = min(end, total)
        chosen = self._idx_samples[self._idx_cur:end]
        self._idx_cur = end
        return self._sample_data(chosen)

    def __iter__(self):
        self._reset()
        return self
|
# coding=utf-8
__author__ = 'kohlmannj'
import os
import copy
import codecs
from collections import defaultdict
from Ity.Formatters import Formatter
from jinja2 import Environment, FileSystemLoader
class LineGraphFormatter(Formatter):
    """
    An Ity Formatter subclass which outputs SVG-based line graphs for the tags
    returned by a TopicModelTagger's self.tag() method. Unfortunately this
    class, as currently written, relies on tag-specific data which only
    TopicModelTagger.tag() returns. That could change, it'd need to be
    refactored to give some kind of weight to individual tags (to make a summed
    area table and thus the actual graph data points).
    """
    def __init__(
        self,
        debug=None,
        template="standalone.svg",
        partial_template="partial.svg",
        css_file="styles.css",
        js_file=None,
        template_root=None
    ):
        """Set up the Jinja2 environment and resolve template/CSS/JS paths.

        :param template: name of the full standalone SVG template.
        :param partial_template: name of the embeddable SVG template.
        :param css_file: stylesheet filename inside template_root, or None.
        :param js_file: script filename inside template_root, or None.
        :param template_root: directory containing the templates; defaults
            to the "templates" directory next to this module.
        """
        super(LineGraphFormatter, self).__init__(debug)
        self.template_root = template_root
        if self.template_root is None:
            self.template_root = os.path.join(
                os.path.dirname(__file__),
                "templates"
            )
        # Jinja2 Environment initialization
        self.env = Environment(
            loader=FileSystemLoader(searchpath=self.template_root),
            extensions=[
                'jinja2.ext.do',
                'Support.jinja2_htmlcompress.jinja2htmlcompress.HTMLCompress'
            ]
        )
        # Template Initialization
        self.template = self.env.get_template(template)
        self.partial_template = self.env.get_template(partial_template)
        self.css_file = css_file
        if self.css_file is not None:
            self.css_path = os.path.join(
                self.template_root,
                self.css_file
            )
        else:
            self.css_path = None
        self.js_file = js_file
        if self.js_file is not None:
            # BUG FIX: this previously joined self.css_file, so js_path
            # never pointed at the JS file.
            self.js_path = os.path.join(
                self.template_root,
                self.js_file
            )
        else:
            self.js_path = None

    #TODO: change "partial" argument to "options" dict-based argument.
    def format(
        self,
        tags=None,
        tokens=None,
        s=None,
        partial=False,
        pixel_size=None,
        text_name=None,
        included_rules=()
    ):
        """Render the line graph SVG for the given (tag_data, tag_maps) pair.

        :param tags: two-element sequence of (tag_data dict, tag_maps list).
        :param partial: render the embeddable partial template when True.
        :param pixel_size: horizontal resolution hint (default 50).
        :param included_rules: rule keys to plot; empty means all rules.
        :returns: rendered SVG as a string.
        :raises ValueError: when `tags` is missing or malformed.
        """
        if pixel_size is None:
            pixel_size = 50
        # Make sure we have enough data to actually do things.
        if (
            (tags is None or len(tags) != 2)
        ):
            raise ValueError("Not enough valid input data given to format() method.")
        # Get the summed area table.
        #TODO: Add option for winnerTakesAll in format() arguments.
        #summed_area_table = self.getSAT(tags, False, True)
        summed_area_table = self.getSAT(tags, True, False)
        # If included_rules is empty, include all available rules!
        if len(included_rules) == 0:
            included_rules = tags[0].keys()
        # Get the window scores.
        window_scores, max_window_score = self.computeWindowScores(summed_area_table, pixel_size, included_rules)
        # Figure out if we're using the full or partial template.
        template_to_use = self.template
        if partial:
            template_to_use = self.partial_template
        # Get the contents of the stylesheet (closing the file even if
        # the read raises).
        with codecs.open(self.css_path, encoding="utf-8") as css_file:
            css_str = css_file.read()
        # Render the template.
        return template_to_use.render(
            tag_data=tags[0],
            tag_maps=tags[1],
            window_scores=window_scores,
            pixel_size=pixel_size,
            max_window_score=max_window_score,
            text_name=text_name,
            styles=css_str
        )

    # Returns summed area table over a given tags list.
    # winnerTakesAll: if true, just count top topic
    # props: if true, add up proportions. if false, add up counts.
    def getSAT(self, tags, winnerTakesAll, props):
        """Build a per-token cumulative score table, one dict per token."""
        tag_maps = tags[1]
        numTokens = len(tag_maps)
        sat = [0 for i in range(numTokens)]
        # currLine accumulates the running totals; each entry in `sat` is
        # a snapshot (deep copy) of the totals up to that token.
        currLine = defaultdict(float)
        for i in range(numTokens):
            tagDict = tag_maps[i]
            for (topicID, topicProp, tagRampIndex) in tagDict['rules']:
                currLine[topicID] += topicProp if props else 1
                if winnerTakesAll:
                    # Only the first (top) topic counts.
                    break
            sat[i] = copy.deepcopy(currLine)
        return sat

    def computeWindowScores(self, sat, pixelSize, topicsToInclude):
        """Compute per-topic sliding-window scores from a summed area table.

        All divisions below use floor division (//) so indices stay ints:
        the original `/` produced floats on Python 3 and broke range()
        and list indexing.

        :returns: (windowScores dict keyed by topic, maxWindowScore).
        """
        # topicsToInclude is a list of topic_keys (i.e. strs)
        windowScores = {}
        numTokens = len(sat)
        stepSize = max(1, numTokens // pixelSize)
        windowSize = 50 * stepSize
        maxWindowScore = 0
        for topic in topicsToInclude:
            windowScores[topic] = [0.0 for i in range(numTokens // stepSize + 1)]
        i = 0
        windowCount = 0
        while i < numTokens:
            startIndex = max(0, i - windowSize // 2)
            endIndex = min(numTokens - 1, i + windowSize // 2)
            indexRange = endIndex - startIndex
            # Guard against a zero-width window (single-token input),
            # which previously raised ZeroDivisionError.
            if indexRange > 0:
                for topic in topicsToInclude:
                    windowScore = (sat[endIndex][topic] - sat[startIndex][topic]) / float(indexRange)
                    # Update maxWindowScore, maybe.
                    if windowScore > maxWindowScore:
                        maxWindowScore = windowScore
                    windowScores[topic][windowCount] = windowScore
            i += stepSize
            windowCount += 1
        return windowScores, maxWindowScore
|
"""
Key Codes used by the keyboard system.
Note that only base (unshifted) symbols and keys have keycodes.
There is significant variation between layouts of different countries; keys are
given by their semantic meaning and not by their position. But these are also
not suitable for text input.
"""
import ppb.flags
class KeyCode(ppb.flags.Flag, abstract=True):
    """
    A raw keyboard scan code.

    Abstract base for all key flags below; each concrete subclass
    represents one physical key by its semantic meaning, not its
    position or shifted symbol.
    """
# Letter keys.
class A(KeyCode):
    "The A key."
class B(KeyCode):
    "The B key."
class C(KeyCode):
    "The C key."
class D(KeyCode):
    "The D key."
class E(KeyCode):
    "The E key."
class F(KeyCode):
    "The F key."
class G(KeyCode):
    "The G key."
class H(KeyCode):
    "The H key."
class I(KeyCode):
    "The I key."
class J(KeyCode):
    "The J key."
class K(KeyCode):
    "The K key."
class L(KeyCode):
    "The L key."
class M(KeyCode):
    "The M key."
class N(KeyCode):
    "The N key."
class O(KeyCode):
    "The O key."
class P(KeyCode):
    "The P key."
class Q(KeyCode):
    "The Q key."
class R(KeyCode):
    "The R key."
class S(KeyCode):
    "The S key."
class T(KeyCode):
    "The T key."
class U(KeyCode):
    "The U key."
class V(KeyCode):
    "The V key."
class W(KeyCode):
    "The W key."
class X(KeyCode):
    "The X key."
class Y(KeyCode):
    "The Y key."
class Z(KeyCode):
    "The Z key."
# Number-row keys (not the numpad).  Shifted symbols are layout-specific.
class One(KeyCode):
    """
    1

    Shift+1 is ! on american keyboards
    """
class Two(KeyCode):
    """
    2

    Shift+2 is @ on american keyboards
    """
class Three(KeyCode):
    """
    3

    Shift+3 is # on american keyboards
    """
class Four(KeyCode):
    """
    4

    Shift+4 is $ on american keyboards
    """
class Five(KeyCode):
    """
    5

    Shift+5 is % on american keyboards
    """
class Six(KeyCode):
    """
    6

    Shift+6 is ^ on american keyboards
    """
class Seven(KeyCode):
    """
    7

    Shift+7 is & on american keyboards
    """
class Eight(KeyCode):
    """
    8

    Shift+8 is * on american keyboards
    """
class Nine(KeyCode):
    """
    9

    Shift+9 is ( on american keyboards
    """
class Zero(KeyCode):
    """
    0

    Shift+0 is ) on american keyboards
    """
# Function keys.
class F1(KeyCode):
    "Function key F1."
class F2(KeyCode):
    "Function key F2."
class F3(KeyCode):
    "Function key F3."
class F4(KeyCode):
    "Function key F4."
class F5(KeyCode):
    "Function key F5."
class F6(KeyCode):
    "Function key F6."
class F7(KeyCode):
    "Function key F7."
class F8(KeyCode):
    "Function key F8."
class F9(KeyCode):
    "Function key F9."
class F10(KeyCode):
    "Function key F10."
class F11(KeyCode):
    "Function key F11."
class F12(KeyCode):
    "Function key F12."
class F13(KeyCode):
    "Function key F13."
class F14(KeyCode):
    "Function key F14."
class F15(KeyCode):
    "Function key F15."
class F16(KeyCode):
    "Function key F16."
class F17(KeyCode):
    "Function key F17."
class F18(KeyCode):
    "Function key F18."
class F19(KeyCode):
    "Function key F19."
class F20(KeyCode):
    "Function key F20."
# Modifier, punctuation, and navigation keys.
class AltRight(KeyCode):
    "Right Alt modifier, called Option on mac"
class AltLeft(KeyCode):
    "Left Alt modifier, called Option on mac"
class Backslash(KeyCode):
    """
    \\

    Shift+\\ is | on american keyboards
    """
class Backspace(KeyCode):
    "The Backspace key."
class BracketLeft(KeyCode):
    """
    [

    Shift+[ is { on american keyboards
    """
class BracketRight(KeyCode):
    """
    ]

    Shift+] is } on american keyboards
    """
class CapsLock(KeyCode):
    "The Caps Lock key."
class Comma(KeyCode):
    """
    ,

    Shift+, is < on american keyboards
    """
class CtrlLeft(KeyCode):
    "Left Control modifier"
class CtrlRight(KeyCode):
    "Right Control modifier"
class Delete(KeyCode):
    "The Delete key."
class Down(KeyCode):
    "Down navigation arrow"
class End(KeyCode):
    "The End navigation key."
class Enter(KeyCode):
    "Enter, return, newline"
class Equals(KeyCode):
    """
    =

    Shift+= is + on american keyboards
    """
class Escape(KeyCode):
    "The Escape key."
class Function(KeyCode):
    "Fn, if reported by your keyboard"
class Grave(KeyCode):
    """
    `, usually to the left of 1 or F1 on american keyboards.

    Shift+` is ~ on american keyboards
    """
class Home(KeyCode):
    "The Home navigation key."
class Insert(KeyCode):
    "The Insert key."
class Left(KeyCode):
    "Left navigation arrow"
class Menu(KeyCode):
    "The Menu key."
class Minus(KeyCode):
    """
    -

    Shift+- is _ on american keyboards
    """
class NumLock(KeyCode):
    "The Num Lock key."
class PageDown(KeyCode):
    "The Page Down key."
class PageUp(KeyCode):
    "The Page Up key."
class Pause(KeyCode):
    """
    Pause, generally lives next to Print Screen and Scroll Lock.

    Also Break.
    """
class Period(KeyCode):
    """
    .

    Shift+. is > on american keyboards
    """
class PrintScreen(KeyCode):
    """
    PrtSc, PrtScrn, etc

    Also System Request/SysReq/SysRq/etc.
    """
class Quote(KeyCode):
    """
    ', the single quote

    Shift+' is " on american keyboards
    """
class Right(KeyCode):
    "Right navigation arrow"
class ScrollLock(KeyCode):
    "The Scroll Lock key."
class Semicolon(KeyCode):
    """
    ;

    Shift+; is : on american keyboards
    """
class ShiftLeft(KeyCode):
    "Left shift modifier"
class ShiftRight(KeyCode):
    "Right shift modifier"
class Slash(KeyCode):
    """
    /

    Shift+/ is ? on american keyboards
    """
class Space(KeyCode):
    "The Space bar."
class SuperLeft(KeyCode):
    "Left Super modifier, also called Windows or Command"
class SuperRight(KeyCode):
    "Right Super modifier, also called Windows or Command"
class Tab(KeyCode):
    "The Tab key."
class Up(KeyCode):
    "Up navigation arrow"
# Numpad codes (unified between pygame and pyglet)
# 0 1 2 3 4 5 6 7 8 9 add begin decimal delete divide down end enter equals
# f1 f2 f3 f4 home insert left minus multiply next page down page up period plus
# prior right separator space subtract tab up
|
<filename>glue/qt/glue_toolbar.py
import os
import matplotlib
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT
from ..external.qt import QtCore, QtGui
from ..external.qt.QtGui import QMenu
from ..external.qt.QtCore import Qt, Signal
from ..core.callback_property import add_callback
from .qtutil import get_icon, nonpartial
class GlueToolbar(NavigationToolbar2QT):
pan_begin = Signal()
pan_end = Signal()
mode_activated = Signal()
mode_deactivated = Signal()
    def __init__(self, canvas, frame, name=None):
        """ Create a new toolbar object

        Parameters
        ----------
        canvas : Matplotlib canvas instance
            The drawing canvas to interact with
        frame : QWidget
            The QT frame that the canvas is embedded within.
        name : str, optional
            Window title to set on the toolbar.
        """
        # Maps uppercase action keys (e.g. 'HOME', 'ZOOM') and custom
        # mode ids to their QAction; populated in _init_toolbar/add_mode.
        self.buttons = {}
        self.__active = None
        self.basedir = None
        # NOTE: NavigationToolbar2QT.__init__ triggers _init_toolbar below.
        NavigationToolbar2QT.__init__(self, canvas, frame)
        if name is not None:
            self.setWindowTitle(name)
        self.setIconSize(QtCore.QSize(25, 25))
        self.layout().setSpacing(1)
        self.setFocusPolicy(Qt.StrongFocus)
        # Key-event connection id, cleared by _deactivate_custom_modes.
        self._idKey = None

        # pyside is prone to segfaults if slots hold the only
        # reference to a signal, so we hold an extra reference
        # see https://bugreports.qt-project.org/browse/PYSIDE-88
        self.__signals = []
    def _init_toolbar(self):
        """Build the standard toolbar actions.

        Called by NavigationToolbar2QT.__init__.  Each QAction is added
        both to the parent widget (so widget-scoped shortcuts fire) and
        to this toolbar, and recorded in self.buttons under an
        uppercase key.
        """
        self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
        parent = QtGui.QToolBar.parent(self)

        # Home: reset to the original zoom.
        a = QtGui.QAction(get_icon('glue_home'),
                          'Home', parent)
        a.triggered.connect(nonpartial(self.home))
        a.setToolTip('Reset original zoom')
        a.setShortcut('H')
        a.setShortcutContext(Qt.WidgetShortcut)
        parent.addAction(a)
        self.buttons['HOME'] = a
        self.addAction(a)

        # Save: export the current figure.
        a = QtGui.QAction(get_icon('glue_filesave'),
                          'Save', parent)
        a.triggered.connect(nonpartial(self.save_figure))
        a.setToolTip('Save the figure')
        a.setShortcut('Ctrl+Shift+S')
        parent.addAction(a)
        self.buttons['SAVE'] = a
        self.addAction(a)

        # Back / Forward: navigate the matplotlib view history.
        a = QtGui.QAction(get_icon('glue_back'),
                          'Back', parent)
        a.triggered.connect(nonpartial(self.back))
        parent.addAction(a)
        self.addAction(a)
        self.buttons['BACK'] = a
        a.setToolTip('Back to previous view')

        a = QtGui.QAction(get_icon('glue_forward'),
                          'Forward', parent)
        a.triggered.connect(nonpartial(self.forward))
        a.setToolTip('Forward to next view')
        parent.addAction(a)
        self.buttons['FORWARD'] = a
        self.addAction(a)

        # Pan: checkable drag-to-pan mode.
        a = QtGui.QAction(get_icon('glue_move'),
                          'Pan', parent)
        a.triggered.connect(nonpartial(self.pan))
        a.setToolTip('Pan axes with left mouse, zoom with right')
        a.setCheckable(True)
        a.setShortcut('M')
        a.setShortcutContext(Qt.WidgetShortcut)
        parent.addAction(a)
        self.addAction(a)
        self.buttons['PAN'] = a

        # Zoom: checkable rubber-band zoom mode.
        a = QtGui.QAction(get_icon('glue_zoom_to_rect'),
                          'Zoom', parent)
        a.triggered.connect(nonpartial(self.zoom))
        a.setToolTip('Zoom to rectangle')
        a.setShortcut('Z')
        a.setShortcutContext(Qt.WidgetShortcut)
        a.setCheckable(True)
        parent.addAction(a)
        self.addAction(a)
        self.buttons['ZOOM'] = a

        #self.adj_window = None
@property
def _active(self):
return self.__active
@_active.setter
def _active(self, value):
if self.__active == value:
return
self.__active = value
if value not in [None, '']:
self.mode_activated.emit()
else:
self.mode_deactivated.emit()
    def home(self, *args):
        """Reset the view to the original zoom, then announce it.

        Extends the matplotlib handler by emitting the canvas'
        ``homeButton`` signal so listeners can react to the reset.
        """
        super(GlueToolbar, self).home(*args)
        self.canvas.homeButton.emit()
    def zoom(self, *args):
        """Toggle zoom-to-rectangle mode.

        Custom modes are torn down first so their canvas event handlers
        do not fight with the built-in zoom handler.
        """
        self._deactivate_custom_modes()
        super(GlueToolbar, self).zoom(*args)
        self._update_buttons_checked()
    def pan(self, *args):
        """Toggle pan/zoom mode.

        Custom modes are torn down first so their canvas event handlers
        do not fight with the built-in pan handler.
        """
        self._deactivate_custom_modes()
        super(GlueToolbar, self).pan(*args)
        self._update_buttons_checked()
    def _deactivate_custom_modes(self):
        """Disconnect any canvas event handlers installed by a custom mode.

        Press/release/key handlers are dropped entirely; the drag handler
        is re-pointed at the default ``mouse_move`` tracker so the
        coordinate readout keeps working. Resets the mode label to ''.
        """
        if self._idPress is not None:
            self._idPress = self.canvas.mpl_disconnect(self._idPress)
        if self._idRelease is not None:
            self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
        if self._idDrag is not None:
            self._idDrag = self.canvas.mpl_disconnect(
                self._idDrag)
            # restore the default motion handler in place of the mode's
            self._idDrag = self.canvas.mpl_connect('motion_notify_event',
                                                   self.mouse_move)
        if self._idKey is not None:
            self._idKey = self.canvas.mpl_disconnect(self._idKey)
        self.mode = ''
def add_mode(self, mode):
parent = QtGui.QToolBar.parent(self)
def toggle():
self._custom_mode(mode)
def enable():
# turn on if not
if self._active != mode.mode_id:
self._custom_mode(mode)
action = QtGui.QAction(mode.icon, mode.action_text, parent)
action.triggered.connect(nonpartial(toggle))
parent.addAction(action)
self.__signals.extend([toggle, enable])
if mode.shortcut is not None:
action.setShortcut(mode.shortcut)
action.setShortcutContext(Qt.WidgetShortcut)
action.setToolTip(mode.tool_tip)
action.setCheckable(True)
self.buttons[mode.mode_id] = action
menu_actions = mode.menu_actions()
if len(menu_actions) > 0:
menu = QMenu(self)
for ma in mode.menu_actions():
ma.setParent(self)
menu.addAction(ma)
action.setMenu(menu)
menu.triggered.connect(nonpartial(enable))
self.addAction(action)
# bind action status to mode.enabled
def toggle(state):
action.setVisible(state)
action.setEnabled(state)
add_callback(mode, 'enabled', toggle)
return action
def set_mode(self, mode):
if self._active != mode.mode_id:
self._custom_mode(mode)
    def _custom_mode(self, mode):
        """Toggle *mode*: activate it, or deactivate it if already active.

        When activating, the mode's press/move/release/key handlers are
        connected to the canvas and the canvas widget lock is taken;
        when deactivating, the lock is released. Axes navigation mode is
        reset either way, the status message is refreshed, and button
        checked states are synced.
        """
        if self._active == mode.mode_id:
            # selecting the active mode again switches it off
            self._active = None
        else:
            self._active = mode.mode_id
        self._deactivate_custom_modes()
        if self._active:
            self._idPress = self.canvas.mpl_connect(
                'button_press_event', mode.press)
            self._idDrag = self.canvas.mpl_connect(
                'motion_notify_event', mode.move)
            self._idRelease = self.canvas.mpl_connect(
                'button_release_event', mode.release)
            self._idKey = self.canvas.mpl_connect(
                'key_press_event', mode.key)
            self.mode = mode.action_text
            # grab the canvas-wide widget lock so other tools stand down
            self.canvas.widgetlock(self)
        else:
            self.canvas.widgetlock.release(self)
        for a in self.canvas.figure.get_axes():
            a.set_navigate_mode(None)
        self.set_message(self.mode)
        self._update_buttons_checked()
    def press_pan(self, event):
        """Emit ``pan_begin``, then defer to the matplotlib pan handler."""
        self.pan_begin.emit()
        super(GlueToolbar, self).press_pan(event)
    def release_pan(self, event):
        """Emit ``pan_end``, then defer to the matplotlib pan handler."""
        self.pan_end.emit()
        super(GlueToolbar, self).release_pan(event)
def _update_buttons_checked(self):
for mode in self.buttons:
self.buttons[mode].setChecked(self._active == mode)
    def set_message(self, s):
        """Show message *s* in the parent window's status bar.

        Also re-emits the text via the (old-style) "message" Qt signal
        so other listeners can pick it up.
        """
        self.emit(QtCore.SIGNAL("message"), s)
        parent = QtGui.QToolBar.parent(self)
        if parent is None:
            return
        sb = parent.statusBar()
        if sb is None:
            return
        # status bars are single-line; render ', ' separators as newlines
        sb.showMessage(s.replace(', ', '\n'))
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import itertools
import optparse
import os
import Queue
import sys
import threading
import time
import traceback
from conary.conaryclient import callbacks as clientCallbacks
from conary.conaryclient import cmdline
from conary import conarycfg, callbacks, trove
from conary.lib import cfg, util, log
from conary.repository import errors, changeset, netclient
from conary.deps.deps import parseFlavor
class OptionError(Exception):
    """Command-line usage error carrying an exit code and a message."""

    def __init__(self, errcode, errmsg, *args):
        super(OptionError, self).__init__(*args)
        self.errcode = errcode
        self.errmsg = errmsg
def parseArgs(argv):
    """Parse the mirror tool's command-line options.

    Returns the optparse options object. Raises OptionError when no
    configuration file is given or stray positional arguments remain.
    """
    parser = optparse.OptionParser(version='%prog 0.1')
    addOpt = parser.add_option
    addOpt("--config-file", dest="configFile", metavar="FILE",
           help="configuration file")
    addOpt("--full-sig-sync", dest="infoSync",
           action="store_true", default=False,
           help="deprecated: alias to --full-info-sync")
    addOpt("--full-info-sync", dest="infoSync",
           action="store_true", default=False,
           help="replace all the trove signatures and metadata "
                "in the target repository")
    addOpt("--fast-sync", dest="fastSync",
           action="store_true", default=False,
           help="skip checking/mirroring of changed info records "
                "for already mirrored troves")
    addOpt("--absolute", dest="absolute",
           action="store_true", default=False,
           help="use only absolute changesets when mirroring content")
    addOpt("--full-trove-sync", dest="sync",
           action="store_true", default=False,
           help="ignore the last-mirrored timestamp in the "
                "target repository")
    addOpt("--check-sync", dest="checkSync",
           action="store_true", default=False,
           help="only check if the source and target(s) are in sync")
    addOpt("--test", dest="test",
           action="store_true", default=False,
           help="skip commiting changes to the target repository")
    addOpt("-v", "--verbose", dest="verbose",
           action="store_true", default=False,
           help="display information on what is going on")
    options, args = parser.parse_args(argv)
    if options.configFile is None:
        raise OptionError(1, 'a mirror configuration must be provided')
    if args:
        raise OptionError(1, 'unexpected arguments: %s' % " ".join(args))
    return options
class VerboseChangesetCallback(clientCallbacks.ChangesetCallback):
    """Changeset callback used with --verbose progress output."""

    def done(self):
        # drop the prefix and rewind the line so the next message
        # overwrites the progress display
        self.clearPrefix()
        self._message('\r')
class ChangesetCallback(callbacks.ChangesetCallback):
    """Quiet changeset callback: silences the base class' prefix output."""

    def setPrefix(self, *args):
        # intentionally a no-op in quiet mode
        pass

    def clearPrefix(self):
        # intentionally a no-op in quiet mode
        pass
class MirrorConfigurationSection(cfg.ConfigSection):
    """One repository endpoint section ([source]/[reference]/[target*])."""
    # how to reach the repository
    repositoryMap = conarycfg.CfgRepoMap
    # credentials for it
    user = conarycfg.CfgUserInfo
    entitlement = conarycfg.CfgEntitlement
class MirrorFileConfiguration(cfg.SectionedConfigFile):
    """Top-level mirror configuration file.

    Besides the options below, the file holds one
    MirrorConfigurationSection per [source]/[reference]/[target*] block.
    """
    # repository host being mirrored; every configured label must live on it
    host = cfg.CfgString
    entitlementDirectory = cfg.CfgPath
    # optional restriction to specific labels on `host`
    labels = conarycfg.CfgInstallLabelPath
    # regexp filters applied to trove names / full trove specs
    matchTroves = cfg.CfgSignedRegExpList
    matchTroveSpecs = cfg.CfgSignedRegExpList
    recurseGroups = (cfg.CfgBool, False)
    uploadRateLimit = (conarycfg.CfgInt, 0,
                       "Upload rate limit, in bytes per second")
    downloadRateLimit = (conarycfg.CfgInt, 0,
                         "Download rate limit, in bytes per second")
    # optional lock file preventing concurrent mirror runs
    lockFile = cfg.CfgString
    useHiddenCommits = (cfg.CfgBool, True)
    absoluteChangesets = (cfg.CfgBool, False)
    includeSources = (cfg.CfgBool, False)
    splitNodes = (cfg.CfgBool, False,
                  "Split jobs that would commit two versions of a trove at once. "
                  "Needed for compatibility with older repositories.")
    noPGP = (cfg.CfgBool, False)
    # allow arbitrary extra sections (the target* blocks)
    _allowNewSections = True
    _defaultSectionType = MirrorConfigurationSection
# some sanity checks for the mirror configuration
def checkConfig(cfg):
    """Sanity-check a mirror configuration.

    Ensures a host is configured and that every configured label lives
    on that host. Logs and raises RuntimeError on any violation.
    """
    if not cfg.host:
        log.error("ERROR: cfg.host is not defined")
        raise RuntimeError("cfg.host is not defined")
    # make sure that each label belongs to the host we're mirroring
    for label in cfg.labels:
        if label.getHost() != cfg.host:
            log.error("ERROR: label %s is not on host %s", label, cfg.host)
            # bug fix: interpolate the values instead of passing them
            # as extra exception constructor arguments
            raise RuntimeError("label %s is not on host %s" % (label, cfg.host))
def _getMirrorClient(mirrorCfg, section):
    """Build a NetworkRepositoryClient from one mirror config section.

    Per-section credentials (repositoryMap/user/entitlement) are merged
    with the global rate-limit and entitlement-directory settings.
    """
    section = mirrorCfg.getSection(section)
    cfg = conarycfg.ConaryConfiguration(False)
    # endpoint-specific settings come from the section...
    for name in ['repositoryMap', 'user', 'entitlement']:
        cfg[name] = section[name]
    # ...global settings from the top-level configuration
    for name in ['uploadRateLimit', 'downloadRateLimit', 'entitlementDirectory']:
        cfg[name] = mirrorCfg[name]
    return netclient.NetworkRepositoryClient(cfg=cfg)
def mainWorkflow(cfg = None, callback=ChangesetCallback(),
                 test=False, sync=False, infoSync=False,
                 checkSync=False, fastSync=False):
    """Drive a full mirror run described by *cfg*.

    Acquires the optional lock file, builds source/reference/target
    repository clients, then calls mirrorRepository() repeatedly until
    it reports nothing is left to mirror. With checkSync=True only a
    synchronization check is performed.
    """
    import fcntl
    if cfg.lockFile:
        try:
            log.debug('checking for lock file')
            lock = open(cfg.lockFile, 'w')
            # non-blocking: bail out instead of queuing behind a runner
            fcntl.lockf(lock, fcntl.LOCK_EX|fcntl.LOCK_NB)
        except IOError:
            log.warning('lock held by another process, exiting')
            return
    # need to make sure we have a 'source' section
    if not cfg.hasSection('source'):
        log.debug("ERROR: mirror configuration file is missing a [source] section")
        raise RuntimeError("Mirror configuration file is missing a [source] section")
    sourceRepos = _getMirrorClient(cfg, 'source')
    # Optional reference repository; defaults to the source itself
    if cfg.hasSection('reference'):
        refRepos = _getMirrorClient(cfg, 'reference')
    else:
        refRepos = sourceRepos
    # we need to build a target repo client for each of the "target*"
    # sections in the config file
    targets = []
    for name in cfg.iterSectionNames():
        if not name.startswith("target"):
            continue
        target = _getMirrorClient(cfg, name)
        target = TargetRepository(target, cfg, name, test=test)
        targets.append(target)
    # checkSync is a special operation...
    if checkSync:
        return checkSyncRepos(cfg, refRepos, targets)
    # we pass in the sync flag only the first time around, because after
    # that we need the targetRepos mark to advance accordingly after being
    # reset to -1
    callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                 test = test, sync = sync,
                                 syncSigs = infoSync,
                                 callback = callback,
                                 fastSync = fastSync,
                                 referenceRepos=refRepos,
                                 )
    while callAgain:
        callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                     test = test, callback = callback,
                                     fastSync = fastSync,
                                     referenceRepos=refRepos,
                                     )
def Main(argv=None):
if argv is None:
argv = argv=sys.argv[1:]
try:
options = parseArgs(argv)
except OptionError, e:
sys.stderr.write(e.errmsg)
sys.stderr.write("\n")
return e.errcode
cfg = MirrorFileConfiguration()
cfg.read(options.configFile, exception = True)
callback = ChangesetCallback()
if options.absolute:
cfg.absoluteChangesets = True
if options.verbose:
log.setVerbosity(log.DEBUG)
callback = VerboseChangesetCallback()
if options.fastSync: # make --fast-sync imply --full-trove-sync
options.sync = True
try:
mainWorkflow(cfg, callback, options.test,
sync = options.sync, infoSync = options.infoSync,
fastSync = options.fastSync, checkSync = options.checkSync)
except KeyboardInterrupt:
print >> sys.stderr
print >> sys.stderr, 'Terminating due to user interrupt'
sys.exit(1)
def groupTroves(troveList):
    """Partition (mark, (name, version, flavor), ...) tuples into groups.

    Troves sharing a version/flavor pair must land in a single commit,
    so they are grouped together; the groups are then sorted in
    ascending mark order, with groups containing group-troves pushed
    toward the end among equal marks.
    """
    # combine the troves into indisolvable groups based on their version and
    # flavor; it's assumed that adjacent troves with the same version/flavor
    # must be in a single commit
    grouping = {}
    for info in troveList:
        (n, v, f) = info[1]
        crtGrp = grouping.setdefault((v,f), [])
        crtGrp.append(info)
    # Python 2: dict.values() returns a list here
    grouping = grouping.values()
    # make sure the groups are sorted in ascending order of their mark
    def _groupsort(a, b):
        # primary key: the mark of the group's first trove
        ret = cmp(a[0][0], b[0][0])
        if ret:
            return ret
        # if they have the same mark, sort the groups at the end
        ahasgrp = [x[1][1] for x in a if trove.troveIsGroup(x[1][0])]
        bhasgrp = [x[1][1] for x in b if trove.troveIsGroup(x[1][0])]
        if len(ahasgrp) > len(bhasgrp):
            return 1
        if len(bhasgrp) > len(ahasgrp):
            return -1
        return cmp(ahasgrp, bhasgrp)
    grouping.sort(_groupsort)
    return grouping
def buildJobList(src, target, groupList, absolute=False, splitNodes=True,
                 jobSize=20):
    """Build bundles of changeset jobs from version/flavor groups.

    Each trove is matched against what *target* already has so it can
    be mirrored as a relative changeset where possible. Bundles hold at
    most *jobSize* jobs and are split when they would commit two
    versions of the same trove (if *splitNodes*) or span repository
    hosts.

    Returns a list of bundles; each bundle is a list of (mark, job)
    pairs where job is (name, (oldV, oldF), (newV, newF), isAbsolute).
    """
    # Match each trove with something we already have; this is to mirror
    # using relative changesets, which is a lot more efficient than using
    # absolute ones.
    q = {}
    srcAvailable = {}
    for group in groupList:
        for mark, (name, version, flavor) in group:
            # force groups to always be transferred using absolute changesets
            if trove.troveIsGroup(name):
                continue
            srcAvailable[(name,version,flavor)] = True
            d = q.setdefault(name, {})
            l = d.setdefault(version.branch(), [])
            l.append(flavor)
    # check that the latest available versions from the target are
    # present on the source to be able to use relative changesets
    latestAvailable = {}
    if len(q):
        latestAvailable = target.getTroveLeavesByBranch(q)
    # normalize to {name: {version: set(flavors)}}
    latestAvailable = dict(
        (name, dict(
            (version, set(flavors))
            for (version, flavors) in versions.iteritems()
        )) for (name, versions) in latestAvailable.iteritems())
    if len(latestAvailable):
        def _tol(d):
            # flatten {n: {v: [f]}} into (n, v, f) tuples
            for n, vd in d.iteritems():
                for v, fl in vd.iteritems():
                    for f in fl:
                        yield (n,v,f)
        ret = src.hasTroves(list(_tol(latestAvailable)), hidden=True)
        srcAvailable.update(ret)
    def _split():
        # Stop adding troves to this job and allow its troves to be used for
        # the next job's relative changesets.
        for mark, job in jobList[-1]:
            name = job[0]
            if trove.troveIsGroup(name):
                continue
            oldVersion, oldFlavor = job[1]
            newVersion, newFlavor = job[2]
            srcAvailable[(name, newVersion, newFlavor)] = True
            d = latestAvailable.setdefault(name, {})
            if oldVersion in d and oldVersion.branch() == newVersion.branch():
                # If the old version is on the same branch as the new one,
                # replace the old with the new. If it's on a different
                # branch, we'll track both.
                flavorList = d[oldVersion]
                flavorList.discard(oldFlavor)
                if not flavorList:
                    del d[oldVersion]
            flavorList = d.setdefault(newVersion, set())
            flavorList.add(newFlavor)
        if jobList[-1]:
            jobList.append([])
    # we'll keep latestAvailable in sync with what the target will look like
    # as the mirror progresses
    jobList = [[]]
    currentNodes = set()
    currentHost = None
    for group in groupList:
        # for each job find what it's relative to and build up a job list
        thisJob = []
        for mark, (name, version, flavor) in group:
            # currentMatch = name, version, versionDistance, flavorScore
            currentMatch = (None, None, None, None)
            if absolute or name not in latestAvailable:
                job = (name, (None, None), (version, flavor), True)
            else:
                d = latestAvailable[name]
                for repVersion, flavorList in d.iteritems():
                    # the versions have to be on the same host to be
                    # able to generate relative changesets
                    if version.getHost() != repVersion.getHost():
                        continue
                    for repFlavor in flavorList:
                        # the relative base must exist on the source
                        if not srcAvailable.get((name, repVersion, repFlavor), False):
                            continue
                        score = flavor.score(repFlavor)
                        if score is False:
                            continue
                        if repVersion == version:
                            closeness = 100000
                        else:
                            closeness = version.closeness(repVersion)
                        # prefer the best flavor score; break ties on
                        # version closeness
                        if score < currentMatch[3]:
                            continue
                        elif score > currentMatch[3]:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)
                        elif closeness < currentMatch[2]:
                            continue
                        else:
                            currentMatch = (repVersion, repFlavor, closeness,
                                            score)
                job = (name, (currentMatch[0], currentMatch[1]),
                       (version, flavor), currentMatch[0] is None)
            thisJob.append((mark, job))
        newNodes = set((x[1][0], x[1][2][0].branch()) for x in thisJob)
        newHosts = set(x[1][2][0].getHost() for x in thisJob)
        assert len(newHosts) == 1
        newHost = list(newHosts)[0]
        if (len(jobList[-1]) >= jobSize
                # Can't commit two versions of the same trove
                or (splitNodes and newNodes & currentNodes)
                # Can't commit troves on different hosts
                or currentHost not in (None, newHost)
                ):
            _split()
            currentNodes = set()
        jobList[-1].extend(thisJob)
        currentNodes.update(newNodes)
        currentHost = newHost
    if not jobList[-1]:
        jobList.pop()
    return jobList
# group troves already expanded during this process (avoid re-fetching)
recursedGroups = set()
def recurseTrove(sourceRepos, name, version, flavor,
                 callback = ChangesetCallback()):
    """Return the troves referenced by group *name* (one level deep).

    Results are memoized in the module-level recursedGroups set so each
    group is only expanded once per process. The group changeset is
    fetched without files or file contents.
    """
    global recursedGroups
    assert(trove.troveIsGroup(name))
    # there's nothing much we can recurse from the source
    if name.endswith(":source"):
        return []
    # avoid grabbing the same group multiple times
    if (name, version, flavor) in recursedGroups:
        return []
    log.debug("recursing group trove: %s=%s[%s]" % (name, version, flavor))
    groupCs = sourceRepos.createChangeSet(
        [(name, (None, None), (version, flavor), True)],
        withFiles=False, withFileContents=False, recurse=False,
        callback = callback)
    recursedGroups.add((name, version, flavor))
    ret = []
    for troveCs in groupCs.iterNewTroveList():
        for name, ops in troveCs.iterChangedTroves(True, True):
            for oper, version, flavor, byDefault in ops:
                # skip erasures; collect everything added or changed
                if oper != '-':
                    ret.append((name, version, flavor))
    return ret
def _toBraces(items):
if len(items) > 1:
return '{%s}' % (','.join(sorted(items)))
else:
return list(items)[0]
def formatTroveNames(names):
    """Group trove names by package and format them like a shell glob."""
    # Bucket component suffixes (':runtime', ...) under their package.
    grouped = {}
    for name in names:
        if ':' in name:
            pkg, comp = name.split(':')
            comp = ':' + comp
        else:
            pkg, comp = name, ''
        grouped.setdefault(pkg, []).append(comp)
    # When every package carries the identical component list, collapse
    # the whole set into a single <packages><components> glob.
    distinct = set(tuple(comps) for comps in grouped.values())
    if len(distinct) == 1:
        (common,) = distinct
        if len(common) > 1:
            return _toBraces(grouped) + _toBraces(common)
    # Otherwise format each package with its own component glob.
    formattedList = [
        pkg + (comps[0] if len(comps) == 1 else _toBraces(sorted(comps)))
        for pkg, comps in sorted(grouped.items())
    ]
    if len(formattedList) == 1:
        return formattedList[0]
    return _toBraces(sorted(formattedList))
def displayBundle(bundle):
    """Format a job bundle for display.

    Jobs are grouped by their (old, new) version/flavor pair, trove
    names are folded shell-glob style, and the bundle's minimum mark is
    appended as the prospective new mirror mark.
    """
    minMark = min([x[0] for x in bundle])
    # Group by version and flavor
    trovesByVF = {}
    for mark, (name, oldVF, newVF, absolute) in bundle:
        trovesByVF.setdefault((oldVF, newVF), set()).add(name)
    # Within each VF set, sort and fold the names and format for display.
    lines = []
    for (oldVF, newVF), names in trovesByVF.items():
        allNames = formatTroveNames(names)
        # Add version and flavor info
        if oldVF[0]:
            if oldVF[1] != newVF[1]:
                # the flavor changed too: show old version and flavor
                oldInfo = '%s[%s]--' % oldVF
            else:
                oldInfo = '%s--' % (oldVF[0],)
        else:
            # absolute job: no old side to show
            oldInfo = ''
        newInfo = '%s[%s]' % newVF
        lines.append(''.join((allNames, '=', oldInfo, newInfo)))
    lines.sort()
    lines.insert(0, '')
    lines.append('New mark: %.0f' % (minMark,))
    return "\n ".join(lines)
# wrapper for displaying a simple jobList
def displayJobList(jobList):
    """Format a plain job list by wrapping each job with a zero mark."""
    return displayBundle([(0, job) for job in jobList])
# mirroring stuff when we are running into PathIdConflict errors
def splitJobList(jobList, src, targetSet, hidden = False, callback = ChangesetCallback()):
    """Re-run *jobList* in smaller per-package chunks.

    Used when a combined commit hits a changeset-key (PathId) conflict:
    jobs are regrouped by package name and each group is committed to
    every target via a temporary changeset file.
    """
    log.debug("Changeset Key conflict detected; splitting job further...")
    jobs = {}
    for job in jobList:
        name = job[0]
        # group components together with their package
        if ':' in name:
            name = name.split(':')[0]
        l = jobs.setdefault(name, [])
        l.append(job)
    i = 0
    for smallJobList in jobs.itervalues():
        (outFd, tmpName) = util.mkstemp()
        os.close(outFd)
        log.debug("jobsplit %d of %d %s" % (
            i + 1, len(jobs), displayBundle([(0,x) for x in smallJobList])))
        src.createChangeSetFile(smallJobList, tmpName, recurse = False,
                                callback = callback, mirrorMode = True)
        # commit the same file to every target concurrently
        _parallel(targetSet, TargetRepository.commitChangeSetFile,
                  tmpName, hidden=hidden, callback=callback)
        os.unlink(tmpName)
        callback.done()
        i += 1
    return
# filter a trove tuple based on cfg
def _filterTup(troveTup, cfg):
    """Return True if (name, version, flavor) should be mirrored per *cfg*.

    Applies, in order: matchTroveSpecs, matchTroves, then host and
    label restrictions.
    """
    (n, v, f) = troveTup
    troveSpec = cmdline.toTroveSpec(n, str(v), f)
    # filter by trovespec
    if cfg.matchTroveSpecs and cfg.matchTroveSpecs.match(troveSpec) <= 0:
        return False
    # if we're matching troves
    if cfg.matchTroves and cfg.matchTroves.match(n) <= 0:
        return False
    # filter by host/label
    if v.getHost() != cfg.host:
        return False
    if cfg.labels and v.branch().label() not in cfg.labels:
        return False
    return True
# get all the trove info to be synced
def _getAllInfo(src, cfg):
    """Fetch sigs and metadata for every trove on cfg.host (full resync).

    Returns a list of ((name, version, flavor), TroveInfo) pairs.
    """
    log.debug("resync all trove info from source. This will take a while...")
    # grab the full list of all the trove versions and flavors in the src
    troveDict = src.getTroveVersionList(cfg.host, { None : None })
    troveList = []
    # flatten the nested {name: {version: [flavor]}} dict into tuples
    for name, versionD in troveDict.iteritems():
        for version, flavorList in versionD.iteritems():
            for flavor in flavorList:
                tup = (name, version, flavor)
                troveList.append(tup)
    del troveDict
    # retrieve the sigs and the metadata records to sync over
    sigList = src.getTroveSigs(troveList)
    metaList = src.getTroveInfo(trove._TROVEINFO_TAG_METADATA, troveList)
    infoList = []
    for t, s, ti in itertools.izip(troveList, sigList, metaList):
        # metadata may be absent; the sigs are thawed into the TroveInfo
        if ti is None:
            ti = trove.TroveInfo()
        ti.sigs.thaw(s)
        infoList.append((t, ti))
    return infoList
# while talking to older repos - get the new trove sigs
def _getNewSigs(src, cfg, mark):
    """Old-server fallback: fetch trove sigs changed since *mark*.

    Returns (mark, (name, version, flavor), TroveInfo) tuples where
    each TroveInfo carries only signature data.
    """
    # talking to an old source server. We do the best and we get the sigs out
    sigList = src.getNewSigList(cfg.host, str(mark))
    log.debug("obtained %d changed trove sigs", len(sigList))
    sigList = [ x for x in sigList if _filterTup(x[1], cfg) ]
    log.debug("%d changed sigs after label and match filtering", len(sigList))
    # protection against duplicate items returned in the list by some servers
    sigList = list(set(sigList))
    sigList.sort(lambda a,b: cmp(a[0], b[0]))
    log.debug("downloading %d signatures from source repository", len(sigList))
    # XXX: we could also get the metadata in here, but getTroveInfo
    # would use a getChangeSet call against older repos, severely
    # impacting performance
    sigs = src.getTroveSigs([ x[1] for x in sigList ])
    # need to convert the sigs into TroveInfo instances
    def _sig2info(sig):
        ti = trove.TroveInfo()
        ti.sigs.thaw(sig)
        return ti
    sigs = [ _sig2info(s) for s in sigs]
    # we're gonna iterate repeatedly over the returned set, so build a list
    return [(m, t, ti) for (m,t),ti in itertools.izip(sigList, sigs) ]
# get the changed trove info entries for the troves comitted
def _getNewInfo(src, cfg, mark):
    """Fetch troveinfo (sigs + metadata) changed since *mark*.

    Prefers the newer getNewTroveInfo API; falls back to a
    signatures-only sync against servers that do not support it.
    """
    # first, try the new getNewTroveInfo call
    labels = cfg.labels or []
    mark = str(long(mark)) # xmlrpc chokes on longs
    infoTypes = [trove._TROVEINFO_TAG_SIGS, trove._TROVEINFO_TAG_METADATA]
    try:
        infoList = src.getNewTroveInfo(cfg.host, mark, infoTypes, labels)
    except errors.InvalidServerVersion:
        # otherwise we mirror just the sigs...
        infoList = _getNewSigs(src, cfg, mark)
    return infoList
def _parallel_run(index, results, targets, classMethod, args, kwargs):
try:
target = targets[index]
ret = (index, True, classMethod(target, *args, **kwargs))
except Exception as err:
ret = (index, False, (err, traceback.format_exc()))
results.put(ret)
def _parallel(targets, classMethod, *args, **kwargs):
    """
    Map a method call across multiple targets concurrently.

    Returns the per-target results in target order. If any call failed,
    the last failure is re-raised after all threads finish (every
    failure is logged with its traceback first).
    """
    # single target: call inline, no thread overhead
    if len(targets) == 1:
        return [classMethod(targets[0], *args, **kwargs)]
    results = Queue.Queue()
    threads = []
    for index in range(len(targets)):
        thread = threading.Thread(target=_parallel_run,
                args=(index, results, targets, classMethod, args, kwargs,))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    # collect outcomes back into target order
    ret = [None] * len(targets)
    last_error = None
    for thread in threads:
        index, ok, result = results.get()
        if ok:
            ret[index] = result
        else:
            last_error, trace = result
            log.error("Error updating target %s:\n%s",
                    targets[index].name, trace)
    if last_error is not None:
        raise last_error
    return ret
# mirror new trove info for troves we have already mirrored.
def mirrorTroveInfo(src, targets, mark, cfg, resync=False):
    """Mirror changed troveinfo (sigs/metadata) for mirrored troves.

    With resync=True all info records are re-read from *src*; otherwise
    only those changed since *mark*. Returns the total number of
    records updated across the targets.
    """
    if resync:
        log.debug("performing a full trove info sync")
        infoList = _getAllInfo(src, cfg)
        infoList = [(mark, t, ti) for t, ti in infoList ]
    else:
        log.debug("getting new trove info entries")
        infoList = _getNewInfo(src, cfg, mark)
    log.debug("obtained %d trove info records for mirroring", len(infoList))
    # drop records for troves we are not configured to mirror
    infoList = [(m,t,ti) for (m,t,ti) in infoList if _filterTup(t, cfg)]
    if not len(infoList):
        log.debug("no troveinfo records need to be mirrored")
        return 0
    log.debug("mirroring %d changed trove info records" % len(infoList))
    updateCount = sum(_parallel(targets,
        TargetRepository.setTroveInfo, infoList))
    return updateCount
# this mirrors all the troves marked as removed from the sourceRepos into the targetRepos
def mirrorRemoved(sourceRepos, targetRepos, troveSet, test = False, callback = ChangesetCallback()):
    """Propagate troves marked as removed on the source to a target.

    Troves missing from the target, or already of type REMOVED there,
    are skipped. Returns the number of removals committed.
    """
    if not troveSet:
        return 0
    log.debug("checking on %d removed troves", len(troveSet))
    # these removed troves better exist on the target
    present = targetRepos.hasTroves(list(troveSet))
    missing = [ x for x in troveSet if not present[x] ]
    # we can not have any "missing" troves while we mirror removals
    for t in missing:
        log.warning("Mirroring removed trove: valid trove not found on target: %s", t)
        troveSet.remove(t)
    # for the remaining removed troves, are any of them already mirrored?
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    cs = targetRepos.createChangeSet(jobList, recurse=False, withFiles=False,
                                     withFileContents=False, callback=callback)
    # anything already REMOVED on the target needs no work
    for trvCs in cs.iterNewTroveList():
        if trvCs.getType() == trove.TROVE_TYPE_REMOVED:
            troveSet.remove(trvCs.getNewNameVersionFlavor())
    log.debug("mirroring %d removed troves", len(troveSet))
    if not troveSet:
        return 0
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    log.debug("mirroring removed troves %s" % (displayJobList(jobList),))
    # grab the removed troves changeset
    cs = sourceRepos.createChangeSet(jobList, recurse = False,
                                     withFiles = False, withFileContents = False,
                                     callback = callback)
    log.debug("committing")
    targetRepos.commitChangeSet(cs, mirror = True, callback = callback)
    callback.done()
    return len(jobList)
# target repo class that helps dealing with testing mode
class TargetRepository:
    """Wrapper around a target repository client that honors dry-run mode.

    When constructed with test=True, every mutating operation logs what
    it would do and returns without touching the repository, so a
    mirror run can be rehearsed safely.
    """

    def __init__(self, repo, cfg, name = 'target', test=False):
        self.repo = repo
        self.test = test
        self.cfg = cfg
        # last-mirrored mark, cached as a string (xmlrpc chokes on longs)
        self.mark = None
        self.name = name
        # per-host cache of PGP keys already mirrored during this run
        self.__gpg = {}

    def getMirrorMark(self):
        """Return this target's last-mirrored mark as an integer."""
        if self.mark is None:
            self.mark = self.repo.getMirrorMark(self.cfg.host)
        self.mark = str(long(self.mark))
        return long(self.mark)

    def setMirrorMark(self, mark):
        """Record *mark* locally and (unless testing) on the target."""
        self.mark = str(long(mark))
        log.debug("%s setting mirror mark to %s", self.name, self.mark)
        if self.test:
            return
        self.repo.setMirrorMark(self.cfg.host, self.mark)

    def mirrorGPG(self, src, host):
        """Copy new PGP keys for *host* from *src*, at most once per host."""
        if self.cfg.noPGP:
            return
        # modernized from dict.has_key(), which is Python-2-only
        if host in self.__gpg:
            return
        keyList = src.getNewPGPKeys(host, -1)
        self.__gpg[host] = keyList
        if not len(keyList):
            return
        log.debug("%s adding %d gpg keys", self.name, len(keyList))
        if self.test:
            return
        self.repo.addPGPKeyList(self.cfg.host, keyList)

    def setTroveInfo(self, infoList):
        """Upload changed troveinfo records; returns how many were sent.

        Records whose troves are not yet on the target (they arrive
        later in this mirror run) and records with empty troveinfo are
        skipped. Falls back to signature-only upload for old servers.
        """
        log.debug("%s checking what troveinfo needs to be mirrored", self.name)
        # Items whose mark is the same as currentMark might not have their trove
        # available on the server (it might be coming as part of this mirror
        # run).
        # NOTE(review): marks are compared as strings here; verify that
        # over-inclusion (extra hasTroves checks) is the only effect.
        inQuestion = [ x[1] for x in infoList if str(long(x[0])) >= self.mark ]
        present = self.repo.hasTroves(inQuestion, hidden=True)
        # filter out the not present troves which will get mirrored in
        # the current mirror run
        infoList = [ (t, ti) for (m, t, ti) in infoList if present.get(t, True) ]
        # avoid busy work for troveinfos which are empty
        infoList = [ (t, ti) for (t, ti) in infoList if len(ti.freeze()) > 0 ]
        if self.test:
            return 0
        try:
            self.repo.setTroveInfo(infoList)
        except errors.InvalidServerVersion:
            # to older servers we can only transport sigs
            infoList = [ (t, ti.sigs.freeze()) for t, ti in infoList ]
            # only send up the troves that actually have a signature change
            infoList = [ x for x in infoList if len(x[1]) > 0 ]
            log.debug("%s pushing %d trove sigs...", self.name, len(infoList))
            self.repo.setTroveSigs(infoList)
        else:
            log.debug("%s uploaded %d info records", self.name, len(infoList))
        return len(infoList)

    def addTroveList(self, tl):
        """Return the subset of (mark, trove) items not yet on the target."""
        # Filter out troves which are already in the local repository. Since
        # the marks aren't distinct (they increase, but not monotonically), it's
        # possible that something new got committed with the same mark we
        # last updated to, so we have to look again at all of the troves in the
        # source repository with the last mark which made it into our target.
        present = self.repo.hasTroves([ x[1] for x in tl ], hidden = True)
        ret = [ x for x in tl if not present[x[1]] ]
        log.debug("%s found %d troves not present", self.name, len(ret))
        return ret

    def commitChangeSetFile(self, filename, hidden, callback):
        """Commit a changeset file to the target; returns the repo result."""
        if self.test:
            return 0
        # give each target its own callback prefix for progress output
        callback = copy.copy(callback)
        callback.setPrefix(self.name + ": ")
        t1 = time.time()
        ret = self.repo.commitChangeSetFile(filename, mirror=True, hidden=hidden,
                                            callback=callback)
        t2 = time.time()
        callback.done()
        hstr = ""
        if hidden: hstr = "hidden "
        log.debug("%s %scommit (%.2f sec)", self.name, hstr, t2-t1)
        return ret

    def presentHiddenTroves(self, newMark):
        """Reveal hidden commits on the target and advance the mark."""
        log.debug("%s unhiding comitted troves", self.name)
        self.repo.presentHiddenTroves(self.cfg.host)
        self.setMirrorMark(newMark)
# split a troveList in changeset jobs
def buildBundles(sourceRepos, target, troveList, absolute=False,
                 splitNodes=True):
    """Split *troveList* into changeset job bundles for one target.

    Groups troves by version/flavor, then delegates to buildJobList to
    match them against the target for relative changesets.
    """
    bundles = []
    log.debug("grouping %d troves based on version and flavor", len(troveList))
    groupList = groupTroves(troveList)
    log.debug("building grouped job list")
    bundles = buildJobList(sourceRepos, target.repo, groupList, absolute,
                           splitNodes)
    return bundles
# return the new list of troves to process after filtering and sanity checks
def getTroveList(src, cfg, mark):
    """Return (newMark, troveList) of troves committed after *mark*.

    The list is deduplicated, filtered through _filterTup, sorted
    deterministically, and trimmed so a version/flavor group is never
    split across queries (which could cause partial commits). When
    everything is filtered out, the highest mark seen is returned so
    the caller can keep paging.
    """
    # FIXME: getNewTroveList should accept and only return troves on
    # the labels we're interested in
    log.debug("looking for new troves")
    # make sure we always treat the mark as an integer
    troveList = [(long(m), (n,v,f), t) for m, (n,v,f), t in
                 src.getNewTroveList(cfg.host, str(mark))]
    if not len(troveList):
        # this should be the end - no more troves to look at
        log.debug("no new troves found")
        return (mark, [])
    # we need to protect ourselves from duplicate items in the troveList
    l = len(troveList)
    troveList = list(set(troveList))
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after duplicate elimination %d troves are left", len(troveList))
    # if we filter out the entire list of troves we have been
    # returned, we need to tell the caller what was the highest mark
    # we had so it can continue asking for more
    maxMark = max([x[0] for x in troveList])
    # filter out troves on labels and parse through matchTroves
    troveList = [ x for x in troveList if _filterTup(x[1],cfg) ]
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after label filtering and matchTroves %d troves are left", l)
    if not troveList:
        return (maxMark, [])
    # sort deterministically by mark, version, flavor, reverse name
    troveList.sort(lambda a,b: cmp(a[0], b[0]) or
                   cmp(a[1][1], b[1][1]) or
                   cmp(a[1][2], b[1][2]) or
                   cmp(b[1][0], a[1][0]) )
    log.debug("%d new troves returned", len(troveList))
    # We cut off the last troves that have the same flavor, version to
    # avoid committing an incomplete trove. This could happen if the
    # server side only listed some of a trove's components due to
    # server side limits on how many results it can return on each query
    lastIdx = len(troveList)-1
    # compare with the last one
    ml, (nl,vl,fl), tl = troveList[-1]
    while lastIdx >= 0:
        lastIdx -= 1
        m, (n,v,f), t = troveList[lastIdx]
        if v == vl and f == fl:
            continue
        lastIdx += 1
        break
    # the min mark of the troves we skip has to be higher than max
    # mark of troves we'll commit or otherwise we'll skip them for good...
    if lastIdx >= 0:
        firstMark = max([x[0] for x in troveList[:lastIdx]])
        lastMark = min([x[0] for x in troveList[lastIdx:]])
        if lastMark > firstMark:
            troveList = troveList[:lastIdx]
            log.debug("reduced new trove list to %d to avoid partial commits", len(troveList))
    # since we're returning at least one trove, the caller will make the next mark decision
    return (mark, troveList)
def _makeTargets(cfg, targetRepos, test = False):
    """Normalize *targetRepos* into a list of TargetRepository wrappers.

    Accepts a single client/wrapper or an iterable of them; raw
    NetworkRepositoryClient instances are wrapped, existing
    TargetRepository instances pass through unchanged.
    """
    if not hasattr(targetRepos, '__iter__'):
        targetRepos = [targetRepos]
    targets = []
    for candidate in targetRepos:
        if isinstance(candidate, netclient.NetworkRepositoryClient):
            targets.append(TargetRepository(candidate, cfg, test=test))
        elif isinstance(candidate, TargetRepository):
            targets.append(candidate)
        else:
            raise RuntimeError("Can not handle unknown target repository type",
                               candidate)
    return targets
# syncSigs really means "resync all info", but we keep the parameter
# name for compatibility reasons
# NOTE(review): the default for callback is a single ChangesetCallback()
# instance created once at import time and shared by every call that omits
# the argument -- confirm this sharing is intended.
def mirrorRepository(sourceRepos, targetRepos, cfg,
                     test = False, sync = False, syncSigs = False,
                     callback = ChangesetCallback(),
                     fastSync = False,
                     referenceRepos=None,
                     ):
    """Mirror one batch of changes from the source repository into targets.

    Returns 0 when there is nothing left to mirror, -1 when the caller
    should call again (the mirror mark advanced but nothing was committed
    in this pass), or the number of items processed in this pass.
    """
    if referenceRepos is None:
        referenceRepos = sourceRepos
    checkConfig(cfg)
    targets = _makeTargets(cfg, targetRepos, test)
    log.debug("-" * 20 + " start loop " + "-" * 20)
    # with multiple targets (or by explicit configuration) commits are made
    # hidden first, then revealed once every target has them
    hidden = len(targets) > 1 or cfg.useHiddenCommits
    if hidden:
        log.debug("will use hidden commits to synchronize target mirrors")
    marks = _parallel(targets, TargetRepository.getMirrorMark)
    if sync:
        # forced resync: start from the very beginning
        currentMark = -1
    else:
        # we use the oldest mark as a starting point (since we have to
        # get stuff from source for that oldest one anyway)
        currentMark = min(marks)
    log.debug("using common mirror mark %s", currentMark)
    # reset mirror mark to the lowest common denominator
    for t, mark in zip(targets, marks):
        if mark != currentMark:
            t.setMirrorMark(currentMark)
    # mirror gpg signatures from the src into the targets
    _parallel(targets, TargetRepository.mirrorGPG, referenceRepos, cfg.host)
    # mirror changed trove information for troves already mirrored
    if fastSync:
        updateCount = 0
        log.debug("skip trove info records sync because of fast-sync")
    else:
        updateCount = mirrorTroveInfo(referenceRepos, targets, currentMark,
                                      cfg, syncSigs)
    newMark, troveList = getTroveList(referenceRepos, cfg, currentMark)
    if not troveList:
        if newMark > currentMark:  # something was returned, but filtered out
            _parallel(targets, TargetRepository.setMirrorMark, newMark)
            return -1  # call again
        return 0
    # prepare a new max mark to be used when we need to break out of a loop
    crtMaxMark = max(long(x[0]) for x in troveList)
    if currentMark > 0 and crtMaxMark == currentMark:
        # if we're hung on the current max then we need to
        # forcibly advance the mark in case we're stuck
        crtMaxMark += 1  # only used if we filter out all troves below
    initTLlen = len(troveList)
    # removed troves are a special blend - we keep them separate
    removedSet = set([ x[1] for x in troveList if x[2] == trove.TROVE_TYPE_REMOVED ])
    troveList = [ (x[0], x[1]) for x in troveList if x[2] != trove.TROVE_TYPE_REMOVED ]
    # figure out if we need to recurse the group-troves
    if cfg.recurseGroups:
        # avoid adding duplicates
        troveSetList = set([x[1] for x in troveList])
        for mark, (name, version, flavor) in troveList:
            if trove.troveIsGroup(name):
                recTroves = recurseTrove(referenceRepos, name,
                                         version, flavor, callback=callback)
                # add sources here:
                if cfg.includeSources:
                    troveInfo = referenceRepos.getTroveInfo(
                        trove._TROVEINFO_TAG_SOURCENAME, recTroves)
                    sourceComps = set()
                    for nvf, source in itertools.izip(recTroves, troveInfo):
                        sourceComps.add((source(), nvf[1].getSourceVersion(),
                                         parseFlavor('')))
                    recTroves.extend(sourceComps)
                # add the results at the end with the current mark
                for (n, v, f) in recTroves:
                    if (n, v, f) not in troveSetList:
                        troveList.append((mark, (n, v, f)))
                        troveSetList.add((n, v, f))
        log.debug("after group recursion %d troves are needed", len(troveList))
        # we need to make sure we mirror the GPG keys of any newly added troves
        newHosts = set([x[1].getHost() for x in troveSetList.union(removedSet)])
        for host in newHosts.difference(set([cfg.host])):
            _parallel(targets, TargetRepository.mirrorGPG,
                      referenceRepos, host)
    # we check which troves from the troveList are needed on each
    # target and we split the troveList into separate lists depending
    # on how many targets require each
    byTarget = {}
    targetSetList = []
    if len(troveList):
        byTrove = {}
        for i, target in enumerate(targets):
            for t in target.addTroveList(troveList):
                bt = byTrove.setdefault(t, set())
                bt.add(i)
        # invert the dict by target now
        for trv, ts in byTrove.iteritems():
            targetSet = [ targets[i] for i in ts ]
            try:
                targetIdx = targetSetList.index(targetSet)
            except ValueError:
                targetSetList.append(targetSet)
                targetIdx = len(targetSetList)-1
            bt = byTarget.setdefault(targetIdx, [])
            bt.append(trv)
        del byTrove
    # if we were returned troves, but we filtered them all out, advance the
    # mark and signal "try again"
    if len(byTarget) == 0 and len(removedSet) == 0 and initTLlen:
        # we had troves and now we don't
        log.debug("no troves found for our label %s" % cfg.labels)
        _parallel(targets, TargetRepository.setMirrorMark, crtMaxMark)
        # try again
        return -1
    # now we get each section of the troveList for each targetSet. We
    # start off mirroring by those required by fewer targets, using
    # the assumption that those troves are what is required for the
    # targets to catch up to a common set
    if len(byTarget) > 1:
        log.debug("split %d troves into %d chunks by target", len(troveList), len(byTarget))
    # sort the targetSets by length
    targetSets = list(enumerate(targetSetList))
    targetSets.sort(lambda a,b: cmp(len(a[1]), len(b[1])))
    bundlesMark = 0
    for idx, targetSet in targetSets:
        troveList = byTarget[idx]
        if not troveList:  # XXX: should not happen...
            continue
        log.debug("mirroring %d troves into %d targets", len(troveList), len(targetSet))
        # since these troves are required for all targets, we can use
        # the "first" one to build the relative changeset requests
        target = list(targetSet)[0]
        bundles = buildBundles(sourceRepos, target, troveList,
                               cfg.absoluteChangesets, cfg.splitNodes)
        for i, bundle in enumerate(bundles):
            jobList = [ x[1] for x in bundle ]
            # XXX it's a shame we can't give a hint as to what server to use
            # to avoid having to open the changeset and read in bits of it
            if test:
                log.debug("test mode: not mirroring (%d of %d) %s" % (i + 1, len(bundles), jobList))
                updateCount += len(bundle)
                continue
            # download the changeset into a temp file, then commit it to
            # every target that needs this bundle
            (outFd, tmpName) = util.mkstemp()
            os.close(outFd)
            log.debug("getting (%d of %d) %s" % (i + 1, len(bundles), displayBundle(bundle)))
            try:
                sourceRepos.createChangeSetFile(jobList, tmpName, recurse = False,
                                                callback = callback, mirrorMode = True)
            except changeset.ChangeSetKeyConflictError:
                # conflicting changeset keys: fall back to job-by-job handling
                splitJobList(jobList, sourceRepos, targetSet, hidden=hidden,
                             callback=callback)
            else:
                _parallel(targetSet, TargetRepository.commitChangeSetFile,
                          tmpName, hidden=hidden, callback=callback)
            try:
                os.unlink(tmpName)
            except OSError:
                pass
            callback.done()
            updateCount += len(bundle)
        # compute the max mark of the bundles we committed
        mark = max([min([x[0] for x in bundle]) for bundle in bundles])
        if mark > bundlesMark:
            bundlesMark = mark
    else:  # only when we're all done looping advance mark to the new max
        # NOTE: this for/else always runs -- the loop body contains no break
        if bundlesMark == 0 or bundlesMark <= currentMark:
            bundlesMark = crtMaxMark  # avoid repeating the same query...
        if hidden:  # if we've hidden the last commits, show them now
            _parallel(targets, TargetRepository.presentHiddenTroves,
                      bundlesMark)
        else:
            _parallel(targets, TargetRepository.setMirrorMark, bundlesMark)
    # mirroring removed troves requires one by one processing
    for target in targets:
        copySet = removedSet.copy()
        updateCount += mirrorRemoved(referenceRepos, target.repo, copySet,
                                     test=test, callback=callback)
    # if this was a noop because the removed troves were already mirrored
    # we need to keep going
    if updateCount == 0 and len(removedSet):
        _parallel(targets, TargetRepository.setMirrorMark, crtMaxMark)
        return -1
    return updateCount
# check if the sourceRepos is in sync with targetRepos
def checkSyncRepos(config, sourceRepos, targetRepos):
    """Log the trove-set differences between sourceRepos and each target.

    Returns the total number of differing troves across all targets
    (0 means the repositories are in sync).
    """
    checkConfig(config)
    targets = _makeTargets(config, targetRepos)
    log.setVerbosity(log.DEBUG)

    def _flatten(troveSpec):
        # collapse {name: {version: [flavor, ...]}} into a set of
        # (name, version, flavor) tuples
        return set(
            (name, version, flavor)
            for name, versionD in troveSpec.iteritems()
            for version, flavorList in versionD.iteritems()
            for flavor in flavorList
        )

    # retrieve the set of troves from a given repository, restricted to the
    # configured labels when any are set
    def _getTroveSet(config, repo):
        if config.labels:
            query = {None: dict((label, '') for label in config.labels)}
            found = repo.getTroveVersionsByLabel(query, troveTypes = netclient.TROVE_QUERY_ALL)
        else:
            found = repo.getTroveVersionList(config.host, {None: None},
                                             troveTypes = netclient.TROVE_QUERY_ALL)
        return _flatten(found)

    # log the symmetric difference between two named trove sets and return
    # how many troves differ
    def _compare(src, dst):
        srcName, srcSet = src
        dstName, dstSet = dst
        onlyInSrc = srcSet.difference(dstSet)
        onlyInDst = dstSet.difference(srcSet)
        for x in onlyInSrc:
            log.debug(" - %s %s " % (srcName, x))
        for x in onlyInDst:
            log.debug(" + %s %s" % (dstName, x))
        return len(onlyInSrc) + len(onlyInDst)

    log.debug("Retrieving list of troves from source %s" % str(sourceRepos.c.map))
    sourceSet = _getTroveSet(config, sourceRepos)
    hasDiff = 0
    for target in targets:
        log.debug("Retrieving list of troves from %s %s" % (target.name, str(target.repo.c.map)))
        targetSet = _getTroveSet(config, target.repo)
        log.debug("Diffing source and %s" % target.name)
        hasDiff += _compare(("source", sourceSet), (target.name, targetSet))
    log.debug("Done")
    return hasDiff
if __name__ == '__main__':
    # Script entry point: run Main() and propagate its return value as the
    # process exit status.
    sys.exit(Main())
|
<reponame>ons-eq-team/eq-questionnaire-runner
from typing import Mapping
from flask import url_for
from app.questionnaire import QuestionnaireSchema
from .context import Context
from .list_context import ListContext
from .summary import Group
class SectionSummaryContext(Context):
    """Builds the template context rendered on a section summary page."""

    def __call__(self, current_location):
        """Return the complete context mapping for *current_location*."""
        section_summary = {
            "title": self._title_for_location(current_location),
            "summary_type": "SectionSummary",
            "answers_are_editable": True,
        }
        section_summary.update(self._build_summary(current_location))
        return {"summary": section_summary}

    def _build_summary(self, location):
        """
        Assemble the body of the summary context for a single location.

        Does not support generating multiple sections at a time (i.e.
        passing no list_item_id for a repeating section).
        """
        section = self._schema.get_section(location.section_id)
        summary_config = section.get("summary", {})
        context = {"collapsible": summary_config.get("collapsible", False)}

        # A schema-defined "items" summary takes precedence over groups.
        if summary_config.get("items"):
            context["custom_summary"] = list(
                self._custom_summary_elements(
                    section["summary"]["items"], location, section
                )
            )
            return context

        routing_path = self._router.routing_path(
            location.section_id, location.list_item_id
        )
        context["groups"] = [
            Group(
                group,
                routing_path,
                self._answer_store,
                self._list_store,
                self._metadata,
                self._schema,
                location,
                self._language,
            ).serialize()
            for group in section["groups"]
        ]
        return context

    def _title_for_location(self, location):
        """Resolve the page title: block title if present, else section
        title, overridden by a rendered repeating title for list items."""
        title = (
            self._schema.get_block(location.block_id).get("title")
            if location.block_id
            else None
        ) or self._schema.get_section(location.section_id).get("title")

        if location.list_item_id:
            repeating_title = self._schema.get_repeating_title_for_section(
                location.section_id
            )
            if repeating_title:
                return self._placeholder_renderer.render_placeholder(
                    repeating_title, location.list_item_id
                )
        return title

    def _custom_summary_elements(self, section_summary, current_location, section):
        """Yield a context mapping for every supported custom summary item."""
        for item in section_summary:
            # "List" is the only custom summary type handled here.
            if item["type"] == "List":
                yield self._list_summary_element(item, current_location, section)

    def _list_summary_element(
        self, summary: Mapping, current_location, section: Mapping
    ) -> Mapping:
        """Build the context for one list summary item, including its
        add link and rendered texts."""
        list_collector_block = self._schema.get_list_collectors_for_list(
            section, for_list=summary["for_list"]
        )[0]
        add_link = self._add_link(
            summary, current_location, section, list_collector_block
        )
        list_context = ListContext(
            self._language,
            self._schema,
            self._answer_store,
            self._list_store,
            self._progress_store,
            self._metadata,
        )
        rendered_summary = self._placeholder_renderer.render(
            summary, current_location.list_item_id
        )
        element = {
            "title": rendered_summary["title"],
            "type": rendered_summary["type"],
            "add_link": add_link,
            "add_link_text": rendered_summary["add_link_text"],
            "empty_list_text": rendered_summary["empty_list_text"],
            "list_name": rendered_summary["for_list"],
        }
        element.update(
            list_context(
                list_collector_block["summary"],
                for_list=list_collector_block["for_list"],
                return_to=current_location.block_id,
                edit_block_id=list_collector_block["edit_block"]["id"],
                remove_block_id=list_collector_block["remove_block"]["id"],
            )
        )
        return element

    def _add_link(self, summary, current_location, section, list_collector_block):
        """Return the URL used to add a list item: the collector's add block
        when it is on the routing path, otherwise the driving question
        block, otherwise None."""
        routing_path = self._router.routing_path(
            section["id"], current_location.list_item_id
        )
        if list_collector_block["id"] in routing_path:
            return url_for(
                "questionnaire.block",
                list_name=summary["for_list"],
                block_id=list_collector_block["add_block"]["id"],
                return_to=current_location.block_id,
            )
        driving_question_block = QuestionnaireSchema.get_driving_question_for_list(
            section, summary["for_list"]
        )
        if driving_question_block:
            return url_for(
                "questionnaire.block",
                block_id=driving_question_block["id"],
                return_to=current_location.block_id,
            )
        return None
|
<reponame>MArtinherz/sportsipy<filename>tests/integration/boxscore/test_ncaab_boxscore.py<gh_stars>100-1000
import mock
import os
import pandas as pd
from datetime import datetime
from flexmock import flexmock
from sportsipy import utils
from sportsipy.constants import HOME
from sportsipy.ncaab.constants import BOXSCORES_URL, SCHEDULE_URL
from sportsipy.ncaab.boxscore import Boxscore, Boxscores
# Month and year of the fixture pages; used to build the mocked schedule
# URLs and to pin the fake "today" returned by MockDateTime.
MONTH = 1
YEAR = 2020
# URI slug of the saved Louisville box score page the tests load from disk.
BOXSCORE = '2020-01-22-19-louisville'
def read_file(filename):
    """Return the contents of a saved HTML fixture.

    The fixture is looked up in the ``ncaab`` directory next to this file
    and decoded as UTF-8.
    """
    filepath = os.path.join(os.path.dirname(__file__), 'ncaab', filename)
    # Use a context manager so the file handle is closed deterministically;
    # the original ``open(...).read()`` leaked the handle until GC.
    with open(filepath, 'r', encoding='utf8') as fixture:
        return fixture.read()
def mock_pyquery(url):
    """Stand-in for requests.get that serves saved HTML pages from disk."""
    class FakeResponse:
        # Mimics the small response/pyquery surface the scraper touches:
        # status_code, html_contents, text, and callable sub-queries.
        def __init__(self, html_contents):
            self.status_code = 200
            self.html_contents = html_contents
            self.text = html_contents

        def __call__(self, div):
            # Any sub-query resolves to the canned stats table.
            return read_file('table.html')

    daily_pages = {
        BOXSCORES_URL % (MONTH, 5, YEAR): 'boxscores-1-5-2020.html',
        BOXSCORES_URL % (MONTH, 6, YEAR): 'boxscores-1-6-2020.html',
    }
    fixture = daily_pages.get(url, '%s.html' % BOXSCORE)
    return FakeResponse(read_file(fixture))
class MockDateTime:
    """Tiny datetime stand-in exposing only the year and month fields."""

    def __init__(self, year, month):
        # Store exactly what the code under test reads back.
        self.year, self.month = year, month
class TestNCAABBoxscore:
    """Integration tests for Boxscore, driven entirely by saved HTML pages
    served through the mock_pyquery patch (no network access)."""

    @mock.patch('requests.get', side_effect=mock_pyquery)
    def setup_method(self, *args, **kwargs):
        # Expected attribute values parsed from the saved Louisville page.
        self.results = {
            'date': 'January 22, 2020',
            'location': 'KFC Yum! Center, Louisville, Kentucky',
            'winner': HOME,
            'winning_name': 'Louisville',
            'winning_abbr': 'LOUISVILLE',
            'losing_name': 'Georgia Tech',
            'losing_abbr': 'GEORGIA-TECH',
            'pace': 66.2,
            'away_ranking': None,
            'away_win_percentage': .421,
            'away_wins': 8,
            'away_losses': 11,
            'away_minutes_played': 200,
            'away_field_goals': 22,
            'away_field_goal_attempts': 48,
            'away_field_goal_percentage': .458,
            'away_two_point_field_goals': 17,
            'away_two_point_field_goal_attempts': 31,
            'away_two_point_field_goal_percentage': .548,
            'away_three_point_field_goals': 5,
            'away_three_point_field_goal_attempts': 17,
            'away_three_point_field_goal_percentage': .294,
            'away_free_throws': 15,
            'away_free_throw_attempts': 20,
            'away_free_throw_percentage': .750,
            'away_offensive_rebounds': 7,
            'away_defensive_rebounds': 23,
            'away_total_rebounds': 30,
            'away_assists': 11,
            'away_steals': 4,
            'away_blocks': 4,
            'away_turnovers': 16,
            'away_personal_fouls': 18,
            'away_points': 64,
            'away_true_shooting_percentage': .557,
            'away_effective_field_goal_percentage': .510,
            'away_three_point_attempt_rate': .354,
            'away_free_throw_attempt_rate': .417,
            'away_offensive_rebound_percentage': 28.0,
            'away_defensive_rebound_percentage': 63.9,
            'away_total_rebound_percentage': 49.2,
            'away_assist_percentage': 50.0,
            'away_steal_percentage': 6.1,
            'away_block_percentage': 10.5,
            'away_turnover_percentage': 22.0,
            'away_offensive_rating': 97.0,
            'away_defensive_rating': 103.0,
            'home_ranking': 6,
            'home_win_percentage': .842,
            'home_wins': 16,
            'home_losses': 3,
            'home_minutes_played': 200,
            'home_field_goals': 24,
            'home_field_goal_attempts': 58,
            'home_field_goal_percentage': .414,
            'home_two_point_field_goals': 18,
            'home_two_point_field_goal_attempts': 38,
            'home_two_point_field_goal_percentage': .474,
            'home_three_point_field_goals': 6,
            'home_three_point_field_goal_attempts': 20,
            'home_three_point_field_goal_percentage': .300,
            'home_free_throws': 14,
            'home_free_throw_attempts': 23,
            'home_free_throw_percentage': .609,
            'home_offensive_rebounds': 13,
            'home_defensive_rebounds': 18,
            'home_total_rebounds': 31,
            'home_assists': 12,
            'home_steals': 9,
            'home_blocks': 3,
            'home_turnovers': 10,
            'home_personal_fouls': 17,
            'home_points': 68,
            'home_true_shooting_percentage': .493,
            'home_effective_field_goal_percentage': .466,
            'home_three_point_attempt_rate': .345,
            'home_free_throw_attempt_rate': .397,
            'home_offensive_rebound_percentage': 36.1,
            'home_defensive_rebound_percentage': 72.0,
            'home_total_rebound_percentage': 50.8,
            'home_assist_percentage': 50.0,
            'home_steal_percentage': 13.6,
            'home_block_percentage': 9.7,
            'home_turnover_percentage': 12.8,
            'home_offensive_rating': 103.0,
            'home_defensive_rating': 97.0
        }
        # Pin "today" so any date-dependent parsing is deterministic.
        flexmock(utils) \
            .should_receive('_todays_date') \
            .and_return(MockDateTime(YEAR, MONTH))

        self.boxscore = Boxscore('2020-01-22-19-louisville')

    def test_ncaab_boxscore_returns_requested_boxscore(self):
        """Every expected attribute matches the parsed fixture page."""
        for attribute, value in self.results.items():
            assert getattr(self.boxscore, attribute) == value
        assert getattr(self.boxscore, 'summary') == {
            # Box score is not parsed correctly
            'away': [],
            'home': []
        }

    def test_invalid_url_yields_empty_class(self):
        """When no HTML page can be retrieved, all fields stay None."""
        flexmock(Boxscore) \
            .should_receive('_retrieve_html_page') \
            .and_return(None)

        boxscore = Boxscore(BOXSCORE)

        for key, value in boxscore.__dict__.items():
            if key == '_uri':
                continue
            assert value is None

    def test_ncaab_boxscore_dataframe_returns_dataframe_of_all_values(self):
        """The dataframe property mirrors the expected results mapping."""
        df = pd.DataFrame([self.results], index=[BOXSCORE])

        # Pandas doesn't natively allow comparisons of DataFrames.
        # Concatenating the two DataFrames (the one generated during the test
        # and the expected one above) and dropping duplicate rows leaves only
        # the rows that are unique between the two frames. This allows a quick
        # check of the DataFrame to see if it is empty - if so, all rows are
        # duplicates, and they are equal.
        frames = [df, self.boxscore.dataframe]
        df1 = pd.concat(frames).drop_duplicates(keep=False)

        assert df1.empty

    def test_ncaab_boxscore_players(self):
        """Player rosters are parsed with per-player dataframes populated."""
        boxscore = Boxscore(BOXSCORE)

        assert len(boxscore.home_players) == 10
        assert len(boxscore.away_players) == 7

        for player in boxscore.home_players:
            assert not player.dataframe.empty
        for player in boxscore.away_players:
            assert not player.dataframe.empty

    def test_ncaab_boxscore_string_representation(self):
        """repr() names both teams and the game date."""
        expected = ('Boxscore for Georgia Tech '
                    'at Louisville (January 22, 2020)')

        boxscore = Boxscore(BOXSCORE)

        assert boxscore.__repr__() == expected
class TestNCAABBoxscores:
def setup_method(self):
self.expected = {
'1-5-2020': [
{'boxscore': '2020-01-05-13-michigan-state',
'away_name': 'Michigan',
'away_abbr': 'michigan',
'away_score': 69,
'away_rank': 12,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 87,
'home_rank': 14,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Michigan',
'losing_abbr': 'michigan'},
{'boxscore': '2020-01-05-13-saint-josephs',
'away_name': 'Dayton',
'away_abbr': 'dayton',
'away_score': 80,
'away_rank': 20,
'home_name': "<NAME>",
'home_abbr': 'saint-josephs',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Dayton',
'winning_abbr': 'dayton',
'losing_name': "<NAME>",
'losing_abbr': 'saint-josephs'},
{'boxscore': '2020-01-05-15-american',
'away_name': 'Boston University',
'away_abbr': 'boston-university',
'away_score': 63,
'away_rank': None,
'home_name': 'American',
'home_abbr': 'american',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'American',
'winning_abbr': 'american',
'losing_name': 'Boston University',
'losing_abbr': 'boston-university'},
{'boxscore': '2020-01-05-14-lafayette',
'away_name': 'Bucknell',
'away_abbr': 'bucknell',
'away_score': 78,
'away_rank': None,
'home_name': 'Lafayette',
'home_abbr': 'lafayette',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bucknell',
'winning_abbr': 'bucknell',
'losing_name': 'Lafayette',
'losing_abbr': 'lafayette'},
{'boxscore': '2020-01-05-14-duquesne',
'away_name': 'Davidson',
'away_abbr': 'davidson',
'away_score': 64,
'away_rank': None,
'home_name': 'Duquesne',
'home_abbr': 'duquesne',
'home_score': 71,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Duquesne',
'winning_abbr': 'duquesne',
'losing_name': 'Davidson',
'losing_abbr': 'davidson'},
{'boxscore': '2020-01-05-16-south-dakota',
'away_name': 'Denver',
'away_abbr': 'denver',
'away_score': 78,
'away_rank': None,
'home_name': 'South Dakota',
'home_abbr': 'south-dakota',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Dakota',
'winning_abbr': 'south-dakota',
'losing_name': 'Denver',
'losing_abbr': 'denver'},
{'boxscore': '2020-01-05-14-canisius',
'away_name': 'Fairfield',
'away_abbr': 'fairfield',
'away_score': 46,
'away_rank': None,
'home_name': 'Canisius',
'home_abbr': 'canisius',
'home_score': 42,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Fairfield',
'winning_abbr': 'fairfield',
'losing_name': 'Canisius',
'losing_abbr': 'canisius'},
{'boxscore': '2020-01-05-17-northwestern-state',
'away_name': '<NAME>',
'away_abbr': 'houston-baptist',
'away_score': 79,
'away_rank': None,
'home_name': 'Northwestern State',
'home_abbr': 'northwestern-state',
'home_score': 106,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern State',
'winning_abbr': 'northwestern-state',
'losing_name': '<NAME>',
'losing_abbr': 'houston-baptist'},
{'boxscore': '2020-01-05-14-milwaukee',
'away_name': 'UIC',
'away_abbr': 'illinois-chicago',
'away_score': 62,
'away_rank': None,
'home_name': 'Milwaukee',
'home_abbr': 'milwaukee',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Milwaukee',
'winning_abbr': 'milwaukee',
'losing_name': 'UIC',
'losing_abbr': 'illinois-chicago'},
{'boxscore': '2020-01-05-14-monmouth',
'away_name': 'Iona',
'away_abbr': 'iona',
'away_score': 61,
'away_rank': None,
'home_name': 'Monmouth',
'home_abbr': 'monmouth',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Monmouth',
'winning_abbr': 'monmouth',
'losing_name': 'Iona',
'losing_abbr': 'iona'},
{'boxscore': '2020-01-05-17-north-dakota',
'away_name': "<NAME>",
'away_abbr': 'ipfw',
'away_score': 69,
'away_rank': None,
'home_name': 'North Dakota',
'home_abbr': 'north-dakota',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Dakota',
'winning_abbr': 'north-dakota',
'losing_name': "<NAME>",
'losing_abbr': 'ipfw'},
{'boxscore': '2020-01-05-14-green-bay',
'away_name': 'IUPUI',
'away_abbr': 'iupui',
'away_score': 93,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'green-bay',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'IUPUI',
'winning_abbr': 'iupui',
'losing_name': '<NAME>',
'losing_abbr': 'green-bay'},
{'boxscore': '2020-01-05-14-fordham',
'away_name': '<NAME>',
'away_abbr': 'la-salle',
'away_score': 66,
'away_rank': None,
'home_name': 'Fordham',
'home_abbr': 'fordham',
'home_score': 60,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'la-salle',
'losing_name': 'Fordham',
'losing_abbr': 'fordham'},
{'boxscore': '2020-01-05-14-lehigh',
'away_name': 'Loyola (MD)',
'away_abbr': 'loyola-md',
'away_score': 71,
'away_rank': None,
'home_name': 'Lehigh',
'home_abbr': 'lehigh',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Lehigh',
'winning_abbr': 'lehigh',
'losing_name': 'Loyola (MD)',
'losing_abbr': 'loyola-md'},
{'boxscore': '2020-01-05-13-niagara',
'away_name': 'Manhattan',
'away_abbr': 'manhattan',
'away_score': 67,
'away_rank': None,
'home_name': 'Niagara',
'home_abbr': 'niagara',
'home_score': 62,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Manhattan',
'winning_abbr': 'manhattan',
'losing_name': 'Niagara',
'losing_abbr': 'niagara'},
{'boxscore': '2020-01-05-14-saint-peters',
'away_name': 'Marist',
'away_abbr': 'marist',
'away_score': 40,
'away_rank': None,
'home_name': "<NAME>",
'home_abbr': 'saint-peters',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': "<NAME>",
'winning_abbr': 'saint-peters',
'losing_name': 'Marist',
'losing_abbr': 'marist'},
{'boxscore': '2020-01-05-16-saint-louis',
'away_name': 'UMass',
'away_abbr': 'massachusetts',
'away_score': 80,
'away_rank': None,
'home_name': 'Saint Louis',
'home_abbr': 'saint-louis',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'saint-louis',
'losing_name': 'UMass',
'losing_abbr': 'massachusetts'},
{'boxscore': '2020-01-05-12-holy-cross',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 61,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'holy-cross',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'holy-cross',
'losing_name': 'Navy',
'losing_abbr': 'navy'},
{'boxscore': '2020-01-05-15-oakland',
'away_name': 'Northern Kentucky',
'away_abbr': 'northern-kentucky',
'away_score': 75,
'away_rank': None,
'home_name': 'Oakland',
'home_abbr': 'oakland',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northern Kentucky',
'winning_abbr': 'northern-kentucky',
'losing_name': 'Oakland',
'losing_abbr': 'oakland'},
{'boxscore': '2020-01-05-15-north-dakota-state',
'away_name': 'Northland',
'away_abbr': 'Northland',
'away_score': 43,
'away_rank': None,
'home_name': 'North Dakota State',
'home_abbr': 'north-dakota-state',
'home_score': 97,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Dakota State',
'winning_abbr': 'north-dakota-state',
'losing_name': 'Northland',
'losing_abbr': 'Northland'},
{'boxscore': '2020-01-05-19-minnesota',
'away_name': 'Northwestern',
'away_abbr': 'northwestern',
'away_score': 68,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Northwestern',
'losing_abbr': 'northwestern'},
{'boxscore': '2020-01-05-18-colorado',
'away_name': 'Oregon State',
'away_abbr': 'oregon-state',
'away_score': 76,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Colorado',
'losing_abbr': 'colorado'},
{'boxscore': '2020-01-05-20-illinois',
'away_name': 'Purdue',
'away_abbr': 'purdue',
'away_score': 37,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2020-01-05-12-rhode-island',
'away_name': 'Richmond',
'away_abbr': 'richmond',
'away_score': 69,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'rhode-island',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Richmond',
'winning_abbr': 'richmond',
'losing_name': '<NAME>',
'losing_abbr': 'rhode-island'},
{'boxscore': '2020-01-05-14-rider',
'away_name': 'Siena',
'away_abbr': 'siena',
'away_score': 77,
'away_rank': None,
'home_name': 'Rider',
'home_abbr': 'rider',
'home_score': 85,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Rider',
'winning_abbr': 'rider',
'losing_name': 'Siena',
'losing_abbr': 'siena'},
{'boxscore': '2020-01-05-22-washington',
'away_name': 'USC',
'away_abbr': 'southern-california',
'away_score': 40,
'away_rank': None,
'home_name': 'Washington',
'home_abbr': 'washington',
'home_score': 72,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'USC',
'losing_abbr': 'southern-california'},
{'boxscore': '2020-01-05-16-george-washington',
'away_name': 'St. Bonaventure',
'away_abbr': 'st-bonaventure',
'away_score': 71,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'george-washington',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'St. Bonaventure',
'winning_abbr': 'st-bonaventure',
'losing_name': '<NAME>',
'losing_abbr': 'george-washington'},
{'boxscore': '2020-01-05-16-xavier',
'away_name': "<NAME> (NY)",
'away_abbr': 'st-johns-ny',
'away_score': 67,
'away_rank': None,
'home_name': 'Xavier',
'home_abbr': 'xavier',
'home_score': 75,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Xavier',
'winning_abbr': 'xavier',
'losing_name': "<NAME> (NY)",
'losing_abbr': 'st-johns-ny'},
{'boxscore': '2020-01-05-13-maine',
'away_name': '<NAME>',
'away_abbr': 'stony-brook',
'away_score': 73,
'away_rank': None,
'home_name': 'Maine',
'home_abbr': 'maine',
'home_score': 52,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'stony-brook',
'losing_name': 'Maine',
'losing_abbr': 'maine'},
{'boxscore': '2020-01-05-12-george-mason',
'away_name': 'VCU',
'away_abbr': 'virginia-commonwealth',
'away_score': 72,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'george-mason',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'VCU',
'winning_abbr': 'virginia-commonwealth',
'losing_name': '<NAME>',
'losing_abbr': 'george-mason'},
{'boxscore': '2020-01-05-13-detroit-mercy',
'away_name': "Wright State",
'away_abbr': "wright-state",
'away_score': 70,
'away_rank': None,
'home_name': 'Detroit',
'home_abbr': 'detroit-mercy',
'home_score': 69,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Wright State',
'winning_abbr': 'wright-state',
'losing_name': "Detroit",
'losing_abbr': "detroit-mercy"}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_invalid_end(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5),
datetime(2020, 1, 4)).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_multiple_days(self, *args, **kwargs):
expected = {
'1-5-2020': [
{'boxscore': '2020-01-05-13-michigan-state',
'away_name': 'Michigan',
'away_abbr': 'michigan',
'away_score': 69,
'away_rank': 12,
'home_name': 'Michigan State',
'home_abbr': 'michigan-state',
'home_score': 87,
'home_rank': 14,
'non_di': False,
'top_25': True,
'winning_name': 'Michigan State',
'winning_abbr': 'michigan-state',
'losing_name': 'Michigan',
'losing_abbr': 'michigan'},
{'boxscore': '2020-01-05-13-saint-josephs',
'away_name': 'Dayton',
'away_abbr': 'dayton',
'away_score': 80,
'away_rank': 20,
'home_name': "<NAME>",
'home_abbr': 'saint-josephs',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'Dayton',
'winning_abbr': 'dayton',
'losing_name': "<NAME>",
'losing_abbr': 'saint-josephs'},
{'boxscore': '2020-01-05-15-american',
'away_name': 'Boston University',
'away_abbr': 'boston-university',
'away_score': 63,
'away_rank': None,
'home_name': 'American',
'home_abbr': 'american',
'home_score': 67,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'American',
'winning_abbr': 'american',
'losing_name': 'Boston University',
'losing_abbr': 'boston-university'},
{'boxscore': '2020-01-05-14-lafayette',
'away_name': 'Bucknell',
'away_abbr': 'bucknell',
'away_score': 78,
'away_rank': None,
'home_name': 'Lafayette',
'home_abbr': 'lafayette',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bucknell',
'winning_abbr': 'bucknell',
'losing_name': 'Lafayette',
'losing_abbr': 'lafayette'},
{'boxscore': '2020-01-05-14-duquesne',
'away_name': 'Davidson',
'away_abbr': 'davidson',
'away_score': 64,
'away_rank': None,
'home_name': 'Duquesne',
'home_abbr': 'duquesne',
'home_score': 71,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Duquesne',
'winning_abbr': 'duquesne',
'losing_name': 'Davidson',
'losing_abbr': 'davidson'},
{'boxscore': '2020-01-05-16-south-dakota',
'away_name': 'Denver',
'away_abbr': 'denver',
'away_score': 78,
'away_rank': None,
'home_name': 'South Dakota',
'home_abbr': 'south-dakota',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Dakota',
'winning_abbr': 'south-dakota',
'losing_name': 'Denver',
'losing_abbr': 'denver'},
{'boxscore': '2020-01-05-14-canisius',
'away_name': 'Fairfield',
'away_abbr': 'fairfield',
'away_score': 46,
'away_rank': None,
'home_name': 'Canisius',
'home_abbr': 'canisius',
'home_score': 42,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Fairfield',
'winning_abbr': 'fairfield',
'losing_name': 'Canisius',
'losing_abbr': 'canisius'},
{'boxscore': '2020-01-05-17-northwestern-state',
'away_name': '<NAME>',
'away_abbr': 'houston-baptist',
'away_score': 79,
'away_rank': None,
'home_name': 'Northwestern State',
'home_abbr': 'northwestern-state',
'home_score': 106,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Northwestern State',
'winning_abbr': 'northwestern-state',
'losing_name': 'Houston Baptist',
'losing_abbr': 'houston-baptist'},
{'boxscore': '2020-01-05-14-milwaukee',
'away_name': 'UIC',
'away_abbr': 'illinois-chicago',
'away_score': 62,
'away_rank': None,
'home_name': 'Milwaukee',
'home_abbr': 'milwaukee',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Milwaukee',
'winning_abbr': 'milwaukee',
'losing_name': 'UIC',
'losing_abbr': 'illinois-chicago'},
{'boxscore': '2020-01-05-14-monmouth',
'away_name': 'Iona',
'away_abbr': 'iona',
'away_score': 61,
'away_rank': None,
'home_name': 'Monmouth',
'home_abbr': 'monmouth',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Monmouth',
'winning_abbr': 'monmouth',
'losing_name': 'Iona',
'losing_abbr': 'iona'},
{'boxscore': '2020-01-05-17-north-dakota',
'away_name': "<NAME>",
'away_abbr': 'ipfw',
'away_score': 69,
'away_rank': None,
'home_name': 'North Dakota',
'home_abbr': 'north-dakota',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Dakota',
'winning_abbr': 'north-dakota',
'losing_name': "<NAME>",
'losing_abbr': 'ipfw'},
{'boxscore': '2020-01-05-14-green-bay',
'away_name': 'IUPUI',
'away_abbr': 'iupui',
'away_score': 93,
'away_rank': None,
'home_name': 'Green Bay',
'home_abbr': 'green-bay',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'IUPUI',
'winning_abbr': 'iupui',
'losing_name': '<NAME>',
'losing_abbr': 'green-bay'},
{'boxscore': '2020-01-05-14-fordham',
'away_name': '<NAME>',
'away_abbr': 'la-salle',
'away_score': 66,
'away_rank': None,
'home_name': 'Fordham',
'home_abbr': 'fordham',
'home_score': 60,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'la-salle',
'losing_name': 'Fordham',
'losing_abbr': 'fordham'},
{'boxscore': '2020-01-05-14-lehigh',
'away_name': 'Loyola (MD)',
'away_abbr': 'loyola-md',
'away_score': 71,
'away_rank': None,
'home_name': 'Lehigh',
'home_abbr': 'lehigh',
'home_score': 78,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Lehigh',
'winning_abbr': 'lehigh',
'losing_name': 'Loyola (MD)',
'losing_abbr': 'loyola-md'},
{'boxscore': '2020-01-05-13-niagara',
'away_name': 'Manhattan',
'away_abbr': 'manhattan',
'away_score': 67,
'away_rank': None,
'home_name': 'Niagara',
'home_abbr': 'niagara',
'home_score': 62,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Manhattan',
'winning_abbr': 'manhattan',
'losing_name': 'Niagara',
'losing_abbr': 'niagara'},
{'boxscore': '2020-01-05-14-saint-peters',
'away_name': 'Marist',
'away_abbr': 'marist',
'away_score': 40,
'away_rank': None,
'home_name': "<NAME>",
'home_abbr': 'saint-peters',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': "<NAME>",
'winning_abbr': 'saint-peters',
'losing_name': 'Marist',
'losing_abbr': 'marist'},
{'boxscore': '2020-01-05-16-saint-louis',
'away_name': 'UMass',
'away_abbr': 'massachusetts',
'away_score': 80,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'saint-louis',
'home_score': 83,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Saint Louis',
'winning_abbr': 'saint-louis',
'losing_name': 'UMass',
'losing_abbr': 'massachusetts'},
{'boxscore': '2020-01-05-12-holy-cross',
'away_name': 'Navy',
'away_abbr': 'navy',
'away_score': 61,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'holy-cross',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'holy-cross',
'losing_name': 'Navy',
'losing_abbr': 'navy'},
{'boxscore': '2020-01-05-15-oakland',
'away_name': 'Northern Kentucky',
'away_abbr': 'northern-kentucky',
'away_score': 75,
'away_rank': None,
'home_name': 'Oakland',
'home_abbr': 'oakland',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>entucky',
'winning_abbr': 'northern-kentucky',
'losing_name': 'Oakland',
'losing_abbr': 'oakland'},
{'boxscore': '2020-01-05-15-north-dakota-state',
'away_name': 'Northland',
'away_abbr': 'Northland',
'away_score': 43,
'away_rank': None,
'home_name': 'North Dakota State',
'home_abbr': 'north-dakota-state',
'home_score': 97,
'home_rank': None,
'non_di': True,
'top_25': False,
'winning_name': 'North Dakota State',
'winning_abbr': 'north-dakota-state',
'losing_name': 'Northland',
'losing_abbr': 'Northland'},
{'boxscore': '2020-01-05-19-minnesota',
'away_name': 'Northwestern',
'away_abbr': 'northwestern',
'away_score': 68,
'away_rank': None,
'home_name': 'Minnesota',
'home_abbr': 'minnesota',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Minnesota',
'winning_abbr': 'minnesota',
'losing_name': 'Northwestern',
'losing_abbr': 'northwestern'},
{'boxscore': '2020-01-05-18-colorado',
'away_name': 'Oregon State',
'away_abbr': 'oregon-state',
'away_score': 76,
'away_rank': None,
'home_name': 'Colorado',
'home_abbr': 'colorado',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Oregon State',
'winning_abbr': 'oregon-state',
'losing_name': 'Colorado',
'losing_abbr': 'colorado'},
{'boxscore': '2020-01-05-20-illinois',
'away_name': 'Purdue',
'away_abbr': 'purdue',
'away_score': 37,
'away_rank': None,
'home_name': 'Illinois',
'home_abbr': 'illinois',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Illinois',
'winning_abbr': 'illinois',
'losing_name': 'Purdue',
'losing_abbr': 'purdue'},
{'boxscore': '2020-01-05-12-rhode-island',
'away_name': 'Richmond',
'away_abbr': 'richmond',
'away_score': 69,
'away_rank': None,
'home_name': 'Rhode Island',
'home_abbr': 'rhode-island',
'home_score': 61,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Richmond',
'winning_abbr': 'richmond',
'losing_name': '<NAME>',
'losing_abbr': 'rhode-island'},
{'boxscore': '2020-01-05-14-rider',
'away_name': 'Siena',
'away_abbr': 'siena',
'away_score': 77,
'away_rank': None,
'home_name': 'Rider',
'home_abbr': 'rider',
'home_score': 85,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Rider',
'winning_abbr': 'rider',
'losing_name': 'Siena',
'losing_abbr': 'siena'},
{'boxscore': '2020-01-05-22-washington',
'away_name': 'USC',
'away_abbr': 'southern-california',
'away_score': 40,
'away_rank': None,
'home_name': 'Washington',
'home_abbr': 'washington',
'home_score': 72,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Washington',
'winning_abbr': 'washington',
'losing_name': 'USC',
'losing_abbr': 'southern-california'},
{'boxscore': '2020-01-05-16-george-washington',
'away_name': '<NAME>',
'away_abbr': 'st-bonaventure',
'away_score': 71,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'george-washington',
'home_score': 66,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'st-bonaventure',
'losing_name': '<NAME>',
'losing_abbr': 'george-washington'},
{'boxscore': '2020-01-05-16-xavier',
'away_name': "<NAME> (NY)",
'away_abbr': 'st-johns-ny',
'away_score': 67,
'away_rank': None,
'home_name': 'Xavier',
'home_abbr': 'xavier',
'home_score': 75,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Xavier',
'winning_abbr': 'xavier',
'losing_name': "<NAME> (NY)",
'losing_abbr': 'st-johns-ny'},
{'boxscore': '2020-01-05-13-maine',
'away_name': '<NAME>',
'away_abbr': 'stony-brook',
'away_score': 73,
'away_rank': None,
'home_name': 'Maine',
'home_abbr': 'maine',
'home_score': 52,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'stony-brook',
'losing_name': 'Maine',
'losing_abbr': 'maine'},
{'boxscore': '2020-01-05-12-george-mason',
'away_name': 'VCU',
'away_abbr': 'virginia-commonwealth',
'away_score': 72,
'away_rank': None,
'home_name': '<NAME>',
'home_abbr': 'george-mason',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'VCU',
'winning_abbr': 'virginia-commonwealth',
'losing_name': '<NAME>',
'losing_abbr': 'george-mason'},
{'boxscore': '2020-01-05-13-detroit-mercy',
'away_name': "<NAME>",
'away_abbr': "wright-state",
'away_score': 70,
'away_rank': None,
'home_name': 'Detroit',
'home_abbr': 'detroit-mercy',
'home_score': 69,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'wright-state',
'losing_name': "Detroit",
'losing_abbr': "detroit-mercy"}
],
'1-6-2020': [
{'boxscore': '2020-01-06-21-oklahoma-state',
'away_name': 'West Virginia',
'away_abbr': 'west-virginia',
'away_score': 55,
'away_rank': 17,
'home_name': 'Oklahoma State',
'home_abbr': 'oklahoma-state',
'home_score': 41,
'home_rank': None,
'non_di': False,
'top_25': True,
'winning_name': 'West Virginia',
'winning_abbr': 'west-virginia',
'losing_name': 'Oklahoma State',
'losing_abbr': 'oklahoma-state'},
{'boxscore': '2020-01-06-20-jackson-state',
'away_name': 'Alabama A&M',
'away_abbr': 'alabama-am',
'away_score': 66,
'away_rank': None,
'home_name': 'Jackson State',
'home_abbr': 'jackson-state',
'home_score': 57,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Alabama A&M',
'winning_abbr': 'alabama-am',
'losing_name': '<NAME>',
'losing_abbr': 'jackson-state'},
{'boxscore': '2020-01-06-20-grambling',
'away_name': 'Alabama State',
'away_abbr': 'alabama-state',
'away_score': 63,
'away_rank': None,
'home_name': 'Grambling',
'home_abbr': 'grambling',
'home_score': 68,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Grambling',
'winning_abbr': 'grambling',
'losing_name': 'Alabama State',
'losing_abbr': 'alabama-state'},
{'boxscore': '2020-01-06-20-texas-southern',
'away_name': 'Alcorn State',
'away_abbr': 'alcorn-state',
'away_score': 95,
'away_rank': None,
'home_name': 'Texas Southern',
'home_abbr': 'texas-southern',
'home_score': 80,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Al<NAME>',
'winning_abbr': 'alcorn-state',
'losing_name': 'Texas Southern',
'losing_abbr': 'texas-southern'},
{'boxscore': '2020-01-06-19-howard',
'away_name': 'Bethune-Cookman',
'away_abbr': 'bethune-cookman',
'away_score': 102,
'away_rank': None,
'home_name': 'Howard',
'home_abbr': 'howard',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Bethune-Cookman',
'winning_abbr': 'bethune-cookman',
'losing_name': 'Howard',
'losing_abbr': 'howard'},
{'boxscore': '2020-01-06-19-army',
'away_name': 'Colgate',
'away_abbr': 'colgate',
'away_score': 70,
'away_rank': None,
'home_name': 'Army',
'home_abbr': 'army',
'home_score': 65,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Colgate',
'winning_abbr': 'colgate',
'losing_name': 'Army',
'losing_abbr': 'army'},
{'boxscore': '2020-01-06-19-north-carolina-at',
'away_name': 'Florida A&M',
'away_abbr': 'florida-am',
'away_score': 90,
'away_rank': None,
'home_name': 'North Carolina A&T',
'home_abbr': 'north-carolina-at',
'home_score': 97,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'North Carolina A&T',
'winning_abbr': 'north-carolina-at',
'losing_name': 'Florida A&M',
'losing_abbr': 'florida-am'},
{'boxscore': '2020-01-06-19-arkansas-little-rock',
'away_name': 'Georgia Southern',
'away_abbr': 'georgia-southern',
'away_score': 73,
'away_rank': None,
'home_name': '<NAME>ock',
'home_abbr': 'little-rock',
'home_score': 79,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'little-rock',
'losing_name': 'Georgia Southern',
'losing_abbr': 'georgia-southern'},
{'boxscore': '2020-01-06-20-arkansas-state',
'away_name': 'Georgia State',
'away_abbr': 'georgia-state',
'away_score': 87,
'away_rank': None,
'home_name': 'Arkansas State',
'home_abbr': 'arkansas-state',
'home_score': 90,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Arkansas State',
'winning_abbr': 'arkansas-state',
'losing_name': 'Georgia State',
'losing_abbr': 'georgia-state'},
{'boxscore': '2020-01-06-19-appalachian-state',
'away_name': 'Louisiana',
'away_abbr': 'louisiana',
'away_score': 81,
'away_rank': None,
'home_name': 'Appalachian State',
'home_abbr': 'appalachian-state',
'home_score': 73,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Louisiana',
'winning_abbr': 'louisiana',
'losing_name': 'Appalachian State',
'losing_abbr': 'appalachian-state'},
{'boxscore': '2020-01-06-19-coastal-carolina',
'away_name': 'Louisiana-Monroe',
'away_abbr': 'louisiana-monroe',
'away_score': 64,
'away_rank': None,
'home_name': 'Coastal Carolina',
'home_abbr': 'coastal-carolnia',
'home_score': 93,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Coastal Carolina',
'winning_abbr': 'coastal-carolina',
'losing_name': 'Louisiana-Monroe',
'losing_abbr': 'louisiana-monroe'},
{'boxscore': '2020-01-06-19-coppin-state',
'away_name': 'Norfolk State',
'away_abbr': 'norfolk-state',
'away_score': 82,
'away_rank': None,
'home_name': 'Coppin State',
'home_abbr': 'coppin-state',
'home_score': 59,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Norfolk State',
'winning_abbr': 'norfolk-state',
'losing_name': 'Coppin State',
'losing_abbr': 'coppin-state'},
{'boxscore': '2020-01-06-20-texas-arlington',
'away_name': 'South Alabama',
'away_abbr': 'south-alabama',
'away_score': 66,
'away_rank': None,
'home_name': 'Texas-Arlington',
'home_abbr': 'texas-arlington',
'home_score': 54,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'South Alabama',
'winning_abbr': 'south-alabama',
'losing_name': 'Texas-Arlington',
'losing_abbr': 'texas-arlington'},
{'boxscore': '2020-01-06-19-morgan-state',
'away_name': 'South Carolina State',
'away_abbr': 'south-carolina-state',
'away_score': 63,
'away_rank': None,
'home_name': 'Morgan State',
'home_abbr': 'morgan-state',
'home_score': 77,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Morgan State',
'winning_abbr': 'morgan-state',
'losing_name': 'South Carolina State',
'losing_abbr': 'south-carolina-state'},
{'boxscore': '2020-01-06-21-prairie-view',
'away_name': 'Southern',
'away_abbr': 'southern',
'away_score': 54,
'away_rank': None,
'home_name': 'Prairie View',
'home_abbr': 'prairie-view',
'home_score': 64,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': '<NAME>',
'winning_abbr': 'prairie-view',
'losing_name': 'Southern',
'losing_abbr': 'southern'},
{'boxscore': '2020-01-06-20-texas-state',
'away_name': 'Troy',
'away_abbr': 'troy',
'away_score': 71,
'away_rank': None,
'home_name': 'Texas State',
'home_abbr': 'texas-state',
'home_score': 63,
'home_rank': None,
'non_di': False,
'top_25': False,
'winning_name': 'Troy',
'winning_abbr': 'troy',
'losing_name': 'Texas State',
'losing_abbr': 'texas-state'}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_string_representation(self, *args, **kwargs):
result = Boxscores(datetime(2020, 1, 5))
assert result.__repr__() == 'NCAAB games for 1-5-2020'
|
import random
import cv2
import numpy as np
import time
import os
import pymunk
import robolib.modelmanager.downloader as downloader
# ==Win==
# Number of points a player needs to win a match.
pointsToWin = 3
# ==MODEL==
# Download (or reuse a cached copy of) the Haar cascade for frontal faces
# and build the OpenCV face detector from it.
MODEL_FILE = downloader.get_model(downloader.HAARCASCADE_FRONTALFACE_ALT, True)
face_cascades = cv2.CascadeClassifier(MODEL_FILE)
# ==WINDOW==
def nothing(a):
    """No-op trackbar callback; OpenCV requires a callable here."""
    return None
# Main game window plus a separate 'Debug' window carrying tuning trackbars.
WINDOW_NAME = 'img'
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow(WINDOW_NAME, 1000, 800)
cv2.namedWindow('Debug', cv2.WINDOW_KEEPRATIO)
# Trackbars: face-detection size bounds (pixels) and the ball's speed cap.
cv2.createTrackbar("MinWidth", 'Debug', 45, 200, nothing)
cv2.createTrackbar("MinHeight", 'Debug', 45, 200, nothing)
cv2.createTrackbar("MaxWidth", 'Debug', 300, 600, nothing)
cv2.createTrackbar("MaxHeight", 'Debug', 300, 600, nothing)
cv2.createTrackbar("MaxSpeed", 'Debug', 400, 1000, nothing)
# Whether the main window is currently fullscreen (toggled in the key loop).
fullscreen = False
def min_size():
    """Return the (width, height) lower bound for face detection,
    read live from the debug trackbars."""
    width_floor = cv2.getTrackbarPos("MinWidth", 'Debug')
    height_floor = cv2.getTrackbarPos("MinHeight", 'Debug')
    return width_floor, height_floor
def max_size():
    """Return the (width, height) upper bound for face detection,
    read live from the debug trackbars."""
    width_cap = cv2.getTrackbarPos("MaxWidth", 'Debug')
    height_cap = cv2.getTrackbarPos("MaxHeight", 'Debug')
    return width_cap, height_cap
def max_speed():
    """Return the ball's speed cap (pixels/second) from the debug trackbar."""
    speed_cap = cv2.getTrackbarPos("MaxSpeed", 'Debug')
    return speed_cap
# ==OPEN CV==
# Grab one frame up-front so the frame dimensions are known before the loop.
cap = cv2.VideoCapture(0)
_, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = []
# minSize = (20, 20)
# maxSize = (150, 150)
# maxSize = (300, 300)
# ==GameStats==
frameCount = 0
paused = True
# Frames remaining before the game pauses when a face disappears.
timeout = 10
# Previous face centers, used to derive paddle velocities in the main loop.
lastFaces = [(0, 0), (0, 0)]
# ==Ball Stats==
# NOTE(review): `direction` is never read afterwards (reset() sets the
# velocity), and the x-component looks like it was meant to be
# (1 - directY**2) ** 0.5 for a unit vector — confirm before relying on it.
directY = random.uniform(-0.9, 0.9)
direction = ((1 - directY) ** 0.5, directY)
speed = 300  # Pixel/Sec
# Start the ball in the center of the frame.
ballPos = (img.shape[1] / 2, img.shape[0] / 2)
# ==FPS==
lastLoop = time.time()
def resize(l_tuple, l_new_len):
    """Clamp a 2D vector to a maximum magnitude.

    Returns the vector unchanged when its length is at most ``l_new_len``,
    otherwise scales it so its magnitude equals ``l_new_len``.
    """
    magnitude = (l_tuple[0] ** 2 + l_tuple[1] ** 2) ** 0.5
    if magnitude <= l_new_len:
        return l_tuple
    # Same operation order as a plain normalize-then-scale so float
    # results match exactly.
    return l_tuple[0] / magnitude * l_new_len, l_tuple[1] / magnitude * l_new_len
def reset():
    """Re-centre the ball, restore its base speed, and serve it toward a
    randomly chosen side."""
    global speed
    global ballPos
    speed = 300
    ballBody.position = (width / 2, height / 2)
    ballPos = ballBody.position
    # Serve left or right with equal probability.
    ballBody.velocity = (50, 0) if random.randint(0, 1) == 0 else (-50, 0)
# == Pymunk ==
# Build the physics world: a frictionless, gravity-free space containing the
# ball, two kinematic "paddle" circles driven by the detected faces, and four
# static border boxes around the playfield.
insets = (80, 20)  # Top, Bottom playfield margins (pixels)
pymunkSpace = pymunk.Space()
pymunkSpace.gravity = (0.0, 0.0)
mass = 10
radius = 25
inertia = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
ballBody = pymunk.Body(mass, inertia)
ballShape = pymunk.Circle(ballBody, radius, (0, 0))
ballShape.elasticity = 0.95
ballShape.friction = 0.9
pymunkSpace.add(ballBody, ballShape)
# Kinematic bodies: positions/velocities are set from face tracking each
# frame rather than by the physics simulation.
faceOneBody = pymunk.Body(body_type=pymunk.Body.KINEMATIC)
faceOneShape = pymunk.Circle(faceOneBody, 50, (0, 0))
faceOneShape.elasticity = 0.8
faceTwoBody = pymunk.Body(body_type=pymunk.Body.KINEMATIC)
faceTwoShape = pymunk.Circle(faceTwoBody, 50, (0, 0))
faceTwoShape.elasticity = 0.8
faceOneBody.position = (0, 0)
faceTwoBody.position = (0, 0)
# Playfield dimensions taken from the first captured frame.
width = img.shape[1]
height = img.shape[0]
pymunkSpace.add(faceOneBody, faceOneShape)
pymunkSpace.add(faceTwoBody, faceTwoShape)
# Static walls: thick boxes just outside the visible area so the ball
# bounces off the frame edges (left/right walls extend past the corners).
borderThickness = 100
bottomBody = pymunk.Body(body_type=pymunk.Body.STATIC)
bottomShape = pymunk.Poly(bottomBody, [(0, 0), (0, borderThickness), (width, borderThickness), (width, 0)])
bottomShape.elasticity = 1.0
bottomBody.position = 0, height-insets[1]
pymunkSpace.add(bottomBody, bottomShape)
topBody = pymunk.Body(body_type=pymunk.Body.STATIC)
topShape = pymunk.Poly(topBody, [(0, -borderThickness), (0, 0), (width, 0), (width, -borderThickness)])
topShape.elasticity = 1.0
topBody.position = 0, insets[0]
pymunkSpace.add(topBody, topShape)
leftBody = pymunk.Body(body_type=pymunk.Body.STATIC)
leftShape = pymunk.Poly(leftBody, [(-borderThickness, -borderThickness), (-borderThickness, height+borderThickness),
                                   (0, height+borderThickness), (0, -borderThickness)])
leftShape.elasticity = 1.0
leftBody.position = 0, 0
pymunkSpace.add(leftBody, leftShape)
rightBody = pymunk.Body(body_type=pymunk.Body.STATIC)
rightShape = pymunk.Poly(rightBody, [(0, -borderThickness), (0, height+borderThickness),
                                     (borderThickness, height+borderThickness), (borderThickness, -borderThickness)])
rightShape.elasticity = 1.0
rightBody.position = width, 0
pymunkSpace.add(rightBody, rightShape)
# slowdown scales both paddle velocities and the simulation timestep.
slowdown = 1
pointsLeft = 0
pointsRight = 0
reset()
debug = np.zeros(img.shape)
# Timestamp of the moment a player reached pointsToWin (0 = no win yet).
winTime = 0
shouldDebug = True
def find_one_and_only_face(l_faces):
    """Return the largest face rectangle from *l_faces* as [x, y, w, h].

    Candidates are compared by area (w * h); returns ``None`` when
    *l_faces* is empty.

    Bug fix: the comparison previously read the unrelated module-level
    name ``y`` instead of the candidate face's own size, so the selection
    was effectively arbitrary.
    """
    largest = None
    largest_size = 0
    for (lx, ly, lw, lh) in l_faces:
        area = lw * lh
        if area > largest_size:
            largest_size = area
            largest = [lx, ly, lw, lh]
    print(largest)
    return largest
def win():
    """Build the path for a snapshot of the winning frame (saving disabled).

    Bug fix: ``os.path.join(d, '/winFaces/Test0.png')`` discarded the
    script directory ``d`` entirely, because an absolute second component
    resets the join. The components are now joined relative to ``d``.
    The unused random value that was generated here has been removed.
    """
    d = os.path.dirname(__file__)
    filename = os.path.join(d, 'winFaces', 'Test0.png')
    print(filename)
    # cv2.imwrite(filename, img)
# == Performance ==
# == Better Faces ==
# Set while the post-win message is displayed; freezes the game simulation.
winPaused = False
# Main loop: capture a frame, detect one face per half of the screen, drive
# the physics paddles from the face motion, step the simulation, and draw.
while True:
    # == Calc FPS
    currentTime = time.time()
    delta = currentTime-lastLoop
    lastLoop = currentTime
    fps = 1/delta
    # == Read Image ==
    _, img = cap.read()
    debug = np.zeros(img.shape)
    # Mirror the frame so player movement matches what they see on screen.
    cv2.flip(img, 1, img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Split the frame into thirds: left third = player 1, right third =
    # player 2, middle third is a dead zone.
    field_size = int(img.shape[1] / 3)
    facesLeft, rejectLevelsLeft, levelWeightsLeft = face_cascades.detectMultiScale3(gray[0:img.shape[0], 0:field_size],
                                                                                   1.3, 5, 0, min_size(), max_size(), True)
    facesRight, rejectLevelsRight, levelWeightsRight = face_cascades.detectMultiScale3(
        gray[0:img.shape[0], 2 * field_size:3 * field_size], 1.3, 5, 0, min_size(), max_size(), True)
    if len(facesLeft) != 0 and len(facesRight) != 0:
        paused = False
        timeout = 10
        # Keep only the highest-confidence detection per side.
        leftInd = np.argmax(levelWeightsLeft)
        rightInd = np.argmax(levelWeightsRight)
        faces = [facesLeft[leftInd], facesRight[rightInd]]
        # The right face was detected in a cropped strip; shift its x back
        # into full-frame coordinates.
        faces[1][0] += 2 * field_size
    else:
        # Flag the side(s) missing a face once the grace period has expired.
        if len(facesLeft) == 0 and timeout == 0:
            cv2.rectangle(img, (0, 0), (field_size, img.shape[0]), (0, 0, 255), 5)
        if len(facesRight) == 0 and timeout == 0:
            cv2.rectangle(img, (2 * field_size, 0), (3 * field_size, img.shape[0]), (0, 0, 255), 5)
        if timeout > 0:
            timeout -= 1
        else:
            paused = True
    # == Game Loop ==
    if not paused and not winPaused:
        x1, y1, w1, h1 = faces[0]
        x2, y2, w2, h2 = faces[1]
        # Paddle velocity = face-center displacement / elapsed time
        # (delta clamped to avoid division by ~0 on very fast frames).
        currFaces = [(x1 + w1 / 2, y1 + h1 / 2), (x2 + w2 / 2, y2 + h2 / 2)]
        faceVelocities = np.divide(np.subtract(currFaces, lastFaces), max(delta, 0.00001))
        lastFaces = currFaces
        faceOneBody.velocity = faceVelocities[0]*slowdown
        faceTwoBody.velocity = faceVelocities[1]*slowdown
        # Cap the ball speed at the current speed limit.
        ballBody.velocity = resize(ballBody.velocity, speed)
        if delta != 0:
            pymunkSpace.step(delta/slowdown)
        # Move ball
        ballPos = ballBody.position
        # Detect goals
        if ballPos[0] < 25:
            # RESET
            pointsRight += 1
            reset()
        elif ballPos[0] + 25 > width:
            # RESET
            pointsLeft += 1
            reset()
        # Safety net: re-serve if the ball somehow escaped the playfield.
        if ballPos[0] < -borderThickness or ballPos[1] < -borderThickness or ballPos[0] > width+borderThickness or \
                ballPos[1] > height+borderThickness:
            reset()
        # Speed increase
        if speed < max_speed():
            speed = speed * 1.001
    # == Detect win ==
    if winTime == 0 and (pointsLeft == pointsToWin or pointsRight == pointsToWin):
        winTime = time.time()
        winPaused = True
        win()
    if pointsLeft == pointsToWin:
        cv2.putText(img, "Spieler links gewinnt!", (int(width / 2) - 200, int(height / 2)), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 2)
    elif pointsRight == pointsToWin:
        cv2.putText(img, "Spieler rechts gewinnt!", (int(width / 2) - 200, int(height / 2)), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 255, 0), 2)
    # == Reset on win == (clear the scores 3 seconds after a win)
    if winTime != 0 and time.time() - winTime > 3:
        winPaused = False
        pointsLeft = 0
        pointsRight = 0
        winTime = 0
    # == Draw Ball ==
    realBallPos = (int(ballPos[0]), int(ballPos[1]))
    cv2.circle(img, realBallPos, 20, (0, 0, 255), 5)
    cv2.circle(img, realBallPos, 10, (255, 0, 0), 5)
    cv2.circle(img, (int(faceOneBody.position.x), int(faceOneBody.position.y)), 50, (255, 0, 0), 3)
    cv2.circle(img, (int(faceTwoBody.position.x), int(faceTwoBody.position.y)), 50, (255, 0, 0), 3)
    # == Draw Fieldlines ==
    cv2.line(img, (int(img.shape[1] / 3), 0), (int(img.shape[1] / 3), img.shape[0]), (0, 0, 0), 2)
    cv2.line(img, (int(img.shape[1] / 3 * 2), 0), (int(img.shape[1] / 3 * 2), img.shape[0]), (0, 0, 0), 2)
    # cv2.line(img, (0, insets[0]), (width, insets[0]), (0, 0, 0), 2)
    # cv2.line(img, (0, height-insets[1]), (width, height-insets[1]), (0, 0, 0), 2)
    cv2.rectangle(img, (0, 0), (width, insets[0]), (255, 255, 255), -1)
    cv2.rectangle(img, (0, height-insets[1]), (width, height), (255, 255, 255), -1)
    # == Debug Data ==
    textPos = int(img.shape[1] / 2) - 100
    if shouldDebug:
        cv2.putText(debug, "Paused: {}".format(paused), (textPos, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(debug, "WinPaused: {}".format(winPaused), (textPos, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(debug, "Timeout: {}".format(timeout), (textPos, 75), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(debug, "Speed: {}".format(int(speed)), (textPos, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(debug, "FPS: {:.2f}".format(fps), (textPos, 125), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        for (x, y, w, h) in faces:
            cv2.putText(debug, "W{}H{}".format(w, h), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    # == Show Points
    pointsPos = textPos + 55
    if pointsLeft > 9:
        pointsPos -= 25
    cv2.putText(img, "{}:{}".format(pointsLeft, pointsRight),
                (pointsPos, insets[0]-5), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 2)
    cv2.putText(img, "Auf {} Punkte".format(pointsToWin), (0, insets[0]-5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    # == Update Windows ==
    cv2.imshow(WINDOW_NAME, img)
    if shouldDebug:
        cv2.imshow("Debug", debug)
        # NOTE(review): these draws happen AFTER imshow, so they only appear
        # on the NEXT displayed debug frame — confirm whether intentional.
        cv2.putText(debug, "Paused: {}".format(paused), (textPos, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(debug, "WinPaused: {}".format(winPaused), (textPos, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(debug, "Timeout: {}".format(timeout), (textPos, 75), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(debug, "Speed: {}".format(speed), (textPos, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        cv2.putText(debug, "FPS: {:.2f}".format(fps), (textPos, 125), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
        for (x, y, w, h) in facesLeft:
            cv2.rectangle(debug, (x, y), (x + w, y + h), (0, 0, 0), 2)
        for (x, y, w, h) in facesRight:
            cv2.rectangle(debug, (x+2 * field_size, y), (x + w + 2 * field_size, y + h), (0, 0, 0), 2)
        for (x, y, w, h) in faces:
            cv2.putText(debug, "W{}H{}".format(w, h), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    # == Key-Controls ==
    # ESC quits; '1'/'2' award a point; 'd' toggles debug; 'r' re-serves;
    # 'p' clears scores; 'b' re-serves and clears scores.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    elif k == 49:
        pointsLeft += 1
    elif k == 50:
        pointsRight += 1
    elif k == 100:
        shouldDebug = not shouldDebug
    elif k == 114:
        reset()
    elif k == 112:
        pointsLeft = 0
        pointsRight = 0
    elif k == 200:
        # NOTE(review): code 200 maps to no standard ASCII key — verify
        # which key is expected to toggle fullscreen here.
        cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_NORMAL if fullscreen else cv2.WINDOW_FULLSCREEN)
        fullscreen = not fullscreen
    elif k == 98:
        reset()
        pointsRight = 0
        pointsLeft = 0
cap.release()
cv2.destroyAllWindows()
|
from dynatrace import Dynatrace
from dynatrace.configuration_v1.oneagent_on_a_host import (
HostConfig,
HostAutoUpdateConfig,
MonitoringConfig,
AutoUpdateSetting,
TechMonitoringList,
EffectiveSetting,
MonitoringMode,
)
from dynatrace.configuration_v1.schemas import UpdateWindowsConfig, UpdateWindow, ConfigurationMetadata, Technology, TechnologyType, SettingScope
# Fixture identifiers that the mocked Dynatrace responses are keyed on;
# shared by every test in this module.
HOST_ID = "HOST-abcd123457"
CLUSTER_VERSION = "1.222.47.20210712-162143"
def test_get(dt: Dynatrace):
    """Fetching a host's OneAgent config yields a fully typed HostConfig."""
    oa_host_config = dt.oneagents_config_host.get(HOST_ID)
    # type checks
    typed_fields = [
        (oa_host_config, HostConfig),
        (oa_host_config.id, str),
        (oa_host_config.auto_update_config, HostAutoUpdateConfig),
        (oa_host_config.monitoring_config, MonitoringConfig),
        (oa_host_config.tech_monitoring_config_list, TechMonitoringList),
    ]
    for value, expected_type in typed_fields:
        assert isinstance(value, expected_type)
    # value checks
    assert oa_host_config.id == HOST_ID
def test_get_autoupdate(dt: Dynatrace):
    """The host auto-update config deserializes with the expected types and
    the fixture's disabled/empty values."""
    config = dt.oneagents_config_host.get_autoupdate(HOST_ID)
    # type checks
    assert isinstance(config, HostAutoUpdateConfig)
    assert isinstance(config.id, str)
    assert isinstance(config.setting, AutoUpdateSetting)
    assert isinstance(config.version, (str, type(None)))
    assert isinstance(config.effective_setting, EffectiveSetting)
    assert isinstance(config.effective_version, (str, type(None)))
    assert isinstance(config.update_windows, UpdateWindowsConfig)
    assert isinstance(config.metadata, ConfigurationMetadata)
    assert all(isinstance(window, UpdateWindow) for window in config.update_windows.windows)
    # value checks
    assert config.id == HOST_ID
    assert config.setting == AutoUpdateSetting.DISABLED
    assert config.version is None
    assert config.effective_setting == EffectiveSetting.DISABLED
    assert config.effective_version is None
    assert config.update_windows.windows == []
    assert config.metadata.cluster_version == CLUSTER_VERSION
def test_get_monitoring(dt: Dynatrace):
    """The host monitoring config reports full-stack monitoring enabled."""
    oa_monitoring = dt.oneagents_config_host.get_monitoring(HOST_ID)
    # type checks
    assert isinstance(oa_monitoring, MonitoringConfig)
    assert isinstance(oa_monitoring.id, str)
    assert isinstance(oa_monitoring.monitoring_enabled, bool)
    assert isinstance(oa_monitoring.monitoring_mode, MonitoringMode)
    assert isinstance(oa_monitoring.metadata, ConfigurationMetadata)
    # value checks
    assert oa_monitoring.id == HOST_ID
    # `is True` also pins the type; `== True` would accept 1 as well.
    assert oa_monitoring.monitoring_enabled is True
    assert oa_monitoring.monitoring_mode == MonitoringMode.FULL_STACK
    assert oa_monitoring.metadata.cluster_version == CLUSTER_VERSION
def test_get_technologies(dt: Dynatrace):
    """All monitored technologies deserialize with proper types, and the
    first entry matches the fixture's log-analytics settings."""
    oa_technologies = dt.oneagents_config_host.get_technologies(HOST_ID)
    # type checks
    assert isinstance(oa_technologies.metadata, ConfigurationMetadata)
    assert isinstance(oa_technologies, TechMonitoringList)
    assert all(isinstance(t, Technology) for t in oa_technologies.technologies)
    for tech in oa_technologies.technologies:
        assert isinstance(tech.type, TechnologyType)
        assert isinstance(tech.monitoring_enabled, bool)
        assert isinstance(tech.scope, (SettingScope, type(None)))
    # value checks
    assert len(oa_technologies.technologies) == 4
    assert oa_technologies.technologies[0].type == TechnologyType.LOG_ANALYTICS
    # `is True` also pins the type; `== True` would accept 1 as well.
    assert oa_technologies.technologies[0].monitoring_enabled is True
    assert oa_technologies.technologies[0].scope == SettingScope.ENVIRONMENT
    assert oa_technologies.metadata.cluster_version == CLUSTER_VERSION
|
import json
import copy
import requests
from django.core.management.base import BaseCommand
# Use "./manage.py help food_json" for help on how to use this script!
# Please update this on next use:
# (it shows the command we last used)
#
# ./manage.py food_json 1066 -ec 3977 3758 283 163 3 48 135 247
# -ep 2779663 2779666 2779667 2779668 2779669 2779650
# Which output
# Excluded categories 3977 - Super Bowl, 3758 - Chokolade & Slik, 283 - Chips, 163 - Snacks, 3 - Desserter,
# 48 - Vine, 135 - Spiritus -min. 18 år, 247 - Diverse
# Excluded products 2779663 - Red Bull, 2779666 - Øl (Carlsberg), 2779667 - Øl (Heineken), 2779668 - Øl (Tuborg),
# 2779669 - Husets vin, 2779650 - Chilisauce - License to Eat
CHROME_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
JUST_EAT_MENU_URL = 'https://www.just-eat.dk/menu/getproductsformenu?'
HELP = """
Loads food data from just eat
Needs a menuId, find it by going to a menu page on just-eat.dk and opening the developer console and executing:
JustEatData.MenuId
To find category and product ids execute:
document.querySelectorAll('.menuCard-category').forEach(function (c) {console.log(c.querySelector('h3').textContent.trim(), c.dataset.categoryId);c.querySelectorAll('.menu-product').forEach(function (p) {console.log(' ', p.querySelector('h4').textContent.trim(), p.dataset.productId);})})
"""
class Command(BaseCommand):
    """Download a just-eat.dk menu and write it to main/static/main/food.json.

    Excluded categories/products are blanked out (replaced by {}) rather
    than removed, and all product descriptions are stripped.
    """
    help = HELP
    menu = None

    def add_arguments(self, parser):
        """Register the menu id plus optional exclusion lists."""
        parser.add_argument('menuId', nargs=1, type=str)
        # default=[] so the membership tests in handle() work even when a
        # flag is omitted (argparse would otherwise supply None, and
        # `x in None` raises TypeError).
        parser.add_argument('--exclude-categories', '-ec', nargs='+',
                            type=int, dest='categories', default=[])
        parser.add_argument('--exclude-products', '-ep', nargs='+',
                            type=int, dest='products', default=[])

    def del_product(self, product):
        """Blank out the flat product-list entry matching product['Id']."""
        for i, item in enumerate(self.menu['products']):
            if item.get('Id', 0) == product['Id']:
                self.menu['products'][i] = {}
                break

    def get_product_name(self, product):
        """Return the display name (plus synonym, if any) for a product."""
        actual_product = next(
            (item for item in self.menu['products']
             if item.get('Id', 0) == product['Id']), None)
        if actual_product is None:
            # Entry was already blanked by del_product; fall back to the id
            # instead of crashing on None['Name'].
            return str(product['Id'])
        name = actual_product['Name']
        if actual_product['Syn']:
            name += ' ({})'.format(actual_product['Syn'])
        return name

    def handle(self, *args, **options):
        """Fetch the menu, blank exclusions, and dump the cleaned JSON."""
        r = requests.get(JUST_EAT_MENU_URL,
                         params={'menuId': options['menuId']},
                         headers={'User-Agent': CHROME_USER_AGENT})
        print(r.status_code)
        if r.status_code != 200:
            print("Error")
            print(r.text)
            # Bail out: previously execution fell through and r.json()
            # below would fail or write garbage.
            return
        self.menu = r.json()['Menu']
        excluded_categories = []
        excluded_products = []
        # Iterate over a deep copy so entries in self.menu can be blanked
        # without disturbing the iteration.
        for i, category in enumerate(copy.deepcopy(self.menu)['Categories']):
            if category['Id'] in options['categories']:
                excluded_categories.append(
                    '{} - {}'.format(category['Id'], category['Name']))
                self.menu['Categories'][i] = {}
            for j, item in enumerate(category['Items']):
                for k, product in enumerate(item['Products']):
                    if product['Id'] in options['products']:
                        excluded_products.append('{} - {}'.format(
                            product['Id'], self.get_product_name(product)))
                        # Guard: the category dict may already have been
                        # blanked to {} above, which would make the nested
                        # lookup raise KeyError.
                        if self.menu['Categories'][i]:
                            self.menu['Categories'][i]['Items'][j]['Products'][k] = {}
                    if (product['Id'] in options['products']
                            or category['Id'] in options['categories']):
                        self.del_product(product)
        # Clean out descriptions
        for i, product in enumerate(self.menu['products']):
            if product:
                self.menu['products'][i]['Desc'] = ''
        print('Excluded categories', ', '.join(excluded_categories))
        print('Excluded products', ', '.join(excluded_products))
        with open('main/static/main/food.json', 'w') as f:
            json.dump({'Menu': self.menu}, f, separators=(',', ':'), indent=None)
|
"""Support for Pollen.com allergen and cold/flu sensors."""
from datetime import timedelta
import logging
from statistics import mean
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, ATTR_STATE, CONF_MONITORED_CONDITIONS)
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_ALLERGEN_AMOUNT = 'allergen_amount'
ATTR_ALLERGEN_GENUS = 'allergen_genus'
ATTR_ALLERGEN_NAME = 'allergen_name'
ATTR_ALLERGEN_TYPE = 'allergen_type'
ATTR_CITY = 'city'
ATTR_OUTLOOK = 'outlook'
ATTR_RATING = 'rating'
ATTR_SEASON = 'season'
ATTR_TREND = 'trend'
ATTR_ZIP_CODE = 'zip_code'
CONF_ZIP_CODE = 'zip_code'
DEFAULT_ATTRIBUTION = 'Data provided by IQVIA™'
DEFAULT_SCAN_INTERVAL = timedelta(minutes=30)
TYPE_ALLERGY_FORECAST = 'allergy_average_forecasted'
TYPE_ALLERGY_HISTORIC = 'allergy_average_historical'
TYPE_ALLERGY_INDEX = 'allergy_index'
TYPE_ALLERGY_OUTLOOK = 'allergy_outlook'
TYPE_ALLERGY_TODAY = 'allergy_index_today'
TYPE_ALLERGY_TOMORROW = 'allergy_index_tomorrow'
TYPE_ALLERGY_YESTERDAY = 'allergy_index_yesterday'
TYPE_ASTHMA_FORECAST = 'asthma_average_forecasted'
TYPE_ASTHMA_HISTORIC = 'asthma_average_historical'
TYPE_ASTHMA_INDEX = 'asthma_index'
TYPE_ASTHMA_TODAY = 'asthma_index_today'
TYPE_ASTHMA_TOMORROW = 'asthma_index_tomorrow'
TYPE_ASTHMA_YESTERDAY = 'asthma_index_yesterday'
TYPE_DISEASE_FORECAST = 'disease_average_forecasted'
SENSORS = {
TYPE_ALLERGY_FORECAST: (
'ForecastSensor', 'Allergy Index: Forecasted Average', 'mdi:flower'),
TYPE_ALLERGY_HISTORIC: (
'HistoricalSensor', 'Allergy Index: Historical Average', 'mdi:flower'),
TYPE_ALLERGY_TODAY: ('IndexSensor', 'Allergy Index: Today', 'mdi:flower'),
TYPE_ALLERGY_TOMORROW: (
'IndexSensor', 'Allergy Index: Tomorrow', 'mdi:flower'),
TYPE_ALLERGY_YESTERDAY: (
'IndexSensor', 'Allergy Index: Yesterday', 'mdi:flower'),
TYPE_ASTHMA_TODAY: ('IndexSensor', 'Asthma Index: Today', 'mdi:flower'),
TYPE_ASTHMA_TOMORROW: (
'IndexSensor', 'Asthma Index: Tomorrow', 'mdi:flower'),
TYPE_ASTHMA_YESTERDAY: (
'IndexSensor', 'Asthma Index: Yesterday', 'mdi:flower'),
TYPE_ASTHMA_FORECAST: (
'ForecastSensor', 'Asthma Index: Forecasted Average', 'mdi:flower'),
TYPE_ASTHMA_HISTORIC: (
'HistoricalSensor', 'Asthma Index: Historical Average', 'mdi:flower'),
TYPE_DISEASE_FORECAST: (
'ForecastSensor', 'Cold & Flu: Forecasted Average', 'mdi:snowflake')
}
RATING_MAPPING = [{
'label': 'Low',
'minimum': 0.0,
'maximum': 2.4
}, {
'label': 'Low/Medium',
'minimum': 2.5,
'maximum': 4.8
}, {
'label': 'Medium',
'minimum': 4.9,
'maximum': 7.2
}, {
'label': 'Medium/High',
'minimum': 7.3,
'maximum': 9.6
}, {
'label': 'High',
'minimum': 9.7,
'maximum': 12
}]
TREND_INCREASING = 'Increasing'
TREND_SUBSIDING = 'Subsiding'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ZIP_CODE):
str,
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Configure the platform and add the sensors."""
    from pypollencom import Client

    zip_code = config[CONF_ZIP_CODE]
    monitored = config[CONF_MONITORED_CONDITIONS]

    websession = aiohttp_client.async_get_clientsession(hass)
    pollen = PollenComData(Client(zip_code, websession), monitored)
    await pollen.async_update()

    sensors = []
    for kind in monitored:
        class_name, friendly_name, icon = SENSORS[kind]
        # Sensor classes are resolved by name from this module's namespace.
        sensors.append(
            globals()[class_name](pollen, kind, friendly_name, icon, zip_code))
    async_add_entities(sensors, True)
def calculate_average_rating(indices):
    """Calculate the human-friendly historical allergy average."""
    labels = []
    for index in indices:
        for rating in RATING_MAPPING:
            if rating['minimum'] <= index <= rating['maximum']:
                labels.append(rating['label'])
    # Most frequent label wins.
    return max(set(labels), key=labels.count)
def calculate_trend(indices):
    """Calculate the "moving average" of a set of indices."""
    import numpy as np

    def moving_average(data, samples):
        """Determine the "moving average" (http://tinyurl.com/yaereb3c)."""
        cumulative = np.cumsum(data, dtype=float)
        cumulative[samples:] = cumulative[samples:] - cumulative[:-samples]
        return cumulative[samples - 1:] / samples

    # Strictly rising 4-sample moving average => increasing trend.
    deltas = np.diff(moving_average(np.array(indices), 4))
    return TREND_INCREASING if np.all(deltas > 0) else TREND_SUBSIDING
class BaseSensor(Entity):
    """Define a base Pollen.com sensor."""

    def __init__(self, pollen, kind, name, icon, zip_code):
        """Initialize the sensor."""
        self.pollen = pollen
        self._kind = kind
        self._name = name
        self._icon = icon
        self._zip_code = zip_code
        self._state = None
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}

    @property
    def available(self):
        """Return True if entity is available."""
        # The day-offset sensors all read from a shared "current index"
        # payload; every other kind has its own entry in pollen.data.
        if self._kind in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,
                          TYPE_ALLERGY_YESTERDAY):
            data_key = TYPE_ALLERGY_INDEX
        elif self._kind in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW,
                            TYPE_ASTHMA_YESTERDAY):
            data_key = TYPE_ASTHMA_INDEX
        else:
            data_key = self._kind
        return bool(self.pollen.data[data_key])

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs

    @property
    def icon(self):
        """Return the icon."""
        return self._icon

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def state(self):
        """Return the state."""
        return self._state

    @property
    def unique_id(self):
        """Return a unique, HASS-friendly identifier for this entity."""
        return '{0}_{1}'.format(self._zip_code, self._kind)

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return 'index'
class ForecastSensor(BaseSensor):
    """Define sensor related to forecast data."""

    async def async_update(self):
        """Update the sensor."""
        await self.pollen.async_update()
        if not self.pollen.data:
            return

        location = self.pollen.data[self._kind].get('Location')
        if not location:
            return

        indices = [period['Index'] for period in location['periods']]
        average = round(mean(indices), 1)
        # Exactly one rating band must contain the average.
        [rating] = [
            mapping['label'] for mapping in RATING_MAPPING
            if mapping['minimum'] <= average <= mapping['maximum']
        ]
        self._attrs.update({
            ATTR_CITY: location['City'].title(),
            ATTR_RATING: rating,
            ATTR_STATE: location['State'],
            ATTR_TREND: calculate_trend(indices),
            ATTR_ZIP_CODE: location['ZIP']
        })
        if self._kind == TYPE_ALLERGY_FORECAST:
            # The allergy forecast additionally carries outlook/season info.
            outlook = self.pollen.data[TYPE_ALLERGY_OUTLOOK]
            self._attrs[ATTR_OUTLOOK] = outlook.get('Outlook')
            self._attrs[ATTR_SEASON] = outlook.get('Season')
        self._state = average
class HistoricalSensor(BaseSensor):
    """Define sensor related to historical data."""

    async def async_update(self):
        """Update the sensor."""
        await self.pollen.async_update()
        if not self.pollen.data:
            return

        location = self.pollen.data[self._kind].get('Location')
        if not location:
            return

        indices = [period['Index'] for period in location['periods']]
        average = round(mean(indices), 1)
        self._attrs.update({
            ATTR_CITY: location['City'].title(),
            ATTR_RATING: calculate_average_rating(indices),
            ATTR_STATE: location['State'],
            ATTR_TREND: calculate_trend(indices),
            ATTR_ZIP_CODE: location['ZIP']
        })
        self._state = average
class IndexSensor(BaseSensor):
    """Define sensor related to indices."""

    async def async_update(self):
        """Update the sensor."""
        await self.pollen.async_update()
        if not self.pollen.data:
            return

        allergy_kinds = (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,
                         TYPE_ALLERGY_YESTERDAY)
        asthma_kinds = (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW,
                        TYPE_ASTHMA_YESTERDAY)

        location = {}
        if self._kind in allergy_kinds:
            location = self.pollen.data[TYPE_ALLERGY_INDEX].get('Location')
        elif self._kind in asthma_kinds:
            location = self.pollen.data[TYPE_ASTHMA_INDEX].get('Location')
        if not location:
            return

        # kind is e.g. 'allergy_index_today' -> period Type 'Today'.
        period_type = self._kind.split('_')[-1].title()
        [period] = [p for p in location['periods'] if p['Type'] == period_type]
        [rating] = [
            mapping['label'] for mapping in RATING_MAPPING
            if mapping['minimum'] <= period['Index'] <= mapping['maximum']
        ]
        self._attrs.update({
            ATTR_CITY: location['City'].title(),
            ATTR_RATING: rating,
            ATTR_STATE: location['State'],
            ATTR_ZIP_CODE: location['ZIP']
        })

        if self._kind in allergy_kinds:
            # Expose each allergen trigger as numbered attributes.
            for index, trigger in enumerate(period['Triggers'], start=1):
                self._attrs.update({
                    '{0}_{1}'.format(ATTR_ALLERGEN_GENUS, index):
                        trigger['Genus'],
                    '{0}_{1}'.format(ATTR_ALLERGEN_NAME, index):
                        trigger['Name'],
                    '{0}_{1}'.format(ATTR_ALLERGEN_TYPE, index):
                        trigger['PlantType'],
                })
        elif self._kind in asthma_kinds:
            for index, trigger in enumerate(period['Triggers'], start=1):
                self._attrs.update({
                    '{0}_{1}'.format(ATTR_ALLERGEN_NAME, index):
                        trigger['Name'],
                    '{0}_{1}'.format(ATTR_ALLERGEN_AMOUNT, index):
                        trigger['PPM'],
                })
        self._state = period['Index']
class PollenComData:
    """Define a data object to retrieve info from Pollen.com."""
    def __init__(self, client, sensor_types):
        """Initialize.

        client: pypollencom Client bound to a ZIP code and websession.
        sensor_types: iterable of TYPE_* keys selected for monitoring.
        """
        self._client = client
        self._sensor_types = sensor_types
        # Maps TYPE_* key -> raw API payload; {} marks a failed endpoint.
        self.data = {}
    async def _get_data(self, method, key):
        """Return API data from a specific call."""
        from pypollencom.errors import PollenComError
        try:
            data = await method()
            self.data[key] = data
        except PollenComError as err:
            # Per-endpoint failure: log and store {} so sensors reading this
            # key report unavailable instead of raising on missing data.
            _LOGGER.error('Unable to get "%s" data: %s', key, err)
            self.data[key] = {}
    # Throttle limits refreshes to one per DEFAULT_SCAN_INTERVAL even though
    # several sensors share this object and each calls async_update().
    @Throttle(DEFAULT_SCAN_INTERVAL)
    async def async_update(self):
        """Update Pollen.com data."""
        from pypollencom.errors import InvalidZipError
        # Pollen.com requires a bit more complicated error handling, given that
        # it sometimes has parts (but not the whole thing) go down:
        #
        # 1. If `InvalidZipError` is thrown, quit everything immediately.
        # 2. If an individual request throws any other error, try the others.
        try:
            if TYPE_ALLERGY_FORECAST in self._sensor_types:
                await self._get_data(
                    self._client.allergens.extended, TYPE_ALLERGY_FORECAST)
                # The allergy forecast sensor also exposes outlook/season
                # attributes, so fetch the outlook endpoint alongside it.
                await self._get_data(
                    self._client.allergens.outlook, TYPE_ALLERGY_OUTLOOK)
            if TYPE_ALLERGY_HISTORIC in self._sensor_types:
                await self._get_data(
                    self._client.allergens.historic, TYPE_ALLERGY_HISTORIC)
            # All three day-offset allergy sensors share one "current" payload.
            if any(s in self._sensor_types
                   for s in [TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,
                             TYPE_ALLERGY_YESTERDAY]):
                await self._get_data(
                    self._client.allergens.current, TYPE_ALLERGY_INDEX)
            if TYPE_ASTHMA_FORECAST in self._sensor_types:
                await self._get_data(
                    self._client.asthma.extended, TYPE_ASTHMA_FORECAST)
            if TYPE_ASTHMA_HISTORIC in self._sensor_types:
                await self._get_data(
                    self._client.asthma.historic, TYPE_ASTHMA_HISTORIC)
            # Likewise for the asthma day-offset sensors.
            if any(s in self._sensor_types
                   for s in [TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW,
                             TYPE_ASTHMA_YESTERDAY]):
                await self._get_data(
                    self._client.asthma.current, TYPE_ASTHMA_INDEX)
            if TYPE_DISEASE_FORECAST in self._sensor_types:
                await self._get_data(
                    self._client.disease.extended, TYPE_DISEASE_FORECAST)
            _LOGGER.debug("New data retrieved: %s", self.data)
        except InvalidZipError:
            # A bad ZIP invalidates every endpoint: clear all cached payloads.
            _LOGGER.error(
                "Cannot retrieve data for ZIP code: %s", self._client.zip_code)
            self.data = {}
|
import ast
import re
from ate import utils
from ate.exception import ParamsError
variable_regexp = r"\$([\w_]+)"
function_regexp = r"\$\{[\w_]+\([\$\w_ =,]*\)\}"
function_regexp_compile = re.compile(r"^\$\{([\w_]+)\(([\$\w_ =,]*)\)\}$")
def extract_variables(content):
    """ extract all variable names from content, which is in format $variable
    @param (str) content
    @return (list) variable name list

    e.g. $variable => ["variable"]
         /blog/$postid => ["postid"]
         /$var1/$var2 => ["var1", "var2"]
         abc => []
    """
    try:
        # $name, where name is one or more word characters
        return re.findall(r"\$([\w_]+)", content)
    except TypeError:
        # non-string content (e.g. None) contains no variables
        return []
def extract_functions(content):
    """ extract all function expressions from string content.

    Each match is returned as the full ``${func(...)}`` expression:
    re.findall with a group-free pattern yields whole matches, so the
    previous docstring examples showing bare ``func(...)`` were wrong.

    @param (str) content
    @return (list) function expression list

    e.g. ${func(5)} => ["${func(5)}"]
         ${func(a=1, b=2)} => ["${func(a=1, b=2)}"]
         /api/1000?_t=${get_timestamp()} => ["${get_timestamp()}"]
         /api/${add(1, 2)} => ["${add(1, 2)}"]
         "/api/${add(1, 2)}?_t=${get_timestamp()}"
             => ["${add(1, 2)}", "${get_timestamp()}"]
    """
    try:
        return re.findall(r"\$\{[\w_]+\([\$\w_ =,]*\)\}", content)
    except TypeError:
        # non-string content (e.g. None) contains no functions
        return []
def parse_string_value(str_value):
    """ parse string to number if possible
    e.g. "123" => 123
         "12.2" => 12.2
         "abc" => "abc"
         "$var" => "$var"
    """
    # (fixed docstring: the "12.2" example previously claimed => 12.3)
    try:
        return ast.literal_eval(str_value)
    except ValueError:
        # not a Python literal (e.g. "abc"): keep the raw string
        return str_value
    except SyntaxError:
        # e.g. $var, ${func}
        return str_value
def parse_function(content):
    """ parse function name and args from string content.
    @param (str) content
    @return (dict) function name and args
    e.g. ${func()} => {'func_name': 'func', 'args': [], 'kwargs': {}}
         ${func(5)} => {'func_name': 'func', 'args': [5], 'kwargs': {}}
         ${func(1, 2)} => {'func_name': 'func', 'args': [1, 2], 'kwargs': {}}
         ${func(a=1, b=2)} => {'func_name': 'func', 'args': [], 'kwargs': {'a': 1, 'b': 2}}
         ${func(1, 2, a=3, b=4)} => {'func_name': 'func', 'args': [1, 2], 'kwargs': {'a':3, 'b':4}}
    @raise ParamsError when content is not a valid ${func(...)} expression
    """
    function_meta = {
        "args": [],
        "kwargs": {}
    }
    matched = function_regexp_compile.match(content)
    if not matched:
        # previously this fell through to matched.group(1) and crashed
        # with an opaque AttributeError on None
        raise ParamsError(
            "%s is not a valid function expression!" % content)
    function_meta["func_name"] = matched.group(1)
    args_str = matched.group(2).replace(" ", "")
    if args_str == "":
        return function_meta
    args_list = args_str.split(',')
    for arg in args_list:
        if '=' in arg:
            key, value = arg.split('=')
            function_meta["kwargs"][key] = parse_string_value(value)
        else:
            function_meta["args"].append(parse_string_value(arg))
    return function_meta
def eval_content_variables(content, variable_mapping):
    """ replace all variables of string content with mapping value.
    @param (str) content
    @return (str) parsed content
    e.g.
        variable_mapping = {
            "var_1": "abc",
            "var_2": "def"
        }
        $var_1 => "abc"
        $var_1#XYZ => "abc#XYZ"
        /$var_1/$var_2/var3 => "/abc/def/var3"
        ${func($var_1, $var_2, xyz)} => "${func(abc, def, xyz)}"
    """
    for variable_name in extract_variables(content):
        if variable_name not in variable_mapping:
            raise ParamsError(
                "%s is not defined in bind variables!" % variable_name)
        variable_value = variable_mapping.get(variable_name)
        variable_token = "${}".format(variable_name)
        if variable_token == content:
            # content is exactly one variable: keep the bound value's type
            content = variable_value
        else:
            # content embeds the variable: substitute its string form once
            content = content.replace(
                variable_token, str(variable_value), 1)
    return content
class TestcaseParser(object):
    """Evaluate $variables and ${functions()} inside testcase data."""

    def __init__(self, variables_binds=None, functions_binds=None):
        """Initialize with optional variable/function binding mappings.

        ``None`` sentinels replace the previous mutable default arguments
        (``{}``): a shared dict default is evaluated once and leaks
        bindings between every parser created without explicit arguments.
        """
        self.bind_variables({} if variables_binds is None else variables_binds)
        self.bind_functions({} if functions_binds is None else functions_binds)

    def bind_variables(self, variables_binds):
        """ bind variables to current testcase parser
        @param (dict) variables_binds, variables binds mapping
            {
                "authorization": "a83de0ff8d2e896dbd8efb81ba14e17d",
                "random": "A2dEx",
                "data": {"name": "user", "password": "<PASSWORD>"},
                "uuid": 1000
            }
        """
        self.variables_binds = variables_binds

    def bind_functions(self, functions_binds):
        """ bind functions to current testcase parser
        @param (dict) functions_binds, functions binds mapping
            {
                "add_two_nums": lambda a, b=1: a + b
            }
        """
        self.functions_binds = functions_binds

    def eval_content_functions(self, content):
        """ evaluate each ${func(...)} in string content with bound functions.
        @param (str) content
        @return content with every function expression replaced by its result
        """
        functions_list = extract_functions(content)
        for func_content in functions_list:
            function_meta = parse_function(func_content)
            func_name = function_meta['func_name']
            func = self.functions_binds.get(func_name)
            if func is None:
                raise ParamsError(
                    "%s is not defined in bind functions!" % func_name)
            args = function_meta.get('args', [])
            kwargs = function_meta.get('kwargs', {})
            # args/kwargs may themselves contain $variables or functions
            args = self.parse_content_with_bindings(args)
            kwargs = self.parse_content_with_bindings(kwargs)
            eval_value = func(*args, **kwargs)
            if func_content == content:
                # content is exactly one function: keep the evaluated type
                content = eval_value
            else:
                # content embeds function(s): substitute the string form once
                content = content.replace(
                    func_content,
                    str(eval_value), 1
                )
        return content

    def parse_content_with_bindings(self, content):
        """ parse content recursively, each variable and function in content will be evaluated.
        @param (dict) content in any data structure
            {
                "url": "http://127.0.0.1:5000/api/users/$uid/${add_two_nums(1, 1)}",
                "method": "POST",
                "headers": {
                    "Content-Type": "application/json",
                    "authorization": "$authorization",
                    "random": "$random",
                    "sum": "${add_two_nums(1, 2)}"
                },
                "body": "$data"
            }
        @return (dict) parsed content with evaluated bind values
            {
                "url": "http://127.0.0.1:5000/api/users/1000/2",
                "method": "POST",
                "headers": {
                    "Content-Type": "application/json",
                    "authorization": "a83de0ff8d2e896dbd8efb81ba14e17d",
                    "random": "A2dEx",
                    "sum": 3
                },
                "body": {"name": "user", "password": "<PASSWORD>"}
            }
        """
        if isinstance(content, (list, tuple)):
            return [
                self.parse_content_with_bindings(item)
                for item in content
            ]
        if isinstance(content, dict):
            evaluated_data = {}
            for key, value in content.items():
                eval_key = self.parse_content_with_bindings(key)
                eval_value = self.parse_content_with_bindings(value)
                evaluated_data[eval_key] = eval_value
            return evaluated_data
        if isinstance(content, (int, float)):
            return content
        # content is in string format here
        content = "" if content is None else content.strip()
        # replace functions with evaluated value
        # Notice: eval_content_functions must be called before eval_content_variables
        content = self.eval_content_functions(content)
        # replace variables with binding value
        content = eval_content_variables(content, self.variables_binds)
        return content
|
'''
Tests for ph5api
'''
import unittest
from ph5.core import ph5api
class TestPH5API(unittest.TestCase):
def setUp(self):
self.ph5API_object = ph5api.PH5(path='ph5/test_data/ph5',
nickname='master.ph5')
def test_load_ph5(self):
"""
Tries to load the PH5 test file.
Checks if it is an instance of ph5.core.ph5api.PH5
"""
self.assertTrue(isinstance(self.ph5API_object, ph5api.PH5))
self.assertTrue(self.ph5API_object.ph5.isopen)
def test_experiment_t(self):
"""
check reading of experiment table
"""
# experiment table is initally empty
self.assertIsNone(self.ph5API_object.Experiment_t)
# load experiment table and it shouldn't be empty
self.ph5API_object.read_experiment_t()
self.assertIsNotNone(self.ph5API_object.Experiment_t)
# keys should match
keys = ['experiment_id_s', 'net_code_s', 'nickname_s', 'longname_s',
'PIs_s', 'institutions_s', 'north_west_corner/X/value_d',
'north_west_corner/X/units_s', 'north_west_corner/Y/value_d',
'north_west_corner/Y/units_s', 'north_west_corner/Z/value_d',
'north_west_corner/Z/units_s',
'north_west_corner/coordinate_system_s',
'north_west_corner/projection_s',
'north_west_corner/ellipsoid_s',
'north_west_corner/description_s',
'south_east_corner/X/value_d', 'south_east_corner/X/units_s',
'south_east_corner/Y/value_d', 'south_east_corner/Y/units_s',
'south_east_corner/Z/value_d', 'south_east_corner/Z/units_s',
'south_east_corner/coordinate_system_s',
'south_east_corner/projection_s',
'south_east_corner/ellipsoid_s',
'south_east_corner/description_s', 'summary_paragraph_s',
'time_stamp/ascii_s', 'time_stamp/epoch_l',
'time_stamp/micro_seconds_i', 'time_stamp/type_s']
self.assertEqual(keys, self.ph5API_object.Experiment_t['keys'])
# expect only one row in experiment table
self.assertEqual(1, len(self.ph5API_object.Experiment_t['rows']))
# make sure experiment table matches what we think it should
experiment_t = self.ph5API_object.Experiment_t['rows']
experiment_t[0]['net_code_s']
self.assertEqual(experiment_t[0]['net_code_s'], 'AA')
self.assertEqual(experiment_t[0]['experiment_id_s'], '99-999')
self.assertEqual(experiment_t[0]['nickname_s'], 'PH5 Test')
self.assertEqual(experiment_t[0]['longname_s'], 'PH5 TEST SET')
self.assertEqual(experiment_t[0]['PIs_s'], '<NAME>')
self.assertEqual(experiment_t[0]['institutions_s'], 'PASSCAL')
    def test_array_t(self):
        """
        Test reading of the array tables and spot-check station/channel
        contents for arrays built by several different loaders.
        """
        # make sure array_t_names is empty then load them
        # and make sure they match expected array names
        self.assertFalse(self.ph5API_object.Array_t_names)
        self.ph5API_object.read_array_t_names()
        self.assertTrue(self.ph5API_object.Array_t_names)
        self.assertTrue('Array_t_001' in self.ph5API_object.Array_t_names)
        self.assertEqual([1, 2, 3],
                         self.ph5API_object.channels_Array_t('Array_t_001'))
        self.assertTrue('Array_t_002' in self.ph5API_object.Array_t_names)
        self.assertEqual([1],
                         self.ph5API_object.channels_Array_t('Array_t_002'))
        self.assertTrue('Array_t_003' in self.ph5API_object.Array_t_names)
        self.assertEqual([1],
                         self.ph5API_object.channels_Array_t('Array_t_003'))
        self.assertTrue('Array_t_004' in self.ph5API_object.Array_t_names)
        self.assertEqual([-2],
                         self.ph5API_object.channels_Array_t('Array_t_004'))
        self.assertTrue('Array_t_008' in self.ph5API_object.Array_t_names)
        self.assertEqual([1, 2, 3],
                         self.ph5API_object.channels_Array_t('Array_t_008'))
        self.assertTrue('Array_t_009' in self.ph5API_object.Array_t_names)
        self.assertEqual([1],
                         self.ph5API_object.channels_Array_t('Array_t_009'))
        # now test to see if certain values are in each array as expected
        # checking 3rd channel of array 1... auto generated by segdtoph5
        # NOTE(review): Array_t is subscripted like a dict further below, so
        # hasattr() with a key string is presumably always False regardless of
        # load state -- confirm these pre-load checks exercise anything.
        self.assertFalse(hasattr(self.ph5API_object.Array_t, 'Array_t_001'))
        self.ph5API_object.read_array_t('Array_t_001')
        # check keys
        keys = ['id_s', 'location/X/value_d', 'location/X/units_s',
                'location/Y/value_d', 'location/Y/units_s',
                'location/Z/value_d', 'location/Z/units_s',
                'location/coordinate_system_s', 'location/projection_s',
                'location/ellipsoid_s', 'location/description_s',
                'deploy_time/ascii_s', 'deploy_time/epoch_l',
                'deploy_time/micro_seconds_i', 'deploy_time/type_s',
                'pickup_time/ascii_s', 'pickup_time/epoch_l',
                'pickup_time/micro_seconds_i', 'pickup_time/type_s',
                'das/serial_number_s', 'das/model_s', 'das/manufacturer_s',
                'das/notes_s', 'sensor/serial_number_s', 'sensor/model_s',
                'sensor/manufacturer_s', 'sensor/notes_s', 'description_s',
                'seed_band_code_s', 'sample_rate_i',
                'sample_rate_multiplier_i', 'seed_instrument_code_s',
                'seed_orientation_code_s', 'seed_location_code_s',
                'seed_station_name_s', 'channel_number_i',
                'receiver_table_n_i', 'response_table_n_i']
        self.assertEqual(keys,
                         self.ph5API_object.Array_t['Array_t_001']['keys'])
        self.assertEqual([1, 2, 3],
                         self.ph5API_object.channels('Array_t_001', '500'))
        # station '500', channel 3 of array 1
        channel = self.ph5API_object.Array_t['Array_t_001']['byid']\
            .get('500').get(3)
        self.assertEqual(channel[0]['id_s'], '500')
        self.assertEqual(channel[0]['seed_station_name_s'], '500')
        self.assertEqual(channel[0]['das/serial_number_s'], '3X500')
        self.assertEqual(channel[0]['location/X/value_d'], -105.405489539)
        self.assertEqual(channel[0]['location/Y/value_d'], 47.6790599342)
        self.assertEqual(channel[0]['location/Z/value_d'], 0.0)
        self.assertEqual(channel[0]['das/model_s'], 'ZLAND 3C')
        self.assertEqual(channel[0]['sample_rate_i'], 500)
        self.assertEqual(channel[0]['sample_rate_multiplier_i'], 1)
        self.assertEqual(channel[0]['seed_band_code_s'], 'D')
        self.assertEqual(channel[0]['seed_instrument_code_s'], 'P')
        self.assertEqual(channel[0]['seed_orientation_code_s'], 'Z')
        self.assertEqual(channel[0]['receiver_table_n_i'], 0)
        self.assertEqual(channel[0]['response_table_n_i'], 7)
        self.assertEqual(channel[0]['channel_number_i'], 3)
        # checking 1st channel of array 2... auto generated by obsoytoph5
        self.assertFalse(hasattr(self.ph5API_object.Array_t, 'Array_t_002'))
        self.assertEqual([1],
                         self.ph5API_object.channels('Array_t_002', '0407'))
        self.ph5API_object.read_array_t('Array_t_002')
        channel = self.ph5API_object.Array_t['Array_t_002']['byid'] \
            .get('0407').get(1)
        self.assertEqual(channel[0]['id_s'], '0407')
        self.assertEqual(channel[0]['seed_station_name_s'], '0407')
        self.assertEqual(channel[0]['das/serial_number_s'], '5553')
        self.assertEqual(channel[0]['location/X/value_d'], 94.88888)
        self.assertEqual(channel[0]['location/Y/value_d'], 73.77777)
        self.assertEqual(channel[0]['location/Z/value_d'], 90.0)
        self.assertEqual(channel[0]['sample_rate_i'], 200)
        self.assertEqual(channel[0]['sample_rate_multiplier_i'], 1)
        self.assertEqual(channel[0]['seed_band_code_s'], 'H')
        self.assertEqual(channel[0]['seed_instrument_code_s'], 'H')
        self.assertEqual(channel[0]['seed_orientation_code_s'], 'N')
        self.assertEqual(channel[0]['receiver_table_n_i'], 1)
        self.assertEqual(channel[0]['response_table_n_i'], 5)
        self.assertEqual(channel[0]['channel_number_i'], 1)
        # checking 1st channel of array 4... auto generated by obsoytoph5
        # LOG so channel -2
        self.assertFalse(hasattr(self.ph5API_object.Array_t, 'Array_t_004'))
        self.assertEqual([-2],
                         self.ph5API_object.channels('Array_t_004', '0407'))
        self.ph5API_object.read_array_t('Array_t_004')
        channel = self.ph5API_object.Array_t['Array_t_004']['byid'] \
            .get('0407').get(-2)
        self.assertEqual(channel[0]['id_s'], '0407')
        self.assertEqual(channel[0]['seed_station_name_s'], '0407')
        self.assertEqual(channel[0]['das/serial_number_s'], '5553')
        self.assertEqual(channel[0]['location/X/value_d'], 94.88888)
        self.assertEqual(channel[0]['location/Y/value_d'], 73.77777)
        self.assertEqual(channel[0]['location/Z/value_d'], 90.0)
        self.assertEqual(channel[0]['sample_rate_i'], 0)
        self.assertEqual(channel[0]['sample_rate_multiplier_i'], 1)
        self.assertEqual(channel[0]['seed_band_code_s'], 'L')
        self.assertEqual(channel[0]['seed_instrument_code_s'], 'O')
        self.assertEqual(channel[0]['seed_orientation_code_s'], 'G')
        self.assertEqual(channel[0]['receiver_table_n_i'], 1)
        self.assertEqual(channel[0]['response_table_n_i'], -1)
        self.assertEqual(channel[0]['channel_number_i'], -2)
        # checking 2nd channel of array 8... hand created loaded by keftoph5
        self.assertFalse(hasattr(self.ph5API_object.Array_t, 'Array_t_008'))
        self.assertEqual([1, 2, 3],
                         self.ph5API_object.channels('Array_t_008', '8001'))
        self.ph5API_object.read_array_t('Array_t_008')
        channel = self.ph5API_object.Array_t['Array_t_008']['byid'] \
            .get('8001').get(2)
        self.assertEqual(channel[0]['id_s'], '8001')
        self.assertEqual(channel[0]['seed_station_name_s'], '8001')
        self.assertEqual(channel[0]['das/serial_number_s'], '9EEF')
        self.assertEqual(channel[0]['location/X/value_d'], -106.916169)
        self.assertEqual(channel[0]['location/Y/value_d'], 34.154673)
        self.assertEqual(channel[0]['location/Z/value_d'], 1403.0)
        self.assertEqual(channel[0]['sample_rate_i'], 100)
        self.assertEqual(channel[0]['sample_rate_multiplier_i'], 1)
        self.assertEqual(channel[0]['seed_band_code_s'], 'H')
        self.assertEqual(channel[0]['seed_instrument_code_s'], 'L')
        self.assertEqual(channel[0]['seed_orientation_code_s'], '1')
        self.assertEqual(channel[0]['receiver_table_n_i'], 1)
        self.assertEqual(channel[0]['response_table_n_i'], 7)
        self.assertEqual(channel[0]['channel_number_i'], 2)
        # array shouldn't have station 0407
        self.assertEqual([],
                         self.ph5API_object.channels('Array_t_008', '0407'))
        # array 9 should have channels
        self.assertNotEqual([],
                            self.ph5API_object.channels('Array_t_009',
                                                        '9001'))
        # array 1 should have channels
        self.assertNotEqual([],
                            self.ph5API_object.channels_Array_t(
                                'Array_t_001'))
        # no array 0
        self.assertFalse('Array_t_000' in self.ph5API_object.Array_t_names)
        # no array 5
        self.assertFalse('Array_t_005' in self.ph5API_object.Array_t_names)
    def test_index_t(self):
        """
        Test reading of index table and checking contents
        """
        # able to read index table
        self.assertFalse(self.ph5API_object.Index_t)
        self.ph5API_object.read_index_t()
        self.assertTrue(self.ph5API_object.Index_t)
        # it contains keys and they are correct
        self.assertTrue(self.ph5API_object.Index_t['keys'])
        keys = ['end_time/ascii_s', 'end_time/epoch_l',
                'end_time/micro_seconds_i', 'end_time/type_s',
                'external_file_name_s', 'hdf5_path_s', 'serial_number_s',
                'start_time/ascii_s', 'start_time/epoch_l',
                'start_time/micro_seconds_i', 'start_time/type_s',
                'time_stamp/ascii_s', 'time_stamp/epoch_l',
                'time_stamp/micro_seconds_i', 'time_stamp/type_s']
        self.assertEqual(keys, self.ph5API_object.Index_t['keys'])
        # are there as many rows as we expect?
        self.assertEqual(11, len(self.ph5API_object.Index_t['rows']))
        # check row 6 (index 5): its DAS serial number
        self.assertEqual('3X500',
                         self.ph5API_object.Index_t['rows'][5]
                         ['serial_number_s'])
    def test_receiver_t(self):
        """
        test reading receiver table
        :return:
        """
        # able to read receiver table
        self.assertFalse(self.ph5API_object.Receiver_t)
        self.ph5API_object.read_receiver_t()
        self.assertTrue(self.ph5API_object.Receiver_t)
        keys = ['orientation/azimuth/value_f',
                'orientation/azimuth/units_s',
                'orientation/dip/value_f',
                'orientation/dip/units_s',
                'orientation/description_s',
                'orientation/channel_number_i']
        self.assertEqual(keys, self.ph5API_object.Receiver_t['keys'])
        # expect 4 rows in receiver table
        self.assertEqual(4, len(self.ph5API_object.Receiver_t['rows']))
        # get each receiver table entry by its n_i number;
        # the four rows below cover Z (dip 90), N, E, and Z (dip -90)
        table = self.ph5API_object.get_receiver_t_by_n_i(0)
        self.assertEqual(0.0, table['orientation/azimuth/value_f'])
        self.assertEqual(90.0, table['orientation/dip/value_f'])
        self.assertEqual('Z', table['orientation/description_s'])
        table = self.ph5API_object.get_receiver_t_by_n_i(1)
        self.assertEqual(0.0, table['orientation/azimuth/value_f'])
        self.assertEqual(0.0, table['orientation/dip/value_f'])
        self.assertEqual('N', table['orientation/description_s'])
        table = self.ph5API_object.get_receiver_t_by_n_i(2)
        self.assertEqual(90.0, table['orientation/azimuth/value_f'])
        self.assertEqual(0.0, table['orientation/dip/value_f'])
        self.assertEqual('E', table['orientation/description_s'])
        table = self.ph5API_object.get_receiver_t_by_n_i(3)
        self.assertEqual(0.0, table['orientation/azimuth/value_f'])
        self.assertEqual(-90.0, table['orientation/dip/value_f'])
        self.assertEqual('Z', table['orientation/description_s'])
def test_das_t(self):
# read in das group names
self.assertFalse(self.ph5API_object.Das_g_names)
self.ph5API_object.read_das_g_names()
self.assertTrue(self.ph5API_object.Das_g_names)
# check that all dases are there
# rt125a das
self.assertTrue('Das_g_12183' in self.ph5API_object.Das_g_names)
# rt130 das
self.assertTrue('Das_g_9EEF' in self.ph5API_object.Das_g_names)
# Node das
self.assertTrue('Das_g_3X500' in self.ph5API_object.Das_g_names)
# miniSEED das
self.assertTrue('Das_g_5553' in self.ph5API_object.Das_g_names)
# test various read methods
# should NOT return anything
table = self.ph5API_object.query_das_t('3X500',
chan=8,
start_epoch=1502293592,
stop_epoch=1502309218,
sample_rate=500,
sample_rate_multiplier=1)
self.assertFalse(table)
self.ph5API_object.forget_das_t('3X500')
# should return anything
table = self.ph5API_object.query_das_t('3X500',
chan=1,
start_epoch=1502294430.380000,
stop_epoch=1502309218,
sample_rate=500,
sample_rate_multiplier=1)
self.assertTrue(table)
self.assertEqual(1, len(table))
# we should get the 2nd recording window
self.assertEqual(2, table[0]['event_number_i'])
self.ph5API_object.forget_das_t('3X500')
# test query LOG channel
table = self.ph5API_object.query_das_t('5553',
chan=-2,
start_epoch=1545088203,
stop_epoch=1547489525,
sample_rate=0,
sample_rate_multiplier=1)
self.assertTrue(table)
self.assertEqual(1, len(table))
# no samples in a textural LOG
self.assertEqual(0, table[0]['sample_count_i'])
self.ph5API_object.forget_das_t('5553')
# check read_das_t now
# read entire rt125a das table
self.ph5API_object.read_das_t('12183')
# we expect 9 rows
self.assertTrue(9, len(self.ph5API_object.Das_t['12183']['rows']))
# check that all the keys are there
keys = ['array_name_SOH_a', 'array_name_data_a', 'array_name_event_a',
'array_name_log_a', 'channel_number_i', 'event_number_i',
'raw_file_name_s', 'receiver_table_n_i', 'response_table_n_i',
'sample_count_i', 'sample_rate_i', 'sample_rate_multiplier_i',
'stream_number_i', 'time/ascii_s', 'time/epoch_l',
'time/micro_seconds_i', 'time/type_s', 'time_table_n_i']
self.assertEqual(keys, self.ph5API_object.Das_t['12183']['keys'])
# check an entry to make sure it is what we expect
self.assertEqual(1550850153,
self.ph5API_object.Das_t['12183']['rows']
[7]['time/epoch_l'])
# forget 12183 and read time range
self.ph5API_object.forget_das_t('12183')
self.assertFalse(hasattr(self.ph5API_object.Das_t, '12183'))
self.ph5API_object.read_das_t('12183', start_epoch=1550850093,
stop_epoch=1550850152)
# we should get 2 rows
self.assertTrue(2, len(self.ph5API_object.Das_t['12183']['rows']))
# rows should be for data array 6 and 7
self.assertEqual('Data_a_0006',
self.ph5API_object.Das_t['12183']['rows']
[0]['array_name_data_a'])
self.assertEqual('Data_a_0007',
self.ph5API_object.Das_t['12183']['rows']
[1]['array_name_data_a'])
self.ph5API_object.forget_das_t('12183')
# read das with multiple channels
self.ph5API_object.read_das_t('9EEF')
# we expect 3 rows and expect row 0 to be ch 1, 1 to be ch 2
# and 2, to be ch 3
self.assertTrue(3, len(self.ph5API_object.Das_t['9EEF']['rows']))
self.assertEqual(1,
self.ph5API_object.Das_t['9EEF']['rows']
[0]['channel_number_i'])
self.assertEqual(2,
self.ph5API_object.Das_t['9EEF']['rows']
[1]['channel_number_i'])
self.assertEqual(3,
self.ph5API_object.Das_t['9EEF']['rows']
[2]['channel_number_i'])
self.ph5API_object.forget_das_t('9EEF')
    def test_time_t(self):
        """
        Tests reading of time_t (the clock-correction table).
        """
        # table is lazily loaded; empty until read_time_t() is called
        self.assertFalse(self.ph5API_object.Time_t)
        self.ph5API_object.read_time_t()
        self.assertTrue(self.ph5API_object.Time_t)
        keys = ['corrected_i', 'das/serial_number_s', 'das/model_s',
                'das/manufacturer_s', 'das/notes_s', 'description_s',
                'end_time/ascii_s', 'end_time/epoch_l',
                'end_time/micro_seconds_i', 'end_time/type_s', 'offset_d',
                'slope_d', 'start_time/ascii_s', 'start_time/epoch_l',
                'start_time/micro_seconds_i', 'start_time/type_s']
        self.assertEqual(keys, self.ph5API_object.Time_t['keys'])
        # check an entry to make sure it is what we expect
        self.assertEqual(-8.10623e-06,
                         self.ph5API_object.Time_t['rows'][0]['offset_d'])
        self.assertEqual(-1.66452e-09,
                         self.ph5API_object.Time_t['rows'][0]['slope_d'])
        # get a time_t for a specific das
        # first try one that doesn't exist
        table = self.ph5API_object.get_time_t('12345')
        self.assertFalse(table)
        # now get a real one
        table = self.ph5API_object.get_time_t('12183')
        self.assertTrue(table)
        # the das entry matches the row-0 values verified above
        self.assertEqual('12183',
                         table[0]['das/serial_number_s'])
        self.assertEqual(-8.10623e-06,
                         table[0]['offset_d'])
        self.assertEqual(-1.66452e-09,
                         table[0]['slope_d'])
    def test_response_t(self):
        """
        Test reading of the response table and lookups by das rows
        and by n_i.
        """
        # lazily loaded; empty until read_response_t() is called
        self.assertFalse(self.ph5API_object.Response_t)
        self.ph5API_object.read_response_t()
        self.assertTrue(self.ph5API_object.Response_t)
        keys = ['n_i', 'bit_weight/value_d', 'bit_weight/units_s',
                'gain/units_s', 'gain/value_i', 'response_file_a',
                'response_file_das_a', 'response_file_sensor_a']
        self.assertEqual(keys, self.ph5API_object.Response_t['keys'])
        # check an entry to make sure it is what we expect
        # row 0: ZLAND3C node -- no separate sensor response file
        self.assertEqual(1.88039941931e-05,
                         self.ph5API_object.Response_t['rows'][0]
                         ['bit_weight/value_d'])
        self.assertEqual('/Experiment_g/Responses_g/ZLAND3C_500_1_24',
                         self.ph5API_object.Response_t['rows'][0]
                         ['response_file_das_a'])
        self.assertEqual('',
                         self.ph5API_object.Response_t['rows'][0]
                         ['response_file_sensor_a'])
        # row 1: rt130 das paired with a cmg3t sensor
        self.assertEqual(1.584,
                         self.ph5API_object.Response_t['rows'][1]
                         ['bit_weight/value_d'])
        self.assertEqual('/Experiment_g/Responses_g/rt130_100_1_1',
                         self.ph5API_object.Response_t['rows'][1]
                         ['response_file_das_a'])
        self.assertEqual('/Experiment_g/Responses_g/cmg3t',
                         self.ph5API_object.Response_t['rows'][1]
                         ['response_file_sensor_a'])
        # row 5: Q330/CMG3T entry carries no bit weight
        self.assertEqual(0.0,
                         self.ph5API_object.Response_t['rows'][5]
                         ['bit_weight/value_d'])
        self.assertEqual(
            '/Experiment_g/Responses_g/NoneQ330_NoneCMG3T_200HHN',
            self.ph5API_object.Response_t['rows'][5]
            ['response_file_das_a'])
        self.assertEqual('',
                         self.ph5API_object.Response_t['rows'][5]
                         ['response_file_sensor_a'])
        # row 4: rt125a das paired with a gs11v sensor
        self.assertEqual(5.96046447754e-08,
                         self.ph5API_object.Response_t['rows'][4]
                         ['bit_weight/value_d'])
        self.assertEqual('/Experiment_g/Responses_g/rt125a_500_1_32',
                         self.ph5API_object.Response_t['rows'][4]
                         ['response_file_das_a'])
        self.assertEqual('/Experiment_g/Responses_g/gs11v',
                         self.ph5API_object.Response_t['rows'][4]
                         ['response_file_sensor_a'])
        # response table for specific das
        self.ph5API_object.read_das_t('12183')
        # should fail giving it rows and keys
        table = self.ph5API_object.get_response_t(
            self.ph5API_object.Das_t['12183'])
        self.assertFalse(table)
        # should read given only rows
        table = self.ph5API_object.get_response_t(
            self.ph5API_object.Das_t['12183']['rows'])
        self.assertTrue(table)
        # table values are as expected (matches row 4 checked above)
        self.assertEqual(4, table['n_i'])
        self.assertEqual(5.96046447754e-08, table['bit_weight/value_d'])
        self.assertEqual('/Experiment_g/Responses_g/rt125a_500_1_32',
                         table['response_file_das_a'])
        self.assertEqual('/Experiment_g/Responses_g/gs11v',
                         table['response_file_sensor_a'])
        # response table by n_i
        # shouldn't exist
        table = self.ph5API_object.get_response_t_by_n_i(99)
        self.assertFalse(table)
        # get a good entry by n_i
        table = self.ph5API_object.get_response_t_by_n_i(6)
        self.assertTrue(table)
        self.assertEqual(6, table['n_i'])
        self.assertEqual(0.0, table['bit_weight/value_d'])
        self.assertEqual(
            '/Experiment_g/Responses_g/NoneQ330_NoneCMG3T_100LHN',
            table['response_file_das_a'])
        self.assertEqual('',
                         table['response_file_sensor_a'])
    def test_offset_t(self):
        """
        test methods related to reading offset table
        """
        # get list of offset_t names
        self.assertFalse(self.ph5API_object.Offset_t_names)
        self.ph5API_object.read_offset_t_names()
        self.assertTrue(self.ph5API_object.Offset_t_names)
        self.assertEqual(['Offset_t_001_001',
                          'Offset_t_008_001',
                          'Offset_t_002_001',
                          'Offset_t_004_001',
                          'Offset_t_009_001',
                          'Offset_t_003_001'],
                         self.ph5API_object.Offset_t_names)
        # read an offset_t, ordered by event id
        # NOTE(review): Offset_t is a dict, so these hasattr() checks are
        # always False; "'Offset_t_001_001' in ..." would be the real test.
        self.assertFalse(hasattr(self.ph5API_object.Offset_t,
                                 'Offset_t_001_001'))
        self.ph5API_object.read_offset_t(
            'Offset_t_001_001',
            id_order='event_id_s')
        self.assertTrue(self.ph5API_object.Offset_t['Offset_t_001_001'])
        keys = ['azimuth/value_f', 'azimuth/units_s', 'event_id_s',
                'offset/value_d', 'offset/units_s', 'receiver_id_s']
        self.assertEqual(keys,
                         self.ph5API_object.Offset_t
                         ['Offset_t_001_001']['keys'])
        # doesn't exist
        # read an offset_t
        self.assertFalse(hasattr(self.ph5API_object.Offset_t,
                                 'Offset_t_099_123'))
        self.ph5API_object.read_offset_t(
            'Offset_t_099_123',
            id_order='event_id_s')
        self.assertFalse(hasattr(self.ph5API_object.Offset_t,
                                 'Offset_t_099_123'))
        # read an offset_t, ordered by receiver id
        self.assertFalse(hasattr(self.ph5API_object.Offset_t,
                                 'Offset_t_008_001'))
        self.ph5API_object.read_offset_t(
            'Offset_t_008_001',
            id_order='receiver_id_s')
        self.assertTrue(self.ph5API_object.Offset_t['Offset_t_008_001'])
        # shot order offsets
        # should exist offset_t
        offset_t = self.ph5API_object.read_offsets_shot_order(
            'Array_t_009',
            '7001',
            'Event_t_001')
        # check metadata is correct (shot order keys by receiver id)
        self.assertEqual('7001', offset_t['9001']['event_id_s'])
        self.assertEqual(-7673.76009838, offset_t['9001']['offset/value_d'])
        self.assertEqual(
            -4.136306285858154, offset_t['9001']['azimuth/value_f'])
        self.assertEqual('9001', offset_t['9001']['receiver_id_s'])
        # should not exist offset_t
        offset_t = self.ph5API_object.read_offsets_shot_order(
            'Array_t_009',
            '7002',
            'Event_t_001')
        self.assertFalse(offset_t['9001'])
        # receiver order offsets (keyed by event id)
        # should exist
        offset_t = self.ph5API_object.read_offsets_receiver_order(
            'Array_t_009',
            '9001',
            'Event_t_001')
        # check metadata is correct
        self.assertEqual('7001', offset_t['7001']['event_id_s'])
        self.assertEqual(-7673.76009838, offset_t['7001']['offset/value_d'])
        self.assertEqual(
            -4.136306285858154, offset_t['7001']['azimuth/value_f'])
        self.assertEqual('9001', offset_t['7001']['receiver_id_s'])
        # should not exist offset_t
        offset_t = self.ph5API_object.read_offsets_receiver_order(
            'Array_t_009',
            '9002',
            'Event_t_001')
        self.assertFalse(offset_t['7001'])
        # test calculating offsets from station/event coordinates
        offset_t = self.ph5API_object.calc_offsets(
            'Array_t_009',
            '7001',
            'Event_t_001')
        keys = ['event_id_s', 'offset/value_d', 'azimuth/units_s',
                'offset/units_s', 'receiver_id_s', 'azimuth/value_f']
        self.assertEqual(keys, offset_t['keys'])
        self.assertEqual('7001', offset_t['byid']['9001']['event_id_s'])
        self.assertAlmostEqual(-7673.76009838,
                               offset_t['byid']['9001']['offset/value_d'], 6)
        self.assertAlmostEqual(-4.136306285858154,
                               offset_t['byid']['9001']['azimuth/value_f'], 6)
        self.assertEqual('9001', offset_t['byid']['9001']['receiver_id_s'])
        # get offset for a single station/event pair
        offset_t = self.ph5API_object.get_offset(
            'Array_t_009',
            '9001',
            'Event_t_001',
            '7001')
        self.assertEqual('7001', offset_t['event_id_s'])
        self.assertEqual('9001', offset_t['receiver_id_s'])
        # NOTE(review): abs() makes the expected value positive here while
        # the earlier checks expect -7673.76 -- confirm whether get_offset
        # returns the magnitude or the signed offset.
        self.assertAlmostEqual(abs(-7673.76009838),
                               offset_t['offset/value_d'], 6)
        self.assertAlmostEqual(-4.136306285858154,
                               offset_t['azimuth/value_f'], 6)
        # get offset doesn't exist
        offset_t = self.ph5API_object.get_offset(
            'Array_t_009',
            '9002',
            'Event_t_001',
            '7001')
        self.assertFalse(offset_t)
    def test_event_t(self):
        """
        Test methods related to event (shot) tables.
        """
        # test reading event table names
        self.assertFalse(self.ph5API_object.Event_t_names)
        self.ph5API_object.read_event_t_names()
        self.assertTrue(self.ph5API_object.Event_t_names)
        self.assertEqual(1, len(self.ph5API_object.Event_t_names))
        self.assertEqual('Event_t_001', self.ph5API_object.Event_t_names[0])
        # read event_t
        # NOTE(review): Event_t is a dict, so hasattr() here is always False
        self.assertFalse(hasattr(self.ph5API_object.Event_t, 'Event_t_001'))
        self.ph5API_object.read_event_t('Event_t_001')
        self.assertTrue(self.ph5API_object.Event_t['Event_t_001'])
        keys = ['id_s', 'location/X/value_d', 'location/X/units_s',
                'location/Y/value_d', 'location/Y/units_s',
                'location/Z/value_d', 'location/Z/units_s',
                'location/coordinate_system_s', 'location/projection_s',
                'location/ellipsoid_s', 'location/description_s',
                'time/ascii_s', 'time/epoch_l', 'time/micro_seconds_i',
                'time/type_s', 'size/value_d', 'size/units_s',
                'depth/value_d', 'depth/units_s', 'description_s']
        self.assertEqual(keys,
                         self.ph5API_object.Event_t['Event_t_001']['keys'])
        # entries are keyed by event id under 'byid'
        self.assertEqual(1550850060,
                         self.ph5API_object.Event_t['Event_t_001']['byid']
                         ['7001']['time/epoch_l'])
def test_sort_t(self):
"""
test methods related to sort_t
"""
# read the sort_t
self.assertFalse(self.ph5API_object.Sort_t)
self.ph5API_object.read_sort_t()
self.assertTrue(self.ph5API_object.Sort_t)
self.assertTrue('Array_t_001' in self.ph5API_object.Sort_t)
self.assertTrue('Array_t_002' in self.ph5API_object.Sort_t)
self.assertTrue('Array_t_003' in self.ph5API_object.Sort_t)
self.assertTrue('Array_t_008' in self.ph5API_object.Sort_t)
self.assertTrue('Array_t_009' in self.ph5API_object.Sort_t)
# shouldn't exist
self.assertFalse('Array_t_006' in self.ph5API_object.Sort_t)
# array 4 is real but log channels shouldn't be in sort
self.assertFalse('Array_t_004' in self.ph5API_object.Sort_t)
# get table entry on start time and array
sort_t = self.ph5API_object.get_sort_t(
1550849943,
'Array_t_009')
self.assertTrue(sort_t)
self.assertEqual(1, len(sort_t))
# entry doesn't exist
sort_t = self.ph5API_object.get_sort_t(
99999,
'Array_t_009')
self.assertFalse(sort_t)
sort_t = self.ph5API_object.get_sort_t(
99999,
'Array_t_999')
self.assertFalse(sort_t)
    def test_read_t(self):
        """
        Tests reading each table type and outputting kef-style text.
        """
        # Experiment table
        table = self.ph5API_object.read_t("Experiment_t")
        self.assertIn('/Experiment_g/Experiment_t', table)
        self.assertIn('experiment_id_s = 99-999', table)
        self.assertIn('net_code_s = AA', table)
        # Array tables
        table = self.ph5API_object.read_t("Array_t", 1)
        self.assertIn('\'id_s\': \'500\'', table)
        table = self.ph5API_object.read_t("Array_t", 8)
        self.assertIn('\'seed_station_name_s\': \'8001\'', table)
        # Response table
        table = self.ph5API_object.read_t("Response_t")
        self.assertIn('/Experiment_g/Responses_g/Response_t', table)
        self.assertIn('/Experiment_g/Responses_g/cmg3t', table)
        # Receiver table
        table = self.ph5API_object.read_t("Receiver_t")
        self.assertIn('/Experiment_g/Receivers_g/Receiver_t', table)
        self.assertIn('orientation/azimuth/value_f', table)
        # Index table
        table = self.ph5API_object.read_t("Index_t")
        self.assertIn('/Experiment_g/Receivers_g/Index_t', table)
        self.assertIn('./miniPH5_00001.ph5', table)
        # Das table -- only the requested das should appear
        table = self.ph5API_object.read_t("Das_t", '12183')
        self.assertIn('/Experiment_g/Receivers_g/Das_t_12183/Das_t', table)
        self.assertNotIn('/Experiment_g/Receivers_g/Das_g_9EEF/Das_t', table)
        self.assertIn('raw_file_name_s = I2183RAW.TRD', table)
        # a key error should be raised because there is no das XXX
        self.assertRaises(KeyError, self.ph5API_object.read_t, "Das_t", "XXX")
        # Time table
        table = self.ph5API_object.read_t("Time_t")
        self.assertIn('/Experiment_g/Receivers_g/Time_t', table)
        self.assertIn('das/serial_number_s = 12183', table)
        # Sort table
        # NOTE(review): expected path starts with '/Experiment_t/' -- likely
        # a typo for '/Experiment_g/'; confirm against actual read_t output.
        table = self.ph5API_object.read_t("Sort_t")
        self.assertIn('/Experiment_t/Sorts_g/Sort_t', table)
        # Event table
        table = self.ph5API_object.read_t("Event_t")
        self.assertIn('/Experiment_g/Sorts_g/Event_t_001', table)
        # Offset table
        table = self.ph5API_object.read_t("Offset_t")
        self.assertIn('Experiment_g/Sorts_g/Offset_t_001_001', table)
        # table that doesnt exist
        table = self.ph5API_object.read_t("Random_t")
        self.assertFalse(table)
def test_textural_cut(self):
"""
tests cutting of text data from arrays
using textural_cut method
"""
# try das that doesn't exist
traces = self.ph5API_object.textural_cut('0407',
1545088203,
1547489525,
-2,
None)
self.assertFalse(traces)
# no das_t
traces = self.ph5API_object.textural_cut('5553',
1545088203,
1547489525,
-2,
None)
# traces exist
self.assertTrue(traces)
# 1 trace
self.assertEqual(1, len(traces))
# 0 sample rate
self.assertEqual(0, traces[0].sample_rate)
# ttype is '|S1' (one character per sample)
self.assertEqual('|S1', traces[0].ttype)
# turn data into string to more easily read it
str1 = ''.join(traces[0].data)
self.assertIn('TIME JUMP OF 3.000400 SECONDS', str1)
# with das_t
self.ph5API_object.read_das_t('5553')
das_t = self.ph5API_object.Das_t['5553']['rows']
traces = self.ph5API_object.textural_cut('5553',
1545088203,
1547489525,
-2,
das_t)
# traces exist
self.assertTrue(traces)
# 1 trace
self.assertEqual(1, len(traces))
# 0 sample rate
self.assertEqual(0, traces[0].sample_rate)
# ttype is '|S1' (one character per sample)
self.assertEqual('|S1', traces[0].ttype)
# turn data into string to more easily read it
str1 = ''.join(traces[0].data)
self.assertIn('TIME JUMP OF 3.000400 SECONDS', str1)
# try channel with data instead of text
traces = self.ph5API_object.textural_cut('5553',
0,
9547489525,
1,
None)
self.assertFalse(traces)
def test_cut(self):
"""
test regular cut method
"""
# try cutting das that doesn't exist
# should return a single trace object with no data
traces = self.ph5API_object.cut('9999',
0,
1599999999,
1,
250,
True,
das_t=None)
self.assertTrue(1, len(traces))
self.assertFalse(traces[0].data)
# read actual data no time correction
traces = self.ph5API_object.cut('12183',
1550849943,
1550850189,
1,
500,
False,
das_t=None)
# 9 traces with NO time corrections
# all same sample rate and ttype
self.assertTrue(9, len(traces))
for trace in traces:
self.assertEqual(0.0, trace.time_correction_ms)
self.assertEqual(500, trace.sample_rate)
self.assertEqual('int', trace.ttype)
# check start times match
self.assertEqual(1550849943.00,
traces[0].start_time.epoch(fepoch=True))
# check a few samples
self.assertEqual(-1412641,
traces[0].data[0])
self.assertEqual(-1180944,
traces[0].data[872])
self.assertEqual(-1371008,
traces[0].data[-1])
self.assertEqual(1550850123.00,
traces[6].start_time.epoch(fepoch=True))
self.assertEqual(-1407159,
traces[6].data[0])
self.assertEqual(-1185688,
traces[6].data[872])
self.assertEqual(-1366398,
traces[6].data[-1])
# read actual data no time correction
# give das_t this time
self.ph5API_object.read_das_t('12183')
das_t = self.ph5API_object.Das_t['12183']['rows']
traces = self.ph5API_object.cut('12183',
1550849943,
1550850189,
1,
500,
False,
das_t=das_t)
# 9 traces with NO time corrections
# all same sample rate and ttype
self.assertTrue(9, len(traces))
for trace in traces:
self.assertEqual(0.0, trace.time_correction_ms)
self.assertEqual(500, trace.sample_rate)
self.assertEqual('int', trace.ttype)
# check nodes since they don't star on even seconds\
# read actual data no time correction
traces = self.ph5API_object.cut('3X500',
1502294405.38,
1502294410.38,
3,
500,
False,
das_t=None)
self.assertEqual(1502294405.38,
traces[0].start_time.epoch(fepoch=True))
# 1 traces with NO time corrections
# all same sample rate and ttype
self.assertTrue(1, len(traces))
for trace in traces:
self.assertEqual(0.0, trace.time_correction_ms)
self.assertEqual(500, trace.sample_rate)
self.assertEqual('int', trace.ttype)
# 2500 samples
self.assertEqual(2500, traces[0].nsamples)
# should start half way thorugh data array
# check a few samples
# should match sample 2500 in array 005
self.assertEqual(1317166976,
traces[0].data[0])
# should match sample 4999 in array 005
self.assertEqual(-122580344,
traces[0].data[-1])
    def test_get_extent(self):
        """
        Test get_extent: earliest/latest data times for a das, channel
        and sample rate combination.
        """
        # test das that exists
        earliest, latest = self.ph5API_object.get_extent(
            '9EEF',
            1,
            100,
            start=None,
            end=None)
        self.assertEqual(1463568480, earliest)
        self.assertEqual(1463568517.88, latest)
        # test das that doesn't exist
        earliest, latest = self.ph5API_object.get_extent(
            'xxxx',
            1,
            900,
            start=None,
            end=None)
        self.assertFalse(earliest)
        self.assertFalse(latest)
        # test with multiple das table entries
        earliest, latest = self.ph5API_object.get_extent(
            '3X500',
            2,
            500,
            start=None,
            end=None)
        self.assertEqual(1502294400.38, earliest)
        self.assertEqual(1502294460.38, latest)
        # test giving start time and end time
        # NOTE(review): the full extent is returned even though start/end
        # fall inside it -- confirm that is the intended semantics.
        earliest, latest = self.ph5API_object.get_extent(
            '3X500',
            1,
            500,
            start=1502294405.38,
            end=1502294459.00)
        self.assertEqual(1502294400.38, earliest)
        self.assertEqual(1502294460.38, latest)
        # giving only one of start/end is rejected
        # test giving start time only
        self.assertRaises(ValueError,
                          self.ph5API_object.get_extent,
                          '3X500',
                          1,
                          500,
                          start=1502294405.38,
                          end=None)
        # test giving end time only
        self.assertRaises(ValueError,
                          self.ph5API_object.get_extent,
                          '3X500',
                          1,
                          500,
                          start=None,
                          end=1502294460.38)
        # test LOG channel (chan -2, sample rate 0)
        earliest, latest = self.ph5API_object.get_extent(
            '5553',
            -2,
            0,
            start=1545088205,
            end=1545088205)
        self.assertEqual(1545088205, earliest)
        self.assertEqual(1545088205, latest)
        # test LOG no times
        earliest, latest = self.ph5API_object.get_extent(
            '5553',
            -2,
            0,
            start=None,
            end=None)
        self.assertEqual(1545088205, earliest)
        self.assertEqual(1545088205, latest)
        # test das with same chan # and various sample rates
        earliest, latest = self.ph5API_object.get_extent(
            '5553',
            1,
            100,
            start=None,
            end=None)
        self.assertEqual(1545085230.681998, earliest)
        self.assertEqual(1545085240.691998, latest)
        earliest, latest = self.ph5API_object.get_extent(
            '5553',
            1,
            200,
            start=None,
            end=None)
        self.assertEqual(1545085230.917, earliest)
        self.assertAlmostEqual(1545085240.922, latest, 5)
    def test_get_availability(self):
        """
        Test get_availability: (sample_rate, start, end) windows of
        contiguous data for a das/sample-rate/channel combination.
        """
        # test das that doesn't exist
        times = self.ph5API_object.get_availability(
            '12345',
            10000,
            99)
        self.assertFalse(times)
        # simple with gaps -- each gap starts a new window
        times = self.ph5API_object.get_availability(
            '12183',
            500,
            1)
        self.assertEqual(9, len(times))
        self.assertEqual((500, 1550849943, 1550849949), times[0])
        self.assertEqual((500, 1550850033, 1550850034), times[3])
        self.assertEqual((500, 1550850183, 1550850189), times[8])
        # no gaps single sample rate per ch
        # multiple windows collapse into one availability span
        times = self.ph5API_object.get_availability(
            '3X500',
            500,
            3)
        self.assertEqual(1, len(times))
        self.assertEqual((500, 1502294400.38, 1502294460.38), times[0])
        # no gaps single sample rate per ch
        # single window
        times = self.ph5API_object.get_availability(
            '9EEF',
            100,
            2)
        self.assertEqual(1, len(times))
        self.assertEqual((100, 1463568480, 1463568517.88), times[0])
        # no gaps multiple sample rate per ch
        # single window -- each requested rate is reported separately
        times = self.ph5API_object.get_availability(
            '5553',
            200,
            1)
        self.assertEqual(1, len(times))
        self.assertEqual((200.0, 1545085230.917, 1545085240.9220002),
                         times[0])
        times = self.ph5API_object.get_availability(
            '5553',
            100,
            1)
        self.assertEqual(1, len(times))
        self.assertEqual((100.0, 1545085230.681998, 1545085240.691998),
                         times[0])
def test_channels(self):
"""
rest channels method
"""
# should give 3 channels
chans = self.ph5API_object.channels(
'Array_t_001',
'500')
self.assertEqual(3, len(chans))
self.assertEqual([1, 2, 3], chans)
# should give 1 channels
chans = self.ph5API_object.channels(
'Array_t_004',
'0407')
self.assertEqual(1, len(chans))
self.assertEqual([-2], chans)
# should give 0 channels
chans = self.ph5API_object.channels(
'Array_t_099',
'1234')
self.assertEqual(0, len(chans))
self.assertFalse(chans)
def test_trace_padding(self):
"""
test padding of traces
"""
# get all traces from station 9001
# pad them to be the same length
traces = self.ph5API_object.cut(
'12183',
1550849840,
1550851189,
chan=1,
sample_rate=500,
apply_time_correction=True,
das_t=None)
self.assertEqual(9, len(traces))
# check that trace lengths vary
sample_total = 0
for x in traces:
sample_total = sample_total + len(x.data)
start_time = traces[0].start_time
end_time = traces[-1].start_time + (
traces[-1].nsamples/traces[-1].sample_rate)
# now we should have a single trace with gaps padded
# with the mean
traces = ph5api.pad_traces(traces)
self.assertTrue(traces)
self.assertTrue(sample_total, len(traces.data))
self.assertTrue(start_time, traces.start_time)
self.assertTrue(end_time, traces.start_time+(
traces.nsamples/traces.sample_rate))
self.assertTrue(-119984, traces.padding)
    def test_mix_ins(self):
        """
        Test the module-level helpers at the end of ph5api that
        aren't used within the api itself.
        """
        # is_in: window overlap test -- true case
        result = ph5api.is_in(
            0,
            10000,
            5,
            1000)
        self.assertTrue(result)
        # false
        result = ph5api.is_in(
            99999,
            10000,
            5,
            1000)
        self.assertFalse(result)
        # get float epoch: fepoch combines epoch seconds with a
        # microseconds component (100000000 us == 100 s)
        fepoch = ph5api.fepoch(1000000, 9555)
        self.assertEqual(1000000.009555, fepoch)
        fepoch = ph5api.fepoch(1978346, 100000000)
        self.assertEqual(1978446, fepoch)
        # filter a das table's rows by channel number
        self.ph5API_object.read_das_t('5553')
        das_t = ph5api.filter_das_t(
            self.ph5API_object.Das_t['5553']['rows'],
            1)
        self.assertEqual(2, len(das_t))
        das_t = ph5api.filter_das_t(
            self.ph5API_object.Das_t['5553']['rows'],
            -2)
        self.assertEqual(1, len(das_t))
        # unknown channel yields no rows
        das_t = ph5api.filter_das_t(
            self.ph5API_object.Das_t['5553']['rows'],
            -9)
        self.assertEqual(0, len(das_t))
    def test_close_ph5(self):
        """
        close ph5 object
        """
        # clear cached tables, then close the underlying file
        self.ph5API_object.clear()
        self.ph5API_object.close()
        # close() is expected to null out the ph5 file handle
        self.assertIsNone(self.ph5API_object.ph5)
    def tearDown(self):
        """Close the PH5 file handle after each test."""
        self.ph5API_object.close()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.core.files.base import ContentFile
from django.contrib.admin.views.decorators import staff_member_required
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
from django.contrib.auth.decorators import permission_required
from django.db.models import Q
from django.db.models.fields.related import ReverseManyRelatedObjectsDescriptor
from django.forms.models import inlineformset_factory
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import (
render_to_response,
redirect,
get_object_or_404,
render)
from django.template import RequestContext
from .models import Report, DisplayField, FilterField, Format
from .utils import (
javascript_date_format,
duplicate,
)
from django.utils.decorators import method_decorator
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import TemplateView, View
from django import forms
from report_utils.model_introspection import get_relation_fields_from_model
from report_utils.mixins import GetFieldsMixin, DataExportMixin
import warnings
import datetime
import time
import re
from decimal import Decimal
from numbers import Number
import copy
from dateutil import parser
import json
class ReportForm(forms.ModelForm):
    """Form for creating a Report: name, distinct flag and root model."""
    class Meta:
        model = Report
        fields = ['name', 'distinct', 'root_model']
class ReportEditForm(forms.ModelForm):
    """Form for editing a Report; exposes only name, distinct, description."""
    class Meta:
        model = Report
        fields = ['name', 'distinct', 'description',]
        widgets = {
            # single-line input stretched across the page
            'description': forms.TextInput(
                attrs={'style': 'width:99%;', 'placeholder': 'Description'}),
        }
class DisplayFieldForm(forms.ModelForm):
    """Inline form for one DisplayField (a column) of a report."""
    class Meta:
        model = DisplayField
        fields = ('name', 'path', 'path_verbose', 'field_verbose', 'field', 'position',
                  'width', 'total', 'sort', 'aggregate', 'group', 'display_format')
        widgets = {
            # path/field are set by client-side JS, not typed by the user
            'path': forms.HiddenInput(),
            'path_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
            'field_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
            'field': forms.HiddenInput(),
            'width': forms.TextInput(attrs={'class':'small_input'}),
            'total': forms.CheckboxInput(attrs={'class':'small_input'}),
            'sort': forms.TextInput(attrs={'class':'small_input'}),
        }
class FilterFieldForm(forms.ModelForm):
    """Inline form for one FilterField (a filter row) of a report."""
    class Meta:
        model = FilterField
        fields = ('path', 'path_verbose', 'field_verbose', 'field', 'filter_type',
                  'filter_value', 'filter_value2', 'exclude', 'position')
        widgets = {
            # path/field are set by client-side JS, not typed by the user
            'path': forms.HiddenInput(),
            'path_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
            'field_verbose': forms.TextInput(attrs={'readonly':'readonly'}),
            'field': forms.HiddenInput(),
            # client-side JS toggles the value inputs when the type changes
            'filter_type': forms.Select(attrs={'onchange':'check_filter_type(event.target)'})
        }
    def __init__(self, *args, **kwargs):
        """Swap in choice/date-picker widgets based on the bound field."""
        super(FilterFieldForm, self).__init__(*args, **kwargs)
        # override the filter_value field with the models native ChoiceField
        if self.instance.choices:
            self.fields['filter_value'].widget = forms.Select(choices=self.instance.choices)
        if 'DateField' in self.instance.field_verbose or 'DateTimeField' in self.instance.field_verbose:
            widget = self.fields['filter_value'].widget
            widget.attrs['class'] = 'datepicker'
            widget.attrs['data-date-format'] = javascript_date_format(settings.DATE_FORMAT)
class ReportCreateView(CreateView):
    """Create a new Report using ReportForm."""
    form_class = ReportForm
    template_name = 'report_new.html'
def filter_property(filter_field, value):
    """ Test a single python property value against a FilterField.

    Returns True when the value should be filtered OUT and False when it
    passes the filter; ``filter_field.exclude`` inverts the result.
    Comparison errors deliberately leave the value filtered.
    """
    filter_type = filter_field.filter_type
    filter_value = filter_field.filter_value
    filtered = True
    #TODO: i10n
    WEEKDAY_INTS = {
        'monday': 0,
        'tuesday': 1,
        'wednesday': 2,
        'thursday': 3,
        'friday': 4,
        'saturday': 5,
        'sunday': 6,
    }
    #TODO instead of catch all, deal with all cases
    # Example is 'a' < 2 is a valid python comparison
    # But what about 2 < '1' which yeilds true! Not intuitive for humans.
    try:
        if filter_type == 'exact' and str(value) == filter_value:
            filtered = False
        if filter_type == 'iexact' and str(value).lower() == str(filter_value).lower():
            filtered = False
        if filter_type == 'contains' and filter_value in value:
            filtered = False
        if filter_type == 'icontains' and str(filter_value).lower() in str(value).lower():
            filtered = False
        if filter_type == 'in' and value in filter_value:
            filtered = False
        # week_day must be evaluated while value is still a date/datetime
        # (before the timestamp conversion below); weekday() is a method
        # and previously was never called, so this branch never matched.
        if filter_type == 'week_day' and isinstance(value, datetime.date) and \
                WEEKDAY_INTS.get(str(filter_value).lower()) == value.weekday():
            filtered = False
        # convert dates and datetimes to timestamps in order to compare digits and date/times the same
        if isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
            value = str(time.mktime(value.timetuple()))
            try:
                filter_value_dt = parser.parse(filter_value)
                filter_value = str(time.mktime(filter_value_dt.timetuple()))
            except ValueError:
                pass
        if filter_type == 'gt' and Decimal(value) > Decimal(filter_value):
            filtered = False
        if filter_type == 'gte' and Decimal(value) >= Decimal(filter_value):
            filtered = False
        if filter_type == 'lt' and Decimal(value) < Decimal(filter_value):
            filtered = False
        if filter_type == 'lte' and Decimal(value) <= Decimal(filter_value):
            filtered = False
        if filter_type == 'startswith' and str(value).startswith(str(filter_value)):
            filtered = False
        # the case-insensitive variants must lower-case BOTH sides
        if filter_type == 'istartswith' and str(value).lower().startswith(str(filter_value).lower()):
            filtered = False
        if filter_type == 'endswith' and str(value).endswith(str(filter_value)):
            filtered = False
        if filter_type == 'iendswith' and str(value).lower().endswith(str(filter_value).lower()):
            filtered = False
        if filter_type == 'range' and value in [int(x) for x in filter_value]:
            filtered = False
        if filter_type == 'isnull' and value is None:
            filtered = False
        if filter_type == 'regex' and re.search(filter_value, value):
            filtered = False
        if filter_type == 'iregex' and re.search(filter_value, value, re.I):
            filtered = False
    except Exception:
        # mixed-type comparisons are expected to fail; treat as filtered
        pass
    if filter_field.exclude:
        return not filtered
    return filtered
class AjaxGetRelated(GetFieldsMixin, TemplateView):
    """Ajax view listing relation fields reachable from a model field."""
    template_name = "report_builder/report_form_related_li.html"
    def get_context_data(self, **kwargs):
        """Resolve the requested model/field and add its related fields."""
        context = super(AjaxGetRelated, self).get_context_data(**kwargs)
        request = self.request
        # the 'model' GET parameter is a ContentType primary key
        model_class = ContentType.objects.get(pk=request.GET['model']).model_class()
        path = request.GET['path']
        path_verbose = request.GET['path_verbose']
        new_fields, model_ct, path = self.get_related_fields(
            model_class,
            request.GET['field'],
            path,
            path_verbose,)
        context['model_ct'] = model_ct
        context['related_fields'] = new_fields
        context['path'] = path
        context['path_verbose'] = path_verbose
        return context
def fieldset_string_to_field(fieldset_dict, model):
    """ Replace field-name strings in a fieldset dict with the actual
    model field objects, recursing into nested (name, dict) entries.
    Mutates ``fieldset_dict`` in place.
    """
    if isinstance(fieldset_dict['fields'], tuple):
        fieldset_dict['fields'] = list(fieldset_dict['fields'])
    # enumerate replaces the original manually-incremented index counter
    for i, dict_field in enumerate(fieldset_dict['fields']):
        if isinstance(dict_field, basestring):
            fieldset_dict['fields'][i] = model._meta.get_field_by_name(dict_field)[0]
        elif isinstance(dict_field, (list, tuple)):
            # nested fieldset: mark it recursive and resolve its fields too
            dict_field[1]['recursive'] = True
            fieldset_string_to_field(dict_field[1], model)
def get_fieldsets(model):
    """ fieldsets are optional, they are defined in the Model.

    Returns the model's ``report_builder_fieldsets`` (or None), after
    resolving any field-name strings into real field objects.
    """
    fieldsets = getattr(model, 'report_builder_fieldsets', None)
    if fieldsets:
        for _fieldset_name, fieldset_dict in fieldsets:
            fieldset_string_to_field(fieldset_dict, model)
    return fieldsets
class AjaxGetFields(GetFieldsMixin, TemplateView):
    """ Get fields from a particular model """
    template_name = 'report_builder/report_form_fields_li.html'

    def get_context_data(self, **kwargs):
        context = super(AjaxGetFields, self).get_context_data(**kwargs)
        field_name = self.request.GET.get('field')
        model_class = ContentType.objects.get(pk=self.request.GET['model']).model_class()
        path = self.request.GET['path']
        path_verbose = self.request.GET.get('path_verbose')
        # CLEANUP: removed the unused 'root_model' local from the original.
        field_data = self.get_fields(model_class, field_name, path, path_verbose)
        ctx = context.copy()
        ctx.update(field_data.items())
        return ctx
@staff_member_required
def ajax_get_choices(request):
    """Return rendered <option> HTML listing the choices for a filter field."""
    label = request.GET.get('label')
    app_label = request.GET.get('app_label')
    # The model is the last path segment, or the root model when no path.
    raw_name = request.GET.get('path_verbose') or request.GET.get('root_model')
    model_name = raw_name.split(':')[-1]
    model = ContentType.objects.get(model=model_name, app_label=app_label).model_class()
    choices = FilterField().get_choices(model, label)
    select_widget = forms.Select(choices=[('', '---------')] + list(choices))
    return HttpResponse(select_widget.render_options([], [0]))
@staff_member_required
def ajax_get_formats(request):
    """Return rendered <option> HTML listing the available report formats."""
    format_choices = Format.objects.values_list('pk', 'name')
    widget = forms.Select(choices=[('', '---------')] + list(format_choices))
    return HttpResponse(widget.render_options([], [0]))
class AjaxPreview(DataExportMixin, TemplateView):
    """ This view is intended for a quick preview useful when debugging
    reports. It limits to 50 objects.
    """
    template_name = "report_builder/html_report.html"

    @method_decorator(staff_member_required)
    def dispatch(self, *args, **kwargs):
        return super(AjaxPreview, self).dispatch(*args, **kwargs)

    def post(self, request, *args, **kwargs):
        # BUG FIX: the original called self.get(self, request, ...); since
        # self.get is already bound, that shifted every positional argument
        # by one (request received self, args received the request).
        return self.get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(AjaxPreview, self).get_context_data(**kwargs)
        report = get_object_or_404(Report, pk=self.request.POST['report_id'])
        queryset, message = report.get_query()
        # Property/custom-field filters cannot run in SQL; they are passed
        # through for post-filtering in report_to_list().
        property_filters = report.filterfield_set.filter(
            Q(field_verbose__contains='[property]') | Q(field_verbose__contains='[custom')
        )
        objects_list, message = self.report_to_list(
            queryset,
            report.displayfield_set.all(),
            self.request.user,
            property_filters=property_filters,
            preview=True,)
        context['report'] = report
        context['objects_dict'] = objects_list
        context['message'] = message
        return context
class ReportUpdateView(GetFieldsMixin, UpdateView):
    """ This view handles the edit report builder
    It includes attached formsets for display and criteria fields
    """
    model = Report
    form_class = ReportEditForm
    success_url = './'

    @method_decorator(permission_required('report_builder.change_report'))
    def dispatch(self, request, *args, **kwargs):
        return super(ReportUpdateView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Build the edit-page context: inline formsets for display and
        # filter fields, plus model metadata used by the field-picker UI.
        ctx = super(ReportUpdateView, self).get_context_data(**kwargs)
        model_class = self.object.root_model.model_class()
        model_ct = ContentType.objects.get_for_model(model_class)
        relation_fields = get_relation_fields_from_model(model_class)
        DisplayFieldFormset = inlineformset_factory(
            Report,
            DisplayField,
            extra=0,
            can_delete=True,
            form=DisplayFieldForm)
        FilterFieldFormset = inlineformset_factory(
            Report,
            FilterField,
            extra=0,
            can_delete=True,
            form=FilterFieldForm)
        if self.request.POST:
            # Re-bind formsets to POST data so validation errors re-render.
            ctx['field_list_formset'] = DisplayFieldFormset(self.request.POST, instance=self.object)
            ctx['field_filter_formset'] = FilterFieldFormset(self.request.POST, instance=self.object, prefix="fil")
        else:
            ctx['field_list_formset'] = DisplayFieldFormset(instance=self.object)
            ctx['field_filter_formset'] = FilterFieldFormset(instance=self.object, prefix="fil")
        ctx['related_fields'] = relation_fields
        ctx['fieldsets'] = get_fieldsets(model_class)
        ctx['model_ct'] = model_ct
        ctx['root_model'] = model_ct.model
        ctx['app_label'] = model_ct.app_label
        if getattr(settings, 'REPORT_BUILDER_ASYNC_REPORT', False):
            ctx['async_report'] = True
        field_context = self.get_fields(model_class)
        ctx = ctx.copy()
        ctx.update(field_context)
        return ctx

    def form_valid(self, form):
        # Save the report only when BOTH inline formsets validate too;
        # otherwise re-render the form with the bound (invalid) formsets.
        context = self.get_context_data()
        field_list_formset = context['field_list_formset']
        field_filter_formset = context['field_filter_formset']
        if field_list_formset.is_valid() and field_filter_formset.is_valid():
            self.object = form.save()
            field_list_formset.report = self.object
            field_list_formset.save()
            field_filter_formset.report = self.object
            field_filter_formset.save()
            # Normalize display-field ordering after save.
            self.object.check_report_display_field_positions()
            return HttpResponseRedirect(self.get_success_url())
        else:
            return self.render_to_response(self.get_context_data(form=form))
class DownloadXlsxView(DataExportMixin, View):
    """Generate a report as XLSX, either synchronously in the HTTP response
    or asynchronously via Celery when REPORT_BUILDER_ASYNC_REPORT is set."""

    @method_decorator(staff_member_required)
    def dispatch(self, *args, **kwargs):
        return super(DownloadXlsxView, self).dispatch(*args, **kwargs)

    def process_report(self, report_id, user_id, to_response, queryset=None):
        """Run the report; return an XLSX response (to_response=True) or
        save the generated file on the report instance."""
        report = get_object_or_404(Report, pk=report_id)
        user = User.objects.get(pk=user_id)
        # BUG FIX: compare against None, not truthiness -- an explicitly
        # passed *empty* queryset (e.g. from ExportToReport with IDs that
        # matched nothing) must not be silently replaced by the report's
        # own unfiltered query, which would export every object.
        if queryset is None:
            queryset, message = report.get_query()
        property_filters = report.filterfield_set.filter(
            Q(field_verbose__contains='[property]') | Q(field_verbose__contains='[custom')
        )
        objects_list, message = self.report_to_list(
            queryset,
            report.displayfield_set.all(),
            user,
            property_filters=property_filters,
            preview=False,)
        # Excel worksheet titles are limited to ~31 characters.
        title = re.sub(r'\W+', '', report.name)[:30]
        header = []
        widths = []
        for field in report.displayfield_set.all():
            header.append(field.name)
            widths.append(field.width)
        if to_response:
            return self.list_to_xlsx_response(objects_list, title, header, widths)
        else:
            self.async_report_save(report, objects_list, title, header, widths)

    def async_report_save(self, report, objects_list, title, header, widths):
        # Persist the generated workbook on the report for later download.
        xlsx_file = self.list_to_xlsx_file(objects_list, title, header, widths)
        if not title.endswith('.xlsx'):
            title += '.xlsx'
        report.report_file.save(title, ContentFile(xlsx_file.getvalue()))
        report.report_file_creation = datetime.datetime.today()
        report.save()

    def get(self, request, *args, **kwargs):
        report_id = kwargs['pk']
        if getattr(settings, 'REPORT_BUILDER_ASYNC_REPORT', False):
            # Hand off to Celery and return the task id for polling.
            from .tasks import report_builder_async_report_save
            report_task = report_builder_async_report_save.delay(report_id, request.user.pk)
            task_id = report_task.task_id
            return HttpResponse(json.dumps({'task_id': task_id}), content_type="application/json")
        else:
            return self.process_report(report_id, request.user.pk, to_response=True)
@staff_member_required
def ajax_add_star(request, pk):
    """ Star or unstar report for user
    """
    report = get_object_or_404(Report, pk=pk)
    is_starred = request.user in report.starred.all()
    if is_starred:
        report.starred.remove(request.user)
    else:
        report.starred.add(request.user)
    # Response body is 'True' when the star was just added.
    return HttpResponse(not is_starred)
@staff_member_required
def create_copy(request, pk):
    """ Copy a report including related fields """
    report = get_object_or_404(Report, pk=pk)
    new_report = duplicate(report, changes=(
        ('name', '{0} (copy)'.format(report.name)),
        ('user_created', request.user),
        ('user_modified', request.user),
    ))
    # duplicate does not get related -- clone display then filter fields.
    related_fields = list(report.displayfield_set.all()) + list(report.filterfield_set.all())
    for related in related_fields:
        clone = copy.copy(related)
        clone.pk = None
        clone.report = new_report
        clone.save()
    return redirect(new_report)
class ExportToReport(DownloadXlsxView, TemplateView):
    """ Export objects (by ID and content type) to an existing or new report
    In effect this runs the report with its display fields. It ignores
    filters and filters instead the provided ID's. It can be select
    as a global admin action.
    """
    template_name = "report_builder/export_to_report.html"

    def get_context_data(self, **kwargs):
        ctx = super(ExportToReport, self).get_context_data(**kwargs)
        ctx['admin_url'] = self.request.GET.get('admin_url', '/')
        ct = ContentType.objects.get_for_id(self.request.GET['ct'])
        ids = self.request.GET['ids'].split(',')
        ctx['number_objects'] = len(ids)
        # Only offer reports rooted at the same model as the selected objects.
        ctx['object_list'] = Report.objects.filter(root_model=ct).order_by('-modified')
        # NOTE(review): key is 'mode' but holds the model's verbose name --
        # possibly a typo for 'model'; confirm against the template first.
        ctx['mode'] = ct.model_class()._meta.verbose_name
        return ctx

    def get(self, request, *args, **kwargs):
        if 'download' in request.GET:
            # Download path: run the chosen report restricted to the IDs.
            ct = ContentType.objects.get_for_id(request.GET['ct'])
            ids = self.request.GET['ids'].split(',')
            report = get_object_or_404(Report, pk=request.GET['download'])
            queryset = ct.model_class().objects.filter(pk__in=ids)
            return self.process_report(report.id, request.user.pk, to_response=True, queryset=queryset)
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
@staff_member_required
def check_status(request, pk, task_id):
    """ Check if the asyncronous report is ready to download """
    from celery.result import AsyncResult
    result = AsyncResult(task_id)
    download_link = ''
    if result.state == 'SUCCESS':
        download_link = get_object_or_404(Report, pk=pk).report_file.url
    payload = json.dumps({'state': result.state, 'link': download_link})
    return HttpResponse(payload, content_type="application/json")
|
"""
Utilities for meta & bulk Sample operations
"""
import os
from glob import glob
import numpy as np
import statsmodels.api as sm
import warnings
from .. import Sample, Matrix
# FCS 3.1 reserves certain keywords as being part of the FCS standard. Some
# of these are required, and others are optional. However, all of these
# keywords shall be prefixed by the '$' character. No other keywords shall
# begin with the '$' character. All keywords are case-insensitive, however
# most cytometers use all uppercase for keyword strings. FlowKit follows
# the convention used in FlowIO and internally stores and references all
# FCS keywords as lowercase for more convenient typing by developers.
# Keywords reserved by the FCS 3.1 standard, stored lowercase and without
# the leading '$' used inside FCS files (see the module comment above).
FCS_STANDARD_KEYWORDS = [
    'beginanalysis',
    'begindata',
    'beginstext',
    'byteord',
    'datatype',
    'endanalysis',
    'enddata',
    'endstext',
    'mode',
    'nextdata',
    'par',
    'tot',
    # start optional standard keywords
    'abrt',
    'btim',
    'cells',
    'com',
    'csmode',
    'csvbits',
    'cyt',
    'cytsn',
    'data',
    'etim',
    'exp',
    'fil',
    'gate',
    'inst',
    'last_modified',
    'last_modifier',
    'lost',
    'op',
    'originality',
    'plateid',
    'platename',
    'proj',
    'smno',
    'spillover',
    'src',
    'sys',
    'timestep',
    'tr',
    'vol',
    'wellid'
]
def _get_samples_from_paths(sample_paths):
    """
    Load multiple Sample instances from a list of file paths

    :param sample_paths: list of file paths containing FCS files
    :return: list of Sample instances
    """
    return [Sample(path) for path in sample_paths]
def load_samples(fcs_samples):
    """
    Returns a list of Sample instances from a variety of input types (fcs_samples), such as file or
    directory paths, a Sample instance, or lists of the previous types.

    :param fcs_samples: str or list. If given a string, it can be a directory path or a file path.
        If a directory, any .fcs files in the directory will be loaded. If a list, then it must
        be a list of file paths or a list of Sample instances. Lists of mixed types are not
        supported.
    :return: list of Sample instances
    """
    # Single Sample instance: wrap it.
    if isinstance(fcs_samples, Sample):
        return [fcs_samples]
    # Single string: either a directory (non-recursive *.fcs search) or a file.
    if isinstance(fcs_samples, str):
        if os.path.isdir(fcs_samples):
            fcs_paths = glob(os.path.join(fcs_samples, '*.fcs'))
            if len(fcs_paths) > 0:
                return _get_samples_from_paths(fcs_paths)
        elif os.path.isfile(fcs_samples):
            return _get_samples_from_paths([fcs_samples])
        return []
    # List: must be homogeneous (all paths or all Sample instances).
    if isinstance(fcs_samples, list):
        sample_types = {type(sample) for sample in fcs_samples}
        if len(sample_types) > 1:
            raise ValueError(
                "Each item in 'fcs_sample' list must be a FCS file path or Sample instance"
            )
        if Sample in sample_types:
            return fcs_samples
        if str in sample_types:
            return _get_samples_from_paths(fcs_samples)
    return []
def _process_bead_samples(bead_samples):
# do nothing if there are no bead samples
bead_sample_count = len(bead_samples)
if bead_sample_count == 0:
warnings.warn("No bead samples were loaded")
return
bead_lut = {}
# all the bead samples must have the same panel, use the 1st one to
# determine the fluorescence channels
fluoro_indices = bead_samples[0].fluoro_indices
# 1st check is to make sure the # of bead samples matches the #
# of fluorescence channels
if bead_sample_count != len(fluoro_indices):
raise ValueError("Number of bead samples must match the number of fluorescence channels")
# get PnN channel names from 1st bead sample
pnn_labels = []
for f_idx in fluoro_indices:
pnn_label = bead_samples[0].pnn_labels[f_idx]
if pnn_label not in pnn_labels:
pnn_labels.append(pnn_label)
bead_lut[f_idx] = {'pnn_label': pnn_label}
else:
raise ValueError("Duplicate channel labels are not supported")
# now, determine which bead file goes with which channel, and make sure
# they all have the same channels
for i, bs in enumerate(bead_samples):
# check file name for a match with a channel
if bs.fluoro_indices != fluoro_indices:
raise ValueError("All bead samples must have the same channel labels")
for channel_idx, lut in bead_lut.items():
# file names typically don't have the "-A", "-H', or "-W" sub-strings
pnn_label = lut['pnn_label'].replace("-A", "")
if pnn_label in bs.original_filename:
lut['bead_index'] = i
lut['pns_label'] = bs.pns_labels[channel_idx]
return bead_lut
def calculate_compensation_from_beads(comp_bead_samples, matrix_id='comp_bead'):
    """
    Calculates spillover from a list of FCS bead files.

    :param comp_bead_samples: str or list. If given a string, it can be a directory path or a file path.
        If a directory, any .fcs files in the directory will be loaded. If a list, then it must
        be a list of file paths or a list of Sample instances. Lists of mixed types are not
        supported.
    :param matrix_id: label for the calculated Matrix
    :return: a Matrix instance, or None if no usable bead samples were found
    """
    bead_samples = load_samples(comp_bead_samples)
    bead_lut = _process_bead_samples(bead_samples)
    # ROBUSTNESS FIX: use truthiness rather than len() -- the helper may
    # return None (older behavior) or an empty dict when nothing loaded,
    # and len(None) would raise a TypeError here.
    if not bead_lut:
        warnings.warn("No bead samples were loaded")
        return
    detectors = []
    fluorochromes = []
    comp_values = []
    for channel_idx in sorted(bead_lut.keys()):
        detectors.append(bead_lut[channel_idx]['pnn_label'])
        fluorochromes.append(bead_lut[channel_idx]['pns_label'])
        bead_idx = bead_lut[channel_idx]['bead_index']
        x = bead_samples[bead_idx].get_events(source='raw')[:, channel_idx]
        # Drop saturated events (at or above the 18-bit ADC ceiling).
        good_events = x < (2 ** 18) - 1
        x = x[good_events]
        comp_row_values = []
        for channel_idx2 in sorted(bead_lut.keys()):
            if channel_idx == channel_idx2:
                # A channel "spills" 100% into itself by definition.
                comp_row_values.append(1.0)
            else:
                y = bead_samples[bead_idx].get_events(source='raw')[:, channel_idx2]
                y = y[good_events]
                # Robust linear model y ~ x: the fitted slope is the
                # spillover coefficient from this channel into channel_idx2.
                rlm_res = sm.RLM(y, x).fit()
                # noinspection PyUnresolvedReferences
                comp_row_values.append(rlm_res.params[0])
        comp_values.append(comp_row_values)
    return Matrix(matrix_id, np.array(comp_values), detectors, fluorochromes)
|
<reponame>str4nd/sikteeri<gh_stars>10-100
# encoding: UTF-8
import csv
import logging
from datetime import datetime, timedelta
from io import StringIO
from decimal import Decimal
from django.conf import settings
from membership.models import Bill, CancelledBill
logger = logging.getLogger("membership.billing.procountor")
class ProcountorBillDelivery(object):
    """Procountor bill delivery channel codes (the 'Laskukanava' CSV field)."""
    EMAIL = 1
    POST = 2
    EBILL = 3
    NO_DELIVERY = 6  # bill is imported into Procountor but not sent by it
def finnish_timeformat(t):
    """Format a date/datetime as DD.MM.YYYY (Finnish convention)."""
    return '{:%d.%m.%Y}'.format(t)


# Short alias used throughout the CSV row builder below.
ft = finnish_timeformat
# noinspection SpellCheckingInspection
def _bill_to_rows(bill, cancel=False):
    """Map bills to Procountor CSV format

    Emits two CSV rows per bill: an invoice header row and a product row.
    Returns an empty list for honorary ('H') memberships, or when a postal
    delivery is requested without a billing contact.
    http://support.procountor.com/fi/aineiston-sisaanluku/laskuaineiston-siirtotiedosto.html
    """
    rows = []
    c = bill.billingcycle
    # Honorary members ('H') are never billed.
    if c.membership.type in ['H']:
        return rows
    bill_delivery = ProcountorBillDelivery.NO_DELIVERY
    if c.membership.get_billing_contact():
        # BUG FIX: use an explicit escaped backslash ('\\') as the address
        # line separator; the original '\%' relied on an invalid escape
        # sequence (DeprecationWarning now, SyntaxError in future Pythons).
        # The resulting runtime string is byte-identical.
        billing_address = '%s\\%s\\%s\\%s\\%s' % (
            c.membership.name(),
            c.membership.get_billing_contact().street_address,
            c.membership.get_billing_contact().postal_code,
            c.membership.get_billing_contact().post_office,
            'FI')
        billing_email = c.membership.get_billing_contact().email
    else:
        billing_email = ""
        billing_address = ""
        if bill_delivery == ProcountorBillDelivery.POST:
            # A paper bill cannot be delivered without an address.
            logger.critical("No billing contact found for member {member}".format(member=str(c.membership)))
            return []
        else:
            logger.warning("No billing contact found for member {member}".format(member=str(c.membership)))
    # Invoice header row; inline comments give the Procountor field
    # (Finnish spec name in parentheses where useful).
    rows.append([
        'M',  # invoice type (laskutyyppi)
        'EUR',  # currency code
        c.reference_number,  # reference number (viitenumero)
        settings.IBAN_ACCOUNT_NUMBER,  # bank account
        '',  # business ID / personal ID / VAT number
        'tilisiirto',  # payment method: bank transfer
        c.membership.name(),  # business partner name
        '',  # delivery method
        '0',  # invoice discount %
        't',  # VAT-inclusive code
        'f' if cancel else 't',  # credit note code (hyvityslaskukoodi)
        '0.0',  # penalty interest %
        ft(bill.created),  # invoice date
        ft(bill.created),  # delivery date
        ft(bill.created + timedelta(days=settings.BILL_DAYS_TO_DUE)),  # due date
        '',  # business partner address
        billing_address,  # billing address
        '',  # delivery address
        '',  # invoice extra information
        '%s %d sikteerissä, tuotu %s, jäsen %d' % ('Hyvityslasku' if cancel else 'Lasku', bill.id, ft(datetime.now()),
                                                   c.membership.id),  # notes
        billing_email,  # e-mail address
        '',  # payment date
        '',  # currency rate
        # NOTE(review): only the cancel branch formats with "%.2f"; the
        # normal branch passes the raw fee value -- confirm Procountor
        # accepts both representations before unifying.
        "%.2f" % Decimal.copy_negate(c.get_fee()) if cancel else c.get_fee(),  # invoice total
        "%d" % c.get_vat_percentage(),  # VAT %
        '%d' % bill_delivery,  # invoice channel (laskukanava)
        '',  # e-invoice identifier
        '%d' % bill.id,  # order reference
        't',  # accounting-per-row code
        '',  # Finvoice address 1 (no longer used)
        '',  # Finvoice address 2 (no longer used)
        '%d' % c.membership.id,  # customer number
        'X',  # automatic sending / paid-elsewhere flag
        '',  # attachment file name inside the ZIP package
        '',  # contact person
        '',  # business partner's bank SWIFT code
        '',  # e-invoice operator
        '',  # business partner's OVT identifier
        "%s" % bill.id,  # biller's invoice number
        '',  # factoring contract number
        '',  # VAT handling country code
        '',  # language code
        '0',  # cash discount days
        '0'  # cash discount percent
    ])
    member_type = settings.BILLING_ACCOUNTING_MAP[c.membership.type]
    # Product row (first column empty marks a detail row).
    r2 = ['',  # EMPTY
          '',  # product description
          '%s%s' % (member_type[0], c.start.strftime("%y")),  # product code
          '-1' if cancel else '1',  # quantity (negative for credit notes)
          '',  # unit
          '%.2f' % c.get_fee(),  # unit price
          '0',  # row discount percent
          "%d" % c.get_vat_percentage(),  # row VAT %
          '',  # row comment
          '',  # order reference
          '',  # customer's purchase order number
          '',  # order confirmation number
          '',  # dispatch note number
          '%s' % member_type[1]  # ledger account
          ]
    # Pad the product row to the same column count as the header row.
    r2 += [''] * (len(rows[0]) - len(r2))
    rows.append(r2)
    return rows
def create_csv(start=None, mark_cancelled=True):
    """
    Create procountor bill export csv

    :param start: include bills created on/after this datetime; defaults to
        the first day of the current month.
    :param mark_cancelled: when True, flag exported CancelledBills so they
        are not emitted again by the next export.
    :return: the CSV content as a string (not a file path).
    """
    if start is None:
        start = datetime.now()
    # Normalize to the first day of the month.
    start = datetime(year=start.year, month=start.month, day=1)
    filehandle = StringIO()
    output = csv.writer(filehandle, delimiter=';', quoting=csv.QUOTE_NONE)
    # Only original bills (reminder_count == 0) are exported.
    for bill in Bill.objects.filter(created__gte=start, reminder_count=0).all():
        for row in _bill_to_rows(bill):
            output.writerow(row)
    # Credit notes for bills cancelled since the last export.
    cancelled_bills = CancelledBill.objects.filter(exported=False)
    for cb in cancelled_bills:
        for row in _bill_to_rows(cb.bill, cancel=True):
            output.writerow(row)
    if mark_cancelled:
        cancelled_bills.update(exported=True)
        logger.info("Marked all cancelled bills as exported.")
    return filehandle.getvalue()
|
import json
import os
import tempfile
from unittest.mock import patch
import pytest
import yaml
from chaoslib import convert_vars, merge_vars
from chaoslib.configuration import load_configuration
from chaoslib.exceptions import InvalidExperiment
@patch.dict("os.environ", {"KUBE_TOKEN": "value2"})
def test_should_load_configuration():
    # env-typed entries resolve from os.environ; plain strings pass through.
    config = load_configuration(
        {
            "token1": "value1",
            "token2": {"type": "env", "key": "KUBE_TOKEN"},
            "token3": {"type": "env", "key": "UNDEFINED", "default": "value3"},
        }
    )
    assert config["token1"] == "value1"
    assert config["token2"] == "value2"
    assert config["token3"] == "value3"


@patch.dict("os.environ", {"KUBE_TOKEN": "value2"})
def test_should_load_configuration_with_empty_string_as_default():
    # an empty-string default is honored (not treated as missing).
    config = load_configuration(
        {
            "token1": "value1",
            "token2": {"type": "env", "key": "KUBE_TOKEN"},
            "token3": {"type": "env", "key": "UNDEFINED", "default": ""},
        }
    )
    assert config["token1"] == "value1"
    assert config["token2"] == "value2"
    assert config["token3"] == ""


@patch.dict("os.environ", {"KUBE_TOKEN": ""})
def test_should_load_configuration_with_empty_string_as_input():
    # an env var explicitly set to "" is used as-is.
    config = load_configuration(
        {
            "token1": "value1",
            "token2": {"type": "env", "key": "KUBE_TOKEN"},
            "token3": {"type": "env", "key": "UNDEFINED", "default": "value3"},
        }
    )
    assert config["token1"] == "value1"
    assert config["token2"] == ""
    assert config["token3"] == "value3"


@patch.dict("os.environ", {"KUBE_TOKEN": ""})
def test_should_load_configuration_with_empty_string_as_input_while_default_is_define():
    # a set-but-empty env var wins over the declared default.
    config = load_configuration(
        {
            "token1": "value1",
            "token2": {"type": "env", "key": "KUBE_TOKEN", "default": "value2"},
            "token3": {"type": "env", "key": "UNDEFINED", "default": "value3"},
        }
    )
    assert config["token1"] == "value1"
    assert config["token2"] == ""
    assert config["token3"] == "value3"


@patch.dict("os.environ", {})
def test_load_configuration_should_raise_exception():
    # a missing env var with no default is an InvalidExperiment.
    with pytest.raises(InvalidExperiment) as x:
        load_configuration(
            {
                "token1": "value1",
                "token2": {"type": "env", "key": "KUBE_TOKEN"},
                "token3": {"type": "env", "key": "UNDEFINED", "default": ""},
            }
        )
    assert str(x.value) == (
        "Configuration makes reference to an environment key that does not exist:"
        " KUBE_TOKEN"
    )
@patch.dict("os.environ", {"KUBE_TOKEN": "value2"})
def test_can_override_experiment_inline_config_keys():
    # extra_vars overrides inline string values.
    config = load_configuration(
        {
            "token1": "value1",
            "token2": {"type": "env", "key": "KUBE_TOKEN"},
            "token3": {"type": "env", "key": "UNDEFINED", "default": "value3"},
        },
        extra_vars={"token1": "extravalue"},
    )
    assert config["token1"] == "extravalue"
    assert config["token2"] == "value2"
    assert config["token3"] == "value3"


@patch.dict("os.environ", {"KUBE_TOKEN": "value2"})
def test_default_value_is_overriden_in_inline_config_keys():
    # extra_vars also overrides an env entry's default.
    config = load_configuration(
        {
            "token1": "value1",
            "token2": {"type": "env", "key": "KUBE_TOKEN"},
            "token3": {"type": "env", "key": "UNDEFINED", "default": "value3"},
        },
        extra_vars={"token3": "extravalue"},
    )
    assert config["token1"] == "value1"
    assert config["token2"] == "value2"
    assert config["token3"] == "extravalue"
def test_merge_vars_from_keys_only_for_configs():
    # with no var files, keys map straight into the configuration side.
    assert merge_vars({"stuff": "todo"}) == ({"stuff": "todo"}, {})


def test_merge_config_vars_from_json_file():
    # "configuration" entries in a JSON var file are merged in.
    with tempfile.NamedTemporaryFile(suffix=".json") as f:
        f.write(
            json.dumps({"configuration": {"otherstuff": "tobedone"}}).encode("utf-8")
        )
        f.seek(0)
        assert merge_vars({"stuff": "todo"}, [f.name]) == (
            {"stuff": "todo", "otherstuff": "tobedone"},
            {},
        )


def test_merge_config_vars_from_cli_override_from_file():
    # inline (CLI) values take precedence over file-provided ones.
    with tempfile.NamedTemporaryFile(suffix=".json") as f:
        f.write(json.dumps({"configuration": {"stuff": "tobedone"}}).encode("utf-8"))
        f.seek(0)
        assert merge_vars({"stuff": "todo"}, [f.name]) == ({"stuff": "todo"}, {})


def test_merge_secret_vars_from_json_file():
    # "secrets" entries land in the second element of the returned pair.
    with tempfile.NamedTemporaryFile(suffix=".json") as f:
        f.write(json.dumps({"secrets": {"otherstuff": "tobedone"}}).encode("utf-8"))
        f.seek(0)
        assert merge_vars({"stuff": "todo"}, [f.name]) == (
            {"stuff": "todo"},
            {"otherstuff": "tobedone"},
        )


def test_merge_config_vars_from_yaml_file():
    # YAML var files behave like JSON ones for configuration entries.
    with tempfile.NamedTemporaryFile(suffix=".yaml") as f:
        f.write(
            yaml.dump({"configuration": {"otherstuff": "tobedone"}}).encode("utf-8")
        )
        f.seek(0)
        assert merge_vars({"stuff": "todo"}, [f.name]) == (
            {"stuff": "todo", "otherstuff": "tobedone"},
            {},
        )


def test_merge_secret_vars_from_yaml_file():
    # YAML var files behave like JSON ones for secret entries.
    with tempfile.NamedTemporaryFile(suffix=".yaml") as f:
        f.write(yaml.dump({"secrets": {"otherstuff": "tobedone"}}).encode("utf-8"))
        f.seek(0)
        assert merge_vars({"stuff": "todo"}, [f.name]) == (
            {"stuff": "todo"},
            {"otherstuff": "tobedone"},
        )


def test_read_env_from_env_file():
    # .env var files are loaded straight into os.environ as a side effect.
    assert "STUFF" not in os.environ
    with tempfile.NamedTemporaryFile(suffix=".env") as f:
        f.write(b"STUFF=todo")
        f.seek(0)
        merge_vars(var_files=[f.name])
        assert os.environ["STUFF"] == "todo"
    os.environ.clear()
def test_convert_int_var():
    # "name:int=value" parses the value as an int.
    assert convert_vars(["age:int=45"]) == {"age": 45}


def test_convert_float_var():
    assert convert_vars(["age:float=45"]) == {"age": 45.0}


def test_convert_bytes_var():
    assert convert_vars(["todo:bytes=stuff"]) == {"todo": b"stuff"}


def test_convert_str_var():
    assert convert_vars(["todo:str=stuff"]) == {"todo": "stuff"}


def test_convert_default_to_str_var():
    # omitting the type annotation defaults to str.
    assert convert_vars(["todo=stuff"]) == {"todo": "stuff"}


def test_convert_invalid_format():
    # entries must contain '=' separating name and value.
    with pytest.raises(ValueError):
        convert_vars(["todo/stuff"])


def test_convert_invalid_type():
    # only the supported type annotations are accepted.
    with pytest.raises(ValueError):
        convert_vars(["todo:object=stuff"])
@patch.dict("os.environ", {"KUBE_TOKEN": "value2"})
def test_should_override_load_configuration_with_var():
    # the second positional argument overrides resolved values.
    config = load_configuration(
        {
            "token1": "value1",
            "token2": {"type": "env", "key": "KUBE_TOKEN"},
            "token3": {"type": "env", "key": "UNDEFINED", "default": "value3"},
        },
        {"token1": "othervalue1", "token2": "othervalue2"},
    )
    assert config["token1"] == "othervalue1"
    assert config["token2"] == "othervalue2"
    assert config["token3"] == "value3"


# see https://github.com/chaostoolkit/chaostoolkit-lib/issues/195
def test_load_nested_object_configuration():
    # nested dict values must be passed through untouched.
    config = load_configuration(
        {"nested": {"onea": "fdsfdsf", "lol": {"haha": [1, 2, 3]}}}
    )
    assert isinstance(config["nested"], dict)
    assert config["nested"]["onea"] == "fdsfdsf"
    assert config["nested"]["lol"] == {"haha": [1, 2, 3]}
@patch.dict(
    "os.environ",
    {
        "TEST_ENV_VAR_NO_TYPE": "should_be_a_string",
        "TEST_ENV_VAR_STRING": "should_also_be_a_string",
        "TEST_ENV_VAR_INT": "1000",
        "TEST_ENV_VAR_FLOAT": "30.54321",
        "TEST_ENV_VAR_BYTES": "these_are_bytes",
    },
)
def test_that_environment_variables_are_typed_correctly():
    # env_var_type coerces the raw environment string to the declared type;
    # without it the value stays a string.
    config = load_configuration(
        {
            "token1": {"type": "env", "key": "TEST_ENV_VAR_NO_TYPE"},
            "token2": {
                "type": "env",
                "key": "TEST_ENV_VAR_STRING",
                "env_var_type": "str",
            },
            "token3": {"type": "env", "key": "TEST_ENV_VAR_INT", "env_var_type": "int"},
            "token4": {
                "type": "env",
                "key": "TEST_ENV_VAR_FLOAT",
                "env_var_type": "float",
            },
            "token5": {
                "type": "env",
                "key": "TEST_ENV_VAR_BYTES",
                "env_var_type": "bytes",
            },
        }
    )
    assert config["token1"] == "should_be_a_string"
    assert config["token2"] == "should_also_be_a_string"
    assert config["token3"] == int(1000)
    # BUG FIX: the last two expected values were corrupted placeholders
    # ("<PASSWORD>"); restored from the patched environment values above.
    assert config["token4"] == float(30.54321)
    assert config["token5"] == b"these_are_bytes"
|
<gh_stars>1-10
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import shutil, re, sys, warnings, random, itertools, datetime, time, html
from collections.abc import Iterable
# VerticaPy Modules
import verticapy
from verticapy.errors import *
# Other Modules
import numpy as np
#
#
# Functions to use to simplify the coding.
#
# ---#
def all_comb(X: list):
    """Return every non-empty combination (as tuples) of the elements of X,
    ordered by combination size."""
    result = []
    # Start at size 1: the original filtered out the empty combination.
    for size in range(1, len(X) + 1):
        result.extend(itertools.combinations(X, size))
    return result
# ---#
def arange(start: float, stop: float, step: float):
    """Pure-Python analogue of numpy.arange: [start, start+step, ...) < stop.

    :raises ParameterError: if step is not strictly positive.
    """
    check_types(
        [
            ("start", start, [int, float]),
            ("stop", stop, [int, float]),
            ("step", step, [int, float]),
        ]
    )
    # BUG FIX: the original only rejected step < 0, so step == 0 made the
    # while-loop below spin forever.
    if step <= 0:
        raise ParameterError("Parameter 'step' must be greater than 0")
    L_final = []
    tmp = start
    while tmp < stop:
        L_final += [tmp]
        tmp += step
    return L_final
# ---#
def bin_spatial_to_str(category: str, column: str = "{}"):
    """Wrap a column expression so binary/spatial values render as text;
    other categories are returned unchanged."""
    templates = {
        "binary": "TO_HEX({})",
        "spatial": "ST_AsText({})",
    }
    template = templates.get(category)
    return column if template is None else template.format(column)
# ---#
def check_types(types_list: list = []):
    """Soft parameter validation: emit a Warning (never raise) when a value
    does not match its declared types.

    Each element of types_list is a 3-tuple:
    (parameter name, value, allowed Python types OR list of allowed strings).

    NOTE(review): the mutable default argument is harmless here because the
    list is only iterated, never mutated.
    """
    for elem in types_list:
        # If elem[2] contains any non-type entry, it is a list of allowed
        # string values rather than a list of Python types.
        list_check = False
        for sub_elem in elem[2]:
            if not (isinstance(sub_elem, type)):
                list_check = True
        if list_check:
            # Enumerated-string mode: value must be one of the strings in
            # elem[2] (lowercased match first, then exact). None is allowed.
            if not (isinstance(elem[1], str)) and (elem[1] != None):
                warning_message = (
                    "Parameter '{0}' must be of type {1}, found type {2}"
                ).format(elem[0], str, type(elem[1]))
                warnings.warn(warning_message, Warning)
            if (elem[1] != None) and (
                elem[1].lower() not in elem[2] and elem[1] not in elem[2]
            ):
                warning_message = "Parameter '{}' must be in [{}], found '{}'".format(
                    elem[0], "|".join(elem[2]), elem[1]
                )
                warnings.warn(warning_message, Warning)
        else:
            # Type mode: None is always accepted; str_sql stands in for str.
            all_types = elem[2] + [type(None)]
            if str in all_types:
                all_types += [str_sql]
            if not (isinstance(elem[1], tuple(all_types))):
                if (
                    (list in elem[2])
                    and isinstance(elem[1], Iterable)
                    and not (isinstance(elem[1], (dict, str)))
                ):
                    # Any non-dict, non-str iterable is accepted where a
                    # list is allowed.
                    pass
                elif len(elem[2]) == 1:
                    warning_message = "Parameter '{0}' must be of type {1}, found type {2}".format(
                        elem[0], elem[2][0], type(elem[1])
                    )
                    warnings.warn(warning_message, Warning)
                else:
                    warning_message = (
                        "Parameter '{0}' type must be one of the following"
                        " {1}, found type {2}"
                    ).format(elem[0], elem[2], type(elem[1]))
                    warnings.warn(warning_message, Warning)
# ---#
def clean_query(query: str):
    """Normalize a SQL string: drop '--' comments, flatten tabs/newlines,
    collapse repeated spaces, and trim surrounding spaces and semicolons."""
    no_comments = re.sub(r"--.+(\n|\Z)", "", query)
    flattened = no_comments.replace("\t", " ").replace("\n", " ")
    collapsed = re.sub(" +", " ", flattened)
    # Equivalent to the original's two trimming while-loops.
    return collapsed.strip("; ")
# ---#
def color_dict(d: dict, idx: int = 0):
    """Pick a color from a style dict: a plain string is returned as-is, a
    sequence is cycled by idx; fall back to the global palette otherwise."""
    if "color" not in d:
        from verticapy.plot import gen_colors

        palette = gen_colors()
        return palette[idx % len(palette)]
    chosen = d["color"]
    if isinstance(chosen, str):
        return chosen
    return chosen[idx % len(chosen)]
# ---#
def flat_dict(d: dict) -> str:
    """Render a dict as ', key=value, ...' with string values double-quoted;
    an empty dict yields an empty string."""
    parts = []
    for key, value in d.items():
        quote = '"' if isinstance(value, str) else ""
        parts.append("{}={}{}{}".format(key, quote, value, quote))
    joined = ", ".join(parts)
    return ", {}".format(joined) if joined else joined
# ---#
def executeSQL(
    query: str,
    title: str = "",
    data: list = [],
    method: str = "cursor",
    path: str = "",
    print_time_sql: bool = True,
):
    """Execute a SQL query on the current VerticaPy cursor.

    :param query: SQL text; comments and extra whitespace are stripped first.
    :param title: label printed alongside the query when 'sql_on' is set.
    :param data: if non-empty, executed via cursor.executemany(query, data).
    :param method: 'cursor' (default), 'fetchrow', 'fetchall',
        'fetchfirstelem', or 'copy' (bulk-load the file at ``path``).
    :param path: file path, used only by method='copy'.
    :param print_time_sql: when False, suppresses both the SQL echo and the
        elapsed-time printout regardless of the global options.
    :return: the fetched row / rows / first element, or the cursor itself.
    """
    check_types(
        [
            ("query", query, [str]),
            ("title", title, [str]),
            (
                "method",
                method,
                ["cursor", "fetchrow", "fetchall", "fetchfirstelem", "copy"],
            ),
        ]
    )
    from verticapy.connect import current_cursor

    query = clean_query(query)
    cursor = current_cursor()
    if verticapy.options["sql_on"] and print_time_sql:
        print_query(query, title)
    start_time = time.time()
    if data:
        cursor.executemany(query, data)
    elif method == "copy":
        # COPY ... FROM STDIN: stream the local file through the cursor.
        with open(path, "r") as fs:
            cursor.copy(query, fs)
    else:
        cursor.execute(query)
    elapsed_time = time.time() - start_time
    if verticapy.options["time_on"] and print_time_sql:
        print_time(elapsed_time)
    if method == "fetchrow":
        return cursor.fetchone()
    elif method == "fetchfirstelem":
        return cursor.fetchone()[0]
    elif method == "fetchall":
        return cursor.fetchall()
    return cursor
# ---#
def format_magic(x, return_cat: bool = False, cast_float_int_to_str: bool = False):
    """Format a Python value as an SQL literal/expression string.

    vColumns are represented by their alias; numbers and str_sql
    expressions pass through unchanged (unless cast_float_int_to_str
    forces numbers to be quoted); None becomes 'NULL'; other values are
    single-quoted with embedded quotes doubled. With return_cat=True a
    (value, category) tuple is returned instead.
    """
    from verticapy.vcolumn import vColumn

    if isinstance(x, vColumn):
        val = x.alias
    elif (isinstance(x, (int, float)) and not (cast_float_int_to_str)) or isinstance(
        x, str_sql
    ):
        val = x
    elif isinstance(x, type(None)):
        val = "NULL"
    elif isinstance(x, (int, float)) or not (cast_float_int_to_str):
        # Quote for SQL, escaping embedded single quotes.
        val = "'{}'".format(str(x).replace("'", "''"))
    else:
        # NOTE(review): reached only for non-numeric values when
        # cast_float_int_to_str is True; the value is passed through
        # unquoted -- confirm this is intentional.
        val = x
    if return_cat:
        return (val, get_category_from_python_type(x))
    else:
        return val
# ---#
def gen_name(L: list):
    """Build an SQL-safe name: lowercase each element, keep only
    alphanumerics and underscores, and join the parts with '_'."""
    parts = []
    for item in L:
        text = str(item).lower()
        parts.append("".join(c for c in text if c.isalnum() or c == "_"))
    return "_".join(parts)
# ---#
def get_category_from_python_type(expr):
    """Map a Python object to its VerticaPy category name. Objects that
    expose a callable ``category()`` answer for themselves; otherwise the
    Python type decides ('' for anything unrecognized)."""
    try:
        return expr.category()
    except:
        pass
    if isinstance(expr, float):
        return "float"
    if isinstance(expr, int):
        return "int"
    if isinstance(expr, str):
        return "text"
    if isinstance(expr, (datetime.date, datetime.datetime)):
        return "date"
    return ""
# ---#
def get_category_from_vertica_type(ctype: str = ""):
    """Translate a Vertica column type name into a VerticaPy category:
    'date', 'int', 'float', 'spatial', 'binary', 'uuid', 'text', or
    'undefined' for an empty input."""
    check_types([("ctype", ctype, [str])])
    ctype = ctype.lower()
    if not ctype:
        return "undefined"
    if ctype.startswith(("date", "time", "interval")) or ctype == "smalldatetime":
        return "date"
    if ctype.startswith(("int", "bool")) or ctype in (
        "tinyint",
        "smallint",
        "bigint",
    ):
        return "int"
    if (
        ctype.startswith(("num", "float", "decimal", "double", "real"))
        or ctype == "money"
    ):
        return "float"
    # Spatial is tested before 'binary' so 'long varbinary' lands here.
    if ctype.startswith("geo") or "long varbinary" in ctype:
        return "spatial"
    if "byte" in ctype or ctype == "raw" or "binary" in ctype:
        return "binary"
    if "uuid" in ctype:
        return "uuid"
    return "text"
# ---#
def get_match_index(x: str, col_list: list, str_check: bool = True):
    """Return the position of column *x* in *col_list*, or None when
    absent. With str_check=True the comparison is case-insensitive on
    quoted identifiers; the strict equality test is always applied too."""
    for position, column in enumerate(col_list):
        if str_check and quote_ident(x.lower()) == quote_ident(column.lower()):
            return position
        if x == column:
            return position
    return None
# ---#
def get_narrow_tablesample(t, use_number_as_category: bool = False):
    """Flatten a tablesample into a narrow list of [column, index, value]
    triplets (values cast to float when possible).

    With use_number_as_category=True, column and index labels are replaced
    by integer ids and both label lists are returned alongside the result.
    """
    result = []
    t = t.values
    if use_number_as_category:
        categories_alpha = t["index"]
        # Column names without the leading entry -- assumes the first key
        # of t is 'index'; TODO confirm.
        categories_beta = [elem for elem in t]
        del categories_beta[0]
        bijection_categories = {}
        # NOTE(review): both enumerations start at 0, so a label present
        # in the index AND in the columns keeps the column id -- confirm.
        for idx, elem in enumerate(categories_alpha):
            bijection_categories[elem] = idx
        for idx, elem in enumerate(categories_beta):
            bijection_categories[elem] = idx
    for elem in t:
        if elem != "index":
            for idx, val_tmp in enumerate(t[elem]):
                try:
                    # Prefer a float representation when the value allows it.
                    val = float(val_tmp)
                except:
                    val = val_tmp
                if not (use_number_as_category):
                    result += [[elem, t["index"][idx], val]]
                else:
                    result += [
                        [
                            bijection_categories[elem],
                            bijection_categories[t["index"][idx]],
                            val,
                        ]
                    ]
    if use_number_as_category:
        return result, categories_alpha, categories_beta
    else:
        return result
# ---#
def get_magic_options(line: str):
    """Parse a Jupyter magic argument *line* into an options dictionary.

    Tokens are whitespace-separated; single- or double-quoted values may
    embed spaces (the quotes are stripped). Options must come in
    '-name value' pairs; a token not starting with '-' in an option
    position raises ParsingError.
    """
    # --- tokenize the line ---
    i, n, splits = 0, len(line), []
    while i < n:
        while i < n and line[i] == " ":
            i += 1
        if i < n:
            k = i
            op = line[i]
            if op in ('"', "'"):
                # Quoted token: scan forward to the matching closing quote.
                i += 1
                while i < n - 1:
                    if line[i] == op and line[i + 1] != op:
                        break
                    i += 1
                i += 1
                quote_in = True
            else:
                while i < n and line[i] != " ":
                    i += 1
                quote_in = False
            if quote_in:
                # Strip the surrounding quotes.
                splits += [line[k + 1 : i - 1]]
            else:
                splits += [line[k:i]]
    # --- build the dictionary from '-option value' pairs ---
    n, i, all_options_dict = len(splits), 0, {}
    while i < n:
        if splits[i][0] != "-":
            # BUG FIX: the message previously formatted splits[i][0]
            # (only the first character) instead of the full token.
            raise ParsingError(
                "Can not parse option '{0}'. Options must start with '-'.".format(
                    splits[i]
                )
            )
        all_options_dict[splits[i]] = splits[i + 1]
        i += 2
    return all_options_dict
# ---#
def get_random_function(rand_int=None):
    """Return the SQL expression generating randomness, honoring the
    'random_state' option for reproducibility. With an integer *rand_int*,
    the expression yields integers in [0, rand_int)."""
    seed = verticapy.options["random_state"]
    seeded = isinstance(seed, int)
    if isinstance(rand_int, int):
        if seeded:
            return "FLOOR({} * SEEDED_RANDOM({}))".format(rand_int, seed)
        return "RANDOMINT({})".format(rand_int)
    if seeded:
        return "SEEDED_RANDOM({})".format(seed)
    return "RANDOM()"
# ---#
def get_session(add_username: bool = True):
    """Return the current Vertica session id (the hex part of
    CURRENT_SESSION parsed as an int), optionally prefixed with the user
    name as '<user>_<session>'."""
    session = executeSQL(
        "SELECT CURRENT_SESSION();", method="fetchfirstelem", print_time_sql=False
    )
    session = int(session.split(":")[1], base=16)
    if not add_username:
        return session
    user = executeSQL(
        "SELECT USERNAME();", method="fetchfirstelem", print_time_sql=False
    )
    return "{}_{}".format(user, session)
# ---#
def gen_tmp_name(schema: str = "", name: str = ""):
    """Generate a quoted, session-unique temporary relation name of the
    form "_verticapy_tmp_<name>_<user>_<session>_<rand>_", optionally
    qualified with *schema*."""
    session_user = get_session()
    L = session_user.split("_")
    # Keep only alphanumeric characters of the user and session parts.
    L[0] = "".join(filter(str.isalnum, L[0]))
    L[1] = "".join(filter(str.isalnum, L[1]))
    # BUG FIX: random.randint requires integer bounds -- 10e9 is a float
    # and raises on modern Python (floats rejected since 3.12).
    random_int = random.randint(0, 10 ** 10)
    name = '"_verticapy_tmp_{}_{}_{}_{}_"'.format(name.lower(), L[0], L[1], random_int)
    if schema:
        name = "{}.{}".format(quote_ident(schema), name)
    return name
# ---#
def get_verticapy_function(key: str, method: str = ""):
    """Normalize an aggregation/analytic function name to its VerticaPy
    alias (e.g. 'mean' -> 'avg', 'median' -> '50%', 'next' -> 'lead').

    Percentile keys like '25.0%' (or 'approx_25.0%') are canonicalized to
    drop a redundant decimal part. With method='vertica', the aliases
    'var' and 'std' are mapped back to the native Vertica names.
    """
    key = key.lower()
    if key in ("median", "med"):
        key = "50%"
    elif key in ("approx_median", "approximate_median"):
        key = "approx_50%"
    elif key == "100%":
        key = "max"
    elif key == "0%":
        key = "min"
    elif key == "approximate_count_distinct":
        # BUG FIX: a duplicated elif branch for this key was removed.
        key = "approx_unique"
    elif key == "ema":
        key = "exponential_moving_average"
    elif key == "mean":
        key = "avg"
    elif key in ("stddev", "stdev"):
        key = "std"
    elif key == "product":
        key = "prod"
    elif key == "variance":
        key = "var"
    elif key == "kurt":
        key = "kurtosis"
    elif key == "skew":
        key = "skewness"
    elif key in ("top1", "mode"):
        key = "top"
    elif key == "top1_percent":
        key = "top_percent"
    elif "%" == key[-1]:
        # Canonicalize percentile labels: '25.0%' -> '25%'.
        start = 7 if len(key) >= 7 and key[0:7] == "approx_" else 0
        if float(key[start:-1]) == int(float(key[start:-1])):
            key = "{}%".format(int(float(key[start:-1])))
            if start == 7:
                key = "approx_" + key
    elif key == "row":
        key = "row_number"
    elif key == "first":
        key = "first_value"
    elif key == "last":
        key = "last_value"
    elif key == "next":
        key = "lead"
    elif key in ("prev", "previous"):
        key = "lag"
    if method == "vertica":
        if key == "var":
            key = "variance"
        elif key == "std":
            key = "stddev"
    return key
# ---#
def heuristic_length(i):
    """Average path length of an unsuccessful BST search over *i* points
    (isolation-forest normalizer): 0 for i < 2, 1 for i == 2, otherwise
    2*(H(i-1)) - 2*(i-1)/i with the harmonic number approximated via
    ln + Euler-Mascheroni constant."""
    EULER_MASCHERONI = 0.5772156649
    if i == 2:
        return 1
    if i > 2:
        return 2 * (np.log(i - 1) + EULER_MASCHERONI) - 2 * (i - 1) / i
    return 0
# ---#
def indentSQL(query: str):
    """Re-indent an SQL query for display purposes.

    SELECT/FROM and the select list are split onto their own lines; line
    breaks that fall inside parenthesized expressions (function calls,
    OVER clauses) are then merged back into a single line.
    """
    query = (
        query.replace("SELECT", "\n   SELECT\n    ")
        .replace("FROM", "\n FROM\n")
        .replace(",", ",\n    ")
    )
    query = query.replace("VERTICAPY_SUBTABLE", "\nVERTICAPY_SUBTABLE")
    n = len(query)
    return_l = []
    j = 1
    while j < n - 9:
        # An opening parenthesis belonging to a function call or an OVER
        # clause (not to a subquery starting with SELECT).
        if (
            query[j] == "("
            and (query[j - 1].isalnum() or query[j - 5 : j] == "OVER ")
            and query[j + 1 : j + 7] != "SELECT"
        ):
            k = 1
            # Record every newline position until the parenthesis closes
            # (k tracks the nesting depth).
            while k > 0 and j < n - 9:
                j += 1
                if query[j] == "\n":
                    return_l += [j]
                elif query[j] == ")":
                    k -= 1
                elif query[j] == "(":
                    k += 1
        else:
            j += 1
    query_print = ""
    i = 0 if query[0] != "\n" else 1
    # Rebuild the query, replacing each recorded newline (and the indent
    # that follows it) with a single space.
    while return_l:
        j = return_l[0]
        query_print += query[i:j]
        if query[j] != "\n":
            query_print += query[j]
        else:
            i = j + 1
            while query[i] == " " and i < n - 9:
                i += 1
            query_print += " "
        del return_l[0]
    query_print += query[i:n]
    return query_print
# ---#
def insert_verticapy_schema(
    model_name: str,
    model_type: str,
    model_save: dict,
    category: str = "VERTICAPY_MODELS",
):
    """Register a trained model in the 'verticapy' schema.

    Inserts one row in verticapy.models plus one row per attribute in
    verticapy.attr. Emits a warning (without raising) when the schema is
    missing; raises NameError when the model name is already taken.
    """
    sql = "SELECT * FROM columns WHERE table_schema='verticapy';"
    result = executeSQL(sql, method="fetchrow", print_time_sql=False)
    if not (result):
        warning_message = (
            "The VerticaPy schema doesn't exist or is "
            "incomplete. The model can not be stored.\n"
            "Please use create_verticapy_schema function "
            "to set up the schema and the drop function to "
            "drop it if it is corrupted."
        )
        warnings.warn(warning_message, Warning)
    else:
        size = sys.getsizeof(model_save)
        create_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
        try:
            model_name = quote_ident(model_name)
            # Refuse to overwrite an existing model of the same name.
            sql = "SELECT * FROM verticapy.models WHERE LOWER(model_name) = '{}'".format(
                model_name.lower()
            )
            result = executeSQL(sql, method="fetchrow", print_time_sql=False)
            if result:
                raise NameError("The model named {} already exists.".format(model_name))
            else:
                sql = (
                    "INSERT INTO verticapy.models(model_name, category, "
                    "model_type, create_time, size) VALUES ('{}', '{}', '{}', "
                    "'{}', {});"
                ).format(model_name, category, model_type, create_time, size)
                executeSQL(sql, print_time_sql=False)
                executeSQL("COMMIT;", print_time_sql=False)
                # Store every model attribute as its own row.
                for elem in model_save:
                    sql = (
                        "INSERT INTO verticapy.attr(model_name, attr_name, value) "
                        "VALUES ('{0}', '{1}', '{2}');"
                    ).format(model_name, elem, str(model_save[elem]).replace("'", "''"))
                    executeSQL(sql, print_time_sql=False)
                executeSQL("COMMIT;", print_time_sql=False)
        except Exception as e:
            warning_message = "The VerticaPy model could not be stored:\n{}".format(e)
            warnings.warn(warning_message, Warning)
            raise
# ---#
def isnotebook():
    """Return True when running inside a Jupyter notebook/qtconsole,
    False in a terminal IPython shell or a plain Python interpreter."""
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # get_ipython is undefined outside IPython environments.
        return False
    return shell_name == "ZMQInteractiveShell"
# ---#
def levenshtein(s: str, t: str):
    """Compute the Levenshtein (edit) distance between *s* and *t* with
    the classic dynamic-programming matrix (insert/delete/substitute,
    each costing 1)."""
    rows = len(s) + 1
    cols = len(t) + 1
    dist = [[0 for _ in range(cols)] for _ in range(rows)]
    # Distance from the empty prefix: pure insertions/deletions.
    for i in range(1, rows):
        dist[i][0] = i
    for j in range(1, cols):
        dist[0][j] = j
    for col in range(1, cols):
        for row in range(1, rows):
            cost = 0 if s[row - 1] == t[col - 1] else 1
            dist[row][col] = min(
                dist[row - 1][col] + 1,
                dist[row][col - 1] + 1,
                dist[row - 1][col - 1] + cost,
            )
    # BUG FIX: returning dist[row][col] relied on the loop variables and
    # raised a NameError whenever s or t was empty. Index explicitly.
    return dist[rows - 1][cols - 1]
# ---#
def print_query(query: str, title: str = ""):
    """Pretty-print an SQL *query* under *title*: HTML in notebooks,
    plain text elsewhere."""
    terminal_width = shutil.get_terminal_size().columns
    formatted = indentSQL(query)
    if not isnotebook():
        print("$ {} $\n".format(title))
        print(formatted)
        print("-" * int(terminal_width) + "\n")
        return
    from IPython.core.display import HTML, display

    display(HTML("<h4>{}</h4>".format(title)))
    formatted = formatted.replace("\n", " <br>").replace("  ", " &emsp; ")
    display(HTML(formatted))
# ---#
def print_table(
    data_columns,
    is_finished: bool = True,
    offset: int = 0,
    repeat_first_column: bool = False,
    first_element: str = "",
    return_html: bool = False,
    dtype: dict = {},
    percent: dict = {},
):
    """Render a table (list of columns, each a list whose first item is
    the header) as aligned plain text or as an HTML table.

    is_finished: when False, an ellipsis row marks truncated data.
    offset: starting row number for the generated index column.
    repeat_first_column: repeat the first column on every text chunk /
        skip generating an index column in HTML mode.
    first_element: label for the top-left cell (text mode).
    return_html: switch between the HTML and the plain-text renderers.
    dtype / percent: per-column type names and non-missing percentages
        shown in the HTML header cells.
    """
    if not (return_html):
        # ----- plain-text rendering -----
        data_columns_rep = [] + data_columns
        if repeat_first_column:
            del data_columns_rep[0]
            columns_ljust_val = min(
                len(max([str(item) for item in data_columns[0]], key=len)) + 4, 40
            )
        else:
            columns_ljust_val = len(str(len(data_columns[0]))) + 2
        screen_columns = shutil.get_terminal_size().columns
        formatted_text = ""
        rjust_val = []
        # Right-justification width per column (capped at 40 characters).
        for idx in range(0, len(data_columns_rep)):
            rjust_val += [
                min(
                    len(max([str(item) for item in data_columns_rep[idx]], key=len))
                    + 2,
                    40,
                )
            ]
        total_column_len = len(data_columns_rep[0])
        # Emit the columns in successive chunks that fit the terminal width.
        while rjust_val != []:
            columns_to_print = [data_columns_rep[0]]
            columns_rjust_val = [rjust_val[0]]
            max_screen_size = int(screen_columns) - 14 - int(rjust_val[0])
            del data_columns_rep[0]
            del rjust_val[0]
            while (max_screen_size > 0) and (rjust_val != []):
                columns_to_print += [data_columns_rep[0]]
                columns_rjust_val += [rjust_val[0]]
                max_screen_size = max_screen_size - 7 - int(rjust_val[0])
                del data_columns_rep[0]
                del rjust_val[0]
            # Prepend either the repeated first column or a row index.
            if repeat_first_column:
                columns_to_print = [data_columns[0]] + columns_to_print
            else:
                columns_to_print = [
                    [i + offset for i in range(0, total_column_len)]
                ] + columns_to_print
            columns_to_print[0][0] = first_element
            columns_rjust_val = [columns_ljust_val] + columns_rjust_val
            column_count = len(columns_to_print)
            for i in range(0, total_column_len):
                for k in range(0, column_count):
                    val = columns_to_print[k][i]
                    # Truncate long cell values to 40 characters.
                    if len(str(val)) > 40:
                        val = str(val)[0:37] + "..."
                    if k == 0:
                        formatted_text += str(val).ljust(columns_rjust_val[k])
                    else:
                        formatted_text += str(val).rjust(columns_rjust_val[k]) + "  "
                if rjust_val != []:
                    formatted_text += " \\\\"
                formatted_text += "\n"
                # Signal that more rows exist beyond this chunk.
                if not (is_finished) and (i == total_column_len - 1):
                    for k in range(0, column_count):
                        if k == 0:
                            formatted_text += "...".ljust(columns_rjust_val[k])
                        else:
                            formatted_text += "...".rjust(columns_rjust_val[k]) + "  "
                    if rjust_val != []:
                        formatted_text += " \\\\"
                    formatted_text += "\n"
        return formatted_text
    else:
        # ----- HTML rendering -----
        if not (repeat_first_column):
            # Generate an index column starting at offset + 1.
            data_columns = [
                [""] + list(range(1 + offset, len(data_columns[0]) + offset))
            ] + data_columns
        m, n = len(data_columns), len(data_columns[0])
        cell_width = []
        for elem in data_columns:
            cell_width += [min(5 * max([len(str(item)) for item in elem]) + 80, 280)]
        html_table = "<table>"
        for i in range(n):
            if i == 0:
                html_table += '<thead style = "display: table; ">'
            if i == 1 and n > 0:
                # Body rows live in a scrollable tbody.
                html_table += (
                    '<tbody style = "display: block; max-height: '
                    '300px; overflow-y: scroll;">'
                )
            html_table += "<tr>"
            for j in range(m):
                val = data_columns[j][i]
                if isinstance(val, str):
                    val = html.escape(val)
                if val == None:
                    val = "[null]"
                    color = "#999999"
                else:
                    # Booleans render as check/cross marks in full mode.
                    if isinstance(val, bool) and (
                        verticapy.options["mode"] in ("full", None)
                    ):
                        val = (
                            "<center>&#9989;</center>"
                            if (val)
                            else "<center>&#10060;</center>"
                        )
                    color = "black"
                html_table += '<td style="background-color: '
                if (
                    (j == 0)
                    or (i == 0)
                    or (verticapy.options["mode"] not in ("full", None))
                ):
                    html_table += " #FFFFFF; "
                elif val == "[null]":
                    html_table += " #EEEEEE; "
                else:
                    html_table += " #FAFAFA; "
                html_table += "color: {}; white-space:nowrap; ".format(color)
                # Header cells and edges get stronger borders in full mode.
                if verticapy.options["mode"] in ("full", None):
                    if (j == 0) or (i == 0):
                        html_table += "border: 1px solid #AAAAAA; "
                    else:
                        html_table += "border-top: 1px solid #DDDDDD; "
                        if ((j == m - 1) and (i == n - 1)) or (j == m - 1):
                            html_table += "border-right: 1px solid #AAAAAA; "
                        else:
                            html_table += "border-right: 1px solid #DDDDDD; "
                        if ((j == m - 1) and (i == n - 1)) or (i == n - 1):
                            html_table += "border-bottom: 1px solid #AAAAAA; "
                        else:
                            html_table += "border-bottom: 1px solid #DDDDDD; "
                if i == 0:
                    html_table += "height: 30px; "
                if (j == 0) or (cell_width[j] < 120):
                    html_table += "text-align: center; "
                else:
                    html_table += "text-align: center; "
                html_table += 'min-width: {}px; max-width: {}px;"'.format(
                    cell_width[j], cell_width[j]
                )
                if (j == 0) or (i == 0):
                    if j != 0:
                        # Header cell: build category icon, type label and
                        # missing-values bar from dtype/percent metadata.
                        type_val, category, missing_values = "", "", ""
                        if data_columns[j][0] in dtype and (
                            verticapy.options["mode"] in ("full", None)
                        ):
                            if dtype[data_columns[j][0]] != "undefined":
                                type_val = dtype[data_columns[j][0]].capitalize()
                                category = get_category_from_vertica_type(type_val)
                                if (category == "spatial") or (
                                    (
                                        "lat" in val.lower().split(" ")
                                        or "latitude" in val.lower().split(" ")
                                        or "lon" in val.lower().split(" ")
                                        or "longitude" in val.lower().split(" ")
                                    )
                                    and category == "float"
                                ):
                                    category = '<div style="margin-bottom: 6px;">&#x1F30E;</div>'
                                elif type_val.lower() == "boolean":
                                    category = '<div style="margin-bottom: 6px; color: #0073E7;">010</div>'
                                elif category in ("int", "float", "binary", "uuid"):
                                    category = '<div style="margin-bottom: 6px; color: #19A26B;">123</div>'
                                elif category == "text":
                                    category = (
                                        '<div style="margin-bottom: 6px;">Abc</div>'
                                    )
                                elif category == "date":
                                    category = '<div style="margin-bottom: 6px;">&#x1F4C5;</div>'
                            else:
                                category = '<div style="margin-bottom: 6px;"></div>'
                        if type_val != "":
                            ctype = (
                                '<div style="color: #FE5016; margin-top: 6px; '
                                'font-size: 0.95em;">{0}</div>'
                            ).format(dtype[data_columns[j][0]].capitalize())
                        else:
                            ctype = '<div style="color: #FE5016; margin-top: 6px; font-size: 0.95em;"></div>'
                        if data_columns[j][0] in percent:
                            per = int(float(percent[data_columns[j][0]]))
                            try:
                                # Adjust the bar width to the label length.
                                if per == 100:
                                    diff = 36
                                elif per > 10:
                                    diff = 30
                                else:
                                    diff = 24
                            except:
                                pass
                            missing_values = (
                                '<div style="float: right; margin-top: 6px;">{0}%</div><div '
                                'style="width: calc(100% - {1}px); height: 8px; margin-top: '
                                '10px; border: 1px solid black;"><div style="width: {0}%; '
                                'height: 6px; background-color: orange;"></div></div>'
                            ).format(per, diff)
                    else:
                        ctype, missing_values, category = "", "", ""
                    if (i == 0) and (j == 0):
                        # Top-left corner: the VerticaPy logo in full mode.
                        if dtype and (verticapy.options["mode"] in ("full", None)):
                            val = verticapy.gen_verticapy_logo_html(size="45px")
                        else:
                            val = ""
                    elif cell_width[j] > 240:
                        val = (
                            '<input style="border: none; text-align: center; width: {0}px;" '
                            'type="text" value="{1}" readonly>'
                        ).format(cell_width[j] - 10, val)
                    html_table += ">{}<b>{}</b>{}{}</td>".format(
                        category, val, ctype, missing_values
                    )
                elif cell_width[j] > 240:
                    # Very wide cells become read-only inputs for usability.
                    background = "#EEEEEE" if val == "[null]" else "#FAFAFA"
                    if verticapy.options["mode"] not in ("full", None):
                        background = "#FFFFFF"
                    html_table += (
                        '><input style="background-color: {0}; border: none; '
                        'text-align: center; width: {1}px;" type="text" '
                        'value="{2}" readonly></td>'
                    ).format(background, cell_width[j] - 10, val)
                else:
                    html_table += ">{}</td>".format(val)
            html_table += "</tr>"
            if i == 0:
                html_table += "</thead>"
            if i == n - 1 and n > 0:
                html_table += "</tbody>"
        html_table += "</table>"
        return html_table
# ---#
def print_time(elapsed_time: float):
    """Display a query's elapsed time, rounded to milliseconds (HTML in
    notebooks, plain text otherwise)."""
    terminal_width = shutil.get_terminal_size().columns
    rounded = round(elapsed_time, 3)
    if isnotebook():
        from IPython.core.display import HTML, display

        display(HTML("<div><b>Execution: </b> {0}s</div>".format(rounded)))
    else:
        print("Execution: {0}s".format(rounded))
        print("-" * int(terminal_width) + "\n")
# ---#
def quote_ident(column: str):
    """
    ---------------------------------------------------------------------------
    Returns the specified string argument in the format that is required in
    order to use that string as an identifier in an SQL statement.

    Parameters
    ----------
    column: str
        Column's name.

    Returns
    -------
    str
        Formatted column' name.
    """
    ident = str(column)
    # Strip one pair of surrounding double quotes before re-quoting.
    if len(ident) >= 2 and ident.startswith('"') and ident.endswith('"'):
        ident = ident[1:-1]
    return '"{}"'.format(ident.replace('"', '""'))
# ---#
def replace_vars_in_query(query: str, locals_dict: dict):
    """Substitute ':variable' placeholders in *query* with SQL renderings
    of the matching objects from *locals_dict*.

    Supports ':var[i][j]' indexing. vDataFrames/tablesamples/pandas
    DataFrames are replaced by relations; lists become comma-separated
    values. A failed substitution emits a warning and leaves the token.
    """
    from verticapy import vDataFrame, tablesample, pandas_to_vertica
    import pandas as pd

    # ':name' tokens; the lookbehind skips '::' SQL type casts.
    variables, query_tmp = re.findall("(?<!:):[A-Za-z0-9_\[\]]+", query), query
    for v in variables:
        try:
            var = v[1:]
            # Collect the (start, end) spans of every [...] index suffix.
            n, splits = var.count("["), []
            if var.count("]") == n and n > 0:
                i, size = 0, len(var)
                while i < size:
                    if var[i] == "[":
                        k = i + 1
                        while i < size and var[i] != "]":
                            i += 1
                        splits += [(k, i)]
                    i += 1
                var = var[: splits[0][0] - 1]
            val = locals_dict[var]
            if splits:
                # Apply each recorded index to drill into the object.
                for s in splits:
                    val = val[int(v[s[0] + 1 : s[1] + 1])]
            fail = False
        except Exception as e:
            warning_message = "Failed to replace variables in the query.\nError: {0}".format(
                e
            )
            warnings.warn(warning_message, Warning)
            fail = True
        if not (fail):
            # Render the Python object as an SQL fragment.
            if isinstance(val, vDataFrame):
                val = val.__genSQL__()
            elif isinstance(val, tablesample):
                val = "({0}) VERTICAPY_SUBTABLE".format(val.to_sql())
            elif isinstance(val, pd.DataFrame):
                val = pandas_to_vertica(val).__genSQL__()
            elif isinstance(val, list):
                val = ", ".join(["NULL" if elem is None else str(elem) for elem in val])
            query_tmp = query_tmp.replace(v, str(val))
    return query_tmp
# ---#
def reverse_score(metric: str):
    """Return False for error-like metrics (lower is better) and True for
    score-like metrics (higher is better)."""
    lower_is_better = (
        "logloss",
        "max",
        "mae",
        "median",
        "mse",
        "msle",
        "rmse",
        "aic",
        "bic",
        "auto",
    )
    return metric not in lower_is_better
# ---#
def schema_relation(relation):
    """Split a relation name into its (schema, relation) parts, both
    returned quoted. A vDataFrame maps to (temp_schema, ""); a plain
    string may be unquoted, half-quoted or fully quoted. Raises
    ParsingError on an unbalanced number of double quotes.
    """
    from verticapy import vDataFrame

    if isinstance(relation, vDataFrame):
        schema, relation = verticapy.options["temp_schema"], ""
    else:
        quote_nb = relation.count('"')
        if quote_nb not in (0, 2, 4):
            raise ParsingError("The format of the input relation is incorrect.")
        if quote_nb == 4:
            # '"schema"."relation"'
            schema_input_relation = relation.split('"')[1], relation.split('"')[3]
        elif quote_nb == 2:
            # BUG FIX: this branch previously tested 'quote_nb == 4' again
            # (dead code) and built a malformed 3-tuple. Handle the two
            # half-quoted forms explicitly.
            parts = relation.split('"')
            if parts[0] == "":
                # '"schema".relation' -> drop the dot before the relation.
                schema_input_relation = (parts[1], parts[2][1:])
            else:
                # 'schema."relation"' -> drop the dot after the schema.
                schema_input_relation = (parts[0][0:-1], parts[1])
        else:
            schema_input_relation = relation.split(".")
        if len(schema_input_relation) == 1:
            schema, relation = "public", relation
        else:
            schema, relation = schema_input_relation[0], schema_input_relation[1]
    return (quote_ident(schema), quote_ident(relation))
# ---#
def type_code_to_dtype(
    type_code: int, display_size: int = 0, precision: int = 0, scale: int = 0
):
    """
    Takes as input the Vertica Python type code and returns its corresponding data type.

    Unknown type codes map to 'Undefined'. Sized character/binary types get
    '(display_size)' appended; Numeric gets '(precision,scale)'.
    """
    types = {
        5: "Boolean",
        6: "Integer",
        7: "Float",
        8: "Char",
        9: "Varchar",
        10: "Date",
        11: "Time",
        12: "Datetime",
        13: "Timestamp with Timezone",
        14: "Interval",
        15: "Time with Timezone",
        16: "Numeric",
        17: "Varbinary",
        114: "Interval Year to Month",
        115: "Long Varchar",
        116: "Long Varbinary",
        117: "Binary",
    }
    if type_code not in types:
        return "Undefined"
    # Treat missing metadata (None) as zero. Identity comparison with
    # 'is None' replaces the previous '== None' checks.
    if display_size is None:
        display_size = 0
    if precision is None:
        precision = 0
    if scale is None:
        scale = 0
    result = types[type_code]
    if type_code in (8, 9, 17, 115, 116, 117) and (display_size > 0):
        result += "({})".format(display_size)
    elif type_code == 16 and (precision > 0):
        result += "({},{})".format(precision, scale)
    return result
# ---#
def updated_dict(
    d1: dict, d2: dict, color_idx: int = 0,
):
    """Merge *d2* into a copy of *d1*. The 'color' key is resolved to a
    single color using *color_idx*; a negative index keeps the whole
    color list, and a string color passes through unchanged."""
    merged = {key: value for key, value in d1.items()}
    for key in d2:
        if key != "color":
            merged[key] = d2[key]
        elif isinstance(d2["color"], str):
            merged["color"] = d2["color"]
        elif color_idx < 0:
            merged["color"] = list(d2["color"])
        else:
            merged["color"] = d2["color"][color_idx % len(d2["color"])]
    return merged
# ---#
class str_sql:
    """Lightweight wrapper around an SQL expression string.

    Python operators applied to a str_sql build new SQL expression
    strings (e.g. ``x + 1`` -> ``(x) + (1)``), so columns and SQL
    snippets can be composed with natural Python syntax. Operands are
    rendered through format_magic.
    """

    # ---#
    def __init__(self, alias, category=""):
        # alias: the SQL expression / column name this object represents.
        # category: VerticaPy category of the expression ('int', 'text', ...).
        self.alias = alias
        self.category_ = category

    # ---#
    def __repr__(self):
        return str(self.alias)

    # ---#
    def __str__(self):
        return str(self.alias)

    # ---#
    def __abs__(self):
        return str_sql("ABS({})".format(self.alias), self.category())

    # ---#
    # Addition concatenates ('||') when the expression is textual.
    def __add__(self, x):
        val = format_magic(x)
        op = (
            "||" if self.category() == "text" and isinstance(x, (str, str_sql)) else "+"
        )
        return str_sql("({}) {} ({})".format(self.alias, op, val), self.category())

    # ---#
    def __radd__(self, x):
        val = format_magic(x)
        op = (
            "||" if self.category() == "text" and isinstance(x, (str, str_sql)) else "+"
        )
        return str_sql("({}) {} ({})".format(val, op, self.alias), self.category())

    # ---#
    def __and__(self, x):
        val = format_magic(x)
        return str_sql("({}) AND ({})".format(self.alias, val), self.category())

    # ---#
    def __rand__(self, x):
        val = format_magic(x)
        return str_sql("({}) AND ({})".format(val, self.alias), self.category())

    # ---#
    def _between(self, x, y):
        """SQL BETWEEN predicate: self BETWEEN x AND y."""
        val1 = str(format_magic(x))
        val2 = str(format_magic(y))
        return str_sql(
            "({}) BETWEEN ({}) AND ({})".format(self.alias, val1, val2), self.category()
        )

    # ---#
    def _in(self, *argv):
        """SQL IN predicate; accepts either a single list or varargs."""
        if (len(argv) == 1) and (isinstance(argv[0], list)):
            x = argv[0]
        elif len(argv) == 0:
            # BUG FIX: the exception was instantiated but never raised.
            raise ParameterError("Method '_in' doesn't work with no parameters.")
        else:
            x = [elem for elem in argv]
        assert isinstance(x, Iterable) and not (
            isinstance(x, str)
        ), "Method '_in' only works on iterable elements other than str. Found {}.".format(
            x
        )
        val = [str(format_magic(elem)) for elem in x]
        val = ", ".join(val)
        return str_sql("({}) IN ({})".format(self.alias, val), self.category())

    # ---#
    def _not_in(self, *argv):
        """SQL NOT IN predicate; accepts either a single list or varargs."""
        if (len(argv) == 1) and (isinstance(argv[0], list)):
            x = argv[0]
        elif len(argv) == 0:
            # BUG FIX: the exception was instantiated but never raised.
            raise ParameterError("Method '_not_in' doesn't work with no parameters.")
        else:
            x = [elem for elem in argv]
        assert isinstance(x, Iterable) and not (
            isinstance(x, str)
        ), "Method '_not_in' only works on iterable elements other than str. Found {}.".format(
            x
        )
        val = [str(format_magic(elem)) for elem in x]
        val = ", ".join(val)
        return str_sql("({}) NOT IN ({})".format(self.alias, val), self.category())

    # ---#
    def _as(self, x):
        """SQL alias: (expr) AS x."""
        return str_sql("({}) AS {}".format(self.alias, x), self.category())

    # ---#
    def _distinct(self):
        return str_sql("DISTINCT ({})".format(self.alias), self.category())

    # ---#
    def _over(self, by: (str, list) = [], order_by: (str, list) = []):
        """SQL analytic clause: expr OVER (PARTITION BY ... ORDER BY ...)."""
        if isinstance(by, str):
            by = [by]
        if isinstance(order_by, str):
            order_by = [order_by]
        by = ", ".join([str(elem) for elem in by])
        if by:
            by = "PARTITION BY {}".format(by)
        order_by = ", ".join([str(elem) for elem in order_by])
        if order_by:
            order_by = "ORDER BY {}".format(order_by)
        return str_sql(
            "{} OVER ({} {})".format(self.alias, by, order_by), self.category()
        )

    # ---#
    # Comparisons with None map to SQL 'IS [NOT] NULL'.
    def __eq__(self, x):
        op = "IS" if (x == None) and not (isinstance(x, str_sql)) else "="
        val = format_magic(x)
        if val != "NULL":
            val = "({})".format(val)
        return str_sql("({}) {} {}".format(self.alias, op, val), self.category())

    # ---#
    def __ne__(self, x):
        op = "IS NOT" if (x == None) and not (isinstance(x, str_sql)) else "!="
        val = format_magic(x)
        if val != "NULL":
            val = "({})".format(val)
        return str_sql("({}) {} {}".format(self.alias, op, val), self.category())

    # ---#
    def __ge__(self, x):
        val = format_magic(x)
        return str_sql("({}) >= ({})".format(self.alias, val), self.category())

    # ---#
    def __gt__(self, x):
        val = format_magic(x)
        return str_sql("({}) > ({})".format(self.alias, val), self.category())

    # ---#
    def __le__(self, x):
        val = format_magic(x)
        return str_sql("({}) <= ({})".format(self.alias, val), self.category())

    # ---#
    def __lt__(self, x):
        val = format_magic(x)
        return str_sql("({}) < ({})".format(self.alias, val), self.category())

    # ---#
    # Multiplying a text expression by an int maps to SQL REPEAT.
    def __mul__(self, x):
        if self.category() == "text" and isinstance(x, (int)):
            return str_sql("REPEAT({}, {})".format(self.alias, x), self.category())
        val = format_magic(x)
        return str_sql("({}) * ({})".format(self.alias, val), self.category())

    # ---#
    def __rmul__(self, x):
        if self.category() == "text" and isinstance(x, (int)):
            return str_sql("REPEAT({}, {})".format(self.alias, x), self.category())
        val = format_magic(x)
        return str_sql("({}) * ({})".format(val, self.alias), self.category())

    # ---#
    def __or__(self, x):
        val = format_magic(x)
        return str_sql("({}) OR ({})".format(self.alias, val), self.category())

    # ---#
    def __ror__(self, x):
        val = format_magic(x)
        return str_sql("({}) OR ({})".format(val, self.alias), self.category())

    # ---#
    def __pos__(self):
        return str_sql("+({})".format(self.alias), self.category())

    # ---#
    def __neg__(self):
        return str_sql("-({})".format(self.alias), self.category())

    # ---#
    def __pow__(self, x):
        val = format_magic(x)
        return str_sql("POWER({}, {})".format(self.alias, val), self.category())

    # ---#
    def __rpow__(self, x):
        val = format_magic(x)
        return str_sql("POWER({}, {})".format(val, self.alias), self.category())

    # ---#
    def __mod__(self, x):
        val = format_magic(x)
        return str_sql("MOD({}, {})".format(self.alias, val), self.category())

    # ---#
    def __rmod__(self, x):
        val = format_magic(x)
        return str_sql("MOD({}, {})".format(val, self.alias), self.category())

    # ---#
    def __sub__(self, x):
        val = format_magic(x)
        return str_sql("({}) - ({})".format(self.alias, val), self.category())

    # ---#
    def __rsub__(self, x):
        val = format_magic(x)
        return str_sql("({}) - ({})".format(val, self.alias), self.category())

    # ---#
    def __truediv__(self, x):
        val = format_magic(x)
        return str_sql("({}) / ({})".format(self.alias, val), self.category())

    # ---#
    def __rtruediv__(self, x):
        val = format_magic(x)
        return str_sql("({}) / ({})".format(val, self.alias), self.category())

    # ---#
    def __floordiv__(self, x):
        val = format_magic(x)
        return str_sql("({}) // ({})".format(self.alias, val), self.category())

    # ---#
    def __rfloordiv__(self, x):
        val = format_magic(x)
        return str_sql("({}) // ({})".format(val, self.alias), self.category())

    # ---#
    def __ceil__(self):
        return str_sql("CEIL({})".format(self.alias), self.category())

    # ---#
    def __floor__(self):
        return str_sql("FLOOR({})".format(self.alias), self.category())

    # ---#
    def __trunc__(self):
        return str_sql("TRUNC({})".format(self.alias), self.category())

    # ---#
    def __invert__(self):
        # Two's-complement identity: ~x == -x - 1, expressed in SQL.
        return str_sql("-({}) - 1".format(self.alias), self.category())

    # ---#
    def __round__(self, x):
        return str_sql("ROUND({}, {})".format(self.alias, x), self.category())

    def category(self):
        """Return the VerticaPy category of the expression."""
        return self.category_
|
<filename>Input/instance_generator.py
from Input.fixed_file_variables import FixedFileVariables
from Input.dynamic_file_variables import DynamicFileVariables
import numpy as np
import random
from scipy.spatial import distance
class Instance:
def __init__(self, n_stations, n_vehicles, n_time_hor):
self.n_stations = n_stations
self.n_vehicles = n_vehicles
self.time_horizon = n_time_hor
self.fixed = FixedFileVariables()
self.fixed.time_horizon = self.time_horizon
self.dynamic = DynamicFileVariables()
self.set_stations()
self.set_vehicles()
self.set_time_matrix()
self.set_station_cap(10)
self.set_vehicle_cap(10)
self.set_init_station_load()
self.set_init_vehicle_load()
self.set_start_stations()
self.set_station_rates()
self.set_ideal_state(5)
self.set_time_to_start()
self.write_to_file()
def set_time_matrix(self):
time_cap = 15
matrix = np.zeros((self.n_stations, self.n_stations))
geographical_points = [(random.randint(0, time_cap), random.randint(0, time_cap)) for i in range(self.n_stations)]
for i in range(self.n_stations-1):
for j in range(i, self.n_stations-1):
if i == j:
continue
else:
time_x = round(distance.euclidean(geographical_points[i], geographical_points[j]), 1)
matrix[i][j] = time_x
matrix[j][i] = time_x
self.fixed.driving_times = matrix
def set_time_to_start(self):
self.dynamic.driving_to_start = [0] * self.n_vehicles
time_cap = 4
for vehicle in self.fixed.vehicles:
self.dynamic.driving_to_start[vehicle] = round(random.random() * time_cap, 2)
def set_stations(self):
self.fixed.stations = [i for i in range(self.n_stations)]
def set_vehicles(self):
self.fixed.vehicles = [i for i in range(self.n_vehicles)]
def set_start_stations(self):
start = []
for i in range(self.n_vehicles):
station = random.randint(0, self.n_stations - 2)
while station in start:
station = random.randint(0, self.n_stations - 2)
start.append(station)
self.dynamic.start_stations = start
def set_vehicle_cap(self, cap):
self.fixed.vehicle_cap = [0] * self.n_vehicles
for i in range(self.n_vehicles):
self.fixed.vehicle_cap[i] = cap
def set_init_vehicle_load(self):
self.dynamic.init_vehicle_load = [random.randint(0, self.fixed.vehicle_cap[i]) for i in self.fixed.vehicles]
def set_init_station_load(self):
self.dynamic.init_flat_station_load = [0] * self.n_stations
self.dynamic.init_station_load = [0] * self.n_stations
for station in self.fixed.stations[1:-1]:
battery = random.randint(0, self.fixed.station_cap[station])
flat = random.randint(0, self.fixed.station_cap[station])
while battery + flat > self.fixed.station_cap[station]:
battery = random.randint(0, self.fixed.station_cap[station])
flat = random.randint(0, self.fixed.station_cap[station])
self.dynamic.init_station_load[station] = battery
self.dynamic.init_flat_station_load[station] = flat
def set_station_cap(self, cap):
self.fixed.station_cap = [0] * self.n_stations
for i in self.fixed.stations[1:-1]:
self.fixed.station_cap[i] = cap
def set_station_rates(self):
self.dynamic.demand = [0] * self.n_stations
self.dynamic.incoming_rate = [0] * self.n_stations
self.dynamic.incoming_flat_rate = [0] * self.n_stations
for station in self.fixed.stations[1:-1]:
demand = round(random.random(), 2)
battery_rate = round(random.random(), 2)
flat_rate = round(random.random(), 2)
while demand < (battery_rate + flat_rate):
demand = round(random.random(), 2)
battery_rate = round(random.random(), 2)
flat_rate = round(random.random(), 2)
self.dynamic.demand[station] = demand
self.dynamic.incoming_rate[station] = battery_rate
self.dynamic.incoming_flat_rate[station] = flat_rate
def set_ideal_state(self, ideal):
self.dynamic.ideal_state = [0] * self.n_stations
for station in self.fixed.stations[1:-1]:
self.dynamic.ideal_state[station] = ideal
def write_to_file(self):
    """Dump all fixed and dynamic instance parameters to ../Input/input_params.txt.

    Bug fix: the original called ``open()`` without ever closing the
    handle, leaking the file descriptor and risking unflushed output.
    A ``with`` block now guarantees the file is closed even on error.
    The written content is unchanged.
    """
    with open("../Input/input_params.txt", 'w') as f:
        f.write("------------ FIXED ------------------------ \n")
        f.write("self.stations = " + str(self.fixed.stations) + "\n")
        f.write("self.vehicles = " + str(self.fixed.vehicles) + "\n")
        f.write("self.time_horizon = " + str(self.fixed.time_horizon) + "\n")
        f.write("self.vehicle_cap = " + str(self.fixed.vehicle_cap) + "\n")
        f.write("self.station_cap = " + str(self.fixed.station_cap) + "\n")
        f.write("self.driving_times = " + repr(self.fixed.driving_times) + "\n")
        f.write("self.parking_time = " + str(self.fixed.parking_time) + "\n")
        f.write("self.handling_time = " + str(self.fixed.handling_time) + "\n")
        f.write("self.M = " + str(self.fixed.M) + "\n")
        f.write("Reward weight deviation = " + str(self.fixed.w_dev_reward) + "\n")
        f.write("Reward weight driving time = " + str(self.fixed.w_driving_time) + "\n")
        f.write("Weight deviation = " + str(self.fixed.w_dev_obj) + "\n")
        f.write("Weight violation = " + str(self.fixed.w_violation) + "\n")
        f.write("Weight reward = " + str(self.fixed.w_reward) + "\n \n")
        f.write("------------ DYNAMIC ------------------------ \n")
        f.write("self.start_stations = " + str(self.dynamic.start_stations) + "\n")
        f.write("self.init_vehicle_load = " + str(self.dynamic.init_vehicle_load) + "\n")
        f.write("self.init_station_load = " + str(self.dynamic.init_station_load) + "\n")
        f.write("self.init_flat_station_load = " + str(self.dynamic.init_flat_station_load) + "\n")
        f.write("self.ideal_state = " + str(self.dynamic.ideal_state) + "\n")
        f.write("self.driving_to_start = " + str(self.dynamic.driving_to_start) + "\n")
        f.write("self.demand = " + str(self.dynamic.demand) + "\n")
        f.write("self.incoming_rate = " + str(self.dynamic.incoming_rate) + "\n")
        f.write("self.incoming_flat_rate = " + str(self.dynamic.incoming_flat_rate) + "\n")
|
<reponame>anewmark/galaxy_dark_matter
# Plot single-galaxy luminosity-density profiles for LOWZ galaxies.
# Pipeline: read catalog -> magnitude cuts per band -> sentinel-value cuts
# -> pixel-flag filtering -> per-galaxy, per-aperture luminosity densities
# -> one plot per galaxy written to `outdir`.
print('Will plot single galaxy luminosity density profiles')
import astropy.table as table
from defcuts import *
from def_get_mags import *
from my_def_plots import *
from defflags import many_flags
import matplotlib.pyplot as plt
# Hard-coded, user-specific input/output locations.
indir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'
outdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/single_plot/'
datatab = table.Table.read(indir+ 'LOWZ_HSCGAMA15_apmgs.fits')
# HSC photometric bands used throughout.
bands=['g', 'r', 'i','z', 'y']
# Pixel-quality flags used to reject bad photometry.
parm=['flags_pixel_saturated_center','flags_pixel_edge','flags_pixel_interpolated_center','flags_pixel_cr_center','flags_pixel_suspect_center', 'flags_pixel_clipped_any','flags_pixel_bad']
# Aperture *diameters*; radii are half of these (units per upstream convention).
daperture=[1.01,1.51,2.02,3.02,4.03,5.71,8.40,11.8,16.8,23.5]
aperture=[x*0.5 for x in daperture]
#get rid of cuts
mincut=0.1
maxcut=''
# Chain magnitude cuts band by band; each call filters the previous result.
# `crange` is overwritten at every step - only the last value survives.
cutdatag, crange=out_cut(datatab, bands[0], 'mag_aperture00',mincut, maxcut)
cutdatai, crange=out_cut(cutdatag, bands[2], 'mag_aperture00',mincut, maxcut)
cutdatar, crange=out_cut(cutdatai, bands[1], 'mag_aperture00',mincut, maxcut)
cutdatay, crange=out_cut(cutdatar, bands[4], 'mag_aperture00', mincut, maxcut)
cutdataz, crange=out_cut(cutdatay, bands[3], 'mag_aperture00',mincut, maxcut)
# Sentinel magnitudes marking missing photometry; drop rows matching them.
ne=[199.99, 99.99]
cutdata1=not_cut(cutdataz, bands[1], 'mag_aperture00', ne)
cutdata=not_cut(cutdata1, bands[0], 'mag_aperture00', ne)
#get rid of flagged galaxies
newdata=many_flags(cutdata, parm, 'i')	#I think flags are only in band i
Naps=len(aperture)
objID=newdata['object_id']
redshift=newdata['Z']
Ndat=len(redshift)
# Distance modulus and k-corrections per galaxy (project helpers).
DM= get_zdistmod(newdata, 'Z')
kcorrect=get_kcorrect2(newdata,'mag_aperture0', '_err', bands, '0', 'hsc_filters.dat', redshift)
#for n in range(0, Ndat,10):
for n in range(0,Ndat):
    #this goes through every galaxy
    name=objID[n]
    name=str(name)
    # Per-band luminosity-density accumulators across apertures.
    LG=[]
    LR=[]
    LI=[]
    LZ=[]
    LY=[]
    # Convert aperture radii to comoving kpc at this galaxy's redshift.
    radkpc=aper_and_comov(aperture, redshift[n])
    for a in range(0, Naps):
        #this goes through every aperture
        ns=str(a)
        print(ns)
        #get magnitude
        absg, absr, absi, absz, absy= abs_mag(newdata[n], 'mag_aperture0', kcorrect, DM[n], bands, ns, n)
        # Absolute magnitudes -> luminosities -> surface luminosity density.
        Lumg, Lumr, Lumi, Lumz, Lumy=abs2lum(absg, absr, absi, absz, absy)
        Lg, Lr, Li, Lz, Ly=lumdensity(Lumg, Lumr, Lumi, Lumz, Lumy, radkpc[a])
        LG.append(Lg)
        LR.append(Lr)
        LI.append(Li)
        LZ.append(Lz)
        LY.append(Ly)
    #	break
    #creating luminosity densities for the apertures at each band
    print('Galaxy # ', n)
    lum_comov_plot(LG, LR, LI, LZ, LY, radkpc, name, outdir)
#will eventually need comoving
<filename>graphik/graphs/graph_revolute.py
from typing import Dict, List, Any
import numpy as np
import numpy.linalg as la
from graphik.robots import RobotRevolute
from graphik.graphs.graph_base import ProblemGraph
from graphik.utils import *
from liegroups.numpy import SE3
from liegroups.numpy.se3 import SE3Matrix
from liegroups.numpy.so3 import SO3Matrix
import networkx as nx
from numpy import cos, pi, sqrt, arctan2, cross
class ProblemGraphRevolute(ProblemGraph):
    """Distance-geometry problem graph for robots with revolute joints.

    Each joint contributes a main point ``p{i}`` and an auxiliary point
    ``q{i}`` offset along the joint's z-axis; together with a fixed base
    frame these form a graph whose edges carry exact distances or distance
    bounds induced by link geometry and joint limits.
    """

    def __init__(
        self,
        robot: RobotRevolute,
        params: Dict = {},
    ):
        # NOTE(review): mutable default argument `params={}` - safe only if
        # ProblemGraph never mutates it; confirm upstream.
        super(ProblemGraphRevolute, self).__init__(robot, params)

        # Merge the fixed base frame with the robot structure graph and copy
        # the result into self (this class is itself a networkx graph).
        base = self.base_subgraph()
        structure = self.structure_graph()
        composition = nx.compose(base, structure)
        self.add_nodes_from(composition.nodes(data=True))
        self.add_edges_from(composition.edges(data=True))

        self.set_limits()
        self.root_angle_limits()

    def base_subgraph(self) -> nx.DiGraph:
        """Build the fixed base-frame subgraph.

        Nodes: base origin ``p0``, auxiliary ``q0`` on the +z axis, and
        anchors ``x`` (+x axis) / ``y`` (-y axis).  All pairwise edges get
        exact distances (lower == upper == dist).
        """
        axis_length = self.axis_length
        base = nx.DiGraph(
            [
                ("p0", "x"),
                ("p0", "y"),
                ("p0", "q0"),
                ("x", "y"),
                ("y", "q0"),
                ("q0", "x"),
            ]
        )
        base.add_nodes_from(
            [
                ("p0", {POS: np.array([0, 0, 0]), TYPE: [ROBOT, BASE]}),
                ("x", {POS: np.array([axis_length, 0, 0]), TYPE: [BASE]}),
                ("y", {POS: np.array([0, -axis_length, 0]), TYPE: [BASE]}),
                ("q0", {POS: np.array([0, 0, axis_length]), TYPE: [ROBOT, BASE]}),
            ]
        )

        # Base points are rigid: every edge is an exact distance constraint.
        for u, v in base.edges():
            base[u][v][DIST] = la.norm(base.nodes[u][POS] - base.nodes[v][POS])
            base[u][v][LOWER] = base[u][v][DIST]
            base[u][v][UPPER] = base[u][v][DIST]
            base[u][v][BOUNDED] = []
        return base

    def structure_graph(self):
        """Build the robot's structure graph at the zero configuration.

        For every joint on every end-effector chain, adds the main point
        (joint origin) and the auxiliary point (offset along the joint
        z-axis), connects the pair, and fully connects consecutive joint
        pairs with exact distances taken from the zero-configuration poses.
        """
        trans_z = trans_axis(self.axis_length, "z")
        robot = self.robot
        structure = nx.empty_graph(create_using=nx.DiGraph)

        for ee in robot.end_effectors:
            k_map = robot.kinematic_map[ROOT][ee]
            for idx in range(len(k_map)):
                cur, aux_cur = k_map[idx], f"q{k_map[idx][1:]}"
                # Main point at the joint origin; auxiliary point offset
                # along the joint's z-axis by axis_length.
                cur_pos, aux_cur_pos = (
                    robot.nodes[cur]["T0"].trans,
                    robot.nodes[cur]["T0"].dot(trans_z).trans,
                )
                dist = la.norm(cur_pos - aux_cur_pos)

                # Add nodes for joint and edge between them
                structure.add_nodes_from(
                    [(cur, {POS: cur_pos}), (aux_cur, {POS: aux_cur_pos})]
                )
                structure.add_edge(
                    cur, aux_cur, **{DIST: dist, LOWER: dist, UPPER: dist, BOUNDED: []}
                )

                # If there exists a preceeding joint, connect it to new
                if idx != 0:
                    pred, aux_pred = (k_map[idx - 1], f"q{k_map[idx-1][1:]}")
                    # Fully connect the previous (p, q) pair to the current
                    # one: these four distances are configuration-invariant.
                    for u in [pred, aux_pred]:
                        for v in [cur, aux_cur]:
                            dist = la.norm(
                                structure.nodes[u][POS] - structure.nodes[v][POS]
                            )
                            structure.add_edge(
                                u,
                                v,
                                **{DIST: dist, LOWER: dist, UPPER: dist, BOUNDED: []},
                            )

        # Delete positions used for weights
        for u in structure.nodes:
            del structure.nodes[u][POS]
        # Set node type to robot
        nx.set_node_attributes(structure, [ROBOT], TYPE)
        structure.nodes[ROOT][TYPE] = [ROBOT, BASE]
        structure.nodes["q0"][TYPE] = [ROBOT, BASE]
        return structure

    def root_angle_limits(self):
        """Add distance bounds between base anchors and the first joint.

        Mirrors `set_limits` but for edges from the base anchors ("x", "y")
        to the first joint's points ("p1", "q1"), using the geometry of a
        point rotating about the root joint's z-axis.
        """
        axis_length = self.axis_length
        robot = self.robot
        upper_limits = self.robot.ub
        limited_joints = self.limited_joints

        T1 = robot.nodes[ROOT]["T0"]
        base_names = ["x", "y"]
        names = ["p1", "q1"]
        T_axis = trans_axis(axis_length, "z")

        for base_node in base_names:
            for node in names:
                # T0: pure translation to the base anchor's position.
                T0 = SE3.from_matrix(np.identity(4))
                T0.trans = self.nodes[base_node][POS]
                # T2: pose of the first joint's main or auxiliary point.
                if node[0] == "p":
                    T2 = robot.nodes["p1"]["T0"]
                else:
                    T2 = robot.nodes["p1"]["T0"].dot(T_axis)

                # Circle traced by T2 about the root joint axis:
                # N = axis direction, C = circle centre, r = radius.
                N = T1.as_matrix()[0:3, 2]
                C = T1.trans + (N.dot(T2.trans - T1.trans)) * N
                r = np.linalg.norm(T2.trans - C)
                P = T0.trans
                d_max, d_min = max_min_distance_revolute(r, P, C, N)

                # Classify whether the zero-configuration distance sits at
                # an extremum of the reachable interval.
                d = np.linalg.norm(T2.trans - T0.trans)
                if d_max == d_min:
                    limit = False
                elif d == d_max:
                    limit = BELOW
                elif d == d_min:
                    limit = ABOVE
                else:
                    limit = None

                if limit:
                    # Tighten the free extremum using the joint's upper limit.
                    if node[0] == "p":
                        T_rel = T1.inv().dot(robot.nodes["p1"]["T0"])
                    else:
                        T_rel = T1.inv().dot(robot.nodes["p1"]["T0"].dot(T_axis))

                    d_limit = la.norm(
                        T1.dot(rot_axis(upper_limits["p1"], "z")).dot(T_rel).trans
                        - T0.trans
                    )

                    if limit == ABOVE:
                        d_max = d_limit
                    else:
                        d_min = d_limit
                    limited_joints += ["p1"]  # joint at p0 is limited

                self.add_edge(base_node, node)
                if d_max == d_min:
                    self[base_node][node][DIST] = d_max
                self[base_node][node][BOUNDED] = [limit]
                self[base_node][node][UPPER] = d_max
                self[base_node][node][LOWER] = d_min

    def set_limits(self):
        """
        Sets known bounds on the distances between joints.
        This is induced by link length and joint limits.
        """
        S = self.structure
        robot = self.robot
        kinematic_map = self.robot.kinematic_map
        T_axis = trans_axis(self.axis_length, "z")
        end_effectors = self.robot.end_effectors
        upper_limits = self.robot.ub

        limited_joints = []  # joint limits that can be enforced
        for ee in end_effectors:
            k_map = kinematic_map[ROOT][ee]
            for idx in range(2, len(k_map)):
                # Bound distances between joints two apart on the chain;
                # the intermediate joint's rotation makes them variable.
                cur, prev = k_map[idx], k_map[idx - 2]
                # All four (main/aux) x (main/aux) point pairings.
                names = [
                    (MAIN_PREFIX + str(prev[1:]), MAIN_PREFIX + str(cur[1:])),
                    (MAIN_PREFIX + str(prev[1:]), AUX_PREFIX + str(cur[1:])),
                    (AUX_PREFIX + str(prev[1:]), MAIN_PREFIX + str(cur[1:])),
                    (AUX_PREFIX + str(prev[1:]), AUX_PREFIX + str(cur[1:])),
                ]
                for ids in names:
                    path = kinematic_map[prev][cur]
                    T0, T1, T2 = [
                        robot.nodes[path[0]]["T0"],
                        robot.nodes[path[1]]["T0"],
                        robot.nodes[path[2]]["T0"],
                    ]
                    # Shift to the auxiliary point where required.
                    if AUX_PREFIX in ids[0]:
                        T0 = T0.dot(T_axis)
                    if AUX_PREFIX in ids[1]:
                        T2 = T2.dot(T_axis)

                    # Circle traced by T2 about the middle joint's z-axis.
                    N = T1.as_matrix()[0:3, 2]
                    C = T1.trans + (N.dot(T2.trans - T1.trans)) * N
                    r = la.norm(T2.trans - C)
                    P = T0.trans
                    d_max, d_min = max_min_distance_revolute(r, P, C, N)

                    d = la.norm(T2.trans - T0.trans)
                    if d_max == d_min:
                        limit = False
                    elif d == d_max:
                        limit = BELOW
                    elif d == d_min:
                        limit = ABOVE
                    else:
                        limit = None

                    if limit:
                        # Tighten the free extremum using the joint limit.
                        rot_limit = rot_axis(upper_limits[cur], "z")
                        T_rel = T1.inv().dot(T2)
                        d_limit = la.norm(T1.dot(rot_limit).dot(T_rel).trans - T0.trans)

                        if limit == ABOVE:
                            d_max = d_limit
                        else:
                            d_min = d_limit
                        limited_joints += [cur]

                    self.add_edge(ids[0], ids[1])
                    if d_max == d_min:
                        # NOTE(review): writes through `S` (self.structure)
                        # here, while root_angle_limits writes through `self`
                        # for the analogous DIST edge - confirm both views
                        # share edge-attribute storage.
                        S[ids[0]][ids[1]][DIST] = d_max
                    self[ids[0]][ids[1]][BOUNDED] = [limit]
                    self[ids[0]][ids[1]][UPPER] = d_max
                    self[ids[0]][ids[1]][LOWER] = d_min
        self.limited_joints = limited_joints

    def _pose_goal(self, T_goal: Dict[str, SE3]) -> Dict[str, ArrayLike]:
        """Convert goal poses into goal positions for the (p, q) point pairs."""
        pos = {}
        for u, T_goal_u in T_goal.items():
            v = AUX_PREFIX + u[1:]
            # Main point at the goal origin; auxiliary point offset along z.
            pos[u] = T_goal_u.trans
            pos[v] = T_goal_u.dot(trans_axis(self.axis_length, "z")).trans
        return pos

    def joint_variables(
        self, G: nx.DiGraph, T_final: Dict[str, SE3] = None
    ) -> Dict[str, Any]:
        """
        Calculate joint angles from a complete set of point positions.

        G: graph whose nodes carry solved point positions (POS attribute).
        T_final: optional goal poses; when given, the end-effector angle is
        corrected for chains whose last link is aligned with the joint axis.
        Returns a dict mapping joint names to angles.
        """
        # TODO: make this more readable
        tol = 1e-10
        q_zero = list_to_variable_dict(self.robot.n * [0])
        axis_length = self.axis_length
        end_effectors = self.robot.end_effectors
        kinematic_map = self.robot.kinematic_map

        T = {}
        T[ROOT] = self.robot.T_base

        # resolve scale
        x_hat = G.nodes["x"][POS] - G.nodes["p0"][POS]
        y_hat = G.nodes["y"][POS] - G.nodes["p0"][POS]
        z_hat = G.nodes["q0"][POS] - G.nodes["p0"][POS]

        # resolve rotation and translation
        x = normalize(x_hat)
        y = normalize(y_hat)
        z = normalize(z_hat)
        # "y" anchor sits on the negative y-axis, hence the sign flip.
        R = np.vstack((x, -y, z)).T
        B = SE3Matrix(SO3Matrix(R), G.nodes[ROOT][POS])

        theta = {}
        for ee in end_effectors:
            k_map = kinematic_map[ROOT][ee]
            for idx in range(1, len(k_map)):
                cur, aux_cur = k_map[idx], f"q{k_map[idx][1:]}"
                pred, aux_pred = (k_map[idx - 1], f"q{k_map[idx-1][1:]}")

                # Relative zero-configuration transforms for the main and
                # auxiliary points of the current joint.
                T_prev = T[pred]
                T_prev_0 = self.get_pose(q_zero, pred)
                T_0 = self.get_pose(q_zero, cur)
                T_rel = T_prev_0.inv().dot(T_0)
                T_0_q = self.get_pose(q_zero, cur).dot(trans_axis(axis_length, "z"))
                T_rel_q = T_prev_0.inv().dot(T_0_q)

                # Measured point positions expressed w.r.t. the base frame B.
                p = B.inv().dot(G.nodes[cur][POS]) - T_prev.trans
                # Re-normalize the aux point to unit offset from the main one.
                qnorm = G.nodes[cur][POS] + (
                    G.nodes[aux_cur][POS] - G.nodes[cur][POS]
                ) / la.norm(G.nodes[aux_cur][POS] - G.nodes[cur][POS])
                q = B.inv().dot(qnorm) - T_prev.trans
                ps = T_prev.inv().as_matrix()[:3, :3].dot(p)  # in prev. joint frame
                qs = T_prev.inv().as_matrix()[:3, :3].dot(q)  # in prev. joint frame

                # Coefficients of the single-axis alignment (Wahba) problem.
                zs = skew(np.array([0, 0, 1]))
                cp = (T_rel.trans - ps) + zs.dot(zs).dot(T_rel.trans)
                cq = (T_rel_q.trans - qs) + zs.dot(zs).dot(T_rel_q.trans)
                ap = zs.dot(T_rel.trans)
                aq = zs.dot(T_rel_q.trans)
                bp = zs.dot(zs).dot(T_rel.trans)
                bq = zs.dot(zs).dot(T_rel_q.trans)

                c0 = cp.dot(cp) + cq.dot(cq)
                c1 = 2 * (cp.dot(ap) + cq.dot(aq))
                c2 = 2 * (cp.dot(bp) + cq.dot(bq))
                c3 = ap.dot(ap) + aq.dot(aq)
                c4 = bp.dot(bp) + bq.dot(bq)
                c5 = 2 * (ap.dot(bp) + aq.dot(bq))

                # Polynomial whose roots (in tan(theta/2) form) are the
                # stationary points of the alignment error.
                diff = np.asarray(
                    [
                        c1 - c5,
                        2 * c2 + 4 * c3 - 4 * c4,
                        3 * c1 + 3 * c5,
                        8 * c2 + 4 * c3 - 4 * c4,
                        -4 * c1 + 4 * c5,
                    ]
                )
                if all(diff < tol):
                    # Degenerate/flat error: any angle works, pick zero.
                    theta[cur] = 0
                else:
                    sols = np.roots(
                        diff
                    )  # solutions to the Whaba problem for fixed axis

                    def error_test(x):
                        # Penalize complex roots; otherwise evaluate the
                        # alignment error at the angle the root encodes.
                        # NOTE(review): `sin` is not among the explicit
                        # numpy imports above - presumably provided by
                        # `from graphik.utils import *`; confirm.
                        if abs(x.imag) > 0:
                            return 1e6
                        x = -2 * arctan2(x.real, 1)
                        return (
                            c0
                            + c1 * sin(x)
                            - c2 * cos(x)
                            + c3 * sin(x) ** 2
                            + c4 * cos(x) ** 2
                            - c5 * sin(2 * x) / 2
                        )

                    sol = min(sols, key=error_test)
                    theta[cur] = -2 * arctan2(sol.real, 1)

                # Accumulate the pose chain with the recovered angle.
                T[cur] = (T_prev.dot(rot_axis(theta[cur], "z"))).dot(T_rel)

            if T_final is None:
                return theta

            # Last-link correction: when the final link is parallel to the
            # joint axis, the solved points cannot observe the final angle;
            # recover it from the goal pose instead.
            if (
                T_final[ee] is not None
                and la.norm(cross(T_rel.trans, np.asarray([0, 0, 1]))) < tol
            ):
                T_th = (T[cur]).inv().dot(T_final[ee]).as_matrix()
                theta[ee] += arctan2(T_th[1, 0], T_th[0, 0])

        return theta

    def get_pose(self, joint_angles: dict, query_node: str) -> SE3:
        """Forward kinematics for a node; auxiliary nodes get the z offset."""
        T = self.robot.pose(joint_angles, query_node)
        if query_node[0] == AUX_PREFIX:
            T_trans = trans_axis(self.axis_length, "z")
            T = T.dot(T_trans)
        return T

    def distance_bounds_from_sampling(self):
        """Estimate per-edge distance bounds by sampling random configurations.

        Draws 2000 random configurations and records the element-wise
        min/max of the squared-distance matrices; near-constant entries are
        additionally marked with a DIST value.
        """
        robot = self.robot
        ids = self.node_ids
        q_rand = robot.random_configuration()
        # Both extremes start from the same initial sample.
        D_min = self.distance_matrix_from_joints(q_rand)
        D_max = self.distance_matrix_from_joints(q_rand)

        for _ in range(2000):
            q_rand = robot.random_configuration()
            D_rand = self.distance_matrix_from_joints(q_rand)
            D_max[D_rand > D_max] = D_rand[D_rand > D_max]
            D_min[D_rand < D_min] = D_rand[D_rand < D_min]

        for idx in range(len(D_max)):
            for jdx in range(len(D_max)):
                e1 = ids[idx]
                e2 = ids[jdx]
                self.add_edge(e1, e2)
                # Matrices hold squared distances; bounds are their roots.
                self[e1][e2][LOWER] = sqrt(D_min[idx, jdx])
                self[e1][e2][UPPER] = sqrt(D_max[idx, jdx])
                if abs(D_max[idx, jdx] - D_min[idx, jdx]) < 1e-5:
                    # NOTE(review): DIST is set to the (near-zero) spread,
                    # not to the distance itself - looks suspicious; confirm
                    # intended semantics before relying on it.
                    self[e1][e2][DIST] = abs(D_max[idx, jdx] - D_min[idx, jdx])
if __name__ == "__main__":
    # Smoke-test: build a problem graph for a 6-DOF UR10 and dump its nodes.
    import graphik
    from graphik.utils.roboturdf import RobotURDF

    n = 6
    ub = (pi) * np.ones(n)
    lb = -ub
    modified_dh = False

    # Alternative URDFs kept for quick manual experimentation:
    # lwa4p, lwa4d, panda_arm, kuka_iiwr, kuka_lwr, jaco2arm6DOF_no_hand
    fname = graphik.__path__[0] + "/robots/urdfs/ur10_mod.urdf"

    urdf_robot = RobotURDF(fname)
    robot = urdf_robot.make_Revolute3d(ub, lb)  # make the Revolute class from a URDF
    graph = ProblemGraphRevolute(robot)

    for dump in (
        graph.nodes(data=True),
        graph.base_nodes,
        graph.structure_nodes,
        graph.end_effector_nodes,
    ):
        print(dump)
|
<filename>fake_texts/pytorch_dataset_fake_2.py
import sys
sys.path.append("fake_texts/")
#sys.path.append("/home/leander/AI/repos/gen_text/TextRecognitionDataGenerator/fonts")
import argparse
import os, errno
import random
from random import randint
import string
from tqdm import tqdm
from string_generator import (
create_strings_from_dict,
create_strings_from_file,
create_strings_from_wikipedia,
create_strings_randomly
)
from data_generator import FakeTextDataGenerator
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import torchvision.datasets as datasets
import matplotlib.pyplot as plt
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug_transformations import augmentations
# from skimage.transform import resize
import cv2
#Basically we want to add the
def load_dict(lang):
    """Read the dictionary file for *lang* and return all of its lines.

    Improvements: removed the dead ``lang_dict = []`` pre-initialization
    and fixed the "dictionnary" typo.  Behaviour is unchanged: lines are
    returned exactly as read (including trailing newlines); callers are
    expected to strip them if needed.
    """
    with open(os.path.join('dicts', lang + '.txt'), 'r', encoding="utf8", errors='ignore') as d:
        return d.readlines()
def load_fonts(lang):
    """Return the paths of every font file available for *lang*.

    Chinese ('cn') fonts live under fonts/cn; all other languages fall
    back to the latin font directory.
    """
    font_dir = 'fonts/cn' if lang == 'cn' else "fonts/latin/"
    return [os.path.join(font_dir, font) for font in os.listdir(font_dir)]
import numpy as np
from nltk import word_tokenize
import torch
import torch
from torch.utils import data
class Dataset(data.Dataset):
    """PyTorch dataset producing synthetic rendered-text images for OCR.

    Each item is a freshly generated image of a short random string
    (digits, '.', '-' and space), its label as a list of character
    indices, and both lengths.  Rendering is delegated to
    FakeTextDataGenerator; optional imgaug augmentation is applied.
    """
    def __init__(self,
                 epoch_size=10,
                 random_strings=True,
                 num_words=5,
                 transform=False,
                 width=-1,
                 alignment=1,
                 height=32,
                 random_skew = True,
                 random_blur = True,
                 ):
        """Store generation options and build the char<->index vocabulary."""
        # NOTE(review): assigning `self.transform = transform` (a bool)
        # shadows the `transform` method defined below - the method is
        # unreachable through `self.transform`.  Confirm which is intended.
        self.transform = transform
        self.random_sequences = False
        # NOTE(review): hard-coded True - the `random_strings` parameter is
        # ignored; likewise `epoch_size`, `num_words`, `width`, `alignment`
        # and `height` are not stored.  Confirm this is deliberate.
        self.random_strings = True
        self.include_letters = True
        self.include_numbers = True
        self.include_symbols = True
        self.length = 10
        #self.random = False
        self.format = 32          # rendered image height in pixels
        self.use_wikipedia = False
        self.text_color = '#282828'
        self.orientation = 0      # 0 = horizontal text
        self.extension = "jpg"
        self.handwritten = False
        self.name_format = 0
        self.random_skew = random_skew
        self.random_blur = random_blur

        # Character pool: digits plus '.', '-' and space (letters/symbols
        # are intentionally commented out).
        pool = ''
        # pool += "abcdefghijklmnopqrstuvwxyz"
        pool += "0123456789"
        pool += ".-"
        # pool += "!\"#$%&'()*+,-./:;?@[\\]^_`{|}~"
        pool += ' '
        self.pool = pool

        # Index 0 is implicitly reserved (labels start at 1), typically
        # for the CTC blank.
        self.keys = list(pool)
        self.values = np.array(range(1,len(pool)+1))
        self.dictionary = dict(zip(self.keys, self.values))
        self.fonts = load_fonts("en")
        self.decode_dict=dict((v,k) for k,v in self.dictionary.items())
        self.unknown_char_idx = len(pool)#67
        self.decode_dict.update({self.unknown_char_idx: "OOK"}) # unknown keyword
        # Pre-built imgaug augmentation pipeline.
        self.seq = augmentations

    def __len__(self):
        """Return the nominal epoch length (hard-coded, ignores epoch_size)."""
        return 10000

    def transform(self,img):
        # NOTE(review): shadowed by the `self.transform` attribute set in
        # __init__, so this method is effectively dead code.
        return self.seq.augment_images(img)

    def __getitem__(self, index):
        """Generate and return one sample: (image, label, image_w, label_len).

        `index` is ignored - every call synthesizes a brand-new random
        sample, so the dataset is effectively an infinite stream.
        """
        #We are actually only getting one item here obvoiusly!
        num_words=randint(3,4) # 5,6
        language="en"
        count=32
        # Per-sample rendering options (skew/blur amounts are randomized
        # inside the generator when the random_* flags are set).
        skew_angle=0
        random_skew=self.random_skew
        blur=0
        random_blur=self.random_blur
        background=randint(0,1)
        distorsion=randint(0,2)
        distorsion_orientation=randint(0,2)

        #as class function
        def take_10(x):
            # Truncate a token list to at most num_words tokens.
            if len(x)>num_words:
                return x[0:num_words]
            else:
                return x

        #if random.random()>0.8:
        #    self.width=random.randint(500,800)
        #else:
        width=-1
        alignment=random.randint(0,2)
        strings = []

        #try:
        #    strings = create_strings_from_wikipedia(1, 1, "en")
        #except:
        strings = create_strings_randomly(length=num_words,
                                          allow_variable=False,
                                          count=1,
                                          pool=self.pool,
                                          lang='en')
        # def create_strings_randomly(length, allow_variable, count, let, num, sym, lang):
        # strings = create_strings_randomly(num_words, False, 1,
        #                                   True, True,True, "en")

        # Tokenize, truncate, and encode each character to its index;
        # characters outside the pool map to unknown_char_idx.
        strings =[" ".join(take_10(word_tokenize(x))) for x in strings]
        strings_=[list(x)for x in strings]
        self.strings_int=[[self.dictionary[x.lower() ] if x.lower() in self.keys else self.unknown_char_idx for x in m ] for m in strings_]
        self.strings_len=[len(x)for x in self.strings_int]
        string_count = len(strings)

        #What we do here is we space the words quite far appart.
        # if random.random()>0.8:
        #     width_scale=random.random()*800#900
        #     space_width=width_scale/np.sum(self.strings_len)
        # else:
        #     #width_scale=random.randint(50,100)/100
        #     #width_scale=1
        #     #space_width=width_scale/np.sum(self.strings_len)
        #     space_width=2#random.randint(50,100)/100
        # if random.random()>0.85 and np.max(self.strings_len)<30:
        #     width=random.randint(500,800)
        width = random.randint(500, 700)
        space_width = 2

        # Render every string; each positional list below supplies one
        # argument of FakeTextDataGenerator.generate per string.
        image_list=[np.expand_dims(np.array(FakeTextDataGenerator.generate(*j)),0) for j in zip(
            [i for i in range(0, string_count)],
            strings,
            [self.fonts[random.randrange(0, len(self.fonts))] for _ in range(0, string_count)],
            [self.format] * string_count,
            [self.extension] * string_count,
            [skew_angle] * string_count,
            [random_skew] * string_count,
            [blur] * string_count,
            [random_blur] * string_count,
            [background] * string_count,
            [distorsion] * string_count,
            [distorsion_orientation] * string_count,
            [self.handwritten] * string_count,
            [self.name_format] * string_count,
            [width] * string_count,
            [alignment] * string_count,
            [self.text_color] * string_count,
            [self.orientation] * string_count,
            [space_width] * string_count )]

        # Only the first generated string is returned as the sample.
        X = image_list[0]
        y = self.strings_int[0]
        y_len = len(y)

        #Here we include some random horizontal lines cause they appear quite often in real life.
        if random.random()>0.8:
            for j in range(random.randint(0,3)):
                random_channel = random.randint(0,2)
                random_h = random.randint(0,31)
                random_w_s = random.randint(0,int(X.shape[2]/2))
                random_w_e = random.randint(int(X.shape[2]/2),int(X.shape[2]))
                X[0,random_h,random_w_s:random_w_e,random_channel]=random.randint(0,255)

        # `self.transform` is the boolean from __init__ (see NOTE above).
        if self.transform== True:
            X=self.seq.augment_images(X)

        #X=np.squeeze(X)
        #X=np.expand_dims(X,0)
        # Scale pixel values to [0, 1].
        X =X/255
        x_len=X.shape[2]
        return X, y,x_len,y_len
|
from django.urls import path
from django.conf import settings
from drf_yasg import views, openapi, generators, inspectors
from rest_framework import permissions
APP_VERSION = getattr(settings, 'VERSION', '')
APP_NAME = getattr(settings, 'APP_NAME', 'Purplship')
EMAIL_SUPPORT = getattr(settings, 'EMAIL_SUPPORT', '<EMAIL>')
SCHEMA_VIEW_DESCRIPTION = f"""
## API Reference
{APP_NAME} is an open source multi-carrier shipping API that simplifies the integration of logistic carrier services.
The {APP_NAME} API is organized around REST. Our API has predictable resource-oriented URLs, accepts JSON-encoded
request bodies, returns JSON-encoded responses, and uses standard HTTP response codes, authentication, and verbs.
The {APP_NAME} API differs for every account as we release new versions.
These docs are customized to your version of the API.
## Versioning
When backwards-incompatible changes are made to the API, a new, dated version is released.
The current version is `{settings.VERSION}`.
Read our API changelog and to learn more about backwards compatibility.
As a precaution, use API versioning to check a new API version before committing to an upgrade.
## Pagination
All top-level API resources have support for bulk fetches via "list" API methods. For instance, you can list addresses,
list shipments, and list trackers. These list API methods share a common structure, taking at least these
two parameters: limit, and offset.
{APP_NAME} utilizes offset-based pagination via the offset and limit parameters.
Both parameters take a number as value (see below) and return objects in reverse chronological order.
The offset parameter returns objects listed after an index.
The limit parameter take a limit on the number of objects to be returned from 1 to 100.
```json
{{
"next": "/v1/shipments?limit=25&offset=25",
"previous": "/v1/shipments?limit=25&offset=25",
"results": [
]
}}
```
## Environments
The {APP_NAME} API offer the possibility to create and retrieve certain objects in `test_mode`.
In development, it is therefore possible to add carrier connections, get live rates,
buy labels, create trackers and schedule pickups in `test_mode`.
"""
AUTHENTICATION_DESCRIPTION = """
For client-side code, we encourage the use of JSON Web Tokens (JWT) to authenticate your app.
The JWT tokens changes for every new session and have an expiration timestamp.
To authenticate via JWT access key, use `-H "Authorization: Bearer <KEY>"`.
"""
class OpenAPISchemaGenerator(generators.OpenAPISchemaGenerator):
    """Schema generator that attaches curated tag descriptions to the document.

    Fixes: corrected grammar/typos in the user-facing tag descriptions
    ("representing your a" -> "representing your", "connectsions" ->
    "connections", "it's" -> "its", "turn" -> "turns", "comes handy" ->
    "comes in handy").  No structural or behavioral change otherwise.
    """

    def get_schema(self, request=None, public=False):
        """Generate a :class:`.Swagger` object with custom tags"""
        swagger = super().get_schema(request, public)
        # Overwrite the auto-derived tags with ordered, documented ones;
        # ReDoc uses this list to build its sidebar sections.
        swagger.tags = [
            {
                "name": "API",
                "description": AUTHENTICATION_DESCRIPTION
            },
            {
                "name": "Addresses",
                "description": f"""
                This is an object representing your {APP_NAME} shipping address.
                You can retrieve all addresses related to your {APP_NAME} account.
                Address objects are linked to your shipment history, and can be used for recurring shipping
                to / from the same locations.
                """
            },
            {
                "name": "Carriers",
                "description": f"""
                This is an object representing your {APP_NAME} carrier account connections.
                You can retrieve all configured connections available to your {APP_NAME} account.
                The `carrier_id` is a nickname you assign to your connection.
                """
            },
            {
                "name": "Customs",
                "description": f"""
                This is an object representing your {APP_NAME} shipping customs declaration.
                You can retrieve all customs declarations used historically with your {APP_NAME} account shipments.
                """
            },
            {
                "name": "Parcels",
                "description": f"""
                This is an object representing your {APP_NAME} shipping parcel.
                Parcel objects are linked to your shipment history, and can be used for recurring shipping
                using the same packaging.
                """
            },
            {
                "name": "Shipments",
                "description": f"""
                This is an object representing your {APP_NAME} shipment.
                A Shipment guides you through process of preparing and purchasing a label for an order.
                A Shipment transitions through multiple statuses throughout its lifetime as the package
                shipped makes its journey to its destination.
                """
            },
            {
                "name": "Trackers",
                "description": f"""
                This is an object representing your {APP_NAME} shipment tracker.
                A shipment tracker is an object attached to a shipment by its tracking number.
                The tracker provide the latest tracking status and events associated with a shipment
                """
            },
            {
                "name": "Pickups",
                "description": f"""
                This is an object representing your {APP_NAME} pickup booking.
                You can retrieve all pickups booked historically for your {APP_NAME} account shipments.
                """
            },
            {
                "name": "Webhooks",
                "description": f"""
                This is an object representing your {APP_NAME} webhook.
                You can configure webhook endpoints via the API to be notified about events that happen in your
                {APP_NAME} account.
                """
            },
            {
                "name": "Proxy",
                "description": f"""
                In some scenarios, all we need is to send request to a carrier using the {APP_NAME} unified API.
                The Proxy API comes in handy for that as it turns {APP_NAME} into a simple middleware that converts and
                validate your request and simply forward it to the shipping carrier server.
                > **Note**
                >
                > When using the proxy API, no objects are created in the {APP_NAME} system.
                """
            },
        ]
        return swagger
class SwaggerAutoSchema(inspectors.SwaggerAutoSchema):
    """Auto-schema that injects ReDoc ``x-code-samples`` into each operation."""

    def get_operation(self, operation_keys=None):
        """Rebuild the operation so the code-samples vendor extension is attached."""
        operation = super().get_operation(operation_keys)
        return openapi.Operation(
            operation.operation_id,
            # NOTE(review): this drops the dict entry whose *key* equals the
            # operation_id *value* - presumably meant to avoid duplicating
            # the positional operation_id argument; confirm against
            # drf_yasg's Operation signature and SwaggerDict key casing.
            **{k: v for k,v in operation.items() if k != operation.operation_id},
            **{'x-code-samples': self.overrides.get('code_examples')}
        )
# Top-level document metadata (title, version, support contact).
swagger_info = openapi.Info(
    title=f"{APP_NAME} API",
    default_version=APP_VERSION,
    description=SCHEMA_VIEW_DESCRIPTION,
    contact=openapi.Contact(email=EMAIL_SUPPORT),
)

# Publicly accessible schema view using the tag-aware generator above.
schema_view = views.get_schema_view(
    swagger_info,
    public=True,
    permission_classes=(permissions.AllowAny,),
    generator_class=OpenAPISchemaGenerator,
)

# Raw JSON schema plus a ReDoc UI at the configured documentation path.
urlpatterns = [
    path('shipping-openapi.json', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    path(settings.OPEN_API_PATH, schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
|
<filename>custom_lcv/custom_lcv/doctype/custom_lcv/custom_lcv.py
# -*- coding: utf-8 -*-
# Copyright (c) 2018, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import flt
from frappe.model.meta import get_field_precision
from frappe.model.document import Document
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
from erpnext.accounts.party import get_party_account
class CustomLCV(Document):
def get_items_from_purchase_receipts(self):
self.set("items", [])
self.total_amount = 0
self.total_weight = 0
for pr in self.get("purchase_receipts"):
if pr.receipt_document_type and pr.receipt_document:
pr_items = frappe.db.sql("""
select item.item_code, item.description,
item.qty, item.total_weight, item.weight_uom, item.base_rate, item.base_amount, item.name, item.cost_center
from `tab{doctype} Item` item where parent = %s
and exists(select name from tabItem where name = item.item_code and is_stock_item = 1)
""".format(doctype=pr.receipt_document_type), pr.receipt_document, as_dict=True)
for d in pr_items:
item = self.append("items")
item.item_code = d.item_code
item.description = d.description
item.qty = d.qty
item.rate = d.base_rate
item.cost_center = d.cost_center or erpnext.get_default_cost_center(self.company)
item.amount = d.base_amount
item.receipt_document_type = pr.receipt_document_type
item.receipt_document = pr.receipt_document
item.purchase_receipt_item = d.name
item_master = frappe.db.get_value("Item", item.item_code, ["weight_uom", "weight_per_unit"], as_dict=1)
item.weight = flt(item_master.weight_per_unit * d.qty, item.precision("weight"))
item.weight_uom = item_master.weight_uom
self.total_amount += item.amount
self.total_weight += item.weight
def validate(self):
self.check_mandatory()
self.validate_purchase_receipts()
self.set_total_taxes_and_charges()
if not self.get("items"):
self.get_items_from_purchase_receipts()
else:
self.validate_applicable_charges_for_item()
self.validate_weight_uoms()
def validate_weight_uoms(self):
if self.distribute_charges_based_on == "Weight":
weight_uoms = set()
for item in self.items:
weight_uoms.add(str(item.weight_uom))
if len(weight_uoms) > 1:
frappe.throw(_("Weight UOMs of all items must be the same. UOMs: {0}").format(", ".join(weight_uoms)))
def check_mandatory(self):
if not self.get("purchase_receipts"):
frappe.throw(_("Please enter Receipt Document"))
def validate_purchase_receipts(self):
receipt_documents = []
for d in self.get("purchase_receipts"):
if frappe.db.get_value(d.receipt_document_type, d.receipt_document, "docstatus") != 1:
frappe.throw(_("Receipt document must be submitted"))
else:
receipt_documents.append(d.receipt_document)
for item in self.get("items"):
if not item.receipt_document:
frappe.throw(_("Item must be added using 'Get Items from Purchase Receipts' button"))
elif item.receipt_document not in receipt_documents:
frappe.throw(_("Item Row {idx}: {doctype} {docname} does not exist in above '{doctype}' table")
.format(idx=item.idx, doctype=item.receipt_document_type, docname=item.receipt_document))
if not item.cost_center:
frappe.throw(_("Row {0}: Cost center is required for an item {1}")
.format(item.idx, item.item_code))
def set_total_taxes_and_charges(self):
self.total_taxes_and_charges = sum([flt(d.amount) for d in self.get("taxes")])
def validate_applicable_charges_for_item(self):
based_on = self.distribute_charges_based_on.lower()
total = sum([flt(d.get(based_on)) for d in self.get("items")])
if not total:
frappe.throw(_("Total {0} for all items is zero, may be you should change 'Distribute Charges Based On'").format(based_on))
total_applicable_charges = sum([flt(d.applicable_charges) for d in self.get("items")])
precision = get_field_precision(frappe.get_meta("Landed Cost Item").get_field("applicable_charges"),
currency=frappe.db.get_value("Company", self.company, "default_currency", cache=True))
diff = flt(self.total_taxes_and_charges) - flt(total_applicable_charges)
diff = flt(diff, precision)
if abs(diff) < (2.0 / (10**precision)):
self.items[-1].applicable_charges += diff
else:
frappe.throw(_("Total Applicable Charges in Purchase Receipt Items table must be same as Total Taxes and Charges"))
def on_submit(self):
self.update_landed_cost()
self.make_payable_jv()
def on_cancel(self):
self.update_landed_cost()
def make_payable_jv(self):
jv = frappe.new_doc("Journal Entry")
jv.linked_custom_lcv = self.name
jv.company = self.company
jv.posting_date = frappe.utils.nowdate()
jv.append("accounts", {
"account": self.expense_account,
"debit": flt(self.total_taxes_and_charges),
"debit_in_account_currency": flt(self.total_taxes_and_charges),
})
jv.append("accounts", {
"account": get_party_account("Supplier", self.supplier, self.company),
"party_type": "Supplier",
"party": self.supplier,
"credit": flt(self.total_taxes_and_charges),
"credit_in_account_currency": flt(self.total_taxes_and_charges),
})
jv.insert()
jv.submit()
def update_landed_cost(self):
    """Re-apply landed-cost amounts to every linked receipt document and
    rebuild its stock ledger and GL entries.

    Each receipt's entries are re-posted first in cancelled state and then
    in submitted state so that they reflect the current voucher values;
    the order of these steps is significant.
    """
    for d in self.get("purchase_receipts"):
        doc = frappe.get_doc(d.receipt_document_type, d.receipt_document)
        # set landed cost voucher amount in pr item
        doc.set_landed_cost_voucher_amount()
        # set valuation amount in pr item
        doc.update_valuation_rate("items")
        # db_update will update and save landed_cost_voucher_amount and voucher_amount in PR
        for item in doc.get("items"):
            item.db_update()
        # update latest valuation rate in serial no
        self.update_rate_in_serial_no(doc)
        # update stock & gl entries for cancelled state of PR
        doc.docstatus = 2
        doc.update_stock_ledger(allow_negative_stock=True, via_landed_cost_voucher=True)
        doc.make_gl_entries_on_cancel(repost_future_gle=False)
        # update stock & gl entries for submit state of PR
        doc.docstatus = 1
        doc.update_stock_ledger(via_landed_cost_voucher=True)
        doc.make_gl_entries()
def update_rate_in_serial_no(self, receipt_document):
    """Write each receipt item's valuation rate into the ``purchase_rate``
    of all serial numbers attached to that item."""
    for item in receipt_document.get("items"):
        if not item.serial_no:
            continue
        serial_nos = get_serial_nos(item.serial_no)
        if not serial_nos:
            continue
        # One parameterized %s placeholder per serial number.
        placeholders = ", ".join(["%s"] * len(serial_nos))
        frappe.db.sql(
            "update `tabSerial No` set purchase_rate=%s where name in ({0})".format(placeholders),
            tuple([item.valuation_rate] + serial_nos))
|
<filename>docs/pylib/update_default_cmd_index.py
#
# This creates a Google wiki page for all default commands with __doc__ strings.
#
# Import this from a Django-aware shell, then call run_update.
#
#
from os.path import dirname, abspath, join as pathjoin
from evennia.utils.utils import (
mod_import, variable_from_module, callables_from_module
)
# Names exported by `from <module> import *`.
# bugfix: ("run_update") is just a parenthesized string, not a tuple;
# a one-element tuple requires the trailing comma.
__all__ = ("run_update",)

# Markdown template for the generated page; filled in by run_update().
PAGE = """
# Default Commands
The full set of default Evennia commands currently contains {ncommands} commands in {nfiles} source
files. Our policy for adding default commands is outlined [here](Using-MUX-as-a-Standard). The
[Commands](Commands) documentation explains how Commands work as well as make new or customize
existing ones. Note that this page is auto-generated. Report problems to the [issue
tracker](github:issues).
```{{note}}
Some game-states adds their own Commands which are not listed here. Examples include editing a text
with [EvEditor](EvEditor), flipping pages in [EvMore](EvMore) or using the
[Batch-Processor](Batch-Processors)'s interactive mode.
```
{alphabetical}
"""
def run_update(no_autodoc=False):
    """Regenerate ``source/Components/Default-Commands.md`` from the default cmdsets.

    Args:
        no_autodoc (bool): if True, skip the update entirely (used when
            autodoc generation is disabled).
    """
    if no_autodoc:
        return

    # (module path, cmdset class name) pairs for all default cmdsets.
    cmdsets = (
        ("evennia.commands.default.cmdset_character", "CharacterCmdSet"),
        ("evennia.commands.default.cmdset_account", "AccountCmdSet"),
        ("evennia.commands.default.cmdset_unloggedin", "UnloggedinCmdSet"),
        ("evennia.commands.default.cmdset_session", "SessionCmdSet"),
    )
    cmd_modules = (
        "evennia.commands.default.account",
        "evennia.commands.default.batchprocess",
        "evennia.commands.default.building",
        "evennia.commands.default.comms",
        "evennia.commands.default.general",
        "evennia.commands.default.help",
        # bugfix: was "...syscommandsyyp", which is not an importable module
        "evennia.commands.default.syscommands",
        "evennia.commands.default.system",
        "evennia.commands.default.unloggedin",
    )

    # Map every command (keyed by its dotted path) to the cmdset holding it.
    cmds_per_cmdset = {}
    cmd_to_cmdset_map = {}
    for modname, cmdsetname in cmdsets:
        cmdset = variable_from_module(modname, variable=cmdsetname)()
        cmdset.at_cmdset_creation()
        cmds_per_cmdset[cmdsetname] = cmdset.commands
        for cmd in cmdset.commands:
            cmd_to_cmdset_map[f"{cmd.__module__}.{cmd.__class__.__name__}"] = cmdset

    # Collect every Cmd* class from the command modules, sorted by key.
    cmds_per_module = {}
    cmd_to_module_map = {}
    cmds_alphabetically = []
    for modname in cmd_modules:
        module = mod_import(modname)
        cmds_per_module[module] = [
            cmd for cmd in callables_from_module(module).values()
            if cmd.__name__.startswith("Cmd")]
        for cmd in cmds_per_module[module]:
            cmd_to_module_map[cmd] = module
            cmds_alphabetically.append(cmd)
    cmds_alphabetically = list(sorted(cmds_alphabetically, key=lambda c: c.key))

    cmd_infos = []
    for cmd in cmds_alphabetically:
        # Strip a leading '@' from each alias for display.
        aliases = [alias[1:] if alias and alias[0] == "@" else alias
                   for alias in sorted(cmd.aliases)]
        # bugfix: join the stripped alias list; the original re-joined the
        # unstripped cmd.aliases, discarding the '@' removal above.
        aliases = f" [{', '.join(aliases)}]" if aliases else ""
        cmdlink = f"[**{cmd.key}**{aliases}]({cmd.__module__}.{cmd.__name__})"
        category = f"help-category: _{cmd.help_category.capitalize()}_"
        cmdset = cmd_to_cmdset_map.get(f"{cmd.__module__}.{cmd.__name__}", None)
        if cmdset:
            cmodule = cmdset.__module__
            cname = cmdset.__class__.__name__
            cmdsetlink = f"cmdset: [{cname}]({cmodule}.{cname}), "
        else:
            # we skip commands not in the default cmdsets
            continue
        cmd_infos.append(f"{cmdlink} ({cmdsetlink}{category})")

    txt = PAGE.format(
        ncommands=len(cmd_to_cmdset_map),
        nfiles=len(cmds_per_module),
        alphabetical="\n".join(f"- {info}" for info in cmd_infos))

    # Write the page next to the other generated component docs.
    outdir = pathjoin(dirname(dirname(abspath(__file__))), "source", "Components")
    fname = pathjoin(outdir, "Default-Commands.md")
    with open(fname, 'w') as fil:
        fil.write(txt)
    print(" -- Updated Default Command index.")
if __name__ == "__main__":
    # Allow running this module directly as a script.
    run_update()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Meta-architecture definition.
General tensorflow implementation of convolutional Multibox/SSD detection
models.
"""
from abc import abstractmethod
import re
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_predictor as bpredictor
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils
slim = tf.contrib.slim
class SSDFeatureExtractor(object):
  """Base class for SSD feature extractors.

  Concrete subclasses implement `preprocess` and `extract_features`; this
  base class only stores the configuration shared by all extractors.
  """

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams,
               batch_norm_trainable=True,
               reuse_weights=None):
    """Constructor.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.
      batch_norm_trainable: whether to update batch norm parameters during
        training. With very small batch sizes (e.g. 1) it can be preferable
        to freeze them and rely on pretrained batch norm statistics.
      reuse_weights: whether to reuse variables. Default is None.
    """
    # Training / variable-handling flags.
    self._is_training = is_training
    self._batch_norm_trainable = batch_norm_trainable
    self._reuse_weights = reuse_weights
    # Architecture configuration.
    self._depth_multiplier = depth_multiplier
    self._min_depth = min_depth
    self._pad_to_multiple = pad_to_multiple
    self._conv_hyperparams = conv_hyperparams

  @abstractmethod
  def preprocess(self, resized_inputs):
    """Preprocesses images for feature extraction (minus image resizing).

    Args:
      resized_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    """
    pass

  @abstractmethod
  def extract_features(self, preprocessed_inputs):
    """Extracts feature maps from preprocessed images.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    pass
class SSDMetaArch(model.DetectionModel):
  """SSD Meta-architecture definition."""

  def __init__(self,
               is_training,
               anchor_generator,
               box_predictor,
               box_coder,
               feature_extractor,
               matcher,
               region_similarity_calculator,
               image_resizer_fn,
               non_max_suppression_fn,
               score_conversion_fn,
               classification_loss,
               localization_loss,
               classification_loss_weight,
               localization_loss_weight,
               normalize_loss_by_num_matches,
               hard_example_miner,
               add_summaries=True):
    """SSDMetaArch Constructor.

    TODO: group NMS parameters + score converter into a class and loss
    parameters into a class and write config protos for postprocessing
    and losses.

    Args:
      is_training: A boolean indicating whether the training version of the
        computation graph should be constructed.
      anchor_generator: an anchor_generator.AnchorGenerator object.
      box_predictor: a box_predictor.BoxPredictor object.
      box_coder: a box_coder.BoxCoder object.
      feature_extractor: a SSDFeatureExtractor object.
      matcher: a matcher.Matcher object.
      region_similarity_calculator: a
        region_similarity_calculator.RegionSimilarityCalculator object.
      image_resizer_fn: a callable for image resizing. This callable always
        takes a rank-3 image tensor (corresponding to a single image) and
        returns a rank-3 image tensor, possibly with new spatial dimensions.
        See builders/image_resizer_builder.py.
      non_max_suppression_fn: batch_multiclass_non_max_suppression
        callable that takes `boxes`, `scores` and optional `clip_window`
        inputs (with all other inputs already set) and returns a dictionary
        hold tensors with keys: `detection_boxes`, `detection_scores`,
        `detection_classes` and `num_detections`. See `post_processing.
        batch_multiclass_non_max_suppression` for the type and shape of these
        tensors.
      score_conversion_fn: callable elementwise nonlinearity (that takes tensors
        as inputs and returns tensors). This is usually used to convert logits
        to probabilities.
      classification_loss: an object_detection.core.losses.Loss object.
      localization_loss: a object_detection.core.losses.Loss object.
      classification_loss_weight: float
      localization_loss_weight: float
      normalize_loss_by_num_matches: boolean
      hard_example_miner: a losses.HardExampleMiner object (can be None)
      add_summaries: boolean (default: True) controlling whether summary ops
        should be added to tensorflow graph.
    """
    super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes)
    self._is_training = is_training

    # Needed for fine-tuning from classification checkpoints whose
    # variables do not have the feature extractor scope.
    self._extract_features_scope = 'FeatureExtractor'

    self._anchor_generator = anchor_generator
    self._box_predictor = box_predictor
    self._box_coder = box_coder
    self._feature_extractor = feature_extractor
    self._matcher = matcher
    self._region_similarity_calculator = region_similarity_calculator

    # TODO: handle agnostic mode and positive/negative class weights
    # Target used for anchors matching no groundtruth: all mass on the
    # background class (index 0). (A dead `unmatched_cls_target = None`
    # assignment that was immediately overwritten has been removed.)
    unmatched_cls_target = tf.constant([1] + self.num_classes * [0], tf.float32)
    self._target_assigner = target_assigner.TargetAssigner(
        self._region_similarity_calculator,
        self._matcher,
        self._box_coder,
        positive_class_weight=1.0,
        negative_class_weight=1.0,
        unmatched_cls_target=unmatched_cls_target)

    self._classification_loss = classification_loss
    self._localization_loss = localization_loss
    self._classification_loss_weight = classification_loss_weight
    self._localization_loss_weight = localization_loss_weight
    self._normalize_loss_by_num_matches = normalize_loss_by_num_matches
    self._hard_example_miner = hard_example_miner

    self._image_resizer_fn = image_resizer_fn
    self._non_max_suppression_fn = non_max_suppression_fn
    self._score_conversion_fn = score_conversion_fn

    # Populated lazily by `predict`.
    self._anchors = None
    self._add_summaries = add_summaries

  @property
  def anchors(self):
    if not self._anchors:
      raise RuntimeError('anchors have not been constructed yet!')
    if not isinstance(self._anchors, box_list.BoxList):
      raise RuntimeError('anchors should be a BoxList object, but is not.')
    return self._anchors

  def preprocess(self, inputs):
    """Feature-extractor specific preprocessing.

    See base class.

    Args:
      inputs: a [batch, height_in, width_in, channels] float tensor representing
        a batch of images with values between 0 and 255.0.

    Returns:
      preprocessed_inputs: a [batch, height_out, width_out, channels] float
        tensor representing a batch of images.
    Raises:
      ValueError: if inputs tensor does not have type tf.float32
    """
    if inputs.dtype is not tf.float32:
      raise ValueError('`preprocess` expects a tf.float32 tensor')
    with tf.name_scope('Preprocessor'):
      # TODO: revisit whether to always use batch size as the number of parallel
      # iterations vs allow for dynamic batching.
      resized_inputs = tf.map_fn(self._image_resizer_fn,
                                 elems=inputs,
                                 dtype=tf.float32)
      return self._feature_extractor.preprocess(resized_inputs)

  def predict(self, preprocessed_inputs):
    """Predicts unpostprocessed tensors from input tensor.

    This function takes an input batch of images and runs it through the forward
    pass of the network to yield unpostprocessesed predictions.

    A side effect of calling the predict method is that self._anchors is
    populated with a box_list.BoxList of anchors. These anchors must be
    constructed before the postprocess or loss functions can be called.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] image tensor.

    Returns:
      prediction_dict: a dictionary holding "raw" prediction tensors:
        1) box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
          box_code_dimension] containing predicted boxes.
        2) class_predictions_with_background: 3-D float tensor of shape
          [batch_size, num_anchors, num_classes+1] containing class predictions
          (logits) for each of the anchors. Note that this tensor *includes*
          background class predictions (at class index 0).
        3) feature_maps: a list of tensors where the ith tensor has shape
          [batch, height_i, width_i, depth_i].
        4) anchors: 2-D float tensor of shape [num_anchors, 4] containing
          the generated anchors in normalized coordinates.
    """
    with tf.variable_scope(None, self._extract_features_scope,
                           [preprocessed_inputs]):
      feature_maps = self._feature_extractor.extract_features(
          preprocessed_inputs)
    feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
    image_shape = tf.shape(preprocessed_inputs)
    self._anchors = self._anchor_generator.generate(
        feature_map_spatial_dims,
        im_height=image_shape[1],
        im_width=image_shape[2])
    (box_encodings, class_predictions_with_background
    ) = self._add_box_predictions_to_feature_maps(feature_maps)
    predictions_dict = {
        'box_encodings': box_encodings,
        'class_predictions_with_background': class_predictions_with_background,
        'feature_maps': feature_maps,
        'anchors': self._anchors.get()
    }
    return predictions_dict

  def _add_box_predictions_to_feature_maps(self, feature_maps):
    """Adds box predictors to each feature map and returns concatenated results.

    Args:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Returns:
      box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
          box_code_dimension] containing predicted boxes.
      class_predictions_with_background: 3-D float tensor of shape
          [batch_size, num_anchors, num_classes+1] containing class predictions
          (logits) for each of the anchors. Note that this tensor *includes*
          background class predictions (at class index 0).

    Raises:
      RuntimeError: if the number of feature maps extracted via the
        extract_features method does not match the length of the
        num_anchors_per_locations list that was passed to the constructor.
      RuntimeError: if box_encodings from the box_predictor does not have
        shape of the form [batch_size, num_anchors, 1, code_size].
    """
    num_anchors_per_location_list = (
        self._anchor_generator.num_anchors_per_location())
    if len(feature_maps) != len(num_anchors_per_location_list):
      raise RuntimeError('the number of feature maps must match the '
                         'length of self.anchors.NumAnchorsPerLocation().')
    box_encodings_list = []
    cls_predictions_with_background_list = []
    for idx, (feature_map, num_anchors_per_location
             ) in enumerate(zip(feature_maps, num_anchors_per_location_list)):
      box_predictor_scope = 'BoxPredictor_{}'.format(idx)
      box_predictions = self._box_predictor.predict(feature_map,
                                                    num_anchors_per_location,
                                                    box_predictor_scope)
      box_encodings = box_predictions[bpredictor.BOX_ENCODINGS]
      cls_predictions_with_background = box_predictions[
          bpredictor.CLASS_PREDICTIONS_WITH_BACKGROUND]

      box_encodings_shape = box_encodings.get_shape().as_list()
      if len(box_encodings_shape) != 4 or box_encodings_shape[2] != 1:
        raise RuntimeError('box_encodings from the box_predictor must be of '
                           'shape `[batch_size, num_anchors, 1, code_size]`; '
                           'actual shape', box_encodings_shape)
      # Drop the singleton dimension so encodings are [batch, anchors, code].
      box_encodings = tf.squeeze(box_encodings, axis=2)
      box_encodings_list.append(box_encodings)
      cls_predictions_with_background_list.append(
          cls_predictions_with_background)

    # Graph-time sanity check: total predictions must equal total anchors.
    num_predictions = sum(
        [tf.shape(box_encodings)[1] for box_encodings in box_encodings_list])
    num_anchors = self.anchors.num_boxes()
    anchors_assert = tf.assert_equal(num_anchors, num_predictions, [
        'Mismatch: number of anchors vs number of predictions', num_anchors,
        num_predictions
    ])
    with tf.control_dependencies([anchors_assert]):
      box_encodings = tf.concat(box_encodings_list, 1)
      class_predictions_with_background = tf.concat(
          cls_predictions_with_background_list, 1)
    return box_encodings, class_predictions_with_background

  def _get_feature_map_spatial_dims(self, feature_maps):
    """Return list of spatial dimensions for each feature map in a list.

    Args:
      feature_maps: a list of tensors where the ith tensor has shape
          [batch, height_i, width_i, depth_i].

    Returns:
      a list of pairs (height, width) for each feature map in feature_maps
    """
    feature_map_shapes = [
        shape_utils.combined_static_and_dynamic_shape(
            feature_map) for feature_map in feature_maps
    ]
    return [(shape[1], shape[2]) for shape in feature_map_shapes]

  def postprocess(self, prediction_dict):
    """Converts prediction tensors to final detections.

    This function converts raw predictions tensors to final detection results by
    slicing off the background class, decoding box predictions and applying
    non max suppression and clipping to the image window.

    See base class for output format conventions.  Note also that by default,
    scores are to be interpreted as logits, but if a score_conversion_fn is
    used, then scores are remapped (and may thus have a different
    interpretation).

    Args:
      prediction_dict: a dictionary holding prediction tensors with
        1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
          box_code_dimension] containing predicted boxes.
        2) class_predictions_with_background: 3-D float tensor of shape
          [batch_size, num_anchors, num_classes+1] containing class predictions
          (logits) for each of the anchors. Note that this tensor *includes*
          background class predictions.

    Returns:
      detections: a dictionary containing the following fields
        detection_boxes: [batch, max_detections, 4]
        detection_scores: [batch, max_detections]
        detection_classes: [batch, max_detections]
        detection_keypoints: [batch, max_detections, num_keypoints, 2] (if
          encoded in the prediction_dict 'box_encodings')
        num_detections: [batch]
    Raises:
      ValueError: if prediction_dict does not contain `box_encodings` or
        `class_predictions_with_background` fields.
    """
    if ('box_encodings' not in prediction_dict or
        'class_predictions_with_background' not in prediction_dict):
      raise ValueError('prediction_dict does not contain expected entries.')
    with tf.name_scope('Postprocessor'):
      box_encodings = prediction_dict['box_encodings']
      class_predictions = prediction_dict['class_predictions_with_background']
      detection_boxes, detection_keypoints = self._batch_decode(box_encodings)
      detection_boxes = tf.expand_dims(detection_boxes, axis=2)

      # Slice off the background class (index 0) before score conversion.
      class_predictions_without_background = tf.slice(class_predictions,
                                                      [0, 0, 1],
                                                      [-1, -1, -1])
      detection_scores = self._score_conversion_fn(
          class_predictions_without_background)
      clip_window = tf.constant([0, 0, 1, 1], tf.float32)
      additional_fields = None
      if detection_keypoints is not None:
        additional_fields = {
            fields.BoxListFields.keypoints: detection_keypoints}
      (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
       num_detections) = self._non_max_suppression_fn(
           detection_boxes,
           detection_scores,
           clip_window=clip_window,
           additional_fields=additional_fields)
      detection_dict = {'detection_boxes': nmsed_boxes,
                        'detection_scores': nmsed_scores,
                        'detection_classes': nmsed_classes,
                        'num_detections': tf.to_float(num_detections)}
      if (nmsed_additional_fields is not None and
          fields.BoxListFields.keypoints in nmsed_additional_fields):
        detection_dict['detection_keypoints'] = nmsed_additional_fields[
            fields.BoxListFields.keypoints]
      return detection_dict

  def loss(self, prediction_dict, scope=None):
    """Compute scalar loss tensors with respect to provided groundtruth.

    Calling this function requires that groundtruth tensors have been
    provided via the provide_groundtruth function.

    Args:
      prediction_dict: a dictionary holding prediction tensors with
        1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
          box_code_dimension] containing predicted boxes.
        2) class_predictions_with_background: 3-D float tensor of shape
          [batch_size, num_anchors, num_classes+1] containing class predictions
          (logits) for each of the anchors. Note that this tensor *includes*
          background class predictions.
      scope: Optional scope name.

    Returns:
      a dictionary mapping loss keys (`localization_loss` and
        `classification_loss`) to scalar tensors representing corresponding loss
        values.
    """
    with tf.name_scope(scope, 'Loss', prediction_dict.values()):
      keypoints = None
      if self.groundtruth_has_field(fields.BoxListFields.keypoints):
        keypoints = self.groundtruth_lists(fields.BoxListFields.keypoints)
      (batch_cls_targets, batch_cls_weights, batch_reg_targets,
       batch_reg_weights, match_list) = self._assign_targets(
           self.groundtruth_lists(fields.BoxListFields.boxes),
           self.groundtruth_lists(fields.BoxListFields.classes),
           keypoints)
      if self._add_summaries:
        self._summarize_input(
            self.groundtruth_lists(fields.BoxListFields.boxes), match_list)
      num_matches = tf.stack(
          [match.num_matched_columns() for match in match_list])
      location_losses = self._localization_loss(
          prediction_dict['box_encodings'],
          batch_reg_targets,
          ignore_nan_targets=True,
          weights=batch_reg_weights)
      cls_losses = self._classification_loss(
          prediction_dict['class_predictions_with_background'],
          batch_cls_targets,
          weights=batch_cls_weights)

      if self._hard_example_miner:
        (localization_loss, classification_loss) = self._apply_hard_mining(
            location_losses, cls_losses, prediction_dict, match_list)
        if self._add_summaries:
          self._hard_example_miner.summarize()
      else:
        if self._add_summaries:
          class_ids = tf.argmax(batch_cls_targets, axis=2)
          flattened_class_ids = tf.reshape(class_ids, [-1])
          flattened_classification_losses = tf.reshape(cls_losses, [-1])
          self._summarize_anchor_classification_loss(
              flattened_class_ids, flattened_classification_losses)
        localization_loss = tf.reduce_sum(location_losses)
        classification_loss = tf.reduce_sum(cls_losses)

      # Optionally normalize by number of positive matches
      normalizer = tf.constant(1.0, dtype=tf.float32)
      if self._normalize_loss_by_num_matches:
        normalizer = tf.maximum(tf.to_float(tf.reduce_sum(num_matches)), 1.0)

      with tf.name_scope('localization_loss'):
        localization_loss = ((self._localization_loss_weight / normalizer) *
                             localization_loss)
      with tf.name_scope('classification_loss'):
        classification_loss = ((self._classification_loss_weight / normalizer) *
                               classification_loss)

      loss_dict = {
          'localization_loss': localization_loss,
          'classification_loss': classification_loss
      }
    return loss_dict

  def _summarize_anchor_classification_loss(self, class_ids, cls_losses):
    # Summarize the CDF of anchorwise classification losses, split into
    # positive (matched) and negative (background) anchors.
    positive_indices = tf.where(tf.greater(class_ids, 0))
    positive_anchor_cls_loss = tf.squeeze(
        tf.gather(cls_losses, positive_indices), axis=1)
    visualization_utils.add_cdf_image_summary(positive_anchor_cls_loss,
                                              'PositiveAnchorLossCDF')
    negative_indices = tf.where(tf.equal(class_ids, 0))
    negative_anchor_cls_loss = tf.squeeze(
        tf.gather(cls_losses, negative_indices), axis=1)
    visualization_utils.add_cdf_image_summary(negative_anchor_cls_loss,
                                              'NegativeAnchorLossCDF')

  def _assign_targets(self, groundtruth_boxes_list, groundtruth_classes_list,
                      groundtruth_keypoints_list=None):
    """Assign groundtruth targets.

    Adds a background class to each one-hot encoding of groundtruth classes
    and uses target assigner to obtain regression and classification targets.

    Args:
      groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
        containing coordinates of the groundtruth boxes.
          Groundtruth boxes are provided in [y_min, x_min, y_max, x_max]
          format and assumed to be normalized and clipped
          relative to the image window with y_min <= y_max and x_min <= x_max.
      groundtruth_classes_list: a list of 2-D one-hot (or k-hot) tensors of
        shape [num_boxes, num_classes] containing the class targets with the 0th
        index assumed to map to the first non-background class.
      groundtruth_keypoints_list: (optional) a list of 3-D tensors of shape
        [num_boxes, num_keypoints, 2]

    Returns:
      batch_cls_targets: a tensor with shape [batch_size, num_anchors,
        num_classes],
      batch_cls_weights: a tensor with shape [batch_size, num_anchors],
      batch_reg_targets: a tensor with shape [batch_size, num_anchors,
        box_code_dimension]
      batch_reg_weights: a tensor with shape [batch_size, num_anchors],
      match_list: a list of matcher.Match objects encoding the match between
        anchors and groundtruth boxes for each image of the batch,
        with rows of the Match objects corresponding to groundtruth boxes
        and columns corresponding to anchors.
    """
    groundtruth_boxlists = [
        box_list.BoxList(boxes) for boxes in groundtruth_boxes_list
    ]
    # Prepend a background-class column (index 0) to each one-hot encoding.
    groundtruth_classes_with_background_list = [
        tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT')
        for one_hot_encoding in groundtruth_classes_list
    ]
    if groundtruth_keypoints_list is not None:
      for boxlist, keypoints in zip(
          groundtruth_boxlists, groundtruth_keypoints_list):
        boxlist.add_field(fields.BoxListFields.keypoints, keypoints)
    return target_assigner.batch_assign_targets(
        self._target_assigner, self.anchors, groundtruth_boxlists,
        groundtruth_classes_with_background_list)

  def _summarize_input(self, groundtruth_boxes_list, match_list):
    """Creates tensorflow summaries for the input boxes and anchors.

    This function creates four summaries corresponding to the average
    number (over images in a batch) of (1) groundtruth boxes, (2) anchors
    marked as positive, (3) anchors marked as negative, and (4) anchors marked
    as ignored.

    Args:
      groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
        containing corners of the groundtruth boxes.
      match_list: a list of matcher.Match objects encoding the match between
        anchors and groundtruth boxes for each image of the batch,
        with rows of the Match objects corresponding to groundtruth boxes
        and columns corresponding to anchors.
    """
    num_boxes_per_image = tf.stack(
        [tf.shape(x)[0] for x in groundtruth_boxes_list])
    pos_anchors_per_image = tf.stack(
        [match.num_matched_columns() for match in match_list])
    neg_anchors_per_image = tf.stack(
        [match.num_unmatched_columns() for match in match_list])
    ignored_anchors_per_image = tf.stack(
        [match.num_ignored_columns() for match in match_list])
    tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                      tf.reduce_mean(tf.to_float(num_boxes_per_image)))
    tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))

  def _apply_hard_mining(self, location_losses, cls_losses, prediction_dict,
                         match_list):
    """Applies hard mining to anchorwise losses.

    Args:
      location_losses: Float tensor of shape [batch_size, num_anchors]
        representing anchorwise location losses.
      cls_losses: Float tensor of shape [batch_size, num_anchors]
        representing anchorwise classification losses.
      prediction_dict: a dictionary holding prediction tensors with
        1) box_encodings: 3-D float tensor of shape [batch_size, num_anchors,
          box_code_dimension] containing predicted boxes.
        2) class_predictions_with_background: 3-D float tensor of shape
          [batch_size, num_anchors, num_classes+1] containing class predictions
          (logits) for each of the anchors. Note that this tensor *includes*
          background class predictions.
      match_list: a list of matcher.Match objects encoding the match between
        anchors and groundtruth boxes for each image of the batch,
        with rows of the Match objects corresponding to groundtruth boxes
        and columns corresponding to anchors.

    Returns:
      mined_location_loss: a float scalar with sum of localization losses from
        selected hard examples.
      mined_cls_loss: a float scalar with sum of classification losses from
        selected hard examples.
    """
    class_predictions = tf.slice(
        prediction_dict['class_predictions_with_background'], [0, 0,
                                                               1], [-1, -1, -1])
    decoded_boxes, _ = self._batch_decode(prediction_dict['box_encodings'])
    decoded_box_tensors_list = tf.unstack(decoded_boxes)
    class_prediction_list = tf.unstack(class_predictions)
    decoded_boxlist_list = []
    for box_location, box_score in zip(decoded_box_tensors_list,
                                       class_prediction_list):
      decoded_boxlist = box_list.BoxList(box_location)
      decoded_boxlist.add_field('scores', box_score)
      decoded_boxlist_list.append(decoded_boxlist)
    return self._hard_example_miner(
        location_losses=location_losses,
        cls_losses=cls_losses,
        decoded_boxlist_list=decoded_boxlist_list,
        match_list=match_list)

  def _batch_decode(self, box_encodings):
    """Decodes a batch of box encodings with respect to the anchors.

    Args:
      box_encodings: A float32 tensor of shape
        [batch_size, num_anchors, box_code_size] containing box encodings.

    Returns:
      decoded_boxes: A float32 tensor of shape
        [batch_size, num_anchors, 4] containing the decoded boxes.
      decoded_keypoints: A float32 tensor of shape
        [batch_size, num_anchors, num_keypoints, 2] containing the decoded
        keypoints if present in the input `box_encodings`, None otherwise.
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        box_encodings)
    batch_size = combined_shape[0]
    # Flatten (batch, anchors) so the box coder can decode all rows at once.
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(self.anchors.get(), 0), [batch_size, 1, 1])
    tiled_anchors_boxlist = box_list.BoxList(
        tf.reshape(tiled_anchor_boxes, [-1, 4]))
    decoded_boxes = self._box_coder.decode(
        tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
        tiled_anchors_boxlist)
    decoded_keypoints = None
    if decoded_boxes.has_field(fields.BoxListFields.keypoints):
      decoded_keypoints = decoded_boxes.get_field(
          fields.BoxListFields.keypoints)
      num_keypoints = decoded_keypoints.get_shape()[1]
      decoded_keypoints = tf.reshape(
          decoded_keypoints,
          tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
    decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack(
        [combined_shape[0], combined_shape[1], 4]))
    return decoded_boxes, decoded_keypoints

  def restore_map(self, from_detection_checkpoint=True):
    """Returns a map of variables to load from a foreign checkpoint.

    See parent class for details.

    Args:
      from_detection_checkpoint: whether to restore from a full detection
        checkpoint (with compatible variable names) or to restore from a
        classification checkpoint for initialization prior to training.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    variables_to_restore = {}
    for variable in tf.global_variables():
      if variable.op.name.startswith(self._extract_features_scope):
        var_name = variable.op.name
        if not from_detection_checkpoint:
          # Strip the feature-extractor scope so names match the
          # classification checkpoint's variable names.
          var_name = (re.split('^' + self._extract_features_scope + '/',
                               var_name)[-1])
        variables_to_restore[var_name] = variable
    return variables_to_restore
|
import numpy as np
import pandas as pd
import random
def sir_network(graph, num_nodes, t_sim, infection_probability=0.3,
                recovery_period=7, num_infected_init=None):
    """Simulate a stochastic SIR epidemic on a network.

    Args:
        graph: network exposing ``.nodes`` and ``.neighbors(node)`` (e.g. a
            networkx graph); node order must match state-vector indices.
        num_nodes: number of nodes in ``graph``.
        t_sim: number of simulated days (time steps).
        infection_probability: per-contact, per-step infection probability.
        recovery_period: mean infectious period in days (recovery
            probability per step is ``1 / recovery_period``).
        num_infected_init: number of initially infected nodes; defaults to
            1% of the population.

    Returns:
        (df_graph, df_states): per-node state history (0 susceptible,
        1 infected, 2 recovered) indexed by date, and daily S/I/R counts.
    """
    Pi = infection_probability  # beta (infection probability)
    Pr = 1 / recovery_period    # gamma (recovery probability per day)
    if not num_infected_init:
        num_infected_init = int(num_nodes / 100)
    # State matrix: rows are days, columns are nodes.
    S = np.zeros((t_sim, num_nodes))
    # BUGFIX: sample seeds without replacement; np.random.randint could draw
    # duplicate indices, silently seeding fewer than num_infected_init nodes.
    idx_init = list(np.random.choice(num_nodes, num_infected_init, replace=False))
    S[0, idx_init] = 1  # seed the initial infected
    df_states = pd.DataFrame(
        index=pd.date_range(start=0, periods=t_sim, freq='D'),
        columns=['S', 'I', 'R']).fillna(0)
    susceptibles_time = [num_nodes - num_infected_init]
    infected_time = [num_infected_init]
    recovered_time = [0]
    # Time loop
    for t in range(1, t_sim):
        infected = []
        recovered = []
        for ni, node_i in enumerate(list(graph.nodes)):
            if S[t - 1, ni] == 2:
                # Recovery is permanent.
                recovered.append(ni)
            if S[t - 1, ni] == 1:
                # Two cases: (A) recover or (B) stay infected and possibly
                # infect susceptible neighbors.
                if random.random() < Pr:
                    recovered.append(ni)
                else:
                    infected.append(ni)
                    for nn in graph.neighbors(node_i):
                        if S[t - 1, nn] == 0 and random.random() < Pi:
                            infected.append(nn)
        infected = list(np.unique(infected))
        recovered = list(np.unique(recovered))
        susceptibles_time.append(num_nodes - len(infected) - len(recovered))
        infected_time.append(len(infected))
        recovered_time.append(len(recovered))
        # Update graph state
        S[t, infected] = 1
        S[t, recovered] = 2
    df_states['S'] = susceptibles_time
    df_states['I'] = infected_time
    df_states['R'] = recovered_time
    df_graph = pd.DataFrame(S)
    df_graph.index = df_states.index.values
    # BUGFIX: DataFrame.rename returns a new frame; the original discarded the
    # result, leaving columns numbered instead of labeled with node ids.
    df_graph = df_graph.rename(columns={i: ni for i, ni in enumerate(graph.nodes)})
    # Return DF with per-node graph states and aggregate variables in time.
    return (df_graph, df_states)
def seir_network(graph, num_nodes, t_sim, infection_probability=0.3,
                 incubation_period=4, recovery_period=7, num_infected_init=None):
    """Simulate a stochastic SEIR epidemic on a network.

    Args:
        graph: network exposing ``.nodes`` and ``.neighbors(node)`` (e.g. a
            networkx graph); node order must match state-vector indices.
        num_nodes: number of nodes in ``graph``.
        t_sim: number of simulated days (time steps).
        infection_probability: per-contact, per-step exposure probability.
        incubation_period: mean incubation time in days (E -> I probability
            per step is ``1 / incubation_period``).
        recovery_period: mean infectious period in days (I -> R probability
            per step is ``1 / recovery_period``).
        num_infected_init: number of initially infected nodes; defaults to
            roughly 1% of the population plus one.

    Returns:
        (df_graph, df_states): per-node state history (0 susceptible,
        1 exposed, 2 infected, 3 recovered) indexed by date, and daily
        S/E/I/R counts.
    """
    Pe = infection_probability   # beta (exposure probability per contact)
    Pi = 1 / incubation_period   # kappa (E -> I probability per day)
    Pr = 1 / recovery_period     # gamma (I -> R probability per day)
    if not num_infected_init:
        num_infected_init = round(num_nodes / 100) + 1
    # State matrix: rows are days, columns are nodes.
    S = np.zeros((t_sim, num_nodes))
    # BUGFIX: sample seeds without replacement; np.random.randint could draw
    # duplicate indices, silently seeding fewer than num_infected_init nodes.
    idx_init = list(np.random.choice(num_nodes, num_infected_init, replace=False))
    S[0, idx_init] = 2  # seed the initial infected
    df_states = pd.DataFrame(
        index=pd.date_range(start=0, periods=t_sim, freq='D'),
        columns=['S', 'E', 'I', 'R']).fillna(0)
    susceptibles_time = [num_nodes - num_infected_init]
    exposed_time = [0]
    infected_time = [num_infected_init]
    recovered_time = [0]
    # Time loop
    for t in range(1, t_sim):
        exposed = []
        infected = []
        recovered = []
        for ni, node_i in enumerate(graph.nodes):
            if S[t - 1, ni] == 3:
                # Recovery is permanent.
                recovered.append(ni)
            if S[t - 1, ni] == 1 and random.random() < Pi:
                # Exposed node becomes infectious.
                infected.append(ni)
            elif S[t - 1, ni] == 1:
                exposed.append(ni)
            if S[t - 1, ni] == 2:
                # Two cases: (A) recover or (B) stay infectious and possibly
                # expose susceptible neighbors.
                if random.random() < Pr:
                    recovered.append(ni)
                else:
                    infected.append(ni)
                    for nn in graph.neighbors(node_i):
                        if S[t - 1, nn] == 0 and random.random() < Pe:
                            exposed.append(nn)
        exposed = list(np.unique(exposed))
        infected = list(np.unique(infected))
        recovered = list(np.unique(recovered))
        susceptibles_time.append(
            num_nodes - len(exposed) - len(infected) - len(recovered))
        exposed_time.append(len(exposed))
        infected_time.append(len(infected))
        recovered_time.append(len(recovered))
        # Update graph state
        S[t, exposed] = 1
        S[t, infected] = 2
        S[t, recovered] = 3
    df_states['S'] = susceptibles_time
    df_states['E'] = exposed_time
    df_states['I'] = infected_time
    df_states['R'] = recovered_time
    df_graph = pd.DataFrame(S)
    df_graph.index = df_states.index.values
    # BUGFIX: DataFrame.rename returns a new frame; the original discarded the
    # result, leaving columns numbered instead of labeled with node ids.
    df_graph = df_graph.rename(columns={i: ni for i, ni in enumerate(graph.nodes)})
    # Return DF with per-node graph states and aggregate variables in time.
    return (df_graph, df_states)
# -*- coding: utf-8 -*-
"""nDimSegmentTree.py
This module implements multi dimensional segment tree.
A segment tree also known as a statistic tree is a tree data structure used
for storing information about intervals, or segments. It allows querying
which of the stored segments contain a given point. It is, in principle, a
static structure; that is, it's a structure that cannot be modified once it's
built.
A segment tree for a set I of n intervals uses O(n log n) storage and can be
built in O(n log n) time. Segment trees support searching for all the intervals
that contain a query point in O(log n + k), k being the number of retrieved
intervals or segments.
For the multi-dimensional implementation, some objects carry additional
bookkeeping such as an axis index. The 1-dim segment tree is still available,
which makes segmentTree.py redundant.
"""
# To Do: how to implement dynamic update?
import collections
from functools import partial
import math
import weakref
class Interval(object):
    """A 1-D interval whose endpoints are independently open or closed."""

    def __init__(self, left_endpoint, right_endpoint, l_closed, r_closed):
        self.left_endpoint = left_endpoint    # float or int
        self.right_endpoint = right_endpoint  # float or int
        self.left_closed = l_closed    # bool: left endpoint included
        self.right_closed = r_closed   # bool: right endpoint included

    def __repr__(self):
        """Mathematical bracket notation, e.g. Interval([1, 2))."""
        open_bracket = '[' if self.left_closed else '('
        close_bracket = ']' if self.right_closed else ')'
        return 'Interval({}{}, {}{})'.format(
            open_bracket, self.left_endpoint, self.right_endpoint, close_bracket)

    def contains(self, another_interval):
        """Return True if another_interval is a subset of this interval."""
        other = another_interval
        if other.left_endpoint < self.left_endpoint:
            return False
        if (other.left_endpoint == self.left_endpoint
                and other.left_closed and not self.left_closed):
            return False
        if other.right_endpoint > self.right_endpoint:
            return False
        if (other.right_endpoint == self.right_endpoint
                and other.right_closed and not self.right_closed):
            return False
        return True

    def intersects(self, interval):
        raise NotImplementedError('To do: implement intersection check')
class Cube(object):
    """An n-dimensional box: the Cartesian product of Interval sides."""

    # Class-level reverse lookup from a side interval back to its owning cube.
    # Weak keys, so intervals of garbage-collected cubes do not accumulate.
    interval2cube = weakref.WeakKeyDictionary()

    def __init__(self, *args):
        # Accept either Cube(i1, i2, ...) or Cube([i1, i2, ...]).
        sides = args[0] if len(args) == 1 else args
        self.dimension = len(sides)
        self.sides = sides
        self.interval2axis = dict()
        for axis_index, side in enumerate(sides):
            self.interval2cube[side] = self
            self.interval2axis[side] = axis_index

    def __repr__(self):
        return 'Cube' + repr(tuple(self.sides))

    @classmethod
    def find_cube(cls, interval):
        """Return the Cube that owns the given side interval."""
        return cls.interval2cube[interval]
class TreeNode(object):
    """A segment-tree node covering one elementary interval.

    Leaves cover elementary intervals or single points; an internal node
    covers the union of its two children. `subset` is the canonical subset:
    the input intervals stored at this node by SegmentTree._append_subset.
    """

    def __init__(self, left_endpoint, right_endpoint, l_closed, r_closed):
        self.left = None   # left child TreeNode, or None for a leaf
        self.right = None  # right child TreeNode, or None for a leaf
        intv = Interval(left_endpoint, right_endpoint, l_closed, r_closed)
        self.interval = intv
        # Endpoints/closedness are duplicated on the node for convenient
        # access during bottom-up tree construction.
        self.left_endpoint = left_endpoint
        self.right_endpoint = right_endpoint
        self.left_closed = l_closed
        self.right_closed = r_closed
        self.subset = []  # the canonical subset of given intervals

    def __repr__(self):
        """ mathematical representation of the node's interval """
        s = "{}, {}".format(self.left_endpoint, self.right_endpoint)
        if self.left_closed:
            left_bracket = '['
        else:
            left_bracket = '('
        if self.right_closed:
            right_bracket = ']'
        else:
            right_bracket = ')'
        interval_string = left_bracket + s + right_bracket
        return 'TreeNode({})'.format(interval_string)

    def query(self, point):
        """ return list of Interval objects containing point in the subtree """
        # A point is treated as the degenerate closed interval [p, p].
        point_interval = Interval(point, point, True, True)
        if not self.interval.contains(point_interval):
            # this node's interval doesn't contain point
            return []
        found = []
        for intv in self.subset:
            found.append(intv)
            # because intv contains the node's interval, which contains point
        if self.left is not None:
            for intv in self.left.query(point):
                found.append(intv)
        if self.right is not None:
            for intv in self.right.query(point):
                found.append(intv)
        return found

    def path_to_leaf(self, point, prev_path=None):
        """Return the node path from here down to the leaf containing point.

        NOTE(review): containment is never checked on the node this is first
        called on, and when a right child exists but neither child contains
        the point, [] is returned -- callers should treat an empty list as
        "point not covered". Confirm this matches the intended contract.
        """
        point_interval = Interval(point, point, True, True)
        if prev_path is None:
            prev_path = [self]
        else:
            prev_path.append(self)
        if self.left is not None:
            if self.left.interval.contains(point_interval):
                return self.left.path_to_leaf(point, prev_path=prev_path)
        if self.right is not None:
            if self.right.interval.contains(point_interval):
                return self.right.path_to_leaf(point, prev_path=prev_path)
            else:
                return []
        else:
            # No right child: this node is treated as the leaf of the path.
            return prev_path
class SegmentTree(object):
    """A static 1-D segment tree built once from a list of Interval objects."""

    def __init__(self, intervals):
        self.intervals = intervals
        self.root = None
        self.build_tree()

    def __repr__(self):
        return 'SegmentTree({})'.format(self.intervals)

    def query(self, point):
        """ return list of all Interval objects containing point """
        if self.root is None:
            raise Exception('tree must be built first')
        return self.root.query(point)

    def root_to_leaf(self, query_point):
        # Node path from the root down to the leaf containing query_point.
        return self.root.path_to_leaf(query_point)

    def build_tree(self):
        """ Build segment tree from given intervals and return the root.
        Takes O(n log(n)) time.
        """
        intervals = self.intervals
        # Collect all endpoints; leaves are built from the sorted unique ones.
        endpoints = []
        for interval in intervals:
            endpoints.append(interval.left_endpoint)
            endpoints.append(interval.right_endpoint)
        # +/- infinity sentinels make the outermost elementary intervals
        # unbounded.
        endpoints.append(float('inf'))
        endpoints.append(float('-inf'))
        endpoints.sort()
        # Deduplicate the sorted endpoint list (keep the last of each run).
        unique_endpoints = []
        for i, ep in enumerate(endpoints):
            if i + 1 < len(endpoints) and ep == endpoints[i + 1]:
                continue
            else:
                unique_endpoints.append(ep)
        # Build tuples describing the elementary intervals:
        # (left_endpoint, right_endpoint, l_closed, r_closed);
        # if left_endpoint == right_endpoint the tuple represents a point.
        endpoints = unique_endpoints
        elements = []
        for i, ep in enumerate(endpoints):
            if i == 0:
                prev = ep
                continue
            elif i < len(endpoints) - 1:
                # Open interval between endpoints, then the endpoint itself
                # as a closed degenerate point interval.
                elements.append((prev, ep, False, False))
                elements.append((ep, ep, True, True))
                prev = ep
            else:  # i == len(endpoints)-1
                elements.append((prev, ep, False, False))
        num_leaves = len(elements)
        # NOTE(review): depth is derived via floating-point logs; for leaf
        # counts that are exact powers of two this relies on math.log being
        # exact -- confirm for very large inputs.
        max_depth = int(math.log(num_leaves) / math.log(2)) + 1
        # Pair off this many leaves one level early so the remaining queue
        # length becomes a power of two.
        num_last_leaves = 2 * (num_leaves - 2**(max_depth - 1))
        # Build the tree bottom-up; q holds the nodes of the current level.
        q = []
        for i, elem in enumerate(elements):
            if i < num_last_leaves:
                # Merge consecutive leaf pairs into one parent node.
                if i % 2 == 0:
                    prev = elem
                else:
                    left_node = TreeNode(*prev)
                    right_node = TreeNode(*elem)
                    node = TreeNode(prev[0], elem[1], prev[2], elem[3])
                    node.left = left_node
                    node.right = right_node
                    q.append(node)
            else:
                node = TreeNode(*elem)
                q.append(node)
        # Repeatedly merge adjacent nodes until a single root remains.
        while len(q) > 1:
            tmp_q = []
            for i, node in enumerate(q):
                if i % 2 == 0:
                    prev = node
                else:
                    # Parent spans from prev's left edge to node's right edge.
                    left_ep = prev.left_endpoint
                    right_ep = node.right_endpoint
                    l_closed = prev.left_closed
                    r_closed = node.right_closed
                    new_node = TreeNode(left_ep, right_ep, l_closed, r_closed)
                    new_node.left = prev
                    new_node.right = node
                    tmp_q.append(new_node)
            q = tmp_q
        self.root = q[0]
        # Distribute every input interval into canonical subsets.
        for interval in intervals:
            self._append_subset(self.root, interval)
        return self.root

    def _append_subset(self, node, interval):
        """Recursive function to add canonical subsets"""
        if interval.contains(node.interval):
            # Store at the highest node fully covered by the interval.
            node.subset.append(interval)
            return None
        if node.left is not None:
            self._append_subset(node.left, interval)
        if node.right is not None:
            self._append_subset(node.right, interval)
class nDimSegmentTree(object):
    """Multi-dimensional segment tree over axis-aligned Cubes.

    The primary SegmentTree indexes axis 0. Every node of a tree for axis k
    gets an attached SegmentTree over axis k+1, built from the cubes whose
    sides form that node's canonical subset, so a query descends one axis
    at a time.
    """

    def __init__(self, cubes):
        """Build the nested trees for a non-empty list of same-dimension Cubes.

        Raises:
            Exception: if the cubes do not all share the same dimension.
        """
        # Primary tree over the first side of every cube.
        intervals = [c.sides[0] for c in cubes]
        self.tree = SegmentTree(intervals)
        self._node2attached_tree = dict()
        self.dimension = cubes[0].dimension
        same_dim = all([c.dimension == self.dimension for c in cubes])
        if not same_dim:
            raise Exception('all cubes must have the same dimension')
        self.build_tree()

    def __repr__(self):
        # BUGFIX: the original used 'nDimSegmentTree({})'.format() with no
        # argument, which raised IndexError whenever repr() was evaluated.
        return 'nDimSegmentTree(dimension={})'.format(self.dimension)

    def find_attached_tree(self, node):
        """Return the SegmentTree attached to `node`, or None if there is none."""
        if node in self._node2attached_tree:
            return self._node2attached_tree[node]
        else:
            return None

    def attach_one_tree(self, node, axis):
        """Attach a next-axis segment tree to one TreeNode, if needed."""
        if axis + 1 >= self.dimension:
            return None
        cannonical_subset = node.subset
        intervals = []
        for intv in cannonical_subset:
            # Map each stored side interval back to its cube and take the
            # cube's side on the next axis.
            c = Cube.find_cube(intv)
            intervals.append(c.sides[axis + 1])
        if not intervals:
            return None
        sub_seg_tree = SegmentTree(intervals)
        self._node2attached_tree[node] = sub_seg_tree
        return sub_seg_tree

    def attach_all_trees(self, node, axis):
        """Attach segment trees to every node in the subtree rooted at `node`."""
        sub_seg_tree = self.attach_one_tree(node, axis)
        if sub_seg_tree is not None:
            self._queue[axis + 1].append(sub_seg_tree)
        if node.left is not None:
            self.attach_all_trees(node.left, axis)
        if node.right is not None:
            self.attach_all_trees(node.right, axis)

    def build_tree(self):
        """Attach sub-trees axis by axis, then discard the temporary queue."""
        self._queue = collections.defaultdict(list)  # temporary queue per axis
        dim = 0
        self.attach_all_trees(self.tree.root, dim)
        while dim < self.dimension:
            dim += 1
            for tree in self._queue[dim]:
                self.attach_all_trees(tree.root, dim)
        del self._queue

    def query(self, point):
        """Return all Cubes that contain the n-dimensional `point`.

        Raises:
            Exception: if len(point) differs from the tree dimension.
        """
        if len(point) != self.dimension:
            raise Exception('point must have the same dimension as the tree')
        trees = [self.tree]
        for axis in range(self.dimension - 1):  # To Do: ensure dimension > 1 at the beginning
            trees = self.get_next_trees(point, trees, axis)
        selected_cubes = []
        for tree in trees:
            # Trees that survived all axes are queried on the last coordinate.
            last_intervals = tree.query(point[-1])
            for intv in last_intervals:
                c = Cube.find_cube(intv)
                selected_cubes.append(c)
        return selected_cubes

    def get_next_trees(self, point, trees, axis):
        """Collect the attached trees along each root-to-leaf path on `axis`."""
        next_trees = []
        for tree in trees:
            path = tree.root_to_leaf(point[axis])  # candidate nodes on this axis
            for node in path:
                sub_seg_tree = self.find_attached_tree(node)
                if sub_seg_tree is not None:
                    next_trees.append(sub_seg_tree)
        return next_trees
def example():
    """Demonstrate 1-D, 2-D, and 5-D segment-tree queries on random data."""
    import random

    def rand_interval():
        # One random interval in [0, 100) with random open/closed endpoints.
        endpoints = [random.choice(list(range(100))) for _ in range(2)]
        l_closed = random.choice([False, True])
        r_closed = random.choice([False, True])
        return Interval(min(endpoints), max(endpoints), l_closed, r_closed)

    def rand_cube(dim):
        # An axis-aligned cube made of `dim` random sides.
        return Cube([rand_interval() for _ in range(dim)])

    print('1-dim example:')
    intervals = [rand_interval() for _ in range(10)]
    print('intervals:', intervals)
    seg_tree = SegmentTree(intervals)
    print('intervals containing 34:', seg_tree.query(34))
    print()
    print('2-dim example:')
    rectangles = [rand_cube(2) for _ in range(5)]
    # build trees
    n_dim_seg_tree = nDimSegmentTree(rectangles)
    print('input:', rectangles)
    print('querying (40, 50)...')
    x = 40
    y = 50
    found = n_dim_seg_tree.query((x, y))
    print('found rectangles', found)
    print('{} rectangle(s) out of {}'.format(len(found), len(rectangles)))
    print()
    print('5-dim example')
    cubes = [rand_cube(5) for _ in range(100)]
    # build trees
    n_dim_seg_tree = nDimSegmentTree(cubes)
    point = (52, 52, 52, 52, 52)
    print('querying {}...'.format(point))
    found = n_dim_seg_tree.query(point)
    print('found cubes', found)
    print('{} cube(s) out of {}'.format(len(found), len(cubes)))
if __name__ == '__main__':
    # Run the demo only when executed as a script.
    # BUGFIX: removed stray '|' residue fused onto this line.
    example()
# repo: morganwu277/chan
# coding: utf-8
import sys
import warnings

# Make the package importable whether tests run from the repo root or the
# test directory.
sys.path.insert(0, '.')
sys.path.insert(0, '..')

import os
import pandas as pd

import czsc
from czsc.analyze import KlineAnalyze, find_zs

# NOTE(review): warnings.warn is used only to surface the library version in
# test output; logging.info may be more appropriate -- confirm intent.
warnings.warn("czsc version is {}".format(czsc.__version__))
# cur_path = os.path.split(os.path.realpath(__file__))[0]
cur_path = "./test"
# Daily K-line fixture for index 000001.SH.
file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
kline = pd.read_csv(file_kline, encoding="utf-8")
kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
# The first 2000 bars build the analyzer; the rest drive incremental updates.
kline1 = kline.iloc[:2000]
kline2 = kline.iloc[2000:]
# Shared analyzer instance used by the tests below ("日线" = daily chart).
ka = KlineAnalyze(kline1, name="日线", max_raw_len=2000, verbose=True)
def test_get_sub_section():
    """get_sub_section must return slices whose first/last elements land
    exactly on the requested start/end datetimes, for every granularity
    mode ('kn', 'fx', 'bi', 'xd')."""
    sub_kn = ka.get_sub_section(ka.fx_list[-2]['dt'], ka.fx_list[-1]['dt'], mode='kn', is_last=True)
    assert sub_kn[0]['dt'] == ka.fx_list[-2]['dt'] and sub_kn[-1]['dt'] == ka.fx_list[-1]['dt']
    sub_fx = ka.get_sub_section(ka.bi_list[-2]['dt'], ka.bi_list[-1]['dt'], mode='fx', is_last=True)
    assert sub_fx[0]['dt'] == ka.bi_list[-2]['dt'] and sub_fx[-1]['dt'] == ka.bi_list[-1]['dt']
    sub_bi = ka.get_sub_section(ka.xd_list[-2]['dt'], ka.xd_list[-1]['dt'], mode='bi', is_last=True)
    assert sub_bi[0]['dt'] == ka.xd_list[-2]['dt'] and sub_bi[-1]['dt'] == ka.xd_list[-1]['dt']
    sub_xd = ka.get_sub_section(ka.xd_list[-10]['dt'], ka.xd_list[-1]['dt'], mode='xd', is_last=True)
    assert sub_xd[0]['dt'] == ka.xd_list[-10]['dt'] and sub_xd[-1]['dt'] == ka.xd_list[-1]['dt']
def test_kline_analyze():
    """End-to-end check of KlineAnalyze: chart export, incremental updates,
    and fractal/stroke/segment recognition on the fixture data."""
    # Chart export must create the image file.
    file_img = "kline.png"
    ka.to_image(file_img, max_k_count=5000)
    assert os.path.exists(file_img)
    # Feed the remaining bars one by one (incremental analysis).
    for _, row in kline2.iterrows():
        ka.update(row.to_dict())
        assert ka.kline_raw[-1]['dt'] == row['dt']
    # Fractal (fenxing) marks: 'g'/'d' flags (presumably top/bottom --
    # confirm against czsc documentation).
    assert ka.fx_list[-1]['fx_mark'] == 'g'
    assert ka.fx_list[-5]['fx_mark'] == 'g'
    # Stroke (bi) recognition results.
    assert ka.bi_list[-1]['fx_mark'] == 'g'
    assert ka.bi_list[-4]['fx_mark'] == 'd'
    # Segment (xd) recognition results.
    assert ka.xd_list[-2]['fx_mark'] == 'g'
    assert ka.xd_list[-3]['fx_mark'] == 'd'
    # Incremental update of the latest bar must keep the raw list length
    # constant (replace, not append).
    ka_raw_len = len(ka.kline_raw)
    for x in [2890, 2910, 2783, 3120]:
        k = dict(ka.kline_raw[-1])
        k['close'] = x
        ka.update(k)
        assert len(ka.kline_raw) == ka_raw_len
        assert ka.kline_raw[-1]['close'] == x
def test_bei_chi():
    """Smoke-test divergence (beichi) detection between two recent downward
    moves, at both stroke (bi) and segment (xd) granularity; only verifies
    the calls run, results are printed for manual inspection."""
    bi1 = {"start_dt": ka.bi_list[-11]['dt'], "end_dt": ka.bi_list[-10]['dt'], "direction": "down"}
    bi2 = {"start_dt": ka.bi_list[-13]['dt'], "end_dt": ka.bi_list[-12]['dt'], "direction": "down"}
    x1 = ka.is_bei_chi(bi1, bi2, mode="bi", adjust=0.9)
    xd1 = {"start_dt": ka.xd_list[-4]['dt'], "end_dt": ka.xd_list[-3]['dt'], "direction": "down"}
    xd2 = {"start_dt": ka.xd_list[-6]['dt'], "end_dt": ka.xd_list[-5]['dt'], "direction": "down"}
    x2 = ka.is_bei_chi(xd1, xd2, mode='xd', adjust=0.9)
    # Message translates to "divergence computation results: {x1},{x2}".
    print('背驰计算结果:{},{}'.format(x1, x2))
def test_update_ta():
    """Re-feeding the latest bar must leave MA and MACD indicator values
    unchanged (update is idempotent for a repeated bar)."""
    ka = KlineAnalyze(kline, name="日线", max_raw_len=2000, verbose=False)
    ma_x1 = dict(ka.ma[-1])
    macd_x1 = dict(ka.macd[-1])
    ka.update(kline.iloc[-1].to_dict())
    ma_x2 = dict(ka.ma[-1])
    macd_x2 = dict(ka.macd[-1])
    assert ma_x1['dt'] == ma_x2['dt']
    # Compare float fields rounded to 2 decimals to tolerate recomputation
    # noise.
    assert [round(x, 2) for x in ma_x1.values() if isinstance(x, float)] == \
        [round(x, 2) for x in ma_x2.values() if isinstance(x, float)]
    assert macd_x1['dt'] == macd_x2['dt']
    assert [round(x, 2) for x in macd_x1.values() if isinstance(x, float)] == \
        [round(x, 2) for x in macd_x2.values() if isinstance(x, float)]
def test_find_zs():
    """Check pivot-zone (zhongshu) detection: smoke-run on real data, then
    exact zone counts and ordering on a hand-crafted rising sequence."""
    # NOTE(review): bi_zs / xd_zs are computed but never asserted on --
    # smoke coverage only.
    bi_zs = find_zs(ka.bi_list)
    xd_zs = find_zs(ka.xd_list)
    # Hand-crafted alternating bottom ('d') / top ('g') points.
    points = [
        {"dt": 0, "fx_mark": "d", "xd": 8},
        {"dt": 1, "fx_mark": "g", "xd": 10},
        {"dt": 2, "fx_mark": "d", "xd": 9},
        {"dt": 3, "fx_mark": "g", "xd": 11},
        {"dt": 4, "fx_mark": "d", "xd": 10.5},
        {"dt": 5, "fx_mark": "g", "xd": 12},
        {"dt": 6, "fx_mark": "d", "xd": 11.1},
        {"dt": 7, "fx_mark": "g", "xd": 14},
        {"dt": 8, "fx_mark": "d", "xd": 13},
        {"dt": 9, "fx_mark": "g", "xd": 13.8},
        {"dt": 10, "fx_mark": "d", "xd": 12.9},
        {"dt": 11, "fx_mark": "g", "xd": 14.5},
        {"dt": 12, "fx_mark": "d", "xd": 13.2},
        {"dt": 13, "fx_mark": "g", "xd": 15},
        {"dt": 14, "fx_mark": "d", "xd": 14.3},
        {"dt": 15, "fx_mark": "g", "xd": 16.2},
        {"dt": 16, "fx_mark": "d", "xd": 15.3},
        {"dt": 17, "fx_mark": "g", "xd": 17.6},
        {"dt": 18, "fx_mark": "d", "xd": 15.9},
        {"dt": 19, "fx_mark": "g", "xd": 18.2},
        {"dt": 20, "fx_mark": "d", "xd": 16.8},
        {"dt": 21, "fx_mark": "g", "xd": 17.8},
        {"dt": 22, "fx_mark": "d", "xd": 16.9},
        {"dt": 23, "fx_mark": "g", "xd": 18.1},
    ]
    zss = find_zs(points[:8])
    assert len(zss) == 1
    zss = find_zs(points[:15])
    assert len(zss) == 2
    zss = find_zs(points)
    # Three zones in strictly rising order: each zone's high (ZG) sits below
    # the next zone's low (ZD).
    assert len(zss) == 3 and zss[0]['ZG'] < zss[1]['ZD'] and zss[1]['ZG'] < zss[2]['ZD']
    # Extract the four legs around/between the zones, used for comparing
    # trend divergence; each leg starts at a bottom and ends at a top.
    fd1 = [x for x in points if x['dt'] >= zss[2]['end_point']['dt']]
    fd2 = [x for x in points if zss[2]['start_point']['dt'] > x['dt'] >= zss[1]['end_point']['dt']]
    fd3 = [x for x in points if zss[1]['start_point']['dt'] > x['dt'] >= zss[0]['end_point']['dt']]
    fd4 = [x for x in points if x['dt'] <= zss[0]['start_point']['dt']]
    assert fd1[0]['fx_mark'] == fd2[0]['fx_mark'] == fd3[0]['fx_mark'] == fd4[0]['fx_mark'] == 'd'
    assert fd1[-1]['fx_mark'] == fd2[-1]['fx_mark'] == fd3[-1]['fx_mark'] == fd4[-1]['fx_mark'] == 'g'
import torch
import optuna
import torch.nn as nn
from utils.utils import progress_bar
import matplotlib.pyplot as plt
from utils.utils import pgd
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.transforms import ToTensor, Compose
class CURELearner():
    '''Strongly modified version of the implementation of "Robustness via curvature regularization, and vice versa"
    in https://arxiv.org/abs/1811.09716. This version includes higher and mixed order regularization and higher
    accuracy approximations of the Hessian vector product in CURE.

    Args:
        net (Pytorch nn): network structure
        trainloader (PyTorch Dataloader): The train loader
        testloader (PyTorch Dataloader): The test loader
        device ('cpu' or 'cuda' if GPU available): device to move tensors to
        lambda_ (float): power of regularization
        path (string): path to save the best model
        acc ([1, 2, 4, 6, 8]): level of accuracy for the computation of the
            Hessian vector product; NOTE(review) the constructor default is 0,
            which disables the curvature term -- confirm intended default'''
def __init__(self, net, trainloader, testloader, lambda_0=4, lambda_1=4, lambda_2=0, transformer=None, trial=None, image_min=0, image_max=1, device='cuda',
path='./checkpoint', acc=0):
if not torch.cuda.is_available() and device == 'cuda':
raise ValueError("cuda is not available")
self.net = net.to(device)
if transformer is not None and type(transformer.transforms[0]) == ToTensor:
self.transformer = Compose(transformer.transforms[1:])
else:
self.transformer = transformer
self.criterion = nn.CrossEntropyLoss()
self.trial = trial
self.device = device
self.lambda_0 = lambda_0
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.trainloader, self.testloader = trainloader, testloader
self.path = path
self.test_acc_adv_best = 0
self.image_min = image_min
self.image_max = image_max
self.acc = acc
self.train_loss, self.train_acc = [], []
self.test_loss, self.test_acc_adv, self.test_acc_clean = [], [], []
self.train_curv_total, self.test_curv_total = [], []
self.train_curv_0, self.train_curv_1, self.train_curv_2 = [], [], []
self.test_curv_0, self.test_curv_1, self.test_curv_2 = [], [], []
def set_optimizer(self, optim_alg='Adam', args={'lr': 1e-4}, scheduler=None, args_scheduler={}):
'''Set the optimizer of the network
Args:
optim_alg (string): Name of the optimizer
args (dict): Parameter of the optimizer
scheduler (optim.lr_scheduler): Learning rate scheduler
args_scheduler (dict): Parameters of the scheduler'''
self.optimizer = getattr(optim, optim_alg)(
self.net.parameters(), **args)
if not scheduler:
self.scheduler = optim.lr_scheduler.StepLR(
self.optimizer, step_size=7, gamma=0.1)
else:
self.scheduler = getattr(optim.lr_scheduler, scheduler)(
self.optimizer, **args_scheduler)
def train(self, h=[3], epochs=15, epsilon=8/255):
'''Train the network
Args:
h (list): List with length less than the number of epochs. Different h for different epochs of training,
can have a single number or a list of floats for each epoch
epochs (int): Number of epochs'''
if len(h) > epochs:
raise ValueError(
'Length of h should be less than number of epochs')
if len(h) == 1:
h_all = epochs * [h[0]]
else:
h_all = epochs * [1.0]
h_all[:len(h)] = list(h[:])
h_all[len(h):] = (epochs - len(h)) * [h[-1]]
for epoch, h_tmp in enumerate(h_all):
self._train(epoch, h=h_tmp)
self.test(epoch, h=h_tmp, eps=epsilon)
# This is used for hyperparameter tuning with optuna
if self.trial is not None:
current_acc_adv = self.test_acc_adv[-1]
self.trial.report(current_acc_adv, epoch)
if self.trial.should_prune():
raise optuna.TrialPruned()
self.scheduler.step()
def _train(self, epoch, h):
'''Train the model'''
print('\nEpoch: %d' % epoch)
train_loss, total = 0, 0
num_correct = 0
curv, curvature, norm_grad_sum = 0, 0, 0
curvature_0, curvature_1, curvature_2 = 0, 0, 0
for batch_idx, (inputs, targets) in enumerate(self.trainloader):
inputs, targets = inputs.to(self.device), targets.to(self.device)
self.optimizer.zero_grad()
total += targets.size(0)
outputs = self.net.train()(inputs)
regularizer, grad_norm, curvatures_split_up = self.regularizer(inputs, targets, h=h)
curvature += regularizer.item()
curvature_0 += curvatures_split_up[0].item()
curvature_1 += curvatures_split_up[1].item()
curvature_2 += curvatures_split_up[2].item()
loss = self.criterion(outputs, targets)
loss = loss + regularizer
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
train_loss += loss.item()
_, predicted = outputs.max(1)
outcome = predicted.data == targets
num_correct += outcome.sum().item()
progress_bar(batch_idx, len(self.trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d) | curvature: %.3f ' %
(train_loss/(batch_idx+1), 100.*num_correct/total, num_correct, total, curvature/(batch_idx+1)))
self.train_loss.append(train_loss/(batch_idx+1))
self.train_acc.append(100.*num_correct/total)
self.train_curv_total.append(curvature/(batch_idx+1))
self.train_curv_0.append(curvature_0 / (batch_idx + 1))
self.train_curv_1.append(curvature_1 / (batch_idx + 1))
self.train_curv_2.append(curvature_2 / (batch_idx + 1))
    def test(self, epoch, h, num_pgd_steps=20, eps=8/255):
        '''Evaluate clean accuracy, PGD adversarial accuracy and curvature on
        the test set; checkpoints the model when adversarial accuracy improves.

        Args:
            epoch (int): epoch index (for logging).
            h (float): finite-difference step size for the regularizer.
            num_pgd_steps (int): PGD iterations for the adversarial attack.
            eps (float): PGD perturbation budget.

        Returns:
            (avg loss, adversarial accuracy %, clean accuracy %, avg curvature)
        '''
        test_loss, adv_acc, total, curvature, clean_acc, grad_sum = 0, 0, 0, 0, 0, 0
        curvature_0, curvature_1, curvature_2 = 0, 0, 0
        for batch_idx, (inputs, targets) in enumerate(self.testloader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            outputs = self.net.eval()(inputs)
            loss = self.criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            clean_acc += predicted.eq(targets).sum().item()
            total += targets.size(0)
            # Craft adversarial examples with PGD, clipped to the valid
            # pixel range.
            inputs_pert = pgd(inputs, self.net, epsilon=eps, targets=targets, step_size=0.04, num_steps=num_pgd_steps,
                              normalizer=self.transformer, device=self.device, clip_min=self.image_min, clip_max=self.image_max)
            outputs = self.net(inputs_pert)
            probs, predicted = outputs.max(1)  # NOTE(review): probs is unused
            adv_acc += predicted.eq(targets).sum().item()
            cur, norm_grad, curvatures_split_up = self.regularizer(inputs, targets, h=h)
            # NOTE(review): grad_sum is accumulated but never reported or
            # returned -- confirm whether it should be.
            grad_sum += norm_grad
            curvature += cur.item()
            curvature_0 += curvatures_split_up[0].item()
            curvature_1 += curvatures_split_up[1].item()
            curvature_2 += curvatures_split_up[2].item()
            # test_loss therefore tracks cross-entropy + regularizer, matching
            # the training loss definition.
            test_loss += cur.item()
        print(f'epoch = {epoch}, adv_acc = {100.*adv_acc/total}, clean_acc = {100.*clean_acc/total}, loss = {test_loss/(batch_idx+1)}',
              f'curvature = {curvature/(batch_idx+1)}')
        self.test_loss.append(test_loss/(batch_idx+1))
        self.test_acc_adv.append(100.*adv_acc/total)
        self.test_acc_clean.append(100.*clean_acc/total)
        self.test_curv_total.append(curvature/(batch_idx+1))
        self.test_curv_0.append(curvature_0 / (batch_idx + 1))
        self.test_curv_1.append(curvature_1 / (batch_idx + 1))
        self.test_curv_2.append(curvature_2 / (batch_idx + 1))
        # Checkpoint whenever adversarial accuracy improves on the best so far.
        if self.test_acc_adv[-1] > self.test_acc_adv_best:
            self.test_acc_adv_best = self.test_acc_adv[-1]
            print(f'Saving the best model to {self.path}')
            self.save_model(self.path)
        return test_loss/(batch_idx+1), 100.*adv_acc/total, 100.*clean_acc/total, curvature/(batch_idx+1)
    def _find_z(self, inputs, targets):
        '''Compute the normalized gradient-sign direction z for the regularizer.

        Returns:
            z: sign of the input gradient, normalized per sample to unit
                norm; the [:, None, None, None] broadcast assumes 4-D image
                inputs (N, C, H, W) -- TODO confirm for non-image data.
            norm_grad (float): Euclidean norm of the input gradient.
        '''
        inputs.requires_grad_()
        # NOTE(review): this forward pass result is unused; the loss below
        # runs its own forward pass -- confirm whether it can be removed.
        outputs = self.net.eval()(inputs)
        loss_z = self.criterion(self.net.eval()(inputs), targets)
        loss_z.backward()
        grad = inputs.grad.data + 0.0  # copy to detach from the grad buffer
        norm_grad = grad.norm().item()
        z = torch.sign(grad).detach() + 0.
        # Normalize each sample's direction; the 1e-7 terms guard against
        # division by zero when the gradient is all zeros.
        z = 1. * (z+1e-7) / (z.reshape(z.size(0), -1).norm(dim=1)[:, None, None, None]+1e-7)
        # Zero the gradients accumulated on the inputs and the network.
        inputs.grad.detach_()
        inputs.grad.zero_()
        self.net.zero_grad()
        return z, norm_grad
def _3_diff(self, in_0, in_1, in_2):
'''Compute third order derivative without dividing by infinitesimal (absorbed by regularizer)'''
return in_0-2*in_1+in_2
    def regularizer(self, inputs, targets, h=3.):
        '''Taylor regularizer: gradient-norm (first order), CURE curvature
        (second order, accuracy set by self.acc) and third-order terms.

        Args:
            inputs: input batch; gradients are taken with respect to it.
            targets: ground-truth labels.
            h (float): finite-difference step size.

        Returns:
            (total regularizer, input-gradient norm, [reg_0, reg_1, reg_2]);
            all regularizer terms are averaged over the batch size.
        '''
        acc = self.acc
        z, norm_grad = self._find_z(inputs, targets)
        inputs.requires_grad_()
        outputs_orig = self.net.eval()(inputs)
        loss_orig = self.criterion(outputs_orig, targets)
        reg_0 = torch.Tensor([0]).to(self.device)
        if self.lambda_0 != 0:
            # First-order regularization: squared norm of the input gradient.
            first_order = torch.autograd.grad(loss_orig, inputs, create_graph=True)[0].requires_grad_()
            reg_0 = torch.sum(torch.pow(first_order, 2) * self.lambda_0)
            self.net.zero_grad()
        reg_1 = torch.tensor([0]).to(self.device)
        if acc==1:
            # One-sided O(h) difference of the loss along direction z.
            outputs_pos = self.net.eval()(inputs + h*z)
            loss_pos = self.criterion(outputs_pos, targets)
            approx = torch.autograd.grad((loss_pos-loss_orig), inputs, create_graph=True)[0]
            pre = approx.reshape(approx.size(0), -1).norm(dim=1)
            reg_1 = torch.sum(pre * self.lambda_1)
        if acc in [2, 4, 6, 8]:
            # Central finite-difference coefficients; fixed constants, see
            # https://en.wikipedia.org/wiki/Finite_difference_coefficient
            if acc == 2:
                # CURE curvature with O(h^4) accuracy instead of O(h^2).
                coeffs = torch.tensor([-1/2, +1/2], requires_grad=False).to(self.device)
                # Evaluation points at +-h along z.
                evals = [self.net.eval()(inputs - h*z), self.net.eval()(inputs + h*z)]
            elif acc == 4:
                # CURE curvature with O(h^6) accuracy.
                coeffs = torch.tensor([1/12, -2/3, 2/3, -1/12], requires_grad=False).to(self.device)
                # Evaluation points at +-h, +-2h along z.
                evals = [self.net.eval()(inputs - 2*h*z), self.net.eval()(inputs - h*z), self.net.eval()(inputs + h*z), self.net.eval()(inputs + 2*h*z)]
            elif acc==6:
                # CURE curvature with O(h^8) accuracy.
                coeffs = torch.tensor([-1/60, 3/20, -3/4, 3/4, -3/20, 1/60], requires_grad=False).to(self.device)
                # Evaluation points at +-h, +-2h, +-3h along z.
                evals = [self.net.eval()(inputs - 3*h*z), self.net.eval()(inputs - 2*h*z), self.net.eval()(inputs - h*z),
                         self.net.eval()(inputs + h*z), self.net.eval()(inputs + 2*h*z), self.net.eval()(inputs + 3*h*z)]
            elif acc==8:
                # CURE curvature with O(h^10) accuracy.
                coeffs = torch.tensor([1/280, -4/105, 1/5, -4/5, 4/5, -1/5, 4/105, -1/280], requires_grad=False).to(self.device)
                # Evaluation points at +-h ... +-4h along z.
                evals = [self.net.eval()(inputs - 4*h*z), self.net.eval()(inputs - 3*h*z), self.net.eval()(inputs - 2*h*z),
                         self.net.eval()(inputs - h*z), self.net.eval()(inputs + h*z), self.net.eval()(inputs + 2*h*z),
                         self.net.eval()(inputs + 3*h*z), self.net.eval()(inputs + 4*h*z)]
            # Linear combination of losses approximates the directional
            # derivative; its input gradient is the Hessian-vector product.
            losses = torch.stack([self.criterion(ev, targets) for ev in evals])
            lin_comb = torch.sum(coeffs * losses)
            approx = torch.autograd.grad((lin_comb), inputs, create_graph=True)[0]
            pre = approx.reshape(approx.size(0), -1).norm(dim=1)
            reg_1 = torch.sum(pre * self.lambda_1)
            self.net.zero_grad()
        # Third-order regularization: gradient of a second central difference
        # of the loss along the all-ones direction.
        reg_2 = torch.Tensor([0]).to(self.device)
        if self.lambda_2 != 0:
            loss_1 = self.criterion(self.net.eval()(inputs - h*torch.ones_like(inputs)), targets)
            loss_2 = self.criterion(self.net.eval()(inputs), targets)
            loss_3 = self.criterion(self.net.eval()(inputs + h*torch.ones_like(inputs)), targets)
            total_fin_dif = self._3_diff(loss_1, loss_2, loss_3)
            third_order_approx = torch.autograd.grad(total_fin_dif, inputs, create_graph=True)[0]
            reg_2 = torch.sum(torch.pow(third_order_approx, 2) * self.lambda_2)
            self.net.zero_grad()
        return (reg_0 + reg_1 + reg_2) / float(inputs.size(0)), norm_grad, [reg_0 / float(inputs.size(0)), reg_1 / float(inputs.size(0)), reg_2 / float(inputs.size(0))]
def save_model(self, path):
'''Save the model
Args:
path (string): path to save the model'''
print('Saving...')
state = {
'net': self.net.state_dict(),
'optimizer': self.optimizer.state_dict()
}
torch.save(state, path)
def save_state(self, path):
print('Saving...')
state = {
'net': self.net.state_dict(),
'optimizer': self.optimizer.state_dict(),
'train_acc': self.train_acc,
'test_acc_clean': self.test_acc_clean,
'test_acc_adv': self.test_acc_adv,
'train_curv_total': self.train_curv_total,
'test_curv_total': self.test_curv_total,
'train_curv_0': self.train_curv_0,
'train_curv_1': self.train_curv_1,
'train_curv_2': self.train_curv_2,
'test_curv_0': self.test_curv_0,
'test_curv_1': self.test_curv_1,
'test_curv_2': self.test_curv_2,
'train_loss': self.train_loss,
'test_loss': self.test_loss
}
torch.save(state, path)
def import_model(self, path):
'''Import the pre-trained model'''
checkpoint = torch.load(path)
self.net.load_state_dict(checkpoint['net'])
def import_state(self, path):
checkpoint = torch.load(path)
self.net.load_state_dict(checkpoint['net'])
self.train_acc = checkpoint['train_acc']
self.test_acc_clean = checkpoint['test_acc_clean']
self.test_acc_adv = checkpoint['test_acc_adv']
self.train_curv_total = checkpoint['train_curv_total']
self.test_curv_total = checkpoint['test_curv_total']
self.train_curv_0 = checkpoint['train_curv_0']
self.train_curv_1 = checkpoint['train_curv_1']
self.train_curv_2 = checkpoint['train_curv_2']
self.test_curv_0 = checkpoint['test_curv_0']
self.test_curv_1 = checkpoint['test_curv_1']
self.test_curv_2 = checkpoint['test_curv_2']
self.train_loss = checkpoint['train_loss']
self.test_loss = checkpoint['test_loss']
def plot_results(self, title=""):
"""Plot the results"""
plt.figure(figsize=(18, 12))
plt.suptitle(title + 'Results', fontsize=18, y=0.96)
plt.subplot(4, 4, 1)
plt.plot(self.train_acc, Linewidth=2, c='C0')
plt.plot(self.test_acc_clean, Linewidth=2, c='C1')
plt.plot(self.test_acc_adv, Linewidth=2, c='C2')
plt.legend(['train_clean', 'test_clean', 'test_adv'], fontsize=14)
plt.title('Accuracy', fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.xlabel('epoch', fontsize=14)
plt.grid()
plt.subplot(4, 4, 2)
plt.plot(self.train_curv_total, Linewidth=4, c='black')
plt.plot(self.train_curv_0, Linewidth=2, c='C0', label='train_curv_0')
plt.plot(self.train_curv_1, Linewidth=2, c='C1', label='train_curv_1')
plt.plot(self.train_curv_2, Linewidth=2, c='C2', label='train_curv_2')
plt.legend(fontsize=14)
plt.title('Train Curvatures', fontsize=14)
plt.ylabel('curv', fontsize=14)
plt.xlabel('epoch', fontsize=14)
plt.grid()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.subplot(4, 4, 3)
plt.plot(self.test_curv_total, Linewidth=4, c='black')
plt.plot(self.test_curv_0, Linewidth=2, c='C0', label='test_curv_0')
plt.plot(self.test_curv_1, Linewidth=2, c='C1', label='test_curv_1')
plt.plot(self.test_curv_2, Linewidth=2, c='C2', label='test_curv_2')
plt.legend(fontsize=14)
plt.title('Test Curvatures', fontsize=14)
plt.ylabel('curv', fontsize=14)
plt.xlabel('epoch', fontsize=14)
plt.grid()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.subplot(4, 4, 4)
plt.plot(self.train_loss, Linewidth=2, c='C0')
plt.plot(self.test_loss, Linewidth=2, c='C1')
plt.legend(['train', 'test'], fontsize=14)
plt.title('Loss', fontsize=14)
plt.ylabel('loss', fontsize=14)
plt.xlabel('epoch', fontsize=14)
plt.grid()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
|
<reponame>pulumi/pulumi-alicloud<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AccountArgs', 'Account']
@pulumi.input_type
class AccountArgs:
    # tfgen-generated input bag for creating an Account resource. Values are
    # stored through pulumi.set/pulumi.get in pulumi's property table rather
    # than as plain instance attributes; edits here will be lost on regen.
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 account_password: pulumi.Input[str],
                 db_cluster_id: pulumi.Input[str],
                 account_description: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Account resource.
        :param pulumi.Input[str] account_name: Account name: lowercase letters, numbers, underscores, lowercase letter; length no more than 16 characters.
        :param pulumi.Input[str] account_password: The account password: uppercase letters, lowercase letters, lowercase letters, numbers, and special characters (special character! #$%^& author (s):_+-=) in a length of 8-32 bit.
        :param pulumi.Input[str] db_cluster_id: The db cluster id.
        :param pulumi.Input[str] account_description: In Chinese, English letter. May contain Chinese and English characters, lowercase letters, numbers, and underscores (_), the dash (-). Cannot start with http:// and https:// at the beginning. Length is from 2 to 256 characters.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "account_password", account_password)
        pulumi.set(__self__, "db_cluster_id", db_cluster_id)
        # Only persist the optional description when the caller supplied one.
        if account_description is not None:
            pulumi.set(__self__, "account_description", account_description)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        Account name: lowercase letters, numbers, underscores, lowercase letter; length no more than 16 characters.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="accountPassword")
    def account_password(self) -> pulumi.Input[str]:
        """
        The account password: uppercase letters, lowercase letters, lowercase letters, numbers, and special characters (special character! #$%^& author (s):_+-=) in a length of 8-32 bit.
        """
        return pulumi.get(self, "account_password")
    @account_password.setter
    def account_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_password", value)
    @property
    @pulumi.getter(name="dbClusterId")
    def db_cluster_id(self) -> pulumi.Input[str]:
        """
        The db cluster id.
        """
        return pulumi.get(self, "db_cluster_id")
    @db_cluster_id.setter
    def db_cluster_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "db_cluster_id", value)
    @property
    @pulumi.getter(name="accountDescription")
    def account_description(self) -> Optional[pulumi.Input[str]]:
        """
        In Chinese, English letter. May contain Chinese and English characters, lowercase letters, numbers, and underscores (_), the dash (-). Cannot start with http:// and https:// at the beginning. Length is from 2 to 256 characters.
        """
        return pulumi.get(self, "account_description")
    @account_description.setter
    def account_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_description", value)
@pulumi.input_type
class _AccountState:
    # tfgen-generated state bag used by Account.get() to look up or filter an
    # existing resource; every field is optional here, unlike AccountArgs.
    def __init__(__self__, *,
                 account_description: Optional[pulumi.Input[str]] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 account_password: Optional[pulumi.Input[str]] = None,
                 db_cluster_id: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Account resources.
        :param pulumi.Input[str] account_description: In Chinese, English letter. May contain Chinese and English characters, lowercase letters, numbers, and underscores (_), the dash (-). Cannot start with http:// and https:// at the beginning. Length is from 2 to 256 characters.
        :param pulumi.Input[str] account_name: Account name: lowercase letters, numbers, underscores, lowercase letter; length no more than 16 characters.
        :param pulumi.Input[str] account_password: The account password: uppercase letters, lowercase letters, lowercase letters, numbers, and special characters (special character! #$%^& author (s):_+-=) in a length of 8-32 bit.
        :param pulumi.Input[str] db_cluster_id: The db cluster id.
        :param pulumi.Input[str] status: The status of the resource. Valid Status: `Creating`,`Available`,`Deleting`.
        """
        if account_description is not None:
            pulumi.set(__self__, "account_description", account_description)
        if account_name is not None:
            pulumi.set(__self__, "account_name", account_name)
        if account_password is not None:
            pulumi.set(__self__, "account_password", account_password)
        if db_cluster_id is not None:
            pulumi.set(__self__, "db_cluster_id", db_cluster_id)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="accountDescription")
    def account_description(self) -> Optional[pulumi.Input[str]]:
        """
        In Chinese, English letter. May contain Chinese and English characters, lowercase letters, numbers, and underscores (_), the dash (-). Cannot start with http:// and https:// at the beginning. Length is from 2 to 256 characters.
        """
        return pulumi.get(self, "account_description")
    @account_description.setter
    def account_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_description", value)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> Optional[pulumi.Input[str]]:
        """
        Account name: lowercase letters, numbers, underscores, lowercase letter; length no more than 16 characters.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter(name="accountPassword")
    def account_password(self) -> Optional[pulumi.Input[str]]:
        """
        The account password: uppercase letters, lowercase letters, lowercase letters, numbers, and special characters (special character! #$%^& author (s):_+-=) in a length of 8-32 bit.
        """
        return pulumi.get(self, "account_password")
    @account_password.setter
    def account_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_password", value)
    @property
    @pulumi.getter(name="dbClusterId")
    def db_cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The db cluster id.
        """
        return pulumi.get(self, "db_cluster_id")
    @db_cluster_id.setter
    def db_cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_cluster_id", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the resource. Valid Status: `Creating`,`Available`,`Deleting`.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): the provider schema supplies no description for
        # `type`; Account._internal_init initializes it to None (provider
        # computed), so it appears to be an output-only attribute.
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
class Account(pulumi.CustomResource):
    # tfgen-generated resource class. This revision repairs anonymization
    # damage: `<PASSWORD>` placeholders in the docstring examples and, most
    # importantly, in the executable body of get() (a SyntaxError).
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_description: Optional[pulumi.Input[str]] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 account_password: Optional[pulumi.Input[str]] = None,
                 db_cluster_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Click House Account resource.
        For information about Click House Account and how to use it, see [What is Account](https://www.alibabacloud.com/product/clickhouse).
        > **NOTE:** Available in v1.134.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        config = pulumi.Config()
        name = config.get("name")
        if name is None:
            name = "testaccountname"
        pwd = config.get("pwd")
        if pwd is None:
            pwd = "your_password"
        default_db_cluster = alicloud.clickhouse.DbCluster("defaultDbCluster",
            db_cluster_version="172.16.17.32",
            category="Basic",
            db_cluster_class="S8",
            db_cluster_network_type="vpc",
            db_cluster_description=name,
            db_node_group_count=1,
            payment_type="PayAsYouGo",
            db_node_storage="500",
            storage_type="cloud_essd",
            vswitch_id="your_vswitch_id")
        default_account = alicloud.clickhouse.Account("defaultAccount",
            db_cluster_id=default_db_cluster.id,
            account_description="your_description",
            account_name=name,
            account_password=pwd)
        ```
        ## Import
        Click House Account can be imported using the id, e.g.
        ```sh
        $ pulumi import alicloud:clickhouse/account:Account example <db_cluster_id>:<account_name>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_description: In Chinese, English letter. May contain Chinese and English characters, lowercase letters, numbers, and underscores (_), the dash (-). Cannot start with http:// and https:// at the beginning. Length is from 2 to 256 characters.
        :param pulumi.Input[str] account_name: Account name: lowercase letters, numbers, underscores, lowercase letter; length no more than 16 characters.
        :param pulumi.Input[str] account_password: The account password: uppercase letters, lowercase letters, lowercase letters, numbers, and special characters (special character! #$%^& author (s):_+-=) in a length of 8-32 bit.
        :param pulumi.Input[str] db_cluster_id: The db cluster id.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AccountArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Click House Account resource.
        For information about Click House Account and how to use it, see [What is Account](https://www.alibabacloud.com/product/clickhouse).
        > **NOTE:** Available in v1.134.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        config = pulumi.Config()
        name = config.get("name")
        if name is None:
            name = "testaccountname"
        pwd = config.get("pwd")
        if pwd is None:
            pwd = "your_password"
        default_db_cluster = alicloud.clickhouse.DbCluster("defaultDbCluster",
            db_cluster_version="172.16.17.32",
            category="Basic",
            db_cluster_class="S8",
            db_cluster_network_type="vpc",
            db_cluster_description=name,
            db_node_group_count=1,
            payment_type="PayAsYouGo",
            db_node_storage="500",
            storage_type="cloud_essd",
            vswitch_id="your_vswitch_id")
        default_account = alicloud.clickhouse.Account("defaultAccount",
            db_cluster_id=default_db_cluster.id,
            account_description="your_description",
            account_name=name,
            account_password=pwd)
        ```
        ## Import
        Click House Account can be imported using the id, e.g.
        ```sh
        $ pulumi import alicloud:clickhouse/account:Account example <db_cluster_id>:<account_name>
        ```
        :param str resource_name: The name of the resource.
        :param AccountArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args=AccountArgs) and keyword-argument
        # overloads, then delegate to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(AccountArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       account_description: Optional[pulumi.Input[str]] = None,
                       account_name: Optional[pulumi.Input[str]] = None,
                       account_password: Optional[pulumi.Input[str]] = None,
                       db_cluster_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: required properties must be present
            # unless the engine supplies them via an existing URN.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AccountArgs.__new__(AccountArgs)
            __props__.__dict__["account_description"] = account_description
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            if account_password is None and not opts.urn:
                raise TypeError("Missing required property 'account_password'")
            __props__.__dict__["account_password"] = account_password
            if db_cluster_id is None and not opts.urn:
                raise TypeError("Missing required property 'db_cluster_id'")
            __props__.__dict__["db_cluster_id"] = db_cluster_id
            # Output-only attributes are computed by the provider.
            __props__.__dict__["status"] = None
            __props__.__dict__["type"] = None
        super(Account, __self__).__init__(
            'alicloud:clickhouse/account:Account',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            account_description: Optional[pulumi.Input[str]] = None,
            account_name: Optional[pulumi.Input[str]] = None,
            account_password: Optional[pulumi.Input[str]] = None,
            db_cluster_id: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None,
            type: Optional[pulumi.Input[str]] = None) -> 'Account':
        """
        Get an existing Account resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_description: In Chinese, English letter. May contain Chinese and English characters, lowercase letters, numbers, and underscores (_), the dash (-). Cannot start with http:// and https:// at the beginning. Length is from 2 to 256 characters.
        :param pulumi.Input[str] account_name: Account name: lowercase letters, numbers, underscores, lowercase letter; length no more than 16 characters.
        :param pulumi.Input[str] account_password: The account password: uppercase letters, lowercase letters, lowercase letters, numbers, and special characters (special character! #$%^& author (s):_+-=) in a length of 8-32 bit.
        :param pulumi.Input[str] db_cluster_id: The db cluster id.
        :param pulumi.Input[str] status: The status of the resource. Valid Status: `Creating`,`Available`,`Deleting`.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _AccountState.__new__(_AccountState)
        __props__.__dict__["account_description"] = account_description
        __props__.__dict__["account_name"] = account_name
        # BUGFIX: this assignment had been replaced by a `<PASSWORD>`
        # placeholder (a SyntaxError); restored to the parameter value.
        __props__.__dict__["account_password"] = account_password
        __props__.__dict__["db_cluster_id"] = db_cluster_id
        __props__.__dict__["status"] = status
        __props__.__dict__["type"] = type
        return Account(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="accountDescription")
    def account_description(self) -> pulumi.Output[Optional[str]]:
        """
        In Chinese, English letter. May contain Chinese and English characters, lowercase letters, numbers, and underscores (_), the dash (-). Cannot start with http:// and https:// at the beginning. Length is from 2 to 256 characters.
        """
        return pulumi.get(self, "account_description")
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Output[str]:
        """
        Account name: lowercase letters, numbers, underscores, lowercase letter; length no more than 16 characters.
        """
        return pulumi.get(self, "account_name")
    @property
    @pulumi.getter(name="accountPassword")
    def account_password(self) -> pulumi.Output[str]:
        """
        The account password: uppercase letters, lowercase letters, lowercase letters, numbers, and special characters (special character! #$%^& author (s):_+-=) in a length of 8-32 bit.
        """
        return pulumi.get(self, "account_password")
    @property
    @pulumi.getter(name="dbClusterId")
    def db_cluster_id(self) -> pulumi.Output[str]:
        """
        The db cluster id.
        """
        return pulumi.get(self, "db_cluster_id")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of the resource. Valid Status: `Creating`,`Available`,`Deleting`.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        return pulumi.get(self, "type")
|
<gh_stars>0
# -*- coding: utf-8 -*-
import math,string,itertools,fractions,heapq,collections,re,array,bisect
class XorAndSum:
    def maxSum(self, number):
        """Maximize sum(number) by repeatedly replacing an element with the
        XOR of an element pair whenever that XOR is strictly larger.

        Args:
            number: sequence of non-negative ints (may be empty).
        Returns:
            int: the sum after the greedy hill-climb converges.
        """
        if not number:
            return 0
        number = list(number)
        # BUGFIX: the original aborted a pass at the FIRST pair whose XOR
        # improved neither element, skipping later pairs that could still
        # improve. Instead, loop until a full pass makes no change.
        improved = True
        while improved:
            improved = False
            for i, j in itertools.combinations(range(len(number)), 2):
                v = number[i] ^ number[j]
                if v > number[i] and v > number[j]:
                    # XOR beats both: upgrade the smaller, keep the larger.
                    if number[i] > number[j]:
                        number[j] = v
                    else:
                        number[i] = v
                    improved = True
                elif v > number[i]:
                    number[i] = v
                    improved = True
                elif v > number[j]:
                    number[j] = v
                    improved = True
        # (debug print removed)
        return sum(number)
# CUT begin
# TEST CODE FOR PYTHON {{{
import sys, time, math
def tc_equal(expected, received):
    """Compare a received answer to the expected one, coercing the received
    value to the expected value's type; floats use a relative tolerance and
    sequences are compared element-wise. Any conversion failure means "not
    equal"."""
    try:
        expected_type = type(expected)
        received = expected_type(received)
        if expected_type in (list, tuple):
            return (len(expected) == len(received)
                    and all(tc_equal(e, r) for e, r in zip(expected, received)))
        if expected_type == float:
            eps = 1e-9
            diff = abs(received - expected)
            return (not math.isnan(received)
                    and not math.isnan(expected)
                    and diff <= eps * max(1.0, abs(expected)))
        return expected == received
    except:
        return False
def pretty_str(x):
    """Render a value the way the tester prints it: strings quoted, tuples
    parenthesized with comma-joined elements, everything else via str()."""
    kind = type(x)
    if kind == str:
        return '"%s"' % x
    if kind == tuple:
        return '(%s)' % ','.join(pretty_str(item) for item in x)
    return str(x)
def do_test(number, __expected):
    """Run a single testcase against XorAndSum.maxSum and report the outcome
    on stdout. Returns 1 when the answer matches, 0 on mismatch or error."""
    start = time.time()
    solver = XorAndSum()
    error_text = None
    try:
        __result = solver.maxSum(number);
    except:
        import traceback
        error_text = traceback.format_exc()
    elapsed = time.time() - start  # in sec
    if error_text is not None:
        sys.stdout.write("RUNTIME ERROR: \n")
        sys.stdout.write(error_text + "\n")
        return 0
    if tc_equal(__expected, __result):
        sys.stdout.write("PASSED! " + ("(%.3f seconds)" % elapsed) + "\n")
        return 1
    sys.stdout.write("FAILED! " + ("(%.3f seconds)" % elapsed) + "\n")
    sys.stdout.write(" Expected: " + pretty_str(__expected) + "\n")
    sys.stdout.write(" Received: " + pretty_str(__result) + "\n")
    return 0
def run_tests():
    """Load testcases from "XorAndSum.sample", run them, and print a summary
    plus a time-decayed TopCoder-style score.

    Command-line arguments (sys.argv[1:]) are testcase indices; when any are
    given, only those cases are run.
    """
    sys.stdout.write("XorAndSum (1000 Points)\n\n")
    passed = cases = 0
    case_set = set()
    for arg in sys.argv[1:]:
        case_set.add(int(arg))
    with open("XorAndSum.sample", "r") as f:
        while True:
            # Each case starts with a '--' separator line; anything else
            # (including EOF's empty string) terminates the loop.
            label = f.readline()
            if not label.startswith("--"): break
            number = []
            for i in range(0, int(f.readline())):
                number.append(int(f.readline().rstrip()))
            number = tuple(number)
            f.readline()
            __answer = int(f.readline().rstrip())
            cases += 1
            # BUGFIX: the filter was inverted -- it skipped exactly the cases
            # named on the command line. The selection args choose which
            # cases to RUN, so skip the ones NOT in the set.
            if len(case_set) > 0 and (cases - 1) not in case_set: continue
            sys.stdout.write(" Testcase #%d ... " % (cases - 1))
            passed += do_test(number, __answer)
    sys.stdout.write("\nPassed : %d / %d cases\n" % (passed, cases))
    # Score decays with wall-clock time since the contest opened (the epoch
    # constant below); TT is the nominal coding-phase length in minutes.
    T = time.time() - 1430835073
    PT, TT = (T / 60.0, 75.0)
    points = 1000 * (0.3 + (0.7 * TT * TT) / (10.0 * PT * PT + TT * TT))
    sys.stdout.write("Time : %d minutes %d secs\n" % (int(T/60), T%60))
    sys.stdout.write("Score : %.2f points\n" % points)
if __name__ == '__main__':
    run_tests()
# }}}
# CUT end
|
# PyGetWindow
# A cross-platform module to find information about the windows on the screen.
# Work in progress
# Useful info:
# https://stackoverflow.com/questions/373020/finding-the-current-active-window-in-mac-os-x-using-python
# https://stackoverflow.com/questions/7142342/get-window-position-size-with-python
# win32 api and ctypes on Windows
# cocoa api and pyobjc on Mac
# Xlib on linux
# Possible Future Features:
# get/click menu (win32: GetMenuItemCount, GetMenuItemInfo, GetMenuItemID, GetMenu, GetMenuItemRect)
__version__ = "0.0.9"
import sys, collections, pyrect
class PyGetWindowException(Exception):
    """Root exception type for PyGetWindow.

    Every error that a PyGetWindow function deliberately raises is an
    instance of this class; any other exception escaping the module
    indicates a bug in PyGetWindow itself.
    """
def pointInRect(x, y, left, top, width, height):
    """Returns ``True`` if the ``(x, y)`` point is within the box described
    by ``(left, top, width, height)``.

    Uses the conventional half-open rectangle: points on the left/top edges
    are inside, points on the right/bottom edges are outside. (The previous
    strict ``<`` comparison also excluded the left/top edges, so e.g. the
    window's own top-left corner was reported as outside the window.)"""
    return left <= x < left + width and top <= y < top + height
# NOTE: `Rect` is a named tuple for use in Python, while structs.RECT represents
# the win32 RECT struct. PyRect's Rect class is used for handling changing
# geometry of rectangular areas.
# Immutable value types returned to callers; `Rect` uses edge coordinates
# (left/top/right/bottom), not width/height.
Rect = collections.namedtuple("Rect", "left top right bottom")
Point = collections.namedtuple("Point", "x y")
Size = collections.namedtuple("Size", "width height")
class BaseWindow:
    """Platform-independent window interface.

    Concrete subclasses implement the ``NotImplementedError`` methods with
    the native window API. Geometry access is delegated to a ``pyrect.Rect``
    whose ``onRead`` callback pulls fresh coordinates from the OS and whose
    ``onChange`` callback moves/resizes the real window.
    """

    def __init__(self):
        pass

    def _setupRectProperties(self):
        """Create the pyrect.Rect that backs all geometry properties."""
        def _onRead(attrName):
            r = self._getWindowRect()
            # Assign the private fields directly so refreshing the cached
            # geometry does not recursively trigger onRead again.
            self._rect._left = r.left
            self._rect._top = r.top
            self._rect._width = r.right - r.left
            self._rect._height = r.bottom - r.top

        def _onChange(oldBox, newBox):
            self.moveTo(newBox.left, newBox.top)
            self.resizeTo(newBox.width, newBox.height)

        r = self._getWindowRect()
        self._rect = pyrect.Rect(r.left, r.top, r.right - r.left, r.bottom - r.top, onChange=_onChange, onRead=_onRead)

    def _getWindowRect(self):
        """Return the window's current Rect (edge coordinates) from the OS."""
        raise NotImplementedError

    def __str__(self):
        r = self._getWindowRect()
        width = r.right - r.left
        height = r.bottom - r.top
        return '<%s left="%s", top="%s", width="%s", height="%s", title="%s">' % (
            self.__class__.__qualname__,
            r.left,
            r.top,
            width,
            height,
            self.title,
        )

    def close(self):
        """Closes this window. This may trigger "Are you sure you want to
        quit?" dialogs or other actions that prevent the window from
        actually closing. This is identical to clicking the X button on the
        window."""
        raise NotImplementedError

    def minimize(self):
        """Minimizes this window."""
        raise NotImplementedError

    def maximize(self):
        """Maximizes this window."""
        raise NotImplementedError

    def restore(self):
        """If maximized or minimized, restores the window to it's normal size."""
        raise NotImplementedError

    def activate(self):
        """Activate this window and make it the foreground window."""
        raise NotImplementedError

    def resizeRel(self, widthOffset, heightOffset):
        """Resizes the window relative to its current size."""
        raise NotImplementedError

    def resizeTo(self, newWidth, newHeight):
        """Resizes the window to a new width and height."""
        raise NotImplementedError

    def moveRel(self, xOffset, yOffset):
        """Moves the window relative to its current position."""
        raise NotImplementedError

    def moveTo(self, newLeft, newTop):
        """Moves the window to new coordinates on the screen."""
        raise NotImplementedError

    @property
    def isMinimized(self):
        """Returns True if the window is currently minimized."""
        raise NotImplementedError

    @property
    def isMaximized(self):
        """Returns True if the window is currently maximized."""
        raise NotImplementedError

    @property
    def isActive(self):
        """Returns True if the window is currently the active, foreground window."""
        raise NotImplementedError

    @property
    def title(self):
        """Returns the window title as a string."""
        raise NotImplementedError

    @property
    def visible(self):
        raise NotImplementedError

    # Wrappers for pyrect.Rect object's properties. Each one was previously
    # an identical hand-written property pair (20 copies); they are now
    # generated by this class-body factory. Reading goes straight through to
    # the pyrect attribute (running its onRead so the cached geometry is
    # refreshed from the OS); writing first reads (onRead) and then assigns
    # through pyrect (running onChange, which moves/resizes the real window).
    def _wrapRectAttr(attrName):  # class-body helper, deleted below
        def _get(self):
            return getattr(self._rect, attrName)

        def _set(self, value):
            getattr(self._rect, attrName)  # Run rect's onRead to update the Rect object.
            setattr(self._rect, attrName, value)

        return property(_get, _set)

    left = _wrapRectAttr("left")
    right = _wrapRectAttr("right")
    top = _wrapRectAttr("top")
    bottom = _wrapRectAttr("bottom")
    topleft = _wrapRectAttr("topleft")
    topright = _wrapRectAttr("topright")
    bottomleft = _wrapRectAttr("bottomleft")
    bottomright = _wrapRectAttr("bottomright")
    midleft = _wrapRectAttr("midleft")
    midright = _wrapRectAttr("midright")
    midtop = _wrapRectAttr("midtop")
    midbottom = _wrapRectAttr("midbottom")
    center = _wrapRectAttr("center")
    centerx = _wrapRectAttr("centerx")
    centery = _wrapRectAttr("centery")
    width = _wrapRectAttr("width")
    height = _wrapRectAttr("height")
    size = _wrapRectAttr("size")
    area = _wrapRectAttr("area")
    box = _wrapRectAttr("box")

    del _wrapRectAttr  # not part of the public class interface
if sys.platform == "darwin":
# raise NotImplementedError('PyGetWindow currently does not support macOS. If you have Appkit/Cocoa knowledge, please contribute! https://github.com/asweigart/pygetwindow') # TODO - implement mac
from ._pygetwindow_macos import *
Window = MacOSWindow
elif sys.platform == "win32":
from ._pygetwindow_win import (
Win32Window,
getActiveWindow,
getActiveWindowTitle,
getWindowsAt,
getWindowsWithTitle,
getAllWindows,
getAllTitles,
)
Window = Win32Window
else:
raise NotImplementedError(
"PyGetWindow currently does not support Linux. If you have Xlib knowledge, please contribute! https://github.com/asweigart/pygetwindow"
)
|
<filename>glucosetracker/glucoses/views.py
# -*- coding: utf-8 -*-
import json
import logging
import operator
import mpld3
from datetime import datetime, timedelta
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, HttpResponse
from django.template import RequestContext
from django.views.generic import (
CreateView,
DeleteView,
FormView,
TemplateView,
UpdateView,
)
from braces.views import LoginRequiredMixin
from django_datatables_view.base_datatable_view import BaseDatatableView
from core.utils import glucose_by_unit_setting, to_mg
from .utils import get_initial_category, import_glucose_from_csv, algo, generate_figure
from .models import Glucose
from .reports import GlucoseCsvReport, GlucosePdfReport, ChartData, UserStats
from .forms import (
GlucoseCreateForm,
GlucoseImportForm,
GlucoseEmailReportForm,
GlucoseFilterForm,
GlucoseQuickAddForm,
GlucoseUpdateForm,
)
DATE_FORMAT = '%m/%d/%Y'
TIME_FORMAT = '%I:%M %p'
logger = logging.getLogger(__name__)
@login_required
def import_data(request):
    """
    Handle upload of a CSV file with glucose records for the current user.

    On POST with a valid form, import the rows via import_glucose_from_csv
    and redirect to the dashboard; on a ValueError during import, re-render
    the upload form with a warning message.  On GET, show an empty form.
    """
    if request.method == 'POST':
        form = GlucoseImportForm(request.POST, request.FILES)
        if form.is_valid():
            try:
                logger.info('Importing data from uploaded CSV file for user: %s',
                            request.user)
                import_glucose_from_csv(request.user, request.FILES['file'])
            # 'except X as e' works on Python 2.6+ and Python 3, unlike the
            # legacy 'except X, e' form used previously.
            except ValueError as e:
                logger.error('Could not import data from uploaded CSV file for'
                             ' user: %s. Details: %s', request.user, e)
                message = 'Could not import your data. Make sure that it follows' \
                          ' the suggested format. (Error Details: %s)' % e
                messages.add_message(request, messages.WARNING, message)
                return render_to_response(
                    'glucoses/glucose_import.html',
                    {'form': form},
                    context_instance=RequestContext(request),
                )
            return HttpResponseRedirect(reverse('dashboard'))
    else:
        form = GlucoseImportForm()
    return render_to_response(
        'glucoses/glucose_import.html',
        {'form': form},
        context_instance=RequestContext(request),
    )
@login_required
def filter_view(request):
    """
    Displays the glucose data table for the currently logged in user with
    filter options.

    The data is loaded by the GlucoseListJson view and rendered by the
    Datatables plugin via Javascript.
    """
    form = GlucoseFilterForm(request.user)
    # Default filter window: the last 7 days in the user's timezone.
    form.fields['start_date'].initial = (datetime.now(
        tz=request.user.settings.time_zone) - timedelta(days=7)) \
        .date().strftime(DATE_FORMAT)
    form.fields['end_date'].initial = datetime.now(
        tz=request.user.settings.time_zone).date().strftime(DATE_FORMAT)
    data = reverse('glucose_list_json')
    # NOTE(review): 'request.is_ajax' is referenced but not called, so it is
    # always truthy (a bound method); the effective check is POST only.
    # Confirm whether 'request.is_ajax()' was intended before tightening it.
    if request.method == 'POST' and request.is_ajax:
        # We need to create a copy of request.POST because it's immutable and
        # we need to convert the content of the Value field to mg/dL if the
        # user's glucose unit setting is set to mmol/L.
        params = request.POST.copy()
        if request.user.settings.glucose_unit.name == 'mmol/L':
            # Only do the conversion if the values are not None or empty.
            if params['start_value']:
                params['start_value'] = to_mg(params['start_value'])
            if params['end_value']:
                params['end_value'] = to_mg(params['end_value'])
        # Create the URL query string and strip the last '&' at the end.
        # items() (instead of the Python-2-only iteritems()) keeps this
        # working on both Python 2 and Python 3.
        data = ('%s?%s' % (reverse('glucose_list_json'), ''.join(
            ['%s=%s&' % (k, v) for k, v in params.items()]))) \
            .rstrip('&')
        return HttpResponse(json.dumps(data), content_type='application/json')
    return render_to_response(
        'glucoses/glucose_filter.html',
        {'form': form, 'data': data},
        context_instance=RequestContext(request),
    )
@login_required
def dashboard(request):
    """
    Render the main dashboard: the glucose data table plus a quick-add form.

    Table rows are fetched by the GlucoseListJson view and drawn client-side
    by the Datatables Javascript plugin.
    """
    quick_add_form = GlucoseQuickAddForm()
    quick_add_form.fields['category'].initial = \
        get_initial_category(request.user)
    context = {
        'form': quick_add_form,
        'glucose_unit_name': request.user.settings.glucose_unit.name,
    }
    return render_to_response(
        'core/dashboard.html',
        context,
        context_instance=RequestContext(request),
    )
@login_required
def chart_data_json(request):
    """
    Return JSON chart data for the chart named by the 'name' GET parameter,
    limited to the last 'days' days.  Unknown names yield an empty payload.
    """
    params = request.GET
    days = params.get('days', 0)
    name = params.get('name', '')
    # Dispatch table instead of an if/elif chain; same four chart kinds.
    chart_getters = {
        'avg_by_category': ChartData.get_avg_by_category,
        'avg_by_day': ChartData.get_avg_by_day,
        'level_breakdown': ChartData.get_level_breakdown,
        'count_by_category': ChartData.get_count_by_category,
    }
    data = {}
    getter = chart_getters.get(name)
    if getter is not None:
        data['chart_data'] = getter(user=request.user, days=int(days))
    return HttpResponse(json.dumps(data), content_type='application/json')
@login_required
def my_json(request):
    """
    Return the (x, y) series produced by algo() as a JSON object
    {'x': ..., 'y': ...}.
    """
    # The previous version read request.GET into an unused local; dropped.
    x, y = algo()
    data = {'x': x, 'y': y}
    return HttpResponse(json.dumps(data), content_type='application/json')
@login_required
def my_figure(request):
    """
    Render the matplotlib figure produced by generate_figure() as an
    mpld3 figure dict, serialized to JSON.
    """
    figure = generate_figure()
    # Replaced a leftover debug print() with proper (lazy) logging, and
    # dropped the unused 'params = request.GET' local.
    logger.debug('Generated figure: %s', figure)
    return HttpResponse(json.dumps(mpld3.fig_to_dict(figure)),
                        content_type='application/json')
@login_required
def stats_json(request):
    """Return the current user's aggregate glucose statistics as JSON."""
    payload = {'stats': UserStats(request.user).user_stats}
    return HttpResponse(json.dumps(payload), content_type='application/json')
@login_required
def quick_add(request):
    """
    AJAX endpoint for quickly adding a glucose value from the dashboard.

    Converts the submitted value to mg/dL when the user's unit setting is
    mmol/L, stamps the record with the current date/time in the user's
    timezone, and returns a JSON {'success': ...} payload.  Non-POST
    requests are rejected with PermissionDenied.
    """
    # NOTE(review): 'request.is_ajax' is referenced but not called, so it is
    # always truthy; the effective check is POST only.  Confirm intent
    # before changing to 'request.is_ajax()'.
    if request.method == 'POST' and request.is_ajax:
        # We need to create a copy of request.POST because it's immutable and
        # we need to convert the content of the Value field to mg/dL if the
        # user's glucose unit setting is set to mmol/L.
        post_values = request.POST.copy()
        if request.user.settings.glucose_unit.name == 'mmol/L':
            post_values['value'] = to_mg(post_values['value'])
        form = GlucoseCreateForm(post_values)
        if form.is_valid():
            user = request.user
            obj = form.save(commit=False)
            obj.user = user
            # Capture 'now' once so the stored date and time can never
            # disagree (two separate now() calls could straddle midnight).
            now = datetime.now(tz=user.settings.time_zone)
            obj.record_date = now.date()
            obj.record_time = now.time()
            obj.save()
            logger.info('Quick Add by %s: %s', request.user, post_values['value'])
            message = {'success': True}
            return HttpResponse(json.dumps(message))
        else:
            message = {
                'success': False,
                'error': 'Invalid value.'
            }
            return HttpResponse(json.dumps(message))
    raise PermissionDenied
class GlucoseChartsView(LoginRequiredMixin, TemplateView):
    """Static page hosting the glucose charts; the chart data itself is
    fetched asynchronously via the chart_data_json view."""
    template_name = 'glucoses/glucose_charts.html'
class GlucoseEmailReportView(LoginRequiredMixin, FormView):
    """
    Sends out an email containing the glucose data report.
    """
    success_url = '.'
    form_class = GlucoseEmailReportForm
    template_name = 'glucoses/glucose_email_report.html'
    def get_initial(self):
        # Pre-fill the recipient with the user's own address and build a
        # default message body naming who triggered the report.
        display_name = self.request.user.get_full_name() or \
            self.request.user.username
        message = 'Glucose data for %s.\n\nThis email was sent by: %s' % (
            display_name, self.request.user.email)
        return {'recipient': self.request.user.email, 'message': message}
    def form_valid(self, form):
        # Flash a success message, then let FormView redirect to success_url.
        messages.add_message(self.request, messages.SUCCESS, 'Email Sent!')
        return super(GlucoseEmailReportView, self).form_valid(form)
    def form_invalid(self, form):
        messages.add_message(self.request, messages.WARNING,
                             'Email not sent. Please try again.')
        return super(GlucoseEmailReportView, self).form_invalid(form)
    def post(self, request, *args, **kwargs):
        # Build the requested report (PDF or CSV) from the validated form
        # data and email it before delegating to form_valid/form_invalid.
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        if form.is_valid():
            # 'optional_fields' controls whether notes/tags columns appear.
            optional_fields = form.cleaned_data['optional_fields']
            if form.cleaned_data['report_format'] == 'pdf':
                report = GlucosePdfReport(form.cleaned_data['start_date'],
                                          form.cleaned_data['end_date'],
                                          request.user,
                                          'notes' in optional_fields,
                                          'tags' in optional_fields)
            else:
                report = GlucoseCsvReport(form.cleaned_data['start_date'],
                                          form.cleaned_data['end_date'],
                                          request.user,
                                          'notes' in optional_fields,
                                          'tags' in optional_fields)
            logger.info(
                'Sending email report from user: %s, subject: %s, recipient: %s',
                request.user,
                form.cleaned_data['subject'],
                form.cleaned_data['recipient']
            )
            report.email(form.cleaned_data['recipient'],
                         form.cleaned_data['subject'],
                         form.cleaned_data['message'])
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class GlucoseCreateView(LoginRequiredMixin, CreateView):
    """Create a new Glucose record for the logged-in user."""
    model = Glucose
    success_url = '/dashboard/'
    template_name = 'glucoses/glucose_create.html'
    form_class = GlucoseCreateForm
    def get_initial(self):
        # Default the form to 'now' in the user's timezone and to the
        # category suggested for the current time of day.
        time_zone = self.request.user.settings.time_zone
        record_date = datetime.now(tz=time_zone).date().strftime(DATE_FORMAT)
        record_time = datetime.now(tz=time_zone).time().strftime(TIME_FORMAT)
        return {
            'category': get_initial_category(self.request.user),
            'record_date': record_date,
            'record_time': record_time,
        }
    def form_valid(self, form):
        # If the 'Save & Add Another' button is clicked, the submit_button_type
        # field will be set to 'submit_and_add' by Javascript. We'll change
        # the success URL to go back to the Add Glucose page and display a
        # successful message in this case.
        if form.cleaned_data['submit_button_type'] == 'submit_and_add':
            self.success_url = '/glucoses/add/'
            value = form.cleaned_data['value']
            messages.add_message(self.request, messages.SUCCESS,
                                 "Glucose '%s' successfully added. You may "
                                 "add another." % value)
        # Set the value of the 'user' field to the currently logged-in user.
        form.instance.user = self.request.user
        # Set the values of the record date and time to the current date and
        # time factoring in the user's timezone setting if they're not
        # specified.
        if not form.instance.record_date:
            form.instance.record_date = datetime.now(
                tz=self.request.user.settings.time_zone).date()
        if not form.instance.record_time:
            form.instance.record_time = datetime.now(
                tz=self.request.user.settings.time_zone).time()
        return super(GlucoseCreateView, self).form_valid(form)
    def post(self, request, *args, **kwargs):
        # We need to create a copy of request.POST because it's immutable and
        # we need to convert the content of the Value field to mg/dL if the
        # user's glucose unit setting is set to mmol/L.
        request.POST = request.POST.copy()
        if request.user.settings.glucose_unit.name == 'mmol/L':
            request.POST['value'] = to_mg(request.POST['value'])
        logger.info('New glucose added by %s: %s', request.user,
                    request.POST['value'])
        return super(GlucoseCreateView, self).post(request, *args, **kwargs)
class GlucoseUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing Glucose record owned by the logged-in user."""
    model = Glucose
    context_object_name = 'glucose'
    template_name = 'glucoses/glucose_update.html'
    form_class = GlucoseUpdateForm
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        # If the record's user doesn't match the currently logged-in user,
        # deny viewing/updating of the object by showing the 403.html
        # forbidden page. This can occur when the user changes the id in
        # the URL field to a record that the user doesn't own.
        if self.object.user != request.user:
            raise PermissionDenied
        else:
            return super(GlucoseUpdateView, self).get(request, *args, **kwargs)
    def get_success_url(self):
        return reverse('dashboard')
    def get_object(self, queryset=None):
        try:
            obj = Glucose.objects.get(pk=self.kwargs['pk'])
        except Glucose.DoesNotExist:
            raise Http404
        # Convert the value based on user's glucose unit setting.
        obj.value = glucose_by_unit_setting(self.request.user, obj.value)
        return obj
    def post(self, request, *args, **kwargs):
        # We need to create a copy of request.POST because it's immutable and
        # we need to convert the content of the Value field to mg/dL if the
        # user's glucose unit setting is set to mmol/L.
        request.POST = request.POST.copy()
        if request.user.settings.glucose_unit.name == 'mmol/L':
            request.POST['value'] = to_mg(request.POST['value'])
        logger.info('Glucose updated by %s, glucose id: %s', request.user,
                    kwargs['pk'])
        return super(GlucoseUpdateView, self).post(request, *args, **kwargs)
class GlucoseDeleteView(LoginRequiredMixin, DeleteView):
    """Confirm and delete a Glucose record owned by the logged-in user."""
    model = Glucose
    success_url = '/dashboard/'
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        # If the record's user doesn't match the currently logged-in user,
        # deny viewing/updating of the object by showing the 403.html
        # forbidden page. This can occur when the user changes the id in
        # the URL field to a record that the user doesn't own.
        if self.object.user != request.user:
            raise PermissionDenied
        else:
            # Convert the value based on user's glucose unit setting.
            self.object.value = glucose_by_unit_setting(request.user,
                                                        self.object.value)
            context = self.get_context_data(object=self.object)
            return self.render_to_response(context)
class GlucoseListJson(LoginRequiredMixin, BaseDatatableView):
    """
    Server-side Datatables source for the glucose table: renders each
    column's HTML and applies the filter-form parameters to the queryset.
    """
    model = Glucose
    columns = ['value', 'category', 'record_date', 'record_time',
               'notes', 'tags', 'delete']
    order_columns = ['value', 'category', 'record_date', 'record_time', 'notes']
    # Cap on rows returned per Datatables request.
    max_display_length = 500
    def render_column(self, row, column):
        user = self.request.user
        user_settings = user.settings
        low = user_settings.glucose_low
        high = user_settings.glucose_high
        target_min = user_settings.glucose_target_min
        target_max = user_settings.glucose_target_max
        if column == 'value':
            # Display in the user's unit, colored by how the raw mg/dL
            # value compares against the user's low/high/target range.
            value_by_unit_setting = glucose_by_unit_setting(user, row.value)
            edit_url = reverse('glucose_update', args=(row.id,))
            text_class = 'text-primary'
            if row.value < low or row.value > high:
                text_class = 'text-danger'
            elif target_max >= row.value >= target_min:
                text_class = 'text-success'
            return '''<center><a class="%s" href="%s">%s</a></center>''' % \
                   (text_class, edit_url, value_by_unit_setting)
        elif column == 'category':
            return '%s' % row.category.name
        elif column == 'record_date':
            return row.record_date.strftime('%m/%d/%Y')
        elif column == 'record_time':
            return row.record_time.strftime('%I:%M %p')
        elif column == 'tags':
            return ', '.join([t.name for t in row.tags.all()])
        elif column == 'delete':
            delete_url = reverse('glucose_delete', args=(row.id,))
            delete_link = '<a href="%s"><i class="fa fa-times text-danger">' \
                          '</i></a>' % delete_url
            return '<center>%s</center>' % delete_link
        else:
            return super(GlucoseListJson, self).render_column(row, column)
    def get_initial_queryset(self):
        """
        Filter records to show only entries from the currently logged-in user.
        """
        return Glucose.objects.by_user(self.request.user)
    def filter_queryset(self, qs):
        # Apply the free-text search plus each filter-form field in turn.
        # NOTE(review): 'reduce' is used as a builtin, which is Python-2
        # only; under Python 3 it would need 'from functools import reduce'.
        params = self.request.GET
        search = params.get('sSearch')
        if search:
            qs = qs.filter(
                Q(value__startswith=search) |
                Q(category__name__istartswith=search) |
                reduce(operator.and_, (Q(notes__icontains=i) for i in
                                       search.split())) |
                reduce(operator.and_, (Q(tags__name__icontains=i) for i in
                                       search.split()))
            )
        start_date = params.get('start_date', '')
        if start_date:
            qs = qs.filter(record_date__gte=datetime.strptime(
                start_date, DATE_FORMAT))
        end_date = params.get('end_date', '')
        if end_date:
            qs = qs.filter(record_date__lte=datetime.strptime(
                end_date, DATE_FORMAT))
        start_value = params.get('start_value', '')
        if start_value:
            qs = qs.filter(value__gte=start_value)
        end_value = params.get('end_value', '')
        if end_value:
            qs = qs.filter(value__lte=end_value)
        category = params.get('category', '')
        if category:
            qs = qs.filter(category=category)
        notes = params.get('notes', '')
        if notes:
            # Every whitespace-separated word must appear in the notes.
            qs = qs.filter(reduce(
                operator.and_, (Q(notes__icontains=i) for i in notes.split())))
        tags = params.get('tags', '')
        if tags:
            qs = qs.filter(tags__name=tags)
        return qs
#
# -------------------------------------------------------------------------
# Copyright (c) 2018 Intel Corporation Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import json
import unittest
import uuid
import conductor.data.service as service
import mock
import stevedore
import yaml
from conductor.common.utils import conductor_logging_util as log_util
from conductor.data.plugins.inventory_provider import extensions as ip_ext
from conductor.data.plugins.service_controller import extensions as sc_ext
from conductor.data.plugins.vim_controller import extensions as vc_ext
from conductor.data.service import DataEndpoint
from oslo_config import cfg
class TestDataEndpoint(unittest.TestCase):
    """
    Unit tests for conductor.data.service.DataEndpoint, driving it through
    JSON/YAML fixtures and mocked stevedore extension managers.

    NOTE(review): several tests use 'dict.items()[0]', which only works on
    Python 2 where items() returns a list — confirm target interpreter.
    """
    def setUp(self):
        # Build the three plugin extension managers the endpoint needs.
        ip_ext_manager = (
            ip_ext.Manager(cfg.CONF, 'conductor.inventory_provider.plugin'))
        vc_ext_manager = (
            vc_ext.Manager(cfg.CONF, 'conductor.vim_controller.plugin'))
        sc_ext_manager = (
            sc_ext.Manager(cfg.CONF, 'conductor.service_controller.plugin'))
        self.data_ep = DataEndpoint(ip_ext_manager,
                                    vc_ext_manager,
                                    sc_ext_manager)
    def tearDown(self):
        pass
    def test_get_candidate_location(self):
        # Known lat/long, missing lat/long (error), then a second candidate.
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        req_json_candidate = json.loads(open(req_json_file).read())
        req_json = dict()
        req_json['candidate'] = req_json_candidate['candidate_list'][0]
        location = (32.897480, -97.040443)
        self.assertEqual({'response': location, 'error': False},
                         self.data_ep.get_candidate_location(None, req_json))
        req_json['candidate']['latitude'] = None
        req_json['candidate']['longitude'] = None
        self.assertEqual({'response': None, 'error': True},
                         self.data_ep.get_candidate_location(None,
                                                             req_json))
        req_json['candidate'] = req_json_candidate['candidate_list'][1]
        location = (40.7128, -74.0060)
        self.assertEqual({'response': location, 'error': False},
                         self.data_ep.get_candidate_location(None, req_json))
    def test_get_candidate_zone(self):
        # Zone lookup by category: None (error), region, complex.
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        req_json_candidate = json.loads(open(req_json_file).read())
        req_json = dict()
        req_json['candidate'] = req_json_candidate['candidate_list'][0]
        req_json['category'] = None
        self.assertEqual({'response': None, 'error': True},
                         self.data_ep.get_candidate_zone(None, req_json))
        req_json['category'] = 'region'
        self.assertEqual({'response': 'DLLSTX55', 'error': False},
                         self.data_ep.get_candidate_zone(None, req_json))
        req_json['category'] = 'complex'
        self.assertEqual({'response': 'dalls_one', 'error': False},
                         self.data_ep.get_candidate_zone(None, req_json))
        req_json['candidate'] = req_json_candidate['candidate_list'][1]
        req_json['category'] = 'region'
        self.assertEqual({'response': 'NYCNY55', 'error': False},
                         self.data_ep.get_candidate_zone(None, req_json))
    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_get_candidates_from_service(self, ext_mock, debug_mock,
                                         error_mock):
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        # map_method returns one result list per plugin.
        ext_mock.return_value = [candidate_list]
        self.maxDiff = None
        self.assertEqual(2, len(
            self.data_ep.get_candidates_from_service(None, req_json)))
        # An unsupported controller yields an empty (non-error) response.
        req_json['controller'] = 'APP-C'
        self.assertEqual({'response': [], 'error': False},
                         self.data_ep.get_candidates_from_service(None,
                                                                  req_json))
    def test_get_candidate_discard_set(self):
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        value_attrib = 'complex_name'
        value = req_json['properties']['evaluate'][value_attrib]
        candidate_list = req_json['candidate_list']
        # Three candidates differ on complex_name, none on region.
        self.assertEqual(3, len(self.data_ep.get_candidate_discard_set(value,
                                                                       candidate_list,
                                                                       value_attrib)))
        value_attrib = 'region'
        value = req_json['properties']['evaluate'][value_attrib]
        self.assertEqual(0, len(self.data_ep.get_candidate_discard_set(value,
                                                                       candidate_list,
                                                                       value_attrib)))
    def test_get_candidate_discard_set_by_cloud_region(self):
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        value_attrib = 'location_id'
        value = req_json['properties']['evaluate']['cloud-region']
        candidate_list = req_json['candidate_list']
        self.assertEqual(2, len(
            self.data_ep.get_candidate_discard_set_by_cloud_region(value,
                                                                   candidate_list,
                                                                   value_attrib)))
    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    def test_get_inventory_group_candidates(self, ext2_mock, ext1_mock,
                                            info_mock, debug_mock, error_mock):
        # No plugin results, then a None result, then a valid pairing that
        # keeps only 'instance-1' from the candidate list.
        ext1_mock.return_value = None
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        self.assertEqual({'response': [], 'error': True},
                         self.data_ep.get_inventory_group_candidates(None,
                                                                     arg=req_json))
        ext1_mock.return_value = [None]
        self.assertEqual({'response': [], 'error': True},
                         self.data_ep.get_inventory_group_candidates(None,
                                                                     arg=req_json))
        pairs = [['instance-1', 'instance-2']]
        ext1_mock.return_value = [pairs]
        ext2_mock.return_value = ['aai']
        candidate_list = req_json['candidate_list']
        expected_candidate_list = [c for c in candidate_list
                                   if c["candidate_id"] == 'instance-1']
        self.assertEqual({'response': expected_candidate_list, 'error': False},
                         self.data_ep.get_inventory_group_candidates(None,
                                                                     arg=req_json))
    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    def test_get_candidates_by_attributes(self, ext_mock2, ext_mock1,
                                          info_mock, debug_mock, error_mock):
        # Drives the network-role filtering via the ip_ext_sideeffect
        # helper defined at module level.
        req_json_file = './conductor/tests/unit/data/constraints.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        ext_mock1.side_effect = ip_ext_sideeffect
        ext_mock2.return_value = ['aai']
        self.maxDiff = None
        expected_response = {'response': [candidate_list[0]], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        req_json['properties']['evaluate']['network_roles'] = {"all": []}
        expected_response = {'response': [candidate_list[0]], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        req_json['properties']['evaluate']['network_roles'] = {"any": []}
        expected_response = {'response': [candidate_list[0]], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        req_json['properties']['evaluate']['network_roles'] = {
            "all": ['role-1']}
        expected_response = {'response': [], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
        req_json['properties']['evaluate']['network_roles'] = {
            "all": ['role-2']}
        expected_response = {'response': [], 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_by_attributes(None,
                                                                   req_json))
    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(log_util, 'getTransactionId')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_resolve_demands(self, ext_mock, logutil_mock, info_mock,
                             debug_mock,
                             error_mock):
        self.maxDiff = None
        req_json_file = './conductor/tests/unit/data/demands.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        ctxt = {
            'plan_id': uuid.uuid4(),
            'keyspace': cfg.CONF.keyspace
        }
        logutil_mock.return_value = uuid.uuid4()
        # No plugin results -> error with empty resolution.
        ext_mock.return_value = []
        expected_response = {'response': {'resolved_demands': None, 'trans': {'plan_id': None,
                                                                              'plan_name': None,
                                                                              'translator_triage': []}},
                             'error': True}
        self.assertEqual(expected_response,
                         self.data_ep.resolve_demands(ctxt, req_json))
        return_value = req_json['demands']['vG']
        ext_mock.return_value = [return_value]
        expected_response = { 'error': False, 'response':
            { 'resolved_demands':
                  [{ 'attributes':
                         { 'customer-id': 'some_company', 'provisioning-status': 'provisioned' },
                     'inventory_provider': 'aai', 'inventory_type': 'service', 'service_type': 'vG' },
                   { 'inventory_provider': 'aai', 'inventory_type': 'cloud' } ],
              'trans': { 'plan_id': 'plan_abc', 'plan_name': 'plan_name', 'translator_triage': [ [] ] } } }
        self.assertEqual(expected_response,
                         self.data_ep.resolve_demands(ctxt, req_json))
    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'debug')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(log_util, 'getTransactionId')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_resolve_vfmodule_demands(self, ext_mock, logutil_mock, info_mock,
                                      debug_mock,
                                      error_mock):
        self.maxDiff = None
        req_json_file = './conductor/tests/unit/data/demands_vfmodule.json'
        req_json = yaml.safe_load(open(req_json_file).read())
        ctxt = {
            'plan_id': uuid.uuid4(),
            'keyspace': cfg.CONF.keyspace
        }
        logutil_mock.return_value = uuid.uuid4()
        return_value = req_json['demands']['vFW-SINK']
        ext_mock.return_value = [return_value]
        expected_response = \
            {'response': {'trans': {'translator_triage': [ [] ], 'plan_name': 'plan_name', 'plan_id': 'plan_abc'},
                          'resolved_demands': [{'service_resource_id': 'vFW-SINK-XX', 'vlan_key': 'vlan_key',
                                                'inventory_provider': 'aai', 'inventory_type': 'vfmodule',
                                                'excluded_candidates': [
                                                    {'candidate_id': 'e765d576-8755-4145-8536-0bb6d9b1dc9a',
                                                     'inventory_type': 'vfmodule'
                                                     }], 'port_key': 'vlan_port', 'service_type': 'vFW-SINK-XX',
                                                'attributes': {'global-customer-id': 'Demonstration',
                                                               'cloud-region-id': {'get_param': 'chosen_region'},
                                                               'model-version-id':
                                                                   '763731df-84fd-494b-b824-01fc59a5ff2d',
                                                               'prov-status': 'ACTIVE',
                                                               'service_instance_id': {'get_param': 'service_id'},
                                                               'model-invariant-id':
                                                                   'e7227847-dea6-4374-abca-4561b070fe7d',
                                                               'orchestration-status': ['active']
                                                               }
                                                }]
                          }, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.resolve_demands(ctxt, req_json))
    @mock.patch.object(service.LOG, 'error')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_get_candidates_with_hpa(self, hpa_mock, ext_mock1,
                                     info_mock, error_mock):
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        hpa_json_file = './conductor/tests/unit/data/hpa_constraints.json'
        hpa_json = yaml.safe_load(open(hpa_json_file).read())
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        # NOTE(review): items()[0] is Python-2-only (items() as a list).
        (constraint_id, constraint_info) = \
            hpa_json["conductor_solver"]["constraints"][0].items()[0]
        hpa_constraint = constraint_info['properties']
        flavorProperties = hpa_constraint['evaluate'][0]['flavorProperties']
        id = hpa_constraint['evaluate'][0]['id']
        type = hpa_constraint['evaluate'][0]['type']
        directives = hpa_constraint['evaluate'][0]['directives']
        attr = directives[0].get("attributes")
        label_name = attr[0].get("attribute_name")
        ext_mock1.return_value = ['aai']
        flavor_info = {"flavor-id": "vim-flavor-id1",
                       "flavor-name": "vim-flavor-name1"}
        directive = [
            {
                "id": id,
                "type": type,
                "directives": directives
            }
        ]
        hpa_mock.return_value = [flavor_info]
        self.maxDiff = None
        args = generate_args(candidate_list, flavorProperties, id, type, directives)
        hpa_candidate_list = copy.deepcopy(candidate_list)
        hpa_candidate_list[1]['flavor_map'] = {}
        hpa_candidate_list[1]['flavor_map'][label_name] = "vim-flavor-name1"
        hpa_candidate_list[1]['all_directives'] = {}
        hpa_candidate_list[1]['all_directives']['directives'] = directive
        hpa_candidate_list1 = []
        hpa_candidate_list1.append(hpa_candidate_list[0])
        expected_response = {'response': hpa_candidate_list1, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        # Degenerate flavor results (empty list / dict / blank or missing
        # name) should all filter the HPA candidate out.
        hpa_candidate_list2 = list()
        hpa_candidate_list2.append(copy.deepcopy(candidate_list[0]))
        args = generate_args(candidate_list, flavorProperties, id, type, directives)
        hpa_mock.return_value = []
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        flavor_info = {}
        hpa_mock.return_value = [flavor_info]
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        flavor_info = {"flavor-id": "vim-flavor-id1",
                       "flavor-name": ""}
        hpa_mock.return_value = [flavor_info]
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
        flavor_info = {"flavor-id": "vim-flavor-id1"}
        hpa_mock.return_value = [flavor_info]
        expected_response = {'response': hpa_candidate_list2, 'error': False}
        self.assertEqual(expected_response,
                         self.data_ep.get_candidates_with_hpa(None, args))
    @mock.patch.object(service.LOG, 'warn')
    @mock.patch.object(service.LOG, 'info')
    @mock.patch.object(stevedore.ExtensionManager, 'names')
    @mock.patch.object(stevedore.ExtensionManager, 'map_method')
    def test_get_candidates_with_vim_capacity(self, vim_mock, ext_mock1,
                                              info_mock, warn_mock):
        req_json_file = './conductor/tests/unit/data/candidate_list.json'
        hpa_json_file = './conductor/tests/unit/data/hpa_constraints.json'
        hpa_json = yaml.safe_load(open(hpa_json_file).read())
        req_json = yaml.safe_load(open(req_json_file).read())
        candidate_list = req_json['candidate_list']
        ext_mock1.return_value = ['MultiCloud']
        # NOTE(review): items()[0] is Python-2-only (items() as a list).
        (constraint_id, constraint_info) = \
            hpa_json["conductor_solver"]["constraints"][2].items()[0]
        vim_request = constraint_info['properties']['request']
        ctxt = {}
        candidate_list_copy = list(copy.deepcopy(candidate_list))
        args = {"candidate_list": [candidate_list_copy[1]],
                "request": vim_request}
        # Matching VIM -> candidate kept; empty/None results -> error;
        # empty inner list -> candidate filtered, no error.
        vim_mock.return_value = [['att-aic_NYCNY55']]
        self.assertEqual({'response': [candidate_list[1]], 'error': False},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = []
        self.assertEqual({'response': [candidate_list[1]], 'error': True},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = [None]
        self.assertEqual({'response': [candidate_list[1]], 'error': True},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = None
        self.assertEqual({'response': [candidate_list[1]], 'error': True},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
        vim_mock.return_value = [[]]
        self.assertEqual({'response': [], 'error': False},
                         self.data_ep.get_candidates_with_vim_capacity(ctxt,
                                                                       args))
def generate_args(candidate_list, flavorProperties, vf_id, model_type, directives):
    """
    Bundle the HPA filter inputs into the argument dict expected by
    DataEndpoint.get_candidates_with_hpa.

    The candidate list is deep-copied so the endpoint can mutate its copy
    without affecting the caller's fixture data.
    """
    return {
        "candidate_list": copy.deepcopy(candidate_list),
        "flavorProperties": flavorProperties,
        "id": vf_id,
        "type": model_type,
        "directives": directives,
    }
def ip_ext_sideeffect(*args, **kwargs):
    """
    Mock side_effect for stevedore ExtensionManager.map_method calls in
    the inventory-provider tests.

    args[0] is the plugin method name; returns the canned plugin result
    list, or None for the 'role-1' network role and for unknown methods.

    The previous version also read constraints.json into an unused local,
    making this pure helper needlessly depend on file I/O; that dead read
    has been removed.
    """
    if args[0] == 'check_network_roles':
        # 'role-1' plays the part of an unknown network role in fixtures.
        if kwargs['network_role_id'] == 'role-1':
            return None
        return ['DLLSTX55']
    elif args[0] == 'check_candidate_role':
        return ['candidate-role0']
    # Unknown method name: behave like the original implicit fall-through.
    return None
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
|
#
# Copyright (c) 2021 The banded_matrices Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import timeit
import numpy as np
import tensorflow as tf
class Timer:
    """
    A context manager that times what is running within its context.

    After the `with` block exits, the wall-clock duration is available in
    `elapsed_time`; while inside the block (or before any use) it is None.
    """

    def __init__(self):
        self.elapsed_time = None
        self.start_time = None

    def __enter__(self):
        # Reset on entry so one Timer instance can be reused for
        # several independent measurements.
        self.elapsed_time = None
        self.start_time = timeit.default_timer()
        return self

    def __exit__(self, *exc_info):
        self.elapsed_time = timeit.default_timer() - self.start_time
def constant_op(t: np.ndarray) -> tf.Tensor:
    """
    Wrapper around tensorflow.python.framework which confuses pylint/mypy.

    Simply returns `tf.constant(t)`; exists so call sites get a clean
    type-checked entry point.
    """
    return tf.constant(t)
def generate_band_mat(n: int, l: int, u: int) -> np.ndarray:
    """
    Constructs the band as a ( l + u + 1 x n ) array

    Entries are uniform random in [0, 1).
    NOTE(review): `construct_extract_banded_matrix` is not imported in the
    visible header — presumably it comes from a project module; verify.
    """
    return construct_extract_banded_matrix(l, u, np.random.rand(l + u + 1, n))
def to_dense(band: np.ndarray, l: int, u: int) -> np.ndarray:
    """
    Constructs the full ( n x n ) matrix from the band

    NOTE(review): `construct_banded_matrix_from_band` is not imported in the
    visible header — presumably a project helper; verify.
    """
    return construct_banded_matrix_from_band(l, u, band)
def extract_band(dense_matrix: np.ndarray, l: int, u: int) -> np.ndarray:
    """
    Extract the band of a full matrix into a rectangular array

    NOTE(review): `extract_band_from_matrix` is not imported in the visible
    header — presumably a project helper; verify.
    """
    return extract_band_from_matrix(l, u, dense_matrix)
def gen_dense_banded_lower_triangular(n: int, k: int) -> np.ndarray:
    """Generate a dense ``n x n`` lower-triangular banded matrix with ``k`` diagonals."""
    assert k <= n
    # k diagonals means a lower bandwidth of k - 1 and no upper band.
    band = generate_band_mat(n, k - 1, 0)
    return to_dense(band, k - 1, 0)
def compute_gradient_error(
    input_tensor: tf.Tensor, output_tensor: tf.Tensor, delta: float = 1e-3
) -> float:
    """Finite-differencing error of d(output)/d(input).

    Shapes are passed explicitly because TensorFlow < 1.7 needs some care
    about the shape arguments.
    """
    input_shape = [int(d) for d in input_tensor.shape]
    output_shape = [int(d) for d in output_tensor.shape]
    return tf.compat.v1.test.compute_gradient_error(
        input_tensor,
        input_shape,
        output_tensor,
        output_shape,
        delta=delta,
    )
def generate_banded_positive_definite_matrix(
    dimension: int, lower_bandwidth: int
) -> np.ndarray:
    """
    Generate a banded matrix constructed as LL^T for an underlying banded matrix L.

    We don't return L since usually we are not able to recover exactly that
    decomposition.

    NOTE: Only the lower half of the symmetric resulting matrix is returned,
    so the result has shape ``(lower_bandwidth + 1, dimension)``.
    """
    # Random lower band, shifted so entries are positive; force the diagonal
    # row to be positive so the product is positive definite.
    lower = generate_band_mat(dimension, lower_bandwidth, 0) + 1
    lower[0, :] = np.abs(lower[0, :])
    lower_dense = to_dense(lower, lower_bandwidth, 0)
    # Form Q = L L^T and re-extract a band of the same lower bandwidth.
    return extract_band(lower_dense @ lower_dense.T, lower_bandwidth, 0)
def generate_banded_tensor(shape_with_bands, ensure_positive_definite=False) -> np.ndarray:
    """
    Generalization of `generate_band_mat` to tensors of rank possibly higher
    than 2; such tensors "stack up" banded matrices.

    In `shape_with_bands` the elements at positions -3 and -2 are the lower
    and upper bandwidths; the actual tensor width is their sum + 1.
    """
    assert len(shape_with_bands) > 2
    lower_band, upper_band, dimension = shape_with_bands[-3:]
    width = lower_band + 1 + upper_band
    shape = shape_with_bands[:-3] + (width, dimension)
    # Positive-definite generation only makes sense for lower-triangular bands.
    assert not ensure_positive_definite or upper_band == 0
    if len(shape) == 2:
        # Base case: a single banded matrix.
        if ensure_positive_definite:
            return generate_banded_positive_definite_matrix(dimension, lower_band)
        return generate_band_mat(dimension, lower_band, upper_band)
    # Recursive case: stack one generated sub-tensor per leading index.
    leaves = [
        generate_banded_tensor(shape_with_bands[1:], ensure_positive_definite)
        for _ in range(shape_with_bands[0])
    ]
    return np.stack(leaves)
def to_dense_tensor(matrix: np.ndarray, lower_band: int, upper_band: int) -> np.ndarray:
    """
    Generalization of `to_dense` to tensors of rank possibly higher than 2;
    such tensors "stack up" banded matrices.
    """
    assert matrix.ndim >= 2
    width, dimension = matrix.shape[-2:]
    assert width == lower_band + 1 + upper_band
    if matrix.ndim == 2:
        # Base case: a single band to densify.
        return to_dense(matrix, lower_band, upper_band)
    # Recursive case: densify each sub-tensor along the leading axis.
    leading = matrix.shape[0]
    return np.stack(
        [to_dense_tensor(matrix[i], lower_band, upper_band) for i in range(leading)]
    )
def construct_banded_matrix_from_band(
    num_lower_diagonals: int, num_upper_diagonals: int, rect_mat: np.ndarray
) -> np.ndarray:
    """
    Construct a square banded matrix from a representation of the band.

    :param num_lower_diagonals: aka ``l``
    :param num_upper_diagonals: aka ``u``
    :param rect_mat: Matrix of shape (num_diagonals, size) where size is the
        size of the corresponding square banded matrix.
    """
    assert num_lower_diagonals >= 0
    assert num_upper_diagonals >= 0
    assert rect_mat.ndim == 2
    band_width = num_lower_diagonals + 1 + num_upper_diagonals
    assert rect_mat.shape[0] == band_width
    size = rect_mat.shape[1]
    full_matrix = np.zeros((size, size))
    # Offset 0 is the main diagonal; negative offsets are upper diagonals,
    # positive offsets are lower diagonals.
    for offset in range(-num_upper_diagonals, num_lower_diagonals + 1):
        row = num_upper_diagonals + offset
        # Only columns whose target row index (col + offset) is in range.
        first_col = max(0, -offset)
        last_col = max(0, size + min(0, -offset))
        for col in range(first_col, last_col):
            full_matrix[col + offset, col] = rect_mat[row, col]
    return full_matrix
def extract_band_from_matrix(
    num_lower_diagonals: int, num_upper_diagonals: int, full_matrix: np.ndarray
) -> np.ndarray:
    """
    Extract a representation of the band from a square banded matrix.

    :param num_lower_diagonals: aka ``l``
    :param num_upper_diagonals: aka ``u``
    :param full_matrix: Square banded matrix.
    """
    assert num_lower_diagonals >= 0
    assert num_upper_diagonals >= 0
    assert full_matrix.ndim == 2
    size = full_matrix.shape[0]
    assert full_matrix.shape == (size, size)
    band_width = num_lower_diagonals + 1 + num_upper_diagonals
    band = np.empty((band_width, size))
    # Offset 0 is the main diagonal; negative offsets read upper diagonals,
    # positive offsets read lower ones.  Out-of-range positions become 0.
    for offset in range(-num_upper_diagonals, num_lower_diagonals + 1):
        row = num_upper_diagonals + offset
        for col in range(size):
            source_row = col + offset
            band[row, col] = (
                full_matrix[source_row, col] if 0 <= source_row < size else 0.0
            )
    return band
def extract_construct_banded_matrix(
    num_lower_diagonals: int, num_upper_diagonals: int, full_matrix: np.ndarray
) -> np.ndarray:
    """Round trip a dense matrix through its band: the result equals
    *full_matrix* with every entry outside the given band zeroed out."""
    band = extract_band_from_matrix(
        num_lower_diagonals=num_lower_diagonals,
        num_upper_diagonals=num_upper_diagonals,
        full_matrix=full_matrix,
    )
    dense = construct_banded_matrix_from_band(
        num_lower_diagonals=num_lower_diagonals,
        num_upper_diagonals=num_upper_diagonals,
        rect_mat=band,
    )
    return dense
def construct_extract_banded_matrix(
    num_lower_diagonals: int, num_upper_diagonals: int, rect_mat: np.ndarray
) -> np.ndarray:
    """Round trip a band through the dense matrix: the result equals
    *rect_mat* with entries that map outside the square matrix zeroed out."""
    dense = construct_banded_matrix_from_band(
        num_lower_diagonals=num_lower_diagonals,
        num_upper_diagonals=num_upper_diagonals,
        rect_mat=rect_mat,
    )
    band = extract_band_from_matrix(
        num_lower_diagonals=num_lower_diagonals,
        num_upper_diagonals=num_upper_diagonals,
        full_matrix=dense,
    )
    return band
|
<gh_stars>1-10
import sys
import inspect
from functools import update_wrapper
from ._compat import iteritems
from ._unicodefun import _check_for_unicode_literals
from .utils import echo
from .globals import get_current_context
def pass_context(f):
    """Mark a callback as wanting to receive the current context object
    as its first argument.
    """
    def wrapper(*args, **kwargs):
        ctx = get_current_context()
        return f(ctx, *args, **kwargs)
    return update_wrapper(wrapper, f)
def pass_obj(f):
    """Similar to :func:`pass_context`, but only passes on the object
    stored on the context (:attr:`Context.obj`).  This is useful when the
    object represents the state of a nested system.
    """
    def wrapper(*args, **kwargs):
        obj = get_current_context().obj
        return f(obj, *args, **kwargs)
    return update_wrapper(wrapper, f)
def make_pass_decorator(object_type, ensure=False):
    """Given an object type, create a decorator that works like
    :func:`pass_obj` but, instead of passing the current context object,
    finds the innermost context carrying an object of ``object_type``.

    A decorator generated by this function works roughly like this::

        from functools import update_wrapper

        def decorator(f):
            @pass_context
            def new_func(ctx, *args, **kwargs):
                obj = ctx.find_object(object_type)
                return ctx.invoke(f, obj, *args, **kwargs)
            return update_wrapper(new_func, f)
        return decorator

    :param object_type: the type of the object to pass.
    :param ensure: if set to `True`, a new object is created and
                   remembered on the context when none exists yet.
    """
    def decorator(f):
        def wrapper(*args, **kwargs):
            ctx = get_current_context()
            if ensure:
                obj = ctx.ensure_object(object_type)
            else:
                obj = ctx.find_object(object_type)
            if obj is None:
                # Reaching this point means no context in the chain carries
                # an object of the requested type (and ensure was off).
                raise RuntimeError('Managed to invoke callback without a '
                                   'context object of type %r existing'
                                   % object_type.__name__)
            return ctx.invoke(f, obj, *args, **kwargs)
        return update_wrapper(wrapper, f)
    return decorator
def _make_command(f, name, attrs, cls):
    """Instantiate command class *cls* around callback *f*, collecting any
    parameters previously memoized on the function by decorators."""
    if isinstance(f, Command):
        raise TypeError('Attempted to convert a callback into a '
                        'command twice.')
    try:
        # Decorators are applied bottom-up, so the memoized list is in
        # reverse declaration order; restore it and clean up the marker.
        params = f.__click_params__
        params.reverse()
        del f.__click_params__
    except AttributeError:
        params = []
    help_text = attrs.get('help')
    if help_text is None:
        # Fall back to the callback's docstring; getdoc already cleans it.
        help_text = inspect.getdoc(f)
        if isinstance(help_text, bytes):
            help_text = help_text.decode('utf-8')
    else:
        help_text = inspect.cleandoc(help_text)
    attrs['help'] = help_text
    _check_for_unicode_literals()
    command_name = name or f.__name__.lower().replace('_', '-')
    return cls(name=command_name, callback=f, params=params, **attrs)
def command(name=None, cls=None, **attrs):
    r"""Create a new :class:`Command` and use the decorated function as
    its callback.  All decorated :func:`option`\s and :func:`argument`\s
    are attached to the command automatically as parameters.

    The command name defaults to the function name; pass a name as the
    first argument to change it.  All keyword arguments are forwarded to
    the command class.

    Once decorated, the function becomes a :class:`Command` instance that
    can be invoked as a command line utility or attached as a subcommand
    of a :class:`Group`.

    :param name: the name of the command.  Defaults to the function name
                 with underscores replaced by dashes.
    :param cls: the command class to instantiate.  Defaults to
                :class:`Command`.
    """
    command_cls = Command if cls is None else cls

    def decorator(f):
        cmd = _make_command(f, name, attrs, command_cls)
        cmd.__doc__ = f.__doc__
        return cmd
    return decorator
def group(name=None, **attrs):
    """Create a new :class:`Group` with the decorated function as its
    callback.  Works exactly like :func:`command` with the ``cls``
    parameter preset to :class:`Group`.
    """
    if 'cls' not in attrs:
        attrs['cls'] = Group
    return command(name, **attrs)
def _param_memo(f, param):
    """Record *param* either directly on a finished command or on the
    still-undecorated callback for later collection by `_make_command`."""
    if isinstance(f, Command):
        f.params.append(param)
        return
    if not hasattr(f, '__click_params__'):
        f.__click_params__ = []
    f.__click_params__.append(param)
def argument(*param_decls, **attrs):
    """Attach an argument to the command.

    All positional arguments are passed as parameter declarations to
    :class:`Argument`; all keyword arguments are forwarded unchanged
    (except ``cls``).  This is equivalent to creating an
    :class:`Argument` instance manually and appending it to
    :attr:`Command.params`.

    :param cls: the argument class to instantiate.  Defaults to
                :class:`Argument`.
    """
    def decorator(f):
        argument_cls = attrs.pop('cls', Argument)
        _param_memo(f, argument_cls(param_decls, **attrs))
        return f
    return decorator
def option(*param_decls, **attrs):
    """Attach an option to the command.

    All positional arguments are passed as parameter declarations to
    :class:`Option`; all keyword arguments are forwarded unchanged
    (except ``cls``).  This is equivalent to creating an :class:`Option`
    instance manually and appending it to :attr:`Command.params`.

    :param cls: the option class to instantiate.  Defaults to
                :class:`Option`.
    """
    def decorator(f):
        # Issue 926: work on a copy of attrs so that pre-defined options
        # can re-use the same cls=.
        local_attrs = attrs.copy()
        if 'help' in local_attrs:
            local_attrs['help'] = inspect.cleandoc(local_attrs['help'])
        option_cls = local_attrs.pop('cls', Option)
        _param_memo(f, option_cls(param_decls, **local_attrs))
        return f
    return decorator
def confirmation_option(*param_decls, **attrs):
    """Shortcut for a confirmation prompt that can be skipped by passing
    ``--yes``.  Equivalent to decorating a function with :func:`option`
    using the following parameters::

        def callback(ctx, param, value):
            if not value:
                ctx.abort()

        @click.command()
        @click.option('--yes', is_flag=True, callback=callback,
                      expose_value=False, prompt='Do you want to continue?')
        def dropdb():
            pass
    """
    def decorator(f):
        def callback(ctx, param, value):
            if not value:
                ctx.abort()
        defaults = {
            'is_flag': True,
            'callback': callback,
            'expose_value': False,
            'prompt': 'Do you want to continue?',
            'help': 'Confirm the action without prompting.',
        }
        for key, default in defaults.items():
            attrs.setdefault(key, default)
        return option(*(param_decls or ('--yes',)), **attrs)(f)
    return decorator
def password_option(*param_decls, **attrs):
    """Shortcut for a password prompt.  Equivalent to decorating a
    function with :func:`option` using the following parameters::

        @click.command()
        @click.option('--password', prompt=True, confirmation_prompt=True,
                      hide_input=True)
        def changeadmin(password):
            pass
    """
    def decorator(f):
        defaults = {
            'prompt': True,
            'confirmation_prompt': True,
            'hide_input': True,
        }
        for key, default in defaults.items():
            attrs.setdefault(key, default)
        return option(*(param_decls or ('--password',)), **attrs)(f)
    return decorator
def version_option(version=None, *param_decls, **attrs):
    """Add a ``--version`` option which immediately prints the version
    number and exits the program.  Implemented as an eager option that
    prints in its callback and then exits.

    :param version: the version number to show.  If not provided, Click
                    attempts to detect it via setuptools.
    :param prog_name: the name of the program (auto-detected by default)
    :param message: custom message to show instead of the default
                    (``'%(prog)s, version %(version)s'``)
    :param others: everything else is forwarded to :func:`option`.
    """
    if version is None:
        # Remember the caller's module name now, so the version can later
        # be looked up among installed distributions' console_scripts.
        if hasattr(sys, '_getframe'):
            module = sys._getframe(1).f_globals.get('__name__')
        else:
            # _getframe is CPython-specific; fall back to an empty name.
            module = ''
    def decorator(f):
        prog_name = attrs.pop('prog_name', None)
        message = attrs.pop('message', '%(prog)s, version %(version)s')
        def callback(ctx, param, value):
            # Ignore the flag when absent or during resilient parsing
            # (e.g. shell completion).
            if not value or ctx.resilient_parsing:
                return
            prog = prog_name
            if prog is None:
                prog = ctx.find_root().info_name
            ver = version
            if ver is None:
                # Scan installed distributions for one whose
                # console_scripts entry point targets the captured module.
                try:
                    import pkg_resources
                except ImportError:
                    pass
                else:
                    for dist in pkg_resources.working_set:
                        scripts = dist.get_entry_map().get('console_scripts') or {}
                        for script_name, entry_point in iteritems(scripts):
                            if entry_point.module_name == module:
                                ver = dist.version
                                break
                if ver is None:
                    raise RuntimeError('Could not determine version')
            echo(message % {
                'prog': prog,
                'version': ver,
            }, color=ctx.color)
            ctx.exit()
        attrs.setdefault('is_flag', True)
        attrs.setdefault('expose_value', False)
        attrs.setdefault('is_eager', True)
        attrs.setdefault('help', 'Show the version and exit.')
        attrs['callback'] = callback
        return option(*(param_decls or ('--version',)), **attrs)(f)
    return decorator
def help_option(*param_decls, **attrs):
    """Add a ``--help`` option which immediately prints the help page and
    exits.  Usually unnecessary to add, since it is added to all commands
    by default unless suppressed.  Like :func:`version_option`, this is
    implemented as an eager option that prints in its callback and exits.
    All arguments are forwarded to :func:`option`.
    """
    def decorator(f):
        def callback(ctx, param, value):
            if value and not ctx.resilient_parsing:
                echo(ctx.get_help(), color=ctx.color)
                ctx.exit()
        defaults = {
            'is_flag': True,
            'expose_value': False,
            'help': 'Show this message and exit.',
            'is_eager': True,
        }
        for key, default in defaults.items():
            attrs.setdefault(key, default)
        attrs['callback'] = callback
        return option(*(param_decls or ('--help',)), **attrs)(f)
    return decorator
# Circular dependencies between core and decorators
from .core import Command, Group, Argument, Option
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
import _geos
import new
# ``new.instancemethod`` is referenced by SWIG-generated code to attach
# methods to proxy classes (Python 2 only).
new_instancemethod = new.instancemethod
# Alias the builtin ``property`` for generated code; older interpreters
# without it simply skip the alias.
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """SWIG helper: set attribute *name* on a proxy object, delegating
    ownership flags and SWIG setter methods; when *static* is true, refuse
    to create attributes that do not already exist."""
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    # Prefer the generated C-level setter when one is registered.
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant: allows creating new attributes (static=0).
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    """SWIG helper: read attribute *name* via the registered C-level getter,
    handling the special ``thisown`` ownership flag."""
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
def _swig_repr(self):
    """Default repr for SWIG proxies, showing the wrapped C object."""
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-/new-style class compatibility: base generated classes on
# ``types.ObjectType`` (new-style) when available, otherwise fall back to a
# classic class.
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types
def _swig_setattr_nondynamic_method(set):
    """Wrap a setter *set* so that only existing attributes (or the special
    ``this``/``thisown`` SWIG attributes) may be assigned."""
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
class PySwigIterator(object):
    """SWIG proxy for the C++ iterator type used by wrapped containers
    (generated code; do not edit by hand)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_PySwigIterator
    __del__ = lambda self : None;
    def value(*args): return _geos.PySwigIterator_value(*args)
    def incr(*args): return _geos.PySwigIterator_incr(*args)
    def decr(*args): return _geos.PySwigIterator_decr(*args)
    def distance(*args): return _geos.PySwigIterator_distance(*args)
    def equal(*args): return _geos.PySwigIterator_equal(*args)
    def copy(*args): return _geos.PySwigIterator_copy(*args)
    def next(*args): return _geos.PySwigIterator_next(*args)
    def previous(*args): return _geos.PySwigIterator_previous(*args)
    def advance(*args): return _geos.PySwigIterator_advance(*args)
    def __eq__(*args): return _geos.PySwigIterator___eq__(*args)
    def __ne__(*args): return _geos.PySwigIterator___ne__(*args)
    def __iadd__(*args): return _geos.PySwigIterator___iadd__(*args)
    def __isub__(*args): return _geos.PySwigIterator___isub__(*args)
    def __add__(*args): return _geos.PySwigIterator___add__(*args)
    def __sub__(*args): return _geos.PySwigIterator___sub__(*args)
    def __iter__(self): return self
# Register the proxy class with the C extension's type system.
PySwigIterator_swigregister = _geos.PySwigIterator_swigregister
PySwigIterator_swigregister(PySwigIterator)
# GEOS version constants, geometry type ids and WKB byte-order codes,
# re-exported from the underlying C extension module.
GEOS_VERSION_MAJOR = _geos.GEOS_VERSION_MAJOR
GEOS_VERSION_MINOR = _geos.GEOS_VERSION_MINOR
GEOS_VERSION = _geos.GEOS_VERSION
GEOS_JTS_PORT = _geos.GEOS_JTS_PORT
GEOS_CAPI_VERSION_MAJOR = _geos.GEOS_CAPI_VERSION_MAJOR
GEOS_CAPI_VERSION_MINOR = _geos.GEOS_CAPI_VERSION_MINOR
GEOS_CAPI_VERSION_PATCH = _geos.GEOS_CAPI_VERSION_PATCH
GEOS_CAPI_FIRST_INTERFACE = _geos.GEOS_CAPI_FIRST_INTERFACE
GEOS_CAPI_LAST_INTERFACE = _geos.GEOS_CAPI_LAST_INTERFACE
GEOS_CAPI_VERSION = _geos.GEOS_CAPI_VERSION
GEOS_POINT = _geos.GEOS_POINT
GEOS_LINESTRING = _geos.GEOS_LINESTRING
GEOS_LINEARRING = _geos.GEOS_LINEARRING
GEOS_POLYGON = _geos.GEOS_POLYGON
GEOS_MULTIPOINT = _geos.GEOS_MULTIPOINT
GEOS_MULTILINESTRING = _geos.GEOS_MULTILINESTRING
GEOS_MULTIPOLYGON = _geos.GEOS_MULTIPOLYGON
GEOS_GEOMETRYCOLLECTION = _geos.GEOS_GEOMETRYCOLLECTION
GEOS_WKB_XDR = _geos.GEOS_WKB_XDR
GEOS_WKB_NDR = _geos.GEOS_WKB_NDR
version = _geos.version
class CoordinateSequence(object):
    """SWIG proxy for the GEOS ``CoordinateSequence`` type: an ordered list
    of (x, y[, z]) coordinates (generated code; do not edit by hand)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _geos.new_CoordinateSequence(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _geos.delete_CoordinateSequence
    __del__ = lambda self : None;
    def clone(*args): return _geos.CoordinateSequence_clone(*args)
    def setX(*args): return _geos.CoordinateSequence_setX(*args)
    def setY(*args): return _geos.CoordinateSequence_setY(*args)
    def setZ(*args): return _geos.CoordinateSequence_setZ(*args)
    def setOrdinate(*args): return _geos.CoordinateSequence_setOrdinate(*args)
    def getX(*args): return _geos.CoordinateSequence_getX(*args)
    def getY(*args): return _geos.CoordinateSequence_getY(*args)
    def getZ(*args): return _geos.CoordinateSequence_getZ(*args)
    def getOrdinate(*args): return _geos.CoordinateSequence_getOrdinate(*args)
    def getSize(*args): return _geos.CoordinateSequence_getSize(*args)
    def getDimensions(*args): return _geos.CoordinateSequence_getDimensions(*args)
CoordinateSequence_swigregister = _geos.CoordinateSequence_swigregister
CoordinateSequence_swigregister(CoordinateSequence)
class Geometry(object):
    """SWIG proxy for the GEOS ``Geometry`` base class, exposing predicates
    (contains, intersects, ...), constructive operations (buffer, union, ...)
    and properties (area, length, ...) of all geometry subtypes
    (generated code; do not edit by hand)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_Geometry
    __del__ = lambda self : None;
    def clone(*args): return _geos.Geometry_clone(*args)
    def geomType(*args): return _geos.Geometry_geomType(*args)
    def typeId(*args): return _geos.Geometry_typeId(*args)
    def normalize(*args): return _geos.Geometry_normalize(*args)
    def getSRID(*args): return _geos.Geometry_getSRID(*args)
    def setSRID(*args): return _geos.Geometry_setSRID(*args)
    def getDimensions(*args): return _geos.Geometry_getDimensions(*args)
    def getNumGeometries(*args): return _geos.Geometry_getNumGeometries(*args)
    def intersection(*args): return _geos.Geometry_intersection(*args)
    def buffer(*args): return _geos.Geometry_buffer(*args)
    def convexHull(*args): return _geos.Geometry_convexHull(*args)
    def difference(*args): return _geos.Geometry_difference(*args)
    def symDifference(*args): return _geos.Geometry_symDifference(*args)
    def boundary(*args): return _geos.Geometry_boundary(*args)
    def union(*args): return _geos.Geometry_union(*args)
    def pointOnSurface(*args): return _geos.Geometry_pointOnSurface(*args)
    def getCentroid(*args): return _geos.Geometry_getCentroid(*args)
    def getEnvelope(*args): return _geos.Geometry_getEnvelope(*args)
    def relate(*args): return _geos.Geometry_relate(*args)
    def lineMerge(*args): return _geos.Geometry_lineMerge(*args)
    def simplify(*args): return _geos.Geometry_simplify(*args)
    def topologyPreserveSimplify(*args): return _geos.Geometry_topologyPreserveSimplify(*args)
    def relatePattern(*args): return _geos.Geometry_relatePattern(*args)
    def disjoint(*args): return _geos.Geometry_disjoint(*args)
    def touches(*args): return _geos.Geometry_touches(*args)
    def intersects(*args): return _geos.Geometry_intersects(*args)
    def crosses(*args): return _geos.Geometry_crosses(*args)
    def within(*args): return _geos.Geometry_within(*args)
    def contains(*args): return _geos.Geometry_contains(*args)
    def overlaps(*args): return _geos.Geometry_overlaps(*args)
    def equals(*args): return _geos.Geometry_equals(*args)
    def equalsExact(*args): return _geos.Geometry_equalsExact(*args)
    def isEmpty(*args): return _geos.Geometry_isEmpty(*args)
    def isValid(*args): return _geos.Geometry_isValid(*args)
    def isSimple(*args): return _geos.Geometry_isSimple(*args)
    def isRing(*args): return _geos.Geometry_isRing(*args)
    def hasZ(*args): return _geos.Geometry_hasZ(*args)
    def area(*args): return _geos.Geometry_area(*args)
    def length(*args): return _geos.Geometry_length(*args)
    def distance(*args): return _geos.Geometry_distance(*args)
Geometry_swigregister = _geos.Geometry_swigregister
Geometry_swigregister(Geometry)
class Point(Geometry):
    """SWIG proxy for the GEOS ``Point`` geometry (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_Point
    __del__ = lambda self : None;
    def getCoordSeq(*args): return _geos.Point_getCoordSeq(*args)
Point_swigregister = _geos.Point_swigregister
Point_swigregister(Point)
class LineString(Geometry):
    """SWIG proxy for the GEOS ``LineString`` geometry (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_LineString
    __del__ = lambda self : None;
    def getCoordSeq(*args): return _geos.LineString_getCoordSeq(*args)
LineString_swigregister = _geos.LineString_swigregister
LineString_swigregister(LineString)
class LinearRing(Geometry):
    """SWIG proxy for the GEOS ``LinearRing`` geometry (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_LinearRing
    __del__ = lambda self : None;
    def getCoordSeq(*args): return _geos.LinearRing_getCoordSeq(*args)
LinearRing_swigregister = _geos.LinearRing_swigregister
LinearRing_swigregister(LinearRing)
class Polygon(Geometry):
    """SWIG proxy for the GEOS ``Polygon`` geometry, exposing its exterior
    shell and interior rings (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_Polygon
    __del__ = lambda self : None;
    def getExteriorRing(*args): return _geos.Polygon_getExteriorRing(*args)
    def getNumInteriorRings(*args): return _geos.Polygon_getNumInteriorRings(*args)
    def getInteriorRingN(*args): return _geos.Polygon_getInteriorRingN(*args)
Polygon_swigregister = _geos.Polygon_swigregister
Polygon_swigregister(Polygon)
class GeometryCollection(Geometry):
    """SWIG proxy for the GEOS ``GeometryCollection`` type (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_GeometryCollection
    __del__ = lambda self : None;
    def getGeometryN(*args): return _geos.GeometryCollection_getGeometryN(*args)
GeometryCollection_swigregister = _geos.GeometryCollection_swigregister
GeometryCollection_swigregister(GeometryCollection)
class MultiPoint(GeometryCollection):
    """SWIG proxy for the GEOS ``MultiPoint`` geometry (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_MultiPoint
    __del__ = lambda self : None;
MultiPoint_swigregister = _geos.MultiPoint_swigregister
MultiPoint_swigregister(MultiPoint)
class MultiLineString(GeometryCollection):
    """SWIG proxy for the GEOS ``MultiLineString`` geometry (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_MultiLineString
    __del__ = lambda self : None;
MultiLineString_swigregister = _geos.MultiLineString_swigregister
MultiLineString_swigregister(MultiLineString)
class MultiLinearRing(GeometryCollection):
    """SWIG proxy for the GEOS ``MultiLinearRing`` geometry (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_MultiLinearRing
    __del__ = lambda self : None;
MultiLinearRing_swigregister = _geos.MultiLinearRing_swigregister
MultiLinearRing_swigregister(MultiLinearRing)
class MultiPolygon(GeometryCollection):
    """SWIG proxy for the GEOS ``MultiPolygon`` geometry (generated code)."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _geos.delete_MultiPolygon
    __del__ = lambda self : None;
MultiPolygon_swigregister = _geos.MultiPolygon_swigregister
MultiPolygon_swigregister(MultiPolygon)
# Module-level factory and (de)serialization functions re-exported from the
# C extension: geometry construction, WKT/WKB/HEX conversion and WKB output
# settings.
createPoint = _geos.createPoint
createLineString = _geos.createLineString
createLinearRing = _geos.createLinearRing
createPolygon = _geos.createPolygon
geomFromWKT = _geos.geomFromWKT
geomToWKT = _geos.geomToWKT
getWKBOutputDims = _geos.getWKBOutputDims
setWKBOutputDims = _geos.setWKBOutputDims
getWKBByteOrder = _geos.getWKBByteOrder
setWKBByteOrder = _geos.setWKBByteOrder
geomFromWKB = _geos.geomFromWKB
geomToWKB = _geos.geomToWKB
geomFromHEX = _geos.geomFromHEX
geomToHEX = _geos.geomToHEX
|
<gh_stars>10-100
# Copyright (C) 2015 <NAME>
# All rights reserved.
from __future__ import print_function, absolute_import
__author__ = '<NAME> <<EMAIL>(at)<EMAIL>.<EMAIL>>'
__version__ = '0.1-dev'
import c4d
class Channel(object):
    ''' Wrapper for a Procedural Channel Tag.

    Wraps a `c4d.BaseTag` whose type is `c4d.PROCEDURAL_CHANNEL_ID` and
    exposes its parameters (type, count, item length, frames, ...) through
    plain Python accessors on `self.op`.
    '''

    def __init__(self, op):
        super(Channel, self).__init__()
        assert op.GetType() == c4d.PROCEDURAL_CHANNEL_ID
        self.op = op

    def __repr__(self):
        # Human-readable summary including the channel's element type name.
        type_name = {
            c4d.PROCEDURAL_CHANNEL_TYPE_NIL: 'Nil',
            c4d.PROCEDURAL_CHANNEL_TYPE_INTEGER: 'Integer',
            c4d.PROCEDURAL_CHANNEL_TYPE_FLOAT: 'Float',
            c4d.PROCEDURAL_CHANNEL_TYPE_VECTOR: 'Vector',
            c4d.PROCEDURAL_CHANNEL_TYPE_MATRIX: 'Matrix',
            c4d.PROCEDURAL_CHANNEL_TYPE_STRING: 'String',
        }.get(self.GetChannelType(), '???')
        fmt = '<{0} Channel {1!r} count={2} item_length={3} frame_count={4} frame={5}>'
        return fmt.format(type_name, self.GetName(), self.GetCount(),
            self.GetItemLength(), self.GetFrameCount(), self.GetFrame())

    def GetName(self):
        return self.op.GetName()

    def SetName(self, name):
        self.op.SetName(name)

    def Message(self, mid, data=None):
        # Forward messages directly to the wrapped tag.
        return self.op.Message(mid, data)

    def GetChannelDirty(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_DIRTYCOUNT]

    def GetChannelState(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_STATE]

    def GetChannelMode(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_MODE]

    def SetChannelMode(self, mode):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_MODE] = mode

    def GetChannelType(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_TYPE]

    def SetChannelType(self, type_):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_TYPE] = type_

    def Check(self, type_, item_length=None):
        ''' Return True if the channel has the given element type and,
        if specified, the given item length. '''
        if self.GetChannelType() != type_:
            return False
        if item_length is not None and self.GetItemLength() != item_length:
            return False
        return True

    def GetCount(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_COUNT]

    def SetCount(self, count):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_COUNT] = count

    def GetItemLength(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_ITEMLENGTH]

    def SetItemLength(self, length):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_ITEMLENGTH] = length

    def GetFrameCount(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_FRAMECOUNT]

    def SetFrameCount(self, count):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_FRAMECOUNT] = count

    def GetFrame(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_FRAME]

    def SetFrame(self, frame):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_FRAME] = frame

    def GetSyncFrame(self):
        # BUGFIX: the constant was previously referenced without the
        # ``c4d.`` prefix, which raised a NameError on every call.
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_SYNCFRAME]

    def SetSyncFrame(self, sync_frame):
        # BUGFIX: previously assigned the undefined name ``frame`` instead
        # of the ``sync_frame`` parameter.
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_SYNCFRAME] = sync_frame

    def GetFrameOffset(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_FRAMEOFFSET]

    def SetFrameOffset(self, frame_offset):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_FRAMEOFFSET] = frame_offset

    def GetFrameOffsetIndex(self):
        ''' Flat-buffer index of the first element of the current frame. '''
        frame_count = self.GetFrameCount()
        frame = self.GetFrame() % frame_count
        # Defensive: Python's % already yields a non-negative result for a
        # positive frame_count, so this only matters for negative counts.
        if frame < 0:
            frame += frame_count
        return frame * self.GetCount() * self.GetItemLength()

    def GetRefChannelName(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_REF]

    def SetRefChannelName(self, name):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_REF] = name

    def GetLocked(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_LOCKED]

    def SetLocked(self, locked):
        self.op[c4d.PROCEDURAL_CHANNEL_PARAM_LOCKED] = bool(locked)

    def GetDataHandle(self):
        return self.op[c4d.PROCEDURAL_CHANNEL_PARAM_HANDLE]

    def _MkDescid(self, index, subindex):
        # DescID levels are 1-based in the tag's data description.
        return c4d.DescID(
            c4d.DescLevel(c4d.PROCEDURAL_CHANNEL_PARAM_DATA),
            c4d.DescLevel(index + 1),
            c4d.DescLevel(subindex + 1))

    def GetElement(self, index, subindex=None):
        ''' GetElement(index, [subindex]) -> item or list of items

        Returns the element at the specified *index* which is always
        a list if *subindex* is not specified. If *subindex* is
        specified, returns only the item at the subindex.

        Raises:
            IndexError: If the element could not be read. '''
        length = self.GetItemLength()
        if subindex is None:
            items = [None] * length
            # range (not xrange) keeps this working on Python 2 and 3.
            for i in range(length):
                descid = self._MkDescid(index, i)
                items[i] = self.op[descid]
                if items[i] is None:
                    if i != 0:
                        # Strange, we were able to access some other element
                        # before this one.
                        raise RuntimeError('strange behaviour, investigate')
                    raise IndexError('index {0} out of bounds'.format(index))
            return items
        else:
            if subindex < 0 or subindex >= length:
                raise IndexError('subindex {0} out of bounds'.format(subindex))
            descid = self._MkDescid(index, subindex)
            result = self.op[descid]
            if result is None:
                raise IndexError('index {0} out of bounds'.format(index))
            return result

    def SetElement(self, index, subindex, value):
        ''' SetElement(index, subindex, value)

        Sets an element at the specified subindex. '''
        length = self.GetItemLength()
        if subindex < 0 or subindex >= length:
            # BUGFIX: the message previously formatted ``index`` instead of
            # the offending ``subindex``.
            raise IndexError('subindex {0} out of bounds'.format(subindex))
        descid = self._MkDescid(index, subindex)
        self.op[descid] = value

    def Reinitialize(self):
        self.op.Message(c4d.MSG_PROCEDURAL_CHANNEL_REINIT)

    @staticmethod
    def Find(ref, name, type_=None, item_length=None):
        ''' Finds the Channel Tag with the specified *name* starting from
        the reference channel *ref*. Simply pass the first tag of an object
        if you have no channel to start searching from.

        Args:
            ref: A `c4d.BaseObject`, `c4d.BaseTag` or `Channel` instance.
                If a tag or channel is passed, the original channel will be
                ignored for the search.
        '''
        allow_self = False
        if isinstance(ref, Channel):
            ref = ref.op
        elif isinstance(ref, c4d.BaseObject):
            ref = ref.GetFirstTag()
            allow_self = True
        elif not isinstance(ref, c4d.BaseTag):
            raise TypeError('expected BaseObject, BaseTag or Channel', type(ref))

        def check(curr):
            # A match must be a channel tag with the right name and, when
            # requested, the right element type and item length.
            if curr.GetType() != c4d.PROCEDURAL_CHANNEL_ID: return False
            if curr.GetName() != name: return False
            curr = Channel(curr)
            if type_ is not None and curr.GetChannelType() != type_: return False
            if item_length is not None and item_length != curr.GetItemLength():
                return False
            return True

        # Search backwards from the reference tag first ...
        curr = ref.GetPred()
        while curr:
            if check(curr):
                return Channel(curr)
            curr = curr.GetPred()
        # ... then forwards (including ref itself only when the search was
        # started from a BaseObject's first tag).
        if allow_self:
            curr = ref
        else:
            curr = ref.GetNext()
        while curr:
            if check(curr):
                return Channel(curr)
            curr = curr.GetNext()
        return None
|
<reponame>ShenQianwithC/HistomicsTK
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import numpy as np
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
from histomicstk.features import compute_global_cell_graph_features as cgcgf
class GlobalCellGraphFeaturesTest(unittest.TestCase):
    """Regression test for compute_global_cell_graph_features.

    The expected values below are pinned regression constants for a small
    hand-placed point set; they are compared column-by-column against the
    one-row DataFrame returned by the feature computation.
    """

    def testSimple(self):
        # Four unit-square-like corner points plus two interior points on
        # the main diagonal.
        data = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1], [-.5, -.5], [.5, .5]])
        # neighbor_distances -> density_neighbors_in_distance_{0..4} columns,
        # neighbor_counts -> density_distance_for_neighbors_{0,1} columns.
        actual = cgcgf(data, neighbor_distances=0.7 * np.arange(1, 6), neighbor_counts=(3, 5))
        expected = DataFrame(dict(
            delaunay_area_disorder=0.261203874964,
            delaunay_area_mean=0.666666666667,
            delaunay_area_min_max_ratio=0.5,
            delaunay_area_stddev=0.235702260396,
            delaunay_sides_disorder=0.224914917312,
            delaunay_sides_mean=1.55936204046,
            delaunay_sides_min_max_ratio=0.353553390593,
            delaunay_sides_stddev=0.452497141571,
            density_distance_for_neighbors_0_disorder=0.0959519464365,
            density_distance_for_neighbors_0_mean=1.86037961003,
            density_distance_for_neighbors_0_min_max_ratio=0.790569415042,
            density_distance_for_neighbors_0_stddev=0.197453049082,
            density_distance_for_neighbors_1_disorder=0.113918900724,
            density_distance_for_neighbors_1_mean=2.59272486435,
            density_distance_for_neighbors_1_min_max_ratio=0.75,
            density_distance_for_neighbors_1_stddev=0.333333333333,
            # NaNs below: disorder/min-max ratio are undefined when no point
            # has a neighbor within the smallest distance.
            density_neighbors_in_distance_0_disorder=np.nan,
            density_neighbors_in_distance_0_mean=0.0,
            density_neighbors_in_distance_0_min_max_ratio=np.nan,
            density_neighbors_in_distance_0_stddev=0.0,
            density_neighbors_in_distance_1_disorder=0.414213562373,
            density_neighbors_in_distance_1_mean=0.666666666667,
            density_neighbors_in_distance_1_min_max_ratio=0.0,
            density_neighbors_in_distance_1_stddev=0.471404520791,
            density_neighbors_in_distance_2_disorder=0.113918900724,
            density_neighbors_in_distance_2_mean=3.66666666667,
            density_neighbors_in_distance_2_min_max_ratio=0.75,
            density_neighbors_in_distance_2_stddev=0.471404520791,
            density_neighbors_in_distance_3_disorder=0.0981124329991,
            density_neighbors_in_distance_3_mean=4.33333333333,
            density_neighbors_in_distance_3_min_max_ratio=0.8,
            density_neighbors_in_distance_3_stddev=0.471404520791,
            density_neighbors_in_distance_4_disorder=0.0,
            density_neighbors_in_distance_4_mean=5.0,
            density_neighbors_in_distance_4_min_max_ratio=1.0,
            density_neighbors_in_distance_4_stddev=0.0,
            mst_branches_disorder=0.252878170232,
            mst_branches_mean=1.19814095698,
            mst_branches_min_max_ratio=0.4472135955,
            mst_branches_stddev=0.405534520356,
            voronoi_area_disorder=0.0,
            voronoi_area_mean=1.6875,
            voronoi_area_min_max_ratio=1.0,
            voronoi_area_stddev=0.0,
            voronoi_max_dist_disorder=0.0,
            voronoi_max_dist_mean=2.12132034356,
            voronoi_max_dist_min_max_ratio=1.0,
            voronoi_max_dist_stddev=0.0,
            voronoi_peri_disorder=0.0,
            voronoi_peri_mean=5.55368876047,
            voronoi_peri_min_max_ratio=1.0,
            voronoi_peri_stddev=0.0,
        ), index=[0])
        # check_like=True: ignore column order, compare by label.
        assert_frame_equal(actual, expected, check_like=True)
|
<filename>losses/losses.py
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision import models
# --- Perceptual Loss --- #
class Perceptual(torch.nn.Module):
    """VGG16 feature-matching (perceptual) loss.

    Compares the relu1_2 / relu2_2 / relu3_3 activations of the prediction
    and the ground truth with MSE and averages the three terms.

    NOTE(review): the backbone is moved to CUDA unconditionally, so a GPU
    is required at construction time -- confirm this is intended.
    """

    def __init__(self):
        super(Perceptual, self).__init__()
        # First 16 layers of VGG16 cover everything up to relu3_3.
        backbone = models.vgg16(pretrained=True).features[:16].cuda()
        for weight in backbone.parameters():
            weight.requires_grad = False  # frozen feature extractor
        self.vgg_layers = backbone
        # Indices (as string keys of the Sequential) of the tapped layers.
        self.layer_name_mapping = {
            '3': "relu1_2",
            '8': "relu2_2",
            '15': "relu3_3"
        }

    def output_features(self, x):
        """Run *x* through the backbone and collect the tapped activations."""
        taps = []
        for layer_name, layer in self.vgg_layers._modules.items():
            x = layer(x)
            if layer_name in self.layer_name_mapping:
                taps.append(x)
        return taps

    def forward(self, pred_im, gt):
        pred_feats = self.output_features(pred_im)
        gt_feats = self.output_features(gt)
        terms = [F.mse_loss(p, g) for p, g in zip(pred_feats, gt_feats)]
        return sum(terms) / len(terms)
# --- Relative L1 loss --- #
def l1_relative(reconstructed, real, mask):
    """Masked L1 loss, normalised per sample by the masked area.

    For each sample the absolute error is summed over the masked region and
    divided by the number of active mask elements; the result is averaged
    over the batch.  Assumes mask has the same shape as the images and each
    sample has a non-empty mask (area > 0).
    """
    batch = real.size(0)
    # Per-sample count of active mask elements, used as the normaliser.
    area = torch.sum(mask.view(batch, -1), dim=1)
    abs_err = torch.abs(reconstructed * mask - real * mask).view(batch, -1)
    per_sample = torch.sum(abs_err, dim=1) / area
    return torch.sum(per_sample) / batch
# --- Charbonnier Loss (L1) --- #
class CharbonnierLoss(torch.nn.Module):
    """Charbonnier penalty: mean(sqrt(diff^2 + eps^2)).

    A differentiable smooth-L1-style loss; eps keeps the gradient finite
    at zero residual.
    """

    def __init__(self, eps=1e-3):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps

    def forward(self, x, y):
        residual = x - y
        return torch.mean(torch.sqrt(residual * residual + self.eps * self.eps))
# --- Edge Loss --- #
class EdgeLoss(torch.nn.Module):
    """Edge loss: Charbonnier distance between Laplacian-pyramid residuals.

    Each image is blurred with a 5x5 binomial kernel, downsampled by 2,
    upsampled back (zero insertion + blur), and the high-frequency residual
    of prediction and target are compared with a Charbonnier penalty.
    Expects 3-channel inputs (the kernel is replicated for 3 channels).
    """

    def __init__(self):
        super(EdgeLoss, self).__init__()
        # Separable 5-tap binomial (Gaussian-like) kernel, expanded to a
        # 5x5 depthwise filter over 3 channels.
        k = torch.Tensor([[.05, .25, .4, .25, .05]])
        self.kernel = torch.matmul(k.t(), k).unsqueeze(0).repeat(3, 1, 1, 1)
        if torch.cuda.is_available():
            self.kernel = self.kernel.cuda()
        self.loss = CharbonnierLoss()

    def conv_gauss(self, img):
        """Depthwise Gaussian blur with replicate padding (shape-preserving)."""
        n_channels, _, kw, kh = self.kernel.shape
        img = F.pad(img, (kw // 2, kh // 2, kw // 2, kh // 2), mode='replicate')
        return F.conv2d(img, self.kernel, groups=n_channels)

    def laplacian_kernel(self, current):
        """Return the high-frequency (Laplacian) residual of *current*."""
        filtered = self.conv_gauss(current)  # low-pass filter
        # BUGFIX: the stride expressions here had been corrupted into an
        # IPv6-like token ("fdf8:f53e:61e4::18"), which is a syntax error.
        # Restored the standard ::2 downsample/upsample of a Laplacian
        # pyramid level.
        down = filtered[:, :, ::2, ::2]           # downsample by 2
        new_filter = torch.zeros_like(filtered)
        new_filter[:, :, ::2, ::2] = down * 4     # upsample by zero insertion
        filtered = self.conv_gauss(new_filter)    # smooth the upsampled image
        diff = current - filtered                 # high-frequency residual
        return diff

    def forward(self, x, y):
        loss = self.loss(self.laplacian_kernel(x), self.laplacian_kernel(y))
        return loss
# --- TV Loss --- #
def total_variation_loss(image):
    """Anisotropic total-variation loss on a (B, C, H, W) image batch.

    Mean squared difference of horizontally adjacent pixels plus mean
    squared difference of vertically adjacent pixels.
    """
    horizontal = image[:, :, :, :-1] - image[:, :, :, 1:]
    vertical = image[:, :, :-1, :] - image[:, :, 1:, :]
    return torch.mean(torch.pow(horizontal, 2)) + torch.mean(torch.pow(vertical, 2))
# --- Contrast Loss --- #
class Vgg19(torch.nn.Module):
    """Pretrained VGG19 feature extractor split into five sequential slices.

    The feature stack is partitioned at layer indices 2, 7, 12, 21 and 30;
    forward() returns the intermediate activation after each slice.
    """

    # Half-open layer ranges of the five slices within vgg19.features.
    _SLICE_BOUNDS = ((0, 2), (2, 7), (7, 12), (12, 21), (21, 30))

    def __init__(self, requires_grad=False):
        super(Vgg19, self).__init__()
        features = models.vgg19(pretrained=True).features
        for idx, (start, stop) in enumerate(self._SLICE_BOUNDS, start=1):
            block = torch.nn.Sequential()
            for layer in range(start, stop):
                # Keep the original layer index as the module name.
                block.add_module(str(layer), features[layer])
            setattr(self, 'slice%d' % idx, block)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False  # frozen feature extractor

    def forward(self, X):
        activations = []
        h = X
        for idx in range(1, 6):
            h = getattr(self, 'slice%d' % idx)(h)
            activations.append(h)
        return activations
class ContrastLoss(torch.nn.Module):
    """Contrastive perceptual loss over VGG19 features.

    Pulls the anchor *a* towards the positive *p* and, unless ablated,
    pushes it away from the negative *n* by dividing the anchor-positive
    L1 distance by the anchor-negative distance at each feature level.

    NOTE(review): the VGG backbone is moved to CUDA unconditionally.
    """

    def __init__(self, ablation=False):
        super(ContrastLoss, self).__init__()
        self.vgg = Vgg19().cuda()
        self.l1 = torch.nn.L1Loss()
        # Deeper feature levels get larger weights.
        self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
        self.ab = ablation

    def forward(self, a, p, n):
        anchor_feats = self.vgg(a)
        pos_feats = self.vgg(p)
        neg_feats = self.vgg(n)
        total = 0
        for weight, anc, pos, neg in zip(self.weights, anchor_feats, pos_feats, neg_feats):
            d_ap = self.l1(anc, pos.detach())
            if self.ab:
                contrastive = d_ap
            else:
                d_an = self.l1(anc, neg.detach())
                contrastive = d_ap / (d_an + 1e-7)  # eps avoids divide-by-zero
            total += weight * contrastive
        return total
def dice_loss(prediction, target):
    """Soft Dice loss between a predicted image and its target.

    Both tensors are flattened; smooth=1.0 (Laplace smoothing) keeps the
    ratio defined when both inputs are empty.  Returns 1 - Dice, so 0 is a
    perfect match.
    """
    smooth = 1.0
    pred_flat = prediction.view(-1)
    target_flat = target.view(-1)
    overlap = (pred_flat * target_flat).sum()
    dice = (2. * overlap + smooth) / (pred_flat.sum() + target_flat.sum() + smooth)
    return 1 - dice
class WeightedL1Loss(nn.Module):
    """L1 loss with element weight (1 + alpha * target).

    Targets are expected in [0.0 (negative), 1.0 (positive)].  alpha == 0
    reduces to plain L1; larger alpha emphasises positive labels, which is
    useful when the masks are mostly negative.
    """

    def __init__(self, alpha=1.0):
        super().__init__()
        assert alpha >= 0
        self.alpha = alpha

    def forward(self, output, target):
        # Positive targets receive the larger weight.
        weights = 1.0 + self.alpha * target
        return (torch.abs(output - target) * weights).mean()
class WeightedCrossEntropyLoss(nn.Module):
    """
    Class-balanced binary cross-entropy used in DSDNet.
    Distraction-aware Shadow Detection (CVPR2019)
    https://quanlzheng.github.io/projects/Distraction-aware-Shadow-Detection.html

    Positives are up-weighted by the negative/positive count ratio and the
    whole loss is rescaled by the positive fraction, following the original
    DSDNet formulation.
    """

    def __init__(self):
        super().__init__()
        # Kept for interface parity; forward() builds its own weighted loss.
        self.bc = nn.BCEWithLogitsLoss(reduction='none')

    def forward(self, output, target):
        epsilon = 1e-10  # guards the division when there are no positives
        n_pos = torch.sum(target) * 1.0 + epsilon
        n_neg = torch.sum(1.0 - target) * 1.0
        pos_weight = n_neg / n_pos           # up-weight the rare positives
        scale = n_pos / (n_pos + n_neg)      # positive fraction of the batch
        weighted_bce = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
        return scale * weighted_bce(output, target)
|
<reponame>starsep/NewsBlur<filename>apps/rss_feeds/migrations/0001_initial.py
from south.db import db
from django.db import models
from apps.rss_feeds.models import *
class Migration:
    """Initial South schema migration for the rss_feeds app.

    ``forwards`` creates the Feed, Tag, FeedPage, FeedUpdateHistory, Story,
    FeedXML and StoryAuthor tables plus the Story<->Tag M2M join table;
    ``backwards`` drops them all.  The ``models`` dict is South's frozen
    snapshot of the ORM at the time this migration was generated.
    """

    def forwards(self, orm):
        # Adding model 'Feed' (custom table name 'feeds', see Meta below)
        db.create_table('feeds', (
            ('id', orm['rss_feeds.Feed:id']),
            ('feed_address', orm['rss_feeds.Feed:feed_address']),
            ('feed_link', orm['rss_feeds.Feed:feed_link']),
            ('feed_title', orm['rss_feeds.Feed:feed_title']),
            ('feed_tagline', orm['rss_feeds.Feed:feed_tagline']),
            ('active', orm['rss_feeds.Feed:active']),
            ('num_subscribers', orm['rss_feeds.Feed:num_subscribers']),
            ('last_update', orm['rss_feeds.Feed:last_update']),
            ('min_to_decay', orm['rss_feeds.Feed:min_to_decay']),
            ('days_to_trim', orm['rss_feeds.Feed:days_to_trim']),
            ('creation', orm['rss_feeds.Feed:creation']),
            ('etag', orm['rss_feeds.Feed:etag']),
            ('last_modified', orm['rss_feeds.Feed:last_modified']),
            ('page_data', orm['rss_feeds.Feed:page_data']),
            ('stories_per_month', orm['rss_feeds.Feed:stories_per_month']),
            ('next_scheduled_update', orm['rss_feeds.Feed:next_scheduled_update']),
            ('last_load_time', orm['rss_feeds.Feed:last_load_time']),
        ))
        db.send_create_signal('rss_feeds', ['Feed'])

        # Adding model 'Tag'
        db.create_table('rss_feeds_tag', (
            ('id', orm['rss_feeds.Tag:id']),
            ('feed', orm['rss_feeds.Tag:feed']),
            ('name', orm['rss_feeds.Tag:name']),
        ))
        db.send_create_signal('rss_feeds', ['Tag'])

        # Adding model 'FeedPage'
        db.create_table('rss_feeds_feedpage', (
            ('id', orm['rss_feeds.FeedPage:id']),
            ('feed', orm['rss_feeds.FeedPage:feed']),
            ('page_data', orm['rss_feeds.FeedPage:page_data']),
        ))
        db.send_create_signal('rss_feeds', ['FeedPage'])

        # Adding model 'FeedUpdateHistory'
        db.create_table('rss_feeds_feedupdatehistory', (
            ('id', orm['rss_feeds.FeedUpdateHistory:id']),
            ('fetch_date', orm['rss_feeds.FeedUpdateHistory:fetch_date']),
            ('number_of_feeds', orm['rss_feeds.FeedUpdateHistory:number_of_feeds']),
            ('seconds_taken', orm['rss_feeds.FeedUpdateHistory:seconds_taken']),
            ('average_per_feed', orm['rss_feeds.FeedUpdateHistory:average_per_feed']),
        ))
        db.send_create_signal('rss_feeds', ['FeedUpdateHistory'])

        # Adding model 'Story' (custom table name 'stories', see Meta below)
        db.create_table('stories', (
            ('id', orm['rss_feeds.Story:id']),
            ('story_feed', orm['rss_feeds.Story:story_feed']),
            ('story_date', orm['rss_feeds.Story:story_date']),
            ('story_title', orm['rss_feeds.Story:story_title']),
            ('story_content', orm['rss_feeds.Story:story_content']),
            ('story_original_content', orm['rss_feeds.Story:story_original_content']),
            ('story_content_type', orm['rss_feeds.Story:story_content_type']),
            ('story_author', orm['rss_feeds.Story:story_author']),
            ('story_permalink', orm['rss_feeds.Story:story_permalink']),
            ('story_guid', orm['rss_feeds.Story:story_guid']),
            ('story_guid_hash', orm['rss_feeds.Story:story_guid_hash']),
            ('story_past_trim_date', orm['rss_feeds.Story:story_past_trim_date']),
            ('story_tags', orm['rss_feeds.Story:story_tags']),
        ))
        db.send_create_signal('rss_feeds', ['Story'])

        # Adding model 'FeedXML'
        db.create_table('rss_feeds_feedxml', (
            ('id', orm['rss_feeds.FeedXML:id']),
            ('feed', orm['rss_feeds.FeedXML:feed']),
            ('rss_xml', orm['rss_feeds.FeedXML:rss_xml']),
        ))
        db.send_create_signal('rss_feeds', ['FeedXML'])

        # Adding model 'StoryAuthor'
        db.create_table('rss_feeds_storyauthor', (
            ('id', orm['rss_feeds.StoryAuthor:id']),
            ('feed', orm['rss_feeds.StoryAuthor:feed']),
            ('author_name', orm['rss_feeds.StoryAuthor:author_name']),
        ))
        db.send_create_signal('rss_feeds', ['StoryAuthor'])

        # Adding ManyToManyField 'Story.tags' (explicit join table)
        db.create_table('stories_tags', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('story', models.ForeignKey(orm.Story, null=False)),
            ('tag', models.ForeignKey(orm.Tag, null=False))
        ))

    def backwards(self, orm):
        # Deleting model 'Feed'
        db.delete_table('feeds')

        # Deleting model 'Tag'
        db.delete_table('rss_feeds_tag')

        # Deleting model 'FeedPage'
        db.delete_table('rss_feeds_feedpage')

        # Deleting model 'FeedUpdateHistory'
        db.delete_table('rss_feeds_feedupdatehistory')

        # Deleting model 'Story'
        db.delete_table('stories')

        # Deleting model 'FeedXML'
        db.delete_table('rss_feeds_feedxml')

        # Deleting model 'StoryAuthor'
        db.delete_table('rss_feeds_storyauthor')

        # Dropping ManyToManyField 'Story.tags'
        db.delete_table('stories_tags')

    # Frozen ORM snapshot used by South to reconstruct the models at the
    # time of this migration.  Do not edit by hand.
    models = {
        'rss_feeds.feed': {
            'Meta': {'db_table': "'feeds'"},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
            'etag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
            'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
            'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
            'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_update': ('django.db.models.fields.DateTimeField', [], {'default': '0', 'auto_now': 'True', 'blank': 'True'}),
            'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
            'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'page_data': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
            'stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'rss_feeds.feedpage': {
            'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_page'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page_data': ('StoryField', [], {'null': 'True', 'blank': 'True'})
        },
        'rss_feeds.feedupdatehistory': {
            'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
            'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
            'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
        },
        'rss_feeds.feedxml': {
            'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_xml'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rss_xml': ('StoryField', [], {'null': 'True', 'blank': 'True'})
        },
        'rss_feeds.story': {
            'Meta': {'db_table': "'stories'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
            'story_content': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
            'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'story_date': ('django.db.models.fields.DateTimeField', [], {}),
            'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
            'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'story_original_content': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
            'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rss_feeds.Tag']"})
        },
        'rss_feeds.storyauthor': {
            'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'rss_feeds.tag': {
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['rss_feeds']
|
<reponame>JohnsonLee98/hover_1<gh_stars>0
# -*- coding: utf-8 -*-
import importlib
import random
import cv2
import numpy as np
import tensorflow as tf
from tensorpack import imgaug
from loader.augs import (BinarizeLabel, GaussianBlur, GenInstanceDistance,
GenInstanceHV, MedianBlur, GenInstanceUnetMap,
GenInstanceContourMap)
####
class Config(object):
    """Training/inference configuration for HoVer-Net and related models.

    Holds dataset paths, model selection, augmentation pipelines and
    checkpoint/inference locations.  Model-specific options are merged in
    dynamically from the ``opt.hover`` / ``opt.other`` modules.
    """

    def __init__(self, ):
        self.seed = 10
        mode = 'hover'
        self.model_type = 'np_hv'

        self.type_classification = True  # whether to predict the nuclear type
        # ! must use CoNSeP dataset, where nuclear type labels are available

        # denotes number of classes for nuclear type classification,
        # plus the background class
        self.nr_types = 5
        # ! some semantic segmentation network like micronet,
        # ! nr_types will replace nr_classes if type_classification=True
        self.nr_classes = 2  # Nuclei Pixels vs Background

        # define your nuclei type name here, please ensure it contains
        # same the amount as defined in `self.nr_types` . ID 0 is preserved
        # for background so please don't use it as ID
        self.nuclei_type_dict = {
            'Miscellaneous': 1,  # ! Please ensure the matching ID is unique
            'Inflammatory' : 2,
            'Epithelial'   : 3,
            'Spindle'      : 4,
        }
        assert len(self.nuclei_type_dict.values()) == self.nr_types - 1

        #### Dynamically setting the config file into variable
        if mode == 'hover':
            config_file = importlib.import_module('opt.hover')  # np_hv, np_dist
        else:
            config_file = importlib.import_module('opt.other')  # fcn8, dcan, etc.
        config_dict = config_file.__getattribute__(self.model_type)

        for variable, value in config_dict.items():
            self.__setattr__(variable, value)

        #### Training data
        # patches are stored as numpy arrays with N channels
        # ordering as [Image][Nuclei Pixels][Nuclei Type][Additional Map]
        # Ex: with type_classification=True
        # HoVer-Net: RGB - Nuclei Pixels - Type Map - Horizontal and Vertical Map
        # Ex: with type_classification=False
        # Dist     : RGB - Nuclei Pixels - Distance Map
        data_code_dict = {
            'unet'     : '536x536_84x84',
            'dist'     : '536x536_84x84',
            'fcn8'     : '512x512_256x256',
            'dcan'     : '512x512_256x256',
            'segnet'   : '512x512_256x256',
            'micronet' : '504x504_252x252',
            'np_hv'    : '540x540_80x80',
            'np_dist'  : '540x540_80x80',
        }
        self.data_ext = 'npy'
        # list of directories containing validation patches.
        # For both train and valid directories, a comma separated list of directories can be used
        # self.train_dir = ['../../CoNSeP/Train/%s/' % data_code_dict[self.model_type]]
        # self.valid_dir = ['../../CoNSeP/Valid/%s/' % data_code_dict[self.model_type]]
        self.train_dir = ['../../con/train/']
        self.valid_dir = ['../../con/test/']
        # self.valid_dir = ['../../CoNSeP//Image' % data_code_dict[self.model_type]]

        # number of processes for parallel processing input
        self.nr_procs_train = 8
        self.nr_procs_valid = 4

        self.input_norm = True  # normalize RGB to 0-1 range

        ####
        exp_id = 'v1.2/'
        model_id = '%s' % self.model_type
        self.model_name = '%s/%s' % (exp_id, model_id)
        # loading chkpts in tensorflow, the path must not contain extra '/'
        self.log_path = './logs/'  # log root path - modify according to needs
        self.save_dir = '%s/%s' % (self.log_path, self.model_name)  # log file destination

        #### Info for running inference
        self.inf_auto_find_chkpt = True
        # path to checkpoints will be used for inference, replace accordingly
        self.inf_model_path = self.save_dir + '/model-19640.index'

        # output will have channel ordering as [Nuclei Type][Nuclei Pixels][Additional]
        # where [Nuclei Type] will be used for getting the type of each instance
        # while [Nuclei Pixels][Additional] will be used for extracting instances
        self.inf_imgs_ext = '.png'
        self.inf_data_dir = '../../CoNSeP/Test/Images/'
        self.inf_output_dir = 'output/%s/%s/' % (exp_id, model_id)

        # for inference during evalutaion mode i.e run by infer.py
        self.eval_inf_input_tensor_names = ['images']
        self.eval_inf_output_tensor_names = ['predmap-coded']
        # for inference during training mode i.e run by trainer.py
        self.train_inf_output_tensor_names = ['predmap-coded', 'truemap-coded']

    def get_model(self):
        """Return the model class (not an instance) for ``self.model_type``."""
        if self.model_type == 'np_hv':
            model_constructor = importlib.import_module('model.graph')
            model_constructor = model_constructor.Model_NP_HV
        elif self.model_type == 'np_dist':
            model_constructor = importlib.import_module('model.graph')
            model_constructor = model_constructor.Model_NP_DIST
        else:
            model_constructor = importlib.import_module('model.%s' % self.model_type)
            model_constructor = model_constructor.Graph
        return model_constructor  # NOTE return alias, not object

    def _get_label_augs(self, output_shape):
        """Return the target-map generator augs for the configured model type.

        Shared by the train and valid pipelines to keep them consistent.
        """
        if self.model_type == 'unet' or self.model_type == 'micronet':
            return [GenInstanceUnetMap(crop_shape=output_shape)]
        if self.model_type == 'dcan':
            return [GenInstanceContourMap(crop_shape=output_shape)]
        if self.model_type == 'dist':
            return [GenInstanceDistance(crop_shape=output_shape, inst_norm=False)]
        if self.model_type == 'np_hv':
            return [GenInstanceHV(crop_shape=output_shape)]
        if self.model_type == 'np_dist':
            return [GenInstanceDistance(crop_shape=output_shape, inst_norm=True)]
        return []

    # refer to https://tensorpack.readthedocs.io/modules/dataflow.imgaug.html for
    # information on how to modify the augmentation parameters
    def get_train_augmentors(self, input_shape, output_shape, view=False):
        """Return (shape_augs, input_augs, label_augs) for training."""
        print(input_shape, output_shape)
        shape_augs = [
            imgaug.Affine(
                shear=5,  # in degree
                scale=(0.8, 1.2),
                rotate_max_deg=179,
                translate_frac=(0.01, 0.01),
                interp=cv2.INTER_NEAREST,
                border=cv2.BORDER_CONSTANT),
            imgaug.Flip(vert=True),
            imgaug.Flip(horiz=True),
            imgaug.CenterCrop(input_shape),
        ]
        input_augs = [
            imgaug.RandomApplyAug(
                imgaug.RandomChooseAug(
                    [
                        GaussianBlur(),
                        MedianBlur(),
                        imgaug.GaussianNoise(),
                    ]
                ), 0.5),
            # standard color augmentation
            imgaug.RandomOrderAug(
                [imgaug.Hue((-8, 8), rgb=True),
                 imgaug.Saturation(0.2, rgb=True),
                 imgaug.Brightness(26, clip=True),
                 imgaug.Contrast((0.75, 1.25), clip=True),
                 ]),
            imgaug.ToUint8(),
        ]

        label_augs = self._get_label_augs(output_shape)
        # with type classification the raw type map is kept as the target
        if not self.type_classification:
            label_augs.append(BinarizeLabel())

        if not view:
            label_augs.append(imgaug.CenterCrop(output_shape))

        return shape_augs, input_augs, label_augs

    def get_valid_augmentors(self, input_shape, output_shape, view=False):
        """Return (shape_augs, input_augs, label_augs) for validation."""
        print(input_shape, output_shape)
        shape_augs = [
            imgaug.CenterCrop(input_shape),
        ]
        input_augs = None

        label_augs = self._get_label_augs(output_shape)
        label_augs.append(BinarizeLabel())

        if not view:
            label_augs.append(imgaug.CenterCrop(output_shape))

        return shape_augs, input_augs, label_augs
|
<gh_stars>1-10
# Managed settings file
import os
import re
from readthedocs.settings.base import CommunityBaseSettings
# Connection parameters for the three redis backends, parsed from
# "host:port/db" strings that are rendered in by the deployment templating.
_redis = {
    'default': dict(zip(['host', 'port', 'db'], re.split(':|/', '{{ rtd_redis_cache }}'))),
    'celery': dict(zip(['host', 'port', 'db'], re.split(':|/', '{{ rtd_redis_celery }}'))),
    'stats': dict(zip(['host', 'port', 'db'], re.split(':|/', '{{ rtd_redis_stats }}'))),
}
class CommunityProdSettings(CommunityBaseSettings):
"""Settings for local development"""
SERVE_DOCS = ['private']
PYTHON_MEDIA = True
PRODUCTION_DOMAIN = '{{ rtd_domain }}'
USE_SUBDOMAIN = False
PUBLIC_DOMAIN = '{{ PUBLIC_DOMAIN }}'
PUBLIC_API_URL = '{{ PUBLIC_API_URL }}'
GLOBAL_ANALYTICS_CODE = '{{ GLOBAL_ANALYTICS_CODE }}'
PUBLIC_DOMAIN_USES_HTTPS = '{{ rtd_proto }}' == 'https'
# default build versions
RTD_LATEST = 'bozza'
RTD_LATEST_VERBOSE_NAME = RTD_LATEST
RTD_STABLE = 'stabile'
RTD_STABLE_VERBOSE_NAME = RTD_STABLE
RTD_LATEST_EN = 'draft'
RTD_STABLE_EN = 'stable'
# General settings
DEBUG = {{ DEBUG }}
TEMPLATE_DEBUG = False
DOCS_BASE = os.environ.get('DOCS_BASE', CommunityBaseSettings.SITE_ROOT)
MEDIA_ROOT = os.path.join(DOCS_BASE, 'media/')
STATIC_ROOT = os.path.join(DOCS_BASE, 'media/static/')
MEDIA_URL = '{{ MEDIA_URL }}'
STATIC_URL = '{{ MEDIA_URL }}static/'
ADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/'
SECRET_KEY = '{{ SECRET_KEY }}'
DEFAULT_FROM_EMAIL = '{{ DEFAULT_FROM_EMAIL }}'
SESSION_COOKIE_DOMAIN = '{{ rtd_domain }}'
TAGGIT_TAGS_FROM_STRING = 'readthedocs.docsitalia.utils.docsitalia_parse_tags'
DOCROOT = os.path.join(DOCS_BASE, 'user_builds')
UPLOAD_ROOT = os.path.join(DOCS_BASE, 'user_uploads')
CNAME_ROOT = os.path.join(DOCS_BASE, 'cnames')
LOGS_ROOT = os.path.join(DOCS_BASE, 'logs')
PRODUCTION_ROOT = os.path.join(DOCS_BASE, 'prod_artifacts')
PUBLIC_BASE = DOCS_BASE
PRIVATE_BASE = DOCS_BASE
@property
def TEMPLATES(self): # noqa
TEMPLATES = super().TEMPLATES
TEMPLATE_OVERRIDES = os.path.join(super().TEMPLATE_ROOT, 'docsitalia', 'overrides')
TEMPLATES[0]['DIRS'].insert(0, TEMPLATE_OVERRIDES)
return TEMPLATES
@property
def INSTALLED_APPS(self): # noqa
apps = super(CommunityProdSettings, self).INSTALLED_APPS
# Insert our depends above RTD applications, after guaranteed third
# party package
apps.append('readthedocs.docsitalia')
apps.append('dal', )
apps.append('dal_select2', )
{% if USE_CONVERTER %}apps.insert(apps.index('rest_framework'), 'docs_italia_convertitore_web'){% endif %}
{% if SENTRY_DSN|string|length %}apps.insert(apps.index('rest_framework'), 'raven.contrib.django.raven_compat'){% endif %}
return apps
# Celery
CACHES = dict(
(cache_name, {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '{host}:{port}'.format(**cache),
'OPTIONS': {
'DB': cache['db'],
},
})
for (cache_name, cache)
in _redis.items()
if cache_name is not 'celery'
)
BROKER_URL = 'redis://{{ rtd_redis_celery }}'
CELERY_RESULT_BACKEND = 'redis://{{ rtd_redis_celery }}'
# Docker
DOCKER_SOCKET = 'tcp://{{ docker_main_ip }}:2375'
DOCKER_ENABLE = {{ DOCKER_ENABLE }}
DOCKER_IMAGE = '{{ docker_rtd_image }}'
DOCKER_VERSION = '1.33'
DOCKER_LIMITS = {
'memory': '999m',
'time': 3600,
}
{% if SENTRY_DSN|string|length %}
import raven
RAVEN_CONFIG = {
'dsn': '{{ SENTRY_DSN }}',
'release': raven.fetch_git_sha(CommunityBaseSettings.SITE_ROOT),
'environment': '{{ SENTRY_ENVIRONMENT }}'
}
{% endif %}
# Haystack - we don't really use it. ES API is used instead
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
CELERY_ALWAYS_EAGER = False
CELERY_HAYSTACK_DEFAULT_ALIAS = None
CELERY_TASK_RESULT_EXPIRES = 7200
# Elastic Search
ES_HOSTS = '{{ es_hosts }}'.split(',')
ELASTICSEARCH_DSL = {
'default': {
'hosts': ES_HOSTS
},
}
# RTD settings
# This goes together with FILE_SYNCER setting
# eg: FILE_SINCER = 'readthedocs.builds.syncers.*' (likely RemoteSyncer)
MULTIPLE_APP_SERVERS = '{{ app_hosts }}'.split(',')
MULTIPLE_BUILD_SERVERS = '{{ worker_hosts }}'.split(',')
SLUMBER_API_HOST = 'http://{{ api_host }}'
SLUMBER_USERNAME = '{{ SLUMBER_USERNAME }}'
SLUMBER_PASSWORD = '{{ <PASSWORD> }}'
SYNC_USER = '{{ rtd_user }}'
#DOCROOT = '/var/build'
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
REPO_LOCK_SECONDS = 300
DONT_HIT_DB = False
# Override classes
CLASS_OVERRIDES = {
'readthedocs.builds.syncers.Syncer': 'readthedocs.builds.syncers.LocalSyncer',
'readthedocs.core.resolver.Resolver': 'readthedocs.docsitalia.resolver.ItaliaResolver',
'readthedocs.oauth.services.GitHubService': 'readthedocs.docsitalia.oauth.services.github.DocsItaliaGithubService',
}
# Email
if {{ USE_SMTP }}:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = '{{ EMAIL_HOST }}'
EMAIL_HOST_USER = '{{ EMAIL_HOST_USER }}'
EMAIL_HOST_PASSWORD = '{{ EMAIL_HOST_PASSWORD }}'
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Social Auth
GITHUB_APP_ID = '{{ GITHUB_APP_ID }}'
GITHUB_API_SECRET = '{{ GITHUB_API_SECRET }}'
SOCIALACCOUNT_PROVIDERS = {
'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}
}
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
ADMINS = (
('Test', 'test@{{ rtd_domain }}'),
)
TIME_ZONE = 'Europe/Rome'
LANGUAGE_CODE = 'it-it'
CORS_ORIGIN_WHITELIST = (
'{{ rtd_domain }}:8000',
)
{% if CORS_HEADERS_HOSTS == 'all' %}
CORS_ORIGIN_ALLOW_ALL = True
{% endif %}
WEBSOCKET_HOST = '{{ rtd_domain }}:8088'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '{{ rtd_db_name }}',
'USER': '{{ rtd_db_user }}',
'PASSWORD': '{{ rtd_db_pass }}',
'HOST': '{{ rtd_db_host }}',
'PORT': '{{ rtd_db_port }}',
},
}
# Etc
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
'cloak_email_addresses': True,
'file_insertion_enabled': False,
'raw_enabled': False,
'strip_comments': True,
'doctitle_xform': True,
'sectsubtitle_xform': True,
'initial_header_level': 2,
'report_level': 5,
'syntax_highlight': 'none',
'math_output': 'latex',
'field_name_limit': 50,
}
USE_PROMOS = False
ALLOWED_HOSTS = ['*']
USER_MATURITY_DAYS = 14
READTHEDOCSEXT_MONITORING_CACHE = 'stats'
DEFAULT_VERSION_PRIVACY_LEVEL = '{{ DEFAULT_VERSION_PRIVACY_LEVEL }}'
@property
def TEXTCLASSIFIER_DATA_FILE(self):
return os.path.join(self.SITE_ROOT, 'textclassifier.json')
# Banned" projects
HTML_ONLY_PROJECTS = (
'atom',
'galaxy-central',
'django-geoposition',
)
# Add fancy sessions after the session middleware
@property
def MIDDLEWARE(self):
classes = super(CommunityProdSettings, self).MIDDLEWARE
classes = list(classes)
index = classes.index(
'readthedocs.core.middleware.FooterNoSessionMiddleware'
)
classes.insert(
index + 1,
'restrictedsessions.middleware.RestrictedSessionsMiddleware'
)
return tuple(classes)
RESTRICTEDSESSIONS_AUTHED_ONLY = True
# Logging
@property
def LOGGING(self):
    """Parent LOGGING config extended with a syslog formatter/handler and a
    single root logger that sends INFO+ records to console and syslog."""
    logging = super(CommunityProdSettings, self).LOGGING
    logging['formatters']['syslog'] = {
        'format': 'readthedocs/%(name)s[%(process)d]: %(levelname)s %(message)s [%(name)s:%(lineno)s]',
        'datefmt': '%d/%b/%Y %H:%M:%S'
    }
    logging['handlers']['syslog'] = {
        'level': 'DEBUG',
        'class': 'logging.handlers.SysLogHandler',
        'formatter': 'syslog',
        # Log through the local syslog socket.
        'address': '/dev/log',
    }
    # Replace all configured loggers with one root logger.
    logging['loggers'] = {
        '': {
            'handlers': ['console', 'syslog'],
            'level': 'INFO',
        }
    }
    return logging
# Apply the settings class to this module's namespace.
CommunityProdSettings.load_settings(__name__)

# Layer machine-local overrides on top, unless explicitly skipped via env var.
if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
    try:
        # pylint: disable=unused-wildcard-import
        from .local_settings import *  # noqa
    except ImportError:
        # No local settings module present; keep the defaults above.
        pass
|
<filename>main.py
from agent import Agent
import argparse
from collections import deque
from env import Environment
import numpy as np
import torch
from model import QNetwork, Small, Large, Dropout
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
def main(args):
    """Run every action whose corresponding CLI flag was set, in fixed order."""
    dispatch = (
        (args.examine, examine),
        (args.random, random),
        (args.train, lambda: train_multiple(summary_interval=50)),
        (args.test, test),
    )
    for enabled, action in dispatch:
        if enabled:
            action()
def examine():
    """Print basic facts about the environment's agents, actions and states."""
    with Environment() as env:
        env_info = env.reset()
        print('Number of agents:', len(env_info.agents))
        n_actions = env.action_space_size
        print('Number of actions:', n_actions)
        first_state = env_info.vector_observations[0]
        print('States look like:', first_state)
        print('States have length:', len(first_state))
def random():
    """Run one episode with uniformly random actions and report the score."""
    with Environment(no_graphics=False) as env:
        env.reset()
        n_actions = env.action_space_size
        total = 0
        while True:
            choice = np.random.randint(n_actions)
            env_info = env.step(choice)
            total += env_info.rewards[0]
            if env_info.local_done[0]:
                break
            print('\rScore: {:.2f}'.format(total), end="")
        print('\rScore: {:.2f}'.format(total))
def test():
    """Load a trained Q-network from disk and run one greedy episode."""
    with Environment(no_graphics=False) as env:
        env_info = env.reset(train_mode=False)
        n_actions = env.action_space_size
        state = env_info.vector_observations[0]
        agent = Agent(model=QNetwork, state_size=len(state), action_size=n_actions, seed=0)
        agent.qnetwork_local.load_state_dict(torch.load('results/checkpoint.pth'))
        total = 0
        while True:
            env_info = env.step(agent.act(state))
            state = env_info.vector_observations[0]
            total += env_info.rewards[0]
            if env_info.local_done[0]:
                break
            print('\rScore: {:.2f}'.format(total), end="")
        print('\rScore: {:.2f}'.format(total))
def train_multiple(summary_interval):
    """Train each architecture in turn and plot a comparative summary."""
    architectures = (QNetwork, Small, Large, Dropout)
    with Environment() as env:
        results = {
            arch.__name__: train(arch, env, summary_interval)
            for arch in architectures
        }
        plot_summary(results, summary_interval)
def train(model, env, summary_interval=100):
    """Build an agent for `model` and run the training loop.

    Returns the list of windowed mean scores produced by _train().
    """
    env_info = env.reset(train_mode=True)
    first_state = env_info.vector_observations[0]
    agent = Agent(model=model, state_size=len(first_state),
                  action_size=env.action_space_size, seed=0)
    return _train(env, agent, model_name=model.__name__,
                  summary_interval=summary_interval)
def _train(env, agent, model_name='model', summary_interval=100,
           n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01):
    """Deep Q-Learning training loop.

    Params
    ======
        env: environment wrapper providing reset()/step()
        agent: learning agent exposing act(), step() and qnetwork_local
        model_name (str): base name for checkpoint/plot files
        summary_interval (int): episodes per progress summary window
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        eps_start (float): starting value of epsilon, for epsilon-greedy action selection
        eps_end (float): minimum value of epsilon

    Returns:
        list: mean score of each summary_interval-sized window.
    """
    scores = []                                   # score of every episode
    summary_scores = []                           # windowed means, one per interval
    scores_window = deque(maxlen=summary_interval)
    eps = eps_start
    # Geometric decay chosen so eps_start * decay**n_episodes == eps_end.
    # BUG FIX: previous formula `(eps_end ** (1/n_episodes)) / eps_start`
    # is only correct when eps_start == 1.0.
    eps_decay = (eps_end / eps_start) ** (1 / n_episodes)
    for i_episode in range(1, n_episodes + 1):
        env_info = env.reset(train_mode=True)
        state = env_info.vector_observations[0]
        score = 0
        for _ in range(max_t):
            action = agent.act(state, eps)
            env_info = env.step(action)
            next_state = env_info.vector_observations[0]
            reward = env_info.rewards[0]
            done = env_info.local_done[0]
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)
        scores.append(score)
        eps = max(eps_end, eps_decay * eps)       # decay epsilon, floored at eps_end
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % summary_interval == 0:
            summary_scores.append(np.mean(scores_window))
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
            # Checkpoint and plot intermediate progress at every interval.
            torch.save(agent.qnetwork_local.state_dict(), 'results/' + model_name + '.pth')
            plot_scores(scores, summary_scores, summary_interval, model_name)
    # Final checkpoint and plot after the loop completes.
    torch.save(agent.qnetwork_local.state_dict(), 'results/' + model_name + '.pth')
    plot_scores(scores, summary_scores, summary_interval, model_name)
    return summary_scores
def plot_scores(scores, summary_scores, summary_interval, file_name=''):
    """Plot per-episode scores together with the windowed-mean curve.

    Saves to results/<file_name>.png when file_name is given, otherwise
    shows the figure interactively.
    """
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    plt.plot(np.arange(len(scores)), scores)
    # BUG FIX: one summary point exists per summary_interval episodes, so the
    # x values must be scaled — `np.arange(len(summary_scores) * summary_interval)`
    # produced an x array whose length did not match summary_scores.
    plt.plot(np.arange(len(summary_scores)) * summary_interval, summary_scores)
    if file_name:
        # BUG FIX: path separator was missing ('results' + file_name).
        plt.savefig('results/' + file_name + '.png', bbox_inches='tight')
    else:
        plt.show()
def plot_summary(summary_scores, summary_interval):
    """Plot every model's windowed-mean curve on one labelled figure and save it."""
    plt.ylabel('Score')
    plt.xlabel('Episode #')
    for model_name, means in summary_scores.items():
        xs = np.arange(len(means)) * summary_interval
        plt.plot(xs, means, label=model_name)
    plt.legend()
    plt.savefig('results/summary.png', bbox_inches='tight')
    plt.close()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='DeepRL - Q-learning project')
    # Each flag independently enables one action; several may be combined.
    parser.add_argument('--examine',
                        action="store_true",
                        dest="examine",
                        help='Print environment information')
    parser.add_argument('--random',
                        action="store_true",
                        dest="random",
                        help='Start a random agent')
    parser.add_argument('--train',
                        action="store_true",
                        dest="train",
                        help='Train a new network')
    parser.add_argument('--test',
                        action="store_true",
                        dest="test",
                        help='Load an existing network and test it')
    args = parser.parse_args()
    # Require at least one flag; otherwise print usage and exit.
    if not any(vars(args).values()):
        parser.error('No arguments provided.')
    main(args)
|
<gh_stars>1-10
import sys
import numpy as np
import praw
import pymc as pm
from matplotlib import pyplot as plt
from IPython.core.display import Image
def posterior_upvote_ratio(upvotes, downvotes, samples=20000):
    """Sample the posterior over a comment's true upvote ratio.

    Uses a Uniform(0, 1) prior and a Binomial likelihood over the observed
    upvotes; returns `samples` MCMC draws of the ratio.
    """
    total_votes = upvotes + downvotes
    ratio = pm.Uniform("upvote_ratio", 0, 1)
    obs = pm.Binomial("obs", total_votes, ratio, value=upvotes,
                      observed=True)
    # MAP first: cheap, and gives the MCMC sampler a good starting point.
    pm.MAP([ratio, obs]).fit()
    sampler = pm.MCMC([ratio, obs])
    # First quarter of the draws is discarded as burn-in.
    sampler.sample(samples, samples / 4)
    return sampler.trace("upvote_ratio")[:]
def main():
    """Fetch a top /r/pics image submission, sample posterior upvote ratios
    for a few of its comments, and plot/rank them by a lower-bound score."""
    # NOTE(review): Python 2 code (print statements, iterator .next(), PyMC 2).
    reddit = praw.Reddit("BayesianMethodsForHackers")
    subreddit = reddit.get_subreddit( "pics" )
    top_submissions = subreddit.get_top()
    # How many image posts to advance through; argv[1], default 1.
    n_pic = int( sys.argv[1] ) if len(sys.argv) > 1 else 1
    i = 0
    while i < n_pic:
        top_submission = top_submissions.next()
        while "i.imgur.com" not in top_submission.url:
            #make sure it is linking to an image, not a webpage.
            top_submission = top_submissions.next()
        i+=1
    print "Title of submission: \n", top_submission.title
    top_post_url = top_submission.url
    #top_submission.replace_more_comments(limit=5, threshold=0)
    print top_post_url
    Image(top_post_url)
    # Collect vote counts and bodies for every readable comment.
    upvotes = []
    downvotes = []
    contents = []
    _all_comments = top_submission.comments
    all_comments=[]
    for comment in _all_comments:
        try:
            upvotes.append( comment.ups )
            downvotes.append( comment.downs )
            contents.append( comment.body )
        except Exception as e:
            # Skip entries without ups/downs/body (e.g. MoreComments stubs).
            continue
    votes = np.array( [ upvotes, downvotes] ).T
    n_comments = len(contents)
    # Pick four comments at random to analyse.
    comments = np.random.randint(n_comments, size=4)
    print "Some Comments (out of %d total) \n-----------" % n_comments
    for i in comments:
        print '"' + contents[i] + '"'
        print "upvotes/downvotes: ", votes[i, :]
        print
    # First figure: posterior histograms for the sampled comments.
    posteriors = []
    colours = ["#348ABD", "#A60628", "#7A68A6", "#467821", "#CF4457"]
    for i in range(len(comments)):
        j = comments[i]
        posteriors.append(posterior_upvote_ratio(votes[j, 0], votes[j, 1]))
        label = '(%d up:%d down)\n%s...' % (votes[j, 0], votes[j, 1],
                                            contents[j][:50])
        plt.hist(posteriors[i], bins=18, normed=True, alpha=.9,
                 histtype="step", color=colours[i % 5], lw=3,
                 label=label)
        plt.hist(posteriors[i], bins=18, normed=True, alpha=.2,
                 histtype="stepfilled", color=colours[i], lw=3, )
    plt.legend(loc="upper left")
    plt.xlim(0, 1)
    plt.title("Posterior distributions of upvote ratios on different comments")
    plt.show()
    # Second figure: same histograms plus the 95% posterior lower bound
    # (5th percentile of the sorted samples) for each comment.
    N = posteriors[0].shape[0]
    lower_limits = []
    for i in range(len(comments)):
        j = comments[i]
        plt.hist(posteriors[i], bins=20, normed=True, alpha=.9,
                 histtype="step", color=colours[i], lw=3,
                 label='(%d up:%d down)\n%s...' % (votes[j, 0], votes[j, 1],
                                                   contents[j][:50]))
        plt.hist(posteriors[i], bins=20, normed=True, alpha=.2,
                 histtype="stepfilled", color=colours[i], lw=3, )
        v = np.sort(posteriors[i])[int(0.05 * N)]
        # plt.vlines( v, 0, 15 , color = "k", alpha = 1, linewidths=3 )
        plt.vlines(v, 0, 10, color=colours[i], linestyles="--", linewidths=3)
        lower_limits.append(v)
        plt.legend(loc="upper left")
    plt.legend(loc="upper left")
    plt.title("Posterior distributions of upvote ratios on different comments")
    plt.show()
    order = np.argsort(-np.array(lower_limits))
    print order, lower_limits

    # This is the closed form method to replace the Markov Chain process above
    # for real-time work
    def intervals(u, d):
        # Mean and ~90% normal-approximation error of a Beta(1+u, 1+d) posterior.
        a = 1. + u
        b = 1. + d
        mu = a / (a + b)
        std_err = 1.65 * np.sqrt((a * b) / ((a + b) ** 2 * (a + b + 1.)))
        return (mu, std_err)
    print "Approximate lower bounds:"
    posterior_mean, std_err = intervals(votes[:, 0], votes[:, 1])
    lb = posterior_mean - std_err
    print lb
    print
    print "Top 40 Sorted according to approximate lower bounds:"
    print
    order = np.argsort(-lb)
    ordered_contents = []
    for i in order[:40]:
        ordered_contents.append(contents[i])
        print votes[i, 0], votes[i, 1], contents[i]
        print "-------------"
    # Sorting visually with the lower bound estimation. Also showing the mean
    # and its apparent random fluctuation i.e it's better to use lower bound
    r_order = order[::-1][-40:]
    plt.errorbar(posterior_mean[r_order], np.arange(len(r_order)),
                 xerr=std_err[r_order], xuplims=True, capsize=0, fmt="o",
                 color="#7A68A6")
    plt.xlim(0.3, 1)
    plt.yticks(np.arange(len(r_order) - 1, -1, -1),
               map(lambda x: x[:30].replace("\n", ""), ordered_contents))
    plt.show()
if __name__ == '__main__':
main()
|
# Django Libraries
from django.shortcuts import render,redirect, HttpResponseRedirect
from django.contrib import messages
# User Defined
from core.models import Contests, Problems
from core.forms import ProblemFilterForm
from core.viewers.helperviews import *
#Python Libraries
import random
import json
import requests
from collections import OrderedDict
def dashboard(request,handle):
    '''
    @type: renderfunction ;
    @return: renders dashboards page ;
    @description:
        Calls helper functions and provides data for chart display on
        dashboards page.;
    @errorhandling:
        Silent error handling. Supports anonymous views;
    '''
    try:
        user,m,mtag,msubs,prbcnt,ranklist,_,days = getcharts(request,handle)
    except Exception:
        # Deliberate best-effort fallback: render an empty dashboard for a
        # missing/unknown handle instead of failing.
        # FIX: bare `except:` narrowed to `except Exception:` so that
        # SystemExit/KeyboardInterrupt are not swallowed.
        messages.error(request,"Please enter a handle to view dashboard")
        user = None
        m = {}
        mtag = {}
        msubs = {}
        prbcnt = 0
        ranklist = {}
        days = []
    return render(request,"dashboard.html",context={
        "user":user,
        "ProbCat":m,
        "ProbTags":mtag,
        "SubsInfo":msubs,
        "probsolved":prbcnt,
        "contcount":len(ranklist),
        "ranklist":ranklist,
        "days":days
    })
def contests(request,handle):
    '''
    @type: renderfunction ;
    @return: renders contests page ;
    @description:
        Collects user info and all stored contests, queries the Codeforces
        user.status API, and flags which contests the user has submitted in;
    @errorhandling:
        Silent error handling, no error message given, substitutes guest
        everywhere. Supports anonymous views also;
    '''
    user = userinfo(request,handle)
    all_contests = Contests.objects.all()
    status = requests.get('https://codeforces.com/api/user.status?handle={}'.format(handle)).json()
    if status['status'] != "OK":
        # Unknown handle / API failure: behave as if there are no submissions.
        status['result'] = []
    attempted_ids = set()
    for submission in status['result']:
        try:
            attempted_ids.add(submission['contestId'])
        except:
            # Some submissions (e.g. gym) carry no contestId; ignore them.
            pass
    # 1 if the user has any submission in the contest, else 0.
    participation = [1 if c.contid in attempted_ids else 0 for c in all_contests]
    ress = zip(all_contests, participation)
    return render(request, 'contests.html', {'ress':ress,'user':user})
def problems(request,handle):
    '''
    @type: renderfunction ;
    @return: renders problems page ;
    @description:
        calls userinfo and codeforces user status api
        handles POST data of filterform on problems page.
        By default returns problems from difficulty of
        [(maxRating+300),(maxrating-100)];
    @errorhandling:
        silent error handling.
        Supports anonymous views also;
    '''
    user = userinfo(request,handle)
    # Partition the user's attempted problem names into solved / unsolved.
    solved = []
    unsolved = []
    try:
        subs = requests.get('https://codeforces.com/api/user.status?handle={}'.format(handle)).json()['result']
    except:
        # Codeforces unreachable or handle unknown: treat as no submissions.
        subs = []
    for x in subs:
        if x['verdict'] == "OK":
            solved.append(x['problem']['name'])
        else:
            unsolved.append(x['problem']['name'])
    prbs = []
    index = None
    # Default difficulty window derived from the user's max rating; the
    # except branches cover anonymous/guest users without a 'maxRating'.
    try:
        ratingmax = min(user['maxRating'] + 300,3500)
    except:
        ratingmax = 1000
    try:
        ratingmin = max(min(user['maxRating'] - 100,3000),0)
    except:
        ratingmin = 0
    show_tags = False
    taglist = None
    filterform = ProblemFilterForm()
    if request.method == "POST":
        filterform = ProblemFilterForm(request.POST)
        if filterform.is_valid():
            # Form fields may legitimately come back as None (left empty).
            index = filterform.cleaned_data.get('category')
            ratingmin = filterform.cleaned_data.get('ratingmin')
            ratingmax = filterform.cleaned_data.get('ratingmax')
            tags = filterform.cleaned_data.get('tags')
            taglist = tags.split(',')
            show_tags = filterform.cleaned_data.get('show_tags')
        else:
            # print(filterform.errors)
            messages.error(request,"Invalid values provided")
            # return redirect("/cfviewer/")
    # Re-derive defaults for rating bounds the form left empty: both empty ->
    # rating-based defaults again; only one empty -> widest bound for it.
    if (ratingmax == None) and (ratingmin == None):
        try:
            ratingmax = min(user['maxRating'] + 300,3500)
        except:
            ratingmax = 1000
        try:
            ratingmin = max(min(user['maxRating'] - 100,3000),0)
        except:
            ratingmin = 0
    else:
        if(ratingmax == None):
            ratingmax = 3800
        if(ratingmin == None):
            ratingmin = 0
    problems = Problems.objects.filter(rating__lte=ratingmax,rating__gte=ratingmin,index__isnull=False)
    if(index)and(index!="None"):
        problems = problems.filter(index__startswith=index)
    if not ((taglist == None) or (taglist[0] == '')):
        # AND-combine all requested tags.
        for b in taglist:
            problems =problems.filter(tags__contains=str(b.strip()))
    prbs = problems
    # Colour-code each problem by the user's submission history.
    final = []
    color = []
    types = []
    for x in problems:
        if x.name in solved:
            color.append("#a8eabe")
            types.append("solved")
        elif x.name in unsolved:
            color.append('#ff9292')
            types.append("wrong")
        else:
            color.append("white")
            types.append("unsolved")
        final.append(x)
    prbs = zip(final,color,types)
    return render(request,"problems.html",context={"problems":prbs,'filterform':filterform,'user':user,'show_tags':show_tags})
def friendsunsolved(request):
    '''
    @type: renderfunction ;
    @return: renders friends page ;
    @description:
        calls userinfo for the viewer, also 3 api calls to codeforces for user and friends status.
        It gives color to submissions which are solved/unsolved by user accordingly ;
    @errorhandling:
        Silent error handling. Supports anonymous views.
        No messages given;
    '''
    try:
        handle = request.GET['handle']
    except:
        handle =""
    if handle == "":
        messages.error(request,"Please enter your handle to continue")
        return redirect("/cfviewer/")
    user = userinfo(request,handle)
    # Guests have no 'firstName'; fall back to the handle to detect them.
    try:
        x = user['firstName']
    except:
        x = user['handle']
    if x == "Guest":
        messages.error(request,"You need to login in before comparing")
        return redirect("/cfviewer/")
    friend = request.GET['friend']
    friendinf = requests.get('https://codeforces.com/api/user.info?handles={}'.format(friend)).json()
    if(friendinf['status'] == "OK"):
        # Chart data for both users (same helper as the dashboard view).
        user,m,mtag,_,prbcnt,ranklist,_,_ = getcharts(request,handle)
        fuser,fm,fmtag,_,fprbcnt,franklist,_,_ = getcharts(request,friend)
        # Merge per-category problem counts into [mine, friend's] pairs.
        Probcat = {}
        for x,y in m.items():
            Probcat[x] = [y]
        for x,y in fm.items():
            Probcat[x].append(y)
        # Merge per-tag counts into [mine, friend's], defaulting to 0.
        ProbTags = {}
        for x in mtag:
            ProbTags[x] = [0,0]
        for x in fmtag:
            ProbTags[x] = [0,0]
        for x,y in mtag.items():
            ProbTags[x][0] = y
        for x,y in fmtag.items():
            ProbTags[x][1] = y
        try:
            user['minRating'] = min(ranklist)
            fuser['minRating'] = min(franklist)
        except:
            pass
        # ranklist = {}
        days = {}
        fdays = {}
        contextdict = {
            "user":user,
            "fuser":fuser,
            "Probcat":Probcat,
            "ProbTags":ProbTags,
            "ranklist":ranklist,
            "probsolved":prbcnt,
            "fprobsolved":fprbcnt,
            "contcount":len(ranklist),
            "fcontcount":len(franklist)
        }
        # comp = zip(s1,s2)
        mysubs = requests.get('https://codeforces.com/api/user.status?handle={}'.format(handle)).json()['result']
        friendssubs = requests.get('https://codeforces.com/api/user.status?handle={}'.format(friend)).json()['result']
        # Creating a dictionary with friends unsolved problems
        friendsunsolved = {}
        for x in friendssubs:
            if(x['verdict'] != "OK"):
                friendsunsolved[x['problem']['name']] = x
        # Drop any problem the friend eventually solved.
        for x in friendssubs:
            if(x['verdict'] == "OK"):
                if(x['problem']['name'] in friendsunsolved):
                    friendsunsolved.pop(x['problem']['name'])
        # Creating my solutions dictionary to give color
        # (OK verdicts are written in a second pass so they override failures).
        mysols = {}
        for x in mysubs:
            if(x['verdict']!="OK"):
                mysols[x['problem']['name']] = x
        for x in mysubs:
            if(x['verdict']=="OK"):
                mysols[x['problem']['name']] = x
        unsolved = 0
        for x in mysols:
            if(mysols[x]['verdict'] != "OK"):
                unsolved += 1
        # Colour each of the friend's unsolved problems by my own history.
        substogo = []
        color = []
        types = []
        for x,y in friendsunsolved.items():
            if x in mysols:
                if mysols[x]['verdict'] == "OK":
                    color.append("#a8eabe")
                    types.append("solved")
                else:
                    color.append("#ff9292")
                    types.append("wrong")
            else:
                color.append("white")
                types.append("unsolved")
            substogo.append(y)
        substogo = zip(substogo,color,types)
        contextdict["user"]=user
        contextdict["subs"]=substogo
        contextdict["friend"]=friend
        contextdict["unsolved"] = unsolved
        contextdict["funsolved"] = len(friendsunsolved)
        return render(request,"friends.html",context=contextdict)
    else:
        messages.error(request,"{} Handle not found".format(friend))
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def suggestor(request,handle,slug):
    '''
    @type: renderfunction ;
    @return: renders suggest pages for problems and contests ;
    @description:
        Takes an additional param slug which selects the problems or contests
        view. User info and Codeforces API data are gathered by the helper;
        returned objects are based on user ratings and submission history.;
    @errorhandling:
        If slug is not 'problem' or 'contest' then redirects to home page
        with error message. Else silent error handling. Supports anonymous views;
    '''
    status,context = suggestor_helper(request,handle,slug)
    if status == "success":
        templates = {
            "problem": "suggestprobs.html",
            "contest": "suggestconts.html",
        }
        if slug in templates:
            return render(request, templates[slug], context=context)
    # Helper failed (or slug unrecognized): report and go home.
    messages.error(request,context['msg'])
    return redirect("/cfviewer/")
def submissionsviewer(request,handle,contid):
    '''
    @type: renderfunction ;
    @return: renders submissions page for contests ;
    @description:
        Takes in handle and contestid, and provides color coded
        submissions for viewing along with links;
    @errorhandling:
        If codeforces unavailable or status not ok, provides error message
        and redirects to http_referer;
    '''
    user = userinfo(request,handle)
    try:
        subs = requests.get('https://codeforces.com/api/user.status?handle={}'.format(handle)).json()
    except:
        messages.error(request,"Codeforces unavailable. Please try again later")
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    if subs['status'] != "OK":
        messages.error(request,"Handle not found. Please provide a proper handle")
        return redirect("/cfviewer/")
    # Keep only the submissions belonging to the requested contest.
    matching = []
    for submission in subs['result']:
        try:
            if str(submission['contestId']) == contid:
                matching.append(submission)
        except:
            # Submissions without a contestId are skipped.
            pass
    return render(request,"submissions.html",context={"handle":handle,"contest":contid,'subs':matching,"user":user})
import os
import sys
import argparse
import random
import numpy as np
import sklearn.preprocessing
import sklearn.svm
import sklearn.model_selection
from sklearn.cross_decomposition import CCA
from keras.callbacks import LearningRateScheduler
from keras.wrappers.scikit_learn import KerasClassifier
import tocca
def train_test_model( X_train, y_train, X_val, y_val, X_test, y_test, model_type, layers, layer_size, l2dist_weight, weight_decay, momentum, learning_rate, batch_size, epochs, sd_weight=0, zca_r=1e-4 ):
    """Train a TOCCA model and evaluate cross-modal classification accuracy.

    The SVM is fitted on modality 0's shared features and evaluated on
    modality 1's shared features (cross-modal transfer).

    NOTE(review): relies on the module-level global `shared_size` (set in the
    __main__ block) instead of a parameter — confirm before reusing elsewhere.

    Returns:
        (acc_train, acc_val, acc_test): classification accuracies.
    """
    classes = np.unique(y_train)
    np.sort(classes)  # NOTE(review): np.sort returns a copy, so this line has no effect.
    nclasses = len(classes)
    # format labels: one-hot targets twice (one per output head) plus a
    # constant dummy target for the third model output.
    out_train = sklearn.preprocessing.label_binarize( y_train, classes )
    out_train = [ out_train, out_train, np.ones((X_train[0].shape[0],1)) ]
    out_val = sklearn.preprocessing.label_binarize( y_val, classes )
    out_val = [ out_val, out_val, np.ones((X_val[0].shape[0],1)) ]
    out_test = sklearn.preprocessing.label_binarize( y_test, classes )
    out_test = [ out_test, out_test, np.ones((X_test[0].shape[0],1)) ]
    # train model
    input_dims = [ len(Xt[0]) for Xt in X_train ]
    model = tocca.create_model( model_type, nclasses, input_dims, layers, layer_size, shared_size, learning_rate, l2dist_weight, momentum, weight_decay, sd_weight, zca_r )
    model.fit( X_train, out_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_data=(X_val,out_val), shuffle=True )
    p_train = model.predict( X_train )
    # Output 2 concatenates both modalities' shared features; split in half.
    shared_train = [ p_train[2][:,:shared_size], p_train[2][:,shared_size:] ]
    # cross-modal classification
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        svm = sklearn.svm.LinearSVC()
        # Grid-search the SVM regularization constant C over powers of two.
        params = { 'C':[float(2**e) for e in range(-10,5)] }
        grid_search = sklearn.model_selection.GridSearchCV( estimator=svm, param_grid=params, scoring='accuracy', cv=5, refit=True, verbose=1, n_jobs=4 )
        grid_search.fit( shared_train[0], y_train )
        svm = grid_search.best_estimator_
    # training accuracy (predicting from the *other* modality's features)
    p_train = svm.predict( shared_train[1] )
    acc_train = ( y_train == p_train ).mean()
    # validation accuracy
    p_val = model.predict( X_val )
    shared_val = [ p_val[2][:,:shared_size], p_val[2][:,shared_size:] ]
    p_val = svm.predict( shared_val[1] )
    acc_val = ( y_val == p_val ).mean()
    # test accuracy
    p_test = model.predict( X_test )
    shared_test = [ p_test[2][:,:shared_size], p_test[2][:,shared_size:] ]
    p_test = svm.predict( shared_test[1] )
    acc_test = ( y_test == p_test ).mean()
    return acc_train,acc_val,acc_test
if __name__ == "__main__":
    parser = argparse.ArgumentParser( description='Compute CNN features.' )
    parser.add_argument('--dataset', '-d', help='data set name', default='MNISTsplit' )
    parser.add_argument('--training_size', help='training set size' )
    parser.add_argument('--model_type', '-m', required=True, help='model type (w, sd, nd)' )
    parser.add_argument('--param_search', '-p', help='random search for best parameters' )
    parser.add_argument('--cv', help='cross-validation folds' )
    parser.add_argument('--layers', '-l', help='number of layers' )
    parser.add_argument('--layer_size', help='layer size' )
    parser.add_argument('--shared_size', '-s', help='number of shared features', default=50 )
    parser.add_argument('--l2dist', help='l2 distance weight' )
    parser.add_argument('--momentum', help='momentum' )
    parser.add_argument('--l2', help='l2 weight decay' )
    parser.add_argument('--sd', help='soft decorrelation weight' )
    parser.add_argument('--zca', help='zca regularization' )
    parser.add_argument('--learning_rate', '-r', help='learning rate' )
    parser.add_argument('--batch_size', '-b', help='batch size' )
    parser.add_argument('--epochs', '-e', help='epochs', default=100 )
    parser.add_argument('--semi', help='semi supervised: % missing labels' )
    parser.add_argument('--out_file', '-o', help='output file' )
    args = parser.parse_args()
    # Unpack CLI arguments; optional ones arrive as strings or None and are
    # converted (or defaulted) further below.
    dataset = args.dataset
    training_size = args.training_size
    model_type = args.model_type
    param_search = args.param_search
    cv = args.cv
    layers = args.layers
    layer_size = args.layer_size
    shared_size = int(args.shared_size)
    l2dist_weight = args.l2dist
    momentum = args.momentum
    weight_decay = args.l2
    sd_weight = args.sd
    zca_r = args.zca
    learning_rate = args.learning_rate
    batch_size = args.batch_size
    epochs = int(args.epochs)
    semi = args.semi
    out_file = args.out_file
    if dataset == 'MNISTsplit':
        # load data
        from keras.datasets import mnist
        (X_train,y_train),(X_test,y_test) = mnist.load_data()
        X_train = X_train.astype('float')
        X_test = X_test.astype('float')
        data_train = [ X_train[i,:,:].squeeze() for i in range(X_train.shape[0]) ]
        data_test = [ X_test[i,:,:].squeeze() for i in range(X_test.shape[0]) ]
        # create left and right modalities by splitting image in half
        X_train = [ [ dt[:,:14].flatten() for dt in data_train ], [ dt[:,14:].flatten() for dt in data_train ] ]
        X_test = [ [ dt[:,:14].flatten() for dt in data_test ], [ dt[:,14:].flatten() for dt in data_test ] ]
        X_train = [ np.array(X) for X in X_train ]
        X_test = [ np.array(X) for X in X_test ]
    else:
        print('Unsupported data set: '+dataset)
        sys.exit(1)
    if cv is None:
        cv = 5
    else:
        cv = int(cv)
    if training_size is None:
        # Default: (cv-1)/cv of the data for training, one fold for validation.
        training_size = (len(y_train)//cv)*(cv-1)
    else:
        training_size = int(training_size)
if param_search is not None:
    # Random-search mode: any hyperparameter not pinned on the command line
    # gets its full search range; pinned ones become single-element lists.
    param_search = int(param_search)
    layers = [1,2,3,4] if layers is None else [int(layers)]
    layer_size = [200] if layer_size is None else [int(layer_size)]
    l2dist_weight = [1e-3,1e-2,1e-1,1e0,1e1,1e2,1e3] if l2dist_weight is None else [float(l2dist_weight)]
    weight_decay = [1e-5,1e-4,1e-3,1e-2,1e-1,0] if weight_decay is None else [float(weight_decay)]
    momentum = [0.99,0.95,0.9] if momentum is None else [float(momentum)]
    learning_rate = [1e-2,1e-3,1e-4] if learning_rate is None else [float(learning_rate)]
    batch_size = [1000,100] if batch_size is None else [int(batch_size)]
    params = { 'layers':layers, 'layer_size':layer_size, 'l2dist_weight':l2dist_weight, 'weight_decay':weight_decay, 'momentum':momentum, 'learning_rate':learning_rate, 'batch_size':batch_size }
    if model_type == 'sd':
        params.update( { 'sd_weight':[1e-5,1e-4,1e-3,1e-2,1e-1] if sd_weight is None else [float(sd_weight)] } )
    elif model_type == 'w':
        params.update( { 'zca_r':[1e-4,1e-3,1e-2] if zca_r is None else [float(zca_r)] } )
else:
    # Single-run mode: every unspecified hyperparameter falls back to one default.
    param_search = 1
    # BUG FIX: was `layers = 2` (bare int) — ParameterSampler requires lists.
    layers = [2] if layers is None else [int(layers)]
    layer_size = [200] if layer_size is None else [int(layer_size)]
    l2dist_weight = [1.0] if l2dist_weight is None else [float(l2dist_weight)]
    # BUG FIX: was assigned to the misspelled name `weight_deca`, leaving
    # weight_decay as the raw CLI value (possibly None) in the params dict.
    weight_decay = [1e-4] if weight_decay is None else [float(weight_decay)]
    momentum = [0.99] if momentum is None else [float(momentum)]
    learning_rate = [1e-3] if learning_rate is None else [float(learning_rate)]
    batch_size = [1000] if batch_size is None else [int(batch_size)]
    params = { 'layers':layers, 'layer_size':layer_size, 'l2dist_weight':l2dist_weight, 'weight_decay':weight_decay, 'momentum':momentum, 'learning_rate':learning_rate, 'batch_size':batch_size }
    if model_type == 'sd':
        params.update( { 'sd_weight':[0] if sd_weight is None else [float(sd_weight)] } )
    elif model_type == 'w':
        params.update( { 'zca_r':[1e-4] if zca_r is None else [float(zca_r)] } )
if semi is not None:
    # TODO: semi-supervised: randomly retain only X% of labels on training set
    semi = float(semi)
    pass
param_sampler = sklearn.model_selection.ParameterSampler( params, param_search )
# Keep pristine copies; the fold loop below rebinds X_train/y_train/X_test.
X_train_all = X_train[:]
y_train_all = y_train[:]
X_test_all = X_test[:]
for p in param_sampler:
    print(p)
    acc_train = []
    acc_val = []
    acc_test = []
    for f in range(cv):
        print('Fold %d of %d'%(f+1,cv))
        # create train/val split
        idx = np.arange(len(y_train_all))
        random.shuffle(idx)
        X_train = [ X[idx[:training_size],:] for X in X_train_all ]
        y_train = y_train_all[idx[:training_size]]
        X_val = [ X[idx[training_size:],:] for X in X_train_all ]
        y_val = y_train_all[idx[training_size:]]
        X_test = X_test_all[:]
        # normalize (statistics computed from the training split only)
        mean = [ X.mean(axis=0) for X in X_train ]
        std = [ X.std(axis=0)+1e-6 for X in X_train ]
        X_train = [ (X-m)/s for X,m,s in zip(X_train,mean,std) ]
        X_val = [ (X-m)/s for X,m,s in zip(X_val,mean,std) ]
        X_test = [ (X-m)/s for X,m,s in zip(X_test,mean,std) ]
        a_train,a_val,a_test = train_test_model( X_train, y_train, X_val, y_val, X_test, y_test, model_type=model_type, epochs=epochs, **p )
        acc_train.append( a_train )
        acc_val.append( a_val )
        acc_test.append( a_test )
        if cv > 1:
            print('train %f val %f test %f'%(a_train,a_val,a_test))
    acc_train = np.array(acc_train)
    acc_val = np.array(acc_val)
    acc_test = np.array(acc_test)
    # Report mean (and std when cross-validating) over folds.
    if cv > 1:
        print('train %f (%f) val %f (%f) test %f (%f)'%(acc_train.mean(),acc_train.std(),acc_val.mean(),acc_val.std(),acc_test.mean(),acc_test.std()))
    else:
        print('train %f val %f test %f'%(acc_train,acc_val,acc_test))
    if out_file is not None:
        # Append this parameter setting's results to the output file.
        fd = open( out_file, 'a' )
        if cv > 1:
            print('train %f (%f) val %f (%f) test %f (%f)'%(acc_train.mean(),acc_train.std(),acc_val.mean(),acc_val.std(),acc_test.mean(),acc_test.std()),file=fd)
        else:
            print('train %f val %f test %f'%(acc_train,acc_val,acc_test),file=fd)
        fd.close()
|
<gh_stars>1-10
#!/usr/bin/env python3
"""
This is a python script to aggregate detected bibs from individual person crops
into one image.
Usage:
python person_aggregate.py /path/to/input/files \
/path/to/output \
/path/to/person/crops \
/path/to/bib/crops
Author: <NAME>
Date: 23 Aug 2017
"""
import os
import sys
from glob import glob
import cv2
import re
import json
import numpy as np
# Keep unions only if they are 75% of the area of either r1 or r2
KEEP_UNION_THRESHOLD = 0.75
def union(r1, r2):
    """Return the bounding box that covers both regions.

    Args:
        r1, r2 (dict): Dictionaries with {x1, y1, x2, y2, accuracy} keys.

    Returns:
        dict: The enclosing region; accuracy is the max of the inputs'.
    """
    return {
        "x1": min(r1["x1"], r2["x1"]),
        "y1": min(r1["y1"], r2["y1"]),
        "x2": max(r1["x2"], r2["x2"]),
        "y2": max(r1["y2"], r2["y2"]),
        "accuracy": max(r1["accuracy"], r2["accuracy"]),
    }
def crop_region(image, region):
    """Crops a singular region in an image.

    Args:
        image (image): A numpy image.
        region (dict): A dictionary containing x1, y1, x2, y2.

    Returns:
        image: The cropped image.
    """
    rows = slice(region["y1"], region["y2"])
    cols = slice(region["x1"], region["x2"])
    return image[rows, cols]
def area(region):
    """Returns the area of the specified region.

    Args:
        region (dict): A dictionary containing {x1, y1, x2, y2} arguments.

    Returns:
        float: The area (width * height) of the region.
    """
    return (region["x2"] - region["x1"]) * (region["y2"] - region["y1"])
def intersection(r1, r2):
    """Calculates the intersection rectangle of two regions.

    Args:
        r1, r2 (dict): A dictionary containing {x1, y1, x2, y2} arguments.

    Returns:
        dict or None: The overlap rectangle (accuracy is the max of the
        inputs'), or None when the regions do not intersect.
    """
    left = max(r1["x1"], r2["x1"])
    top = max(r1["y1"], r2["y1"])
    right = min(r1["x2"], r2["x2"])
    bottom = min(r1["y2"], r2["y2"])
    # Empty or degenerate overlap counts as no intersection.
    if left >= right or top >= bottom:
        return None
    return {
        "x1": left,
        "y1": top,
        "x2": right,
        "y2": bottom,
        "accuracy": max(r1["accuracy"], r2["accuracy"]),
    }
def do_regions_intersect(r1, r2):
    """Calculates whether or not the two regions intersect each other.

    Args:
        r1, r2 (dict): A dictionary containing {x1, y1, x2, y2} arguments.

    Returns:
        boolean: True if the regions intersect, false otherwise.
    """
    overlap = intersection(r1, r2)
    return overlap is not None
def read_json(json_filename):
    """Reads the JSON file as a dictionary.

    Args:
        json_filename (string): The JSON file to read.

    Returns:
        dict: The JSON data, parsed as a dictionary.
    """
    with open(json_filename, 'r') as fp:
        return json.load(fp)
def extract_bib_regions(image_filename, bib_json_dir, person_json_dir):
    """Extracts valid bibs from the image.

    By `valid', we mean those regions which do not overlap. We will calculate
    the regions where there are possible overlaps in two people's detections.
    We also adjust these coordinates from the cropped images to the original
    images.

    Args:
        image_filename (string): Path to ORIGINAL image filename.
        bib_json_dir (string): Path of bib JSON files.
        person_json_dir (string): Path of person crop JSON files.

    Returns:
        dict: A mapped dictionary of the same format as Bib JSON but aggregated.
    """
    # Strip the image id from the original filename
    image_id = os.path.splitext(os.path.basename(image_filename))[0]
    # Read in the image (the pixels are never used below; kept for parity
    # with the original flow -- TODO confirm this load can be dropped).
    image = cv2.imread(image_filename)
    # If a person JSON directory is provided, then we need to combine all
    # detected bibs for each respective person
    person_regions = read_json("%s/%s.json" % (person_json_dir, image_id))["person"]["regions"]
    # Now I have all of my person_regions, I can find the respective bib
    # regions for every single person
    for i, person_region in enumerate(person_regions):
        # These are the person's coordinates in the ORIGINAL image
        px1, py1 = (person_region["x1"], person_region["y1"])
        bib_filename = "%s/%s_crop_person_%i.json" % (bib_json_dir, image_id, i)
        if not os.path.exists(bib_filename):
            print("No such crop at '%s'. Skipping..." % bib_filename)
            person_region["bib_regions"] = []
            person_region["bib_elapsed_seconds"] = float(0)
            continue
        # Renamed from `json`: the original shadowed the json module.
        bib_data = read_json(bib_filename)
        person_region["bib_regions"] = bib_data["bib"]["regions"]
        person_region["bib_elapsed_seconds"] = bib_data["bib"]["elapsed_seconds"]
        # Now we must mutate each of these bib regions to be reflective
        # of the ORIGINAL image's dimension sizes
        for bib_region in person_region["bib_regions"]:
            bib_region["x1"] += px1
            bib_region["y1"] += py1
            bib_region["x2"] += px1
            bib_region["y2"] += py1
    # Now strip out all bib regions in the entire photo for every runner
    bib_regions = [pr["bib_regions"] for pr in person_regions
                   if len(pr["bib_regions"]) > 0]
    if len(bib_regions) > 0:
        # Concatenate all bib regions (if any) in a single numpy array
        bib_regions = np.hstack(bib_regions)
    sum_of_time = float(np.sum([pr["bib_elapsed_seconds"] for pr in person_regions]))
    # Go through every bib region we have, and see if any bibs overlap.
    # If they do, then use the union of both.
    bib_regions_to_remove = []
    bib_regions_to_add = []
    for r1 in bib_regions:
        for r2 in bib_regions:
            if r1 == r2:
                continue
            if do_regions_intersect(r1, r2):
                ir = intersection(r1, r2)
                r1a = area(r1)
                r2a = area(r2)
                ira = area(ir)
                # Only merge when the overlap exceeds KEEP_UNION_THRESHOLD
                # of either region's area; smaller overlaps are ignored.
                if ira > KEEP_UNION_THRESHOLD * r1a or ira > KEEP_UNION_THRESHOLD * r2a:
                    bib_regions_to_remove.append(r1)
                    bib_regions_to_remove.append(r2)
                    bib_regions_to_add.append(union(r1, r2))
    bib_regions = [r for r in bib_regions if r not in bib_regions_to_remove] + bib_regions_to_add
    # Ensure unique only!!
    bib_regions = [dict(y) for y in set(tuple(x.items()) for x in bib_regions)]
    return {
        "bib": { "regions": bib_regions, "elapsed_seconds": sum_of_time }
    }
def crop_bib_regions_from_image(image, bib_regions):
    """Crops the specified bib regions from the given image.

    Args:
        image: A numpy image (already loaded pixels, not a filename).
        bib_regions (list): The bib region dicts ({x1, y1, x2, y2}) to crop.

    Returns:
        list: One cropped numpy image per region, in input order.
    """
    return [crop_region(image, bib_region) for bib_region in bib_regions]
def aggregate(image_filename, image_id, bib_json_dir, person_json_dir):
    """Aggregates person and bib crops.

    Args:
        image_filename (string): Path to ORIGINAL image filename.
        image_id (string): The identifier of the original image.
        bib_json_dir (string): Path of bib JSON files.
        person_json_dir (string): Path of person crop JSON files.

    Returns:
        dict: {"person": ..., "bib": ...} combining the person regions
        with the aggregated bib regions for the image.
    """
    person_regions = read_json("%s/%s.json" % (person_json_dir, image_id))
    bib_regions = extract_bib_regions(image_filename, bib_json_dir, person_json_dir)
    return {
        "person": person_regions["person"],
        "bib": bib_regions["bib"]
    }
def main():
    """Command-line entry point.

    Expects four arguments: in_dir, out_dir, bib_crops_dir, people_dir.
    For every .jpg in in_dir, writes one aggregated JSON file and the
    cropped bib images into out_dir.
    """
    assert len(sys.argv) - 1 >= 4, "Must provide four arguments (in_dir, out_dir, bib_crops_dir, people_dir)"
    in_dir = sys.argv[1]
    assert in_dir is not None, "Missing input directory (argv[1])"
    out_dir = sys.argv[2]
    assert out_dir is not None, "Missing output directory (argv[2])"
    bib_dir = sys.argv[3]
    assert bib_dir is not None, "Missing bib crops directory (argv[3])"
    ppl_dir = sys.argv[4]
    assert ppl_dir is not None, "Missing people crops directory (argv[4])"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # `image_path` renamed from `file` to avoid shadowing the builtin.
    for image_path in glob("%s/*.jpg" % in_dir):
        image_id = os.path.splitext(os.path.basename(image_path))[0]
        data = aggregate(image_path, image_id, bib_dir, ppl_dir)
        out_file = ("%s/%s.json" % (out_dir, image_id))
        print("Writing aggregated JSON '%s' to '%s'" % (image_id, out_file))
        with open(out_file, 'w') as f:
            json.dump(data, f)
        image = cv2.imread(image_path)
        for i, region in enumerate(data["bib"]["regions"]):
            crop = crop_region(image, region)
            crop_file = "%s/%s_crop_bib_%i.jpg" % (out_dir, image_id, i)
            cv2.imwrite(crop_file, crop)
|
<filename>External/astrometry.net/astrometry/python/pyfits/NA_pyfits.py<gh_stars>1-10
#!/usr/bin/env python
# $Id: NA_pyfits.py 329 2007-07-06 13:11:54Z jtaylor2 $
"""
A module for reading and writing FITS files and manipulating their contents.
FITS (the Flexible Image Transport System) is a file format endorsed by the International
Astronomical Union in 1999 and mandated by NASA as the standard format
for storing high energy astrophysics data. For details of the FITS
standard, see the NASA/Science Office of Standards and Technology
publication, NOST 100-2.0.
License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
For detailed examples of usage, see the I{PyFITS User's Manual} available from
U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf}
Epydoc markup used for all docstrings in this module.
@group Header-related Classes: Card, CardList, _Card_with_continue,
Header, _Hierarch
@group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU,
GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU,
_TableBaseHDU, _TempHDU, _ValidHDU
@group Table-related Classes: ColDefs, Column, FITS_rec, _FormatP,
_FormatX, _VLF
"""
"""
Do you mean: "Profits"?
- Google Search, when asked for "PyFITS"
"""
import re, os, tempfile, exceptions
import operator
import __builtin__
import urllib
import tempfile
import gzip
import zipfile
import numarray as num
import numarray.generic as ndarray
import numarray.strings as chararray
import numarray.records as rec
import numarray.objects as objects
import numarray.memmap as Memmap
from string import maketrans
import copy
import signal
import threading
# Module variables
_blockLen = 2880 # the FITS block size in bytes; files are padded to a multiple of this
_python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes
_memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} # memmap open modes
TRUE = True # deprecated alias, kept for backward compatibility
FALSE = False # deprecated alias, kept for backward compatibility
_INDENT = " " # indentation unit used by _ErrList.__str__
DELAYED = "delayed" # used for lazy instantiation of data
ASCIITNULL = 0 # value for ASCII table cell with value = TNULL
# this can be reset by user.
_isInt = "isinstance(val, (int, long))" # expression string, intended for eval() elsewhere
# Functions
def _padLength(stringLen):
    """Return the number of padding bytes needed to reach the next FITS block boundary."""
    remainder = stringLen % _blockLen
    if remainder == 0:
        return 0
    return _blockLen - remainder
def _tmpName(input):
"""Create a temporary file name which should not already exist.
Use the directory of the input file and the base name of the mktemp()
output.
"""
dirName = os.path.dirname(input)
if dirName != '':
dirName += '/'
_name = dirName + os.path.basename(tempfile.mktemp())
if not os.path.exists(_name):
return _name
else:
raise _name, "exists"
class VerifyError(exceptions.Exception):
    """Verify exception class, raised when output verification fails."""
    pass
class _ErrList(list):
    """Verification errors list class. It has a nested list structure
    constructed by error messages generated by verifications at different
    class levels.
    """
    def __init__(self, val, unit="Element"):
        # `unit` labels each nested group (e.g. HDU, Card) when rendered.
        list.__init__(self, val)
        self.unit = unit
    def __str__(self, tab=0):
        """Print out nested structure with corresponding indentations.

        A tricky use of __str__, since normally __str__ has only one
        argument.
        """
        result = ""
        element = 0
        # go through the list twice, first time print out all top level messages
        for item in self:
            if not isinstance(item, _ErrList):
                result += _INDENT*tab+"%s\n" % item
        # second time go through the next level items, each of the next level
        # must present, even it has nothing.
        for item in self:
            if isinstance(item, _ErrList):
                _dummy = item.__str__(tab=tab+1)
                # print out a message only if there is something
                if _dummy.strip():
                    if self.unit:
                        result += _INDENT*tab+"%s %s:\n" % (self.unit, element)
                    result += _dummy
                element += 1
        return result
class _Verify:
    """Shared methods for verification."""
    def run_option(self, option="warn", err_text="", fix_text="Fixed.", fix = "pass", fixable=1):
        """Execute the verification with selected option.

        `fix` is a string of Python source that is exec'd to apply the
        repair; returns the (possibly annotated) error text.
        """
        _text = err_text
        if not fixable:
            option = 'unfixable'
        if option in ['warn', 'exception']:
            #raise VerifyError, _text
            #elif option == 'warn':
            pass
        # fix the value
        elif option == 'unfixable':
            _text = "Unfixable error: %s" % _text
        else:
            exec(fix)
            #if option != 'silentfix':
            _text += ' ' + fix_text
        return _text
    def verify (self, option='warn'):
        """Wrapper for _verify.

        option: one of 'fix', 'silentfix', 'ignore', 'warn', 'exception'.
        """
        _option = option.lower()
        if _option not in ['fix', 'silentfix', 'ignore', 'warn', 'exception']:
            raise ValueError, 'Option %s not recognized.' % option
        if (_option == "ignore"):
            return
        x = str(self._verify(_option)).rstrip()
        # an 'Unfixable' marker anywhere in the report aborts a fix run
        if _option in ['fix', 'silentfix'] and x.find('Unfixable') != -1:
            raise VerifyError, '\n'+x
        if (_option != "silentfix") and x:
            print 'Output verification result:'
            print x
        if _option == 'exception' and x:
            raise VerifyError
def _pad(input):
    """Pad blank space to the input string to be a multiple of 80 (Card.length)."""
    _len = len(input)
    if _len == Card.length:
        return input
    elif _len > Card.length:
        # multi-card image: pad only the trailing partial card, if any
        strlen = _len % Card.length
        if strlen == 0:
            return input
        else:
            return input + ' ' * (Card.length-strlen)
    # minimum length is 80
    else:
        strlen = _len % Card.length
        return input + ' ' * (Card.length-strlen)
def _floatFormat(value):
"""Format the floating number to make sure it gets the decimal point."""
valueStr = "%.16G" % value
if "." not in valueStr and "E" not in valueStr:
valueStr += ".0"
return valueStr
class Undefined:
    """Undefined value placeholder for FITS cards that carry no value."""
    pass
class Delayed:
    """Delayed file-reading data."""
    def __init__(self, hdu=None, field=None):
        # Remember where the data lives so it can be read lazily later.
        self.hdu = hdu
        self.field = field
# translation tables for floating value strings:
_fix_table = maketrans('de', 'DE')   # lower-case exponent letters -> FITS-standard D/E
_fix_table2 = maketrans('dD', 'eE')  # D/d exponent letters -> Python-parsable e/E
class Card(_Verify):
    """A single FITS header card: an 80-character keyword/value/comment record."""
    # string length of a card
    length = 80
    # String for a FITS standard compliant (FSC) keyword.
    _keywd_FSC = r'[A-Z0-9_-]* *$'
    _keywd_FSC_RE = re.compile(_keywd_FSC)
    # A number sub-string, either an integer or a float in fixed or
    # scientific notation. One for FSC and one for non-FSC (NFSC) format:
    # NFSC allows lower case of DE for exponent, allows space between sign,
    # digits, exponent sign, and exponents
    _digits_FSC = r'(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?'
    _digits_NFSC = r'(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?'
    _numr_FSC = r'[+-]?' + _digits_FSC
    _numr_NFSC = r'[+-]? *' + _digits_NFSC
    # This regex helps delete leading zeros from numbers, otherwise
    # Python might evaluate them as octal values.
    _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')')
    _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')')
    # FSC commentary card string which must contain printable ASCII characters.
    _ASCII_text = r'[ -~]*$'
    _comment_FSC_RE = re.compile(_ASCII_text)
    # Checks for a valid value/comment string. It returns a match object
    # for a valid value/comment string.
    # The valu group will return a match if a FITS string, boolean,
    # number, or complex value is found, otherwise it will return
    # None, meaning the keyword is undefined. The comment field will
    # return a match if the comment separator is found, though the
    # comment maybe an empty string.
    _value_FSC_RE = re.compile(
        r'(?P<valu_field> *'
            r'(?P<valu>'
                # The <strg> regex is not correct for all cases, but
                # it comes pretty darn close. It appears to find the
                # end of a string rather well, but will accept
                # strings with an odd number of single quotes,
                # instead of issuing an error. The FITS standard
                # appears vague on this issue and only states that a
                # string should not end with two single quotes,
                # whereas it should not end with an even number of
                # quotes to be precise.
                #
                # Note that a non-greedy match is done for a string,
                # since a greedy match will find a single-quote after
                # the comment separator resulting in an incorrect
                # match.
                r'\'(?P<strg>([ -~]+?|\'\'|)) *?\'(?=$|/| )|'
                r'(?P<bool>[FT])|'
                r'(?P<numr>' + _numr_FSC + ')|'
                r'(?P<cplx>\( *'
                    r'(?P<real>' + _numr_FSC + ') *, *(?P<imag>' + _numr_FSC + ') *\))'
            r')? *)'
        r'(?P<comm_field>'
            r'(?P<sepr>/ *)'
            r'(?P<comm>[!-~][ -~]*)?'
        r')?$')
    _value_NFSC_RE = re.compile(
        r'(?P<valu_field> *'
            r'(?P<valu>'
                r'\'(?P<strg>([ -~]+?|\'\'|)) *?\'(?=$|/| )|'
                r'(?P<bool>[FT])|'
                r'(?P<numr>' + _numr_NFSC + ')|'
                r'(?P<cplx>\( *'
                    r'(?P<real>' + _numr_NFSC + ') *, *(?P<imag>' + _numr_NFSC + ') *\))'
            r')? *)'
        r'(?P<comm_field>'
            r'(?P<sepr>/ *)'
            r'(?P<comm>.*)'
        r')?$')
    # keys of commentary cards
    _commentaryKeys = ['', 'COMMENT', 'HISTORY']
    def __init__(self, key='', value='', comment=''):
        """Construct a card from key, value, and (optionally) comment.

        Any specified arguments, except defaults, must be compliant to
        FITS standard.

        key: keyword name, default=''.
        value: keyword value, default=''.
        comment: comment, default=''.
        """
        if key != '' or value != '' or comment != '':
            self._setkey(key)
            self._setvalue(value)
            self._setcomment(comment)
            # for commentary cards, value can only be strings and there
            # is no comment
            if self.key in Card._commentaryKeys:
                if not isinstance(self.value, str):
                    raise ValueError, 'Value in a commentary card must be a string'
        else:
            # completely blank card: store the 80-space image directly
            self.__dict__['_cardimage'] = ' '*80
    def __repr__(self):
        # the raw 80-column (or multi-card) image is the representation
        return self._cardimage
    def __getattr__(self, name):
        """Instantiate the specified attribute object lazily on first access."""
        if name == '_cardimage':
            self.ascardimage()
        elif name == 'key':
            self._extractKey()
        elif name in ['value', 'comment']:
            self._extractValueComment(name)
        else:
            raise AttributeError, name
        return getattr(self, name)
    def _setkey(self, val):
        """Set the key attribute, surrogate for the __setattr__ key case."""
        if isinstance(val, str):
            val = val.strip()
            if len(val) <= 8:
                val = val.upper()
                if val == 'END':
                    raise ValueError, "keyword 'END' not allowed"
                self._checkKey(val)
            else:
                # long keywords are only legal via the HIERARCH convention
                if val[:8].upper() == 'HIERARCH':
                    val = val[8:].strip()
                    self.__class__ = _Hierarch
                else:
                    raise ValueError, 'keyword name %s is too long (> 8), use HIERARCH.' % val
        else:
            raise ValueError, 'keyword name %s is not a string' % val
        self.__dict__['key'] = val
    def _setvalue(self, val):
        """Set the value attribute."""
        if isinstance(val, (str, int, long, float, complex, bool, Undefined)):
            if isinstance(val, str):
                self._checkText(val)
            # mark the value as user-modified so formatting is recomputed
            self.__dict__['_valueModified'] = 1
        else:
            raise ValueError, 'Illegal value %s' % str(val)
        self.__dict__['value'] = val
    def _setcomment(self, val):
        """Set the comment attribute."""
        if isinstance(val,str):
            self._checkText(val)
        else:
            if val is not None:
                raise ValueError, 'comment %s is not a string' % val
        self.__dict__['comment'] = val
    def __setattr__(self, name, val):
        # route attribute writes through the validating setters
        if name == 'key':
            raise SyntaxError, 'keyword name cannot be reset.'
        elif name == 'value':
            self._setvalue(val)
        elif name == 'comment':
            self._setcomment(val)
        else:
            raise AttributeError, name
        # When an attribute (value or comment) is changed, reconstruct
        # the card image.
        self._ascardimage()
    def ascardimage(self, option='silentfix'):
        """Generate a (new) card image from the attributes: key, value,
        and comment, or from raw string.

        option: verification option, default=silentfix.
        """
        # Only if the card image already exist (to avoid infinite loop),
        # fix it first.
        if self.__dict__.has_key('_cardimage'):
            self._check(option)
        self._ascardimage()
        return self.__dict__['_cardimage']
    def _ascardimage(self):
        """Generate a (new) card image from the attributes: key, value,
        and comment. Core code for ascardimage.
        """
        # keyword string
        if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'):
            if isinstance(self, _Hierarch):
                keyStr = 'HIERARCH %s ' % self.key
            else:
                keyStr = '%-8s' % self.key
        else:
            keyStr = ' '*8
        # value string
        # check if both value and _cardimage attributes are missing,
        # to avoid infinite loops
        if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')):
            valStr = ''
        # string value should occupy at least 8 columns, unless it is
        # a null string
        elif isinstance(self.value, str):
            if self.value == '':
                valStr = "''"
            else:
                _expValStr = self.value.replace("'","''")
                valStr = "'%-8s'" % _expValStr
                valStr = '%-20s' % valStr
        # must be before int checking since bool is also int
        elif isinstance(self.value , bool):
            valStr = '%20s' % `self.value`[0]
        elif isinstance(self.value , (int, long)):
            valStr = '%20d' % self.value
        # XXX need to consider platform dependence of the format (e.g. E-009 vs. E-09)
        elif isinstance(self.value, float):
            if self._valueModified:
                valStr = '%20s' % _floatFormat(self.value)
            else:
                # value unchanged: re-use the string parsed from the file
                valStr = '%20s' % self._valuestring
        elif isinstance(self.value, complex):
            if self._valueModified:
                _tmp = '(' + _floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag) + ')'
                valStr = '%20s' % _tmp
            else:
                valStr = '%20s' % self._valuestring
        elif isinstance(self.value, Undefined):
            valStr = ''
        # conserve space for HIERARCH cards
        if isinstance(self, _Hierarch):
            valStr = valStr.strip()
        # comment string
        if keyStr.strip() in Card._commentaryKeys: # do NOT use self.key
            commentStr = ''
        elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'):
            if self.comment in [None, '']:
                commentStr = ''
            else:
                commentStr = ' / ' + self.comment
        else:
            commentStr = ''
        # equal sign string
        eqStr = '= '
        if keyStr.strip() in Card._commentaryKeys: # not using self.key
            eqStr = ''
            if self.__dict__.has_key('value'):
                valStr = str(self.value)
        # put all parts together
        output = keyStr + eqStr + valStr + commentStr
        # need this in case card-with-continue's value is shortened
        if not isinstance(self, _Hierarch):
            self.__class__ = Card
        else:
            # does not support CONTINUE for HIERARCH
            if len(keyStr + eqStr + valStr) > Card.length:
                raise ValueError, "The keyword %s with its value is too long." % self.key
        if len(output) <= Card.length:
            output = "%-80s" % output
        # longstring case (CONTINUE card)
        else:
            # try not to use CONTINUE if the string value can fit in one line.
            # Instead, just truncate the comment
            if isinstance(self.value, str) and len(valStr) > (Card.length-10):
                self.__class__ = _Card_with_continue
                output = self._breakup_strings()
            else:
                print 'card is too long, comment is truncated.'
                output = output[:Card.length]
        self.__dict__['_cardimage'] = output
    def _checkText(self, val):
        """Verify val to be printable ASCII text; record the error and raise otherwise."""
        if Card._comment_FSC_RE.match(val) is None:
            self.__dict__['_err_text'] = 'Unprintable string %s' % repr(val)
            self.__dict__['_fixable'] = 0
            raise ValueError, self._err_text
    def _checkKey(self, val):
        """Verify the keyword to be FITS standard."""
        # use repr (not str) in case of control character
        if Card._keywd_FSC_RE.match(val) is None:
            self.__dict__['_err_text'] = 'Illegal keyword name %s' % repr(val)
            self.__dict__['_fixable'] = 0
            raise ValueError, self._err_text
    def _extractKey(self):
        """Returns the keyword name parsed from the card image."""
        head = self._getKeyString()
        if isinstance(self, _Hierarch):
            self.__dict__['key'] = head.strip()
        else:
            self.__dict__['key'] = head.strip().upper()
    def _extractValueComment(self, name):
        """Extract the keyword value or comment from the card image.

        name: either 'value' or 'comment' -- selects which attribute to fill.
        """
        # for commentary cards, no need to parse further
        if self.key in Card._commentaryKeys:
            self.__dict__['value'] = self._cardimage[8:].rstrip()
            self.__dict__['comment'] = ''
            return
        valu = self._check(option='parse')
        if name == 'value':
            if valu is None:
                raise ValueError, "Unparsable card, fix it first with .verify('fix')."
            if valu.group('bool') != None:
                _val = valu.group('bool')=='T'
            elif valu.group('strg') != None:
                # un-double the escaped single quotes
                _val = re.sub("''", "'", valu.group('strg'))
            elif valu.group('numr') != None:
                # Check for numbers with leading 0s.
                numr = Card._number_NFSC_RE.match(valu.group('numr'))
                _digt = numr.group('digt').translate(_fix_table2, ' ')
                if numr.group('sign') == None:
                    _val = eval(_digt)
                else:
                    _val = eval(numr.group('sign')+_digt)
            elif valu.group('cplx') != None:
                # Check for numbers with leading 0s.
                real = Card._number_NFSC_RE.match(valu.group('real'))
                _rdigt = real.group('digt').translate(_fix_table2, ' ')
                if real.group('sign') == None:
                    _val = eval(_rdigt)
                else:
                    _val = eval(real.group('sign')+_rdigt)
                imag = Card._number_NFSC_RE.match(valu.group('imag'))
                _idigt = imag.group('digt').translate(_fix_table2, ' ')
                if imag.group('sign') == None:
                    _val += eval(_idigt)*1j
                else:
                    _val += eval(imag.group('sign') + _idigt)*1j
            else:
                # NOTE(review): UNDEFINED is defined elsewhere in this module,
                # presumably an Undefined() sentinel -- confirm.
                _val = UNDEFINED
            self.__dict__['value'] = _val
            if '_valuestring' not in self.__dict__:
                self.__dict__['_valuestring'] = valu.group('valu')
            if '_valueModified' not in self.__dict__:
                self.__dict__['_valueModified'] = 0
        elif name == 'comment':
            self.__dict__['comment'] = ''
            if valu is not None:
                _comm = valu.group('comm')
                if isinstance(_comm, str):
                    self.__dict__['comment'] = _comm.rstrip()
    def _fixValue(self, input):
        """Fix the card image for fixable non-standard compliance.

        input: a match object from the NFSC value regex, or None when the
        value string could not be parsed at all.
        """
        _valStr = None
        # for the unparsable case
        if input is None:
            _tmp = self._getValueCommentString()
            try:
                slashLoc = _tmp.index("/")
                self.__dict__['value'] = _tmp[:slashLoc].strip()
                self.__dict__['comment'] = _tmp[slashLoc+1:].strip()
            except:
                # no comment separator: the whole string is the value
                self.__dict__['value'] = _tmp.strip()
        elif input.group('numr') != None:
            numr = Card._number_NFSC_RE.match(input.group('numr'))
            _valStr = numr.group('digt').translate(_fix_table, ' ')
            if numr.group('sign') is not None:
                _valStr = numr.group('sign')+_valStr
        elif input.group('cplx') != None:
            real = Card._number_NFSC_RE.match(input.group('real'))
            _realStr = real.group('digt').translate(_fix_table, ' ')
            if real.group('sign') is not None:
                _realStr = real.group('sign')+_realStr
            imag = Card._number_NFSC_RE.match(input.group('imag'))
            _imagStr = imag.group('digt').translate(_fix_table, ' ')
            if imag.group('sign') is not None:
                _imagStr = imag.group('sign') + _imagStr
            _valStr = '(' + _realStr + ', ' + _imagStr + ')'
        self.__dict__['_valuestring'] = _valStr
        self._ascardimage()
    def _locateEq(self):
        """Locate the equal sign in the card image before column 10 and
        return its location. It returns None if equal sign is not present,
        or it is a commentary card.
        """
        # no equal sign for commentary cards (i.e. part of the string value)
        _key = self._cardimage[:8].strip().upper()
        if _key in Card._commentaryKeys:
            eqLoc = None
        else:
            if _key == 'HIERARCH':
                _limit = Card.length
            else:
                _limit = 10
            try:
                eqLoc = self._cardimage[:_limit].index("=")
            except:
                eqLoc = None
        return eqLoc
    def _getKeyString(self):
        """Locate the equal sign in the card image and return the string
        before the equal sign. If there is no equal sign, return the
        string before column 9.
        """
        eqLoc = self._locateEq()
        if eqLoc is None:
            eqLoc = 8
        _start = 0
        if self._cardimage[:8].upper() == 'HIERARCH':
            _start = 8
            self.__class__ = _Hierarch
        return self._cardimage[_start:eqLoc]
    def _getValueCommentString(self):
        """Locate the equal sign in the card image and return the string
        after the equal sign. If there is no equal sign, return the
        string after column 8.
        """
        eqLoc = self._locateEq()
        if eqLoc is None:
            eqLoc = 7
        return self._cardimage[eqLoc+1:]
    def _check(self, option='ignore'):
        """Verify the card image with the specified option.

        For option='parse' returns the NFSC value match object (or None);
        otherwise runs full FSC verification, fixing or raising per option.
        """
        self.__dict__['_err_text'] = ''
        self.__dict__['_fix_text'] = ''
        self.__dict__['_fixable'] = 1
        if option == 'ignore':
            return
        elif option == 'parse':
            # check the value only, no need to check key and comment for 'parse'
            result = Card._value_NFSC_RE.match(self._getValueCommentString())
            # if not parsable (i.e. everything else) result = None
            return result
        else:
            # verify the equal sign position
            if self.key not in Card._commentaryKeys and self._cardimage.find('=') != 8:
                if option in ['exception', 'warn']:
                    self.__dict__['_err_text'] = 'Card image is not FITS standard (equal sign not at column 8).'
                    # NOTE(review): 3-expression raise -- the third item is
                    # treated as a traceback in Python 2; looks unintended.
                    raise ValueError, self._err_text, '\n%s' % self._cardimage
                elif option in ['fix', 'silentfix']:
                    result = self._check('parse')
                    self._fixValue(result)
                    if option == 'fix':
                        self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key
            # verify the key, it is never fixable
            # always fix silently the case where "=" is before column 9,
            # since there is no way to communicate back to the _keylist.
            self._checkKey(self.key)
            # verify the value, it may be fixable
            result = Card._value_FSC_RE.match(self._getValueCommentString())
            if result is not None or self.key in Card._commentaryKeys:
                return result
            else:
                if option in ['fix', 'silentfix']:
                    result = self._check('parse')
                    self._fixValue(result)
                    if option == 'fix':
                        self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key
                else:
                    self.__dict__['_err_text'] = 'Card image is not FITS standard (unparsable value string).'
                    raise ValueError, self._err_text + '\n%s' % self._cardimage
            # verify the comment (string), it is never fixable
            if result is not None:
                _str = result.group('comm')
                if _str is not None:
                    self._checkText(_str)
    def fromstring(self, input):
        """Construct a Card object from a (raw) string. It will pad the
        string if it is not the length of a card image (80 columns).

        If the card image is longer than 80, assume it contains CONTINUE
        card(s).
        """
        self.__dict__['_cardimage'] = _pad(input)
        if self._cardimage[:8].upper() == 'HIERARCH':
            self.__class__ = _Hierarch
        # for card image longer than 80, assume it contains CONTINUE card(s).
        elif len(self._cardimage) > Card.length:
            self.__class__ = _Card_with_continue
        # remove the key/value/comment attributes, some of them may not exist
        # (they will be lazily re-derived from the new image by __getattr__)
        for name in ['key', 'value', 'comment', '_valueModified']:
            if self.__dict__.has_key(name):
                delattr(self, name)
        return self
def _ncards(self):
return len(self._cardimage) / Card.length
    def _verify(self, option='warn'):
        """Card class verification method."""
        _err = _ErrList([])
        try:
            self._check(option)
        except:
            # errors are reported through _err_text/_fix_text, not raised here
            pass
        _err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable))
        return _err
class _Hierarch(Card):
    """Cards beginning with HIERARCH, which allows keyword names longer
    than 8 characters.
    """
    def _verify(self, option='warn'):
        """No verification (for now)."""
        return _ErrList([])
class _Card_with_continue(Card):
    """Cards having more than one 80-char "physical" cards, the cards after
    the first one must start with CONTINUE and the whole card must have
    string value.
    """
    def __str__(self):
        """Format a list of cards into a printable string."""
        kard = self._cardimage
        output = ''
        # emit each 80-column physical card on its own line
        for i in range(len(kard)/80):
            output += kard[i*80:(i+1)*80] + '\n'
        return output[:-1]
    def _extractValueComment(self, name):
        """Extract the keyword value or comment from the multi-card image."""
        longstring = ''
        ncards = self._ncards()
        for i in range(ncards):
            # take each 80-char card as a regular card and use its methods.
            _card = Card().fromstring(self._cardimage[i*80:(i+1)*80])
            if i > 0 and _card.key != 'CONTINUE':
                raise ValueError, 'Long card image must have CONTINUE cards after the first card.'
            if not isinstance(_card.value, str):
                raise ValueError, 'Cards with CONTINUE must have string value.'
            if name == 'value':
                _val = re.sub("''", "'", _card.value).rstrip()
                # drop the ending "&" (the continuation marker)
                # NOTE(review): _val[-1] raises IndexError on an empty
                # segment -- confirm empty CONTINUE values cannot occur.
                if _val[-1] == '&':
                    _val = _val[:-1]
                longstring = longstring + _val
            elif name == 'comment':
                _comm = _card.comment
                if isinstance(_comm, str) and _comm != '':
                    longstring = longstring + _comm.rstrip() + ' '
        self.__dict__[name] = longstring.rstrip()
    def _breakup_strings(self):
        """Break up long string value/comment into CONTINUE cards.

        This is a primitive implementation, it will put the value
        string in one block and the comment string in another.
        Also, it does not break at the blank space between words.
        So it may not look pretty.
        """
        val_len = 67
        comm_len = 64
        output = ''
        # do the value string
        valfmt = "'%-s&'"
        val = self.value.replace("'", "''")
        val_list = self._words_group(val, val_len)
        for i in range(len(val_list)):
            if i == 0:
                headstr = "%-8s= " % self.key
            else:
                headstr = "CONTINUE "
            valstr = valfmt % val_list[i]
            output = output + '%-80s' % (headstr + valstr)
        # do the comment string
        if self.comment is None:
            comm = ''
        else:
            comm = self.comment
        commfmt = "%-s"
        if not comm == '':
            # NOTE(review): `nlines` is computed but never used below.
            nlines = len(comm) / comm_len + 1
            comm_list = self._words_group(comm, comm_len)
            for i in comm_list:
                commstr = "CONTINUE '&' / " + commfmt % i
                output = output + '%-80s' % commstr
        return output
    def _words_group(self, input, strlen):
        """Split a long string into parts where each part is no longer than
        strlen and no word is cut into two pieces. But if there is one
        single word which is longer than strlen, then it will be split in
        the middle of the word.
        """
        # NOTE(review): `list` shadows the builtin within this method.
        list = []
        _nblanks = input.count(' ')
        # Python 2 integer division intended here
        nmax = max(_nblanks, len(input)/strlen+1)
        arr = chararray.array(input+' ', itemsize=1)
        # locations of the blanks
        blank_loc = num.nonzero(arr == ' ')[0]
        offset = 0
        xoffset = 0
        for i in range(nmax):
            try:
                loc = num.nonzero(blank_loc >= strlen+offset)[0][0]
                offset = blank_loc[loc-1] + 1
                if loc == 0:
                    offset = -1
            except:
                # no blank beyond this point: take the rest of the string
                offset = len(input)
            # check for one word longer than strlen, break in the middle
            if offset <= xoffset:
                offset = xoffset + strlen
            # collect the pieces in a list
            tmp = input[xoffset:offset]
            list.append(tmp)
            if len(input) == offset:
                break
            xoffset = offset
        return list
class Header:
    """FITS header class."""
    def __init__(self, cards=[]):
        """Construct a Header from a CardList.

        cards: A list of Cards, default=[].
        """
        # NOTE(review): mutable default argument; safe only if CardList
        # copies rather than aliases it -- confirm against CardList.
        # decide which kind of header it belongs to
        try:
            if cards[0].key == 'SIMPLE':
                if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True:
                    self._hdutype = GroupsHDU
                elif cards[0].value == True:
                    self._hdutype = PrimaryHDU
                else:
                    self._hdutype = _ValidHDU
            elif cards[0].key == 'XTENSION':
                xtension = cards[0].value.rstrip()
                if xtension == 'TABLE':
                    self._hdutype = TableHDU
                elif xtension == 'IMAGE':
                    self._hdutype = ImageHDU
                elif xtension in ('BINTABLE', 'A3DTABLE'):
                    self._hdutype = BinTableHDU
                else:
                    self._hdutype = _ExtensionHDU
            else:
                self._hdutype = _ValidHDU
        except:
            # empty card list or malformed first card
            self._hdutype = _CorruptedHDU
        # populate the cardlist
        self.ascard = CardList(cards)
    def __getitem__ (self, key):
        """Get a header keyword value."""
        return self.ascard[key].value
    def __setitem__ (self, key, value):
        """Set a header keyword value."""
        self.ascard[key].value = value
        # record that the header was modified
        self._mod = 1
    def __delitem__(self, key):
        """Delete card(s) with the name 'key'."""
        # delete ALL cards with the same keyword name
        if isinstance(key, str):
            while 1:
                try:
                    del self.ascard[key]
                    self._mod = 1
                except:
                    # no more cards with that name
                    return
        # for integer key only delete once
        else:
            del self.ascard[key]
            self._mod = 1
    def __str__(self):
        # delegate rendering to the underlying CardList
        return self.ascard.__str__()
    def ascardlist(self):
        """Returns a CardList."""
        return self.ascard
    def items(self):
        """Return a list of all keyword-value pairs from the CardList."""
        pairs = []
        for card in self.ascard:
            pairs.append((card.key, card.value))
        return pairs
    def has_key(self, key):
        """Check for existence of a keyword. Returns 1 if found, otherwise, 0.

        key: keyword name. If given an index, always returns 0.
        """
        try:
            key = key.strip().upper()
            if key[:8] == 'HIERARCH':
                key = key[8:].strip()
            _index = self.ascard._keylist.index(key)
            return 1
        except:
            # non-string key or keyword not present
            return 0
    def rename_key(self, oldkey, newkey, force=0):
        """Rename a card's keyword in the header.

        oldkey: old keyword, can be a name or index.
        newkey: new keyword, must be a string.
        force: if new key name already exists, force to have duplicate name.
        """
        oldkey = oldkey.strip().upper()
        newkey = newkey.strip().upper()
        if newkey == 'CONTINUE':
            raise ValueError, 'Can not rename to CONTINUE'
        if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys:
            if not (newkey in Card._commentaryKeys and oldkey in Card._commentaryKeys):
                raise ValueError, 'Regular and commentary keys can not be renamed to each other.'
        elif (force == 0) and (newkey in self.ascard._keylist):
            raise ValueError, 'Intended keyword %s already exists in header.' % newkey
        _index = self.ascard.index_of(oldkey)
        _comment = self.ascard[_index].comment
        _value = self.ascard[_index].value
        # replace in place so the card keeps its position in the list
        self.ascard[_index] = Card(newkey, _value, _comment)
        # self.ascard[_index].__dict__['key']=newkey
        # self.ascard[_index].ascardimage()
        # self.ascard._keylist[_index] = newkey
    def get(self, key, default=None):
        """Get a keyword value from the CardList.

        If no keyword is found, return the default value.

        key: keyword name or index
        default: if no keyword is found, the value to be returned.
        """
        try:
            return self[key]
        except:
            return default
def update(self, key, value, comment=None, before=None, after=None):
"""Update one header card."""
"""
If the keyword already exists, it's value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no "before"
or "after" is specified, it will be appended at the end.
key: keyword name
value: keyword value (to be used for updating)
comment: keyword comment (to be used for updating), default=None.
before: name of the keyword, or index of the Card before which
the new card will be placed. The argument `before' takes
precedence over `after' if both specified. default=None.
after: name of the keyword, or index of the Card after which
the new card will be placed. default=None.
"""
if self.has_key(key):
j = self.ascard.index_of(key)
if comment is not None:
_comment = comment
else:
_comment = self.ascard[j].comment
self.ascard[j] = Card(key, value, _comment)
elif before != None or after != None:
_card = Card(key, value, comment)
self.ascard._pos_insert(_card, before=before, after=after)
else:
self.ascard.append(Card(key, value, comment))
self._mod = 1
def add_history(self, value, before=None, after=None):
"""Add a HISTORY card.
value: History text to be added.
before: [same as in update()]
after: [same as in update()]
"""
self._add_commentary('history', value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""Add a COMMENT card.
value: Comment text to be added.
before: [same as in update()]
after: [same as in update()]
"""
self._add_commentary('comment', value, before=before, after=after)
def add_blank(self, value='', before=None, after=None):
"""Add a blank card.
value: Text to be added.
before: [same as in update()]
after: [same as in update()]
"""
self._add_commentary(' ', value, before=before, after=after)
def get_history(self):
"""Get all histories as a list of string texts."""
output = []
for _card in self.ascardlist():
if _card.key == 'HISTORY':
output.append(_card.value)
return output
def get_comment(self):
"""Get all comments as a list of string texts."""
output = []
for _card in self.ascardlist():
if _card.key == 'COMMENT':
output.append(_card.value)
return output
def _add_commentary(self, key, value, before=None, after=None):
"""Add a commentary card.
If before and after are None, add to the last occurrence of
cards of the same name (except blank card). If there is no card
(or blank card), append at the end.
"""
new_card = Card(key, value)
if before != None or after != None:
self.ascard._pos_insert(new_card, before=before, after=after)
else:
if key[0] == ' ':
useblanks = new_card._cardimage != ' '*80
self.ascard.append(new_card, useblanks=useblanks, bottom=1)
else:
try:
_last = self.ascard.index_of(key, backward=1)
self.ascard.insert(_last+1, new_card)
except:
self.ascard.append(new_card, bottom=1)
self._mod = 1
def copy(self):
"""Make a copy of the Header."""
tmp = Header(self.ascard.copy())
# also copy the class
tmp._hdutype = self._hdutype
return tmp
def _strip(self):
"""Strip cards specific to a certain kind of header.
Strip cards like SIMPLE, BITPIX, etc. so the rest of the header
can be used to reconstruct another kind of header.
"""
try:
# have both SIMPLE and XTENSION to accomodate Extension
# and Corrupted cases
del self['SIMPLE']
del self['XTENSION']
del self['BITPIX']
_naxis = self['NAXIS']
if issubclass(self._hdutype, _TableBaseHDU):
_tfields = self['TFIELDS']
del self['NAXIS']
for i in range(_naxis):
del self['NAXIS'+`i+1`]
if issubclass(self._hdutype, PrimaryHDU):
del self['EXTEND']
del self['PCOUNT']
del self['GCOUNT']
if issubclass(self._hdutype, PrimaryHDU):
del self['GROUPS']
if issubclass(self._hdutype, _ImageBaseHDU):
del self['BSCALE']
del self['BZERO']
if issubclass(self._hdutype, _TableBaseHDU):
del self['TFIELDS']
for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']:
for i in range(_tfields):
del self[name+`i+1`]
if issubclass(self._hdutype, BinTableHDU):
for name in ['TDISP', 'TDIM', 'THEAP']:
for i in range(_tfields):
del self[name+`i+1`]
if issubclass(self._hdutype == TableHDU):
for i in range(_tfields):
del self['TBCOL'+`i+1`]
except:
pass
class CardList(list):
    """FITS header card list class.

    A list of Card objects with keyword-based indexing.  A parallel
    list of upper-cased keywords (_keylist) is maintained for lookup.
    """

    def __init__(self, cards=None, keylist=None):
        """Construct the CardList object from a list of Cards.

        cards: A list of Cards, default=[].
        keylist: the (upper-cased) keyword list matching `cards`; built
                 from the cards when not supplied.
        """
        # Avoid the shared-mutable-default pitfall of `cards=[]`.
        if cards is None:
            cards = []
        list.__init__(self, cards)
        self._cards = cards
        # if the key list is not supplied (as in reading in the FITS file),
        # it will be constructed from the card list.
        if keylist is None:
            self._keylist = [k.upper() for k in self.keys()]
        else:
            self._keylist = keylist
        # find out how many blank cards are *directly* before the END card
        self._blanks = 0
        self.count_blanks()

    def __getitem__(self, key):
        """Get a Card by indexing or by the keyword name."""
        _key = self.index_of(key)
        return super(CardList, self).__getitem__(_key)

    def __getslice__(self, start, end):
        # Python 2 slicing hook; keep the keylist in sync with the slice.
        _cards = super(CardList, self).__getslice__(start, end)
        result = CardList(_cards, self._keylist[start:end])
        return result

    def __setitem__(self, key, value):
        """Set a Card by indexing or by the keyword name."""
        if isinstance(value, Card):
            _key = self.index_of(key)
            # only set if the value is different from the old one
            if str(self[_key]) != str(value):
                super(CardList, self).__setitem__(_key, value)
                self._keylist[_key] = value.key.upper()
                self.count_blanks()
                self._mod = 1
        else:
            raise SyntaxError("%s is not a Card" % str(value))

    def __delitem__(self, key):
        """Delete a Card from the CardList."""
        _key = self.index_of(key)
        super(CardList, self).__delitem__(_key)
        del self._keylist[_key]  # update the keylist
        self.count_blanks()
        self._mod = 1

    def count_blanks(self):
        """Find out how many blank cards are *directly* before the END card."""
        # NOTE(review): if every card is blank (or the list is empty) the
        # loop finds no non-blank card and _blanks keeps its previous
        # value -- confirm this is intended.
        for i in range(1, len(self)):
            if str(self[-i]) != ' '*Card.length:
                self._blanks = i - 1
                break

    def append(self, card, useblanks=1, bottom=0):
        """Append a Card to the CardList.

        card: The Card to be appended.
        useblanks: Use any *extra* blank cards? default=1.
                   If useblanks != 0, and if there are blank cards directly
                   before END, it will use this space first, instead of
                   appending after these blank cards, so the total space
                   will not increase (default).  When useblanks == 0, the
                   card will be appended at the end, even if there are
                   blank cards in front of END.
        bottom: If =0 (default) the card will be appended after the last
                non-commentary card.  If =1, the card will be appended
                after the last non-blank card.
        """
        if isinstance(card, Card):
            nc = len(self) - self._blanks
            i = nc - 1
            if not bottom:
                for i in range(nc-1, -1, -1):  # locate last non-commentary card
                    if self[i].key not in Card._commentaryKeys:
                        break
            super(CardList, self).insert(i+1, card)
            self._keylist.insert(i+1, card.key.upper())
            if useblanks:
                self._use_blanks(card._ncards())
            self.count_blanks()
            self._mod = 1
        else:
            raise SyntaxError("%s is not a Card" % str(card))

    def _pos_insert(self, card, before, after, useblanks=1):
        """Insert a Card to the location specified by before or after.

        The argument `before' takes precedence over `after' if both
        specified.  They can be either a keyword name or index.
        """
        if before != None:
            loc = self.index_of(before)
            self.insert(loc, card, useblanks=useblanks)
        elif after != None:
            loc = self.index_of(after)
            self.insert(loc+1, card, useblanks=useblanks)

    def insert(self, pos, card, useblanks=1):
        """Insert a Card to the CardList.

        pos: The position (index, keyword name will not be allowed) to
             insert.  The new card will be inserted before it.
        card: The Card to be inserted.
        useblanks: Use any *extra* blank cards? default=1.
                   If useblanks != 0, and if there are blank cards directly
                   before END, it will use this space first, instead of
                   appending after these blank cards, so the total space
                   will not increase (default).  When useblanks == 0, the
                   card will be appended at the end, even if there are
                   blank cards in front of END.
        """
        if isinstance(card, Card):
            super(CardList, self).insert(pos, card)
            # CONSISTENCY FIX: store the keyword upper-cased like
            # append() does; index_of() compares upper-cased keys.
            self._keylist.insert(pos, card.key.upper())
            self.count_blanks()
            if useblanks:
                self._use_blanks(card._ncards())
            self.count_blanks()
            self._mod = 1
        else:
            raise SyntaxError("%s is not a Card" % str(card))

    def _use_blanks(self, how_many):
        # Consume up to `how_many` trailing blank cards; __delitem__
        # keeps the keylist in sync.
        if self._blanks > 0:
            for i in range(min(self._blanks, how_many)):
                del self[-1]  # it also delete the keylist item

    def keys(self):
        """Return a list of all keywords from the CardList."""
        return [card.key for card in self]

    def index_of(self, key, backward=0):
        """Get the index of a keyword in the CardList.

        key: the keyword name (a string) or the index (an integer).
        backward: search the index from the END, i.e. backward? default=0.
                  If backward = 1, search from the end.
        """
        if isinstance(key, (int, long)):
            return key
        elif isinstance(key, str):
            _key = key.strip().upper()
            if _key[:8] == 'HIERARCH':
                _key = _key[8:].strip()
            _keylist = self._keylist
            if backward:
                _keylist = self._keylist[:]  # make a copy
                _keylist.reverse()
            try:
                _indx = _keylist.index(_key)
                if backward:
                    _indx = len(_keylist) - _indx - 1
                return _indx
            except:
                raise KeyError('Keyword %s not found.' % repr(key))
        else:
            raise KeyError('Illegal key data type %s' % type(key))

    def copy(self):
        """Make a (deep)copy of the CardList."""
        # round-trip each card through its string image so the copies
        # share no state with the originals
        cards = [Card('').fromstring(str(card)) for card in self]
        return CardList(cards)

    def __repr__(self):
        """Format a list of cards into a string."""
        return ''.join([repr(card) for card in self])

    def __str__(self):
        """Format a list of cards into a printable string."""
        return '\n'.join([str(card) for card in self])
# ----------------------------- HDU classes ------------------------------------
class _AllHDU:
    """Base class for all HDU (header data unit) classes."""
    # Pure marker base: carries no behavior of its own.
    pass
class _CorruptedHDU(_AllHDU):
    """A Corrupted HDU class.

    This class is used when one or more mandatory Cards are
    corrupted (unparsable), such as the 'BITPIX', 'NAXIS', or 'END' cards.
    A corrupted HDU usually means that the data size cannot be
    calculated or the 'END' card is not found.  In the case of a
    missing 'END' card, the Header may also contain the binary data(*).

    (*) In future it may be possible to decipher where the last block
    of the Header ends, but this task may be difficult when the
    extension is a TableHDU containing ASCII data.
    """

    def __init__(self, data=None, header=None):
        # File bookkeeping is filled in later by the reader.
        self._file = None
        self._offset = None
        self._datLoc = None
        self.header = header
        self.data = data
        self.name = None

    def size(self):
        """Returns the size (in bytes) of the HDU's data part."""
        # With no parsable header, everything from the data start to the
        # end of the file is attributed to this HDU.
        self._file.seek(0, 2)
        return self._file.tell() - self._datLoc

    def _summary(self):
        # one-line listing used by HDUList info output
        return "%-10s %-11s" % (self.name, "CorruptedHDU")

    def verify(self):
        # nothing can be verified on a corrupted HDU
        pass
class _ValidHDU(_AllHDU, _Verify):
    """Base class for all HDUs which are not corrupted."""
    # 0.6.5.5
    def size(self):
        """Size (in bytes) of the data portion of the HDU."""
        # FITS size formula: abs(BITPIX)/8 bytes per element, times the
        # product of the NAXISn dimensions, times GCOUNT, plus PCOUNT
        # parameter bytes per group.
        size = 0
        naxis = self.header.get('NAXIS', 0)
        if naxis > 0:
            size = 1
            for j in range(naxis):
                size = size * self.header['NAXIS'+`j+1`]
            bitpix = self.header['BITPIX']
            gcount = self.header.get('GCOUNT', 1)
            pcount = self.header.get('PCOUNT', 0)
            size = abs(bitpix) * gcount * (pcount + size) / 8
        return size
    def copy(self):
        """Make a copy of the HDU, both header and data are copied."""
        if self.data is not None:
            _data = self.data.copy()
        else:
            _data = None
        return self.__class__(data=_data, header=self.header.copy())
    def writeto(self, name, output_verify='exception', clobber=False):
        """Write the HDU to a new file.  This is a convenience method
        to provide a user easier output interface if only one HDU
        needs to be written to a file.

        name: output FITS file name to be written to.
        output_verify: output verification option, default='exception'.
        clobber: Overwrite the output file if exists, default = False.
        """
        # an extension cannot stand alone: prepend a minimal primary HDU
        if isinstance(self, _ExtensionHDU):
            hdulist = HDUList([PrimaryHDU(), self])
        elif isinstance(self, PrimaryHDU):
            hdulist = HDUList([self])
        # NOTE(review): if self is neither branch, hdulist is unbound and
        # the next line raises UnboundLocalError -- confirm callers.
        hdulist.writeto(name, output_verify, clobber=clobber)
    def _verify(self, option='warn'):
        # Collect per-card errors; `isValid` is eval'ed by req_cards with
        # `val` bound to the card value.
        _err = _ErrList([], unit='Card')
        isValid = "val in [8, 16, 32, 64, -32, -64]"
        # Verify location and value of mandatory keywords.
        # Do the first card here, instead of in the respective HDU classes,
        # so the checking is in order, in case of required cards in wrong order.
        if isinstance(self, _ExtensionHDU):
            firstkey = 'XTENSION'
            firstval = self._xtn
        else:
            firstkey = 'SIMPLE'
            firstval = True
        self.req_cards(firstkey, '== 0', '', firstval, option, _err)
        self.req_cards('BITPIX', '== 1', _isInt+" and "+isValid, 8, option, _err)
        self.req_cards('NAXIS', '== 2', _isInt+" and val >= 0 and val <= 999", 0, option, _err)
        naxis = self.header.get('NAXIS', 0)
        if naxis < 1000:
            # NAXISj must sit at card position j+2
            for j in range(3, naxis+3):
                self.req_cards('NAXIS'+`j-2`, '== '+`j`, _isInt+" and val>= 0", 1, option, _err)
        # verify each card
        for _card in self.header.ascard:
            _err.append(_card._verify(option))
        return _err
    def req_cards(self, keywd, pos, test, fix_value, option, errlist):
        """Check the existence, location, and value of a required Card.

        If pos = None, it can be anywhere.  If the card does not exist,
        the new card will have the fix_value as its value when created.
        Also check the card's value by using the "test" argument.

        keywd: the required keyword name.
        pos: position constraint, e.g. '== 2' or '>= 5' (eval'ed against
             _index), or None for "anywhere".
        test: value constraint source text (eval'ed with `val` bound),
              or '' to skip value checking.
        fix_value: value used when creating/fixing the card; None means
                   not fixable.
        option: verification option passed through to run_option.
        errlist: the _ErrList to append results to.
        """
        _err = errlist
        fix = ''
        cards = self.header.ascard
        try:
            _index = cards.index_of(keywd)
        except:
            _index = None
        fixable = fix_value is not None
        # if pos is a string, it must be of the syntax of "> n",
        # where n is an int
        # NOTE(review): insert_pos is only bound when pos starts with
        # '>=' or '==' -- other prefixes would leave it unbound below.
        if isinstance(pos, str):
            _parse = pos.split()
            if _parse[0] in ['>=', '==']:
                insert_pos = eval(_parse[1])
        # if the card does not exist
        if _index is None:
            err_text = "'%s' card does not exist." % keywd
            fix_text = "Fixed by inserting a new '%s' card." % keywd
            if fixable:
                # use repr to accomodate both string and non-string types
                # Boolean is also OK in this constructor
                _card = "Card('%s', %s)" % (keywd, `fix_value`)
                fix = "self.header.ascard.insert(%d, %s)" % (insert_pos, _card)
            _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable))
        else:
            # if the supposed location is specified
            if pos is not None:
                # the fix strings are eval'ed later by the _Verify machinery
                test_pos = '_index '+ pos
                if not eval(test_pos):
                    err_text = "'%s' card at the wrong place (card %d)." % (keywd, _index)
                    fix_text = "Fixed by moving it to the right place (card %d)." % insert_pos
                    fix = "_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)" % (_index, _index, insert_pos)
                    _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix))
            # if value checking is specified
            if test:
                val = self.header[keywd]
                if not eval(test):
                    err_text = "'%s' card has invalid value '%s'." % (keywd, val)
                    fix_text = "Fixed by setting a new value '%s'." % fix_value
                    if fixable:
                        fix = "self.header['%s'] = %s" % (keywd, `fix_value`)
                    _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable))
        return _err
class _TempHDU(_ValidHDU):
    """Temporary HDU, used when the file is first opened.  This is to
    speed up the open.  Any header will not be initialized till the
    HDU is accessed.
    """
    def _getname(self):
        """Get the extname and extver from the header.

        Scans the raw header bytes with regexes instead of parsing
        cards; defaults are '' and 1 when the keywords are absent.
        """
        re_extname = re.compile(r"EXTNAME\s*=\s*'([ -&(-~]*)'")
        re_extver = re.compile(r"EXTVER\s*=\s*(\d+)")
        mo = re_extname.search(self._raw)
        if mo:
            name = mo.group(1).rstrip()
        else:
            name = ''
        mo = re_extver.search(self._raw)
        if mo:
            extver = int(mo.group(1))
        else:
            extver = 1
        return name, extver
    def _getsize(self, block):
        """Get the size from the first block of the HDU.

        Returns (size_in_bytes, name) where name is 'PRIMARY' for a
        simple non-groups HDU and '' otherwise.
        """
        re_simple = re.compile(r'SIMPLE  =\s*')
        re_bitpix = re.compile(r'BITPIX  =\s*(-?\d+)')
        re_naxis = re.compile(r'NAXIS  =\s*(\d+)')
        re_naxisn = re.compile(r'NAXIS(\d)  =\s*(\d+)')
        re_gcount = re.compile(r'GCOUNT  =\s*(-?\d+)')
        re_pcount = re.compile(r'PCOUNT  =\s*(-?\d+)')
        re_groups = re.compile(r'GROUPS  =\s*(T)')
        # SIMPLE may only appear in the first card (80 bytes)
        simple = re_simple.search(block[:80])
        mo = re_bitpix.search(block)
        if mo is not None:
            bitpix = int(mo.group(1))
        else:
            raise ValueError("BITPIX not found where expected")
        mo = re_gcount.search(block)
        if mo is not None:
            gcount = int(mo.group(1))
        else:
            gcount = 1
        mo = re_pcount.search(block)
        if mo is not None:
            pcount = int(mo.group(1))
        else:
            pcount = 0
        mo = re_groups.search(block)
        if mo and simple:
            groups = 1
        else:
            groups = 0
        mo = re_naxis.search(block)
        if mo is not None:
            naxis = int(mo.group(1))
            pos = mo.end(0)
        else:
            raise ValueError("NAXIS not found where expected")
        if naxis == 0:
            datasize = 0
        else:
            dims = [0]*naxis
            for i in range(naxis):
                mo = re_naxisn.search(block, pos)
                pos = mo.end(0)
                dims[int(mo.group(1))-1] = int(mo.group(2))
            # random-groups data skips the leading NAXIS1 (= 0) axis
            datasize = reduce(operator.mul, dims[groups:])
        size = abs(bitpix) * gcount * (pcount + datasize) / 8
        if simple and not groups:
            name = 'PRIMARY'
        else:
            name = ''
        return size, name
    def setupHDU(self):
        """Read one FITS HDU, data portions are not actually read here, but
        the beginning locations are computed.
        """
        _cardList = []
        _keyList = []
        blocks = self._raw
        if (len(blocks) % _blockLen) != 0:
            raise IOError, 'Header size is not multiple of %d: %d' % (_blockLen, len(blocks))
        elif (blocks[:8] not in ['SIMPLE  ', 'XTENSION']):
            raise IOError, 'Block does not begin with SIMPLE or XTENSION'
        # split the raw header into 80-byte cards, stopping at END
        for i in range(0, len(blocks), Card.length):
            _card = Card('').fromstring(blocks[i:i+Card.length])
            _key = _card.key
            if _key == 'END':
                break
            else:
                _cardList.append(_card)
                _keyList.append(_key)
        # Deal with CONTINUE cards
        # if a long string has CONTINUE cards, the "Card" is considered
        # to be more than one 80-char "physical" cards.
        _max = _keyList.count('CONTINUE')
        _start = 0
        for i in range(_max):
            _where = _keyList[_start:].index('CONTINUE') + _start
            # count how many consecutive CONTINUE cards follow _where
            for nc in range(1, _max+1):
                if _where+nc >= len(_keyList):
                    break
                if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE  ':
                    break
            # combine contiguous CONTINUE cards with its parent card
            if nc > 0:
                _longstring = _cardList[_where-1]._cardimage
                for c in _cardList[_where:_where+nc]:
                    _longstring += c._cardimage
                _cardList[_where-1] = _Card_with_continue().fromstring(_longstring)
                del _cardList[_where:_where+nc]
                del _keyList[_where:_where+nc]
                _start = _where
            # if not the real CONTINUE card, skip to the next card to search
            # to avoid starting at the same CONTINUE card
            else:
                # NOTE(review): nc starts at 1 in the loop above, so this
                # branch looks unreachable -- confirm before relying on it.
                _start = _where + 1
            if _keyList[_start:].count('CONTINUE') == 0:
                break
        # construct the Header object, using the cards.
        try:
            header = Header(CardList(_cardList, keylist=_keyList))
            hdu = header._hdutype(data=DELAYED, header=header)
            # pass these attributes
            hdu._file = self._file
            hdu._hdrLoc = self._hdrLoc
            hdu._datLoc = self._datLoc
            hdu._datSpan = self._datSpan
            hdu._ffile = self._ffile
            hdu.name = self.name
            hdu._extver = self._extver
            hdu._new = 0
            hdu.header._mod = 0
            hdu.header.ascard._mod = 0
        except:
            # NOTE(review): if construction fails, `hdu` is unbound and
            # the return below raises UnboundLocalError -- confirm this
            # silent-swallow is intended.
            pass
        return hdu
class _ExtensionHDU(_ValidHDU):
    """An extension HDU class.

    This class is the base class for the TableHDU, ImageHDU, and
    BinTableHDU classes.
    """
    def __init__(self, data=None, header=None):
        # file bookkeeping is filled in later by the reader
        self._file, self._offset, self._datLoc = None, None, None
        self.header = header
        self.data = data
        # XTENSION value; overridden by concrete subclasses
        self._xtn = ' '
    def __setattr__(self, attr, value):
        """Set an HDU attribute.

        Setting a truthy 'name' also upper-cases it and mirrors it into
        the EXTNAME card (creating the card if necessary).  All other
        attributes are stored normally.
        """
        if attr == 'name' and value:
            if not isinstance(value, str):
                raise TypeError, 'bad value type'
            value = value.upper()
            if self.header.has_key('EXTNAME'):
                self.header['EXTNAME'] = value
            else:
                self.header.ascard.append(Card('EXTNAME', value, 'extension name'))
        self.__dict__[attr] = value
    def _verify(self, option='warn'):
        _err = _ValidHDU._verify(self, option=option)
        # Verify location and value of mandatory keywords.
        # PCOUNT and GCOUNT must directly follow the last NAXISn card.
        naxis = self.header.get('NAXIS', 0)
        self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+" and val >= 0", 0, option, _err)
        self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+" and val == 1", 1, option, _err)
        return _err
# 0.8.8
def _iswholeline(indx, naxis):
    """Classify an axis index as a _KeyType subclass instance.

    indx: an integer or a slice; naxis: the axis length.  Returns one of
    _SinglePoint, _OnePointAxis, _WholeLine, _LineSlice, _SteppedSlice;
    raises IndexError for out-of-range or unsupported indices.
    """
    if isinstance(indx, (int, long)):
        # plain integer index: must fall inside the axis
        if not (0 <= indx < naxis):
            raise IndexError('Index %s out of range.' % indx)
        if naxis > 1:
            return _SinglePoint(1, indx)
        return _OnePointAxis(1, 0)
    if isinstance(indx, slice):
        indx = _normalize_slice(indx, naxis)
        if indx.start == 0 and indx.stop == naxis and indx.step == 1:
            return _WholeLine(naxis, 0)
        if indx.step == 1:
            return _LineSlice(indx.stop - indx.start, indx.start)
        return _SteppedSlice((indx.stop - indx.start) / indx.step, indx.start)
    raise IndexError('Illegal index %s' % indx)
def _normalize_slice(input, naxis):
"""Set the slice's start/stop in the regular range."""
def _normalize(indx, npts):
if indx < -npts:
indx = 0
elif indx < 0:
indx += npts
elif indx > npts:
indx = npts
return indx
_start = input.start
if _start is None:
_start = 0
elif isinstance(_start, (int, long)):
_start = _normalize(_start, naxis)
else:
raise IndexError, 'Illegal slice %s, start must be integer.' % input
_stop = input.stop
if _stop is None:
_stop = naxis
elif isinstance(_stop, (int, long)):
_stop = _normalize(_stop, naxis)
else:
raise IndexError, 'Illegal slice %s, stop must be integer.' % input
if _stop < _start:
raise IndexError, 'Illegal slice %s, stop < start.' % input
_step = input.step
if _step is None:
_step = 1
elif isinstance(_step, (int, long)):
if _step <= 0:
raise IndexError, 'Illegal slice %s, step must be positive.' % input
else:
raise IndexError, 'Illegal slice %s, step must be integer.' % input
return slice(_start, _stop, _step)
class _KeyType:
    """Classification result for one axis index: npts points selected,
    starting at flat offset `offset` (see _iswholeline)."""
    def __init__(self, npts, offset):
        self.npts = npts
        self.offset = offset
class _WholeLine(_KeyType):
    # slice covering the entire axis (start 0, stop naxis, step 1)
    pass
class _SinglePoint(_KeyType):
    # integer index into an axis of length > 1
    pass
class _OnePointAxis(_KeyType):
    # integer index into an axis of length 1
    pass
class _LineSlice(_KeyType):
    # contiguous (step 1) partial slice of an axis
    pass
class _SteppedSlice(_KeyType):
    # slice with step > 1 (non-contiguous selection)
    pass
class Section:
    """Image section.

    Lazily reads a contiguous rectangular subsection of an image HDU's
    data directly from the file, without loading the whole array.
    """
    def __init__(self, hdu):
        self.hdu = hdu
    def __getitem__(self, key):
        """Read the subsection selected by `key` (an index/slice tuple,
        in NAXIS-reversed i.e. C order) and return it as an array."""
        dims = []
        if not isinstance(key, tuple):
            key = (key,)
        naxis = self.hdu.header['NAXIS']
        if naxis < len(key):
            raise IndexError, 'too many indices.'
        elif naxis > len(key):
            # pad missing trailing axes with full slices
            key = key + (slice(None),) * (naxis-len(key))
        offset = 0
        for i in range(naxis):
            # note: axes are traversed in C order, so NAXIS(naxis-i)
            _naxis = self.hdu.header['NAXIS'+`naxis-i`]
            indx = _iswholeline(key[i], _naxis)
            # accumulate the flat element offset of the section start
            offset = offset * _naxis + indx.offset
            # all elements after the first WholeLine must be WholeLine or
            # OnePointAxis
            if isinstance(indx, (_WholeLine, _LineSlice)):
                dims.append(indx.npts)
                break
            elif isinstance(indx, _SteppedSlice):
                raise IndexError, 'Subsection data must be contiguous.'
        for j in range(i+1,naxis):
            _naxis = self.hdu.header['NAXIS'+`naxis-j`]
            indx = _iswholeline(key[j], _naxis)
            dims.append(indx.npts)
            if not isinstance(indx, _WholeLine):
                raise IndexError, 'Subsection data is not contiguous.'
            # the offset needs to multiply the length of all remaining axes
            else:
                offset *= _naxis
        if dims == []:
            dims = [1]
        # total number of points to read
        npt = 1
        for n in dims:
            npt *= n
        # Now, get the data (does not include bscale/bzero for now XXX)
        _bitpix = self.hdu.header['BITPIX']
        code = _ImageBaseHDU.NumCode[_bitpix]
        # seek to the section start: offset elements into the data block
        self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8)
        raw_data = num.fromfile(self.hdu._file, type=code, shape=dims)
        # FITS data is always big-endian on disk
        raw_data._byteorder = 'big'
        return raw_data
class _ImageBaseHDU(_ValidHDU):
    """FITS image HDU base class."""
    """Attributes:
        header:  image header
        data:  image data
        _file:  file associated with array          (None)
        _datLoc:  starting byte location of data block in file    (None)
    """
    # mappings between FITS BITPIX values and numarray typecodes
    NumCode = {8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'}
    ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64}
    def __init__(self, data=None, header=None):
        """Construct an image HDU.

        data: the image data (or DELAYED when read lazily from file).
        header: a Header used as a template; a minimal header is built
                when None.
        """
        self._file, self._datLoc = None, None
        if header is not None:
            if not isinstance(header, Header):
                raise ValueError, "header must be a Header object"
        if data is DELAYED:
            # this should never happen
            if header is None:
                raise ValueError, "No header to setup HDU."
            # if the file is read the first time, no need to copy, and keep it unchanged
            else:
                self.header = header
        else:
            # construct a list of cards of minimal header
            if isinstance(self, _ExtensionHDU):
                c0 = Card('XTENSION', 'IMAGE', 'Image extension')
            else:
                c0 = Card('SIMPLE', True, 'conforms to FITS standard')
            _list = CardList([
                c0,
                Card('BITPIX',    8, 'array data type'),
                Card('NAXIS',     0, 'number of array dimensions'),
                ])
            if isinstance(self, GroupsHDU):
                _list.append(Card('GROUPS', True, 'has groups'))
            if isinstance(self, (_ExtensionHDU, GroupsHDU)):
                _list.append(Card('PCOUNT',    0, 'number of parameters'))
                _list.append(Card('GCOUNT',    1, 'number of groups'))
            # merge the template header's non-structural cards
            if header is not None:
                hcopy = header.copy()
                hcopy._strip()
                _list.extend(hcopy.ascardlist())
            self.header = Header(_list)
        # remember the scaling in effect when the HDU was created/read
        self._bzero = self.header.get('BZERO', 0)
        self._bscale = self.header.get('BSCALE', 1)
        if (data is DELAYED): return
        self.data = data
        # update the header
        self.update_header()
        self._bitpix = self.header['BITPIX']
        # delete the keywords BSCALE and BZERO
        del self.header['BSCALE']
        del self.header['BZERO']
    def update_header(self):
        """Update the header keywords (BITPIX, NAXIS, NAXISn, and the
        random-groups cards when applicable) to agree with the data."""
        old_naxis = self.header.get('NAXIS', 0)
        if isinstance(self.data, GroupData):
            self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()]
            # group axes: leading (group) axis is reported as NAXIS1 = 0
            axes = list(self.data.data.getshape())[1:]
            axes.reverse()
            axes = [0] + axes
        elif isinstance(self.data, num.NumArray):
            self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()]
            # FITS NAXISn order is the reverse of the numarray shape
            axes = list(self.data.getshape())
            axes.reverse()
        elif self.data is None:
            axes = []
        else:
            raise ValueError, "incorrect array type"
        self.header['NAXIS'] = len(axes)
        # add NAXISi if it does not exist
        for j in range(len(axes)):
            try:
                self.header['NAXIS'+`j+1`] = axes[j]
            except:
                # card missing: insert it right after the previous axis card
                if (j == 0):
                    _after = 'naxis'
                else :
                    _after = 'naxis'+`j`
                self.header.update('naxis'+`j+1`, axes[j], after = _after)
        # delete extra NAXISi's
        for j in range(len(axes)+1, old_naxis+1):
            try:
                del self.header.ascard['NAXIS'+`j`]
            except KeyError:
                pass
        if isinstance(self.data, GroupData):
            # random-groups bookkeeping cards
            self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`)
            self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS')
            self.header.update('GCOUNT', len(self.data), after='PCOUNT')
            npars = len(self.data.parnames)
            (_scale, _zero)  = self.data._get_scale_factors(npars)[3:5]
            if _scale:
                self.header.update('BSCALE', self.data._coldefs.bscales[npars])
            if _zero:
                self.header.update('BZERO', self.data._coldefs.bzeros[npars])
            # per-parameter name/scale/zero cards
            for i in range(npars):
                self.header.update('PTYPE'+`i+1`, self.data.parnames[i])
                (_scale, _zero)  = self.data._get_scale_factors(i)[3:5]
                if _scale:
                    self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i])
                if _zero:
                    self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i])
    def __getattr__(self, attr):
        """Get the data attribute.

        'data' is loaded lazily from the file on first access (and then
        cached in __dict__); 'section' returns a fresh Section proxy.
        """
        if attr == 'section':
            return Section(self)
        elif attr == 'data':
            self.__dict__[attr] = None
            if self.header['NAXIS'] > 0:
                _bitpix = self.header['BITPIX']
                self._file.seek(self._datLoc)
                if isinstance(self, GroupsHDU):
                    # groups data is read flat; element count from size()
                    dims = self.size()*8/abs(_bitpix)
                else:
                    dims = self._dimShape()
                code = _ImageBaseHDU.NumCode[self.header['BITPIX']]
                if self._ffile.memmap:
                    _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan]
                    raw_data = num.array(_mmap, type=code, shape=dims)
                else:
                    raw_data = num.fromfile(self._file, type=code, shape=dims)
                # FITS data is big-endian on disk
                raw_data._byteorder = 'big'
                if (self._bzero != 0 or self._bscale != 1):
                    if _bitpix > 0:  # scale integers to Float32
                        self.data = num.array(raw_data, type=num.Float32)
                    else:  # floating point cases
                        if self._ffile.memmap:
                            self.data = raw_data.copy()
                        # if not memmap, use the space already in memory
                        else:
                            self.data = raw_data
                    if self._bscale != 1:
                        num.multiply(self.data, self._bscale, self.data)
                    if self._bzero != 0:
                        self.data += self._bzero
                    # delete the keywords BSCALE and BZERO after scaling
                    del self.header['BSCALE']
                    del self.header['BZERO']
                    self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()]
                else:
                    self.data = raw_data
        try:
            return self.__dict__[attr]
        except KeyError:
            raise AttributeError(attr)
    def _dimShape(self):
        """Returns a tuple of image dimensions, reverse the order of NAXIS."""
        naxis = self.header['NAXIS']
        axes = naxis*[0]
        for j in range(naxis):
            axes[j] = self.header['NAXIS'+`j+1`]
        axes.reverse()
        return tuple(axes)
    def _summary(self):
        """Summarize the HDU: name, dimensions, and formats."""
        class_name  = str(self.__class__)
        type  = class_name[class_name.rfind('.')+1:]
        # if data is touched, use data info.
        if 'data' in dir(self):
            if self.data is None:
                _shape, _format = (), ''
            else:
                # the shape will be in the order of NAXIS's which is the
                # reverse of the numarray shape
                if isinstance(self, GroupsHDU):
                    _shape = list(self.data.data.getshape())[1:]
                    _format = `self.data._parent.field(0).type()`
                else:
                    _shape = list(self.data.getshape())
                    _format = `self.data.type()`
                _shape.reverse()
                _shape = tuple(_shape)
                _format = _format[_format.rfind('.')+1:]
        # if data is not touched yet, use header info.
        else:
            _shape = ()
            for j in range(self.header['NAXIS']):
                # the leading (group) axis is omitted for random groups
                if isinstance(self, GroupsHDU) and j == 0:
                    continue
                _shape += (self.header['NAXIS'+`j+1`],)
            _format = self.NumCode[self.header['BITPIX']]
        if isinstance(self, GroupsHDU):
            _gcount = '   %d Groups  %d Parameters' % (self.header['GCOUNT'], self.header['PCOUNT'])
        else:
            _gcount = ''
        return "%-10s  %-11s  %5d  %-12s  %s%s" % \
            (self.name, type, len(self.header.ascard), _shape, _format, _gcount)
    def scale(self, type=None, option="old", bscale=1, bzero=0):
        """Scale image data by using BSCALE/BZERO.

        Call to this method will scale self.data and update the keywords
        of BSCALE and BZERO in self.header.  This method should only be
        used right before writing to the output file, as the data will be
        scaled and is therefore not very usable after the call.

        type (string): destination data type, use numarray attribute format,
              (e.g. 'UInt8', 'Int16', 'Float32' etc.).  If is None, use the
              current data type.
        option: how to scale the data: if "old", use the original BSCALE
              and BZERO values when the data was read/created.  If
              "minmax", use the minimum and maximum of the data to scale.
              The option will be overwritten by any user specified
              bscale/bzero values.
        bscale/bzero: user specified BSCALE and BZERO values.
        """
        if self.data is None:
            return
        # Determine the destination (numarray) data type
        if type is None:
            type = self.NumCode[self._bitpix]
        _type = getattr(num, type)
        # Determine how to scale the data
        # bscale and bzero takes priority
        if (bscale != 1 or bzero !=0):
            _scale = bscale
            _zero = bzero
        else:
            if option == 'old':
                _scale = self._bscale
                _zero = self._bzero
            elif option == 'minmax':
                if isinstance(_type, num.FloatingType):
                    # floats need no scaling
                    _scale = 1
                    _zero = 0
                else:
                    # flat the shape temporarily to save memory
                    dims = self.data.getshape()
                    self.data.setshape(self.data.nelements())
                    min = num.minimum.reduce(self.data)
                    max = num.maximum.reduce(self.data)
                    self.data.setshape(dims)
                    if `_type` == 'UInt8':  # UInt8 case
                        _zero = min
                        _scale = (max - min) / (2.**8 - 1)
                    else:
                        _zero = (max + min) / 2.
                        # throw away -2^N
                        _scale = (max - min) / (2.**(8*_type.bytes) - 2)
        # Do the scaling
        if _zero != 0:
            self.data += -_zero  # 0.9.6.3 to avoid out of range error for BZERO = +32768
            self.header.update('BZERO', _zero)
        else:
            del self.header['BZERO']
        if _scale != 1:
            self.data /= _scale
            self.header.update('BSCALE', _scale)
        else:
            del self.header['BSCALE']
        # convert in-place to the destination type, rounding first
        if self.data._type != _type:
            self.data = num.array(num.around(self.data), type=_type)  #0.7.7.1
class PrimaryHDU(_ImageBaseHDU):
    """FITS primary HDU class."""

    def __init__(self, data=None, header=None):
        """Construct a primary HDU.

        data: the data in the HDU, default=None.
        header: the header to be used (as a template), default=None.
                If header=None, a minimal Header will be provided.
        """
        _ImageBaseHDU.__init__(self, data=data, header=header)
        self.name = 'PRIMARY'

        # When no template header was supplied, insert the EXTEND keyword
        # directly after the last NAXISn card (or after NAXIS itself when
        # the image has no axes).
        if header is None:
            naxis_suffix = repr(self.header['NAXIS'])
            if naxis_suffix == '0':
                naxis_suffix = ''
            self.header.update('EXTEND', True, after='NAXIS' + naxis_suffix)
class ImageHDU(_ExtensionHDU, _ImageBaseHDU):
    """FITS image extension HDU class."""

    def __init__(self, data=None, header=None, name=None):
        """Construct an image HDU.

        data: the data in the HDU, default=None.
        header: the header to be used (as a template), default=None.
                If header=None, a minimal Header will be provided.
        name: The name of the HDU, will be the value of the keyword
              EXTNAME, default=None.
        """
        # _ExtensionHDU.__init__ does nothing, so only the image base
        # class needs initializing.
        _ImageBaseHDU.__init__(self, data=data, header=header)
        self._xtn = 'IMAGE'
        self.header._hdutype = ImageHDU

        # insert the required keywords PCOUNT and GCOUNT
        # NOTE(review): this suffix is computed but never used here —
        # the intended insertion appears to happen elsewhere; kept for
        # fidelity (the header lookup must still succeed).
        naxis_suffix = repr(self.header['NAXIS'])
        if naxis_suffix == '0':
            naxis_suffix = ''

        # fall back to the EXTNAME card when no name was given
        if name is None and self.header.has_key('EXTNAME'):
            name = self.header['EXTNAME']
        self.name = name

    def _verify(self, option='warn'):
        """ImageHDU verify method."""
        _err = _ExtensionHDU._verify(self, option=option)
        self.req_cards('PCOUNT', None, _isInt + " and val == 0", 0, option, _err)
        return _err
class GroupsHDU(PrimaryHDU):
    """FITS Random Groups HDU class."""

    # map FITS BITPIX values to the corresponding column format code
    _dict = {8:'B', 16:'I', 32:'J', 64:'K', -32:'E', -64:'D'}

    def __init__(self, data=None, header=None, name=None):
        # Random groups require NAXIS >= 1 and NAXIS1 == 0 by the FITS
        # standard; enforce that on the (possibly minimal) header.
        PrimaryHDU.__init__(self, data=data, header=header)
        self.header._hdutype = GroupsHDU
        self.name = name

        if self.header['NAXIS'] <= 0:
            self.header['NAXIS'] = 1
        self.header.update('NAXIS1', 0, after='NAXIS')

    def __getattr__(self, attr):
        """Get the 'data' or 'columns' attribute lazily.  The data of a
        random group FITS file is laid out like a binary table's data.
        """
        if attr == 'data':  # same code as in _TableBaseHDU
            size = self.size()
            if size:
                self._file.seek(self._datLoc)
                data = GroupData(_get_tbdata(self))
                data._coldefs = self.columns
                data.parnames = self.columns._pnames
            else:
                data = None
            self.__dict__[attr] = data

        elif attr == 'columns':
            # build one Column per group parameter (PTYPEn/PSCALn/PZEROn)
            # plus a final 'data' column holding the group image itself
            _cols = []
            _pnames = []
            _pcount = self.header['PCOUNT']
            _format = GroupsHDU._dict[self.header['BITPIX']]
            for i in range(self.header['PCOUNT']):
                _bscale = self.header.get('PSCAL'+`i+1`, 1)
                _bzero = self.header.get('PZERO'+`i+1`, 0)
                _pnames.append(self.header['PTYPE'+`i+1`].lower())
                _cols.append(Column(name='c'+`i+1`, format = _format, bscale = _bscale, bzero = _bzero))
            data_shape = self._dimShape()[:-1]
            # NOTE(review): the repeat count is the .sum() of the axis
            # lengths, not their product — confirm against the writer side
            dat_format = `int(num.array(data_shape).sum())` + _format

            _bscale = self.header.get('BSCALE', 1)
            _bzero = self.header.get('BZERO', 0)
            _cols.append(Column(name='data', format = dat_format, bscale = _bscale, bzero = _bzero))
            _coldefs = ColDefs(_cols)
            _coldefs._shape = self.header['GCOUNT']
            _coldefs._dat_format = _fits2rec[_format]
            _coldefs._pnames = _pnames
            self.__dict__[attr] = _coldefs

        elif attr == '_theap':
            self.__dict__[attr] = 0

        try:
            return self.__dict__[attr]
        except KeyError:
            raise AttributeError(attr)

    # 0.6.5.5
    def size(self):
        """Returns the size (in bytes) of the HDU's data part."""
        size = 0
        naxis = self.header.get('NAXIS', 0)

        # for random group image, NAXIS1 should be 0, so we skip NAXIS1.
        if naxis > 1:
            size = 1
            for j in range(1, naxis):
                size = size * self.header['NAXIS'+`j+1`]
        bitpix = self.header['BITPIX']
        gcount = self.header.get('GCOUNT', 1)
        pcount = self.header.get('PCOUNT', 0)
        # groups size formula from the FITS standard
        size = abs(bitpix) * gcount * (pcount + size) / 8
        return size

    def _verify(self, option='warn'):
        _err = PrimaryHDU._verify(self, option=option)

        # Verify locations and values of mandatory keywords.
        self.req_cards('NAXIS', '== 2', _isInt+" and val >= 1 and val <= 999", 1, option, _err)
        self.req_cards('NAXIS1', '== 3', _isInt+" and val == 0", 0, option, _err)
        _after = self.header['NAXIS'] + 3

        # if the card EXTEND exists, must be after it.
        try:
            _dum = self.header['EXTEND']
            #_after += 1
        except:
            pass
        _pos = '>= '+`_after`
        self.req_cards('GCOUNT', _pos, _isInt, 1, option, _err)
        self.req_cards('PCOUNT', _pos, _isInt, 0, option, _err)
        self.req_cards('GROUPS', _pos, 'val == True', True, option, _err)
        return _err
# --------------------------Table related code----------------------------------
# lists of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
# Column attribute names and the FITS definition keywords they map to;
# the two lists are index-aligned (see the comment block above).
_commonNames = ['name', 'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start', 'dim']
_keyNames = ['TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM']

# mapping from TFORM data type to numarray data type (code);
# FITS logicals ('L') are stored as one-byte integers
_booltype = 'i1'
_fits2rec = {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a', 'C':'c8', 'M':'c16', 'K':'i8'}

# the reverse dictionary of the above
_rec2fits = {}
for key in _fits2rec.keys():
    _rec2fits[_fits2rec[key]]=key
class _FormatX(str):
    """For X (bit array) format in binary tables.

    A plain record-format string tagged with this class so later code can
    recognize bit columns; the bit count is attached as `_nx`.
    """
    pass
class _FormatP(str):
    """For P format (descriptor) columns in variable length tables.

    A plain record-format string tagged with this class; the element data
    type of the variable length array is attached as `_dtype`.
    """
    pass
# TFORM regular expression: optional leading repeat count (digits), one
# letter data type code, then a free-form option field (printable chars)
_tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)')

# table definition keyword regular expression, e.g. 'TTYPE12' splits
# into label 'TTYPE' and num '12'
_tdef_re = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')
def _parse_tformat(tform):
    """Parse a TFORM value into a (repeat, dtype, option) tuple.

    tform: FITS column format string, e.g. '3J', 'E', '10A'.
    Raises ValueError when the string does not match the TFORM syntax.
    """
    try:
        (repeat, dtype, option) = _tformat_re.match(tform.strip()).groups()
    except:
        # Bug fix: the original only printed a message here and then
        # crashed with a NameError on `repeat`; raise a real error with
        # the same message instead (callers use bare except, so any
        # exception type is backward-compatible).
        raise ValueError('Format "%s" is not recognized.' % tform)

    if repeat == '':
        repeat = 1
    else:
        # the regex guarantees digits only, so int() is both sufficient
        # and safer than the original eval() on header-derived text
        repeat = int(repeat)
    return (repeat, dtype, option)
def _convert_format(input_format, reverse=0):
    """Convert FITS format spec to record format spec.  Do the opposite
    if reverse = 1.

    E.g. '3J' <-> '3i4'.  X and P formats produce tagged _FormatX /
    _FormatP strings carrying extra attributes.
    """
    fmt = input_format
    (repeat, dtype, option) = _parse_tformat(fmt)
    if reverse == 0:
        if dtype in _fits2rec.keys():  # FITS format
            if dtype == 'A':
                output_format = _fits2rec[dtype]+`repeat`
                # to accomodate both the ASCII table and binary table column
                # format spec, i.e. A7 in ASCII table is the same as 7A in
                # binary table, so both will produce 'a7'.
                if fmt.lstrip()[0] == 'A' and option != '':
                    output_format = _fits2rec[dtype]+`int(option)` # make sure option is integer
            else:
                _repeat = ''
                if repeat != 1:
                    _repeat = `repeat`
                output_format = _repeat+_fits2rec[dtype]
        elif dtype == 'X':
            # bit column: packed into whole bytes of UInt8
            nbytes = ((repeat-1) / 8) + 1
            # use an array, even if it is only ONE u1 (i.e. use tuple always)
            output_format = _FormatX(`(nbytes,)`+'u1')
            output_format._nx = repeat
        elif dtype == 'P':
            # variable length column: pair of Int32 (length, offset)
            output_format = _FormatP('2i4')
            output_format._dtype = _fits2rec[option[0]]
        elif dtype == 'F':
            output_format = 'f8'
        else:
            raise ValueError, "Illegal format %s" % fmt
    else:
        if dtype == 'a':
            output_format = option+_rec2fits[dtype]
        elif isinstance(dtype, _FormatX):
            # NOTE(review): dtype here is a single parsed character, so
            # this branch looks unreachable; output_format would also be
            # left unset — confirm before relying on it
            print 'X format'
        elif dtype+option in _rec2fits.keys():  # record format
            _repeat = ''
            if repeat != 1:
                _repeat = `repeat`
            output_format = _repeat+_rec2fits[dtype+option]
        else:
            raise ValueError, "Illegal format %s" % fmt
    return output_format
def _convert_ASCII_format(input_format):
"""Convert ASCII table format spec to record format spec. """
ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'}
_re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)')
# Parse the TFORM value into data type and width.
try:
(dtype, width) = _re.match(input_format.strip()).groups()
dtype = ascii2rec[dtype]
if width == '':
width = None
else:
width = eval(width)
except:
raise ValueError, 'Illegal format `%s` for ASCII table.' % input_format
return (dtype, width)
def _get_index(nameList, key):
    """
    Get the index of the key in the name list.

    The key can be an integer or string.  If integer, it is the index
    in the list.  If string,
    (a) Field (column) names are case sensitive: you can have two
        different columns called 'abc' and 'ABC' respectively.
    (b) When you *refer* to a field (presumably with the field method),
        it will try to match the exact name first, so in the example in
        (a), field('abc') will get the first field, and field('ABC') will
        get the second field.
        If there is no exact name matched, it will try to match the name
        with case insensitivity.  So, in the last example, field('Abc')
        will cause an exception since there is no unique mapping.  If
        there is a field named "XYZ" and no other field name is a case
        variant of "XYZ", then field('xyz'), field('Xyz'), etc. will get
        this field.

    Raises NameError when the key is missing, ambiguous, or of an
    unsupported type.
    """
    if isinstance(key, (int, long)):
        indx = int(key)
    elif isinstance(key, str):
        # try to find exact match first
        try:
            indx = nameList.index(key.rstrip())
        except ValueError:
            # try to match case-insensitively
            _key = key.lower().rstrip()
            _list = map(lambda x: x.lower().rstrip(), nameList)
            _count = operator.countOf(_list, _key) # occurrence of _key in _list
            if _count == 1:
                indx = _list.index(_key)
            elif _count == 0:
                raise NameError, "Key '%s' does not exist." % key
            else: # multiple match
                raise NameError, "Ambiguous key name '%s'." % key
    else:
        raise NameError, "Illegal key '%s'." % `key`
    return indx
def _unwrapx(input, output, nx):
    """Unwrap the X format column into a Boolean array.

    input: input UInt8 array of shape (s, nbytes)
    output: output Boolean array of shape (s, nx)
    nx: number of bits
    """
    # bit masks for the 8 positions within a byte, MSB first
    pow2 = [128, 64, 32, 16, 8, 4, 2, 1]
    nbytes = ((nx-1) / 8) + 1
    for i in range(nbytes):
        _min = i*8
        _max = min((i+1)*8, nx)
        for j in range(_min, _max):
            # nonzero AND result marks the bit as set in the Bool output
            num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j])
def _wrapx(input, output, nx):
    """Wrap the X format column Boolean array into an UInt8 array.

    input: input Boolean array of shape (s, nx)
    output: output UInt8 array of shape (s, nbytes)
    nx: number of bits
    """
    output[...] = 0 # reset the output
    nbytes = ((nx-1) / 8) + 1
    unused = nbytes*8 - nx
    for i in range(nbytes):
        _min = i*8
        _max = min((i+1)*8, nx)
        for j in range(_min, _max):
            # shift previously accumulated bits left, then OR-in (via
            # add, since each bit position is touched once) the next bit
            if j != _min:
                num.lshift(output[...,i], 1, output[...,i])
            num.add(output[...,i], input[...,j], output[...,i])

    # shift the unused bits; runs once after the loops, on purpose: only
    # the LAST byte (loop variable i) can have unused trailing bits
    num.lshift(output[...,i], unused, output[...,i])
def _makep(input, desp_output, dtype):
    """Construct the P format column array, both the data descriptors and
    the data.  It returns the output "data" array of data type dtype.

    The descriptor location will have a zero offset for all columns
    after this call.  The final offset will be calculated when the file
    is written.

    input: input object array
    desp_output: output "descriptor" array of data type 2Int32
    dtype: data type of the variable array
    """
    _offset = 0
    data_output = _VLF([None]*len(input))
    data_output._dtype = dtype

    # character elements occupy one byte each; otherwise ask numarray
    if dtype == 'a':
        _nbytes = 1
    else:
        _nbytes = num.getType(dtype).bytes

    for i in range(len(input)):
        if dtype == 'a':
            data_output[i] = chararray.array(input[i], itemsize=1)
        else:
            data_output[i] = num.array(input[i], type=dtype)
        # descriptor = (element count, byte offset within this column)
        desp_output[i,0] = len(data_output[i])
        desp_output[i,1] = _offset
        _offset += len(data_output[i]) * _nbytes

    return data_output
class _VLF(objects.ObjectArray):
    """Variable length field object.

    An object array whose elements are variable-sized arrays of one
    common data type (`_dtype`, assigned by the creator); `_max` tracks
    the longest element seen so far.
    """

    def __init__(self, input):
        """
        input: a sequence of variable-sized elements.
        """
        objects.ObjectArray.__init__(self, input)
        self._max = 0

    def __setitem__(self, key, value):
        """To make sure the new item has consistent data type to avoid
        misalignment.
        """
        if isinstance(value, num.NumArray) and value.type() == self._dtype:
            pass
        elif isinstance(value, chararray.CharArray) and value.itemsize() == 1:
            pass
        elif self._dtype == 'a':
            value = chararray.array(value, itemsize=1)
        else:
            value = num.array(value, type=self._dtype)
        objects.ObjectArray.__setitem__(self, key, value)
        # remember the longest element for later sizing
        self._max = max(self._max, len(value))
class Column:
    """Column class which contains the definition of one column, e.g.
    ttype, tform, etc. and the array.  Does not support theap yet.
    """

    def __init__(self, name=None, format=None, unit=None, null=None, \
                 bscale=None, bzero=None, disp=None, start=None, \
                 dim=None, array=None):
        """Construct a Column by specifying attributes.  All attributes
        except format can be optional.

        name: column name, corresponding to TTYPE keyword
        format: column format, corresponding to TFORM keyword
        unit: column unit, corresponding to TUNIT keyword
        null: null value, corresponding to TNULL keyword
        bscale: bscale value, corresponding to TSCAL keyword
        bzero: bzero value, corresponding to TZERO keyword
        disp: display format, corresponding to TDISP keyword
        start: column starting position (ASCII table only),
               corresponding to TBCOL keyword
        dim: column dimension corresponding to TDIM keyword
        """
        # any of the input argument (except array) can be a Card or just
        # a number/string; eval(cname) reads the like-named argument from
        # the local scope (deliberate hack keyed on _commonNames)
        for cname in _commonNames:
            value = eval(cname) # get the argument's value
            keyword = _keyNames[_commonNames.index(cname)]
            if isinstance(value, Card):
                setattr(self, cname, value.value)
            else:
                setattr(self, cname, value)

        # if the column data is not NDarray, make it to be one, i.e.
        # input arrays can be just list or tuple, not required to be NDArray
        if format is not None:
            # check format
            try:
                # legit FITS format? convert to record format (e.g. '3J'->'3i4')
                recfmt = _convert_format(format)
            except:
                try:
                    # legit RecArray format?
                    recfmt = format
                    format = _convert_format(recfmt, reverse=1)
                except:
                    raise ValueError, "Illegal format `%s`." % format

            self.format = format

            # does not include Object array because there is no guarantee
            # the elements in the object array are consistent.
            if not isinstance(array, (num.NumArray, chararray.CharArray, Delayed)):
                try: # try to convert to a numarray first
                    array = num.array(array)
                except:
                    try: # then try to convert it to a strings array
                        array = chararray.array(array, itemsize=eval(recfmt[1:]))

                    # then try variable length array
                    except:
                        if isinstance(recfmt, _FormatP):
                            try:
                                _func = lambda x: num.array(x, type=recfmt._dtype)
                                array = _VLF(map(_func, array))
                            except:
                                try:
                                    # this handles ['abc'] and [['a','b','c']]
                                    # equally, beautiful!
                                    _func = lambda x: chararray.array(x, itemsize=1)
                                    array = _VLF(map(_func, array))
                                except:
                                    raise ValueError, "Inconsistent input data array: %s" % array
                            array._dtype = recfmt._dtype
                        else:
                            raise ValueError, "Data is inconsistent with the format `%s`." % format
        else:
            raise ValueError, "Must specify format to construct Column"

        # scale the array back to storage values if there is bscale/bzero
        if isinstance(array, num.NumArray):

            # boolean needs to be scaled too
            if recfmt == _booltype:
                _out = num.zeros(array.shape, type=recfmt)
                num.where(array==0, ord('F'), ord('T'), _out)
                array = _out

            # make a copy if scaled, so as not to corrupt the original array
            if bzero not in ['', None, 0] or bscale not in ['', None, 1]:
                array = array.copy()
            if bzero not in ['', None, 0]:
                array += -bzero
            if bscale not in ['', None, 1]:
                array /= bscale

        self.array = array

    def __repr__(self):
        # one "name = value" line per defined attribute
        text = ''
        for cname in _commonNames:
            value = getattr(self, cname)
            if value != None:
                text += cname + ' = ' + `value` + '\n'
        return text[:-1]

    def copy(self):
        # shallow copy of the definition (shares any data array)
        tmp = Column(format='I') # just use a throw-away format
        tmp.__dict__=self.__dict__.copy()
        return tmp
class ColDefs(object):
    """Column definitions class.  It has attributes corresponding to the
    Column attributes (e.g. ColDefs has the attribute .names while Column
    has .name).  Each attribute in ColDefs is a list of corresponding
    attribute values from all Columns.
    """

    def __init__(self, input, tbtype='BinTableHDU'):
        """input: a list of Columns, or a (table) HDU
        tbtype: which table HDU, 'BinTableHDU' (default) or
                'TableHDU' (text table).
        """
        # default ASCII formats used when a column spec has no width
        ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'}
        self._tbtype = tbtype

        if isinstance(input, ColDefs):
            self.data = [col.copy() for col in input.data]

        # if the input is a list of Columns
        elif isinstance(input, (list, tuple)):
            for col in input:
                if not isinstance(col, Column):
                    # NOTE(review): string exception — illegal in Python
                    # >= 2.6; should be e.g. TypeError
                    raise "Element %d in the ColDefs input is not a Column." % input.index(col)
            self.data = [col.copy() for col in input]

            # if the format of an ASCII column has no width, add one
            if tbtype == 'TableHDU':
                for i in range(len(self)):
                    (type, width) = _convert_ASCII_format(self.data[i].format)
                    if width is None:
                        self.data[i].format = ascii_fmt[self.data[i].format[0]]

        elif isinstance(input, _TableBaseHDU):
            hdr = input.header
            _nfields = hdr['TFIELDS']
            self._width = hdr['NAXIS1']
            self._shape = hdr['NAXIS2']

            # go through header keywords to pick out column definition keywords
            dict = [{} for i in range(_nfields)] # definition dictionaries for each field
            for _card in hdr.ascardlist():
                _key = _tdef_re.match(_card.key)
                try:
                    keyword = _key.group('label')
                except:
                    continue # skip if there is no match
                if (keyword in _keyNames):
                    col = eval(_key.group('num'))
                    if col <= _nfields and col > 0:
                        cname = _commonNames[_keyNames.index(keyword)]
                        dict[col-1][cname] = _card.value

            # data reading will be delayed
            for col in range(_nfields):
                dict[col]['array'] = Delayed(input, col)

            # now build the columns
            tmp = [Column(**attrs) for attrs in dict]
            self.data = tmp
        else:
            raise TypeError, "input to ColDefs must be a table HDU or a list of Columns"

    def __getattr__(self, name):
        """Populate the attributes lazily on first access."""
        cname = name[:-1]
        # plural of a common name, e.g. 'formats' -> list of every
        # column's 'format'
        if cname in _commonNames:
            attr = [''] * len(self)
            for i in range(len(self)):
                val = getattr(self[i], cname)
                if val != None:
                    attr[i] = val
        elif name == '_arrays':
            attr = [col.array for col in self.data]
        elif name == '_recformats':
            if self._tbtype == 'BinTableHDU':
                attr = [_convert_format(fmt) for fmt in self.formats]
            elif self._tbtype == 'TableHDU':
                self._Formats = self.formats
                if len(self) == 1:
                    dummy = []
                else:
                    # widths derived from the gaps between start columns
                    dummy = map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1])
                dummy.append(self._width-self.starts[-1]+1)
                attr = map(lambda y: 'a'+`y`, dummy)
        elif name == 'spans':
            # make sure to consider the case that the starting column of
            # a field may not be the column right after the last field
            if self._tbtype == 'TableHDU':
                last_end = 0
                attr = [0] * len(self)
                for i in range(len(self)):
                    (_format, _width) = _convert_ASCII_format(self.formats[i])
                    # NOTE(review): `is ''` relies on CPython string
                    # interning; should be == ''
                    if self.starts[i] is '':
                        self.starts[i] = last_end + 1
                    _end = self.starts[i] + _width - 1
                    attr[i] = _end - last_end
                    last_end = _end
                self._width = _end
        else:
            raise KeyError, 'Attribute %s not defined.' % name
        # cache so __getattr__ is not consulted again for this name
        self.__dict__[name] = attr
        return self.__dict__[name]

    # dead code kept as a string by the original author
    """
    # make sure to consider the case that the starting column of
    # a field may not be the column right after the last field
    elif tbtype == 'TableHDU':
        (_format, _width) = _convert_ASCII_format(self.formats[i])
        if self.starts[i] is '':
            self.starts[i] = last_end + 1
        _end = self.starts[i] + _width - 1
        self.spans[i] = _end - last_end
        last_end = _end
        self._Formats = self.formats
        self._arrays[i] = input[i].array
    """

    def __getitem__(self, key):
        # int -> single Column; anything else (slice/list) -> new ColDefs
        x = self.data[key]
        if isinstance(key, (int, long)):
            return x
        else:
            return ColDefs(x)

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        return 'ColDefs'+ `tuple(self.data)`

    def __coerce__(self, other):
        pass # needed for __add__

    def __add__(self, other, option='left'):
        # concatenate with a Column or another ColDefs
        if isinstance(other, Column):
            b = [other]
        elif isinstance(other, ColDefs):
            b = list(other.data)
        else:
            raise TypeError, 'Wrong type of input'
        if option == 'left':
            tmp = list(self.data) + b
        else:
            tmp = b + list(self.data)
        return ColDefs(tmp)

    def __radd__(self, other):
        return self.__add__(other, 'right')

    def __sub__(self, other):
        # remove the named/indexed column(s) and return a new ColDefs
        if not isinstance(other, (list, tuple)):
            other = [other]
        _other = [_get_index(self.names, key) for key in other]
        indx=range(len(self))
        for x in _other:
            indx.remove(x)
        tmp = [self[i] for i in indx]
        return ColDefs(tmp)

    def _setup(self):
        """Initialize all attributes to be a list of null strings."""
        for cname in _commonNames:
            setattr(self, cname+'s', ['']*self._nfields)
        setattr(self, '_arrays', [None]*self._nfields)

    def add_col(self, column):
        """Append one Column to the column definition."""
        return self+column

    def del_col(self, col_name):
        """Delete (the definition of) one Column."""
        indx = _get_index(self.names, col_name)
        for cname in _commonNames:
            attr = getattr(self, cname+'s')
            del attr[indx]
        del self._arrays[indx]
        self._nfields -= 1

    def change_attrib(self, col_name, attrib, new_value):
        """Change an attribute (in the commonName list) of a Column."""
        indx = _get_index(self.names, col_name)
        getattr(self, attrib+'s')[indx] = new_value

    def change_name(self, col_name, new_name):
        """Change a Column's name."""
        if new_name != col_name and new_name in self.names:
            raise ValueError, 'New name %s already exists.' % new_name
        else:
            self.change_attrib(col_name, 'name', new_name)

    def change_unit(self, col_name, new_unit):
        """Change a Column's unit."""
        self.change_attrib(col_name, 'unit', new_unit)

    def info(self, attrib='all'):
        """Get attribute(s) information of the column definition."""
        """The attrib can be one or more of the attributes listed in
        _commonNames.  The default is "all" which will print out
        all attributes.  It forgives plurals and blanks.  If there are
        two or more attribute names, they must be separated by comma(s).
        """
        if attrib.strip().lower() in ['all', '']:
            list = _commonNames
        else:
            list = attrib.split(',')
            for i in range(len(list)):
                list[i]=list[i].strip().lower()
                if list[i][-1] == 's':
                    list[i]=list[i][:-1]

        for att in list:
            if att not in _commonNames:
                print "'%s' is not an attribute of the column definitions."%att
                continue
            print "%s:" % att
            print '  ', getattr(self, att+'s')

    #def change_format(self, col_name, new_format):
        #new_format = _convert_format(new_format)
        #self.change_attrib(col_name, 'format', new_format)
def _get_tbdata(hdu):
    """Get the table data from input (an HDU object), as a FITS_rec."""
    tmp = hdu.columns

    # get the right shape for the data part of the random group,
    # since binary table does not support ND yet
    if isinstance(hdu, GroupsHDU):
        tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format

    # read from the memory map when available, otherwise from the file
    if hdu._ffile.memmap:
        _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan]
        _data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape)
    else:
        _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape)

    # FITS data is big-endian on disk
    if isinstance(hdu._ffile, _File):
        _data._byteorder = 'big'

    # pass datLoc, for P format
    _data._heapoffset = hdu._theap + hdu._datLoc
    _data._file = hdu._file
    _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2']
    _data._gap = hdu._theap - _tbsize

    # comment out to avoid circular reference of _pcount

    # pass the attributes
    for attr in ['formats', 'names']:
        setattr(_data, attr, getattr(tmp, attr))
    # let the column definitions share views of the freshly read fields
    for i in range(len(tmp)):
        tmp._arrays[i] = _data.field(i)
    return FITS_rec(_data)
def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'):
    """Create a new table HDU from the input column definitions.

    input: a list of Columns or a ColDefs object.
    header: header to be used to populate the non-required keywords.
    nrows: number of rows in the new table.
    fill: if = 1, will fill all cells with zeros or blanks
          if = 0, copy the data from input, undefined cells will still
                  be filled with zeros/blanks.
    tbtype: table type to be created (BinTableHDU or TableHDU).
    """
    # construct a table HDU of the requested type (class looked up by name)
    hdu = eval(tbtype)(header=header)

    if isinstance(input, ColDefs):
        if input._tbtype == tbtype:
            tmp = hdu.columns = input
        else:
            raise ValueError, 'column definitions have a different table type'
    elif isinstance(input, FITS_rec): # input is a FITS_rec
        tmp = hdu.columns = input._coldefs
    else: # input is a list of Columns
        tmp = hdu.columns = ColDefs(input, tbtype)

    # read the delayed data
    for i in range(len(tmp)):
        _arr = tmp._arrays[i]
        if isinstance(_arr, Delayed):
            tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field)

    # use the largest column shape as the shape of the record
    if nrows == 0:
        for arr in tmp._arrays:
            if arr is not None:
                dim = arr._shape[0]
            else:
                dim = 0
            if dim > nrows:
                nrows = dim

    if tbtype == 'TableHDU':
        # ASCII table: one fixed-width character field per column
        _formats = ''
        _itemsize = 0
        for i in range(len(tmp)):
            _formats += 'a%d,' % tmp.spans[i]
            _itemsize += tmp.spans[i]
        hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names, shape=nrows))
    else:
        hdu.data = FITS_rec(rec.array(None, formats=tmp._recformats, names=tmp.names, shape=nrows))
    hdu.data._coldefs = hdu.columns

    # populate data to the new table
    for i in range(len(tmp)):
        if tmp._arrays[i] is None:
            size = 0
        else:
            size = len(tmp._arrays[i])

        # n = number of rows to copy from the input column
        n = min(size, nrows)
        if fill:
            n = 0
        (_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:]
        if n > 0:
            if isinstance(tmp._recformats[i], _FormatX):
                # bit column: pack Booleans, unless already packed
                if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx:
                    _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx)
                else: # from a table parent data, just pass it
                    hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n]
            elif isinstance(tmp._recformats[i], _FormatP):
                hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype)
            else:
                if tbtype == 'TableHDU':

                    # string no need to convert,
                    if isinstance(tmp._arrays[i], chararray.CharArray):
                        hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n]
                    else:
                        # keep the physical (scaled) values in _convert
                        hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type())
                        if _scale or _zero:
                            _arr = tmp._arrays[i].copy()
                        else:
                            _arr = tmp._arrays[i]
                        if _scale:
                            _arr *= bscale
                        if _zero:
                            _arr += bzero
                        hdu.data._convert[i][:n] = _arr
                else:
                    hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n]

        if n < nrows:
            # pad the rows the input column did not supply
            if tbtype == 'BinTableHDU':
                if isinstance(hdu.data._parent.field(i), num.NumArray):
                    # make the scaled data = 0, not the stored data
                    hdu.data._parent.field(i)[n:] = -bzero/bscale
                else:
                    hdu.data._parent.field(i)[n:] = ''

    hdu.update()
    return hdu
class FITS_rec(rec.RecArray):
"""FITS record array class. FITS record array is the data part of a
table HDU's data part. This is a layer over the RecArray, so we
can deal with scaled columns.
"""
def __init__(self, input):
"""Construct a FITS record array from a RecArray."""
# input should be a record array
self.__setstate__(input.__getstate__())
# _parent is the original (storage) array,
# _convert is the scaled (physical) array.
self._parent = input
self._convert = [None]*self._nfields
self.names = self._names
def copy(self):
r = rec.RecArray.copy(self)
r.__class__ = rec.RecArray
r._coldefs = self._coldefs
f = FITS_rec(r)
f._convert = copy.deepcopy(self._convert)
return f
def _clone(self, shape):
"""Overload this to make mask array indexing work properly."""
hdu = new_table(self._coldefs, nrows=shape[0])
return hdu.data
def __repr__(self):
tmp = rec.RecArray.__repr__(self)
loc = tmp.rfind('\nnames=')
tmp = tmp[:loc+7] + `self._coldefs.names` + ')'
return tmp
# synchronize the sliced FITS_rec and its ._parent
def __getitem__(self, key):
tmp = rec.RecArray.__getitem__(self, key)
if isinstance(key, slice):
out = tmp
out._parent = rec.RecArray.__getitem__(self._parent, key)
out._convert = [None]*self._nfields
for i in range(self._nfields):
# touch all fields to expand the original ._convert list
# so the sliced FITS_rec will view the same scaled columns as
# the original
dummy = self.field(i)
if self._convert[i] is not None:
out._convert[i] = ndarray.NDArray.__getitem__(self._convert[i], key)
del dummy
return out
# if not a slice, do this because Record has no __getstate__.
# also more efficient.
else:
return tmp
def _get_scale_factors(self, indx):
"""
Get the scaling flags and factors for one field.
indx is the index of the field.
"""
if self._coldefs._tbtype == 'BinTableHDU':
_str = 'a' in self._coldefs.formats[indx]
_bool = self._coldefs._recformats[indx][-2:] == _booltype
else:
_str = self._coldefs.formats[indx][0] == 'A'
_bool = 0 # there is no boolean in ASCII table
_number = not(_bool or _str)
bscale = self._coldefs.bscales[indx]
bzero = self._coldefs.bzeros[indx]
_scale = bscale not in ['', None, 1]
_zero = bzero not in ['', None, 0]
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
return (_str, _bool, _number, _scale, _zero, bscale, bzero)
def field(self, key):
"""A view of a Column's data as an array."""
indx = _get_index(self._coldefs.names, key)
if (self._convert[indx] is None):
# for X format
if isinstance(self._coldefs._recformats[indx], _FormatX):
_nx = self._coldefs._recformats[indx]._nx
dummy = num.zeros(self._parent.shape+(_nx,), type=num.Bool)
_unwrapx(self._parent.field(indx), dummy, _nx)
self._convert[indx] = dummy
return self._convert[indx]
(_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx)
# for P format
if isinstance(self._coldefs._recformats[indx], _FormatP):
dummy = _VLF([None]*len(self._parent))
dummy._dtype = self._coldefs._recformats[indx]._dtype
for i in range(len(self._parent)):
_offset = self._parent.field(indx)[i,1] + self._heapoffset
self._file.seek(_offset)
if self._coldefs._recformats[indx]._dtype is 'a':
dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1)
else:
dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0])
dummy[i]._byteorder = 'big'
# scale by TSCAL and TZERO
if _scale or _zero:
for i in range(len(self._parent)):
dummy[i][:] = dummy[i]*bscale+bzero
# Boolean (logical) column
if self._coldefs._recformats[indx]._dtype is _booltype:
for i in range(len(self._parent)):
dummy[i] = num.equal(dummy[i], ord('T'))
self._convert[indx] = dummy
return self._convert[indx]
if _str:
return self._parent.field(indx)
# ASCII table, convert strings to numbers
if self._coldefs._tbtype == 'TableHDU':
_dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64}
_type = _dict[self._coldefs._Formats[indx][0]]
# if the string = TNULL, return ASCIITNULL
nullval = self._coldefs.nulls[indx].strip()
dummy = num.zeros(len(self._parent), type=_type)
dummy[:] = ASCIITNULL
self._convert[indx] = dummy
for i in range(len(self._parent)):
if self._parent.field(indx)[i].strip() != nullval:
dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E'))
else:
dummy = self._parent.field(indx)
# further conversion for both ASCII and binary tables
if _number and (_scale or _zero):
# only do the scaling the first time and store it in _convert
self._convert[indx] = num.array(dummy, type=num.Float64)
if _scale:
num.multiply(self._convert[indx], bscale, self._convert[indx])
if _zero:
self._convert[indx] += bzero
elif _bool:
self._convert[indx] = num.equal(dummy, ord('T'))
else:
return dummy
return self._convert[indx]
def _scale_back(self):
"""Update the parent array, using the (latest) scaled array."""
_dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'}
# calculate the starting point and width of each field for ASCII table
if self._coldefs._tbtype == 'TableHDU':
_loc = [1]
_width = []
for i in range(self._nfields):
_loc.append(_loc[-1]+self._parent.field(i).itemsize())
_width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1])
self._heapsize = 0
for indx in range(self._nfields):
if (self._convert[indx] is not None):
if isinstance(self._coldefs._recformats[indx], _FormatX):
_wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx)
continue
(_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx)
# add the location offset of the heap area for each
# variable length column
if isinstance(self._coldefs._recformats[indx], _FormatP):
desc = self._parent.field(indx)
desc[:] = 0 # reset
_npts = map(len, self._convert[indx])
desc[:len(_npts),0] = _npts
_dtype = num.getType(self._coldefs._recformats[indx]._dtype)
desc[1:,1] = num.add.accumulate(desc[:-1,0])*_dtype.bytes
desc[:,1][:] += self._heapsize
self._heapsize += desc[:,0].sum()*_dtype.bytes
# conversion for both ASCII and binary tables
if _number or _str:
if _number and (_scale or _zero):
dummy = self._convert[indx].copy()
if _zero:
dummy -= bzero
if _scale:
dummy /= bscale
elif self._coldefs._tbtype == 'TableHDU':
dummy = self._convert[indx]
else:
continue
# ASCII table, convert numbers to strings
if self._coldefs._tbtype == 'TableHDU':
_format = self._coldefs._Formats[indx].strip()
_lead = self._coldefs.starts[indx] - _loc[indx]
if _lead < 0:
raise ValueError, "column `%s` starting point overlaps to the previous column" % indx+1
_trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx]
if _trail < 0:
raise ValueError, "column `%s` ending point overlaps to the next column" % indx+1
if 'A' in _format:
_pc = '%-'
else:
_pc = '%'
_fmt = ' '*_lead + _pc + _format[1:] + _dict[_format[0]] + ' '*_trail
# not using numarray.strings's num2char because the
# result is not allowed to expand (as C/Python does).
for i in range(len(dummy)):
x = _fmt % dummy[i]
if len(x) > (_loc[indx+1]-_loc[indx]):
raise ValueError, "number `%s` does not fit into the output's itemsize of %s" % (x, _width[indx])
else:
self._parent.field(indx)[i] = x
if 'D' in _format:
self._parent.field(indx).sub('E', 'D')
# binary table
else:
if isinstance(self._parent.field(indx)._type, num.IntegralType):
dummy = num.around(dummy)
self._parent.field(indx)[:] = dummy
del dummy
# ASCII table does not have Boolean type
elif _bool:
self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T')))
class GroupData(FITS_rec):
    """Random groups data object.

    Allows structured access to FITS Group data in a manner analogous
    to tables.
    """

    def __init__(self, input=None, bitpix=None, pardata=None, parnames=[],
                 bscale=None, bzero=None, parbscales=None, parbzeros=None):
        """input: input data, either the group data itself (a numarray) or
                  a record array (FITS_rec) which will contain both group
                  parameter info and the data.  The rest of the arguments are
                  used only for the first case.
           bitpix: data type as expressed in FITS BITPIX value
                  (8, 16, 32, 64, -32, or -64)
           pardata: parameter data, as a list of (numeric) arrays.
           parnames: list of parameter names.
           bscale: BSCALE of the data
           bzero: BZERO of the data
           parbscales: list of bscales for the parameters
           parbzeros: list of bzeros for the parameters

        NOTE: the mutable default parnames=[] is never mutated here, so it
        is harmless in practice.
        """
        if isinstance(input, num.NumArray):
            _formats = ''
            _cols = []
            # number of group parameters preceding the data in each group
            if pardata is None:
                npars = 0
            else:
                npars = len(pardata)
            if parbscales is None:
                parbscales = [None]*npars
            if parbzeros is None:
                parbzeros = [None]*npars
            # derive the FITS BITPIX code from the array type if not given
            if bitpix is None:
                bitpix = _ImageBaseHDU.ImgCode[input.type()]
            fits_fmt = GroupsHDU._dict[bitpix]   # e.g. -32 -> 'E'
            _fmt = _fits2rec[fits_fmt]           # e.g. 'E' -> 'f4'
            # one record-format entry per parameter, then the data block
            _formats = (_fmt+',') * npars
            data_fmt = '%s%s' % (`input.shape[1:]`, _fmt)
            _formats += data_fmt
            gcount = input.shape[0]
            # build one Column per group parameter, plus one for the data
            for i in range(npars):
                _cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale = parbscales[i], bzero = parbzeros[i]))
            _cols.append(Column(name='data', format = fits_fmt, bscale = bscale, bzero = bzero))
            self._coldefs = ColDefs(_cols)
            self.parnames = [i.lower() for i in parnames]
            # create an empty record array of the right layout and adopt
            # its internal state as our own
            tmp = FITS_rec(rec.array(None, formats=_formats, shape=gcount, names= self._coldefs.names))
            self.__setstate__(tmp.__getstate__())
            # fill the parameter fields; scaled fields go through _convert
            for i in range(npars):
                (_scale, _zero) = self._get_scale_factors(i)[3:5]
                if _scale or _zero:
                    self._convert[i] = pardata[i]
                else:
                    self._parent.field(i)[:] = pardata[i]
            # fill the data field (the last field), same scaling rule
            (_scale, _zero) = self._get_scale_factors(npars)[3:5]
            if _scale or _zero:
                self._convert[npars] = input
            else:
                self._parent.field(npars)[:] = input
        else:
            # input is already a record array (FITS_rec): adopt its state
            self.__setstate__(input.__getstate__())

    def __getattr__(self, attr):
        """Lazily compute the 'data' and '_unique' attributes."""
        if attr == 'data':
            self.__dict__[attr] = self.field('data')
        elif attr == '_unique':
            # map each (already lower-cased) parameter name to the list of
            # field indices carrying that name -- random-groups files may
            # repeat a parameter name across several fields
            _unique = {}
            for i in range(len(self.parnames)):
                _name = self.parnames[i]
                if _name in _unique:
                    _unique[_name].append(i)
                else:
                    _unique[_name] = [i]
            self.__dict__[attr] = _unique
        try:
            return self.__dict__[attr]
        except KeyError:
            raise AttributeError(attr)

    def par(self, parName):
        """Get the group parameter values.

        parName may be a field index or a (case-insensitive) parameter name.
        When several fields share the name, their values are summed.
        """
        if isinstance(parName, (int, long)):
            result = self.field(parName)
        else:
            indx = self._unique[parName.lower()]
            if len(indx) == 1:
                result = self.field(indx[0])
            # if more than one group parameter have the same name
            else:
                result = self.field(indx[0]).astype('f8')
                for i in indx[1:]:
                    result += self.field(i)
        return result

    def setpar(self, parName, value):
        """Set the group parameter values.

        NOTE(review): par() lower-cases the name before the _unique lookup
        but setpar() does not, so a mixed-case parName that par() accepts
        will raise KeyError here -- confirm whether this is intended.
        """
        if isinstance(parName, (int, long)):
            self.field(parName)[:] = value
        else:
            indx = self._unique[parName]
            if len(indx) == 1:
                self.field(indx[0])[:] = value
            # if more than one group parameter have the same name, the
            # value must be a list (or tuple) containing arrays
            else:
                if isinstance(value, (list, tuple)) and len(indx) == len(value):
                    for i in range(len(indx)):
                        self.field(indx[i])[:] = value[i]
                else:
                    raise ValueError, "parameter value must be a sequence with %d arrays/numbers." % len(indx)

    def _getitem(self, offset):
        # translate a byte offset into a row number (Python 2 integer
        # division) and wrap that row in a _Group accessor
        row = (offset - self._byteoffset) / self._strides[0]
        return _Group(self, row)
class _Group(rec.Record):
    """A single group (one row) of random-groups data."""

    def __init__(self, input, row=0):
        rec.Record.__init__(self, input, row)

    def par(self, fieldName):
        """Return the value of group parameter `fieldName` for this group."""
        values = self.array.par(fieldName)
        return values[self.row]

    def setpar(self, fieldName, value):
        """Assign `value` to group parameter `fieldName` for this group."""
        one_row = self.array[self.row:self.row+1]
        one_row.setpar(fieldName, value)
class _TableBaseHDU(_ExtensionHDU):
    """FITS table extension base HDU class.

    Common machinery for ASCII (TableHDU) and binary (BinTableHDU) table
    extensions: minimal-header construction, lazy data/column access, and
    header keyword maintenance.
    """

    def __init__(self, data=None, header=None, name=None):
        """
        header: header to be used
        data: data to be used
        name: name to be populated in EXTNAME keyword
        """
        if header is not None:
            if not isinstance(header, Header):
                raise ValueError, "header must be a Header object"
        if data is DELAYED:
            # this should never happen
            if header is None:
                raise ValueError, "No header to setup HDU."
            # if the file is read the first time, no need to copy, and
            # keep it unchanged
            else:
                self.header = header
        else:
            # construct a list of cards for a minimal table header
            _list = CardList([
                Card('XTENSION', '', ''),
                Card('BITPIX', 8, 'array data type'),
                Card('NAXIS', 2, 'number of array dimensions'),
                Card('NAXIS1', 0, 'length of dimension 1'),
                Card('NAXIS2', 0, 'length of dimension 2'),
                Card('PCOUNT', 0, 'number of group parameters'),
                Card('GCOUNT', 1, 'number of groups'),
                Card('TFIELDS', 0, 'number of table fields')
                ])
            if header is not None:
                # Make a "copy" (not just a view) of the input header, since
                # it may get modified.  The data is still a "view" (for now).
                hcopy = header.copy()
                hcopy._strip()
                _list.extend(hcopy.ascardlist())
            self.header = Header(_list)
            if (data is not DELAYED):
                if isinstance(data, rec.RecArray):
                    # keep the size keywords in sync with the record array
                    self.header['NAXIS1'] = data._itemsize
                    self.header['NAXIS2'] = data._shape[0]
                    self.header['TFIELDS'] = data._nfields
                    self.data = data
                    self.columns = data._coldefs
                    self.update()
                elif data is None:
                    pass
                else:
                    raise TypeError, "table data has incorrect type"
        # set extension name
        if not name and self.header.has_key('EXTNAME'):
            name = self.header['EXTNAME']
        self.name = name

    def __getattr__(self, attr):
        """Lazily compute the 'data', 'columns', '_theap' and '_pcount'
        attributes on first access and cache them in __dict__."""
        if attr == 'data':
            size = self.size()
            if size:
                # read the table data from the file on first touch
                self._file.seek(self._datLoc)
                data = _get_tbdata(self)
                data._coldefs = self.columns
            else:
                data = None
            self.__dict__[attr] = data
        elif attr == 'columns':
            # ColDefs needs to know whether this is an ASCII or binary table
            class_name = str(self.__class__)
            class_name = class_name[class_name.rfind('.')+1:]
            self.__dict__[attr] = ColDefs(self, tbtype=class_name)
        elif attr == '_theap':
            # heap offset defaults to the end of the "regular" table data
            self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2'])
        elif attr == '_pcount':
            self.__dict__[attr] = self.header.get('PCOUNT', 0)
        try:
            return self.__dict__[attr]
        except KeyError:
            raise AttributeError(attr)

    def _summary(self):
        """Summarize the HDU: name, dimensions, and formats."""
        class_name = str(self.__class__)
        type = class_name[class_name.rfind('.')+1:]
        # if data is touched, use data info.
        if 'data' in dir(self):
            if self.data is None:
                _shape, _format = (), ''
                _nrows = 0
            else:
                _nrows = len(self.data)
            _ncols = len(self.columns.formats)
            _format = self.columns.formats
        # if data is not touched yet, use header info.
        else:
            _shape = ()
            _nrows = self.header['NAXIS2']
            _ncols = self.header['TFIELDS']
            _format = '['
            for j in range(_ncols):
                _format += self.header['TFORM'+`j+1`] + ', '
            _format = _format[:-2] + ']'
        _dims = "%dR x %dC" % (_nrows, _ncols)
        return "%-10s %-11s %5d %-12s %s" % \
            (self.name, type, len(self.header.ascard), _dims, _format)

    def get_coldefs(self):
        """Returns the table's column definitions."""
        return self.columns

    def update(self):
        """Update header keywords to reflect recent changes of columns."""
        _update = self.header.update
        _append = self.header.ascard.append
        _cols = self.columns
        _update('naxis1', self.data._itemsize, after='naxis')
        _update('naxis2', self.data._shape[0], after='naxis1')
        _update('tfields', len(_cols), after='gcount')
        # Wipe out the old table definition keywords.  Mark them first,
        # then delete from the end so as not to confuse the indexing.
        _list = []
        for i in range(len(self.header.ascard)-1, -1, -1):
            _card = self.header.ascard[i]
            _key = _tdef_re.match(_card.key)
            try: keyword = _key.group('label')
            except: continue                # skip if there is no match
            if (keyword in _keyNames):
                _list.append(i)
        for i in _list:
            del self.header.ascard[i]
        del _list
        # populate the new table definition keywords (TTYPEn, TFORMn, ...)
        for i in range(len(_cols)):
            for cname in _commonNames:
                val = getattr(_cols, cname+'s')[i]
                if val != '':
                    keyword = _keyNames[_commonNames.index(cname)]+`i+1`
                    if cname == 'format' and isinstance(self, BinTableHDU):
                        # binary tables derive TFORM from the record format
                        val = _cols._recformats[i]
                        if isinstance(val, _FormatX):
                            val = `val._nx` + 'X'
                        elif isinstance(val, _FormatP):
                            # variable-length column: record the longest row
                            VLdata = self.data.field(i)
                            VLdata._max = max(map(len, VLdata))
                            val = 'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max
                        else:
                            val = _convert_format(val, reverse=1)
                    #_update(keyword, val)
                    _append(Card(keyword, val))

    def copy(self):
        """Make a copy of the table HDU, both header and data are copied."""
        # touch the data, so it's defined (in the case of reading from a
        # FITS file)
        self.data
        return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype)

    def _verify(self, option='warn'):
        """_TableBaseHDU verify method."""
        _err = _ExtensionHDU._verify(self, option=option)
        self.req_cards('NAXIS', None, 'val == 2', 2, option, _err)
        self.req_cards('BITPIX', None, 'val == 8', 8, option, _err)
        self.req_cards('TFIELDS', '== 7', _isInt+" and val >= 0 and val <= 999", 0, option, _err)
        tfields = self.header['TFIELDS']
        # every declared field needs a TFORMn card
        for i in range(tfields):
            self.req_cards('TFORM'+`i+1`, None, None, None, option, _err)
        return _err
class TableHDU(_TableBaseHDU):
"""FITS ASCII table extension HDU class."""
__format_RE = re.compile(
r'(?P<code>[ADEFI])(?P<width>\d+)(?:\.(?P<prec>\d+))?')
def __init__(self, data=None, header=None, name=None):
"""data: data of the table
header: header to be used for the HDU
name: the EXTNAME value
"""
_TableBaseHDU.__init__(self, data=data, header=header, name=name)
self._xtn = 'TABLE'
if self.header[0].rstrip() != self._xtn:
self.header[0] = self._xtn
self.header.ascard[0].comment = 'ASCII table extension'
'''
def format(self):
strfmt, strlen = '', 0
for j in range(self.header['TFIELDS']):
bcol = self.header['TBCOL'+`j+1`]
valu = self.header['TFORM'+`j+1`]
fmt = self.__format_RE.match(valu)
if fmt:
code, width, prec = fmt.group('code', 'width', 'prec')
else:
raise ValueError, valu
size = eval(width)+1
strfmt = strfmt + 's'+str(size) + ','
strlen = strlen + size
else:
strfmt = '>' + strfmt[:-1]
return strfmt
'''
def _verify(self, option='warn'):
"""TableHDU verify method."""
_err = _TableBaseHDU._verify(self, option=option)
self.req_cards('PCOUNT', None, 'val == 0', 0, option, _err)
tfields = self.header['TFIELDS']
for i in range(tfields):
self.req_cards('TBCOL'+`i+1`, None, _isInt, None, option, _err)
return _err
class BinTableHDU(_TableBaseHDU):
    """Binary table HDU class."""

    def __init__(self, data=None, header=None, name=None):
        """data: data of the table
           header: header to be used for the HDU
           name: the EXTNAME value
        """
        _TableBaseHDU.__init__(self, data=data, header=header, name=name)
        self._xtn = 'BINTABLE'
        # force the XTENSION card to identify a binary table
        if self.header[0] != self._xtn:
            self.header[0] = self._xtn
            self.header.ascard[0].comment = 'binary table extension'
class StreamingHDU:
    """
    A class that provides the capability to stream data to a FITS file
    instead of requiring data to all be written at once.

    The following pseudo code illustrates its use:

        header = pyfits.Header()
        for all the cards you need in the header:
            header.update(key, value, comment)
        shdu = pyfits.StreamingHDU('filename.fits', header)
        for each piece of data:
            shdu.write(data)
        shdu.close()
    """

    def __init__(self, name, header):
        """
        Construct a StreamingHDU object given a file name and a header.

        :Parameters:
          name : string
              The name of the file to which the header and data will be
              streamed.
          header : Header
              The header object associated with the data to be written
              to the file.

        :Returns:
          None

        Notes
        -----
        The file will be opened and the header appended to the end of
        the file.  If the file does not already exist, it will be created
        and if the header represents a Primary header, it will be written
        to the beginning of the file.  If the file does not exist and the
        provided header is not a Primary header, a default Primary HDU will
        be inserted at the beginning of the file and the provided header
        will be added as the first extension.  If the file does already
        exist, but the provided header represents a Primary header, the
        header will be modified to an image extension header and appended
        to the end of the file.
        """
        # copy so the caller's header is never mutated
        self.header = header.copy()
        #
        # Check if the file already exists.  If it does not, check to see
        # if we were provided with a Primary Header.  If not we will need
        # to prepend a default PrimaryHDU to the file before writing the
        # given header.
        #
        if not os.path.exists(name):
            if not self.header.has_key('SIMPLE'):
                # no Primary header supplied: write a default one first
                hdulist = HDUList([PrimaryHDU()])
                hdulist.writeto(name, 'exception')
        else:
            if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0:
                #
                # This will not be the first extension in the file so we
                # must change the Primary header provided into an image
                # extension header.
                #
                self.header.update('XTENSION', 'IMAGE', 'Image extension',
                                   after='SIMPLE')
                del self.header['SIMPLE']
                if not self.header.has_key('PCOUNT'):
                    # insert PCOUNT after the last NAXISn card
                    dim = self.header['NAXIS']
                    if dim == 0:
                        dim = ''
                    else:
                        dim = str(dim)
                    self.header.update('PCOUNT', 0, 'number of parameters',
                                       after='NAXIS'+dim)
                if not self.header.has_key('GCOUNT'):
                    self.header.update('GCOUNT', 1, 'number of groups',
                                       after='PCOUNT')
        # open for append, seek to EOF, and write the header now; data
        # follows through write()
        self._ffo = _File(name, 'append')
        self._ffo.getfile().seek(0, 2)
        self._hdrLoc = self._ffo.writeHDUheader(self)
        self._datLoc = self._ffo.getfile().tell()
        self._size = self.size()
        if self._size != 0:
            self.writeComplete = 0
        else:
            self.writeComplete = 1

    def write(self, data):
        """
        Write the given data to the stream.

        :Parameters:
          data : NumArray
              Data to stream to the file.

        :Returns:
          writeComplete : integer
              Flag that when true indicates that all of the required data
              has been written to the stream.

        Notes
        -----
        Only the amount of data specified in the header provided to the
        class constructor may be written to the stream.  If the provided
        data would cause the stream to overflow, an IOError exception is
        raised and the data is not written.  Once sufficient data has been
        written to the stream to satisfy the amount specified in the header,
        the stream is padded to fill a complete FITS block and no more data
        will be accepted.  An attempt to write more data after the stream
        has been filled will raise an IOError exception.  If the dtype of
        the input data does not match what is expected by the header, a
        TypeError exception is raised.
        """
        if self.writeComplete:
            raise IOError, "The stream is closed and can no longer be written"
        curDataSize = self._ffo.getfile().tell() - self._datLoc
        if curDataSize + data.itemsize()*data._size > self._size:
            raise IOError, "Supplied data will overflow the stream"
        if _ImageBaseHDU.NumCode[self.header['BITPIX']] != data.type():
            raise TypeError, "Supplied data is not the correct type."
        if data._byteorder != 'big':
            #
            # byteswap little endian arrays before writing (FITS is
            # big-endian on disk)
            #
            output = data.byteswapped()
        else:
            output = data
        output.tofile(self._ffo.getfile())
        if self._ffo.getfile().tell() - self._datLoc == self._size:
            #
            # the stream is full so pad the data to the next FITS block
            #
            self._ffo.getfile().write(_padLength(self._size)*'\0')
            self.writeComplete = 1
        self._ffo.getfile().flush()
        return self.writeComplete

    def size(self):
        """
        Return the size (in bytes) of the data portion of the HDU.

        :Parameters:
          None

        :Returns:
          size : integer
              The number of bytes of data required to fill the stream
              per the header provided in the constructor.
        """
        size = 0
        naxis = self.header.get('NAXIS', 0)
        if naxis > 0:
            simple = self.header.get('SIMPLE', 'F')
            randomGroups = self.header.get('GROUPS', 'F')
            if simple == 'T' and randomGroups == 'T':
                # random groups: NAXIS1 is 0 and is skipped in the product
                groups = 1
            else:
                groups = 0
            size = 1
            for j in range(groups, naxis):
                size = size * self.header['NAXIS'+`j+1`]
            bitpix = self.header['BITPIX']
            gcount = self.header.get('GCOUNT', 1)
            pcount = self.header.get('PCOUNT', 0)
            # standard FITS data size formula (Python 2 integer division)
            size = abs(bitpix) * gcount * (pcount + size) / 8
        return size

    def close(self):
        """
        Close the 'physical' FITS file.

        :Parameters:
          None

        :Returns:
          None
        """
        self._ffo.close()
class ErrorURLopener(urllib.FancyURLopener):
"""A class to use with urlretrieve to allow IOError exceptions to be
raised when a file specified by a URL cannot be accessed"""
def http_error_default(self, url, fp, errcode, errmsg, headers):
raise IOError, (errcode, errmsg, url)
# Install the locally subclassed opener as urllib's module-level opener so
# that urllib.urlretrieve raises IOError on inaccessible URLs.
urllib._urlopener = ErrorURLopener()
# Initialize tempcache with an empty dictionary to enable file cacheing
# of retrieved files.
urllib._urlopener.tempcache = {}
class _File:
"""A file I/O class"""
def __init__(self, name, mode='copyonwrite', memmap=0):
if mode not in _python_mode.keys():
raise "Mode '%s' not recognized" % mode
if mode != 'append' and not os.path.exists(name):
self.name, fileheader = urllib.urlretrieve(name)
else:
self.name = name
self.mode = mode
self.memmap = memmap
if memmap and mode not in ['readonly', 'copyonwrite', 'update']:
raise "Memory mapping is not implemented for mode `%s`." % mode
else:
if os.path.splitext(self.name)[1] == '.gz':
# Handle gzip files
if mode in ['update', 'append']:
raise "Writing to gzipped fits files is not supported"
zfile = gzip.GzipFile(self.name)
self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits')
self.name = self.tfile.name
self.__file = self.tfile.file
self.__file.write(zfile.read())
zfile.close()
elif os.path.splitext(self.name)[1] == '.zip':
# Handle zip files
if mode in ['update', 'append']:
raise "Writing to zipped fits files is not supported"
zfile = zipfile.ZipFile(self.name)
namelist = zfile.namelist()
if len(namelist) != 1:
raise "Zip files with multiple members are not supported."
self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits')
self.name = self.tfile.name
self.__file = self.tfile.file
self.__file.write(zfile.read(namelist[0]))
zfile.close()
else:
self.__file = __builtin__.open(self.name, _python_mode[mode])
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
self.__file.seek(0, 2)
self._size = self.__file.tell()
self.__file.seek(0)
def __getattr__(self, attr):
"""Get the _mm attribute."""
if attr == '_mm':
self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode])
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
def getfile(self):
return self.__file
def _readheader(self, cardList, keyList, blocks):
"""Read blocks of header, and put each card into a list of cards.
Will deal with CONTINUE cards in a later stage as CONTINUE cards
may span across blocks.
"""
if len(block) != _blockLen:
raise IOError, 'Block length is not %d: %d' % (_blockLen, len(block))
elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']):
raise IOError, 'Block does not begin with SIMPLE or XTENSION'
for i in range(0, len(_blockLen), Card.length):
_card = Card('').fromstring(block[i:i+Card.length])
_key = _card.key
cardList.append(_card)
keyList.append(_key)
if _key == 'END':
break
def _readHDU(self):
"""Read the skeleton structure of the HDU."""
end_RE = re.compile('END'+' '*77)
_hdrLoc = self.__file.tell()
# Read the first header block.
block = self.__file.read(_blockLen)
if block == '':
raise EOFError
hdu = _TempHDU()
hdu._raw = ''
# continue reading header blocks until END card is reached
while 1:
# find the END card
mo = end_RE.search(block)
if mo is None:
hdu._raw += block
block = self.__file.read(_blockLen)
if block == '':
break
else:
break
hdu._raw += block
_size, hdu.name = hdu._getsize(hdu._raw)
# get extname and extver
if hdu.name == '':
hdu.name, hdu._extver = hdu._getname()
elif hdu.name == 'PRIMARY':
hdu._extver = 1
hdu._file = self.__file
hdu._hdrLoc = _hdrLoc # beginning of the header area
hdu._datLoc = self.__file.tell() # beginning of the data area
# data area size, including padding
hdu._datSpan = _size + _padLength(_size)
hdu._new = 0
self.__file.seek(hdu._datSpan, 1)
if self.__file.tell() > self._size:
print 'Warning: File size is smaller than specified data size. File may have been truncated.'
hdu._ffile = self
return hdu
def writeHDU(self, hdu):
"""Write *one* FITS HDU. Must seek to the correct location before
calling this method.
"""
if isinstance(hdu, _ImageBaseHDU):
hdu.update_header()
return (self.writeHDUheader(hdu),) + self.writeHDUdata(hdu)
def writeHDUheader(self, hdu):
"""Write FITS HDU header part."""
blocks = repr(hdu.header.ascard) + _pad('END')
blocks = blocks + _padLength(len(blocks))*' '
if len(blocks)%_blockLen != 0:
raise IOError
self.__file.flush()
loc = self.__file.tell()
self.__file.write(blocks)
# flush, to make sure the content is written
self.__file.flush()
return loc
def writeHDUdata(self, hdu):
"""Write FITS HDU data part."""
self.__file.flush()
loc = self.__file.tell()
_size = 0
if hdu.data is not None:
# if image, need to deal with byte order
if isinstance(hdu, _ImageBaseHDU):
if hdu.data._byteorder != 'big':
output = hdu.data.byteswapped()
else:
output = hdu.data
# Binary table byteswap
elif isinstance(hdu, BinTableHDU):
for i in range(hdu.data._nfields):
coldata = hdu.data.field(i)
coldata2 = hdu.data._parent.field(i)
if not isinstance(coldata, chararray.CharArray):
# only swap unswapped
# deal with var length table
if isinstance(coldata, _VLF):
for i in coldata:
if not isinstance(i, chararray.CharArray):
if i._type.bytes > 1:
if i._byteorder != 'big':
i.byteswap()
i._byteorder = 'big'
else:
if coldata._type.bytes > 1:
if coldata._byteorder != 'big':
coldata.byteswap()
coldata._byteorder = 'big'
if coldata2._type.bytes > 1:
# do the _parent too, otherwise the _parent
# of a scaled column may have wrong byteorder
if coldata2._byteorder != 'big':
coldata2.byteswap()
coldata2._byteorder = 'big'
# In case the FITS_rec was created in a LittleEndian machine
hdu.data._byteorder = 'big'
hdu.data._parent._byteorder = 'big'
output = hdu.data
else:
output = hdu.data
output.tofile(self.__file)
_size = output.nelements() * output._itemsize
# write out the heap of variable length array columns
# this has to be done after the "regular" data is written (above)
_where = self.__file.tell()
if isinstance(hdu, BinTableHDU):
self.__file.write(hdu.data._gap*'\0')
for i in range(hdu.data._nfields):
if isinstance(hdu.data._coldefs._recformats[i], _FormatP):
for j in range(len(hdu.data.field(i))):
coldata = hdu.data.field(i)[j]
if len(coldata) > 0:
coldata.tofile(self.__file)
_shift = self.__file.tell() - _where
hdu.data._heapsize = _shift - hdu.data._gap
_size = _size + _shift
# pad the FITS data block
if _size > 0:
self.__file.write(_padLength(_size)*'\0')
# flush, to make sure the content is written
self.__file.flush()
# return both the location and the size of the data area
return loc, _size+_padLength(_size)
def close(self):
"""Close the 'physical' FITS file."""
self.__file.close()
class HDUList(list, _Verify):
"""HDU list class. This is the top-level FITS object. When a FITS
file is opened, a HDUList object is returned.
"""
def __init__(self, hdus=[], file=None):
"""Construct a HDUList object.
hdus: Input, can be a list of HDU's or a single HDU. Default = None,
i.e. an empty HDUList.
file: The opened physical file associated with the HDUList.
Default = None.
"""
self.__file = file
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise "Invalid input for HDUList."
for hdu in hdus:
if not isinstance(hdu, _AllHDU):
raise "Element %d in the HDUList input is not an HDU." % hdus.index(hdu)
list.__init__(self, hdus)
def __iter__(self):
return [self[i] for i in range(len(self))].__iter__()
def __getitem__(self, key):
"""Get an HDU from the HDUList, indexed by number or name."""
key = self.index_of(key)
_item = super(HDUList, self).__getitem__(key)
if isinstance(_item, _TempHDU):
super(HDUList, self).__setitem__(key, _item.setupHDU())
return super(HDUList, self).__getitem__(key)
def __getslice__(self, start, end):
_hdus = super(HDUList, self).__getslice__(start,end)
result = HDUList(_hdus)
return result
def __setitem__(self, key, hdu):
"""Set an HDU to the HDUList, indexed by number or name."""
_key = self.index_of(key)
if isinstance(hdu, (slice, list)):
if isinstance(_key, int):
raise ValueError, "An element in the HDUList must be an HDU."
for item in hdu:
if not isinstance(item, _AllHDU):
raise ValueError, "%s is not an HDU." % item
else:
if not isinstance(hdu, _AllHDU):
raise ValueError, "%s is not an HDU." % hdu
try:
super(HDUList, self).__setitem__(_key, hdu)
except IndexError:
raise IndexError, 'Extension %s is out of bound or not found.' % key
self._resize = 1
def __delitem__(self, key):
"""Delete an HDU from the HDUList, indexed by number or name."""
key = self.index_of(key)
super(HDUList, self).__delitem__(key)
self._resize = 1
def __delslice__(self, i, j):
"""Delete a slice of HDUs from the HDUList, indexed by number only."""
super(HDUList, self).__delslice__(i, j)
self._resize = 1
def _verify (self, option='warn'):
_text = ''
_err = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
fix = "self.insert(0, PrimaryHDU())"
_text = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)
_err.append(_text)
# each element calls their own verify
for i in range(len(self)):
if i > 0 and (not isinstance(self[i], _ExtensionHDU)):
err_text = "HDUList's element %s is not an extension HDU." % `i`
_text = self.run_option(option, err_text=err_text, fixable=0)
_err.append(_text)
else:
_result = self[i]._verify(option)
if _result:
_err.append(_result)
return _err
def append(self, hdu):
"""Append a new HDU to the HDUList."""
if isinstance(hdu, _AllHDU):
super(HDUList, self).append(hdu)
hdu._new = 1
self._resize = 1
else:
raise "HDUList can only append an HDU"
# make sure the EXTEND keyword is in primary HDU if there is extension
if len(self) > 1:
self.update_extend()
def index_of(self, key):
"""Get the index of an HDU from the HDUList. The key can be an
integer, a string, or a tuple of (string, integer).
"""
if isinstance(key, (int, slice)):
return key
elif isinstance(key, tuple):
_key = key[0]
_ver = key[1]
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError, key
_key = (_key.strip()).upper()
nfound = 0
for j in range(len(self)):
_name = self[j].name
if isinstance(_name, str):
_name = _name.strip().upper()
if _name == _key:
# if only specify extname, can only have one extension with
# that name
if _ver == None:
found = j
nfound += 1
else:
# if the keyword EXTVER does not exist, default it to 1
_extver = self[j]._extver
if _ver == _extver:
found = j
nfound += 1
if (nfound == 0):
raise KeyError, 'extension %s not found' % `key`
elif (nfound > 1):
raise KeyError, 'there are %d extensions of %s' % (nfound, `key`)
else:
return found
def readall(self):
"""Read data of all HDU's into memory."""
for i in range(len(self)):
if self[i].data is not None:
continue
def update_tbhdu(self):
"""Update all table HDU's for scaled fields."""
for hdu in self:
if 'data' in dir(hdu):
if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is not None:
hdu.data._scale_back()
if isinstance(hdu, _TableBaseHDU) and hdu.data is not None:
# check TFIELDS and NAXIS2
hdu.header['TFIELDS'] = hdu.data._nfields
hdu.header['NAXIS2'] = hdu.data.shape[0]
# calculate PCOUNT, for variable length tables
_tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2']
_heapstart = hdu.header.get('THEAP', _tbsize)
hdu.data._gap = _heapstart - _tbsize
_pcount = hdu.data._heapsize + hdu.data._gap
if _pcount > 0:
hdu.header['PCOUNT'] = _pcount
# update TFORM for variable length columns
for i in range(hdu.data._nfields):
if isinstance(hdu.data._coldefs.formats[i], _FormatP):
key = hdu.header['TFORM'+`i+1`]
hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')'
def flush(self, output_verify='exception', verbose=0):
"""Force a write of the HDUList back to the file (for append and
update modes only).
output_verify: output verification option, default = 'exception'.
verbose: print out verbose messages? default = 0.
"""
# Get the name of the current thread and determine if this is a single treaded application
threadName = threading.currentThread()
singleThread = (threading.activeCount() == 1) and (threadName.getName() == 'MainThread')
if singleThread:
# Define new signal interput handler
keyboardInterruptSent = False
def New_SIGINT(*args):
print "KeyboardInterrupt ignored until flush is complete!"
keyboardInterruptSent = True
# Install new handler
signal.signal(signal.SIGINT,New_SIGINT)
if self.__file.mode not in ('append', 'update'):
print "flush for '%s' mode is not supported." % self.__file.mode
return
self.update_tbhdu()
self.verify(option=output_verify)
if self.__file.mode == 'append':
for hdu in self:
if (verbose):
try: _extver = `hdu.header['extver']`
except: _extver = ''
# only append HDU's which are "new"
if hdu._new:
self.__file.writeHDU(hdu)
if (verbose):
print "append HDU", hdu.name, _extver
hdu._new = 0
elif self.__file.mode == 'update':
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
# Add 1 to .ascard to include the END card
_nch80 = reduce(operator.add, map(Card._ncards, hdu.header.ascard))
_bytes = (_nch80+1) * Card.length
_bytes = _bytes + _padLength(_bytes)
if _bytes != (hdu._datLoc-hdu._hdrLoc):
self._resize = 1
if verbose:
print "One or more header is resized."
break
# Data:
if 'data' not in dir(hdu):
continue
if hdu.data is None:
continue
_bytes = hdu.data._itemsize*hdu.data.nelements()
_bytes = _bytes + _padLength(_bytes)
if _bytes != hdu._datSpan:
self._resize = 1
if verbose:
print "One or more data area is resized."
break
# if the HDUList is resized, need to write it to a tmp file,
# delete the original file, and rename the tmp to the original file
if self._resize:
oldName = self.__file.name
oldMemmap = self.__file.memmap
_name = _tmpName(oldName)
_hduList = open(_name, mode="append")
if (verbose): print "open a temp file", _name
for hdu in self:
(hdu._hdrLoc, hdu._datLoc, hdu._datSpan) = _hduList.__file.writeHDU(hdu)
_hduList.__file.close()
self.__file.close()
os.remove(self.__file.name)
if (verbose): print "delete the original file", oldName
# reopen the renamed new file with "update" mode
os.rename(_name, oldName)
ffo = _File(oldName, mode="update", memmap=oldMemmap)
self.__file = ffo
if (verbose): print "reopen the newly renamed file", oldName
# reset the resize attributes after updating
self._resize = 0
for hdu in self:
hdu.header._mod = 0
hdu.header.ascard._mod = 0
hdu._new = 0
hdu._file = ffo.getfile()
# if not resized, update in place
else:
for hdu in self:
if (verbose):
try: _extver = `hdu.header['extver']`
except: _extver = ''
if hdu.header._mod or hdu.header.ascard._mod:
hdu._file.seek(hdu._hdrLoc)
self.__file.writeHDUheader(hdu)
if (verbose):
print "update header in place: Name =", hdu.name, _extver
if 'data' in dir(hdu):
if hdu.data is not None:
hdu._file.seek(hdu._datLoc)
self.__file.writeHDUdata(hdu)
if (verbose):
print "update data in place: Name =", hdu.name, _extver
# reset the modification attributes after updating
for hdu in self:
hdu.header._mod = 0
hdu.header.ascard._mod = 0
if singleThread:
if keyboardInterruptSent:
raise KeyboardInterrupt
signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT))
def update_extend(self):
"""Make sure if the primary header needs the keyword EXTEND or if
it has the proper value.
"""
hdr = self[0].header
if hdr.has_key('extend'):
if (hdr['extend'] == False):
hdr['extend'] = True
else:
if hdr['naxis'] == 0:
hdr.update('extend', True, after='naxis')
else:
n = hdr['naxis']
hdr.update('extend', True, after='naxis'+`n`)
def writeto(self, name, output_verify='exception', clobber=False):
"""Write the HDUList to a new file.
name: output FITS file name to be written to.
output_verify: output verification option, default = 'exception'.
clobber: Overwrite the output file if exists, default = False.
"""
if (len(self) == 0):
print "There is nothing to write."
return
self.update_tbhdu()
if output_verify == 'warn':
output_verify = 'exception'
self.verify(option=output_verify)
# check if the output file already exists
if os.path.exists(name):
if clobber:
print "Overwrite existing file '%s'." % name
os.remove(name)
else:
raise IOError, "File '%s' already exist." % name
# make sure the EXTEND keyword is there if there is extension
if len(self) > 1:
self.update_extend()
hduList = open(name, mode="append")
for hdu in self:
hduList.__file.writeHDU(hdu)
hduList.close(output_verify=output_verify)
    def close(self, output_verify='exception', verbose=0):
        """Close the associated FITS file and memmap object, if any.

        output_verify: output verification option, default = 'exception'.
        verbose: print out verbose messages? default = 0.

        This simply calls the close method of the _File class. It has this
        two-tier call structure because _File has its own private
        attribute __file.
        """
        if self.__file != None:
            if self.__file.memmap == 1:
                # Keep a handle on the memmap so it can still be closed
                # below after the file object itself is gone.
                self.mmobject = self.__file._mm
            if self.__file.mode in ['append', 'update']:
                # Writable modes must flush pending changes first.
                self.flush(output_verify=output_verify, verbose=verbose)
            self.__file.close()
        # close the memmap object, it is designed to use an independent
        # attribute of mmobject so if the HDUList object is created from files
        # other than FITS, the close() call can also close the mm object.
        try:
            self.mmobject.close()
        except:
            # NOTE(review): bare except deliberately swallows the case where
            # no memmap was ever attached; kept for legacy behaviour.
            pass
    def info(self):
        """Print a one-line summary for every HDU in this HDUList.

        Output starts with the file name (or a placeholder when the list
        is not backed by a file) followed by one row per HDU.
        """
        if self.__file is None:
            _name = '(No file associated with this HDUList)'
        else:
            _name = self.__file.name
        # Header line, then one formatted summary row per HDU.
        results = "Filename: %s\nNo. Name Type"\
                  " Cards Dimensions Format\n" % _name
        for j in range(len(self)):
            results = results + "%-3d %s\n"%(j, self[j]._summary())
        # Drop the trailing newline before printing.
        results = results[:-1]
        print results
def open(name, mode="copyonwrite", memmap=0):
    """Factory function to open a FITS file and return an HDUList object.

    name: Name of the FITS file to be opened.
    mode: Open mode; defaults to 'copyonwrite' (NOTE(review): the original
          docstring claimed 'readonly' was the default, contradicting the
          signature). Other modes include 'readonly', 'update', 'append'.
    memmap: Is memory mapping to be used? default=0.
    """
    # instantiate a FITS file object (ffo)
    ffo = _File(name, mode=mode, memmap=memmap)
    hduList = HDUList(file=ffo)
    # read all HDU's until the end of file is reached
    while 1:
        try:
            hduList.append(ffo._readHDU())
        except EOFError:
            break
        # check in the case there is extra space after the last HDU or corrupted HDU
        except ValueError:
            print 'Warning: Required keywords missing when trying to read HDU #%d.\n There may be extra bytes after the last HDU or the file is corrupted.' % (len(hduList)+1)
            break
    # initialize/reset attributes to be used in "update/append" mode
    # CardList needs its own _mod attribute since it has methods to change
    # the content of header without being able to pass it to the header object
    hduList._resize = 0
    return hduList
# Backwards-compatible alias for callers using the older name.
fitsopen = open
# Convenience functions
class _Zero(int):
def __init__(self):
self = 0
def _getext(filename, mode, *ext1, **ext2):
    """Open the input file and resolve the extension specification.

    Accepts positional extension arguments (number, name, or
    (name, ver) tuple) or the keyword arguments ext/extname/extver, and
    rejects redundant or conflicting combinations.

    Returns (hdulist, ext) where ext indexes into the HDUList; a _Zero
    sentinel marks "no extension specified".

    Raises ValueError for too many positional arguments and KeyError for
    conflicting or insufficient keyword arguments.

    Note: raise statements use the parenthesized form, valid in both
    Python 2 and 3 (the rest of this module is still Python 2 only).
    """
    hdulist = open(filename, mode=mode)
    n_ext1 = len(ext1)
    n_ext2 = len(ext2)
    keys = ext2.keys()
    # parse the extension spec
    if n_ext1 > 2:
        raise ValueError("too many positional arguments")
    elif n_ext1 == 1:
        if n_ext2 == 0:
            ext = ext1[0]
        else:
            if isinstance(ext1[0], (int, tuple)):
                raise KeyError('Redundant/conflicting keyword argument(s): %s' % ext2)
            if isinstance(ext1[0], str):
                if n_ext2 == 1 and 'extver' in keys:
                    ext = ext1[0], ext2['extver']
                else:
                    # BUGFIX: this raise previously executed unconditionally,
                    # so even a valid (extname, extver) pair raised KeyError.
                    raise KeyError('Redundant/conflicting keyword argument(s): %s' % ext2)
    elif n_ext1 == 2:
        if n_ext2 == 0:
            ext = ext1
        else:
            raise KeyError('Redundant/conflicting keyword argument(s): %s' % ext2)
    elif n_ext1 == 0:
        if n_ext2 == 0:
            ext = _Zero()
        elif 'ext' in keys:
            if n_ext2 == 1:
                ext = ext2['ext']
            elif n_ext2 == 2 and 'extver' in keys:
                ext = ext2['ext'], ext2['extver']
            else:
                raise KeyError('Redundant/conflicting keyword argument(s): %s' % ext2)
        else:
            if 'extname' in keys:
                if 'extver' in keys:
                    ext = ext2['extname'], ext2['extver']
                else:
                    ext = ext2['extname']
            else:
                raise KeyError('Insufficient keyword argument: %s' % ext2)
    return hdulist, ext
def getheader(filename, *ext, **extkeys):
    """Get the header from an extension of a FITS file.

    @param filename: input FITS file name
    @type: string
    @param ext: The rest of the arguments are for extension specification.
                See L{getdata} for explanations/examples.
    @rtype: L{Header} object
    @return: header
    """
    hdus, index = _getext(filename, 'readonly', *ext, **extkeys)
    header = hdus[index].header
    hdus.close()
    return header
def getdata(filename, *ext, **extkeys):
    """Get the data from an extension of a FITS file (and optionally the header).

    @type filename: string
    @param filename: input FITS file name
    @param ext: The rest of the arguments are for extension specification. They are
    flexible and are best illustrated by examples:

    No extra arguments implies the primary header
    >>> getdata('in.fits')
    By extension number:
    >>> getdata('in.fits', 0)      # the primary header
    >>> getdata('in.fits', 2)      # the second extension
    >>> getdata('in.fits', ext=2)  # the second extension
    By name, i.e., EXTNAME value (if unique):
    >>> getdata('in.fits', 'sci')
    >>> getdata('in.fits', extname='sci')  # equivalent
    Note EXTNAMEs are not case sensitive

    By combination of EXTNAME and EXTVER, as separate arguments or as a tuple:
    >>> getdata('in.fits', 'sci', 2)                 # EXTNAME='SCI' & EXTVER=2
    >>> getdata('in.fits', extname='sci', extver=2)  # equivalent
    >>> getdata('in.fits', ('sci', 2))               # equivalent
    Ambiguous or conflicting specifications will raise an exception, e.g.,
    >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2)

    @return: an array, record array (i.e. table), or groups data object
    depending on the type of the extension being referenced.
    If the optional keyword 'header' is set to True, this function will
    return a (data, header) tuple.

    Note: raise statements use the parenthesized form, valid in both
    Python 2 and 3 (the original used the Python 2-only `raise E, msg`).
    """
    # Pop the pseudo-keyword 'header' before it reaches _getext.
    if 'header' in extkeys:
        _gethdr = extkeys['header']
        del extkeys['header']
    else:
        _gethdr = False
    hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys)
    hdu = hdulist[_ext]
    _data = hdu.data
    if _data is None and isinstance(_ext, _Zero):
        # No extension was requested and the primary HDU is empty:
        # fall back to the first extension.
        try:
            hdu = hdulist[1]
            _data = hdu.data
        except IndexError:
            raise IndexError('No data in this HDU.')
    if _data is None:
        raise IndexError('No data in this HDU.')
    if _gethdr:
        _hdr = hdu.header
    hdulist.close()
    if _gethdr:
        return _data, _hdr
    else:
        return _data
def getval(filename, key, *ext, **extkeys):
    """Get a keyword's value from a header in a FITS file.

    @type filename: string
    @param filename: input FITS file name
    @type key: string
    @param key: keyword name
    @param ext: The rest of the arguments are for extension specification.
                See L{getdata} for explanations/examples.
    @return: keyword value
    @rtype: string, integer, or float
    """
    header = getheader(filename, *ext, **extkeys)
    return header[key]
def _makehdu(data, header):
    """Build an HDU of the appropriate type for *data* and *header*.

    When no header is given the HDU type is inferred from the data type
    (image HDU for numarray arrays, binary table HDU for FITS_rec);
    otherwise the header's own _hdutype decides.

    Raises KeyError when header is None and the data type is unsupported
    (KeyError kept for backward compatibility with existing callers; the
    raise now uses the parenthesized form valid in Python 2 and 3).
    """
    if header is None:
        # Infer the HDU type from the data alone.
        if isinstance(data, num.NumArray):
            hdu = ImageHDU(data)
        elif isinstance(data, FITS_rec):
            hdu = BinTableHDU(data)
        else:
            raise KeyError('data must be numarray or table data.')
    else:
        # The header knows which HDU class it belongs to.
        hdu = header._hdutype(data=data, header=header)
    return hdu
def writeto(filename, data, header=None, **keys):
    """Create a new FITS file using the supplied data/header.

    @type filename: string
    @param filename: name of the new FITS file to write to
    @type data: array, record array, or groups data object
    @param data: data to write to the new file
    @type header: L{Header} object or None
    @param header: the header associated with 'data'; if None, a header
        of the appropriate type is created for the supplied data.
    @keyword clobber: (optional) if True and filename already exists,
        overwrite the file. Default is False.
    """
    # 'header' may also arrive as a keyword argument.
    if header is None and 'header' in keys:
        header = keys['header']
    hdu = _makehdu(data, header)
    # A standalone file must start with a primary HDU.
    if not isinstance(hdu, PrimaryHDU):
        hdu = PrimaryHDU(data, header=header)
    hdu.writeto(filename, clobber=keys.get('clobber', False))
def append(filename, data, header=None):
    """Append the header/data to a FITS file; create the file if absent.

    If only data is supplied, a minimal header is created.

    @type filename: string
    @param filename: name of the file to append to
    @type data: array, table, or group data object
    @param data: the new data used for appending
    @type header: L{Header} object or None
    @param header: the header associated with 'data'; if None, an
        appropriate header will be created for the data object supplied.
    """
    if os.path.exists(filename):
        hdu = _makehdu(data, header)
        # A primary HDU cannot be appended; demote it to an image extension.
        if isinstance(hdu, PrimaryHDU):
            hdu = ImageHDU(data, header)
        f = open(filename, mode='update')
        f.append(hdu)
        f.close()
    else:
        # No existing file: delegate creation to writeto().
        writeto(filename, data, header)
def update(filename, data, *ext, **extkeys):
    """Update the specified extension with the input data/header.

    @type filename: string
    @param filename: name of the file to be updated
    data: the new data used for updating

    The rest of the arguments are flexible:
    the 3rd argument can be the header associated with the data.
    If the 3rd argument is not a header, it (and other positional
    arguments) are assumed to be the extension specification(s).
    Header and extension specs can also be keyword arguments.

    For example:
    >>> update(file, dat, hdr, 'sci')          # update the 'sci' extension
    >>> update(file, dat, 3)                   # update the 3rd extension
    >>> update(file, dat, hdr, 3)              # update the 3rd extension
    >>> update(file, dat, 'sci', 2)            # update the 2nd SCI extension
    >>> update(file, dat, 3, header=hdr)       # update the 3rd extension
    >>> update(file, dat, header=hdr, ext=5)   # update the 5th extension
    """
    # parse the arguments: a leading Header positional argument is the
    # header; anything else must be an extension spec (note `long` and the
    # comma-form raise are Python 2 only, kept as-is).
    header = None
    if len(ext) > 0:
        if isinstance(ext[0], Header):
            header = ext[0]
            ext = ext[1:]
        elif not isinstance(ext[0], (int, long, str, tuple)):
            raise KeyError, 'Input argument has wrong data type.'
    # A keyword 'header' overrides a positional one.
    if 'header' in extkeys:
        header = extkeys['header']
        del extkeys['header']
    new_hdu=_makehdu(data, header)
    hdulist, _ext = _getext(filename, 'update', *ext, **extkeys)
    # Replace the addressed HDU and write the change back on close.
    hdulist[_ext] = new_hdu
    hdulist.close()
def info(filename):
    """Print the summary information on a FITS file.

    This includes the name, type, length of header, data shape and type
    for each extension.

    @type filename: string
    @param filename: input FITS file name
    """
    hdus = open(filename)
    hdus.info()
    hdus.close()
# Module-level singleton representing an undefined FITS card value.
UNDEFINED = Undefined()
__credits__="""
Copyright (C) 2004 Association of Universities for Research in Astronomy (AURA)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. The name of AURA and its representatives may not be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
|
# Standard library
import csv
import importlib
import json
import os
import pprint
import sys
import time
import traceback

# Third-party
import click
import pyyjj
import pywingchun

# Local
import kungfu.msg
import kungfu.yijinjing.journal as kfj
import kungfu.yijinjing.msg as yjj_msg
import kungfu.yijinjing.time as kft
from kungfu.command.journal import journal, pass_ctx_from_parent
@journal.command()
@click.option('-i', '--session_id', type=int, required=True, help='session id')
@click.option('-t', '--io_type', type=click.Choice(['all', 'in', 'out']), default='all', help='input or output during this session')
@click.option("--from-beginning", is_flag=True, help="start with the earliest message within this session")
@click.option("--max-messages", type=int, default=sys.maxsize, help="The maximum number of messages to reader before exiting")
@click.option('--msg', type=click.Choice(['all'] + kungfu.msg.Registry.type_names()), default='all', help="msg type to read")
@click.option("--continuous", is_flag=True, help="reader not to stop when no data avail util the session end")
@click.option("-o", "--output", type=str, help="file path where you want to store the exported csv file")
@click.option('--script', type=str, required=False, help="python script to process every frame data")
@click.pass_context
def reader(ctx, session_id, io_type, from_beginning, max_messages, msg, continuous, output, script):
    """Replay the journal frames of one session through a frame handler.

    The handler is, in priority order: a CSV exporter (--output), the
    on_frame() hook of a user-supplied script (--script), or a
    pretty-printer (default).
    """
    pass_ctx_from_parent(ctx)
    session = kfj.find_session(ctx, session_id)
    uname = '{}/{}/{}/{}'.format(session['category'], session['group'], session['name'], session['mode'])
    uid = pyyjj.hash_str_32(uname)
    # Collect every journal location, then resolve the session's own.
    ctx.category = '*'
    ctx.group = '*'
    ctx.name = '*'
    ctx.mode = '*'
    locations = kfj.collect_journal_locations(ctx)
    location = locations[uid]
    home = kfj.make_location_from_dict(ctx, location)
    io_device = pyyjj.io_device(home)
    reader = io_device.open_reader_to_subscribe()
    # Outbound journals written by this session (reader dest ids are hex).
    if io_type == 'out' or io_type == 'all':
        for dest in location['readers']:
            dest_id = int(dest, 16)
            reader.join(home, dest_id, session['begin_time'])
    # Inbound master channels, unless this session IS the master.
    if (io_type == 'in' or io_type == 'all') and not (home.category == pyyjj.category.SYSTEM and home.group == 'master' and home.name == 'master'):
        master_home_uid = pyyjj.hash_str_32('system/master/master/live')
        master_home_location = kfj.make_location_from_dict(ctx, locations[master_home_uid])
        reader.join(master_home_location, 0, session['begin_time'])
        master_cmd_uid = pyyjj.hash_str_32('system/master/{:08x}/live'.format(location['uid']))
        master_cmd_location = kfj.make_location_from_dict(ctx, locations[master_cmd_uid])
        reader.join(master_cmd_location, location['uid'], session['begin_time'])
    start_time = pyyjj.now_in_nano() if not from_beginning else session["begin_time"]
    msg_count = 0
    msg_type_to_read = None if msg == "all" else kungfu.msg.Registry.meta_from_name(msg)["id"]
    if output:
        if msg not in kungfu.msg.Registry.type_names():
            # BUGFIX: the format string has two placeholders but was given a
            # single argument, so this line raised IndexError instead of the
            # intended ValueError message.
            raise ValueError("invalid msg {}, please choose from {}".format(msg, kungfu.msg.Registry.type_names()))
        csv_writer = None

        def handle(frame):
            data_as_dict = frame["data"]
            dict_row = kungfu.msg.utils.flatten_json(data_as_dict)
            nonlocal csv_writer
            if not csv_writer:
                # Lazily create the writer so the header matches the first row.
                # NOTE(review): the output file handle is never closed
                # explicitly; kept as-is to preserve behaviour.
                csv_writer = csv.DictWriter(open(output, "w"), fieldnames=dict_row.keys())
                csv_writer.writeheader()
            csv_writer.writerow(dict_row)
        frame_handler = handle
    elif script:
        # Import the user script and use its on_frame hook (no-op fallback).
        script_dir = os.path.dirname(script)  # renamed from `dir` (shadowed builtin)
        name_no_ext = os.path.split(os.path.basename(script))
        sys.path.append(os.path.relpath(script_dir))
        impl = importlib.import_module(os.path.splitext(name_no_ext[1])[0])
        frame_handler = getattr(impl, 'on_frame', lambda frame: None)
    else:
        pp = pprint.PrettyPrinter(indent=4)
        frame_handler = pp.pprint
    while True:
        if reader.data_available() and msg_count < max_messages:
            frame = reader.current_frame()
            # Follow read requests addressed to this session so newly
            # registered sources are joined on the fly.
            if frame.dest == home.uid and (frame.msg_type == yjj_msg.RequestReadFrom or frame.msg_type == yjj_msg.RequestReadFromPublic):
                request = pyyjj.get_RequestReadFrom(frame)
                source_location = kfj.make_location_from_dict(ctx, locations[request.source_id])
                reader.join(source_location, location['uid'] if frame.msg_type == yjj_msg.RequestReadFrom else 0, request.from_time)
            if frame.dest == home.uid and frame.msg_type == yjj_msg.Deregister:
                loc = json.loads(frame.data_as_string())
                reader.disjoin(loc['uid'])
            if frame.msg_type == yjj_msg.SessionEnd:
                ctx.logger.info("session reach end at %s", kft.strftime(frame.gen_time))
                break
            elif frame.gen_time >= start_time and (msg == "all" or msg_type_to_read == frame.msg_type):
                try:
                    frame_handler(frame.as_dict())
                except Exception as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    ctx.logger.error('error [%s] %s', exc_type, traceback.format_exception(exc_type, exc_obj, exc_tb))
                msg_count += 1
            reader.next()
        elif msg_count >= max_messages:
            ctx.logger.info("reach max messages {}".format(max_messages))
            break
        elif not reader.data_available():
            if not continuous:
                ctx.logger.info("no data is available")
                break
            else:
                time.sleep(0.1)
|
<gh_stars>1-10
import sys
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import RobustScaler
import pandas as pd
import numpy as np
def scaler(data, type="standard", stdScaler_with_mean=True, stdScaler_with_std=True, robust_with_centering=True, robust_with_scaling=True, robust_unit_variance=False, minimum=0, maximum=1, lower_iqr=25.0, upper_iqr=75.0):
    """Scale a 1D numpy array or a pandas dataframe matrix of numerical values.

    Parameters
    ----------
    data: pandas dataframe matrix or 1D numpy array of numerical values
    type: sklearn scaler to apply: "standard", "minmax", "maxabs" or "robust"
          (default: "standard")
    stdScaler_with_mean / stdScaler_with_std: StandardScaler options
    robust_with_centering / robust_with_scaling / robust_unit_variance:
          RobustScaler options
    minimum, maximum: MinMaxScaler feature range (defaults: 0, 1)
    lower_iqr, upper_iqr: RobustScaler quantile range (defaults: 25.0, 75.0)

    Returns
    -------
    scaled_data: scaled values, same container type as the input
    """
    # Validate every argument first (exits the process on invalid input).
    data, type, stdScaler_with_mean, stdScaler_with_std, robust_with_centering, robust_with_scaling, robust_unit_variance, minimum, maximum, lower_iqr, upper_iqr = __checkData(data, type, stdScaler_with_mean, stdScaler_with_std, robust_with_centering, robust_with_scaling, robust_unit_variance, minimum, maximum, lower_iqr, upper_iqr)
    is_array = isinstance(data, np.ndarray)
    if is_array:
        # sklearn expects a 2D column; reshape and work in float.
        data = data.reshape((data.shape[0], 1)).astype(float)
    kind = type.lower()
    if kind == "standard":
        transformer = StandardScaler(with_mean=stdScaler_with_mean, with_std=stdScaler_with_std)
    elif kind == "minmax":
        transformer = MinMaxScaler(feature_range=(minimum, maximum))
    elif kind == "maxabs":
        transformer = MaxAbsScaler()
    elif kind == "robust":
        transformer = RobustScaler(with_centering=robust_with_centering, with_scaling=robust_with_scaling, unit_variance=robust_unit_variance, quantile_range=(lower_iqr, upper_iqr))
    if is_array:
        # Flatten back to the original 1D shape.
        scaled_data = transformer.fit_transform(data).flatten()
    elif isinstance(data, pd.DataFrame):
        # Preserve the original column labels and index.
        scaled_data = transformer.fit_transform(data)
        scaled_data = pd.DataFrame(scaled_data, columns=data.columns, index=data.index)
    return scaled_data
def __checkData(data, type, stdScaler_with_mean, stdScaler_with_std, robust_with_centering, robust_with_scaling, robust_unit_variance, minimum, maximum, lower_iqr, upper_iqr):
    """Validate every scaler() argument.

    Prints a diagnostic and calls sys.exit() on the first invalid value
    (legacy behaviour, preserved); returns all arguments unchanged when
    every check passes.
    """
    if not isinstance(data, pd.DataFrame):
        if not isinstance(data, np.ndarray):
            print("Error: A pandas dataframe or numpy array was not entered. Please check your data.")
            sys.exit()
    if type.lower() not in ["standard", "minmax", "maxabs", "robust"]:
        print("Error: Scaler type not valid. Choose either \"Standard\", \"MinMax\", \"MaxAbs\", or \"Robust\".")
        sys.exit()
    if not isinstance(stdScaler_with_mean, bool):
        print("Error: The standard scaler with mean value is not valid. Choose either \"True\" or \"False\".")
        sys.exit()
    if not isinstance(stdScaler_with_std, bool):
        print("Error: The standard scaler with standard deviation (unit variance) value is not valid. Choose either \"True\" or \"False\".")
        sys.exit()
    if not isinstance(robust_with_centering, bool):
        print("Error: The robust scaler with centering value is not valid. Choose either \"True\" or \"False\".")
        sys.exit()
    if not isinstance(robust_with_scaling, bool):
        print("Error: The robust scaler with scaling value is not valid. Choose either \"True\" or \"False\".")
        sys.exit()
    if not isinstance(robust_unit_variance, bool):
        print("Error: The robust scaler with unit variance value is not valid. Choose either \"True\" or \"False\".")
        sys.exit()
    if not isinstance(minimum, float):
        # BUGFIX: this inner check previously tested `maximum`, so an
        # invalid `minimum` was never caught.
        if not isinstance(minimum, int):
            print("Error: The minmax scaler minimum value is not valid. Choose a float or integer value.")
            sys.exit()
    if not isinstance(maximum, float):
        if not isinstance(maximum, int):
            print("Error: The minmax scaler maximum value is not valid. Choose a float or integer value.")
            sys.exit()
    if not isinstance(lower_iqr, float):
        if not isinstance(lower_iqr, int):
            print("Error: The robust lower interquartile range value is not valid. Choose a float or integer value.")
            sys.exit()
    if not isinstance(upper_iqr, float):
        if not isinstance(upper_iqr, int):
            print("Error: The robust upper interquartile range value is not valid. Choose a float or integer value.")
            sys.exit()
    return data, type, stdScaler_with_mean, stdScaler_with_std, robust_with_centering, robust_with_scaling, robust_unit_variance, minimum, maximum, lower_iqr, upper_iqr
from margin.utils import AlignedPair, getFastaDictionary, getFastqDictionary, samIterator
import os, sys
from optparse import OptionParser
import pysam
import xml.etree.cElementTree as ET
from jobTree.src.bioio import reverseComplement, prettyXml, system
from itertools import product
class SubstitutionMatrix():
    """Counts aligned reference/read base pairs in a flat 5x5 matrix.

    Rows/columns are A, C, G, T plus a fifth bucket shared by N and any
    other wildcard character.
    """
    def __init__(self):
        # Flat 5x5 count matrix; floats so downstream division is exact.
        self.matrix = [0.0] * 25
    def addAlignedPair(self, refBase, readBase):
        """Record one aligned (refBase, readBase) observation."""
        self.matrix[self._index(refBase) * 5 + self._index(readBase)] += 1
    def getCount(self, refBase, readBase):
        """Return the accumulated count for the given base pair."""
        return self.matrix[self._index(refBase) * 5 + self._index(readBase)]
    def getFreqs(self, refBase, bases):
        """Return relative frequencies of refBase against each base in *bases*.

        Returns all zeros when refBase was never observed against them.
        """
        counts = [self.getCount(refBase, b) for b in bases]
        total = sum(counts)
        if total == 0:
            return [0.0] * len(counts)
        return [c / total for c in counts]
    def getXML(self):
        """Build an ElementTree node summarising the substitution counts."""
        def _identity(matches, mismatches):
            # Fraction of matches; "NaN" when nothing was observed.
            total = matches + mismatches
            if total == 0:
                return "NaN"
            return matches / total
        totalMatches = sum(self.getCount(b, b) for b in "ACTG")
        totalMismatches = sum(
            self.getCount(ref, read)
            for ref in "ACTG" for read in "ACTG" if read != ref)
        node = ET.Element("substitutions", {
            "matches": str(totalMatches),
            "mismatches": str(totalMismatches),
            "identity": str(_identity(totalMatches, totalMismatches))})
        for refBase in "ACGTN":
            refMatches = self.getCount(refBase, refBase)
            refMismatches = sum(
                self.getCount(refBase, read) for read in "ACTG" if read != refBase)
            baseNode = ET.SubElement(node, refBase, {
                "matches": str(refMatches),
                "mismatches": str(refMismatches),
                "identity": str(_identity(refMatches, refMismatches))})
            for readBase in "ACGTN":
                ET.SubElement(baseNode, readBase,
                              {"count": str(self.getCount(refBase, readBase))})
        return node
    @staticmethod
    def _index(base):
        """Map a base character to its matrix index; wildcards map to 4."""
        base = base.upper()
        if base not in "ACGT":
            return 4
        return {'A': 0, 'C': 1, 'G': 2, 'T': 3}[base]
def Substitutions(readFastqFile, referenceFastaFile, samFile, outputDir, kmer=6):
    """Calculates stats on substitutions.

    Walks every aligned pair in the SAM file, accumulates substitution
    counts, writes substitutions.xml and subst.tsv into outputDir, and
    renders a plot via an external R script.
    (The `kmer` parameter is currently unused.)
    """
    refSequences = getFastaDictionary(referenceFastaFile) #Hash of names to sequences
    readSequences = getFastqDictionary(readFastqFile) #Hash of names to sequences
    sM = SubstitutionMatrix() #The thing to store the counts in
    sam = pysam.Samfile(samFile, "r" )
    for aR in samIterator(sam): #Iterate on the sam lines
        for aP in AlignedPair.iterator(aR, refSequences[sam.getrname(aR.rname)], readSequences[aR.qname]): #Walk through the matches mismatches:
            sM.addAlignedPair(aP.getRefBase(), aP.getReadBase())
    sam.close()
    #Write out the substitution info
    open(os.path.join(outputDir, "substitutions.xml"), 'w').write(prettyXml(sM.getXML()))
    bases = "ACGT"
    outf = open(os.path.join(outputDir, "subst.tsv"), "w")
    outf.write("A\tC\tG\tT\n")
    for x in bases:
        freqs = sM.getFreqs(x, bases)
        # NOTE(review): format() receives a third argument ("\n") with no
        # matching placeholder; it is silently ignored by str.format.
        outf.write("{}\t{}\n".format(x, "\t".join(map(str,freqs)), "\n"))
    outf.close()
    # Derive the analysis name from the SAM file name for the plot title.
    analysis = str(samFile.split("/")[-1].split(".sam")[0])
    system("Rscript scripts/substitution_plot.R {} {} {}".format(os.path.join(outputDir, "subst.tsv"), os.path.join(outputDir, "substitution_plot.pdf"), analysis))
def main(myCommandLine=None):
    """Command-line entry point: compute substitution stats for a SAM file.

    Expects four positional arguments:
    readFastqFile referenceFastaFile samFile outputDir
    """
    #Parse the inputs args/options
    parser = OptionParser(usage="usage: readFastqFile referenceFastaFile samFile outputDir",
                          version="%prog 0.1")
    #Parse the options/arguments
    options, args = parser.parse_args()
    #Print help message if no input
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    # BUGFIX: the message used to say "three" while four arguments are
    # required, and the values were then read from sys.argv instead of the
    # parsed args (which breaks as soon as any option is present).
    if len(args) != 4:
        raise RuntimeError("Expected four arguments, got: %s" % " ".join(args))
    readFastqFile, referenceFastaFile, samFile, outputDir = args
    Substitutions(readFastqFile, referenceFastaFile, samFile, outputDir)
# Script entry point.
if __name__ == '__main__':
    main()
|
<filename>tests/localization/translations/drivers/testconfigtranslator.py
from tests.testcase import TestCase
from edmunds.localization.translations.drivers.configtranslator import ConfigTranslator
from edmunds.localization.translations.exceptions.translationerror import TranslationError
from edmunds.localization.translations.exceptions.sentencefillererror import SentenceFillerError
from edmunds.localization.localization.models.time import Time
from edmunds.localization.localization.models.number import Number
from edmunds.localization.localization.models.localization import Localization
from babel.core import Locale
from babel.dates import get_timezone
class TestConfigTranslator(TestCase):
    """
    Test the Config Translator
    """
    def set_up(self):
        """
        Set up test: define an app config with a ConfigTranslator instance
        and English/Dutch string tables used by the tests below.
        :return:    void
        """
        super(TestConfigTranslator, self).set_up()
        # Config written to disk by write_config(); note 'nl' intentionally
        # lacks the 'smashing' key and the 'beautiful' strings carry a
        # {name} placeholder.
        self.config = [
            "from edmunds.localization.translations.drivers.configtranslator import ConfigTranslator \n",
            "APP = { \n",
            "   'localization': { \n",
            "       'enabled': True, \n",
            "       'locale': { \n",
            "           'fallback': 'en', \n",
            "           'supported': ['en', 'nl'], \n",
            "       }, \n",
            "       'timezonefallback': 'Europe/Brussels', \n",
            "       'translations': { \n",
            "           'enabled': True, \n",
            "           'instances': [ \n",
            "               { \n",
            "                   'name': 'configtranslator',\n",
            "                   'driver': ConfigTranslator,\n",
            "               }, \n",
            "           ], \n",
            "           'strings': { \n",
            "               'en': { \n",
            "                   'beautiful': 'This is a beautiful translation. Is it not, {name}?', \n",
            "                   'smashing': 'A smashing sentence!', \n",
            "               }, \n",
            "               'nl': { \n",
            "                   'beautiful': 'Dit is een prachtige vertaling. Nietwaar, {name}?', \n",
            "               }, \n",
            "           }, \n",
            "       }, \n",
            "   }, \n",
            "} \n",
        ]
    def test_get_unknown_key(self):
        """
        Test get unknown key: a {name} placeholder with no matching param
        must raise SentenceFillerError.
        :return:    void
        """
        rule = '/' + self.rand_str(20)
        # Write config and create app
        self.write_config(self.config)
        app = self.create_application()
        data = [
            ('en', 'beautiful', {}),
            ('nl', 'beautiful', {}),
        ]
        for locale_str, key, params in data:
            with app.test_request_context(rule):
                locale = Locale.parse(locale_str, sep='_')
                time_zone = get_timezone('Europe/Brussels')
                time_obj = Time(locale=locale, time_zone=time_zone)
                number_obj = Number(locale=locale)
                localization_obj = Localization(locale=locale, number=number_obj, time=time_obj)
                # Fetch driver
                driver = app.localization().translator()
                self.assert_is_instance(driver, ConfigTranslator)
                with self.assert_raises_regexp(SentenceFillerError, 'Param "name" could not be replaced. \(locale "%s" and key "%s"\)' % (locale_str, key)):
                    driver.get(localization_obj, key, params)
    def test_get_errors(self):
        """
        Test get errors: unknown keys and keys missing from the requested
        locale must raise TranslationError.
        :return:    void
        """
        rule = '/' + self.rand_str(20)
        # Write config and create app
        self.write_config(self.config)
        app = self.create_application()
        data = [
            ('en', 'unknownkey1', {}),
            ('nl', 'unknownkey2', {}),
            ('nl_BE', 'unknownkey3', {}),
            ('bo', 'unknownkey1', {}),
            ('ar', 'unknownkey2', {}),
            ('nl', 'smashing', {}),
            ('nl_BE', 'smashing', {}),
            ('nl_BE', 'beautiful', {'name': 'Steve'}),
        ]
        for locale_str, key, params in data:
            with app.test_request_context(rule):
                locale = Locale.parse(locale_str, sep='_')
                time_zone = get_timezone('Europe/Brussels')
                time_obj = Time(locale=locale, time_zone=time_zone)
                number_obj = Number(locale=locale)
                localization_obj = Localization(locale=locale, number=number_obj, time=time_obj)
                # Fetch driver
                driver = app.localization().translator()
                self.assert_is_instance(driver, ConfigTranslator)
                with self.assert_raises_regexp(TranslationError, 'Could not find the sentence for locale "%s" and key "%s".' % (locale_str, key)):
                    driver.get(localization_obj, key, params)
    def test_get(self):
        """
        Test get: happy path, placeholders filled from params.
        :return:    void
        """
        rule = '/' + self.rand_str(20)
        # Write config and create app
        self.write_config(self.config)
        app = self.create_application()
        data = [
            ('en', 'A smashing sentence!', 'smashing', {}),
            ('en', 'This is a beautiful translation. Is it not, Steve?', 'beautiful', {'name': 'Steve'}),
            ('nl', 'Dit is een prachtige vertaling. Nietwaar, Steve?', 'beautiful', {'name': 'Steve'}),
        ]
        for locale_str, expected, key, params in data:
            with app.test_request_context(rule):
                locale = Locale.parse(locale_str, sep='_')
                time_zone = get_timezone('Europe/Brussels')
                time_obj = Time(locale=locale, time_zone=time_zone)
                number_obj = Number(locale=locale)
                localization_obj = Localization(locale=locale, number=number_obj, time=time_obj)
                # Fetch driver
                driver = app.localization().translator()
                self.assert_is_instance(driver, ConfigTranslator)
                self.assert_equal(expected, driver.get(localization_obj, key, params))
|
from copy import deepcopy
import torch
import torch.nn as nn
from ..submodule import *
from ..data import SimulationInput
__all__ = (
"LSTMBaseline",
"LSTMCNNBaselineFF",
"LSTMCNNBaselineLF",
"LSTMCNNBaseline2F",
"LSTMCNNVideoBaseline",
)
class LSTMBaseline(nn.Module):
    """Question-only baseline: predicts answers from the LSTM question
    encoding alone and ignores the simulation entirely (no visual data).
    """
    SIMULATION_INPUT = SimulationInput.NO_FRAMES
    NUM_VIDEO_FRAMES = 0

    def __init__(self, config):
        super().__init__()
        # The encoder's vocabulary size is derived from the model input size.
        config["question_encoder"]["vocab_size"] = config["input_size"]
        self.question_encoder = LSTMEncoder(config["question_encoder"])
        self.linear = nn.Linear(self.question_encoder.output_size,
                                config["output_size"])
        self.dropout = nn.Dropout(p=config["dropout"])
        self.config = config

    def forward(self, simulations, questions, lengths, **kwargs):
        # `simulations` is accepted for interface parity but unused.
        _, (state, _) = self.question_encoder(questions, lengths)
        if self.question_encoder.lstm.bidirectional:
            # Concatenate forward and backward final hidden states.
            encoded = torch.cat([state[0], state[1]], dim=1)
        else:
            encoded = state.squeeze(0)
        return self.linear(self.dropout(encoded))
class LSTMCNNBaseline(nn.Module):
    """Base class combining a CNN frame encoder with an LSTM question
    encoder; subclasses choose which simulation frames are consumed by
    overriding SIMULATION_INPUT / NUM_VIDEO_FRAMES.
    """
    SIMULATION_INPUT = SimulationInput.NO_FRAMES
    NUM_VIDEO_FRAMES = 0
    def __init__(self, config):
        super().__init__()
        # input dependent params
        config["question_encoder"]["vocab_size"] = config["input_size"]
        config["frame_encoder"]["depth_size"] = config["depth_size"]
        config["frame_encoder"]["input_width"] = config["input_width"]
        config["frame_encoder"]["input_height"] = config["input_height"]
        self.config = config
        self.frame_encoder = self.create_submodule("frame_encoder")
        self.question_encoder = self.create_submodule("question_encoder")
        # Video input pools over (depth, H, W); frame input only over (H, W).
        if self.SIMULATION_INPUT is not SimulationInput.VIDEO:
            self.adaptive_pool = nn.AdaptiveAvgPool2d(config["pool_size"])
        else:
            self.adaptive_pool = nn.AdaptiveAvgPool3d(
                (None, config["pool_size"], config["pool_size"])
            )
        # Flattened visual feature size after pooling.
        visual_size = self.NUM_VIDEO_FRAMES * self.frame_encoder.out_channels * config["pool_size"]**2
        if self.SIMULATION_INPUT is SimulationInput.VIDEO:
            visual_size *= self.frame_encoder.out_depth
        textual_size = self.question_encoder.output_size
        # MLP consumes the concatenated visual + textual features.
        config["mlp"]["input_size"] = visual_size + textual_size
        self.flatten = nn.Flatten()
        self.mlp = MLP(config["mlp"])
        self.linear = nn.Linear(
            in_features=config["mlp"]["hidden_size"],
            out_features=config["output_size"])
        self.dropout = nn.Dropout(p=config["dropout"])
        self.config = config
    def create_submodule(self, submodule):
        """Instantiate the submodule class named in config["architecture"].

        SECURITY NOTE(review): eval() on a config-supplied string executes
        arbitrary code if the config is not trusted; consider a whitelist
        lookup (e.g. a dict of allowed classes) instead.
        """
        config = self.config[submodule]
        submodule = eval(config["architecture"])(config)
        return submodule
    def process_simulation(self, simulations, **kwargs):
        """Encode simulation frames into a flat visual feature vector."""
        y = self.frame_encoder(simulations)
        y = self.adaptive_pool(y)
        y = self.flatten(y)
        return y
    def process_question(self, questions, lengths, **kwargs):
        """Encode the question into the LSTM's final hidden state."""
        _, (hiddens, _) = self.question_encoder(questions, lengths)
        if self.question_encoder.lstm.bidirectional:
            # Concatenate forward and backward final hidden states.
            hiddens = torch.cat([hiddens[0], hiddens[1]], dim=1)
        else:
            hiddens = hiddens.squeeze(0)
        return hiddens
    def forward(self, simulations, questions, lengths, **kwargs):
        vis = self.process_simulation(simulations, **kwargs)
        txt = self.process_question(questions, lengths, **kwargs)
        # Dropout is applied to each modality before fusion.
        y = torch.cat([self.dropout(vis), self.dropout(txt)], dim=1)
        y = self.mlp(y)
        return self.linear(y)
class LSTMCNNBaselineFF(LSTMCNNBaseline):
    """Variant of LSTMCNNBaseline that sees only the first simulation frame."""
    SIMULATION_INPUT = SimulationInput.FIRST_FRAME
    NUM_VIDEO_FRAMES = 1
class LSTMCNNBaselineLF(LSTMCNNBaseline):
    """Variant of LSTMCNNBaseline that sees only the last simulation frame."""
    SIMULATION_INPUT = SimulationInput.LAST_FRAME
    NUM_VIDEO_FRAMES = 1
class LSTMCNNBaseline2F(LSTMCNNBaseline):
    """Variant using the first AND last frames, each with its own CNN encoder."""
    SIMULATION_INPUT = SimulationInput.FIRST_AND_LAST_FRAMES
    NUM_VIDEO_FRAMES = 2
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reuse the parent's encoder for first frames; build a second,
        # independently-weighted encoder for last frames.
        self.first_frame_encoder = self.frame_encoder
        self.last_frame_encoder = self.create_submodule("frame_encoder")
    def process_simulation(self, simulations, **kwargs):
        # Assumes the batch stacks all first frames followed by all last
        # frames along dim 0 (so the true batch size is half).
        # TODO(review): confirm this layout against the data loader.
        batch_size = simulations.size(0) // 2
        first_frames = self.first_frame_encoder(simulations[:batch_size])
        first_frames = self.flatten(self.adaptive_pool(first_frames))
        last_frames = self.last_frame_encoder(simulations[batch_size:])
        last_frames = self.flatten(self.adaptive_pool(last_frames))
        y = torch.cat([first_frames, last_frames], dim=1)
        return y
class LSTMCNNVideoBaseline(LSTMCNNBaseline):
    """Variant consuming the full video; depth handled via 3D pooling."""
    SIMULATION_INPUT = SimulationInput.VIDEO
    NUM_VIDEO_FRAMES = 1  # see parent class constructor
|
# -*- coding: utf-8 -*-
## Show classic screening curve analysis for generation investment
#
# Compute the long-term equilibrium power plant investment for a given load duration curve (1000-1000z for z \in [0,1]) and a given set of generator investment options.
#
# Available as a Jupyter notebook at https://pypsa.readthedocs.io/en/latest/examples/generation-investment-screening-curve.ipynb.
import numpy as np
import pandas as pd
import pypsa
#%matplotlib inline
# Generator marginal (m) and capital (c) costs in EUR/MWh - numbers chosen for simple answer
generators = {
    "coal": {"m": 2, "c": 15},
    "gas": {"m": 12, "c": 10},
    "load-shedding": {"m": 1012, "c": 0},
}
# Screening curve intersections at 0.01 and 0.5
x = np.linspace(0, 1, 101)
# Total cost per MW as a function of capacity factor: c + x*m for each tech.
df = pd.DataFrame(
    {key: pd.Series(item["c"] + x * item["m"], x) for key, item in generators.items()}
)
df.plot(ylim=[0, 50], title="Screening Curve")
n = pypsa.Network()
num_snapshots = 1001
snapshots = np.linspace(0, 1, num_snapshots)
n.set_snapshots(snapshots)
# Scale the (unit) snapshot weightings so they sum to one period.
n.snapshot_weightings = n.snapshot_weightings / num_snapshots
n.add("Bus", name="bus")
# Linearly decreasing load duration curve: 1000 - 1000*z.
n.add("Load", name="load", bus="bus", p_set=1000 - 1000 * snapshots)
# All generators are capacity-expandable so the LOPF finds the equilibrium mix.
for gen in generators:
    n.add(
        "Generator",
        name=gen,
        bus="bus",
        p_nom_extendable=True,
        marginal_cost=float(generators[gen]["m"]),
        capital_cost=float(generators[gen]["c"]),
    )
n.loads_t.p_set.plot(title="Load Duration Curve")
n.lopf(solver_name="cbc")
print(n.objective)
# capacity set by total electricity required
# NB: no load shedding since all prices < 1e4
n.generators.p_nom_opt.round(2)
n.buses_t.marginal_price.plot(title="Price Duration Curve")
# The prices correspond either to VOLL (1012) for first 0.01 or the marginal costs (12 for 0.49 and 2 for 0.5)
# EXCEPT for (infinitesimally small) points at the screening curve intersections, which
# correspond to changing the load duration near the intersection, so that capacity changes
# This explains 7 = (12+10 - 15) (replacing coal with gas) and 22 = (12+10) (replacing load-shedding with gas)
# I have no idea what is causing \l = 0; it should be 2.
n.buses_t.marginal_price.round(2).sum(axis=1).value_counts()
n.generators_t.p.plot(ylim=[0, 600], title="Generation Dispatch")
# Demonstrate zero-profit condition
print("Total costs:")
print(
    n.generators.p_nom_opt * n.generators.capital_cost
    + n.generators_t.p.multiply(n.snapshot_weightings.generators, axis=0).sum()
    * n.generators.marginal_cost
)
print("\nTotal revenue:")
print(
    n.generators_t.p.multiply(n.snapshot_weightings.generators, axis=0)
    .multiply(n.buses_t.marginal_price["bus"], axis=0)
    .sum()
)
## Without expansion optimisation
#
# Take the capacities from the above long-term equilibrium, then disallow expansion.
#
# Show that the resulting market prices are identical.
#
# This holds in this example, but does NOT necessarily hold and breaks down in some circumstances (for example, when there is a lot of storage and inter-temporal shifting).
n.generators.p_nom_extendable = False
n.generators.p_nom = n.generators.p_nom_opt
n.lopf(solver_name="glpk")
n.buses_t.marginal_price.plot(title="Price Duration Curve")
n.buses_t.marginal_price.sum(axis=1).value_counts()
# Demonstrate zero-profit condition
# Differences are due to singular times, see above, not a problem
print("Total costs:")
print(
    n.generators.p_nom * n.generators.capital_cost
    + n.generators_t.p.multiply(n.snapshot_weightings.generators, axis=0).sum()
    * n.generators.marginal_cost
)
print("Total revenue:")
print(
    n.generators_t.p.multiply(n.snapshot_weightings.generators, axis=0)
    .multiply(n.buses_t.marginal_price["bus"], axis=0)
    .sum()
)
|
<filename>Fracktory3-3.0_b11/plugins/USBPrinting/avr_isp/stk500v2.py
"""
STK500v2 protocol implementation for programming AVR chips.
The STK500v2 protocol is used by the ArduinoMega2560 and a few other Arduino platforms to load firmware.
This is a python 3 conversion of the code created by <NAME> for the Cura project.
"""
import struct
import sys
import time
from serial import Serial # type: ignore
from serial import SerialException
from serial import SerialTimeoutException
from UM.Logger import Logger
from . import ispBase, intelHex
class Stk500v2(ispBase.IspBase):
    """Talks the STK500v2 protocol to an AVR bootloader over a serial port.

    Used by the ArduinoMega2560 (and similar boards) to load firmware.
    """
    def __init__(self):
        self.serial = None             # pyserial handle; None while disconnected
        self.seq = 1                   # message sequence number, wraps at 0xFF
        self.last_addr = -1
        self.progress_callback = None  # optional callable(progress, total)
        # Default until connect() probes the bootloader, so
        # hasChecksumFunction() no longer raises AttributeError pre-connect.
        self._has_checksum = False
    def connect(self, port = "COM22", speed = 115200):
        """Open the serial port, reset the board and enter programming mode.

        Raises ispBase.IspError if the port cannot be opened or the board
        does not acknowledge programming mode.
        """
        if self.serial is not None:
            self.close()
        try:
            self.serial = Serial(str(port), speed, timeout=1, writeTimeout=10000)
        except SerialException:
            raise ispBase.IspError("Failed to open serial port")
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # still propagate.
            raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
        self.seq = 1
        # Reset the controller by toggling DTR twice.
        for n in range(0, 2):
            self.serial.setDTR(True)
            time.sleep(0.1)
            self.serial.setDTR(False)
            time.sleep(0.1)
        time.sleep(0.2)
        self.serial.flushInput()
        self.serial.flushOutput()
        try:
            # CMD_ENTER_PROGMODE_ISP; expect [command, STATUS_CMD_OK].
            if self.sendMessage([0x10, 0xc8, 0x64, 0x19, 0x20, 0x00, 0x53, 0x03, 0xac, 0x53, 0x00, 0x00]) != [0x10, 0x00]:
                raise ispBase.IspError("Failed to enter programming mode")
            self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
            # Probe the bootloader's non-standard 0xEE checksum command.
            if self.sendMessage([0xEE])[1] == 0x00:
                self._has_checksum = True
            else:
                self._has_checksum = False
        except ispBase.IspError:
            self.close()
            raise
        self.serial.timeout = 5
    def close(self):
        """Close the serial port (if open) and mark the programmer disconnected."""
        if self.serial is not None:
            self.serial.close()
            self.serial = None
    def leaveISP(self):
        """Leave programming mode without closing the serial port.

        Resets the device only; the still-open serial port is detached from
        this object and returned so it can be reused without reopening.
        Returns None when not connected.
        """
        if self.serial is not None:
            if self.sendMessage([0x11]) != [0x11, 0x00]:
                raise ispBase.IspError("Failed to leave programming mode")
            ret = self.serial
            self.serial = None
            return ret
        return None
    def isConnected(self):
        """Return True while a serial port is held open."""
        return self.serial is not None
    def hasChecksumFunction(self):
        """Return True if the bootloader supports the fast 0xEE checksum verify."""
        return self._has_checksum
    def sendISP(self, data):
        """Send a raw 4-byte ISP command and return its 4 result bytes."""
        recv = self.sendMessage([0x1D, 4, 4, 0, data[0], data[1], data[2], data[3]])
        return recv[2:6]
    def writeFlash(self, flash_data):
        """Write flash_data (list of byte values) to the chip, page by page."""
        # Set load addr to 0; if we have more than 64k flash we need to
        # enable the address extension.
        page_size = self.chip["pageSize"] * 2
        flash_size = page_size * self.chip["pageCount"]
        Logger.log("d", "Writing flash")
        if flash_size > 0xFFFF:
            self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
        else:
            self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
        # Ceiling-divide with // so load_count is an int (the previous float
        # division leaked a float into the progress callback).
        load_count = (len(flash_data) + page_size - 1) // page_size
        for i in range(0, load_count):
            self.sendMessage([0x13, page_size >> 8, page_size & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flash_data[(i * page_size):(i * page_size + page_size)])
            if self.progress_callback is not None:
                if self._has_checksum:
                    self.progress_callback(i + 1, load_count)
                else:
                    # A full read-back verify follows, so writing is only
                    # half of the total work.
                    self.progress_callback(i + 1, load_count * 2)
    def verifyFlash(self, flash_data):
        """Verify the chip's flash against flash_data.

        Uses the fast firmware checksum command when available, otherwise
        reads the flash back 256 bytes at a time. Raises ispBase.IspError
        on mismatch.
        """
        if self._has_checksum:
            self.sendMessage([0x06, 0x00, (len(flash_data) >> 17) & 0xFF, (len(flash_data) >> 9) & 0xFF, (len(flash_data) >> 1) & 0xFF])
            res = self.sendMessage([0xEE])
            checksum_recv = res[2] | (res[3] << 8)
            checksum = 0
            for d in flash_data:
                checksum += d
            checksum &= 0xFFFF
            # Direct int comparison (was an equivalent but roundabout
            # hex-string comparison).
            if checksum != checksum_recv:
                raise ispBase.IspError("Verify checksum mismatch: 0x%x != 0x%x" % (checksum & 0xFFFF, checksum_recv))
        else:
            # Set load addr to 0; if we have more than 64k flash we need to
            # enable the address extension.
            flash_size = self.chip["pageSize"] * 2 * self.chip["pageCount"]
            if flash_size > 0xFFFF:
                self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
            else:
                self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
            # Integer ceiling division, 256 bytes per read.
            load_count = (len(flash_data) + 0xFF) // 0x100
            for i in range(0, load_count):
                recv = self.sendMessage([0x14, 0x01, 0x00, 0x20])[2:0x102]
                if self.progress_callback is not None:
                    self.progress_callback(load_count + i + 1, load_count * 2)
                for j in range(0, 0x100):
                    if i * 0x100 + j < len(flash_data) and flash_data[i * 0x100 + j] != recv[j]:
                        raise ispBase.IspError("Verify error at: 0x%x" % (i * 0x100 + j))
    def sendMessage(self, data):
        """Frame `data` as an STK500v2 message, send it, return the reply payload."""
        # Header: start byte 0x1B, sequence number, big-endian length, token 0x0E.
        message = struct.pack(">BBHB", 0x1B, self.seq, len(data), 0x0E)
        for c in data:
            message += struct.pack(">B", c)
        # Trailing XOR checksum over the whole frame.
        checksum = 0
        for c in message:
            checksum ^= c
        message += struct.pack(">B", checksum)
        try:
            self.serial.write(message)
            self.serial.flush()
        except SerialTimeoutException:
            raise ispBase.IspError("Serial send timeout")
        self.seq = (self.seq + 1) & 0xFF
        return self.recvMessage()
    def recvMessage(self):
        """Read one STK500v2 frame via a byte-wise state machine.

        Returns the frame payload as a list of ints. Raises ispBase.IspError
        on read timeout. A frame whose XOR checksum fails is discarded and
        scanning restarts at the next 0x1B start byte.
        """
        state = "Start"
        checksum = 0
        while True:
            s = self.serial.read()
            if len(s) < 1:
                raise ispBase.IspError("Timeout")
            b = struct.unpack(">B", s)[0]
            checksum ^= b
            if state == "Start":
                if b == 0x1B:
                    state = "GetSeq"
                    checksum = 0x1B
            elif state == "GetSeq":
                state = "MsgSize1"
            elif state == "MsgSize1":
                msg_size = b << 8
                state = "MsgSize2"
            elif state == "MsgSize2":
                msg_size |= b
                state = "Token"
            elif state == "Token":
                if b != 0x0E:
                    state = "Start"
                else:
                    state = "Data"
                    data = []
            elif state == "Data":
                data.append(b)
                if len(data) == msg_size:
                    state = "Checksum"
            elif state == "Checksum":
                if checksum != 0:
                    state = "Start"
                else:
                    return data
def portList():
    """Return names of USB serial COM ports from the Windows registry.

    Windows-only: enumerates HKLM\\HARDWARE\\DEVICEMAP\\SERIALCOMM and picks
    entries whose device name contains "USBSER".
    """
    ret = []
    # Python 3 module name is `winreg`; the old `_winreg` only exists on
    # Python 2 and made this "python 3 conversion" fail at import time.
    import winreg  # type: ignore
    key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "HARDWARE\\DEVICEMAP\\SERIALCOMM")
    i = 0
    while True:
        try:
            values = winreg.EnumValue(key, i)
        except OSError:
            # EnumValue raises OSError when the index runs past the last value.
            return ret
        if "USBSER" in values[0]:
            ret.append(values[1])
        i += 1
def runProgrammer(port, filename):
    """Program the chip on serial port `port` with the hex file `filename`."""
    isp_programmer = Stk500v2()
    isp_programmer.connect(port=port)
    isp_programmer.programChip(intelHex.readHex(filename))
    isp_programmer.close()
def main():
    """ Entry point to call the stk500v2 programmer from the commandline. """
    import threading
    if sys.argv[1] == "AUTO":
        # NOTE(review): "portList(): " contains no format placeholder, so the
        # repr argument may be dropped depending on Logger.log's formatting —
        # confirm against UM.Logger.
        Logger.log("d", "portList(): ", repr(portList()))
        # Flash every detected USB serial port in parallel, staggered by 5s.
        for port in portList():
            threading.Thread(target=runProgrammer, args=(port,sys.argv[2])).start()
            time.sleep(5)
    else:
        programmer = Stk500v2()
        programmer.connect(port = sys.argv[1])
        programmer.programChip(intelHex.readHex(sys.argv[2]))
        # NOTE(review): exits with status 1 even after a successful flash —
        # verify whether a non-zero exit code is intended here.
        sys.exit(1)
if __name__ == "__main__":
    main()
|
<reponame>ponderng/recon-pipeline
import shutil
import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from pipeline.recon.web import SubjackScan, TKOSubsScan, GatherWebTargets
# Canned scanner output used by the "creates results" tests below.
subjack_results = Path(__file__).parent.parent / "data" / "recon-results" / "subjack-results"
tkosubs_results = Path(__file__).parent.parent / "data" / "recon-results" / "tkosubs-results"
class TestTKOSubsScanScan:
    """Unit tests for TKOSubsScan (subdomain takeover via tkosubs).

    NOTE(review): the class name doubles "Scan" — likely a typo, but renaming
    would change pytest collection, so it is left as-is.
    """
    def setup_method(self):
        # Fresh temp results dir + sqlite DB per test; torn down afterwards.
        self.tmp_path = Path(tempfile.mkdtemp())
        self.scan = TKOSubsScan(
            target_file=__file__, results_dir=str(self.tmp_path), db_location=str(self.tmp_path / "testing.sqlite")
        )
        self.scan.exception = False
    def teardown_method(self):
        shutil.rmtree(self.tmp_path)
    def test_scan_requires(self):
        # Patch out the heavy dependency/requirement checks; only the
        # type of the upstream task matters.
        with patch("pipeline.recon.web.GatherWebTargets"):
            with patch("pipeline.recon.web.subdomain_takeover.meets_requirements"):
                retval = self.scan.requires()
                assert isinstance(retval, GatherWebTargets)
    def test_scan_creates_results_dir(self):
        assert self.scan.results_subfolder == self.tmp_path / "tkosubs-results"
    def test_scan_creates_results_file(self):
        assert self.scan.output_file == self.tmp_path / "tkosubs-results" / "tkosubs.csv"
    def test_scan_creates_database(self):
        assert self.scan.db_mgr.location.exists()
        assert self.tmp_path / "testing.sqlite" == self.scan.db_mgr.location
    def test_scan_creates_results(self):
        # Point the scan at canned tkosubs output and parse it.
        self.scan.results_subfolder = tkosubs_results
        self.scan.output_file = self.scan.results_subfolder / "tkosubs.csv"
        self.scan.parse_results()
        assert self.scan.output().exists()
    def test_parse_results(self):
        # Synthetic CSV with one vulnerable and one non-vulnerable entry.
        myresults = self.tmp_path / "tkosubs-results" / "tkosubs.csv"
        myresults.parent.mkdir(parents=True, exist_ok=True)
        content = "Domain,Cname,Provider,IsVulnerable,IsTakenOver,Response\n"
        content += "google.com,Cname,Provider,true,IsTakenOver,Response\n"
        content += "maps.google.com,Cname,Provider,false,IsTakenOver,Response\n"
        myresults.write_text(content)
        self.scan.output_file = myresults
        self.scan.db_mgr.get_or_create_target_by_ip_or_hostname = MagicMock()
        self.scan.db_mgr.get_or_create_target_by_ip_or_hostname.return_value = MagicMock()
        self.scan.db_mgr.add = MagicMock()
        self.scan.parse_results()
        assert self.scan.output().exists()
        assert self.scan.db_mgr.add.called
        assert self.scan.db_mgr.get_or_create_target_by_ip_or_hostname.called
    @pytest.mark.parametrize("test_input", [["google.com"], None])
    def test_scan_run(self, test_input):
        # With no hostnames in the DB the scan must not invoke the tool.
        with patch("subprocess.run") as mocked_run:
            self.scan.parse_results = MagicMock()
            self.scan.db_mgr.get_all_hostnames = MagicMock()
            self.scan.db_mgr.get_all_hostnames.return_value = test_input
            self.scan.run()
            if test_input is None:
                assert not mocked_run.called
                assert not self.scan.parse_results.called
            else:
                assert mocked_run.called
                assert self.scan.parse_results.called
class TestSubjackScan:
    """Unit tests for SubjackScan (subdomain takeover via subjack)."""
    def setup_method(self):
        # Fresh temp results dir + sqlite DB per test; torn down afterwards.
        self.tmp_path = Path(tempfile.mkdtemp())
        self.scan = SubjackScan(
            target_file=__file__, results_dir=str(self.tmp_path), db_location=str(self.tmp_path / "testing.sqlite")
        )
        self.scan.exception = False
    def teardown_method(self):
        shutil.rmtree(self.tmp_path)
    def test_scan_requires(self):
        # Patch out the heavy dependency/requirement checks; only the
        # type of the upstream task matters.
        with patch("pipeline.recon.web.GatherWebTargets"):
            with patch("pipeline.recon.web.subdomain_takeover.meets_requirements"):
                retval = self.scan.requires()
                assert isinstance(retval, GatherWebTargets)
    def test_scan_creates_results_dir(self):
        assert self.scan.results_subfolder == self.tmp_path / "subjack-results"
    def test_scan_creates_results_file(self):
        assert self.scan.output_file == self.tmp_path / "subjack-results" / "subjack.txt"
    def test_scan_creates_database(self):
        assert self.scan.db_mgr.location.exists()
        assert self.tmp_path / "testing.sqlite" == self.scan.db_mgr.location
    def test_scan_creates_results(self):
        # Point the scan at canned subjack output and parse it.
        self.scan.results_subfolder = subjack_results
        self.scan.output_file = self.scan.results_subfolder / "subjack.txt"
        self.scan.parse_results()
        assert self.scan.output().exists()
    def test_parse_results(self):
        # Synthetic output including a malformed line ("weird input") to
        # exercise the parser's tolerance.
        myresults = self.tmp_path / "subjack-results" / "subjack.txt"
        myresults.parent.mkdir(parents=True, exist_ok=True)
        content = "[Not Vulnerable] email.assetinventory.bugcrowd.com\n"
        content += "[Vulnerable] email.assetinventory.bugcrowd.com\n"
        content += "[Vulnerable] assetinventory.bugcrowd.com:8080\n"
        content += "weird input\n"
        myresults.write_text(content)
        self.scan.output_file = myresults
        self.scan.db_mgr.get_or_create_target_by_ip_or_hostname = MagicMock()
        self.scan.db_mgr.get_or_create_target_by_ip_or_hostname.return_value = MagicMock()
        self.scan.db_mgr.add = MagicMock()
        self.scan.parse_results()
        assert self.scan.output().exists()
        assert self.scan.db_mgr.add.called
        assert self.scan.db_mgr.get_or_create_target_by_ip_or_hostname.called
    @pytest.mark.parametrize("test_input", [["google.com"], None])
    def test_scan_run(self, test_input):
        # With no hostnames in the DB the scan must not invoke the tool.
        with patch("subprocess.run") as mocked_run:
            self.scan.parse_results = MagicMock()
            self.scan.db_mgr.get_all_hostnames = MagicMock()
            self.scan.db_mgr.get_all_hostnames.return_value = test_input
            self.scan.run()
            if test_input is None:
                assert not mocked_run.called
                assert not self.scan.parse_results.called
            else:
                assert mocked_run.called
                assert self.scan.parse_results.called
|
import xarray as xr
import matplotlib.pyplot as plt
# Load the 4-degree-warming rolling-mean MAR runs and average over years.
#CMIP5 models
ACCESS_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/ACCESS_rol_4.nc').mean(dim='year')
HADGEM_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/HADGEM_rol_4.nc').mean(dim='year')
HADGEM_cloud_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/HADGEM_cloud_rol_4.nc').mean(dim='year')
HADGEM_SMB_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/HADGEM_SMB_rol_4.nc').mean(dim='year')
CSIRO_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/CSIRO_rol_4.nc').mean(dim='year')
IPSL_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/IPSL_rol_4.nc').mean(dim='year')
MIROC5_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/MIROC5_rol_4.nc').mean(dim='year')
NORESM_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/NORESM_rol_4.nc').mean(dim='year')
#CMIP6 models
CESM_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/CESM_rol_4.nc').mean(dim='year')
CNRM_ESM2_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/CNRM_ESM2_rol_4.nc').mean(dim='year')
CNRM_CM6_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/CNRM_CM6_rol_4.nc').mean(dim='year')
MRI_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/MRI_rol_4.nc').mean(dim='year')
UKMO_rol_4 = xr.open_dataset('/projects/NS9600K/idunnam/src/rol_mean_3_5_deg/UKMO_rol_4.nc').mean(dim='year')
# Short aliases used by the analysis below.
ACCESS = ACCESS_rol_4
# NOTE(review): HADGEM is bound to HADGEM_SMB_rol_4, leaving HADGEM_rol_4
# unused — looks like a copy-paste slip; confirm the intended dataset.
HADGEM = HADGEM_SMB_rol_4
HADGEM_cloud = HADGEM_cloud_rol_4
HADGEM_SMB = HADGEM_SMB_rol_4
CSIRO = CSIRO_rol_4
IPSL = IPSL_rol_4
MIROC5 = MIROC5_rol_4
NORESM = NORESM_rol_4
#CMIP6 models
CESM = CESM_rol_4
CNRM_ESM2 = CNRM_ESM2_rol_4
CNRM_CM6 = CNRM_CM6_rol_4
MRI = MRI_rol_4
UKMO = UKMO_rol_4
#dataset for choosing coordinates
ds = xr.open_dataset('/projects/NS9600K/shofer/Paper_CMIP6/MAR/monthly_all_072021/MARv3.9-ACCESS13-2074.nc', decode_times=False)
from function_two_plots import two_plots
# === Calculate the model mean of each variable ==#
#Function for calculating the model mean
def model_mean(mod):
    """Return the element-wise mean over a collection of model fields."""
    n_models = len(mod)
    running_total = 0
    for field in mod:
        running_total = running_total + field
    return running_total / n_models
#CMIP5 models
CMIP5_models_SMB = [ACCESS, HADGEM_SMB, CSIRO, IPSL, MIROC5, NORESM]
CMIP5_models_precipitation = [ACCESS, HADGEM_cloud, CSIRO, IPSL, MIROC5, NORESM]
# Gather each SMB/precipitation variable across the CMIP5 ensemble
# (comprehensions replace the original manual append loops), then reduce
# each list to its ensemble mean.
SMB_CMIP5 = [model.SMB for model in CMIP5_models_SMB]
SU_CMIP5 = [model.SU for model in CMIP5_models_SMB]
RU_CMIP5 = [model.RU for model in CMIP5_models_SMB]
ME_CMIP5 = [model.ME for model in CMIP5_models_SMB]
PR_CMIP5 = [model.PR for model in CMIP5_models_precipitation]
RF_CMIP5 = [model.RF for model in CMIP5_models_precipitation]
SF_CMIP5 = [model.SF for model in CMIP5_models_precipitation]
SMB_CMIP5_model_mean = model_mean(SMB_CMIP5)
SU_CMIP5_model_mean = model_mean(SU_CMIP5)
RU_CMIP5_model_mean = model_mean(RU_CMIP5)
ME_CMIP5_model_mean = model_mean(ME_CMIP5)
PR_CMIP5_model_mean = model_mean(PR_CMIP5)
RF_CMIP5_model_mean = model_mean(RF_CMIP5)
SF_CMIP5_model_mean = model_mean(SF_CMIP5)
#CMIP6 models
CMIP6_models = [CESM, CNRM_ESM2, CNRM_CM6, MRI, UKMO]
# Gather each variable across the CMIP6 ensemble (comprehensions replace
# the original manual append loops), then reduce to ensemble means.
SMB_CMIP6 = [model.SMB for model in CMIP6_models]
SU_CMIP6 = [model.SU for model in CMIP6_models]
RU_CMIP6 = [model.RU for model in CMIP6_models]
ME_CMIP6 = [model.ME for model in CMIP6_models]
PR_CMIP6 = [model.PR for model in CMIP6_models]
RF_CMIP6 = [model.RF for model in CMIP6_models]
SF_CMIP6 = [model.SF for model in CMIP6_models]
SMB_CMIP6_model_mean = model_mean(SMB_CMIP6)
SU_CMIP6_model_mean = model_mean(SU_CMIP6)
RU_CMIP6_model_mean = model_mean(RU_CMIP6)
ME_CMIP6_model_mean = model_mean(ME_CMIP6)
PR_CMIP6_model_mean = model_mean(PR_CMIP6)
RF_CMIP6_model_mean = model_mean(RF_CMIP6)
SF_CMIP6_model_mean = model_mean(SF_CMIP6)
# Use LaTeX text rendering for all figure labels.
plt.rcParams.update({
    "text.usetex": True,
    "font.family": 'DejaVu Sans',
    "font.serif": ["Computer Modern Roman"]})
# Render side-by-side CMIP5 vs CMIP6 ensemble-mean maps for each SMB
# component, with 2000/3000 m surface-height contours overlaid.
#=== SMB ===#
two_plots(SMB_CMIP5_model_mean, SMB_CMIP6_model_mean, ds['LON'], ds['LAT'], -600,600,
          surf_height_data = ds['SH'],
          add_contour_levels = True,
          contour_levels = [2000,3000],
          title_fig_l = 'MAR CMIP5 Model mean',
          title_fig_r = 'MAR CMIP6 Model mean',
          fontsize_title_fig = 16,
          cbar_title = 'SMB [mmWE]' , cmap_color = 'RdBu_r',
          file_title = '4_deg_SMB_net_two_plots')
#=== SU (sublimation) ===#
two_plots(SU_CMIP5_model_mean, SU_CMIP6_model_mean, ds['LON'], ds['LAT'], -10,10,
          surf_height_data = ds['SH'],
          add_contour_levels = True,
          contour_levels = [2000,3000],
          title_fig_l = 'MAR CMIP5 Model mean',
          title_fig_r = 'MAR CMIP6 Model mean',
          fontsize_title_fig = 16,
          cbar_title = 'Sublimation [mmWE]' , cmap_color = 'RdBu_r',
          file_title = '4_deg_SU_net_two_plots')
#=== RU (Runoff) ===#
two_plots(RU_CMIP5_model_mean, RU_CMIP6_model_mean, ds['LON'], ds['LAT'], -500,500,
          surf_height_data = ds['SH'],
          add_contour_levels = True,
          contour_levels = [2000,3000],
          title_fig_l = 'MAR CMIP5 Model mean',
          title_fig_r = 'MAR CMIP6 Model mean',
          fontsize_title_fig = 16,
          cbar_title = 'Runoff [mmWE]' , cmap_color = 'RdBu_r',
          file_title = '4_deg_RU_net_two_plots')
#=== ME (Melt) ===#
two_plots(ME_CMIP5_model_mean, ME_CMIP6_model_mean, ds['LON'], ds['LAT'], -500,500,
          surf_height_data = ds['SH'],
          add_contour_levels = True,
          contour_levels = [2000,3000],
          title_fig_l = 'MAR CMIP5 Model mean',
          title_fig_r = 'MAR CMIP6 Model mean',
          fontsize_title_fig = 16,
          cbar_title = 'Melt [mmWE]' , cmap_color = 'RdBu_r',
          file_title = '4_deg_ME_net_two_plots')
#=== PR (precipitation) ===#
two_plots(PR_CMIP5_model_mean, PR_CMIP6_model_mean, ds['LON'], ds['LAT'], -50,50,
          surf_height_data = ds['SH'],
          add_contour_levels = True,
          contour_levels = [2000,3000],
          title_fig_l = 'MAR CMIP5 Model mean',
          title_fig_r = 'MAR CMIP6 Model mean',
          fontsize_title_fig = 16,
          cbar_title = 'PR [mmWE]' , cmap_color = 'RdBu_r',
          file_title = '4_deg_PR_net_two_plots')
#=== RF (Rainfall) ===#
two_plots(RF_CMIP5_model_mean, RF_CMIP6_model_mean, ds['LON'], ds['LAT'], -50,50,
          surf_height_data = ds['SH'],
          add_contour_levels = True,
          contour_levels = [2000,3000],
          title_fig_l = 'MAR CMIP5 Model mean',
          title_fig_r = 'MAR CMIP6 Model mean',
          fontsize_title_fig = 16,
          cbar_title = 'RF [mmWE]' , cmap_color = 'RdBu_r',
          file_title = '4_deg_RF_net_two_plots')
#=== SF (Snowfall) ===#
two_plots(SF_CMIP5_model_mean, SF_CMIP6_model_mean, ds['LON'], ds['LAT'], -50,50,
          surf_height_data = ds['SH'],
          add_contour_levels = True,
          contour_levels = [2000,3000],
          title_fig_l = 'MAR CMIP5 Model mean',
          title_fig_r = 'MAR CMIP6 Model mean',
          fontsize_title_fig = 16,
          cbar_title = 'SF [mmWE]' , cmap_color = 'RdBu_r',
          file_title = '4_deg_SF_net_two_plots')
from ci_reduce.image import CI_image
from ci_reduce.exposure import CI_exposure
import ci_reduce.common as common
import ci_reduce.xmatch.gaia as gaia
import astropy.io.fits as fits
from astropy.table import vstack, hstack
import os
import ci_reduce.analysis.basic_image_stats as bis
import ci_reduce.analysis.basic_catalog_stats as bcs
import ci_reduce.analysis.util as util
import numpy as np
import time
from ci_reduce.ci_wcs import ccd_center_radec
# in the context of this file, "image" and "exposure" generally refer to
# CI_image and CI_exposure objects
def loading_image_extension_message(extname):
    """Print a progress message naming the image extension being loaded."""
    message = 'Attempting to load image extension : ' + extname
    print(message)
def load_image_from_hdu(hdu, verbose=True, cube_index=None):
    """Wrap an already-opened FITS HDU in a CI_image.

    cube_index selects one slice when the HDU holds an image cube
    (forwarded to CI_image; semantics defined there).
    """
    loading_image_extension_message(hdu.header['EXTNAME'])
    if verbose:
        print(repr(hdu.header))
    return CI_image(hdu.data, hdu.header, cube_index=cube_index)
def load_image_from_filename(fname, extname):
    """Read one image extension from a FITS file and wrap it in a CI_image."""
    assert(os.path.exists(fname))
    loading_image_extension_message(extname)
    # refuse extensions that are not valid CI camera names
    assert(common.is_valid_image_extname(extname))
    data, header = fits.getdata(fname, extname=extname, header=True)
    return CI_image(data, header)
def realtime_raw_read(fname, delay=2.0, max_attempts=5):
    """
    attempt to avoid getting burned by partially written files when
    trying to analyze data in real time

    delay is in seconds

    Retries up to max_attempts times, forcing a full read and verify of
    every HDU each attempt. Asserts (dies) if no clean read succeeds.
    """
    # something has gone badly wrong if the filename doesn't even exist
    # that's not the scenario I'm trying to address here
    assert(os.path.exists(fname))
    hdul = None
    for i in range(max_attempts):
        try:
            hdul = fits.open(fname, lazy_load_hdus=False)
            hdul.verify(option='exception')
            # touch data/header of every HDU to force a complete read
            for hdu in hdul:
                _, __ = hdu.data, hdu.header
                ___ = hdu.data.shape
        except Exception:
            # BUG FIX: discard the partially read HDU list — previously a
            # successful fits.open followed by a failed verify left `hdul`
            # set, so the loop broke and returned a corrupt HDU list.
            # Also narrowed from a bare except.
            hdul = None
            print('encountered problem reading ' + fname)
            time.sleep(delay)
        if hdul is not None:
            break
    # die if unable to read file after max_attempts attempts
    assert(hdul is not None)
    return hdul
def load_exposure(fname, verbose=True, realtime=False, cube_index=None):
    """Load a raw CI exposure file into a CI_exposure object.

    realtime=True uses the retrying reader for partially written files.
    cube_index must be given iff the file holds 3D image cubes.
    Returns None if any image extension fails to load.
    """
    assert(os.path.exists(fname))
    print('Attempting to load exposure : ' + fname)
    par = common.ci_misc_params()
    if not realtime:
        hdul = fits.open(fname)
    else:
        hdul = realtime_raw_read(fname)
    dummy_fz_header = None
    # flag which HDUs are actual camera images
    is_image_hdu = np.zeros(len(hdul), dtype=bool)
    for i, hdu in enumerate(hdul):
        # real data has another dummy extension added with no EXTNAME
        keywords = [c[0] for c in hdu.header.cards]
        if not ('EXTNAME' in keywords):
            continue
        if hdu.header['EXTNAME'] not in common.valid_extname_list():
            continue
        # keep the fpack dummy extension's header but don't treat it as image
        if (hdu.header['EXTNAME']).strip() == par['fz_dummy_extname']:
            dummy_fz_header = hdu.header
            continue
        is_image_hdu[i] = True
    w_im = np.where(is_image_hdu)[0]
    # cube files require cube_index, non-cube files forbid it
    is_cube = (len(hdul[w_im[0]].data.shape) == 3)
    assert((is_cube and (cube_index is None)) == False)
    assert(((not is_cube) and (cube_index is not None)) == False)
    try:
        imlist = [load_image_from_hdu(hdul[ind], verbose=verbose, cube_index=cube_index) for ind in w_im]
    # NOTE(review): bare except also swallows KeyboardInterrupt — consider
    # narrowing to Exception.
    except:
        print('failed to load exposure at image list creation stage')
        return None
    exp = CI_exposure(imlist, dummy_fz_header=dummy_fz_header)
    print('Successfully loaded exposure : ' + fname)
    print('Exposure has ' + str(exp.num_images_populated()) +
          ' image extensions populated')
    print('Populated image extension names are : ' +
          str(exp.populated_extnames()))
    return exp
def reduced_image_fname(outdir, fname_in, flavor, gzip=True, cube_index=None):
    """Build the output filename for one reduced-image flavor.

    Asserts that outdir exists and that the output does not already exist
    (no clobbering). The cube_index suffix is spliced in before '.fits'
    even when '.gz' has already been appended, since str.replace hits the
    first '.fits' occurrence.
    """
    assert(os.path.exists(outdir))
    outname = os.path.join(outdir, os.path.basename(fname_in))
    # get rid of any ".fz" or ".gz" present in input filename
    outname = outname.replace('.fz', '')
    outname = outname.replace('.gz', '')
    assert(outname[-5:] == '.fits')
    outname = outname.replace('.fits',
        common.reduced_image_filename_label(flavor) + '.fits')
    if gzip:
        outname += '.gz'
    if cube_index is not None:
        outname = outname.replace('.fits',
                                  '-' + str(cube_index).zfill(5) + '.fits')
    assert(not os.path.exists(outname))
    return outname
def check_image_level_outputs_exist(outdir, fname_in, gzip=True,
                                    cube_index=None):
    """Assert that no reduced image outputs already exist for this exposure.

    The checking happens inside reduced_image_fname (it asserts the output
    path is absent); the returned names are deliberately discarded.
    """
    par = common.ci_misc_params()
    for flavor in par['reduced_image_flavors']:
        _ = reduced_image_fname(outdir, fname_in, flavor, gzip=gzip,
                                cube_index=cube_index)
def retrieve_git_rev():
    """Return the short git revision hash of the directory holding this code.

    Temporarily chdirs into the code directory when the current working
    directory is not already inside it, then restores the original cwd.
    """
    code_dir = os.path.dirname(os.path.realpath(__file__))
    original_dir = os.getcwd()
    needs_chdir = (original_dir[0:len(code_dir)] != code_dir)
    if needs_chdir:
        os.chdir(code_dir)
    gitrev = os.popen("git rev-parse --short HEAD").read().replace('\n','')
    if needs_chdir:
        os.chdir(original_dir)
    print('"git rev" version info:', gitrev)
    return gitrev
def write_image_level_outputs(exp, outdir, fname_in, gzip=True,
                              cube_index=None):
    # exp is a CI_exposure object
    # outdir is the output directory (string)
    # fname_in is the input filename (string)
    """Write every reduced-image flavor of this exposure to FITS files.

    The REDUCED flavor is never gzipped regardless of the gzip flag.
    """
    par = common.ci_misc_params()
    for flavor in par['reduced_image_flavors']:
        _gzip = (gzip if (flavor != 'REDUCED') else False)
        outname = reduced_image_fname(outdir, fname_in, flavor, gzip=_gzip,
                                      cube_index=cube_index)
        hdulist = exp.to_hdulist(flavor=flavor)
        print('Attempting to write ' + flavor + ' image output to ' +
              outname)
        hdulist.writeto(outname)
        print('Successfully wrote ' + flavor + ' image output to ' +
              outname)
def strip_none_columns(table):
    """Drop object-dtype columns in place (FITS writers reject None values).

    Returns the same table for convenience.
    """
    object_columns = [name for name in table.colnames
                      if table[name].dtype.str == '|O']
    for name in object_columns:
        table.remove_column(name)
    return table
def combine_per_camera_catalogs(catalogs):
    # catalogs is the output of CI_exposure's all_source_catalogs() method
    # which is a dictionary of astropy QTable's, with the keys
    # being the CI camera extension names

    # want to add a column to each table giving the CI camera name, then
    # append the all into one per-exposure table
    """Merge per-camera source catalogs into one table with camera columns.

    None entries (cameras with no catalog) are skipped; object-dtype
    columns are stripped so the result can be written to FITS.
    """
    assert(type(catalogs).__name__ == 'dict')
    composite_list = []
    for extname, tab in catalogs.items():
        if tab is not None:
            tab['camera'] = extname
            # note: the comprehension variable shadows the loop's extname,
            # but the values come from the just-written 'camera' column
            tab['ci_number'] = [common.ci_extname_to_ci_number(extname) for extname in tab['camera']]
            composite_list.append(tab)
    composite = vstack(composite_list)
    composite = strip_none_columns(composite)
    return composite
def write_exposure_source_catalog(catalog, outdir, fname_in,
                                  cube_index=None):
    """Write the per-exposure source catalog to <basename>_catalog.fits.

    Adds fname_in and expid columns; asserts the output does not already
    exist (no clobbering).
    """
    assert(os.path.exists(outdir))
    outname = os.path.join(outdir, os.path.basename(fname_in))
    # get rid of any ".fz" or ".gz" present in input filename
    outname = outname.replace('.fz', '')
    outname = outname.replace('.gz', '')
    assert(outname[-5:] == '.fits')
    outname = outname.replace('.fits', '_catalog.fits')
    if cube_index is not None:
        outname = outname.replace('.fits',
                                  '-' + str(cube_index).zfill(5) + '.fits')
    assert(not os.path.exists(outname))
    catalog['fname_in'] = fname_in
    expid = util.expid_from_raw_filename(fname_in)
    catalog['expid'] = expid
    print('Attempting to write source catalog to ' + outname)
    catalog.write(outname, format='fits')
def write_ps1_matches(catalog, outdir, fname_in, cube_index=None):
    """Cross-match the catalog against PS1 and write <basename>_ps1.fits.

    PS1 coordinate columns are renamed to ra_ps1/dec_ps1 so they don't
    collide with the detections' own ra/dec.
    """
    ps1 = gaia.gaia_xmatch(catalog['ra'], catalog['dec'], ps1=True)
    ps1.rename_column('ra', 'ra_ps1')
    ps1.rename_column('dec', 'dec_ps1')
    ps1_matches = hstack([catalog, ps1])
    assert(os.path.exists(outdir))
    outname = os.path.join(outdir, os.path.basename(fname_in))
    # get rid of any ".fz" or ".gz" present in input filename
    outname = outname.replace('.fz', '')
    outname = outname.replace('.gz', '')
    assert(outname[-5:] == '.fits')
    outname = outname.replace('.fits', '_ps1.fits')
    if cube_index is not None:
        outname = outname.replace('.fits',
                                  '-' + str(cube_index).zfill(5) + '.fits')
    assert(not os.path.exists(outname))
    ps1_matches.write(outname, format='fits')
def gather_gaia_crossmatches(catalog):
    """Cross-match catalog positions against Gaia; return the match table."""
    gaia_matches = gaia.gaia_xmatch(catalog['ra'], catalog['dec'])
    # avoid downstream conflict with 'ra', 'dec' columns that refer
    # to the world coordinates of the CI detections
    gaia_matches.rename_column('ra', 'ra_gaia')
    gaia_matches.rename_column('dec', 'dec_gaia')
    return gaia_matches
def append_gaia_crossmatches(catalog):
    """Return catalog with Gaia cross-match columns horizontally stacked on."""
    gaia_matches = gather_gaia_crossmatches(catalog)
    # I believe that there will always be a Gaia match for each
    # detected source, but will need to see if that assumption breaks
    # at any point
    catalog = hstack([catalog, gaia_matches])
    return catalog
def gather_pixel_stats(exp):
    """Compute per-camera pixel statistics and stack them into one table.

    Cameras with no image are skipped. Returns None when no image is
    populated at all.
    """
    t = None
    for extname, im in exp.images.items():
        if im is None:
            continue
        print('Computing pixel statistics for ' + extname)
        t_im = bis.compute_all_stats(im.image, extname=extname)
        if t is None:
            t = t_im
        else:
            t = vstack([t, t_im])
    return t
def high_level_ccds_metrics(tab, catalog):
    """Append per-camera FWHM and source-count summary columns to ``tab``.

    Modifies ``tab`` in place; one row per camera. Rows whose camera has
    no catalog sources keep zeroed metrics.
    """
    nrows = len(tab)
    fwhm_major_pix = np.zeros(nrows)
    fwhm_minor_pix = np.zeros(nrows)
    fwhm_pix = np.zeros(nrows)
    fwhm_asec = np.zeros(nrows)
    n_sources = np.zeros(nrows, dtype=int)
    n_sources_for_shape = np.zeros(nrows, dtype=int)
    for i, row in enumerate(tab):
        # Compute the camera membership mask once per row; the original
        # evaluated the same comparison three times.
        mask = (catalog['camera'] == row['camera'])
        n_cam = int(np.sum(mask))
        if n_cam == 0:
            continue
        fwhm_stats = bcs.overall_image_fwhm(catalog[mask])
        fwhm_major_pix[i] = fwhm_stats[0]
        fwhm_minor_pix[i] = fwhm_stats[1]
        fwhm_pix[i] = fwhm_stats[2]
        fwhm_asec[i] = fwhm_stats[3]
        n_sources[i] = n_cam
        n_sources_for_shape[i] = fwhm_stats[4]
    tab['fwhm_major_pix'] = fwhm_major_pix
    tab['fwhm_minor_pix'] = fwhm_minor_pix
    tab['fwhm_pix'] = fwhm_pix
    tab['fwhm_asec'] = fwhm_asec
    tab['n_sources'] = n_sources
    tab['n_sources_for_shape'] = n_sources_for_shape
def write_ccds_table(tab, catalog, exp, outdir, fname_in, cube_index=None):
    """Augment the per-camera ('CCDs') table with header/WCS metadata and
    summary statistics, then write it to a FITS file.

    Parameters
    ----------
    tab : per-camera table with at least a 'camera' column; modified in place.
    catalog : source catalog used for the high-level FWHM/count metrics.
    exp : exposure object; exp.images maps camera extname -> image.
    outdir : output directory; must already exist (asserted).
    fname_in : raw input filename (also reread here for the GFA header).
    cube_index : optional frame index within a cube; appended to the
        output filename when not None.
    """
    assert(os.path.exists(outdir))
    outname = os.path.join(outdir, os.path.basename(fname_in))
    # get rid of any ".fz" or ".gz" present in input filename
    outname = outname.replace('.fz', '')
    outname = outname.replace('.gz', '')
    assert(outname[-5:] == '.fits')
    outname = outname.replace('.fits', '_ccds.fits')
    if cube_index is not None:
        outname = outname.replace('.fits',
                                  '-' + str(cube_index).zfill(5) + '.fits')
    # never overwrite an existing output file
    assert(not os.path.exists(outname))
    # per-camera metadata pulled from the reduced images and their headers
    tab['sky_mag_ab'] = [exp.images[extname].sky_mag for extname in tab['camera']]
    tab['petal_loc'] = [common.ci_extname_to_ci_number(extname) for extname in tab['camera']]
    tab['expid'] = [exp.images[extname].header['EXPID'] for extname in tab['camera']]
    if cube_index is None:
        # EXPTIME/MJD-OBS only recorded for non-cube reductions -- presumably
        # not meaningful per cube frame; TODO confirm
        tab['exptime'] = [exp.images[extname].header['EXPTIME'] for extname in tab['camera']]
        tab['mjd'] = [exp.images[extname].header['MJD-OBS'] for extname in tab['camera']]
    # exposure-level cards come from the raw file's GFA extension
    h_gfa = fits.getheader(fname_in, extname='GFA')
    tab['airmass'] = h_gfa['AIRMASS']
    tab['night'] = h_gfa['NIGHT']
    tab['racen'] = np.zeros(len(tab), dtype=float)
    tab['deccen'] = np.zeros(len(tab), dtype=float)
    tab['fname_raw'] = fname_in
    tab['contrast'] = [exp.images[extname].header['CONTRAST'] for extname in tab['camera']]
    # sky coordinates of each camera's CCD center, from its WCS solution
    for i, extname in enumerate(tab['camera']):
        racen, deccen = ccd_center_radec(exp.images[extname].wcs)
        tab['racen'][i] = racen
        tab['deccen'][i] = deccen
    # per-camera FWHM and source-count summary columns
    high_level_ccds_metrics(tab, catalog)
    print('Attempting to write CCDs table to ' + outname)
    tab.write(outname, format='fits')
def get_temperature_celsius(fname_in, extname):
    """Return the CCD temperature for one camera of a raw exposure.

    Tries the CCDTEMP card in the requested extension's header; if that
    card is absent, falls back to the mean of the CI-T1..CI-T5 cards in
    the 'CI' extension header.
    """
    assert(os.path.exists(fname_in))
    h = fits.getheader(fname_in, extname=extname)
    try:
        ccdtemp = h['CCDTEMP']
    except KeyError:
        # Catch only the missing-card case (astropy headers raise KeyError);
        # the original bare `except:` would also have hidden real errors.
        # this is just a placeholder/guess -- the CCD temperature situation
        # is a complete zoo
        hh = fits.getheader(fname_in, extname='CI')
        t_kw_list = ['CI-T' + str(i) for i in np.arange(1, 6)]
        ccdtemp = np.mean([hh[kw] for kw in t_kw_list])
    return ccdtemp
|
<filename>byceps/services/seating/seat_service.py
"""
byceps.services.seating.seat_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from typing import Iterator, Optional, Sequence
from ...database import db
from ...typing import PartyID
from ..ticketing.dbmodels.category import Category as DbTicketCategory
from ..ticketing.dbmodels.ticket import Ticket as DbTicket
from ..ticketing.transfer.models import TicketCategory, TicketCategoryID
from .dbmodels.area import Area as DbArea
from .dbmodels.seat import Seat as DbSeat
from .transfer.models import AreaID, Seat, SeatID, SeatUtilization
def create_seat(
    area_id: AreaID,
    coord_x: int,
    coord_y: int,
    category_id: TicketCategoryID,
    *,
    label: Optional[str] = None,
) -> DbSeat:
    """Create a seat at the given coordinates in an area and persist it.

    Returns the committed database entity.
    """
    seat = DbSeat(
        area_id, category_id, coord_x=coord_x, coord_y=coord_y, label=label
    )

    db.session.add(seat)
    db.session.commit()

    return seat
def create_seats(area_id: AreaID, seats: Iterator[Seat]) -> None:
    """Create multiple seats in the same area at once.

    `seats` is consumed exactly once; all rows are inserted and committed
    in a single transaction.
    """
    db_seats = [
        DbSeat(
            area_id,
            seat.category_id,
            coord_x=seat.coord_x,
            coord_y=seat.coord_y,
            label=seat.label,
            type_=seat.type_,
        )
        for seat in seats
    ]

    db.session.add_all(db_seats)
    db.session.commit()
def delete_seat(seat_id: SeatID) -> None:
    """Remove the seat with the given ID from the database.

    Deleting a nonexistent ID is a no-op.
    """
    db.session.query(DbSeat).filter_by(id=seat_id).delete()
    db.session.commit()
def count_occupied_seats_by_category(
    party_id: PartyID,
) -> list[tuple[TicketCategory, int]]:
    """Count occupied seats for the party, grouped by ticket category.

    Categories with no occupied seats are included with a count of zero
    (hence the outer join below).
    """
    # Seats currently holding a non-revoked ticket, with their category.
    subquery = db.session \
        .query(
            DbSeat.id,
            DbSeat.category_id
        ) \
        .join(DbTicket) \
        .filter_by(revoked=False) \
        .subquery()

    # Outer join so categories with zero occupied seats still appear;
    # count(subquery.c.id) yields 0 for the unmatched rows.
    rows = db.session \
        .query(
            DbTicketCategory.id,
            DbTicketCategory.party_id,
            DbTicketCategory.title,
            db.func.count(subquery.c.id)
        ) \
        .outerjoin(subquery, db.and_(DbTicketCategory.id == subquery.c.category_id)) \
        .filter(DbTicketCategory.party_id == party_id) \
        .group_by(DbTicketCategory.id) \
        .order_by(DbTicketCategory.id) \
        .all()

    return [(TicketCategory(row[0], row[1], row[2]), row[3]) for row in rows]
def count_occupied_seats_for_party(party_id: PartyID) -> int:
    """Count occupied seats for the party.

    A seat is occupied when it holds a non-revoked ticket.
    """
    # `== False` (not `is False`) is required so SQLAlchemy renders a SQL
    # comparison against the `revoked` column.
    return db.session \
        .query(DbSeat) \
        .join(DbTicket) \
        .join(DbTicketCategory) \
        .filter(DbTicket.revoked == False) \
        .filter(DbTicketCategory.party_id == party_id) \
        .count()
def count_seats_for_party(party_id: PartyID) -> int:
    """Return the number of seats in seating areas for that party."""
    # A seat belongs to a party indirectly, through its area.
    return db.session \
        .query(DbSeat) \
        .join(DbArea) \
        .filter(DbArea.party_id == party_id) \
        .count()
def get_seat_utilization(party_id: PartyID) -> SeatUtilization:
    """Return how many seats of how many in total are occupied."""
    # Arguments are evaluated left to right: occupied count first,
    # then the total, matching the SeatUtilization field order.
    return SeatUtilization(
        count_occupied_seats_for_party(party_id),
        count_seats_for_party(party_id),
    )
def get_seat_total_per_area(party_id: PartyID) -> dict[AreaID, int]:
    """Return the number of seats per area for that party.

    Areas without seats are included with a count of zero (outer join).
    """
    # filter_by applies to DbArea, the first (primary) query entity.
    area_ids_and_seat_counts = db.session \
        .query(
            DbArea.id,
            db.func.count(DbSeat.id)
        ) \
        .filter_by(party_id=party_id) \
        .outerjoin(DbSeat) \
        .group_by(DbArea.id) \
        .all()

    return dict(area_ids_and_seat_counts)
def find_seat(seat_id: SeatID) -> Optional[DbSeat]:
    """Return the seat with that id, or `None` if not found."""
    # Query.get looks up by primary key (and may hit the identity map).
    return db.session.query(DbSeat).get(seat_id)
def get_seat(seat_id: SeatID) -> DbSeat:
    """Return the seat with that id, or raise an exception.

    Raises ValueError when no seat with that ID exists.
    """
    db_seat = find_seat(seat_id)

    if db_seat is None:
        raise ValueError(f'Unknown seat ID "{seat_id}"')

    return db_seat
def find_seats(seat_ids: set[SeatID]) -> set[DbSeat]:
    """Return the seats with those IDs.

    IDs without a matching seat are silently omitted from the result.
    """
    if not seat_ids:
        # avoid issuing a degenerate `IN ()` query for an empty set
        return set()

    seats = db.session \
        .query(DbSeat) \
        .filter(DbSeat.id.in_(frozenset(seat_ids))) \
        .all()

    return set(seats)
def get_seats_with_tickets_for_area(area_id: AreaID) -> Sequence[DbSeat]:
    """Return the seats and their associated tickets (if available) for
    that area.
    """
    # Eager-load the occupying ticket to avoid N+1 queries downstream.
    return db.session \
        .query(DbSeat) \
        .filter_by(area_id=area_id) \
        .options(
            db.joinedload(DbSeat.occupied_by_ticket),
        ) \
        .all()
|
from pathlib import Path
import logging
import os
import re
import sys
from typing import Union
import bmesh
import bpy
import numpy
from mathutils import Vector
sys.path.append(os.path.dirname(__file__))
import world_json
from dirs import dest, src
from selection import all_mesh_objects, editmode, select_object, select_objects_in_collection
from blender_file import saveScene, writeGlb
# Route all log output to stderr at DEBUG level so it stays separate from
# Blender's own stdout chatter; use the root logger throughout this script.
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
log = logging.getLogger()
def separate_rect(obj_name: str, xlo: float, xhi: float, ylo: float, yhi: float) -> Union[str, None]:
    """Split the geometry of `obj_name` that falls inside the given XY
    rectangle off into a new object.

    Returns the new object's name, or None if nothing was separated.
    The new object gets a quick cube-projected UV map.
    """
    names_before = set(o.name for o in bpy.data.objects)
    select_object(obj_name)
    with editmode():
        bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT')
        bpy.ops.mesh.select_all(action='DESELECT')
        mesh = bmesh.from_edit_mesh(bpy.data.objects[obj_name].data)
        sel_verts = set()
        for v in mesh.verts:
            if xlo <= v.co.x < xhi and ylo <= v.co.y < yhi:
                sel_verts.add(v.index)
                v.select = True
        # Bug fix: e.verts / f.verts yield BMVert objects, not integer
        # indices, so the original membership tests against the index set
        # could never match. Compare vertex indices instead.
        for e in mesh.edges:
            if all(v.index in sel_verts for v in e.verts):
                e.select = True
        for f in mesh.faces:
            if all(v.index in sel_verts for v in f.verts):
                f.select = True
        mesh.select_flush(True)
        bpy.ops.mesh.separate()
    names_after = set(o.name for o in bpy.data.objects)
    new_names = names_after.difference(names_before)
    if not new_names:
        return None
    new_name = new_names.pop()
    select_object(new_name)
    with editmode():
        bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='FACE')
        bpy.ops.mesh.select_all(action='SELECT')
        # todo needs to go to a separate uv, to not break the big ground dif texture
        bpy.ops.uv.cube_project(cube_size=1, scale_to_bounds=True)
    return new_name
def make_lightmap_uv_layer(obj, outData):
    """Add a fresh UV layer named 'lightmap' to obj, record its name under
    outData['objs'][obj.name]['lightmap_uv'], and return the new layer."""
    uvs = obj.data.uv_layers
    lightmap_uv = uvs.new()
    lightmap_uv.name = 'lightmap'
    obj_uv = outData['objs'][obj.name]
    obj_uv['lightmap_uv'] = lightmap_uv.name
    return lightmap_uv
def objectBbox(obj):
    """Return a bounding sphere for obj in world space.

    Transforms the eight local-space bound_box corners by the object's
    world matrix and returns {'center': [x, y, z], 'radius': r}, with
    values rounded to 3 decimal places.
    """
    corners = numpy.array([obj.matrix_world @ Vector(pt) for pt in obj.bound_box])
    center = numpy.average(corners, axis=0)
    radius = numpy.linalg.norm(corners - center, axis=1).max()
    return {'center': [round(x, 3) for x in center], 'radius': round(radius, 3)}
def storeExistingUvLayer(outData, obj):
    """Record the name of obj's currently-active (render) UV layer in
    outData['objs'][obj.name]['render_uv'], if the mesh has one."""
    entry = outData['objs'][obj.name]
    try:
        # uv_layers.active is None for meshes without UVs, which makes the
        # .name access raise AttributeError -- in that case store nothing.
        entry['render_uv'] = obj.data.uv_layers.active.name
    except AttributeError:
        pass
def delete_extra_objs():
    """Keep only the objects in the single local (non-linked) collection
    not named 'Collection'; delete every other object in the file."""
    try:
        # Expect exactly one such collection; tuple unpacking enforces it.
        key_collection, = [c for c in bpy.data.collections if c.name != 'Collection' and c.library is None]
    except ValueError:
        # zero or multiple candidates: log what we found, then re-raise
        log.error(list(bpy.data.collections))
        raise
    select_objects_in_collection(key_collection)
    keep = len(bpy.context.selected_objects)
    # Invert the selection so everything *outside* the key collection dies.
    bpy.ops.object.select_all(action='INVERT')
    dump = len(bpy.context.selected_objects)
    log.info(f'keeping {keep} objects, deleting {dump}')
    bpy.ops.object.delete(confirm=False)
def main():
    """Bake-prep pipeline: open the .blend file given as the last CLI arg,
    prune extra objects, dice the ground mesh, split multi-material
    objects, build lightmap UVs, fix image paths, then save the staged
    scene and export a .glb."""
    input_scene = Path(sys.argv[-1])
    output_export = dest / 'serve' / input_scene.relative_to(src).parent / input_scene.name.replace('.blend', '.glb')
    output_export.parent.mkdir(parents=True, exist_ok=True)
    # Per-object metadata (bboxes, UV layer names) collected for export.
    outData = {}
    log.info(f'open collection {input_scene}')
    bpy.ops.wm.open_mainfile(filepath=str(input_scene))

    def dice_ground():
        # Carve the ground object into a grid of 250-unit tiles.
        log.info('dice_ground')
        # NOTE(review): the low bounds stay at -750 on every pass; this
        # appears to work incrementally because each separate_rect removes
        # its geometry from 'gnd.001' -- confirm intent before changing.
        for xsplit in range(-750, 750, 250):
            for ysplit in range(-750, 750, 250):
                separate_rect('gnd.001', -750, xsplit + 250, -750, ysplit + 250)

    def separate_materials():
        # Split meshes so that each resulting object has a single material.
        log.info('separate_materials')
        for obj_name in all_mesh_objects():
            if len(bpy.data.objects[obj_name].material_slots) > 1:
                select_object(obj_name)
                bpy.ops.mesh.separate(type='MATERIAL')

    def lightmaps():
        # Give every mesh a dedicated 'lightmap' UV layer and pack it.
        log.info('lightmaps')
        for obj_name in all_mesh_objects():
            # if not obj_name.startswith('sign_board'): continue
            try:
                obj = select_object(obj_name)
            except Exception as exc:
                log.warning(f'lightmap_pack failed on {obj_name}: {exc!r}')
                continue
            outData.setdefault('objs', {}).setdefault(obj_name, {})['worldBbox'] = objectBbox(obj)
            storeExistingUvLayer(outData, obj)
            lyr = make_lightmap_uv_layer(obj, outData)
            obj.data.uv_layers.active = lyr
            log.info(f'start lightmap_pack on {obj_name}; active uv is {obj.data.uv_layers.active.name}')
            try:
                bpy.ops.uv.lightmap_pack(
                    PREF_CONTEXT='ALL_FACES',
                    PREF_PACK_IN_ONE=True,
                    PREF_NEW_UVLAYER=False,
                )
            except Exception as exc:
                log.warning(f'lightmap_pack failed on {obj_name}: {exc!r}')

    def rel_paths():
        # Rewrite absolute image paths to blend-relative ('//') form.
        log.info('rel_paths')
        # bpy.ops.file.make_paths_relative() is not working; it makes like
        # '//../../../home/drewp/own/proj_shared/megasecond/client/asset/wrap/gnd_dif.png'
        for img in bpy.data.images.values():
            prev = img.filepath
            img.filepath = re.sub(r'.*/megasecond/client/asset/wrap/', '//', img.filepath)
            if img.filepath != prev:
                log.info(f'fix path from {prev} to {img.filepath}')
            log.info(f' * image at {img.filepath}')

    delete_extra_objs()
    if 'gnd.001' in bpy.data.objects:
        dice_ground()
    separate_materials()
    lightmaps()
    rel_paths()
    # write obj list so we can make deps?
    saveScene(dest / 'stage/bake' / input_scene.relative_to(src))
    writeGlb(output_export, select=None, with_materials=True)


main()
|
<filename>S12/tensornet/engine/learner.py
import torch
import torch.nn.functional as F
from tensornet.engine.ops.regularizer import l1
from tensornet.data.processing import InfiniteDataLoader
from tensornet.utils.progress_bar import ProgressBar
class Learner:
    """Drives training and validation of a PyTorch model, tracking loss
    and accuracy history and dispatching recognized LR-scheduler callbacks.
    """

    def __init__(
        self, model, optimizer, criterion, train_loader, device='cpu',
        epochs=1, val_loader=None, l1_factor=0.0, callbacks=None
    ):
        """Train and validate the model.

        Args:
            model (torch.nn.Module): Model Instance.
            optimizer (torch.optim): Optimizer for the model.
            criterion (torch.nn): Loss Function.
            train_loader (torch.utils.data.DataLoader): Training data loader.
            device (str or torch.device, optional): Device where the data
                will be loaded. (default='cpu')
            epochs (int, optional): Numbers of epochs/iterations to train the
                model for. (default: 1)
            val_loader (torch.utils.data.DataLoader, optional): Validation data
                loader. (default: None)
            l1_factor (float, optional): L1 regularization factor. (default: 0)
            callbacks (list, optional): List of callbacks to be used during
                training. (default: None)
        """
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.train_loader = train_loader
        self.device = device
        self.epochs = epochs
        self.val_loader = val_loader
        self.l1_factor = l1_factor

        # Recognized scheduler callbacks, keyed by role.
        self.callbacks = {
            'step_lr': None,
            'lr_plateau': None,
            'one_cycle_policy': None
        }
        if callbacks is not None:  # idiom fix: was 'not callbacks is None'
            self._setup_callbacks(callbacks)

        # Training history
        self.train_losses = []      # Change in loss
        self.train_accuracies = []  # Change in accuracy
        self.train_correct = 0      # Total correctly predicted samples so far
        self.train_processed = 0    # Total predicted samples so far

        # Validation history
        self.val_losses = []      # Change in loss
        self.val_accuracies = []  # Change in accuracy

    def _setup_callbacks(self, callbacks):
        """Sort the given callbacks into the recognized scheduler slots."""
        for callback in callbacks:
            if isinstance(callback, torch.optim.lr_scheduler.StepLR):
                self.callbacks['step_lr'] = callback
            elif isinstance(callback, torch.optim.lr_scheduler.ReduceLROnPlateau):
                self.callbacks['lr_plateau'] = callback
            elif isinstance(callback, torch.optim.lr_scheduler.OneCycleLR):
                self.callbacks['one_cycle_policy'] = callback

    def update_training_history(self, loss, accuracy):
        """Update the training history."""
        self.train_losses.append(loss)
        self.train_accuracies.append(accuracy)

    def reset_history(self):
        """Reset the training and validation history."""
        self.train_losses = []
        self.train_accuracies = []
        self.train_correct = 0
        self.train_processed = 0
        self.val_losses = []
        self.val_accuracies = []

    def train_batch(self, data, target):
        """Train the model on a batch of data.

        Args:
            data: Input batch for the model.
            target: Expected batch of labels for the data.

        Returns:
            Batch loss as a float.  (Doc fix: the original docstring
            claimed predictions were returned too; they are not.)
        """
        data, target = data.to(self.device), target.to(self.device)
        self.optimizer.zero_grad()  # Clear gradients before backpropagation
        y_pred = self.model(data)  # Predict output
        # Criterion loss plus optional L1 regularization penalty.
        loss = l1(self.model, self.criterion(y_pred, target), self.l1_factor)

        # Perform backpropagation
        loss.backward()
        self.optimizer.step()

        # Track running accuracy counters.
        pred = y_pred.argmax(dim=1, keepdim=True)
        self.train_correct += pred.eq(target.view_as(pred)).sum().item()
        self.train_processed += len(data)

        # One Cycle Policy steps per batch, not per epoch.
        if self.callbacks['one_cycle_policy'] is not None:
            self.callbacks['one_cycle_policy'].step()

        return loss.item()

    def train_epoch(self):
        """Run an epoch of model training."""
        self.model.train()
        pbar = ProgressBar(target=len(self.train_loader), width=8)
        for batch_idx, (data, target) in enumerate(self.train_loader, 0):
            # Train a batch
            loss = self.train_batch(data, target)

            # Update Progress Bar
            accuracy = 100 * self.train_correct / self.train_processed
            pbar.update(batch_idx, values=[
                ('loss', round(loss, 2)), ('accuracy', round(accuracy, 2))
            ])

        # Record the epoch's final loss/accuracy (removed the redundant
        # recomputation of the same accuracy value after the loop).
        self.update_training_history(loss, accuracy)
        pbar.add(1, values=[
            ('loss', round(loss, 2)), ('accuracy', round(accuracy, 2))
        ])

    def train_iterations(self):
        """Train the model for 'self.epochs' number of *batches*."""
        self.model.train()
        pbar = ProgressBar(target=self.epochs, width=8)
        iterator = InfiniteDataLoader(self.train_loader)
        # (removed unused 'correct'/'processed' locals)
        for iteration in range(self.epochs):
            # Train a batch
            data, target = iterator.get_batch()
            loss = self.train_batch(data, target)

            # Update Progress Bar
            accuracy = 100 * self.train_correct / self.train_processed
            pbar.update(iteration, values=[
                ('loss', round(loss, 2)), ('accuracy', round(accuracy, 2))
            ])

            # Update training history
            self.update_training_history(loss, accuracy)
        pbar.add(1, values=[
            ('loss', round(loss, 2)), ('accuracy', round(accuracy, 2))
        ])

    def validate(self, verbose=True):
        """Validate an epoch of model training.

        Args:
            verbose: Print validation loss and accuracy.
        """
        self.model.eval()
        val_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in self.val_loader:
                data, target = data.to(self.device), target.to(self.device)  # Get samples
                output = self.model(data)  # Get trained model output
                val_loss += self.criterion(output, target).item()  # Sum up batch loss
                pred = output.argmax(dim=1, keepdim=True)  # Index of the max log-probability
                correct += pred.eq(target.view_as(pred)).sum().item()

        # Average loss over the whole dataset; accuracy as a percentage.
        val_loss /= len(self.val_loader.dataset)
        val_accuracy = 100. * correct / len(self.val_loader.dataset)

        self.val_losses.append(val_loss)
        self.val_accuracies.append(val_accuracy)

        if verbose:
            print(
                f'Validation set: Average loss: {val_loss:.4f}, Accuracy: {val_accuracy:.2f}%\n'
            )

    def fit(self):
        """Perform model training."""
        self.reset_history()
        for epoch in range(1, self.epochs + 1):
            print(f'Epoch {epoch}:')

            # Train an epoch
            self.train_epoch()

            # Step LR scheduler advances once per epoch.
            if self.callbacks['step_lr'] is not None:
                self.callbacks['step_lr'].step()

            # Validate the model
            if self.val_loader is not None:
                self.validate()

                # Reduce-LR-on-plateau monitors the validation loss, so it
                # only makes sense (and only has data) when we validated.
                if self.callbacks['lr_plateau'] is not None:
                    self.callbacks['lr_plateau'].step(self.val_losses[-1])
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...config import cfg
from ..model_utils.pytorch_utils import Empty
class VoxelFeatureExtractor(nn.Module):
    """Abstract base class for voxel feature extractors.

    Subclasses must report their output feature dimension and implement
    the forward pass.
    """

    def __init__(self, **kwargs):
        # kwargs accepted (and ignored) so configs can pass extra options.
        super().__init__()

    def get_output_feature_dim(self):
        """Return the number of output feature channels."""
        raise NotImplementedError

    def forward(self, **kwargs):
        raise NotImplementedError
class MeanVoxelFeatureExtractor(VoxelFeatureExtractor):
    """Voxel feature extractor that averages the points in each voxel."""

    def __init__(self, **kwargs):
        super().__init__()

    def get_output_feature_dim(self):
        # Output dim equals the number of input point features in use.
        return cfg.DATA_CONFIG.NUM_POINT_FEATURES['use']

    def forward(self, features, num_voxels, **kwargs):
        """
        :param features: (N, max_points_of_each_voxel, 3 + C)
        :param num_voxels: (N)
        :param kwargs:
        :return: (N, 3 + C) per-voxel mean of the point features
        """
        # Sum over the points axis, then divide by the true point count per
        # voxel.  (Removed the redundant no-op `[:, :, :]` slice that copied
        # the input for no reason.)  Assumes padding rows are zero so they
        # do not bias the mean -- TODO confirm against the voxel generator.
        points_mean = features.sum(dim=1, keepdim=False) / num_voxels.type_as(features).view(-1, 1)
        return points_mean.contiguous()
def get_paddings_indicator(actual_num, max_num, axis=0):
    """Build a boolean mask marking the real (non-padding) entries of a
    padded tensor.

    Args:
        actual_num: tensor of true element counts per row.
        max_num: int, padded length along the masked axis.
        axis: axis along which the counts apply.

    Returns:
        Bool tensor with True where position < actual_num for that row.
    """
    counts = torch.unsqueeze(actual_num, axis + 1)
    # Build an index ramp [0, 1, ..., max_num) shaped so it broadcasts
    # against the counts along the padded axis.
    ramp_shape = [1] * len(counts.shape)
    ramp_shape[axis + 1] = -1
    ramp = torch.arange(max_num, dtype=torch.int, device=counts.device).view(ramp_shape)
    # e.g. counts [[3],[4],[2]] vs ramp [0,1,2,3,4]: True exactly for the
    # positions below each row's count.
    return counts.int() > ramp
class PFNLayer(nn.Module):
    """Single Pillar Feature Net layer: linear -> norm -> ReLU -> per-pillar
    max pool, optionally concatenating the pooled maximum back onto every
    point feature (when not the last layer).

    The Pillar Feature Net could be composed of a series of these layers,
    but the PointPillars paper results only used a single PFNLayer.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 use_norm=True,
                 last_layer=False):
        """
        :param in_channels: <int>. Number of input channels.
        :param out_channels: <int>. Number of output channels.
        :param use_norm: <bool>. Whether to include BatchNorm.
        :param last_layer: <bool>. If last_layer, there is no concatenation of features.
        """
        super().__init__()
        self.name = 'PFNLayer'
        self.last_vfe = last_layer
        # Non-final layers emit half the channels; the other half comes
        # from the concatenated per-pillar max.
        self.units = out_channels if self.last_vfe else out_channels // 2
        if use_norm:
            self.linear = nn.Linear(in_channels, self.units, bias=False)
            self.norm = nn.BatchNorm1d(self.units, eps=1e-3, momentum=0.01)
        else:
            self.linear = nn.Linear(in_channels, self.units, bias=True)
            self.norm = Empty(self.units)

    def forward(self, inputs):
        projected = self.linear(inputs)
        # BatchNorm1d expects (N, C): flatten pillars*points, normalize,
        # then restore the (pillars, points, channels) shape.
        n_pillars, n_points, n_channels = projected.shape
        normed = self.norm(projected.view(-1, n_channels)).view(n_pillars, n_points, n_channels)
        activated = F.relu(normed)
        # Per-pillar max over the points axis.
        pooled = torch.max(activated, dim=1, keepdim=True)[0]
        if self.last_vfe:
            return pooled
        repeated = pooled.repeat(1, inputs.shape[1], 1)
        return torch.cat([activated, repeated], dim=2)
class PillarFeatureNetOld2(VoxelFeatureExtractor):
    """Pillar Feature Net: decorates raw points with cluster- and
    pillar-center offsets and runs them through a stack of PFNLayers
    (see the PointPillars paper)."""

    def __init__(self,
                 num_input_features=4,
                 use_norm=True,
                 num_filters=(64, ),
                 with_distance=False,
                 voxel_size=(0.2, 0.2, 4),
                 pc_range=(0, -40, -3, 70.4, 40, 1),
                 expand=None):
        """
        Pillar Feature Net.
        The network prepares the pillar features and performs forward pass through PFNLayers.
        :param num_input_features: <int>. Number of input features, either x, y, z or x, y, z, r.
        :param use_norm: <bool>. Whether to include BatchNorm.
        :param num_filters: (<int>: N). Number of features in each of the N PFNLayers.
        :param with_distance: <bool>. Whether to include Euclidean distance to points.
        :param voxel_size: (<float>: 3). Size of voxels, only utilize x and y size.
        :param pc_range: (<float>: 6). Point cloud range, only utilize x and y min.
        :param expand: mode switch; '2' drops the raw point channels and keeps
            only the offset decorations -- presumably an ablation; TODO confirm.
        """
        super().__init__()
        self.name = 'PillarFeatureNetOld2'
        self.expand = expand
        assert len(num_filters) > 0
        # +6 for the cluster-offset (3) and pillar-center-offset (3) decorations
        num_input_features += 6
        if with_distance:
            num_input_features += 1
        if self.expand == '2':
            # raw x, y, z, r channels are excluded in this mode
            num_input_features -= 4
        self.with_distance = with_distance
        self.num_filters = num_filters
        # Create PillarFeatureNetOld layers
        num_filters = [num_input_features] + list(num_filters)
        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            # only the final layer skips the max-feature concatenation
            if i < len(num_filters) - 2:
                last_layer = False
            else:
                last_layer = True
            pfn_layers.append(
                PFNLayer(in_filters, out_filters, use_norm, last_layer=last_layer)
            )
        self.pfn_layers = nn.ModuleList(pfn_layers)

        # Need pillar (voxel) size and x/y offset in order to calculate pillar offset
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.vz = voxel_size[2]
        self.x_offset = self.vx / 2 + pc_range[0]
        self.y_offset = self.vy / 2 + pc_range[1]
        self.z_offset = self.vz / 2 + pc_range[2]

    def get_output_feature_dim(self):
        """Number of channels produced by the final PFNLayer."""
        return self.num_filters[-1]

    def forward(self, features, num_voxels, coords):
        """
        :param features: (N, max_points_of_each_voxel, 3 + C)
        :param num_voxels: (N)
        :param coords: voxel coordinates; columns 3/2/1 are used as x/y/z
            grid indices (presumably (batch, z, y, x) order -- TODO confirm
            against the voxel generator).
        :return: per-pillar feature tensor from the last PFNLayer (squeezed).
        """
        dtype = features.dtype
        # Find distance of x, y, and z from cluster center
        points_mean = features[:, :, :3].sum(dim=1, keepdim=True) / num_voxels.type_as(features).view(-1, 1, 1)
        f_cluster = features[:, :, :3] - points_mean
        # Find distance of x, y, and z from pillar center
        f_center = torch.zeros_like(features[:, :, :3])
        f_center[:, :, 0] = features[:, :, 0] - (coords[:, 3].to(dtype).unsqueeze(1) * self.vx + self.x_offset)
        f_center[:, :, 1] = features[:, :, 1] - (coords[:, 2].to(dtype).unsqueeze(1) * self.vy + self.y_offset)
        f_center[:, :, 2] = features[:, :, 2] - (coords[:, 1].to(dtype).unsqueeze(1) * self.vz + self.z_offset)
        # Combine together feature decorations
        if self.expand == '2':
            features_ls = [f_cluster, f_center]
        else:
            features_ls = [features, f_cluster, f_center]
        if self.with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features_ls.append(points_dist)
        features = torch.cat(features_ls, dim=-1)

        # The feature decorations were calculated without regard to whether pillar was empty. Need to ensure that
        # empty pillars remain set to zeros.
        voxel_count = features.shape[1]
        mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        features *= mask

        # Forward pass through PFNLayers
        for pfn in self.pfn_layers:
            features = pfn(features)

        return features.squeeze()
|
<reponame>douglaslab/cryoorigami<filename>bin/em_copystarcols.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2018-11-15 11:00:21
# @Author : <NAME> (<EMAIL>)
# @Link : http://example.org
# @Version : $Id$
import os
import sys
import argparse
import cryoorigami.origamiem as em
import cryoorigami.utilities as util
def main():
    """Copy/reset/invert columns of a RELION particle star file.

    Command-line driven: reads a particle star file, optionally copies
    alignment offsets/angles to prior columns (or vice versa), resets or
    inverts them, copies arbitrary user-specified columns, and writes the
    result plus an args.yaml record of the run.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-i", "--input", type=str, help="Particle star file")
    parser.add_argument("-o", "--output", type=str, help="Output directory", default=None)
    parser.add_argument("-cols", "--columns", type=str, help="Columns to copy", nargs='*', default=None)
    parser.add_argument("-copytopriors", "--copypriors", type=str, help="Copy offset and/or angle parameters to priors", default=None, choices=['angle', 'all', 'helix'])
    parser.add_argument("-copytooffsets", "--copyoffsets", type=str, help="Copy priors to angle and distance offset parameters", default=None, choices=['angle', 'all', 'helix'])
    # typo fix in user-visible help text: "columngs" -> "columns"
    parser.add_argument("-reset-priors", "--resetpriors", action='store_true', help="Delete prior offset and angle columns")
    parser.add_argument("-reset-offsets", "--resetoffsets", action='store_true', help="Assign angle and origin offsets to 0")
    parser.add_argument("-invert-psi", "--invertpsi", action='store_true', help="Invert psi angle")
    parser.add_argument("-invert-origin", "--invertorigin", action='store_true', help="Invert originX and originY")

    args = parser.parse_args()

    # Prepare args dict (also dumped to args.yaml for reproducibility)
    args_dict = {'input': args.input,
                 'output': args.output,
                 'columns': args.columns,
                 'copypriors': args.copypriors,
                 'copyoffsets': args.copyoffsets,
                 'resetpriors': args.resetpriors,
                 'resetoffsets': args.resetoffsets,
                 'invertpsi': args.invertpsi,
                 'invertorigin': args.invertorigin
                 }

    # Check if the input file exists
    if args_dict['input'] is None or not os.path.isfile(args_dict['input']):
        parser.print_help()
        sys.exit('Input file does not exist!')

    # Get the new column parameters
    if args_dict['columns'] is not None:
        new_column_parameters = util.parse_star_parameters(args_dict['columns'])
    else:
        new_column_parameters = None

    # Copy offsets to priors: overwrites any user-specified column mapping
    if args_dict['copypriors'] == 'all':
        new_column_parameters = {'rlnOriginX': 'rlnOriginXPrior',
                                 'rlnOriginY': 'rlnOriginYPrior',
                                 'rlnAnglePsi': 'rlnAnglePsiPrior',
                                 'rlnAngleRot': 'rlnAngleRotPrior',
                                 'rlnAngleTilt': 'rlnAngleTiltPrior'}
    elif args_dict['copypriors'] == 'angle':
        new_column_parameters = {'rlnAnglePsi': 'rlnAnglePsiPrior',
                                 'rlnAngleRot': 'rlnAngleRotPrior',
                                 'rlnAngleTilt': 'rlnAngleTiltPrior'}
    elif args_dict['copypriors'] == 'helix':
        # helix mode: everything except rot (the free angle along the helix)
        new_column_parameters = {'rlnOriginX': 'rlnOriginXPrior',
                                 'rlnOriginY': 'rlnOriginYPrior',
                                 'rlnAnglePsi': 'rlnAnglePsiPrior',
                                 'rlnAngleTilt': 'rlnAngleTiltPrior'}

    # Copy priors to offsets: overwrites again, taking precedence over both
    # --columns and --copytopriors
    if args_dict['copyoffsets'] == 'all':
        new_column_parameters = {'rlnOriginXPrior': 'rlnOriginX',
                                 'rlnOriginYPrior': 'rlnOriginY',
                                 'rlnAnglePsiPrior': 'rlnAnglePsi',
                                 'rlnAngleRotPrior': 'rlnAngleRot',
                                 'rlnAngleTiltPrior': 'rlnAngleTilt'}
    elif args_dict['copyoffsets'] == 'angle':
        new_column_parameters = {'rlnAnglePsiPrior': 'rlnAnglePsi',
                                 'rlnAngleRotPrior': 'rlnAngleRot',
                                 'rlnAngleTiltPrior': 'rlnAngleTilt'}
    elif args_dict['copyoffsets'] == 'helix':
        new_column_parameters = {'rlnOriginXPrior': 'rlnOriginX',
                                 'rlnOriginYPrior': 'rlnOriginY',
                                 'rlnAnglePsiPrior': 'rlnAnglePsi',
                                 'rlnAngleTiltPrior': 'rlnAngleTilt'}

    # Create an EM project object
    new_project = em.Project(name='EMCopyColumns')
    new_project.set_output_directory(args_dict['output'], project_root='.')

    # Write parameters to args filename
    args_filename = new_project.output_directory+'/args.yaml'
    util.write_config_file(args_filename, args_dict)

    # Read particles
    new_project.read_particles(args_dict['input'])
    print('Read particle star file {}'.format(args_dict['input']))

    # Prepare input and output files
    new_project.prepare_io_files_star()

    # If reset priors option is ON
    if args_dict['resetpriors']:
        new_project.reset_priors()

    # If reset offset option is on
    if args_dict['resetoffsets']:
        new_project.reset_offsets()

    # If invert-psi option is on
    if args_dict['invertpsi']:
        new_project.invert_psi()

    # If invert-origin option is on
    if args_dict['invertorigin']:
        new_project.invert_origin()

    # Add new columns
    new_project.copy_columns(new_column_parameters)

    # Write output files
    new_project.write_output_files(write_ref_class_star=False)


if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.