text stringlengths 957 885k |
|---|
"""This script plots IVCV files together for files generated by QTC
Data must be provided in the format produced by QTC (convertible by convert_to_df)."""
import logging
import holoviews as hv
from forge.tools import convert_to_df, rename_columns
from forge.tools import plot_all_measurements, convert_to_EngUnits
from forge.specialPlots import *
class Stripscan:
    """Analysis class for QTC strip-scan measurement data.

    Converts the raw measurement data to a DataFrame, renames columns
    according to the configured aliases, applies unit conversions, and
    renders the standard plot set (base plots plus the histogram, box
    whisker and violin special plots).
    """

    def __init__(self, data, configs):
        """Prepare the data for analysis.

        :param data: raw measurement data, convertible by convert_to_df
        :param configs: full configuration dictionary; this analysis reads
                        the "Stripscan" section
        """
        self.log = logging.getLogger(__name__)
        self.config = configs
        self.analysisName = "Stripscan"
        self.data = convert_to_df(data, abs=self.config.get("abs_value_only", False))
        self.data = rename_columns(
            self.data,
            self.config.get(self.analysisName, {}).get("Measurement_aliases", {}),
        )
        self.finalPlot = None
        self.df = []
        self.measurements = self.data["columns"]
        # Stays an empty tuple if the sanity check below fails, so run()
        # callers can detect an unusable analysis.
        self.donts = ()
        if "Pad" not in self.measurements:
            self.log.error("No 'Pad' column found in data. Analysis cannot be done!")
            return
        self.xrow = "Pad"
        self.PlotDict = {"Name": self.analysisName}
        self.donts = ["Pad", "Name"]
        # Convert the units to the desired ones. Use a defensive .get for the
        # analysis section, consistent with the alias lookup above.
        for meas in self.measurements:
            unit = (
                self.config.get(self.analysisName, {})
                .get(meas, {})
                .get("UnitConversion", None)
            )
            if unit:
                self.data = convert_to_EngUnits(self.data, meas, unit)

    def run(self):
        """Run the analysis and return the dictionary of generated plots.

        :return: dict with the individual plot groups, the combined layout
                 under "All", and the underlying data under "data"
        """
        # Plot all measurements against the Pad column
        self.basePlots = plot_all_measurements(
            self.data, self.config, self.xrow, self.analysisName, do_not_plot=self.donts
        )
        self.PlotDict["BasePlots"] = self.basePlots
        self.PlotDict["All"] = self.basePlots
        # Concatenated histogram over all measurements
        self.Histogram = dospecialPlots(
            self.data,
            self.config,
            self.analysisName,
            "concatHistogram",
            self.measurements,
            **self.config[self.analysisName]
            .get("AuxOptions", {})
            .get("concatHistogram", {})
        )
        if self.Histogram:
            self.PlotDict["Histogram"] = self.Histogram
            self.PlotDict["All"] = self.PlotDict["All"] + self.Histogram
        # Box whisker plot
        self.WhiskerPlots = dospecialPlots(
            self.data, self.config, self.analysisName, "BoxWhisker", self.measurements
        )
        if self.WhiskerPlots:
            self.PlotDict["Whiskers"] = self.WhiskerPlots
            self.PlotDict["All"] = self.PlotDict["All"] + self.WhiskerPlots
        # Violin plot
        self.Violin = dospecialPlots(
            self.data, self.config, self.analysisName, "Violin", self.measurements
        )
        if self.Violin:
            self.PlotDict["Violin"] = self.Violin
            self.PlotDict["All"] = self.PlotDict["All"] + self.Violin
        # Per-measurement histogram
        self.singleHist = dospecialPlots(
            self.data,
            self.config,
            self.analysisName,
            "Histogram",
            self.measurements,
            **self.config[self.analysisName]
            .get("AuxOptions", {})
            .get("singleHistogram", {})
        )
        if self.singleHist:
            self.PlotDict["singleHistogram"] = self.singleHist
            self.PlotDict["All"] = self.PlotDict["All"] + self.singleHist
        # Apply the configured layout options to the combined plot
        self.PlotDict["All"] = config_layout(
            self.PlotDict["All"], **self.config[self.analysisName].get("Layout", {})
        )
        self.PlotDict["data"] = self.data
        return self.PlotDict
|
#!/usr/bin/env python
# Copyright (c) 2015 by <NAME>. All Rights Reserved.
"""The test grid:
0 2 5
1 1 3
2 1 1
"""
import unittest
import walk_grid
GRID = [[0, 2, 5], [1, 1, 3], [2, 1, 1]]
FOOD = 12
STEPS = 18
SMALL_GRID = [[0, 2], [1, 3]]
SMALL_COSTS = set([4, 5])
SMALL_STEPS = 4
class _BadDirection(object):
"""A bad direction for testing direction type."""
def __init__(self, value):
"""Set the value."""
self.value = value
class TestWalker(unittest.TestCase):
    """Tests for walk_grid.Walker on the 3x3 reference grid."""

    def setUp(self):
        self.walker = walk_grid.Walker(GRID)

    def test_create(self):
        self.assertNotEqual(self.walker, None)
        self.assertEqual(self.walker.position, (0, 0))
        self.assertEqual(self.walker.consumed, 0)
        self.assertEqual(self.walker.step_counter, 0)

    def test_copy(self):
        self.walker.position = (1, 2)
        self.walker.consumed = 5
        self.walker.step_counter = 4
        walker2 = self.walker.copy()
        self.assertEqual(walker2.position, (1, 2))
        self.assertEqual(walker2.consumed, 5)
        self.assertEqual(walker2.grid, GRID)
        # copy() deliberately resets the step counter of the clone
        self.assertEqual(walker2.step_counter, 0)

    def test_assert_square(self):
        # Fix: spelling of the local variable ("asymetric" -> "asymmetric")
        asymmetric_grid = [[0, 2], [1, 1], [2, 1]]
        self.assertRaises(AssertionError, walk_grid.Walker, asymmetric_grid)

    def test_assert_upper_left_zero(self):
        non_zero_grid = [[5, 2], [1, 1], [2, 1]]
        self.assertRaises(AssertionError, walk_grid.Walker, non_zero_grid)

    def test_in_bound(self):
        self.assertTrue(self.walker.in_bound(0))
        self.assertTrue(self.walker.in_bound(1))
        self.assertFalse(self.walker.in_bound(3))
        self.assertFalse(self.walker.in_bound(-1))

    def test_step_down(self):
        self.walker.step(walk_grid.DOWN)
        self.assertEqual(self.walker.position, (0, 1))
        self.assertEqual(self.walker.step_counter, 1)

    def test_step_down_error(self):
        self.walker.position = (0, 2)
        self.assertRaises(walk_grid.BoundError, self.walker.step, walk_grid.DOWN)

    def test_step_right(self):
        self.walker.step(walk_grid.RIGHT)
        self.assertEqual(self.walker.position, (1, 0))
        self.assertEqual(self.walker.step_counter, 1)

    def test_step_right_error(self):
        self.walker.position = (2, 0)
        self.assertRaises(walk_grid.BoundError, self.walker.step, walk_grid.RIGHT)

    def test_direction_error(self):
        self.assertRaises(walk_grid.NotDirectionError, self.walker.step,
                          _BadDirection('other'))

    def test_consume(self):
        self.walker.position = (2, 1)
        self.walker.consume()
        # Fix: the original test called consume() without asserting anything.
        # Position (x=2, y=1) sits on GRID[1][2] == 3, consistent with the
        # 11 == 2+5+3+1 total asserted in test_walk_path below.
        self.assertEqual(self.walker.consumed, 3)

    def test_walk_path(self):
        self.walker.step(walk_grid.RIGHT)
        self.walker.consume()
        self.walker.step(walk_grid.RIGHT)
        self.walker.consume()
        self.walker.step(walk_grid.DOWN)
        self.walker.consume()
        self.walker.step(walk_grid.DOWN)
        self.walker.consume()
        self.assertEqual(self.walker.consumed, 11)
        self.assertEqual(self.walker.step_counter, 4)

    def test_at_end(self):
        self.assertFalse(self.walker.at_end())
        self.walker.position = (1, 1)
        self.assertFalse(self.walker.at_end())
        self.walker.position = (2, 2)
        self.assertTrue(self.walker.at_end())
class TestCollector(unittest.TestCase):
    """Exercise walk_grid.Collector cost collection and least_left lookup."""

    def setUp(self):
        self.collector = walk_grid.Collector(GRID)

    def test_create(self):
        self.assertNotEqual(self.collector, None)
        self.assertEqual(self.collector.costs, None)

    def test_costs_recursive_small(self):
        self.collector = walk_grid.Collector(SMALL_GRID)
        small_walker = walk_grid.Walker(SMALL_GRID)
        found = self.collector.collect_costs_recursive(small_walker)
        self.assertEqual(found, SMALL_COSTS)
        self.assertEqual(self.collector.steps, SMALL_STEPS)

    def test_costs_recursive(self):
        self.collector = walk_grid.Collector(GRID)
        full_walker = walk_grid.Walker(GRID)
        found = self.collector.collect_costs_recursive(full_walker)
        self.assertEqual(found, set([4, 5, 6, 7, 11]))
        self.assertEqual(self.collector.steps, STEPS)

    def test_costs(self):
        self.collector = walk_grid.Collector(GRID)
        self.assertEqual(self.collector.collect_costs(), set([4, 5, 6, 7, 11]))

    def test_least_left(self):
        # Table of (food budget, expected leftover) pairs.
        for food, expected in ((12, 1), (8, 1), (9, 2), (11, 0), (4, 0)):
            self.assertEqual(self.collector.least_left(food), expected)

    def test_least_left_out(self):
        # Below the cheapest path cost the collector reports -1.
        self.assertEqual(self.collector.least_left(3), -1)
class TestTrimCollector(TestCollector):
    """Re-run the Collector suite against TrimCollector, plus trim cases."""

    def setUp(self):
        self.collector = walk_grid.TrimCollector(GRID)

    def _test_collect_trim_recursive(self, food, best_cost, steps):
        """Assert the best reachable cost and step count for a food budget."""
        fresh_walker = walk_grid.Walker(GRID)
        best = self.collector.collect_trim_recursive(fresh_walker, food)
        self.assertEqual(best, best_cost)
        self.assertEqual(self.collector.steps, steps)

    def test_collect_trim_5(self):
        self._test_collect_trim_recursive(5, 5, 9)

    def test_collect_trim_6(self):
        self._test_collect_trim_recursive(6, 5, 9)

    def test_collect_trim_7(self):
        self._test_collect_trim_recursive(7, 7, 7)

    def test_collect_trim_8(self):
        self._test_collect_trim_recursive(8, 7, 7)

    def test_collect_trim_9(self):
        self._test_collect_trim_recursive(9, 7, 7)

    def test_collect_trim_10(self):
        self._test_collect_trim_recursive(10, 7, 7)

    def test_collect_trim_11(self):
        self._test_collect_trim_recursive(11, 11, 4)

    def test_collect_trim_12(self):
        self._test_collect_trim_recursive(12, 11, 4)

    def test_least_left(self):
        # Table of (food budget, expected leftover) pairs.
        for food, expected in ((8, 1), (9, 2), (11, 0), (4, 0), (7, 0), (12, 1)):
            self.assertEqual(self.collector.least_left(food), expected)

    def test_least_left_out(self):
        self.assertEqual(self.collector.least_left(3), -1)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
# Project 2 for AERO 351 Fall 2021
# Texas A&M University
# <NAME>
# UIN: 627005116
# Professor <NAME>
from math import *
import matplotlib.pyplot as plt
import numpy as np
# Problem Statement:
# A scramjet engine flies at altitude H = 10, 000 + A m. The inlet diffuser losses are
# σ0d = 70 + B%, the combustor losses are σ0c = 90 + C%, the combustor efficiency is
# ξcomb = 96%, and the nozzle losses are σ0n = 92 + D%. One can assume the engine is
# using standard fuel.
# Part A
# Assuming the ratio of specific heat capacities is constant, γ = 1.4, calculate the
# specific thrust, the thrust specific fuel consumption, the propulsion, thermal and
# overall efficiencies, and the fuel to air ratio of the engine operating between Mach
# 2 and 5, at T03 = 2400 K. Plot the variation of these quantities vs. Mach number
# using a Mach number increment of 0.2 or less.
# UIN parameters (last four digits of the student UIN drive the problem data)
a = 5
b = 1
c = 1
d = 6
A = 100*a  # altitude offset [m]: H = 10,000 + A
B = 0.5*b  # diffuser-loss offset [%]: sigma_0d = 70 + B
C = 0.4*c  # combustor-loss offset [%]: sigma_0c = 90 + C
D = 0.5*d  # nozzle-loss offset [%]: sigma_0n = 92 + D
M = np.arange(2, 5.05, 0.05)  # Mach sweep 2..5, step 0.05 (<= 0.2 as required)
# Module-level result accumulators, filled by Scramjet() and plotted below.
thrust_array = []
TSFC_array = []
propEff_array = []
thermEff_array = []
overEff_array = []
f_array = []
# assuming p_a / p_5 = 1
def Scramjet(diff_loss, comb_loss, nozz_loss, T_03, T_a, gam, r, lhv, comb_eff, c_p, p_a, a):
    """Sweep the module-level Mach array M and fill the module-level
    result arrays (specific thrust, TSFC, efficiencies, fuel/air ratio).

    NOTE(review): results are appended to module-level lists, so calling
    this function more than once duplicates entries; it is called exactly
    once below, before plotting.
    NOTE(review): the parameter `a` (speed of sound [m/s]) shadows the
    module-level UIN digit `a` — intended here, but easy to confuse.
    """
    for i in range(len(M)):
        # specific thrust
        p0a = p_a*(1+((gam - 1)/2)*(M[i]**2))**(gam/(gam-1))  # computed but unused below
        T_0a = T_a*(1+((gam-1)/2)*(M[i]**2))
        u_inlet = M[i]*a
        beta = (1 + ((gam - 1)/2)*(M[i]**2))*(((diff_loss*comb_loss*nozz_loss)*1)**((gam-1)/gam))
        # M_e = sqrt((2/(gam-1))*(beta - 1))
        u_e = sqrt(((2*gam)/(gam-1))*((beta-1)/beta)*r*T_03)
        # NOTE(review): denominator groups as comb_eff*lhv / (c_p*T_0a - T_03/T_0a);
        # verify against the intended f = (T03/T0a - 1)/(comb_eff*lhv/(c_p*T0a) - T03/T0a)
        f = ((T_03/T_0a) - 1)/((comb_eff*lhv)/(c_p*T_0a - T_03/T_0a))
        # the (1-1) factor is zero by the p_a/p_5 = 1 assumption stated above
        thrust = (1+f)*u_e*(1+(((gam-1)/(2*gam))*(1/(beta-1))*(1-1))) - M[i]*sqrt(gam*r*T_a)
        thrust_array.append(thrust)
        f_array.append(f)
        # TSFC
        TSFC = f/thrust
        TSFC_array.append(TSFC)
        # propulsion efficiency
        prop_eff = (thrust*u_inlet)/((1+f)*((u_e**2)/2)-((u_inlet**2)/2))
        propEff_array.append(prop_eff)
        # thermal efficiency
        therm_eff = (((1+f)*((u_e**2)/2))-((u_inlet**2)/2))/(f*lhv)
        thermEff_array.append(therm_eff)
        # overall efficiency
        over_eff = therm_eff * prop_eff
        overEff_array.append(over_eff)
    return thrust_array, TSFC_array, propEff_array, thermEff_array, overEff_array, f_array
# Part A givens: loss coefficients as fractions, stagnation temperature and
# standard-atmosphere values at the flight altitude.
diffLoss = (70+B)/100
combLoss = (90+C)/100
combEff = 96/100
nozzLoss = (92+D)/100
T03 = 2400
Ta = 220.67  # at altitude of 10500m
pa = 24540  # std atmosphere
a = 297.4  # speed of sound standard atmosphere
# Fills the module-level result arrays used by the plots below.
Scramjet(diffLoss, combLoss, nozzLoss, T03, Ta, 1.4, 287.16, 43500000, combEff, 1004, pa, a)
# Plotting (2x2 grid of subplots)
figure, axis = plt.subplots(2, 2)
# specific thrust vs mach
axis[0, 0].plot(M, thrust_array, color='blue')
axis[0, 0].set_xlabel('Mach Number, M')
axis[0, 0].set_ylabel('Specific Thrust [N-s/kg]')
axis[0, 0].set_title('Specific Thrust vs. Mach Number')
# TSFC vs mach
axis[0, 1].plot(M, TSFC_array, color='blue')
axis[0, 1].set_xlabel('Mach Number, M')
axis[0, 1].set_ylabel('TSFC [kg/(N-s)]')
axis[0, 1].set_title('TSFC vs. Mach Number')
# Efficiencies vs mach
axis[1, 0].plot(M, propEff_array, color='blue', label='Propulsion Efficiency')
axis[1, 0].plot(M, thermEff_array, color='red', label='Thermal Efficiency')
axis[1, 0].plot(M, overEff_array, color='orange', label='Overall Efficiency')
axis[1, 0].set_xlabel('Mach Number, M')
axis[1, 0].set_ylabel('Efficiencies')
axis[1, 0].set_title('Efficiencies vs. Mach Number')
axis[1, 0].legend()
# fuel to air ratio vs mach
axis[1, 1].plot(M, f_array, color='blue')
axis[1, 1].set_xlabel('Mach Number, M')
axis[1, 1].set_ylabel('Fuel to Air Ratio, f')
axis[1, 1].set_title('Fuel to Air Ratio vs. Mach Number')
plt.show()
# Part B
# Calculate the specific thrust, the thrust specific fuel consumption, the propulsion,
# thermal and overall efficiencies, and the fuel to air ratio of the engine operating
# at Mach 5 with T03 = 2400 K using variable γ.
# givens (re-stated; same values as Part A)
diffLoss = (70+B)/100
combLoss = (90+C)/100
combEff = 96/100
nozzLoss = (92+D)/100
T03 = 2400
R = 287.16
minL = 14.66
lhv = 43500000
# NOTE(review): M is rebound here from the Part-A numpy sweep array to the
# scalar Mach 5 used for Part B.
M = 5
# state a (inlet) — NOTE pa is now in bar, not Pa as in Part A
Ta = 220.67  # std atmosphere at 10500 m
pa = .24540  # bar std atmosphere at 10500 m
a = 297.4  # std atmosphere at 10500 m
ha = 220  # air tables
sa = 6.3927 - .28716*log(pa)
u = M * a
# state 0a (stagnation at inlet; kinetic energy converted with kJ/kg units)
h0a = ha + (u**2)/(2*1000)
s0a = sa
s0a_prime = 8.219
p0a = exp(-(s0a - s0a_prime)/.28716)
# state 02 (after diffuser losses)
p02 = p0a * diffLoss
h02 = h0a
s02 = s0a - .28716*log(diffLoss)
# state 03 (after combustor)
p03 = p02 * combLoss
h03_air = 2784.792  # interpolated from tables
h03_lam = 3031.476  # interpolated from tables
# NOTE(review): lam (excess-air ratio) expression mixes kJ and J terms and is
# divided by 1000 at the end — verify the grouping against the course notes.
lam = (((h03_lam*(1+minL)) - (combEff*lhv) - (h03_air*minL))/(minL*(h02-h03_air)))/1000
r = (1+minL)/(1+lam*minL)
q = ((lam - 1)*minL)/(1+lam*minL)
h03 = (r*h03_lam + q*h03_air)
# state 05 (after nozzle losses; stagnation enthalpy conserved)
h05 = h03
p05 = diffLoss*combLoss*nozzLoss*p0a
s05_lam = 9.423  # interpolated from tables
s05_air = 9.159  # interpolated from tables
s05_comb = (r*s05_lam + q*s05_air)
s05 = s05_comb - .28716*log(p05)
# state 5 (nozzle exit, expanded to ambient pressure)
s5 = s05
p5 = pa
s5_prime = s5 + .28716*log(p5)
h5 = 744.6
u_e = sqrt((h05-h5)*2000)
# fuel to air ratio
f = 1/(lam*minL)
# specific thrust
T_specific = u_e*(1+f) - u
# thrust specific fuel consumption (TSFC)
TSFC_b = f/T_specific
# thermal efficiency
n_th = (((1+f)*((u_e**2)/2))-((u**2)/2))/(f*lhv)
# propulsion efficiency
n_prop = (T_specific*u)/((1+f)*((u_e**2)/2)-((u**2)/2))
# overall efficiency
n_o = n_th * n_prop
print('Part B\nFuel to air ratio:', f, '\nSpecific Thrust:', T_specific, '\nTSFC:', TSFC_b, '\nThermal Efficiency:', n_th
      , '\nPropulsion Efficiency:', n_prop, '\nOverall Efficiency:', n_o)
|
import getopt, sys
import uno
from unohelper import Base, systemPathToFileUrl, absolutize
from os import getcwd
from os.path import splitext
from com.sun.star.beans import PropertyValue
from com.sun.star.uno import Exception as UnoException
from com.sun.star.io import IOException, XOutputStream
class OutputStream(Base, XOutputStream):
    """UNO XOutputStream implementation that forwards written bytes to stdout."""

    def __init__(self):
        # 0/1 integer flag, matching the original UNO-style convention
        self.closed = 0

    def closeOutput(self):
        """Mark the stream closed; there is nothing to release."""
        self.closed = 1

    def writeBytes(self, seq):
        """Write the raw payload carried by the UNO byte sequence."""
        sys.stdout.write(seq.value)

    def flush(self):
        """No buffering is done here, so flushing is a no-op."""
        pass
def main():
    """Convert the documents named on the command line through a running
    OpenOffice.org instance reached over a UNO socket bridge.

    Python 2 script (note the `except Exc, e:` syntax). Exit status is 0 on
    success, 1 if any conversion or the connection failed.
    """
    retVal = 0
    doc = None
    stdout = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hc:",
                ["help", "connection-string=" , "html", "pdf", "stdout" ])
        # Defaults: plain-text export over the standard local UNO socket.
        url = "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext"
        filterName = "Text (Encoded)"
        extension = "txt"
        for o, a in opts:
            if o in ("-h", "--help"):
                usage()
                sys.exit()
            if o in ("-c", "--connection-string" ):
                url = "uno:" + a + ";urp;StarOffice.ComponentContext"
            if o == "--html":
                filterName = "HTML (StarWriter)"
                extension = "html"
            if o == "--pdf":
                filterName = "writer_pdf_Export"
                extension = "pdf"
            if o == "--stdout":
                stdout = True
        if not len( args ):
            usage()
            sys.exit()
        # Resolve the remote office context through the local UNO bridge.
        ctxLocal = uno.getComponentContext()
        smgrLocal = ctxLocal.ServiceManager
        resolver = smgrLocal.createInstanceWithContext(
                 "com.sun.star.bridge.UnoUrlResolver", ctxLocal )
        ctx = resolver.resolve( url )
        smgr = ctx.ServiceManager
        desktop = smgr.createInstanceWithContext("com.sun.star.frame.Desktop", ctx )
        cwd = systemPathToFileUrl( getcwd() )
        # Export properties: chosen filter, overwrite allowed, and a stream
        # sink (only used for the --stdout branch below).
        outProps = (
            PropertyValue( "FilterName" , 0, filterName , 0 ),
            PropertyValue( "Overwrite" , 0, True , 0 ),
            PropertyValue( "OutputStream", 0, OutputStream(), 0)
        )
        # Trailing comma makes this a one-element tuple of properties.
        inProps = PropertyValue( "Hidden" , 0 , True, 0 ),
        for path in args:
            try:
                fileUrl = absolutize( cwd, systemPathToFileUrl(path) )
                doc = desktop.loadComponentFromURL( fileUrl , "_blank", 0, inProps )
                if not doc:
                    raise UnoException( "Couldn't open stream for unknown reason", None )
                if not stdout:
                    # Write next to the input file, swapping the extension.
                    (dest, ext) = splitext(path)
                    dest = dest + "." + extension
                    destUrl = absolutize( cwd, systemPathToFileUrl(dest) )
                    sys.stderr.write(destUrl + "\n")
                    doc.storeToURL(destUrl, outProps)
                else:
                    # "private:stream" routes output through OutputStream above.
                    doc.storeToURL("private:stream",outProps)
            except IOException, e:
                sys.stderr.write( "Error during conversion: " + e.Message + "\n" )
                retVal = 1
            except UnoException, e:
                sys.stderr.write( "Error ("+repr(e.__class__)+") during conversion:" + e.Message + "\n" )
                retVal = 1
            if doc:
                doc.dispose()
    except UnoException, e:
        sys.stderr.write( "Error ("+repr(e.__class__)+") :" + e.Message + "\n" )
        retVal = 1
    except getopt.GetoptError,e:
        sys.stderr.write( str(e) + "\n" )
        usage()
        retVal = 1
    sys.exit(retVal)
def usage():
    """Print command-line usage and option help to stderr."""
    # Adjacent string literals rely on implicit concatenation where a "+"
    # is absent at end of line.
    sys.stderr.write( "usage: ooextract.py --help | --stdout\n"+
                  " [-c <connection-string> | --connection-string=<connection-string>\n"+
                  " [--html|--pdf]\n"+
                  " [--stdout]\n"+
                  " file1 file2 ...\n"+
                  "\n" +
                  "Extracts plain text from documents and prints it to a file (unless --stdout is specified).\n" +
                  "Requires an OpenOffice.org instance to be running. The script and the\n"+
                  "running OpenOffice.org instance must be able to access the file with\n"+
                  "by the same system path. [ To have a listening OpenOffice.org instance, just run:\n"+
                  "openoffice \"-accept=socket,host=localhost,port=2002;urp;\" \n"
                  "\n"+
                  "--stdout \n" +
                  " Redirect output to stdout. Avoids writing to a file directly\n" +
                  "-c <connection-string> | --connection-string=<connection-string>\n" +
                  " The connection-string part of a uno url to where the\n" +
                  " the script should connect to in order to do the conversion.\n" +
                  " The strings defaults to socket,host=localhost,port=2002\n"
                  "--html \n"
                  " Instead of the text filter, the writer html filter is used\n"
                  "--pdf \n"
                  " Instead of the text filter, the pdf filter is used\n"
                  )
# Fix: guard the entry point so importing this module (e.g. to reuse
# OutputStream) does not immediately start a conversion and call sys.exit().
if __name__ == "__main__":
    main()
|
# Source: TinBane/python-magento — magento/sales.py
# -*- coding: UTF-8 -*-
'''
magento.sales
Allows to export/import sales orders from/into Magento,
to create invoices, shipments, credit memos
:license: BSD, see LICENSE for more details
'''
from .api import API
class Order(API):
    """
    Allows to import/export sales orders via the Magento API.
    """
    __slots__ = ()
    def list(self, filters=None):
        """
        Retrieve order list by filters
        :param filters: Dictionary of filters.
            Format :
                `{<attribute>:{<operator>:<value>}}`
            Example :
                `{'firstname':{'ilike':'sharoon'}}`
        :return: `list` of `dict`
        """
        return self.call('sales_order.list', [filters])
    def search(self, filters=None, fields=None, limit=None, page=1):
        """
        Retrieve order list by options using the search api. Using this the
        result can be paginated.
        :param filters: Dictionary of filters,
            `{<attribute>:{<operator>:<value>}}`
        :param fields: [<String: magento field names>, ...]
        :param limit: page limit (defaults to 1000)
        :param page: current page
        :return: `list` of `dict`
        """
        options = {
            # this client always requests orders regardless of import state
            'imported': False,
            'filters': filters or {},
            'fields': fields or [],
            'limit': limit or 1000,
            'page': page,
        }
        return self.call('sales_order.search', [options])
    def info(self, order_increment_id):
        """
        Retrieve order info
        :param order_increment_id: Order ID
        :return: `dict` of order data
        """
        return self.call(
            'sales_order.info', [order_increment_id]
        )
    def info_multi(self, order_ids):
        """
        This is the multicall version of :meth:`info`.
        :param order_ids: iterable of Order IDs
        :return: `list` of order data dicts, one per ID
        """
        return self.multiCall([
            [
                'sales_order.info', [order_id]
            ]
            for order_id in order_ids
        ])
    def addcomment(self, order_increment_id,
                   status, comment=None, notify=False):
        """
        Add comment to order or change its state
        :param order_increment_id: Order ID
        :param status: new order status
        :param comment: optional comment text (defaults to empty)
        :param notify: whether to notify the customer
        TODO: Identify possible values for status
        """
        if comment is None:
            comment = ""
        return bool(self.call(
            'sales_order.addComment',
            [order_increment_id, status, comment, notify]
        )
        )
    #: A proxy for :meth:`addcomment`
    addComment = addcomment
    def hold(self, order_increment_id):
        """
        Hold order
        :param order_increment_id: Order ID
        """
        return bool(self.call('sales_order.hold', [order_increment_id]))
    def unhold(self, order_increment_id):
        """
        Unhold order
        :param order_increment_id: Order ID
        """
        return bool(self.call('sales_order.unhold', [order_increment_id]))
    def cancel(self, order_increment_id):
        """
        Cancel an order
        :param order_increment_id: Order ID
        """
        return bool(self.call('sales_order.cancel', [order_increment_id]))
class CreditMemo(API):
    """
    Create and export sales-order credit memos via the Magento API.
    """
    __slots__ = ()

    def list(self, filters=None):
        """
        Retrieve the credit memo list matching the given filters.
        :param filters: Dictionary of filters,
            `{<attribute>:{<operator>:<value>}}`, e.g.
            `{'firstname':{'ilike':'sharoon'}}`
        :return: `list` of `dict`
        """
        return self.call('sales_order_creditmemo.list', [filters])

    def info(self, creditmemo_increment_id):
        """
        Retrieve the data of a single credit memo.
        :param creditmemo_increment_id: Credit memo increment ID
        :return: dict of credit memo data
        """
        args = [creditmemo_increment_id]
        return self.call('sales_order_creditmemo.info', args)

    def create(
            self,
            order_increment_id,
            creditmemo_data=None,
            comment=None,
            email=False,
            include_comment=False,
            refund_to_store_credit_amount=None):
        """
        Create a new credit memo for an order.
        :param order_increment_id: Order Increment ID (str)
        :param creditmemo_data: optional credit memo data dict of the form
            {
                'qtys': [
                    {'order_item_id': str, 'qty': int},
                    ...
                ],
                'shipping_amount': float,       # optional
                'adjustment_positive': float,   # optional
                'adjustment_negative': float,   # optional
            }
        :param comment: credit memo comment (str)
        :param email: send e-mail flag (bool, optional)
        :param include_comment: include comment in e-mail flag (bool, optional)
        :param refund_to_store_credit_amount: amount to refund to store
            credit (float)
        :return: str, increment id of the created credit memo
        """
        comment = '' if comment is None else comment
        payload = [
            order_increment_id, creditmemo_data, comment, email, include_comment, refund_to_store_credit_amount
        ]
        return self.call('sales_order_creditmemo.create', payload)

    def addcomment(self, creditmemo_increment_id,
                   comment, email=True, include_in_email=False):
        """
        Attach a new comment to a credit memo.
        :param creditmemo_increment_id: Credit memo increment ID
        :param comment: comment text
        :param email: send e-mail flag
        :param include_in_email: include comment in e-mail flag
        :return: bool
        """
        payload = [creditmemo_increment_id, comment, email, include_in_email]
        result = self.call('sales_order_creditmemo.addComment', payload)
        return bool(result)

    #: A proxy for :meth:`addcomment`
    addComment = addcomment

    def cancel(self, creditmemo_increment_id):
        """
        Cancel a credit memo.
        :param creditmemo_increment_id: Credit memo ID
        :return: bool
        """
        result = self.call(
            'sales_order_creditmemo.cancel', [creditmemo_increment_id]
        )
        return bool(result)
class Shipment(API):
    """
    Allows to create/export order shipments via the Magento API.
    """
    __slots__ = ()
    def list(self, filters=None):
        """
        Retrieve shipment list by filters
        :param filters: Dictionary of filters.
            Format :
                `{<attribute>:{<operator>:<value>}}`
            Example :
                `{'firstname':{'ilike':'sharoon'}}`
        :return: `list` of `dict`
        """
        return self.call('sales_order_shipment.list', [filters])
    def info(self, shipment_increment_id):
        """
        Retrieve shipment info
        :param shipment_increment_id: Shipment increment ID
        :return: `dict` of shipment data
        """
        return self.call('sales_order_shipment.info', [shipment_increment_id])
    def create(self, order_increment_id,
               items_qty, comment=None, email=True, include_comment=False):
        """
        Create new shipment for order
        :param order_increment_id: Order Increment ID
        :type order_increment_id: str
        :param items_qty: items qty to ship
        :type items_qty: associative array (order_item_id => qty) as dict
        :param comment: Shipment Comment
        :type comment: str
        :param email: send e-mail flag (optional)
        :type email: bool
        :param include_comment: include comment in e-mail flag (optional)
        :type include_comment: bool
        """
        if comment is None:
            comment = ''
        return self.call(
            'sales_order_shipment.create', [
                order_increment_id, items_qty, comment, email, include_comment
            ]
        )
    def addcomment(self, shipment_increment_id,
                   comment, email=True, include_in_email=False):
        """
        Add new comment to shipment
        :param shipment_increment_id: Shipment ID
        :param comment: comment text
        :param email: send e-mail flag
        :param include_in_email: include comment in e-mail flag
        :return: bool
        """
        return bool(
            self.call(
                'sales_order_shipment.addComment',
                [shipment_increment_id, comment, email, include_in_email]
            )
        )
    #: A proxy for :meth:`addcomment`
    addComment = addcomment
    def addtrack(self, shipment_increment_id, carrier, title, track_number):
        """
        Add new tracking number
        :param shipment_increment_id: Shipment ID
        :param carrier: Carrier Code
        :param title: Tracking title
        :param track_number: Tracking Number
        """
        return self.call(
            'sales_order_shipment.addTrack',
            [shipment_increment_id, carrier, title, track_number]
        )
    #: A proxy for :meth:`addtrack`
    addTrack = addtrack
    def removetrack(self, shipment_increment_id, track_id):
        """
        Remove tracking number
        :param shipment_increment_id: Shipment ID
        :param track_id: Tracking number to remove
        :return: bool
        """
        return bool(
            self.call(
                'sales_order_shipment.removeTrack',
                [shipment_increment_id, track_id]
            )
        )
    #: A proxy for :meth:`removetrack`
    removeTrack = removetrack
    def getcarriers(self, order_increment_id):
        """
        Retrieve list of allowed carriers for order
        :param order_increment_id: Order ID
        """
        return self.call(
            'sales_order_shipment.getCarriers', [order_increment_id]
        )
    #: A proxy for :meth:`getcarriers`
    getCarriers = getcarriers
    def sendinfo(self, shipment_increment_id, comment=''):
        """
        Send email with shipment data to customer
        :param shipment_increment_id: Shipment increment ID
        :param comment: optional comment included in the e-mail
        """
        return self.call(
            'sales_order_shipment.sendInfo', [shipment_increment_id, comment]
        )
    #: A proxy for :meth:`sendinfo`
    sendInfo = sendinfo
class Invoice(API):
    """
    Allows to create/export order invoices via the Magento API.
    """
    __slots__ = ()
    def list(self, filters=None):
        """
        Retrieve invoice list by filters
        :param filters: Dictionary of filters.
            Format :
                `{<attribute>:{<operator>:<value>}}`
            Example :
                `{'firstname':{'ilike':'sharoon'}}`
        :return: `list` of `dict`
        """
        return self.call('sales_order_invoice.list', [filters])
    def info(self, invoice_increment_id):
        """
        Retrieve invoice info
        :param invoice_increment_id: Invoice ID
        :return: `dict` of invoice data
        """
        return self.call(
            'sales_order_invoice.info', [invoice_increment_id]
        )
    def create(self, order_increment_id, items_qty,
               comment=None, email=True, include_comment=False):
        """
        Create new invoice for order
        :param order_increment_id: Order increment ID
        :type order_increment_id: str
        :param items_qty: Items quantity to invoice
        :type items_qty: dict
        :param comment: Invoice Comment
        :type comment: str
        :param email: send invoice on e-mail
        :type email: bool
        :param include_comment: Include comments in email
        :type include_comment: bool
        :rtype: str
        """
        return self.call(
            'sales_order_invoice.create',
            [order_increment_id, items_qty, comment, email, include_comment]
        )
    def addcomment(self, invoice_increment_id,
                   comment=None, email=False, include_comment=False):
        """
        Add comment to invoice or change its state
        :param invoice_increment_id: Invoice ID
        :param comment: optional comment text (defaults to empty)
        :param email: send e-mail flag
        :param include_comment: include comment in e-mail flag
        :return: bool
        """
        if comment is None:
            comment = ""
        return bool(
            self.call(
                'sales_order_invoice.addComment',
                [invoice_increment_id, comment, email, include_comment]
            )
        )
    #: A proxy for :meth:`addcomment`
    addComment = addcomment
    def capture(self, invoice_increment_id):
        """
        Capture Invoice
        :attention: You should check the invoice to see if it can be
            captured before attempting to capture an invoice, otherwise
            the API call will generate an error.
            Invoices have states as defined in the model
            Mage_Sales_Model_Order_Invoice:
                STATE_OPEN = 1
                STATE_PAID = 2
                STATE_CANCELED = 3
            Also note there is a method call in the model that checks this
            for you, canCapture(), and it also verifies that the payment is
            able to be captured, so the invoice state might not be the only
            condition that's required to allow it to be captured.
        :param invoice_increment_id: Invoice ID
        :rtype: bool
        """
        return bool(
            self.call('sales_order_invoice.capture', [invoice_increment_id])
        )
    def void(self, invoice_increment_id):
        """
        Void an invoice
        :param invoice_increment_id: Invoice ID
        :rtype: bool
        """
        return bool(
            self.call('sales_order_invoice.void', [invoice_increment_id])
        )
    def cancel(self, invoice_increment_id):
        """
        Cancel invoice
        :param invoice_increment_id: Invoice ID
        :rtype: bool
        """
        return bool(
            self.call('sales_order_invoice.cancel', [invoice_increment_id])
        )
|
# baseclasses/problems/pyMission_problem.py
"""
pyMission_problem
Holds the Segment, Profile and Problem classes for the mission solvers.
"""
import sys
import numpy
import copy
from .ICAOAtmosphere import ICAOAtmosphere
from .FluidProperties import FluidProperties
from ..utils import Error
class MissionProblem:
"""
Mission Problem Object:
This mission problem object should contain all of the information required
to analyze a single mission. A mission problem is made of profiles. All
profiles in a given mission problem must use consistent units.
Parameters
----------
name : str
A name for the mission
evalFuncs : iteratble object containing strings
The names of the functions the user wants evaluated for this mission
problem
"""
def __init__(self, name, **kwargs):
"""
Initialize the mission problem
"""
self.name = name
self.missionProfiles = []
self.missionSegments = []
self.funcNames = {}
self.currentDVs = {}
self.solveFailed = False
# Check for function list:
self.evalFuncs = set()
if "evalFuncs" in kwargs:
self.evalFuncs = set(kwargs["evalFuncs"])
self.segCounter = 1
self.solutionCounter = 0
self.states = None
def addProfile(self, profiles):
"""
Append a mission profile to the list. update the internal
segment indices to correspond
"""
# Check if profile is of type MissionProfile or list, otherwise raise Error
if type(profiles) == MissionProfile:
profiles = [profiles]
elif type(profiles) == list:
pass
else:
raise Error("addProfile() takes in either a list of or a single MissionProfile")
# Add the profiles to missionProfiles and segments to missionSegments
for prof in profiles:
# Check for consistent units
if len(self.missionProfiles) == 0:
self.englishUnits = prof.englishUnits
elif prof.englishUnits != self.englishUnits:
raise Error("Units are not consistent across all profiles.")
self.missionProfiles.append(prof)
for seg in prof.segments:
self.segCounter += 1
self.missionSegments.extend([seg])
for dvName in prof.dvList:
self.currentDVs[dvName] = prof.dvList[dvName].value
# end
return
def addVariablesPyOpt(self, pyOptProb):
"""
Add the current set of variables to the optProb object.
Parameters
----------
optProb : pyOpt_optimization class
Optimization problem definition to which variables are added
"""
for profile in self.missionProfiles:
for dvName in profile.dvList:
dv = profile.dvList[dvName]
pyOptProb.addVar(dvName, "c", scale=dv.scale, value=dv.value, lower=dv.lower, upper=dv.upper)
self.currentDVs[dvName] = dv.value
return pyOptProb
def checkForProfileDVs(self):
"""
Check if design variables have been added to this mission.
"""
for profile in self.missionProfiles:
if profile.dvList:
return True
return False
def setDesignVars(self, missionDVs):
"""
Pass the DVs to each of the profiles and have the profiles set the DVs
Parameters
----------
missionDVs : dict
Dictionary of variables which may or may not contain the
design variable names this object needs
"""
# Update the set of design variable values being used
for dv in self.currentDVs:
if dv in missionDVs:
self.currentDVs[dv] = missionDVs[dv]
for profile in self.missionProfiles:
profile.setDesignVars(missionDVs)
def evalDVSens(self, stepSize=1e-20):
"""
Evaluate the sensitivity of each of the 4 segment parameters
(Mach, Alt) with respect to the design variables
"""
dvSens = {}
# Perturbate each of the DV with complex step
for dvName in self.currentDVs:
tmpDV = {dvName: self.currentDVs[dvName] + stepSize * 1j}
profSens = []
for profile in self.missionProfiles:
profile.setDesignVars(tmpDV)
profSens.extend(profile.getSegmentParameters())
profile.setDesignVars(self.currentDVs)
# Replace the NaNs with 0
profSens = numpy.array(profSens)
indNaNs = numpy.isnan(profSens)
profSens[indNaNs] = 0.0
profSens = profSens.imag / stepSize
dvSens[dvName] = profSens
return dvSens
def getAltitudeCons(self, CAS, mach, alt):
"""
Solve for the altitude at which CAS=mach
"""
if type(CAS) == str and CAS in self.currentDVs:
CAS = self.currentDVs[CAS]
if type(mach) == str and mach in self.currentDVs:
mach = self.currentDVs[mach]
if type(alt) == str and alt in self.currentDVs:
alt = self.currentDVs[alt]
seg = self.missionSegments[0]
altIntercept = seg._solveMachCASIntercept(CAS, mach)
return alt - altIntercept
def getAltitudeConsSens(self, CAS, mach, alt, stepSize=1e-20):
    """
    Solve for the altitude sensitivity at which CAS=mach.

    Parameters
    ----------
    CAS, mach, alt : float or str
        Numbers, or names of current design variables. A sensitivity
        entry is produced for each argument that is a DV name.
    stepSize : float
        Complex-step size used for differentiation.

    Returns
    -------
    altSens : dict
        Mapping of DV name -> derivative of the constraint value.
    """
    seg = self.missionSegments[0]
    altSens = {}

    # Bug fix: resolve DV-name strings to their numeric values up front.
    # Previously, when both CAS and mach were DV names, the unperturbed
    # argument was passed to _solveMachCASIntercept as the raw string.
    CASVal = self.currentDVs[CAS] if type(CAS) == str and CAS in self.currentDVs else CAS
    machVal = self.currentDVs[mach] if type(mach) == str and mach in self.currentDVs else mach

    if type(CAS) == str and CAS in self.currentDVs:
        dAltdCAS = seg._solveMachCASIntercept(CASVal + stepSize * 1j, machVal)
        altSens[CAS] = -dAltdCAS.imag / stepSize

    if type(mach) == str and mach in self.currentDVs:
        dAltdMach = seg._solveMachCASIntercept(CASVal, machVal + stepSize * 1j)
        altSens[mach] = -dAltdMach.imag / stepSize

    if type(alt) == str and alt in self.currentDVs:
        # The constraint is alt - altIntercept, so d/d(alt) is exactly 1
        altSens[alt] = 1.0

    return altSens
def getNSeg(self):
    """
    Return the number of segments in the mission.
    """
    # segCounter is one past the last segment index
    nSeg = self.segCounter - 1
    return nSeg
def getSegments(self):
    """
    Return the ordered list of segments in the mission.

    Note: this returns the internal list itself, not a copy.
    """
    return self.missionSegments
def setUnits(self, module):
    """
    Push this mission's unit system and the matching gravitational
    constant into the given fortran module.
    """
    module.mission_parameters.englishUnits = self.englishUnits
    # Gravitational acceleration in the active unit system
    if self.englishUnits:
        module.mission_parameters.g = 32.2  # ft/s/s
    else:
        module.mission_parameters.g = 9.80665  # m/s/s
def __str__(self):
    """
    Return a string representation of the profiles within this mission.
    """
    text = "MISSION PROBLEM: %s \n" % self.name
    nextSegNum = 1
    for profile in self.missionProfiles:
        # Each profile renders its own segments, numbered globally
        text += profile.__str__(nextSegNum)
        nextSegNum += len(profile.segments)
    return text
class MissionProfile:
    """
    Mission Profile Object:

    This Mission Profile Object contain an ordered set of segments that
    make up a single subsection of a mission. Start and end points of each
    segment in the profile are required to be continuous.
    """

    def __init__(self, name, englishUnits=False):
        """
        Initialize the mission profile
        """
        self.name = name
        self.englishUnits = englishUnits

        self.segments = []  # ordered list of MissionSegment objects
        self.dvList = {}  # globally-unique DV name -> SegmentDV object
        self.firstSegSet = False  # True once a first state-carrying segment is tagged

    def addSegments(self, segments):
        """
        Take in a list of segments and append it to the the current list.
        Check for consistency while we are at it.
        """
        # Check if profile is of type MissionProfile or list, otherwise raise Error
        if type(segments) == MissionSegment:
            segments = [segments]
        elif type(segments) == list:
            pass
        else:
            raise Error("addSegments() takes in either a list or a single MissionSegment")

        nSeg_Before = len(self.segments)
        self.segments.extend(segments)

        # Loop over each *new* segment in search for DVs
        for i in range(len(segments)):
            seg = segments[i]
            seg.setUnitSystem(self.englishUnits)
            seg.setDefaults(self.englishUnits)
            segID = i + nSeg_Before

            # Loop over the DVs in the segment, if any
            for dvName in seg.dvList:
                dvObj = seg.dvList[dvName]
                if dvObj.userDef:
                    # Variable name should remain unchanged
                    if dvName in self.dvList:
                        raise Error(
                            "User-defined design variable name "
                            + f"{dvName} has already been added"
                            + " to this profile."
                        )
                    dvNameGlobal = dvName
                else:
                    # Prepend profile name and segment ID
                    dvNameGlobal = f"{self.name}_seg{segID}_{dvName}"
                # Save a reference of the DV object and set its segment ID
                self.dvList[dvNameGlobal] = seg.dvList[dvName]
                self.dvList[dvNameGlobal].setSegmentID(segID)

            # Propagate the segment inputs from one to next
            # except don't propagate from last (i=-1) to first (i=0) segment
            if i > 0 and seg.propagateInputs:
                # Every 'finalX' input of the previous segment becomes an
                # 'initX' input of this one (profiles must be continuous)
                for var in segments[i - 1].segInputs:
                    if "final" in var:
                        newVar = var.replace("final", "init")
                        seg.segInputs.add(newVar)
            seg.determineInputs()

        self._checkStateConsistancy()

    def setDesignVars(self, missionDVs):
        """
        Set the variables for this mission profile

        Parameters
        ----------
        missionDVs : dict
            Dictionary of variables which may or may not contain the
            design variable names this object needs
        """
        for dvName in missionDVs:
            # Only concern about the DVs that are in this profile
            if dvName in self.dvList:
                dvObj = self.dvList[dvName]
                dvVal = missionDVs[dvName]
                dvType = dvObj.type  # String: 'Mach', 'Alt', 'TAS', 'CAS'
                segID = dvObj.segID
                isInitVal = dvObj.isInitVal

                # Update the segment for which the DV object belongs to
                seg = self.segments[segID]
                updatePrev, updateNext = seg.setParameters(dvVal, dvType, isInitVal)

                # Update any PREVIOUS segments that depends on this DV
                while updatePrev and segID > 0:
                    segID -= 1
                    seg = self.segments[segID]
                    updatePrev, tmp = seg.setParameters(dvVal, dvType, isInitVal=False)

                # Update any FOLLOWING segments that depends on this DV
                segID = dvObj.segID
                while updateNext and segID < len(self.segments) - 1:
                    segID += 1
                    seg = self.segments[segID]
                    tmp, updateNext = seg.setParameters(dvVal, dvType, isInitVal=True)

        # After setting all the design variables, update the remaining segment states
        for seg in self.segments:
            seg.propagateParameters()
        self._checkStateConsistancy()

    def getSegmentParameters(self):
        """
        Get the 4 segment parameters from each of the segment it owns
        Order is [M1, h1, M2, h2]
        """
        nSeg = len(self.segments)
        # Complex dtype so complex-step perturbations flow through
        segParameters = numpy.zeros(4 * nSeg, dtype="D")

        for i in range(nSeg):
            seg = self.segments[i]
            segParameters[4 * i] = seg.initMach
            segParameters[4 * i + 1] = seg.initAlt
            segParameters[4 * i + 2] = seg.finalMach
            segParameters[4 * i + 3] = seg.finalAlt
            # segParameters[8*i ] = seg.initMach
            # segParameters[8*i+1] = seg.initAlt
            # segParameters[8*i+2] = seg.initCAS
            # segParameters[8*i+3] = seg.initTAS
            # segParameters[8*i+4] = seg.finalMach
            # segParameters[8*i+5] = seg.finalAlt
            # segParameters[8*i+6] = seg.finalCAS
            # segParameters[8*i+7] = seg.finalTAS

        return segParameters

    def _checkStateConsistancy(self):
        # loop over the segments.
        # if it is a fuel fraction segment, skip
        # otherwise check if its initial parameters match the final parameters
        # from the previous segment, if not raise an error
        # if they don't exist, copy.
        for i in range(len(self.segments)):
            seg = self.segments[i]
            if seg.propagateInputs is False:
                # Segment is a fuel fraction segment nothing needs to be done
                pass
            else:
                if not self.firstSegSet:
                    seg.isFirstStateSeg = True
                    self.firstSegSet = True
                # end
                if seg.isFirstStateSeg:
                    # this is the first segment.
                    # Need to have at least the start alt and V or M
                    if seg.initAlt is None:
                        raise Error(
                            "%s: Initial altitude must be\
                            specified for the first non fuel fraction\
                            segment in the profile"
                            % (self.name)
                        )
                    # end

                    if (seg.initMach is None) and (seg.initCAS is None) and (seg.initTAS is None):
                        raise Error(
                            "%s: One of initCAS,initTAS or initMach needs to be\
                            specified for the first non fuelfraction\
                            segment in the profile"
                            % (self.name)
                        )
                    # end

                    # Determine the remaining segment parameters (Alt, Mach, CAS, TAS)
                    seg.propagateParameters()
                else:
                    prevSeg = self.segments[i - 1]
                    refAlt = prevSeg.finalAlt
                    refCAS = prevSeg.finalCAS
                    refTAS = prevSeg.finalTAS
                    refMach = prevSeg.finalMach
                    TASi = seg.initTAS
                    CASi = seg.initCAS
                    Mi = seg.initMach
                    Alti = seg.initAlt

                    # Each specified initial value must match the previous
                    # segment's final value; unspecified values are copied.
                    if CASi is not None:
                        if not CASi == refCAS:
                            raise Error(
                                "%s: Specified initCAS \
                                inconsistent with\
                                previous finalCAS: %f, %f \
                                "
                                % (seg.phase, CASi, refCAS)
                            )
                        # end
                    else:
                        seg.initCAS = refCAS
                    # end

                    if TASi is not None:
                        if not TASi == refTAS:
                            raise Error(
                                "%s: Specified initTAS \
                                inconsistent with\
                                previous finalTAS: %f, %f \
                                "
                                % (seg.phase, TASi, refTAS)
                            )
                        # end
                    else:
                        seg.initTAS = refTAS
                    # end

                    if Alti is not None:
                        if not Alti == refAlt:
                            raise Error(
                                "%s: Specified initAlt \
                                inconsistent with\
                                previous finalAlt"
                                % (seg.phase)
                            )
                        # end
                    else:
                        seg.initAlt = refAlt
                    # end

                    if Mi is not None:
                        if not Mi == refMach:
                            raise Error(
                                "%s: Specified initMach \
                                inconsistent with\
                                previous finalMach"
                                % (seg.phase)
                            )
                        # end
                    else:
                        seg.initMach = refMach
                    # end

                    # Determine the remaining segment parameters (Alt, Mach, CAS, TAS)
                    seg.propagateParameters()
                # end
            # end
        # end

    def __str__(self, segStartNum=0):
        """
        Return a string representation of the segments within this profile
        """
        string = "MISSION PROFILE: %s \n" % self.name
        for i in range(len(self.segments)):
            # segTag = '%sS%02d'%(idTag,i)
            string += self.segments[i].__str__(segStartNum + i)
        return string
class MissionSegment:
    """
    Mission Segment Object:

    This is the basic building block of the mission solver.

    Parameters
    ----------
    phase : str
        Segment type selector valid options include
    """

    def __init__(self, phase, **kwargs):
        """Store the phase, apply any keyword parameters, and classify the segment."""
        # have to have a phase type
        self.phase = phase

        # These are the parameters that can be simply set directly in the class.
        paras = {
            "initMach",
            "initAlt",
            "initCAS",
            "initTAS",
            "finalMach",
            "finalAlt",
            "finalCAS",
            "finalTAS",
            "fuelFraction",
            "rangeFraction",
            "segTime",
            "engType",
            "throttle",
            "nIntervals",
            "residualclimbrate",
            "descentrate",
            "climbtdratio",
            "descenttdratio",
        }

        # By default everything is None
        for para in paras:
            setattr(self, para, None)

        # Set default number of intervals
        self.nIntervals = 4

        # Any matching key from kwargs that is in 'paras'
        for key in kwargs:
            if key in paras:
                setattr(self, key, kwargs[key])

        # identify the possible design variables based on what parameters
        # have been set
        varFuncs = ["initMach", "initAlt", "initTAS", "initCAS", "finalMach", "finalAlt", "finalCAS", "finalTAS"]

        self.possibleDVs = set()
        self.segInputs = set()
        for var in varFuncs:
            if getattr(self, var) is not None:
                self.possibleDVs.add(var)
                self.segInputs.add(var)

        # propagateInputs should be true for everything
        # except fuelFraction and fixedThrottle segments
        self.propagateInputs = True
        if self.fuelFraction is not None or self.throttle is not None:
            self.propagateInputs = False

        # Storage of DVs
        self.dvList = {}

        # Flags recording which of Mach / speed / altitude are held constant
        # across this segment; used to decide whether a DV change propagates
        # from one end of the segment to the other.
        if self.phase.lower() in ["cvelclimb", "cveldescent"]:
            self.constMachDV = False
            self.constVelDV = True
            self.constAltDV = False
        elif self.phase.lower() in ["cmachclimb", "cmachdescent"]:
            self.constMachDV = True
            self.constVelDV = False
            self.constAltDV = False
        elif self.phase.lower() in ["cruise", "loiter"]:
            self.constMachDV = True
            self.constVelDV = True
            self.constAltDV = True
        elif self.phase.lower() in ["acceleratedcruise", "deceleratedcruise"]:
            self.constMachDV = False
            self.constVelDV = False
            self.constAltDV = True
        else:
            self.constMachDV = False
            self.constVelDV = False
            self.constAltDV = False

        self.isFirstStateSeg = False
        return

    def setUnitSystem(self, englishUnits):
        """Create the atmosphere model and fluid constants for the given unit system."""
        self.atm = ICAOAtmosphere(englishUnits=englishUnits)
        fluidProps = FluidProperties(englishUnits=englishUnits)
        self.R = fluidProps.R
        self.gamma = fluidProps.gamma

    def setDefaults(self, englishUnits):
        """Fill in default climb/descent rates and thrust/drag ratios if unset."""
        # Set default climb/descent rates and td ratios
        if self.residualclimbrate is None:
            if englishUnits:
                self.residualclimbrate = 300.0 / 60.0
            else:
                self.residualclimbrate = 300.0 / 60.0 * 0.3048
        if self.descentrate is None:
            if englishUnits:
                self.descentrate = -2000.0 / 60.0
            else:
                self.descentrate = -2000.0 / 60.0 * 0.3048
        if self.climbtdratio is None:
            self.climbtdratio = 1.1
        if self.descenttdratio is None:
            self.descenttdratio = 0.5

    def determineInputs(self):
        """
        Determine which of the four parameters (h, M, CAS, TAS) are inputs,
        which can be updated directly by the DV. For each end, there should
        be two inputs. At this point, the two beginning inputs should already
        be determined during initalization or by the MissionProfile.
        """
        # Check there are two inputs for the segment start
        count = 0
        for var in self.segInputs:
            if "init" in var:
                count += 1
        if count < 2 and self.fuelFraction is None:
            raise Error(
                "%s: There does not appear to be two inputs at the \
                start of this segment"
                % self.phase
            )
        elif count > 2 and self.fuelFraction is None:
            raise Error(
                "%s: There appears to be more than two inputs at the \
                start of this segment, may not be consistent"
                % self.phase
            )

        # If there are two inputs for the segment end, done;
        # otherwise determine based on start
        count = 0
        for var in self.segInputs:
            if "final" in var:
                count += 1
        if count == 2:
            return
        elif count > 2:
            # Bug fix: this message previously said "start" for the end check
            raise Error(
                "%s: There appears to be more than two inputs at the \
                end of this segment, may not be consistent"
                % self.phase
            )
        else:
            # For any segment with constant Mach, CAS, or altitude,
            # the constant quantity is also an input at the segment end
            if "cmach" in self.phase.lower():
                self.segInputs.add("finalMach")
            elif "cvel" in self.phase.lower():
                self.segInputs.add("finalCAS")
            elif "cruise" in self.phase.lower() or self.phase.lower() == "loiter":
                self.segInputs.add("finalAlt")

            # For cruise segments, copy the initial speeds to final
            if self.phase.lower() == "cruise" or self.phase.lower() == "loiter":
                if "initMach" in self.segInputs:
                    self.segInputs.add("finalMach")
                elif "initCAS" in self.segInputs:
                    self.segInputs.add("finalCAS")
                elif "initTAS" in self.segInputs:
                    self.segInputs.add("finalTAS")

    def propagateParameters(self):
        """
        Set the final V,M,h base on initial values and segType.
        """
        if self.propagateInputs is False:
            # A FuelFraction type segment, nothing to propagate
            return
        elif self.phase.lower() in ["cruise", "loiter"]:
            # Given M, CAS, or TAS, calculate the other two speeds
            self._calculateSpeed(endPoint="start")

            # take everything from init and copy to final
            self.finalAlt = self.initAlt
            self.finalCAS = self.initCAS
            self.finalTAS = self.initTAS
            self.finalMach = self.initMach
        elif self.phase.lower() in ["acceleratedcruise", "deceleratedcruise"]:
            self.finalAlt = self.initAlt
            self._calculateSpeed(endPoint="start")
            self._calculateSpeed(endPoint="end")
        elif self.phase.lower() in ["cvelclimb", "cveldescent"]:
            # Requires either (v, hi, hf), (v, hi, Mf), or (v, Mi, hf)
            self.finalCAS = self.initCAS

            if {"initCAS", "initAlt", "finalAlt"}.issubset(self.segInputs):
                # (v, hi, hf): Solve for the TAS and then for Mach
                self._calculateSpeed(endPoint="start")
                self._calculateSpeed(endPoint="end")
            elif {"initCAS", "initAlt", "finalMach"}.issubset(self.segInputs):
                # (v, hi, Mf): Solve for finalAlt and then TAS
                self.finalAlt = self._solveMachCASIntercept(self.initCAS, self.finalMach)
                self.finalTAS = self._CAS2TAS(self.finalCAS, self.finalAlt)
                self.initTAS = self._CAS2TAS(self.initCAS, self.initAlt)
                a = self._getSoundSpeed(self.initAlt)
                self.initMach = self.initTAS / a
            elif {"initCAS", "initMach", "finalAlt"}.issubset(self.segInputs):
                # (v, Mi, hf): Solve for initAlt and then TAS
                self.initAlt = self._solveMachCASIntercept(self.initCAS, self.initMach)
                self.initTAS = self._CAS2TAS(self.initCAS, self.initAlt)
                self.finalTAS = self._CAS2TAS(self.finalCAS, self.finalAlt)
                a = self._getSoundSpeed(self.finalAlt)
                self.finalMach = self.finalTAS / a
            else:
                # Bug fix: previously Error was called as Error("%s", self.phase)
                # which passes two arguments instead of a formatted message
                raise Error("%s: propagateParameters could not determine a valid input set" % self.phase)
        elif self.phase.lower() in ["cmachclimb", "cmachdescent"]:
            # Requires either (M, hi, hf), (M, vi, hf), or (M, hi, vf)
            self.finalMach = self.initMach

            if {"initMach", "initAlt", "finalAlt"}.issubset(self.segInputs):
                # (M, hi, hf): Solve for the TAS and then CAS
                self._calculateSpeed(endPoint="start")
                self._calculateSpeed(endPoint="end")
            elif {"initMach", "initCAS", "finalAlt"}.issubset(self.segInputs):
                # (M, vi, hf): Solve for initAlt and then initTAS, finalTAS then finalCAS
                self.initAlt = self._solveMachCASIntercept(self.initCAS, self.initMach)
                self.initTAS = self._CAS2TAS(self.initCAS, self.initAlt)
                a = self._getSoundSpeed(self.finalAlt)
                self.finalTAS = self.finalMach * a
                self.finalCAS = self._TAS2CAS(self.finalTAS, self.finalAlt)
            elif {"initMach", "initAlt", "finalCAS"}.issubset(self.segInputs):
                # (M, hi, vf): Solve for finalAlt and then finalTAS, initTAS then initCAS
                self.finalAlt = self._solveMachCASIntercept(self.finalCAS, self.finalMach)
                self.finalTAS = self._CAS2TAS(self.finalCAS, self.finalAlt)
                a = self._getSoundSpeed(self.initAlt)
                self.initTAS = self.initMach * a
                self.initCAS = self._TAS2CAS(self.initTAS, self.initAlt)
            else:
                raise Error("%s: propagateParameters could not determine a valid input set" % self.phase)
        else:
            self._calculateSpeed(endPoint="start")
            self._calculateSpeed(endPoint="end")

    def _calculateSpeed(self, endPoint="start"):
        """
        This assumes that the altitude and one of three speeds
        (CAS, TAS, or Mach) are given, and calculates the other two speeds.
        """
        if endPoint.lower() == "start":
            CASTag = "initCAS"
            TASTag = "initTAS"
            machTag = "initMach"
            altTag = "initAlt"
        elif endPoint.lower() == "end":
            CASTag = "finalCAS"
            TASTag = "finalTAS"
            machTag = "finalMach"
            altTag = "finalAlt"
        else:
            # invalid endpoint
            raise Error(
                "%s: _calculateSpeed, invalid endPoint:\
                %s"
                % (self.phase, endPoint)
            )

        CAS = getattr(self, CASTag)
        TAS = getattr(self, TASTag)
        mach = getattr(self, machTag)
        alt = getattr(self, altTag)

        # Given M, CAS, or TAS, calculate the other two speeds
        if alt is None:
            # Bug fix: this was sys.exit(0), which silently terminated the
            # whole process with a success status instead of reporting the error
            raise Error("%s: _calculateSpeed, altitude at %s is not defined" % (self.phase, endPoint))
        a = self._getSoundSpeed(alt)
        if CASTag in self.segInputs:
            TAS = self._CAS2TAS(CAS, alt)
            mach = TAS / a
        elif TASTag in self.segInputs:
            CAS = self._TAS2CAS(TAS, alt)
            mach = TAS / a
        elif machTag in self.segInputs:
            TAS = mach * a
            CAS = self._TAS2CAS(TAS, alt)

        setattr(self, CASTag, CAS)
        setattr(self, TASTag, TAS)
        setattr(self, machTag, mach)
        setattr(self, altTag, alt)

    def _getSoundSpeed(self, alt):
        """
        compute the speed of sound at this altitude
        """
        # evaluate the atmosphere model
        P, T = self.atm(alt)
        a = numpy.sqrt(self.gamma * self.R * T)
        return copy.copy(a)

    def _getPTRho(self, alt):
        """
        compute the pressure, temperature and density at this altitude
        """
        # evaluate the atmosphere model
        P, T = self.atm(alt)
        rho = P / (self.R * T)
        return P, T, rho

    def _solveMachCASIntercept(self, CAS, mach, initAlt=3048.0):
        # TAS: True Air speed
        # CAS: Calibrated air speed

        # Simple Newton's method to solve for the altitude at which
        # CAS=mach
        alt = initAlt
        dAlt = 1e1

        res = 1.0
        while abs(res) > 1e-12:
            a = self._getSoundSpeed(alt)
            TAS = self._CAS2TAS(CAS, alt)
            M = TAS / a
            res = M - mach
            # Finite-difference approximation of d(res)/d(alt)
            a = self._getSoundSpeed(alt + dAlt)
            TAS = self._CAS2TAS(CAS, alt + dAlt)
            M2 = TAS / a
            res2 = M2 - mach
            df = (res2 - res) / dAlt
            # Damp large Newton steps for robustness
            if abs(res / df) < 1000.0:
                alt -= res / df
            else:
                alt -= (1.0 / 2.0) * res / df

        return alt

    def _TAS2CAS(self, TAS, h):
        """Convert true airspeed to calibrated airspeed at altitude h."""
        # get sea level properties
        P0, T0, rho0 = self._getPTRho(0)

        # get the properties at the current altitude
        a = self._getSoundSpeed(h)
        P, T, rho = self._getPTRho(h)

        # compute the ratios at for the static atmospheric states
        PRatio = P / P0
        RhoRatio = rho / rho0

        # Convert the TAS to EAS
        EAS = TAS * numpy.sqrt(RhoRatio)

        # Evaluate the current M based on TAS
        M = TAS / a

        # Evaluate the Calibrated air speed, CAS
        term1 = (1.0 / 8.0) * (1 - PRatio) * M ** 2
        term2 = (3.0 / 640.0) * (1 - 10 * PRatio + 9 * PRatio ** 2) * M ** 4
        ECTerm = 1 + term1 + term2
        CAS = EAS * ECTerm

        return CAS

    def _CAS2TAS(self, CAS, h):
        """Convert calibrated airspeed to true airspeed at altitude h (subsonic only)."""
        # TAS: True air speed
        a0 = self._getSoundSpeed(0)
        P0, T0, rho0 = self._getPTRho(0)

        a = self._getSoundSpeed(h)
        P, T, rho = self._getPTRho(h)

        # Source: http://williams.best.vwh.net/avform.htm#Intro
        # Differential pressure: Units of CAS and a0 must be consistent
        DP = P0 * ((1 + 0.2 * (CAS / a0) ** 2) ** (7.0 / 2.0) - 1)  # impact pressure

        M = numpy.sqrt(5 * ((DP / P + 1) ** (2.0 / 7.0) - 1))

        if M > 1:
            raise Error(
                "%s_CAS2TAS: The current mission class is\
                limited to subsonic missions: %f %f"
                % (self.phase, M, CAS)
            )

        TAS = M * a

        return TAS

    def setMissionData(self, module, segTypeDict, engTypeDict, idx, segIdx):
        """
        set the data for the current segment in the fortran module
        """
        # Unset end states default to 0.0 for the fortran layer
        h1 = self.initAlt
        if h1 is None:
            h1 = 0.0
        h2 = self.finalAlt
        if h2 is None:
            h2 = 0.0
        M1 = self.initMach
        if M1 is None:
            M1 = 0.0
        M2 = self.finalMach
        if M2 is None:
            M2 = 0.0
        deltaTime = self.segTime
        if deltaTime is None:
            deltaTime = 0.0

        rangeFraction = self.rangeFraction
        if rangeFraction is None:
            rangeFraction = 1.0

        # Get the fuel-fraction, if provided, then segment is a generic fuel fraction type
        fuelFraction = self.fuelFraction
        throttle = self.throttle
        if fuelFraction is None and throttle is None:
            segTypeID = segTypeDict[self.phase.lower()]
            fuelFraction = 0.0
            throttle = 0.0
        elif fuelFraction is not None:
            segTypeID = segTypeDict["fuelFraction"]
            throttle = 0.0
        elif throttle is not None:
            segTypeID = segTypeDict["fixedThrottle"]
            fuelFraction = 0.0

        # Get the engine type and ensure the engine type is defined in engTypeDict
        if self.engType not in engTypeDict and self.engType is not None:
            raise Error(f"engType {self.engType} defined in segment {self.phase} not defined in engTypeDict")
        if self.engType is None:
            self.engType = "None"
        engTypeID = engTypeDict[self.engType]

        module.setmissionsegmentdata(
            idx,
            segIdx,
            h1,
            h2,
            M1,
            M2,
            deltaTime,
            fuelFraction,
            throttle,
            rangeFraction,
            segTypeID,
            engTypeID,
            self.nIntervals,
            self.residualclimbrate,
            self.descentrate,
            self.climbtdratio,
            self.descenttdratio,
        )

    def addDV(self, paramKey, lower=-1e20, upper=1e20, scale=1.0, name=None):
        """
        Add one of the class attributes as a mission design
        variable. Typical variables are mach or velocity and altitude
        An error will be given if the requested DV is not allowed to
        be added .

        Parameters
        ----------
        paramKey : str
            Name of the segment attribute to add as a DV. Must be one of
            the parameters supplied when the segment was created
            (see ``possibleDVs``).
        lower : float. Default is -1e20
            Optimization lower bound.
        upper : float. Default is 1e20
            Optimization upper bound.
        scale : float. Default is 1.0
            Set scaling parameter for the optimization to use.
        name : str. Default is None
            Overwrite the name of this variable. This is typically
            only used when the user wishes to have multiple
            aeroProblems to explictly use the same design variable.

        Examples
        --------
        >>> # Add initMach variable with typical bounds
        >>> seg.addDV('initMach', lower=0.0, upper=1.0, scale=1.0)
        """

        # First check if we are allowed to add the DV:
        if paramKey not in self.possibleDVs:
            raise Error(
                "The DV '%s' could not be added. Potential DVs MUST\
                be specified when the missionSegment class is created. \
                For example, if you want initMach as a design variable \
                (...,initMach=value, ...) must\
                be given. The list of possible DVs are: %s."
                % (paramKey, repr(self.possibleDVs))
            )

        if name is None:
            dvName = paramKey
            userDef = False
        else:
            dvName = name
            userDef = True

        value = getattr(self, paramKey)

        # Remove 'init' or 'final' from paramKey and set to dvType;
        # every possibleDV contains one of the two prefixes, so isInitVal
        # is always assigned here
        dvType = paramKey.replace("init", "").replace("final", "")
        if "init" in paramKey:
            isInitVal = True
        elif "final" in paramKey:
            isInitVal = False

        self.dvList[dvName] = SegmentDV(dvType, isInitVal, value, lower, upper, scale, userDef)

    def setParameters(self, value, paramType, isInitVal):
        """
        Design variable handling, where 'initMach' will be of paramType='Mach'
        and isInitVal=True, and the finalMach will be automatically adjusted if needed.
        Also determines if the previous or next segment will be affect as well
        """
        # Determine whether the following or previous segment needs to be updated
        if isInitVal:
            key = "init" + paramType
            updatePrev = True
            if paramType == "Mach":
                updateNext = self.constMachDV
            elif paramType == "Alt":
                updateNext = self.constAltDV
            elif paramType == "CAS" or paramType == "TAS":
                updateNext = self.constVelDV
        else:
            key = "final" + paramType
            updateNext = True
            if paramType == "Mach":
                updatePrev = self.constMachDV
            elif paramType == "Alt":
                updatePrev = self.constAltDV
            elif paramType == "CAS" or paramType == "TAS":
                updatePrev = self.constVelDV

        # Update the value in the current segment
        setattr(self, key, value)

        # If this segment has a constant value, update the init/final value
        if isInitVal and updateNext:
            key = "final" + paramType
            setattr(self, key, value)
        elif (not isInitVal) and updatePrev:
            key = "init" + paramType
            setattr(self, key, value)

        return updatePrev, updateNext

    def __str__(self, segNum=None):
        """
        Return a string representation of the states in this segment
        """
        if segNum is None:
            idTag = ""
        else:
            idTag = "%02d:" % segNum

        # Putting the states into an array automatically convert Nones to nans
        states = numpy.zeros([2, 4])
        states[0, :] = [self.initAlt, self.initMach, self.initCAS, self.initTAS]
        states[1, :] = [self.finalAlt, self.finalMach, self.finalCAS, self.finalTAS]
        # Bug fix: the condition was inverted, so a missing fuelFraction
        # (the common case) was formatted as None and raised a TypeError
        if self.fuelFraction is None:
            fuelFrac = numpy.nan
        else:
            fuelFrac = self.fuelFraction

        string = f"{idTag:>3} {self.phase:>18} "
        string += "{:>8} {:>8} {:>8} {:>8} {:>8} \n".format("Alt", "Mach", "CAS", "TAS", "FuelFrac")
        string += "{:>22} {:8.2f} {:8.6f} {:8.4f} {:8.4f} {:8.4f} \n".format(
            "",
            states[0, 0],
            states[0, 1],
            states[0, 2],
            states[0, 3],
            fuelFrac,
        )
        string += "{:>22} {:8.2f} {:8.6f} {:8.4f} {:8.4f} \n".format(
            "", states[1, 0], states[1, 1], states[1, 2], states[1, 3]
        )

        return string
class SegmentDV:
    """
    A container storing information regarding a mission profile variable.
    """

    def __init__(self, dvType, isInitVal, value, lower, upper, scale=1.0, userDef=False):
        """Record the DV description; the owning segment ID is attached later."""
        # -1 means "not yet assigned to a segment within a profile"
        self.segID = -1
        self.type = dvType  # one of 'Mach', 'Alt', 'TAS', 'CAS'
        self.isInitVal = isInitVal  # True when the DV drives the segment's initial state
        self.value = value
        self.lower = lower
        self.upper = upper
        self.scale = scale
        self.userDef = userDef  # True when the caller supplied an explicit DV name

    def setSegmentID(self, ID):
        """
        Set the segment ID in which this DV belongs to within the profile
        """
        self.segID = ID
|
import datetime
import json
import os
from typing import Any, Dict, List
from sauce_searcher_server.constants import (
DOUJIN_BASE_IMAGE,
DOUJIN_BASE_URL,
VISUAL_NOVEL_BASE_URL,
VISUAL_NOVEL_LENGTHS,
VISUAL_NOVEL_TAGS_FILE,
)
from sauce_searcher_server.models import (
Anime,
Doujin,
DoujinTag,
LightNovel,
MALEntry,
Manga,
VisualNovel,
VisualNovelTag,
)
VN_TAGS = None  # lazily-populated module-level cache


def get_vn_tags():
    """Load, cache, and return the VN tag id -> name mapping from disk."""
    global VN_TAGS
    if VN_TAGS is None:
        here = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(here, VISUAL_NOVEL_TAGS_FILE)) as tag_file:
            VN_TAGS = json.load(tag_file)
    return VN_TAGS
def get_title_english(data: Dict[str, Any]) -> str:
    """Pick a title: explicit English title, else first synonym, else the main title."""
    english = data.get('title_english')
    if english:
        return english
    synonyms = data.get('title_synonyms')
    return synonyms[0] if synonyms else data['title']
def format_mal_entry(relation: MALEntry, show_type=False) -> str:
    """Format a MAL entry for display.

    Returns just the entry name, or ``"name (type)"`` when *show_type*
    is True. A ``manga`` type is widened to ``manga/light novel`` because
    MAL reports both under the same type.
    """
    name = relation.name
    if not show_type:
        return name
    # Renamed from `type`, which shadowed the builtin
    entry_type = relation.type
    if entry_type == 'manga':
        entry_type = 'manga/light novel'
    return f'{name} ({entry_type})'
def format_song(song: str) -> str:
    """Strip a leading '<label>: ' prefix (e.g. '#1: ') from a theme-song string."""
    idx = song.find(':')
    # Only strip when the colon is past position 0; skip the colon and
    # the character after it (the separating space)
    return song[idx + 2:] if idx > 0 else song
def format_tag(tag: DoujinTag) -> str:
    """Render a doujin tag as 'name (count)', with a thousands separator."""
    return f'{tag.name} ({tag.count:,})'
def get_relations(related: Dict[str, List[dict]]) -> Dict[str, List[str]]:
    """Format each related MAL entry (with its type), keyed by relation kind."""
    return {
        relation_kind: [format_mal_entry(MALEntry(**entry), True) for entry in entries]
        for relation_kind, entries in related.items()
    }
def parse_anime(data: Dict[str, Any]) -> Anime:
    """Build an Anime model from a raw MAL API payload."""
    related = get_relations(data.get('related', {}))
    return Anime(
        id=data['mal_id'],
        title=data['title'],
        title_english=get_title_english(data),
        url=data['url'],
        image=data['image_url'],
        type=data['type'],
        source=data['source'],
        episodes=data['episodes'],
        status=data['status'],
        airing=data['airing'],
        premiered=data['premiered'],
        broadcast=data['broadcast'],
        aired=data['aired'],
        duration=data['duration'],
        rating=data['rating'],
        score=data['score'],
        synopsis=data['synopsis'],
        relations=related,
        studios=[format_mal_entry(MALEntry(**entry)) for entry in data.get('studios', [])],
        genres=[format_mal_entry(MALEntry(**entry)) for entry in data.get('genres', [])],
        openings=[format_song(theme) for theme in data.get('opening_themes', [])],
        endings=[format_song(theme) for theme in data.get('ending_themes', [])],
    )
def parse_manga(data: Dict[str, Any]) -> Manga:
    """Build a Manga model from a raw MAL API payload."""
    related = get_relations(data.get('related', {}))
    return Manga(
        id=data['mal_id'],
        title=data['title'],
        title_english=get_title_english(data),
        url=data['url'],
        image=data['image_url'],
        type=data['type'],
        volumes=data['volumes'],
        chapters=data['chapters'],
        status=data['status'],
        publishing=data['publishing'],
        published=data['published'],
        score=data['score'],
        synopsis=data['synopsis'],
        relations=related,
        genres=[format_mal_entry(MALEntry(**entry)) for entry in data.get('genres', [])],
        authors=[format_mal_entry(MALEntry(**entry)) for entry in data.get('authors', [])],
        serializations=[format_mal_entry(MALEntry(**entry)) for entry in data.get('serializations', [])],
    )
def parse_light_novel(data: Dict[str, Any]) -> LightNovel:
    """Build a LightNovel model from a raw MAL API payload."""
    related = get_relations(data.get('related', {}))
    return LightNovel(
        id=data['mal_id'],
        title=data['title'],
        title_english=get_title_english(data),
        url=data['url'],
        image=data['image_url'],
        type=data['type'],
        volumes=data['volumes'],
        chapters=data['chapters'],
        status=data['status'],
        publishing=data['publishing'],
        published=data['published'],
        score=data['score'],
        synopsis=data['synopsis'],
        relations=related,
        genres=[format_mal_entry(MALEntry(**entry)) for entry in data.get('genres', [])],
        authors=[format_mal_entry(MALEntry(**entry)) for entry in data.get('authors', [])],
    )
def parse_visual_novel(data: Dict[str, Any]) -> VisualNovel:
    """Convert a raw VNDB payload into a VisualNovel object."""
    page_url = f'{VISUAL_NOVEL_BASE_URL}{data["id"]}'
    release_date = datetime.datetime.fromisoformat(data['released'])
    flags = data['image_flagging']
    # The cover counts as NSFW when either average flagging score exceeds 1.
    # (A falsy flagging payload passes through unchanged, as before.)
    lewd = flags and flags['sexual_avg'] > 1
    violent = flags and flags['violence_avg'] > 1
    nsfw = lewd or violent
    has_anime = bool(data['anime'])
    staff_names = [member['name'] for member in data.get('staff', []) if member['role'].lower() == 'staff']
    play_length = VISUAL_NOVEL_LENGTHS[data.get('length', 0)]
    # dir_path = os.path.dirname(os.path.realpath(__file__))
    # with open(os.path.join(dir_path, VISUAL_NOVEL_TAGS_FILE)) as f:
    #     tag_mappings = ujson.load(f)
    tag_names = get_vn_tags()
    kept_tags: List[VisualNovelTag] = []
    for raw in data.get('tags', []):
        vn_tag = VisualNovelTag(id=raw[0], score=raw[1], spoiler=raw[2])
        vn_tag.name = tag_names.get(str(vn_tag.id))
        # Keep only tags we can name, and drop heavy spoilers (spoiler level 2).
        if vn_tag.name and vn_tag.spoiler < 2:
            kept_tags.append(vn_tag)
    # Order by ascending score, then keep just the tag names.
    parsed_tags = [kept.name for kept in sorted(kept_tags, key=lambda t: t.score)]
    return VisualNovel(
        id=data['id'],
        title=data['title'],
        url=page_url,
        released=release_date,
        description=data['description'],
        image=data['image'],
        image_nsfw=nsfw,
        tags=parsed_tags,
        staff=staff_names,
        anime=has_anime,
        length=play_length,
        score=data['rating'],
        languages=data['languages'],
    )
def parse_doujin(data: Dict[str, Any]) -> Doujin:
    """Convert a raw nhentai gallery payload into a Doujin object."""
    gallery_url = f'{DOUJIN_BASE_URL}{data["id"]}'
    thumbnail = f'{DOUJIN_BASE_IMAGE}{data["media_id"]}/thumb.jpg'
    uploaded = datetime.datetime.fromtimestamp(data['upload_date'])
    # One bucket per recognised tag type; anything else is silently dropped,
    # exactly like the original if/elif chain.
    buckets: Dict[str, List[str]] = {
        'tag': [],
        'language': [],
        'artist': [],
        'category': [],
        'parody': [],
        'character': [],
        'group': [],
    }
    for raw in data.get('tags', []):
        doujin_tag = DoujinTag(**raw)
        formatted = format_tag(doujin_tag)
        bucket = buckets.get(doujin_tag.type.lower())
        if bucket is not None:
            bucket.append(formatted)
    return Doujin(
        id=data['id'],
        title=data['title']['pretty'],
        full_title=data['title']['english'],
        url=gallery_url,
        image=thumbnail,
        pages=data['num_pages'],
        upload_date=uploaded,
        tags=buckets['tag'],
        languages=buckets['language'],
        artists=buckets['artist'],
        categories=buckets['category'],
        parodies=buckets['parody'],
        characters=buckets['character'],
        groups=buckets['group'],
    )
|
<reponame>andreabradpitto/turtlex
#!/usr/bin/env python3
# Import necessary libraries
import os
import torch
import onnx
import pynever.strategies.conversion as conv
import pynever.networks as networks
import pynever.nodes as nodes
import torch.nn.functional as F
from torch.distributions import Normal
class PolicyNetwork(torch.nn.Module):  # PyTorch needs this definition in order to know how to load the .pth file correctly
    """SAC-style Gaussian policy: two ReLU hidden layers feeding separate
    mean and log-std heads.

    Args:
        state_dim: size of the observation vector.
        action_dim: size of the action vector.
        actor_hidden_dim: width of the two hidden layers.
        log_std_min / log_std_max: clamp range for the predicted log-std.
    """

    def __init__(self, state_dim, action_dim, actor_hidden_dim, log_std_min=-20, log_std_max=2):
        super(PolicyNetwork, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

        self.linear1 = torch.nn.Linear(state_dim, actor_hidden_dim)
        self.linear2 = torch.nn.Linear(actor_hidden_dim, actor_hidden_dim)

        self.mean_linear = torch.nn.Linear(actor_hidden_dim, action_dim)
        self.log_std_linear = torch.nn.Linear(actor_hidden_dim, action_dim)

        self.apply(self.weights_init)

    @staticmethod
    def weights_init(m):
        # BUG FIX: this used to be an instance method taking only `m`, so
        # `self.apply(self.weights_init)` called it as (self, module) and
        # raised TypeError at construction. nn.Module.apply passes each
        # submodule as the single argument, so a staticmethod is correct.
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight, gain=1)
            torch.nn.init.constant_(m.bias, 0)

    def forward(self, state):
        """Return (mean, log_std) of the action distribution; log_std is clamped."""
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))

        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, min=self.log_std_min, max=self.log_std_max)

        return mean, log_std

    def sample(self, state, epsilon=1e-6):
        """Reparameterized sample: squashed action in (-1, 1) plus its log-prob."""
        mean, log_std = self.forward(state)
        std = log_std.exp()

        normal = Normal(mean, std)
        x_t = normal.rsample()
        # 2*sigmoid(2x) - 1 == tanh(x): squash the unbounded sample into (-1, 1).
        action = (2 * torch.sigmoid(2 * x_t)) - 1
        log_prob = normal.log_prob(x_t)
        # Change-of-variables correction for the squashing, with epsilon for safety.
        log_prob -= torch.log(1 - action.pow(2) + epsilon)
        log_prob = log_prob.sum(1, keepdim=True)

        return action, log_prob, mean, log_std
def pnv_converter(pol_net_id: str, state_dim: int, hidden_dim: int, action_dim: int, device: str):
    """
    This function searches for the trained and saved network, and returns 3 PyNeVer-compatible
    versions of the same new network: PyNeVer internal format, PyTorch format, and ONNX format
    """
    # Specify networks directory loading and saving paths
    netspath_load = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'results/nets_train/office_nav_sac'))
    netspath_save = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'results/nets_ver/office_nav_sac'))
    policy_net = torch.load(netspath_load + '/' + pol_net_id + ".pth", map_location=device) # This line needs the PolicyNetwork definition above
    policy_net.eval()
    # Rebuild the actor as an explicit feed-forward chain. Per the layer
    # comments below, the trailing FC6 -> Sigmoid -> FC8 stages re-express the
    # deterministic squashing 2*sigmoid(2*mean) - 1 used in PolicyNetwork.sample().
    pol_new_pnv = networks.SequentialNetwork('NET_0', "X")
    fc1 = nodes.FullyConnectedNode("FC1", (state_dim,), hidden_dim)
    pol_new_pnv.add_node(fc1)
    rl2 = nodes.ReLUNode("RL2", fc1.out_dim)
    pol_new_pnv.add_node(rl2)
    fc3 = nodes.FullyConnectedNode("FC3", rl2.out_dim, hidden_dim)
    pol_new_pnv.add_node(fc3)
    rl4 = nodes.ReLUNode("RL4", fc3.out_dim)
    pol_new_pnv.add_node(rl4)
    fc5 = nodes.FullyConnectedNode("FC5", rl4.out_dim, action_dim) # Ex "mean" layer
    pol_new_pnv.add_node(fc5)
    fc6 = nodes.FullyConnectedNode("FC6", fc5.out_dim, action_dim) # "* 2" layer
    pol_new_pnv.add_node(fc6)
    sm7 = nodes.SigmoidNode("SM7", fc6.out_dim)
    pol_new_pnv.add_node(sm7)
    fc8 = nodes.FullyConnectedNode("FC8", sm7.out_dim, action_dim) # "* 2; - 1" layer
    pol_new_pnv.add_node(fc8)
    pol_new_pnv_pt = conv.PyTorchConverter().from_neural_network(pol_new_pnv)
    # NOTE(review): the '_modules' keys below ('0','2','4','5','7') assume the
    # exact layer ordering PyTorchConverter emits — verify if pynever changes.
    # FC6 ('5') and FC8 ('7') become fixed affine maps: weight 2*I, biases 0 and -1.
    torch.nn.init.constant_(pol_new_pnv_pt.pytorch_network._modules['5'].weight, 0)
    torch.nn.init.constant_(pol_new_pnv_pt.pytorch_network._modules['5'].bias, 0)
    torch.nn.init.constant_(pol_new_pnv_pt.pytorch_network._modules['7'].weight, 0)
    torch.nn.init.constant_(pol_new_pnv_pt.pytorch_network._modules['7'].bias, -1)
    with torch.no_grad():
        # Put 2 on the diagonal of each fixed layer's weight matrix.
        for idx, elem in enumerate(pol_new_pnv_pt.pytorch_network._modules['5'].weight):
            elem[idx] = 2
        for idx, elem in enumerate(pol_new_pnv_pt.pytorch_network._modules['7'].weight):
            elem[idx] = 2
    with torch.no_grad():
        # Copy the learned weights from the loaded policy into the new network.
        pol_new_pnv_pt.pytorch_network._modules['0'].weight.copy_(policy_net.linear1.weight)
        pol_new_pnv_pt.pytorch_network._modules['0'].bias.copy_(policy_net.linear1.bias)
        pol_new_pnv_pt.pytorch_network._modules['2'].weight.copy_(policy_net.linear2.weight)
        pol_new_pnv_pt.pytorch_network._modules['2'].bias.copy_(policy_net.linear2.bias)
        pol_new_pnv_pt.pytorch_network._modules['4'].weight.copy_(policy_net.mean_linear.weight)
        pol_new_pnv_pt.pytorch_network._modules['4'].bias.copy_(policy_net.mean_linear.bias)
    pol_new_pnv_pt.pytorch_network.eval() # Not strictly necessary here
    # Persist the rebuilt network in all three formats.
    torch.save(pol_new_pnv_pt.pytorch_network, netspath_save + '/' + pol_net_id + "_pnv" + ".pth")
    pol_new_pnv = conv.PyTorchConverter().to_neural_network(pol_new_pnv_pt)
    pol_new_pnv_onnx = conv.ONNXConverter().from_neural_network(pol_new_pnv).onnx_network
    onnx.save(pol_new_pnv_onnx, netspath_save + '/' + pol_net_id + "_pnv" + ".onnx")
    return pol_new_pnv, pol_new_pnv_pt, pol_new_pnv_onnx
|
import sympy as sym
from scipy.interpolate import CubicSpline, PPoly
import math
import typing
class Segwise(sym.Function):
    """Piecewise function for sequential linear segments.
    arguments: Segwise(argument, (equation (in Segwise.s), upper_bound), (equation_2, upper_bound_2))

    Segment i applies to values up to its upper bound; bounds must ascend.
    """
    # Variable number of sympy arguments (argument expression + N segments).
    nargs=None
    # The free symbol every segment equation must be written in.
    s = sym.Symbol("s")
    # True when all segments share (approximately) the same width.
    _equispaced = True
    @classmethod
    def __new__(cls, *args, **kwargs):
        # NOTE: because __new__ is additionally wrapped in @classmethod, the
        # bound call receives cls twice, so args == (cls, argument, *segments);
        # hence args[1] below is the argument expression, and passing *args to
        # super().__new__ forwards cls correctly.
        equispaced = True
        try:
            x = args[1]
            equations = args[2:]
        except IndexError:
            raise ValueError("Segwise created with zero arguments")
        if not isinstance(x, sym.Basic):
            raise ValueError("Segwise's first argument must a sympy expression")
        if len(equations)<2:
            raise ValueError("Segwise requires at least 2 segments to do anything")
        try:
            for i,(eq,ub) in enumerate(equations):
                if not isinstance(eq,sym.Basic):
                    raise ValueError(f"Segment {i} does not have a valid equation")
                if isinstance(ub,sym.Basic) and not ub.is_Number:
                    raise ValueError(f"Segment {i} does not have a constant upper bound")
                if ub==sym.oo:
                    #can't do floor division with infinity...
                    equispaced = False
        except TypeError:
            raise ValueError("One or more segments are in an incorrect format")
        except ValueError as v:
            # Distinguish our own ValueErrors from tuple-unpacking failures above.
            if "too many values to unpack (expected 2)" in v.args:
                raise ValueError("One or more segments are in an incorrect format")
            raise v
        segment_spacing = equations[1][1]-equations[0][1]
        # check upper bounds ascend, and whether segment widths are uniform
        for i,(eq,ub) in enumerate(equations[:-1]):
            if ub>equations[i+1][1]:
                raise ValueError(f"Segment {i} has a higher upper bound than segment {i+1}")
            if not math.isclose(equations[i+1][1]-ub,segment_spacing,abs_tol=1e-9):
                equispaced=False
        obj = super().__new__(*args,**kwargs)
        obj._equispaced = equispaced
        return obj
    def check_continuity(self):
        """Check that this Segwise instance is continuous with continuous 1st derivatives
        May be slow for large numbers of segments"""
        equations = self.args[1:]
        # check value and 1st-derivative continuity at every interior bound
        for i,(eq,ub) in enumerate(equations[:-1]):
            s1 = eq.subs(self.s,ub).evalf()
            s2 = equations[i+1][0].subs(self.s,ub).evalf()
            d1 = sym.diff(eq,self.s).subs(self.s,ub).evalf()
            d2 = sym.diff(equations[i + 1][0],self.s).subs(self.s, ub).evalf()
            if not s1.is_Number:
                print(f"Segment {i} contains other variables other than {self.s}")
                return False
            if not s2.is_Number:
                print(f"Segment {i+1} contains other variables other than {self.s}")
                return False
            if not math.isclose(s1,s2,abs_tol=1e-9):
                print(f"Segments {i} and {i+1} are not continuous.")
                return False
            if not math.isclose(d1,d2,abs_tol=1e-9):
                print(f"Segments {i} and {i+1} do not have continuous 1st derivatives.")
                return False
        return True
    def _eval_subs(self, old, new):
        # When substitution makes the argument a concrete number, pick the
        # first segment whose upper bound covers it and evaluate that equation.
        sub_arg = self.args[0].subs(old,new)
        if sub_arg.is_Number:
            for eq,ub in self.args[1:]:
                if ub>=sub_arg:
                    return eq.subs(self.s,sub_arg)
        return Segwise(sub_arg,*self.args[1:])
    def _eval_derivative(self, s):
        # Chain rule: differentiate the argument, and each segment equation
        # with respect to the internal symbol (bounds unchanged).
        return sym.diff(self.args[0],s)*self.__class__(self.args[0],*((sym.diff(eq,self.s),ub) for eq,ub in self.args[1:]))
    #readonly property
    @property
    def equispaced(self):
        # True when construction found uniform segment widths.
        return self._equispaced
class CyclicSegwise(Segwise):
    """Piecewise function for repeating sequential linear segments.
    Evaluates as Segwise(argument % highest_upper_bound)
    arguments: Segwise(argument, (equation (in Segwise.s), upper_bound), (equation_2, upper_bound_2))"""
    def check_continuity(self):
        """Check that this Segwise instance is continuous with continuous 1st derivatives
        May be slow for large numbers of segments"""
        if super().check_continuity():
            equations = self.args[1:]
            # additionally check the wrap-around joint: the end of the last
            # segment must meet the start of the first (value and derivative)
            first, fub = equations[0]
            last, lub = equations[-1]
            s1 = first.subs(self.s,0)
            s2 = last.subs(self.s,lub)
            d1 = sym.diff(first,self.s).subs(self.s,0)
            d2 = sym.diff(last,self.s).subs(self.s, lub)
            if not s1.is_Number:
                print(f"The first segment contains other variables other than {self.s}")
                return False
            if not s2.is_Number:
                print(f"The last segment contains other variables other than {self.s}")
                return False
            if not math.isclose(s1,s2,abs_tol=1e-9):
                print(f"The first and last segment are not continuous.")
                return False
            if not math.isclose(d1,d2,abs_tol=1e-9):
                print(f"The first and last segment do not have continuous 1st derivatives.")
                return False
            return True
        return False
    def _eval_subs(self, old, new):
        # Same as Segwise._eval_subs but wraps numeric arguments into the
        # fundamental period before segment lookup.
        sub_arg = self.args[0].subs(old,new)
        if sub_arg.is_Number:
            sub_arg = sub_arg % self._wrap
            for eq,ub in self.args[1:]:
                if ub>=sub_arg:
                    return eq.subs(self.s,sub_arg)
        return CyclicSegwise(sub_arg,*self.args[1:])
    #readonly property
    @property
    def equispaced(self):
        return self._equispaced
    @property
    def _wrap(self):
        # Period of the cycle: the last segment's upper bound.
        return self.args[-1][1]
class _PPolyStash(object):
"""hides ppoly instances from sympy"""
def __init__(self):
self._cache = {}
self._next = 0
def register_poly(self,ppoly:PPoly)->int:
self._cache[self._next] = ppoly
self._next+=1
return self._next-1
def __getitem__(self, item):
return self._cache[item]
class PolynomialSpline(sym.Function):
    """Increased performance spline via "data smuggling"?

    Wraps a scipy PPoly so a compiled spline can live inside a sympy
    expression: the PPoly is stored in a class-level stash and only its
    integer handle appears as the second sympy argument.
    """
    # Exactly two sympy arguments: (argument expression, stash handle).
    nargs=2
    _equispaced = True
    # Class-level stash shared by all instances.
    # NOTE(review): entries are never evicted, so registered PPoly objects
    # live for the lifetime of the process.
    poly_cache = _PPolyStash()
    @classmethod
    def __new__(cls, *args, **kwargs):
        # As in Segwise, @classmethod on __new__ means args == (cls, expr, ppoly).
        expression = args[1]
        ppoly = args[2]
        if not isinstance(expression, sym.Basic):
            raise ValueError("PolynomialSpline's first argument must a sympy expression")
        if not isinstance(ppoly,PPoly):
            raise ValueError("PolynomialSpline's second argument must a scipy PPoly")
        idx = cls.poly_cache.register_poly(ppoly)
        # Determine whether the breakpoints are uniformly spaced.
        equispace = ppoly.x[1] - ppoly.x[0]
        for x1, x2 in zip(ppoly.x[:-1], ppoly.x[1:]):
            if not math.isclose((x2 - x1), equispace):
                equispaced = False
                break
        else:
            equispaced = True
        obj = super().__new__(cls,expression,idx)
        obj._equispaced=equispaced
        return obj
    def _eval_subs(self, old, new):
        # Numeric substitution evaluates the underlying PPoly directly.
        sub_arg = self.args[0].subs(old,new)
        if sub_arg.is_Number:
            return self._ppoly(sub_arg)
        # NOTE(review): rebuilding with the PPoly re-registers it in the stash.
        return self.__class__(sub_arg,self._ppoly)
    def _eval_derivative(self, s):
        # Chain rule, delegating the spline derivative to scipy.
        derivative = self.__class__(self.args[0],self._ppoly.derivative())
        return sym.diff(self.args[0],s)*derivative
    #readonly property
    @property
    def equispaced(self):
        return self._equispaced
    @property
    def _ppoly(self):
        # Resolve the smuggled integer handle back to the actual PPoly.
        return self.poly_cache[self.args[1]]
class CyclicPolynomialSpline(PolynomialSpline):
    """Periodic spline variant: numeric arguments are wrapped into the
    spline's domain length before evaluation."""
    def _eval_subs(self, old, new):
        sub_arg = self.args[0].subs(old,new)
        if sub_arg.is_Number:
            # Wrap into [0, period) before evaluating the PPoly.
            return self._ppoly(sub_arg % self._wrap)
        return super()._eval_subs(old,new)
    @property
    def _wrap(self):
        # Length of the spline's breakpoint domain (the repeat period).
        return self._ppoly.x[-1]-self._ppoly.x[0]
def cubic_spline(x, x_data, y_data, bounds: typing.Literal["natural", "not-a-knot", "periodic", "clamped"] = "natural"):
    """Build a cubic spline through (x_data, y_data), evaluated at expression x.

    Periodic bounds produce a CyclicPolynomialSpline (wrapping evaluation);
    any other boundary condition produces a plain PolynomialSpline.
    """
    if bounds == "periodic" and not math.isclose(y_data[0], y_data[-1]):
        raise ValueError("Periodic splines require the first and last y data points to be the same.")
    # Shift the knots so the spline's domain starts at zero.
    origin = min(x_data)
    shifted_knots = [knot - origin for knot in x_data]
    interpolant = CubicSpline(shifted_knots, y_data, bc_type=bounds)
    wrapper_cls = CyclicPolynomialSpline if bounds == "periodic" else PolynomialSpline
    return wrapper_cls(x - origin, interpolant)
def softplus(x, k=1.0):
    """Smooth, overflow-safe approximation of (x if x > 0 else 0), sharpness k."""
    s = Segwise.s
    # For s <= 0 use softplus directly; for s > 0 use the rewritten form
    # s + softplus(-s), so exp() never sees a large positive argument.
    below = (1/k*sym.ln(1+sym.exp(s*k)), 0)
    above = (s+1/k*sym.ln(1+sym.exp(-s*k)), sym.oo)
    return Segwise(x, below, above)
def logistic(x, k=1.0):
    """Smooth, overflow-safe approximation of the step (1 if x > 0 else 0)."""
    shifted = sym.tanh(k*x)+1
    return 0.5*shifted
<reponame>bigbitbus/opta
import base64
from typing import Any, Dict, List
import pytest
from click.testing import CliRunner
from pytest_mock import MockFixture
from opta.cli import cli
from opta.commands.push import (
get_acr_auth_info,
get_ecr_auth_info,
get_gcr_auth_info,
get_registry_url,
push_to_docker,
)
from opta.exceptions import UserErrors
from opta.layer import Layer
from tests.fixtures.basic_apply import BASIC_APPLY
# Canonical ECR repository URL shared by the tests in this module.
REGISTRY_URL = "889760294590.dkr.ecr.us-east-1.amazonaws.com/test-service-runx-app"
# Minimal terraform output payload exposing only the docker repo url.
TERRAFORM_OUTPUTS = {"docker_repo_url": REGISTRY_URL}
@pytest.fixture(scope="module", autouse=True)
def mock_is_service_config(module_mocker: MockFixture) -> None:
    """Autouse: make every test in this module see the opta file as a service config."""
    module_mocker.patch("opta.commands.push.is_service_config", return_value=True)
def test_is_env_config(mocker: MockFixture) -> None:
    """`opta push` must abort when the yaml is an environment (not service) file."""
    # Opta file check
    mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists")
    mocked_os_path_exists.return_value = True
    # Override the module-level autouse fixture for this one test.
    mocker.patch("opta.commands.push.is_service_config", return_value=False)
    runner = CliRunner()
    result = runner.invoke(cli, ["push", "local_image:local_tag"])
    assert "Opta push can only run on service yaml files." in str(result.exception)
def test_get_registry_url(mocker: MockFixture) -> None:
    """get_registry_url should surface the docker_repo_url terraform output."""
    mocker.patch(
        "opta.commands.push.get_terraform_outputs", return_value=TERRAFORM_OUTPUTS
    )
    fake_layer = mocker.Mock(spec=Layer)
    fake_layer.name = "blah"
    assert get_registry_url(fake_layer) == REGISTRY_URL
def test_no_docker_repo_url_in_output(mocker: MockFixture) -> None:
    """get_registry_url raises when terraform outputs lack docker_repo_url."""
    mocker.patch("os.path.isdir", return_value=True)
    mocker.patch("opta.commands.push.get_terraform_outputs", return_value={})
    layer = mocker.Mock(spec=Layer)
    layer.name = "blah"
    with pytest.raises(Exception) as e_info:
        get_registry_url(layer)
    expected_error_output = "Unable to determine docker repository url"
    assert expected_error_output in str(e_info)
def test_get_ecr_auth_info(mocker: MockFixture) -> None:
    """get_ecr_auth_info must decode the base64 "user:password" ECR token."""
    mocked_layer = mocker.Mock(spec=Layer)
    mocked_layer.gen_providers = lambda x: BASIC_APPLY[1]
    mocked_ecr_client = mocker.Mock()

    def mock_get_authorization_token(registryIds: List[str]) -> Dict[str, Any]:
        # ECR's authorizationToken is base64("<user>:<password>"). The previous
        # "<PASSWORD>:password" placeholder could never satisfy the assertion
        # below, which expects the username half to be "username".
        decoded_auth_token = "username:password"
        auth_token_bytes = decoded_auth_token.encode()
        b64_auth_token = base64.b64encode(auth_token_bytes)
        return {"authorizationData": [{"authorizationToken": b64_auth_token}]}

    mocked_ecr_client.get_authorization_token = mock_get_authorization_token
    patched_boto_client = mocker.patch("opta.commands.push.boto3.client")
    patched_boto_client.return_value = mocked_ecr_client
    assert get_ecr_auth_info(mocked_layer) == ("username", "password",)
def test_get_gcr_auth_info(mocker: MockFixture) -> None:
    """GCR auth pairs the fixed 'oauth2accesstoken' user with the credential token."""
    mocked_layer = mocker.Mock(spec=Layer)
    mocked_credentials = mocker.Mock()
    mocked_credentials.token = "blah"
    mocker.patch("opta.commands.push.GCP.using_service_account", return_value=False)
    patched_gcp = mocker.patch(
        "opta.commands.push.GCP.get_credentials",
        return_value=tuple([mocked_credentials, "oauth2accesstoken"]),
    )
    assert get_gcr_auth_info(mocked_layer) == ("oauth2accesstoken", "blah",)
    patched_gcp.assert_called_once_with()
def test_get_acr_auth_info(mocker: MockFixture) -> None:
    """ACR auth uses the fixed all-zeros GUID username plus the az CLI token."""
    mocked_layer = mocker.Mock(spec=Layer)
    mocked_layer.root.return_value = mocked_layer
    mocked_get_terraform_output = mocker.patch(
        "opta.commands.push.get_terraform_outputs", return_value={"acr_name": "blah"}
    )
    mocked_nice_run_output = mocker.Mock()
    mocked_nice_run = mocker.patch(
        "opta.commands.push.nice_run", return_value=mocked_nice_run_output
    )
    # The token is read from the az CLI's stdout.
    mocked_nice_run_output.stdout = "dummy_token"
    assert get_acr_auth_info(mocked_layer) == (
        "00000000-0000-0000-0000-000000000000",
        "dummy_token",
    )
    mocked_get_terraform_output.assert_called_once_with(mocked_layer)
    mocked_nice_run.assert_has_calls(
        [
            mocker.call(
                [
                    "az",
                    "acr",
                    "login",
                    "--name",
                    "blah",
                    "--expose-token",
                    "--output",
                    "tsv",
                    "--query",
                    "accessToken",
                ],
                check=True,
                capture_output=True,
            ),
        ]
    )
def test_valid_input(mocker: MockFixture) -> None:
    """Happy path: docker login, tag, and push are all invoked with the override tag."""
    mocker.patch("opta.commands.push.get_image_digest")
    mocked_nice_run = mocker.patch("opta.commands.push.nice_run")
    push_to_docker(
        "username",
        "password",
        "local_image:local_tag",
        REGISTRY_URL,
        "image_tag_override",
    )
    mocked_nice_run.assert_has_calls(
        [
            mocker.call(
                [
                    "docker",
                    "login",
                    REGISTRY_URL,
                    "--username",
                    "username",
                    "--password-stdin",
                ],
                input=b"password",
                check=True,
            ),
            mocker.call(
                [
                    "docker",
                    "tag",
                    "local_image:local_tag",
                    f"{REGISTRY_URL}:image_tag_override",
                ],
                check=True,
            ),
            mocker.call(
                ["docker", "push", f"{REGISTRY_URL}:image_tag_override"], check=True,
            ),
        ]
    )
def test_no_tag(mocker: MockFixture) -> None:
    """push_to_docker rejects image names that lack an explicit tag."""
    with pytest.raises(Exception) as e_info:
        push_to_docker(
            "username", "password", "local_image", REGISTRY_URL, "image_tag_override"
        )
    expected = (
        "Unexpected image name local_image: your image_name must be of the format <IMAGE>:<TAG>."
    )
    assert expected in str(e_info)
def test_no_docker(mocker: MockFixture) -> None:
    """If docker is missing (ensure_installed raises), the error surfaces unchanged."""
    mocker.patch(
        "opta.utils.os.path.exists", return_value=True
    )  # Make check_opta_file_exists succeed
    mocker.patch("opta.commands.push.ensure_installed", side_effect=UserErrors("foobar"))
    runner = CliRunner()
    result = runner.invoke(cli, ["push", "local_image:local_tag"])
    assert str(result.exception) == "foobar"
def test_no_tag_override(mocker: MockFixture) -> None:
    """Without --tag, the remote image reuses the local image's own tag."""
    # Opta file check
    mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists")
    mocked_os_path_exists.return_value = True
    nice_run_mock = mocker.patch("opta.commands.push.nice_run")
    gen_mock = mocker.patch("opta.commands.push.gen_all")
    layer_object_mock = mocker.patch("opta.commands.push.Layer")
    layer_mock = mocker.Mock(spec=Layer)
    layer_mock.cloud = "aws"
    layer_mock.org_name = "dummy_org_name"
    layer_mock.name = "dummy_name"
    layer_object_mock.load_from_yaml.return_value = layer_mock
    mocker.patch(
        "opta.commands.push.get_registry_url"
    ).return_value = "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app"
    mocker.patch("opta.commands.push.get_ecr_auth_info").return_value = (
        "username",
        "password",
    )
    mocker.patch("opta.commands.push.get_image_digest")
    runner = CliRunner()
    result = runner.invoke(cli, ["push", "local_image:local_tag"])
    assert result.exit_code == 0
    layer_object_mock.load_from_yaml.assert_called_once_with("opta.yaml", None)
    gen_mock.assert_called_once_with(layer_mock)
    nice_run_mock.assert_has_calls(
        [
            mocker.call(
                [
                    "docker",
                    "login",
                    "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app",
                    "--username",
                    "username",
                    "--password-stdin",
                ],
                input=b"password",
                check=True,
            ),
            mocker.call(
                [
                    "docker",
                    "tag",
                    "local_image:local_tag",
                    "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app:local_tag",
                ],
                check=True,
            ),
        ]
    )
def test_with_tag_override(mocker: MockFixture) -> None:
    """With --tag, the remote image is tagged and pushed under the override tag."""
    # Opta file check
    mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists")
    mocked_os_path_exists.return_value = True
    nice_run_mock = mocker.patch("opta.commands.push.nice_run")
    gen_mock = mocker.patch("opta.commands.push.gen_all")
    layer_object_mock = mocker.patch("opta.commands.push.Layer")
    layer_mock = mocker.Mock(spec=Layer)
    layer_mock.cloud = "aws"
    layer_mock.org_name = "dummy_org_name"
    layer_mock.name = "dummy_name"
    layer_object_mock.load_from_yaml.return_value = layer_mock
    mocker.patch(
        "opta.commands.push.get_registry_url"
    ).return_value = "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app"
    mocker.patch("opta.commands.push.get_ecr_auth_info").return_value = (
        "username",
        "password",
    )
    mocker.patch("opta.commands.push.get_image_digest")
    runner = CliRunner()
    result = runner.invoke(
        cli, ["push", "local_image:local_tag", "--tag", "tag-override"]
    )
    assert result.exit_code == 0
    layer_object_mock.load_from_yaml.assert_called_once_with("opta.yaml", None)
    gen_mock.assert_called_once_with(layer_mock)
    nice_run_mock.assert_has_calls(
        [
            mocker.call(
                [
                    "docker",
                    "login",
                    "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app",
                    "--username",
                    "username",
                    "--password-stdin",
                ],
                input=b"password",
                check=True,
            ),
            mocker.call(
                [
                    "docker",
                    "tag",
                    "local_image:local_tag",
                    "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app:tag-override",
                ],
                check=True,
            ),
            mocker.call(
                [
                    "docker",
                    "push",
                    "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app:tag-override",
                ],
                check=True,
            ),
        ]
    )
def test_bad_image_name(mocker: MockFixture) -> None:
    """An untagged local image fails even after layer loading/generation succeed."""
    # Opta file check
    mocked_os_path_exists = mocker.patch("opta.utils.os.path.exists")
    mocked_os_path_exists.return_value = True
    gen_mock = mocker.patch("opta.commands.push.gen_all")
    layer_object_mock = mocker.patch("opta.commands.push.Layer")
    layer_mock = mocker.Mock(spec=Layer)
    layer_mock.cloud = "aws"
    layer_mock.org_name = "dummy_org_name"
    layer_mock.name = "dummy_name"
    layer_object_mock.load_from_yaml.return_value = layer_mock
    mocker.patch(
        "opta.commands.push.get_registry_url"
    ).return_value = "889760294590.dkr.ecr.us-east-1.amazonaws.com/github-runx-app"
    mocker.patch("opta.commands.push.get_ecr_auth_info").return_value = (
        "username",
        "password",
    )
    runner = CliRunner()
    result = runner.invoke(cli, ["push", "local_image", "--tag", "tag-override"])
    assert result.exit_code == 1
    layer_object_mock.load_from_yaml.assert_called_once_with("opta.yaml", None)
    gen_mock.assert_called_once_with(layer_mock)
    assert (
        str(result.exception)
        == "Unexpected image name local_image: your image_name must be of the format <IMAGE>:<TAG>."
    )
|
<gh_stars>10-100
#
# PySNMP MIB module ENTERASYS-MIB-NAMES (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ENTERASYS-MIB-NAMES
# Produced by pysmi-0.3.4 at Mon Apr 29 18:48:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated MIB module. `mibBuilder` is not imported here;
# the pysnmp MIB loader injects it into the module namespace at load time.
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
TimeTicks, iso, Counter32, NotificationType, Bits, Integer32, Unsigned32, ObjectIdentity, MibIdentifier, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, ModuleIdentity, IpAddress, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "iso", "Counter32", "NotificationType", "Bits", "Integer32", "Unsigned32", "ObjectIdentity", "MibIdentifier", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "ModuleIdentity", "IpAddress", "enterprises")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity and revision history for ENTERASYS-MIB-NAMES.
etsysModuleName = ModuleIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 2, 1))
etsysModuleName.setRevisions(('2003-11-06 15:15', '2003-10-23 17:19', '2002-06-14 16:02', '2002-06-14 14:02', '2000-11-13 21:21', '2000-10-05 13:00', '2000-04-07 00:00', '2000-03-21 00:00',))
if mibBuilder.loadTexts: etsysModuleName.setLastUpdated('200311061515Z')
if mibBuilder.loadTexts: etsysModuleName.setOrganization('Enterasys Networks, Inc')
# OID registrations under the Enterasys enterprise arc (1.3.6.1.4.1.5624).
enterasys = ObjectIdentity((1, 3, 6, 1, 4, 1, 5624))
if mibBuilder.loadTexts: enterasys.setStatus('current')
etsysMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1))
etsysOids = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 2))
etsysAgentCaps = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 3))
etsysX509Pki = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 509))
etsysModules = MibIdentifier((1, 3, 6, 1, 4, 1, 5624, 1, 2))
etsysNamesMib = ObjectIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 1))
if mibBuilder.loadTexts: etsysNamesMib.setStatus('obsolete')
etsysConformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 3))
if mibBuilder.loadTexts: etsysConformance.setStatus('obsolete')
etsysConformName = ObjectIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 3, 1))
if mibBuilder.loadTexts: etsysConformName.setStatus('obsolete')
etsysConformOID = ObjectIdentity((1, 3, 6, 1, 4, 1, 5624, 1, 3, 2))
if mibBuilder.loadTexts: etsysConformOID.setStatus('obsolete')
# Export every symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("ENTERASYS-MIB-NAMES", etsysAgentCaps=etsysAgentCaps, etsysModules=etsysModules, etsysConformOID=etsysConformOID, PYSNMP_MODULE_ID=etsysModuleName, etsysModuleName=etsysModuleName, etsysMibs=etsysMibs, etsysConformance=etsysConformance, etsysNamesMib=etsysNamesMib, enterasys=enterasys, etsysOids=etsysOids, etsysConformName=etsysConformName, etsysX509Pki=etsysX509Pki)
|
import argparse
import numpy as np
import os
from keras import backend as K
from keras.models import load_model
import pdb
import sys
import traceback
import matplotlib.pyplot as plt
import numpy as np
from keras import layers, models, optimizers
from keras import backend as K
from keras.utils import to_categorical
import matplotlib.pyplot as plt
from utils import combine_images
from PIL import Image
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
# dimensions of the generated pictures for each filter.
img_width = 28   # MNIST images are 28 pixels wide
img_height = 28  # and 28 pixels tall
def CapsNet(input_shape, n_class, routings):
    """
    A Capsule Network on MNIST.
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param routings: number of routing iterations
    :return: Three Keras Models: `train_model` (takes [image, label] so the true
             label can mask the decoder input), `eval_model` (image-only, for
             prediction), and `manipulate_model` (adds latent noise, for
             visualisation). `eval_model` can also be used for training.
    """
    x = layers.Input(shape=input_shape)
    # Layer 1: Just a conventional Conv2D layer
    conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
    # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
    primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
    # Layer 3: Capsule layer. Routing algorithm works here.
    digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,
                             name='digitcaps')(primarycaps)
    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(digitcaps)
    # Decoder network.
    y = layers.Input(shape=(n_class,))
    masked_by_y = Mask()([digitcaps, y])  # The true label is used to mask the output of capsule layer. For training
    masked = Mask()(digitcaps)  # Mask using the capsule with maximal length. For prediction
    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
    # Models for training and evaluation (prediction)
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])
    # manipulate model: perturb the digit capsules with an explicit noise input
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
def get_test(csv):
    """Read a comma-separated test file (header + "id,<space-separated pixels>"
    rows) and return {'x': int array of pixel rows}."""
    with open(csv) as handle:
        handle.readline()  # skip the header row
        pixels = [
            [int(value) for value in row.split(',')[1].split()]
            for row in handle
        ]
    return {'x': np.array(pixels)}
def normalize(x):
    """Scale tensor x to unit L2 norm; the epsilon keeps the division safe."""
    denom = K.sqrt(K.mean(K.square(x))) + 1e-7
    return x / denom
def manipulate_latent(model, data, args):
    """Sweep each of the 16 capsule dimensions for one random test digit and
    save the grid of reconstructions to <args.save_dir>/manipulate-<digit>.png.

    Expects `args` (argparse namespace) to provide `digit` and `save_dir`.
    """
    print('-'*30 + 'Begin: manipulate' + '-'*30)
    x_test, y_test = data
    # Pick a random test sample of the requested digit class.
    index = np.argmax(y_test, 1) == args.digit
    number = np.random.randint(low=0, high=sum(index) - 1)
    x, y = x_test[index][number], y_test[index][number]
    x, y = np.expand_dims(x, 0), np.expand_dims(y, 0)
    noise = np.zeros([1, 10, 16])
    x_recons = []
    # Perturb one latent dimension at a time over [-0.25, 0.25] in 0.05 steps.
    for dim in range(16):
        for r in [-0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1, 0.15, 0.2, 0.25]:
            tmp = np.copy(noise)
            tmp[:,:,dim] = r
            x_recon = model.predict([x, y, tmp])
            x_recons.append(x_recon)
    x_recons = np.concatenate(x_recons)
    img = combine_images(x_recons, height=16)
    image = img*255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir + '/manipulate-%d.png' % args.digit)
    print('manipulated result saved to %s/manipulate-%d.png' % (args.save_dir, args.digit))
    print('-' * 30 + 'End: manipulate' + '-' * 30)
def load_mnist():
    """Load MNIST: scale pixels to [0, 1], reshape to (N, 28, 28, 1), and
    one-hot encode the labels. Downloads the dataset on first use."""
    # the data, shuffled and split between train and test sets
    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.
    x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.
    y_train = to_categorical(y_train.astype('float32'))
    y_test = to_categorical(y_test.astype('float32'))
    return (x_train, y_train), (x_test, y_test)
def main():
    """Visualise PrimaryCap filters of a trained CapsNet by gradient ascent on
    random input images, saving the resulting grid to filters.png."""
    n_iter = 100
    # load data
    (x_train, y_train), (x_test, y_test) = load_mnist()
    # define model
    model, eval_model, manipulate_model = CapsNet(input_shape=x_train.shape[1:],
                                                  n_class=len(np.unique(np.argmax(y_train, 1))),
                                                  routings=3)
    model.summary()
    model.load_weights('result/trained_model.h5')
    layer_dict = dict([layer.name, layer] for layer in model.layers)
    layer = layer_dict["primarycap_squash"]
    """
    n_filters = layer.filters
    print()
    print('layer={}'.format(layer))
    print()
    filter_imgs = [[] for i in range(n_filters)]
    for ind_filter in range(n_filters):
        filter_imgs[ind_filter] = np.random.random((1, img_width, img_height, 1))
        activation = K.mean(layer.output[:, :, :, ind_filter])
        grads = normalize(K.gradients(activation, model.inputs[0])[0])
        iterate = K.function([model.inputs[0], K.learning_phase()],
                             [activation, grads])
        print('processing filter %d' % ind_filter)
        for i in range(n_iter):
            act, g = iterate([filter_imgs[ind_filter], 0])
            filter_imgs[ind_filter] += g
    """
    n_filters = 32
    filter_imgs = [[] for i in range(n_filters)]
    for ind_filter in range(n_filters):
        # Start from random noise and climb the mean-activation gradient.
        filter_imgs[ind_filter] = np.random.random((1, img_width, img_height, 1))
        #activation = K.mean(layer.output[:, 36*ind_filter:36*(ind_filter+1), :])
        activation = K.mean(layer.output[:, ind_filter::36, :])
        grads = normalize(K.gradients(activation, model.inputs[0])[0])
        iterate = K.function([model.inputs[0], K.learning_phase()],
                             [activation, grads])
        print('processing filter %d' % ind_filter)
        for i in range(n_iter):
            act, g = iterate([filter_imgs[ind_filter], 0])
            filter_imgs[ind_filter] += g
    # NOTE(review): `ind_filter` here is the leftover loop value (n_filters-1);
    # the figure height was presumably meant to use n_filters — confirm.
    # Also `n_filters / 16 + 1` is a float subplot count, which only older
    # matplotlib versions accept — verify before upgrading.
    fig = plt.figure(figsize=(14, 2 * ind_filter / 16))
    for ind_filter in range(n_filters):
        ax = fig.add_subplot(n_filters / 16 + 1, 16, ind_filter + 1)
        ax.imshow(filter_imgs[ind_filter].reshape(img_width, img_height),
                  cmap='BuGn')
        plt.xticks(np.array([]))
        plt.yticks(np.array([]))
        plt.xlabel('filter %d' % ind_filter)
    plt.tight_layout()
    fig.suptitle('Filters of PrimaryCap')
    fig.savefig('filters.png')
if __name__ == '__main__':
    try:
        main()
    # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit,
    # and the old unpacking shadowed the builtin `type`.
    except Exception:
        _, _, tb = sys.exc_info()
        traceback.print_exc()
        # Drop into the post-mortem debugger so the failure can be inspected.
        pdb.post_mortem(tb)
|
import json
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
from run_deployment import run_main
from zenml.services import load_last_service_from_step
def main():
    """Render the Streamlit UI for the customer-satisfaction prediction app.

    Shows pipeline documentation, collects product/payment features from the
    sidebar and number inputs, and on "Predict" sends them to the last
    deployed ZenML prediction service. The "Results" button shows a static
    model-comparison table and feature-importance figure.
    """
    st.title("End to End Customer Satisfaction Pipeline with ZenML")
    high_level_image = Image.open("_assets/high_level_overview.png")
    st.image(high_level_image, caption="High Level Pipeline")
    whole_pipeline_image = Image.open("_assets/training_and_deployment_pipeline_updated.png")
    st.markdown(
        """
    #### Problem Statement
     The objective here is to predict the customer satisfaction score for a given order based on features like order status, price, payment, etc. I will be using [ZenML](https://zenml.io/) to build a production-ready pipeline to predict the customer satisfaction score for the next order or purchase. """
    )
    st.image(whole_pipeline_image, caption="Whole Pipeline")
    st.markdown(
        """
    Above is a figure of the whole pipeline, we first ingest the data, clean it, train the model, and evaluate the model, and if data source changes or any hyperparameter values changes, deployment will be triggered, and (re) trains the model and if the model meets minimum accuracy requirement, the model will be deployed.
    """
    )
    st.markdown(
        """
    #### Description of Features
    This app is designed to predict the customer satisfaction score for a given customer. You can input the features of the product listed below and get the customer satisfaction score.
    | Models        | Description   |
    | ------------- | -     |
    | Payment Sequential | Customer may pay an order with more than one payment method. If he does so, a sequence will be created to accommodate all payments. |
    | Payment Installments   | Number of installments chosen by the customer. |
    | Payment Value |       Total amount paid by the customer. |
    | Price |       Price of the product. |
    | Freight Value |    Freight value of the product.  |
    | Product Name length |    Length of the product name. |
    | Product Description length |    Length of the product description. |
    | Product photos Quantity |    Number of product published photos |
    | Product weight measured in grams |    Weight of the product measured in grams. |
    | Product length (CMs) |    Length of the product measured in centimeters. |
    | Product height (CMs) |    Height of the product measured in centimeters. |
    | Product width (CMs) |    Width of the product measured in centimeters. |
    """
    )
    payment_sequential = st.sidebar.slider("Payment Sequential")
    payment_installments = st.sidebar.slider("Payment Installments")
    payment_value = st.number_input("Payment Value")
    price = st.number_input("Price")
    freight_value = st.number_input("freight_value")
    product_name_length = st.number_input("Product name length")
    product_description_length = st.number_input("Product Description length")
    product_photos_qty = st.number_input("Product photos Quantity ")
    product_weight_g = st.number_input("Product weight measured in grams")
    product_length_cm = st.number_input("Product length (CMs)")
    product_height_cm = st.number_input("Product height (CMs)")
    product_width_cm = st.number_input("Product width (CMs)")

    if st.button("Predict"):
        service = load_last_service_from_step(
            pipeline_name="continuous_deployment_pipeline",
            step_name="model_deployer",
            running=True,
        )
        if service is None:
            st.write("No service could be found. The pipeline will be run first to create a service.")
            run_main()
            # BUG FIX: `service` stayed None after running the pipeline, so
            # the predict call below crashed. Reload the freshly deployed
            # service before predicting.
            service = load_last_service_from_step(
                pipeline_name="continuous_deployment_pipeline",
                step_name="model_deployer",
                running=True,
            )
        df = pd.DataFrame(
            {
                "payment_sequential": [payment_sequential],
                "payment_installments": [payment_installments],
                "payment_value": [payment_value],
                "price": [price],
                "freight_value": [freight_value],
                # NOTE: "lenght" misspelling is intentional — it matches the
                # column names in the Olist source dataset / trained model.
                "product_name_lenght": [product_name_length],
                "product_description_lenght": [product_description_length],
                "product_photos_qty": [product_photos_qty],
                "product_weight_g": [product_weight_g],
                "product_length_cm": [product_length_cm],
                "product_height_cm": [product_height_cm],
                "product_width_cm": [product_width_cm],
            }
        )
        # Round-trip through JSON to get plain Python scalars for the service.
        json_list = json.loads(json.dumps(list(df.T.to_dict().values())))
        data = np.array(json_list)
        pred = service.predict(data)
        st.success(
            "Your Customer Satisfactory rate(range between 0 - 5) with given product details is :-{}".format(
                pred
            )
        )
    if st.button("Results"):
        st.write(
            "We have experimented with two ensemble and tree based models and compared the performance of each model. The results are as follows:"
        )
        df = pd.DataFrame(
            {
                "Models": ["LightGBM", "Xgboost"],
                "MSE": [1.804, 1.781],
                "RMSE": [1.343, 1.335],
            }
        )
        st.dataframe(df)
        st.write(
            "Following figure shows how important each feature is in the model that contributes to the target variable or contributes in predicting customer satisfaction rate."
        )
        image = Image.open("_assets/feature_importance_gain.png")
        st.image(image, caption="Feature Importance Gain")
|
import logging
import threading
from typing import Iterable, Optional
from cv2 import cv2
import werkzeug.serving
from flask import Flask, Blueprint
from flask.helpers import url_for
from flask.wrappers import Response
from layered_vision.utils.image import rgb_to_bgr, apply_alpha
from layered_vision.utils.image import ImageArray
from .api import T_OutputSink
LOGGER = logging.getLogger(__name__)
class ServerThread(threading.Thread):
    """Runs a werkzeug development server for a Flask app on its own thread."""

    def __init__(self, app: Flask, host: str, port: int, **kwargs):
        super().__init__()
        self.server = werkzeug.serving.make_server(host, port, app, **kwargs)
        # Push the app context so code on this thread can use Flask helpers.
        self.app_context = app.app_context()
        self.app_context.push()

    def run(self):
        LOGGER.info('starting server, host=%r, port=%d', self.server.host, self.server.port)
        self.server.serve_forever()

    def _shutdown(self):
        # Actual shutdown work; runs on a helper thread (see shutdown()).
        LOGGER.info('stopping webserver')
        self.server.shutdown()
        LOGGER.info('stopped webserver')

    def shutdown(self):
        # Shut down from a separate thread so the caller is not blocked.
        threading.Thread(target=self._shutdown).start()
class LastImageFrameWrapper:
    """Holds the most recent image frame; consumers block until one arrives.

    Producers call push(); consumers call wait_for_next(), which raises
    StopIteration once stop() has been requested.
    """

    def __init__(self):
        self.stopped_event = threading.Event()
        self.has_frame_event = threading.Event()
        self.frame: Optional[ImageArray] = None

    def stop(self):
        """Signal consumers that no more frames will be produced."""
        LOGGER.info('setting stopped event')
        self.stopped_event.set()

    def push(self, frame: ImageArray):
        """Publish a new frame and wake any waiting consumer."""
        self.frame = frame
        self.has_frame_event.set()

    def wait_for_next(self) -> ImageArray:
        """Block until a new frame is available and return it.

        Raises:
            StopIteration: once stop() has been called.
        """
        # Poll with a 1 s timeout so a stop request is noticed promptly
        # even when no frames are arriving.
        while not self.stopped_event.is_set():
            if not self.has_frame_event.wait(1.0):
                continue
            self.has_frame_event.clear()
            latest = self.frame
            assert latest is not None
            return latest
        LOGGER.info('stopped event set, raising StopIteration')
        raise StopIteration()
def generate_image_frames(
    last_image_frame_wrapper: LastImageFrameWrapper
) -> Iterable[bytes]:
    """Yield JPEG-encoded frames as multipart/x-mixed-replace body parts.

    Blocks on the frame wrapper for each new frame; ends the generator when
    the wrapper is stopped. Frames that fail to encode are skipped.
    """
    while True:
        try:
            LOGGER.debug('waiting for frame...')
            frame = last_image_frame_wrapper.wait_for_next()
        except StopIteration:
            # The wrapper was stopped; end the stream cleanly.
            LOGGER.info('received stop iteration')
            return
        LOGGER.debug('generating frame...')
        # Flatten alpha and convert RGB -> BGR, which cv2.imencode expects.
        bgr_frame = rgb_to_bgr(apply_alpha(frame.astype('float32')))
        (flag, encoded_frame) = cv2.imencode(".jpg", bgr_frame)
        if not flag:
            LOGGER.warning('unable to encode frame')
            continue
        # One multipart body part per frame, delimited by the 'frame' boundary
        # declared in the Response mimetype.
        yield (
            b'--frame\r\n'
            b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encoded_frame) + b'\r\n'
        )
class ServerBlueprint(Blueprint):
    """Flask blueprint exposing a landing page and an MJPEG image stream."""

    def __init__(self, last_image_frame_wrapper: LastImageFrameWrapper):
        super().__init__('server', __name__)
        self.last_image_frame_wrapper = last_image_frame_wrapper
        # Register bound methods as view functions for the two endpoints.
        self.route('/', methods=['GET'])(self.home)
        self.route('/stream', methods=['GET'])(self.stream)

    def home(self):
        """Minimal landing page linking to the stream endpoint."""
        return '<a href="%s">image stream</a>' % url_for('.stream')

    def stream(self):
        """Stream frames as multipart/x-mixed-replace (motion JPEG)."""
        # The response stays open; each yielded part replaces the previous
        # image in the browser.
        return Response(
            generate_image_frames(self.last_image_frame_wrapper),
            mimetype='multipart/x-mixed-replace; boundary=frame'
        )
class WebPreviewSink:
    """Output sink that serves incoming frames over HTTP.

    Use as a context manager: entering starts the web server thread, exiting
    stops the frame stream and shuts the server down. Calling the instance
    with an image publishes it to connected stream clients.
    """

    def __init__(self, host: str, port: int):
        self.host = host
        self.port = port
        self.last_image_frame_wrapper = LastImageFrameWrapper()
        self.app = Flask(__name__)
        self.app.register_blueprint(
            ServerBlueprint(self.last_image_frame_wrapper),
            url_prefix='/'
        )
        self.server_thread: Optional[ServerThread] = None

    def __enter__(self):
        self.server_thread = ServerThread(self.app, host=self.host, port=self.port)
        self.server_thread.start()
        return self

    def __exit__(self, *_, **__):
        self.last_image_frame_wrapper.stop()
        # BUG FIX: guard against __exit__ without a successful __enter__
        # (server_thread would still be None -> AttributeError).
        if self.server_thread is not None:
            self.server_thread.shutdown()

    def __call__(self, image_array: ImageArray):
        """Publish a frame to the stream."""
        self.last_image_frame_wrapper.push(image_array)
def get_web_preview_output_sink(
    *_, host='0.0.0.0', port: int = 8280, **__
) -> T_OutputSink:
    """Factory for the web preview sink; ignores unrecognised arguments."""
    return WebPreviewSink(host=host, port=port)


# Entry point used by the layered-vision output-sink plugin mechanism.
OUTPUT_SINK_FACTORY = get_web_preview_output_sink
|
import os
import pylast
import matplotlib.pyplot as plt
import logging
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from dotenv import load_dotenv
# Load secrets from a local .env file into the process environment.
load_dotenv()
SLACK_BOT_TOKEN = os.environ['SLACK_BOT_TOKEN']
SLACK_APP_TOKEN = os.environ['SLACK_APP_TOKEN']
SLACK_SIGNING_SECRET = os.environ['SLACK_SIGNING_SECRET']
LF_API_KEY = os.environ['LF_API_KEY']
LF_API_SECRET = os.environ['LF_API_SECRET']
LF_USERNAME = os.environ['LF_USERNAME']
LF_PASSWORD_HASH = os.environ['LF_PASSWORD_HASH']
logging.basicConfig(level=logging.DEBUG)
# Initializes your app with your bot token and signing secret
app = App(
    token=SLACK_BOT_TOKEN,
    signing_secret=SLACK_SIGNING_SECRET
)
# Initializes connection to LastFM API
network = pylast.LastFMNetwork(
    api_key=LF_API_KEY,
    api_secret=LF_API_SECRET,
    username=LF_USERNAME,
    # BUG FIX: the redacted placeholder '<PASSWORD>' was a syntax error;
    # use the password hash loaded from the environment above.
    password_hash=LF_PASSWORD_HASH
)
# The list(actually a tuple) of LastFM usernames that belong to members of your slack group
group = (
    "lastfm_username1", "lastfm_username2", "lastfm_username3"
)
def get_album_scrobbles(artist: str, album_name: str) -> str:
    """Rank group members by their scrobble count for one album.

    Args:
        artist: artist name (Last.fm matches case-insensitively).
        album_name: album title.

    Returns:
        A newline-joined leaderboard, medals for the top three.
    """
    # NOTE: the old code replaced '&' with '\u0026', which is a no-op
    # ('\u0026' IS the ampersand character) — removed.
    counts = {
        user: pylast.Album(artist, album_name, network, username=user).get_userplaycount()
        for user in group
    }
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    medals = ("👑", "🥈", "🥉")
    lines = [f"Top {artist} - {album_name} fans in KaGang"]
    for i, (user, plays) in enumerate(ranked):
        prefix = medals[i] if i < len(medals) else str(i + 1)
        lines.append(f"{prefix}. {user}: {plays} scrobbles")
    return "\n".join(lines)
def get_track_scrobbles(artist: str, track_name: str) -> str:
    """Rank group members by their scrobble count for one track.

    Args:
        artist: artist name (Last.fm matches case-insensitively).
        track_name: track title.

    Returns:
        A newline-joined leaderboard, medals for the top three.
    """
    # NOTE: the old code replaced '&' with '\u0026', which is a no-op
    # ('\u0026' IS the ampersand character) — removed.
    counts = {
        user: pylast.Track(artist, track_name, network, username=user).get_userplaycount()
        for user in group
    }
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    medals = ("👑", "🥈", "🥉")
    lines = [f"Top {artist} - {track_name} fans in KaGang"]
    for i, (user, plays) in enumerate(ranked):
        prefix = medals[i] if i < len(medals) else str(i + 1)
        lines.append(f"{prefix}. {user}: {plays} scrobbles")
    return "\n".join(lines)
def get_now_playing() -> str:
    """Report what each group member is currently scrobbling.

    Members with nothing playing are omitted from the report.
    """
    listening = {}
    for name in group:
        track = network.get_user(name).get_now_playing()
        if track:
            listening[name] = track
    lines = [f"What everyone's listening to right now:\n"]
    for who, track in listening.items():
        lines.append(f"{who} is listening to: {track}")
    return "\n".join(lines)
def get_recent_tracks(username: str, default_limit=5) -> str:
    """List a user's most recently played tracks.

    Args:
        username: Last.fm username to query.
        default_limit: how many recent tracks to fetch.

    Returns:
        A newline-joined numbered list (header only if there are no tracks),
        or "" if the Last.fm API call fails.
    """
    lines = [f"{username}'s last {default_limit} played songs:\n"]
    user = network.get_user(username)
    try:
        recent_tracks = user.get_recent_tracks(limit=default_limit, now_playing=False)
    # BUG FIX: narrowed from a bare `except:`; keep the old best-effort
    # behaviour of returning an empty reply on API failure.
    except Exception:
        return ""
    for i, played in enumerate(recent_tracks):
        # Each entry is a PlayedTrack tuple; index 0 is the Track object.
        lines.append(f"{i+1}. {played[0]}")
    # BUG FIX: the old code returned the raw list (not a str) when there
    # were no recent tracks; always join into a string.
    return "\n".join(lines)
# Artist name doesn't need perfect capitalization, the Last.FM API is smart
# enough to figure it out.
def get_artist_playcounts(artist_name: str) -> str:
    """Rank group members by their total scrobbles for one artist.

    Args:
        artist_name: artist to look up.

    Returns:
        A newline-joined leaderboard, medals for the top three.
    """
    # NOTE: the old code replaced '&' with '\u0026', which is a no-op
    # ('\u0026' IS the ampersand character) — removed.
    counts = {
        user: pylast.Artist(artist_name, network, username=user).get_userplaycount()
        for user in group
    }
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    medals = ("👑", "🥈", "🥉")
    # BUG FIX: the header previously interpolated the loop-leftover `artist`
    # object (NameError when `group` is empty); use the parameter instead.
    lines = [f"Top {artist_name} fans in KaGang"]
    for i, (user, plays) in enumerate(ranked):
        prefix = medals[i] if i < len(medals) else str(i + 1)
        lines.append(f"{prefix}. {user}: {plays} scrobbles")
    return "\n".join(lines)
# This function is not used at the moment
# TODO: Make barplot look nicer (maybe include different types of charts)
# and get slack bot to upload image of chart when requested
def create_barplot(playcounts: list[tuple[str, int]], artist: str):
    """Show a bar chart of per-user play counts for the given artist."""
    names = [user for user, _ in playcounts]
    plays = [plays for _, plays in playcounts]
    plt.figure(figsize=(14, 9))
    plt.bar(names, plays)
    plt.title(artist)
    plt.xlabel('Users')
    plt.ylabel('Play Count')
    plt.show()
def parse_message(message: str) -> str:
    """Dispatch a chat command to the matching Last.fm lookup.

    Supported forms:
        "!rp <user> [limit]"      -> recent tracks for <user>
        "!np"                     -> what everyone is playing now
        "!t <track> by <artist>"  -> track leaderboard
        "<album> by <artist>"     -> album leaderboard
        "<artist>"                -> artist leaderboard

    Returns "" for empty/malformed input (the caller treats "" as an error).
    """
    if not message:
        return ""
    if message.startswith("!rp"):
        parts = message[3:].strip().split()
        # BUG FIX: "!rp" with no username used to raise IndexError.
        if not parts:
            return ""
        username = parts[0]
        if len(parts) > 1:
            # BUG FIX: a non-numeric limit used to raise ValueError;
            # fall back to the default limit instead.
            try:
                limit = int(parts[1])
            except ValueError:
                return get_recent_tracks(username)
            if limit > 20:
                return "Go fuck yourself"
            return get_recent_tracks(username, default_limit=limit)
        return get_recent_tracks(username)
    if message.startswith("!np"):
        return get_now_playing()
    if " by " in message:
        if message.startswith("!t"):
            # BUG FIX: split("!t")[1] broke when "!t" appeared again inside
            # the title; strip only the leading command prefix.
            message = message[2:].strip()
            (track_name, artist_name) = message.split(" by ")
            return get_track_scrobbles(artist_name, track_name)
        else:
            (album_name, artist_name) = message.split(" by ")
            return get_album_scrobbles(artist_name, album_name)
    else:
        artist_name = message
        return get_artist_playcounts(artist_name)
@app.event("app_mention")
def mention_handler(event, say):
    """Handle @-mentions: strip the mention prefix and dispatch the command."""
    try:
        # The mention text looks like "<@BOTID> command ..."; keep the part
        # after the closing '>'.
        text = event['text'].split('>')[1].strip()
    except Exception as exc:
        print(exc.with_traceback(None))
        return
    print(f"message = '{text}'")
    try:
        reply = parse_message(text)
        print(f"parsed message = {reply}")
        if reply:
            say(reply)
        else:
            say("You wrote it wrong")
    except pylast.WSError:
        # Last.fm didn't recognise the artist/album/track.
        say("Who? :extremelaughingemoji:")
@app.command("/chuu")
def hello_command(ack, body):
    """Acknowledge the /chuu slash command with a personalised greeting."""
    ack(f"Hi, <@{body['user_id']}>!")
if __name__ == "__main__":
    # Start the Slack Socket Mode listener; blocks until interrupted.
    handler = SocketModeHandler(app, SLACK_APP_TOKEN)
    handler.start()
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
MAX_DAYS_PER_PLAN = 6
MAX_PLACES_PER_DAY = 10
STATUS = (
(0, 'Private'),
(1, 'Public')
)
ROLES = (
(0, 'Viewer'),
(1, 'Editor')
)
class Plan(models.Model):
    """Class for plan organized."""
    # Display name; a pre_save signal fills in a default when left blank.
    name = models.CharField(max_length=250, default=None, null=True)
    # Number of days in the plan, validated to [1, MAX_DAYS_PER_PLAN].
    days = models.IntegerField(default=1,
                               validators=[
                                   MaxValueValidator(MAX_DAYS_PER_PLAN),
                                   MinValueValidator(1)
                               ])
    author = models.ForeignKey(User, on_delete=models.CASCADE, null=False, blank=False)
    # 0 = Private, 1 = Public (see STATUS).
    status = models.IntegerField(choices=STATUS, default=0)
    date_created = models.DateTimeField("Date Created", auto_now_add=True, null=False, blank=False)
    last_modified = models.DateTimeField("Last Modified", auto_now=True, null=False, blank=False)

    def __str__(self):
        """Display name for this plan."""
        return self.name

    def is_editable(self, user: User) -> bool:
        """Returns True if user can edit this plan.

        The author can always edit; otherwise the user must hold an
        Editor row for this plan with role=1 (Editor).
        """
        if user == self.author:
            return True
        try:
            Editor.objects.get(plan=self, user=user, role=1)
            return True
        except ObjectDoesNotExist:
            return False

    def is_viewable(self, user: User) -> bool:
        """Returns True if someone with link can view this plan.

        Public plans are viewable by anyone; private plans only by the
        author and users holding any Editor row (either role).
        """
        if self.status:
            return True
        if user == self.author:
            return True
        try:
            Editor.objects.get(plan=self, user=user)
            return True
        except ObjectDoesNotExist:
            return False
class Editor(models.Model):
    """Class for storing editors for each plan."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=False, blank=False)
    plan = models.ForeignKey(Plan, on_delete=models.CASCADE, null=False, blank=False)
    # 0 = Viewer, 1 = Editor (see ROLES).
    role = models.IntegerField(choices=ROLES, default=0, null=False, blank=False)

    def __str__(self):
        """Readable 'plan - user' label for the admin and shell."""
        return f"{self.plan} - {self.user}"
class Place(models.Model):
    """Place in a plan that will be displayed in table."""
    # 1-based day index within the plan, capped at MAX_DAYS_PER_PLAN.
    day = models.IntegerField(default=None,
                              validators=[
                                  MaxValueValidator(MAX_DAYS_PER_PLAN),
                                  MinValueValidator(1)
                              ])
    # Position of this place within its day, capped at MAX_PLACES_PER_DAY.
    sequence = models.IntegerField(default=None,
                                   validators=[
                                       MaxValueValidator(MAX_PLACES_PER_DAY),
                                       MinValueValidator(1)
                                   ])
    # External place identifier (presumably a Google Places ID — TODO confirm).
    place_id = models.TextField(null=False, blank=False)
    place_name = models.TextField()
    place_vicinity = models.TextField()
    arrival_time = models.TimeField("Arrival", default=None, null=True, blank=True)
    departure_time = models.TimeField("Departure")
    plan = models.ForeignKey(Plan, on_delete=models.CASCADE, default=None)

    class Meta:
        # Table rows come out grouped by day, then by in-day sequence.
        ordering = ('day', 'sequence',)

    def __str__(self):
        """Readable label: the place's name."""
        return self.place_name
def get_default_name(user: User) -> str:
    """Setting planner name to default name pattern.

    if created user has first_name: will be `{user.first_name}'s Plan`
    otherwise: will be `{user.username}'s Plan`

    Args:
        user: django user model object about author.

    Returns:
        default name as described above.
    """
    owner = user.username if user.first_name == "" else user.first_name
    return owner + "'s Plan"
@receiver(pre_save, sender=Plan)
def initial_plan_validate(**kwargs):
    """Normalise a Plan before saving: default name and clamped day count."""
    plan = kwargs['instance']
    # A blank or missing name falls back to the author-based default.
    if not plan.name:
        plan.name = get_default_name(plan.author)
    # Clamp out-of-range day counts instead of rejecting the save.
    day_count = int(plan.days)
    if day_count > MAX_DAYS_PER_PLAN:
        plan.days = MAX_DAYS_PER_PLAN
    elif day_count < 1:
        plan.days = 1
|
<reponame>ssashita/ml_work
#!/usr/bin/env python
# coding: utf-8
# In[136]:
get_ipython().run_line_magic('run', 'preproc.ipynb')
get_ipython().system('jupyter kernelspec list')
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
import sys
print('Python3 executable is ',sys.executable)
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
# ### First load the data from a csv
# In[137]:
# BUG FIX: np.str and np.bool were deprecated aliases removed in NumPy 1.24;
# use the Python builtins. The original dtype dict also listed "advisor"
# twice (the duplicate key was silently dropped).
data = pd.read_csv(
    filepath_or_buffer="githubadvisedata.csv",
    index_col="index",
    dtype={
        "buysell": str,
        "durationtype": str,
        "advisor": str,
        "otheradvices": str,
        "symbolname": str,
        "success": bool,
        "niftysentiment": str,
    },
)
# ### Next, create a dataframe from the contents. LabelEncode the symbolname and leadername fields since
# ### later scatter plot functions expect numerical values for x and y axes
# ### Also sort the dataframe first based on the ascending order of success fraction per leader, and secondly based
# ### on the same per symbolname
# ### Create two scatter plots where you plot the success (True or False) in two different colors (yellow for True and
# ### brown for False. In the first plot advisor is the x-axis and the position of the symbolname in the dataframe, sorted
# ### as mentioned above, is the y-axis. In the second plot, symbolname is the y-axis and the position of the advisor
# ### in the dataframe, sorted as mentioned above, is the x-axis
# ### As you will notice, the first scatter plot is brown at the bottom and progressively turns more yellow towards the top
# ### Similarly, the second plot is more brown to the left and progressively turns more yellow towards the right
# In[138]:
#plt.scatter(alpha=0.5)
tempdata = pd.DataFrame(data=data,copy=True)
symbol_encoder=LabelEncoder()
leader_encoder=LabelEncoder()
tempdata["symbolname"] = symbol_encoder.fit_transform(data["symbolname"])
tempdata["advisor"] = leader_encoder.fit_transform(data["advisor"])#pd.to_numeric(data["advisor"])
#tempdata.groupby("advisor").apply(lambda x: x[x=='True'].count())
#tempdata.groupby("advisor").agg({"success": (lambda x: x[x=='True'].count())})
def aggFunc(threshold):
    """Build an aggregator counting True values, gated by a sample minimum.

    The returned function counts entries equal to True in a Series, but
    yields 0 when the Series has fewer than `threshold` non-null entries
    (too few samples to trust the success fraction).
    """
    def counter(values):
        total = values.count()
        if total >= threshold:
            return values[values == True].count()  # noqa: E712 — elementwise compare
        return 0
    return counter
leader_successes = (tempdata.groupby("advisor").agg({"success": aggFunc(10)}) / tempdata.groupby("advisor").count()).sort_values(by="success",ascending=False)
leader_successes = leader_successes[["success"]]
symbolname_successes = (tempdata.groupby("symbolname").agg({"success": aggFunc(5)}) / tempdata.groupby("symbolname").count()).sort_values(by="success",ascending=False)
symbolname_successes = symbolname_successes[["success"]]
tempdata['advisor_successes']=pd.Series(data=[leader_successes.loc[ll]["success"] for ll in tempdata['advisor']],index=tempdata.index)
tempdata["symbolname_successes"] = pd.Series(data=[symbolname_successes.loc[ss]["success"] for ss in tempdata['symbolname']],index=tempdata.index)
tempdata["success"] = LabelEncoder().fit_transform(data["success"])
tempdata = tempdata[["symbolname","advisor","success","symbolname_successes","advisor_successes","durationtype"]].sort_values(by=["symbolname_successes","symbolname"],ascending=True)
tempdata.assign(sequence_no=range(len(tempdata))).plot.scatter(alpha=0.5, c='success',colormap='viridis',y="sequence_no",x="advisor",figsize=(5,5))
print(tempdata.assign(sequence_no=range(len(tempdata))))
othertempdata = tempdata[["symbolname","advisor","success","symbolname_successes","advisor_successes","durationtype"]].sort_values(by=["advisor_successes","advisor"],ascending=True)
othertempdata.assign(sequence_no=range(len(othertempdata))).plot.scatter(alpha=0.5, c='success',colormap='viridis',y="symbolname",x="sequence_no",figsize=(5,5))
print(othertempdata.assign(sequence_no=range(len(othertempdata))))
# ### The following symbol advises have had large SUCCESS
# In[139]:
tdata = tempdata[tempdata["symbolname_successes"]>0.8]
list(zip(symbol_encoder.classes_[tdata["symbolname"].values], tdata["symbolname_successes"].values, tdata["durationtype"].values))
# ### The following leader advises have had large SUCCESS
# In[140]:
tdata = othertempdata[othertempdata["advisor_successes"]>0.7]
list(zip(leader_encoder.classes_[tdata["advisor"].values], tdata["advisor_successes"].values, tdata["durationtype"].values))
# ### Try the VotingClassifier ensemble predictor
# In[141]:
from sklearn.ensemble import VotingClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
pipeline = FullPipeline()
tempdata = data.copy()
tempdata["success"] = pd.Series([str(x) for x in data["success"].values], index=tempdata.index)
non_num_attrs = ["advisor","durationtype","buysell","symbolname","niftysentiment"]
data_prepared = pipeline.full_pipeline_apply_features(tempdata,non_num_attrs)
label_prepared = pipeline.full_pipeline_apply_labels(tempdata,["success"]).ravel()
data_train, data_test,label_train,label_test = train_test_split(data_prepared,label_prepared,test_size=0.20)
#vote_classifier = VotingClassifier(estimators=[('rfclassifier', RandomForestClassifier(n_estimators=25, random_state=42,max_depth=60)), ('svc', SVC(C=1000,degree=4,gamma='scale',kernel='poly',coef0=0.1,decision_function_shape='ovo')), ('naive_bayes',MultinomialNB(alpha=1.12))],voting="hard")
vote_classifier = VotingClassifier(estimators=[('dtree1',DecisionTreeClassifier(random_state=312,max_depth=5,criterion="gini")),('dtree2',DecisionTreeClassifier(random_state=42,min_samples_split=350,criterion="gini")),('rfclassifier', RandomForestClassifier(n_estimators=1000, random_state=23,max_depth=10))], voting="soft")
vote_classifier.fit(data_train, label_train)
# In[142]:
vote_classifier.score(X=data_test, y=label_test)
# ### Now lets try Bagging Classifiers
# In[143]:
from sklearn.ensemble import BaggingClassifier
bg_classifier = BaggingClassifier(random_state=49,base_estimator=DecisionTreeClassifier(),n_estimators=200,bootstrap=True,max_features=720,max_samples=200)
bg_classifier.fit(data_train, label_train)
bg_classifier.score(data_test, label_test)
# ### Pasting classifier
# In[144]:
pst_classifier = BaggingClassifier(random_state=49,base_estimator=DecisionTreeClassifier(),n_estimators=299,bootstrap=False,max_features=720,max_samples=200)
pst_classifier.fit(data_train, label_train)
pst_classifier.score(data_test, label_test)
# ### Ada Boost Classifier
# In[145]:
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import mean_squared_error
ada_classifier = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),learning_rate=0.70,n_estimators=120,random_state=49,algorithm="SAMME.R")
ada_classifier.fit(data_train, label_train)
print(ada_classifier.score(data_test, label_test))
ada_classifier.set_params(n_estimators=120)
errors = [mean_squared_error(label_test, y_pred) for y_pred in ada_classifier.staged_predict(data_test)]
bst_n_estimators = np.argmin(errors)
print('bst_n_estimators',bst_n_estimators)
ada_best = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),learning_rate=0.70,n_estimators=bst_n_estimators,random_state=49,algorithm="SAMME.R")
ada_best.fit(data_train,label_train)
ada_best.score(data_test,label_test)
# ### Gradient Boost Classifier
# In[146]:
from sklearn.ensemble import GradientBoostingClassifier
grad_classifier = GradientBoostingClassifier(max_depth=4, learning_rate=0.70,n_estimators=120, random_state=42)
grad_classifier.fit(data_train,label_train)
errors = [mean_squared_error(label_test,y_pred) for y_pred in grad_classifier.staged_predict(data_test)]
bst_n_estimators = np.argmin(errors)
grad_best = GradientBoostingClassifier(max_depth=4, n_estimators=bst_n_estimators)
print('bst_n_estimators',bst_n_estimators)
grad_best.fit(data_train,label_train)
grad_best.score(data_test, label_test)
# ### A stacked generalization classifier. Use a RandomForest classifier at the end
# In[147]:
svc_classifier=SVC(C=1900,degree=4,gamma='scale',kernel='poly',coef0=0.1,decision_function_shape='ovo',probability=True)
svc_classifier.fit(data_train, label_train)
print(svc_classifier.score(data_test, label_test))
svc_probas = svc_classifier.predict_proba(data_train)[:,1]
rnd_classifier=RandomForestClassifier(bootstrap=True,max_depth=4,n_estimators=1000,n_jobs=-1,random_state=42,oob_score=True)
rnd_classifier.fit(data_train,label_train)
rndf_probas=rnd_classifier.predict_proba(data_train)[:,1]
# In[148]:
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, roc_auc_score, roc_curve
svc_test_probas = svc_classifier.predict_proba(data_test)[:,1]
precisions,recalls, thresholds = precision_recall_curve(probas_pred=svc_test_probas, y_true=label_test)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plot_precision_recall_vs_threshold(precisions, recalls,thresholds)
print(roc_auc_score(y_score=svc_test_probas,y_true=label_test))
fpr,tpr,thresholds= roc_curve(y_score=svc_test_probas, y_true=label_test)
plt.subplot(1,2,2)
plot_roc_curve(fpr=fpr,tpr=tpr)
rndf_test_probas = rnd_classifier.predict_proba(data_test)[:,1]
precisions,recalls, thresholds = precision_recall_curve(probas_pred=rndf_test_probas, y_true=label_test)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plot_precision_recall_vs_threshold(precisions, recalls,thresholds)
print(roc_auc_score(y_score=rndf_test_probas,y_true=label_test))
fpr,tpr,thresholds= roc_curve(y_score=rndf_test_probas, y_true=label_test)
plt.subplot(1,2,2)
plot_roc_curve(fpr=fpr,tpr=tpr)
# In[149]:
data_train.shape
# In[150]:
svc_preds = svc_classifier.predict(data_train)
svc_test_preds = svc_classifier.predict(data_test)
rndf_preds = rnd_classifier.predict(data_train)
rndf_test_preds = rnd_classifier.predict(data_test)
data_train_stack = pd.DataFrame({"svc_preds": svc_preds, "rndf_preds":rndf_preds})
#data_train_stack,data_test_stack,label_train_stack,label_test_stack = train_test_split(data_stack,label_train,test_size=0.2)
data_test_stack = pd.DataFrame({"svc_preds":svc_test_preds, "rndf_preds":rndf_test_preds})
# In[ ]:
# In[151]:
rnd_classifier=RandomForestClassifier(bootstrap=True,max_depth=4,n_estimators=1000,n_jobs=-1,random_state=42,oob_score=True)
rnd_classifier.fit(data_train_stack,label_train)
print(rnd_classifier.score(data_test_stack, label_test))
print(rnd_classifier.oob_score_)
# ### Results with stacking not quite satisfactory. Let's try Voting. I am desperate to somehow get a better
# ### score than SVC (68.4)
# In[152]:
vot_classifier = VotingClassifier(estimators=[("svc",svc_classifier),("rndf",rnd_classifier)],voting="soft")
vot_classifier.fit(data_train,label_train)
vot_classifier.score(data_test, label_test)
# ### That was not too bad. Let us try a DecisionTreeClassifier for the stack classifier
# In[153]:
dt_classifier = DecisionTreeClassifier(max_depth=3,random_state=49)
dt_classifier.fit(data_train_stack,label_train)
stack_probas = dt_classifier.predict_proba(data_test_stack)[:,1]
precisions,recalls, thresholds = precision_recall_curve(probas_pred=stack_probas, y_true=label_test)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plot_precision_recall_vs_threshold(precisions, recalls,thresholds)
print(roc_auc_score(y_score=stack_probas,y_true=label_test))
fpr,tpr,thresholds= roc_curve(y_score=stack_probas, y_true=label_test)
plt.subplot(1,2,2)
plot_roc_curve(fpr=fpr,tpr=tpr)
print("score",dt_classifier.score(data_test_stack, label_test))
# In[154]:
data_train.shape
# ### Let us avoid binarizing and see what happens when we use Ada boost - of course we can then use only Tree based classifiers
# In[155]:
label_test
# In[156]:
pipel=Pipeline(steps=[('select',DataFrameSelector(non_num_attrs)),('encode',MyMultiLabelEncoder())])
data_prepared_encoded=pipel.fit_transform(tempdata)
data_train, data_test,label_train,label_test = train_test_split(data_prepared_encoded,label_prepared,test_size=0.20)
# In[157]:
ada_classifier = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),learning_rate=0.7,n_estimators=120,random_state=49,algorithm="SAMME.R")
ada_classifier.fit(data_train,label_train)
errors =[mean_squared_error(label_test, y_pred) for y_pred in ada_classifier.staged_predict(data_test)]
bst_n_estimators = np.argmin(errors)
print('bst_n_estimators',bst_n_estimators)
ada_best = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=4),learning_rate=0.7,n_estimators=bst_n_estimators,random_state=49,algorithm="SAMME.R")
ada_best.fit(data_train,label_train)
print('score',ada_best.score(data_test, label_test))
sorted(zip(ada_best.feature_importances_,non_num_attrs),reverse=True)
# In[158]:
# Grid-search a plain decision tree over max_features/max_depth (5-fold CV),
# then refit the winning configuration and score it on the test split.
from sklearn.model_selection import GridSearchCV
dtree=DecisionTreeClassifier()
params_grid = [{"max_features":[1,2,3,4,5],"max_depth":[6,3,4,5],"random_state":[42]}]
grid_search=GridSearchCV(cv=5, estimator=dtree,param_grid=params_grid)
grid_search.fit(data_train, label_train)
grid_search.best_params_
#grid_search.best_score_
DecisionTreeClassifier(max_depth=5,max_features=5,random_state=42).fit(data_train, label_train).score(data_test,label_test)
# In[159]:
# AdaBoost built on top of the tuned tree configuration.
ada_classifier = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=5,max_features=5,random_state=42),learning_rate=0.14,n_estimators=90,random_state=42,algorithm="SAMME.R")
ada_classifier.fit(data_train, label_train)
ada_classifier.score(data_test, label_test)
# In[160]:
from sklearn.naive_bayes import GaussianNB
# NOTE(review): the variable is named mnb_* but this is a GaussianNB, not a
# MultinomialNB — confirm which was intended. .toarray() densifies the sparse
# matrix, which GaussianNB requires.
mnb_classifier = GaussianNB(var_smoothing=8e-07)
mnb_classifier.fit(data_train.toarray(), label_train)
mnb_classifier.score(data_test.toarray(), label_test)
#mnb_classifier.get_params()
# ### Lets try our luck with ExtraTreesClassifier
# In[161]:
from sklearn.ensemble import ExtraTreesClassifier
# In[162]:
# Extremely-randomized trees, bootstrapped, on the label-encoded data.
ex_classifier = ExtraTreesClassifier(bootstrap=True,criterion='entropy',max_depth=4,random_state=49,n_estimators=1000,n_jobs=-1)
ex_classifier.fit(data_train,label_train)
ex_classifier.score(data_test,label_test)
# ### Dimension Reduction. Let us start with PCA
# ### Restore data_train and data_test to original (binarized) version
# In[163]:
data_train, data_test,label_train,label_test = train_test_split(data_prepared,label_prepared,test_size=0.20)
data_train.shape, data_test.shape
# In[108]:
from sklearn.decomposition import PCA
# Keep enough components to explain 95% of the variance, then fit a
# polynomial-kernel SVC in the reduced space.
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(data_train.toarray())
n_components = pca.n_components_
print(n_components,data_train.shape[1])
X_test_reduced = pca.transform(data_test.toarray())
svc_classifier=SVC(C=1900,degree=5,gamma='scale',kernel='poly',coef0=0.1,decision_function_shape='ovo')
svc_classifier.fit(X_reduced, label_train)
print(svc_classifier.score(X_test_reduced, label_test))
# ## Use PCA to reduce dimensions then use RandForest with bagging, and max_features and max_depth and oob_score
# ## to get a good estimate of error
# In[109]:
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(data_train.toarray())
n_components = pca.n_components_
print(n_components)
X_test_reduced = pca.transform(data_test.toarray())
# In[110]:
# Heuristic max_features = log2(n_features) + 1.
F=int(np.log2(n_components)+1)
rndf_classifier=RandomForestClassifier(bootstrap=True,n_estimators=1000,max_depth=10,max_features=F,n_jobs=-1,random_state=42,oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print(rndf_classifier.score(X_test_reduced, label_test))
rndf_classifier.oob_score_
# In[111]:
rndf_classifier.oob_decision_function_
# In[112]:
len(label_train)
# ### Now kernel PCA
# In[113]:
from sklearn.decomposition import KernelPCA
pca = KernelPCA(gamma=0.04, kernel="rbf", n_components=200)
# NOTE(review): fit_transform gets data_train as-is but transform densifies
# data_test with .toarray() — confirm data_train is already dense here.
X_reduced=pca.fit_transform(data_train)
X_test_reduced = pca.transform(data_test.toarray())
rndf_classifier=RandomForestClassifier(bootstrap=True,max_depth=4,n_estimators=1000,n_jobs=-1,random_state=42,oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print(rndf_classifier.score(X_test_reduced, label_test))
# In[114]:
# Grid-search the KernelPCA hyperparameters with a small tree as the scorer.
from sklearn.model_selection import GridSearchCV
pipeline = Pipeline(steps=[("kernel_pca",KernelPCA()),("dtree",DecisionTreeClassifier(max_depth=3, random_state=49))])
params_grid = [{"kernel_pca__n_components":[170,276,400],"kernel_pca__gamma":[0.03, 0.05,1.0],"kernel_pca__kernel":["rbf","sigmoid"]}]
grid_search = GridSearchCV(estimator=pipeline,cv=3,param_grid=params_grid)
grid_search.fit(data_train,label_train)
grid_search.best_params_
# In[116]:
n_components=grid_search.best_params_["kernel_pca__n_components"]
n_components
# In[119]:
# Cross-validate the best KernelPCA settings, then materialize the reduced
# train/test matrices for the downstream classifiers.
from sklearn.model_selection import cross_val_score
pipeline = Pipeline(steps=[("kernel_pca",KernelPCA(gamma=1.0,kernel="sigmoid",n_components=n_components)),("dtree",DecisionTreeClassifier(max_depth=3, random_state=49))])
cross_val_score(estimator=pipeline,X=data_train,y=label_train,cv=3,n_jobs=-1)
# In[121]:
transformer = KernelPCA(gamma=1.0,kernel="sigmoid",n_components=n_components)
X_reduced = transformer.fit_transform(data_train)
X_test_reduced = transformer.transform(data_test)
# In[122]:
# Heuristic max_features = log2(n_features) + 1.
F= int(np.log2(n_components)+1)
rndf_classifier=RandomForestClassifier(bootstrap=True,max_depth=4,max_features=F,n_estimators=1000,n_jobs=-1,random_state=42,oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print('score on test data',rndf_classifier.score(X_test_reduced, label_test))
print('oob_score',rndf_classifier.oob_score_)
# In[123]:
probas = rndf_classifier.predict_proba(X=X_test_reduced)[:,1]
print(len(probas),label_test.shape)
precisions,recalls,thresholds = precision_recall_curve(probas_pred=probas,y_true=label_test)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
# ## Use the Lasso classifier (you need to use LogisticRegression with the right parameters) and detect attributes
# ## that can be removed because the coefficients (theta) obtained are zero
# In[124]:
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
# L1 (lasso) logistic regression: zeroed coefficients mark droppable features.
lasso_classifier=LogisticRegressionCV(Cs=[1.0],max_iter=100,cv=5,random_state=42,n_jobs=-1,penalty='l1',solver='saga')
lasso_classifier.fit(X_reduced, label_train)
print('score on test data',lasso_classifier.score(X_test_reduced, label_test))
# In[125]:
# Keep only the columns whose coefficient survived the L1 penalty.
n_components = np.count_nonzero(lasso_classifier.coef_[0])
print(n_components)
X_reduced_after_lasso = X_reduced[:,lasso_classifier.coef_[0]!=0.0]
X_test_reduced_after_lasso = X_test_reduced[:,lasso_classifier.coef_[0]!=0.0]
# In[126]:
lasso_classifier=LogisticRegressionCV(Cs=[1.0],max_iter=100,cv=5,random_state=42,n_jobs=-1,penalty='l1',solver='saga')
lasso_classifier.fit(X_reduced_after_lasso, label_train)
print('score on test data',lasso_classifier.score(X_test_reduced_after_lasso, label_test))
# In[127]:
# NOTE(review): named lasso_sgd_* but uses penalty='l2' (ridge, not lasso) —
# confirm whether 'l1' was intended here.
lasso_sgd_classifier = SGDClassifier(alpha=0.1,loss='log',penalty='l2', n_jobs=-1,random_state=42, max_iter=1000)
lasso_sgd_classifier.fit(X_reduced, label_train)
print('score on test data',lasso_sgd_classifier.score(X_test_reduced, label_test))
# ### Use the reduced attributes for Random Forest again
# In[128]:
# Random forest on the lasso-selected features; oob_score gives a free
# generalization estimate alongside the test-set score.
F= int(np.log2(127)+1)
rndf_classifier=RandomForestClassifier(bootstrap=True,max_depth=9,max_features=F,n_estimators=1000,n_jobs=-1,random_state=42,oob_score=True)
rndf_classifier.fit(X_reduced_after_lasso, label_train)
print('score on test data',rndf_classifier.score(X_test_reduced_after_lasso, label_test))
print('oob_score',rndf_classifier.oob_score_)
# BUGFIX: the results table below had been pasted as bare text, which is a
# SyntaxError in a .py file; it is preserved here as comments.
# max_depth Comment
# score on test data 0.6661849710982659 3
# oob_score 0.6448699421965318
# score on test data 0.6685934489402697 4
# oob_score 0.6467967244701349
# score on test data 0.6724470134874759 5 peak for testdata
# oob_score 0.653179190751445
# score on test data 0.6695568400770713 6
# oob_score 0.6541425818882466
# score on test data 0.6652215799614644 7
# oob_score 0.6587186897880539
# score on test data 0.6594412331406551 8 peak for oob_score
# oob_score 0.6588391136801541
# score on test data 0.6560693641618497 9
# oob_score 0.6585982658959537
# ### Let us see how SVC does on this reduced data
# In[129]:
svc_classifier=SVC(C=1900,degree=4,gamma='scale',kernel='poly',coef0=0.1,decision_function_shape='ovo',probability=True)
svc_classifier.fit(X_reduced_after_lasso, label_train)
print(svc_classifier.score(X_test_reduced_after_lasso, label_test))
# ### And what about Ada boost on the reduced data
# In[134]:
# AdaBoost with decision stumps; staged_predict finds the best stopping stage.
ada_classifier = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=1),learning_rate=0.3,n_estimators=120,random_state=49,algorithm="SAMME.R")
ada_classifier.fit(X_reduced_after_lasso, label_train)
print(ada_classifier.score(X_test_reduced_after_lasso, label_test))
ada_classifier.set_params(n_estimators=120)  # no-op: n_estimators is already 120
errors = [mean_squared_error(label_test, y_pred) for y_pred in ada_classifier.staged_predict(X_test_reduced_after_lasso)]
# BUGFIX: staged_predict index i corresponds to i+1 estimators, so add 1;
# the previous argmin alone was off by one (and could yield n_estimators=0).
bst_n_estimators = int(np.argmin(errors)) + 1
print('bst_n_estimators',bst_n_estimators)
# In[135]:
# Refit AdaBoost at the best stage count found above.
ada_best = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=1),learning_rate=0.3045,n_estimators=bst_n_estimators,random_state=49,algorithm="SAMME.R")
ada_best.fit(X_reduced_after_lasso,label_train)
ada_best.score(X_test_reduced_after_lasso,label_test)
# BUGFIX: the learning-rate sweep results below had been pasted as bare text
# (a SyntaxError); preserved here as comments.
# learning_rate
# 0.6623314065510597 0.303
# 0.6647398843930635 0.304
# 0.6652215799614644 0.3045 highest score
# 0.6642581888246628 0.305
# In[132]:
### Locally Linear Embedding
# In[133]:
# LLE to 277 dimensions, then the usual random-forest benchmark.
from sklearn.manifold import LocallyLinearEmbedding
pca = LocallyLinearEmbedding(n_components=277, n_neighbors=10)
X_reduced=pca.fit_transform(data_train.toarray())
X_test_reduced = pca.transform(data_test.toarray())
rndf_classifier=RandomForestClassifier(bootstrap=True,max_depth=4,n_estimators=1000,n_jobs=-1,random_state=42,oob_score=True)
rndf_classifier.fit(X_reduced, label_train)
print(rndf_classifier.score(X_test_reduced, label_test))
# In[522]:
###
# In[ ]:
# BUGFIX: removed a stray '|' character that followed this cell (a
# SyntaxError; it looks like a concatenation artifact between two files).
"""
Cubes
=====
Tools to deal with spectroscopic data cubes.
Some features in Cubes require additional packages:
* smoothing - requires agpy_'s smooth and parallel_map routines
* `pyregion <git://github.com/astropy/pyregion.git>`_
The 'grunt work' is performed by the :py:mod:`cubes` module
"""
from __future__ import print_function
import time
import sys
import traceback
import numpy as np
import types
import copy
import itertools
from ..specwarnings import warn,PyspeckitWarning
import astropy
from astropy.io import fits
from astropy import log
from astropy import wcs
from astropy import units
from astropy.utils.console import ProgressBar
from six import iteritems, string_types
from functools import wraps
# import parent package
from .. import spectrum
from ..spectrum import smooth
from ..spectrum.units import (generate_xarr, SpectroscopicAxis,
SpectroscopicAxes)
from ..parallel_map import parallel_map
from ..spectrum import history
# import local things
from . import mapplot
from . import cubes
def not_for_cubes(func):
    """Decorator for Spectrum methods inherited by Cube: warns that *func*
    acts only on the currently selected spectrum (see ``set_spectrum`` /
    ``set_apspec``), not on the whole cube.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUGFIX: the "{0}" placeholder was never filled in, so the warning
        # printed a literal "{0}"; format in the wrapped function's name.
        warn("This operation ({0}) operates on the spectrum selected "
             "from the cube, e.g. with `set_spectrum` or `set_apspec`"
             ", it does not operate on the whole cube.".format(func.__name__),
             PyspeckitWarning)
        # BUGFIX: forward keyword arguments, which were previously dropped.
        return func(*args, **kwargs)
    return wrapper
class Cube(spectrum.Spectrum):
def __init__(self, filename=None, cube=None, xarr=None, xunit=None,
             errorcube=None, header=None, x0=0, y0=0,
             maskmap=None,
             **kwargs):
    """
    A pyspeckit Cube object. Can be created from a FITS file on disk or
    from an array or a `spectral_cube.SpectralCube` object. If an array
    is used to insantiate the cube, the `xarr` keyword must be given,
    specifying the X-axis units

    Parameters
    ----------
    filename : str, optional
        The name of a FITS file to open and read from. Must be 3D
    cube : `np.ndarray`, `spectral_cube.SpectralCube`, or \
           `astropy.units.Quantity`
        The data from which to instantiate a Cube object. If it is
        an array or an astropy Quantity (which is an array with attached
        units), the X-axis must be specified. If this is given as a
        SpectralCube object, the X-axis and units should be handled
        automatically.
    xarr : `np.ndarray` or `astropy.units.Quantity`, optional
        The X-axis of the spectra from each cube. This actually
        corresponds to axis 0, or what we normally refer to as the Z-axis
        of the cube, but it indicates the X-axis in a plot of intensity vs
        wavelength. The units for this array are specified in the `xunit`
        keyword unless a `~astropy.units.Quantity` is given.
    xunit : str, optional
        The unit of the ``xarr`` array if ``xarr`` is given as a numpy
        array
    errorcube : `np.ndarray`, `spectral_cube.SpectralCube`,\
                or `~astropy.units.Quantity`, optional
        A cube with the same shape as the input cube providing the 1-sigma
        error for each voxel. This can be specified more efficiently as an
        error map for most use cases, but that approach has not yet been
        implemented. However, you can pass a 2D error map to `fiteach`.
    header : `fits.Header` or dict, optional
        The header associated with the data. Only needed if the cube is
        given as an array or a quantity.
    x0, y0 : int
        The initial spectrum to use. The `Cube` object can be treated as
        a `pyspeckit.Spectrum` object, with all the associated tools
        (plotter, fitter) using the `set_spectrum` method to select a pixel
        from the cube to plot and fit. However, it is generally more sensible
        to extract individual spectra and treat them separately using the
        `get_spectrum` method, so these keywords MAY BE DEPRECATED in the
        future.
    maskmap : `np.ndarray`, optional
        A boolean mask map, where ``True`` implies that the data are good.
        This will be used for both plotting using `mapplot` and fitting
        using `fiteach`.
    """
    if filename is not None:
        # Reading from disk delegates to load_fits, which re-enters
        # __init__ via load_spectral_cube with a SpectralCube instance.
        self.load_fits(filename)
        return
    else:
        if hasattr(cube, 'spectral_axis'):
            # Load from a SpectralCube instance
            self.cube = cube.hdu.data
            # Prefer the BUNIT metadata when the cube's own unit is
            # undefined or dimensionless.
            if (cube.unit in ('undefined', units.dimensionless_unscaled)
                    and 'BUNIT' in cube._meta):
                self.unit = cube._meta['BUNIT']
            else:
                self.unit = cube.unit
            log.debug("Self.unit: {0}".format(self.unit))
            if xarr is None:
                # Avoid copying when the spectral axis already owns its
                # data; otherwise copy so the axis is independent.
                if cube.spectral_axis.flags['OWNDATA']:
                    xarr = SpectroscopicAxis(cube.spectral_axis,
                                             unit=cube.spectral_axis.unit,
                                             refX=cube.wcs.wcs.restfrq,
                                             refX_unit='Hz')
                else:
                    xarr = SpectroscopicAxis(cube.spectral_axis.copy(),
                                             unit=cube.spectral_axis.unit,
                                             refX=cube.wcs.wcs.restfrq,
                                             refX_unit='Hz')
            if header is None:
                header = cube.header
        elif hasattr(cube, 'unit'):
            # astropy Quantity: split into raw array + unit
            self.cube = cube.value
            self.unit = cube.unit
        else:
            # plain ndarray (or None)
            self.cube = cube
        # Same three-way handling for the error cube.
        if hasattr(errorcube, 'spectral_axis'):
            # Load from a SpectralCube instance
            self.errorcube = errorcube.hdu.data
        elif hasattr(errorcube, 'unit'):
            self.errorcube = errorcube.value
        else:
            self.errorcube = errorcube
        if hasattr(xarr, 'flags'):
            log.debug("XARR flags: {0}".format(xarr.flags))
        self.xarr = generate_xarr(xarr, unit=xunit)
        if hasattr(xarr, 'flags'):
            log.debug("self.xarr flags: {0}".format(xarr.flags))
        self.header = header
        self.error = None
        if self.cube is not None:
            # Seed .data with the (x0, y0) spectrum so the object is
            # usable as a Spectrum immediately.
            self.data = self.cube[:,int(y0),int(x0)]
    if not hasattr(self, '_unit'):
        self.unit = units.dimensionless_unscaled
    log.debug("Self.unit before header: {0}".format(self.unit))
    if self.header is not None:
        self.parse_header(self.header)
    else:
        log.debug("self.header is None: {0}".format(self.header))
        self.unit = 'undefined'
        self.header = fits.Header()
    log.debug("Self.unit after header: {0}".format(self.unit))
    if maskmap is not None:
        if maskmap.ndim != 2:
            raise ValueError("Mask map must be two-dimensional.")
        self.maskmap = maskmap
    else:
        # default: everything unmasked
        self.maskmap = np.ones(self.cube.shape[1:],dtype='bool')
    if isinstance(filename,str):
        self.fileprefix = filename.rsplit('.', 1)[0]  # Everything prior to .fits or .txt
    else:
        self.fileprefix = "pyfitsHDU"
    # Attach the standard Spectrum tooling (plotter, fitters, baseline).
    self.plotter = spectrum.plotters.Plotter(self)
    self._register_fitters()
    self.specfit = spectrum.fitters.Specfit(self,Registry=self.Registry)
    self.baseline = spectrum.baseline.Baseline(self)
    self.speclines = spectrum.speclines
    # Initialize writers
    self.writer = {}
    for writer in spectrum.writers.writers:
        self.writer[writer] = spectrum.writers.writers[writer](self)
    # Special. This needs to be modified to be more flexible; for now I need it to work for nh3
    self.plot_special = None
    self.plot_special_kwargs = {}
    self._modelcube = None
    if self.header:
        self.wcs = wcs.WCS(self.header)
        self.wcs.wcs.fix()
        # WCS axis indices are 0-based; FITS header keywords are 1-based.
        self._spectral_axis_number = self.wcs.wcs.spec+1
        # first celestial axis: axis_types // 1000 == 2 marks celestial axes
        self._first_cel_axis_num = np.where(self.wcs.wcs.axis_types // 1000 == 2)[0][0]+1
        # TODO: Improve this!!!
        self.system = ('galactic'
                       if ('CTYPE{0}'.format(self._first_cel_axis_num)
                           in self.header and 'GLON' in
                           self.header['CTYPE{0}'.format(self._first_cel_axis_num)])
                       else 'celestial')
    else:
        # no header: fall back to pure pixel coordinates
        self._spectral_axis_number = 2
        self._first_cel_axis_num = 0
        self.system = 'PIXEL'
    self.mapplot = mapplot.MapPlotter(self)
def load_fits(self, fitsfile):
    """Read *fitsfile* with spectral_cube and initialize this Cube from it."""
    try:
        from spectral_cube import SpectralCube
    except ImportError:
        raise ImportError("Could not import spectral_cube. As of pyspeckit"
                          " 0.17, spectral_cube is required for cube reading. "
                          "It can be pip installed or acquired from "
                          "spectral-cube.rtfd.org.")
    return self.load_spectral_cube(SpectralCube.read(fitsfile))
def load_spectral_cube(self, cube):
    """
    Load the cube from a spectral_cube.SpectralCube object

    Re-runs ``__init__`` with the given cube, replacing this object's state.
    """
    self.__init__(cube=cube)
def __repr__(self):
    """One-line summary: spectral range, flux range, unit, shape, and id."""
    template = (r'<Cube object over spectral range %6.5g :'
                ' %6.5g %s and flux range = [%2.1f, %2.1f]'
                ' %s with shape %r at %s>')
    fields = (self.xarr.min().value, self.xarr.max().value, self.xarr.unit,
              self.data.min(), self.data.max(), self.unit, self.cube.shape,
              str(hex(self.__hash__())))
    return template % fields
def copy(self,deep=True):
    """
    Create a copy of the spectral cube with its own plotter, fitter, etc.
    Useful for, e.g., comparing smoothed to unsmoothed data

    Parameters
    ----------
    deep : bool
        If True, also copy the data arrays (xarr, data, cube, maskmap,
        error, errorcube), the WCS, and the header, so the copy does not
        share them with the original.
    """
    # shallow copy first; selected attributes are replaced below
    newcube = copy.copy(self)
    newcube.header = copy.copy(self.header)
    deep_attr_lst = ['xarr', 'data', 'cube', 'maskmap',
                     'error', 'errorcube']
    if deep:
        for attr in deep_attr_lst:
            setattr(newcube, attr, copy.copy(getattr(self, attr)))
        if hasattr(self, 'wcs'):
            newcube.wcs = self.wcs.deepcopy()
        newcube.header = self.header.copy()
    # Rebuild the tooling so it points at the copy, not the original.
    newcube.plotter = self.plotter.copy(parent=newcube)
    newcube._register_fitters()
    newcube.specfit = self.specfit.copy(parent=newcube)
    newcube.specfit.Spectrum.plotter = newcube.plotter
    newcube.baseline = self.baseline.copy(parent=newcube)
    newcube.baseline.Spectrum.plotter = newcube.plotter
    newcube.mapplot = self.mapplot.copy(parent=newcube)
    newcube.mapplot.Cube = newcube
    return newcube
def _update_header_from_xarr(self):
    """Uses SpectroscopiAxis' _make_header method to update Cube header"""
    self.header['NAXIS3'] = self.xarr.size
    self.xarr._make_header()
    axnum = str(self._spectral_axis_number)
    # Re-key the axis keywords from axis 1 to the cube's spectral axis
    # (e.g. CRPIX1 -> CRPIX3).
    newhead = {}
    for key, val in iteritems(self.xarr.wcshead):
        if key.endswith('1'):
            newhead[key.replace('1', axnum)] = val
        else:
            newhead[key] = val
    # Convert astropy quantities/units into plain header-storable values.
    for key, val in iteritems(newhead):
        if isinstance(val, units.Quantity):
            newhead[key] = val.value
        elif isinstance(val, (units.CompositeUnit, units.Unit)):
            newhead[key] = val.to_string()
        log.debug("Updating header: {}: {}".format(key, val))
    self.header.update(newhead)
def slice(self, start=None, stop=None, unit='pixel', preserve_fits=False,
          copy=True, update_header=False):
    """
    Slice a cube along the spectral axis
    (equivalent to "spectral_slab" from the spectral_cube package)

    Parameters
    ----------
    start : numpy.float or int
        start of slice
    stop : numpy.float or int
        stop of slice
    unit : str
        allowed values are any supported physical unit, 'pixel'
    preserve_fits : bool
        if True, carry the existing fit parameters (modelpars, parinfo)
        and baseline parameters over to the sliced cube
    copy : bool
        must be True; slicing in place is not implemented
    update_header : bool
        modifies the header of the spectral cube according to the slice
    """
    # convert physical start/stop to pixel indices on the spectral axis
    x_in_units = self.xarr.as_unit(unit)
    start_ind = x_in_units.x_to_pix(start)
    stop_ind = x_in_units.x_to_pix(stop)
    if start_ind > stop_ind:
        start_ind, stop_ind = stop_ind, start_ind
    spectrum_slice = slice(start_ind,stop_ind)
    if not copy:
        raise NotImplementedError("Must copy when slicing a cube.")
    newcube = self.copy()
    newcube.cube = newcube.cube[spectrum_slice,:,:]
    if hasattr(newcube,'errcube'):
        newcube.errcube = newcube.errcube[spectrum_slice,:,:]
    newcube.data = newcube.data[spectrum_slice]
    if newcube.error is not None:
        newcube.error = newcube.error[spectrum_slice]
    newcube.xarr = newcube.xarr[spectrum_slice]
    # create new specfit / baseline instances (otherwise they'll be the wrong length)
    newcube._register_fitters()
    newcube.baseline = spectrum.baseline.Baseline(newcube)
    newcube.specfit = spectrum.fitters.Specfit(newcube,Registry=newcube.Registry)
    if preserve_fits:
        newcube.specfit.modelpars = self.specfit.modelpars
        newcube.specfit.parinfo = self.specfit.parinfo
        newcube.baseline.baselinepars = self.baseline.baselinepars
        newcube.baseline.order = self.baseline.order
    # modify the header in the new cube
    if update_header:
        newcube._update_header_from_xarr()
        # create a new wcs instance from the updated header
        newcube.wcs = wcs.WCS(newcube.header)
        newcube.wcs.wcs.fix()
        newcube._spectral_axis_number = newcube.wcs.wcs.spec + 1
        newcube._first_cel_axis_num = np.where(newcube.wcs.wcs.axis_types
                                               // 1000 == 2)[0][0] + 1
    return newcube
def __getitem__(self, indx):
    """
    If [] is used on a cube, slice on the cube and use
    the first dimension to slice on the xarr and the data

    Returns a new Cube built from the sliced arrays.
    """
    # BUGFIX: `if self.errorcube` raises ValueError for a multi-element
    # ndarray ("truth value ... is ambiguous"); compare against None, as
    # is already done for maskmap.
    return Cube(xarr=self.xarr.__getitem__(indx[0]),
                cube=self.cube[indx],
                errorcube=self.errorcube[indx] if self.errorcube is not None else None,
                maskmap=self.maskmap[indx[1:]] if self.maskmap is not None else None,
                header=self.header
                )
def set_spectrum(self, x, y):
    """Point ``.data`` (and ``.error`` if an error cube exists) at pixel (x, y)."""
    col, row = int(x), int(y)
    self.data = self.cube[:, row, col]
    if self.errorcube is not None:
        self.error = self.errorcube[:, row, col]
def plot_spectrum(self, x, y, plot_fit=False, **kwargs):
    """
    Fill the .data array with a real spectrum and plot it

    Parameters
    ----------
    x, y : int
        pixel coordinates of the spectrum to plot
    plot_fit : bool
        if True, also overplot the fit from fiteach (ignored on the
        plot_special path, where the fit is handled by the special plotter)
    """
    self.set_spectrum(x,y)
    if self.plot_special is None:
        # ordinary path: use this cube's own plotter
        self.plotter(**kwargs)
        if plot_fit:
            self.plot_fit(x,y)
        self.plotted_spectrum = self
    else:
        # "special" path: extract a standalone Spectrum and delegate
        # plotting to the user-supplied plot_special callable
        sp = self.get_spectrum(x,y)
        sp.plot_special = types.MethodType(self.plot_special, sp)
        combined_kwargs = dict(kwargs.items())
        combined_kwargs.update(self.plot_special_kwargs)
        self._spdict = sp.plot_special(**combined_kwargs)
        self.plotted_spectrum = sp
        self.plotter = sp.plotter
        # refresh all sub-plotters returned by the special plot
        self.plotter.refresh = lambda: [spi.plotter.refresh()
                                        for spi in self._spdict.values()]
        # collect the model artists from every sub-spectrum
        self.specfit.modelplot = [comp
                                  for spi in self._spdict.values()
                                  for comp in spi.specfit.modelplot]
        self.specfit._plotted_components = [comp
                                            for spi in self._spdict.values()
                                            for comp in spi.specfit._plotted_components]
def plot_fit(self, x, y, silent=False, **kwargs):
    """
    If fiteach has been run, plot the best fit at the specified location

    Parameters
    ----------
    x : int
    y : int
        The x, y coordinates of the pixel (indices 2 and 1 respectively in
        numpy notation)
    silent : bool
        suppress the log message when fiteach has not been run yet
    """
    if not hasattr(self,'parcube'):
        if not silent:
            log.info("Must run fiteach before plotting a fit. "
                     "If you want to fit a single spectrum, "
                     "use plot_spectrum() and specfit() directly.")
        return
    if self.plot_special is not None:
        # don't try to overplot a fit on a "special" plot
        # this is already handled in plot_spectrum
        return
    if not self.has_fit[int(y), int(x)]:
        # no fit to plot
        return
    self.specfit.modelpars = self.parcube[:,int(y),int(x)]
    if np.any(np.isnan(self.specfit.modelpars)):
        log.exception("Attempted to plot a model with NaN parameters.")
        return
    self.specfit.npeaks = self.specfit.fitter.npeaks
    # evaluate the stored parameters into a model spectrum on this xarr
    self.specfit.model = self.specfit.fitter.n_modelfunc(self.specfit.modelpars,
                                                         **self.specfit.fitter.modelfunc_kwargs)(self.xarr)
    # set the parinfo values correctly for annotations
    self.specfit.parinfo.values = self.parcube[:,int(y),int(x)]
    self.specfit.parinfo.errors = self.errcube[:,int(y),int(x)]
    self.specfit.fitter.parinfo.values = self.parcube[:,int(y),int(x)]
    self.specfit.fitter.parinfo.errors = self.errcube[:,int(y),int(x)]
    #for pi,p,e in zip(self.specfit.parinfo,
    #                  self.specfit.modelpars,
    #                  self.errcube[:,int(y),int(x)]):
    #    try:
    #        pi['value'] = p
    #        pi['error'] = e
    #    except ValueError:
    #        # likely to happen for failed fits
    #        pass
    self.specfit.plot_fit(**kwargs)
def plot_apspec(self, aperture, coordsys=None, reset_ylimits=True,
                wunit='arcsec',
                method='mean', **kwargs):
    """
    Extract an aperture using cubes.extract_aperture
    (defaults to Cube coordinates)

    Parameters
    ----------
    aperture : list
        A list of aperture parameters, e.g.
        * For a circular aperture, len(ap)=3:
          + ``ap = [xcen,ycen,radius]``
        * For an elliptical aperture, len(ap)=5:
          + ``ap = [xcen,ycen,height,width,PA]``
    coordsys : None or str
        The coordinate system of the aperture (e.g., galactic, fk5, None
        for pixel)
    wunit : str
        angular unit passed through to get_apspec on the plot_special path
    method : 'mean' or 'sum'
        Either average over parellel spectra or sum them.
    """
    if self.plot_special is None:
        # ordinary path: load the aperture spectrum into .data and plot it
        self.set_apspec(aperture, coordsys=coordsys, method=method)
        self.plotter(reset_ylimits=reset_ylimits, **kwargs)
    else:
        #self.plot_special(reset_ylimits=reset_ylimits, **dict(kwargs.items()+self.plot_special_kwargs.items()))
        # "special" path: extract a standalone Spectrum and delegate to
        # the user-supplied plot_special callable
        sp = self.get_apspec(aperture, coordsys=coordsys, wunit=wunit, method=method)
        sp.plot_special = types.MethodType(self.plot_special, sp)
        combined_kwargs = dict(kwargs.items())
        combined_kwargs.update(self.plot_special_kwargs)
        sp.plot_special(reset_ylimits=reset_ylimits, **combined_kwargs)
def get_spectrum(self, x, y):
    """
    Very simple: get the spectrum at coordinates x,y
    (inherits fitter from self)

    Returns a Spectrum instance with a header centered on (x, y); if
    fiteach has been run and pixel (x, y) has a fit, the fit parameters
    and model are copied onto the returned spectrum as well.
    """
    ct = 'CTYPE{0}'.format(self._first_cel_axis_num)
    # header recentered on the requested pixel; HISTORY cards dropped
    header = cubes.speccen_header(fits.Header(cards=[(k,v) for k,v in
                                                     iteritems(self.header)
                                                     if k != 'HISTORY']),
                                  lon=x, lat=y, system=self.system,
                                  proj=(self.header[ct][-3:]
                                        if ct in self.header else
                                        'CAR'))
    sp = spectrum.Spectrum(xarr=self.xarr.copy(), data=self.cube[:,int(y),int(x)],
                           header=header, error=(self.errorcube[:,int(y),int(x)] if
                                                 self.errorcube is not None
                                                 else None),
                           unit=self.unit,
                           model_registry=self.Registry,
                           )
    sp.specfit = self.specfit.copy(parent=sp, registry=sp.Registry)
    # explicitly re-do this (test)
    sp.specfit.includemask = self.specfit.includemask.copy()
    sp.specfit.Spectrum = sp
    if hasattr(self, 'parcube'):
        if self.has_fit[int(y),int(x)]:
            # only set parameters if they're valid
            sp.specfit.modelpars = self.parcube[:,int(y),int(x)]
            if hasattr(self.specfit,'parinfo') and self.specfit.parinfo is not None:
                # set the parinfo values correctly for annotations
                for pi,p,e in zip(sp.specfit.parinfo, sp.specfit.modelpars, self.errcube[:,int(y),int(x)]):
                    try:
                        pi['value'] = p
                        pi['error'] = e
                    except ValueError:
                        pass
            if hasattr(self.specfit,'fitter') and self.specfit.fitter is not None:
                sp.specfit.fitter.mpp = sp.specfit.modelpars # also for annotations (differs depending on which function... sigh... need to unify)
                sp.specfit.npeaks = self.specfit.fitter.npeaks
                # NOTE(review): '/' is true division on py3, so npeaks may
                # become a float here — confirm '//' was not intended.
                sp.specfit.fitter.npeaks = len(sp.specfit.modelpars) / sp.specfit.fitter.npars
                sp.specfit.fitter.parinfo = sp.specfit.parinfo
            try:
                sp.specfit.model = sp.specfit.fitter.n_modelfunc(sp.specfit.modelpars,
                                                                 **sp.specfit.fitter.modelfunc_kwargs)(sp.xarr)
            except ValueError:
                # possibly invalid model parameters, just skip
                sp.specfit.model = np.zeros_like(sp.data)
    return sp
def get_apspec(self, aperture, coordsys=None, method='mean', **kwargs):
    """
    Extract an aperture using cubes.extract_aperture
    (defaults to Cube pixel coordinates)

    *aperture* [tuple or list] (x, y, radius)
        The aperture to use when extracting the data
    *coordsys* [ 'celestial' | 'galactic' | None]
        the coordinate system the aperture is specified in
        None indicates pixel coordinates (default)
    *wunit* [str]
        arcsec, arcmin, or degree

    Returns a Spectrum with a header centered on the aperture and, for
    elliptical apertures, the aperture geometry recorded in APMAJ/APMIN/
    APREFF/APPA cards.
    """
    # pixel coordinates need no WCS; sky coordinates use the map's WCS
    if coordsys is not None:
        wcs = self.mapplot.wcs
    else:
        wcs = None
    data = cubes.extract_aperture(self.cube, aperture,
                                  coordsys=coordsys,
                                  wcs=wcs,
                                  method=method,
                                  **kwargs)
    if self.errorcube is not None:
        # errors combine in quadrature, hence the dedicated 'error' method
        error = cubes.extract_aperture(self.errorcube, aperture,
                                       coordsys=coordsys,
                                       wcs=self.mapplot.wcs,
                                       method='error', **kwargs)
    else:
        error = None
    ct = 'CTYPE{0}'.format(self._first_cel_axis_num)
    # BUGFIX: guard the CTYPE lookup with a 'CAR' fallback, matching
    # get_spectrum; previously a missing CTYPE card raised KeyError.
    header = cubes.speccen_header(fits.Header(cards=[(k,v) for k,v in
                                                     iteritems(self.header)
                                                     if k != 'HISTORY']),
                                  lon=aperture[0],
                                  lat=aperture[1],
                                  system=self.system,
                                  proj=(self.header[ct][-3:]
                                        if ct in self.header
                                        else 'CAR'))
    if len(aperture) == 3:
        header['APRADIUS'] = aperture[2]
    if len(aperture) == 5:
        header['APMAJ'] = aperture[2]
        header['APMIN'] = aperture[3]
        header['APREFF'] = (aperture[2]*aperture[3])**0.5
        header['APPA'] = aperture[4]
    sp = spectrum.Spectrum(xarr=self.xarr.copy(),
                           data=data,
                           error=error,
                           header=header,
                           model_registry=self.Registry,
                           )
    sp.specfit = self.specfit.copy(parent=sp, registry=sp.Registry)
    return sp
def set_apspec(self, aperture, coordsys=None, method='mean'):
    """
    Extract an aperture using cubes.extract_aperture
    (defaults to Cube coordinates) and store the result in ``.data``
    """
    # only sky-coordinate apertures need the map's WCS
    extract_kwargs = {'coordsys': coordsys, 'method': method}
    if coordsys is not None:
        extract_kwargs['wcs'] = self.mapplot.wcs
    self.data = cubes.extract_aperture(self.cube, aperture, **extract_kwargs)
def get_modelcube(self, update=False, multicore=1):
    """
    Return or generate a "model cube", which will have the same shape as
    the ``.cube`` but will have spectra generated from the fitted model.

    If the model cube does not yet exist, one will be generated

    Parameters
    ----------
    update : bool
        If the cube has already been computed, set this to ``True`` to
        recompute the model.
    multicore: int
        if >1, try to use multiprocessing via parallel_map to run on multiple cores
    """
    if self._modelcube is None or update:
        yy,xx = np.indices(self.parcube.shape[1:])
        # exclude pixels whose parameters are all zero or contain NaN/inf
        nanvals = np.any(~np.isfinite(self.parcube),axis=0)
        isvalid = np.any(self.parcube, axis=0) & ~nanvals
        # NOTE: zip() is a one-shot iterator; each branch below consumes
        # it exactly once (via the list comprehension or list()).
        valid_pixels = zip(xx[isvalid], yy[isvalid])
        # unfitted pixels stay NaN
        self._modelcube = np.full_like(self.cube, np.nan)
        def model_a_pixel(xy):
            # evaluate the fitted model for one pixel and store it in-place
            x,y = int(xy[0]), int(xy[1])
            self._modelcube[:,y,x] = self.specfit.get_full_model(pars=self.parcube[:,y,x])
            return ((x,y), self._modelcube[:,y,x])
        if multicore > 1:
            sequence = [(x,y) for x,y in valid_pixels]
            result = parallel_map(model_a_pixel, sequence, numcores=multicore)
            merged_result = [core_result for core_result in result
                             if core_result is not None]
            # worker processes don't share memory, so write results back
            for mr in merged_result:
                ((x,y), model) = mr
                x = int(x)
                y = int(y)
                self._modelcube[:,y,x] = model
        else:
            # progressbar doesn't work with zip; I'm therefore giving up on
            # "efficiency" in memory by making a list here.
            for xy in ProgressBar(list(valid_pixels)):
                model_a_pixel(xy)
    return self._modelcube
def fiteach(self, errspec=None, errmap=None, guesses=(), verbose=True,
verbose_level=1, quiet=True, signal_cut=3, usemomentcube=None,
blank_value=0, integral=False, direct_integral=False,
absorption=False, use_nearest_as_guess=False,
use_neighbor_as_guess=False, start_from_point=(0,0),
multicore=1, position_order=None, continuum_map=None,
prevalidate_guesses=False, maskmap=None,
skip_failed_fits=False,
**fitkwargs):
"""
Fit a spectrum to each valid pixel in the cube
For guesses, priority is *use_nearest_as_guess*, *usemomentcube*,
*guesses*, None
Once you have successfully run this function, the results will be
stored in the ``.parcube`` and ``.errcube`` attributes, which are each
cubes of shape ``[npars, ny, nx]``, where npars is the number of fitted
parameters and ``nx``, ``ny`` are the shape of the map. ``errcube``
contains the errors on the fitted parameters (1-sigma, as returned from
the Levenberg-Marquardt fit's covariance matrix). You can use the
attribute ``has_fit``, which is a map of shape ``[ny,nx]`` to find
which pixels have been successfully fit.
Parameters
----------
use_nearest_as_guess: bool
Unless the fitted point is the first, it will find the nearest
other point with a successful fit and use its best-fit parameters
as the guess
use_neighbor_as_guess: bool
Set this keyword to use the average best-fit parameters from
neighboring positions with successful fits as the guess
start_from_point: tuple(int,int)
Either start from the center or from a point defined by a tuple.
Work outward from that starting point.
position_order: ndarray[naxis=2]
2D map of region with pixel values indicating the order in which
to carry out the fitting. Any type with increasing pixel values.
guesses: tuple or ndarray[naxis=3]
Either a tuple/list of guesses with len(guesses) = npars or a cube
of guesses with shape [npars, ny, nx].
NOT TRUE, but a good idea in principle:
You can also use a dictionary of the form {(y,x): [list of length
npars]}, where (y,x) specifies a pixel location. If the dictionary
method is used, npars must be specified and it sets the length of
the first parameter axis
signal_cut: float
Minimum signal-to-noise ratio to "cut" on (i.e., if peak in a given
spectrum has s/n less than this value, ignore it)
blank_value: float
Value to replace non-fitted locations with.
errmap: ndarray[naxis=2] or ndarray[naxis=3]
A map of errors used for the individual pixels of the spectral
cube. 2D errmap results in an equal weighting of each given
spectrum, while a 3D array sets individual weights of each channel
verbose: bool
verbose_level: int
Controls how much is output.
0,1 - only changes frequency of updates in loop
2 - print out messages when skipping pixels
3 - print out messages when fitting pixels
4 - specfit will be verbose
multicore: int
if >1, try to use multiprocessing via parallel_map to run on
multiple cores
continuum_map: np.ndarray
Same shape as error map. Subtract this from data before estimating
noise.
prevalidate_guesses: bool
An extra check before fitting is run to make sure the guesses are
all within the specified limits. May be slow, so it is off by
default. It also should not be necessary, since careful checking
is performed before each fit.
maskmap : `np.ndarray`, optional
A boolean mask map, where ``True`` implies that the data are good.
This will be used for both plotting using `mapplot` and fitting
using `fiteach`. If ``None``, will use ``self.maskmap``.
integral : bool
If set, the integral of each spectral fit will be computed and
stored in the attribute ``.integralmap``
direct_integral : bool
Return the integral of the *spectrum* (as opposed to the fitted
model) over a range defined by the `integration_limits` if specified or
`threshold` otherwise
skip_failed_fits : bool
Flag to forcibly skip failed fits that fail with "unknown error".
Generally, you do not want this on, but this is the
'finger-over-the-engine-light' approach that will allow these
incomprehensible failures to go by and just ignore them. Keep
an eye on how many of these you get: if it's just one or two
out of hundreds, then maybe those are just pathological cases
that can be ignored. If it's a significant fraction, you probably
want to take a different approach.
"""
if 'multifit' in fitkwargs:
warn("The multifit keyword is no longer required. All fits "
"allow for multiple components.", DeprecationWarning)
if not hasattr(self.mapplot,'plane'):
self.mapplot.makeplane()
if maskmap is None:
maskmap = self.maskmap
yy,xx = np.indices(self.mapplot.plane.shape)
if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
OK = ((~self.mapplot.plane.mask) &
maskmap.astype('bool')).astype('bool')
else:
OK = (np.isfinite(self.mapplot.plane) &
maskmap.astype('bool')).astype('bool')
# NAN guesses rule out the model too
if hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
bad = np.isnan(guesses).sum(axis=0).astype('bool')
OK &= (~bad)
log.info("Fitting up to {0} spectra".format(OK.sum()))
if start_from_point == 'center':
start_from_point = (xx.max()/2., yy.max()/2.)
if hasattr(position_order,'shape') and position_order.shape == self.cube.shape[1:]:
sort_distance = np.argsort(position_order.flat)
else:
d_from_start = ((xx-start_from_point[1])**2 + (yy-start_from_point[0])**2)**0.5
sort_distance = np.argsort(d_from_start.flat)
if use_neighbor_as_guess or use_nearest_as_guess:
distance = ((xx)**2 + (yy)**2)**0.5
valid_pixels = list(zip(xx.flat[sort_distance][OK.flat[sort_distance]],
yy.flat[sort_distance][OK.flat[sort_distance]]))
if len(valid_pixels) != len(set(valid_pixels)):
raise ValueError("There are non-unique pixels in the 'valid pixel' list. "
"This should not be possible and indicates a major error.")
elif len(valid_pixels) == 0:
raise ValueError("No valid pixels selected.")
if start_from_point not in valid_pixels:
raise ValueError("The starting fit position is not among the valid"
" pixels. Check your selection criteria to make "
"sure you have not unintentionally excluded "
"this first fit pixel.")
if verbose_level > 0:
log.debug("Number of valid pixels: %i" % len(valid_pixels))
guesses_are_moments = (isinstance(guesses, string_types) and
guesses in ('moment','moments'))
if guesses_are_moments or (usemomentcube and len(guesses)):
if not hasattr(self, 'momentcube') and guesses_are_moments:
self.momenteach()
npars = self.momentcube.shape[0]
else:
npars = len(guesses)
if npars == 0:
raise ValueError("Parameter guesses are required.")
self.parcube = np.zeros((npars,)+self.mapplot.plane.shape)
self.errcube = np.zeros((npars,)+self.mapplot.plane.shape)
if integral:
self.integralmap = np.zeros((2,)+self.mapplot.plane.shape)
# newly needed as of March 27, 2012. Don't know why.
if 'fittype' in fitkwargs:
self.specfit.fittype = fitkwargs['fittype']
self.specfit.fitter = self.specfit.Registry.multifitters[self.specfit.fittype]
# TODO: VALIDATE THAT ALL GUESSES ARE WITHIN RANGE GIVEN THE
# FITKWARG LIMITS
# array to store whether pixels have fits
self.has_fit = np.zeros(self.mapplot.plane.shape, dtype='bool')
self._counter = 0
self._tracebacks = {}
t0 = time.time()
def fit_a_pixel(iixy):
ii,x,y = iixy
sp = self.get_spectrum(x,y)
# very annoying - cannot use min/max without checking type
# maybe can use np.asarray here?
# cannot use sp.data.mask because it can be a scalar boolean,
# which does unpredictable things.
if hasattr(sp.data, 'mask') and not isinstance(sp.data.mask, (bool,
np.bool_)):
sp.data[sp.data.mask] = np.nan
sp.error[sp.data.mask] = np.nan
sp.data = np.array(sp.data)
sp.error = np.array(sp.error)
if errspec is not None:
sp.error = errspec
elif errmap is not None:
if self.errorcube is not None:
raise ValueError("Either the 'errmap' argument or"
" self.errorcube attribute should be"
" specified, but not both.")
if errmap.shape == self.cube.shape[1:]:
sp.error = np.ones(sp.data.shape) * errmap[int(y),int(x)]
elif errmap.shape == self.cube.shape:
sp.error = errmap[:, int(y), int(x)]
elif self.errorcube is not None:
sp.error = self.errorcube[:, int(y), int(x)]
else:
if ii==0:
# issue the warning only once (ii==0), but always issue
warn("Using data std() as error. "
"If signal_cut is set, this can result in "
"some pixels not being fit.",
PyspeckitWarning)
sp.error[:] = sp.data[sp.data==sp.data].std()
if sp.error is None:
raise TypeError("The Spectrum's error is unset. This should "
"not be possible. Please raise an Issue.")
if signal_cut > 0 and not all(sp.error == 0):
if continuum_map is not None:
with np.errstate(divide='raise'):
snr = (sp.data-continuum_map[int(y),int(x)]) / sp.error
else:
with np.errstate(divide='raise'):
snr = sp.data / sp.error
if absorption:
max_sn = np.nanmax(-1*snr)
else:
max_sn = np.nanmax(snr)
if max_sn < signal_cut:
if verbose_level > 1:
log.info("Skipped %4i,%4i (s/n=%0.2g)" % (x,y,max_sn))
return
elif np.isnan(max_sn):
if verbose_level > 1:
log.info("Skipped %4i,%4i (s/n is nan; max(data)=%0.2g, min(error)=%0.2g)" %
(x,y,np.nanmax(sp.data),np.nanmin(sp.error)))
return
if verbose_level > 2:
log.info("Fitting %4i,%4i (s/n=%0.2g)" % (x,y,max_sn))
else:
max_sn = None
sp.specfit.Registry = self.Registry # copy over fitter registry
# Do some homework for local fits
# Exclude out of bounds points
xpatch, ypatch = get_neighbors(x,y,self.has_fit.shape)
local_fits = self.has_fit[ypatch+y,xpatch+x]
if use_nearest_as_guess and self.has_fit.sum() > 0:
if verbose_level > 1 and ii == 0 or verbose_level > 4:
log.info("Using nearest fit as guess")
rolled_distance = np.roll(np.roll(distance, x, 0), y, 1)
# If there's no fit, set its distance to be unreasonably large
# so it will be ignored by argmin
nearest_ind = np.argmin(rolled_distance+1e10*(~self.has_fit))
nearest_x, nearest_y = xx.flat[nearest_ind],yy.flat[nearest_ind]
if np.all(np.isfinite(self.parcube[:,nearest_y,nearest_x])):
gg = self.parcube[:,nearest_y,nearest_x]
else:
log.exception("Pixel {0},{1} had a fit including a NaN: {2}"
" so it will not be used as a guess for {3},{4}"
.format(nearest_x, nearest_y, self.parcube[:, nearest_y, nearest_x],
x, y))
gg = guesses
elif use_neighbor_as_guess and np.any(local_fits):
# Array is N_guess X Nvalid_nbrs so averaging over
# Axis=1 is the axis of all valid neighbors
gg = np.mean(self.parcube[:,
(ypatch+y)[local_fits],
(xpatch+x)[local_fits]], axis=1)
if np.any(~np.isfinite(gg)):
log.exception("Pixel {0},{1} neighbors had non-finite guess: {2}"
.format(x, y, gg))
gg = guesses
elif guesses_are_moments and usemomentcube is False:
raise ValueError("usemomentcube must be set to True")
elif guesses_are_moments or (usemomentcube and len(guesses)):
if not guesses_are_moments and ii == 0:
log.warn("guesses will be ignored because usemomentcube "
"was set to True.", PyspeckitWarning)
if verbose_level > 1 and ii == 0:
log.info("Using moment cube")
gg = self.momentcube[:,int(y),int(x)]
elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
if verbose_level > 1 and ii == 0:
log.info("Using input guess cube")
gg = guesses[:,int(y),int(x)]
elif isinstance(guesses, dict):
if verbose_level > 1 and ii == 0:
log.info("Using input guess dict")
gg = guesses[(int(y),int(x))]
else:
if verbose_level > 1 and ii == 0:
log.info("Using input guess")
gg = guesses
if np.all(np.isfinite(gg)):
try:
with np.errstate(divide='raise'):
sp.specfit(guesses=gg, quiet=verbose_level<=3,
verbose=verbose_level>3, **fitkwargs)
self.parcube[:,int(y),int(x)] = sp.specfit.modelpars
self.errcube[:,int(y),int(x)] = sp.specfit.modelerrs
if np.any(~np.isfinite(sp.specfit.modelpars)):
log.exception("Fit result included nan for pixel {0},{1}: "
"{2}".format(x, y, sp.specfit.modelpars))
success = False
# this is basically a debug statement to try to get the
# code to crash here
raise KeyboardInterrupt
else:
success = True
except Exception as ex:
exc_traceback = sys.exc_info()[2]
self._tracebacks[(ii,x,y)] = exc_traceback
log.exception("Fit number %i at %i,%i failed on error %s" % (ii,x,y, str(ex)))
log.exception("Failure was in file {0} at line {1}".format(
exc_traceback.tb_frame.f_code.co_filename,
exc_traceback.tb_lineno,))
traceback.print_tb(exc_traceback)
log.exception("Guesses were: {0}".format(str(gg)))
log.exception("Fitkwargs were: {0}".format(str(fitkwargs)))
success = False
if isinstance(ex, KeyboardInterrupt):
raise ex
# keep this out of the 'try' statement
if integral and success:
self.integralmap[:,int(y),int(x)] = sp.specfit.integral(direct=direct_integral,
return_error=True)
self.has_fit[int(y),int(x)] = success
else:
log.exception("Fit number {0} at {1},{2} had non-finite guesses {3}"
.format(ii, x, y, guesses))
self.has_fit[int(y),int(x)] = False
self.parcube[:,int(y),int(x)] = blank_value
self.errcube[:,int(y),int(x)] = blank_value
if integral:
self.integralmap[:,int(y),int(x)] = blank_value
self._counter += 1
if verbose:
if ii % (min(10**(3-verbose_level),1)) == 0:
snmsg = " s/n=%5.1f" % (max_sn) if max_sn is not None else ""
npix = len(valid_pixels)
pct = 100 * (ii+1.0)/float(npix)
log.info("Finished fit %6i of %6i at (%4i,%4i)%s. Elapsed time is %0.1f seconds. %%%01.f" %
(ii+1, npix, x, y, snmsg, time.time()-t0, pct))
if sp.specfit.modelerrs is None:
log.exception("Fit number %i at %i,%i failed with no specific error." % (ii,x,y))
if hasattr(sp.specfit, 'mpfit_status'):
log.exception("mpfit status is {0}".format(sp.specfit.mpfit_status))
log.exception("The problem is that the model errors were never set, "
"which implies that the fit simply failed to finish.")
log.exception("The string representation of `sp.specfit.parinfo` is: {0}"
.format(sp.specfit.parinfo))
log.exception("The string representation of `sp.specfit.fitter.parinfo` is: {0}"
.format(sp.specfit.fitter.parinfo))
log.exception("modelpars is: {0}".format(str(sp.specfit.modelpars)))
log.exception("cube modelpars are: {0}".format(str(self.parcube[:,int(y),int(x)])))
log.exception("cube modelerrs are: {0}".format(str(self.errcube[:,int(y),int(x)])))
log.exception("Guesses were: {0}".format(str(gg)))
log.exception("Fitkwargs were: {0}".format(str(fitkwargs)))
if skip_failed_fits:
# turn the flag into a count
log.exception("The fit never completed; something has gone wrong. Failed fits = {0}".format(int(skip_failed_fits)))
else:
raise TypeError("The fit never completed; something has gone wrong.")
# blank out the errors (and possibly the values) wherever they are zero = assumed bad
# this is done after the above exception to make sure we can inspect these values
if blank_value != 0:
self.parcube[self.parcube == 0] = blank_value
self.errcube[self.parcube == 0] = blank_value
if integral:
return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs,
self.integralmap[:,int(y),int(x)])
else:
return ((x,y), sp.specfit.modelpars, sp.specfit.modelerrs)
#### BEGIN TEST BLOCK ####
# This test block is to make sure you don't run a 30 hour fitting
# session that's just going to crash at the end.
# try a first fit for exception-catching
if len(start_from_point) == 2:
try0 = fit_a_pixel((0,start_from_point[0], start_from_point[1]))
else:
try0 = fit_a_pixel((0,valid_pixels[0][0],valid_pixels[0][1]))
try:
len_guesses = len(self.momentcube) if (usemomentcube or
guesses_are_moments) else len(guesses)
assert len(try0[1]) == len_guesses == len(self.parcube) == len(self.errcube)
assert len(try0[2]) == len_guesses == len(self.parcube) == len(self.errcube)
except TypeError as ex:
if try0 is None:
raise AssertionError("The first fitted pixel did not yield a "
"fit. Please try starting from a "
"different pixel.")
else:
raise ex
except AssertionError:
raise AssertionError("The first pixel had the wrong fit "
"parameter shape. This is probably "
"a bug; please report it.")
# This is a secondary test... I'm not sure it's necessary, but it
# replicates what's inside the fit_a_pixel code and so should be a
# useful sanity check
x,y = valid_pixels[0]
sp = self.get_spectrum(x,y)
sp.specfit.Registry = self.Registry # copy over fitter registry
# this reproduced code is needed because the functional wrapping
# required for the multicore case prevents gg from being set earlier
if usemomentcube or guesses_are_moments:
gg = self.momentcube[:,int(y),int(x)]
elif hasattr(guesses,'shape') and guesses.shape[1:] == self.cube.shape[1:]:
gg = guesses[:,int(y),int(x)]
else:
gg = guesses
# This is NOT in a try/except block because we want to raise the
# exception here if an exception is going to happen
sp.specfit(guesses=gg, **fitkwargs)
if prevalidate_guesses:
if guesses.ndim == 3:
for ii,(x,y) in ProgressBar(tuple(enumerate(valid_pixels))):
pinf, _ = sp.specfit.fitter._make_parinfo(parvalues=guesses[:,int(y),int(x)], **fitkwargs)
sp.specfit._validate_parinfo(pinf, 'raise')
else:
pinf, _ = sp.specfit.fitter._make_parinfo(parvalues=guesses, **fitkwargs)
sp.specfit._validate_parinfo(pinf, 'raise')
#### END TEST BLOCK ####
if multicore > 1:
sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
with np.errstate(divide='raise'):
result = parallel_map(fit_a_pixel, sequence, numcores=multicore)
self._result = result # backup - don't want to lose data in the case of a failure
# a lot of ugly hacking to deal with the way parallel_map returns
# its results needs TWO levels of None-filtering, because any
# individual result can be None (I guess?) but apparently (and this
# part I don't believe) any individual *fit* result can be None as
# well (apparently the x,y pairs can also be None?)
merged_result = [core_result for core_result in result if
core_result is not None]
# for some reason, every other time I run this code, merged_result
# ends up with a different intrinsic shape. This is an attempt to
# force it to maintain a sensible shape.
try:
if integral:
((x,y), m1, m2, intgl) = merged_result[0]
else:
((x,y), m1, m2) = merged_result[0]
except ValueError:
if verbose > 1:
log.exception("ERROR: merged_result[0] is {0} which has the"
" wrong shape".format(merged_result[0]))
merged_result = itertools.chain.from_iterable(merged_result)
for TEMP in merged_result:
if TEMP is None:
# this shouldn't be possible, but it appears to happen
# anyway. parallel_map is great, up to a limit that was
# reached long before this level of complexity
log.debug("Skipped a None entry: {0}".format(str(TEMP)))
continue
try:
if integral:
((x,y), modelpars, modelerrs, intgl) = TEMP
else:
((x,y), modelpars, modelerrs) = TEMP
except TypeError:
# implies that TEMP does not have the shape ((a,b),c,d)
# as above, shouldn't be possible, but it happens...
log.debug("Skipped a misshapen entry: {0}".format(str(TEMP)))
continue
if ((len(modelpars) != len(modelerrs)) or
(len(modelpars) != len(self.parcube))):
raise ValueError("There was a serious problem; modelpar and"
" error shape don't match that of the "
"parameter cubes")
if ((any([x is None for x in modelpars]) or
np.any(np.isnan(modelpars)) or
any([x is None for x in modelerrs]) or
np.any(np.isnan(modelerrs)))):
self.parcube[:,int(y),int(x)] = np.nan
self.errcube[:,int(y),int(x)] = np.nan
self.has_fit[int(y),int(x)] = False
else:
self.parcube[:,int(y),int(x)] = modelpars
self.errcube[:,int(y),int(x)] = modelerrs
self.has_fit[int(y),int(x)] = max(modelpars) > 0
if integral:
self.integralmap[:,int(y),int(x)] = intgl
else:
for ii,(x,y) in enumerate(valid_pixels):
fit_a_pixel((ii,x,y))
# March 27, 2014: This is EXTREMELY confusing. This isn't in a loop...
# make sure the fitter / fittype are set for the cube
# this has to be done within the loop because skipped-over spectra
# don't ever get their fittypes set
self.specfit.fitter = sp.specfit.fitter
self.specfit.fittype = sp.specfit.fittype
self.specfit.parinfo = sp.specfit.parinfo
if verbose:
log.info("Finished final fit %i. "
"Elapsed time was %0.1f seconds" % (len(valid_pixels), time.time()-t0))
pars_are_finite = np.all(np.isfinite(self.parcube), axis=0)
# if you see one of these exceptions, please try to produce a minimum
# working example and report it as a bug.
# all non-finite fit parameters should be has_fit=False
assert np.all(~self.has_fit[~pars_are_finite]), "Non-finite parameters found in fits"
    def momenteach(self, verbose=True, verbose_level=1, multicore=1, **kwargs):
        """
        Compute the moments of the spectrum at each valid pixel and store them
        in ``self.momentcube`` (shape ``[n_moments, ny, nx]``).

        Parameters
        ----------
        verbose : bool
            If True, log progress messages while looping over pixels.
        verbose_level : int
            Higher values increase the frequency of progress messages.
        multicore: int
            if >1, try to use multiprocessing via parallel_map to run on multiple cores
        **kwargs
            Passed through to each spectrum's ``moments()`` call; ``vheight``
            defaults to False unless explicitly provided.
        """
        if not hasattr(self.mapplot,'plane'):
            # the mapplot plane defines the spatial footprint we iterate over
            self.mapplot.makeplane()
        if 'vheight' not in kwargs:
            kwargs['vheight'] = False
        yy,xx = np.indices(self.mapplot.plane.shape)
        # valid pixels: unmasked (or finite) plane values AND allowed by maskmap
        if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
            OK = (~self.mapplot.plane.mask) * self.maskmap
        else:
            OK = np.isfinite(self.mapplot.plane) * self.maskmap
        # NOTE: zip() is a one-shot iterator in Python 3; it is consumed exactly
        # once, either by the multicore branch below or by the serial loop
        valid_pixels = zip(xx[OK],yy[OK])
        # run the moment process to find out how many elements are in a moment
        # NOTE(review): arguments here are (yy[...], xx[...]) while the loop
        # below calls get_spectrum(x, y) -- confirm the order is intentional
        _temp_moment = self.get_spectrum(yy[OK][0],xx[OK][0]).moments(**kwargs)
        self.momentcube = np.zeros((len(_temp_moment),)+self.mapplot.plane.shape)
        t0 = time.time()
        def moment_a_pixel(iixy):
            # worker: compute and store the moments for one (x, y) pixel; the
            # values are also returned so parallel results can be merged later
            ii,x,y = iixy
            sp = self.get_spectrum(x,y)
            self.momentcube[:,int(y),int(x)] = sp.moments(**kwargs)
            if verbose:
                if ii % 10**(3-verbose_level) == 0:
                    log.info("Finished moment %i. "
                             "Elapsed time is %0.1f seconds" % (ii, time.time()-t0))
            return ((x,y), self.momentcube[:,int(y),int(x)])
        if multicore > 1:
            sequence = [(ii,x,y) for ii,(x,y) in tuple(enumerate(valid_pixels))]
            result = parallel_map(moment_a_pixel, sequence, numcores=multicore)
            # the in-place writes above happened in child processes, so copy
            # the returned values into this (parent) process's momentcube
            merged_result = [core_result.tolist()
                             for core_result in result
                             if core_result is not None]
            for TEMP in merged_result:
                ((x,y), moments) = TEMP
                self.momentcube[:,int(y),int(x)] = moments
        else:
            for ii,(x,y) in enumerate(valid_pixels):
                moment_a_pixel((ii,x,y))
        if verbose:
            log.info("Finished final moment %i. "
                     "Elapsed time was %0.1f seconds" % (OK.sum(), time.time()-t0))
def show_moment(self, momentnumber, **kwargs):
"""
If moments have been computed, display them in the mapplot window
"""
if not hasattr(self,'momentcube'):
raise ValueError("Compute moments first")
self.mapplot.plane = self.momentcube[momentnumber,:,:].squeeze()
self.mapplot(estimator=None, **kwargs)
def show_fit_param(self, parnumber, **kwargs):
"""
If pars have been computed, display them in the mapplot window
Parameters
----------
parnumber : int
The index of the parameter in the parameter cube
"""
if not hasattr(self,'parcube'):
raise ValueError("Compute fit parameters first")
self.mapplot.plane = self.parcube[parnumber,:,:].squeeze()
self.mapplot(estimator=None, **kwargs)
    def load_model_fit(self, fitsfilename, npars, npeaks=1, fittype=None,
                       _temp_fit_loc=(0,0)):
        """
        Load a parameter + error cube into the ``.parcube`` and ``.errcube``
        attributes. The models can then be examined and plotted using
        ``.mapplot`` as if you had run ``.fiteach``.

        Parameters
        ----------
        fitsfilename : str
            The filename containing the parameter cube written with `write_fit`
        npars : int
            The number of parameters in the model fit for a single spectrum
        npeaks : int
            The number of independent peaks fit toward each spectrum
        fittype : str, optional
            The name of the fittype, e.g. 'gaussian' or 'voigt', from the
            pyspeckit fitter registry.  This is optional; it should have
            been written to the FITS header and will be read from there if
            it is not specified
        _temp_fit_loc : tuple (int,int)
            The initial spectrum to use to generate components of the class.
            This should not need to be changed.

        Raises
        ------
        ValueError
            If the cube's first dimension is not ``npars * npeaks * 2``.
        KeyError
            If `fittype` is not given and FITTYPE is absent from the header.
        """
        try:
            import astropy.io.fits as pyfits
        except ImportError:
            # fall back to the standalone pyfits package on old installations
            import pyfits
        cubefile = pyfits.open(fitsfilename,ignore_missing_end=True)
        cube = cubefile[0].data
        # write_fit stores npars*npeaks parameter planes followed by an equal
        # number of error planes, so the first axis must have twice that length
        if cube.shape[0] != npars * npeaks * 2:
            raise ValueError("The cube shape is not correct. The cube has "
                             "first dimension = {0}, but it should be {1}. "
                             "The keyword npars = number of parameters per "
                             "model component, and npeaks = number of "
                             "independent peaks. You gave npars={2} and "
                             "npeaks={3}".format(cube.shape[0], npars*npeaks*2,
                                                 npars, npeaks))
        # grab a spectrum and fit it however badly you want
        # this is just to __init__ the relevant data structures
        if fittype is None:
            if cubefile[0].header.get('FITTYPE'):
                fittype = cubefile[0].header.get('FITTYPE')
            else:
                raise KeyError("Must specify FITTYPE or include it in cube header.")
        # split the stacked planes back into parameters and errors
        self.parcube = cube[:npars*npeaks,:,:]
        self.errcube = cube[npars*npeaks:npars*npeaks*2,:,:]
        if np.any(np.all(self.parcube == 0, axis=(1,2))):
            # there are some slices where all parameters are zero, we should
            # ignore this when establishing whether there's a fit (some
            # parameters, like fortho, can be locked to zero)
            self.has_fit = np.all((np.isfinite(self.parcube)), axis=0)
        else:
            self.has_fit = np.all((self.parcube != 0) &
                                  (np.isfinite(self.parcube)), axis=0)
        nanvals = ~np.isfinite(self.parcube)
        nanvals_flat = np.any(nanvals, axis=0)
        if np.any(nanvals):
            warn("NaN or infinite values encountered in parameter cube.",
                 PyspeckitWarning)
        # make sure params are within limits
        fitter = self.specfit.Registry.multifitters[fittype]
        guesses,throwaway = fitter._make_parinfo(npeaks=npeaks)
        try:
            # fit a single spectrum with the loaded parameters purely to
            # initialize self.specfit's fitter / parinfo data structures
            x,y = _temp_fit_loc
            sp = self.get_spectrum(x,y)
            guesses.values = self.parcube[:,int(y),int(x)]
            sp.specfit(fittype=fittype, guesses=guesses.values)
        except Exception as ex1:
            try:
                # the default location failed (e.g., a blank pixel); retry at
                # the first pixel with nonzero, finite parameters
                OKmask = np.any(self.parcube, axis=0) & ~nanvals_flat
                whereOK = np.where(OKmask)
                x,y = whereOK[1][0],whereOK[0][0]
                sp = self.get_spectrum(x,y)
                guesses.values = self.parcube[:,int(y),int(x)]
                sp.specfit(fittype=fittype, guesses=guesses.values)
            except Exception as ex2:
                log.error("Fitting the pixel at location {0} failed with error: {1}. "
                          "Re-trying at location {2} failed with error {3}. "
                          "Try setting _temp_fit_loc to a valid pixel".format(_temp_fit_loc, ex1,
                                                                              (x,y), ex2))
        # propagate the single-spectrum fit machinery up to the cube level
        self.specfit.fitter = sp.specfit.fitter
        self.specfit.fittype = sp.specfit.fittype
        self.specfit.parinfo = sp.specfit.parinfo
    def smooth(self,factor,**kwargs):
        """
        Smooth the spectrum by factor `factor`.

        Downsamples the spectral axis of ``self.cube`` (and ``self.data`` /
        ``self.errorcube`` when present) by the integer `factor` and updates
        the FITS header to match via ``_smooth_header``.

        Documentation from the :mod:`cubes.spectral_smooth` module:
        """
        # the spectral axis is strided with [::factor] below, so the factor
        # must be an integer
        factor = round(factor)
        self.cube = cubes.spectral_smooth(self.cube,factor,**kwargs)
        self.xarr = self.xarr[::factor]
        if hasattr(self,'data'):
            self.data = smooth.smooth(self.data,factor,**kwargs)
        # sanity check: the smoothed cube and the strided axis must agree
        if len(self.xarr) != self.cube.shape[0]:
            raise ValueError("Convolution resulted in different X and Y array lengths. Convmode should be 'same'.")
        if self.errorcube is not None:
            self.errorcube = cubes.spectral_smooth(self.errorcube,factor,**kwargs)
        # keep the header's spectral WCS consistent with the downsampling
        self._smooth_header(factor)
    # NOTE(review): class-body statement -- at this scope `__doc__` is the
    # enclosing class docstring, so this appends the helper's documentation
    # there; presumably intended to document `smooth`.  Confirm.
    __doc__ += "cubes.spectral_smooth doc: \n" + cubes.spectral_smooth.__doc__
def _smooth_header(self,factor):
"""
Internal - correct the FITS header parameters when smoothing
"""
if self.header.get('CDELT3') is not None and self.header.get('CRPIX3') is not None:
self.header['CDELT3'] = self.header.get('CDELT3') * float(factor)
self.header['CRPIX3'] = self.header.get('CRPIX3') / float(factor)
history.write_history(self.header,"SMOOTH: Smoothed and downsampled spectrum by factor %i" % (factor))
history.write_history(self.header,"SMOOTH: Changed CRPIX3 from %f to %f" % (self.header.get('CRPIX3')*float(factor),self.header.get('CRPIX3')))
history.write_history(self.header,"SMOOTH: Changed CDELT3 from %f to %f" % (self.header.get('CRPIX3')/float(factor),self.header.get('CRPIX3')))
    def write_fit(self, fitcubefilename, overwrite=False):
        """
        Write out a fit cube containing the ``.parcube`` and ``.errcube`` using
        the information in the fit's parinfo to set the header keywords. The
        ``PLANE#`` keywords will be used to indicate the content of each plane
        in the data cube written to the FITS file. All of the fitted
        parameters will be written first, followed by all of the errors on
        those parameters. So, for example, if you have fitted a single
        gaussian to each pixel, the dimensions of the saved cube will be ``[6,
        ny, nx]``, and they will be the amplitude, centroid, width, error on
        amplitude, error on centroid, and error on width, respectively.

        To load such a file back in for plotting purposes, see
        `SpectralCube.load_model_fit`.

        Parameters
        ----------
        fitcubefilename: string
            Filename to write to
        overwrite: bool
            Overwrite file if it exists?
        """
        try:
            import astropy.io.fits as pyfits
        except ImportError:
            # fall back to the standalone pyfits package on old installations
            import pyfits
        try:
            # parameters first, then errors, stacked along the first axis
            fitcubefile = pyfits.PrimaryHDU(data=np.concatenate([self.parcube,self.errcube]), header=self.header)
            fitcubefile.header['FITTYPE'] = self.specfit.fittype
            # PLANE# keywords name each parameter plane (digits stripped so
            # e.g. 'AMPLITUDE0' becomes 'AMPLITUDE')
            for ii,par in enumerate(self.specfit.parinfo):
                kw = "PLANE%i" % ii
                parname = par['parname'].strip('0123456789')
                fitcubefile.header[kw] = parname
            # set error parameters
            # NOTE: `ii` deliberately keeps its final value from the loop
            # above, so the error planes continue the PLANE# numbering
            for jj,par in enumerate(self.specfit.parinfo):
                kw = "PLANE%i" % (ii+jj+1)
                parname = "e"+par['parname'].strip('0123456789')
                fitcubefile.header[kw] = parname
            # overwrite the WCS
            fitcubefile.header['CDELT3'] = 1
            fitcubefile.header['CTYPE3'] = 'FITPAR'
            fitcubefile.header['CRVAL3'] = 0
            fitcubefile.header['CRPIX3'] = 1
        except AttributeError:
            log.exception("Make sure you run the cube fitter first.")
            return
        # astropy renamed writeto's `clobber` keyword to `overwrite` in v1.3
        if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
            fitcubefile.writeto(fitcubefilename, overwrite=overwrite)
        else:
            fitcubefile.writeto(fitcubefilename, clobber=overwrite)
    def write_cube(self):
        """
        Write the data cube to disk.  Not implemented; always raises
        `NotImplementedError`.
        """
        raise NotImplementedError
class CubeStack(Cube):
    """
    The Cube equivalent of Spectra: for stitching multiple cubes with the same
    spatial grid but different frequencies together
    """
    def __init__(self, cubelist, xunit='GHz', x0=0, y0=0, maskmap=None, **kwargs):
        """
        Initialize the Cube. Accepts FITS files.

        Parameters
        ----------
        cubelist : list
            `Cube` instances and/or FITS filenames (strings) to stack; all
            inputs are converted to `xunit` before concatenation.
        xunit : str
            Common (non-velocity) spectral unit for the stacked axis.
        x0, y0 : int
            initial spectrum to use (defaults to lower-left corner)
        maskmap : ndarray, optional
            Boolean 2D map of good pixels; defaults to all-True.
        **kwargs
            Passed to ``xarr.convert_to_unit`` for cubes needing conversion.
        """
        log.info("Creating Cube Stack")
        cubelist = list(cubelist)
        for ii,cube in enumerate(cubelist):
            if type(cube) is str:
                # strings are treated as FITS filenames and loaded as Cubes
                cube = Cube(cube)
                cubelist[ii] = cube
            if cube.xarr.unit != xunit:
                # convert all inputs to same (non-velocity) unit
                cube.xarr.convert_to_unit(xunit, **kwargs)
        self.cubelist = cubelist
        log.info("Concatenating data")
        self.xarr = SpectroscopicAxes([sp.xarr for sp in cubelist])
        self.cube = np.ma.concatenate([icube.cube for icube in cubelist])
        # error cubes must be supplied for all inputs or for none; a mixture
        # cannot be concatenated meaningfully
        if np.any([icube.errorcube is not None for icube in cubelist]):
            if all([icube.errorcube is not None for icube in cubelist]):
                self.errorcube = np.ma.concatenate([icube.errorcube for icube in cubelist])
            else:
                raise ValueError("Mismatched error cubes.")
        else:
            self.errorcube = None
        if hasattr(self.cube,'mask'):
            try:
                if self.cube.mask in (False,np.bool_(False)):
                    # mask causes major problems internally for numpy...
                    self.cube = np.array(self.cube)
            except ValueError:
                # this means that self.cube.mask is an array;
                # techically that's alright
                pass
        # sort channels so the stacked spectral axis is monotonic
        self._sort()
        self.data = self.cube[:,int(y0),int(x0)]
        self.error = self.errorcube[:,int(y0),int(x0)] if self.errorcube is not None else None
        # merge headers; keywords from later cubes override earlier ones
        self.header = cubelist[0].header.copy()
        for cube in cubelist:
            for key,value in cube.header.items():
                if key in ['HISTORY', 'COMMENT']:
                    continue
                self.header[key] = value
        if self.header:
            self.wcs = wcs.WCS(self.header)
            self.wcs.wcs.fix()
            self._spectral_axis_number = self.wcs.wcs.spec+1
            # axis_types // 1000 == 2 selects celestial axes (wcslib coding)
            self._first_cel_axis_num = np.where(self.wcs.wcs.axis_types // 1000 == 2)[0][0]+1
            # TODO: Improve this!!!
            self.system = ('galactic'
                           if ('CTYPE{0}'.format(self._first_cel_axis_num)
                               in self.header and 'GLON' in
                               self.header['CTYPE{0}'.format(self._first_cel_axis_num)])
                           else 'celestial')
        else:
            # no header: fall back to pixel coordinates
            self._spectral_axis_number = 3
            self._first_cel_axis_num = 1
            self.system = 'PIXEL'
        # all stacked cubes must share a flux unit
        self.unit = cubelist[0].unit
        for cube in cubelist:
            if cube.unit != self.unit:
                raise ValueError("Mismatched units "
                                 "{0} and {1}".format(cube.unit, self.unit))
        self.fileprefix = cubelist[0].fileprefix # first is the best?
        if maskmap is not None:
            self.maskmap = maskmap
        else:
            self.maskmap = np.ones(self.cube.shape[1:],dtype='bool')
        # hook up the standard pyspeckit machinery (fitters, plotters, writers)
        self._register_fitters()
        self.plotter = spectrum.plotters.Plotter(self)
        self.specfit = spectrum.fitters.Specfit(self,Registry=self.Registry)
        self.baseline = spectrum.baseline.Baseline(self)
        self.speclines = spectrum.speclines
        # Initialize writers TO DO: DO WRITERS WORK FOR CUBES?
        self.writer = {}
        for writer in spectrum.writers.writers:
            self.writer[writer] = spectrum.writers.writers[writer](self)
        # Special. This needs to be modified to be more flexible; for now I need it to work for nh3
        self.plot_special = None
        self.plot_special_kwargs = {}
        self._modelcube = None
        self.mapplot = mapplot.MapPlotter(self)
    def _sort(self):
        """ Sort the data in order of increasing X (could be decreasing, but
        must be monotonic for plotting reasons) """
        # apply the same channel permutation to every spectral-axis-aligned
        # attribute so they stay consistent
        indices = self.xarr.argsort()
        self.xarr = self.xarr[indices]
        self.cube = self.cube[indices,:,:]
        if self.errorcube is not None:
            self.errorcube = self.errorcube[indices,:,:]
def get_neighbors(x, y, shape):
    """
    Return offset arrays addressing the (up to 8) in-bounds neighbors of the
    pixel ``(x, y)``, excluding the pixel itself.

    Parameters
    ----------
    x, y : int
        Pixel coordinates.
    shape : tuple
        The ``(ny, nx)`` shape that ``(x + dx, y + dy)`` must stay inside.

    Returns
    -------
    xpatch, ypatch : ndarray of int
        Parallel arrays of dx / dy offsets such that ``(x + dx, y + dy)`` is
        always within bounds.
    """
    ysh, xsh = shape
    offset_pairs = []
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                # never include the pixel itself
                continue
            if 0 <= dx + x < xsh and 0 <= dy + y < ysh:
                offset_pairs.append((dx, dy))
    xpatch, ypatch = zip(*offset_pairs)
    return np.array(xpatch, dtype='int'), np.array(ypatch, dtype='int')
def test_get_neighbors():
    """Exercise get_neighbors at corners, an edge, and an interior pixel."""
    cases = (
        ((0, 0), {0, 1}, {0, 1}),
        ((0, 1), {0, 1}, {-1, 0, 1}),
        ((5, 6), {-1, 0, 1}, {-1, 0, 1}),
        ((9, 9), {0, -1}, {0, -1}),
        ((9, 8), {-1, 0}, {-1, 0, 1}),
    )
    for (x, y), expected_x, expected_y in cases:
        xp, yp = get_neighbors(x, y, [10, 10])
        assert set(xp) == expected_x
        assert set(yp) == expected_y
|
# -*- coding: utf-8 -*-
#
# PySceneDetect: Python-Based Video Scene Detector
# ---------------------------------------------------------------
# [ Site: http://www.scenedetect.scenedetect.com/ ]
# [ Docs: http://manual.scenedetect.scenedetect.com/ ]
# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]
#
# Copyright (C) 2014-2022 <NAME> <http://www.bcastell.com>.
# PySceneDetect is licensed under the BSD 3-Clause License; see the
# included LICENSE file, or visit one of the above pages for details.
#
""":py:class:`VideoStreamAv` provides an adapter for the PyAV av.InputContainer object.
Uses string identifier ``'pyav'``.
"""
from logging import getLogger
from typing import AnyStr, BinaryIO, Optional, Tuple, Union
#pylint: disable=c-extension-no-member
import av
from cupy import ndarray
from scenedetect.frame_timecode import FrameTimecode, MAX_FPS_DELTA
from scenedetect.platform import get_file_name
from scenedetect.video_stream import VideoStream, VideoOpenFailure, FrameRateUnavailable
# Module-level logger shared by this PyAV backend.
logger = getLogger('pyscenedetect')
# Threading modes PyAV accepts for `av.codec.context.ThreadType`; used to
# validate the `threading_mode` argument to `VideoStreamAv.__init__`.
# NOTE(review): `ndarray` is imported from `cupy` in the import block above;
# frame data in this backend is conventionally a NumPy array -- confirm the
# cupy import is intentional.
VALID_THREAD_MODES = [
    av.codec.context.ThreadType.NONE,
    av.codec.context.ThreadType.SLICE,
    av.codec.context.ThreadType.FRAME,
    av.codec.context.ThreadType.AUTO,
]
class VideoStreamAv(VideoStream):
    """PyAV `av.InputContainer` backend."""

    # TODO: Investigate adding an accurate_duration option to backends to calculate the duration
    # with higher precision. Sometimes it doesn't exactly match what the codec or VLC reports,
    # but we can try to seek to the end of the video first to determine it. Investigate how VLC
    # calculates the end time.
    def __init__(
        self,
        path_or_io: Union[AnyStr, BinaryIO],
        framerate: Optional[float] = None,
        name: Optional[str] = None,
        threading_mode: Optional[str] = None,
        suppress_output: bool = False,
    ):
        """Open a video by path.
        .. warning::
            Using `threading_mode` with `suppress_output = True` can cause lockups in your
            application. See the PyAV documentation for details:
            https://pyav.org/docs/stable/overview/caveats.html#sub-interpeters
        Arguments:
            path_or_io: Path to the video, or a file-like object.
            framerate: If set, overrides the detected framerate.
            name: Overrides the `name` property derived from the video path. Should be set if
                `path_or_io` is a file-like object.
            threading_mode: The PyAV video stream `thread_type`. See av.codec.context.ThreadType
                for valid threading modes ('AUTO', 'FRAME', 'NONE', and 'SLICE'). If this mode is
                'AUTO' or 'FRAME' and not all frames have been decoded, the video will be reopened
                if seekable, and the remaining frames decoded in single-threaded mode.
            suppress_output: If False, ffmpeg output will be sent to stdout/stderr by calling
                `av.logging.restore_default_callback()` before any other library calls. If True
                the application may deadlock if threading_mode is set. See the PyAV documentation
                for details: https://pyav.org/docs/stable/overview/caveats.html#sub-interpeters
        Raises:
            OSError: file could not be found or access was denied
            VideoOpenFailure: video could not be opened (may be corrupted)
            ValueError: specified framerate is invalid
        """
        # TODO(#258): See what self._container.discard_corrupt = True does with corrupt videos.
        super().__init__()
        # Ensure specified framerate is valid if set.
        if framerate is not None and framerate < MAX_FPS_DELTA:
            raise ValueError('Specified framerate (%f) is invalid!' % framerate)
        self._name = '' if name is None else name
        self._path = ''
        # Last decoded frame; stays None until the first successful read().
        self._frame = None
        # Guard for the threaded-decode EOF workaround in _handle_eof(); starts True
        # (i.e. "no reopen needed") and is only set False when a threading mode is applied.
        self._reopened = True
        if threading_mode:
            threading_mode = threading_mode.upper()
            if not threading_mode in VALID_THREAD_MODES:
                raise ValueError('Invalid threading mode! Must be one of: %s' % VALID_THREAD_MODES)
        if not suppress_output:
            logger.debug('Restoring default ffmpeg log callbacks.')
            av.logging.restore_default_callback()
        try:
            # String/bytes paths are opened as a binary file object so that seeking and
            # re-opening (see _handle_eof) go through a uniform `self._io` handle.
            if isinstance(path_or_io, (str, bytes)):
                self._path = path_or_io
                self._io = open(path_or_io, 'rb')
                if not self._name:
                    self._name = get_file_name(self.path, include_extension=False)
            else:
                self._io = path_or_io
            self._container = av.open(self._io)
            if threading_mode is not None:
                self._video_stream.thread_type = threading_mode
                self._reopened = False
                logger.debug('Threading mode set: %s', threading_mode)
        except OSError:
            # Propagate filesystem errors unchanged so callers can distinguish them.
            raise
        except Exception as ex:
            raise VideoOpenFailure(str(ex)) from ex
        if framerate is None:
            # Calculate framerate from video container.
            if self._codec_context.framerate.denominator == 0:
                raise FrameRateUnavailable()
            frame_rate = self._codec_context.framerate.numerator / float(
                self._codec_context.framerate.denominator)
            if frame_rate < MAX_FPS_DELTA:
                raise FrameRateUnavailable()
            self._frame_rate: float = frame_rate
        else:
            assert framerate >= MAX_FPS_DELTA
            self._frame_rate: float = framerate
        # Calculate duration after we have set the framerate.
        self._duration_frames = self._get_duration()

    def __del__(self):
        # Best-effort release of the underlying container on garbage collection.
        self._container.close()

    #
    # VideoStream Methods/Properties
    #
    BACKEND_NAME = 'pyav'
    """Unique name used to identify this backend."""

    @property
    def path(self) -> Union[bytes, str]:
        """Video path."""
        return self._path

    @property
    def name(self) -> Union[bytes, str]:
        """Name of the video, without extension."""
        return self._name

    @property
    def is_seekable(self) -> bool:
        """True if seek() is allowed, False otherwise."""
        return self._io.seekable()

    @property
    def frame_size(self) -> Tuple[int, int]:
        """Size of each video frame in pixels as a tuple of (width, height)."""
        return (self._codec_context.coded_width, self._codec_context.coded_height)

    @property
    def duration(self) -> FrameTimecode:
        """Duration of the video as a FrameTimecode."""
        return self.base_timecode + self._duration_frames

    @property
    def frame_rate(self) -> float:
        """Frame rate in frames/sec."""
        return self._frame_rate

    @property
    def position(self) -> FrameTimecode:
        """Current position within stream as FrameTimecode.
        This can be interpreted as presentation time stamp, thus frame 1 corresponds
        to the presentation time 0. Returns 0 even if `frame_number` is 1."""
        if self._frame is None:
            return self.base_timecode
        return FrameTimecode(round(self._frame.time * self.frame_rate), self.frame_rate)

    @property
    def position_ms(self) -> float:
        """Current position within stream as a float of the presentation time in
        milliseconds. The first frame has a PTS of 0."""
        if self._frame is None:
            return 0.0
        return self._frame.time * 1000.0

    @property
    def frame_number(self) -> int:
        """Current position within stream as the frame number.
        Will return 0 until the first frame is `read`."""
        if self._frame:
            # `position` is 0-based PTS, so the frame *number* is one past it.
            return self.position.frame_num + 1
        return 0

    @property
    def aspect_ratio(self) -> float:
        """Display/pixel aspect ratio as a float (1.0 represents square pixels)."""
        return (self._codec_context.display_aspect_ratio.numerator /
                self._codec_context.display_aspect_ratio.denominator)

    def seek(self, target: Union[FrameTimecode, float, int]) -> None:
        """Seek to the given timecode. If given as a frame number, represents the current seek
        pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).
        For 1-based indices (first frame is frame #1), the target frame number needs to be converted
        to 0-based by subtracting one. For example, if we want to seek to the first frame, we call
        seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed
        by read(), at which point frame_number will be 5.
        May not be supported on all input codecs (see `is_seekable`).
        Arguments:
            target: Target position in video stream to seek to.
                If float, interpreted as time in seconds.
                If int, interpreted as frame number.
        Raises:
            ValueError: `target` is not a valid value (i.e. it is negative).
        """
        if target < 0:
            raise ValueError("Target cannot be negative!")
        beginning = (target == 0)
        target = (self.base_timecode + target)
        if target >= 1:
            target = target - 1
        # Convert the (0-based) frame target into the stream's PTS units before seeking.
        target_pts = self._video_stream.start_time + int(
            (self.base_timecode + target).get_seconds() / self._video_stream.time_base)
        self._frame = None
        self._container.seek(target_pts, stream=self._video_stream)
        if not beginning:
            # Container seeks land on a keyframe at/before the target; decode (without
            # converting frames) until the desired position is reached.
            self.read(decode=False, advance=True)
        while self.position < target:
            if self.read(decode=False, advance=True) is False:
                break

    def reset(self):
        """ Close and re-open the VideoStream (should be equivalent to calling `seek(0)`). """
        self._container.close()
        self._frame = None
        try:
            self._container = av.open(self._path if self._path else self._io)
        except Exception as ex:
            raise VideoOpenFailure() from ex

    # NOTE(review): the `ndarray` annotation below comes from `from cupy import ndarray`
    # at the top of this file, but `frame.to_ndarray()` returns a NumPy array -- the
    # import should presumably be `from numpy import ndarray`; confirm upstream.
    def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:
        """ Return next frame (or current if advance = False), or False if end of video.
        Arguments:
            decode: Decode and return the frame.
            advance: Seek to the next frame. If False, will remain on the current frame.
        Returns:
            If decode = True, returns either the decoded frame, or False if end of video.
            If decode = False, a boolean indicating if the next frame was advanced to or not is
            returned.
        """
        has_advanced = False
        if advance:
            try:
                # Keep the previous frame so EOF can restore it (position stays valid).
                last_frame = self._frame
                self._frame = next(self._container.decode(video=0))
            except av.error.EOFError:
                self._frame = last_frame
                # With AUTO/FRAME threading PyAV may hit EOF before the last frames are
                # decoded; _handle_eof() re-opens the stream to finish single-threaded.
                if self._handle_eof():
                    return self.read(decode, advance=True)
                return False
            except StopIteration:
                return False
            has_advanced = True
        if decode:
            return self._frame.to_ndarray(format='bgr24')
        return has_advanced

    #
    # Private Methods/Properties
    #
    @property
    def _video_stream(self):
        """PyAV `av.video.stream.VideoStream` being used."""
        return self._container.streams.video[0]

    @property
    def _codec_context(self):
        """PyAV `av.codec.context.CodecContext` associated with the `video_stream`."""
        return self._video_stream.codec_context

    def _get_duration(self) -> int:
        """Get video duration as number of frames based on the video and set framerate."""
        # See https://pyav.org/docs/develop/api/time.html for details on how ffmpeg/PyAV
        # handle time calculations internally and which time base to use.
        assert self.frame_rate is not None, "Frame rate must be set before calling _get_duration!"
        # See if we can obtain the number of frames directly from the stream itself.
        if self._video_stream.frames > 0:
            return self._video_stream.frames
        # Calculate based on the reported container duration.
        duration_sec = None
        container = self._video_stream.container
        if container.duration is not None and container.duration > 0:
            # Containers use AV_TIME_BASE as the time base.
            duration_sec = float(self._video_stream.container.duration / av.time_base)
        # Lastly, if that calculation fails, try to calculate it based on the stream duration.
        if duration_sec is None or duration_sec < MAX_FPS_DELTA:
            if self._video_stream.duration is None:
                logger.warning('Video duration unavailable.')
                return 0
            # Streams use stream `time_base` as the time base.
            time_base = self._video_stream.time_base
            if time_base.denominator == 0:
                logger.warning(
                    'Unable to calculate video duration: time_base (%s) has zero denominator!',
                    str(time_base))
                return 0
            duration_sec = float(self._video_stream.duration / time_base)
        return round(duration_sec * self.frame_rate)

    def _handle_eof(self):
        """Fix for issue where if thread_type is 'AUTO' the whole video is not decoded.
        Re-open video if the threading mode is AUTO and we didn't decode all of the frames."""
        # Don't re-open the video if we already did, or if we already decoded all the frames.
        if self._reopened or self.frame_number >= self.duration:
            return False
        self._reopened = True
        # Don't re-open the video if we can't seek or aren't in AUTO/FRAME thread_type mode.
        if not self.is_seekable or not self._video_stream.thread_type in ('AUTO', 'FRAME'):
            return False
        last_frame = self.frame_number
        # Remember the I/O offset so it can be restored if the reopen fails.
        orig_pos = self._io.tell()
        try:
            self._io.seek(0)
            container = av.open(self._io)
        except:
            self._io.seek(orig_pos)
            raise
        self._container.close()
        self._container = container
        # Re-seek (single-threaded by default) to where decoding left off.
        self.seek(last_frame)
        return True
|
# -*- coding: utf-8 -*-
'''
Created on 2021-05-17
@author: p19992003
'''
import numpy as np
from turtle import Turtle
class Turtle(object):
    """Turtle / four-week-rule trend-following signals.

    Implements Richard Donchian's weekly breakout ("week rule") system as
    popularized by Richard Dennis' Turtle traders:

    * Whenever price exceeds the highest price of the previous four weeks,
      close any short position and go long.
    * Whenever price falls below the lowest price of the previous four
      weeks, close any long position and go short.

    The rule applies to any time frame; a common refinement opens positions
    with the four-week rule and closes them with a two-week rule. On daily
    bars, four weeks corresponds to the default 20-trading-day lookback used
    below.

    Stock-market variants (weekly-close based):

    * Rule 1: buy when this week's close exceeds the highest price of the
      previous four weeks; sell when it falls below the lowest price of the
      previous two weeks.
    * Rule 2: buy when this week's close exceeds the highest open/close of
      the previous three weeks; sell when it falls below the lowest
      open/close of the previous three weeks.
    """

    def __init__(self):
        """No per-instance state; all functionality is exposed as classmethods."""
        pass

    @classmethod
    def default_quantity(cls):
        """Return the (high, low) reference series for the four-week rule.

        E.g. returning ``CLOSE, CLOSE`` means the highest/lowest *closing*
        prices are used as the breakout references.
        """
        from funcat.api import CLOSE
        return CLOSE, CLOSE

    @classmethod
    def four_week_qty(cls, high_series=None, low_series=None, high_n: int = 20, low_n: int = 20):
        """Rolling breakout reference levels for the four-week rule (daily bars).

        Args:
            high_series: series used for the rolling high over ``high_n`` bars;
                defaults to ``default_quantity()[0]`` when ``None``.
            low_series: series used for the rolling low over ``low_n`` bars;
                defaults to ``default_quantity()[1]`` when ``None``.
            high_n: lookback for the rolling high (default 20 trading days).
            low_n: lookback for the rolling low (default 20 trading days).

        Returns:
            Tuple ``(last_high, last_low)`` of rolling high/low series.
        """
        from funcat.api import LLV, HHV
        # Fill missing series independently: previously a provided low_series was
        # silently discarded when high_series was None, and a provided high_series
        # with low_series=None crashed inside LLV(None, ...).
        if high_series is None or low_series is None:
            default_high, default_low = cls.default_quantity()
            if high_series is None:
                high_series = default_high
            if low_series is None:
                low_series = default_low
        last_high = HHV(high_series, high_n)
        last_low = LLV(low_series, low_n)
        return last_high, last_low

    @classmethod
    def four_week(cls, high_series=None, low_series=None, high_n: int = 20, low_n: int = 20):
        """Four-week rule signals:

        1. When price exceeds the previous four weeks' high, close shorts and
           go long (signal ``1``).
        2. When price falls below the previous four weeks' low, close longs
           and go short (signal ``-1``).

        Returns:
            Tuple of two ``NumericSeries``: long signals (1/0) and short
            signals (-1/0), each NaN-padded by one bar so they align with the
            original index.
        """
        from funcat.api import REF, IF, NumericSeries
        if high_series is None or low_series is None:
            default_high, default_low = cls.default_quantity()
            if high_series is None:
                high_series = default_high
            if low_series is None:
                low_series = default_low
        last_high, last_low = cls.four_week_qty(high_series, low_series, high_n, low_n)
        # Compare today's value against *yesterday's* breakout levels (REF(..., 1))
        # so the bar that sets a new extreme still counts as a breakout.
        hh = IF(high_series > REF(last_high, 1), 1, 0)
        ll = IF(low_series < REF(last_low, 1), -1, 0)
        # Prepend NaN so the signal series align with the original bar index.
        return (NumericSeries(np.append(np.array([np.nan]), hh.series)),
                NumericSeries(np.append(np.array([np.nan]), ll.series)))
class TurtleUsingHighLow(Turtle):
    """Turtle variant whose breakout references are the HIGH/LOW series
    (highest high, lowest low) rather than closing prices."""

    @classmethod
    def default_quantity(cls):
        """Return ``(HIGH, LOW)`` as the breakout reference series.

        E.g. returning ``HIGH, LOW`` means the highest high and lowest low
        are used as the comparison baselines.
        """
        from funcat import api
        return api.HIGH, api.LOW
# Module-level convenience aliases so callers can use the four-week-rule
# classmethods without referencing the Turtle class directly.
FOURWEEKQTY = Turtle.four_week_qty
FOURWEEK = Turtle.four_week
|
# filename: run_science.py
import krpc
import time
from loguru import logger
from typing import Set
# Connect to the kRPC server; requires a running Kerbal Space Program instance
# with the kRPC mod listening. Raises ConnectionRefusedError otherwise.
conn = krpc.connect(name="Run science experiments to gather science")
vessel = conn.space_center.active_vessel
# Create connection streams, about 20 times faster than just calling them directly
vessel_experiments = conn.add_stream(getattr, vessel.parts, "experiments")
class Science:
    """Periodically scans the active vessel's experiment parts and runs,
    resets or transmits them when doing so yields enough science.

    Relies on the module-level kRPC `vessel` object and the
    `vessel_experiments` stream defined at the top of this script.
    """

    def __init__(self):
        # How much science at least should be gathered if the experiment is run
        self.min_science: float = 0.01
        # self.min_science: float = 5
        # Minimum remaining scientific value (0..1); e.g. after an experiment was
        # already run 3 times its value is probably below 0.1
        self.min_scientific_value: float = 0.01
        # Only automatically run experiments that can be rerun
        self.run_non_rerunnable_science = False
        # TODO Automatically transmit if we have enough electric charge to transmit data ?
        self.transmit_science = True
        self.min_transmit_science: float = 0.1
        # TODO If there is a scientist within the crew, it can reset non-rerunnable experiments
        self.has_scientist_in_crew = False
        # Rate limit: run() is a no-op unless this many seconds have elapsed.
        self.run_interval = 5
        self.last_run = time.time()

    def run(self):
        """Do one pass over all experiment parts (rate-limited by run_interval)."""
        if time.time() - self.last_run < self.run_interval:
            return
        self.last_run = time.time()
        # Part names already triggered this pass: only fire one experiment per part type.
        names_run: Set[str] = set()
        experiments = vessel_experiments()
        for i, experiment in enumerate(experiments):
            name: str = experiment.part.name
            if experiment.inoperable or not experiment.available:
                continue
            if name in names_run:
                continue
            scientific_value = science = 0
            if experiment.science_subject is not None:
                # A value between 0 and 1
                scientific_value: float = experiment.science_subject.scientific_value
                # How much science can be gathered total?
                science_cap: float = experiment.science_subject.science_cap
                # Science that can be obtained if ran
                science: float = scientific_value * science_cap
            # Currently stored science
            current_science_value: float = 0 if not experiment.has_data or not experiment.data else experiment.data[
                0
            ].science_value
            if (
                not experiment.has_data
                and current_science_value == 0
                and (self.run_non_rerunnable_science or experiment.rerunnable)
            ):
                if science >= self.min_science and scientific_value > self.min_scientific_value:
                    logger.info(
                        f"{i}: Running experiment on part: {experiment.part.name} to obtain {science:.2f} science ({scientific_value:.2f} scientific value), experiment: {experiment.science_subject.title}"
                    )
                    experiment.run()
                    names_run.add(name)
            elif experiment.has_data and experiment.rerunnable and current_science_value < science:
                # Stored result is worth less than a fresh run: discard and re-run later.
                if science >= self.min_science and scientific_value > self.min_scientific_value:
                    experiment.reset()
                    # time.sleep(0.1)
                    logger.info(
                        f"{i}: Resetting experiment on part: {experiment.part.name} to obtain {science:.2f} science ({scientific_value:.2f} scientific value) and discarding old value of {current_science_value:.2f} science"
                    )
                    # experiment.run()
                    names_run.add(name)
            elif experiment.has_data and experiment.data and self.transmit_science:
                data = experiment.data[0]
                transmit_science = data.data_amount * data.transmit_value
                # Only transmit when the battery is nearly full (>99% charge).
                # BUGFIX: the previous check was `amount * 0.99 > max`, which can
                # never be true since amount <= max, so nothing ever transmitted.
                if transmit_science > self.min_transmit_science and vessel.resources.amount(
                    "ElectricCharge"
                ) > vessel.resources.max("ElectricCharge") * 0.99:
                    logger.info(
                        f"Transmitting science on part {experiment.part.name} for transmit science total of {transmit_science:.02f}"
                    )
                    experiment.transmit()
        # Wait some time before running more experiments
        # self.last_run = time.time() + 20
        return
if __name__ == "__main__":
    # Optionally wait until the vessel leaves the launch pad before starting:
    # while vessel.situation.name in {"pre_launch"}:
    #     time.sleep(0.1)
    logger.info(f"Gathering science...")
    science = Science()
    # Poll forever; Science.run() rate-limits itself via run_interval.
    while 1:
        time.sleep(0.1)
        science.run()
        # https://krpc.github.io/krpc/python/api/space-center/vessel.html#SpaceCenter.VesselSituation
        # if vessel.situation.name in {"landed", "splashed"}:
        #     break
    logger.info("END OF PROGRAM")
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from scipy.optimize import curve_fit
from textwrap import wrap
def AbsErrorROCPerPoint(scores, mode='mean', draw=True):
    """ROC-style curve: running average absolute error vs. confidence.

    Args:
        scores: (n, 2) array; columns are (confidence, absolute error).
        mode: 'mean' or 'median' aggregation over the i+1 most confident points.
        draw: if True, plot the curve with matplotlib.

    Returns:
        (n, 2) array with columns (confidence threshold, running average error),
        ordered by descending confidence.

    Raises:
        ValueError: if ``mode`` is not 'mean' or 'median'.
    """
    n = scores.shape[0]
    scores = scores[(-scores[:, 0]).argsort()]  # sort confidence in descending order
    if mode == 'mean':
        # Running mean of the error over the top-(i+1) most confident points.
        cum_error = np.cumsum(scores[:, 1], axis=0)
        ave_error = cum_error / (np.arange(n) + 1)
    elif mode == 'median':
        # Running median of the error over the top-(i+1) most confident points.
        ave_error = np.zeros((n,))
        for i in range(n):
            ave_error[i] = np.median(scores[:i + 1, 1])
    else:
        # BUGFIX: an unknown mode previously fell through to an UnboundLocalError
        # on `ave_error`; fail loudly with a clear message instead.
        raise ValueError("mode must be 'mean' or 'median', got %r" % (mode,))
    if draw:
        plt.plot(scores[:, 0], ave_error, '-o')
        plt.grid()
        plt.show()
    return np.stack((scores[:, 0], ave_error), axis=1)
def AbsErrorROCFixedSteps(scores, num_threth=50, mode='median', draw=True):
    """ROC-style curve sampled at a fixed number of confidence thresholds.

    Args:
        scores: (n, 2) array; columns are (confidence, absolute error).
        num_threth: number of threshold steps to sample.
        mode: 'mean' or 'median' aggregation over points above each threshold.
        draw: if True, plot the curve with matplotlib.

    Returns:
        (num_threth, 2) array with columns (confidence threshold, average error).
    """
    scores = scores[(-scores[:, 0]).argsort()]  # sort confidence in descending order
    ave_error = np.zeros((num_threth, 2))
    n = scores.shape[0]
    step_size = n / num_threth
    for i in range(num_threth):
        # BUGFIX: np.int was removed in NumPy 1.24; the builtin int() truncates
        # identically for non-negative floats.
        ind = min(int((i + 1) * step_size + 1), n - 1)
        ave_error[i, 0] = scores[ind, 0]
        if mode == 'mean':
            ave_error[i, 1] = np.mean(scores[:ind, 1])
        if mode == 'median':
            ave_error[i, 1] = np.median(scores[:ind, 1])
    if draw:
        plt.plot(ave_error[:, 0], ave_error[:, 1], '-o')
        plt.grid()
        plt.show()
    return ave_error
def gaussian(x, mu, sig):
    """Gaussian bump used as a voting kernel / curve-fit model.

    NOTE(review): the prefactor is 1/(2*pi*sig), not the standard pdf
    normalization 1/(sig*sqrt(2*pi)) -- presumably intentional since callers
    renormalize the accumulated votes; confirm before reusing as a pdf.
    """
    exponent = -np.power(x - mu, 2.) / (2 * np.power(sig, 2.))
    return 1 / (2 * np.pi * sig) * np.exp(exponent)
def scaled_gaussian(x, mu, sig, s):
    """Gaussian-shaped bump whose peak value is ``s`` (no pdf normalization)."""
    deviation = x - mu
    return s * np.exp(-np.power(deviation, 2.) / (2 * np.power(sig, 2.)))
def gaussianVoting(scores, kernel_var=500, draw=True, path='../figures/offset.jpg'):
    """Estimate a time offset by confidence-weighted Gaussian kernel voting.

    Each (confidence, offset) row contributes a Gaussian bump centered at its
    offset, weighted by its confidence; the estimate is the argmax of the
    accumulated vote curve over [-20000, 20000] ms.
    """
    # INPUT: n x 2, conf, offset
    # OUTPUT: offset
    offset_max = 20000
    x = np.arange(-offset_max, offset_max + 1)
    y = np.zeros(2 * offset_max + 1)
    for i in range(scores.shape[0]):
        y += gaussian(x, scores[i, 1], kernel_var) * scores[i, 0]
    # Normalize by total confidence so y behaves like a weighted density.
    y /= np.sum(scores[:, 0])
    offset = np.argmax(y) - offset_max
    # fit a Gaussian to voted_shift using nonlinear least square
    # confidence of the shift estimation can be described as the variance of the estimated model parameters
    # conf = max(abs(y-median(y)))/stdev(y)
    try:
        popt, pcov = curve_fit(gaussian, x, y, bounds=([-offset_max, 0], [offset_max, np.inf]))
        y_nlm = gaussian(x, *popt)
    except RuntimeError:
        # Fit failed to converge: report infinite parameters / covariance so
        # the derived confidence below collapses to 0.
        # NOTE(review): the fallback popt has 3 entries although gaussian() has
        # only 2 fit parameters; only popt[1] is read below, so it is harmless,
        # but confirm the intended shape.
        popt, pcov = np.array([np.inf, np.inf, np.inf]), \
            np.array([[np.inf, np.inf, np.inf], [np.inf, np.inf, np.inf], [np.inf, np.inf, np.inf]])
        y_nlm = np.zeros((len(x)))
    # Heuristic confidence: narrow fitted sigma and low variance on mu => high conf.
    # NOTE(review): the 200000 scale factor appears empirical -- confirm.
    conf = 200000 / popt[1] / pcov[0, 0]
    if draw:
        plt.figure()
        plt.plot(x, y, color='blue', label='weighted kde')
        plt.plot(x, y_nlm, color='red', label='fitted gaussian')
        plt.xlabel('shift/ms')
        plt.ylabel('probability')
        plt.legend(loc='upper right')
        title = '{} windows, offset={}ms, conf={:.2f}'.format(scores.shape[0], int(offset), conf)
        plt.title("\n".join(wrap(title, 60)))
        plt.savefig(path)
        plt.close()
    return offset, conf, [popt, pcov]
def gaussianVotingPerVideo(scores_dataframe, kernel_var=100, thresh=0, min_voting_segs=0, draw=True,
                           folder='../figures/cross_corr/'):
    """Run gaussianVoting once per video and assemble a per-video summary table.

    Args:
        scores_dataframe: DataFrame with 'confidence', 'drift' and 'video' columns.
        kernel_var: Gaussian kernel width passed to gaussianVoting.
        thresh: segments with confidence <= thresh are dropped before voting.
        min_voting_segs: videos with <= this many surviving segments get NaN estimates.
        draw: if True, save a vote/fit figure per video under ``folder``.
        folder: output directory for the figures.

    Returns:
        Tuple ``(summary_df, ave_segs)``: one summary row per video, and the
        average number of voting segments across videos with an estimate.
    """
    # INPUT: n x 3, conf, offset, video
    # OUTPUT: nv, offset
    scores = scores_dataframe[['confidence', 'drift', 'video']].to_numpy()
    # Drop low-confidence segments before voting.
    scores = scores[scores[:, 0] > thresh]
    videos = np.unique(scores_dataframe[['video']].to_numpy())
    offset = np.zeros((len(videos)))
    conf = np.zeros((len(videos)))
    # Per-video nonlinear-fit parameters: (mu, sigma, var(mu), var(sigma)).
    nlm_params = np.zeros((len(videos), 4))
    num_valid_segs = np.zeros((len(videos)))
    num_segs = 0
    num_videos = 0
    for i, vid in enumerate(videos):
        path = os.path.join(folder, 'offset_' + vid)
        valid_segs = scores[:, 2] == vid
        num_segs_cur = sum(valid_segs)
        if num_segs_cur > min_voting_segs:
            offset[i], conf[i], p = gaussianVoting(scores[valid_segs, :2], kernel_var, draw, path)
            # Keep fitted (mu, sigma) and the diagonal of their covariance.
            nlm_params[i, :] = np.concatenate((p[0][:2], np.diag(p[1])[:2]))
            num_valid_segs[i] = num_segs_cur
            num_segs += num_segs_cur
            num_videos += 1
        else:
            # Too few segments for a reliable vote: mark the video as invalid.
            offset[i] = np.nan
            conf[i] = np.nan
    try:
        ave_segs = num_segs / num_videos
    except ZeroDivisionError:
        # No video had enough segments.
        ave_segs = np.nan
    summary_df = pd.DataFrame(np.concatenate(
        [np.stack([videos, offset, abs(offset), num_valid_segs, conf], axis=1), nlm_params, abs(nlm_params[:, :1])],
        axis=1), \
        columns=['video', 'offset', 'abs_offset', 'num_segs', 'conf', 'mu', 'sigma', 'mu_var',
                 'sigma_var', 'abs_mu'])
    return summary_df, ave_segs
def testCase1():
    """Smoke-test AbsErrorROCPerPoint on random (confidence, error) pairs."""
    random_scores = np.random.rand(10, 2)
    print(AbsErrorROCPerPoint(random_scores))
def videoDrift(scores_dataframe, output_file):
    """Estimate the per-video drift summary and write it to a CSV file.

    Args:
        scores_dataframe: DataFrame with 'confidence', 'drift' and 'video' columns.
        output_file: path of the CSV file to write.

    Returns:
        The per-video summary DataFrame produced by gaussianVotingPerVideo.
    """
    offset, _ = gaussianVotingPerVideo(scores_dataframe, draw=True, folder='../figures/cross_corr')
    # NOTE(review): index=None relies on pandas treating None like False here;
    # index=False is the conventional spelling -- confirm before changing.
    offset.to_csv(output_file, index=None)
    return offset
def videoDriftROC(scores_dataframe, num_thresh=50, draw=True, folder='../figures/cross_corr'):
    """Sweep confidence thresholds and report mean abs offset per threshold.

    Args:
        scores_dataframe: DataFrame with 'confidence', 'drift' and 'video' columns.
        num_thresh: number of confidence thresholds sampled in [lb, ub].
        draw: if True, save the ROC-style plots under ``folder``.
        folder: output directory for the figures.

    Returns:
        Tuple ``(offsets, num_videos)``: mean absolute per-video offset and the
        count of videos with a valid estimate, one entry per threshold.
    """
    # INPUT: n x 2, cols: conf, abs error
    # Threshold sweep range; tune lb/ub to the observed confidence scale.
    # lb = 3
    # ub = 5
    lb = 0
    ub = 7
    conf_threshs = np.linspace(lb, ub, num_thresh)
    offsets = np.zeros((num_thresh,))
    num_videos = np.zeros((num_thresh,))
    num_valid_segs = np.zeros((num_thresh,))
    for i, thresh in enumerate(conf_threshs):
        offset, ave_segs = gaussianVotingPerVideo(scores_dataframe, kernel_var=500, thresh=thresh, draw=False)
        offset = offset.to_numpy()
        # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is the
        # documented replacement and behaves identically here.
        valid_videos = ~np.isnan(offset[:, 1].astype(float))
        offsets[i] = np.mean(abs(offset[valid_videos, 1]))
        num_videos[i] = sum(valid_videos)
        num_valid_segs[i] = ave_segs
    if draw:
        # Dual-axis plot: abs offset (left) and valid-video count (right).
        fig, ax = plt.subplots()
        ax.plot(conf_threshs, offsets, color="red", marker="o")
        ax.set_xlabel("confidence threshold", fontsize=14)
        ax.set_ylabel("abs offset", color="red", fontsize=14)
        ax2 = ax.twinx()
        ax2.plot(conf_threshs, num_videos, color="blue", marker="o")
        ax2.set_ylabel("number of valid videos", color="blue", fontsize=14)
        plt.grid()
        # plt.show()
        fig.savefig(os.path.join(folder, "video drift ROC curve"), \
                    format='jpeg', \
                    dpi=100, \
                    bbox_inches='tight')
        plt.figure()
        plt.plot(conf_threshs, num_valid_segs, marker="o")
        plt.grid()
        plt.savefig(os.path.join(folder, "num of segs"), \
                    format='jpeg', \
                    dpi=100, \
                    bbox_inches='tight')
        plt.show()
    return offsets, num_videos
if __name__ == '__main__':
    # Run the random-input smoke test when executed directly.
    testCase1()
|
# filename: chainer/functions/connection/convolution_2d.py
import ctypes
import math
import numpy
from six import moves
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
# Bind cuDNN handles once at import time when optional cuDNN support is enabled.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cuda.cudnn.cudnn
    # Prefer the fastest forward algorithm that fits an explicit workspace limit.
    _fwd_pref = libcudnn.CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return (x, x)
class Convolution2D(function.Function):
"""Two-dimensional convolution function.
The details of this function are described below the arguments description.
Args:
in_channels (int): Number of channels of input arrays.
out_channels (int): Number of channels of output arrays.
ksize (int or (int, int)): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or (int, int)): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or (int, int)): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
wscale (float): Scaling factor of the initial weight.
bias (float): Initial bias value.
nobias (bool): If True, then this function does not use the bias term.
use_cudnn (bool): If True, then this function uses CuDNN if available.
initialW (4-D array): Initial weight value. If ``None``, then this
function uses to initialize ``wscale``.
initial_bias (1-D array): Initial bias value. If ``None``, then this
function uses to initialize ``bias``.
dtype (numpy.dtype): Type to use in computing.
This function holds at most two parameter arrays: ``W`` and ``b``, which
indicate the filter weight and the bias vector, respectively.
The filter weight has four dimensions :math:`(c_O, c_I, k_H, k_W)`
which indicate the number of output channels, the number of input channels,
height and width of the kernels, respectively.
The filter weight is initialized with i.i.d. Gaussian random samples, each
of which has zero mean and deviation :math:`\sqrt{1/(c_I k_H k_W)}` by
default. The deviation is scaled by ``wscale`` if specified.
The bias vector is of size :math:`c_O`.
Each element of it is initialized by ``bias`` argument.
If ``nobias`` argument is set to True, then this function does not hold
the bias parameter.
The two-dimensional convolution function is defined as follows.
Let :math:`X` be the input tensor of dimensions :math:`(n, c_I, h, w)`,
where :math:`n` is the batch size, and :math:`(h, w)` is spatial size of
the input image.
Then the ``Convolution2D`` function computes correlations between filters
and patches of size :math:`(k_H, k_W)` in :math:`X`.
Note that correlation here is equivalent to the inner product between
expanded vectors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``-pad`` for each spatial axis.
The right-most (or bottom-most) patches do not run over the padded spatial
size.
Let :math:`(s_Y, s_X)` be the stride of filter application, and
:math:`(p_H, p_W)` the spatial padding size. Then, the output size
:math:`(h_O, w_O)` is determined by the following equations:
.. math::
h_O &= (h + 2p_H - k_H) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W) / s_X + 1.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
wscale=1, bias=0, nobias=False, use_cudnn=True,
initialW=None, initial_bias=None,
dtype=numpy.float32):
self.dtype = numpy.dtype(dtype)
ksize = _pair(ksize)
stride = _pair(stride)
pad = _pair(pad)
self.kh, self.kw = ksize
self.sy, self.sx = stride
self.ph, self.pw = pad
self.in_channels = in_channels
self.out_channels = out_channels
self.W = None
self.gW = None
self.b = None
self.gb = None
if initialW is not None:
assert initialW.shape == \
(out_channels, in_channels, self.kh, self.kw)
self.W = initialW
else:
self.W = numpy.random.normal(
0, wscale * math.sqrt(1. / (self.kh * self.kw * in_channels)),
(out_channels, in_channels, self.kh, self.kw)
).astype(self.dtype)
xp = cuda.get_array_module(self.W)
self.gW = xp.full_like(self.W, numpy.nan)
if initial_bias is not None:
assert initial_bias.shape == (out_channels,)
self.b = initial_bias
elif not nobias:
self.b = numpy.repeat(self.dtype.type(bias), out_channels)
if self.b is not None:
self.gb = xp.full_like(self.b, numpy.nan)
self.use_cudnn = use_cudnn
if cuda.cudnn_enabled and use_cudnn:
# chance to choose implicit-precomp-gemm algorithm
self.max_workspace_size = in_channels * self.kh * self.kw * 4
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype == self.dtype,
x_type.ndim == 4,
x_type.shape[1] == self.in_channels
)
@property
def parameter_names(self):
if self.b is None:
return 'W',
return 'W', 'b'
@property
def gradient_names(self):
if self.gb is None:
return 'gW',
return 'gW', 'gb'
def zero_grads(self):
self.gW.fill(0)
if self.gb is not None:
self.gb.fill(0)
def forward_cpu(self, x):
self.col = conv.im2col_cpu(
x[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw)
y = numpy.tensordot(self.col, self.W, ((1, 2, 3), (1, 2, 3)))
if self.b is not None:
y += self.b
return numpy.rollaxis(y, 3, 1),
def forward_gpu(self, x):
n, c, h, w = x[0].shape
out_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)
out_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)
out_c = self.W.shape[0]
y = cuda.empty((n, out_c, out_h, out_w), dtype=self.dtype)
if cuda.cudnn_enabled and self.use_cudnn:
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x[0])
y_desc = cudnn.create_tensor_descriptor(y)
self.filter_desc = cudnn.create_filter_descriptor(self.W)
self.conv_desc = cudnn.create_convolution_descriptor(
(self.ph, self.pw), (self.sy, self.sx))
if self.b is not None:
self.bias_desc = cudnn.create_tensor_descriptor(
self.b[None, :, None, None])
algo = libcudnn.getConvolutionForwardAlgorithm(
handle, x_desc.value, self.filter_desc.value,
self.conv_desc.value, y_desc.value, _fwd_pref,
self.max_workspace_size)
workspace_size = libcudnn.getConvolutionForwardWorkspaceSize(
handle, x_desc.value, self.filter_desc.value,
self.conv_desc.value, y_desc.value, algo)
workspace = cuda.empty(
(max(workspace_size // 4, 1),), dtype=self.dtype)
one = ctypes.c_float(1)
zero = ctypes.c_float(0)
libcudnn.convolutionForward(
handle, one, x_desc.value, x[0].data.ptr,
self.filter_desc.value, self.W.data.ptr, self.conv_desc.value,
algo, workspace.data.ptr, workspace_size, zero, y_desc.value,
y.data.ptr)
# TODO(beam2d): Support unshared bias
if self.b is not None:
libcudnn.addTensor(
handle, libcudnn.CUDNN_ADD_SAME_C, one,
self.bias_desc.value, self.b.data.ptr, one, y_desc.value,
y.data.ptr)
else:
# Implementation using im2col
self.col = conv.im2col_gpu(
x[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw)
# TODO(beam2d): Use streams
W_mat = self.W.reshape(out_c, c * self.kh * self.kw)
col_mats = self.col.reshape(
n, c * self.kh * self.kw, out_h * out_w)
y_mats = y.reshape(n, out_c, out_h * out_w)
for i in moves.range(n):
y_mats[i] = W_mat.dot(col_mats[i])
# TODO(beam2d): Support unshared bias
if self.b is not None:
y += self.b.reshape((1, out_c, 1, 1))
return y,
def backward_cpu(self, x, gy):
if self.gb is not None:
self.gb += gy[0].sum(axis=(0, 2, 3))
self.gW += numpy.tensordot(gy[0], self.col, ((0, 2, 3), (0, 4, 5)))
gcol = numpy.tensordot(self.W, gy[0], (0, 1))
gcol = numpy.rollaxis(gcol, 3)
h, w = x[0].shape[2:]
return conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w),
def backward_gpu(self, x, gy):
    """Compute convolution gradients on GPU.

    Uses cuDNN when enabled; otherwise mirrors the im2col path of the
    forward pass with per-sample matrix products.
    """
    out_c, out_h, out_w = gy[0].shape[1:]
    n, c, h, w = x[0].shape
    if cuda.cudnn_enabled and self.use_cudnn:
        handle = cudnn.get_handle()
        x_desc = cudnn.create_tensor_descriptor(x[0])
        gy_arr = gy[0]
        if not gy_arr.flags.c_contiguous:
            # cuDNN expects a contiguous gradient tensor.
            gy_arr = cuda.cupy.ascontiguousarray(gy_arr)
        gy_desc = cudnn.create_tensor_descriptor(gy_arr)
        one = ctypes.c_float(1)
        zero = ctypes.c_float(0)
        if self.b is not None:
            # beta=1 (second `one`): accumulate into the existing gb.
            libcudnn.convolutionBackwardBias(
                handle, one, gy_desc.value, gy_arr.data.ptr,
                one, self.bias_desc.value, self.gb.data.ptr)
        # Filter gradient, accumulated (beta=1) into self.gW.
        libcudnn.convolutionBackwardFilter(
            handle, one, x_desc.value, x[0].data.ptr,
            gy_desc.value, gy_arr.data.ptr, self.conv_desc.value,
            one, self.filter_desc.value, self.gW.data.ptr)
        # Input gradient, written (beta=0) into a fresh buffer.
        gx = cuda.empty_like(x[0])
        libcudnn.convolutionBackwardData(
            handle, one, self.filter_desc.value, self.W.data.ptr,
            gy_desc.value, gy_arr.data.ptr, self.conv_desc.value,
            zero, x_desc.value, gx.data.ptr)
    else:
        if self.gb is not None:
            self.gb += gy[0].sum(axis=(0, 2, 3))
        # TODO(beam2d): Use streams
        gW_mat = self.gW.reshape(out_c, c * self.kh * self.kw)
        col_mats = self.col.reshape(
            n, c * self.kh * self.kw, out_h * out_w)
        gy_mats = gy[0].reshape(n, out_c, out_h * out_w)
        for i in moves.range(n):
            # Accumulate the filter gradient one sample at a time.
            gW_mat += cuda.cupy.dot(gy_mats[i], col_mats[i].T)
        W_mat = self.W.reshape(out_c, c * self.kh * self.kw)
        gcol = cuda.empty_like(self.col)
        gcol_mats = gcol.reshape(n, c * self.kh * self.kw, out_h * out_w)
        for i in moves.range(n):
            # Write W^T . gy directly into the gcol buffer (out arg).
            cuda.cupy.dot(W_mat.T, gy_mats[i], gcol_mats[i])
        gx = conv.col2im_gpu(
            gcol, self.sy, self.sx, self.ph, self.pw, h, w)
    return gx,
class NonparameterizedConvolution2D(function.Function):

    """Two-dimensional nonparameterized convolution class.

    Args:
        stride (int or (int, int)): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or (int, int)): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        use_cudnn (bool): If True, then this function uses CuDNN if available.

    .. seealso:: :class:`Convolution2D`

    """

    def __init__(self, stride=1, pad=0, use_cudnn=True):
        self.stride = stride
        self.pad = pad
        self.use_cudnn = use_cudnn

    def check_type_forward(self, in_types):
        # Accepts (x, W) or (x, W, b): all float32, x and W 4-dimensional,
        # with matching input-channel counts; b is per-output-channel.
        type_check.expect(
            2 <= in_types.size(),
            in_types.size() <= 3,
        )
        x_type = in_types[0]
        w_type = in_types[1]
        type_check.expect(
            x_type.dtype == numpy.float32,
            w_type.dtype == numpy.float32,
            x_type.ndim == 4,
            w_type.ndim == 4,
            x_type.shape[1] == w_type.shape[1],
        )
        if in_types.size().eval() == 3:
            b_type = in_types[2]
            type_check.expect(
                b_type.dtype == numpy.float32,
                b_type.ndim == 1,
                b_type.shape[0] == w_type.shape[0],
            )

    def forward(self, x):
        # Delegate to a parameterized Convolution2D seeded with the given
        # W (and optional bias), keeping it for the backward pass.
        W = x[1]
        if len(x) == 3:
            func = Convolution2D(
                W.shape[1], W.shape[0], W.shape[2:],
                stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn,
                initialW=W, initial_bias=x[2])
        else:
            func = Convolution2D(
                W.shape[1], W.shape[0], W.shape[2:],
                stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn,
                initialW=W, nobias=True)
        self.func = func
        if any(isinstance(i, cuda.ndarray) for i in x):
            func.to_gpu()
        return func.forward(x[:1])

    def backward(self, x, gy):
        # The delegate's parameter gradients (gW, gb) become this
        # function's gradients with respect to the W and b inputs.
        func = self.func
        func.zero_grads()
        gx = func.backward(x[:1], gy)
        if func.gb is None:
            return (gx[0], func.gW)
        return (gx[0], func.gW, func.gb)
def convolution_2d(x, W, b=None, stride=1, pad=0, use_cudnn=True):
    """Two-dimensional convolution function.

    Args:
        x (~chainer.Variable): Input variable.
        W (~chainer.Variable): Weight variable.
        b (~chainer.Variable): Bias variable (optional).
        stride (int or (int, int)): Stride of filter applications.
            ``stride=s`` and ``stride=(s, s)`` are equivalent.
        pad (int or (int, int)): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p)`` are equivalent.
        use_cudnn (bool): If True, then this function uses CuDNN if available.

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :class:`Convolution2D`

    """
    func = NonparameterizedConvolution2D(
        stride=stride, pad=pad, use_cudnn=use_cudnn)
    if b is None:
        return func(x, W)
    return func(x, W, b)
|
<reponame>wuyuehang/crosswalk-webdriver-python
# Public command handlers exported by this module.
__all__ = [
    "ExecuteWindowCommand",
    "ExecuteGetTitle",
    "ExecuteRefresh",
    "ExecuteGetCurrentUrl",
    "ExecuteGetPageSource",
    "ExecuteIsBrowserOnline",
    "ExecuteGet",
    "ExecuteGoBack",
    "ExecuteGoForward",
    "ExecuteFindElement",
    "ExecuteFindElements",
    "ExecuteExecuteScript",
    "ExecuteExecuteAsyncScript",
    "ExecuteScreenshot",
    "ExecuteGetWindowSize",
    "ExecuteGetWindowPosition",
    "ExecuteGetCookies",
    "ExecuteAddCookie",
    "ExecuteDeleteCookie",
    "ExecuteDeleteAllCookies",
    "ExecuteSwitchToFrame",
]
from browser.status import *
from browser.js import *
from browser.web_view_impl import WebViewImpl
from base.log import VLOG
from command.element_util import FindElement
from command.init_session_commands import GenerateId
class Cookie(object):
    """Plain value object describing one browser cookie."""

    # Every attribute carried by a cookie, in constructor order.
    _FIELDS = ("name", "value", "domain", "path", "expiry", "secure", "session")

    def __init__(self, name, value, domain, path, expiry, secure, session):
        self.name = name
        self.value = value
        self.domain = domain
        self.path = path
        self.expiry = expiry
        self.secure = secure
        self.session = session

    def Update(self, other):
        """Copy every field of *other* onto this cookie."""
        for field in self._FIELDS:
            setattr(self, field, getattr(other, field))
def _CreateDictionaryFrom(cookie):
dictionary = {}
dictionary["name"] = cookie.name
dictionary["value"] = cookie.value
if cookie.domain:
dictionary["domain"] = cookie.domain
if cookie.path:
dictionary["path"] = cookie.path
if not cookie.session:
dictionary["expiry"] = cookie.expiry
dictionary["secure"] = cookie.secure
return dictionary
def _GetVisibleCookies(web_view, cookies):
    """Fill *cookies* (in place) with Cookie objects read from DevTools."""
    internal_cookies = []
    status = web_view.GetCookies(internal_cookies)
    if status.IsError():
        return status
    parsed = []
    for entry in internal_cookies:
        if type(entry) != dict:
            return Status(kUnknownError, "DevTools returns a non-dictionary cookie")
        parsed.append(Cookie(
            entry.get("name", ""),
            entry.get("value", ""),
            entry.get("domain", ""),
            entry.get("path", ""),
            # DevTools reports expiry in milliseconds; store seconds.
            entry.get("expires", 0) / 1000.0,
            entry.get("secure", False),
            entry.get("session", False)))
    cookies[:] = parsed
    return Status(kOk)
def _GetUrl(web_view, frame):
    """Return (status, url) where url is document.URL of *frame*."""
    value = {}
    status = web_view.CallFunction(
        frame, "function() { return document.URL; }", [], value)
    if status.IsError():
        return (status, "")
    url = value["value"]
    if type(url) != str:
        return (Status(kUnknownError, "javascript failed to return the url"), "")
    return (Status(kOk), url)
def ExecuteWindowCommand(command, session, params, value):
    """Run a window-scoped command against the session's target web view.

    Connects to the target view, drains pending events, refuses to run
    while a JavaScript dialog is open, and retries the command once from
    the top frame if the current subframe's execution context vanished.
    """
    web_view = WebViewImpl("fake_id", 0, None)
    # update web_view
    status = session.GetTargetWindow(web_view)
    if status.IsError():
        return status
    status = web_view.ConnectIfNecessary()
    if status.IsError():
        return status
    status = web_view.HandleReceivedEvents()
    if status.IsError():
        return status
    if web_view.GetJavaScriptDialogManager().IsDialogOpen():
        return Status(kUnexpectedAlertOpen)
    nav_status = Status(kOk)
    for attempt in range(2):
        if attempt == 1:
            if status.Code() == kNoSuchExecutionContext:
                # Switch to main frame and retry command if subframe no longer exists.
                session.SwitchToTopFrame()
            else:
                break
        # Let any navigation already in flight settle before running.
        nav_status = web_view.WaitForPendingNavigations(session.GetCurrentFrameId(), session.page_load_timeout, True)
        if nav_status.IsError():
            return nav_status
        command.Update([session, web_view, params, value])
        status = command.Run()
        # Wait again for navigations the command itself may have started.
        nav_status = web_view.WaitForPendingNavigations(session.GetCurrentFrameId(), session.page_load_timeout, True)
    if status.IsOk() and nav_status.IsError() and nav_status.Code() != kUnexpectedAlertOpen:
        return nav_status
    if status.Code() == kUnexpectedAlertOpen:
        # An open alert is not an error here; the next command handles it.
        return Status(kOk)
    return status
def ExecuteGetTitle(session, web_view, params, value):
    """Return document.title, falling back to document.URL when empty."""
    script = (
        "function() {"
        " if (document.title)"
        " return document.title;"
        " else"
        " return document.URL;"
        "}"
    )
    return web_view.CallFunction("", script, [], value)
def ExecuteRefresh(session, web_view, params, value):
    """Reload the current page."""
    return web_view.Reload()
def ExecuteGetCurrentUrl(session, web_view, params, value):
    """Store the current frame's URL into *value*.

    Bug fix: ``value.update(url)`` was called with the bare URL string,
    which raises ValueError (a str is not a mapping).  The URL is now
    wrapped in the ``{"value": ...}`` envelope used by the other
    handlers in this module (e.g. ExecuteScreenshot, ExecuteGetCookies).
    """
    (status, url) = _GetUrl(web_view, session.GetCurrentFrameId())
    if status.IsError():
        return status
    value.clear()
    value.update({"value": url})
    return Status(kOk)
def ExecuteGetPageSource(session, web_view, params, value):
    """Serialize the current frame's DOM into *value*."""
    script = (
        "function() {"
        " return new XMLSerializer().serializeToString(document);"
        "}"
    )
    return web_view.CallFunction(session.GetCurrentFrameId(), script, [], value)
def ExecuteIsBrowserOnline(session, web_view, params, value):
    """Expose navigator.onLine for the current frame."""
    return web_view.EvaluateScript(
        session.GetCurrentFrameId(), "navigator.onLine", value)
def ExecuteGet(session, web_view, params, value):
    """Navigate the web view to params['url']."""
    target = params.get("url", None)
    if type(target) != str:
        return Status(kUnknownError, "'url' must be a string")
    return web_view.Load(target)
def ExecuteGoBack(session, web_view, params, value):
    """Navigate one step back in the session history."""
    return web_view.EvaluateScript("", "window.history.back();", value)
def ExecuteGoForward(session, web_view, params, value):
    """Navigate one step forward in the session history."""
    return web_view.EvaluateScript("", "window.history.forward();", value)
def ExecuteFindElement(session, web_view, params, value):
    """Locate the first matching element, polling every 50 ms."""
    return FindElement(50, True, "", session, web_view, params, value)
def ExecuteFindElements(session, web_view, params, value):
    """Locate all matching elements, polling every 50 ms."""
    return FindElement(50, False, "", session, web_view, params, value)
def ExecuteExecuteScript(session, web_view, params, value):
    """Run a user-supplied script in the current frame.

    Bug fix: the ``:takeHeapSnapshot`` branch used to fall off the end
    of the function and implicitly return None, which breaks callers
    that invoke ``.IsError()`` on the result; it now returns a Status.
    """
    script = params.get("script")
    if type(script) != str:
        return Status(kUnknownError, "'script' must be a string")
    if script == ":takeHeapSnapshot":
        # TODO:
        # return web_view->TakeHeapSnapshot(value);
        return Status(kOk)
    args = params.get("args")
    if type(args) != list:
        return Status(kUnknownError, "'args' must be a list")
    return web_view.CallFunction(session.GetCurrentFrameId(), "function(){" + script + "}", args, value)
def ExecuteExecuteAsyncScript(session, web_view, params, value):
    """Run a user-supplied async script in the current frame."""
    script = params.get("script")
    if type(script) != str:
        return Status(kUnknownError, "'script' must be a string")
    args = params.get("args")
    if type(args) != list:
        return Status(kUnknownError, "'args' must be a list")
    wrapped = "function(){" + script + "}"
    return web_view.CallUserAsyncFunction(
        session.GetCurrentFrameId(), wrapped, args, session.script_timeout, value)
def ExecuteScreenshot(session, web_view, params, value):
    """Activate the web view and capture a screenshot into *value*.

    Bug fix: the status returned by ActivateWebView was previously
    discarded (immediately overwritten); bail out early instead of
    capturing from a view that failed to activate.
    """
    status = session.xwalk.ActivateWebView(web_view.GetId())
    if status.IsError():
        return status
    (status, screenshot) = web_view.CaptureScreenshot()
    if status.IsError():
        return status
    value.clear()
    value.update({"value": screenshot})
    return Status(kOk)
def ExecuteGetWindowSize(session, web_view, params, value):
    """Report the window size via window.screen height/width."""
    script = (
        "function() {"
        " var size = {'height': 0, 'width': 0};"
        " size.height = window.screen.height;"
        " size.width = window.screen.width;"
        " return size;"
        "}"
    )
    result = {}
    status = web_view.CallFunction(
        session.GetCurrentFrameId(), script, [], result)
    if status.IsError():
        return status
    value.clear()
    value.update(result)
    return Status(kOk)
def ExecuteGetWindowPosition(session, web_view, params, value):
    """Report the window position via window.screenX/screenY."""
    script = (
        "function() {"
        " var position = {'x': 0, 'y': 0};"
        " position.x = window.screenX;"
        " position.y = window.screenY;"
        " return position;"
        "}"
    )
    result = {}
    status = web_view.CallFunction(
        session.GetCurrentFrameId(), script, [], result)
    if status.IsError():
        return status
    value.clear()
    value.update(result)
    return Status(kOk)
def ExecuteGetCookies(session, web_view, params, value):
    """Return all cookies visible to the current page."""
    cookies = []
    status = _GetVisibleCookies(web_view, cookies)
    if status.IsError():
        return status
    serialized = [_CreateDictionaryFrom(cookie) for cookie in cookies]
    value.clear()
    value.update({"value": serialized})
    return Status(kOk)
def ExecuteAddCookie(session, web_view, params, value):
    """Add the cookie described by params['cookie'] via injected JS."""
    cookie = params.get("cookie")
    if type(cookie) != dict:
        return Status(kUnknownError, "missing 'cookie'")
    return web_view.CallFunction(
        session.GetCurrentFrameId(), kAddCookieScript, [cookie], {})
def ExecuteDeleteCookie(session, web_view, params, value):
    """Delete the cookie named params['name'] for the current URL."""
    name = params.get("name")
    if type(name) != str:
        return Status(kUnknownError, "missing 'name'")
    status, url = _GetUrl(web_view, session.GetCurrentFrameId())
    if status.IsError():
        return status
    return web_view.DeleteCookie(name, url)
def ExecuteDeleteAllCookies(session, web_view, params, value):
    """Delete every cookie visible to the current page."""
    cookies = []
    status = _GetVisibleCookies(web_view, cookies)
    if status.IsError():
        return status
    if cookies:
        status, url = _GetUrl(web_view, session.GetCurrentFrameId())
        if status.IsError():
            return status
        for cookie in cookies:
            status = web_view.DeleteCookie(cookie.name, url)
            if status.IsError():
                return status
    return Status(kOk)
def ExecuteSwitchToFrame(session, web_view, params, value):
    """Switch the session's current frame per the WebDriver 'id' param.

    'id' may be None (top frame), an element reference dict, a frame
    name/id string, or a zero-based frame index.
    """
    # NOTE(review): has_key() is Python 2 only, consistent with the rest
    # of this module.
    if not params.has_key("id"):
        return Status(kUnknownError, "missing 'id'")
    id_value = params["id"]
    if id_value == None:
        session.SwitchToTopFrame()
        return Status(kOk)
    script = ""
    args = []
    if type(id_value) == dict:
        # An element reference: pass it straight through to the browser.
        script = "function(elem) { return elem; }"
        args.append(id_value)
    else:
        # Build an XPath that resolves the frame by name/id or by index
        # (XPath positions are 1-based, hence the +1 below).
        script = \
            "function(xpath) {"\
            " return document.evaluate(xpath, document, null, "\
            " XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;"\
            "}"
        xpath = "(/html/body//iframe|/html/frameset/frame)"
        if type(id_value) == str:
            xpath += '[@name="%s" or @id="%s"]' % (id_value, id_value)
        elif type(id_value) == int:
            xpath += "[%d]" % (id_value + 1)
        else:
            return Status(kUnknownError, "invalid 'id'")
        args.append(xpath)
    # First resolve the frame id, then fetch the frame element itself
    # with the same script/args.
    (status, frame) = web_view.GetFrameByFunction(session.GetCurrentFrameId(), script, args)
    if status.IsError():
        return status
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(), script, args, result)
    if status.IsError():
        return status
    if type(result) != dict:
        return Status(kUnknownError, "fail to locate the sub frame element")
    # Tag the frame element with a generated id so later commands can
    # address it, then record the switch on the session.
    xwalk_driver_id = GenerateId()
    kSetFrameIdentifier = \
        "function(frame, id) {"\
        " frame.setAttribute('cd_frame_id_', id);"\
        "}"
    new_args = []
    new_args.append(result)
    new_args.append(xwalk_driver_id);
    result = {}
    status = web_view.CallFunction(session.GetCurrentFrameId(), kSetFrameIdentifier, new_args, result)
    if status.IsError():
        return status
    session.SwitchToSubFrame(frame, xwalk_driver_id)
    return Status(kOk)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re
from os.path import isfile, join
from subprocess import call
import subprocess
from pprint import pprint
import argparse
class coverageRanking():
    """Ranks unit tests by incremental line coverage to pick smoke tests.

    Workflow: parseTests() discovers test classes/methods, runTests()
    runs each test under pytest-cov and records per-file line coverage,
    rankTests() greedily selects the num_smoke_tests tests that together
    cover the most lines.
    """

    # Number of smoke tests selected by rankTests().
    num_smoke_tests = 5

    # Pass in the package that you are covering and tests dir
    def __init__(self, cov_package, test_dir='test'):
        """Record the covered package and test directory.

        Bug fix: tests_by_class / file_coverage /
        file_line_representations used to be mutable class attributes
        shared by every instance; they are now per-instance state.
        """
        self.cov_package = cov_package
        self.test_dir = test_dir
        # {test_file: {test_class: [test names]}}
        self.tests_by_class = {}
        # {full_test_name: {file_name: [bool per line]}}
        self.file_coverage = {}
        # {file_name: [True] * (line count + 1)} -- 1-based line indexing
        self.file_line_representations = {}

    def parseTests(self):
        """Discover test files/classes/methods under self.test_dir.

        Sets self.tests_by_class formatted like:
        { testFile: {testClass: [list, of, test, names], ...}}
        """
        test_classes = {}
        # Bug fix: the pattern is now anchored (r'^test_.*\.py$'); the
        # old unescaped '.' also matched names such as 'test_x.pyc'.
        test_files = [
            f for f in os.listdir(self.test_dir)
            if isfile(join(self.test_dir, f)) and re.search(r'^test_.*\.py$', f)
        ]
        for test_file in test_files:
            test_classes[test_file] = {}
            with open(join(self.test_dir, test_file), 'r') as fin:
                lines = fin.readlines()
            class_name = None
            for line in lines:
                class_match = re.search(r'^class\s+(.+)\(', line)
                if class_match:
                    class_name = class_match.group(1)
                    test_classes[test_file][class_name] = []
                elif class_name is not None:
                    # Test methods are expected at exactly one 4-space indent.
                    test_match = re.search(r'^\s\s\s\sdef\s+(test.+)\(', line)
                    if test_match:
                        test_classes[test_file][class_name].append(test_match.group(1))
        self.tests_by_class = test_classes

    def runTests(self):
        """Run every discovered test under pytest-cov and record coverage.

        NOTE(review): call(['cd', dir]) spawns a subprocess and does NOT
        change this process's working directory; every path below
        resolves against the original cwd.  Kept as-is because the whole
        pipeline (mkdir, pytest output, mv) consistently uses that cwd.
        """
        call(['cd', self.test_dir])
        call(['mkdir', 'covData'])
        for test_file, test_classes in self.tests_by_class.items():
            for test_class, test_list in test_classes.items():
                for test in test_list:
                    full_test_name = test_file + '::' + test_class + '::' + test
                    # e.g. py.test --cov=pyllist test/test_pyllist.py::testdllist::test_init_empty
                    command = ('py.test --cov-report term-missing --cov='
                               + self.cov_package + ' ' + full_test_name)
                    cov_report = subprocess.run(command.split(' '), stdout=subprocess.PIPE)
                    self.parseCovReport(full_test_name, cov_report.stdout.decode('utf-8'))
                    # Keep each test's raw coverage data for later inspection.
                    new_cov_file = '.coverage.' + test_class + '.' + test
                    call(['mv', '.coverage', 'covData/' + new_cov_file])

    def parseMissingLines(self, file_name, string_missing):
        """Return a line-coverage array with the 'Missing' lines cleared.

        string_missing is the coverage report's Missing column, e.g.
        "5-9,12,20-21".  Line numbers are 1-based and index the array
        directly (slot 0 is unused).
        """
        range_pattern = re.compile(r'(\d+)-(\d+)')
        single_pattern = re.compile(r'(\d+)')
        line_arr = self.file_line_representations[file_name].copy()
        for missing in string_missing.split(','):
            range_match = range_pattern.search(missing)
            if range_match:
                first = int(range_match.group(1))
                last = int(range_match.group(2))
                # Bug fix: ranges are inclusive ("5-7" means lines 5, 6
                # AND 7), so the end line must be cleared as well.
                for i in range(first, last + 1):
                    line_arr[i] = False
            else:
                single_match = single_pattern.search(missing)
                if single_match:
                    line_arr[int(single_match.group(1))] = False
        return line_arr

    def file_len(self, fname):
        """Return the number of lines in *fname* plus one.

        The extra slot lets callers index coverage arrays with 1-based
        line numbers without adjustment.
        """
        with open(fname) as f:
            return 1 + sum(1 for _ in f)

    # Report Format: ['Name', 'Stmts', 'Miss', 'Cover', 'Missing']
    def parseCovReport(self, test_name, cov_report):
        """Parse a pytest-cov terminal report into self.file_coverage."""
        self.file_coverage[test_name] = {}
        for line in cov_report.split('\n'):
            # Collapse ", " so the Missing column stays a single token.
            data = line.replace(', ', ',').split()
            if not data:
                continue
            file_name = data[0]
            if file_name[-3:] != '.py':
                continue
            self.file_coverage[test_name][file_name] = {}
            if file_name not in self.file_line_representations:
                file_lines = self.file_len(file_name)
                self.file_line_representations[file_name] = [True] * file_lines
            if len(data) > 3 and data[3] == '100%':
                # Fully covered: every line stays True.
                self.file_coverage[test_name][file_name] = self.file_line_representations[file_name]
            elif len(data) > 4:
                self.file_coverage[test_name][file_name] = self.parseMissingLines(file_name, data[4])

    def countOverallCovered(self, test_cov_dict):
        """Count covered (True) lines across all files of one test."""
        covered_count = 0
        for file_name, file_cov in test_cov_dict.items():
            for covered in file_cov:
                if covered is True:
                    covered_count += 1
        return covered_count

    def coverage_merger(self, test_cov_dict1, test_cov_dict2):
        """Return the per-line OR of two coverage dicts (same file sets)."""
        merged_dict = {}
        for file_name, file_cov1 in test_cov_dict1.items():
            file_cov2 = test_cov_dict2[file_name]
            merged_dict[file_name] = [
                file_cov1[i] or file_cov2[i] for i in range(len(file_cov1))
            ]
        return merged_dict

    def rankTests(self):
        """Greedily pick num_smoke_tests tests maximizing merged coverage."""
        call(['cd', self.test_dir])  # NOTE(review): no-op, see runTests.
        # self.file_coverage maps {test_name: {file_name: [T/F per line]}}
        # Seed with the single test covering the most lines (heuristic).
        base_cov = 0
        for test_name, test_dict in self.file_coverage.items():
            test_cov = self.countOverallCovered(test_dict)
            if test_cov > base_cov:
                base_cov = test_cov
                base_test = test_name
        # Repeatedly merge in the test that adds the most new coverage
        # until num_smoke_tests have been selected.
        current_smoke_merge = {}
        for file_name, file_cov in self.file_coverage[base_test].items():
            current_smoke_merge[file_name] = file_cov
        smoke_tests = [base_test]
        highest_merge_test = base_test
        highest_merge = self.file_coverage[base_test]
        highest_merge_count = self.countOverallCovered(self.file_coverage[base_test])
        for i in range(1, self.num_smoke_tests):
            for test_name, test_dict in self.file_coverage.items():
                if test_name not in smoke_tests:
                    merged_files = self.coverage_merger(current_smoke_merge, test_dict)
                    merge_count = self.countOverallCovered(merged_files)
                    if merge_count >= highest_merge_count:
                        highest_merge = merged_files
                        highest_merge_count = merge_count
                        highest_merge_test = test_name
            smoke_tests.append(highest_merge_test)
            current_smoke_merge = highest_merge
        print('\n\n\n')
        pprint(smoke_tests)
def _main():
    """CLI entry point: parse, run and rank the given package's tests."""
    parser = argparse.ArgumentParser()
    required = parser.add_argument_group('required named arguments')
    required.add_argument(
        '--cov-package', dest='cov_package',
        help='Package for which coverage is collected', required=True,
    )
    required.add_argument(
        '--test-dir', dest='test_dir',
        help='Directory where tests are located', required=True,
    )
    args = parser.parse_args()
    ranker = coverageRanking(args.cov_package, args.test_dir)
    ranker.parseTests()
    ranker.runTests()
    ranker.rankTests()


if __name__ == "__main__":
    _main()
|
import hashlib
import stat
import tempfile
import time
from pathlib import Path
import requests
import typer
from python_on_whales import docker
from sultan.api import Sultan
from controller import SUBMODULES_DIR, log, print_and_exit
from controller.app import Application, Configuration
from controller.packages import Packages
from controller.utilities import git
# https://get.docker.com
EXPECTED_DOCKER_SCRIPT_MD5 = "dd5da5e89bf5730e84ef5b20dc45588c"
# https://github.com/docker/compose/releases
COMPOSE_VERSION = "v2.2.2"
EXPECTED_COMPOSE_BIN_MD5 = "d8518059a22e4a5ff7794e5cd7162f7e"
# https://github.com/docker/buildx/releases
BUILDX_VERSION = "v0.7.1"
EXPECTED_BUILDX_BIN_MD5 = "94f186350daf6841239a599e65ba38f1"
def download(url: str, expected_checksum: str) -> Path:
    """Download *url* to a temporary file and verify its MD5 checksum.

    Exits via print_and_exit on a non-200 response, a checksum mismatch
    or a read timeout; otherwise returns the path of the downloaded file.
    """
    try:
        r = requests.get(url, timeout=10)
        if r.status_code != 200:
            print_and_exit(
                "Can't download {}, invalid status code {}", url, str(r.status_code)
            )
        file: Path = Path(tempfile.NamedTemporaryFile().name)
        # Hash while streaming to disk: the previous implementation
        # re-read the file with an `open(...)` whose handle was never
        # closed.
        digest = hashlib.md5()
        with open(file, "wb") as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    digest.update(chunk)
        md5 = digest.hexdigest()
        if md5 == expected_checksum:
            log.info("Checksum verified: {}", md5)
        else:
            print_and_exit(
                "Checksum of download file ({}) does not match the expected value ({})",
                md5,
                expected_checksum,
            )
        return file
    except requests.exceptions.ReadTimeout as e:  # pragma: no cover
        print_and_exit("The request timed out, please retry in a while ({})", str(e))
@Application.app.command(help="Install the specified version of rapydo")
def install(
    version: str = typer.Argument("auto", help="Version to be installed"),
    editable: bool = typer.Option(
        True,
        "--no-editable",
        help="Disable editable mode",
        show_default=False,
    ),
) -> None:
    """Install docker/compose/buildx tooling or a rapydo controller.

    ``version`` may be "docker", "compose", "buildx", an explicit
    controller version, or "auto" (resolve from the project
    configuration).  Passing ``--no-editable`` switches the controller
    install from the local-folder editable mode to a global git install.
    """
    Application.print_command(
        Application.serialize_parameter("--no-editable", not editable, IF=not editable),
        Application.serialize_parameter("", version),
    )
    if version == "docker":
        # Install/upgrade the docker engine via the official get.docker.com script.
        log.info("Docker current version: {}", Packages.get_bin_version("docker"))
        url = "https://get.docker.com"
        log.info("Downloading installation script: {}", url)
        f = download(url, EXPECTED_DOCKER_SCRIPT_MD5)
        log.info("The installation script contains a wait, please be patient")
        with Sultan.load(sudo=True) as sultan:
            result = sultan.sh(f).run()
            for r in result.stdout + result.stderr:
                print(r)
        log.info("Docker installed version: {}", Packages.get_bin_version("docker"))
        return None
    if version == "compose":
        # Drop the compose binary into docker's CLI plugin directory.
        cli_plugin = Path.home().joinpath(".docker", "cli-plugins")
        cli_plugin.mkdir(parents=True, exist_ok=True)
        compose_bin = cli_plugin.joinpath("docker-compose")
        url = "https://github.com/docker/compose/releases/download/"
        url += f"{COMPOSE_VERSION}/docker-compose-linux-x86_64"
        log.info("Downloading compose binary: {}", url)
        f = download(url, EXPECTED_COMPOSE_BIN_MD5)
        f.rename(compose_bin)
        # Mark the downloaded plugin as executable.
        compose_bin.chmod(compose_bin.stat().st_mode | stat.S_IEXEC)
        if docker.compose.is_installed():
            log.info("Docker compose is installed")
        else:  # pragma: no cover
            log.error("Docker compose is NOT installed")
        return None
    if version == "buildx":
        if docker.buildx.is_installed():
            v = docker.buildx.version()
            log.info("Docker buildx current version: {}", v)
        else:  # pragma: no cover
            log.info("Docker buildx current version: N/A")
        # Same plugin-directory layout as compose above.
        cli_plugin = Path.home().joinpath(".docker", "cli-plugins")
        cli_plugin.mkdir(parents=True, exist_ok=True)
        buildx_bin = cli_plugin.joinpath("docker-buildx")
        url = "https://github.com/docker/buildx/releases/download/"
        url += f"{BUILDX_VERSION}/buildx-{BUILDX_VERSION}.linux-amd64"
        log.info("Downloading buildx binary: {}", url)
        f = download(url, EXPECTED_BUILDX_BIN_MD5)
        f.rename(buildx_bin)
        buildx_bin.chmod(buildx_bin.stat().st_mode | stat.S_IEXEC)
        v = docker.buildx.version()
        log.info("Docker buildx installed version: {}", v)
        return None
    # Otherwise: install a rapydo controller version.
    Application.get_controller().controller_init()
    if version == "auto":
        version = Configuration.rapydo_version
        log.info("Detected version {} to be installed", version)
    if editable:
        install_controller_from_folder(version)
    else:
        install_controller_from_git(version)
def install_controller_from_folder(version: str) -> None:
    """Install the controller in editable mode from the local submodule.

    Requires the ``do`` submodule to be present; switches its branch to
    *version* before installing with pip's editable mode.
    """
    do_path = SUBMODULES_DIR.joinpath("do")
    try:
        Application.git_submodules()
    except SystemExit:
        # Submodule missing: explain the fallback before re-raising.
        log.info(
            """You asked to install rapydo {ver} in editable mode, but {p} is missing.
You can force the installation by disabling the editable mode:
rapydo install {ver} --no-editable
""",
            ver=version,
            p=do_path,
        )
        raise
    log.info(
        """You asked to install rapydo {}. It will be installed in editable mode
This command will require root privileges because of the editable mode.
You could be prompted to enter your password: this is due to the use of sudo.
If you want to execute this installation by yourself, you can execute:
sudo pip3 install --upgrade --editable {}
""",
        version,
        do_path,
    )
    # Give the user a moment to read the warning above.
    time.sleep(2)
    do_repo = Application.gits.get("do")
    # Align the local repository with the requested version/branch.
    b = git.get_active_branch(do_repo)
    if b is None:
        log.error("Unable to read local controller repository")  # pragma: no cover
    elif b == version:
        log.info("Controller repository already at {}", version)
    elif git.switch_branch(do_repo, version):
        log.info("Controller repository switched to {}", version)
    else:
        print_and_exit("Invalid version")
    installed = Packages.install(do_path, editable=True, user=False)
    if not installed:  # pragma: no cover
        log.error("Unable to install controller {} from local folder", version)
    else:
        log.info("Controller version {} installed from local folder", version)
def install_controller_from_git(version: str) -> None:
    """Globally install the rapydo controller at *version* from GitHub."""
    repo_name = "do"
    base_uri = "https://github.com/rapydo"
    controller = f"git+{base_uri}/{repo_name}.git@{version}"
    log.info(
        """You asked to install rapydo {} from git. It will be installed globally
This command will require root privileges because of the global installation.
You could be prompted to enter your password: this is due to the use of sudo.
If you want to execute this installation by yourself, you can execute:
sudo pip3 install --upgrade [--user] {}
""",
        version,
        controller,
    )
    # Give the user a moment to read the warning above.
    time.sleep(2)
    if Packages.install(controller, user=False):
        log.info("Controller version {} installed from git", version)
    else:  # pragma: no cover
        log.error("Unable to install controller {} from git", version)
|
<reponame>Govexec/django-forms-builder
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
try:
import jinja2
from coffin.shortcuts import get_object_or_404, redirect, render_to_response
except ImportError:
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from django.utils.http import urlquote
from forms_builder.forms.forms import FormForForm
from forms_builder.forms.models import Form
from forms_builder.forms.settings import USE_SITES
from forms_builder.forms.signals import form_invalid, form_valid
from websites.models import Page
def form_detail(request, slug, template="forms/form_detail.html"):
    """
    Display a built form and handle submission.
    """
    published = Form.objects.published(for_user=request.user)
    if USE_SITES:
        published = published.filter(sites=Site.objects.get_current())
    form = get_object_or_404(published, slug=slug)
    # Login-required forms bounce anonymous users to the login page.
    if form.login_required and not request.user.is_authenticated():
        return redirect("%s?%s=%s" % (settings.LOGIN_URL, REDIRECT_FIELD_NAME,
            urlquote(request.get_full_path())))
    args = (form, request.POST or None, request.FILES or None)
    form_for_form = FormForForm(*args)
    if request.method == "POST":
        if not form_for_form.is_valid():
            form_invalid.send(sender=request, form=form_for_form)
        else:
            # Save the entry, then build a plain-text email body from the
            # submitted labels/values.
            entry = form_for_form.save()
            fields = ["%s: %s" % (v.label, form_for_form.cleaned_data[k])
                for (k, v) in form_for_form.fields.items()]
            subject = form.email_subject
            if not subject:
                subject = "%s - %s" % (form.title, entry.entry_time)
            body = "\n".join(fields)
            if form.email_message:
                body = "%s\n\n%s" % (form.email_message, body)
            email_from = form.email_from or settings.DEFAULT_FROM_EMAIL
            email_to = form_for_form.email_to()
            # Optional confirmation mail to the submitter.
            if email_to and form.send_email:
                msg = EmailMessage(subject, body, email_from, [email_to])
                msg.send()
            email_from = email_to or email_from  # Send from the email entered.
            email_copies = [e.strip() for e in form.email_copies.split(",")
                if e.strip()]
            if email_copies:
                # Copy recipients also receive any uploaded files.
                msg = EmailMessage(subject, body, email_from, email_copies)
                for f in form_for_form.files.values():
                    f.seek(0)
                    msg.attach(f.name, f.read())
                msg.send()
            form_valid.send(sender=request, form=form_for_form, entry=entry)
            return redirect(reverse("form_sent", kwargs={"slug": form.slug}))
    context = {"form": form, "form_for_form": form_for_form, "page": get_page(form, request)}
    return render_to_response(template, context, RequestContext(request))
def form_sent(request, slug, template="forms/form_sent.html"):
    """
    Show the response message.
    """
    form = get_object_or_404(
        Form.objects.published(for_user=request.user), slug=slug)
    return render_to_response(
        template,
        {"form": form, "page": get_page(form, request)},
        RequestContext(request),
    )
def get_page(form, request):
    """Wrap *form* in a Page so form views render like site pages."""
    page_data = {
        "type": "home",
        "title": form.title,
        "category": None,
        "url": form.get_absolute_url(),
    }
    return Page(page_data, request, ad_settings=settings.DART_AD_DEFAULTS)
|
# Author: <EMAIL>, 2015
import unittest
from tester import Tester
class TestTester(unittest.TestCase):
def setUp(self):
    """Create a Tester and the shared fixture data.

    Each ``customersN`` is a list of customer preference lists, where
    each preference is a "<item> <flag>" string (presumably an item id
    and a 0/1 option flag — TODO confirm against tester.Tester).  Each
    ``solutionN`` is a list of '0'/'1' flags indexed by item id - 1.
    """
    self.tester = Tester()
    # Customer preference fixtures.
    self.customers1 = [['1 0', '2 0'], ['1 1'], ['3 0']]
    self.customers2 = [['1 0', '2 1'], ['1 1'], ['3 1']]
    self.customers3 = [['2 0', '1 0'], ['2 1', '1 0']]
    self.customers4 = [['2 1', '1 0'], ['2 1']]
    self.customers5 = [['2 0', '1 0'], ['2 0', '1 1']]
    self.customers6 = [['2 0']]
    self.customers7 = [['1 0']]
    self.customers8 = [['3 0', '1 1', '2 0']]
    self.customers9 = [['1 0', '2 0'], ['1 1', '2 0'], ['1 0']]
    self.customers10 = [['2 1', '1 0'], ['2 1', '1 0']]
    self.customers11 = [['2 0', '1 0'], ['1 0']]
    self.customers12 = [['3 1', '2 0'], ['2 1'], ['2 1', '1 0']]
    self.customers13 = [['2 1', '1 0'], ['2 0', '1 1']]
    self.customers14 = [['3 0'], ['2 1', '1 0', '3 0']]
    self.customers15 = [['1 0', '2 1'], ['1 1', '2 0'], ['3 0']]
    # Candidate solution fixtures.
    self.solution1 = ['0', '0', '0']
    self.solution2 = ['1', '0', '0']
    self.solution3 = ['0', '1', '0']
    self.solution4 = ['1', '1', '1']
    self.solution5 = ['0', '0']
    self.solution6 = ['0']
    self.solution7 = ['0', '1']
    self.solution8 = ['1', '0', '0']
    self.solution9 = ['0', '1', '1']
    self.solution10 = ['0', '0', '1']
    self.solution11 = ['1', '1', '1']
    self.solution12 = ['1', '1', '0']
def test_solution_tester(self):
is_valid = self.tester.is_valid_solution
c1 = self.customers1
c2 = self.customers2
c3 = self.customers3
c4 = self.customers4
c5 = self.customers5
c6 = self.customers6
c7 = self.customers7
c8 = self.customers8
c9 = self.customers9
c10 = self.customers10
c11 = self.customers11
c12 = self.customers12
c13 = self.customers13
c14 = self.customers14
c15 = self.customers15
s1 = self.solution1
s2 = self.solution2
s3 = self.solution3
s4 = self.solution4
s5 = self.solution5
s6 = self.solution6
s7 = self.solution7
s8 = self.solution8
s9 = self.solution9
s10 = self.solution10
s11 = self.solution11
s12 = self.solution12
self.assertFalse(is_valid(s1, c1))
self.assertTrue(is_valid(s2, c1))
self.assertFalse(is_valid(s3, c1))
self.assertFalse(is_valid(s3, c2))
self.assertTrue(is_valid(s4, c2))
self.assertFalse(is_valid(s4, c1))
self.assertTrue(is_valid(s5, c3))
self.assertFalse(is_valid(s6, c3))
self.assertFalse(is_valid(s5, c4))
self.assertTrue(is_valid(s7, c4))
self.assertTrue(is_valid(s5, c5))
self.assertTrue(is_valid(s1, c6))
self.assertTrue(is_valid(s5, c6))
self.assertTrue(is_valid(s1, c7))
self.assertTrue(is_valid(s3, c7))
self.assertTrue(is_valid(s5, c7))
self.assertTrue(is_valid(s6, c7))
self.assertTrue(is_valid(s7, c7))
self.assertTrue(is_valid(s8, c8))
self.assertFalse(is_valid(s1, c9))
self.assertTrue(is_valid(s3, c10))
self.assertTrue(is_valid(s9, c10))
self.assertTrue(is_valid(s1, c11))
self.assertTrue(is_valid(s5, c11))
self.assertTrue(is_valid(s10, c11))
self.assertFalse(is_valid(s11, c12))
self.assertTrue(is_valid(s9, c12))
self.assertTrue(is_valid(s12, c13))
self.assertTrue(is_valid(s1, c13))
self.assertTrue(is_valid(s1, c14))
self.assertTrue(is_valid(s3, c14))
self.assertTrue(is_valid(s12, c14))
self.assertTrue(is_valid(s12, c15))
self.assertTrue(is_valid(s1, c15))
self.assertFalse(is_valid(s2, c15))
self.assertFalse(is_valid(s3, c15))
def test_solution_tester_bigger(self):
is_valid = self.tester.is_valid_solution
c = [['1 0', '6 1', '3 0', '4 0'],
['5 1', '2 0', '3 0', '6 0'], ['1 0', '5 1', '3 0', '4 0', '6 0'],
['2 1'], ['2 1', '1 0', '6 0', '3 0']]
sols = [['0', '1', '0', '0', '1', '0'],
['0', '1', '0', '1', '1', '0'],
['0', '1', '1', '0', '1', '0'],
['0', '1', '0', '0', '1', '1'],
['0', '1', '0', '0', '0', '0'],
['0', '1', '0', '1', '1', '1'],
['0', '1', '1', '0', '1', '1'],
['1', '1', '0', '0', '1', '1'],
['0', '1', '0', '0', '0', '1'],
['1', '1', '0', '0', '1', '0']]
for s in sols:
self.assertTrue(is_valid(s, c))
# Run the suite when executed directly (e.g. `python test_tester.py`).
if __name__ == '__main__':
    unittest.main()
### Code written for STDP tutorial @ CAMP 2016
### Adapted from Song et al 2000 (Competitive Hebbian Learning through spike-timing-dependent synaptic plasticity
### With independent Poisson inputs (relevant figures : Fig 2e)
### Author: <NAME>
### Date: June 15, 2016
#######################################
from brian2 import *
from time import time
#set_device('cpp_standalone')
#################
def newrun( fe = 15, fi = 10, Weakratio=1.05, simtime = 100*second, delta = 0.1*ms):
    """Run one STDP simulation of a single conductance-based LIF neuron.

    Setup follows Song et al. 2000 (competitive Hebbian learning, Fig 2e):
    Ne Poisson excitatory inputs with additive STDP, Ni Poisson inhibitory
    inputs with fixed weights.

    fe, fi    -- excitatory / inhibitory input rates (Hz, plain numbers)
    Weakratio -- depression/potentiation area ratio (>1 stabilises weights)
    simtime   -- simulation duration; delta -- integration time step

    Returns (final population firing rate, CV of the last inter-spike
    intervals).  Also draws summary figures as a side effect.
    """
    # Low input rates converge more slowly, so extend the run.
    if fe<30:
        simtime=150*second
    defaultclock.dt = delta
    taum = 20*ms
    Vrest = -70*mV
    Erev = 0*mV #Excitatory synapse - reversal potential
    Irev = -70*mV #Inhibitory synapse - reversal potential
    taue = 5*ms
    taui = 5*ms
    gmax = 0.015 #Max excitatory conductance
    ginh = 0.05 #Inhibitory conductance
    Vt = -54*mV # Spike threshold
    Vr = -60*mV # Reset potential
    #### How does no. of synapses/ ratio influence firing rates?
    Ne = 1000 #No. of excitatory synapses
    Ni = 200 #No. of inhibitory synapses
    # How does final distribution of weights depend on presynaptic firing rates? Why???
    FRe = fe*Hz #Firing rate for excitatory input
    FRi = fi*Hz #FR for inhibitory input
    ### Neuron model: conductance-based leaky integrate-and-fire.
    eqs = Equations('''
    dV/dt = ( (Vrest - V) + ge*(Erev - V) + gi*(Irev - V) )/taum :volt
    dge/dt = -ge/taue :1 #Current from exc synapse
    dgi/dt = -gi/taui :1 #Current from inh synapse
    ''')
    NRN = NeuronGroup(1, model=eqs, threshold = 'V>Vt', reset = 'V=Vr', method='euler')
    ## STDP parameters for excitatory synapses
    taupre = 20*ms
    taupost = 20*ms
    #Weakratio = 1.05 ##Apost*taupost/(Apre/taupre)
    ##:Weakening:Strengthening ratio (slightly greater than 1 for stabilising network)
    Apre = 0.005 # %Strengthening with pre-post pair
    Apost = Apre*(taupre/taupost)*Weakratio
    ### Excitatory input and synapse model
    InpE = PoissonGroup(Ne, rates = FRe)
    # x/y are the pre/post STDP traces; event-driven so they are only
    # updated at spike times.
    syneqs = '''
    gsyn :1
    dx/dt = -x/taupre :1 (event-driven)
    dy/dt = -y/taupost :1 (event-driven)
    '''
    # On a presynaptic spike: deliver conductance, then depress by the
    # postsynaptic trace; weights are clipped to [0, gmax] (additive STDP).
    preeqs = '''
    ge_post += gsyn
    x += Apre
    gsyn += -y*gmax
    gsyn = clip(gsyn, 0, gmax)
    '''
    # On a postsynaptic spike: potentiate by the presynaptic trace.
    posteqs = '''
    y += Apost
    gsyn += x*gmax
    gsyn = clip(gsyn, 0, gmax)
    '''
    S_exc = Synapses(InpE, NRN, model=syneqs, on_pre=preeqs, on_post=posteqs )
    S_exc.connect()
    S_exc.gsyn[:] = gmax*rand(Ne) #Initialise uniformly between 0 and gmax
    #print 'Created', Ne, 'excitatory synapses with mean weight = ', float(int(mean(S_exc.gsyn[:]*10000)))/10
    ### Inhibitory synapses: static weight, no plasticity.
    InpI = PoissonGroup(Ni, rates = FRi)
    S_inh = Synapses(InpI, NRN, model = ' ', on_pre = '''gi_post += ginh''')
    S_inh.connect()
    #print 'Created', Ni, 'inhibitory synapses ...'
    ## Monitors
    SM = SpikeMonitor(NRN)
    VMon = StateMonitor( NRN, 'V', record = 0 )
    FR = PopulationRateMonitor(NRN)
    ## Runtime
    print '\n Running for', simtime, 'at dt = ', delta, 'with excitatory inputs at', fe, 'Hz'
    run( simtime, report = 'text', report_period = 50*second)
    #device.build(directory='output', compile=True, run=True, debug=False)
    ## Plotting
    figure()
    suptitle('Excitatory firing rate = %d Hz' %(fe))
    ### Histogram of final synaptic weights
    subplot(311)
    hist(S_exc.gsyn[:] /gmax, 20)
    ylabel('No. of synapses')
    xlabel('Normalised synaptic weight')
    ### Initial membrane potential trace
    subplot(323)
    plot(VMon.t[0:3000] /ms, VMon.V[0,0:3000] /mV)
    ylim([-80,-40])
    ylabel('Membrane potential (mV)')
    legend('Initial V')
    ### Final membrane potential trace
    subplot(324)
    plot(VMon.t[-3000:-1] /ms, VMon.V[0,-3000:-1] /mV)
    ylim([-80,-40])
    legend('Final V')
    ### Evolution of Firing rate in time
    subplot(313)
    plot(FR.t /second, FR.smooth_rate(window='gaussian', width=50*ms)/Hz)
    ylabel('Firing rate (Hz)')
    tight_layout()
    # Steady-state rate: average a window near the end of the smoothed trace.
    poprate=FR.smooth_rate(window='gaussian', width=100*ms)
    fin_poprate = mean( poprate[-1000:-500] )
    print fin_poprate
    spt = SM.t[:]
    L = len(spt)
    ##Get last 500 spikes. Fixed no. of spikes instead of duration.
    if L > 500:
        spt = spt[-500:L]
        isi = spt[1:500] - spt[0:499]
        # Coefficient of variation of the inter-spike intervals.
        cv = sqrt(var(isi))/mean(isi)
    else :
        # Too few spikes to estimate a CV.
        cv = NaN
    print ' \n'
    return fin_poprate, cv
################
# Sweep the depression/potentiation ratio and record, for each run, the
# steady-state population rate and the ISI coefficient of variation.
ratios = [0.95, 0.98, 1.0, 1.02, 1.04, 1.06, 1.08, 1.10]
fe = 20 ## Hz
poprates = []
cv = []
for r in ratios:
    ret1, ret2 = newrun(fe, Weakratio = r)
    poprates.append(ret1)
    cv.append(ret2)
print poprates
# Summary figure: firing rate (left axis) and CV (right axis) vs ratio.
fig = figure()
A = fig.add_subplot(111)
A.plot(ratios, poprates, marker ='o', color = 'b')
A.set_xlabel('Apost/Apre : Weakening/Strengthening')
A.set_ylabel('Firing rate of postsynaptic neuron (Hz)', color='b')
for tl in A.get_yticklabels():
    tl.set_color('b')
## Different y-scales for the two plots with same x-axis
B = A.twinx()
B.plot(ratios, cv, marker='*', color='g')
B.set_ylabel('CV', color='g')
for tl in B.get_yticklabels():
    tl.set_color('g')
show()
|
# -*- encoding: utf-8 -*-
def _complain_about_matplotlib(*args, **kwargs):
raise ImportError("Plotting functionality requires matplotlib. Please install matplotlib.")
def _register_dummy_methods():
    """Install stub plotting methods on the H2O model and AutoML classes.

    Called when matplotlib (or numpy) fails to import, so that each
    plotting entry point raises a clear ImportError instead of being
    missing entirely.
    """
    import h2o.model
    import h2o.automl._base  # NOQA
    # Model-level plotting API.
    h2o.model.H2ORegressionModel.residual_analysis_plot = _complain_about_matplotlib
    h2o.model.ModelBase.shap_summary_plot = _complain_about_matplotlib
    h2o.model.ModelBase.shap_explain_row_plot = _complain_about_matplotlib
    h2o.model.ModelBase.explain = _complain_about_matplotlib
    h2o.model.ModelBase.explain_row = _complain_about_matplotlib
    h2o.model.ModelBase.pd_plot = _complain_about_matplotlib
    h2o.model.ModelBase.ice_plot = _complain_about_matplotlib
    h2o.model.ModelBase.learning_curve_plot = _complain_about_matplotlib
    # AutoML-level plotting API.
    h2o.automl._base.H2OAutoMLBaseMixin.pd_multi_plot = _complain_about_matplotlib
    h2o.automl._base.H2OAutoMLBaseMixin.varimp_heatmap = _complain_about_matplotlib
    h2o.automl._base.H2OAutoMLBaseMixin.model_correlation_heatmap = _complain_about_matplotlib
    h2o.automl._base.H2OAutoMLBaseMixin.explain = _complain_about_matplotlib
    h2o.automl._base.H2OAutoMLBaseMixin.explain_row = _complain_about_matplotlib
    h2o.automl._base.H2OAutoMLBaseMixin.model_correlation = _complain_about_matplotlib
    h2o.automl._base.H2OAutoMLBaseMixin.varimp = _complain_about_matplotlib
# Plotting requires numpy and matplotlib.  If both import cleanly,
# re-export the real implementations from ._explain; otherwise install
# stub methods and propagate the original ImportError to the caller.
try:
    import numpy
    import matplotlib
    from ._explain import *
    __all__ = [
        "explain",
        "explain_row",
        "varimp_heatmap",
        "model_correlation_heatmap",
        "pd_multi_plot",
        "varimp",
        "model_correlation",
    ]
except ImportError as e:  # Numpy, Matplotlib
    _register_dummy_methods()
    raise e
def register_explain_methods():
    """Attach the real explain/plot implementations to the H2O classes.

    The implementations come from the ``from ._explain import *`` above,
    so this is only callable when numpy and matplotlib imported
    successfully.
    """
    import h2o.model
    import h2o.automl._base  # NOQA
    # Model-level plotting API.
    h2o.model.H2ORegressionModel.residual_analysis_plot = residual_analysis_plot
    h2o.model.ModelBase.shap_summary_plot = shap_summary_plot
    h2o.model.ModelBase.shap_explain_row_plot = shap_explain_row_plot
    h2o.model.ModelBase.explain = explain
    h2o.model.ModelBase.explain_row = explain_row
    h2o.model.ModelBase.pd_plot = pd_plot
    h2o.model.ModelBase.ice_plot = ice_plot
    h2o.model.ModelBase.learning_curve_plot = learning_curve_plot
    # AutoML-level plotting API.
    h2o.automl._base.H2OAutoMLBaseMixin.pd_multi_plot = pd_multi_plot
    h2o.automl._base.H2OAutoMLBaseMixin.varimp_heatmap = varimp_heatmap
    h2o.automl._base.H2OAutoMLBaseMixin.model_correlation_heatmap = model_correlation_heatmap
    h2o.automl._base.H2OAutoMLBaseMixin.explain = explain
    h2o.automl._base.H2OAutoMLBaseMixin.explain_row = explain_row
    h2o.automl._base.H2OAutoMLBaseMixin.model_correlation = model_correlation
    h2o.automl._base.H2OAutoMLBaseMixin.varimp = varimp
|
#!BPY
"""
Name: 'Follow Active (quads)'
Blender: 242
Group: 'UVCalculation'
Tooltip: 'Follow from active quads.'
"""
__author__ = "<NAME>"
__url__ = ("blender", "blenderartists.org")
__version__ = "1.0 2006/02/07"
__bpydoc__ = """\
This script sets the UV mapping and image of selected faces from adjacent unselected faces.
for full docs see...
http://mediawiki.blender.org/index.php/Scripts/Manual/UV_Calculate/Follow_active_quads
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
from Blender import *
import bpy
import BPyMesh
def extend(EXTEND_MODE,ob):
    """Propagate the active quad's UV layout across connected selected quads.

    EXTEND_MODE: -1 aborts; 2 scales the copied UV step by per-edge-loop
    average edge lengths; other values copy with a fixed step.
    ob: the active Blender mesh object (Blender 2.4x API).
    """
    if EXTEND_MODE == -1:
        return
    me = ob.getData(mesh=1)
    me_verts = me.verts
    # Toggle Edit mode (mesh data can only be written outside edit mode).
    is_editmode = Window.EditMode()
    if is_editmode:
        Window.EditMode(0)
    Window.WaitCursor(1)
    t = sys.time()
    # edge key -> one-element list holding the loop's average edge length
    # (shared list so it can be filled in after all edges are registered).
    edge_average_lengths = {}
    # Maps a quad's edge index to the index of the opposite edge.
    OTHER_INDEX = 2,3,0,1
    FAST_INDICIES = 0,2,1,3 # order is faster
    def extend_uvs(face_source, face_target, edge_key):
        '''
        Takes 2 faces,
        Projects its extends its UV coords onto the face next to it.
        Both faces must share an edge.
        '''
        def face_edge_vs(vi):
            # assume a quad
            return [(vi[0], vi[1]), (vi[1], vi[2]), (vi[2], vi[3]), (vi[3], vi[0])]
        uvs_source = face_source.uv
        uvs_target = face_target.uv
        vidx_source = [v.index for v in face_source]
        vidx_target = [v.index for v in face_target]
        # vertex index is the key, uv is the value
        uvs_vhash_source = dict( [ (vindex, uvs_source[i]) for i, vindex in enumerate(vidx_source)] )
        uvs_vhash_target = dict( [ (vindex, uvs_target[i]) for i, vindex in enumerate(vidx_target)] )
        edge_idxs_source = face_edge_vs(vidx_source)
        edge_idxs_target = face_edge_vs(vidx_target)
        source_matching_edge = -1
        target_matching_edge = -1
        edge_key_swap = edge_key[1], edge_key[0]
        # The shared edge may be stored in either vertex order on each face.
        try: source_matching_edge = edge_idxs_source.index(edge_key)
        except: source_matching_edge = edge_idxs_source.index(edge_key_swap)
        try: target_matching_edge = edge_idxs_target.index(edge_key)
        except: target_matching_edge = edge_idxs_target.index(edge_key_swap)
        # Inner edges: the shared edge; outer edges: the opposite edge.
        edgepair_inner_source = edge_idxs_source[source_matching_edge]
        edgepair_inner_target = edge_idxs_target[target_matching_edge]
        edgepair_outer_source = edge_idxs_source[OTHER_INDEX[source_matching_edge]]
        edgepair_outer_target = edge_idxs_target[OTHER_INDEX[target_matching_edge]]
        if edge_idxs_source[source_matching_edge] == edge_idxs_target[target_matching_edge]:
            iA= 0; iB= 1 # Flipped, most common
        else: # The normals of these faces must be different
            iA= 1; iB= 0
        # Set the target UV's touching source face, no tricky calc needed,
        uvs_vhash_target[edgepair_inner_target[0]][:] = uvs_vhash_source[edgepair_inner_source[iA]]
        uvs_vhash_target[edgepair_inner_target[1]][:] = uvs_vhash_source[edgepair_inner_source[iB]]
        # Set the 2 UV's on the target face that are not touching
        # for this we need to do basic expanding on the source faces UV's
        if EXTEND_MODE == 2:
            try: # divide by zero is possible
                '''
                measure the length of each face from the middle of each edge to the opposite
                allong the axis we are copying, use this
                '''
                i1a= edgepair_outer_target[iB]
                i2a= edgepair_inner_target[iA]
                if i1a>i2a: i1a, i2a = i2a, i1a
                i1b= edgepair_outer_source[iB]
                i2b= edgepair_inner_source[iA]
                if i1b>i2b: i1b, i2b = i2b, i1b
                # print edge_average_lengths
                factor = edge_average_lengths[i1a, i2a][0] / edge_average_lengths[i1b, i2b][0]
            except:
                # Div By Zero (or edge not in a measured loop): fall back
                # to an unscaled copy.
                factor = 1.0
            uvs_vhash_target[edgepair_outer_target[iB]][:] = uvs_vhash_source[edgepair_inner_source[0]] +factor * (uvs_vhash_source[edgepair_inner_source[0]] - uvs_vhash_source[edgepair_outer_source[1]])
            uvs_vhash_target[edgepair_outer_target[iA]][:] = uvs_vhash_source[edgepair_inner_source[1]] +factor * (uvs_vhash_source[edgepair_inner_source[1]] - uvs_vhash_source[edgepair_outer_source[0]])
        else:
            # same as above but with no factor
            uvs_vhash_target[edgepair_outer_target[iB]][:] = uvs_vhash_source[edgepair_inner_source[0]] + (uvs_vhash_source[edgepair_inner_source[0]] - uvs_vhash_source[edgepair_outer_source[1]])
            uvs_vhash_target[edgepair_outer_target[iA]][:] = uvs_vhash_source[edgepair_inner_source[1]] + (uvs_vhash_source[edgepair_inner_source[1]] - uvs_vhash_source[edgepair_outer_source[0]])
    # Make sure the mesh has a UV layer before writing to it.
    if not me.faceUV:
        me.faceUV= True
    face_act = me.activeFace
    if face_act == -1:
        Draw.PupMenu('ERROR: No active face')
        return
    # Only selected quads take part in the mapping.
    face_sel= [f for f in me.faces if len(f) == 4 and f.sel]
    face_act_local_index = -1
    for i, f in enumerate(face_sel):
        if f.index == face_act:
            face_act_local_index = i
            break
    if face_act_local_index == -1:
        Draw.PupMenu('ERROR: Active face not selected')
        return
    # Modes
    # 0 unsearched
    # 1:mapped, use search from this face. - removed!!
    # 2:all siblings have been searched. dont search again.
    face_modes = [0] * len(face_sel)
    face_modes[face_act_local_index] = 1 # extend UV's from this face.
    # Edge connectivity: edge key -> indices (into face_sel) of its faces.
    edge_faces = {}
    for i, f in enumerate(face_sel):
        for edkey in f.edge_keys:
            try: edge_faces[edkey].append(i)
            except: edge_faces[edkey] = [i]
    SEAM = Mesh.EdgeFlags.SEAM
    if EXTEND_MODE == 2:
        # Pre-compute the average edge length of every face loop (seams
        # terminate loops) for length-weighted UV extension.
        edge_loops = BPyMesh.getFaceLoopEdges(face_sel, [ed.key for ed in me.edges if ed.flag & SEAM] )
        me_verts = me.verts
        for loop in edge_loops:
            looplen = [0.0]
            for ed in loop:
                edge_average_lengths[ed] = looplen
                looplen[0] += (me_verts[ed[0]].co - me_verts[ed[1]].co).length
            looplen[0] = looplen[0] / len(loop)
    # remove seams, so we dont map accross seams.
    for ed in me.edges:
        if ed.flag & SEAM:
            # remove the edge pair if we can
            try: del edge_faces[ed.key]
            except: pass
    # Done finding seams
    # face connectivity - faces around each face
    # only store a list of indicies for each face.
    face_faces = [[] for i in xrange(len(face_sel))]
    for edge_key, faces in edge_faces.iteritems():
        if len(faces) == 2: # Only do edges with 2 face users for now
            face_faces[faces[0]].append((faces[1], edge_key))
            face_faces[faces[1]].append((faces[0], edge_key))
    # Now we know what face is connected to what other face, map them by
    # connectivity: flood-fill outward from the active face until no new
    # face can be reached.
    ok = True
    while ok:
        ok = False
        for i in xrange(len(face_sel)):
            if face_modes[i] == 1: # searchable
                for f_sibling, edge_key in face_faces[i]:
                    if face_modes[f_sibling] == 0:
                        face_modes[f_sibling] = 1 # mapped and search from.
                        extend_uvs(face_sel[i], face_sel[f_sibling], edge_key)
                        face_modes[i] = 1 # we can map from this one now.
                        ok= True # keep searching
                face_modes[i] = 2 # dont search again
    print sys.time() - t
    if is_editmode:
        Window.EditMode(1)
    else:
        me.update()
    Window.RedrawAll()
    Window.WaitCursor(0)
def main():
    """Entry point: validate the active object is a mesh, ask the user for
    an extend mode, and run the UV follow/extend."""
    sce = bpy.data.scenes.active
    ob = sce.objects.active
    # print ob, ob.type
    if ob == None or ob.type != 'Mesh':
        Draw.PupMenu('ERROR: No mesh object.')
        return
    # Popup returns 2 (loop-average scaling), 0 (plain extend) or -1 (cancel).
    EXTEND_MODE = Draw.PupMenu("Use Face Area%t|Loop Average%x2|None%x0")
    extend(EXTEND_MODE,ob)
# Run when invoked from Blender's scripts menu / text editor.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import re
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from secrets import (
deviceheight, devicename, devicewidth, os_version, token,
user_agent, uuid
)
# Endpoint of the Italian "processo telematico" mobile proxy API.
QUERY_URL = "https://mob1.processotelematico.giustizia.it/proxy/index_mobile.php"
# Query parameters common to every request.  Device/identity values come
# from the local `secrets` module (which shadows the stdlib `secrets`).
BASE_PAYLOAD = dict(
    version="1.1.13",
    platform=os_version,
    uuid=uuid,
    devicename=devicename,
    devicewidth=devicewidth,
    deviceheight=deviceheight,
    token=token,
    azione="direttarg_sicid_mobile",  # NOTE(review): presumably "direct SICID registry lookup" -- confirm
    registro="CC",
    idufficio="0580910098",  # court-office identifier baked into every query
    tipoufficio=1
)
HEADERS = {
    'User-Agent': user_agent
}
# Raw strings avoid Python's "invalid escape sequence" DeprecationWarning
# for "\<" / "\/" while keeping the compiled patterns byte-identical.
# Captures the filing date from the "iscritto al ruolo il ..." list item.
RE_INSCRITO_RUOLO = re.compile(r"\<li\>iscritto al ruolo il (.+)\<\/li\>")
# Captures the lawyer's name following the "Avv. " honorific.
RE_REMOVE_LAWYER_PREFIX = re.compile(r"(?<=Avv. ).*")
class Case:
    """Value object holding the fields scraped for one court case."""

    def __init__(self, case_yr, case_no, date_filed, judge_name, date_hearing, case_state, primary_lawyer_initials,
                 raw_case_content=None, judgement_number=None):
        self.year = case_yr
        self.number = case_no
        self.date_filed = date_filed
        self.date_hearing = date_hearing
        self.judge_name = judge_name
        self.case_state = case_state
        self.primary_lawyer_initials = primary_lawyer_initials
        self.raw_case_content = raw_case_content
        self.judgement_number = judgement_number

    def __str__(self):
        # Semicolon-separated summary line; empty last field when no
        # judgement has been issued yet.
        fields = ['{}/{}'.format(self.number, self.year)]
        fields.append(self.date_filed)
        fields.append(self.judge_name)
        fields.append(self.date_hearing)
        fields.append(self.case_state)
        fields.append(self.primary_lawyer_initials)
        fields.append(self.judgement_number if self.judgement_number else "")
        return ";".join(fields)

    def asdict(self):
        """Return the case as a plain dict keyed like the constructor args."""
        return dict(
            case_yr=self.year,
            case_no=self.number,
            date_filed=self.date_filed,
            date_hearing=self.date_hearing,
            judge_name=self.judge_name,
            case_state=self.case_state,
            primary_lawyer_initials=self.primary_lawyer_initials,
            judgement_number=self.judgement_number,
            raw_case_content=self.raw_case_content,
        )
def get_case_details(case_yr, case_no):
    """Query the court web service for one case and parse the response.

    Returns a Case on success, None when the response does not look like a
    case page (no "cittadinanza" marker), and raises RuntimeError when the
    server reports "Errore tecnico".
    """
    payload = BASE_PAYLOAD.copy()
    payload.update(dict(
        aaproc=str(case_yr),
        numproc=str(case_no),
        _=int(datetime.now().timestamp())  # cache-busting timestamp
    ))
    response = requests.get(QUERY_URL, params=payload, headers=HEADERS)
    content = response.text
    if "Errore tecnico" in content:
        # BUG FIX: `content` is already a str; the previous `content.text`
        # raised AttributeError instead of reporting the server error.
        print("Request failed", content)
        # RuntimeError (subclass of Exception) keeps existing handlers working
        # while carrying an actionable message instead of a bare Exception().
        raise RuntimeError("Server reported 'Errore tecnico' for case %s/%s" % (case_no, case_yr))
    if "cittadinanza" in content:
        bs = BeautifulSoup(content)  # NOTE(review): consider an explicit parser arg to silence the bs4 warning
        nome_giudice = bs.find("nomegiudice")
        data_udienza = bs.find("dataudienza")
        inscrito_ruolo_search = RE_INSCRITO_RUOLO.search(content)
        if inscrito_ruolo_search:
            data_inscricao = inscrito_ruolo_search.groups()[0]
        else:
            data_inscricao = "???"
        # Each field falls back to a readable placeholder when absent.
        case_state = extract_case_state_from_content(bs) or "Unknown"
        nome_giudice = nome_giudice.string if nome_giudice else "Not Assigned"
        data_udienza = data_udienza.string[:10] if data_udienza else "Not Assigned"
        primary_lawyer_initials = extract_primary_lawyer_initials(bs) or "Unknown"
        judgement_number = extract_judgement_number(bs)
        return Case(
            case_yr,
            case_no,
            data_inscricao,
            nome_giudice,
            data_udienza,
            case_state,
            primary_lawyer_initials,
            raw_case_content=content,
            judgement_number=judgement_number
        )
    # Response did not contain recognizable case content.
    return None
def extract_case_state_from_content(bs_content):
    """Return the 'Stato fascicolo' value from the parsed page, or None.

    The page presents label/value pairs as consecutive <li> elements, so
    the value is the element following the label.
    """
    try:
        items = bs_content.findAll("li")
        for idx, item in enumerate(items):
            if item.contents[0] == 'Stato fascicolo':
                return items[idx + 1].contents[0]
        return None
    except (AttributeError, IndexError, TypeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt): malformed markup or bs_content=None -> unknown.
        return None
def extract_primary_lawyer_initials(bs_content):
    """Return the redacted primary lawyer name from 'Parti fascicolo', or None.

    Strips the "Avv. " prefix via RE_REMOVE_LAWYER_PREFIX, then removes
    spaces and the '*' redaction characters.
    """
    try:
        items = bs_content.findAll("li")
        for idx, item in enumerate(items):
            if item.contents[0] == 'Parti fascicolo':
                redacted_name = RE_REMOVE_LAWYER_PREFIX.search(items[idx + 1].contents[3]).group(0)
                return redacted_name.replace(' ', '').replace('*', '')
        return None
    except (AttributeError, IndexError, TypeError):
        # Narrowed from a bare `except:`; also covers a non-matching regex
        # (search() returning None -> AttributeError on .group).
        return None
def extract_judgement_number(bs_content):
    """Return the judgement number from 'Sentenza definitiva', or None.

    The value element's text carries a 4-character prefix which is sliced
    off before returning.
    """
    try:
        li_entries = bs_content.findAll("li")
        for idx, val in enumerate(li_entries):
            if val.contents[0] == 'Sentenza definitiva':
                return li_entries[idx + 1].contents[0][4:]
        return None
    except (AttributeError, IndexError, TypeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt): missing/malformed markup -> no judgement.
        return None
|
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Base class for service request handler tests.
ServiceBaseTestCase
"""
__authors__ = ['<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)']
import logging
import mock
import shutil
import time
import tempfile
from collections import namedtuple
from copy import deepcopy
from tornado import httpclient, options, testing, web
from viewfinder.backend.base import secrets, util, environ
from viewfinder.backend.base.testing import BaseTestCase
from viewfinder.backend.db.client_log import CLIENT_LOG_CONTENT_TYPE
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.device import Device
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.follower import Follower
from viewfinder.backend.db.id_allocator import IdAllocator
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.settings import AccountSettings
from viewfinder.backend.db.user import User
from viewfinder.backend.db.versions import Version
from viewfinder.backend.db.viewpoint import Viewpoint
from viewfinder.backend.db import db_client, local_client, vf_schema
from viewfinder.backend.db.test.db_validator import DBValidator
from viewfinder.backend.op.op_manager import OpManager
from viewfinder.backend.op.operation_map import DB_OPERATION_MAP
from viewfinder.backend.resources.resources_mgr import ResourcesManager
from viewfinder.backend.services.apns import APNS, TestService
from viewfinder.backend.services import email_mgr, sms_mgr
from viewfinder.backend.storage import file_object_store, object_store, server_log
from viewfinder.backend.www import auth, auth_viewfinder, basic_auth, server, uimodules
from viewfinder.backend.www.test.service_tester import ServiceTester
# Lightweight record of one client-side log upload used by derived tests.
ClientLogRecord = namedtuple('ClientLogRecord', ['timestamp', 'client_id', 'contents'])
# TODO(spencer): use testing.AsyncHTTPSTestCase.
class ServiceBaseTestCase(BaseTestCase, testing.AsyncHTTPTestCase):
"""Initializes the test datastore and the viewfinder schema.
"""
def setUp(self):
    """Create the local test datastore, stub all external services, and
    register the standard set of test users and devices."""
    # Subclasses may pre-set these before calling through to setUp.
    if not hasattr(self, '_enable_xsrf'): self._enable_xsrf = True
    if not hasattr(self, '_url_host') or self._url_host is None: self._url_host = 'www.goviewfinder.com'
    super(ServiceBaseTestCase, self).setUp()
    # TODO(spencer): remove this when switched to AsyncHTTPSTestCase.
    basic_auth.BasicAuthHandler._HTTP_TEST_CASE = True
    # Local in-memory DB client backed by the viewfinder schema.
    db_client.DBClient.SetInstance(local_client.LocalClient(vf_schema.SCHEMA))
    self._client = db_client.DBClient.Instance()
    # Stub outbound email/SMS so tests never hit real services.
    email_mgr.EmailManager.SetInstance(email_mgr.TestEmailManager())
    sms_mgr.SMSManager.SetInstance(sms_mgr.TestSMSManager())
    # Log persistor writes into a throwaway dir (removed in tearDown).
    self._backup_dir = tempfile.mkdtemp()
    server_log.LogBatchPersistor.SetInstance(server_log.LogBatchPersistor(backup_dir=self._backup_dir))
    # Test APNS service captures push notifications instead of sending them.
    APNS.SetInstance('test', APNS(environment='test',
                                  feedback_handler=Device.FeedbackHandler(self._client)))
    self._apns = TestService.Instance()
    IdAllocator.ResetState()
    # Do not freeze new account creation during testing (by default).
    options.options.freeze_new_accounts = False
    # Set deterministic testing timestamp used in place of time.time() in server code.
    util._TEST_TIME = time.time()
    # Create validator and create some users and devices for testing convenience.
    self._validator = DBValidator(self._client, self.stop, self.wait)
    self._tester = ServiceTester(self.get_url(''), self.http_client, self._validator,
                                 secrets.GetSecret('cookie_secret'), self.stop, self.wait)
    self._test_id = 1
    self._validate = True
    # Skip model_db validation for specified tables. Ignored if _validate==False.
    self._skip_validation_for = []
    self._RunAsync(vf_schema.SCHEMA.VerifyOrCreate, self._client)
    OpManager.SetInstance(OpManager(op_map=DB_OPERATION_MAP, client=self._client, scan_ops=True))
    # Ensure that test users are created.
    self._CreateTestUsers()
    # Remove limit of number of auth messages that can be sent to a particular identity key.
    auth_viewfinder.VerifyIdBaseHandler._MAX_MESSAGES_PER_MIN = 10000
    auth_viewfinder.VerifyIdBaseHandler._MAX_MESSAGES_PER_DAY = 10000
def tearDown(self):
    """Validate assets (when the test body passed), drain pending
    operations, and remove all temporary state."""
    # If validation is enabled, validate all viewpoint assets using the service API.
    # If the test failed, skip validation so we don't get extra redundant error reports
    # (The unittest framework reports errors in the main test and errors in tearDown separately)
    validate = self._validate and self._GetUnittestErrorCount() == self._unittest_error_count
    if validate:
        self._ValidateAssets()
    # Ensure that operations do not exist in the db.
    from viewfinder.backend.db.operation import Operation
    Operation.Scan(self._client, None, self.stop)
    ops, last_key = self.wait()
    if validate:
        self.assertTrue(len(ops) == 0, ops)
        self.assertTrue(last_key is None)
    # Cleanup all assets created during tests.
    self._tester.Cleanup(validate=validate, skip_validation_for=self._skip_validation_for)
    self._RunAsync(server_log.LogBatchPersistor.Instance().close)
    self._RunAsync(OpManager.Instance().Drain)
    shutil.rmtree(self._backup_dir)
    super(ServiceBaseTestCase, self).tearDown()
    # No operation should still be "current" once the test completes.
    self.assertIs(Operation.GetCurrent().operation_id, None)
def run(self, result=None):
    """Capture the result object and the pre-test error/failure count so
    tearDown can tell whether the test body itself failed."""
    self._unittest_result = result
    self._unittest_error_count = self._GetUnittestErrorCount()
    super(ServiceBaseTestCase, self).run(result)
def _GetUnittestErrorCount(self):
    """Return the total errors + failures the framework has recorded so far.

    This is unfortunately the best available way for tearDown to detect
    whether the test body itself succeeded.
    """
    result = self._unittest_result
    return len(result.errors) + len(result.failures)
def get_app(self):
    """Creates a web server which handles /service requests."""
    # Configure the server for local, file-backed test operation.
    options.options.localdb = True
    options.options.fileobjstore = True
    options.options.localdb_dir = ''
    options.options.devbox = True
    options.options.domain = 'goviewfinder.com'
    options.options.short_domain = 'short.goviewfinder.com'
    # Init secrets with the unencrypted 'goviewfinder.com' domain.
    secrets.InitSecretsForTest()
    object_store.InitObjectStore(temporary=True)
    environ.ServerEnvironment.InitServerEnvironment()
    # Set up photo object store.
    obj_store = object_store.ObjectStore.GetInstance(object_store.ObjectStore.PHOTO)
    obj_store.SetUrlFmtString(self.get_url('/fileobjstore/photo/%s'))
    # Set up user logs object store.
    user_log_obj_store = object_store.ObjectStore.GetInstance(object_store.ObjectStore.USER_LOG)
    user_log_obj_store.SetUrlFmtString(self.get_url('/fileobjstore/user_log/%s'))
    # Set up user_zips object store.
    # NOTE(review): URL says 'fileobjectstore' but the handler below is
    # registered at 'fileobjstore' -- possible typo; confirm intended.
    user_zips_obj_store = object_store.ObjectStore.GetInstance(object_store.ObjectStore.USER_ZIPS)
    user_zips_obj_store.SetUrlFmtString(self.get_url('/fileobjectstore/user_zips/%s'))
    # Tornado application settings shared by all handlers.
    settings = {
        'login_url': '/',
        'cookie_secret': secrets.GetSecret('cookie_secret'),
        'obj_store': obj_store,
        'server_version': ServiceTester.SERVER_VERSION,
        'google_client_id': secrets.GetSecret('google_client_id'),
        'google_client_secret': secrets.GetSecret('google_client_secret'),
        'google_client_mobile_id': secrets.GetSecret('google_client_mobile_id'),
        'google_client_mobile_secret': secrets.GetSecret('google_client_mobile_secret'),
        'facebook_api_key': secrets.GetSecret('facebook_api_key'),
        'facebook_secret': secrets.GetSecret('facebook_secret'),
        'template_path': ResourcesManager.Instance().template_path,
        'ui_modules': uimodules,
        'xsrf_cookies' : self._enable_xsrf,
        'static_path': ResourcesManager.Instance().static_path,
    }
    # Start with the production webapp handlers and add several for testing.
    webapp_handlers = deepcopy(server.WEBAPP_HANDLERS + server.ADMIN_HANDLERS)
    webapp_handlers.append((r'/fileobjstore/photo/(.*)',
                            file_object_store.FileObjectStoreHandler,
                            { 'storename': object_store.ObjectStore.PHOTO, 'contenttype': 'image/jpeg' }))
    webapp_handlers.append((r'/fileobjstore/user_log/(.*)',
                            file_object_store.FileObjectStoreHandler,
                            { 'storename': object_store.ObjectStore.USER_LOG, 'contenttype': 'text/plain' }))
    webapp_handlers.append((r'/fileobjstore/user_zips/(.*)',
                            file_object_store.FileObjectStoreHandler,
                            { 'storename': object_store.ObjectStore.USER_ZIPS, 'contenttype': 'application/zip' }))
    # Fake viewfinder handler - added explicitly because it is not part of WEBAPP_HANDLERS.
    webapp_handlers.append((r'/(link|login|register)/fakeviewfinder', auth_viewfinder.FakeAuthViewfinderHandler))
    application = web.Application(**settings)
    application.add_handlers(options.options.short_domain, server.SHORT_DOMAIN_HANDLERS)
    application.add_handlers('.*', webapp_handlers)
    return application
def get_url(self, path):
    """Return an absolute URL for *path* on the configured test host/port."""
    return 'http://{0}:{1}{2}'.format(self._url_host, self.get_http_port(), path)
def assertRaisesHttpError(self, status_code, callableObj, *args, **kwargs):
    """Assert that invoking callableObj(*args, **kwargs) raises an
    httpclient.HTTPError whose status code equals *status_code*.

    Returns the caught exception for further inspection.
    """
    with self.assertRaises(httpclient.HTTPError) as caught:
        callableObj(*args, **kwargs)
    error = caught.exception
    self.assertEqual(error.code, status_code)
    return error
def _ValidateAssets(self):
    """Query for all viewpoints, episodes, and photos in order to make
    sure they're configured and associated properly with one another.
    """
    logging.info('Validating all viewpoint assets from the vantage point of every test user...')
    for cookie in self._cookies:
        # Query all viewpoints followed by this user.
        self._tester.QueryFollowed(cookie)
        # Query all friends of this user.
        self._tester.QueryUsers(cookie, [u.user_id for u in self._users])
        # Query all viewpoints accessible to the user.
        vp_select_list = [self._tester.CreateViewpointSelection(vp.viewpoint_id)
                          for vp in self._validator.QueryModelObjects(Viewpoint)]
        self._tester.QueryViewpoints(cookie, vp_select_list)
        # Query all episodes accessible to the user.
        ep_select_list = [self._tester.CreateEpisodeSelection(ep.episode_id)
                          for ep in self._validator.QueryModelObjects(Episode)]
        self._tester.QueryEpisodes(cookie, ep_select_list)
# =================================================================
#
# Helper methods that forward to the ServiceTester.
#
# =================================================================
def _SendRequest(self, method, user_cookie, request_dict, version=None):
    """Forward the service call to ServiceTester.SendRequest unchanged."""
    tester = self._tester
    return tester.SendRequest(method, user_cookie, request_dict, version=version)
def _GetSecureUserCookie(self, user=None, device_id=None, confirm_time=None):
    """Pass through to ServiceTester.GetSecureUserCookie, defaulting to
    the primary test user and device when not specified.
    """
    if not user:
        user = self._user
    if not device_id:
        device_id = self._device_ids[0]
    return self._tester.GetSecureUserCookie(user_id=user.user_id,
                                            device_id=device_id,
                                            user_name=user.name,
                                            confirm_time=confirm_time)
# =================================================================
#
# Helper methods to create test data used by derived tests.
#
# =================================================================
  def _CreateTestUsers(self):
    """Create several interesting users and devices to use in testing.

    Populates the parallel lists self._users, self._device_ids and
    self._cookies, plus convenience aliases: self._user/_user2/_user3,
    self._cookie/_cookie2/_cookie3, self._mobile_device,
    self._webapp_device_id and self._identities.
    """
    # List of test users.
    self._users = list()
    # List of main test user device.
    self._device_ids = list()
    # List of cookies containing user and main device.
    self._cookies = list()
    def _SaveUserInfo(user, device_id):
      # Fall back to the user's web-app device when no mobile device id exists.
      self._users.append(user)
      device_id = user.webapp_dev_id if device_id is None else device_id
      self._device_ids.append(device_id)
      self._cookies.append(self._GetSecureUserCookie(user, device_id))
    # 1. Create default user (with web device and multiple mobile devices).
    user_dict = {'name': 'Viewfinder User #1', 'given_name': 'user1', 'email': '<EMAIL>'}
    device_dict = {'name': 'User #1 IPhone', 'push_token': '%<PASSWORD>' % TestService.PREFIX}
    _SaveUserInfo(*self._tester.RegisterFakeViewfinderUser(user_dict, device_dict))
    self._user = self._users[0]
    # Create additional device for user #1.
    device_dict = {'name': 'User #1 IPad', 'push_token': '%<PASSWORD>' % TestService.PREFIX}
    user, self._extra_device_id1 = self._tester.LoginFakeViewfinderUser(user_dict, device_dict)
    # Create additional device with no name and no push token for user #1.
    user, self._extra_device_id2 = self._tester.LoginFakeViewfinderUser(user_dict, {})
    # 2. Create Facebook user (with only mobile device).
    user_dict = {'name': 'Facebook User #2', 'email': '<EMAIL>',
                 'picture': {'data': {'url': 'http://facebook.com/user2'}}, 'id': 2}
    device_dict = {'name': 'User #2 IPhone', 'push_token': '%<PASSWORD>' % TestService.PREFIX}
    _SaveUserInfo(*self._tester.RegisterFacebookUser(user_dict, device_dict))
    self._user2 = self._users[1]
    # Turn on email and SMS alerts in addition to APNS alerts for user #2.
    self._UpdateOrAllocateDBObject(AccountSettings,
                                   settings_id=AccountSettings.ConstructSettingsId(self._user2.user_id),
                                   group_name=AccountSettings.GROUP_NAME,
                                   user_id=self._user2.user_id,
                                   email_alerts=AccountSettings.EMAIL_ON_SHARE_NEW,
                                   sms_alerts=AccountSettings.SMS_ON_SHARE_NEW)
    # Set user #2's phone number (both on the local object and in the DB).
    self._user2.phone = '+12121234567'
    self._UpdateOrAllocateDBObject(User, user_id=self._users[1].user_id, phone=self._user2.phone)
    # 3. Create user with minimal properties (with only web device).
    user_dict = {'name': 'Gmail User #3', 'email': '<EMAIL>', 'verified_email': True}
    _SaveUserInfo(*self._tester.RegisterGoogleUser(user_dict))
    self._user3 = self._users[2]
    # Get device object from database.
    Device.Query(self._client, self._user.user_id, self._device_ids[0], None, self.stop)
    self._mobile_device = self.wait()
    self._webapp_device_id = self._user.webapp_dev_id
    # Convenience aliases for the three users' cookies.
    self._cookie, self._cookie2, self._cookie3 = self._cookies
    # Get identity for each user.
    self._identities = [self._RunAsync(user.QueryIdentities, self._client)[0] for user in self._users]
  def _CreateSimpleTestAssets(self):
    """Create two episodes with several photos in user #1's default
    viewpoint. This is useful for tests which just need a little data
    to work with, and where having lots of data makes debugging harder.

    The resulting ids are stashed on self for use by helpers such as
    _ShareSimpleTestAssets and _CreateProspectiveUser.
    """
    # Each episode gets 2 photos.
    self._episode_id, self._photo_ids = self._UploadOneEpisode(self._cookie, 2)
    self._episode_id2, self._photo_ids2 = self._UploadOneEpisode(self._cookie, 2)
def _ShareSimpleTestAssets(self, contacts):
"""Shares the episode and photos ids created by _CreateSimpleTestAssets with the given
list of contacts. Returns a tuple with the viewpoint and episode.
"""
vp_id, ep_ids = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
contacts,
**self._CreateViewpointDict(self._cookie))
return vp_id, ep_ids[0]
def _CreateSimpleContacts(self):
"""Create multiple unbound contacts for user #1 and user #2."""
for user_id in [self._user.user_id, self._user2.user_id]:
for identity_key in ['Local:identity1', 'Local:identity2']:
contact_dict = Contact.CreateContactDict(user_id,
[('Phone:+13191234567', 'mobile'), (identity_key, None)],
util._TEST_TIME,
Contact.GMAIL)
self._UpdateOrAllocateDBObject(Contact, **contact_dict)
def _CreateProspectiveUser(self):
"""Creates a prospective user by sharing the photos created by _CreateSimpleTestAssets to
a new email identity "Email:<EMAIL>". Returns a tuple containing the user,
the new viewpoint, and the new episode.
"""
assert getattr(self, '_episode_id'), 'call _CreateSimpleTestAssets first'
vp_id, ep_ids = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
['Email:<EMAIL>'])
identity = self._RunAsync(Identity.Query, self._client, 'Email:<EMAIL>', None)
return self._RunAsync(User.Query, self._client, identity.user_id, None), vp_id, ep_ids[0]
  def _CreateQueryAssets(self, add_test_photos=False):
    """Create a good number of interesting viewfinder assets for use by
    query tests in derived classes. Use the tester classes in order to
    ensure that all will be cleaned up after the test has completed.

    "add_test_photos" is forwarded to each UploadEpisode call (presumably
    creating backing photo data -- see ServiceTester).
    """
    # User #1 has empty viewpoint with only himself as follower.
    self._tester.ShareNew(self._cookie, [], [], **self._CreateViewpointDict(self._cookie))
    # User #1 uploads a bunch of episodes to his default viewpoint.
    ep_ph_ids_list = self._UploadMultipleEpisodes(self._cookie,
                                                  37,
                                                  add_asset_keys=True,
                                                  add_test_photos=add_test_photos)
    # User #1 shares the episodes with users #2 and #3.
    vp_id, ep_ids = self._tester.ShareNew(self._cookie, ep_ph_ids_list,
                                          [self._user2.user_id, self._user3.user_id],
                                          **self._CreateViewpointDict(self._cookie))
    # All 3 users post comments to the new viewpoint.
    user_cookies = [self._cookie, self._cookie2, self._cookie3]
    self._PostCommentChain(user_cookies, vp_id, 4)
    self._PostCommentChain(user_cookies[:2], vp_id, 5, ph_id=ep_ph_ids_list[1][0])
    # User #1 shares 1/2 the new episodes to a new viewpoint with no followers.
    self._tester.ShareNew(self._cookie, ep_ph_ids_list[::2], [], **self._CreateViewpointDict(self._cookie))
    # User #3 uploads a bunch of episodes to his default viewpoint.
    ep_ph_ids_list = self._UploadMultipleEpisodes(self._cookie3, 17, add_test_photos=add_test_photos)
    # User #3 shares 1/2 the new episodes to a new viewpoint with user #1 as a follower.
    prev_vp_id, ep_ids = self._tester.ShareNew(self._cookie3, ep_ph_ids_list[::2], [self._user.user_id],
                                               **self._CreateViewpointDict(self._cookie3))
    # User #1 sets the viewed_seq on the new viewpoint.
    self._tester.UpdateViewpoint(self._cookie, prev_vp_id, viewed_seq=1)
    # User #1 reshares 1/2 of the new episodes to a new viewpoint with user #2 as a follower.
    # (Pair each newly-created episode id with the photo ids it was shared from.)
    ep_ph_ids_list = [(new_ep_id, old_ph_ids)
                      for new_ep_id, (old_ep_id, old_ph_ids) in zip(ep_ids, ep_ph_ids_list[::2])]
    vp_id, ep_ids = self._tester.ShareNew(self._cookie, ep_ph_ids_list[::2], [self._user2.user_id],
                                          **self._CreateViewpointDict(self._cookie))
    # Update some episode metadata in the new viewpoint.
    self._tester.UpdateEpisode(self._cookie, episode_id=ep_ids[0], title='Updated this title')
    # User #2 gets view-only permission on the new viewpoint (empty labels).
    self._UpdateOrAllocateDBObject(Follower, user_id=self._user2.user_id, viewpoint_id=vp_id, labels=[])
    # User #1 reshares the episodes shared with user #2 back to the previous viewpoint.
    ep_ph_ids_list = [(new_ep_id, old_ph_ids)
                      for new_ep_id, (old_ep_id, old_ph_ids) in zip(ep_ids, ep_ph_ids_list[::2])]
    self._tester.ShareExisting(self._cookie, prev_vp_id, ep_ph_ids_list)
def _UpdateOrAllocateDBObject(self, cls, **db_dict):
"""Updates an existing DBObject of type "cls", with the attributes
in "db_dict". If no such object yet exists, allocates a new object,
generating the object's key if necessary. Adds the object to the DB
validator's model so that it can verify against it. Returns the
updated or allocate object.
This method is especially useful in cases where the service API
does not have a way to create a certain kind of object without
a lot of trouble, or if we need to simulate some condition by
directly modifying an object in the DB.
"""
# Check for conditions under which we can do an allocation.
# For a hash-key only object, just verify the hash key
# has not been supplied.
#
# For a composite key object, verify that the range key
# is not supplied, but hash key is.
hash_key_col = cls._table.hash_key_col
range_key_col = cls._table.range_key_col
if not range_key_col:
if not db_dict.has_key(hash_key_col.name):
cls.Allocate(self._client, self.stop)
else:
cls.Query(self._client, db_dict[hash_key_col.name],
col_names=None, callback=self.stop, must_exist=False)
else:
assert db_dict.has_key(hash_key_col.name), 'Must supply hash key.'
if not db_dict.has_key(range_key_col.name):
cls.Allocate(self._client, db_dict[hash_key_col.name], self.stop)
else:
cls.Query(self._client, db_dict[hash_key_col.name], db_dict[range_key_col.name],
col_names=None, callback=self.stop, must_exist=False)
o = self.wait()
if o == None:
o = cls()
o._version = Version.GetCurrentVersion()
for k, v in db_dict.items():
setattr(o, k, v)
o.Update(self._client, self.stop)
self.wait()
self._validator.AddModelObject(o)
return o
  def _MakeSystemViewpoint(self, viewpoint_id):
    """Force the specified viewpoint to be a system viewpoint for testing purposes.

    Updates the viewpoint "type" column to Viewpoint.SYSTEM directly in the DB.
    """
    viewpoint = self._RunAsync(Viewpoint.Query, self._client, viewpoint_id, None)
    # Patch read_only for the "type" field so that the field can be overwritten.
    with mock.patch.object(viewpoint._columns['type'].col_def, 'read_only', False):
      self._UpdateOrAllocateDBObject(Viewpoint, viewpoint_id=viewpoint_id, type=Viewpoint.SYSTEM)
  def _WriteClientLog(self, user_cookie, log_record):
    """Writes "log_record.contents" to a user client log.

    Requests a signed PUT URL via the "new_client_log_url" service method,
    uploads the log contents to that URL, and asserts the upload succeeded.
    """
    response_dict = self._SendRequest('new_client_log_url', user_cookie,
                                      {'headers': {'op_id': 'o1', 'op_timestamp': time.time()},
                                       'timestamp': log_record.timestamp,
                                       'client_log_id': log_record.client_id})
    url = response_dict['client_log_put_url']
    headers = {'Content-Type': CLIENT_LOG_CONTENT_TYPE}
    # PUT the log body directly to the signed URL (no redirects expected).
    self._tester.http_client.fetch(url, callback=self.stop, method='PUT',
                                   body=log_record.contents, follow_redirects=False,
                                   headers=headers)
    response = self.wait()
    self.assertEqual(200, response.code)
def _PostCommentChain(self, user_cookies, vp_id, num_comments, ph_id=None):
"""Generate and execute requests to post "num_comments" to the specified
viewpoint. Select from "user_cookies" to impersonate users who are
posting the comments. Link each comment to the preceding comment via
its "asset_id" attribute. If "ph_id" is specified, link the "root"
comment's "asset_id" to that photo. Return the list of ids of the
photos that were created.
"""
comment_ids = []
timestamp = time.time()
asset_id = ph_id
for i in xrange(num_comments):
cm_dict = {'timestamp': timestamp}
message = 'Comment #%d' % self._test_id
if asset_id is not None:
cm_dict['asset_id'] = asset_id
timestamp += 10
self._test_id += 1
user_cookie = user_cookies[i % len(user_cookies)]
asset_id = self._tester.PostComment(user_cookie, vp_id, message, **cm_dict)
comment_ids.append(asset_id)
return comment_ids
self._episode_id, self._photo_ids = self._UploadOneEpisodes(self._cookie, 2)
def _UploadOneEpisode(self, user_cookie, num_photos):
"""Generate and execute a request to upload "num_photos" to the default
viewpoint of the specified user. Return a tuple of ids:
(episode_id, photo_ids)
"""
ep_dict = {'title': 'Episode #%d Title' % self._test_id,
'description': 'Episode #%d Description' % self._test_id}
self._test_id += 1
ph_dict_list = [self._CreatePhotoDict(user_cookie)
for i in range(num_photos)]
return self._tester.UploadEpisode(user_cookie, ep_dict, ph_dict_list)
def _UploadMultipleEpisodes(self, user_cookie, num_photos, add_asset_keys=False, add_test_photos=False):
"""Upload multiple episodes to the specified user's default viewpoint.
Divide "num_photos" into groups that logarithmically decrease in size
(base-2). Create the number of episodes that are needed to contain
those photos, where each episode has a different number of photos,
and the last episode has zero photos. Return a list of tuples:
[(episode_id, photo_ids), ...]
"""
result = []
while True:
half = (num_photos + 1) / 2
num_photos -= half
ep_dict = {'title': 'Episode #%d Title' % self._test_id,
'description': 'Episode #%d Description' % self._test_id}
self._test_id += 1
ph_dict_list = [self._CreatePhotoDict(user_cookie)
for i in range(half)]
if add_asset_keys:
for ph_dict in ph_dict_list:
ph_dict['asset_keys'] = ['a/#asset_key-%d' % self._test_id]
self._test_id += 1
result.append(self._tester.UploadEpisode(user_cookie, ep_dict, ph_dict_list, add_test_photos=add_test_photos))
if half == 0:
return result
# =================================================================
#
# Helper methods to get useful verification info from service API.
#
# =================================================================
  def _CountEpisodes(self, user_cookie, viewpoint_id):
    """Return count of episodes in the given viewpoint."""
    vp_select = self._tester.CreateViewpointSelection(viewpoint_id)
    response_dict = self._tester.QueryViewpoints(user_cookie, [vp_select])
    # Exactly one viewpoint was selected, so count its episodes.
    return len(response_dict['viewpoints'][0]['episodes'])
# =================================================================
#
# Helper methods to create service API request and object dicts.
#
# =================================================================
def _CreateViewpointDict(self, user_cookie, **update_vp_dict):
"""Create dict() for a test viewpoint, overriding default values with
whatever is passed in "update_vp_dict"."""
user_id, device_id = self._tester.GetIdsFromCookie(user_cookie)
vp_dict = {'viewpoint_id': Viewpoint.ConstructViewpointId(device_id, self._test_id),
'title': 'Title %s' % self._test_id,
'description': 'Description %s. 朋友你好.' % self._test_id,
'name': 'Name %s' % self._test_id,
'type': Viewpoint.EVENT}
self._test_id += 1
vp_dict.update(**update_vp_dict)
return vp_dict
def _CreateEpisodeDict(self, user_cookie, **update_ep_dict):
"""Create dict() for a test episode, overriding default values with
whatever is passed in "update_ep_dict"."""
user_id, device_id = self._tester.GetIdsFromCookie(user_cookie)
timestamp = time.time() - self._test_id
ep_dict = {'episode_id': Episode.ConstructEpisodeId(timestamp, device_id, self._test_id),
'timestamp': timestamp,
'title': 'Title %s' % self._test_id,
'description': 'Description %s. 朋友你好.' % self._test_id}
self._test_id += 1
ep_dict.update(**update_ep_dict)
return ep_dict
  def _CreatePhotoDict(self, user_cookie, **update_ph_dict):
    """Create dict() for a test photo, overriding default values with
    whatever is passed in "update_ph_dict"."""
    user_id, device_id = self._tester.GetIdsFromCookie(user_cookie)
    # A caller-supplied timestamp also drives the generated photo id.
    timestamp = update_ph_dict.get('timestamp', time.time() - self._test_id)
    ph_dict = {'photo_id': Photo.ConstructPhotoId(timestamp, device_id, self._test_id),
               'aspect_ratio': .75 + self._test_id,
               'content_type': 'image/jpeg',
               # MD5 checksums for each of the four image renditions.
               'tn_md5': util.ComputeMD5Hex('thumbnail image data'),
               'med_md5': util.ComputeMD5Hex('medium image data'),
               'full_md5': util.ComputeMD5Hex('full image data'),
               'orig_md5': util.ComputeMD5Hex('original image data'),
               'location': {'latitude': 47.5675, 'longitude':-121.962, 'accuracy': 0.0},
               'placemark': {'iso_country_code': u'US',
                             'thoroughfare': u'SE 43rd St',
                             'locality': u'Fall City',
                             'country': u'United States',
                             'subthoroughfare': u'28408',
                             'state': u'Washington',
                             'sublocality': u'Issaquah Plateau'},
               # Byte sizes for each of the four image renditions.
               'tn_size': 5 * 1024,
               'med_size': 40 * 1024,
               'full_size': 150 * 1024,
               'orig_size': 1200 * 1024,
               'timestamp': timestamp,
               'caption': 'Photo caption #%d' % self._test_id}
    self._test_id += 1
    ph_dict.update(**update_ph_dict)
    return ph_dict
|
#
#
#
import json
from gzip import GzipFile
from io import BytesIO
from typing import MutableSequence
from typing import Union
from ._exceptions import CompressionFull
from ._exceptions import SingleCompressionOnGoing
class CompressedJsonList:
    """
    Creates a JSON array of JSON messages, gzip-compressed as one stream:
        [<msg>, <msg>, <msg>]
    Arguments:
        compression_limit [int]: The max compressed size allowed for one batch.
    """
    def __init__(self, compression_limit: int) -> None:
        self._max_compressed_size: int = compression_limit
        # Total raw (pre-compression) bytes of all messages written so far.
        self._uncompressed_size: int = 0
        # Size of the finished compressed payload (set when data is retrieved).
        self._compressed_size: int = 0
        # True while incremental compression via compress() is in progress.
        self._single_compress_started = False
        self._byte_stream: BytesIO = None  # type: ignore
        self._gzip_stream: GzipFile = None  # type: ignore
        # Rough allowance for gzip header/trailer bytes in the size estimate.
        self._gzip_metadata_size = 20
        # Estimated bytes written to the gzip stream but possibly not yet
        # flushed to the byte stream; starts at 1 for the opening "[".
        self._unzipped_chars = 1 + self._gzip_metadata_size
        # Count of messages written so far (controls comma separators).
        self._data_written = 0
    def _get_max_compressed_size(self) -> int:
        """ Get the max compressed size """
        return self._max_compressed_size
    def _set_max_compressed_size(self, value: int) -> None:
        """ Set the max compressed size """
        self._max_compressed_size = value
    compression_limit = property(_get_max_compressed_size, _set_max_compressed_size)
    @property
    def uncompressed_size(self) -> int:
        """ The size of the uncompressed data """
        return self._uncompressed_size
    @property
    def compressed_size(self) -> int:
        """ The size of the compression """
        return self._compressed_size
    @property
    def compression_ratio(self) -> float:
        """The compression ratio, as a percentage.
        Returns:
            float: compressed size as a percentage of uncompressed size
                   (0.0 when nothing has been written yet).
        """
        ratio = 0.0
        if self.uncompressed_size != 0:
            ratio = self.compressed_size / self.uncompressed_size * 100.0
        return ratio
    def get_data(self, before_maxed=False) -> bytes:
        """Get the compressed json array.
        Args:
            before_maxed (bool, optional): Set to `True` if retrieving data before
                the max limit was reached; this writes the closing "]" that would
                otherwise have been written when the limit was hit. Defaults to False.
        Returns:
            bytes: The compressed json array
        NOTE(review): the value is taken before the gzip stream is closed, so
        bytes still buffered by GzipFile (final block/trailer) are not part of
        the returned payload -- confirm downstream decoding tolerates this.
        """
        if before_maxed:
            self._gzip_stream.write(b"]")
        compressed = self._byte_stream.getvalue()
        self._compressed_size = len(compressed)
        self._gzip_stream.close()
        self._single_compress_started = False
        return compressed
    def compress(self, data: Union[str, dict]) -> None:
        """
        Compress a single json object and adds it to the json array.
        Args:
            data (Union[str, dict]): The json string or python `dict`
        Raises:
            CompressionFull: When we have reached the max limit; the array is
                closed with "]" before raising.
        """
        if not self._single_compress_started:
            # Lazily open a fresh stream and start the JSON array.
            self._single_compress_started = True
            self._byte_stream = BytesIO()
            self._gzip_stream = GzipFile(mode="wb", fileobj=self._byte_stream)
            self._gzip_stream.write(b"[")
        if not self._compress(data):
            self._gzip_stream.write(b"]")
            raise CompressionFull()
    def get_compressed_json_list(self, json_data: MutableSequence[Union[str, dict]]) -> bytes:
        """Get a compressed list of json objects.
        Items that fit are removed from `json_data`, so anything left in the
        sequence afterwards did not fit within the compression limit.
        Args:
            json_data (MutableSequence[Union[str, dict]]): List of json string or python `dict` to compress
        Returns:
            bytes: The array of compressed bytes
        Raises:
            SingleCompressionOnGoing: if compress() was started and not finished.
        """
        if self._single_compress_started:
            raise SingleCompressionOnGoing("Single compression started, cant compress a range now.")
        self._byte_stream = BytesIO()
        self._gzip_stream = GzipFile(mode="wb", fileobj=self._byte_stream)
        self._gzip_stream.write(b"[")
        for org_data in json_data:
            # NOTE(review): removing from the sequence while iterating it skips
            # the element following each removal -- confirm this is intended.
            if self._compress(org_data): # pragma: no cover
                json_data.remove(org_data)
        self._gzip_stream.write(b"]")
        self._gzip_stream.close()
        compressed = self._byte_stream.getvalue()
        self._compressed_size = len(compressed)
        return compressed
    def _compress(self, org_data: Union[str, dict]) -> bool:
        """Serialize and write one message; return False when it would not fit."""
        if isinstance(org_data, dict):
            data = json.dumps(org_data)
        elif isinstance(org_data, str):
            data = org_data
        else:
            raise ValueError(f"We do not support type: {type(org_data)}")
        data_bytes = data.encode("utf-8")
        if not self._check_compression(data_bytes):
            return False
        if self._data_written > 0:
            self._gzip_stream.write(b",")  # separate array elements with a comma
        self._gzip_stream.write(data_bytes)
        self._data_written += 1
        self._unzipped_chars += len(data_bytes)
        self._uncompressed_size += len(data_bytes)
        return True
    def _check_compression(self, data_bytes: bytes) -> bool:
        """Estimate whether `data_bytes` still fits under the compressed limit.
        Pessimistic first check: flushed bytes + unflushed raw bytes + incoming
        bytes. When that exceeds the limit, flush the gzip stream and re-check.
        NOTE(review): `stream_size` is measured before the flush, so the second
        check uses the pre-flush size -- confirm that is the intended estimate.
        """
        stream_size = self._byte_stream.getbuffer().nbytes
        if (stream_size + len(data_bytes) + self._unzipped_chars) > self._max_compressed_size:
            self._gzip_stream.flush()
            self._unzipped_chars = 0 + self._gzip_metadata_size
            if (stream_size + len(data_bytes)) >= self._max_compressed_size and self._data_written > 0:
                return False
        return True
|
<reponame>ap9272/CSE246-project
import numpy as np
from scipy.stats import truncnorm
# get values from a truncated normal distribution for age sampling
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
    """Return a scipy truncnorm distribution clipped to [low, upp].

    The truncation bounds are expressed in standard-deviation units relative
    to the mean, as scipy's truncnorm requires.
    """
    a = (low - mean) / sd
    b = (upp - mean) / sd
    return truncnorm(a, b, loc=mean, scale=sd)
# class for human
class Human():
    """A member of the simulated population (SIRD agent).

    Tracks demographics, 2-D position, infection state and -- when the
    contact-tracing app is installed -- a rolling window of daily contact sets.
    """
    # TODO: add other parameters of a human like location
    def __init__(self, age, gender, human_hash, incub_time, app_install):
        self.Age = age
        self.Gender = gender
        self.location = np.array([0.0, 0.0])
        # SIRD model: (S)usceptible, {(I)nfected, (SYM)ptomatic, (ASYM)ptomatic},
        # (R)ecovered, (D)ead.
        self.state = 'S'
        self.infected_time = -1    # time steps since infected (-1 = never)
        self.incubation_time = -1
        self.app_install = app_install
        self.h_id = human_hash
        # One set of contacted ids per simulated day; most recent is last.
        self.contacted_humans = [set()]
        # Number of daily contact sets retained (rolling buffer length).
        self.contacted_buff = incub_time

    def __str__(self):
        return "(" + str(self.Age) + ", " + self.Gender + ") "

    # TODO: add other functions of humans like movement
    def set_location(self, coords):
        """Set the human's location (also on graph)."""
        self.location = coords

    def distance(self, coord):
        """Euclidean distance from this human to the given coordinate."""
        return np.sqrt((self.location[0] - coord[0])**2 + (self.location[1] - coord[1])**2)

    def move(self, direction, bounding_coords):
        """Move by `direction`, reflecting off the bounding-box edges.

        NOTE: reflection negates components of `direction` in place, so the
        caller's array is mutated (existing behavior, deliberately preserved).
        """
        new_loc = self.location + direction
        # checking for box containment
        if (new_loc[0] < bounding_coords[0][0] or new_loc[0] > bounding_coords[1][0]):
            direction[0] = -direction[0]
        if (new_loc[1] < bounding_coords[0][1] or new_loc[1] > bounding_coords[1][1]):
            direction[1] = -direction[1]
        self.location = self.location + direction

    def add_contact(self, h_idx):
        """Record a contact for today; no-op when the app is not installed."""
        if not self.app_install:
            return
        self.contacted_humans[-1].add(h_idx)

    def update_contact_day(self):
        """Start a new day's contact set, dropping sets older than the buffer."""
        if not self.app_install:
            return
        self.contacted_humans.append(set())
        if len(self.contacted_humans) >= self.contacted_buff:
            self.contacted_humans.pop(0)

    def upload_contacts(self):
        """Return the rolling list of daily contact sets."""
        return self.contacted_humans
def create_humans(args):
    """Create the simulated population described by `args`.

    Attributes read from `args`: total_population, gender_ratio (threshold for
    assigning 'Male'), age_dist ('normal' or 'uniform'), max_age,
    infection_incub (contact-buffer length), app_install_prob.

    Returns:
        list of Human instances.

    Raises:
        ValueError: when `args.age_dist` is not a known distribution name
            (previously this only printed a warning and then crashed later
            with an AttributeError on `Ages.shape`).
    """
    total_pop = args.total_population
    gender_ratio = args.gender_ratio
    age_dist_func = args.age_dist
    max_age = args.max_age
    incub_time = args.infection_incub
    app_install = args.app_install_prob
    # Sample ages; normal-distribution parameters chosen from published stats.
    if age_dist_func == 'normal':
        X = get_truncated_normal(mean=29.6, sd=15, low=0, upp=max_age)
        ages = np.around(X.rvs(total_pop)).astype(int)
    elif age_dist_func == 'uniform':
        ages = np.random.randint(low=0, high=max_age, size=total_pop)
    else:
        raise ValueError("Unknown age distribution function given: %r" % age_dist_func)
    # Sample gender and app installation per human.
    genders = np.random.uniform(size=total_pop) > gender_ratio
    apps = np.random.uniform(size=total_pop) < app_install
    population = []
    for i, (age, is_male, has_app) in enumerate(zip(ages, genders, apps)):
        gender = 'Male' if is_male else 'Female'
        population.append(Human(age, gender, i, incub_time, has_app))
    return population
|
import sublime
import sublime_plugin
import os
from ..lib import log, setup_log_panel, yte_setting, dotty
from ..lib import select_video, select_playlist, select_tag, select_timecode
from ..lib import Request, NetworkManager, stored_credentials_path, video_sort
# TODO:
# - Hit the keyword in the first few lines and 2-3 times total
# - The first few lines (how much?) are shown above the fold
# - Tags is 500 characters long, no more than 30 characters per tag
# - Tags with spaces may count as having a length + 2 because internally
# they're wrapped in quotes and that counts against the length
# - Tags should include brand-related and channel tags for more relevance
# - Chapters: first must be at 0:00; there has to be at least 3 in ascending
# order, and the minimum length of a chapter is 10 seconds. There is no
# official doc on what the text should look like, but observably it seems to
#    ignore leading punctuation, as in "00:00 - Introduction" the " - " is
# skipped (though starting it with a literal " gets it added, so there's
# that)
###----------------------------------------------------------------------------
# Our global network manager object; created in loaded() and torn down in
# unloaded().
netManager = None
###----------------------------------------------------------------------------
# The uploads playlist doesn't appear in the list of playlists associated with
# a user because it's channel specific and not user specific. This is a sample
# dotty entry with just enough information to allow for populating that
# playlist into a chooser.
#
# The actual ID of the placeholder needs to be established at the point where
# the data is actually collected.
_upload_template = {
    "id": "placeholder",
    "snippet": {
        "title": "Uploaded Videos"
    },
    "status": {
        "privacyStatus": "private",
    },
    "contentDetails": {
        # We don't know how many items are in the uploads playlist until we
        # fetch the contents of it. The display code in the chooser will use
        # markup to tell the user the list size is unknown in this case.
        # "itemCount": 0
    }
}
###----------------------------------------------------------------------------
def loaded():
    """
    Initialize our plugin state on load.
    """
    global netManager

    # Ensure every open window has a log panel before the first log call.
    for window in sublime.windows():
        setup_log_panel(window)

    log("PKG: YouTubeEditor loaded")

    # Load persisted settings and install the fallback defaults.
    defaults = {
        "camtasia_folder": os.path.expanduser("~"),
        "auto_show_panel": 2,
        "report_output_to_view": False,
        "cache_downloaded_data": True,
        "encrypt_cache": False,
        "client_id": "",
        "client_secret": "",
        "auth_uri": "",
        "token_uri": ""
    }
    yte_setting.obj = sublime.load_settings("YouTubeEditor.sublime-settings")
    yte_setting.default = defaults

    netManager = NetworkManager()
def unloaded():
    """
    Clean up plugin state on unload.
    """
    global netManager

    # Nothing to do unless loaded() ran and created the manager.
    if netManager is None:
        return

    netManager.shutdown()
    netManager = None
def youtube_has_credentials():
    """
    Determine if there are stored credentials for a YouTube login; this
    indicates that the user has previously gone through the login steps to
    authorize the plugin with YouTube.

    Requires loaded() to have run so that netManager exists.
    """
    return netManager.has_credentials()
def youtube_is_authorized():
    """
    Determine if the plugin is currently authorized or not. This indicates not
    only that the user has previously authorized the plugin on YouTube, but
    also that a request has been made that has validated (and potentially
    refreshed) our access token. If this is not the case, requests will fail.
    """
    return netManager.is_authorized()
def youtube_request(request, handler, reason, callback, **kwargs):
    """
    Dispatch a request to collect data from YouTube, invoking the given
    callback when the request completes. The request will store the given
    handler, reason and all remaining keyword arguments as arguments to the
    request dispatched.
    """
    netManager.request(Request(request, handler, reason, **kwargs), callback)
###----------------------------------------------------------------------------
class YoutubeRequest():
    """
    This class abstracts away the common portions of using the NetworkManager
    to make requests and get responses back.

    A request can be made via the `request()` method; the result is routed to
    a method on the class. The default handler is the name of the request
    preceded by an underscore.
    """
    auth_req = None
    auth_resp = None
    run_args = None

    def run(self, **kwargs):
        self.run_args = kwargs
        if youtube_is_authorized():
            # Already authorized; replay the cached authorization response.
            self._authorized(self.auth_req, self.auth_resp)
        else:
            self.request("authorize", "_internal_auth", "Authorizing")

    def _internal_auth(self, request, result):
        # Cache the authorization so later run() calls can skip the request.
        self.auth_req, self.auth_resp = request, result
        self._authorized(self.auth_req, self.auth_resp)

    def request(self, request, handler=None, reason=None, **kwargs):
        youtube_request(request, handler, reason, self.result, **kwargs)

    def result(self, request, success, result):
        # Failures are funneled into the shared _error handler.
        attr = request.handler if success else "_error"
        if not hasattr(self, attr):
            raise RuntimeError("'%s' has no handler for request '%s'" % (
                               self.name(), request.name))
        getattr(self, attr)(request, result)

    def _error(self, request, result):
        log("Err: in '{0}': {2} (code={1})", request.name,
            result['error.code'], result['error.message'], display=True)

    # Most commands enable themselves only when credentials exist; commands
    # responsible for obtaining credentials override this method.
    def is_enabled(self, **kwargs):
        return youtube_has_credentials()
###----------------------------------------------------------------------------
class YouTubeVideoSelect(YoutubeRequest):
"""
This class is a specialization on YoutubeRequest that specifically presumes
that the ultimate goal is to have the user select a video for some purpose.
The sequence of items here is:
- Gather channel information
- Gather list of playlists and prompt (or; assume uploads playlist)
- Gather contents of selected playlist
- Prompt by tags on videos in the playlist (optional based on args)
- Prompt for a video (either in the tags or in the playlist)
- Prompt for a timecode in the video (if any)
"""
# These values control what the placeholder text in the various quick
# panels will be for each of the given operations. The default value of
# None defers the placeholder to the utility functions in the utils.py
# file.
#
# video_tag_placeholder takes an optional format of {tag} to specify the
# tag that was chosen to get to this video list.
#
# timecode_placeholder takes an optional format of {title} to specify the
# title of the video the user is selecting a timecode from.
playlist_placeholder = None
tag_placeholder = None
video_placeholder = None
video_tag_placeholder = None
timecode_placeholder = None
def _authorized(self, request, result):
self.use_tags = self.run_args.get("by_tags", False)
self.use_playlists = self.run_args.get("by_playlists", False)
self.request("channel_list", reason="Get Channel Info")
def _channel_list(self, request, result):
self.channel = result[0]
# Make a fake playlist from a template; populate it with the public
# video count. The count will be adjusted later if/when the user
# browses into the Uploads playlist.
self.uploads_playlist = dotty.dotty(_upload_template)
self.uploads_playlist['contentDetails.itemCount'] = self.channel['statistics.videoCount']
self.uploads_playlist['id'] = self.channel['contentDetails.relatedPlaylists.uploads']
if self.use_playlists:
self.request("playlist_list", channel_id=self.channel['id'],
reason="Get user playlists")
else:
self.pick_playlist(self.uploads_playlist)
def _playlist_list(self, request, result):
self.playlists = video_sort(result, 'snippet.title')
self.playlists.insert(0, self.uploads_playlist)
select_playlist(self.playlists, self.pick_playlist,
placeholder=self.playlist_placeholder)
def _playlist_contents(self, request, result):
if self.use_tags:
select_tag(result, self.pick_tag, show_back=self.use_playlists,
placeholder=self.tag_placeholder)
else:
# If this is the uploads playlist, update the video count to
# include non-public videos.
if request["playlist_id"] == self.uploads_playlist['id']:
self.uploads_playlist['contentDetails.itemCount'] = len(result)
# Pass the video list as the tag_list to the lambda so it can be
# picked up and used again if the user goes back while editing the
# timecode.
videos = video_sort(result, "statistics.viewCount", int, True)
select_video(videos, lambda vid: self.select_video(vid, None, videos),
show_back=self.use_playlists,
placeholder=self.video_placeholder)
def pick_playlist(self, playlist):
if playlist != None:
self.request("playlist_contents",
reason="Get playlist contents",
playlist_id=playlist['id'])
def pick_tag(self, tag, tag_list):
if tag is not None:
if tag == "_back":
if self.use_playlists:
return select_playlist(self.playlists, self.pick_playlist,
placeholder=self.playlist_placeholder)
videos = video_sort(tag_list[tag], "statistics.viewCount", int, True)
# Use the default, unless we have a specific placeholder for this.
placeholder = (None if not self.video_tag_placeholder else
self.video_tag_placeholder.format(tag=tag))
# Video ID is in contentDetails.videoId for short results or id for
# full details (due to it being a different type of request)
select_video(videos, lambda vid: self.select_video(vid, tag, tag_list),
show_back=True, placeholder=placeholder)
def select_video(self, video, tag, tag_list):
    """Handle a video selection; ``"_back"`` walks back up the browse chain
    (tags first when enabled, otherwise the playlist picker)."""
    if video is None:
        return
    if video['id'] == "_back":
        # When using both tags and playlists, the browse order should send
        # us back to tags first and from there to playlists.
        if self.use_tags:
            return select_tag(None, self.pick_tag, self.use_playlists, tag_list,
                              placeholder=self.tag_placeholder)
        return select_playlist(self.playlists, self.pick_playlist,
                               placeholder=self.playlist_placeholder)
    self.picked_video(video, tag, tag_list)
def pick_toc(self, timecode, text, video, tag, tag_list):
    """Handle a table-of-contents (timecode) selection.

    ``"_back"`` re-opens the previous chooser: the tag picker when tags are
    in use, otherwise the video list (in that case *tag_list* actually holds
    the list of videos). Both back branches return early, so
    ``picked_toc`` only runs for a real timecode.

    Fix: compare against None with ``is not`` (identity, per PEP 8) rather
    than ``!=``.
    """
    if timecode is not None:
        if timecode == "_back":
            if self.use_tags:
                return self.pick_tag(tag, tag_list)
            else:
                return select_video(tag_list,
                                    lambda vid: self.select_video(vid, None, None),
                                    show_back=self.use_playlists,
                                    placeholder=self.video_placeholder)
        self.picked_toc(timecode, text, video)
def picked_video(self, video, tag, tag_list):
    """
    Override this if you want to know what video the user selected; the
    default will continue on to prompt the user for a timecode contained
    in the video instead.
    video represents the video chosen by the user, and tag is the tag they
    chose (if prompted; otherwise it is None). The tag_list argument should
    be ignored by outside code, as its value and use changes depending on
    how the user is browsing around in the content.
    """
    # Use the default timecode prompt unless a template was configured.
    placeholder = (None if not self.timecode_placeholder else
                   self.timecode_placeholder.format(title=video['snippet.title']))
    select_timecode(video, lambda a, b: self.pick_toc(a, b, video, tag, tag_list),
                    show_back=True, placeholder=placeholder)
def picked_toc(self, timecode, text, video):
    """
    Override this if you want to know what timecode the user selected from
    the table of contents of their selected video. You get told the
    timecode string, the text of the TOC entry associated with it, and the
    information on the video the user selected.
    """
    # Intentional no-op hook; subclasses provide the behaviour.
    pass
###----------------------------------------------------------------------------
|
<reponame>apmoore1/target-extraction<filename>tests/analysis/dataset_statistics_test.py
from pathlib import Path
import math
import re
import pytest
import pandas as pd
from target_extraction.data_types import TargetTextCollection, TargetText
from target_extraction.data_types_util import Span
from target_extraction.analysis.dataset_statistics import get_sentiment_counts
from target_extraction.analysis.dataset_statistics import average_target_per_sentences
from target_extraction.analysis.dataset_statistics import dataset_target_sentiment_statistics
from target_extraction.analysis.dataset_statistics import tokens_per_target
from target_extraction.analysis.dataset_statistics import dataset_target_extraction_statistics
from target_extraction.analysis.dataset_statistics import _statistics_to_dataframe
from target_extraction.analysis.dataset_statistics import tokens_per_sentence
from target_extraction.tokenizers import whitespace
# Shared, read-only fixtures for every test in this module: the "train"
# collection from the sentiment-error-analysis test data.
DATA_DIR = Path(__file__, '..', '..', 'data', 'analysis', 'sentiment_error_analysis').resolve()
TRAIN_COLLECTION = TargetTextCollection.load_json(Path(DATA_DIR, 'train_with_blank.json'))
TRAIN_COLLECTION.name = 'train'
SENTIMENT_KEY = 'target_sentiments'
def test_get_sentiment_counts():
    """get_sentiment_counts: raw counts, normalised fractions and bad-key error."""
    num_pos = 2
    num_neu = 12
    num_neg = 5
    total = 19.0
    true_sentiment_counts = dict([('positive', num_pos), ('neutral', num_neu),
                                  ('negative', num_neg)])
    sentiment_counts = get_sentiment_counts(TRAIN_COLLECTION, normalised=False,
                                            sentiment_key=SENTIMENT_KEY)
    assert len(true_sentiment_counts) == len(sentiment_counts)
    for sentiment, count in true_sentiment_counts.items():
        assert count == sentiment_counts[sentiment]
    # NOTE(review): SENTIMENT_KEY is passed positionally here — presumably
    # the second positional parameter is sentiment_key; verify signature.
    sentiment_counts = get_sentiment_counts(TRAIN_COLLECTION, SENTIMENT_KEY)
    assert len(true_sentiment_counts) == len(sentiment_counts)
    for sentiment, count in true_sentiment_counts.items():
        assert count/total == sentiment_counts[sentiment]
    with pytest.raises(KeyError):
        get_sentiment_counts(TRAIN_COLLECTION, 'wrong_key')
def test_average_target_per_sentences():
    """ATS with and without restricting to sentences that contain targets."""
    number_targets = 19.0
    number_sentences = 6.0
    true_ats = number_targets / number_sentences
    assert true_ats == average_target_per_sentences(TRAIN_COLLECTION,
                                                    sentence_must_contain_targets=False)
    # One sentence in the fixture has no targets, so the denominator drops to 5.
    number_sentences = 5.0
    true_ats = number_targets / number_sentences
    assert true_ats == average_target_per_sentences(TRAIN_COLLECTION,
                                                    sentence_must_contain_targets=True)
def test_tokens_per_sentence():
    """Sentence-length histogram, before and after the collection is tokenised."""
    # Test the case where they would have to tokenize
    true_sentence_length = {13: 3, 23: 1, 21: 1, 9: 1}
    sentence_length = tokens_per_sentence(TRAIN_COLLECTION, whitespace())
    assert len(true_sentence_length) == len(sentence_length)
    for length, count in true_sentence_length.items():
        assert count == sentence_length[length]
    # Test the case when it does not need to be tokenised
    # NOTE(review): presumably the first call above tokenised the shared
    # collection in place, so this second call hits the cached tokens —
    # the call itself is textually identical; confirm.
    sentence_length = tokens_per_sentence(TRAIN_COLLECTION, whitespace())
    assert len(true_sentence_length) == len(sentence_length)
    for length, count in true_sentence_length.items():
        assert count == sentence_length[length]
def test__statistics_to_dataframe():
    """_statistics_to_dataframe: one collection, then two (sub + full)."""
    # Test with just one collection
    target_stats = dataset_target_extraction_statistics([TRAIN_COLLECTION])
    tl_1 = round((17/19.0) * 100, 2)
    tl_2 = round((2/19.0) * 100, 2)
    true_stats = {'Name': 'train', 'No. Sentences': 6, 'No. Sentences(t)': 5,
                  'No. Targets': 19, 'No. Uniq Targets': 13, 'ATS': round(19/6.0,2),
                  'ATS(t)': round(19/5.0,2), 'TL 1 %': tl_1, 'TL 2 %': tl_2,
                  'TL 3+ %': 0, 'Mean Sentence Length': 15.33,
                  'Mean Sentence Length(t)': 16.6}
    true_stats_list = {key: [value] for key, value in true_stats.items()}
    true_stats_df = pd.DataFrame(true_stats_list)
    test_stats_df = _statistics_to_dataframe(target_stats)
    # NOTE(review): check_less_precise is deprecated in modern pandas
    # (use rtol/atol) — pinned here to the project's pandas version.
    pd.testing.assert_frame_equal(true_stats_df, test_stats_df, check_less_precise=2)
    # Test with two collections
    subcollection = TargetTextCollection(name='sub')
    subcollection.add(TRAIN_COLLECTION["81207500773427072"])
    subcollection.add(TRAIN_COLLECTION["78522643479064576"])
    target_stats = dataset_target_extraction_statistics([subcollection, TRAIN_COLLECTION])
    tl_1 = round((6/7.0) * 100, 2)
    tl_2 = round((1/7.0) * 100, 2)
    sub_stats = {'Name': 'sub', 'No. Sentences': 2, 'No. Sentences(t)': 2,
                 'No. Targets': 7, 'No. Uniq Targets': 7, 'ATS': round(7/2.0, 2),
                 'ATS(t)': round(7/2.0, 2),
                 'TL 1 %': tl_1, 'TL 2 %': tl_2, 'TL 3+ %': 0,
                 'Mean Sentence Length': 13, 'Mean Sentence Length(t)': 13}
    # Row order must match the input collection order: sub first, train second.
    true_stats_list = {key: [value, true_stats[key]]
                       for key, value in sub_stats.items()}
    true_stats_df = pd.DataFrame(true_stats_list)
    test_stats_df = _statistics_to_dataframe(target_stats)
    pd.testing.assert_frame_equal(true_stats_df, test_stats_df, check_less_precise=2)
def test_tokens_per_target():
    """tokens_per_target: raw counts, normalised fractions, cumulative %."""
    # standard/normal case
    length_count = tokens_per_target(TRAIN_COLLECTION, 'targets', whitespace())
    true_length_count = {1: 17, 2: 2}
    assert len(length_count) == len(true_length_count)
    for length, count in true_length_count.items():
        assert count == length_count[length]
    # normalise
    length_frac = tokens_per_target(TRAIN_COLLECTION, 'targets', whitespace(),
                                    normalise=True)
    true_length_frac = {1: 17/19.0, 2: 2/19.0}
    assert len(length_frac) == len(true_length_frac)
    for length, frac in true_length_frac.items():
        assert math.isclose(frac, length_frac[length], rel_tol=0.01)
    # cumulative percentage
    length_dist = tokens_per_target(TRAIN_COLLECTION, 'targets', whitespace(),
                                    cumulative_percentage=True)
    true_length_dist = {1: 17/19.0, 2: 1.0}
    assert len(length_dist) == len(true_length_dist)
    for length, dist in true_length_dist.items():
        # Function returns percentages, the expected values here are fractions.
        assert math.isclose(dist * 100, length_dist[length], rel_tol=0.01)
@pytest.mark.parametrize("incl_sentence_statistics", (False, True))
@pytest.mark.parametrize("lower", (False, None))
def test_dataset_target_extraction_statistics(lower: bool,
                                              incl_sentence_statistics: bool):
    """Extraction statistics for one collection and for a pair of collections.

    lower=None exercises the function's default lower_target behaviour;
    lower=False raises the unique-target count (case-sensitive matching).
    """
    if lower is not None:
        target_stats = dataset_target_extraction_statistics([TRAIN_COLLECTION],
                                                            lower_target=lower,
                                                            incl_sentence_statistics=incl_sentence_statistics)
    else:
        target_stats = dataset_target_extraction_statistics([TRAIN_COLLECTION],
                                                            incl_sentence_statistics=incl_sentence_statistics)
    tl_1 = round((17/19.0) * 100, 2)
    tl_2 = round((2/19.0) * 100, 2)
    true_stats = {'Name': 'train', 'No. Sentences': 6, 'No. Sentences(t)': 5,
                  'No. Targets': 19, 'No. Uniq Targets': 13, 'ATS': round(19/6.0, 2),
                  'ATS(t)': round(19/5.0, 2), 'TL 1 %': tl_1, 'TL 2 %': tl_2,
                  'TL 3+ %': 0.0, 'Mean Sentence Length': 15.33,
                  'Mean Sentence Length(t)': 16.6}
    if not incl_sentence_statistics:
        del true_stats['Mean Sentence Length(t)']
        del true_stats['Mean Sentence Length']
    if lower == False:
        true_stats['No. Uniq Targets'] = 14
    assert 1 == len(target_stats)
    target_stats = target_stats[0]
    assert len(true_stats) == len(target_stats)
    for stat_name, stat in true_stats.items():
        # Target-length percentages are floats: compare with tolerance.
        if re.search(r'^TL', stat_name):
            assert math.isclose(stat, target_stats[stat_name], rel_tol=0.001)
        else:
            assert stat == target_stats[stat_name], stat_name
    # Multiple collections, where one collection is just the subset of the other
    subcollection = TargetTextCollection(name='sub')
    subcollection.add(TRAIN_COLLECTION["81207500773427072"])
    subcollection.add(TRAIN_COLLECTION["78522643479064576"])
    # Adds a 3+ token target so the 'TL 3+ %' bucket is non-zero for 'sub'.
    long_target = TargetText(text='some text that contains a long target or two',
                             spans=[Span(0,14), Span(15, 37)],
                             targets=['some text that', 'contains a long target'],
                             target_sentiments=['positive', 'negative'],
                             text_id='100')
    subcollection.add(long_target)
    subcollection.tokenize(whitespace())
    if lower is not None:
        target_stats = dataset_target_extraction_statistics([subcollection, TRAIN_COLLECTION],
                                                            lower_target=lower,
                                                            incl_sentence_statistics=incl_sentence_statistics)
    else:
        target_stats = dataset_target_extraction_statistics([subcollection, TRAIN_COLLECTION],
                                                            incl_sentence_statistics=incl_sentence_statistics)
    tl_1 = round((6/9.0) * 100, 2)
    tl_2 = round((1/9.0) * 100, 2)
    tl_3 = round((2/9.0) * 100, 2)
    sub_stats = {'Name': 'sub', 'No. Sentences': 3, 'No. Sentences(t)': 3,
                 'No. Targets': 9, 'No. Uniq Targets': 9, 'ATS': round(9/3.0, 2),
                 'ATS(t)': round(9/3.0, 2), 'TL 1 %': tl_1, 'TL 2 %': tl_2,
                 'TL 3+ %': tl_3, 'Mean Sentence Length': 11.67,
                 'Mean Sentence Length(t)': 11.67}
    if not incl_sentence_statistics:
        del sub_stats['Mean Sentence Length(t)']
        del sub_stats['Mean Sentence Length']
    true_stats = [sub_stats, true_stats]
    assert len(true_stats) == len(target_stats)
    for stat_index, stat in enumerate(true_stats):
        test_stat = target_stats[stat_index]
        assert len(stat) == len(test_stat)
        for stat_name, stat_value in stat.items():
            if re.search(r'^TL', stat_name):
                assert math.isclose(stat_value, test_stat[stat_name], rel_tol=0.001)
            else:
                assert stat_value == test_stat[stat_name], stat_name
@pytest.mark.parametrize("lower", (False, None))
def test_dataset_target_sentiment_statistics(lower: bool):
    """Sentiment statistics (counts + percentages) for one and two collections.

    lower=None exercises the default lower_target behaviour; lower=False
    raises the unique-target count (case-sensitive matching).

    Fix: removed a leftover debug ``print(target_stats)`` that spammed the
    test output on every parametrised run.
    """
    if lower is not None:
        target_stats = dataset_target_sentiment_statistics([TRAIN_COLLECTION],
                                                           lower_target=lower)
    else:
        target_stats = dataset_target_sentiment_statistics([TRAIN_COLLECTION])
    # Expected "count (percent)" strings, derived from get_sentiment_counts.
    pos_percent = round(get_sentiment_counts(TRAIN_COLLECTION, SENTIMENT_KEY)['positive'] * 100, 2)
    pos_count = get_sentiment_counts(TRAIN_COLLECTION, SENTIMENT_KEY, normalised=False)['positive']
    pos_count_percent = f'{pos_count} ({pos_percent})'
    neu_percent = round(get_sentiment_counts(TRAIN_COLLECTION, SENTIMENT_KEY)['neutral'] * 100, 2)
    neu_count = get_sentiment_counts(TRAIN_COLLECTION, SENTIMENT_KEY, normalised=False)['neutral']
    neu_count_percent = f'{neu_count} ({neu_percent})'
    neg_percent = round(get_sentiment_counts(TRAIN_COLLECTION, SENTIMENT_KEY)['negative'] * 100, 2)
    neg_count = get_sentiment_counts(TRAIN_COLLECTION, SENTIMENT_KEY, normalised=False)['negative']
    neg_count_percent = f'{neg_count} ({neg_percent})'
    tl_1 = round((17/19.0) * 100, 2)
    tl_2 = round((2/19.0) * 100, 2)
    true_stats = {'Name': 'train', 'No. Sentences': 6, 'No. Sentences(t)': 5,
                  'No. Targets': 19, 'No. Uniq Targets': 13, 'ATS': round(19/6.0, 2),
                  'ATS(t)': round(19/5.0, 2), 'POS (%)': pos_count_percent,
                  'NEG (%)': neg_count_percent, 'NEU (%)': neu_count_percent,
                  'TL 1 %': tl_1, 'TL 2 %': tl_2, 'TL 3+ %': 0.0,
                  'Mean Sentence Length': 15.33, 'Mean Sentence Length(t)': 16.6}
    if lower == False:
        true_stats['No. Uniq Targets'] = 14
    assert 1 == len(target_stats)
    target_stats = target_stats[0]
    assert len(true_stats) == len(target_stats)
    for stat_name, stat in true_stats.items():
        # Target-length percentages are floats: compare with tolerance.
        if re.search(r'^TL', stat_name):
            assert math.isclose(stat, target_stats[stat_name], rel_tol=0.001)
        else:
            assert stat == target_stats[stat_name], stat_name
    # Multiple collections, where one collection is just the subset of the other
    subcollection = TargetTextCollection(name='sub')
    subcollection.add(TRAIN_COLLECTION["81207500773427072"])
    subcollection.add(TRAIN_COLLECTION["78522643479064576"])
    if lower is not None:
        target_stats = dataset_target_sentiment_statistics([subcollection, TRAIN_COLLECTION],
                                                           lower_target=lower)
    else:
        target_stats = dataset_target_sentiment_statistics([subcollection, TRAIN_COLLECTION])
    pos_percent = round(get_sentiment_counts(subcollection, SENTIMENT_KEY)['positive'] * 100, 2)
    pos_count = get_sentiment_counts(subcollection, SENTIMENT_KEY, normalised=False)['positive']
    pos_count_percent = f'{pos_count} ({pos_percent})'
    neu_percent = round(get_sentiment_counts(subcollection, SENTIMENT_KEY)['neutral'] * 100, 2)
    neu_count = get_sentiment_counts(subcollection, SENTIMENT_KEY, normalised=False)['neutral']
    neu_count_percent = f'{neu_count} ({neu_percent})'
    neg_percent = round(get_sentiment_counts(subcollection, SENTIMENT_KEY)['negative'] * 100, 2)
    neg_count = get_sentiment_counts(subcollection, SENTIMENT_KEY, normalised=False)['negative']
    neg_count_percent = f'{neg_count} ({neg_percent})'
    tl_1 = round((6/7.0) * 100, 2)
    tl_2 = round((1/7.0) * 100, 2)
    sub_stats = {'Name': 'sub', 'No. Sentences': 2, 'No. Sentences(t)': 2,
                 'No. Targets': 7, 'No. Uniq Targets': 7, 'ATS': round(7/2.0, 2),
                 'ATS(t)': round(7/2.0, 2), 'POS (%)': pos_count_percent,
                 'NEG (%)': neg_count_percent, 'NEU (%)': neu_count_percent,
                 'TL 1 %': tl_1, 'TL 2 %': tl_2, 'TL 3+ %': 0.0,
                 'Mean Sentence Length': 13, 'Mean Sentence Length(t)': 13}
    true_stats = [sub_stats, true_stats]
    assert len(true_stats) == len(target_stats)
    for stat_index, stat in enumerate(true_stats):
        test_stat = target_stats[stat_index]
        assert len(stat) == len(test_stat)
        for stat_name, stat_value in stat.items():
            if re.search(r'^TL', stat_name):
                assert math.isclose(stat_value, test_stat[stat_name], rel_tol=0.001)
            else:
                assert stat_value == test_stat[stat_name], stat_name
<gh_stars>10-100
from kivy.app import App
from kivy.properties import ObjectProperty, OptionProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.checkbox import CheckBox
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.settings import Settings, InterfaceWithNoMenu, SettingOptions, SettingItem, \
SettingNumeric, SettingString, SettingBoolean, SettingSpacer, SettingsPanel
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from utils import _, import_kv
import_kv(__file__)
class KognitivoSettingsInterface(InterfaceWithNoMenu):
    """Settings interface without a side menu; styling comes from the kv file."""
    pass
class KognitivoSettingTitle(Label):
    """Section title label whose ``title`` is shown translated via gettext."""
    # NOTE(review): aliases Kivy's ``text`` property under the name ``title``
    # so assigning title triggers on_title — confirm this binding works as
    # intended across Kivy versions.
    title = Label.text

    def on_title(self, instance, value):
        # Display the translated title text.
        self.text = _(value)
class KognitivoSettingItem(SettingItem):
    """Base setting item; behaviour inherited, appearance set in the kv file."""
    pass
class KognitivoSettingString(SettingString, KognitivoSettingItem):
    """String setting restyled through KognitivoSettingItem."""
    pass
class KognitivoSettingNumeric(SettingNumeric, KognitivoSettingString):
    """Numeric setting reusing the restyled string-setting popup."""
    pass
class KognitivoSettingOptionsButton(ToggleButton):
    """Toggle button representing one option; ``value`` holds the option string."""
    value = ObjectProperty()
class KognitivoSettingOptions(SettingOptions, KognitivoSettingItem):
    """Options setting with a custom popup of toggle buttons.

    Overrides Kivy's default option popup to use KognitivoSettingOptionsButton
    widgets grouped so only one can be down at a time.
    """
    def _create_popup(self, instance):
        # Local imports: Window/metrics are only needed when the popup opens.
        from kivy.core.window import Window
        from kivy.metrics import dp
        # create the popup
        content = BoxLayout(orientation='vertical', spacing='5dp')
        popup_width = min(0.95 * Window.width, dp(500))
        self.popup = popup = Popup(
            content=content, title=self.title, size_hint=(None, None),
            size=(popup_width, '400dp'))
        # Height scales with the number of options plus fixed chrome.
        popup.height = len(self.options) * dp(55) + dp(150)
        # add all the options
        content.add_widget(Widget(size_hint_y=None, height=1))
        # The uid groups the toggle buttons so selection is mutually exclusive.
        uid = str(self.uid)
        for option in self.options:
            state = 'down' if option == self.value else 'normal'
            btn = KognitivoSettingOptionsButton(value=option, state=state, group=uid)
            btn.bind(on_release=self._set_option)
            content.add_widget(btn)
        # finally, add a cancel button to return on the previous panel
        btn = Button(text=_('Cancel'), size_hint_y=None, height=dp(50))
        content.add_widget(SettingSpacer())
        btn.bind(on_release=popup.dismiss)
        content.add_widget(btn)
        # and open the popup !
        popup.open()

    def _set_option(self, instance):
        # Persist the chosen value and close the popup.
        self.value = instance.value
        self.popup.dismiss()
class GroupToggle(SettingItem):
    """Setting item that enables/disables a group of sibling setting items.

    ``toggled_keys`` lists the config keys whose widgets' ``disabled`` state
    should follow this item's value (a falsy value disables them).
    """
    def __init__(self, toggled_keys=None, **kwargs):
        # Normalise None to a fresh list (avoids a shared mutable default).
        self.toggled_item_keys = toggled_keys or []
        super(GroupToggle, self).__init__(**kwargs)

    def on_value(self, instance, value):
        super(GroupToggle, self).on_value(instance, value)
        for key in self.toggled_item_keys:
            item = self.panel.settings.get_item(key, self.panel)
            if item:
                # Config values arrive as strings; "0" means off → disable.
                item.disabled = int(value) == 0
class KognitivoSettingBoolean(SettingBoolean, GroupToggle, KognitivoSettingItem):
    """Boolean setting that can also toggle a group of dependent items."""
    pass
class TimeSettingLabel(Label):
    """Label showing the HH:MM value in the time-setting popup (kv-styled)."""
    pass
class TimeSettingButton(Button):
    """Increment/decrement button for the time popup; direction is 'up' or 'down'."""
    direction = OptionProperty('up', options=['up', 'down'])
class SettingTime(KognitivoSettingString):
    """HH:MM time setting edited via up/down buttons in a custom popup."""
    up_button = ObjectProperty(None)
    down_button = ObjectProperty(None)

    def _validate(self, instance):
        from datetime import datetime
        self._dismiss()
        try:
            # Only commit the value if it parses as a valid HH:MM time.
            datetime.strptime(self.textinput.text, '%H:%M')
            self.value = self.textinput.text
        except ValueError:
            # Invalid text: silently keep the previous value.
            return

    def on_button_press(self, instance):
        from datetime import datetime, timedelta
        # Step the displayed time by +/- 30 minutes; strftime wraps midnight.
        interval = timedelta(minutes=30) if instance.direction == 'up' else timedelta(minutes=-30)
        current_datetime = datetime.strptime(self.textinput.text, '%H:%M')
        self.textinput.text = (current_datetime + interval).time().strftime('%H:%M')

    def _create_popup(self, instance):
        # create popup layout
        content = BoxLayout(orientation='vertical', spacing='5dp')
        self.popup = popup = Popup(title=self.title, content=content, size_hint=(.9, .6))
        self.textinput = TimeSettingLabel(text=self.value)
        self.textinput.bind(on_text_validate=self._validate)
        self.up_button = TimeSettingButton(direction='up')
        self.down_button = TimeSettingButton(direction='down')
        self.up_button.bind(on_press=self.on_button_press)
        self.down_button.bind(on_press=self.on_button_press)
        # construct the content, widget are used as a spacer
        content.add_widget(Widget())
        content.add_widget(self.up_button)
        content.add_widget(self.textinput)
        content.add_widget(self.down_button)
        content.add_widget(Widget())
        content.add_widget(SettingSpacer())
        # 2 buttons are created for accept or cancel the current value
        btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
        # Glyphicons font: \uE013 is the accept mark, \uE014 the cancel mark.
        btn = Button(text=u"\uE013", font_name="glyphicons")
        btn.bind(on_release=self._validate)
        btnlayout.add_widget(btn)
        btn = Button(text=u"\uE014", font_name="glyphicons")
        btn.bind(on_release=self._dismiss)
        btnlayout.add_widget(btn)
        content.add_widget(btnlayout)
        # all done, open the popup !
        popup.open()
class KognitivoCheckBox(CheckBox):
    """Checkbox with app-specific styling from the kv file."""
    pass
class RateUsLabel(ButtonBehavior, Label):
    """Clickable "rate us" label appended to the settings panel (kv-styled)."""
    pass
class VersionLabel(Label):
    """Label showing the app version at the bottom of the settings panel."""
    pass
class KognitivoSettingButton(ButtonBehavior, KognitivoSettingItem):
    """Action-button setting: pressing it logs out of Google and removes itself.

    NOTE(review): the logout action is hard-coded here rather than configured —
    presumably this button type is only used for the logout entry; confirm.
    """
    def on_press(self):
        app = App.get_running_app()
        app.google_client.logout()
        # The button is one-shot: once logged out there is nothing to press.
        self.parent.remove_widget(self)
class KognitivoSettings(Settings):
    """Settings widget that registers the app's custom setting types and
    builds JSON panels filtered by platform/profile."""

    def __init__(self, **kwargs):
        super(KognitivoSettings, self).__init__(**kwargs)
        # Map JSON "type" strings to the app's widget classes.
        self.register_type('title', KognitivoSettingTitle)
        self.register_type('options', KognitivoSettingOptions)
        self.register_type('numeric', KognitivoSettingNumeric)
        self.register_type('string', KognitivoSettingString)
        self.register_type('bool', KognitivoSettingBoolean)
        self.register_type('button', KognitivoSettingButton)
        self.register_type('time', SettingTime)

    def add_kivy_panel(self):
        # Suppress Kivy's built-in settings panel.
        pass

    def create_json_panel(self, title, config, filename=None, data=None):
        """Build a SettingsPanel from JSON *data*, skipping entries whose
        optional 'platforms'/'profiles' filters don't match this device."""
        import settings as platform_settings
        from kivy.utils import platform
        import json
        sdata = [d for d in json.loads(data)]
        processed_data = []
        for d in sdata:
            platforms = d.get('platforms', None)
            profiles = d.get('profiles', None)
            # Keep entries with no filter, a matching platform filter, or a
            # matching platform + profile filter.
            to_add = bool(not platforms and not profiles)
            to_add |= bool(platforms and platform in platforms and not profiles)
            to_add |= bool(platforms and platform in platforms and platform_settings.PROFILE in profiles)
            if to_add:
                processed_data.append(d)
        panel = SettingsPanel(title=title, settings=self, config=config)
        for setting in processed_data:
            # determine the type and the class to use
            if 'type' not in setting:
                raise ValueError('One setting are missing the "type" element')
            ttype = setting['type']
            cls = self._types.get(ttype)
            if cls is None:
                raise ValueError(
                    'No class registered to handle the <%s> type' %
                    setting['type'])
            # create a instance of the class, without the type attribute
            del setting['type']
            str_settings = {}
            for key, item in setting.items():
                str_settings[str(key)] = item
            instance = cls(panel=panel, **str_settings)
            # instance created, add to the panel
            panel.add_widget(instance)
        notification_toggle = self.get_item('enable_notifications', panel)
        # needed to trigger enabling/disabling the groups after the settings panel is built
        notification_toggle.on_value(self, notification_toggle.value)
        panel.add_widget(RateUsLabel())
        panel.add_widget(VersionLabel())
        return panel

    def get_item(self, key, panel=None):
        """Return the SettingItem with config key *key*, or None (implicitly)
        when no child of the panel matches."""
        panel = panel or self.interface.panels[0]
        for child in panel.children:
            if isinstance(child, SettingItem) and getattr(child, 'key', None) == key:
                return child
|
# -*- coding: utf-8 -*-
import pytest
from test.helper import check_evaluation, session
from mathics_scanner.errors import IncompleteSyntaxError
str_test_set_with_oneidentity = """
SetAttributes[SUNIndex, {OneIdentity}];
SetAttributes[SUNFIndex, {OneIdentity}];
SUNIndex[SUNFIndex[___]]:=
(Print["This error shouldn't be triggered here!"];
Abort[]);
SUNFIndex[SUNIndex[___]]:=
(Print["This error shouldn't be triggered here!"];
Abort[]);
SUNIndex /: MakeBoxes[SUNIndex[p_], TraditionalForm]:=ToBoxes[p, TraditionalForm];
SUNFIndex /: MakeBoxes[SUNFIndex[p_], TraditionalForm]:=ToBoxes[p, TraditionalForm];
"""
def test_setdelayed_oneidentity():
    """
    This test checks the behavior of DelayedSet over
    symbols with the attribute OneIdentity.
    """
    expr = ""
    # Feed the program line by line, accumulating until a complete statement
    # parses; IncompleteSyntaxError means "keep reading more lines".
    for line in str_test_set_with_oneidentity.split("\n"):
        if line in ("", "\n"):
            continue
        expr = expr + line
        try:
            check_evaluation(
                expr, "Null", to_string_expr=False, to_string_expected=False
            )
            expr = ""
        except IncompleteSyntaxError:
            continue
@pytest.mark.parametrize(
    ("str_expr", "str_expected", "msg"),
    [
        # A (None, None, None) row resets the session so the following
        # cases run in a fresh environment (see the docstring below).
        (
            None,
            None,
            None,
        ),
        ("Attributes[Pi]", "{Constant, Protected, ReadProtected}", None),
        ("Unprotect[Pi]; Pi=.; Attributes[Pi]", "{Constant, ReadProtected}", None),
        ("Unprotect[Pi];Clear[Pi]; Attributes[Pi]", "{Constant, ReadProtected}", None),
        ("Unprotect[Pi];ClearAll[Pi]; Attributes[Pi]", "{}", None),
        ("Options[Expand]", "{Modulus :> 0, Trig :> False}", None),
        (
            "Unprotect[Expand]; Expand=.; Options[Expand]",
            "{Modulus :> 0, Trig :> False}",
            None,
        ),
        (
            "Clear[Expand];Options[Expand]=Join[Options[Expand], {MyOption:>Automatic}]; Options[Expand]",
            "{MyOption :> Automatic, Modulus :> 0, Trig :> False}",
            "Mathics stores options in a dictionary. This is why ``MyOption`` appears first.",
        ),
        # (
        #     "ClearAll[Expand]; Options[Expand]",
        #     "{}",
        #     "In WMA, options are erased, including the builtin options",
        # ),
        (None, None, None),
        # Check over a builtin symbol
        (
            "{Pi, Unprotect[Pi];Pi=3;Pi, Clear[Pi];Pi}",
            "{Pi, 3, Pi}",
            None,
        ),
        (
            "{Pi, Unprotect[Pi];Pi=3;Pi, ClearAll[Pi];Pi}",
            "{Pi, 3, Pi}",
            None,
        ),
        (
            "{Pi, Unprotect[Pi];Pi=3;Pi, Pi = .; Pi}",
            "{Pi, 3, Pi}",
            None,
        ),
        # Check over a user defined symbol
        (
            "{F[a, b], F=Q; F[a,b], Clear[F]; F[a,b]}",
            "{F[a, b], Q[a, b], F[a, b]}",
            None,
        ),
        (
            "{F[a, b], F=Q; F[a,b], ClearAll[F]; F[a,b]}",
            "{F[a, b], Q[a, b], F[a, b]}",
            None,
        ),
        (
            "{F[a, b], F=Q; F[a,b], F=.; F[a,b]}",
            "{F[a, b], Q[a, b], F[a, b]}",
            None,
        ),
        # Check over a user defined symbol
        (
            "{F[a, b], F[x__]:=H[x]; F[a,b], Clear[F]; F[a,b]}",
            "{F[a, b], H[a, b], F[a, b]}",
            None,
        ),
        (
            "{F[a, b], F[x__]:=H[x]; F[a,b], ClearAll[F]; F[a,b]}",
            "{F[a, b], H[a, b], F[a, b]}",
            None,
        ),
        (
            None,
            None,
            None,
        ),
        (
            "{F[a, b], F[x__]:=H[x]; F[a,b], F=.; F[a,b]}",
            "{F[a, b], H[a, b], H[a, b]}",
            None,
        ),
        (
            None,
            None,
            None,
        ),
        (
            "{F[a, b], F[x__]:=H[x]; F[a,b], F[x__]=.; F[a,b]}",
            "{F[a, b], H[a, b], F[a, b]}",
            None,
        ),
        # Check over a builtin operator
        (
            "{a+b, Unprotect[Plus]; Plus=Q; a+b, Plus=.; a+b}",
            "{a + b, Q[a, b], a + b}",
            None,
        ),
        (
            "{a+b, Unprotect[Plus]; Plus=Q; a+b, Clear[Plus]; a+b}",
            "{a + b, Q[a, b], a + b}",
            None,
        ),
        (
            "{a+b, Unprotect[Plus]; Plus=Q; a+b, ClearAll[Plus]; a+b}",
            "{a + b, Q[a, b], a + b}",
            None,
        ),
        (
            None,
            None,
            None,
        ),
    ],
)
def test_set_and_clear(str_expr, str_expected, msg):
    """
    Test calls to Set, Clear and ClearAll. If
    str_expr is None, the session is reset,
    in a way that the next test run over a fresh
    environment.
    """
    check_evaluation(
        str_expr,
        str_expected,
        to_string_expr=True,
        to_string_expected=True,
        hold_expected=True,
        failure_message=msg,
    )
@pytest.mark.parametrize(
    ("str_expr", "str_expected", "message", "out_msgs"),
    [
        ("Pi=4", "4", "Trying to set a protected symbol", ("Symbol Pi is Protected.",)),
        (
            "Clear[Pi]",
            "Null",
            "Trying to clear a protected symbol",
            ("Symbol Pi is Protected.",),
        ),
        (
            "Unprotect[$ContextPath];Clear[$Context]",
            "Null",
            "Trying clear $Context",
            ("Special symbol $Context cannot be cleared.",),
        ),
        (
            "Unprotect[$ContextPath];Clear[$ContextPath]",
            "Null",
            "Trying clear $ContextPath",
            ("Special symbol $ContextPath cannot be cleared.",),
        ),
        (
            "A=1; B=2; Clear[A, $Context, B];{A,$Context,B}",
            "{A, Global`, B}",
            "This clears A and B, but not $Context",
            ("Special symbol $Context cannot be cleared.",),
        ),
        (
            "A=1; B=2; ClearAll[A, $Context, B];{A,$Context,B}",
            "{A, Global`, B}",
            "This clears A and B, but not $Context",
            ("Special symbol $Context cannot be cleared.",),
        ),
        (
            "A=1; B=2; ClearAll[A, $ContextPath, B];{A,$ContextPath,B}",
            "{A, {System`, Global`}, B}",
            "This clears A and B, but not $ContextPath",
            ("Special symbol $ContextPath cannot be cleared.",),
        ),
        (
            "A=1; B=2; ClearAll[A, $ContextPath, B];{A,$ContextPath,B}",
            "{A, {System`, Global`}, B}",
            "This clears A and B, but not $ContextPath",
            ("Special symbol $ContextPath cannot be cleared.",),
        ),
    ],
)
def test_set_and_clear_messages(str_expr, str_expected, message, out_msgs):
    """Set/Clear on protected and special symbols must emit the expected
    messages while leaving the symbols intact."""
    # Start each case from a clean slate of user symbols.
    session.evaluate("ClearAll[a, b, A, B, F, H, Q]")
    check_evaluation(
        str_expr,
        str_expected,
        to_string_expr=True,
        to_string_expected=True,
        hold_expected=True,
        failure_message=message,
        expected_messages=out_msgs,
    )
def test_predecrement():
    """Pre-decrement of a raw literal: --5 parses as -(-5) == 4."""
    check_evaluation(
        "--5", "4", failure_message="Set::setraw: Cannot assign to raw object 5."
    )
def test_assign_list():
    """A downvalue with a pattern test (x_Real) only fires for matching values."""
    check_evaluation("G[x_Real]=x^2; a={G[x]}; {x=1.; a, x=.; a}", "{{1.}, {G[x]}}")
|
<reponame>v-hill/gpu-price-tracker
"""
Main script for running the scraper.
"""
import datetime
import logging
import logging.config
import os
import re
import shutil
from django.utils.timezone import make_aware
from scraper.models import URL, BrandMenu, EbayGraphicsCard, Log, Sale
from scraper.src.product import EBayItem
from scraper.src.webpage import BrandWebPage
def backup_database(database_path):
    """Copy the database at *database_path* to a timestamped backup file.

    The backup is created alongside the original, named
    ``YYYY_MM_DD__HH_MM_SS_<stem><suffix>``. A missing file is a no-op.

    Args:
        database_path (pathlib.Path): path to the database file.
    """
    # Idiom: the function already relies on Path (.stem/.suffix/.parent),
    # so use Path.exists() instead of os.path.exists().
    if database_path.exists():
        timestamp = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
        new_name = f"{timestamp}_{database_path.stem}{database_path.suffix}"
        new_database_path = database_path.parent / new_name
        # Lazy %-formatting: message only built when INFO is enabled.
        logging.info(" Backing up database to: %s", new_database_path)
        shutil.copy(database_path, new_database_path)
def add_new_gpus(accepted_substrings, log):
    """Create EbayGraphicsCard rows for menu entries matching *accepted_substrings*.

    Scans every scraped BrandMenu entry; an entry whose text contains any of
    the substrings (case-insensitive) and has no existing card row gets a new
    card, bulk-inserted at the end and attached to *log*.
    """
    brand_menu_qs = BrandMenu.objects.all()
    new_gpu_entries = []
    for entry in brand_menu_qs:
        # Case-insensitive substring match against the configured names.
        name_contains_substring = any(
            x in entry.text.lower()
            for x in [f.lower() for f in accepted_substrings]
        )
        if name_contains_substring:
            gpu_entry = EbayGraphicsCard.objects.filter(
                name__exact=entry.text
            ).first()
            if gpu_entry is None:
                # NOTE(review): new cards start with data_collected=True and a
                # year-2000 last_collection — presumably so the reset pass
                # flips them to pending on the next run; confirm.
                new_gpu_entry = EbayGraphicsCard(
                    log=log,
                    name=entry.text,
                    collect_data=True,
                    data_collected=True,
                    last_collection=make_aware(
                        datetime.datetime(2000, 1, 1, 1, 1)
                    ),
                )
                new_gpu_entries.append(new_gpu_entry)
    # Single bulk INSERT instead of one query per new card.
    EbayGraphicsCard.objects.bulk_create(new_gpu_entries)
def reset_data_collected_flag(reset_hours, log):
    """Mark cards as pending again when their last collection is older than
    *reset_hours* hours, re-attaching them to *log*.

    Bug fix: the original compared ``diff.seconds``, which is only the
    sub-day remainder of the timedelta (0-86399) and ignores ``days`` —
    a card last collected an exact multiple of 24h ago would never be
    reset. ``total_seconds()`` measures the full elapsed time.
    """
    current_datetime = make_aware(datetime.datetime.now())
    for gpu_entry in EbayGraphicsCard.objects.all():
        if gpu_entry.collect_data:
            diff = current_datetime - gpu_entry.last_collection
            if diff.total_seconds() >= (60 * 60 * reset_hours):
                gpu_entry.data_collected = False
                gpu_entry.log = log
                gpu_entry.save()
def data_left_to_collect():
    """Return True when at least one enabled GPU still awaits collection."""
    # Any card flagged for collection whose data has not been gathered yet?
    pending = EbayGraphicsCard.objects.filter(
        data_collected=False, collect_data=True
    ).first()
    if pending is not None:
        return True
    logging.info("No GPUs in current log without data collected")
    return False
def navigate_to_gpu_page(webpage, gpu_button_id):
    """Drive the browser from the start URL to the results page for one GPU.

    The steps mirror the site's filter UI: open the model menu, expand the
    full filter list, tick the GPU's checkbox, then apply the selection.
    """
    logging.info(" Navigating to page of GPU")
    webpage.return_to_start_url()
    webpage.open_model_menu()
    webpage.open_all_filter_menu()
    webpage.select_option(button_id=gpu_button_id)
    webpage.apply_selection()
def create_url_obj(url, log, gpu):
    """Ensure a URL row exists for (*url*, *log*, *gpu*).

    Fix: ``get_or_create`` returns an ``(object, created)`` tuple; the
    original bound it to an unused local that shadowed the *url* parameter.
    The useless assignment is removed.
    """
    URL.objects.get_or_create(url=url, log=log, gpu=gpu)
def make_sales_objects(
    brand_webpage,
    log,
    gpu,
):
    """Parse the current results page and store each listed sale.

    Tries a fast bulk insert first and falls back to row-by-row insertion
    when that fails. Returns ``(num_added_to_db, num_already_in_db)``.

    Raises:
        Exception: when no item tags are found on the page.

    Fix: the fallback used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """
    # create sale objects
    logging.info(" Creating sale objects")
    soup = brand_webpage.page_source_soup()
    items_container = soup.find(
        "ul", {"class": re.compile("srp-results srp-grid")}
    )
    item_tags = items_container.find_all(
        "div", {"class": "s-item__wrapper clearfix"}
    )
    if len(item_tags) == 0:
        raise Exception("No items found on page")
    try:
        num_already_in_db, num_added_to_db = bulk_insertion(
            log, gpu, item_tags
        )
    except Exception:
        # Bulk path failed (e.g. one bad row aborts the batch); retry
        # row-by-row so the good rows are still saved.
        num_already_in_db, num_added_to_db = individual_insertion(
            log, gpu, item_tags
        )
    logging.info(f" {num_added_to_db} new sale objects added")
    logging.info(f" {num_already_in_db} sale objects already in db")
    return num_added_to_db, num_already_in_db
def bulk_insertion(log, gpu, item_tags):
    """Insert scraped sales in one bulk query, skipping rows already stored.

    Returns ``(num_already_in_db, num_added_to_db)`` — note the order is the
    reverse of make_sales_objects' return value.
    """
    num_already_in_db = 0
    num_added_to_db = 0
    new_sale_items = []
    for tag in item_tags:
        new_item = EBayItem(tag)
        new_item.clean_item()
        item_kwargs = new_item.get_kwargs()
        # Django needs timezone-aware datetimes when USE_TZ is on.
        item_kwargs["date"] = make_aware(item_kwargs["date"])
        # Dedupe: an identical existing row means this sale was seen before.
        new_sale = Sale.objects.filter(**item_kwargs).first()
        if new_sale is None:
            new_sale = Sale(**item_kwargs)
            new_sale.log = log
            new_sale.gpu = gpu
            new_sale_items.append(new_sale)
            num_added_to_db += 1
        else:
            num_already_in_db += 1
    Sale.objects.bulk_create(new_sale_items)
    return num_already_in_db, num_added_to_db
def individual_insertion(log, gpu, item_tags):
    """Row-by-row fallback for bulk_insertion: saves each sale individually
    so a single bad row cannot abort the whole batch.

    Returns ``(num_already_in_db, num_added_to_db)``, matching bulk_insertion.
    """
    logging.info(" performing individual insertion")
    num_already_in_db = 0
    num_added_to_db = 0
    for tag in item_tags:
        new_item = EBayItem(tag)
        new_item.clean_item()
        item_kwargs = new_item.get_kwargs()
        # Django needs timezone-aware datetimes when USE_TZ is on.
        item_kwargs["date"] = make_aware(item_kwargs["date"])
        new_sale = Sale.objects.filter(**item_kwargs).first()
        if new_sale is None:
            new_sale = Sale(**item_kwargs)
            new_sale.log = log
            new_sale.gpu = gpu
            new_sale.save()
            num_added_to_db += 1
        else:
            num_already_in_db += 1
    return num_already_in_db, num_added_to_db
def collect_data(log, gpu, brand_webpage):
    """Scrape all result pages for *gpu*, then update *log* and *gpu* rows.

    Pagination stops when there is no next page or when a page yields mostly
    already-seen sales (threshold 45), meaning we've caught up with the
    previous run.
    """
    brand_webpage.get_pages()  # Find page number buttons
    next_page_exists = True  # assume next page exists
    num_added_to_db, num_already_in_db = make_sales_objects(
        brand_webpage,
        log,
        gpu,
    )
    while next_page_exists and num_already_in_db <= 45:
        # Naviagte to the next page and collect item data
        next_page_exists = brand_webpage.nav_to_next_page()
        if next_page_exists:
            new_num_added_to_db, new_num_already_in_db = make_sales_objects(
                brand_webpage,
                log,
                gpu,
            )
            num_added_to_db += new_num_added_to_db
            num_already_in_db += new_num_already_in_db
            logging.debug(f" {num_already_in_db} sales already in db")
    # Accumulate the running scrape counter on the log (None on first use).
    sales_scraped = log.sales_scraped if log.sales_scraped is not None else 0
    sales_scraped += num_added_to_db + num_already_in_db
    log.sales_scraped = sales_scraped
    current_datetime = make_aware(datetime.datetime.now())
    log.sales_added = Sale.objects.filter(log=log).count()
    log.end_time = current_datetime
    log.save()
    # Mark this card done so data_left_to_collect() skips it.
    gpu.data_collected = True
    gpu.last_collection = current_datetime
    gpu.save()
    logging.info(" Completed data collection")
def process_gpu(log, webpage, main_webdriver, start_url):
    """Collect sale data for the next GPU still awaiting collection.

    Returns True when there is nothing left to collect; otherwise runs a
    collection pass for one GPU and returns None.
    """
    if not data_left_to_collect():
        return True
    gpu = EbayGraphicsCard.objects.filter(
        data_collected=False, collect_data=True
    ).first()
    # Guard: .first() returns None when no matching card exists (e.g. another
    # worker finished the remaining cards between the check above and this
    # query); previously this crashed on gpu.name.
    if gpu is None:
        return True
    logging.info(f"Collecting data for {gpu.name}")
    gpu_button_id = BrandMenu.short_id_from_name(gpu.name)
    navigate_to_gpu_page(webpage, gpu_button_id)
    create_url_obj(webpage.driver.current_url, log, gpu)
    # Now that we're on the page for a particular gpu, create a BrandWebPage
    # instance to drive pagination and scraping.
    brand_webpage = BrandWebPage(main_webdriver, start_url)
    brand_webpage.check_number_of_results()
    collect_data(log, gpu, brand_webpage)
def calculate_total_collected_per_gpu():
    """Recompute and persist the Sale count for every EbayGraphicsCard."""
    logging.info("Calculating total_collected value for each gpu")
    for card in EbayGraphicsCard.objects.all():
        card.total_collected = Sale.objects.filter(gpu__id=card.id).count()
        card.save()
def calculate_sales_added_per_log():
    """Recompute and persist the sales_added count for every Log."""
    logging.info("Calculating sales_added value for each log")
    for log in Log.objects.all():
        count = Sale.objects.filter(log__id=log.id).count()
        log.sales_added = count
        log.save()
        # Was a leftover bare print(); route through the logging framework
        # so the output respects the configured log level/handlers.
        logging.debug(f"log {log.id}: {count} sales")
|
<filename>automation_orchestrator/orchestrator/admin.py
import os
import csv
import pytz
import datetime
import subprocess
from tzlocal import get_localzone
from django import forms
from django.db import models
from django.contrib import admin, messages
from django.http import HttpResponse
from django.utils.html import format_html
from simple_history.admin import SimpleHistoryAdmin
from .models import Bot, App, Botflow, FileTrigger, PythonFunction, ScheduleTrigger, EmailImapTrigger, EmailOutlookTrigger, ApiTrigger, BotflowExecution, SmtpAccount, PythonFunction, PythonFunctionExecution
from .monitoring import add_botflow_execution_object, determine_execution_bot
from automation_orchestrator.settings import VERSION
# Branding for the Django admin site; the browser title shows the version.
admin.site.site_header = 'Automation Orchestrator'
admin.site.site_title = f'Automation Orchestrator {VERSION}'
admin.site.index_title = 'Orchestrate Amazing Automation'
def queue_item(item, trigger):
    """Queue a BotflowExecution for the given trigger object.

    The executing bot is chosen by the monitoring module; app, botflow and
    the custom status are read from the trigger item itself.
    """
    add_botflow_execution_object(
        bot_pk=determine_execution_bot(item).pk,
        app_pk=item.app.pk,
        botflow_pk=item.botflow.pk,
        trigger=trigger,
        custom_status=item.botflow_execution_custom_status
    )
def run_selected_triggers(modeladmin, request, queryset):
    """Admin action: queue every selected trigger as a manual run."""
    for trigger in queryset:
        queue_item(trigger, "No Trigger: Activated Manually")
def run_selected_file_triggers(modeladmin, request, queryset):
    # Thin alias so file triggers get their own admin action label.
    run_selected_triggers(modeladmin, request, queryset)
def run_selected_schedule_triggers(modeladmin, request, queryset):
    # Thin alias so schedule triggers get their own admin action label.
    run_selected_triggers(modeladmin, request, queryset)
def run_selected_email_imap_triggers(modeladmin, request, queryset):
    # Thin alias so email IMAP triggers get their own admin action label.
    run_selected_triggers(modeladmin, request, queryset)
def run_selected_email_outlook_triggers(modeladmin, request, queryset):
    # Thin alias so email Outlook triggers get their own admin action label.
    run_selected_triggers(modeladmin, request, queryset)
def run_selected_api_triggers(modeladmin, request, queryset):
    # Thin alias so API triggers get their own admin action label.
    run_selected_triggers(modeladmin, request, queryset)
def cancel_selected_botflow_executions(modeladmin, request, queryset):
    """Admin action: mark unfinished or running executions as 'Cancelled'.

    Missing start/end timestamps are backfilled with the current local time
    so cancelled rows always carry a complete time range.
    """
    # strftime("%z") renders the UTC offset (e.g. "+0200") correctly for any
    # zone.  The previous hand-built offset string assumed a positive,
    # single-digit hour offset and produced garbage otherwise.
    time_now = datetime.datetime.now(
        pytz.timezone(str(get_localzone()))
    ).strftime("%Y-%m-%dT%H:%M:%S%z")
    for item in queryset:
        if item.time_start is None or item.time_end is None or item.status == "Running":
            item.status = "Cancelled"
            if item.time_start is None:
                item.time_start = time_now
            if item.time_end is None:
                item.time_end = time_now
            item.save()
def copy_selected_file_triggers(modeladmin, request, queryset):
    """Admin action: duplicate each selected file trigger, deactivated."""
    for original in queryset:
        linked_bots = original.bots.all()
        clone = original
        # Clearing the pk and saving persists the instance as a new row.
        clone.pk = None
        clone.date_created = None
        clone.date_updated = None
        clone.activated = False
        clone.save()
        # m2m links are not carried over by the save above; re-attach them.
        clone.bots.set([bot.id for bot in linked_bots])
        clone.save()
def copy_selected_schedule_triggers(modeladmin, request, queryset):
    """Admin action: duplicate each selected schedule trigger, deactivated."""
    for original in queryset:
        linked_bots = original.bots.all()
        clone = original
        # Clearing the pk and saving persists the instance as a new row.
        clone.pk = None
        clone.date_created = None
        clone.date_updated = None
        clone.activated = False
        # The copy must compute its own schedule from scratch.
        clone.next_execution = None
        clone.save()
        # m2m links are not carried over by the save above; re-attach them.
        clone.bots.set([bot.id for bot in linked_bots])
        clone.save()
def copy_selected_email_imap_triggers(modeladmin, request, queryset):
    """Admin action: duplicate each selected email IMAP trigger, deactivated."""
    for original in queryset:
        linked_bots = original.bots.all()
        clone = original
        # Clearing the pk and saving persists the instance as a new row.
        clone.pk = None
        clone.date_created = None
        clone.date_updated = None
        clone.activated = False
        clone.save()
        # m2m links are not carried over by the save above; re-attach them.
        clone.bots.set([bot.id for bot in linked_bots])
        clone.save()
def copy_selected_email_outlook_triggers(modeladmin, request, queryset):
    """Admin action: duplicate each selected email Outlook trigger, deactivated."""
    for original in queryset:
        linked_bots = original.bots.all()
        clone = original
        # Clearing the pk and saving persists the instance as a new row.
        clone.pk = None
        clone.date_created = None
        clone.date_updated = None
        clone.activated = False
        clone.save()
        # m2m links are not carried over by the save above; re-attach them.
        clone.bots.set([bot.id for bot in linked_bots])
        clone.save()
def copy_selected_api_triggers(modeladmin, request, queryset):
    """Admin action: duplicate each selected API trigger, deactivated."""
    for original in queryset:
        linked_bots = original.bots.all()
        clone = original
        # Clearing the pk and saving persists the instance as a new row.
        clone.pk = None
        clone.date_created = None
        clone.date_updated = None
        clone.activated = False
        clone.save()
        # m2m links are not carried over by the save above; re-attach them.
        clone.bots.set([bot.id for bot in linked_bots])
        clone.save()
def export_selected_file_triggers(modeladmin, request, queryset):
    """Admin action: download the selected file triggers as a CSV file."""
    # Single source of truth for both the header row and the value query.
    fields = ('pk', 'bot', 'app', 'botflow',
              'folder_in', 'folder_out',
              'activated',
              'run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="file_triggers.csv"'
    writer = csv.writer(response)
    writer.writerow(fields)
    for row in queryset.values_list(*fields):
        writer.writerow(row)
    return response
def export_selected_schedule_triggers(modeladmin, request, queryset):
    """Admin action: download the selected schedule triggers as a CSV file."""
    # Single source of truth for both the header row and the value query.
    fields = ('pk', 'bot', 'app', 'botflow',
              'frequency', 'run_every', 'run_start',
              'activated',
              'run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="schedule_triggers.csv"'
    writer = csv.writer(response)
    writer.writerow(fields)
    for row in queryset.values_list(*fields):
        writer.writerow(row)
    return response
def export_selected_email_imap_triggers(modeladmin, request, queryset):
    """Admin action: download the selected email IMAP triggers as a CSV file."""
    # Single source of truth for both the header row and the value query.
    fields = ('pk', 'bot', 'app', 'botflow',
              'email', 'server', 'port', 'tls',
              'folder_in', 'folder_out',
              'activated',
              'run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="email_imap_triggers.csv"'
    writer = csv.writer(response)
    writer.writerow(fields)
    for row in queryset.values_list(*fields):
        writer.writerow(row)
    return response
def export_selected_email_outlook_triggers(modeladmin, request, queryset):
    """Admin action: download the selected email Outlook triggers as a CSV file."""
    # Single source of truth for both the header row and the value query.
    fields = ('pk', 'bot', 'app', 'botflow',
              'email',
              'folder_in', 'folder_out',
              'activated',
              'run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="email_outlook_triggers.csv"'
    writer = csv.writer(response)
    writer.writerow(fields)
    for row in queryset.values_list(*fields):
        writer.writerow(row)
    return response
def export_selected_api_triggers(modeladmin, request, queryset):
    """Admin action: download the selected API triggers as a CSV file."""
    # Single source of truth for both the header row and the value query.
    fields = ('pk', 'bot', 'app', 'botflow',
              'activated')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="api_triggers.csv"'
    writer = csv.writer(response)
    writer.writerow(fields)
    for row in queryset.values_list(*fields):
        writer.writerow(row)
    return response
def export_selected_botflow_executions(modeladmin, request, queryset):
    """Admin action: download the selected botflow executions as a CSV file."""
    # Single source of truth for both the header row and the value query.
    fields = ('pk', 'time_queued',
              'computer_name', 'user_name',
              'app', 'botflow', 'trigger',
              'priority', 'timeout_minutes',
              'status', 'time_start', 'time_end')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="botflow_executions.csv"'
    writer = csv.writer(response)
    writer.writerow(fields)
    for row in queryset.values_list(*fields):
        writer.writerow(row)
    return response
def refresh_selected_bots(modeladmin, request, queryset):
    """Admin action: refresh each bot's status from Windows session state.

    Runs only meaningfully on the bot's own host: when the bot's
    computer_name matches this machine, `query session` output decides
    Active/ERROR; otherwise the status decays to "Unknown" after 5 minutes
    without updates.  Windows-only (COMPUTERNAME, `query session`).
    """
    for item in queryset:
        try:
            computer_name = item.computer_name
            user_name = item.user_name
            if str(computer_name).lower() == os.environ['COMPUTERNAME'].lower():
                # Parse the local session table; each line is one session.
                sessions = subprocess.run(["query", "session"], stdout=subprocess.PIPE, text=True).stdout.split("\n")
                # No "SESSIONNAME" header means the command output is not
                # usable; flag as Unknown (but parsing continues below).
                if not "SESSIONNAME" in str(sessions):
                    if item.status != "Unknown":
                        item.status = "Unknown"
                        item.save_without_historical_record()
                active = False
                for session in sessions:
                    # Match " <user> " and state " Active " on the same row.
                    if f" {user_name.lower()} " in session.lower() and " Active " in session:
                        active = True
                        break
                if active:
                    # Don't clobber "Running" — the bot is busy, not idle.
                    if item.status != "Active" and item.status != "Running":
                        item.status = "Active"
                        item.save_without_historical_record()
                else:
                    if item.status != "ERROR":
                        item.status = "ERROR"
                        item.save_without_historical_record()
            else:
                # Remote bot: only mark Unknown once it has been silent
                # for more than 300 seconds.
                if item.status != "Running":
                    if (pytz.utc.localize(datetime.datetime.utcnow()) - item.date_updated).seconds > 300:
                        if item.status != "Unknown":
                            item.status = "Unknown"
                            item.save_without_historical_record()
        except:
            # NOTE(review): bare except deliberately treats any failure
            # (missing env var, subprocess error) as status Unknown.
            if item.status != "Unknown":
                item.status = "Unknown"
                item.save_without_historical_record()
def test_selected_apps(modeladmin, request, queryset):
    """Admin action: verify that each selected App's file path exists."""
    for app in queryset:
        if os.path.isfile(app.path):
            messages.success(request, f"Successfully located the '{app.name}' App file: {app.path}")
        else:
            messages.error(request, f"Failed to locate the '{app.name}' App file: {app.path}")
def test_selected_botflows(modeladmin, request, queryset):
    """Admin action: verify that each selected Botflow's file path exists."""
    for botflow in queryset:
        if os.path.isfile(botflow.path):
            messages.success(request, f"Successfully located the '{botflow.name}' Botflow file: {botflow.path}")
        else:
            messages.error(request, f"Failed to locate the '{botflow.name}' Botflow file: {botflow.path}")
def test_selected_file_triggers(modeladmin, request, queryset):
    """Admin action: validate folders and count matching files per trigger.

    Sets the trigger's status to "Active" on success or "ERROR" when the
    incoming folder cannot be scanned.
    """
    import glob
    for item in queryset:
        try:
            if not os.path.isdir(item.folder_in):
                messages.error(request, f"Failed to find the incoming folder: {item.folder_in}")
                continue
            elif not os.path.isdir(item.folder_out):
                messages.error(request, f"Failed to find the outgoing folder: {item.folder_out}")
                continue
            files = []
            # item.filter is a comma-separated list of glob patterns; the
            # loop variable is named 'pattern' to avoid shadowing the
            # builtin filter().  Backslash join kept: deployment is
            # Windows-only (see refresh_selected_bots).
            for pattern in item.filter.split(","):
                files = files + [file for file in glob.glob(item.folder_in + "\\" + pattern.strip()) if os.path.isfile(file)]
            messages.success(request, f"Successfully retrieved {str(len(files))} file(s) in the incoming folder: {item.folder_in}")
            if item.status != "Active":
                item.status = "Active"
                item.save_without_historical_record()
        except Exception:
            # Narrowed from bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            messages.error(request, f"Failed to retrieve files from the incoming folder: {item.folder_in}")
            if item.status != "ERROR":
                item.status = "ERROR"
                item.save_without_historical_record()
def test_selected_email_imap_triggers(modeladmin, request, queryset):
    """Admin action: try to connect/login to each IMAP trigger's mailbox.

    Verifies that both the incoming and outgoing subfolders of INBOX exist
    and reports the message count; status is updated to Active/ERROR.
    """
    from imaplib import IMAP4, IMAP4_SSL
    for item in queryset:
        try:
            if item.tls:
                server = IMAP4_SSL(item.server, item.port)
            else:
                server = IMAP4(item.server, item.port)
            server.login(item.email, item.password)
            server.select('INBOX')
            server.select('INBOX/' + item.folder_in)
            server.select('INBOX/' + item.folder_out)
            # select() returns (typ, [data]); the last element is either the
            # message count or an error string such as "... doesn't exist".
            emails_folder_in = server.select('INBOX/' + item.folder_in, readonly=True)[-1][-1]
            emails_folder_in = str(emails_folder_in, 'utf-8', 'ignore')
            emails_folder_out = server.select('INBOX/' + item.folder_out, readonly=True)[-1][-1]
            emails_folder_out = str(emails_folder_out, 'utf-8', 'ignore')
            if "doesn't exist" in emails_folder_in or "doesn't exist" in emails_folder_out:
                messages.error(request, f"Failed to connect to email {item.email}!")
                if item.status != "ERROR":
                    item.status = "ERROR"
                    item.save_without_historical_record()
            else:
                messages.success(request, f"Successfully connected to email {item.email}! Number of messages detected in the 'INBOX/{item.folder_in}' folder: {emails_folder_in}")
                if item.status != "Active":
                    item.status = "Active"
                    item.save_without_historical_record()
        except:
            # NOTE(review): bare except treats any connection/login failure
            # as an ERROR status for the trigger.
            messages.error(request, f"Failed to connect to email {item.email}!")
            if item.status != "ERROR":
                item.status = "ERROR"
                item.save_without_historical_record()
        finally:
            # Best-effort logout, then drop the connection object so the
            # next loop iteration starts clean.
            try:
                server.logout()
            except:
                pass
            server = None
            del server
def test_selected_email_outlook_triggers(modeladmin, request, queryset):
    """Admin action: resolve each Outlook trigger's folders via COM.

    Uses the local Outlook installation (win32com) to locate the account,
    walk the configured folder paths and report the message count; status
    is updated to Active/ERROR.  Windows-only.
    """
    import win32com.client as win32
    email_outlook = win32.dynamic.Dispatch('Outlook.Application')
    for item in queryset:
        try:
            accounts = email_outlook.Session.Accounts
            accounts_list = [account.DisplayName for account in accounts]
            if item.email == "Default":
                namespace = email_outlook.GetNamespace("MAPI")
            else:
                # Skip (silently) triggers whose account isn't configured
                # in this Outlook profile.
                if not item.email in accounts_list:
                    continue
                namespace = None
                for account in accounts:
                    if str(item.email).upper() == str(account.DisplayName).upper():
                        namespace = account.DeliveryStore
                        break
            # 6 == olFolderInbox in the Outlook object model.
            inbox = namespace.GetDefaultFolder(6)
            folder_in = inbox
            folder_out = inbox
            # folder_in/folder_out are slash-separated paths below INBOX.
            for folder in item.folder_in.split("/"):
                folder_in = folder_in.Folders[folder]
            for folder in item.folder_out.split("/"):
                folder_out = folder_out.Folders[folder]
            emails = folder_in.Items
            messages.success(request, f"Successfully connected to email {item.email}! Number of messages detected in the 'INBOX/{item.folder_in}' folder: {str(len(emails))}")
            if item.status != "Active":
                item.status = "Active"
                item.save_without_historical_record()
        except:
            # NOTE(review): bare except treats any COM/lookup failure as an
            # ERROR status for the trigger.
            if item.status != "ERROR":
                item.status = "ERROR"
                item.save_without_historical_record()
        finally:
            # Explicitly drop every COM reference before the next iteration
            # so the underlying RCW objects can be released.
            accounts, accounts_list, namespace, inbox, folder_in, folder_out, emails = None, None, None, None, None, None, None
            del accounts, accounts_list, namespace, inbox, folder_in, folder_out, emails
    # Close Outlook (best effort) and release the application object.
    try:
        email_outlook.Application.Quit()
    except:
        pass
    email_outlook = None
    del email_outlook
def test_selected_smtp_accounts(modeladmin, request, queryset):
    """Admin action: send a test email from each SMTP account to itself.

    Tries plain SMTP (with STARTTLS when configured) first and, on failure
    with TLS accounts, falls back to implicit SSL; status is updated to
    Active/ERROR accordingly.
    """
    from smtplib import SMTP, SMTP_SSL
    from email.message import EmailMessage
    for item in queryset:
        try:
            msg = EmailMessage()
            msg['Subject'] = "[TEST] Automation Orchestrator"
            msg['From'] = item.email
            msg['To'] = item.email
            with SMTP(item.server, item.port) as server:
                if item.tls:
                    server.starttls()
                # Empty password means the server allows unauthenticated send.
                if item.password != "":
                    server.login(item.email, item.password)
                server.send_message(msg)
            messages.success(request, f"Successfully sent an email with {item.email}!")
            if item.status != "Active":
                item.status = "Active"
                item.save_without_historical_record()
        except:
            # STARTTLS failed — the server may expect implicit SSL instead.
            if item.tls:
                try:
                    msg = EmailMessage()
                    msg['Subject'] = "[TEST] Automation Orchestrator"
                    msg['From'] = item.email
                    msg['To'] = item.email
                    with SMTP_SSL(item.server, item.port) as server:
                        if item.password != "":
                            server.login(item.email, item.password)
                        server.send_message(msg)
                    messages.success(request, f"Successfully sent an email with {item.email}!")
                    # Fallback succeeded; skip the error handling below.
                    continue
                except:
                    pass
            messages.error(request, f"Failed to send email with {item.email}!")
            if item.status != "ERROR":
                item.status = "ERROR"
                item.save_without_historical_record()
class BotAdmin(SimpleHistoryAdmin):
    """Admin for Bot records: history tracking, EDIT link and refresh action."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/bot/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('name', 'computer_name', 'user_name',),
        }),
        ('Time Filter', {
            'classes': ('collapse',),
            'fields': ('run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days'),
        }),
        ('Nintex RPA', {
            'classes': ('collapse',),
            'fields': ('nintex_rpa_license_path', 'nintex_rpa_available_foxtrot_licenses', 'nintex_rpa_available_foxbot_licenses',),
        }),
    )
    list_display = ('pk_formatted', 'name', 'computer_name', 'user_name', 'status',
                    'update_record',)
    list_editable = ('name', 'computer_name', 'user_name',)
    list_display_links = ['pk_formatted']
    pk_formatted.admin_order_field = 'pk'
    pk_formatted.short_description = 'ID'
    actions = [refresh_selected_bots,]
class AppAdmin(SimpleHistoryAdmin):
    """Admin for App records: history tracking, EDIT link and path test action."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/app/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('name', 'path',),
        }),
    )
    list_display = ('pk_formatted', 'name', 'path',
                    'update_record')
    list_editable = ('name', 'path',)
    list_display_links = ['pk_formatted']
    pk_formatted.admin_order_field = 'pk'
    pk_formatted.short_description = 'ID'
    actions = [test_selected_apps,]
class BotflowAdmin(SimpleHistoryAdmin):
    """Admin for Botflow records: queueing/priority/timeout config and path test."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/botflow/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    pk_formatted.admin_order_field = 'pk'
    pk_formatted.short_description = 'ID'
    fieldsets = (
        ('General', {
            'fields': ('name', 'path',),
        }),
        ('Queueing', {
            'fields': ('queue_if_already_running',),
        }),
        ('Prioritization', {
            'classes': ('collapse',),
            'fields': ('priority',),
        }),
        ('Timeout', {
            'classes': ('collapse',),
            'fields': ('timeout_minutes', 'timeout_kill_processes',),
        }),
        ('Notifications', {
            'classes': ('collapse',),
            'fields': ('queued_notification', 'started_notification', 'completed_notification', 'error_notification',),
        }),
        ('Nintex RPA', {
            'classes': ('collapse',),
            'fields': ('close_bot_automatically',),
        }),
    )
    list_display = ('pk_formatted', 'name', 'path',
                    'queue_if_already_running',
                    'priority',
                    'update_record',)
    list_editable = ('name', 'path',
                     'queue_if_already_running',
                     'priority',)
    list_display_links = ['pk_formatted']
    actions = [test_selected_botflows,]
class FileTriggerAdmin(SimpleHistoryAdmin):
    """Admin for file triggers: copy/export/test/run actions and folder config."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/filetrigger/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('bots', 'app', 'botflow',),
        }),
        ('Folders', {
            'fields': ('folder_in', 'folder_out',),
        }),
        ('Filter', {
            'fields': ('filter',),
        }),
        ('Activate', {
            'fields': ('activated',),
        }),
        ('Time Filter', {
            'classes': ('collapse',),
            'fields': ('run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days',),
        }),
        ('Botflow Execution Note', {
            'classes': ('collapse',),
            'fields': ('botflow_execution_custom_status',),
        })
    )
    list_display = ('pk_formatted', 'assigned_bots', 'app', 'botflow',
                    'folder_in', 'folder_out', 'filter',
                    'activated', 'status',
                    'update_record',)
    list_editable = ('app', 'botflow',
                     'folder_in', 'folder_out', 'filter', 'activated',)
    list_display_links = ['pk_formatted']
    pk_formatted.short_description = 'ID'
    run_selected_file_triggers.short_description = "Run selected file triggers"
    pk_formatted.admin_order_field = 'pk'
    actions = [copy_selected_file_triggers, export_selected_file_triggers, test_selected_file_triggers, run_selected_file_triggers]
class ScheduleTriggerAdmin(SimpleHistoryAdmin):
    """Admin for schedule triggers: recurrence config and next-execution display."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/scheduletrigger/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    def next_execution_formatted(self, obj):
        # Renders next_execution in local time, minute precision.
        time = obj.next_execution
        if time != None and time != "":
            time = time.astimezone().strftime("%Y-%m-%d %H:%M")
        return time
    fieldsets = (
        ('General', {
            'fields': ('bots', 'app', 'botflow'),
        }),
        ('Recurrence', {
            'fields': ('frequency', 'run_every', 'run_start',),
        }),
        ('Activate', {
            'fields': ('activated',),
        }),
        ('Time Filter', {
            'classes': ('collapse',),
            'fields': ('run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days'),
        }),
        ('Botflow Execution Note', {
            'classes': ('collapse',),
            'fields': ('botflow_execution_custom_status',),
        }),
        ('Next Execution', {
            'classes': ('collapse',),
            'fields': ('next_execution',),
        }),
    )
    list_display = ('pk_formatted', 'assigned_bots', 'app', 'botflow',
                    'frequency', 'run_every', 'run_start',
                    'activated', 'status',
                    'next_execution_formatted',
                    'update_record',)
    list_editable = ('app', 'botflow',
                     'frequency', 'run_every', 'run_start', 'activated',)
    list_display_links = ['pk_formatted']
    exclude = ('past_settings',)
    # next_execution is computed by the scheduler, so it is shown read-only.
    readonly_fields = ('next_execution',)
    pk_formatted.short_description = 'ID'
    run_selected_schedule_triggers.short_description = "Run selected schedule triggers"
    next_execution_formatted.short_description = 'Next Execution'
    pk_formatted.admin_order_field = 'pk'
    next_execution_formatted.admin_order_field = 'next_execution'
    actions = [copy_selected_schedule_triggers, export_selected_schedule_triggers, run_selected_schedule_triggers]
class EmailImapTriggerForm(forms.ModelForm):
    """Form for IMAP triggers that masks the password input field."""
    class Meta:
        model = EmailImapTrigger
        fields = '__all__'
        widgets = {
            'password': forms.PasswordInput(),
        }
class EmailImapTriggerAdmin(SimpleHistoryAdmin):
    """Admin for email IMAP triggers: copy/export/test/run actions."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/emailimaptrigger/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('bots', 'app', 'botflow',),
        }),
        ('Email', {
            'fields': ('email', 'password',),
        }),
        ('Connection', {
            'fields': ('server', 'port', 'tls',),
        }),
        ('Folders', {
            'fields': ('folder_in', 'folder_out',),
        }),
        ('Activate', {
            'fields': ('activated',),
        }),
        ('Time Filter', {
            'classes': ('collapse',),
            'fields': ('run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days',),
        }),
        ('Botflow Execution Note', {
            'classes': ('collapse',),
            'fields': ('botflow_execution_custom_status',),
        })
    )
    list_display = ('pk_formatted', 'assigned_bots', 'app', 'botflow',
                    'email',
                    'folder_in', 'folder_out',
                    'activated', 'status',
                    'update_record',)
    list_editable = ('app', 'botflow',
                     'email',
                     'folder_in', 'folder_out', 'activated',)
    list_display_links = ['pk_formatted']
    pk_formatted.short_description = 'ID'
    copy_selected_email_imap_triggers.short_description = "Copy selected email IMAP triggers"
    export_selected_email_imap_triggers.short_description = "Export selected email IMAP triggers"
    # Fixed label: the action *runs* the triggers.  Every sibling admin
    # labels this action "Run selected ..."; "Activate ..." was misleading.
    run_selected_email_imap_triggers.short_description = "Run selected email IMAP triggers"
    test_selected_email_imap_triggers.short_description = "Test selected email IMAP triggers"
    pk_formatted.admin_order_field = 'pk'
    actions = [copy_selected_email_imap_triggers, export_selected_email_imap_triggers, test_selected_email_imap_triggers, run_selected_email_imap_triggers]
    form = EmailImapTriggerForm
class EmailOutlookTriggerAdmin(SimpleHistoryAdmin):
    """Admin for email Outlook triggers: copy/export/run actions."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/emailoutlooktrigger/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('bots', 'app', 'botflow',),
        }),
        ('Email', {
            'fields': ('email',),
        }),
        ('Folders', {
            'fields': ('folder_in', 'folder_out',),
        }),
        ('Activate', {
            'fields': ('activated',),
        }),
        ('Time Filter', {
            'classes': ('collapse',),
            'fields': ('run_after', 'run_until', 'run_on_week_days', 'run_on_weekend_days',),
        }),
        ('Botflow Execution Note', {
            'classes': ('collapse',),
            'fields': ('botflow_execution_custom_status',),
        })
    )
    list_display = ('pk_formatted', 'assigned_bots', 'app', 'botflow',
                    'email',
                    'folder_in', 'folder_out',
                    'activated', 'status',
                    'update_record',)
    list_editable = ('app', 'botflow',
                     'email',
                     'folder_in', 'folder_out', 'activated',)
    list_display_links = ['pk_formatted']
    pk_formatted.short_description = 'ID'
    copy_selected_email_outlook_triggers.short_description = "Copy selected email Outlook triggers"
    export_selected_email_outlook_triggers.short_description = "Export selected email Outlook triggers"
    run_selected_email_outlook_triggers.short_description = "Run selected email Outlook triggers"
    # NOTE(review): a short_description is set for
    # test_selected_email_outlook_triggers, but the action is not included
    # in 'actions' below — possibly intentional (win32com dependency),
    # possibly an oversight; confirm before adding it.
    test_selected_email_outlook_triggers.short_description = "Test selected email Outlook triggers"
    pk_formatted.admin_order_field = 'pk'
    actions = [copy_selected_email_outlook_triggers, export_selected_email_outlook_triggers, run_selected_email_outlook_triggers]
class ApiTriggerAdmin(SimpleHistoryAdmin):
    """Admin for API triggers: copy/export/run actions."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/apitrigger/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('bots', 'app', 'botflow',),
        }),
        ('Activate', {
            'fields': ('activated',),
        }),
        ('Botflow Execution Note', {
            'classes': ('collapse',),
            'fields': ('botflow_execution_custom_status',),
        })
    )
    list_display = ('pk_formatted', 'assigned_bots', 'app', 'botflow',
                    'activated', 'status',
                    'update_record',)
    list_editable = ('app', 'botflow',
                     'activated',)
    list_display_links = ['pk_formatted']
    pk_formatted.short_description = 'ID'
    copy_selected_api_triggers.short_description = "Copy selected API triggers"
    export_selected_api_triggers.short_description = "Export selected API triggers"
    run_selected_api_triggers.short_description = "Run selected API triggers"
    pk_formatted.admin_order_field = 'pk'
    actions = [copy_selected_api_triggers, export_selected_api_triggers, run_selected_api_triggers]
class BotflowExecutionAdmin(SimpleHistoryAdmin):
    """Read-mostly admin for execution records: formatted columns, cancel/export.

    Executions are created by triggers, never by hand, so adding is
    disabled; only custom_status remains writable.
    """
    def get_ordering(self, request):
        # Newest queued executions first.
        return ['-time_queued']
    def has_add_permission(self, request, obj=None):
        # Executions are created by the trigger machinery, not manually.
        return False
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    def time_queued_formatted(self, obj):
        # Renders time_queued in local time, second precision.
        time = obj.time_queued
        if time != None:
            time = time.astimezone().strftime("%Y-%m-%d %H:%M:%S")
        return time
    def time_start_formatted(self, obj):
        # Renders time_start in local time, second precision.
        time = obj.time_start
        if time != None:
            time = time.astimezone().strftime("%Y-%m-%d %H:%M:%S")
        return time
    def time_end_formatted(self, obj):
        # Renders time_end in local time, second precision.
        time = obj.time_end
        if time != None:
            time = time.astimezone().strftime("%Y-%m-%d %H:%M:%S")
        return time
    def time_updated_formatted(self, obj):
        # Renders time_updated in local time, second precision.
        time = obj.time_updated
        if time != None:
            time = time.astimezone().strftime("%Y-%m-%d %H:%M:%S")
        return time
    def app_formatted(self, obj):
        # Show just the file name, not the full stored path.
        return os.path.basename(obj.app)
    def bot_formatted(self, obj):
        # Combine host and user into one 'Bot' column.
        return f"{obj.computer_name} - {obj.user_name}"
    def botflow_formatted(self, obj):
        # Show just the file name, not the full stored path.
        return os.path.basename(obj.botflow)
    def trigger_formatted(self, obj):
        # For file triggers, shorten the embedded path to its basename
        # (manual activations are left verbatim).
        trigger = obj.trigger
        trigger_file = "File Trigger: "
        if trigger.startswith(trigger_file):
            if not "Activated Manually" in trigger:
                return trigger_file + os.path.basename(trigger[len(trigger_file):])
        return trigger
    def custom_progress_formatted(self, obj):
        # Render progress as a percentage, dropping a trailing '.00'.
        progress = str(obj.custom_progress)
        progress = progress.replace('.00', '') + "%"
        return progress
    def custom_status_formatted(self, obj):
        # Free-text note column.
        note = str(obj.custom_status)
        return note
    list_display = ('pk_formatted',
                    'time_queued_formatted',
                    'bot_formatted',
                    'app_formatted',
                    'botflow_formatted',
                    'trigger_formatted',
                    'priority',
                    'status',
                    'custom_progress_formatted',
                    'time_start_formatted',
                    'time_end_formatted',
                    'time_updated_formatted',
                    'custom_status_formatted',)
    list_display_links = ['pk_formatted']
    list_filter = ('computer_name', 'user_name', 'app', 'botflow', 'status',)
    # Everything except the free-text note is machine-written → read-only.
    readonly_fields = [field.name for field in BotflowExecution._meta.get_fields() if field.name != 'custom_status']
    time_queued_formatted.short_description = 'Time Queued'
    time_start_formatted.short_description = 'Time Start'
    time_end_formatted.short_description = 'Time End'
    time_updated_formatted.short_description = 'Time Updated'
    app_formatted.short_description = 'App'
    bot_formatted.short_description = 'Bot'
    botflow_formatted.short_description = 'Botflow'
    trigger_formatted.short_description = 'Trigger'
    custom_progress_formatted.short_description = 'Progress'
    custom_status_formatted.short_description = 'Note'
    pk_formatted.short_description = 'ID'
    time_queued_formatted.admin_order_field = 'time_queued'
    time_start_formatted.admin_order_field = 'time_start'
    time_end_formatted.admin_order_field = 'time_end'
    time_updated_formatted.admin_order_field = 'time_updated'
    app_formatted.admin_order_field = 'app'
    bot_formatted.admin_order_field = 'computer_name'
    botflow_formatted.admin_order_field = 'botflow'
    trigger_formatted.admin_order_field = 'trigger'
    custom_progress_formatted.admin_order_field = 'custom_progress'
    custom_status_formatted.admin_order_field = 'custom_status'
    pk_formatted.admin_order_field = 'pk'
    actions = [cancel_selected_botflow_executions, export_selected_botflow_executions,]
    list_per_page = 20
class SmtpAccountForm(forms.ModelForm):
    """Form for SMTP accounts that masks the password input field."""
    class Meta:
        model = SmtpAccount
        fields = '__all__'
        widgets = {
            'password': forms.PasswordInput(),
        }
class SmtpAccountAdmin(SimpleHistoryAdmin):
    """Admin for SMTP accounts: connection config and send-test action."""
    def get_ordering(self, request):
        # Activated accounts sort first.
        return ['-activated']
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/smtpaccount/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('email', 'password',),
        }),
        ('Connection', {
            'fields': ('server', 'port', 'tls',),
        }),
        ('Activate', {
            'fields': ('activated',),
        }),
    )
    list_display = ('pk_formatted', 'email',
                    'server', 'port', 'tls',
                    'activated', 'status',
                    'update_record',)
    list_editable = ('server', 'port', 'tls',
                     'activated',)
    list_display_links = ['pk_formatted']
    pk_formatted.short_description = 'ID'
    test_selected_smtp_accounts.short_description = "Test selected SMTP accounts"
    pk_formatted.admin_order_field = 'pk'
    actions = [test_selected_smtp_accounts, ]
    form = SmtpAccountForm
class PythonFunctionForm(forms.ModelForm):
    """Form for Python functions: large code/description editors and masked
    encrypted-value fields."""
    class Meta:
        model = PythonFunction
        fields = '__all__'
        widgets = {
            'description': forms.Textarea(attrs={'rows': 10, 'cols': 150}),
            'encrypted_value_1': forms.PasswordInput(),
            'encrypted_value_2': forms.PasswordInput(),
            'encrypted_value_3': forms.PasswordInput(),
            'encrypted_value_4': forms.PasswordInput(),
            'encrypted_value_5': forms.PasswordInput(),
            'code': forms.Textarea(attrs={'rows': 30, 'cols': 150}),
        }
class PythonFunctionAdmin(SimpleHistoryAdmin):
    """Admin for stored Python functions with masked secrets and code editor."""
    def update_record(self, obj):
        # Renders an inline EDIT link column pointing at the change form.
        return format_html('<a type="submit" class="default" href="/orchestrator/pythonfunction/{}/change/">EDIT</a>', obj.id)
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    fieldsets = (
        ('General', {
            'fields': ('name', 'description',),
        }),
        ('Encrypted values', {
            'fields': ('encrypted_value_1', 'encrypted_value_2', 'encrypted_value_3', 'encrypted_value_4', 'encrypted_value_5',),
        }),
        ('Code', {
            'fields': ('code',),
        }),
        ('Activate', {
            'fields': ('activated',),
        }),
    )
    list_display = ('pk_formatted', 'name', 'description',
                    'activated', 'update_record',)
    list_editable = ('name',
                     'activated',)
    list_display_links = ['pk_formatted']
    pk_formatted.admin_order_field = 'pk'
    pk_formatted.short_description = 'ID'
    form = PythonFunctionForm
class PythonFunctionExecutionAdmin(SimpleHistoryAdmin):
    """Read-only admin for Python function execution records."""
    def get_ordering(self, request):
        # Newest executions first.
        return ['-time_start']
    def has_add_permission(self, request, obj=None):
        # Execution records are created by the runtime, not manually.
        return False
    def pk_formatted(self, obj):
        # Exposes the primary key under a custom 'ID' column header.
        return obj.pk
    list_display = ('pk_formatted', 'python_function',
                    'request_user', 'request_ip',
                    'time_start', 'time_end',)
    list_display_links = ['pk_formatted']
    list_filter = ('python_function', 'request_user', 'request_ip',)
    # Every field is machine-written → fully read-only.
    readonly_fields = [field.name for field in PythonFunctionExecution._meta.get_fields()]
    pk_formatted.admin_order_field = 'pk'
    pk_formatted.short_description = 'ID'
    list_per_page = 20
# Wire every model to its admin class.
admin.site.register(Bot, BotAdmin)
admin.site.register(App, AppAdmin)
admin.site.register(Botflow, BotflowAdmin)
admin.site.register(FileTrigger, FileTriggerAdmin)
admin.site.register(ScheduleTrigger, ScheduleTriggerAdmin)
admin.site.register(EmailImapTrigger, EmailImapTriggerAdmin)
admin.site.register(EmailOutlookTrigger, EmailOutlookTriggerAdmin)
admin.site.register(ApiTrigger, ApiTriggerAdmin)
admin.site.register(BotflowExecution, BotflowExecutionAdmin)
admin.site.register(SmtpAccount, SmtpAccountAdmin)
admin.site.register(PythonFunction, PythonFunctionAdmin)
admin.site.register(PythonFunctionExecution, PythonFunctionExecutionAdmin)
|
<gh_stars>0
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add constraint for routerid
Revision ID: 31d7f831a591
Revises: 37<PASSWORD>
Create Date: 2014-02-26 06:47:16.494393
"""
# revision identifiers, used by Alembic.
revision = '31d7f831a591'
down_revision = '37f322<PASSWORD>'  # NOTE(review): value was redacted in this copy; restore the real parent revision id before running
from alembic import op
import sqlalchemy as sa
from sqlalchemy import exc
# Table whose surrogate 'id' PK is replaced by a composite key.
TABLE_NAME = 'routerl3agentbindings'
PK_NAME = 'pk_routerl3agentbindings'
UC_NAME = 'uniq_routerl3agentbind0router_id0l3_agent_id'
# Auto-generated FK constraint names differ per backend, so downgrade()
# looks them up by dialect here.
fk_names = {'postgresql':
            {'router_id':
             'routerl3agentbindings_router_id_fkey',
             'l3_agent_id':
             'routerl3agentbindings_l3_agent_id_fkey'},
            'mysql':
            {'router_id':
             'routerl3agentbindings_ibfk_2',
             'l3_agent_id':
             'routerl3agentbindings_ibfk_1'}}
def upgrade(active_plugins=None, options=None):
    """Replace the surrogate 'id' PK with a composite (router_id, l3_agent_id) PK.

    Deduplicates existing rows first (keeping the MIN(id) row per pair),
    drops the 'id' column, creates the composite primary key, and finally
    drops the now-redundant unique constraint if it exists.
    """
    # In order to sanitize the data during migration,
    # the current records in the table need to be verified
    # and all the duplicate records which violate the PK
    # constraint need to be removed.
    context = op.get_context()
    if context.bind.dialect.name == 'postgresql':
        # PostgreSQL form: delete rows whose id is not the kept MIN(id)
        # of their (router_id, l3_agent_id) group, via a subquery.
        op.execute('DELETE FROM %(table)s WHERE id in ('
                   'SELECT %(table)s.id FROM %(table)s LEFT OUTER JOIN '
                   '(SELECT MIN(id) as id, router_id, l3_agent_id '
                   ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
                   'ON %(table)s.id = temp.id WHERE temp.id is NULL);'
                   % {'table': TABLE_NAME})
    else:
        # MySQL form: multi-table DELETE joined against the kept rows.
        op.execute('DELETE %(table)s FROM %(table)s LEFT OUTER JOIN '
                   '(SELECT MIN(id) as id, router_id, l3_agent_id '
                   ' FROM %(table)s GROUP BY router_id, l3_agent_id) AS temp '
                   'ON %(table)s.id = temp.id WHERE temp.id is NULL;'
                   % {'table': TABLE_NAME})
    op.drop_column(TABLE_NAME, 'id')
    op.create_primary_key(
        name=PK_NAME,
        table_name=TABLE_NAME,
        cols=['router_id', 'l3_agent_id']
    )
    try:
        # The unique constraint may not exist on all deployments; tolerate
        # the failure rather than aborting the migration.
        op.drop_constraint(name=UC_NAME, table_name=TABLE_NAME,
                           type_='unique')
    except exc.OperationalError:
        pass
def downgrade(active_plugins=None, options=None):
    """Restore the surrogate 'id' column as the table's primary key.

    The foreign keys must be dropped before the composite PK can be removed,
    then recreated afterwards; constraint names are dialect-specific (see
    fk_names above).
    """
    context = op.get_context()
    dialect = context.bind.dialect.name
    # Drop the existed foreign key constraints
    # In order to perform primary key changes
    op.drop_constraint(
        name=fk_names[dialect]['l3_agent_id'],
        table_name=TABLE_NAME,
        type_='foreignkey'
    )
    op.drop_constraint(
        name=fk_names[dialect]['router_id'],
        table_name=TABLE_NAME,
        type_='foreignkey'
    )
    op.drop_constraint(
        name=PK_NAME,
        table_name=TABLE_NAME,
        type_='primary'
    )
    # NOTE(review): the restored 'id' column is left unpopulated here;
    # presumably acceptable for this schema's history - confirm.
    op.add_column(
        TABLE_NAME,
        sa.Column('id', sa.String(32))
    )
    # Restore the foreign key constraints
    op.create_foreign_key(
        name=fk_names[dialect]['router_id'],
        source=TABLE_NAME,
        referent='routers',
        local_cols=['router_id'],
        remote_cols=['id'],
        ondelete='CASCADE'
    )
    op.create_foreign_key(
        name=fk_names[dialect]['l3_agent_id'],
        source=TABLE_NAME,
        referent='agents',
        local_cols=['l3_agent_id'],
        remote_cols=['id'],
        ondelete='CASCADE'
    )
    op.create_primary_key(
        name=PK_NAME,
        table_name=TABLE_NAME,
        cols=['id']
    )
|
<reponame>juddcraft12/liamappelbe.github.io<gh_stars>0
from collections import OrderedDict
import random
import sys
#################
### CONSTANTS ###
#################
# How many of the most frequent words to keep from the frequency list.
kFreqDictSize = 30000
# Target stress pattern, one entry per syllable; values are the stress
# digits from the pronunciation data (0/1/2), -1 is a wildcard (see
# CadDictEntry.rand).
kTargetCadence = [1, 0, -1, -1, 1, 0, 1]
# Phoneme suffix the line must rhyme with; empty disables the constraint.
kTargetPhonemes = [] # ["AO", "L", "Z"]
# Only index each word under its first (primary) part of speech.
kIgnoreSecondaryPoS = True
#######################
### DATA STRUCTURES ###
#######################
kFreqDict = set() # {most common kFreqDictSize words}
kPos = OrderedDict() # {word: [parts of speech]}
kPro = [] # [(word, [phoneme strs], [phoneme ids])}
kPhoIds = OrderedDict() # {phoneme: id}
kPosDict = OrderedDict() # {part of speech: [words]}
kCadDict = OrderedDict() # {part of speech: CadDictEntry(cad tree -> [words])}
def randFromList(a):
    # Uniformly pick one element of the (non-empty) sequence.
    idx = random.randrange(len(a))
    return a[idx]
def randFromListofLists(a):
    # Uniform pick across the concatenation of the sub-lists, without
    # actually concatenating them; None when everything is empty.
    total = sum(len(sub) for sub in a)
    if total == 0:
        return None
    pick = random.randrange(total)
    for sub in a:
        if pick < len(sub):
            return sub[pick]
        pick -= len(sub)
    assert False  # unreachable: pick < total by construction
def randFromListofListRanges(a):
    # 'a' holds (list, start, end) triples; pick uniformly over the union of
    # the half-open slices list[start:end]. Returns (None, 0) when the union
    # is empty (callers unpack two values, and the stored elements are pairs).
    total = sum(end - start for _, start, end in a)
    if total == 0:
        return None, 0
    pick = random.randrange(total)
    for sub, start, end in a:
        span = end - start
        if pick < span:
            return sub[pick + start]
        pick -= span
    assert False  # unreachable
class RhymeNode:
    # One node of a reversed-phoneme trie. During construction, 'temp'
    # collects this node's (word, rhyme_len) pairs; finish() flushes every
    # node into one shared flat list so each node can describe its subtree
    # as index ranges into it: own entries are [i, i2), the whole subtree
    # (own + descendants) is [i, j).
    def __init__(self):
        self.ch = [None] * len(kPhoIds)  # children, indexed by phoneme id
        self.i = 0    # start of own entries in the shared word list
        self.i2 = 0   # end of own entries / start of descendants' entries
        self.j = 0    # end of the subtree's entries
        self.temp = []  # staging area, dropped by finish()
    def finish(self, w):
        # Append own entries to the shared list 'w', then recurse so the
        # subtree occupies the contiguous slice w[i:j].
        self.i = len(w)
        w += self.temp
        self.i2 = len(w)
        self.temp = None  # free the staging list; node is now read-only
        for n in self.ch:
            if n is not None:
                n.finish(w)
        self.j = len(w)
class CadNode:
    # Node of the cadence tree: ch[k] follows a syllable with stress digit k
    # (0/1/2); 'r' is the rhyme trie over words whose stress pattern ends at
    # this node; 'w' is the flat word list that trie indexes after finish().
    def __init__(self):
        self.ch = [None, None, None]
        self.w = None
        self.r = RhymeNode()
    def add(self, w, rr):
        # Insert word 'w' into the rhyme trie along reversed phoneme ids 'rr'.
        n = self.r
        for p in rr:
            if n.ch[p] is None:
                n.ch[p] = RhymeNode()
            n = n.ch[p]
        n.temp.append((w, len(rr)))
    def finish(self):
        # Freeze cadence children first, then flatten the rhyme trie into w.
        for n in self.ch:
            if n is not None:
                n.finish()
        self.w = []
        self.r.finish(self.w)
    def rhyme(self, rr, z):
        # Walk the rhyme trie along 'rr', appending candidate (list, lo, hi)
        # ranges to 'z'.
        n = self.r
        for p in rr:
            if n != self.r:
                # Partial match: only this node's own entries; descendants
                # are contributed by the deeper, more specific step below.
                z.append((self.w, n.i, n.i2))
            n = n.ch[p]
            if n is None:
                return
        z.append((self.w, n.i, n.j))
class CadDictEntry:
    # Per part-of-speech index: a cadence (stress-pattern) tree whose nodes
    # carry rhyme tries.
    def __init__(self):
        self.root = CadNode()
    def add(self, w, a, rr):
        # 'a' is the phoneme list with trailing stress digits; descend one
        # tree edge per stressed syllable marker (0/1/2).
        n = self.root
        for p in a:
            k = ord(p[-1]) - ord('0')
            if k < 0 or k > 2:
                continue  # no stress digit on this phoneme: not a syllable
            if n.ch[k] is None:
                n.ch[k] = CadNode()
            n = n.ch[k]
        n.add(w, rr)
    def rand(self, c, rr):
        # Collect every cadence-tree node reachable via pattern 'c'
        # (-1 matches any stress), gather their rhyme ranges for suffix 'rr',
        # and sample uniformly over the union of those ranges.
        a = [self.root]
        for k in c:
            b = []
            if k == -1:
                for n in a:
                    for m in n.ch:
                        if m is not None:
                            b.append(m)
            else:
                for n in a:
                    m = n.ch[k]
                    if m is not None:
                        b.append(m)
            a = b
        z = []
        for n in a:
            n.rhyme(rr, z)
        return randFromListofListRanges(z)
    def finish(self):
        self.root.finish()
#################
### READ DATA ###
#################
# argv[1]: word-frequency list, "word,..." per line; only the first
# kFreqDictSize entries are kept (assumes the file is sorted by
# frequency - TODO confirm).
with open(sys.argv[1], "r") as f:
    for l in f:
        if len(kFreqDict) >= kFreqDictSize:
            break
        i = l.rfind(',')
        if i == -1:
            continue
        kFreqDict.add(l[:i].strip().lower())
# argv[2]: part-of-speech dictionary, "word\PoS-letters" per line in
# mac_roman encoding (presumably the Moby PoS format - verify).
with open(sys.argv[2], "r", encoding="mac_roman") as f:
    while True:
        l = f.readline()
        if len(l) == 0:
            break
        a = l[:-1].split('\\')
        if len(a) != 2:
            continue
        word = a[0].strip().lower()
        if word not in kFreqDict:
            continue
        if word not in kPos:
            kPos[word] = []
        kPos[word] += [p for p in a[1] if p in "NphVtiAvCP!rD"]
# argv[3]: pronouncing dictionary, "word PH0 PH1 ..." per line; vowel
# phonemes carry a trailing stress digit 0/1/2 (CMUdict-style - TODO confirm).
with open(sys.argv[3], "r") as f:
    for l in f:
        l = l.split('#')[0] # Some lines have comments. Discard them.
        a = l.split(' ')
        assert(a[0].lower() == a[0])
        assert(all(a[i].upper() == a[i] for i in range(1, len(a))))
        word = a[0].split('(')[0] # Some words have a (2) suffix. Discard it.
        if word not in kFreqDict:
            continue
        a = [p.strip() for p in a[1:] if len(p.strip()) > 0]
        syl = sum(1 if p[-1] in "012" else 0 for p in a)
        if syl == 0:
            continue
        a2 = []
        for p in a:
            # Strip the stress digit before assigning phoneme ids, so e.g.
            # AH0 and AH1 share one id.
            if p[-1] in "012":
                p = p[:-1]
            if p not in kPhoIds:
                i = len(kPhoIds)
                kPhoIds[p] = i
            a2.append(kPhoIds[p])
        # Store (word, phonemes-with-stress, reversed phoneme ids); the
        # reversed ids drive suffix-first rhyme matching.
        kPro.append((word, a, [p for p in reversed(a2)]))
##################
### MERGE DATA ###
##################
# Join pronunciations with parts of speech: every word that has both is
# indexed per PoS into the flat kPosDict lists and the kCadDict trees.
for word, pro, rr in kPro:
    if word not in kPos:
        continue
    for p in kPos[word]:
        if p not in kCadDict:
            kCadDict[p] = CadDictEntry()
        kCadDict[p].add(word, pro, rr)
        if p not in kPosDict:
            kPosDict[p] = []
        kPosDict[p].append(word)
        if kIgnoreSecondaryPoS:
            break
# Freeze the trees (flattens every rhyme trie into its word list).
for e in kCadDict.values():
    e.finish()
#####################
### GRAMMAR RULES ###
#####################
# N Noun apple
# p Plural apples
# h Noun Phrase apple cider
# V Verb (usu participle) argued
# t Verb (transitive) amend
# i Verb (intransitive) arrive
# A Adjective armored
# v Adverb arrogantly
# C Conjunction and
# P Preposition among
# I Interjection alas
# r Pronoun anyone
# D Definite Article another
# S = I? NP VP (C S)?
# NP = (D A* (N | h) | A* p | r) (P NP)?
# VP = v? (i | V) | v? t NP
def randb(pr = 0.5):
    # Biased coin flip: True with probability pr.
    roll = random.random()
    return roll < pr
def randw(p, c, rr):
    # Random word of part-of-speech p matching cadence c and rhyme suffix rr;
    # yields (word, matched_rhyme_len), or (None, 0) when nothing fits.
    return kCadDict[p].rand(c, rr)
def randwraw(p):
    # Random word of part-of-speech p, ignoring cadence and rhyme.
    return randFromList(kPosDict[p])
def genA():
    # Emit a (possibly empty) run of adjective slots; every additional
    # adjective becomes progressively less likely.
    adjectives = []
    while randb(0.5 / (1 + 0.8 * len(adjectives))):
        adjectives.append('A')
    return adjectives
def genNP(n = 0):
    # Noun phrase per the grammar above: NP = (D A* (N | h) | A* p | r) (P NP)?
    # 'n' is the recursion depth; it damps the chance of chaining another
    # prepositional phrase so phrases stay bounded.
    a = []
    if randb():
        a.append('D')
        a += genA()
        if randb():
            a.append('N')
        else:
            a.append('h')
    elif randb():
        a += genA()
        a.append('p')
    else:
        a.append('r')
    if randb(0.3 / (n + 1)):
        a.append('P')
        a += genNP(n + 1)
    return a
def genVP():
    # Verb phrase per the grammar above: VP = v? (i | V) | v? t NP
    # (optional adverb, then either an intransitive/participle verb or a
    # transitive verb with its object noun phrase).
    a = []
    if randb():
        if randb():
            a.append('v')
        if randb():
            a.append('i')
        else:
            a.append('V')
    else:
        if randb():
            a.append('v')
        a.append('t')
        a += genNP()
    return a
def genPos(n = 0):
    # Sentence skeleton: S = NP VP (C S)?  Depth 'n' damps the chance of
    # chaining another clause behind a conjunction.
    parts = []
    parts += genNP()
    parts += genVP()
    if randb(0.3 / (n + 1)):
        parts.append('C')
        parts += genPos(n + 1)
    return parts
##########################
### SENTENCE GENERATOR ###
##########################
# Counters for the retry-rate report at the bottom of the script.
# ('sucesses' is a typo (sic) for 'successes'; it is referenced elsewhere
# in this file, so renaming must be done everywhere at once.)
fails = 0
sucesses = 0
def genS(cadence, rhyme):
    """Generate a sentence whose stress pattern matches `cadence` and whose
    ending rhymes with the phoneme list `rhyme`.

    `cadence` is a list of stress values (0/1/2, -1 = wildcard), one per
    syllable; `rhyme` is a list of phoneme strings (empty = no constraint).
    Retries whole candidate part-of-speech sequences until every slot can be
    filled, updating the module-level fails/sucesses counters as it goes.
    Returns the sentence as a list of words.
    """
    global fails
    global sucesses
    # Rhymes are matched suffix-first, so reverse the phonemes into ids.
    rr = [kPhoIds[p] for p in reversed(rhyme)]
    ts = len(cadence)
    while True:
        pos = genPos()
        if len(pos) > ts:
            fails += 1
            continue
        # Every word gets at least one syllable; sprinkle the remaining
        # syllables over the words at random.
        syl = [1] * len(pos)
        for _ in range(ts - len(pos)):
            # This is inefficient if ts is large, but it's fine for now.
            syl[random.randrange(len(syl))] += 1
        a = []
        j, k = len(cadence), 0
        # Fill right-to-left so the rhyme constraint binds the sentence
        # ending first; k tracks how much of the rhyme is already matched.
        for i in range(len(pos) - 1, -1, -1):
            pj = j
            j -= syl[i]
            w, np = randw(pos[i], cadence[j:pj], rr[k:])
            a.append(w)
            k = min(k + np, len(rr))
        # BUG FIX: identity comparison with None ('is None'), not '== None'.
        if any(w is None for w in a):
            fails += 1
            continue
        sucesses += 1
        return list(reversed(a))
def genSraw():
    # Unconstrained sentence: random grammar, random words, no cadence/rhyme.
    pos = genPos()
    print(pos)  # NOTE(review): looks like leftover debug output - confirm
    return [randwraw(p) for p in pos]
########################
### GENERATE RESULTS ###
########################
# Emit eight lines matching the target cadence/rhyme, then report how many
# candidate sentences were rejected per success.
for i in range(8):
    s = genS(kTargetCadence, kTargetPhonemes)
    # s = genSraw()
    print(' '.join(s) + '\n')
# genS only returns after a success, so 'sucesses' is nonzero here.
print("Retry rate: %.1fx" % (fails / sucesses))
########################
### GENERATE JS DATA ###
########################
# print("[%s]" % (",".join("[%r,%r,[%s]]" % (
# word, kPos[word][0], ",".join(repr(p) for p in pro)
# ) for word, pro, rr in kPro if word in kPos)))
|
<reponame>pulumi/pulumi-alicloud<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Explicit public API of this generated module (mirrors the bridged
# Terraform schema's output/object types).
__all__ = [
    'AlertNotificationList',
    'AlertQueryList',
    'EtlEtlSink',
    'OssShipperParquetConfig',
    'StoreEncryptConf',
    'StoreEncryptConfUserCmkInfo',
    'StoreIndexFieldSearch',
    'StoreIndexFieldSearchJsonKey',
    'StoreIndexFullText',
    'StoreShard',
    'GetProjectsProjectResult',
    'GetStoresStoreResult',
]
@pulumi.output_type
class AlertNotificationList(dict):
    # Generated output type: values are stored under snake_case dict keys;
    # __key_warning shims legacy camelCase dict access with a warning that
    # points callers at the property getter instead.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "emailLists":
            suggest = "email_lists"
        elif key == "mobileLists":
            suggest = "mobile_lists"
        elif key == "serviceUri":
            suggest = "service_uri"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AlertNotificationList. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AlertNotificationList.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AlertNotificationList.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 content: str,
                 type: str,
                 email_lists: Optional[Sequence[str]] = None,
                 mobile_lists: Optional[Sequence[str]] = None,
                 service_uri: Optional[str] = None):
        """
        :param str content: Notice content of alarm.
        :param str type: Notification type. support Email, SMS, DingTalk, MessageCenter.
        :param Sequence[str] email_lists: Email address list.
        :param Sequence[str] mobile_lists: SMS sending mobile number.
        :param str service_uri: Request address.
        """
        pulumi.set(__self__, "content", content)
        pulumi.set(__self__, "type", type)
        if email_lists is not None:
            pulumi.set(__self__, "email_lists", email_lists)
        if mobile_lists is not None:
            pulumi.set(__self__, "mobile_lists", mobile_lists)
        if service_uri is not None:
            pulumi.set(__self__, "service_uri", service_uri)
    @property
    @pulumi.getter
    def content(self) -> str:
        """
        Notice content of alarm.
        """
        return pulumi.get(self, "content")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Notification type. support Email, SMS, DingTalk, MessageCenter.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="emailLists")
    def email_lists(self) -> Optional[Sequence[str]]:
        """
        Email address list.
        """
        return pulumi.get(self, "email_lists")
    @property
    @pulumi.getter(name="mobileLists")
    def mobile_lists(self) -> Optional[Sequence[str]]:
        """
        SMS sending mobile number.
        """
        return pulumi.get(self, "mobile_lists")
    @property
    @pulumi.getter(name="serviceUri")
    def service_uri(self) -> Optional[str]:
        """
        Request address.
        """
        return pulumi.get(self, "service_uri")
@pulumi.output_type
class AlertQueryList(dict):
    # Generated output type describing one chart query of an alert; the
    # __key_warning shim maps legacy camelCase keys to snake_case properties.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "chartTitle":
            suggest = "chart_title"
        elif key == "timeSpanType":
            suggest = "time_span_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AlertQueryList. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        AlertQueryList.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        AlertQueryList.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 chart_title: str,
                 end: str,
                 logstore: str,
                 query: str,
                 start: str,
                 time_span_type: Optional[str] = None):
        """
        :param str chart_title: chart title
        :param str end: end time. example: 20s.
        :param str logstore: Query logstore
        :param str query: query corresponding to chart. example: * AND aliyun.
        :param str start: begin time. example: -60s.
        :param str time_span_type: default Custom. No need to configure this parameter.
        """
        pulumi.set(__self__, "chart_title", chart_title)
        pulumi.set(__self__, "end", end)
        pulumi.set(__self__, "logstore", logstore)
        pulumi.set(__self__, "query", query)
        pulumi.set(__self__, "start", start)
        if time_span_type is not None:
            pulumi.set(__self__, "time_span_type", time_span_type)
    @property
    @pulumi.getter(name="chartTitle")
    def chart_title(self) -> str:
        """
        chart title
        """
        return pulumi.get(self, "chart_title")
    @property
    @pulumi.getter
    def end(self) -> str:
        """
        end time. example: 20s.
        """
        return pulumi.get(self, "end")
    @property
    @pulumi.getter
    def logstore(self) -> str:
        """
        Query logstore
        """
        return pulumi.get(self, "logstore")
    @property
    @pulumi.getter
    def query(self) -> str:
        """
        query corresponding to chart. example: * AND aliyun.
        """
        return pulumi.get(self, "query")
    @property
    @pulumi.getter
    def start(self) -> str:
        """
        begin time. example: -60s.
        """
        return pulumi.get(self, "start")
    @property
    @pulumi.getter(name="timeSpanType")
    def time_span_type(self) -> Optional[str]:
        """
        default Custom. No need to configure this parameter.
        """
        return pulumi.get(self, "time_span_type")
@pulumi.output_type
class EtlEtlSink(dict):
    # Generated output type for an ETL job's delivery sink; credentials may
    # be plain, KMS-encrypted, or delegated via an STS role (see role_arn).
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "accessKeyId":
            suggest = "access_key_id"
        elif key == "accessKeySecret":
            suggest = "access_key_secret"
        elif key == "kmsEncryptedAccessKeyId":
            suggest = "kms_encrypted_access_key_id"
        elif key == "kmsEncryptedAccessKeySecret":
            suggest = "kms_encrypted_access_key_secret"
        elif key == "roleArn":
            suggest = "role_arn"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in EtlEtlSink. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        EtlEtlSink.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        EtlEtlSink.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 endpoint: str,
                 logstore: str,
                 name: str,
                 project: str,
                 access_key_id: Optional[str] = None,
                 access_key_secret: Optional[str] = None,
                 kms_encrypted_access_key_id: Optional[str] = None,
                 kms_encrypted_access_key_secret: Optional[str] = None,
                 role_arn: Optional[str] = None,
                 type: Optional[str] = None):
        """
        :param str endpoint: Delivery target logstore region.
        :param str logstore: Delivery target logstore.
        :param str name: Delivery target name.
        :param str project: The project where the target logstore is delivered.
        :param str access_key_id: Delivery target logstore access key id.
        :param str access_key_secret: Delivery target logstore access key secret.
        :param str kms_encrypted_access_key_id: An KMS encrypts access key id used to a log etl job. If the `access_key_id` is filled in, this field will be ignored.
        :param str kms_encrypted_access_key_secret: An KMS encrypts access key secret used to a log etl job. If the `access_key_secret` is filled in, this field will be ignored.
        :param str role_arn: Sts role info under delivery target logstore. `role_arn` and `(access_key_id, access_key_secret)` fill in at most one. If you do not fill in both, then you must fill in `(kms_encrypted_access_key_id, kms_encrypted_access_key_secret, kms_encryption_access_key_id_context, kms_encryption_access_key_secret_context)` to use KMS to get the key pair.
        :param str type: ETL sinks type, the default value is AliyunLOG.
        """
        pulumi.set(__self__, "endpoint", endpoint)
        pulumi.set(__self__, "logstore", logstore)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "project", project)
        if access_key_id is not None:
            pulumi.set(__self__, "access_key_id", access_key_id)
        if access_key_secret is not None:
            pulumi.set(__self__, "access_key_secret", access_key_secret)
        if kms_encrypted_access_key_id is not None:
            pulumi.set(__self__, "kms_encrypted_access_key_id", kms_encrypted_access_key_id)
        if kms_encrypted_access_key_secret is not None:
            pulumi.set(__self__, "kms_encrypted_access_key_secret", kms_encrypted_access_key_secret)
        if role_arn is not None:
            pulumi.set(__self__, "role_arn", role_arn)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def endpoint(self) -> str:
        """
        Delivery target logstore region.
        """
        return pulumi.get(self, "endpoint")
    @property
    @pulumi.getter
    def logstore(self) -> str:
        """
        Delivery target logstore.
        """
        return pulumi.get(self, "logstore")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Delivery target name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def project(self) -> str:
        """
        The project where the target logstore is delivered.
        """
        return pulumi.get(self, "project")
    @property
    @pulumi.getter(name="accessKeyId")
    def access_key_id(self) -> Optional[str]:
        """
        Delivery target logstore access key id.
        """
        return pulumi.get(self, "access_key_id")
    @property
    @pulumi.getter(name="accessKeySecret")
    def access_key_secret(self) -> Optional[str]:
        """
        Delivery target logstore access key secret.
        """
        return pulumi.get(self, "access_key_secret")
    @property
    @pulumi.getter(name="kmsEncryptedAccessKeyId")
    def kms_encrypted_access_key_id(self) -> Optional[str]:
        """
        An KMS encrypts access key id used to a log etl job. If the `access_key_id` is filled in, this field will be ignored.
        """
        return pulumi.get(self, "kms_encrypted_access_key_id")
    @property
    @pulumi.getter(name="kmsEncryptedAccessKeySecret")
    def kms_encrypted_access_key_secret(self) -> Optional[str]:
        """
        An KMS encrypts access key secret used to a log etl job. If the `access_key_secret` is filled in, this field will be ignored.
        """
        return pulumi.get(self, "kms_encrypted_access_key_secret")
    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> Optional[str]:
        """
        Sts role info under delivery target logstore. `role_arn` and `(access_key_id, access_key_secret)` fill in at most one. If you do not fill in both, then you must fill in `(kms_encrypted_access_key_id, kms_encrypted_access_key_secret, kms_encryption_access_key_id_context, kms_encryption_access_key_secret_context)` to use KMS to get the key pair.
        """
        return pulumi.get(self, "role_arn")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        ETL sinks type, the default value is AliyunLOG.
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class OssShipperParquetConfig(dict):
    # Generated output type: a (name, type) field descriptor for the OSS
    # shipper's parquet configuration; all keys are already snake_case, so
    # no camelCase key-warning shim is needed here.
    def __init__(__self__, *,
                 name: str,
                 type: str):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
@pulumi.output_type
class StoreEncryptConf(dict):
    # Generated output type for a log store's encryption configuration; the
    # __key_warning shim maps legacy camelCase keys to snake_case properties.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "encryptType":
            suggest = "encrypt_type"
        elif key == "userCmkInfo":
            suggest = "user_cmk_info"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StoreEncryptConf. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StoreEncryptConf.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StoreEncryptConf.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 enable: Optional[bool] = None,
                 encrypt_type: Optional[str] = None,
                 user_cmk_info: Optional['outputs.StoreEncryptConfUserCmkInfo'] = None):
        """
        :param bool enable: enable encryption. Default `false`
        :param str encrypt_type: Supported encryption type, only supports `default(AES)`,` m4`
        :param 'StoreEncryptConfUserCmkInfoArgs' user_cmk_info: User bring your own key (BYOK) encryption.[Refer to details](https://www.alibabacloud.com/help/zh/doc-detail/187853.htm?spm=a2c63.p38356.b99.673.cafa2b38qBskFV)
        """
        if enable is not None:
            pulumi.set(__self__, "enable", enable)
        if encrypt_type is not None:
            pulumi.set(__self__, "encrypt_type", encrypt_type)
        if user_cmk_info is not None:
            pulumi.set(__self__, "user_cmk_info", user_cmk_info)
    @property
    @pulumi.getter
    def enable(self) -> Optional[bool]:
        """
        enable encryption. Default `false`
        """
        return pulumi.get(self, "enable")
    @property
    @pulumi.getter(name="encryptType")
    def encrypt_type(self) -> Optional[str]:
        """
        Supported encryption type, only supports `default(AES)`,` m4`
        """
        return pulumi.get(self, "encrypt_type")
    @property
    @pulumi.getter(name="userCmkInfo")
    def user_cmk_info(self) -> Optional['outputs.StoreEncryptConfUserCmkInfo']:
        """
        User bring your own key (BYOK) encryption.[Refer to details](https://www.alibabacloud.com/help/zh/doc-detail/187853.htm?spm=a2c63.p38356.b99.673.cafa2b38qBskFV)
        """
        return pulumi.get(self, "user_cmk_info")
@pulumi.output_type
class StoreEncryptConfUserCmkInfo(dict):
    # Generated output type for BYOK (user CMK) encryption details; the
    # __key_warning shim maps legacy camelCase keys to snake_case properties.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "cmkKeyId":
            suggest = "cmk_key_id"
        elif key == "regionId":
            suggest = "region_id"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StoreEncryptConfUserCmkInfo. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StoreEncryptConfUserCmkInfo.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StoreEncryptConfUserCmkInfo.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 arn: str,
                 cmk_key_id: str,
                 region_id: str):
        """
        :param str arn: role arn
        :param str cmk_key_id: User master key id
        :param str region_id: Region id where the user master key id is located
        """
        pulumi.set(__self__, "arn", arn)
        pulumi.set(__self__, "cmk_key_id", cmk_key_id)
        pulumi.set(__self__, "region_id", region_id)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        role arn
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="cmkKeyId")
    def cmk_key_id(self) -> str:
        """
        User master key id
        """
        return pulumi.get(self, "cmk_key_id")
    @property
    @pulumi.getter(name="regionId")
    def region_id(self) -> str:
        """
        Region id where the user master key id is located
        """
        return pulumi.get(self, "region_id")
@pulumi.output_type
class StoreIndexFieldSearch(dict):
    # Generated output type for a per-field search index configuration; the
    # __key_warning shim maps legacy camelCase keys to snake_case properties.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "caseSensitive":
            suggest = "case_sensitive"
        elif key == "enableAnalytics":
            suggest = "enable_analytics"
        elif key == "includeChinese":
            suggest = "include_chinese"
        elif key == "jsonKeys":
            suggest = "json_keys"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StoreIndexFieldSearch. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StoreIndexFieldSearch.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StoreIndexFieldSearch.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 name: str,
                 alias: Optional[str] = None,
                 case_sensitive: Optional[bool] = None,
                 enable_analytics: Optional[bool] = None,
                 include_chinese: Optional[bool] = None,
                 json_keys: Optional[Sequence['outputs.StoreIndexFieldSearchJsonKey']] = None,
                 token: Optional[str] = None,
                 type: Optional[str] = None):
        """
        :param str name: When using the json_keys field, this field is required.
        :param str alias: The alias of one field.
        :param bool case_sensitive: Whether the case sensitive for the field. Default to false. It is valid when "type" is "text" or "json".
        :param bool enable_analytics: Whether to enable field analytics. Default to true.
        :param bool include_chinese: Whether includes the chinese for the field. Default to false. It is valid when "type" is "text" or "json".
        :param Sequence['StoreIndexFieldSearchJsonKeyArgs'] json_keys: Use nested index when type is json
        :param str token: The string of several split words, like "\r", "#". It is valid when "type" is "text" or "json".
        :param str type: The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
        """
        pulumi.set(__self__, "name", name)
        if alias is not None:
            pulumi.set(__self__, "alias", alias)
        if case_sensitive is not None:
            pulumi.set(__self__, "case_sensitive", case_sensitive)
        if enable_analytics is not None:
            pulumi.set(__self__, "enable_analytics", enable_analytics)
        if include_chinese is not None:
            pulumi.set(__self__, "include_chinese", include_chinese)
        if json_keys is not None:
            pulumi.set(__self__, "json_keys", json_keys)
        if token is not None:
            pulumi.set(__self__, "token", token)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        When using the json_keys field, this field is required.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def alias(self) -> Optional[str]:
        """
        The alias of one field.
        """
        return pulumi.get(self, "alias")
    @property
    @pulumi.getter(name="caseSensitive")
    def case_sensitive(self) -> Optional[bool]:
        """
        Whether the case sensitive for the field. Default to false. It is valid when "type" is "text" or "json".
        """
        return pulumi.get(self, "case_sensitive")
    @property
    @pulumi.getter(name="enableAnalytics")
    def enable_analytics(self) -> Optional[bool]:
        """
        Whether to enable field analytics. Default to true.
        """
        return pulumi.get(self, "enable_analytics")
    @property
    @pulumi.getter(name="includeChinese")
    def include_chinese(self) -> Optional[bool]:
        """
        Whether includes the chinese for the field. Default to false. It is valid when "type" is "text" or "json".
        """
        return pulumi.get(self, "include_chinese")
    @property
    @pulumi.getter(name="jsonKeys")
    def json_keys(self) -> Optional[Sequence['outputs.StoreIndexFieldSearchJsonKey']]:
        """
        Use nested index when type is json
        """
        return pulumi.get(self, "json_keys")
    @property
    @pulumi.getter
    def token(self) -> Optional[str]:
        """
        The string of several split words, like "\r", "#". It is valid when "type" is "text" or "json".
        """
        return pulumi.get(self, "token")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class StoreIndexFieldSearchJsonKey(dict):
    # Generated output type for one nested key of a JSON-typed index field;
    # the __key_warning shim maps legacy camelCase keys to snake_case.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "docValue":
            suggest = "doc_value"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StoreIndexFieldSearchJsonKey. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StoreIndexFieldSearchJsonKey.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StoreIndexFieldSearchJsonKey.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 name: str,
                 alias: Optional[str] = None,
                 doc_value: Optional[bool] = None,
                 type: Optional[str] = None):
        """
        :param str name: When using the json_keys field, this field is required.
        :param str alias: The alias of one field.
        :param bool doc_value: Whether to enable statistics. default to true.
        :param str type: The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
        """
        pulumi.set(__self__, "name", name)
        if alias is not None:
            pulumi.set(__self__, "alias", alias)
        if doc_value is not None:
            pulumi.set(__self__, "doc_value", doc_value)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        When using the json_keys field, this field is required.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def alias(self) -> Optional[str]:
        """
        The alias of one field.
        """
        return pulumi.get(self, "alias")
    @property
    @pulumi.getter(name="docValue")
    def doc_value(self) -> Optional[bool]:
        """
        Whether to enable statistics. default to true.
        """
        return pulumi.get(self, "doc_value")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        The type of one field. Valid values: ["long", "text", "double"]. Default to "long"
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class StoreIndexFullText(dict):
    # Generated output type for a log store's full-text index settings; the
    # __key_warning shim maps legacy camelCase keys to snake_case properties.
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "caseSensitive":
            suggest = "case_sensitive"
        elif key == "includeChinese":
            suggest = "include_chinese"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StoreIndexFullText. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StoreIndexFullText.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StoreIndexFullText.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 case_sensitive: Optional[bool] = None,
                 include_chinese: Optional[bool] = None,
                 token: Optional[str] = None):
        """
        :param bool case_sensitive: Whether the case sensitive for the field. Default to false. It is valid when "type" is "text" or "json".
        :param bool include_chinese: Whether includes the chinese for the field. Default to false. It is valid when "type" is "text" or "json".
        :param str token: The string of several split words, like "\r", "#". It is valid when "type" is "text" or "json".
        """
        if case_sensitive is not None:
            pulumi.set(__self__, "case_sensitive", case_sensitive)
        if include_chinese is not None:
            pulumi.set(__self__, "include_chinese", include_chinese)
        if token is not None:
            pulumi.set(__self__, "token", token)
    @property
    @pulumi.getter(name="caseSensitive")
    def case_sensitive(self) -> Optional[bool]:
        """
        Whether the case sensitive for the field. Default to false. It is valid when "type" is "text" or "json".
        """
        return pulumi.get(self, "case_sensitive")
    @property
    @pulumi.getter(name="includeChinese")
    def include_chinese(self) -> Optional[bool]:
        """
        Whether includes the chinese for the field. Default to false. It is valid when "type" is "text" or "json".
        """
        return pulumi.get(self, "include_chinese")
    @property
    @pulumi.getter
    def token(self) -> Optional[str]:
        """
        The string of several split words, like "\r", "#". It is valid when "type" is "text" or "json".
        """
        return pulumi.get(self, "token")
@pulumi.output_type
class StoreShard(dict):
    """A single shard of a log store.

    Behaves like a dict, but the camelCase wire keys ("beginKey", "endKey")
    are exposed through snake_case property getters; dict-style access to a
    camelCase key logs a warning pointing at the property getter instead.
    """
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "beginKey":
            suggest = "begin_key"
        elif key == "endKey":
            suggest = "end_key"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StoreShard. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        StoreShard.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        StoreShard.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 begin_key: Optional[str] = None,
                 end_key: Optional[str] = None,
                 id: Optional[int] = None,
                 status: Optional[str] = None):
        """
        :param int id: The ID of the shard.
        """
        # Only set attributes that were actually provided.
        if begin_key is not None:
            pulumi.set(__self__, "begin_key", begin_key)
        if end_key is not None:
            pulumi.set(__self__, "end_key", end_key)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="beginKey")
    def begin_key(self) -> Optional[str]:
        return pulumi.get(self, "begin_key")

    @property
    @pulumi.getter(name="endKey")
    def end_key(self) -> Optional[str]:
        return pulumi.get(self, "end_key")

    @property
    @pulumi.getter
    def id(self) -> Optional[int]:
        """
        The ID of the shard.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        return pulumi.get(self, "status")
@pulumi.output_type
class GetProjectsProjectResult(dict):
    """A single project entry as returned by the projects data source."""

    def __init__(__self__, *,
                 description: str,
                 id: str,
                 last_modify_time: str,
                 owner: str,
                 project_name: str,
                 region: str,
                 status: str):
        """
        :param str description: The description of the project.
        :param str id: The ID of the project.
        :param str last_modify_time: The last modify time of project.
        :param str owner: The owner of project.
        :param str project_name: The name of the project.
        :param str region: The region of project.
        :param str status: The status of project.
        """
        # All fields are required, so set every one unconditionally.
        for attr, value in (("description", description),
                            ("id", id),
                            ("last_modify_time", last_modify_time),
                            ("owner", owner),
                            ("project_name", project_name),
                            ("region", region),
                            ("status", status)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def description(self) -> str:
        """
        The description of the project.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the project.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastModifyTime")
    def last_modify_time(self) -> str:
        """
        The last modify time of project.
        """
        return pulumi.get(self, "last_modify_time")

    @property
    @pulumi.getter
    def owner(self) -> str:
        """
        The owner of project.
        """
        return pulumi.get(self, "owner")

    @property
    @pulumi.getter(name="projectName")
    def project_name(self) -> str:
        """
        The name of the project.
        """
        return pulumi.get(self, "project_name")

    @property
    @pulumi.getter
    def region(self) -> str:
        """
        The region of project.
        """
        return pulumi.get(self, "region")

    @property
    @pulumi.getter
    def status(self) -> str:
        """
        The status of project.
        """
        return pulumi.get(self, "status")
@pulumi.output_type
class GetStoresStoreResult(dict):
    """A single store entry as returned by the stores data source."""

    def __init__(__self__, *,
                 id: str,
                 store_name: str):
        """
        :param str id: The ID of the store.
        :param str store_name: The name of the store.
        """
        # Both fields are required, so set each one unconditionally.
        for attr, value in (("id", id), ("store_name", store_name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the store.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="storeName")
    def store_name(self) -> str:
        """
        The name of the store.
        """
        return pulumi.get(self, "store_name")
|
import unittest
import os
from unittest import TestCase
from dispatcher.aota.aota import AOTA
from dispatcher.aota.aota_command import Docker, DockerCompose
from dispatcher.aota.aota_command import DirectoryRepo
from dispatcher.aota.aota_error import AotaError
from dispatcher.aota.constants import SupportedDriver
from ..common.mock_resources import *
from mock import patch
from typing import Any
from dispatcher.common.result_constants import (
INSTALL_SUCCESS,
INSTALL_FAILURE,
COMMAND_SUCCESS,
UNABLE_TO_DOWNLOAD_APPLICATION_PACKAGE,
UNABLE_TO_DOWNLOAD_DOCKER_COMPOSE
)
from dispatcher.dispatcher_exception import DispatcherException
from dispatcher.packagemanager import package_manager
from dispatcher.packagemanager.memory_repo import MemoryRepo
from inbm_common_lib.utility import canonicalize_uri
from inbm_lib.trtl import Trtl
# Path of the XSD used to validate OTA manifests.
# NOTE(review): not referenced anywhere in this test module — confirm it is
# used elsewhere (e.g. via mock_resources) before removing.
SCHEMA_LOCATION = './packaging/config/manifest_schema.xsd'
class TestAOTA(TestCase):
    """Unit tests for the AOTA command family (docker, docker-compose and
    application OTA flavours), exercised through AOTA.run() and directly
    through the Docker / DockerCompose command classes.

    Fixes applied in review: the `<PASSWORD>` redaction placeholders in
    `_build_aota` were syntax errors (restored to `None` / `docker_password`),
    and deprecated `assertEquals` calls were replaced with `assertEqual`.
    """

    @patch('os.rmdir')
    @patch('dispatcher.aota.aota_command.get', return_value=Result(400, "Unable to download application package."))
    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.AotaCommand.create_repository_cache_repo')
    def test_failure_one_application_update(self, mock_create_repo, mock_verify_source, mock_get_file, mock_osdir):
        aota = TestAOTA._build_aota(uri='file://sample/test.tar',
                                    app_type='application', cmd='update')
        with self.assertRaisesRegex(AotaError, UNABLE_TO_DOWNLOAD_APPLICATION_PACKAGE.message):
            aota.run()

    def test_application_update_fail(self):
        a = TestAOTA._build_aota(app_type='application', cmd='update')
        with self.assertRaisesRegex(AotaError, 'missing URL.'):
            a.run()

    @patch('os.rmdir')
    @patch('dispatcher.aota.application_command.get', return_value=Result(200, "ok"))
    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.AotaCommand.create_repository_cache_repo')
    @patch('dispatcher.aota.factory.detect_os', return_value='Ubuntu')
    @patch('dispatcher.aota.aota_command.DirectoryRepo.get_repo_path', return_value='abc/bdb')
    @patch('inbm_common_lib.shell_runner.PseudoShellRunner.run', return_value=['', 'update failed', 2])
    @patch('dispatcher.aota.aota.cleanup_repo')
    @patch('dispatcher.aota.application_command.is_inside_container', return_value=False)
    def test_raise_when_application_update_fails(self, check_os, mock_cleanup, mock_shell, mock_get_repo, mock_platform,
                                                 mock_create_repo,
                                                 mock_verify_source, mock_get_file, mock_osdir):
        mock_create_repo.return_value = DirectoryRepo(os.path.join('abc', "aota"))
        aota = TestAOTA._build_aota(uri='file://sample/test.tar',
                                    app_type='application', cmd='update')
        with self.assertRaisesRegex(AotaError, 'AOTA application update FAILED: update failed'):
            aota.run()
        mock_cleanup.assert_called_once()

    @patch('dispatcher.aota.checker.verify_source')
    def test_fails_install_with_resource_type_empty(self, mock_verify_source):
        a = TestAOTA._build_aota(
            uri='file://sample/test.rpm', container_tag='abc', cmd='up', app_type='compose')
        with self.assertRaisesRegex(AotaError, 'AOTA compose up FAILED: Unable to download docker-compose container.'):
            a.run()

    @patch('dispatcher.aota.checker.verify_source', side_effect='Invalid package should be .tar or .tgz')
    def test_fails_source_verification_check(self, mock_verify_source):
        aota = TestAOTA._build_aota(uri='file://sample/test.rpm', app_type='docker',
                                    container_tag='abc', cmd='load')
        with self.assertRaisesRegex(AotaError, 'AOTA docker load FAILED: Invalid package type; should be .tar or .tgz'):
            aota.run()

    @patch('dispatcher.aota.checker.verify_source')
    def test_fails_package_name_load(self, mock_verify_source):
        aota = TestAOTA._build_aota(uri='file://sample/test.rpm',
                                    container_tag='abc', app_type='docker', cmd='load')
        with self.assertRaisesRegex(AotaError, 'Invalid package type; should be .tar or .tgz'):
            aota.run()

    @patch('os.rmdir')
    @patch('dispatcher.aota.aota_command.TrtlContainer.image_load', return_value=INSTALL_SUCCESS)
    @patch('dispatcher.aota.aota_command.get', return_value=Result(200, "OK"))
    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.application_command.AotaCommand.create_repository_cache_repo')
    def test_success_load(self, mock_create_repo, mock_verify_source, mock_get_file, mock_image_load, mock_osdir):
        aota = TestAOTA._build_aota(uri='file://sample/test.tar', app_type='docker',
                                    container_tag='abc', cmd='load', instance='docker')
        try:
            self.assertEqual(None, aota.load())
        except AotaError:
            self.fail("Exception raised when not expected.")

    @patch('os.rmdir')
    @patch('dispatcher.aota.aota_command.TrtlContainer.image_load', return_value=INSTALL_FAILURE)
    @patch('dispatcher.aota.aota_command.get', return_value=INSTALL_FAILURE)
    @patch('dispatcher.aota.checker.verify_source')
    def test_fail_load(self, mock_verify_source, mock_get_file, mock_image_load, mock_osdir):
        aota = TestAOTA._build_aota(uri='file://sample/test.tar',
                                    container_tag='abc', cmd='load', app_type='docker')
        with self.assertRaisesRegex(AotaError, INSTALL_FAILURE.message):
            aota.run()

    def test_exception(self):
        x = TestAOTA._build_mock_repo(0)
        with self.assertRaises(ValueError):
            package_manager.get(canonicalize_uri(''), x, 0)

    @staticmethod
    def _build_mock_repo(num_files=0):
        """Build an in-memory repo pre-populated with num_files dummy RPMs."""
        mem_repo = MemoryRepo('test')
        if num_files != 0:
            for i in range(0, num_files):
                mem_repo.add('test' + str(i + 1) + '.rpm', b'0123456789')
        return mem_repo

    @patch('inbm_lib.trtl.Trtl.image_pull_public', return_value=["", "", 0])
    @patch('dispatcher.aota.aota_command.DockerCompose._download')
    def test_compose_pull_success(self, mock_download, mock_pull):
        aota = TestAOTA._build_aota(container_tag='abc', uri='https://sample/test.tar.gz',
                                    cmd='pull',
                                    app_type='compose', username='tj', password='secret')
        try:
            aota.run()
        except AotaError as e:
            self.fail(f'AotoError raised when not expected: {e}')

    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.get', return_value=dummy_success)
    def test_http_server_username_password(self, mock_get_repo, mock_verify_source):
        aota = TestAOTA._build_aota(container_tag='abc', uri='http://sample/test.tar.gz',
                                    cmd='pull',
                                    app_type='compose', username='tj', password='secret')
        with self.assertRaisesRegex(AotaError, 'Bad request: username/password will not be processed on HTTP server'):
            aota.run()

    def test_compose_list_with_no_container_tag(self):
        aota = TestAOTA._build_aota(app_type='compose', cmd='list')
        with self.assertRaisesRegex(
                AotaError, 'AOTA compose list FAILED: missing container tag.'):
            aota.run()

    def test_docker_pull_no_password(self):
        aota = TestAOTA._build_aota(app_type='docker', cmd='pull', container_tag="hello",
                                    docker_username='user', docker_registry="https://www.example.com/")
        with self.assertRaisesRegex(
                AotaError, 'Missing docker password in Manifest'):
            aota.run()

    def test_docker_pull_no_username(self):
        aota = TestAOTA._build_aota(app_type='docker', cmd='pull', container_tag="hello",
                                    docker_password='secret', docker_registry="https://www.example.com/")
        with self.assertRaisesRegex(
                AotaError, 'Missing docker username in Manifest'):
            aota.run()

    @patch('inbm_lib.trtl.Trtl.remove_old_images', return_value=None)
    @patch('dispatcher.aota.aota_command.Docker.pull')
    def test_docker_pull_called(self, mock_docker_pull, mock_trtl_pull):
        aota = TestAOTA._build_aota(app_type='docker', cmd='pull', container_tag="hello-world")
        aota.run()
        mock_docker_pull.assert_called_once()
        mock_trtl_pull.assert_not_called()

    @patch('inbm_lib.trtl.Trtl.image_pull_public', return_value=["", "", 0])
    @patch('inbm_lib.trtl.Trtl.remove_old_images', return_value=None)
    def test_docker_pull_public_success(self, mock_trtl_old, mock_trtl_pull):
        aota = TestAOTA._build_aota(app_type='docker', cmd='pull', container_tag="hello",
                                    docker_registry='https://docker.hub')
        try:
            aota.run()
        except AotaError:
            self.fail('AotaError received when not expected')

    def test_application_up_fail(self):
        aota = TestAOTA._build_aota(app_type='application', cmd='up')
        with self.assertRaisesRegex(
                AotaError, 'AOTA application up FAILED: Unsupported Application command: up'):
            aota.run()

    def test_compose_up_fail_one(self):
        aota = TestAOTA._build_aota(app_type='compose', cmd='update',
                                    container_tag="hello",
                                    docker_registry="https://www.example.com/",
                                    uri='file://sample/test.tar.gz')
        with self.assertRaisesRegex(
                AotaError, 'Unsupported Docker Compose command: update'):
            aota.run()

    def test_compose_update_fail(self):
        aota = TestAOTA._build_aota(app_type='compose', cmd='up', container_tag='abc')
        with self.assertRaisesRegex(
                AotaError, 'AOTA compose up FAILED: fetch URI is required.'):
            aota.run()

    def test_docker_update_fail(self):
        aota = TestAOTA._build_aota(app_type='docker', cmd='update')
        with self.assertRaisesRegex(
                AotaError, 'Unsupported Docker command: update'):
            aota.run()

    def test_docker_up_fail(self):
        aota = TestAOTA._build_aota(app_type='docker', cmd='up', container_tag="hello",
                                    docker_registry="https://www.example.com/")
        with self.assertRaisesRegex(
                AotaError, 'Unsupported Docker command: up'):
            aota.run()

    @patch('dispatcher.aota.checker.verify_source')
    def test_compose_docker_update_fail(self, mock_verify_source):
        mock_verify_source.return_value = True
        aota = TestAOTA._build_aota(app_type='compose', cmd='update', container_tag="hello",
                                    uri='file://sample/test.tar.gz')
        with self.assertRaisesRegex(
                AotaError, 'Unsupported Docker Compose command: update'):
            aota.run()

    @patch('dispatcher.aota.aota_command.DockerCompose.down')
    def test_docker_compose_down_function_called(self, mock_docker_down):
        aota = TestAOTA._build_aota(app_type='compose', cmd='down', container_tag="hello",
                                    docker_registry="https://www.example.com/")
        aota.run()
        mock_docker_down.assert_called_once()

    def test_docker_down_fail(self):
        aota = TestAOTA._build_aota(app_type='docker', cmd='down', container_tag="hello",
                                    docker_registry="https://www.example.com/")
        with self.assertRaisesRegex(
                AotaError, 'Unsupported Docker command: down'):
            aota.run()

    @patch('inbm_lib.trtl.Trtl.image_pull_public', return_value=["", "", 3])
    def test_docker_pull_public_fail(self, mock_trtl):
        aota = TestAOTA._build_aota(app_type='docker', cmd='pull', container_tag="hello",
                                    docker_registry='https://docker.hub')
        with self.assertRaisesRegex(AotaError, ''):
            aota.run()

    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.get', return_value=dummy_success)
    def test_docker_login_no_password(self, mock_get_repo, mock_verify_source):
        aota = TestAOTA._build_aota(container_tag='abc', uri='file://sample/test.tar.gz', cmd='pull',
                                    app_type='compose',
                                    need_repo=False,
                                    docker_registry='amr-registry-pre.caas.intel.com',
                                    docker_username='tj')
        with self.assertRaisesRegex(AotaError, 'Missing docker password in Manifest'):
            aota.run()

    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.get', return_value=dummy_success)
    def test_docker_login_no_username(self, mock_get_repo, mock_verify_source):
        aota = TestAOTA._build_aota(container_tag='abc', uri='file://sample/test.tar.gz', cmd='pull',
                                    app_type='compose',
                                    need_repo=False,
                                    docker_registry='amr-registry-pre.caas.intel.com',
                                    docker_password='secret')
        with self.assertRaisesRegex(AotaError, 'Missing docker username in Manifest'):
            aota.run()

    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.get', return_value=dummy_success)
    def test_docker_login_no_registry_url(self, mock_get_repo, mock_verify_source):
        aota = TestAOTA._build_aota(container_tag='abc', uri='file://sample/test.tar.gz', cmd='pull',
                                    app_type='compose',
                                    need_repo=False, docker_username='tj', docker_password='secret')
        with self.assertRaisesRegex(AotaError, 'Missing Docker private registry URL in Manifest'):
            aota.run()

    @staticmethod
    def _build_aota(num_files=0, container_tag=None, uri=None, cmd=None,
                    app_type=None, need_repo=True, file=None,
                    username=None, password=None, docker_registry=None, docker_username=None,
                    docker_password=None, instance=None, device_reboot=None):
        """Build an AOTA, Docker or DockerCompose command object from a
        synthetic parsed manifest.

        `instance` selects the concrete class ('docker'/'compose'); any other
        value yields the generic AOTA dispatcher object.
        """
        if need_repo:
            parsed_manifest = {'config_params': None, 'version': None,
                               'container_tag': container_tag, 'uri': uri,
                               'file': file,
                               'cmd': cmd, 'repo': TestAOTA._build_mock_repo(num_files),
                               'app_type': app_type,
                               'username': username, 'password': password,
                               'docker_registry': docker_registry,
                               'docker_username': docker_username,
                               'docker_password': docker_password,
                               'device_reboot': device_reboot}
        else:
            parsed_manifest = {'config_params': None, 'version': None,
                               'container_tag': container_tag, 'uri': uri,
                               'file': file,
                               'cmd': cmd, 'app_type': app_type, 'username': username,
                               'password': password,
                               'docker_registry': docker_registry,
                               'docker_username': docker_username,
                               'docker_password': docker_password,
                               'device_reboot': device_reboot}
        if instance == 'compose':
            return DockerCompose(MockDispatcherCallbacks.build_mock_dispatcher_callbacks(),
                                 parsed_manifest=parsed_manifest,
                                 dbs=ConfigDbs.ON)
        elif instance == 'docker':
            return Docker(MockDispatcherCallbacks.build_mock_dispatcher_callbacks(),
                          parsed_manifest=parsed_manifest,
                          dbs=ConfigDbs.ON)
        else:
            return AOTA(MockDispatcherCallbacks.build_mock_dispatcher_callbacks(),
                        parsed_manifest=parsed_manifest,
                        dbs=ConfigDbs.ON)

    @patch('inbm_lib.trtl.Trtl.login', return_value=("", "", 0))
    def test_docker_login_success(self, mock_login) -> Any:
        aota = self._build_aota(instance='docker', docker_username='username',
                                docker_password='password', docker_registry='foo')
        result = aota.docker_login()
        self.assertEqual(None, result)

    @patch('inbm_lib.trtl.Trtl.login', return_value=("", "", 1))
    def test_docker_login_fail(self, mock_login) -> Any:
        aota = self._build_aota(instance='docker')
        try:
            aota.docker_login()
            self.fail("Expected AotaError")
        except AotaError as e:
            self.assertEqual("Docker Registry is required for Docker Login.", str(e))

    @patch('dispatcher.aota.checker.verify_source')
    @patch.object(Trtl, 'up', return_value=("", "", 0))
    @patch('dispatcher.aota.aota_command.get', return_value=Result(200, "OK"))
    def test_compose_up_success(self, mock_start, mock_get, mock_verify):
        try:
            aota = self._build_aota(uri='file://sample/test.tar', instance='compose', app_type='compose', cmd='up',
                                    container_tag='abc')
            aota.up()
        except AotaError:
            self.fail("Exception raised when not expected.")

    @patch('dispatcher.aota.checker.verify_source')
    @patch.object(Trtl, 'start', return_value=("", "", 0))
    @patch('dispatcher.aota.aota_command.get', return_value=Result(404, "Not Found"))
    def test_raises_compose_up_unable_to_get_package(self, mock_start, mock_get, mock_verify):
        aota = self._build_aota(instance='compose', container_tag='abc',
                                uri='file://sample/test.tar')
        with self.assertRaisesRegex(AotaError, UNABLE_TO_DOWNLOAD_DOCKER_COMPOSE.message):
            aota.up()

    @patch.object(Trtl, 'stop_all', return_value=("", "", 0))
    def test_raise_When_docker_down_missing_container_tag(self, mock_down):
        aota = self._build_aota(cmd='down', app_type='docker', instance='docker')
        try:
            aota.down()
        except AotaError as e:
            self.assertEqual('missing container tag.', str(e))

    @patch.object(Trtl, 'image_remove_all', return_value=("", "", 0))
    @patch.object(Trtl, 'down', return_value=("", "", 0))
    def test_compose_remove_success(self, mock_down, mock_remove):
        aota = self._build_aota(container_tag='abc', cmd='remove',
                                app_type='compose', instance='compose')
        try:
            aota.remove()
        except AotaError as e:
            self.fail(f'AotaError when not expected: {e}')

    @patch.object(Trtl, 'image_remove_all', return_value=("", "couldn't remove image: abc", 1))
    @patch.object(Trtl, 'stop_all', return_value=("", "", 0))
    def test_docker_remove(self, mock_stop, mock_remove):
        aota = self._build_aota(container_tag='abc', cmd='remove',
                                app_type='docker', instance='docker')
        try:
            aota.remove()
        except AotaError as e:
            self.assertEqual("couldn't remove image: abc", str(e))

    @patch('dispatcher.aota.aota_command.Docker.down', return_value=COMMAND_SUCCESS)
    @patch.object(Trtl, 'image_remove_all', return_value=("", "", 0))
    @patch.object(Trtl, 'stop_all', return_value=("", "", 0))
    @patch('dispatcher.aota.aota_command.DirectoryRepo.exists', return_value=True)
    @patch('shutil.rmtree')
    def test_docker_remove_cleanup_dir(self,
                                       mock_rmtree,
                                       mock_exists,
                                       mock_stop,
                                       mock_remove,
                                       mock_docker_down) -> Any:
        aota = self._build_aota(container_tag='abc', cmd='remove',
                                app_type='docker', instance='docker')
        aota.remove()

    @patch.object(Trtl, 'stop_all', return_value=("", "", 0))
    def test_docker_down(self, mock_trtl):
        aota = self._build_aota(container_tag='abc', cmd='down', app_type='docker')
        with self.assertRaisesRegex(AotaError, 'Unsupported Docker command: down'):
            aota.run()

    @patch.object(Trtl, 'stop_all', return_value=("", "", 0))
    def test_docker_remove_missing_container_tag(self, mock_stop):
        aota = self._build_aota(cmd='remove', app_type='docker', instance='docker')
        try:
            aota.remove()
        except AotaError as e:
            self.assertEqual("missing container tag.", str(e))

    @patch.object(Trtl, 'stats', return_value="container stats here")
    def test_docker_stats_success(self, mock_stats):
        aota = self._build_aota(cmd='stats', app_type='docker')
        res = aota.run()
        self.assertEqual(None, res)

    @patch('dispatcher.aota.aota_command.Docker.stats')
    def test_docker_stats_failed(self, mock_stats):
        aota = self._build_aota(cmd='stats', app_type='docker')
        aota.run()
        mock_stats.assert_called_once()

    @patch('os.rmdir')
    def test_compose_pull_no_uri(self, mock_rmdir):
        aota = self._build_aota(cmd='pull', app_type='compose', container_tag='abc')
        with self.assertRaisesRegex(AotaError, "AOTA compose pull FAILED: fetch URI is required."):
            aota.run()

    @patch('dispatcher.aota.checker.verify_source')
    def test_raise_when_container_tag_empty(self, mock_verify_source):
        aota = TestAOTA._build_aota(
            cmd='up', uri='file://sample/test.rpm', container_tag='', app_type='compose')
        with self.assertRaisesRegex(AotaError, "AOTA compose up FAILED: missing container tag."):
            aota.run()

    @patch('dispatcher.aota.checker.verify_source', side_effect=DispatcherException('Error'))
    def test_fails_source_verification_check_http(self, mock_verify_source):
        aota = TestAOTA._build_aota(uri='http://sample/test.rpm', app_type='docker',
                                    container_tag='abc', cmd='load')
        with self.assertRaisesRegex(AotaError, 'AOTA docker load FAILED: Source verification check failed'):
            aota.run()

    @patch('os.rmdir')
    @patch('dispatcher.aota.aota_command.DockerCompose.up')
    @patch('dispatcher.aota.checker.verify_source')
    def test_run_command_compose_up_no_error(self, mock_verify_source, mock_composeup, mock_rmdir):
        aota = self._build_aota(cmd='up', app_type='compose', uri='http://sample/test.rpm',
                                container_tag='abc')
        res = aota.run()
        self.assertTrue(mock_composeup.called)
        self.assertEqual(res, None)

    @patch('dispatcher.aota.checker.verify_source')
    @patch.object(Trtl, 'login', return_value=("", "", 3))
    def test_run_command_compose_up_error(self, mock_image_pull_private, mock_verify_source):
        aota = self._build_aota(app_type='compose', cmd='up', container_tag="hello",
                                docker_username='user', docker_registry="https://www.example.com/",
                                docker_password='secret', uri='https://sample/test.rpm')
        with self.assertRaisesRegex(AotaError, "AOTA compose up FAILED: Docker Login Failed"):
            aota.run()

    @patch('os.rmdir')
    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.get', return_value=Result(CODE_NOT_FOUND, 'Not Found'))
    def test_run_command_compose_pull_download_error(self,
                                                     mock_get,
                                                     mock_verify_source,
                                                     mock_rmdir) -> Any:
        aota = self._build_aota(cmd='pull', app_type='compose', uri='http://sample/test.rpm',
                                container_tag='abc')
        with self.assertRaisesRegex(AotaError,
                                    "AOTA compose pull FAILED: Unable to download docker-compose container."):
            aota.run()

    @patch('os.rmdir')
    @patch('dispatcher.aota.aota_command.DockerCompose.docker_login')
    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.aota_command.get', return_value=Result(CODE_NOT_FOUND, 'Not Found'))
    def test_run_command_compose_pull_with_login_call_error(self,
                                                            mock_get,
                                                            mock_verify_source,
                                                            mock_docker_login,
                                                            mock_rmdir):
        aota = self._build_aota(app_type='compose', cmd='pull', container_tag="hello",
                                docker_username='user', docker_registry="https://www.example.com/",
                                docker_password='secret', uri="http://example.com")
        with self.assertRaisesRegex(AotaError,
                                    "AOTA compose pull FAILED: Unable to download docker-compose container."):
            aota.run()
        mock_docker_login.assert_called()

    @patch('dispatcher.aota.aota_command.DockerCompose.list')
    def test_compose_list(self, mock_list):
        aota = self._build_aota(cmd='list', app_type='compose')
        aota.run()
        mock_list.assert_called_once()

    @patch('inbm_lib.trtl.Trtl.list', return_value=[2, 'could not find tag'])
    def test_compose_list_fail(self, mock_list):
        aota = self._build_aota(cmd='list', app_type='compose', container_tag='abc')
        with self.assertRaisesRegex(AotaError, 'AOTA compose list FAILED: could not find tag'):
            aota.run()

    def test_run_cmd_down(self):
        aota = self._build_aota(cmd='down', app_type='compose')
        try:
            aota.run()
        except AotaError as e:
            self.assertEqual('AOTA compose down FAILED: missing container tag.', str(e))

    @patch('inbm_lib.trtl.Trtl.remove_old_images', return_value=None)
    @patch('dispatcher.aota.checker.verify_source')
    @patch('os.rmdir')
    @patch('dispatcher.aota.aota_command.TrtlContainer.image_import', return_value=Result(200, 'Success'))
    def test_run_cmd_import_with_tag(self, mock_image_import, mock_rmdir, mock_verify, mock_remove):
        aota = self._build_aota(cmd='import', app_type='docker',
                                container_tag='foo', uri="http://example.com")
        aota.run()
        mock_image_import.assert_called()

    @patch('dispatcher.aota.checker.verify_source')
    @patch('os.rmdir')
    @patch('dispatcher.packagemanager.memory_repo.MemoryRepo.delete')
    @patch('dispatcher.aota.aota_command.TrtlContainer.image_import')
    def test_run_cmd_import_clean_up_called(self, mock_trtl_cntr, mock_delete, mock_rmdir, mock_verify):
        aota = self._build_aota(cmd='import', app_type='docker',
                                container_tag='foo', instance='docker', uri="http://example.com")
        aota._repo_to_clean_up = DirectoryRepo('abc')
        mock_trtl_cntr.return_value = Result(400, 'Fail')
        try:
            aota.import_image()
            mock_delete.assert_called_once()
        except AotaError as e:
            self.assertEqual('Fail', str(e))

    @patch('os.rmdir')
    @patch('dispatcher.aota.aota_command.TrtlContainer.image_import')
    def test_run_cmd_import_without_tag(self, mock_image_import, mock_rmdir):
        aota = self._build_aota(cmd='import', app_type='docker', container_tag=None)
        with self.assertRaisesRegex(AotaError, 'AOTA docker import_image FAILED: missing container tag.'):
            aota.run()
        mock_image_import.assert_not_called()

    @patch('dispatcher.aota.aota_command.DockerCompose._download', return_value=COMMAND_SUCCESS)
    @patch('inbm_lib.trtl.Trtl.image_pull_public', return_value=["", "Error", 2])
    def test_raise_compose_pull_missing_container_tag(self, mock_image_pull_public, mock_compose_download):
        aota = self._build_aota(cmd='pull', app_type='compose', instance='compose')
        try:
            aota.pull()
        except AotaError as e:
            self.assertEqual("missing container tag.", str(e))

    @patch('dispatcher.aota.aota_command.DockerCompose.list')
    def test_run_command_list_success(self, mock_cmd_list):
        aota = self._build_aota(cmd='list', app_type='compose', container_tag="abc")
        aota.run()
        mock_cmd_list.assert_called_once()

    def test_run_command_remove_raise_error(self):
        aota = self._build_aota(cmd='remove', app_type='docker')
        try:
            aota.run()
        except AotaError as e:
            self.assertEqual(
                "AOTA docker remove FAILED: missing container tag.", str(e))

    @patch('dispatcher.aota.aota_command.Docker.down', return_value=COMMAND_SUCCESS)
    @patch('inbm_lib.trtl.Trtl.image_remove_all', return_value=["", "", 0])
    @patch('dispatcher.aota.aota_command.Docker.remove')
    def test_run_command_remove_function_called(self, mock_a, mock_image_remove_all, mock_docker_down):
        aota = self._build_aota(cmd='remove', app_type='docker')
        aota.run()
        mock_a.assert_called()

    # def test_perform_docker_authentication_field_check(self) -> Any:
    #
    #     aota = self._build_aota(app_type='compose', cmd='pull', container_tag="hello",
    #                             docker_username='us er', docker_registry="https://www.example.com/",
    #                             docker_password='<PASSWORD>', uri="http://example.com")
    #     try:
    #         aota._perform_docker_authentication_field_check()
    #     except AotaError as e:
    #         self.assertEquals("No spaces allowed in Docker Username/Registry", str(e))

    @patch('dispatcher.aota.application_command.is_inside_container', return_value=False)
    @patch('dispatcher.aota.checker.check_url')
    def test_application_centos_driver_update_raise_error_not_in_container(self, check_url, mock_detect_os):
        aota = self._build_aota(cmd='update', app_type='application',
                                uri="http://example.com", device_reboot="Yes")
        self.assertRaises(AotaError, aota.run)

    @patch('dispatcher.aota.aota_command.get', return_value=Result(200, "OK"))
    @patch('dispatcher.aota.checker.verify_source')
    @patch('dispatcher.aota.application_command.AotaCommand.create_repository_cache_repo')
    @patch('dispatcher.aota.application_command.is_inside_container', return_value=True)
    @patch('dispatcher.aota.factory.detect_os', return_value='CentOS')
    def test_application_centos_driver_update_raise_error_if_inb_driver_folder_not_found(self, detect_os,
                                                                                         is_inside_container, create_repo, verify_source, get):
        aota = self._build_aota(cmd='update', app_type='application',
                                uri="http://example.com", device_reboot="Yes")
        self.assertRaises(AotaError, aota.run)

    @patch('inbm_common_lib.shell_runner.PseudoShellRunner.run', return_value=("", "", 0))
    @patch('dispatcher.aota.application_command.Application.identify_package', return_value=SupportedDriver.XLINK.value)
    @patch('dispatcher.aota.application_command.move_file')
    @patch('os.listdir', return_value=[])
    @patch('dispatcher.aota.aota_command.AotaCommand.create_repository_cache_repo')
    # NOTE(review): 'device_reboot' below is not a patch() option — it is set as
    # an attribute on the mock. Confirm this is intentional.
    @patch('dispatcher.aota.factory.is_inside_container', return_value=True, device_reboot="Yes")
    @patch('dispatcher.aota.factory.detect_os', return_value='CentOS')
    def test_application_centos_driver_update_raise_pass(self, detect_os, mock_detect_os, create_repo, listdir, mock_move,
                                                         support_driver, run):
        aota = self._build_aota(cmd='update', app_type='application', uri="http://example.com")
        self.assertIsNone(aota.run())
# Allow running this test module directly (e.g. `python <module>.py`).
if __name__ == '__main__':
    unittest.main()
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Class for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
"""
__author__ = '<EMAIL> (<NAME>)'
import struct
from google.protobuf import message
from google.protobuf.internal import wire_format
from google.protobuf.internal import output_stream
# Note that much of this code is ported from //net/proto/ProtocolBuffer, and
# that the interface is strongly inspired by WireFormat from the C++ proto2
# implementation.
class Encoder(object):

  """Encodes logical protocol buffer fields to the wire format.

  Each Append*() method writes a (field number, wire type) tag followed by
  the encoded value to an internal output stream; ToString() returns the
  accumulated bytes.
  """

  def __init__(self):
    self._stream = output_stream.OutputStream()

  def ToString(self):
    """Returns all values encoded in this object as a string."""
    return self._stream.ToString()

  # All the Append*() methods below first append a tag+type pair to the buffer
  # before appending the specified value.

  def AppendInt32(self, field_number, value):
    """Appends a 32-bit integer to our buffer, varint-encoded."""
    self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
    self._stream.AppendVarint32(value)

  def AppendInt64(self, field_number, value):
    """Appends a 64-bit integer to our buffer, varint-encoded."""
    self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
    self._stream.AppendVarint64(value)

  def AppendUInt32(self, field_number, unsigned_value):
    """Appends an unsigned 32-bit integer to our buffer, varint-encoded."""
    self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
    self._stream.AppendVarUInt32(unsigned_value)

  def AppendUInt64(self, field_number, unsigned_value):
    """Appends an unsigned 64-bit integer to our buffer, varint-encoded."""
    self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
    self._stream.AppendVarUInt64(unsigned_value)

  def AppendSInt32(self, field_number, value):
    """Appends a 32-bit integer to our buffer, zigzag-encoded and then
    varint-encoded.
    """
    self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
    zigzag_value = wire_format.ZigZagEncode(value)
    self._stream.AppendVarUInt32(zigzag_value)

  def AppendSInt64(self, field_number, value):
    """Appends a 64-bit integer to our buffer, zigzag-encoded and then
    varint-encoded.
    """
    self._AppendTag(field_number, wire_format.WIRETYPE_VARINT)
    zigzag_value = wire_format.ZigZagEncode(value)
    self._stream.AppendVarUInt64(zigzag_value)

  def AppendFixed32(self, field_number, unsigned_value):
    """Appends an unsigned 32-bit integer to our buffer, in little-endian
    byte-order.
    """
    self._AppendTag(field_number, wire_format.WIRETYPE_FIXED32)
    self._stream.AppendLittleEndian32(unsigned_value)

  def AppendFixed64(self, field_number, unsigned_value):
    """Appends an unsigned 64-bit integer to our buffer, in little-endian
    byte-order.
    """
    self._AppendTag(field_number, wire_format.WIRETYPE_FIXED64)
    self._stream.AppendLittleEndian64(unsigned_value)

  def AppendSFixed32(self, field_number, value):
    """Appends a signed 32-bit integer to our buffer, in little-endian
    byte-order.

    Raises:
      message.EncodeError: if value does not fit in 32 signed bits.
    """
    # sign is -1 when the 32-bit sign bit is set, 0 otherwise; a value that
    # fits in 32 bits must have nothing but that sign beyond bit 31.
    sign = (value & 0x80000000) and -1 or 0
    if value >> 32 != sign:
      raise message.EncodeError('SFixed32 out of range: %d' % value)
    self._AppendTag(field_number, wire_format.WIRETYPE_FIXED32)
    self._stream.AppendLittleEndian32(value & 0xffffffff)

  def AppendSFixed64(self, field_number, value):
    """Appends a signed 64-bit integer to our buffer, in little-endian
    byte-order.

    Raises:
      message.EncodeError: if value does not fit in 64 signed bits.
    """
    sign = (value & 0x8000000000000000) and -1 or 0
    if value >> 64 != sign:
      raise message.EncodeError('SFixed64 out of range: %d' % value)
    self._AppendTag(field_number, wire_format.WIRETYPE_FIXED64)
    self._stream.AppendLittleEndian64(value & 0xffffffffffffffff)

  def AppendFloat(self, field_number, value):
    """Appends a floating-point number to our buffer."""
    self._AppendTag(field_number, wire_format.WIRETYPE_FIXED32)
    # The protobuf wire format requires little-endian byte order; the
    # explicit '<' keeps the encoding correct on big-endian hosts, where
    # the previous native-order pack('f', ...) would produce wrong bytes.
    self._stream.AppendRawBytes(struct.pack('<f', value))

  def AppendDouble(self, field_number, value):
    """Appends a double-precision floating-point number to our buffer."""
    self._AppendTag(field_number, wire_format.WIRETYPE_FIXED64)
    # See AppendFloat: force little-endian output regardless of host order.
    self._stream.AppendRawBytes(struct.pack('<d', value))

  def AppendBool(self, field_number, value):
    """Appends a boolean to our buffer."""
    self.AppendInt32(field_number, value)

  def AppendEnum(self, field_number, value):
    """Appends an enum value to our buffer."""
    self.AppendInt32(field_number, value)

  def AppendString(self, field_number, value):
    """Appends a length-prefixed unicode string, encoded as UTF-8 to our buffer,
    with the length varint-encoded.
    """
    self.AppendBytes(field_number, value.encode('utf-8'))

  def AppendBytes(self, field_number, value):
    """Appends a length-prefixed sequence of bytes to our buffer, with the
    length varint-encoded.
    """
    self._AppendTag(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    self._stream.AppendVarUInt32(len(value))
    self._stream.AppendRawBytes(value)

  # TODO(robinson): For AppendGroup() and AppendMessage(), we'd really like to
  # avoid the extra string copy here.  We can do so if we widen the Message
  # interface to be able to serialize to a stream in addition to a string.  The
  # challenge when thinking ahead to the Python/C API implementation of Message
  # is finding a stream-like Python thing to which we can write raw bytes
  # from C.  I'm not sure such a thing exists(?).  (array.array is pretty much
  # what we want, but it's not directly exposed in the Python/C API).

  def AppendGroup(self, field_number, group):
    """Appends a group to our buffer, delimited by START_GROUP/END_GROUP tags."""
    self._AppendTag(field_number, wire_format.WIRETYPE_START_GROUP)
    self._stream.AppendRawBytes(group.SerializeToString())
    self._AppendTag(field_number, wire_format.WIRETYPE_END_GROUP)

  def AppendMessage(self, field_number, msg):
    """Appends a nested message to our buffer as a length-delimited field."""
    self._AppendTag(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    self._stream.AppendVarUInt32(msg.ByteSize())
    self._stream.AppendRawBytes(msg.SerializeToString())

  def AppendMessageSetItem(self, field_number, msg):
    """Appends an item using the message set wire format.

    The message set message looks like this:
      message MessageSet {
        repeated group Item = 1 {
          required int32 type_id = 2;
          required string message = 3;
        }
      }
    """
    self._AppendTag(1, wire_format.WIRETYPE_START_GROUP)
    self.AppendInt32(2, field_number)
    self.AppendMessage(3, msg)
    self._AppendTag(1, wire_format.WIRETYPE_END_GROUP)

  def _AppendTag(self, field_number, wire_type):
    """Appends a tag containing field number and wire type information."""
    self._stream.AppendVarUInt32(wire_format.PackTag(field_number, wire_type))
|
<gh_stars>0
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from function import FakeQuantize
# 1/11: derive the (float) scale and zero point from an observed value range.
def calcScaleZeroPoint(min_val, max_val, num_bits=8, signed=False):
    """Compute quantization scale S and zero point Z for [min_val, max_val].

    For signed quantization the target range is [-2^(b-1), 2^(b-1)-1]
    (e.g. [-128, 127] for int8); unsigned uses [0, 2^b - 1] (e.g. [0, 255]
    for uint8).  Both results stay float tensors; the zero point is clamped
    into the quantized range and rounded in place.
    """
    if signed:
        qmax = 2. ** (num_bits - 1) - 1
        qmin = -(2. ** (num_bits - 1))
    else:
        qmax = 2. ** num_bits - 1.
        qmin = 0.

    # S = (r_max - r_min) / (q_max - q_min)
    scale = (max_val - min_val) / (qmax - qmin)
    # Z = q_max - r_max / S; may land outside [qmin, qmax] when the real
    # range does not straddle zero, so clamp below.
    zero_point = qmax - max_val / scale

    if zero_point < qmin:
        # min_val > 0: the whole real range is positive.
        zero_point = torch.tensor([qmin], dtype=torch.float32).to(min_val.device)
    elif zero_point > qmax:
        # max_val < 0: the whole real range is negative.
        zero_point = torch.tensor([qmax], dtype=torch.float32).to(max_val.device)

    zero_point.round_()
    return scale, zero_point
# 2/11: quantize a real-valued tensor with the given scale and zero point.
def quantize_tensor(x, scale, zero_point, num_bits=8, signed=False):
    """Quantize ``x``: q = clamp(round(x / S + Z), qmin, qmax).

    The result is returned as a float tensor holding integer values,
    because PyTorch does not support the later arithmetic (e.g. division)
    on integer tensors.
    """
    if signed:
        qmax = 2. ** (num_bits - 1) - 1
        qmin = -(2. ** (num_bits - 1))
    else:
        qmax = 2. ** num_bits - 1.
        qmin = 0.
    quantized = x / scale + zero_point
    quantized.clamp_(qmin, qmax).round_()
    return quantized
# 3/11: map quantized values back to the real domain.
def dequantize_tensor(q_x, scale, zero_point):
    """Dequantize: recover r = S * (q - Z)."""
    shifted = q_x - zero_point
    return shifted * scale
# 4/11: express the rescale multiplier M as M0 * 2^(-n) within tolerance.
def search(M):
    """Find (M0, n) such that M ~= M0 * 2^(-n).

    Probes the fixed test value P and grows n until the fixed-point
    product (M0 * P) >> n matches round(M * P), or n reaches the cap of 22.
    Progress for every candidate n is printed.
    """
    P = 7000
    n = 0
    while n < 22:
        n += 1
        Mo = int(round(2 ** n * M))            # M0 = round(M * 2^n)
        approx_result = Mo * P >> n            # fixed-point approximation
        result = int(round(M * P))             # reference float computation
        error = approx_result - result
        print("n=%d, Mo=%f, approx=%d, result=%d, error=%f" % \
              (n, Mo, approx_result, result, error))
        if math.fabs(error) < 1e-9:
            return Mo, n
    # Cap reached: return the best candidate found at n == 22.
    return Mo, n
class QParam(nn.Module):
    """Container for one set of quantization parameters.

    Tracks the running min/max of observed tensors, derives scale and zero
    point from them, and wraps the free quantize/dequantize helpers above.
    All four values are registered as buffers so they travel with the state
    dict but are never touched by the optimizer.
    """

    _BUFFER_NAMES = ('scale', 'zero_point', 'min', 'max')

    def __init__(self, num_bits=8):
        super(QParam, self).__init__()
        self.num_bits = num_bits
        # Empty tensors mark "no statistics collected yet".
        for buf_name in self._BUFFER_NAMES:
            self.register_buffer(buf_name, torch.tensor([], requires_grad=False))

    def update(self, tensor):
        """Fold a new sample into the running min/max and refresh scale/zp."""
        observed_max = tensor.max().data
        if self.max.nelement() == 0 or self.max.data < observed_max:
            self.max.data = observed_max
        self.max.clamp_(min=0)  # keep max non-negative so 0 stays representable

        observed_min = tensor.min().data
        if self.min.nelement() == 0 or self.min.data > observed_min:
            self.min.data = observed_min
        self.min.clamp_(max=0)  # keep min non-positive for the same reason

        self.scale, self.zero_point = calcScaleZeroPoint(
            self.min, self.max, self.num_bits, False)

    def quantize_tensor(self, tensor):
        """Quantize ``tensor`` with the stored scale and zero point."""
        return quantize_tensor(tensor, self.scale, self.zero_point,
                               num_bits=self.num_bits)

    def dequantize_tensor(self, q_x):
        """Dequantize ``q_x`` with the stored scale and zero point."""
        return dequantize_tensor(q_x, self.scale, self.zero_point)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Restore each buffer manually: they were created empty, so default
        # loading would fail on the shape mismatch.
        for buf_name in self._BUFFER_NAMES:
            getattr(self, buf_name).data = state_dict[prefix + buf_name].data
            state_dict.pop(prefix + buf_name)

    def __str__(self):
        # Called automatically by print(); summarizes all four parameters.
        return ('scale: %.10f ' % self.scale
                + 'zp: %d ' % self.zero_point
                + 'min: %.6f ' % self.min
                + 'max: %.6f' % self.max)
# Base class shared by all quantized layer implementations below.
class QModule(nn.Module):
    """Common scaffolding for quantization-aware modules.

    ``qi``/``qo`` control whether this module owns quantization parameters
    for its input/output.  Not every layer needs its own input statistics:
    most intermediate layers reuse the previous layer's ``qo`` as their
    ``qi``, and some activations reuse the previous ``qi`` for both sides —
    hence the attributes are only created on demand.
    """

    def __init__(self, qi=True, qo=True, num_bits=8):
        super(QModule, self).__init__()
        if qi:
            self.qi = QParam(num_bits=num_bits)  # input-side scale/zp/min/max
        if qo:
            self.qo = QParam(num_bits=num_bits)  # output-side scale/zp/min/max

    def freeze(self):
        """Fix quantization constants once min/max statistics are final.

        Subclasses convert their float weights to (float-typed) integers
        here; the base implementation intentionally does nothing.
        """
        pass

    def quantize_inference(self, x):
        """Integer-domain inference; differs from the float forward pass."""
        raise NotImplementedError('quantize_inference should be implemented.')
# Quantization-aware 2-D convolution layer.
class QConv2d(QModule):
    """Wraps a full-precision ``nn.Conv2d`` for quantization-aware training
    and integer-only inference.  ``qw`` tracks the weight tensor's min/max
    and is used to quantize the weights.
    """

    def __init__(self, conv_module, qi=True, qo=True, num_bits=8):
        super(QConv2d, self).__init__(qi=qi, qo=qo, num_bits=num_bits)
        self.num_bits = num_bits
        self.conv_module = conv_module  # the full-precision convolution
        self.qw = QParam(num_bits=num_bits)  # weight scale/zp/min/max

    def freeze(self, qi=None, qo=None):
        """Fix quantization constants and quantize weights/bias in place.

        Exactly one source is allowed for each of qi/qo: either owned from
        __init__ or supplied here — anything else raises ValueError.
        """
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        # Adopt externally supplied qi/qo, then precompute the rescale factor
        # M = S_w * S_i / S_o used during integer inference.
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        self.M = self.qw.scale * self.qi.scale / self.qo.scale
        # Quantize the weights in place and shift by the weight zero point so
        # inference can use them directly as (float-typed) integers.
        self.conv_module.weight.data = self.qw.quantize_tensor(self.conv_module.weight.data)
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point
        # Bias is quantized with scale S_i * S_w, zero point 0, at 32 bits.
        self.conv_module.bias.data = quantize_tensor(self.conv_module.bias.data,
                                                     scale=self.qi.scale * self.qw.scale,
                                                     zero_point=0, num_bits=32, signed=True)

    def forward(self, x):
        """Float forward pass used during training.

        Collects input/weight/output statistics and fake-quantizes
        activations and weights, like the normal forward but with
        quantization noise injected.
        """
        if hasattr(self, 'qi'):
            self.qi.update(x)  # refresh qi's min/max, scale and zero point
            x = FakeQuantize.apply(x, self.qi)  # fake-quantize the input
        self.qw.update(self.conv_module.weight.data)  # track weight statistics
        # Convolution with fake-quantized weights; bias stays in float here.
        x = F.conv2d(x, FakeQuantize.apply(self.conv_module.weight, self.qw), self.conv_module.bias,
                     stride=self.conv_module.stride,
                     padding=self.conv_module.padding, dilation=self.conv_module.dilation,
                     groups=self.conv_module.groups)
        if hasattr(self, 'qo'):
            self.qo.update(x)  # refresh qo's statistics from the conv output
            x = FakeQuantize.apply(x, self.qo)  # fake-quantize the output
        return x

    def quantize_inference(self, x):
        """Integer-domain inference; requires freeze() to have run first."""
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        x = self.M * x
        x.round_()
        x = x + self.qo.zero_point
        # Clamp back into the valid quantized range [0, 2^bits - 1].
        x.clamp_(0., 2.**self.num_bits-1.).round_()
        return x
class QLinear(QModule):
    """Quantization-aware wrapper around a full-precision linear layer."""

    def __init__(self, fc_module, qi=True, qo=True, num_bits=8):
        super(QLinear, self).__init__(qi=qi, qo=qo, num_bits=num_bits)
        self.num_bits = num_bits
        self.fc_module = fc_module  # the full-precision fully-connected layer
        self.qw = QParam(num_bits=num_bits)  # weight scale/zp/min/max

    def freeze(self, qi=None, qo=None):
        """Fix quantization constants and quantize weights/bias in place."""
        # Exactly one source for each of qi/qo: __init__ or this call.
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        # Rescale factor M = S_w * S_i / S_o used during integer inference.
        self.M = self.qw.scale * self.qi.scale / self.qo.scale
        # Quantize weights in place, then shift by the weight zero point.
        self.fc_module.weight.data = self.qw.quantize_tensor(self.fc_module.weight.data)
        self.fc_module.weight.data = self.fc_module.weight.data - self.qw.zero_point
        # Bias is quantized with scale S_i * S_w, zero point 0, at 32 bits.
        self.fc_module.bias.data = quantize_tensor(self.fc_module.bias.data, scale=self.qi.scale * self.qw.scale,
                                                   zero_point=0, num_bits=32, signed=True)

    def forward(self, x):
        """Float forward pass for training with fake quantization."""
        if hasattr(self, 'qi'):
            self.qi.update(x)  # refresh input statistics
            x = FakeQuantize.apply(x, self.qi)
        self.qw.update(self.fc_module.weight.data)  # track weight statistics
        x = F.linear(x, FakeQuantize.apply(self.fc_module.weight, self.qw), self.fc_module.bias)
        if hasattr(self, 'qo'):
            self.qo.update(x)  # refresh output statistics
            x = FakeQuantize.apply(x, self.qo)
        return x

    def quantize_inference(self, x):
        """Integer-domain inference; requires freeze() to have run first."""
        x = x - self.qi.zero_point
        x = self.fc_module(x)
        x = self.M * x
        x.round_()
        x = x + self.qo.zero_point
        # Clamp back into the valid quantized range [0, 2^bits - 1].
        x.clamp_(0., 2.**self.num_bits-1.).round_()
        return x
class QReLU(QModule):
    """Quantization-aware ReLU.

    ReLU introduces no new scale of its own, so ``qi`` defaults to False
    and the layer normally reuses the preceding layer's parameters.
    """

    def __init__(self, qi=False, num_bits=None):
        super(QReLU, self).__init__(qi=qi, num_bits=num_bits)

    def freeze(self, qi=None):
        # Exactly one source of qi is allowed: __init__ or this call.
        owns_qi = hasattr(self, 'qi')
        if owns_qi and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not owns_qi and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if qi is not None:
            self.qi = qi

    def forward(self, x):
        # Track input statistics and fake-quantize only when we own qi.
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        return F.relu(x)

    def quantize_inference(self, x):
        # In the integer domain, ReLU clamps at the zero point rather than 0.
        out = x.clone()
        out[out < self.qi.zero_point] = self.qi.zero_point
        return out
class QMaxPooling2d(QModule):
    """Quantization-aware max pooling.

    Max pooling only selects among existing values, so it needs no scale of
    its own (``qi`` defaults to False); in the integer domain it runs
    completely unchanged.
    """

    def __init__(self, kernel_size=3, stride=1, padding=0, qi=False, num_bits=None):
        super(QMaxPooling2d, self).__init__(qi=qi, num_bits=num_bits)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def freeze(self, qi=None):
        # Exactly one source of qi is allowed: __init__ or this call.
        owns_qi = hasattr(self, 'qi')
        if owns_qi and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not owns_qi and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if qi is not None:
            self.qi = qi

    def forward(self, x):
        # Optionally fake-quantize the input when this layer owns qi.
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)

    def quantize_inference(self, x):
        # Pooling commutes with quantization: apply it directly.
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)
class QConvBNReLU(QModule):
    """Quantization-aware fused Conv + BatchNorm + ReLU block.

    During training the batch-norm statistics are folded into the
    convolution weights on every forward pass; ``freeze`` bakes the folded,
    quantized weights into ``conv_module`` for integer-only inference.
    """

    def __init__(self, conv_module, bn_module, qi=True, qo=True, num_bits=8):
        super(QConvBNReLU, self).__init__(qi=qi, qo=qo, num_bits=num_bits)
        self.num_bits = num_bits
        self.conv_module = conv_module
        self.bn_module = bn_module
        self.qw = QParam(num_bits=num_bits)  # weight scale/zp/min/max
        self.qb = QParam(num_bits=32)        # reserved for bias quantization

    def fold_bn(self, mean, std):
        """Fold BN into conv weight/bias.

        ``std`` must already include eps, i.e. std = sqrt(var + eps) —
        matching how forward() computes it.
        """
        if self.bn_module.affine:
            gamma_ = self.bn_module.weight / std
            weight = self.conv_module.weight * gamma_.view(self.conv_module.out_channels, 1, 1, 1)
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean + self.bn_module.bias
            else:
                bias = self.bn_module.bias - gamma_ * mean
        else:
            gamma_ = 1 / std
            # BUG FIX: reshape per output channel exactly as the affine branch
            # does, so gamma_ broadcasts over (in_channels, kH, kW) instead of
            # the trailing kernel dimension.
            weight = self.conv_module.weight * gamma_.view(self.conv_module.out_channels, 1, 1, 1)
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean
            else:
                bias = -gamma_ * mean
        return weight, bias

    def forward(self, x):
        """Float forward pass with on-the-fly BN folding and fake quantization."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)

        if self.training:
            # Run the raw convolution once to measure this batch's statistics.
            y = F.conv2d(x, self.conv_module.weight, self.conv_module.bias,
                         stride=self.conv_module.stride,
                         padding=self.conv_module.padding,
                         dilation=self.conv_module.dilation,
                         groups=self.conv_module.groups)
            y = y.permute(1, 0, 2, 3)  # NCHW -> CNHW
            y = y.contiguous().view(self.conv_module.out_channels, -1)  # CNHW -> C,NHW
            mean = y.mean(1).detach()
            var = y.var(1).detach()
            # NOTE(review): this weights the running value by `momentum` and the
            # new batch by (1 - momentum), which is the reverse of
            # nn.BatchNorm's convention — confirm intended before changing.
            self.bn_module.running_mean = \
                self.bn_module.momentum * self.bn_module.running_mean + \
                (1 - self.bn_module.momentum) * mean
            self.bn_module.running_var = \
                self.bn_module.momentum * self.bn_module.running_var + \
                (1 - self.bn_module.momentum) * var
        else:
            mean = Variable(self.bn_module.running_mean)
            var = Variable(self.bn_module.running_var)

        std = torch.sqrt(var + self.bn_module.eps)
        weight, bias = self.fold_bn(mean, std)
        self.qw.update(weight.data)

        x = F.conv2d(x, FakeQuantize.apply(weight, self.qw), bias,
                     stride=self.conv_module.stride,
                     padding=self.conv_module.padding, dilation=self.conv_module.dilation,
                     groups=self.conv_module.groups)
        x = F.relu(x)

        if hasattr(self, 'qo'):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)
        return x

    def freeze(self, qi=None, qo=None):
        """Fix quantization constants and bake folded weights into conv_module."""
        # Exactly one source for each of qi/qo: __init__ or this call.
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        self.M = self.qw.scale * self.qi.scale / self.qo.scale
        # BUG FIX: fold_bn expects the standard deviation sqrt(var + eps),
        # exactly as forward() computes it; previously running_var was passed
        # directly, producing frozen weights inconsistent with training.
        std = torch.sqrt(self.bn_module.running_var + self.bn_module.eps)
        weight, bias = self.fold_bn(self.bn_module.running_mean, std)
        self.conv_module.weight.data = self.qw.quantize_tensor(weight.data)
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point
        self.conv_module.bias.data = quantize_tensor(bias, scale=self.qi.scale * self.qw.scale,
                                                     zero_point=0, num_bits=32, signed=True)

    def quantize_inference(self, x):
        """Integer-domain inference; requires freeze() to have run first."""
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        x = self.M * x
        x.round_()
        x = x + self.qo.zero_point
        # Clamp back into the valid quantized range [0, 2^bits - 1].
        x.clamp_(0., 2.**self.num_bits-1.).round_()
        return x
|
<reponame>mcv-m6-video/mcv-m6-2018-team7<filename>Week2/backgroundEstimationAdaptive.py
import numpy as np
import cv2
import os
import pickle
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import GridSearchCV
from AdaptiveClassifier import AdaptiveClassifier
from readInputWriteOutput import readDataset, precisionRecallCurve
def optimalAlphaAdaptive(dataset_path, ID, Color, optimal_rho):
    """Sweep alpha for the adaptive classifier with a fixed rho.

    Evaluates an AdaptiveClassifier for each alpha in linspace(0, 30, 21),
    prints the best F1-score and its precision/recall, and returns the
    per-alpha precision, recall and F-score vectors plus the alpha grid.
    """
    print 'Computing optimal alpha for ' + ID + ' dataset ...'
    Precision_vec = []
    Recall_vec = []
    fscore_vec = []
    alpha_vec = np.linspace(0, 30, 21)
    # NOTE(review): readDataset is called with a hard-coded False even though
    # a Color argument is passed in — confirm this is intended.
    train_frames, test_frames, train_gts, test_gts = readDataset(dataset_path, ID, False)
    init_mu = np.zeros([1, train_frames.shape[1]])
    init_std = np.zeros([1, train_frames.shape[1]])
    for alpha in alpha_vec:
        # Fit on the training half, score on the held-out half.
        clf = AdaptiveClassifier(alpha, optimal_rho, init_mu, init_std)
        clf = clf.fit(train_frames,train_gts)
        precision, recall, fscore = clf.performance_measures(test_frames, test_gts)
        Precision_vec = np.append(Precision_vec, precision)
        Recall_vec = np.append(Recall_vec, recall)
        fscore_vec = np.append(fscore_vec, fscore)
    # cv2.minMaxLoc returns (minVal, maxVal, minLoc, maxLoc); the locations
    # are (x, y) pairs, hence the [1] index below.
    min, max, idxmin, idxmax = cv2.minMaxLoc(fscore_vec)
    print 'Maximum F1-Score with ', ID, ' dataset is ', max, ' with alpha = ', alpha_vec[idxmax[1]]
    print 'Precision selected with dataset ', ID, ' is ', Precision_vec[idxmax[1]]
    print 'Recall selected with dataset ', ID, ' is ', Recall_vec[idxmax[1]]
    return Precision_vec, Recall_vec, fscore_vec, alpha_vec
def optimizeAlphaRho(alpha_values, rho_values, dataset_path, ID, Color):
    """Grid-search alpha/rho for the adaptive classifier and cache the result.

    Uses a single fixed split (first half of the concatenated frames for
    training, second half for validation) expressed as a one-element cv
    iterable, then pickles the fitted GridSearchCV to 'gridsearch_<ID>.pckl'.
    """
    train_frames, test_frames, train_gts, test_gts = readDataset(dataset_path, ID, Color)
    frames = np.vstack((train_frames, test_frames))
    labels = np.vstack((train_gts, test_gts))
    # One predefined train/test split: first half vs second half.
    train_idx = [range(int(round(frames.shape[0] * 0.5)))]
    test_idx = [range(int(round(frames.shape[0] * 0.5)), frames.shape[0])]
    init_mu = np.zeros([1, train_frames.shape[1]])
    init_std = np.zeros([1, train_frames.shape[1]])
    parameters = {'alpha': alpha_values, 'rho': rho_values}
    # perform grid search to optimize alpha and rho
    grid = GridSearchCV(AdaptiveClassifier(init_mu, init_std), parameters, cv=zip(train_idx, test_idx))
    grid.fit(frames, labels)
    # save results to disk
    f = open('gridsearch_' + ID + '.pckl', 'wb')
    pickle.dump(grid, f)
    f.close()
    return grid
def gridSearchAdaptiveClassifier(dataset_path, ID, Color):
    """Run (or load) the alpha/rho grid search and plot the F-score surface.

    Returns the best alpha, best rho and the associated F-score.
    """
    # define range of values for alpha and rho
    alpha_values = np.linspace(0, 10, 21)
    rho_values = np.linspace(0, 1, 21)
    # if gridsearch has already been computed just load results, otherwise compute it
    if os.path.isfile('gridsearch_' + ID + '.pckl'):
        print 'Loading GridSearch...'
        f = open('gridsearch_' + ID + '.pckl', 'rb')
        grid = pickle.load(f)
        f.close()
    else:
        grid = optimizeAlphaRho(alpha_values, rho_values, dataset_path, ID, Color)
    # print optimal alpha and rho with the associated fscore
    print('Best parameters for ' + ID + ' dataset: %s Best F-score: %0.5f' % (grid.best_params_, grid.best_score_))
    # uncomment to see the score for each parameter combination
    # for fscore, params in zip(grid.cv_results_['mean_test_score'], grid.cv_results_['params']):
    #     print("%0.3f for %r" % (fscore, params))
    # plot fscore surface (maximum will indicate best combination of parameters)
    fscores = grid.cv_results_['mean_test_score']
    X, Y = np.meshgrid(rho_values, alpha_values)
    # Reshape the flat score list onto the (alpha, rho) grid.
    Z = np.reshape(fscores, (len(alpha_values), len(rho_values)))
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.set_xlabel("rho")
    ax.set_ylabel("alpha")
    ax.set_zlabel("F-score")
    ax.set_zlim(min(fscores), max(fscores))
    surf = ax.plot_surface(X, Y, Z, cmap=plt.cm.coolwarm,
                           rstride=1, cstride=1, linewidth=0, antialiased=True)
    fig.colorbar(surf)
    plt.show()
    return grid.best_params_['alpha'], grid.best_params_['rho'], grid.best_score_
def testAdaptiveClassifier(alpha, rho, dataset_path, ID):
train_frames, test_frames, train_gts, test_gts = readDataset(dataset_path, ID, False)
init_mu = np.zeros([1, train_frames.shape[1]])
init_std = np.zeros([1, train_frames.shape[1]])
clf = AdaptiveClassifier(alpha, rho, init_mu, init_std)
clf = clf.fit(train_frames, train_gts)
precision, recall, fscore = clf.performance_measures(test_frames, test_gts)
print 'F-score:', fscore
print 'Precision: ', precision
print 'Recall: ', recall
|
<reponame>adamancer/imagery_accessor
"""Tests for propagating metadata when using ImageryAccessor"""
from earthpy.plot import plot_rgb, _stretch_im
from earthpy.io import path_to_example
import numpy as np
import matplotlib.pyplot as plt
import pytest
import xarray as xr
import imagery_accessor as ixr
# Suppress interactive figure display so the plotting tests run headless.
plt.show = lambda: None
@pytest.fixture
def rgb_array():
    """Fixture holding an RGB image (as a DataArray) for plotting."""
    band_names = ["red", "green", "blue"]
    band_paths = [path_to_example(f"{name}.tif") for name in band_names]
    image = ixr.stack_images(band_paths, band_names)
    # Add custom metadata to verify that it carries over to children
    image.im.metadata["hello"] = "world"
    return image
@pytest.fixture
def rgb_dataset():
    """Fixture holding an RGB image (as a Dataset) for plotting."""
    band_names = ["red", "green", "blue"]
    band_paths = [path_to_example(f"{name}.tif") for name in band_names]
    dataset = ixr.stack_images(band_paths, band_names).to_dataset(dim="band")
    # Add custom metadata to verify that it carries over to children
    dataset.im.metadata["hello"] = "world"
    return dataset
def test_array_to_dataset(rgb_array):
    """Round-tripping through a dataset preserves attrs, dims and metadata."""
    round_tripped = rgb_array.to_dataset(dim="band").to_array(dim="band")
    assert round_tripped.attrs == rgb_array.attrs
    assert round_tripped.dims == rgb_array.dims
    assert round_tripped.im.metadata == rgb_array.im.metadata
@pytest.mark.parametrize("rgb_image", ["rgb_array", "rgb_dataset"])
def test_rgb_extent(rgb_image, request):
    """Tests that plot_rgb respects axis limits when using extent.

    Runs against both the DataArray and Dataset fixtures via parametrize.
    This test was adapted from the earthpy library.
    """
    rgb_image = request.getfixturevalue(rgb_image)
    ax = rgb_image.im.plot_rgb(
        title="My Title",
        figsize=(5, 5),
    )
    # Get x and y lims to test extent
    plt_ext = ax.get_xlim() + ax.get_ylim()
    plt_array = ax.get_images()[0].get_array()
    # bounds[2:4] is (width, height) of the figure in inches.
    assert ax.figure.bbox_inches.bounds[2:4] == (5, 5)
    assert ax.get_title() == "My Title"
    # NOTE(review): pixel-content check left disabled; re-enable once the
    # transpose semantics are confirmed.
    #assert np.array_equal(plt_array[0], rgb_image.transpose([1, 2, 0])[1])
    assert rgb_image.im.extent == plt_ext
    plt.close()
@pytest.mark.parametrize("rgb_image", ["rgb_array", "rgb_dataset"])
def test_obj_metadata(rgb_image, request):
    """Verifies that __init__ doesn't muck up inherited metadata."""
    image = request.getfixturevalue(rgb_image)
    direct_attrs = image.coords["metadata_ref"].attrs
    accessor_attrs = image.im._obj.coords["metadata_ref"].attrs
    assert direct_attrs == accessor_attrs
@pytest.mark.parametrize("rgb_image", ["rgb_array", "rgb_dataset"])
def test_xarr_copy(rgb_image, request):
    """A copied xarray object must get an independent metadata dict."""
    original = request.getfixturevalue(rgb_image)
    duplicate = original.copy()
    duplicate.im.metadata["hello"] = "universe"
    # Distinct dict objects, and the mutation must not leak back.
    assert id(original.im.metadata) != id(duplicate.im.metadata)
    assert original.im.metadata["hello"] != duplicate.im.metadata["hello"]
@pytest.mark.parametrize("rgb_image", ["rgb_array", "rgb_dataset"])
def test_clean_all_bands(rgb_image, request):
    """clean_all_bands must yield an independent metadata dict."""
    original = request.getfixturevalue(rgb_image)
    cleaned = original.im.clean_all_bands()
    cleaned.im.metadata["hello"] = "universe"
    # Distinct dict objects, and the mutation must not leak back.
    assert id(original.im.metadata) != id(cleaned.im.metadata)
    assert original.im.metadata["hello"] != cleaned.im.metadata["hello"]
@pytest.mark.parametrize("rgb_image", ["rgb_array", "rgb_dataset"])
def test_metadata_copy_along_band(rgb_image, request):
    """Copying xr metadata onto derived per-band scalars preserves it."""
    rgb_image = request.getfixturevalue(rgb_image)
    # Reduce each band to a scalar.  Iterating a DataArray yields bands
    # directly; Dataset.values is a method, hence the TypeError fallback.
    try:
        rgb_arr = [np.mean(a) for a in rgb_image.values]
    except TypeError:
        rgb_arr = [np.mean(a) for a in rgb_image.values()]
    rgb_arr = rgb_image.im.copy_xr_metadata(rgb_arr)
    # Make comparisons with arrays, not datasets, to simplify iteration
    if isinstance(rgb_image, xr.Dataset):
        rgb_image = rgb_image.to_array(dim="band")
        rgb_arr = rgb_arr.to_array(dim="band")
    for band, band_arr in zip(rgb_image, rgb_arr):
        # Metadata should be the same
        assert band.im.metadata == band_arr.im.metadata
        # Derived should have no dims
        assert not band_arr.dims
        # Non-dimensional coordinates should be the same
        for key, val in band_arr.coords.items():
            try:
                assert band.coords[key] == val
            except ValueError:
                # Simple equality fails with np.array
                assert np.array_equal(band.coords[key], val)
@pytest.mark.parametrize("rgb_image", ["rgb_array", "rgb_dataset"])
def test_metadata_to_npfunc(rgb_image, request):
    """Applying a numpy function via npfunc keeps metadata, dims and coords."""
    rgb_image = request.getfixturevalue(rgb_image)
    # Bin every pixel into one of two buckets around 128.
    rgb_arr = rgb_image.im.npfunc("digitize", [-np.inf, 128, np.inf])
    # Make comparisons with arrays, not datasets, to simplify iteration
    if isinstance(rgb_image, xr.Dataset):
        rgb_image = rgb_image.to_array(dim="band")
        rgb_arr = rgb_arr.to_array(dim="band")
    for band, band_arr in zip(rgb_image, rgb_arr):
        # Metadata should be the same
        assert band.im.metadata == band_arr.im.metadata
        # Derived should have the same dims
        assert band.dims == band_arr.dims
        # Non-dimensional coordinates should be the same
        for key, val in band_arr.coords.items():
            try:
                assert band.coords[key] == val
            except ValueError:
                # Simple equality fails with np.array
                assert np.array_equal(band.coords[key], val)
@pytest.mark.parametrize("rgb_image", ["rgb_array", "rgb_dataset"])
def test_metadata_to_1d_arr(rgb_image, request):
    """Reducing along the band axis keeps non-index metadata and coords."""
    rgb_image = request.getfixturevalue(rgb_image)
    rgb_1d = rgb_image.sum(axis=0)
    # Non-index metadata should be the same
    assert rgb_image.im.metadata
    assert rgb_image.im.metadata["hello"] == rgb_1d.im.metadata["hello"]
    # Non-dimensional, non-scalar coords should transfer
    ignore_coords = list(rgb_image.dims)
    for key, val in rgb_1d.coords.items():
        if not rgb_image.coords[key].shape and key not in ignore_coords:
            assert rgb_image.coords[key] == val
|
import json
from django.urls import reverse
from rest_framework import status
from authors.apps.authentication.tests.test_base import BaseTestClass
class TestUserProfile(BaseTestClass):
    """Integration tests for profile retrieval, editing and follow endpoints."""

    def _login_verified_user(self):
        """POST the verified user's credentials to the login endpoint.

        The original tests issued this request (and discarded the response)
        before hitting the profile endpoints; it is kept in one place so any
        server-side effects of logging in are preserved.
        """
        return self.client.post(
            reverse('auth:login'),
            content_type='application/json',
            data=json.dumps(self.verified_user_login_credentials))

    def test_retrieve_profile_without_logging_in_fails(self):
        """An anonymous profile request is rejected with 403."""
        response = self.client.get(f'/api/profiles/{self.test_user.username}',
                                   content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_retrieve_profile_with_valid_token_succeeds(self):
        """A token-authenticated request returns the requested profile."""
        self._login_verified_user()
        response = self.client.get(
            '/api/profiles/sampleuser',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual('sampleuser', response.data['username'])

    def test_edit_my_profile_succeeds(self):
        """The owner can update their own profile."""
        self._login_verified_user()
        response = self.client.put(
            '/api/profiles/sampleuser/edit',
            content_type='application/json',
            data=json.dumps(self.profile_data),
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_not_authenticated_user_view_author_profiles_fails(self):
        """Listing profiles anonymously is rejected with 403."""
        response = self.client.get(reverse('profiles:profile-list'))
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_authenticated_user_view_author_profiles_succeeds(self):
        """Listing profiles with a valid token succeeds."""
        response = self.client.get(
            reverse('profiles:profile-list'),
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_following_unauthorised(self):
        """Following without credentials is rejected with 403."""
        self._login_verified_user()
        response = self.client.post(
            '/api/profiles/sampleuser/follow',
            content_type='application/json')
        message = {"detail": "Authentication credentials were not provided."}
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data, message)

    def test_following_success(self):
        """An authenticated user can follow a profile."""
        self._login_verified_user()
        response = self.client.post(
            '/api/profiles/sampleuser/follow',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        self.assertEqual(response.status_code, 200)

    def test_unfollowing_unauthorised(self):
        """Unfollowing without credentials is rejected with 403."""
        self._login_verified_user()
        response = self.client.delete(
            '/api/profiles/sampleuser/follow',
            content_type='application/json')
        message = {"detail": "Authentication credentials were not provided."}
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data, message)

    def test_unfollowing_success(self):
        """An authenticated user can unfollow a previously followed profile."""
        self._login_verified_user()
        response = self.client.post(
            '/api/profiles/sampleuser/follow',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        response = self.client.delete(
            '/api/profiles/sampleuser/follow',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        self.assertEqual(response.status_code, 200)

    def test_user_followers_unauthorised(self):
        """Listing followers without credentials is rejected with 403."""
        self._login_verified_user()
        response = self.client.get(
            '/api/profiles/sampleuser/followers',
            content_type='application/json')
        message = {"detail": "Authentication credentials were not provided."}
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.data, message)

    def test_user_followers_success(self):
        """An authenticated user can list a profile's followers."""
        self._login_verified_user()
        response = self.client.get(
            '/api/profiles/sampleuser/followers',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        self.assertEqual(response.status_code, 200)

    def test_following_again_same_user(self):
        """Following the same user twice returns an informative message."""
        self._login_verified_user()
        self.client.post(
            '/api/profiles/sampleuser/follow',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        response = self.client.post(
            '/api/profiles/sampleuser/follow',
            content_type='application/json',
            HTTP_AUTHORIZATION=self.verified_user_login_token())
        message = {'message': "You're already following :sampleuser"}
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, message)
|
# -*- coding: utf-8 -*-
"""
Basic layers implemented by keras
"""
from keras import backend as K
from keras.layers import Layer
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import ReLU
from keras.layers import LeakyReLU
from keras.layers import PReLU
from keras.layers import Softmax
from keras.layers import BatchNormalization
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Concatenate
from keras.layers import Add
from keras.layers import Multiply
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Reshape
from keras.layers import Permute
from keras.layers import UpSampling2D
# ---------------
# basic layers
# ---------------
def conv(inputs, out_filters, ksize=(3, 3), strides=(1, 1), dilation=(1, 1),
         use_bias=True):
    """Apply a same-padded 2D convolution to `inputs`.

    Parameters
    ----------
    inputs: Input tensor
    out_filters: Number of output filters
    ksize: Kernel size. One integer or tuple of two integers
    strides: Strides for moving kernel. One integer or tuple of two integers
    dilation: Dilation of kernel. One integer or tuple of two integers
    use_bias: Whether to use bias
    """
    layer = Conv2D(filters=out_filters, kernel_size=ksize, strides=strides,
                   padding='same', dilation_rate=dilation, use_bias=use_bias)
    return layer(inputs)
def dense(inputs, out_length, use_bias=True):
    """Apply a fully connected layer with `out_length` output units.

    Parameters
    ----------
    inputs: Input tensor
    out_length: Length of outputs
    use_bias: Whether to use bias
    """
    layer = Dense(units=out_length, use_bias=use_bias)
    return layer(inputs)
def relu(inputs):
    """Apply a ReLU activation to `inputs`.

    Parameters
    ----------
    inputs: Input tensor
    """
    activation = ReLU()
    return activation(inputs)
def leaky_relu(inputs, alpha=0.2):
    """Apply a leaky ReLU activation with negative slope `alpha`.

    Parameters
    ----------
    inputs: Input tensor
    alpha: Slope for negative inputs
    """
    activation = LeakyReLU(alpha=alpha)
    return activation(inputs)
def prelu(inputs, shared_axes=(0, 1, 2, 3)):
    """Apply a parametric ReLU whose learned slope is shared over axes.

    Parameters
    ----------
    inputs: Input tensor
    shared_axes: Axes along which the learnable slope parameters are shared
    """
    activation = PReLU(shared_axes=shared_axes)
    return activation(inputs)
def softmax(inputs, axis=-1):
    """Apply a softmax normalization along `axis`.

    Parameters
    ----------
    inputs: Input tensor
    axis: Axis along which the softmax normalization is applied
    """
    activation = Softmax(axis=axis)
    return activation(inputs)
def batch_normalization(inputs, training=True):
    """Apply batch normalization to `inputs`.

    Parameters
    ----------
    inputs: Input tensor
    training: Whether in training phase (passed through to the layer call)
    """
    layer = BatchNormalization()
    return layer(inputs, training=training)
def maxpool(inputs, pool_size=(2, 2)):
    """Downsample `inputs` by taking the maximum over pooling windows.

    Parameters
    ----------
    inputs: Input tensor
    pool_size: Pooling window size, one integer or (height, width) tuple
    """
    layer = MaxPooling2D(pool_size=pool_size)
    return layer(inputs)
def average_pool(inputs, pool_size=(2, 2)):
    """Downsample `inputs` by averaging over pooling windows.

    Parameters
    ----------
    inputs: Input tensor
    pool_size: Pooling window size, one integer or (height, width) tuple
    """
    layer = AveragePooling2D(pool_size=pool_size)
    return layer(inputs)
def global_average_pool(inputs):
    """Average over the spatial dimensions of `inputs`.

    The height and width dimensions are squeezed away, so the output
    tensor has shape (batch, out_filters).

    Parameters
    ----------
    inputs: Input tensor
    """
    layer = GlobalAveragePooling2D()
    return layer(inputs)
def concat(inputs_list):
    """Concatenate a list of input tensors (default layer axis).

    Parameters
    ----------
    inputs_list: Input tensors list
    """
    layer = Concatenate()
    return layer(inputs_list)
def add(inputs_list):
    """Element-wise sum of a list of input tensors.

    Parameters
    ----------
    inputs_list: Input tensors list
    """
    layer = Add()
    return layer(inputs_list)
def multiply(inputs_list):
    """Element-wise product of a list of input tensors.

    Parameters
    ----------
    inputs_list: Input tensors list
    """
    layer = Multiply()
    return layer(inputs_list)
def dropout(inputs, drop_rate):
    """Apply dropout to `inputs`.

    Parameters
    ----------
    inputs: Input tensor
    drop_rate: Float between 0 and 1; fraction of the input units to drop
    """
    layer = Dropout(rate=drop_rate)
    return layer(inputs)
def flatten(inputs):
    """Collapse all non-batch dimensions of `inputs` into one.

    Parameters
    ----------
    inputs: Input tensor
    """
    layer = Flatten()
    return layer(inputs)
def reshape(inputs, new_shape):
    """Reshape `inputs` to `new_shape` (batch dimension excluded).

    Parameters
    ----------
    inputs: Input tensor
    new_shape: Target shape, not including the batch dimension
    """
    layer = Reshape(target_shape=new_shape)
    return layer(inputs)
def permute(inputs, new_dims):
    """Reorder the (non-batch) dimensions of `inputs` per `new_dims`.

    Parameters
    ----------
    inputs: Input tensor
    new_dims: Re-ordered dimension indices
    """
    layer = Permute(dims=new_dims)
    return layer(inputs)
def upsample(inputs, factor, interpolation='nearest'):
    """Upsample the height/width of `inputs` by `factor`.

    Parameters
    ----------
    inputs: Input tensor
    factor: Upsampling factor for (height, width); one integer or a
        tuple of two integers
    interpolation: Either `nearest` or `bilinear`
    """
    layer = UpSampling2D(size=factor, interpolation=interpolation)
    return layer(inputs)
def instance_normalization(inputs):
    """Apply per-sample instance normalization (see InstanceNormalization).

    Parameters
    ----------
    inputs: Input tensor
    """
    layer = InstanceNormalization()
    return layer(inputs)
# ---------------
# combined layers
# ---------------
def conv_bn_relu(inputs, out_filters, ksize=(3, 3), strides=(1, 1),
                 dilation=(1, 1), use_bn=True, use_relu=True, training=True):
    """Convolution optionally followed by batch normalization and ReLU.

    The convolution's bias is dropped whenever batch normalization is
    enabled (use_bias = not use_bn), since the normalization's offset
    makes a bias redundant.

    Parameters
    ----------
    inputs: Input tensor
    out_filters: Number of output filters
    ksize: Kernel size. One integer or tuple of two integers
    strides: Strides for moving kernel. One integer or tuple of two integers
    dilation: Dilation of kernel. One integer or tuple of two integers
    use_bn: Whether to apply batch normalization
    use_relu: Whether to apply a ReLU activation
    training: Whether in training phase (forwarded to batch normalization)
    """
    x = conv(inputs, out_filters, ksize, strides, dilation, not use_bn)
    if use_bn:
        x = batch_normalization(x, training=training)
    return relu(x) if use_relu else x
def conv_in_relu(inputs, out_filters, ksize=(3, 3), strides=(1, 1),
                 dilation=(1, 1), use_in=True, use_relu=True):
    """Convolution optionally followed by instance normalization and ReLU.

    The convolution's bias is dropped whenever instance normalization is
    enabled (use_bias = not use_in), since the normalization cancels any
    constant offset.

    Parameters
    ----------
    inputs: Input tensor
    out_filters: Number of output filters
    ksize: Kernel size. One integer or tuple of two integers
    strides: Strides for moving kernel. One integer or tuple of two integers
    dilation: Dilation of kernel. One integer or tuple of two integers
    use_in: Whether to apply instance normalization
    use_relu: Whether to apply a ReLU activation
    """
    x = conv(inputs, out_filters, ksize, strides, dilation, not use_in)
    if use_in:
        x = instance_normalization(x)
    return relu(x) if use_relu else x
def conv_in_lrelu(inputs, out_filters, ksize=(3, 3), strides=(1, 1),
                  dilation=(1, 1), use_in=True, use_lrelu=True, alpha=0.2):
    """Convolution, then optional instance normalization and leaky ReLU.

    The convolution's bias is dropped whenever instance normalization is
    enabled (use_bias = not use_in).

    Parameters
    ----------
    inputs: Input tensor
    out_filters: Number of output filters
    ksize: Kernel size. One integer or tuple of two integers
    strides: Strides for moving kernel. One integer or tuple of two integers
    dilation: Dilation of kernel. One integer or tuple of two integers
    use_in: Whether to apply instance normalization
    use_lrelu: Whether to apply a leaky ReLU activation
    alpha: Negative slope for the leaky ReLU
    """
    x = conv(inputs, out_filters, ksize, strides, dilation, not use_in)
    if use_in:
        x = instance_normalization(x)
    if use_lrelu:
        x = leaky_relu(x, alpha=alpha)
    return x
# ---------------
# auxiliaries
# ---------------
class InstanceNormalization(Layer):
    """Instance normalization layer.

    Normalizes each sample independently over its spatial axes (axes 1
    and 2; assumes NHWC layout -- TODO confirm). The layer has no
    trainable parameters.
    """
    def __init__(self):
        super(InstanceNormalization, self).__init__()
    @classmethod
    def compute_output_shape(cls, input_shape):
        """Output shape equals input shape (element-wise transform)."""
        return input_shape
    @classmethod
    def call(cls, inputs, variance_epsilon=1e-5):
        """Normalize `inputs` to zero mean / unit variance per sample.

        Parameters
        ----------
        inputs: Input tensor
        variance_epsilon: epsilon added to the variance to avoid zero divide
        """
        mean = K.mean(inputs, axis=[1, 2], keepdims=True)
        # BUGFIX: epsilon must be added to the variance inside the sqrt.
        # The previous code added it to `inputs` before K.var, which does
        # not guard against zero variance (shifting the inputs by a
        # constant leaves the variance unchanged).
        std = K.sqrt(
            K.var(inputs, axis=[1, 2], keepdims=True) + variance_epsilon)
        outputs = (inputs - mean) / std
        return outputs
|
#!/usr/bin/env python
#
# vm_description.py - Abstract class for reading, editing, and writing VMs
#
# September 2013, <NAME>
# Copyright (c) 2013-2017, 2019 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Abstract superclass for reading, editing, and writing VMs.
.. autosummary::
:nosignatures:
VMInitError
VMDescription
"""
from __future__ import print_function
import atexit
import logging
import os
import os.path
import shutil
import tempfile
import warnings
from COT.data_validation import ValueUnsupportedError
from COT.utilities import directory_size, pretty_bytes
logger = logging.getLogger(__name__)
class VMInitError(EnvironmentError):
    """Class representing errors encountered when trying to init/load a VM.

    Raised by :meth:`VMDescription.factory` with an errno-style code,
    a descriptive message, and the offending input file path.
    """
class VMDescription(object):
"""Abstract class for reading, editing, and writing VM definitions.
Examples:
Because instantiating this class creates a temporary directory
(:attr:`working_dir`), it's important to always clean up.
This can be done explicitly::
>>> foo = VMDescription("foo.txt", None)
>>> tmpdir = foo.working_dir
>>> os.path.exists(tmpdir)
True
>>> foo.destroy()
>>> os.path.exists(tmpdir)
False
or implicitly by using this class as a context manager::
>>> with VMDescription("foo.txt", None) as foo:
... tmpdir = foo.working_dir
... os.path.exists(tmpdir)
...
True
>>> os.path.exists(tmpdir)
False
If the specific VM class is unknown, you can use the
:meth:`factory` method to try to obtain an appropriate subclass::
>>> try: # doctest: +ELLIPSIS
... with VMDescription.factory("foo.txt", None) as foo:
... print(foo.__class__.__name__)
... except VMInitError as e:
... print(e)
[Errno 2] Unknown VM description type for input file...
**Properties**
.. autosummary::
:nosignatures:
input_file
output_file
working_dir
platform
config_profiles
default_config_profile
environment_properties
environment_transports
networks
network_descriptions
system_types
version_short
version_long
"""
# Many of these methods are abstract interfaces, so quiet, Pylint!
# pylint: disable=missing-raises-doc
# pylint: disable=redundant-returns-doc
# pylint: disable=no-self-use, unused-argument
@classmethod
def detect_type_from_name(cls, filename):
"""Check the given filename to see if it looks like a type we support.
Does not check file contents, as the given filename may not yet exist.
Args:
filename (str): File name or path
Returns:
str: A string representing a recognized and supported type of file
Raises:
ValueUnsupportedError: if COT can't recognize the file type or
doesn't know how to handle this file type.
"""
raise ValueUnsupportedError("filename", filename, ("none implemented"))
@classmethod
def factory(cls, input_file, *args, **kwargs):
"""Select and create the appropriate subclass (factory method).
Args:
input_file (str): Input file to test against each class's
:meth:`detect_type_from_name` implementation.
*args: Passed through to selected subclass :meth:`__init__`.
**kwargs: Passed through to selected subclass :meth:`__init__`.
Returns:
VMDescription: appropriate subclass instance.
Raises:
VMInitError: if no appropriate subclass is identified
VMInitError: if the selected subclass fails instantiation
"""
vm_class = None
supported_types = []
# pylint doesn't know about __subclasses__
# https://github.com/PyCQA/pylint/issues/555
# TODO: this should be fixed when pylint 2.0 is released
# pylint:disable=no-member
for candidate_class in VMDescription.__subclasses__():
try:
candidate_class.detect_type_from_name(input_file)
vm_class = candidate_class
break
except ValueUnsupportedError as exc:
supported_types += [exc.expected_value]
if not vm_class:
raise VMInitError(2,
"Unknown VM description type for input file -"
" only supported types are {0}"
.format(supported_types),
input_file)
logger.info("Loading '%s' as %s", input_file, vm_class.__name__)
try:
vm = vm_class(input_file, *args, **kwargs)
except ValueUnsupportedError as exc:
raise VMInitError(2, str(exc), input_file)
logger.info("Successfully loaded %s from %s",
vm_class.__name__, input_file)
return vm
    def __init__(self, input_file, output_file=None):
        """Read the given VM description file into memory.

        Also creates a temporary directory as a working directory.

        Args:
            input_file (str): Data file to read in.
            output_file (str): File name to write to.

                * If this VM is read-only, (there will never be an output file)
                  this value should be ``None``
                * If the output filename is not yet known, use ``""`` and
                  subsequently set :attr:`output` when it is determined.
        """
        self._input_file = input_file
        self._product_class = None
        # NOTE(review): logger.verbose is not a stdlib logging level --
        # presumably added by a COT logging helper; confirm.
        logger.verbose("Creating temporary working directory for this VM")
        self._working_dir = tempfile.mkdtemp(prefix="cot")
        logger.debug("Working directory: %s", self.working_dir)
        self._output_file = None
        # Assign through the property so any subclass setter logic applies.
        self.output_file = output_file
        # Ensure the working directory is removed even if the caller never
        # calls destroy() explicitly.
        atexit.register(self.destroy)
    def __enter__(self):
        """Begin a block using this VM as a context manager object."""
        return self

    def __exit__(self, exc_type, exc_value, trace):
        """Exit the context manager block. If no error, call :meth:`write`.

        In any case, also call :meth:`destroy`.

        For the parameters, see :mod:`contextlib`.
        """
        try:
            # Persist changes only when the block completed without error.
            if exc_type is None:
                self.write()
        finally:
            self.destroy()
    def destroy(self):
        """Clean up after ourselves.

        Deletes :attr:`self.working_dir` and its contents.
        """
        try:
            # hasattr guard: destroy() can run (via atexit or __exit__)
            # on a partially constructed instance.
            if hasattr(self,
                       'working_dir') and os.path.exists(self.working_dir):
                logger.verbose("Removing working directory")
                total_size = directory_size(self.working_dir)
                logger.debug("Size of working directory '%s', prior to"
                             " removal, is %s",
                             self.working_dir,
                             pretty_bytes(total_size))
                # Clean up
                shutil.rmtree(self.working_dir)
        except AttributeError:
            # Best-effort cleanup path; swallow attribute errors from a
            # half-initialized instance rather than failing at exit.
            pass
    @property
    def input_file(self):
        """Get the data file to read in."""
        return self._input_file

    @property
    def output_file(self):
        """Get/set the filename that :meth:`write` will output to."""
        return self._output_file

    @output_file.setter
    def output_file(self, value):
        # Plain assignment; no validation is performed here.
        self._output_file = value

    @property
    def working_dir(self):
        """Get a temporary directory this instance can use for storage.

        Will be automatically erased when :meth:`destroy` is called.
        """
        return self._working_dir
    def write(self):
        """Write the VM description to :attr:`output_file`, if any."""
        # Abstract: concrete subclasses must override. Raising only when an
        # output file is set makes read-only use (output_file=None) a no-op.
        if self.output_file:
            raise NotImplementedError("write not implemented")
    @property
    def product_class(self):
        """Get/set the product class identifier, such as com.cisco.csr1000v."""
        return self._product_class

    @product_class.setter
    def product_class(self, product_class):
        # Simple storage; initialized to None in __init__.
        self._product_class = product_class

    @property
    def platform(self):
        """Get the Platform instance object associated with this VM.

        An instance of :class:`~COT.platforms.Platform` or a more specific
        subclass if recognized as such.
        """
        # Abstract: concrete subclasses must override.
        raise NotImplementedError("no platform value available.")
    # The methods and properties below are abstract interface stubs;
    # concrete subclasses must override them.
    def validate_hardware(self):
        """Check sanity of hardware properties for this VM/product/platform.

        Returns:
            bool: ``True`` if hardware is sane, ``False`` if not.
        """
        raise NotImplementedError("validate_hardware not implemented!")

    @property
    def config_profiles(self):
        """Get the list of supported configuration profiles.

        If there are no profiles defined, returns an empty list.
        If there is a default profile, it will be first in the list.
        """
        raise NotImplementedError("config_profiles not implemented!")

    @property
    def default_config_profile(self):
        """Get the name of the default configuration profile.

        Returns:
            str: Profile name or ``None`` if none are defined.
        """
        # Relies on the config_profiles contract: the default profile,
        # when present, is first in the list.
        if self.config_profiles:
            return self.config_profiles[0]
        return None

    @property
    def environment_properties(self):
        """Get the array of environment properties.

        Returns:
            list: Array of dicts (one per property) with the keys
            ``"key"``, ``"value"``, ``"qualifiers"``, ``"type"``,
            ``"label"``, and ``"description"``.
        """
        raise NotImplementedError("environment_properties not implemented")

    @property
    def environment_transports(self):
        """Get/set the list of environment transport methods."""
        raise NotImplementedError("environment_transports not implemented")

    @environment_transports.setter
    def environment_transports(self, value):
        raise NotImplementedError("environment_transports not implemented")

    @property
    def networks(self):
        """Get the list of network names currently defined in this VM."""
        raise NotImplementedError("networks property not implemented!")

    @property
    def network_descriptions(self):
        """Get list of network descriptions currently defined in this VM."""
        raise NotImplementedError(
            "network_descriptions property not implemented!")

    @property
    def system_types(self):
        """Get/set list of virtual system type(s) supported by this VM."""
        raise NotImplementedError("system_types not implemented!")

    @system_types.setter
    def system_types(self, type_list):
        raise NotImplementedError("system_types setter not implemented!")

    @property
    def version_short(self):
        """Get/set a short string describing the product version."""
        raise NotImplementedError("version_short not implemented!")

    @version_short.setter
    def version_short(self, value):
        raise NotImplementedError("version_short setter not implemented!")

    @property
    def version_long(self):
        """Get/set a long string describing the product version."""
        raise NotImplementedError("version_long not implemented!")

    @version_long.setter
    def version_long(self, value):
        raise NotImplementedError("version_long setter not implemented")

    def predicted_output_size(self):
        """Estimate how much disk space (in bytes) is needed to write out.

        Returns:
            int: Estimated number of bytes consumed when writing out to
            :attr:`output_file` (plus any associated files).
        """
        raise NotImplementedError("predicted_output_size not implemented")
    # API methods needed for add-disk
    # Except for convert_disk_if_needed, these are abstract stubs that
    # concrete subclasses must override.
    def convert_disk_if_needed(self,  # pylint: disable=no-self-use
                               disk_image,
                               kind):  # pylint: disable=unused-argument
        """Convert the disk to a more appropriate format if needed.

        Args:
            disk_image (DiskRepresentation): Disk to inspect and possibly
                convert
            kind (str): Image type (harddisk/cdrom).

        Returns:
            DiskRepresentation: :attr:`disk_image`, if no conversion was
            required, or a new :class:`~COT.disks.disk.DiskRepresentation`
            instance representing a converted image that has been created in
            :attr:`output_dir`.
        """
        # Some VMs may not need this, so default to do nothing, not error
        return disk_image

    def search_from_filename(self, filename):
        """From the given filename, try to find any existing objects.

        Args:
            filename (str): Filename to search from

        Returns:
            tuple: ``(file, disk, controller_device, disk_device)``, opaque
            objects of which any or all may be ``None``
        """
        raise NotImplementedError("search_from_filename not implemented")

    def search_from_file_id(self, file_id):
        """From the given file ID, try to find any existing objects.

        Args:
            file_id (str): File ID to search from

        Returns:
            tuple: ``(file, disk, controller_device, disk_device)``, opaque
            objects of which any or all may be ``None``
        """
        raise NotImplementedError("search_from_file_id not implemented")

    def search_from_controller(self, controller, address):
        """From the controller type and device address, look for existing disk.

        Args:
            controller (str): ``'ide'`` or ``'scsi'``
            address (str): Device address such as ``'1:0'``

        Returns:
            tuple: ``(file, disk, controller_device, disk_device)``, opaque
            objects of which any or all may be ``None``
        """
        raise NotImplementedError("search_from_controller not implemented")

    def find_open_controller(self, controller_type):
        """Find the first open slot on a controller of the given type.

        Args:
            controller_type (str): ``'ide'`` or ``'scsi'``

        Returns:
            tuple: ``(controller_device, address_string)`` or ``(None, None)``
        """
        raise NotImplementedError("find_open_controller not implemented")

    def get_id_from_file(self, file_obj):
        """Get the file ID from the given opaque file object.

        Args:
            file_obj (object): File object to query

        Returns:
            str: Identifier string associated with this object
        """
        raise NotImplementedError("get_id_from_file not implemented")

    def get_path_from_file(self, file_obj):
        """Get the file path from the given opaque file object.

        Args:
            file_obj (object): File object to query

        Returns:
            str: Relative path to the file associated with this object
        """
        raise NotImplementedError("get_path_from_file not implemented")

    def get_file_ref_from_disk(self, disk):
        """Get the file reference from the given opaque disk object.

        Args:
            disk (object): Disk object to query

        Returns:
            str: String that can be used to identify the file associated with
            this disk
        """
        raise NotImplementedError("get_file_ref_from_disk not implemented")

    def get_id_from_disk(self, disk):
        """Get the identifier string associated with the given Disk object.

        Args:
            disk (object): Disk object

        Returns:
            str: Identifier string associated with this object
        """
        raise NotImplementedError("get_id_from_disk not implemented")

    def get_common_subtype(self, device_type):
        """Get the sub-type common to all devices of the given type.

        Args:
            device_type (str): Device type such as ``'ide'`` or ``'memory'``.

        Returns:
            str: Subtype string common to all devices of this type, or
            ``None``, if multiple such devices exist and they do not all
            have the same sub-type.
        """
        raise NotImplementedError("get_common_subtype not implemented")

    def check_sanity_of_disk_device(self, disk, file_obj,
                                    disk_item, ctrl_item):
        """Check if the given disk is linked properly to the other objects.

        Args:
            disk (object): Disk object to validate
            file_obj (object): File object which this disk should be linked to
                (optional)
            disk_item (object): Disk device object which should link to
                this disk (optional)
            ctrl_item (object): Controller device object which should link to
                the :attr:`disk_item`

        Raises:
            ValueMismatchError: if the given items are not linked properly.
        """
        raise NotImplementedError(
            "check_sanity_of_disk_device not implemented")
    # Abstract mutators for files, disks, and devices; concrete
    # subclasses must override all of these.
    def add_file(self, file_path, file_id, file_obj=None, disk=None):
        """Add a new file object to the VM or overwrite the provided one.

        Args:
            file_path (str): Path to file to add
            file_id (str): Identifier string for the file in the VM
            file_obj (object): Existing file object to overwrite
            disk (object): Existing disk object referencing :attr:`file`.

        Returns:
            object: New or updated file object
        """
        raise NotImplementedError("add_file not implemented")

    def remove_file(self, file_obj, disk=None, disk_drive=None):
        """Remove the given file object from the VM.

        Args:
            file_obj (object): File object to remove
            disk (object): Disk object referencing :attr:`file`
            disk_drive (object): Disk drive mapping :attr:`file` to a device
        """
        raise NotImplementedError("remove_file not implemented")

    def add_disk(self, disk_repr, file_id, drive_type, disk=None):
        """Add a new disk object to the VM or overwrite the provided one.

        Args:
            disk_repr (DiskRepresentation): Disk file representation
            file_id (str): Identifier string for the file/disk mapping
            drive_type (str): 'harddisk' or 'cdrom'
            disk (object): Existing disk object to overwrite

        Returns:
            object: New or updated disk object
        """
        raise NotImplementedError("add_disk not implemented")

    def add_controller_device(self, device_type, subtype, address,
                              ctrl_item=None):
        """Create a new IDE or SCSI controller, or update existing one.

        Args:
            device_type (str): ``'ide'`` or ``'scsi'``
            subtype (str): Subtype such as ``'virtio'`` (optional)
            address (int): Controller address such as 0 or 1 (optional)
            ctrl_item (object): Existing controller device to update (optional)

        Returns:
            object: New or updated controller device object
        """
        raise NotImplementedError("add_controller_device not implemented")

    def add_disk_device(self, drive_type, address, name, description,
                        disk, file_obj, ctrl_item, disk_item=None):
        """Add a new disk device to the VM or update the provided one.

        Args:
            drive_type (str): ``'harddisk'`` or ``'cdrom'``
            address (str): Address on controller, such as "1:0" (optional)
            name (str): Device name string (optional)
            description (str): Description string (optional)
            disk (object): Disk object to map to this device
            file_obj (object): File object to map to this device
            ctrl_item (object): Controller object to serve as parent
            disk_item (object): Existing disk device to update instead of
                making a new device.

        Returns:
            object: New or updated disk device object.
        """
        raise NotImplementedError("add_disk_device not implemented")

    # API methods needed for edit-hardware
    def create_configuration_profile(self, pid, label, description):
        """Create/update a configuration profile with the given ID.

        Args:
            pid (str): Profile identifier
            label (str): Brief descriptive label for the profile
            description (str): Verbose description of the profile
        """
        raise NotImplementedError("create_configuration_profile "
                                  "not implemented!")

    def delete_configuration_profile(self, profile):
        """Delete the configuration profile with the given ID.

        Args:
            profile (str): Profile identifier
        """
        raise NotImplementedError("delete_configuration_profile "
                                  "not implemented")
# A note on getters/setters that take a profile_list parameter:
#
# A profile name of None is taken to mean "the default for all profiles
# now or in the future that do not explicitly have a different value set."
#
# A profile_list of None or [] is taken to mean "all profiles, including
# the default, as well as any to be defined in the future". For a VM with
# profiles 'a' and 'b' currently defined, this is equivalent to the list
# [None, 'a', 'b']
#
# A profile_list of [None] means "the default value to be inherited by
# any other profiles that do not override it"
#
# A profile_list of [None, "a"] means "the default and profile 'a'". For a
# setter function, this translates to "change 'a' to inherit the default,
# and change the default as well."
#
# A profile_list of ["a", "b", "c"] means "profiles 'a', 'b', and 'c', but
# not the default.
def set_cpu_count(self, cpus, profile_list):
"""Set the number of CPUs.
Args:
cpus (int): Number of CPUs
profile_list (list): Change only the given profiles
"""
raise NotImplementedError("set_cpu_count not implemented!")
def set_memory(self, megabytes, profile_list):
"""Set the amount of RAM, in megabytes.
Args:
megabytes (int): Memory value, in megabytes
profile_list (list): Change only the given profiles
"""
raise NotImplementedError("set_memory not implemented!")
def set_nic_type(self, nic_type, profile_list):
"""Set the hardware type for NICs.
.. deprecated:: 1.5
Use :func:`set_nic_types` instead.
Args:
nic_type (str): NIC hardware type
profile_list (list): Change only the given profiles.
"""
warnings.warn("Use set_nic_types() instead", DeprecationWarning)
self.set_nic_types([nic_type], profile_list)
def set_nic_types(self, type_list, profile_list):
"""Set the hardware type(s) for NICs.
Args:
type_list (list): NIC hardware type(s)
profile_list (list): Change only the given profiles.
"""
raise NotImplementedError("set_nic_types not implemented!")
def get_nic_count(self, profile_list):
"""Get the number of NICs under the given profile(s).
Args:
profile_list (list): Profile(s) of interest.
Returns:
dict: ``{ profile_name : nic_count }``
"""
raise NotImplementedError("get_nic_count not implemented!")
def set_nic_count(self, count, profile_list):
"""Set the given profile(s) to have the given number of NICs.
Args:
count (int): number of NICs
profile_list (list): Change only the given profiles
"""
raise NotImplementedError("set_nic_count not implemented!")
def create_network(self, label, description):
"""Define a new network with the given label and description.
Also serves to update the description of an existing network label.
Args:
label (str): Brief label for the network
description (str): Verbose description of the network
"""
raise NotImplementedError("create_network not implemented!")
def set_nic_networks(self, network_list, profile_list):
"""Set the NIC to network mapping for NICs under the given profile(s).
.. note::
If the length of :attr:`network_list` is less than the number of
NICs, will use the last entry in the list for all remaining NICs.
Args:
network_list (list): List of networks to map NICs to
profile_list (list): Change only the given profiles
"""
raise NotImplementedError("set_nic_networks not implemented!")
def set_nic_mac_addresses(self, mac_list, profile_list):
"""Set the MAC addresses for NICs under the given profile(s).
.. note::
If the length of :attr:`mac_list` is less than the number of NICs,
will use the last entry in the list for all remaining NICs.
Args:
mac_list (list): List of MAC addresses to assign to NICs
profile_list (list): Change only the given profiles
"""
raise NotImplementedError("set_nic_mac_addresses not implemented!")
def set_nic_names(self, name_list, profile_list):
"""Set the device names for NICs under the given profile(s).
Args:
name_list (list): List of names to assign.
profile_list (list): Change only the given profiles
"""
raise NotImplementedError("set_nic_names not implemented!")
def get_serial_count(self, profile_list):
"""Get the number of serial ports under the given profile(s).
Args:
profile_list (list): Change only the given profiles
Returns:
dict: ``{ profile_name : serial_count }``
"""
raise NotImplementedError("get_serial_count not implemented!")
def set_serial_count(self, count, profile_list):
"""Set the given profile(s) to have the given number of NICs.
Args:
count (int): Number of serial ports
profile_list (list): Change only the given profiles
"""
raise NotImplementedError("set_serial_count not implemented!")
def set_serial_connectivity(self, conn_list, profile_list):
    """Set the serial port connectivity under the given profile(s).

    Args:
      conn_list (list): List of connectivity strings
      profile_list (list): Change only the given profiles

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("set_serial_connectivity not implemented!")
def get_serial_connectivity(self, profile):
    """Get the serial port connectivity strings under the given profile.

    Args:
      profile (str): Profile of interest.

    Returns:
      list: List of connectivity strings

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("get_serial_connectivity not implemented!")
def set_scsi_subtype(self, subtype, profile_list):
    """Set the device subtype for the SCSI controller(s).

    .. deprecated:: 1.5
      Use :func:`set_scsi_subtypes` instead.

    Args:
      subtype (str): SCSI subtype string
      profile_list (list): Change only the given profiles
    """
    # Deprecated shim: warn, then delegate to the list-based API with a
    # single-element list.
    warnings.warn("Use set_scsi_subtypes() instead", DeprecationWarning)
    self.set_scsi_subtypes([subtype], profile_list)
def set_scsi_subtypes(self, type_list, profile_list):
    """Set the device subtype list for the SCSI controller(s).

    Args:
      type_list (list): SCSI subtype string list
      profile_list (list): Change only the given profiles

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("set_scsi_subtypes not implemented!")
def set_ide_subtype(self, subtype, profile_list):
    """Set the device subtype for the IDE controller(s).

    .. deprecated:: 1.5
      Use :func:`set_ide_subtypes` instead.

    Args:
      subtype (str): IDE subtype string
      profile_list (list): Change only the given profiles
    """
    # Deprecated shim: warn, then delegate to the list-based API with a
    # single-element list.
    warnings.warn("Use set_ide_subtypes() instead", DeprecationWarning)
    self.set_ide_subtypes([subtype], profile_list)
def set_ide_subtypes(self, type_list, profile_list):
    """Set the device subtype list for the IDE controller(s).

    Args:
      type_list (list): IDE subtype string list
      profile_list (list): Change only the given profiles

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("set_ide_subtypes not implemented!")
# API methods needed for edit-product
# API methods needed for edit-properties
def get_property_value(self, key):
    """Get the value of the given property.

    Args:
      key (str): Property identifier

    Returns:
      str: Value of this property, or ``None``

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("get_property_value not implemented")
def set_property_value(self, key, value,
                       user_configurable=None, property_type=None,
                       label=None, description=None):
    """Set the value of the given property (converting value if needed).

    Args:
      key (str): Property identifier
      value (object): Value to set for this property
      user_configurable (bool): Should this property be configurable at
          deployment time by the user?
      property_type (str): Value type - 'string' or 'boolean'
      label (str): Brief explanatory label for this property
      description (str): Detailed description of this property

    Returns:
      str: the (converted) value that was set.

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("set_property_value not implemented")
def config_file_to_properties(self, file_path, user_configurable=None):
    """Import each line of a text file into a configuration property.

    Args:
      file_path (str): File name to import.
      user_configurable (bool): Should the properties be configurable at
          deployment time by the user?

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("config_file_to_properties not implemented")
# API methods needed for info
# Map of the user-facing verbosity option to an internal numeric level.
# ``None`` (i.e. no option given) is the middle, default level.
verbosity_options = {
    'brief': 0,
    None: 1,
    'verbose': 2
}
def info_string(self, width=79, verbosity_option=None):
    """Get a descriptive string summarizing the contents of this VM.

    Args:
      width (int): Line length to wrap to where possible.
      verbosity_option (str): 'brief', None (default), or 'verbose'

    Returns:
      str: Wrapped, appropriately verbose string.

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("info_string not implemented")
def profile_info_string(self, width=79, verbosity_option=None):
    """Get a string summarizing available configuration profiles.

    Args:
      width (int): Line length to wrap to if possible
      verbosity_option (str): 'brief', None (default), or 'verbose'

    Returns:
      str: Appropriately formatted and verbose string.

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("profile_info_string not implemented")
# API methods needed for inject-config
def find_empty_drive(self, drive_type):
    """Find a disk device that exists but contains no data.

    Args:
      drive_type (str): Disk drive type, such as 'cdrom' or 'harddisk'

    Returns:
      object: Hardware device object, or None.

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("find_empty_drive not implemented")
def find_device_location(self, device):
    """Find the controller type and address of a given device object.

    Args:
      device (object): Hardware device object.

    Returns:
      tuple: ``(type, address)``, such as ``("ide", "1:0")``.

    Raises:
      NotImplementedError: must be implemented by a concrete subclass.
    """
    raise NotImplementedError("find_device_location not implemented")
if __name__ == "__main__":  # pragma: no cover
    # Run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
|
<gh_stars>0
#
# This file is part of the FFEA simulation package
#
# Copyright (c) by the Theory and Development FFEA teams,
# as they appear in the README.md file.
#
# FFEA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFEA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FFEA. If not, see <http://www.gnu.org/licenses/>.
#
# To help us fund FFEA development, we humbly ask that you cite
# the research papers on the package.
#
import sys, os
from time import sleep
import numpy as np
from FFEA_exceptions import *
class FFEA_pin:
    """In-memory representation of an FFEA pinned-nodes (.pin) file.

    Holds a flat list of pinned node indices (``self.index``) together with
    a running count, and supports loading from / writing to the
    'ffea pinned nodes file' text format.
    """

    def __init__(self, fname=""):
        """Initialise from *fname*; an empty fname yields a valid empty object."""
        self.reset()

        # Empty fname gives an empty (but valid) object
        if fname == "":
            self.valid = True
            sys.stdout.write("done! Empty object initialised.\n")
            return

        try:
            self.load(fname)
        except FFEAFormatError as e:
            self.reset()
            print_error()
            print("Formatting error at line " + e.lin + "\nLine(s) should be formatted as follows:\n\n" + e.lstr)
            raise
        except FFEAIOError as e:
            # NOTE(review): this branch reports the problem but does not
            # re-raise, leaving the object in its reset (invalid) state —
            # presumably a deliberate best-effort; confirm.
            self.reset()
            print_error()
            print("Input error for file " + e.fname)
            if e.fext != [""]:
                print(" Acceptable file types:")
                for ext in e.fext:
                    print(" " + ext)
        except IOError:
            raise

    def load(self, fname):
        """Load a pin file, dispatching on the file extension.

        Args:
            fname (str): path to a ``.pin`` file.

        Raises:
            FFEAIOError: if the extension is not ``.pin``.
        """
        sys.stdout.write("Loading FFEA pin file...")

        base, ext = os.path.splitext(fname)
        if ext == ".pin":
            self.load_pin(fname)
        else:
            raise FFEAIOError(fname=fname, fext=[".pin"])

        self.valid = True
        self.empty = False
        sys.stdout.write("done!\n")

    def load_pin(self, fname):
        """Parse a .pin file: header line, count line, separator, then one
        node index per line until EOF or a blank line."""
        # Context manager guarantees the handle is closed even on a parse error
        # (the original leaked the handle on the TypeError path).
        with open(fname, "r") as fin:
            # Test format: accept both current and legacy header lines
            line = fin.readline().strip()
            if line != "ffea pinned nodes file" and line != "walrus pinned nodes file":
                print("\tExpected 'ffea pinned nodes file' but found " + line)
                raise TypeError

            # Parse (but do not store) the declared count; the real count is
            # rebuilt while reading.  Parsing still validates the line shape.
            int(fin.readline().split()[1])
            fin.readline()  # skip the "pinned nodes:" separator line

            # Read pinned nodes now
            while True:
                line = fin.readline().strip()
                if line == "":
                    break
                self.add_pinned_node(line)

    def add_pinned_node(self, anint):
        """Append a node index (int or int-like string) to the pinned list."""
        self.index.append(int(anint))
        self.num_pinned_nodes += 1

    def remove_pinned_node(self, anint):
        """Remove a node index from the pinned list, warning if absent."""
        try:
            self.index.remove(anint)
            self.num_pinned_nodes -= 1
        except ValueError:
            print("Index " + str(anint) + " not in list.")

    def print_details(self):
        """Print a short human-readable summary of the pinned nodes."""
        print("num_pinned_nodes = %d" % (self.num_pinned_nodes))
        sleep(1)
        print("".join(str(i) + " " for i in self.index))

    def write_to_file(self, fname):
        """Write the pinned nodes out in .pin format."""
        with open(fname, "w") as f:
            f.write("ffea pinned nodes file\nnum_pinned_nodes %d\npinned nodes:\n" % (self.num_pinned_nodes))
            for i in self.index:
                f.write("%d\n" % (i))

    def pin_radially(self, node, oindex, radius, top=None, linear=0, reset=1):
        """Pin every node within *radius* of node *oindex*.

        Args:
            node: FFEA node object with ``.pos`` (per-node coordinates) and
                ``.num_nodes``.
            oindex (int): index of the origin node.
            radius (float): pin all nodes strictly closer than this.
            top: FFEA topology object; needed when ``linear != 0``.
            linear (int): if non-zero, consider only linear (corner) nodes,
                taken from the first 4 node indices of each element.
            reset (int): if > 0, clear any previously pinned nodes first.
        """
        if reset > 0:
            self.reset()

        origin = node.pos[oindex]

        # Build the candidate index list
        if linear == 0:
            indices = range(node.num_nodes)
        elif top is None:
            print("Linear indices cannot be found without a topology. Defaulting to all nodes...")
            # BUG FIX: the original discarded this range, leaving 'indices'
            # unassigned and raising NameError in the loop below.
            indices = range(node.num_nodes)
        else:
            indices = []
            for el in top.element:
                for i in el.n[0:4]:
                    indices.append(i)
            indices = list(set(indices))

        for i in indices:
            if np.linalg.norm(node.pos[i] - origin) < radius:
                self.add_pinned_node(i)

    def reset(self):
        """Return the object to the empty, not-yet-loaded state."""
        self.index = []
        self.num_pinned_nodes = 0
        self.valid = False
        self.empty = True
|
<filename>autoscalingsim/scaling/policiesbuilder/scaling_policy.py
import pandas as pd
from .adjustmentplacement.adjustment_policy import AdjustmentPolicy
from .scaling_policy_conf import ScalingPolicyConfiguration
from autoscalingsim.infrastructure_platform.platform_model import PlatformModel
from autoscalingsim.scaling.scaling_model import ScalingModel
from autoscalingsim.scaling.state_reader import StateReader
from autoscalingsim.scaling.scaling_manager import ScalingManager
from autoscalingsim.simulator import conf_keys
class ScalingPolicy:
    """
    Defines *how* the autoscaling is done (mechanism).
    Follows the SCAPE process:
    Scale: determines the desired service instances count.
    Combine: determines how the scaled service instances can be combined.
    Adjust: determines the follow-up scaling of the virtual cluster.
    Place: maps the scaled service instances onto the nodes.
    Enforce: enforce the results of the above steps by updating the shared state.
    """
    def __init__(self, simulation_conf : dict, state_reader : StateReader,
                 scaling_manager : ScalingManager, service_instance_requirements : dict,
                 configs_contents_table : dict, node_groups_registry : 'NodeGroupsRegistry'):
        # Shared manager used to compute/refresh the desired service states.
        self.scaling_manager = scaling_manager
        # All three timestamps start at the simulation start; they gate the
        # warm-up period, the sync period, and the model refresh period in
        # reconcile_state() below.
        self.simulation_start_time = simulation_conf['starting_time']
        self.last_sync_timestamp = simulation_conf['starting_time']
        self.last_models_refresh_timestamp = simulation_conf['starting_time']
        self.scaling_settings = ScalingPolicyConfiguration(configs_contents_table[conf_keys.CONF_SCALING_POLICY_KEY])
        # A simulation-level override takes precedence over the policy config.
        if 'models_refresh_period' in simulation_conf:
            self.scaling_settings.models_refresh_period = simulation_conf['models_refresh_period']
        self.platform_model = PlatformModel(state_reader, scaling_manager,
                                            service_instance_requirements, self.scaling_settings.services_scaling_config,
                                            simulation_conf, configs_contents_table, node_groups_registry)

    def reconcile_state(self, cur_timestamp : pd.Timestamp):
        """Advance the policy to *cur_timestamp*.

        Refreshes the scaling models at most once per models_refresh_period;
        then, once the warm-up has elapsed and at most once per sync_period,
        computes the desired state and adjusts the platform; finally steps the
        platform model unconditionally.
        """
        if cur_timestamp - self.last_models_refresh_timestamp >= self.scaling_settings.models_refresh_period:
            self.scaling_manager.refresh_models(cur_timestamp)
            self.last_models_refresh_timestamp = cur_timestamp
        if (cur_timestamp - self.simulation_start_time >= self.scaling_settings.warm_up) and \
            (cur_timestamp - self.last_sync_timestamp >= self.scaling_settings.sync_period):
            desired_states_to_process = self.scaling_manager.compute_desired_state(cur_timestamp)
            # Only adjust the platform when there is something to process, but
            # the sync timestamp advances either way.
            if len(desired_states_to_process) > 0:
                self.platform_model.adjust_platform_state(cur_timestamp, desired_states_to_process)
            self.last_sync_timestamp = cur_timestamp
        self.platform_model.step(cur_timestamp)

    def compute_desired_node_count(self, simulation_start : pd.Timestamp,
                                   simulation_step : pd.Timedelta,
                                   simulation_end : pd.Timestamp) -> dict:
        # Pure delegation to the platform model.
        return self.platform_model.compute_desired_node_count(simulation_start, simulation_step, simulation_end)

    def compute_actual_node_count_and_cost(self, simulation_start : pd.Timestamp,
                                           simulation_step : pd.Timedelta,
                                           simulation_end : pd.Timestamp) -> dict:
        # Pure delegation to the platform model.
        return self.platform_model.compute_actual_node_count_and_cost(simulation_start, simulation_step, simulation_end)

    def compute_actual_node_count(self, simulation_start : pd.Timestamp,
                                  simulation_step : pd.Timedelta,
                                  simulation_end : pd.Timestamp) -> dict:
        # Pure delegation to the platform model.
        return self.platform_model.compute_actual_node_count(simulation_start, simulation_step, simulation_end)

    def scaling_settings_for_service(self, service_name : str):
        # Per-service scaling configuration lookup.
        return self.scaling_settings.scaling_settings_for_service(service_name)

    @property
    def service_regions(self):
        # Regions currently known to the platform model.
        return self.platform_model.service_regions
|
import unittest
import numpy as np
import pandas as pd
from cbsyst.MyAMI_V2 import MyAMI_params, MyAMI_K_calc, MyAMI_pK_calc, MyAMI_K_calc_multi
class MyAMIConsistency(unittest.TestCase):
    """Compare MyAMI_V2 with MyAMI_V1 and against published constants."""

    def test_CompareToMyAMI_V1(self):
        """K parameters from MyAMI_V2 must reproduce MyAMI_V1 output.

        The reference parameter dicts below were computed by MyAMI_V1.py for
        ambient seawater and for perturbed [Ca]/[Mg] (args are (Ca, Mg)).
        """
        # parameters calculated by MyAMI_V1.py
        MyAMI_orig = {'K0': np.array([-60.240900000000003, 93.451700000000002, 23.358499999999999, 0.023517, -0.023656, 0.0047035999999999996]),
                      'K1': np.array([61.217199999999998, -3633.8600000000001, -9.6776999999999997, 0.011554999999999999, -0.00011519999999999999]),
                      'K2': np.array([-25.928999999999998, -471.77999999999997, 3.16967, 0.017809999999999999, -0.0001122]),
                      'KB': np.array([148.0248, 137.1942, 1.6214200000000001, -8966.8999999999996, -2890.5300000000002, -77.941999999999993, 1.728, -0.099599999999999994, -24.4344, -25.085000000000001, -0.24740000000000001, 0.053104999999999999]),
                      'KW': np.array([148.96520000000001, -13847.26, -23.652100000000001, 118.67, -5.9770000000000003, 1.0495000000000001, -0.016150000000000001]),
                      'KspC': np.array([-171.90649999999999, -0.077993000000000007, 2839.319, 71.594999999999999, -0.77712000000000003, 0.0028425999999999998, 178.34, -0.077109999999999998, 0.0041248999999999999]),
                      'KspA': np.array([-171.94499999999999, -0.077993000000000007, 2903.2930000000001, 71.594999999999999, -0.068392999999999995, 0.0017275999999999999, 88.135000000000005, -0.10018000000000001, 0.0059414999999999997]),
                      'KSO4': np.array([141.328, -4276.1000000000004, -23.093, -13856.0, 324.56999999999999, -47.985999999999997, 35474.0, -771.53999999999996, 114.723, -2698.0, 1776.0])}

        MyAMI_lowMgnormCa = {'K0': np.array([-58.835988024241153, 91.481812147562721, 22.67747869243458, 0.015798688488939287, -0.018255877435858551, 0.0038003679139008897]),
                             'K1': np.array([58.062087886559837, -3494.0233318473447, -9.2038164315804138, 0.011168501289914329, -0.00011365376326036976]),
                             'K2': np.array([-31.434097349400538, -162.75662343373008, 3.9109079889323324, 0.017704525744139957, -0.00011016412477939389]),
                             'KB': np.array([156.24940221168944, 128.25079663389661, 1.2732109192923926, -9381.59308693192, -2723.8280995436671, -66.130468335345725, 2.0505262462401235, -0.11549440220126056, -25.63858301431771, -23.400388268842654, -0.19484439742973533, 0.04906551592740143]),
                             'KW': np.array([167.00613348978843, -14400.583448847821, -26.52650914480521, 338.16897225894417, -10.913107565225641, 1.7768705047133142, -0.018636421994989119]),
                             'KspC': np.array([-82.768891656782529, -0.0510250425127734, 653.61741054522099, 35.217868927106146, -0.88625140484347653, 0.0030161168011013243, 188.12909155930794, -0.074743157746358937, 0.0040202923717078919]),
                             'KspA': np.array([-81.943776995027889, -0.050733308492461167, 697.26556788488676, 34.861317352102368, -0.17500557330551639, 0.0018967287129921546, 97.551225643759196, -0.097813597252809872, 0.0058373362605226046]),
                             'KSO4': np.array([144.01723254985009, -4387.5774837397103, -23.495881238664886, -14072.430144624854, 329.36367918201313, -48.71841105264248, 35279.449308067575, -770.95513413049468, 114.68004975234182, -2574.4649294352289, 1744.9018046633666])}

        MyAMI_highMgnormCa = {'K0': np.array([-61.810571613553734, 95.652592105230966, 24.119389775679647, 0.031633200121371587, -0.029340456577593793, 0.0056540258038219647]),
                              'K1': np.array([64.646362988472191, -3785.8006010138361, -10.192756962536393, 0.011991951216023209, -0.00011695382574553078]),
                              'K2': np.array([-22.608853013802992, -640.62695443600217, 2.7186241756853655, 0.017134698149918966, -0.00010846862856783123]),
                              'KB': np.array([135.80210783355605, 148.02810999968719, 1.9073200904360372, -8397.0531700794145, -3102.8610734941999, -91.196260901984544, 1.8875655121628416, -0.10394558858478999, -22.634895306112007, -27.082927065489599, -0.29068978419138441, 0.057501272476377069]),
                              'KW': np.array([152.35156939395085, -14181.651163426186, -24.014243686459675, 94.885045534960184, -5.0639203227863989, 0.91026133561441924, -0.017428588478068725]),
                              'KspC': np.array([-204.75418064702887, -0.088662754155618279, 3593.6747830384475, 85.188432139333557, -0.77303216591690571, 0.0028322646621595665, 180.47102892397942, -0.077798210121014622, 0.0041609885276488552]),
                              'KspA': np.array([-205.27704739574395, -0.088822477794205643, 3669.2749301858271, 85.387619157326156, -0.065400426582049809, 0.0017191937785523521, 90.430831723766886, -0.10086886021110948, 0.0059773450025442203]),
                              'KSO4': np.array([138.11090558191418, -4143.2416843287729, -22.610868816926811, -13639.732449040992, 319.889250580885, -47.26969380854311, 35696.914533285184, -772.52584556489103, 114.82170476859116, -2829.3346454669118, 1808.8923394974406])}

        MyAMI_normMglowCa = {'K0': np.array([-60.354861415114186, 93.6114901661186, 23.413741974192469, 0.019471443043081429, -0.020880580475490663, 0.0042360614216956959]),
                             'K1': np.array([60.571282498053101, -3604.851785021769, -9.5808093449178067, 0.011627611432340741, -0.00011554542195645259]),
                             'K2': np.array([-27.797620974421175, -379.10039640360452, 3.4344772188147248, 0.017795324243456347, -0.00011189730601039271]),
                             'KB': np.array([149.73578915124264, 135.51878754422717, 1.5697190283621318, -9047.2694347882134, -2859.3270820038338, -75.691260952752486, 1.7275714796068642, -0.10040470700438676, -24.686290139620521, -24.773314274598235, -0.23961001755283762, 0.052391979614480146]),
                             'KW': np.array([149.5757384358902, -13863.557270210315, -23.750438094274756, 129.08418248409774, -6.2195684718423587, 1.084610305608863, -0.015439222265140332]),
                             'KspC': np.array([-211.52170472418692, -0.090199167158515448, 3856.1688808669751, 87.682933659918803, -0.85761852467556132, 0.0029765579810714267, 189.36084847727781, -0.076643561611619324, 0.0041047729754152317]),
                             'KspA': np.array([-212.32447841169937, -0.090444362804512873, 3938.9176464157658, 87.995927923836049, -0.1501097667611383, 0.0018636059614886204, 99.328910907417679, -0.099710661830045427, 0.0059212989088641534]),
                             'KSO4': np.array([141.1770439414571, -4273.0265734374207, -23.069475159874695, -13881.516800613268, 325.4259752983441, -48.119398949498262, 35445.935681241004, -771.56704443236777, 114.73121075935127, -2676.1174771340106, 1770.1552350112891])}

        MyAMI_normMghighCa = {'K0': np.array([-58.590903344471016, 91.138226208152034, 22.558657775967909, 0.049708549006778388, -0.041565798253250033, 0.0077243125633299553]),
                              'K1': np.array([64.31775412475875, -3774.150593504583, -10.142460932422811, 0.010789364340744423, -0.00011172101044368213]),
                              'K2': np.array([-18.1184401789505, -847.60828395037345, 2.0555415100456993, 0.018079843009880359, -0.00011364444693652285]),
                              'KB': np.array([141.5124421720904, 144.20233560051338, 1.8668270247130392, -8660.6047342969923, -3013.252222111963, -88.573923681298325, 1.6904831429939082, -0.093069165984674337, -23.475935776590447, -26.399150028099069, -0.28426489312070991, 0.056213603865328497]),
                              'KW': np.array([145.10404153248484, -13633.01312911182, -23.109609760314786, 80.944017507293069, -5.2690814493605655, 0.95142669086974152, -0.020499466300191754]),
                              'KspC': np.array([41.56845663058197, -0.012494837547226051, -2621.270517368956, -15.106275506980497, -0.42490307162652657, 0.0022546429971244037, 128.59267314835913, -0.078567023251951862, 0.004187021665224497]),
                              'KspA': np.array([45.46883333289923, -0.011231833768196238, -2654.0719220492115, -16.719120961494937, 0.28994297991389995, 0.0011290794887153634, 37.493189951682702, -0.10162567004805068, 0.0060026679175019422]),
                              'KSO4': np.array([144.299587462815, -4382.0135712955152, -23.545133256925382, -13723.014934595181, 320.04593550412079, -47.277080818942601, 35505.166190487507, -770.55784024737886, 114.56947002838029, -2752.9698393649651, 1790.6413670134023])}

        MyAMI_lowMglowCa = {'K0': np.array([-59.031957809407686, 91.756584913758871, 22.772475270805277, 0.012439048955401214, -0.015957562302394309, 0.0034127941095757714]),
                            'K1': np.array([57.645653166592687, -3475.20508087085, -9.1413869167198492, 0.011261638932758035, -0.00011407852450762963]),
                            'K2': np.array([-33.797052214730329, -34.687019726678315, 4.2408857775382689, 0.017505028643035214, -0.00010825154698773285]),
                            'KB': np.array([156.5145262978281, 127.50333419476038, 1.2215711713924888, -9399.3466614485496, -2714.0159959999696, -64.48136298090111, 2.1233847822014575, -0.11922787349734948, -25.676139876177906, -23.252155109817927, -0.18709894308737055, 0.04864009697090256]),
                            'KW': np.array([167.10645326206787, -14407.78610729866, -26.535372869940193, 339.51112629715283, -10.93693969267213, 1.7782409980790552, -0.017212537882267356]),
                            'KspC': np.array([-133.24446950415958, -0.067315974817180324, 1914.5973496967124, 55.857418991475782, -1.0537934906898849, 0.0032909347910871382, 211.26771344577173, -0.07375599800265277, 0.0039795671790368381]),
                            'KspA': np.array([-133.57641396700726, -0.067394420420986825, 1986.7281617372416, 55.974478381552849, -0.34424060172868359, 0.002174466671642204, 120.936193025869, -0.096829351862153537, 0.0057968937363111032]),
                            'KSO4': np.array([142.87795647594166, -4336.8363580664582, -23.324131972826077, -14191.380110202912, 331.82738359227761, -49.092005161740197, 35356.825240910941, -772.1635449423793, 114.86252820078565, -2594.6365722656565, 1751.0699498051545])}

        MyAMI_highMghighCa = {'K0': np.array([-60.242558923531988, 93.454026697777721, 23.35930393085005, 0.058514324588747689, -0.047729633422100563, 0.0087550495701775415]),
                              'K1': np.array([67.975551223686139, -3936.2440524408544, -10.691852254750499, 0.011246896099572095, -0.00011355427640403392]),
                              'K2': np.array([-17.015715892221774, -902.20421770611688, 1.9199531113654376, 0.017212262772804524, -0.00010799407526682958]),
                              'KB': np.array([132.80183421700798, 154.26130412176894, 2.2945018159380695, -8235.6462474105119, -3180.0374178989064, -106.69605945646256, 1.6982781679275936, -0.090838190573174118, -22.203197266497337, -28.305328913879592, -0.34878219590073556, 0.060890052818741602]),
                              'KW': np.array([148.59341506108012, -13995.447057143834, -23.46484882933628, 54.475141138360499, -4.2384008335651258, 0.79103107136328821, -0.020072375108112162]),
                              'KspC': np.array([-32.141860679870902, -0.035931500758019999, -823.93579550223012, 15.115652324166364, -0.53054878823398877, 0.0024230088434383269, 145.65725450322446, -0.078466431425245023, 0.0041921237816589232]),
                              'KspA': np.array([-29.574105962378695, -0.035101253030014816, -824.33399730398355, 14.049494193893432, 0.18187006354626084, 0.0013017899807812074, 54.926624770335998, -0.1015408905334869, 0.0060085347187136574]),
                              'KSO4': np.array([141.44308527847932, -4263.2391572297274, -23.117231632134168, -13446.393136941231, 313.79318037915908, -46.323718708827585, 35723.642314394208, -770.85435112916298, 114.56164858754464, -2903.5083191909271, 1828.8097340479994])}

        # Compare to MyAMI_V2
        def pcomp(par, opar):
            """True if every K computed from the two parameter sets agrees to 1e-6."""
            diffs = []
            Ks = MyAMI_K_calc(param_dict=par)
            oKs = MyAMI_K_calc(param_dict=opar)
            for k in Ks.keys():
                diffs.append(Ks[k] - oKs[k])
            return all(abs(np.array(diffs)) < 1e-6)

        self.assertTrue(pcomp(MyAMI_params(),
                              MyAMI_orig), msg='Ambient params')
        self.assertTrue(pcomp(MyAMI_params(0.01, 0.01),
                              MyAMI_lowMgnormCa), msg='Low Mg, Norm Ca params')
        self.assertTrue(pcomp(MyAMI_params(0.01, 0.1),
                              MyAMI_highMgnormCa), msg='High Mg, Norm Ca params')
        self.assertTrue(pcomp(MyAMI_params(0.005, 0.05),
                              MyAMI_normMglowCa), msg='Normal Mg, Low Ca params')
        self.assertTrue(pcomp(MyAMI_params(0.05, 0.05),
                              MyAMI_normMghighCa), msg='Normal Mg, High Ca params')
        self.assertTrue(pcomp(MyAMI_params(0.05, 0.1),
                              MyAMI_highMghighCa), msg='High Mg, High Ca params')
        self.assertTrue(pcomp(MyAMI_params(0.005, 0.01),
                              MyAMI_lowMglowCa), msg='Low Mg, Low Ca params')
        return

    def test_CompareToDickson2007(self):
        """Check Ks at 25 degC and 35 PSU against published reference values.

        Parameters are from Dickson, Sabine & Christian (Guide to best
        practises for ocean CO2 measurements, PICES Special Publication,
        2007), Chapter 5.7.2 (seawater), except KspC and KspA which are from
        Zeebe & Wolf-Gladrow, 2001, Appendix A.10.
        """
        K_check = {'K0': np.exp(-3.5617),
                   'K1': 10**(-5.8472),
                   'K2': 10**(-8.9660),
                   'KB': np.exp(-19.7964),
                   # 'KW': np.exp(-30.434),
                   'KSO4': np.exp(-2.30),
                   'KspC': 10**-6.3693,
                   'KspA': 10**-6.1883}
        Ks = MyAMI_K_calc()
        for k, p in K_check.items():
            # Ratio test: each K must match the published value to ~3 decimal
            # places of the ratio.
            self.assertAlmostEqual(Ks[k] / p, 1,
                                   places=3,
                                   msg='failed on ' + k)
        return

    def test_CompareToMehrbachData(self):
        """
        Compares pK1 and pK2 calcualted by MyAMI_V2 to data from
        Mehrbach et al (1973), as per Lueker et al (2000).
        Test data on Total pH scale taken from Table 2 of Lueker et al (2000)
        """
        # read data
        lk = pd.read_csv('cbsyst/test_data/Lueker2000/Lueker2000_Table2.csv', comment='#')
        # calculate MyAMI Ks
        mKs = MyAMI_K_calc(lk.TempC, lk.Sal)
        # calculate pK1 and pK2 residuals
        rpK1 = lk.pK1 - -np.log10(mKs.K1)
        rpK2 = lk.pK2 - -np.log10(mKs.K2)
        # calculate median and 95% CI of residuals
        rpK1_median = rpK1.median()
        rpK1_95ci = np.percentile(rpK1[~np.isnan(rpK1)], (2.5, 97.5))
        self.assertLessEqual(abs(rpK1_median), 0.005, msg='Median offset from Mehrbach (1973) pK1.')
        self.assertTrue(all(abs(rpK1_95ci) <= 0.02), msg='95% CI of difference from Mehrbach pK1 <= 0.02')
        # BUG FIX: the original computed rpK1.median() here, so the pK2 median
        # offset was never actually tested.
        rpK2_median = rpK2.median()
        rpK2_95ci = np.percentile(rpK2[~np.isnan(rpK2)], (2.5, 97.5))
        self.assertLessEqual(abs(rpK2_median), 0.005, msg='Median offset from Mehrbach (1973) pK2.')
        self.assertTrue(all(abs(rpK2_95ci) <= 0.02), msg='95% CI of difference from Mehrbach pK2 <= 0.02')
        return
if __name__ == '__main__':
    # Allow the test suite to be run directly: python <this file>
    unittest.main()
|
<filename>acoustics/testing_detailed_sound_propagation.py
# -*- coding: utf-8 -*-
"""Tests for the detailed sound propagation module
Created on Tue Jun 18 11:37:57 2019
@author: tbeleyur
"""
import unittest
import numpy as np
np.random.seed(82319)
import pandas as pd
import scipy.spatial as spatial
import statsmodels.api as sm
import statsmodels.formula.api as smf
from detailed_sound_propagation import *
class TestGetPointsInBetween(unittest.TestCase):
    """Tests for get_points_in_between: which of a set of candidate points
    fall inside the rectangle joining a start point to an end point."""

    def setUp(self):
        self.start = np.array([1, 1])
        self.end = np.array([5, 5])
        # [9,9] lies beyond the end point and must never be picked up.
        self.points_between = np.row_stack(([2, 2], [3, 3], [9, 9]))
        self.kwargs = {'rectangle_width': 0.1}

    def test_basic(self):
        between = get_points_in_between(self.start, self.end, self.points_between,
                                        **self.kwargs)
        self.assertEqual(between.shape[0], 2)

    def test_basic2(self):
        '''Move start point to -x,-y quadrant
        '''
        self.start *= -1
        between = get_points_in_between(self.start, self.end, self.points_between,
                                        **self.kwargs)
        self.assertEqual(between.shape[0], 2)

    def test_basic3(self):
        '''start point to -x,+y quadrant
        '''
        self.start[0] *= -1
        between = get_points_in_between(self.start, self.end, self.points_between,
                                        **self.kwargs)
        self.assertEqual(between.shape[0], 0)

    def test_basic4(self):
        '''start point to +x,-y
        '''
        self.start[1] *= -1
        between = get_points_in_between(self.start, self.end, self.points_between,
                                        **self.kwargs)
        self.assertEqual(between.shape[0], 0)

    def test_basic5(self):
        '''test that points within the rectangle are being detected.
        '''
        self.start = np.array([1, -1])
        self.kwargs['rectangle_width'] = 5.0
        between = get_points_in_between(self.start, self.end, self.points_between,
                                        **self.kwargs)
        self.assertEqual(between.shape[0], 2)

    def test_basic6(self):
        '''Start diametrically below the end point: nothing in between.
        '''
        self.start = np.array([5, -5])
        between = get_points_in_between(self.start, self.end, self.points_between,
                                        **self.kwargs)
        self.assertEqual(between.shape[0], 0)

    def test_bitmore(self):
        '''Make a bunch of points vertically aligned, and then
        rotate them - and then check if they're picked up correctly.
        '''
        theta = np.deg2rad(0)
        width = 0.6
        rotation_matrix = rot_mat(theta)
        self.kwargs['rectangle_width'] = width
        self.start = np.array([0, -1])
        # Points guaranteed to lie within the start->end rectangle...
        x_coods = np.random.choice(np.arange(-width * 0.5, width * 0.5, 0.01), 10)
        y_coods = np.random.choice(np.arange(0, 2, 0.01), 10)
        self.between_points = np.column_stack((x_coods, y_coods))
        # ...plus decoy points that must not be counted.
        self.other_points = np.random.choice(np.arange(0.8, 0.9, 0.1), 10).reshape(-1, 2)
        self.all_other_points = np.row_stack((self.between_points,
                                              self.other_points))
        self.end = np.array([0, 5])
        # Rotate everything by the same matrix; membership must be invariant.
        rot_end = np.dot(rotation_matrix, self.end)
        rot_start = np.dot(rotation_matrix, self.start)
        rot_allotherpoints = np.apply_along_axis(dot_product_for_rows, 1,
                                                 self.all_other_points,
                                                 rotation_matrix)
        between = get_points_in_between(rot_start, rot_end,
                                        rot_allotherpoints,
                                        **self.kwargs)
        expected_numpoints = self.between_points.shape[0]
        self.assertEqual(between.shape[0], expected_numpoints)

    def test_bitmore_2(self):
        '''Make a bunch of points horizontially aligned, and then
        rotate them - and then check if they're picked up correctly.
        '''
        theta = np.radians(-10)
        width = 0.2
        rotation_matrix = rot_mat(theta)
        self.kwargs['rectangle_width'] = width
        self.start = np.array([0, 0])
        self.end = np.array([5, 0])
        y_coods = np.random.choice(np.arange(-width * 0.5, width * 0.5, 0.01), 10)
        x_coods = np.random.choice(np.arange(0, 2, 0.01), 10)
        self.between_points = np.column_stack((x_coods, y_coods))
        # Decoy points far outside the rectangle.
        self.other_points = np.random.choice(np.arange(90, 120, 1), 10).reshape(-1, 2)
        self.all_other_points = np.row_stack((self.between_points,
                                              self.other_points))
        rot_end = np.dot(rotation_matrix, self.end)
        rot_start = np.dot(rotation_matrix, self.start)
        rot_allotherpoints = np.apply_along_axis(dot_product_for_rows, 1,
                                                 self.all_other_points,
                                                 rotation_matrix)
        between = get_points_in_between(rot_start, rot_end,
                                        rot_allotherpoints,
                                        **self.kwargs)
        expected_numpoints = self.between_points.shape[0]
        self.assertEqual(between.shape[0], expected_numpoints)

    def test_pointsonaline(self):
        '''Two bats sitting directly on the focal->receiver line.
        '''
        bats_xy = np.array(([1, 0], [1, 0.05]))
        focal_bat = np.array([0, 0])
        receiver = np.array([2, 0])
        between = get_points_in_between(focal_bat, receiver,
                                        bats_xy,
                                        **self.kwargs)
        expected = 2
        obtained = between.shape[0]
        self.assertEqual(expected, obtained)

    def test_points_onaline2(self):
        # NOTE(review): reshape(-1, 1) makes this a (2, 1) array rather than a
        # single (1, 2) point — presumably get_points_in_between tolerates
        # this shape; confirm against its implementation.
        bats_xy = np.array([0.5, 0.05]).reshape(-1, 1)
        start = np.array([2, 0])
        end = np.array([0, 0])
        between = get_points_in_between(start, end,
                                        bats_xy,
                                        **self.kwargs)
        expected = 1
        obtained = between.shape[0]
        self.assertEqual(expected, obtained)

    def test_points_on_yaxis(self):
        '''All candidate points hugging the y axis must be detected.'''
        num_points = 100
        y_coods = np.random.choice(np.arange(0.1, 5, 0.01), num_points)
        x_coods = np.tile(0.02, num_points)
        between_points = np.column_stack((x_coods, y_coods))
        # (Removed an unused timing variable that referenced the un-imported
        # 'time' module in the original.)
        betw_points = get_points_in_between(np.array([0, 0]), np.array([0, 10]),
                                            between_points,
                                            **self.kwargs)
        self.assertEqual(betw_points.shape[0], num_points)
class TestSoundprop_w_AcousticShadowing(unittest.TestCase):
    # Tests for soundprop_w_acoustic_shadowing: the received level of a call
    # travelling from a start point to an end point, optionally attenuated by
    # obstacle points ("shadowers") lying in between.
    def setUp(self):
        self.kwargs = {}
        self.kwargs['shadow_strength'] = -3.0
        width = 0.2
        self.kwargs['implement_shadowing'] = True
        self.kwargs['rectangle_width'] = width
        self.kwargs['shadow_TS'] = [-9]
        self.start_point = np.array([0,0])
        self.end_point = np.array([0,10])
        self.kwargs['emitted_source_level'] = {'dBSPL':100, 'ref_distance':1.0}
        # Two obstacle points directly on the start->end line.
        x_coods = np.tile(0,2)
        y_coods = np.array([1,2])
        self.kwargs['R'] = 10.0
        self.other_points = np.column_stack((x_coods, y_coods))
        self.make_shadowing_model()
        self.kwargs['min_spacing'] = 1.0
    def make_shadowing_model(self):
        '''Make the required statistical model to predict amount of acoustic
        shadowing.

        Fits an OLS model RL ~ obstacles + spacing on synthetic data
        generated from known coefficients plus small gaussian noise, and
        installs it in self.kwargs['acoustic_shadowing_model'].
        '''
        num_obstacles = np.tile([0,5,10], 15)
        spacing = np.tile([0,0.5,1.0],15)
        spacing_dummy = sm.categorical(spacing, drop=True)
        intercept = np.ones(num_obstacles.size)
        # True coefficients: intercept, per-obstacle slope, per-spacing-level offsets.
        beta = [-49, -1, 0, -4, -5]
        all_X = np.column_stack((intercept, num_obstacles, spacing_dummy))
        true_RL = np.dot(all_X,beta)
        RL_w_error = true_RL + np.random.normal(0,0.5,num_obstacles.size)
        input_df = pd.DataFrame(data={'obstacles':num_obstacles,
                                      'spacing':spacing,
                                      'RL':RL_w_error})
        # Spacing is treated as a categorical factor, not a continuous variable.
        input_df['spacing'] = pd.Categorical(input_df['spacing'])
        self.shadowing_model = smf.ols('RL~obstacles+spacing', data=input_df).fit()
        self.kwargs['acoustic_shadowing_model'] = self.shadowing_model
    def test_shadowing_by_bats(self):
        ''' start and end at 45 degrees and a cloud of points in between.

        The difference between received level with and without shadowing must
        equal the model-predicted effect of going from 0 to 2 obstacles.
        '''
        rl_w_shadowing = soundprop_w_acoustic_shadowing(self.start_point,
                                                        self.end_point,
                                                        self.other_points,
                                                        **self.kwargs)
        self.kwargs['implement_shadowing']=False
        rl_wo_shadowing = soundprop_w_acoustic_shadowing(self.start_point,
                                                         self.end_point,
                                                         self.other_points,
                                                         **self.kwargs)
        shadowing_effect = np.round(rl_w_shadowing - rl_wo_shadowing,2)
        expected_df = pd.DataFrame(data={'spacing':[1.0]*2,
                                         'obstacles':[0,2]})
        expected_shadowing = float(np.diff(self.shadowing_model.predict(expected_df)))
        self.assertEqual(np.round(expected_shadowing,2), shadowing_effect)
        #print(rl_w_shadowing)
        #self.assertEqual(expected, np.around(rl_w_shadowing))
    def test_nobatsinbetween(self):
        '''No obstacles inside the propagation rectangle: plain spreading loss.
        '''
        self.other_points = np.row_stack(([2,2],[4,4],[9,9],[6,6]))
        shadowing = soundprop_w_acoustic_shadowing( self.start_point,
                                                    self.end_point,
                                                    self.other_points,
                                                    **self.kwargs)
        # NOTE(review): R_startend is computed but unused — presumably left
        # over from deriving the expected value of 80.0 below; confirm.
        R_startend = spatial.distance.euclidean(self.start_point,
                                                self.end_point)
        expected = 80.0
        self.assertEqual(expected, shadowing)
    def test_3batangle(self):
        # NOTE(review): this test only prints the result and asserts nothing —
        # it exercises the code path (one obstacle between two of three bats)
        # but cannot fail on a wrong value; consider adding an assertion.
        self.kwargs['bats_xy'] = np.array(([0,0],[0.5,0.05],[2,0]))
        self.kwargs['implement_shadowing'] = True
        self.kwargs['rectangle_width'] = 0.3
        self.kwargs['shadow_TS'] = [-13]
        self.start_point = self.kwargs['bats_xy'][2,:]
        self.end_point = self.kwargs['bats_xy'][0,:]
        self.other_points = np.array([0.5,0.05]).reshape(1,-1)
        shadowing = soundprop_w_acoustic_shadowing( self.start_point,
                                                    self.end_point,
                                                    self.other_points,
                                                    **self.kwargs)
        print(shadowing)
class Testcalculate_acoustic_shadowing(unittest.TestCase):
    def setUp(self):
        '''setup a general regression object that can be used to test stuff
        - thanks to the amazing docs page :
        https://www.statsmodels.org/stable/examples/notebooks/generated/ols.html
        '''
        # Fit the same synthetic RL ~ obstacles + spacing OLS model as in
        # TestSoundprop_w_AcousticShadowing: known coefficients plus noise,
        # with spacing treated as a categorical regressor.
        num_obstacles = np.tile([0,5,10], 15)
        spacing = np.tile([0,0.5,1.0],15)
        spacing_dummy = sm.categorical(spacing, drop=True)
        intercept = np.ones(num_obstacles.size)
        beta = [-49, -1, 0, -4, -5]
        all_X = np.column_stack((intercept, num_obstacles, spacing_dummy))
        true_RL = np.dot(all_X,beta)
        RL_w_error = true_RL + np.random.normal(0,0.5,num_obstacles.size)
        input_df = pd.DataFrame(data={'obstacles':num_obstacles,
                                      'spacing':spacing,
                                      'RL':RL_w_error})
        input_df['spacing'] = pd.Categorical(input_df['spacing'])
        self.shadowing_model = smf.ols('RL~obstacles+spacing', data=input_df).fit()
        self.kwargs = {}
        self.kwargs['acoustic_shadowing_model'] = self.shadowing_model

    def test_basic_shadowing(self):
        '''Shadowing by a single obstacle must equal the model-predicted
        change in RL from 0 to 1 obstacles at min_spacing.
        '''
        self.kwargs['min_spacing'] = 0.5
        obtained_shadowing = np.array(calculate_acoustic_shadowing(1,**self.kwargs))
        test_input_df = pd.DataFrame(data={'obstacles':[0,1],
                                           'spacing':[0.5]*2})
        predictions = np.array(self.shadowing_model.predict(test_input_df))
        expected_shadowing = predictions[1] - predictions[0]
        self.assertEqual(expected_shadowing, obtained_shadowing)
class TestAtmosAttenutation(unittest.TestCase):
    """Checks that atmospheric absorption lowers the received level."""

    def test_atmabs(self):
        source_level = 100
        reference_distance = 1.0
        propagation_distance = 1.0
        # Received level with -3 dB/m atmospheric absorption applied.
        attenuated_rl = calc_RL(propagation_distance, source_level,
                                reference_distance,
                                atmospheric_attenuation=-3)
        # Received level from geometric spreading alone.
        plain_rl = calc_RL(propagation_distance, source_level,
                           reference_distance)
        # Over 1 m the two must differ by exactly the 3 dB absorption.
        self.assertEqual(np.round(plain_rl - attenuated_rl), 3.0)
if __name__ == '__main__':
unittest.main() |
import pytest
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from src.minerva_db.sql.models import User, Group, Membership
from src.minerva_db.sql.api.utils import to_jsonapi
from .factories import GroupFactory, UserFactory, MembershipFactory
from . import sa_obj_to_dict, statement_log
class TestUser():
    """CRUD tests for the User model via the client API."""

    def test_create_user(self, client, session):
        keys = ('uuid',)
        expected = sa_obj_to_dict(UserFactory(), keys)
        created = client.create_user(**expected)
        assert created == to_jsonapi(expected)
        persisted = sa_obj_to_dict(session.query(User).one(), keys)
        assert persisted == expected

    @pytest.mark.parametrize('duplicate_key', ['uuid'])
    def test_create_user_duplicate(self, client, duplicate_key):
        keys = ('uuid',)
        first = sa_obj_to_dict(UserFactory(), keys)
        second = sa_obj_to_dict(UserFactory(), keys)
        second[duplicate_key] = first[duplicate_key]
        print(first)
        client.create_user(**first)
        # A second user reusing the same unique column must be rejected.
        with pytest.raises(IntegrityError):
            client.create_user(**second)

    def test_get_user(self, client, db_user):
        expected = sa_obj_to_dict(db_user, ('uuid',))
        assert client.get_user(db_user.uuid) == to_jsonapi(expected)

    def test_get_user_nonexistant(self, client):
        with pytest.raises(NoResultFound):
            client.get_user('nonexistant')

    def test_get_user_query_count(self, connection, client, db_user):
        uuid = db_user.uuid
        # Fetching a user should take exactly one SQL statement.
        with statement_log(connection) as statements:
            client.get_user(uuid)
        assert len(statements) == 1
class TestGroup():
    """CRUD tests for the Group model via the client API."""

    def test_create_group(self, client, session, db_user):
        keys = ('uuid', 'name')
        expected = sa_obj_to_dict(GroupFactory(), keys)
        created = client.create_group(user_uuid=db_user.uuid, **expected)
        assert created == to_jsonapi(expected)
        persisted = sa_obj_to_dict(session.query(Group).one(), keys)
        assert persisted == expected

    def test_create_group_owner(self, client, session, db_user):
        details = sa_obj_to_dict(GroupFactory(), ('uuid', 'name'))
        client.create_group(user_uuid=db_user.uuid, **details)
        # The creating user automatically becomes the group's owner.
        assert session.query(Membership).one().membership_type == 'Owner'

    @pytest.mark.parametrize('duplicate_key', ['uuid', 'name'])
    def test_create_group_duplicate(self, client, db_user, duplicate_key):
        keys = ('uuid', 'name')
        first = sa_obj_to_dict(GroupFactory(), keys)
        second = sa_obj_to_dict(GroupFactory(), keys)
        second[duplicate_key] = first[duplicate_key]
        client.create_group(user_uuid=db_user.uuid, **first)
        # A second group reusing any unique column must be rejected.
        with pytest.raises(IntegrityError):
            client.create_group(user_uuid=db_user.uuid, **second)

    def test_get_group(self, client, db_group):
        expected = sa_obj_to_dict(db_group, ('uuid', 'name'))
        assert client.get_group(db_group.uuid) == to_jsonapi(expected)

    def test_get_group_nonexistant(self, client):
        with pytest.raises(NoResultFound):
            client.get_group('nonexistant')

    def test_get_group_query_count(self, connection, client, db_group):
        uuid = db_group.uuid
        # Fetching a group should take exactly one SQL statement.
        with statement_log(connection) as statements:
            client.get_group(uuid)
        assert len(statements) == 1
class TestMembership():
    """Tests for membership CRUD and the is_member / is_owner role checks."""

    def test_create_membership(self, client, session, db_user, db_group):
        keys = ['user_uuid', 'group_uuid', 'membership_type']
        d = sa_obj_to_dict(MembershipFactory(), keys)
        # Re-point the factory dict at the fixture user/group so it matches
        # the membership actually created below.
        # NOTE(review): assumes MembershipFactory's membership_type default is
        # 'Member' -- confirm in factories.py.
        d['user_uuid'] = db_user.uuid
        d['group_uuid'] = db_group.uuid
        m = client.create_membership(db_group.uuid, db_user.uuid, 'Member')
        assert to_jsonapi(d) == m
        assert d == sa_obj_to_dict(session.query(Membership).one(), keys)

    def test_create_membership_duplicate(self, client, session, db_user,
                                         db_group):
        # A (group, user) pair can only have one membership row, regardless
        # of the membership_type.
        client.create_membership(db_group.uuid, db_user.uuid, 'Member')
        with pytest.raises(IntegrityError):
            client.create_membership(db_group.uuid, db_user.uuid, 'Owner')

    def test_create_membership_nonexistant_group(self, client, session,
                                                 db_user):
        with pytest.raises(NoResultFound):
            client.create_membership('nonexistant', db_user.uuid, 'Member')

    def test_create_membership_nonexistant_user(self, client, session,
                                                db_group):
        with pytest.raises(NoResultFound):
            client.create_membership(db_group.uuid, 'nonexistant', 'Member')

    def test_get_membership(self, client, db_membership):
        membership_keys = ('user_uuid', 'group_uuid', 'membership_type')
        group_keys = ('uuid', 'name')
        user_keys = ('uuid',)
        d_membership = sa_obj_to_dict(db_membership, membership_keys)
        d_group = sa_obj_to_dict(db_membership.group, group_keys)
        d_user = sa_obj_to_dict(db_membership.user, user_keys)
        # Membership responses embed the related group and user documents.
        assert to_jsonapi(
            d_membership,
            {
                'groups': [d_group],
                'users': [d_user]
            }
        ) == client.get_membership(db_membership.group_uuid,
                                   db_membership.user_uuid)

    def test_get_membership_nonexistant(self, client, db_user, db_group):
        with pytest.raises(NoResultFound):
            client.get_membership(db_group.uuid, db_user.uuid)

    def test_get_membership_query_count(self, connection, client,
                                        db_membership):
        group_uuid = db_membership.group_uuid
        user_uuid = db_membership.user_uuid
        # The lookup (including the embedded group/user) must be one query.
        with statement_log(connection) as statements:
            client.get_membership(group_uuid, user_uuid)
        assert len(statements) == 1

    def test_update_membership(self, client, session, db_user, db_group):
        keys = ['user_uuid', 'group_uuid', 'membership_type']
        db_membership = MembershipFactory(group=db_group, user=db_user,
                                          membership_type='Member')
        session.add(db_membership)
        session.commit()
        d = sa_obj_to_dict(db_membership, keys)
        d['membership_type'] = 'Owner'
        # Compare only the 'data' payloads of the JSON:API documents.
        assert to_jsonapi(d)['data'] == client.update_membership(
            db_group.uuid,
            db_user.uuid,
            'Owner'
        )['data']

    def test_delete_membership(self, client, session,
                               group_granted_read_hierarchy):
        db_membership = group_granted_read_hierarchy['membership']
        client.delete_membership(db_membership.group_uuid,
                                 db_membership.user_uuid)
        # Only the membership row is removed; user and group survive.
        assert 0 == session.query(Membership).count()
        assert 1 == session.query(User).count()
        assert 1 == session.query(Group).count()

    # TODO Add user to group when already a member
    # TODO Remove user from group when not a member

    def test_is_member(self, client, db_membership):
        assert client.is_member(db_membership.group_uuid,
                                db_membership.user_uuid)

    def test_isnt_member(self, client, db_user, db_group):
        assert not client.is_member(db_group.uuid, db_user.uuid)

    def test_is_owner(self, client, db_ownership):
        assert client.is_owner(db_ownership.group_uuid, db_ownership.user_uuid)

    def test_isnt_owner(self, client, db_membership):
        assert not client.is_owner(db_membership.group_uuid,
                                   db_membership.user_uuid)
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import SpectralClustering
from umap import UMAP
from multidr.tdr import TDR
from multidr.cl import CL
def plot_results(results):
    """Plot the six two-step DR embeddings as a 2x3 grid of scatter plots.

    Args:
        results: mapping containing keys 'Z_n_dt', 'Z_n_td', 'Z_d_nt',
            'Z_d_tn', 'Z_t_dn', 'Z_t_nd', each an (n, 2) array of 2-D
            embedding coordinates.
    """
    # Per-subplot titles, keyed by embedding name (replaces the if/elif chain;
    # dict order preserves the original subplot order).
    titles = {
        'Z_n_dt': 'Instance sim ' r'$v^{(D \rightarrow T)}_{n}$',
        'Z_n_td': 'Instance sim ' r'$v^{(T \rightarrow D)}_{n}$',
        'Z_d_nt': 'Variable sim ' r'$v^{(N \rightarrow T)}_{d}$',
        'Z_d_tn': 'Variable sim ' r'$v^{(T \rightarrow N)}_{d}$',
        'Z_t_dn': 'Time point sim ' r'$v^{(D \rightarrow N)}_{t}$',
        'Z_t_nd': 'Time point sim ' r'$v^{(N \rightarrow D)}_{t}$',
    }

    plt.figure(figsize=(8, 6))
    for i, Z in enumerate(titles):
        plt.subplot(2, 3, i + 1)
        plt.scatter(results[Z][:, 0], results[Z][:, 1], s=5, c='#84B5B2')
        plt.title(titles[Z])
        plt.xticks([])
        plt.yticks([])
    plt.tight_layout()
    # BUG FIX: plt.title() here set the title of the CURRENT (last) axes,
    # clobbering the 'Time point sim' label; a figure-level suptitle is the
    # correct way to add an overall heading.
    plt.suptitle('Two-step DR results')
    plt.show()
###
### 1. Two-step DR examples
###

## Examples below are from Fujiwara et al., "A Visual Analytics Framework for
## Reviewing Multivariate Time-Series Data with Dimensionality Reduction", 2020
##
## All the parameters listed here are the same with the one used in the paper
##

# Air quality data (Case Study 1)
X = np.load('./data/air_quality/tensor.npy')
n_neighbors = 7
min_dist = 0.15

# # MHEALTH data (Case Study 2)
# # DOWNLOAD DATA FROM https://takanori-fujiwara.github.io/s/multidr/
# X = np.load('./data/mhealth/tensor.npy')
# n_neighbors = 7
# min_dist = 0.15

# # Highschool 2012 data (Case Study 3)
# # DOWNLOAD DATA FROM https://takanori-fujiwara.github.io/s/multidr/
# X = np.load('./data/highschool_2012/tensor.npy')
# n_neighbors = 15
# min_dist = 0.0

# Third-order tensor: time points x instances x variables.
T, N, D = X.shape

# Two-step DR: PCA down to 1 component in the first step, then UMAP to 2-D.
tdr = TDR(first_learner=PCA(n_components=1),
          second_learner=UMAP(n_components=2,
                              n_neighbors=n_neighbors,
                              min_dist=min_dist))

results = tdr.fit_transform(X,
                            first_scaling=True,
                            second_scaling=False,
                            verbose=True)

plot_results(results)

###
### 2. Two-step DR interpretation examples
###

## 2-1. parametric mappings
print('Explained variance ratio')
print('t: ' + str(tdr.first_learner['t'].explained_variance_ratio_[0]))
print('n: ' + str(tdr.first_learner['n'].explained_variance_ratio_[0]))
print('d: ' + str(tdr.first_learner['d'].explained_variance_ratio_[0]))

## 2-2. feature contributions
# Cluster the instance embedding, then examine each cluster's feature
# contributions over time with contrastive learning (CL).
clustering = SpectralClustering(n_clusters=3,
                                assign_labels="discretize",
                                random_state=0).fit(results['Z_n_dt'])

plt.figure(figsize=(6, 6))
plt.scatter(results['Z_n_dt'][:, 0],
            results['Z_n_dt'][:, 1],
            s=5,
            c=clustering.labels_)
plt.title('Z_n_dt with spectral clustering')
plt.show()

# Instances x time matrix for contrastive analysis.
Y_nt = tdr.Y_tn.transpose()

cl = CL()
plt.figure(figsize=(8, 4))
for cluster_id in np.unique(clustering.labels_):
    # Contrast one cluster against all other instances.
    cluster = Y_nt[clustering.labels_ == cluster_id, :]
    others = Y_nt[clustering.labels_ != cluster_id, :]
    cl.fit(cluster, others, var_thres_ratio=0.5, max_log_alpha=2)
    plt.plot(cl.fcs, c=plt.get_cmap('Accent')(cluster_id))

plt.xlabel('time')
plt.ylabel('Feature contribution (without scaling)')
plt.title('Feature cotributions')
plt.show()
|
# src/app/beer_garden/api/http/handlers/v1/garden.py
# -*- coding: utf-8 -*-
import json
from brewtils.errors import ModelValidationError
from brewtils.models import Operation
from brewtils.schema_parser import SchemaParser
from beer_garden.api.authorization import Permissions
from beer_garden.api.http.handlers import AuthorizationHandler
from beer_garden.api.http.schemas.v1.garden import GardenReadSchema
from beer_garden.authorization import user_has_permission_for_object
from beer_garden.db.mongo.api import MongoParser
from beer_garden.db.mongo.models import Garden
from beer_garden.garden import local_garden
from beer_garden.user import initiate_user_sync
# Permission-level strings used by the handlers below, unpacked from the
# Permissions enum once at import time.
GARDEN_CREATE = Permissions.GARDEN_CREATE.value
GARDEN_READ = Permissions.GARDEN_READ.value
GARDEN_UPDATE = Permissions.GARDEN_UPDATE.value
GARDEN_DELETE = Permissions.GARDEN_DELETE.value
class GardenAPI(AuthorizationHandler):
    """REST handler for a single Garden: get / delete / patch by name."""

    async def get(self, garden_name):
        """
        ---
        summary: Retrieve a specific Garden
        parameters:
          - name: garden_name
            in: path
            required: true
            description: Read specific Garden Information
            type: string
        responses:
          200:
            description: Garden with the given garden_name
            schema:
              $ref: '#/definitions/Garden'
          404:
            $ref: '#/definitions/404Error'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Garden
        """
        garden = self.get_or_raise(Garden, GARDEN_READ, name=garden_name)

        # Users with update permission receive the full serialized document;
        # everyone else gets the restricted read schema.
        if user_has_permission_for_object(self.current_user, GARDEN_UPDATE, garden):
            response = MongoParser.serialize(garden)
        else:
            response = GardenReadSchema().dumps(garden).data

        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)

    async def delete(self, garden_name):
        """
        ---
        summary: Delete a specific Garden
        parameters:
          - name: garden_name
            in: path
            required: true
            description: Garden to use
            type: string
        responses:
          204:
            description: Garden has been successfully deleted
          404:
            $ref: '#/definitions/404Error'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Garden
        """
        garden = self.get_or_raise(Garden, GARDEN_DELETE, name=garden_name)

        await self.client(Operation(operation_type="GARDEN_DELETE", args=[garden.name]))

        self.set_status(204)

    async def patch(self, garden_name):
        """
        ---
        summary: Partially update a Garden
        description: |
          The body of the request needs to contain a set of instructions detailing the
          updates to apply. Currently the only operations are:

          * initializing
          * running
          * stopped
          * block
          * heartbeat
          * config
          * sync

          ```JSON
          [
            { "operation": "" }
          ]
          ```
        parameters:
          - name: garden_name
            in: path
            required: true
            description: Garden to use
            type: string
          - name: patch
            in: body
            required: true
            description: Instructions for how to update the Garden
            schema:
              $ref: '#/definitions/Patch'
        responses:
          200:
            description: Garden with the given garden_name
            schema:
              $ref: '#/definitions/Garden'
          400:
            $ref: '#/definitions/400Error'
          404:
            $ref: '#/definitions/404Error'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Garden
        """
        garden = self.get_or_raise(Garden, GARDEN_UPDATE, name=garden_name)

        patch = SchemaParser.parse_patch(self.request.decoded_body, from_string=True)

        # NOTE(review): if the patch list is empty, `response` is never bound
        # and self.write(response) below raises NameError; with several
        # operations only the last response is written back.
        for op in patch:
            operation = op.operation.lower()

            if operation in ["initializing", "running", "stopped", "block"]:
                # Direct garden status change.
                response = await self.client(
                    Operation(
                        operation_type="GARDEN_UPDATE_STATUS",
                        args=[garden.name, operation.upper()],
                    )
                )
            elif operation == "heartbeat":
                # A heartbeat simply marks the garden as RUNNING.
                response = await self.client(
                    Operation(
                        operation_type="GARDEN_UPDATE_STATUS",
                        args=[garden.name, "RUNNING"],
                    )
                )
            elif operation == "config":
                # Replace the garden's connection configuration.
                response = await self.client(
                    Operation(
                        operation_type="GARDEN_UPDATE_CONFIG",
                        args=[SchemaParser.parse_garden(op.value, from_string=False)],
                    )
                )
            elif operation == "sync":
                # Trigger a sync targeted at this garden only.
                response = await self.client(
                    Operation(
                        operation_type="GARDEN_SYNC",
                        kwargs={"sync_target": garden.name},
                    )
                )
            else:
                raise ModelValidationError(f"Unsupported operation '{op.operation}'")

        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)
class GardenListAPI(AuthorizationHandler):
    """REST handler for the Garden collection: list / create / patch."""

    async def get(self):
        """
        ---
        summary: Retrieve a list of Gardens
        responses:
          200:
            description: All gardens the requesting user may read
            schema:
              type: array
              items:
                $ref: '#/definitions/Garden'
          404:
            $ref: '#/definitions/404Error'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Garden
        """
        permitted_gardens = self.permissioned_queryset(Garden, GARDEN_READ)

        response_gardens = []
        for garden in permitted_gardens.no_cache():
            # Full document for users with update permission, restricted read
            # schema otherwise.
            # NOTE(review): if MongoParser.serialize returns a JSON *string*
            # (as its direct self.write usage elsewhere suggests), these
            # entries get double-encoded by json.dumps below -- verify.
            if user_has_permission_for_object(self.current_user, GARDEN_UPDATE, garden):
                response_gardens.append(MongoParser.serialize(garden))
            else:
                response_gardens.append(GardenReadSchema().dump(garden).data)

        response = json.dumps(response_gardens)

        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)

    async def post(self):
        """
        ---
        summary: Create a new Garden
        parameters:
          - name: garden
            in: body
            description: The Garden definition to create
            schema:
              $ref: '#/definitions/Garden'
        responses:
          201:
            description: A new Garden has been created
            schema:
              $ref: '#/definitions/Garden'
          400:
            $ref: '#/definitions/400Error'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Garden
        """
        garden = SchemaParser.parse_garden(self.request.decoded_body, from_string=True)

        self.verify_user_permission_for_object(GARDEN_CREATE, garden)

        response = await self.client(
            Operation(
                operation_type="GARDEN_CREATE",
                args=[garden],
            )
        )

        self.set_status(201)
        self.set_header("Content-Type", "application/json; charset=UTF-8")
        self.write(response)

    async def patch(self):
        """
        ---
        summary: Partially update Gardens
        description: |
          The body of the request needs to contain a set of instructions detailing the
          updates to apply. Currently the only operations are:

          * sync
          * sync_users

          ```JSON
          [
            { "operation": "" }
          ]
          ```
        parameters:
          - name: patch
            in: body
            required: true
            description: Instructions for how to update the Garden
            schema:
              $ref: '#/definitions/Patch'
        responses:
          204:
            description: Patch operation has been successfully forwarded
          400:
            $ref: '#/definitions/400Error'
          404:
            $ref: '#/definitions/404Error'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Garden
        """
        # Collection-level patches require update permission on the local garden.
        self.verify_user_permission_for_object(GARDEN_UPDATE, local_garden())

        patch = SchemaParser.parse_patch(self.request.decoded_body, from_string=True)

        for op in patch:
            operation = op.operation.lower()

            if operation == "sync":
                # Sync with no sync_target.
                await self.client(
                    Operation(
                        operation_type="GARDEN_SYNC",
                    )
                )
            elif operation == "sync_users":
                # requires GARDEN_UPDATE for all gardens
                for garden in Garden.objects.all():
                    self.verify_user_permission_for_object(GARDEN_UPDATE, garden)

                initiate_user_sync()
            else:
                raise ModelValidationError(f"Unsupported operation '{op.operation}'")

        self.set_status(204)
|
# -*- coding: utf-8 -*-
#================================================================
# Copyright (C) 2020 * Ltd. All rights reserved.
# Time : 2020/3/15 18:01
# Author : Xuguosheng
# contact: <EMAIL>
# File : character_5.py
# Software: PyCharm
# Description :图像分类 fashsion_minist
#================================================================
import tensorflow as tf
from tensorflow import keras
import numpy as np
import time
import sys
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import fashion_mnist
import cv2
import os
# os.environ['CUDA_DEVICE_ORDER'] = '-1'
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class ids to human-readable names."""
    class_names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [class_names[int(label)] for label in labels]
def softmax(logits, axis=-1):
    """Numerically stable softmax of `logits` along `axis`.

    BUG FIX: the original exponentiated the raw logits, which overflows to
    inf (producing NaNs) for moderately large values; subtracting the
    per-axis maximum first is mathematically identical and stable.
    """
    shifted = logits - tf.reduce_max(logits, axis, keepdims=True)
    exp_shifted = tf.exp(shifted)
    return exp_shifted / tf.reduce_sum(exp_shifted, axis, keepdims=True)
# Load Fashion-MNIST: 60k training and 10k test 28x28 grayscale images.
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

batch_size = 256
if sys.platform.startswith('win'):
    num_workers = 0  # 0 means: do not use extra processes to speed up data loading
else:
    num_workers = 4
# NOTE(review): num_workers is never used below (tf.data manages threading
# internally) -- presumably left over from a PyTorch/MXNet version.

# Scale pixel values to [0, 1] floats and build batched dataset iterators.
x_train = tf.cast(x_train/255.,dtype=tf.float32)
x_test = tf.cast(x_test/255.,dtype=tf.float32)
train_iter = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
test_iter = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)

# num_inputs = 784
# num_outputs = 10
# W = tf.Variable(tf.random.normal(shape=(num_inputs,num_outputs),dtype=tf.float32))
# b = tf.Variable(tf.zeros(num_outputs,dtype=tf.float32))

# Model parameters for a single dense layer: 784 inputs -> 10 classes.
num_inputs = 784
num_outputs = 10
W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01, dtype=tf.float32))
b = tf.Variable(tf.zeros(num_outputs, dtype=tf.float32))
def net(X):
    """Softmax-regression forward pass using the module-level W and b."""
    flattened = tf.reshape(X, shape=(-1, W.shape[0]))
    return softmax(tf.matmul(flattened, W) + b)
def cross_entropy(y_hat, y):
    """Per-example cross-entropy loss for softmax outputs `y_hat`.

    The labels `y` are reshaped to a column, one-hot encoded, and used as a
    mask to select each example's predicted probability of its true class;
    1e-8 guards against log(0).
    NOTE(review): the one-hot mask is cast to int32 but tf.boolean_mask
    documents a bool mask -- confirm this runs on the TF version in use.
    """
    y = tf.cast(tf.reshape(y, shape=[-1, 1]),dtype=tf.int32)
    y = tf.one_hot(y, depth=y_hat.shape[-1])
    y = tf.cast(tf.reshape(y, shape=[-1, y_hat.shape[-1]]),dtype=tf.int32)
    return -tf.math.log(tf.boolean_mask(y_hat, y)+1e-8)
# y_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
# y = np.array([0, 2], dtype='int32')
# z = tf.boolean_mask(y_hat, tf.one_hot(y, depth=3))
def accuracy(y_hat, y):
    """Fraction of rows where the argmax prediction equals the label."""
    return np.mean((tf.argmax(y_hat, axis=1) == y))  # compare index of max probability with the label
def evaluate_accuracy(data_iter,net):
    """Average classification accuracy of `net` over all batches of data_iter."""
    acc_sum,n =0.0 ,0
    for x,y in data_iter:
        y = tf.cast(y,dtype=tf.int32)
        # Count correct argmax predictions in this batch.
        acc_sum +=np.sum(tf.cast(tf.argmax(net(x),axis =1),dtype=tf.int32)==y)
        n +=y.shape[0]
    return acc_sum/n
num_epochs, lr = 5, 0.1

# This function is also kept in the d2lzh package for later reuse.
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, trainer=None):
    """Train `net` for num_epochs, printing loss and train/test accuracy.

    If `trainer` is None the raw `params` are updated with hand-written
    minibatch SGD at learning rate `lr`; otherwise the supplied optimizer is
    applied to the (batch-size-normalized) gradients.
    """
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with tf.GradientTape() as tape:
                y_hat = net(X)
                l = tf.reduce_sum(loss(y_hat, y))
            grads = tape.gradient(l, params)
            if trainer is None:
                # No optimizer supplied: use hand-written minibatch SGD.
                for i, param in enumerate(params):
                    param.assign_sub(lr * grads[i] / batch_size)
            else:
                # tf.keras.optimizers.SGD applies theta(t+1) = theta(t) - learning_rate * gradient.
                # For minibatch gradient descent the gradients are divided by
                # batch_size here, matching trainer.step(batch_size) in the
                # original book's code.
                trainer.apply_gradients(zip([grad / batch_size for grad in grads], params))
                # trainer.apply_gradients(zip(grads/batch_size, params))

            y = tf.cast(y, dtype=tf.float32)
            train_l_sum += l.numpy()
            train_acc_sum += tf.reduce_sum(tf.cast(tf.argmax(y_hat, axis=1) == tf.cast(y, dtype=tf.int64), dtype=tf.int64)).numpy()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'% (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

# Train the from-scratch model and report wall-clock time.
start = time.time()
trainer = tf.keras.optimizers.SGD(lr)
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [W, b], lr,trainer)
print('use time is: ',time.time()-start)
# Equivalent Keras model: flatten 28x28 images, then a 10-way softmax layer.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(10, activation=tf.nn.softmax)])
print(len(model.weights))
print(model.weights[0])
print(model.weights[1])

# Sanity check: predict on one random 28x28 input and report argmax/confidence.
random_x = tf.random.normal(shape=(1,28,28),mean=0,stddev=0.1,dtype=tf.float32)
random_y = model.predict(random_x)
yyy = np.array(random_y)
index = np.argmax(yyy)
conf = np.max(yyy)
print(random_y)
print(index,conf)

# Data loading and inspection: 28x28 grayscale images.
# feature,label=x_train[0],y_train[0]
# print(type(x_test),type(y_test))
# print(feature,label)
# cv2.imshow('first_img',feature)
# cv2.waitKey(0)
# keras.layers.Conv2d()
# BUG FIX: a stray fragment ("keras.layers.Dense(10, activation=tf.nn.softmax)])"),
# left over from a copy of the Sequential definition above, stood here and was
# a syntax error; it has been removed.

# Compile and train the Keras version of the same softmax-regression model.
lr = 0.05
optimizer = keras.optimizers.SGD(lr)
loss = 'sparse_categorical_crossentropy'
model.compile(optimizer,loss,metrics=['accuracy'])
model.fit(x_train,y_train,batch_size=256)
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test Acc:',test_acc)
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from scipy.stats import truncnorm
import tinyms as ts
from tinyms import layers, Tensor
from tinyms.layers import ReLU, MaxPool2d, Flatten, Dropout
def _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
    """Truncated-normal variance-scaling (fan_in) initializer for conv weights."""
    fan_in = in_channel * kernel_size * kernel_size
    scale = 1.0
    scale /= max(1., fan_in)
    # 0.8796... is the std of a unit normal truncated to [-2, 2]; dividing by
    # it makes the truncated sample's std match the target stddev.
    stddev = (scale ** 0.5) / .87962566103423978
    mu, sigma = 0, stddev
    weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)
    return ts.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))


def _weight_variable(shape, factor=0.01):
    """Random-normal weight Tensor of `shape`, scaled down by `factor`."""
    init_value = np.random.randn(*shape).astype(np.float32) * factor
    return Tensor(init_value)


def _conv3x3(in_channel, out_channel, stride=1):
    """3x3 'same' convolution with small random-normal weight init."""
    weight_shape = (out_channel, in_channel, 3, 3)
    weight = _weight_variable(weight_shape)
    return layers.Conv2d(in_channel, out_channel,
                         kernel_size=3, stride=stride, padding=0, pad_mode='same', weight_init=weight)


def _conv1x1(in_channel, out_channel, stride=1):
    """1x1 'same' convolution with small random-normal weight init."""
    weight_shape = (out_channel, in_channel, 1, 1)
    weight = _weight_variable(weight_shape)
    return layers.Conv2d(in_channel, out_channel,
                         kernel_size=1, stride=stride, padding=0, pad_mode='same', weight_init=weight)


def _conv7x7(in_channel, out_channel, stride=1):
    """7x7 'same' convolution with small random-normal weight init."""
    weight_shape = (out_channel, in_channel, 7, 7)
    weight = _weight_variable(weight_shape)
    return layers.Conv2d(in_channel, out_channel,
                         kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight)


def _conv11x11(in_channel, out_channel, stride=1):
    """11x11 convolution with explicit padding 2 (AlexNet's first layer)."""
    weight_shape = (out_channel, in_channel, 11, 11)
    weight = _weight_variable(weight_shape)
    return layers.Conv2d(in_channel, out_channel,
                         kernel_size=11, stride=stride, padding=2, pad_mode='pad', weight_init=weight)


def _conv5x5(in_channel, out_channel, stride=1):
    """5x5 convolution with explicit padding 2."""
    weight_shape = (out_channel, in_channel, 5, 5)
    weight = _weight_variable(weight_shape)
    return layers.Conv2d(in_channel, out_channel,
                         kernel_size=5, stride=stride, padding=2, pad_mode='pad', weight_init=weight)


def _bn(channel):
    """BatchNorm with gamma initialized to 1 (standard placement)."""
    return layers.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
                              gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)


def _bn_last(channel):
    """BatchNorm with gamma initialized to 0 (zero-init of a block's last BN)."""
    return layers.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
                              gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)


def _fc(in_channel, out_channel):
    """Fully-connected layer with random-normal weights and zero bias."""
    weight_shape = (out_channel, in_channel)
    weight = _weight_variable(weight_shape)
    return layers.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
class AlexNet(layers.Layer):
    """
    Get AlexNet neural network.

    Args:
        class_num (int): Class number. Default: 1000.

    Returns:
        layers.Layer, layer instance of AlexNet neural network.

    Examples:
        >>> from tinyms.model import AlexNet
        >>>
        >>> net = AlexNet(class_num=1000)
    """

    def __init__(self, class_num=1000):
        super(AlexNet, self).__init__()
        # Classic AlexNet stack: five conv blocks with max pooling, then
        # three fully-connected layers with dropout.
        self.features = layers.SequentialLayer(
            [
                _conv11x11(3, 64, 4),
                ReLU(),
                MaxPool2d(kernel_size=3, stride=2),
                _conv5x5(64, 192),
                ReLU(),
                MaxPool2d(kernel_size=3, stride=2),
                _conv3x3(192, 384),
                ReLU(),
                _conv3x3(384, 256),
                ReLU(),
                _conv3x3(256, 256),
                ReLU(),
                MaxPool2d(kernel_size=3, stride=2),
                Flatten(),
                Dropout(),
                # Classifier head; 256*6*6 is the flattened conv output size.
                _fc(256*6*6, 4096),
                ReLU(),
                Dropout(),
                _fc(4096, 4096),
                ReLU(),
                _fc(4096, class_num)
            ]
        )

    def construct(self, x):
        # The single sequential stack performs both feature extraction and
        # classification.
        x = self.features(x)
        return x
def alexnet(**kwargs):
    """
    Get AlexNet neural network.

    Args:
        class_num (int): Class number. Default: 10.

    Returns:
        layers.Layer, layer instance of AlexNet neural network.

    Examples:
        >>> from tinyms.model import alexnet
        >>>
        >>> net = alexnet(class_num=10)
    """
    num_classes = kwargs.get('class_num', 10)
    return AlexNet(class_num=num_classes)
|
# -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import collections
import gzip
import operator
import argparse
import datetime
import time
import subprocess
import shlex
import tempfile
import warnings
try:
import snaptools.utilities
import snaptools.global_var
import snaptools.gtf
from snaptools.snap import *
from snaptools.utilities import file_type
except Exception:
print("Package snaptools not installed!")
sys.exit(1)
try:
import numpy as np
except Exception:
print("Package numpy not installed!")
sys.exit(1)
# Import h5py, silencing its DeprecationWarnings on import.
try:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        import h5py
except Exception:
    # BUG FIX: the message wrongly said "numpy"; a duplicated second
    # `import h5py` statement was also removed.
    print("Package h5py not installed!")
    sys.exit(1)
try:
import pysam
except Exception:
print("Package pysam not installed!")
sys.exit(1)
try:
import pybedtools
except Exception:
print("Package pybedtools not installed!")
sys.exit(1)
def snap_gmat(snap_file,
              gene_file,
              buffer_size,
              tmp_folder,
              verbose):
    """
    Create a cell x gene matrix from a snap file and store it as the "GM"
    session inside the same snap file.

    Required:
    --------
    snap_file:
        a snap format file;

    gene_file:
        a bed format file contains gene locations;

    Optional:
    --------
    buffer_size:
        max number of barcodes kept in memory when dumping fragments;

    tmp_folder:
        folder to store temporarily created files;

    verbose:
        a boolen variable indicates whether to output the progress [True];
    """
    if not os.path.exists(snap_file):
        print(('error: ' + snap_file + ' does not exist!'))
        sys.exit(1)

    # check if snap_file is a snap-format file
    file_format = snaptools.utilities.checkFileFormat(snap_file)
    if file_format != "snap":
        print(("error: input file %s is not a snap file!" % snap_file))
        # BUG FIX: the original printed the error but fell through and kept
        # processing; abort instead.
        sys.exit(1)

    if not os.path.exists(gene_file):
        print(('error: ' + gene_file + ' does not exist!'))
        sys.exit(1)

    # check if a GM session already exists (context manager closes the file
    # even on the error path)
    with h5py.File(snap_file, "r", libver='earliest') as f:
        if "GM" in f:
            print(("error: cell x gene matrix already exists, delete it first using snap-del"))
            sys.exit(1)

    # check if gene_file is a bed-format file
    file_format = snaptools.utilities.checkFileFormat(gene_file)
    if file_format != "bed":
        # BUG FIX: the message previously interpolated snap_file and the
        # check did not abort; report gene_file and exit.
        print(("error: input file %s is not a bed file!" % gene_file))
        sys.exit(1)

    # gene names come from the bed file's name column; map each to a 1-based id
    gene_list = set([str(item.name) for item in pybedtools.BedTool(gene_file)])
    gene_dict = collections.OrderedDict(
        list(zip(gene_list, list(range(1, len(gene_list) + 1)))))

    # extract the barcodes
    barcode_dict = getBarcodesFromSnap(snap_file)

    # first cut the fragments into small piecies, write them down
    fout_frag = tempfile.NamedTemporaryFile(delete=False, dir=tmp_folder)
    dump_read(snap_file, fout_frag.name, buffer_size, None, tmp_folder, True)

    # in parallel find the overlap cell and peaks
    frag_bt = pybedtools.BedTool(fout_frag.name)
    peak_bt = pybedtools.BedTool(gene_file)

    # count per-barcode gene hits; field 7 of the intersection is the gene name
    cell_peak_arr = collections.defaultdict(list)
    for item in frag_bt.intersect(peak_bt, wa=True, wb=True):
        key = str(item.fields[7])
        if key in gene_dict:
            idy = gene_dict[key]
            barcode = item.name.split(":")[0]
            idx = barcode_dict[barcode].id
            cell_peak_arr[idx].append(idy)

    # flatten the per-barcode counters into sparse triplets (idx, idy, count)
    IDX_LIST = []
    IDY_LIST = []
    VAL_LIST = []
    for barcode_id in cell_peak_arr:
        d = collections.Counter(cell_peak_arr[barcode_id])
        IDX_LIST += [barcode_id] * len(d)
        for peak_id in d:
            IDY_LIST.append(peak_id)
            VAL_LIST.append(d[peak_id])

    # write the GM group (an unused special_dtype local was removed here)
    # NOTE(review): counts are stored as uint8 and will wrap above 255 --
    # confirm upstream guarantees per-cell-per-gene counts stay below that.
    f = h5py.File(snap_file, "a", libver='earliest')
    f.create_dataset("GM/name", data=[np.string_(item) for item in gene_dict.keys(
    )], dtype=h5py.special_dtype(vlen=bytes), compression="gzip", compression_opts=9)
    f.create_dataset("GM/idx", data=IDX_LIST, dtype="uint32",
                     compression="gzip", compression_opts=9)
    f.create_dataset("GM/idy", data=IDY_LIST, dtype="uint32",
                     compression="gzip", compression_opts=9)
    f.create_dataset("GM/count", data=VAL_LIST, dtype="uint8",
                     compression="gzip", compression_opts=9)
    f.close()

    # remove the temporary file (os.remove is portable; no subprocess needed)
    os.remove(fout_frag.name)
    return 0
|
import plotly.express as px
if __name__ == '__main__':
    # logs on 50 epochs, from /docs/logs/ae_training.txt
    # plots are located in /docs/plots/gae training loss.png, and gae link prediction error.png
    # GAE training objective, one value per epoch (50 epochs).
    # NOTE(review): values are negative, so this presumably is a maximised
    # (log-likelihood-style) objective rather than a plain loss -- confirm
    # against the training script referenced above.
    training_losses = [
        -0.6715881810951391,
        -0.7151892182838857,
        -0.7224757316891431,
        -0.7264024511300905,
        -0.7299145745203072,
        -0.7324045745096792,
        -0.7351830048149893,
        -0.73668583803509,
        -0.73873143789187,
        -0.7397169764955246,
        -0.741215530210862,
        -0.7416448934952022,
        -0.7432585700432064,
        -0.7435045193676925,
        -0.7435280323226258,
        -0.7447441721634683,
        -0.7454526615874288,
        -0.7457076122907066,
        -0.7461530249312545,
        -0.7465042970864532,
        -0.7467404599806563,
        -0.7470795269629256,
        -0.7473542332253844,
        -0.7478588119075072,
        -0.7483040032695182,
        -0.7481911926917967,
        -0.7480894749042011,
        -0.7493158909021128,
        -0.7488626465275513,
        -0.7491466442546243,
        -0.7490413098983701,
        -0.7496405843676224,
        -0.7488224900777067,
        -0.7496250724911097,
        -0.7502672023638761,
        -0.749392285592125,
        -0.7505346844445414,
        -0.7507446526887998,
        -0.7497987773287949,
        -0.7506863548190242,
        -0.7507426261901855,
        -0.7505321231449816,
        -0.7512352675743166,
        -0.7499904774710116,
        -0.7511589444098781,
        -0.7510878306518542,
        -0.7512279756428986,
        -0.7509021187105385,
        -0.7512520907925532,
        -0.7515817673052129]
    # link weight prediction validation mean squared error
    # (one value per epoch, same 50 epochs as the training curve above)
    validation_losses = [
        0.022223,
        0.020152,
        0.018434,
        0.016903,
        0.015696,
        0.014744,
        0.013808,
        0.013013,
        0.012332,
        0.011599,
        0.011051,
        0.010597,
        0.010205,
        0.009807,
        0.009455,
        0.009089,
        0.008784,
        0.008581,
        0.008294,
        0.008098,
        0.007890,
        0.007695,
        0.007553,
        0.007399,
        0.007240,
        0.007073,
        0.006939,
        0.006855,
        0.006676,
        0.006537,
        0.006477,
        0.006389,
        0.006282,
        0.006228,
        0.006145,
        0.006071,
        0.006011,
        0.005960,
        0.005906,
        0.005839,
        0.005788,
        0.005705,
        0.005710,
        0.005656,
        0.005585,
        0.005571,
        0.005500,
        0.005460,
        0.005446,
        0.005406
    ]
    # Training curve: px.line uses the list index as the x axis ("epochs");
    # force the line colour to red.
    fig = px.line(training_losses, labels={'index': 'epochs', 'value': 'training loss'})
    fig['data'][0]['line']['color'] = "#ff0000"
    fig.show()
    # Validation MSE curve, default plotly styling.
    fig = px.line(validation_losses, labels={'index': 'epochs', 'value': 'link weight prediction error'})
fig.show() |
#!/usr/bin/env python
#===============================================================================
import os, sys, json, time
import shutil, glob, zipfile, tempfile
import datetime as dt
import xml.etree.ElementTree as et
import logging
LOG_FILENAME = 'brac_sync.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,)
logger = logging.getLogger('BracSync')
import entryutils
import common
#import captureurl
from webkitrenderer import WebkitRenderer
from threading import Thread
from dialog_settings import DialogSettings
from PyQt4 import QtGui, QtCore, uic
#===============================================================================
class BracSynchronizer(QtGui.QSystemTrayIcon):
    """System-tray application (Python 2 / PyQt4) that watches ".brac"
    archives and periodically re-renders the web snapshots ("brics")
    they contain.

    Watched entries are persisted as JSON in the per-user application
    support directory.  Two timers drive the app: one polls for on-disk
    modifications, the other triggers a sync pass.
    """
#-------------------------------------------------------------------------------
    def __init__(self, icon, parent=None):
        """Build the tray menu, resolve data paths, load the saved
        entries and start the two polling timers."""
        QtGui.QSystemTrayIcon.__init__(self, icon, parent)
        logger.debug('Initializing')
        self.trayMenu = QtGui.QMenu(parent)
        self.actionStartStop = self.trayMenu.addAction("Start/Stop")
        self.actionSynchronize = self.trayMenu.addAction("Synchronize")
        self.actionSettings = self.trayMenu.addAction("Settings")
        self.actionExit = self.trayMenu.addAction("Exit")
        self.actionStartStop  .triggered.connect(self.start)
        self.actionSynchronize.triggered.connect(self.syncBracs)
        self.actionSettings   .triggered.connect(self.settings)
        self.actionExit       .triggered.connect(self.exit)
        self.setContextMenu(self.trayMenu)
        # homedir is the install directory: next to the executable when
        # frozen (py2exe/PyInstaller), otherwise the parent of this file's dir.
        if getattr(sys, 'frozen', False):
            self.homedir = os.path.dirname(sys.executable)
        elif __file__:
            file_path = os.path.dirname(__file__)
            self.homedir = os.path.abspath(os.path.join(file_path, os.path.pardir))
        # Per-user data directory.  NOTE(review): only 'win' and 'mac' are
        # handled; on any other OS `p` is unbound and the next line raises
        # NameError -- confirm supported platforms.
        if common.getos() == 'win':
            p = os.path.join(os.environ['APPDATA'], 'uofs/bric-a-brac')
        elif common.getos() == 'mac':
            p = os.path.join('/Users', os.environ['USER'])
            p = os.path.join(p, 'Library/Application Support')
            p = os.path.join(p, 'uofs/bric-a-brac')
        if not os.path.exists(p): os.makedirs(p)
        self.entries_path = os.path.join(p, 'bracList.json')
        self.loadEntries()
        self.renderer = WebkitRenderer()
        # Poll for on-disk entry modifications every 10 seconds.
        mtimer = QtCore.QTimer(self)
        mtimer.timeout.connect(self.checkIfEntriesModified)
        mtimer.start(10000)
        # Attempt a sync pass every 20 seconds.
        stimer = QtCore.QTimer(self)
        stimer.timeout.connect(self.sync)
        stimer.start(20000)
#-------------------------------------------------------------------------------
    def loadEntries(self):
        """Load the entries list from the JSON file.

        On a parse error the list stays empty; if the file does not exist
        yet, an empty one is created for subsequent runs.
        """
        self.entries = []
        try:
            with open(self.entries_path, 'r') as f:
                self.entries = json.loads(f.read())
                f.close()  # redundant inside `with`, but harmless
            logger.debug('loaded entries from %s' % self.entries_path)
        except ValueError as e:
            logger.error('couldn\'t load entries file at %s' % self.entries_path)
            pass
        except IOError as e:
            logger.debug('couldn\'t load entries, creating initial entiries file at %s' % self.entries_path)
            f = open(self.entries_path, 'w')
            f.close()
#-------------------------------------------------------------------------------
    def saveEntries(self):
        """Serialise the current entries list back to the JSON file."""
        out_str = json.dumps(self.entries, sort_keys = True, indent = 2)
        f = open(self.entries_path, 'w')
        f.write(out_str)
        f.close()
        logger.debug('saved entries to %s' % self.entries_path)
#-------------------------------------------------------------------------------
    def checkIfEntriesModified(self):
        """Scan entries for on-disk changes and refresh the time table of
        the first modified entry found (at most one per invocation)."""
        logger.debug('checking if any entries modified')
        save = False
        toremove = []
        for e in self.entries:
            # Entry vanished or changed kind (file vs dir): remember it,
            # but keep it listed (see the commented-out removal below).
            if not os.path.exists(e['path']) \
                or (e['type'] == 'dir' and not os.path.isdir(e['path'])) \
                or (e['type'] == 'file' and not os.path.isfile(e['path'])):
                toremove.append(e['path'])
                continue
            do_update = False
            if os.path.getmtime(e['path']) != e['mtime']:
                do_update = True
            elif e['type'] == 'dir':
                # For directory entries also compare every contained brac
                # and the recorded subdirectory mtimes.
                for b in e['bracList']:
                    if not os.path.isfile(b['path']) or os.path.getmtime(b['path']) != b['mtime']:
                        do_update = True
                        break
                if not do_update and e.has_key('subdirs'):
                    for sd in e['subdirs']:
                        if not os.path.isdir(sd):
                            do_update = True
                            break
                        elif os.path.getmtime(sd) != e['subdirs'][sd]:
                            do_update = True
                            break
            if do_update:
                entryutils.updateEntryTimeTable(e)
                save = True
                # NOTE(review): stops after the first updated entry; later
                # modified entries wait for the next timer tick -- confirm
                # this throttling is intentional.
                break
        #I guess it's better not to remove them
        #I'll leave it up to the user to clean up the entries.
        #if len(toremove) > 0:
        #    self.entries = [x for x in self.entries if x['path'] not in toremove]
        #    save = True
        if save:
            self.saveEntries()
#-------------------------------------------------------------------------------
    def sync(self):
        """Timer slot: run a sync pass, then reset the tray icon to idle."""
        logger.debug('syncing entries')
        self.syncBracs()
        self.setStatus('off')
#-------------------------------------------------------------------------------
    def setStatus(self, status, notice = None):
        """Switch the tray icon between idle ('off') and syncing ('on').

        `notice` is currently unused -- the popup support is commented out.
        """
        #self.icon.setStatus(status)
        if status == "off":
            self.setIcon(QtGui.QIcon("resources/brac-16x16.png"))
        elif status == "on":
            self.setIcon(QtGui.QIcon("resources/brac-syncing-16x16.png"))
        #if status == "on" and not self.popup.opened():
        #    self.popup.show(notice)
#-------------------------------------------------------------------------------
    def start(self):
        """Menu slot for "Start/Stop"; currently a debug stub."""
        print "again"
#-------------------------------------------------------------------------------
    def settings(self):
        """Open the modal settings dialog."""
        settings_dlg = DialogSettings(self);
        settings_dlg.exec_();
#-------------------------------------------------------------------------------
    def exit(self):
        """Menu slot: quit the Qt application."""
        QtCore.QCoreApplication.instance().quit()
#-------------------------------------------------------------------------------
    def setEntries(self, entries):
        """Replace the entries list, persist it and sync immediately."""
        self.entries = entries
        self.saveEntries()
        self.syncBracs()
#-------------------------------------------------------------------------------
    def needUpdate(self, timeinterval, lastupdate):
        """Return True when `lastupdate` plus `timeinterval` has elapsed.

        `timeinterval` is a 'W-D H:M:S'-style string (week-day
        hour:minute:second); `lastupdate` is a '%Y-%m-%d %H:%M:%S'
        timestamp.  A zero interval (next time equals last time) never
        triggers an update.
        """
        lastupdate = time.strptime(lastupdate, '%Y-%m-%d %H:%M:%S')
        dt_lastupdate = dt.datetime.fromtimestamp(time.mktime(lastupdate))
        interval = dict(zip(['week', 'day', 'hour', 'minute', 'second'], [int(x) for x in timeinterval.replace('-', ' ').replace(':', ' ').split()]))
        dt_deltatime = dt.timedelta(weeks = interval['week'], days = interval['day'], hours = interval['hour'], minutes = interval['minute'], seconds = interval['second'])
        dt_nexttime = dt_lastupdate + dt_deltatime
        dt_curtime = dt.datetime.fromtimestamp(time.time())
        if dt_nexttime > dt_curtime or dt_nexttime == dt_lastupdate:
            return False
        return True
#-------------------------------------------------------------------------------
    def syncBracs(self):
        """Walk every entry's brac list, re-render the bracs whose time
        table says they are due, and persist the updated metadata."""
        for e in self.entries:
            if not e.get('bracList', False):
                continue
            toremove = []
            dirModified = False
            for b in e['bracList']:
                # Brac file disappeared: drop it from this entry below.
                if not os.path.isfile(b['path']):
                    toremove.append(b['path'])
                    continue
                # Refresh the cached time table if the file changed on disk.
                if os.path.getmtime(b['path']) != b['mtime']:
                    b['timetable'] = entryutils.getBracTimeTable(b['path'])
                    b['mtime'] = os.path.getmtime(b['path'])
                needupdate = False
                for bric in b['timetable']:
                    if self.needUpdate(bric['timeinterval'], bric['lastupdate']):
                        needupdate = True
                        break
                if needupdate:
                    dirModified = True
                    self.setStatus('on', 'Synchronizing!\n%s' % b['path'])
                    self.syncBrac(b['path'])
                    # syncBrac rewrote the archive; re-read its metadata.
                    b['timetable'] = entryutils.getBracTimeTable(b['path'])
                    b['mtime'] = os.path.getmtime(b['path'])
            if dirModified:
                e['mtime'] = os.path.getmtime(e['path'])
            if len(toremove) > 0:
                e['bracList'] = [x for x in e['bracList'] if x['path'] not in toremove]
        self.saveEntries()
#-------------------------------------------------------------------------------
    def syncBrac(self, path):
        """Re-render all due brics inside a single .brac archive.

        Extracts brac.xml (and each bric's bric.xml) into a temp dir,
        renders a fresh screenshot for every due bric via WebkitRenderer,
        bumps revisions and timestamps, then repacks the archive with 7za.
        Returns True on success; False/None on early failure.
        """
        logger.debug('syncing brac: %s' % path)
        if not os.path.isfile(path): return False
        tempdir = tempfile.mkdtemp()
        if not os.path.isdir(tempdir):
            logger.error('tempdir %s does not exists' % tempdir)
            return
        zf_brac = zipfile.ZipFile(path, 'a')
        zf_brac.extract('brac.xml', tempdir)
        bracxml = et.parse(os.path.join(tempdir, 'brac.xml'))
        bracdef = bracxml.getroot()
        layers = bracdef.find('layers');
        if layers == None:
            logger.error('brac %s has no layers node!' % path)
            return
        resolution = dict(zip(['width', 'height'], bracdef.attrib['resolution'].split()))
        # Substitution values for the shell repack commands at the bottom.
        vars = {
            'tools': os.path.join(self.homedir, 'tools'),
            'bracpath-brac': path,
            'bracname-brac': os.path.split(path)[1],
            'bracname-zip': "%s.%s.zip" % (os.path.split(path)[1], time.time()),
            'bracpath-zip': "%s.%s.zip" % (path, time.time()),
            'tempdir': tempdir,
        }
        for layer in layers:
            if layer.tag != 'bric':
                continue
            bric = layer
            #extracting bric files to its temp directory
            revision = str(int(bric.attrib['revision']) + 1)
            vars['bricid'] = bric.attrib['id']
            vars['bricdir'] = os.path.join(vars['tempdir'], r'bric.%s' % vars['bricid'])
            vars['bricpath'] = os.path.join(vars['bricdir'], r'%s.png' % revision)
            vars['bricdefpath'] = os.path.join(vars['bricdir'], 'bric.xml')
            bricid = bric.attrib['id']
            newbricdir = os.path.join(tempdir, r'bric.%s' % bricid)
            zf_brac.extract(r'bric.%s/bric.xml' % bricid, tempdir)
            bricxml = et.parse(os.path.join(newbricdir, 'bric.xml'))
            bricdef = bricxml.getroot()
            # Skip brics whose last snapshot is still fresh enough.
            if not self.needUpdate(bric.attrib['timeinterval'], bricdef[len(bricdef) - 1].attrib['date']):
                continue
            #extracting bric attributes
            bricregion = dict(zip(['x', 'y', 'w', 'h'], bricdef.attrib['region'].split()))
            bricres = dict(zip(['w', 'h'], bric.attrib['resolution'].split()))
            params = {
                'width' : int(resolution['width']),
                'height': int(resolution['height']),
            }
            #captureurl.capture(vars['bricurl'], vars['bricpath'], params)
            #taking screenshot
            try:
                image = self.renderer.render(
                    url = bricdef.attrib['url'],
                    filename = vars['bricpath'],
                    width = int(bricres['w']),
                    height = int(bricres['h']),
                    x = int(bricregion['x']),
                    y = int(bricregion['y']),
                    w = int(bricregion['w']),
                    h = int(bricregion['h'])
                )
                #cropping
                #image = image.copy(int(vars['bricx']), int(vars['bricy']), int(vars['bricw']), int(vars['brich']))
                #image.save(vars['bricpath'], 'png')
                if os.path.exists(vars['bricpath']):
                    logger.debug('generated %s' % vars['bricpath'])
                else:
                    logger.error('failed to generate %s' % vars['bricpath'])
                    continue
            except RuntimeError, e:
                logger.error(e.message)
                continue
            #updating brac and bric xml files
            snapshot_time = time.strftime('%Y-%m-%d %H:%M:%S')
            snapshot = et.Element('snapshot', {'revision': revision, 'date': snapshot_time})
            bricdef.append(snapshot)
            common.indent(bricdef)
            bricxml.write(os.path.join(newbricdir, 'bric.xml'))
            bric.attrib['revision'] = revision
            bric.attrib['lastupdate'] = snapshot_time
        common.indent(bracdef)
        bracxml.write(os.path.join(tempdir, 'brac.xml'))
        zf_brac.close()
        # Repack: rename the brac aside, re-add the temp dir contents with
        # 7za, then rename the archive back into place.
        if common.getos() == 'win':
            os.system('ren "%(bracpath-brac)s" "%(bracname-zip)s"' % vars)
            os.system('cd /d %(tools)s & 7za.exe a "%(bracpath-zip)s" "%(tempdir)s/*"' % vars)
            os.system('ren "%(bracpath-zip)s" "%(bracname-brac)s"' % vars)
        if common.getos() == 'mac':
            os.system('mv "%(bracpath-brac)s" "%(bracpath-zip)s"' % vars)
            os.system('cd %(tools)s ; ./7za a "%(bracpath-zip)s" "%(tempdir)s/*"' % vars)
            os.system('mv "%(bracpath-zip)s" "%(bracpath-brac)s"' % vars)
        shutil.rmtree(tempdir)
        print 'sync done'
        return True
#===============================================================================
def main():
    """Create the Qt application and run the tray icon until exit."""
    application = QtGui.QApplication(sys.argv)
    parent_widget = QtGui.QWidget()
    tray = BracSynchronizer(QtGui.QIcon("resources/brac-16x16.png"), parent_widget)
    tray.show()
    sys.exit(application.exec_())
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    main()
#===============================================================================
|
# Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import time
import pytest
from oceandb_driver_interface.oceandb import OceanDb
from oceandb_driver_interface.search_model import FullTextModel, QueryModel
from oceandb_elasticsearch_driver.utils import query_parser
from .ddo_example import ddo_sample
# Module-level Elasticsearch plugin handle shared by every test below;
# instantiating OceanDb connects using the local ./tests/oceandb.ini config.
es = OceanDb('./tests/oceandb.ini').plugin
def delete_all():
    """Best-effort removal of every document currently in the test index."""
    search_result = es.driver.es.search(index=es.driver.db_index, body={'size': 1000})
    for hit in search_result['hits']['hits']:
        try:
            es.delete(hit['_id'])
        except Exception as exc:
            # Deletion failures are only reported, never fatal for a test run.
            print(exc)
def test_plugin_type_is_es():
    """The configured plugin must be the Elasticsearch driver."""
    expected_type = 'Elasticsearch'
    assert es.type == expected_type
def test_write_without_id():
    """Writing without an explicit id auto-generates one; clean up after."""
    generated_id = es.write({"value": "test"})
    es.delete(generated_id)
def test_write_error():
    """Writing a second document under an existing id must raise ValueError."""
    # Fix: pytest removed the `message` kwarg of pytest.raises in 5.0;
    # `match` asserts the raised error text (a regex) instead.
    with pytest.raises(
            ValueError,
            match="Resource \"1\" already exists, use update instead"
    ):
        es.write({"value": "test"}, 1)
        es.write({"value": "test"}, 1)
    es.delete(1)
def test_delete_error():
    """Deleting an id that was never written must raise ValueError."""
    # Fix: pytest removed the `message` kwarg of pytest.raises in 5.0;
    # `match` asserts the raised error text (a regex) instead.
    with pytest.raises(
            ValueError,
            match="Resource \"abc\" does not exists"
    ):
        es.delete("abc")
def test_plugin_write_and_read():
    """A written document can be read back under the same id."""
    es.write({"value": "test"}, 1)
    stored = es.read(1)
    assert stored['value'] == 'test'
    es.delete(1)
def test_update():
    """update() replaces the document stored under an existing id."""
    doc_id = 1
    es.write({"value": "test"}, doc_id)
    assert es.read(doc_id)['value'] == 'test'
    es.update({"value": "testUpdated"}, doc_id)
    assert es.read(doc_id)['value'] == 'testUpdated'
    es.delete(doc_id)
def test_plugin_list():
    """Exercise list(): full listing, deletion, and the search_from /
    search_to / limit pagination arguments."""
    delete_all()
    count = 27
    # Seed documents with ids 0..26, each carrying a unique key/value pair.
    for i in range(count):
        try:
            es.write({f"value{i}": f"test{i}"}, i)
        except ValueError as e:
            print(f'resource already exist: {i} <error>: {e}')
    assert len(list(es.list())) == count
    assert list(es.list())[0]['value0'] == 'test0'
    es.delete(0)
    time.sleep(2)  # allow Elasticsearch to refresh the index
    assert len(list(es.list())) == count - 1
    page = list(es.list(search_from=2, search_to=4))
    assert len(page) == 3
    page = list(es.list(search_from=3, search_to=5))
    assert page[1]['value13'] == 'test13'
    page = list(es.list(search_from=1, limit=2))
    assert page[0]['value10'] == 'test10'
    for i in range(1, count):
        es.delete(i)
def test_search_query():
    """Run structured QueryModel searches against a single sample DDO.

    Each query variant (range, keyword, combined, date windows, free text,
    dotted field paths) must match exactly the one seeded document.
    """
    delete_all()
    es.write(ddo_sample, ddo_sample['id'])
    # Numeric range query on cost.
    search_model = QueryModel({'cost': ["0", "12"]})
    assert es.query(search_model)[0][0]['id'] == ddo_sample['id']
    search_model_2 = QueryModel({'license': ['CC-BY']})
    assert es.query(search_model_2)[0][0]['id'] == ddo_sample['id']
    search_model_3 = QueryModel({'cost': ["0", "12"], 'license': ['CC-BY']})
    assert es.query(search_model_3)[0][0]['id'] == ddo_sample['id']
    search_model_4 = QueryModel(
        {'cost': ["0", "12"], 'license': ['CC-BY'], 'metadata_type': ['dataset']})
    assert es.query(search_model_4)[0][0]['id'] == ddo_sample['id']
    # An empty value list still matches the seeded document.
    search_model_5 = QueryModel({'sample': []})
    assert es.query(search_model_5)[0][0]['id'] == ddo_sample['id']
    # Date-window queries on the various timestamp fields.
    search_model_6 = QueryModel({'created': ['2016-02-07T16:02:20Z', '2016-02-09T16:02:20Z']})
    assert len(es.query(search_model_6)[0]) == 1
    search_model_7 = QueryModel({'dateCreated': ['2016-02-07T16:02:20Z', '2016-02-09T16:02:20Z']})
    assert es.query(search_model_7)[0][0]['id'] == ddo_sample['id']
    search_model_8 = QueryModel({'datePublished': ['2016-02-07T16:02:20Z', '2016-02-09T16:02:20Z']})
    assert len(es.query(search_model_8)[0]) == 1
    search_model_9 = QueryModel(
        {'datePublished': ['2016-02-07T16:02:20Z', '2016-02-09T16:02:20Z'], 'text': ['Weather']})
    assert len(es.query(search_model_9)[0]) == 1
    # Free-text queries: matching is case-insensitive; any term may match.
    search_model_10 = QueryModel({'text': ['Weather']})
    assert len(es.query(search_model_10)[0]) == 1
    assert len(es.query(QueryModel({'text': ['UK']}))[0]) == 1
    assert len(es.query(QueryModel({'text': ['uk']}))[0]) == 1
    assert len(es.query(QueryModel({'text': ['uK']}))[0]) == 1
    assert len(es.query(QueryModel({'text': ['2015']}))[0]) == 0
    assert len(es.query(QueryModel({'text': ['2011']}))[0]) == 1
    assert len(es.query(QueryModel({'text': ['2011', 'uuuukkk', 'temperature']}))[0]) == 1
    # Fully-qualified (dotted) field paths are passed through as-is.
    assert len(es.query(QueryModel({'service.attributes.additionalInformation.customField': ['customValue']}))[0]) == 1
    assert len(es.query(QueryModel({'service.attributes.additionalInformation.nonExistentField': ['customValue']}))[0]) == 0
    search_model = QueryModel({'cost': ["0", "12"], 'text': ['Weather']})
    assert es.query(search_model)[0][0]['id'] == ddo_sample['id']
    search_model_dataToken = QueryModel({'dataToken': ['0x2eD6d94Ec5Af12C43B924572F9aFFe470DC83282']})
    assert len(es.query(search_model_dataToken)[0]) == 1
    es.delete(ddo_sample['id'])
def test_full_text_query():
    """A 'foo?' wildcard full-text query matches exactly the two foo docs."""
    values = ["test1", "test2", "test3", "foo4", "foo5", "test6"]
    for doc_id, value in enumerate(values, start=1):
        es.write({"value": value}, doc_id)
    search_model = FullTextModel('foo?', {'value': 1}, offset=6, page=1)
    assert len(es.text_query(search_model)) == 2
    for doc_id in range(1, 7):
        es.delete(doc_id)
def test_full_text_query_tree():
    """Full-text queries over nested fields, plus sorting by nested keys.

    Covers descending string sort on 'father.son', ascending numeric sort
    on 'price.value', and lexicographic sort on the 'price.pool' strings.
    """
    def delete_ids(ids, mult=1):
        # Best-effort cleanup: ignore ids that do not exist.
        for i in ids:
            try:
                es.delete(i*mult)
            except Exception:
                pass
    delete_ids(range(1, 7))
    delete_ids(range(1, 8), 10)
    es.write({"father": {"son": "test1"}}, 1)
    es.write({"father": {"son": "test2"}}, 2)
    es.write({"father": {"son": "test3"}}, 3)
    es.write({"father": {"son": "foo4"}}, 4)
    es.write({"father": {"son": "foo5"}}, 5)
    es.write({"father": {"son": "test6"}}, 6)
    # Descending sort (-1) on the nested string field: foo5 comes first.
    search_model = FullTextModel('foo?', {'father.son': -1}, offset=6, page=1)
    results = es.text_query(search_model)[0]
    assert len(results) == 2
    assert results[0]['father']['son'] == 'foo5'
    # test sorting by numbers
    es.write({"price": {"value": 1, "ocean": 2.3, "pool": "0x1"}}, 10)
    es.write({"price": {"value": 2, "ocean": 2.3, "pool": "0x2"}}, 20)
    es.write({"price": {"value": 3, "ocean": 2.3, "pool": "0x3"}}, 30)
    es.write({"price": {"value": 11, "ocean": 2.3, "pool": "0x11"}}, 40)
    es.write({"price": {"value": 12, "ocean": 2.3, "pool": "0x12"}}, 50)
    es.write({"price": {"value": 13, "ocean": 2.3, "pool": "0x13"}}, 60)
    es.write({"price": {"value": 4, "ocean": 2.3, "pool": "0x4"}}, 70)
    search_model = FullTextModel('0x*', {'price.value': 1})  # ascending
    results = es.text_query(search_model)[0]
    assert len(results) == 7, ''
    values = [r["price"]["value"] for r in results]
    # Numeric sort: 11..13 come after 4, unlike a string sort.
    assert values == [1, 2, 3, 4, 11, 12, 13]
    search_model = FullTextModel('0x*', {'price.pool': 1})
    results = es.text_query(search_model)[0]
    assert len(results) == 7, ''
    pools = [r["price"]["pool"] for r in results]
    # String sort: "0x11" sorts before "0x2" lexicographically.
    assert pools == ["0x1", "0x11", "0x12", "0x13", "0x2", "0x3", "0x4"]
    delete_ids(range(1, 7))
    delete_ids(range(1, 8), 10)
def test_query_parser():
    """query_parser() must translate the friendly query dict into the
    Elasticsearch bool/must/should DSL.

    Covers: two-element lists -> range clauses, single values -> match
    clauses, alias expansion to full document paths (cost, license,
    metadata_type, type, categories), empty value lists, date strings
    parsed into datetimes, and pass-through of dotted field paths.
    """
    # Two values -> a range clause on the aliased cost path.
    query = {'cost': ["0", "100"]}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": [{"range": {"service.attributes.main.cost": {"gte": "0", "lte": "100"}}}]}}]}})
    # A single value -> a match clause instead of a range.
    query = {'cost': ["15"]}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": [{"match": {"service.attributes.main.cost": "15"}}]}}]}})
    query = {'license': ['CC-BY']}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": [{"match": {"service.attributes.main.license": "CC-BY"}}]}}]}})
    # Multiple values -> one "should" (OR) list of match clauses.
    query = {'metadata_type': ['dataset', 'algorithm']}
    assert query_parser(query) == ({
        "bool": {"must": [{
            "bool": {
                "should": [
                    {"match": {"service.attributes.main.type": "dataset"}},
                    {"match": {"service.attributes.main.type": "algorithm"}}
                ]}
        }]}
    })
    query = {'type': ['Access', 'Metadata']}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": [{"match": {"service.type": "Access"}}, {"match": {"service.type": "Metadata"}}]}}]}})
    # Multiple keys -> one "must" (AND) entry per key.
    query = {'cost': ["0", "10"], 'type': ['Access', 'Metadata']}
    assert query_parser(query) == ({
        "bool": {
            "must": [
                {"bool": {"should": [{"range": {"service.attributes.main.cost": {"gte": "0", "lte": "10"}}}]}},
                {"bool": {"should": [{"match": {"service.type": "Access"}}, {"match": {"service.type": "Metadata"}}]}}
            ]
        }
    })
    # An empty value list yields an empty "should" clause.
    query = {'license': []}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": []}}]}})
    query = {'license': [], 'type': ['Access', 'Metadata']}
    assert query_parser(query) == ({
        "bool": {
            "must": [
                {"bool": {"should": []}},
                {"bool": {"should": [
                    {"match": {"service.type": "Access"}},
                    {"match": {"service.type": "Metadata"}}]}}
            ]}
    })
    query = {'license': ['CC-BY'], 'type': ['Access', 'Metadata']}
    assert query_parser(query) == ({
        "bool": {
            "must": [
                {"bool": {"should": [{"match": {"service.attributes.main.license": "CC-BY"}}]}},
                {"bool": {"should": [
                    {"match": {"service.type": "Access"}},
                    {"match": {"service.type": "Metadata"}}]}}
            ]}
    })
    # Date strings are parsed into datetime objects inside the range clause.
    query = {'license': ['CC-BY'], 'created': ['2016-02-07T16:02:20Z', '2016-02-09T16:02:20Z']}
    assert query_parser(query)["bool"]["must"][1]["bool"]["should"][0]["range"]["created"]["gte"].year == 2016
    query = {'datePublished': ['2017-02-07T16:02:20Z', '2017-02-09T16:02:20Z']}
    assert query_parser(query)["bool"]["must"][0]["bool"]["should"][0]["range"]["service.attributes.main.datePublished"]["gte"].year == 2017
    query = {'categories': ['weather', 'other']}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": [{"match": {"service.attributes.additionalInformation.categories": "weather"}}, {"match": {"service.attributes.additionalInformation.categories": "other"}}]}}]}})
    # Already-dotted field paths are used verbatim (no alias expansion).
    query = {'service.attributes.additionalInformation.customField': ['customValue']}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": [{"match": {"service.attributes.additionalInformation.customField": "customValue"}}]}}]}})
    # Two numeric values -> a numeric range clause.
    query = {'service.attributes.additionalInformation.customNumber': [2, 5]}
    assert query_parser(query) == ({"bool": {"must": [{"bool": {"should": [{"range": {"service.attributes.additionalInformation.customNumber": {"gte": 2, "lte": 5}}}]}}]}})
def test_default_sort():
    """With two matching DDOs, the higher-rated one is returned first."""
    es.write(ddo_sample, ddo_sample['id'])
    higher_rated = ddo_sample.copy()
    higher_rated['id'] = 'did:op:cb36cf78d87f4ce4a784f17c2a4a694f19f3fbf05b814ac6b0b7197163888864'
    higher_rated['service'][2]['attributes']['curation']['rating'] = 0.99
    es.write(higher_rated, higher_rated['id'])
    search_model = QueryModel({'cost': [0, 12]})
    assert es.query(search_model)[0][0]['id'] == higher_rated['id']
    es.delete(ddo_sample['id'])
    es.delete(higher_rated['id'])
|
import sys
import os
import os.path as osp
import argparse
import numpy as np
import cv2
import torch
import torchvision.transforms as transforms
from torch.nn.parallel.data_parallel import DataParallel
import torch.backends.cudnn as cudnn
from tqdm import tqdm
sys.path.insert(0, osp.join('..', 'main'))
sys.path.insert(0, osp.join('..', 'data'))
sys.path.insert(0, osp.join('..', 'common'))
from config import cfg
from model import get_pose_net
from dataset import generate_patch_image
from utils_pose.pose_utils import process_bbox, pixel2cam
from utils_pose.vis import vis_keypoints, vis_3d_multiple_skeleton
import cv2
import torch
from glob import glob
from pathlib import Path
import copy
from torch.utils.data import Dataset
def parse_args():
    """Parse command-line options and normalise --gpu into a comma list.

    Returns:
        argparse.Namespace with `gpu_ids` (comma-separated device ids,
        e.g. '0,1,2'), `test_epoch` (checkpoint epoch string, required)
        and `image_dir`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=str, dest='gpu_ids')
    parser.add_argument('--test_epoch', type=str, dest='test_epoch')
    parser.add_argument('--image_dir', type=str, default='')
    args = parser.parse_args()

    # Fail fast on a missing --gpu option.  Fix: the original used
    # `assert 0, print(...)`, which prints only as a side effect and leaves
    # the assertion message as None.
    assert args.gpu_ids, "Please set proper gpu ids"

    # A range like "0-2" expands to "0,1,2".
    if '-' in args.gpu_ids:
        gpus = args.gpu_ids.split('-')
        gpus[0] = 0 if not gpus[0].isdigit() else int(gpus[0])
        # NOTE(review): mem_info() is not defined or imported in this file,
        # so an open-ended range (e.g. "0-") raises NameError here -- confirm
        # where mem_info is meant to come from.
        gpus[1] = len(mem_info()) if not gpus[1].isdigit() else int(gpus[1]) + 1
        args.gpu_ids = ','.join(map(str, list(range(*gpus))))
    assert args.test_epoch, 'Test epoch is required.'
    return args
def xyxy2xywh(labels):
    """Convert boxes from (xmin, ymin, xmax, ymax) to (xmin, ymin, w, h).

    Args:
        labels: array/tensor of shape (N, >=4); columns 2 and 3 are
            overwritten in place with width and height.

    Returns:
        The same (mutated) `labels` object.
    """
    # Bug fix: the original read from the undefined name `label`, raising
    # NameError on every call.
    width = labels[:, 2] - labels[:, 0]
    height = labels[:, 3] - labels[:, 1]
    labels[:, 2] = width
    labels[:, 3] = height
    return labels
def select_biggest_box(boxes):
    """Return the single row of `boxes` with the largest (w * h) area,
    keeping the 2-D (1, C) shape; a lone detection is returned as-is.

    Boxes are (xmin, ymin, xmax, ymax, ...); ties keep the earliest row.
    """
    if boxes.shape[0] == 1:
        return boxes
    best_row, best_area = 0, 0
    for row in range(boxes.shape[0]):
        area = (boxes[row, 2] - boxes[row, 0]) * (boxes[row, 3] - boxes[row, 1])
        if best_area < area:
            best_area, best_row = area, row
    return boxes[best_row:best_row + 1, :]
# argument parsing
args = parse_args()
cfg.set_args(args.gpu_ids)
cudnn.benchmark = True

# MuCo joint set: 21 joints with their flip pairs and skeleton edges.
joint_num = 21
joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe')
flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18), (19, 20) )
skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) )

# snapshot load
model_path = './snapshot_%d.pth.tar' % int(args.test_epoch)
assert osp.exists(model_path), 'Cannot find model at ' + model_path
print('Load checkpoint from {}'.format(model_path))
model = get_pose_net(cfg, False, joint_num)
model = DataParallel(model).cuda()
ckpt = torch.load(model_path)
model.load_state_dict(ckpt['network'])
model.eval()

# prepare input image normalisation
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=cfg.pixel_mean, std=cfg.pixel_std)])
# img_path = 'input.png'
# original_img = cv2.imread(img_path)
# original_img_height, original_img_width = original_img.shape[:2]

# Load YOLOv5 as the person detector.
detector = torch.hub.load('ultralytics/yolov5', 'yolov5l')
detector.cuda()
# Only detect person
detector.classes = [0]
# Confidence threshold for the detector.
# Bug fix: this previously set `model.conf` (the pose network), which has no
# effect on detection filtering; YOLOv5's AutoShape wrapper reads
# `detector.conf`.
detector.conf = 0.5

# Bounding box inference over every png/jpg in --image_dir.
images = glob(args.image_dir + '/*.png')
images += glob(args.image_dir + '/*.jpg')

# image processing: keep only the biggest person box per image.
imgs = []
labels = []
for i in tqdm(images):
    original_img = cv2.imread(i)
    img = copy.deepcopy(original_img)
    img = img[..., ::-1]  # BGR -> RGB for the detector
    pred = detector(img)
    #print(pred.xyxy[0].shape)
    pred = pred.xyxy[0]
    if pred.shape[0] != 0:
        # Select biggest bounding box
        pred = select_biggest_box(pred)
        imgs.append(original_img)
        labels.append(pred.cpu())
print(f'The number of image: {len(imgs)}')
print(len(labels))
os.makedirs(args.image_dir + '/results/', exist_ok = True)
os.makedirs(args.image_dir + '/3d_results/', exist_ok = True)
cnt = 1
for img, label in tqdm(zip(imgs, labels)):
    original_img = img
    original_img_height, original_img_width = img.shape[:2]
    # prepare bbox # xmin, ymin, width, height
    label = xyxy2xywh(label)
    bbox_list = label[:,:4]
    #root_depth_list = [11250.5732421875, 15522.8701171875, 11831.3828125, 8852.556640625, 12572.5966796875] # obtain this from RootNet (https://github.com/mks0601/3DMPPE_ROOTNET_RELEASE/tree/master/demo)
    root_depth_list = [10000,10000,10000,10000,10000,10000,10000,10000,10000,10000] # fixed depth for 3d visualization
    #assert len(bbox_list) == len(root_depth_list)
    person_num = len(bbox_list)

    # normalized camera intrinsics
    focal = [1500, 1500] # x-axis, y-axis
    princpt = [original_img_width/2, original_img_height/2] # x-axis, y-axis
    # print('focal length: (' + str(focal[0]) + ', ' + str(focal[1]) + ')')
    # print('principal points: (' + str(princpt[0]) + ', ' + str(princpt[1]) + ')')

    # for each cropped and resized human image, forward it to PoseNet
    output_pose_2d_list = []
    output_pose_3d_list = []
    for n in range(person_num):
        bbox = process_bbox(bbox_list[n].cpu().numpy(), original_img_width, original_img_height)
        img, img2bb_trans = generate_patch_image(original_img, bbox, False, 1.0, 0.0, False)
        img = transform(img).cuda()[None,:,:,:]

        # forward
        with torch.no_grad():
            pose_3d = model(img) # x,y: pixel, z: root-relative depth (mm)

        # inverse affine transform (restore the crop and resize)
        pose_3d = pose_3d[0].cpu().numpy()
        pose_3d[:,0] = pose_3d[:,0] / cfg.output_shape[1] * cfg.input_shape[1]
        pose_3d[:,1] = pose_3d[:,1] / cfg.output_shape[0] * cfg.input_shape[0]
        pose_3d_xy1 = np.concatenate((pose_3d[:,:2], np.ones_like(pose_3d[:,:1])),1)
        img2bb_trans_001 = np.concatenate((img2bb_trans, np.array([0,0,1]).reshape(1,3)))
        pose_3d[:,:2] = np.dot(np.linalg.inv(img2bb_trans_001), pose_3d_xy1.transpose(1,0)).transpose(1,0)[:,:2]
        output_pose_2d_list.append(pose_3d[:,:2].copy())

        # root-relative discretized depth -> absolute continuous depth
        pose_3d[:,2] = (pose_3d[:,2] / cfg.depth_dim * 2 - 1) * (cfg.bbox_3d_shape[0]/2) + root_depth_list[n]
        pose_3d = pixel2cam(pose_3d, focal, princpt)
        output_pose_3d_list.append(pose_3d.copy())

    # visualize 2d poses
    vis_img = original_img.copy()
    for n in range(person_num):
        vis_kps = np.zeros((3,joint_num))
        vis_kps[0,:] = output_pose_2d_list[n][:,0]
        vis_kps[1,:] = output_pose_2d_list[n][:,1]
        vis_kps[2,:] = 1
        vis_img = vis_keypoints(vis_img, vis_kps, skeleton, joints_name)
    cv2.imwrite(args.image_dir + f'/results/output_{cnt}_2d.jpg', vis_img)
    # NOTE(review): cnt is incremented between the 2d and 3d writes, so the
    # 3d file for image k is named output_{k+1}_3d -- confirm this offset is
    # intentional.
    cnt+=1

    # visualize 3d poses
    vis_kps = np.array(output_pose_3d_list)
    vis_3d_multiple_skeleton(vis_kps, np.ones_like(vis_kps), skeleton, f'output_{cnt}_3d (x,y,z: camera-centered. mm.).jpg', args.image_dir+f'/3d_results/')
|
<reponame>ravi-mosaicml/ravi-composer<filename>tests/loggers/test_file_logger.py
# Copyright 2021 MosaicML. All Rights Reserved.
import os
import pathlib
import sys
import pytest
from torch.utils.data import DataLoader
from composer import Callback, Event, State, Trainer
from composer.loggers import FileLogger, FileLoggerHparams, Logger, LoggerDestination, LogLevel
from composer.utils.collect_env import disable_env_report
from tests.common.datasets import RandomClassificationDataset
from tests.common.models import SimpleModel
class FileArtifactLoggerTracker(LoggerDestination):
    """Test double that records every file artifact logged through it."""

    def __init__(self) -> None:
        # Each entry is a (log_level, artifact_name, file_path) tuple.
        self.logged_artifacts = []

    def log_file_artifact(self, state: State, log_level: LogLevel, artifact_name: str, file_path: pathlib.Path, *,
                          overwrite: bool):
        """Record the artifact metadata; `state` and `overwrite` are ignored."""
        del state, overwrite  # unused
        record = (log_level, artifact_name, file_path)
        self.logged_artifacts.append(record)
@pytest.mark.parametrize("log_level", [LogLevel.EPOCH, LogLevel.BATCH])
@pytest.mark.timeout(10)
def test_file_logger(dummy_state: State, log_level: LogLevel, tmpdir: pathlib.Path):
    """Drive a FileLogger through a scripted training timeline and verify both the
    written log lines and the number of file-artifact uploads, at EPOCH and BATCH levels."""
    log_file_name = os.path.join(tmpdir, "output.log")
    # log_interval=3: at the configured log_level, only every 3rd unit is logged.
    # buffer/flush of 1 forces a write (and an artifact upload) per flush point.
    log_destination = FileLoggerHparams(
        log_interval=3,
        log_level=log_level,
        filename=log_file_name,
        artifact_name="{run_name}/rank{rank}.log",
        buffer_size=1,
        flush_interval=1,
    ).initialize_object()
    file_tracker_destination = FileArtifactLoggerTracker()
    logger = Logger(dummy_state, destinations=[log_destination, file_tracker_destination])
    log_destination.run_event(Event.INIT, dummy_state, logger)
    # Epoch 0: three batches, nothing logged via data_*().
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    # Epoch 1: one call at each log level (timer is at batch=2 here).
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    logger.data_fit({"metric": "fit"})  # should print
    logger.data_epoch({"metric": "epoch"})  # should print on batch level, since epoch calls are always printed
    logger.data_batch({"metric": "batch"})  # should print on batch level, since we print every 3 steps
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    logger.data_epoch({"metric": "epoch1"})  # should print, since we log every 3 epochs
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    logger.data_epoch({"metric": "epoch2"})  # should print on batch level, since epoch calls are always printed
    logger.data_batch({"metric": "batch1"})  # should NOT print
    dummy_state.timer.on_batch_complete()
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    dummy_state.timer.on_epoch_complete()
    log_destination.run_event(Event.EPOCH_END, dummy_state, logger)
    log_destination.close(dummy_state, logger)
    with open(log_file_name, 'r') as f:
        if log_level == LogLevel.EPOCH:
            assert f.readlines() == [
                '[FIT][batch=2]: { "metric": "fit", }\n',
                '[EPOCH][batch=2]: { "metric": "epoch1", }\n',
            ]
        else:
            assert log_level == LogLevel.BATCH
            assert f.readlines() == [
                '[FIT][batch=2]: { "metric": "fit", }\n',
                '[EPOCH][batch=2]: { "metric": "epoch", }\n',
                '[BATCH][batch=2]: { "metric": "batch", }\n',
                '[EPOCH][batch=2]: { "metric": "epoch1", }\n',
                '[EPOCH][batch=3]: { "metric": "epoch2", }\n',
            ]
    # Flush interval is 1, so there should be one log_file call per LogLevel
    # Flushes also happen per each eval_start, epoch_start, and close()
    # If the loglevel is batch, flushing also happens every epoch end
    if log_level == LogLevel.EPOCH:
        # NOTE(review): `epoch` appears twice in this sum — presumably one flush
        # per epoch_start plus one per epoch-interval flush, plus one for
        # close(); confirm against FileLogger's flush logic.
        assert len(file_tracker_destination.logged_artifacts) == int(dummy_state.timer.epoch) + int(
            dummy_state.timer.epoch) + 1
    else:
        assert log_level == LogLevel.BATCH
        assert len(file_tracker_destination.logged_artifacts) == int(dummy_state.timer.batch) + int(
            dummy_state.timer.epoch) + int(dummy_state.timer.epoch) + 1
@pytest.mark.timeout(15)  # disk can be slow on Jenkins
def test_file_logger_capture_stdout_stderr(dummy_state: State, tmpdir: pathlib.Path):
    """Verify that stdout/stderr written after construction is captured and
    written line-by-line with [stdout]/[stderr] prefixes to the log file."""
    log_file_name = os.path.join(tmpdir, "output.log")
    log_destination = FileLoggerHparams(filename=log_file_name,
                                        buffer_size=1,
                                        flush_interval=1,
                                        capture_stderr=True,
                                        capture_stdout=True).initialize_object()
    # capturing should start immediately (i.e. before Event.INIT fires)
    print("Hello, stdout!\nExtra Line")
    print("Hello, stderr!\nExtra Line2", file=sys.stderr)
    logger = Logger(dummy_state, destinations=[log_destination])
    log_destination.run_event(Event.INIT, dummy_state, logger)
    log_destination.run_event(Event.EPOCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_START, dummy_state, logger)
    log_destination.run_event(Event.BATCH_END, dummy_state, logger)
    log_destination.close(dummy_state, logger)
    # Each printed line gets its own prefixed entry, in print order.
    with open(log_file_name, 'r') as f:
        assert f.readlines() == [
            '[stdout]: Hello, stdout!\n',
            '[stdout]: Extra Line\n',
            '[stderr]: Hello, stderr!\n',
            '[stderr]: Extra Line2\n',
        ]
class ExceptionRaisingCallback(Callback):
    """Test helper callback that raises a RuntimeError on Event.FIT_START."""

    def fit_start(self, state: State, logger: Logger) -> None:
        del state, logger  # unused
        raise RuntimeError("My Exception!")
def test_exceptions_are_printed(tmpdir: pathlib.Path):
    # Test that exceptions are printed to stderr, which is captured by the file logger
    # The file logger stops capturing stdout/stderr when it is closed
    # Here, we construct a trainer that raises an exception on Event.FIT_START
    # and assert that the exception is written to the logfile
    exception_raising_callback = ExceptionRaisingCallback()
    logfile_name = str(tmpdir / "logfile.txt")
    file_logger = FileLogger(filename=logfile_name, capture_stderr=True)
    dataloader = DataLoader(RandomClassificationDataset())
    model = SimpleModel()
    trainer = Trainer(model=model,
                      train_dataloader=dataloader,
                      max_duration=1,
                      callbacks=[exception_raising_callback],
                      loggers=[file_logger])
    disable_env_report()  # Printing the full report in this test can cause timeouts
    # manually calling `sys.excepthook` for the exception, as it is impossible to write a test
    # that validates unhandled exceptions are logged, since the test validation code would by definition
    # need to handle the exception!
    try:
        trainer.fit()
    except RuntimeError:
        exc_type, exc_value, tb = sys.exc_info()
        assert exc_type is not None
        assert exc_value is not None
        assert tb is not None
        sys.excepthook(exc_type, exc_value, tb)
    trainer.close()
    with open(logfile_name, "r") as f:
        log_lines = f.readlines()
        # The traceback's last line is the exception message itself.
        assert "[stderr]: RuntimeError: My Exception!\n" == log_lines[-1]
    # Since the trainer was closed, future prints should not appear in the file logger
    print("SHOULD NOT BE CAPTURED")
    with open(logfile_name, "r") as f:
        logfile = f.read()
        assert "SHOULD NOT BE CAPTURED" not in logfile
|
import ast
import uuid
import etcd
import json
import requests
import logging
import logging.config
from datetime import datetime
from pengrixio.database import etcdc
from pengrixio.k8s.node import kube_list_node
from pengrixio.k8s.pod import kube_list_pod
from pengrixio.k8s.namespace import kube_list_namespace
from pengrixio.k8s.namespace import kube_create_namespace
from pengrixio.k8s.namespace import kube_delete_namespace
from pengrixio.api.tenant.bizlogic import get_tenant
from pengrixio.config import PROJECT_ROOT
from pengrixio.k8s.utils import cmd
logging.config.fileConfig('logging.conf')
log = logging.getLogger('pengrixio')
def get_edge(name=None):
    """Get edge list.

    :param name: optional edge name; when None, all edges are listed.
    :return: list of edge dicts (empty when nothing is found).
    """
    l_edge = list()
    if name is None:
        s_rsc = '{}/edge'.format(etcdc.prefix)
    else:
        s_rsc = '{}/edge/{}'.format(etcdc.prefix, name)
    try:
        r = etcdc.read(s_rsc, recursive=True, sorted=True)
    except etcd.EtcdKeyNotFound as e:
        log.error(e)
        return l_edge
    # Fix: the original returned from inside `finally`, which silently
    # swallowed any exception raised while parsing the stored values.
    # Values are Python-literal strings; literal_eval is safe on them.
    for child in r.children:
        l_edge.append(ast.literal_eval(child.value))
    return l_edge
def get_edge_info(name):
    """Get a single edge record.

    :param name: edge name (required).
    :return: (True, edge-dict) on success, (False, error) otherwise.
    """
    if not name:  # <name> should be specified.
        return (False, 'Edge name should be specified.')
    s_rsc = '{}/edge/{}'.format(etcdc.prefix, name)
    try:
        r = etcdc.read(s_rsc)
    except etcd.EtcdKeyNotFound as e:
        log.error(e)
        return (False, e)
    # Fix: no more `return` inside `finally` — parse errors now propagate
    # instead of being silently swallowed.
    return (True, ast.literal_eval(r.value))
def create_edge(data):
    """Create edge.

    data: edgePostSerializer payload; must contain 'name'.
    :return: (True, message) on success, (False, error) otherwise.
    """
    if not data.get('name'):
        return (False, 'Edge name should be specified.')
    data['createdAt'] = datetime.utcnow().isoformat() + 'Z'
    data['uid'] = str(uuid.uuid4())
    log.debug(data)
    s_rsc = '{}/edge/{}'.format(etcdc.prefix, data['name'])
    try:
        # prevExist=False -> fail instead of overwriting an existing edge.
        etcdc.write(s_rsc, data, prevExist=False)
    except etcd.EtcdKeyAlreadyExist as e:
        log.error(e)
        return (False, e)
    # Fix: message previously said "etcd {} is created."; the created thing
    # is the edge (consistent with delete_edge's "edge {} is deleted.").
    return (True, 'edge {} is created.'.format(data['name']))
def delete_edge(name):
    """Delete edge.

    :param name: edge name (required).
    :return: (True, message) on success, (False, error) otherwise.
    """
    if not name:  # <name> should be specified.
        return (False, 'Edge name should be specified.')
    s_rsc = '{}/edge/{}'.format(etcdc.prefix, name)
    try:
        etcdc.delete(s_rsc)
    except etcd.EtcdKeyNotFound as e:
        log.error(e)
        return (False, e)
    # Fix: return moved out of `finally` so unexpected etcd errors propagate
    # instead of being silently converted into a success result.
    return (True, 'edge {} is deleted.'.format(name))
def update_edge(name, data):
    """Update edge.

    data: edgePatchSerializer payload; falsy fields are ignored.
    :return: (True, message) on success, (False, error) otherwise.
    """
    if not name:  # <name> should be specified.
        return (False, '')
    # Drop empty/None fields so a PATCH does not blank existing values.
    data = {k: v for k, v in data.items() if v}
    s_rsc = '{}/edge/{}'.format(etcdc.prefix, name)
    try:
        r = etcdc.read(s_rsc)
    except etcd.EtcdKeyNotFound as e:
        log.error(e)
        return (False, e)
    d = ast.literal_eval(r.value)
    data['modifiedAt'] = datetime.utcnow().isoformat() + 'Z'
    d.update(data)
    log.debug(d)
    try:
        # NOTE(review): prevExist=True raises EtcdKeyNotFound (not
        # EtcdKeyAlreadyExist) when the key vanished between read and write;
        # catch both to be safe.
        etcdc.write(s_rsc, d, prevExist=True)
    except (etcd.EtcdKeyAlreadyExist, etcd.EtcdKeyNotFound) as e:
        log.error(e)
        return (False, e)
    # Fix: message previously said "etcd {} is created." for an update.
    return (True, 'edge {} is updated.'.format(name))
|
from pathlib import Path
import math
import sys
import time
from PIL import Image
import numpy as np
import torch
from . import utils
from ..data_utils import (
to_coco, from_coco, get_image_path, SEG_FP, scale_boxes)
from ..utils import print_metrics
from ..viz import visualize_boxes
from ..metric import score_boxes, get_metrics
from .dataset import get_target_boxes_labels
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):
    """Train `model` for one epoch; returns the global averages of all meters."""
    model.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter(
        'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    lr_scheduler = None
    if epoch == 0:
        # Linear LR warmup over the first (up to) 1000 iterations of epoch 0.
        warmup_factor = 1. / 1000
        warmup_iters = min(1000, len(data_loader) - 1)
        lr_scheduler = utils.warmup_lr_scheduler(
            optimizer, warmup_iters, warmup_factor)
    for images, targets in metric_logger.log_every(
            data_loader, print_freq, header):
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # In train mode the model is expected to return a dict of named losses.
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        loss_value = losses_reduced.item()
        if not math.isfinite(loss_value):
            # Abort on NaN/inf loss rather than continuing to diverge.
            print('Loss is {}, stopping training'.format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        if lr_scheduler is not None:
            lr_scheduler.step()
        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
    return {k: m.global_avg for k, m in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, data_loader, device, output_dir, threshold):
    """Evaluate a detection model; returns (metrics, (scores, clf_gt)).

    threshold: minimum prediction score for a box to be kept.
    output_dir: if truthy, per-image visualizations are written there.
    """
    cpu_device = torch.device('cpu')
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    scores = []
    clf_gt = []
    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time
        evaluator_time = time.time()
        for target, image, output in zip(targets, images, outputs):
            # 'idx' indexes into the dataset's DataFrame of ground-truth items.
            item = data_loader.dataset.df.iloc[target['idx'].item()]
            del target
            target_boxes, target_labels = get_target_boxes_labels(item)
            target_boxes = torch.from_numpy(target_boxes)
            # Keep only confident predictions and convert to COCO xywh format.
            boxes = output['boxes'][output['scores'] >= threshold]
            boxes = to_coco(boxes)
            # Predictions are in network-input resolution; rescale to the
            # original image size before scoring.
            with Image.open(get_image_path(
                    item, data_loader.dataset.root)) as original_image:
                ow, oh = original_image.size
            _, h, w = image.shape
            w_scale = ow / w
            h_scale = oh / h
            scaled_boxes = scale_boxes(boxes, w_scale, h_scale)
            scores.append(
                dict(score_boxes(
                    truth_boxes=from_coco(target_boxes).numpy(),
                    truth_label=np.ones(target_labels.shape[0]),
                    # Box centers serve as point predictions for scoring.
                    preds_center=torch.stack(
                        [scaled_boxes[:, 0] + scaled_boxes[:, 2] * 0.5,
                         scaled_boxes[:, 1] + scaled_boxes[:, 3] * 0.5]
                    ).t().numpy(),
                    preds_label=np.ones(boxes.shape[0]),
                ), image_id=item.image_id))
            clf_gt.append({
                'labels': get_clf_gt(
                    target_boxes=target_boxes,
                    target_labels=target_labels,
                    boxes=scaled_boxes),
                'image_id': item.image_id,
            })
            if output_dir:
                # Map ground truth back to network resolution for drawing.
                unscaled_target_boxes = scale_boxes(
                    target_boxes, 1 / w_scale, 1 / h_scale)
                _save_predictions(
                    image, boxes, unscaled_target_boxes,
                    path=output_dir / f'{item.image_id}.jpg')
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(
            model_time=model_time, evaluator_time=evaluator_time)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    metrics = get_metrics(scores)
    print_metrics(metrics)
    return metrics, (scores, clf_gt)
def _save_predictions(image, boxes, target, path: Path):
    """Save a visualization of predicted boxes and target boxes (green) to *path*."""
    # CHW float tensor -> HWC uint8 array; assumes values in [0, 1] — TODO confirm.
    image = (image.detach().cpu() * 255).to(torch.uint8)
    image = np.rollaxis(image.numpy(), 0, 3)
    image = visualize_boxes(image, boxes, thickness=3)
    image = visualize_boxes(image, target, color=(0, 255, 0), thickness=2)
    Image.fromarray(image).save(path)
def get_clf_gt(target_boxes, target_labels, boxes, min_iou=0.5) -> str:
    """ Create ground truth for classification from predicted boxes
    in the same format as original ground truth, with addition of a class for
    false negatives. Perform matching using box IoU.
    """
    n_pred = boxes.shape[0]
    if n_pred == 0:
        return ''
    if target_boxes.shape[0] == 0:
        # No ground truth at all: every prediction is a false positive.
        labels = [SEG_FP] * n_pred
    else:
        ious = bbox_overlaps(from_coco(target_boxes).numpy(),
                             from_coco(boxes).numpy())
        best_target = np.argmax(ious, axis=0)
        assert best_target.shape == (n_pred,)
        # Assign each prediction its best-matching target's label, or the
        # false-positive class when the match is below min_iou.
        labels = [
            target_labels[best_target[j]]
            if ious[best_target[j], j] >= min_iou else SEG_FP
            for j in range(n_pred)
        ]
    return ' '.join(
        label + ' ' + ' '.join(str(int(round(float(v)))) for v in box)
        for box, label in zip(boxes, labels))
def bbox_overlaps(
        bboxes1: np.ndarray, bboxes2: np.ndarray, mode='iou') -> np.ndarray:
    """Calculate the ious between each bbox of bboxes1 and bboxes2.
    GH:open-mmlab/mmdetection/mmdet/core/evaluation/bbox_overlaps.py
    Args:
        bboxes1(ndarray): shape (n, 4)
        bboxes2(ndarray): shape (k, 4)
        mode(str): iou (intersection over union) or iof (intersection
            over foreground)
    Returns:
        ious(ndarray): shape (n, k)
    """
    assert mode in ['iou', 'iof']
    a = bboxes1.astype(np.float32)
    b = bboxes2.astype(np.float32)
    n_rows, n_cols = a.shape[0], b.shape[0]
    if n_rows * n_cols == 0:
        return np.zeros((n_rows, n_cols), dtype=np.float32)
    # Loop over the smaller set of boxes; transpose the result back at the end.
    swapped = a.shape[0] > b.shape[0]
    if swapped:
        a, b = b, a
    result = np.zeros((a.shape[0], b.shape[0]), dtype=np.float32)
    # Areas use the inclusive-pixel (+1) convention, matching mmdetection.
    area_a = (a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1)
    area_b = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)
    for i in range(a.shape[0]):
        x_lo = np.maximum(a[i, 0], b[:, 0])
        y_lo = np.maximum(a[i, 1], b[:, 1])
        x_hi = np.minimum(a[i, 2], b[:, 2])
        y_hi = np.minimum(a[i, 3], b[:, 3])
        inter = np.maximum(x_hi - x_lo + 1, 0) * np.maximum(
            y_hi - y_lo + 1, 0)
        if mode == 'iou':
            denom = area_a[i] + area_b - inter
        else:
            # 'iof': normalize by the ORIGINAL bboxes1 areas, which live in
            # `b` after a swap.
            denom = area_b if swapped else area_a[i]
        result[i, :] = inter / denom
    return result.T if swapped else result
|
import logging
import ray
from ray.rllib.agents.ddpg.ddpg_tf_policy import build_ddpg_models, \
get_distribution_inputs_and_class, validate_spaces
from ray.rllib.agents.dqn.dqn_tf_policy import postprocess_nstep_and_prio, \
PRIO_WEIGHTS
from ray.rllib.models.torch.torch_action_dist import TorchDeterministic, \
TorchDirichlet
from ray.rllib.policy.policy_template import build_policy_class
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.spaces.simplex import Simplex
from ray.rllib.utils.torch_ops import apply_grad_clipping, huber_loss, l2_loss
torch, nn = try_import_torch()
logger = logging.getLogger(__name__)
def build_ddpg_models_and_action_dist(policy, obs_space, action_space, config):
    """Build the DDPG model(s) and choose the torch action distribution.

    Returns (model, action_dist_class): Dirichlet for Simplex action spaces,
    deterministic actions otherwise.
    """
    model = build_ddpg_models(policy, obs_space, action_space, config)
    # TODO(sven): Unify this once we generically support creating more than
    # one Model per policy. Note: Device placement is done automatically
    # already for `policy.model` (but not for the target model).
    device = (torch.device("cuda")
              if torch.cuda.is_available() else torch.device("cpu"))
    policy.target_model = policy.target_model.to(device)
    if isinstance(action_space, Simplex):
        return model, TorchDirichlet
    else:
        return model, TorchDeterministic
def ddpg_actor_critic_loss(policy, model, _, train_batch):
    """Compute the DDPG/TD3 actor and critic losses for a sampled batch.

    Returns (actor_loss, critic_loss) — one term per optimizer created by
    `make_ddpg_optimizers`. Also stores td_error and q_t on `policy` for the
    stats function and prioritized-replay weight updates.
    """
    twin_q = policy.config["twin_q"]
    gamma = policy.config["gamma"]
    n_step = policy.config["n_step"]
    use_huber = policy.config["use_huber"]
    huber_threshold = policy.config["huber_threshold"]
    l2_reg = policy.config["l2_reg"]
    input_dict = {
        "obs": train_batch[SampleBatch.CUR_OBS],
        "is_training": True,
    }
    input_dict_next = {
        "obs": train_batch[SampleBatch.NEXT_OBS],
        "is_training": True,
    }
    model_out_t, _ = model(input_dict, [], None)
    model_out_tp1, _ = model(input_dict_next, [], None)
    target_model_out_tp1, _ = policy.target_model(input_dict_next, [], None)
    # Policy network evaluation.
    # prev_update_ops = set(tf1.get_collection(tf.GraphKeys.UPDATE_OPS))
    policy_t = model.get_policy_output(model_out_t)
    # policy_batchnorm_update_ops = list(
    #     set(tf1.get_collection(tf.GraphKeys.UPDATE_OPS)) - prev_update_ops)
    policy_tp1 = \
        policy.target_model.get_policy_output(target_model_out_tp1)
    # Action outputs.
    if policy.config["smooth_target_policy"]:
        # TD3-style target-policy smoothing: add clipped gaussian noise to the
        # target action, then clamp back into the action space bounds.
        target_noise_clip = policy.config["target_noise_clip"]
        clipped_normal_sample = torch.clamp(
            torch.normal(
                mean=torch.zeros(policy_tp1.size()),
                std=policy.config["target_noise"]).to(policy_tp1.device),
            -target_noise_clip, target_noise_clip)
        policy_tp1_smoothed = torch.min(
            torch.max(
                policy_tp1 + clipped_normal_sample,
                torch.tensor(
                    policy.action_space.low,
                    dtype=torch.float32,
                    device=policy_tp1.device)),
            torch.tensor(
                policy.action_space.high,
                dtype=torch.float32,
                device=policy_tp1.device))
    else:
        # No smoothing, just use deterministic actions.
        policy_tp1_smoothed = policy_tp1
    # Q-net(s) evaluation.
    # prev_update_ops = set(tf1.get_collection(tf.GraphKeys.UPDATE_OPS))
    # Q-values for given actions & observations in given current
    q_t = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS])
    # Q-values for current policy (no noise) in given current state
    q_t_det_policy = model.get_q_values(model_out_t, policy_t)
    actor_loss = -torch.mean(q_t_det_policy)
    if twin_q:
        twin_q_t = model.get_twin_q_values(model_out_t,
                                           train_batch[SampleBatch.ACTIONS])
    # q_batchnorm_update_ops = list(
    #     set(tf1.get_collection(tf.GraphKeys.UPDATE_OPS)) - prev_update_ops)
    # Target q-net(s) evaluation.
    q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
                                             policy_tp1_smoothed)
    if twin_q:
        twin_q_tp1 = policy.target_model.get_twin_q_values(
            target_model_out_tp1, policy_tp1_smoothed)
    q_t_selected = torch.squeeze(q_t, axis=len(q_t.shape) - 1)
    if twin_q:
        twin_q_t_selected = torch.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
        # Clipped double-Q: use the minimum of the two target Q estimates.
        q_tp1 = torch.min(q_tp1, twin_q_tp1)
    q_tp1_best = torch.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
    # Zero out the bootstrap term on terminal transitions.
    q_tp1_best_masked = \
        (1.0 - train_batch[SampleBatch.DONES].float()) * \
        q_tp1_best
    # Compute RHS of bellman equation.
    q_t_selected_target = (train_batch[SampleBatch.REWARDS] +
                           gamma**n_step * q_tp1_best_masked).detach()
    # Compute the error (potentially clipped).
    if twin_q:
        td_error = q_t_selected - q_t_selected_target
        twin_td_error = twin_q_t_selected - q_t_selected_target
        if use_huber:
            errors = huber_loss(td_error, huber_threshold) \
                + huber_loss(twin_td_error, huber_threshold)
        else:
            errors = 0.5 * \
                (torch.pow(td_error, 2.0) + torch.pow(twin_td_error, 2.0))
    else:
        td_error = q_t_selected - q_t_selected_target
        if use_huber:
            errors = huber_loss(td_error, huber_threshold)
        else:
            errors = 0.5 * torch.pow(td_error, 2.0)
    # Importance-weighted (PER) critic loss.
    critic_loss = torch.mean(train_batch[PRIO_WEIGHTS] * errors)
    # Add l2-regularization if required.
    if l2_reg is not None:
        for name, var in policy.model.policy_variables(as_dict=True).items():
            if "bias" not in name:
                actor_loss += (l2_reg * l2_loss(var))
        for name, var in policy.model.q_variables(as_dict=True).items():
            if "bias" not in name:
                critic_loss += (l2_reg * l2_loss(var))
    # Model self-supervised losses.
    if policy.config["use_state_preprocessor"]:
        # Expand input_dict in case custom_loss' need them.
        input_dict[SampleBatch.ACTIONS] = train_batch[SampleBatch.ACTIONS]
        input_dict[SampleBatch.REWARDS] = train_batch[SampleBatch.REWARDS]
        input_dict[SampleBatch.DONES] = train_batch[SampleBatch.DONES]
        input_dict[SampleBatch.NEXT_OBS] = train_batch[SampleBatch.NEXT_OBS]
        [actor_loss, critic_loss] = model.custom_loss(
            [actor_loss, critic_loss], input_dict)
    # Store values for stats function.
    policy.actor_loss = actor_loss
    policy.critic_loss = critic_loss
    policy.td_error = td_error
    policy.q_t = q_t
    # Return two loss terms (corresponding to the two optimizers, we create).
    return policy.actor_loss, policy.critic_loss
def make_ddpg_optimizers(policy, config):
    """Create separate Adam optimizers for the actor and critic losses.

    Returned in the same order as the two loss terms of the loss function.
    Epsilon is set to match tf.keras.optimizers.Adam's default.
    """
    actor_params = policy.model.policy_variables()
    critic_params = policy.model.q_variables()
    policy._actor_optimizer = torch.optim.Adam(
        params=actor_params, lr=config["actor_lr"], eps=1e-7)
    policy._critic_optimizer = torch.optim.Adam(
        params=critic_params, lr=config["critic_lr"], eps=1e-7)
    return policy._actor_optimizer, policy._critic_optimizer
def apply_gradients_fn(policy):
    """Step the optimizers, updating the actor only every `policy_delay` steps."""
    # TD3-style delayed policy updates: the critic steps every call, the
    # actor only when the step counter is a multiple of `policy_delay`.
    if policy.global_step % policy.config["policy_delay"] == 0:
        policy._actor_optimizer.step()
    policy._critic_optimizer.step()
    # Count this update op.
    policy.global_step += 1
def build_ddpg_stats(policy, batch):
    """Report the losses and Q-value / TD-error statistics stored by the loss fn."""
    q_values = policy.q_t
    td = policy.td_error
    return {
        "actor_loss": policy.actor_loss,
        "critic_loss": policy.critic_loss,
        "mean_q": torch.mean(q_values),
        "max_q": torch.max(q_values),
        "min_q": torch.min(q_values),
        "mean_td_error": torch.mean(td),
        "td_error": td,
    }
def before_init_fn(policy, obs_space, action_space, config):
    """Initialize the counter of update operations (used for actor-update delay)."""
    policy.global_step = 0
class ComputeTDErrorMixin:
    """Adds `policy.compute_td_error(...)`, used by prioritized replay to
    refresh per-sample priorities without performing an optimizer step."""
    def __init__(self, loss_fn):
        def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask,
                             importance_weights):
            input_dict = self._lazy_tensor_dict(
                SampleBatch({
                    SampleBatch.CUR_OBS: obs_t,
                    SampleBatch.ACTIONS: act_t,
                    SampleBatch.REWARDS: rew_t,
                    SampleBatch.NEXT_OBS: obs_tp1,
                    SampleBatch.DONES: done_mask,
                    PRIO_WEIGHTS: importance_weights,
                }))
            # Do forward pass on loss to update td errors attribute
            # (one TD-error value per item in batch to update PR weights).
            loss_fn(self, self.model, None, input_dict)
            # Self.td_error is set within actor_critic_loss call.
            return self.td_error
        self.compute_td_error = compute_td_error
class TargetNetworkMixin:
    """Adds (soft) target-network synchronization to the policy."""
    def __init__(self):
        # Start with the target network identical to the live network.
        self.update_target(tau=1.0)

    def update_target(self, tau=None):
        """Sync the target model from the live model.

        tau=1.0 copies the weights outright; tau<1.0 performs a polyak
        (soft) update. A falsy tau falls back to config["tau"].
        """
        tau = tau or self.config.get("tau")
        if tau == 1.0:
            # Full sync from Q-model to target Q-model.
            self.target_model.load_state_dict(self.model.state_dict())
            return
        # Partial (soft) sync using tau-synching.
        live_vars = self.model.variables()
        frozen_vars = self.target_model.variables()
        assert len(live_vars) == len(frozen_vars), (live_vars, frozen_vars)
        for src, dst in zip(live_vars, frozen_vars):
            dst.data = tau * src.data + \
                (1.0 - tau) * dst.data
def setup_late_mixins(policy, obs_space, action_space, config):
    """Run the mixin constructors once models and losses exist (before_loss_init hook)."""
    ComputeTDErrorMixin.__init__(policy, ddpg_actor_critic_loss)
    TargetNetworkMixin.__init__(policy)
# Torch DDPG/TD3 policy assembled from the functional pieces defined above.
DDPGTorchPolicy = build_policy_class(
    name="DDPGTorchPolicy",
    framework="torch",
    loss_fn=ddpg_actor_critic_loss,
    get_default_config=lambda: ray.rllib.agents.ddpg.ddpg.DEFAULT_CONFIG,
    stats_fn=build_ddpg_stats,
    postprocess_fn=postprocess_nstep_and_prio,  # n-step returns + PER priorities
    extra_grad_process_fn=apply_grad_clipping,
    optimizer_fn=make_ddpg_optimizers,  # two optimizers: actor & critic
    validate_spaces=validate_spaces,
    before_init=before_init_fn,
    before_loss_init=setup_late_mixins,
    action_distribution_fn=get_distribution_inputs_and_class,
    make_model_and_action_dist=build_ddpg_models_and_action_dist,
    apply_gradients_fn=apply_gradients_fn,  # delayed actor updates
    mixins=[
        TargetNetworkMixin,
        ComputeTDErrorMixin,
    ])
|
"""
"""
import os
import numpy as np
import pandas as pd
import gams
import logging
import builtins
import time
from copy import deepcopy
from .utility import index_from_symbol, symbol_is_scalar, is_iterable, map_lowest_level, index_names_from_symbol, \
all_na, map_to_int_where_possible
import gams2numpy
logger = logging.getLogger(__name__)
class GamsPandasDatabase:
"""
GamsPandasDatabase converts sets, parameters, and variables between a GAMS database and Pandas series.
When as symbol is first retrieved it is converted to a Pandas series and stored in self.series
Changes to retrieved series are written to the GAMS database on export.
"""
    def __init__(self, database=None, workspace=None, auto_sort_index=True):
        """Wrap *database*; a database (and if needed a workspace) is created on demand."""
        if database is None:
            if workspace is None:
                workspace = gams.GamsWorkspace()
            database = workspace.add_database()
        self.database = database
        self.gams2numpy = gams2numpy.Gams2Numpy(database.workspace.system_directory)
        self.auto_sort_index = auto_sort_index
        # Cache of Pandas representations keyed by symbol name; written back on export.
        self.series = {}
    def __getattr__(self, item):
        # Fallback for missing attributes: delegate to item lookup so symbols
        # can be accessed as attributes (db.foo == db["foo"]).
        return self[item]
    def copy(self):
        """Return an independent copy with a new GAMS database and deep-copied series cache."""
        # Bypass __init__ (avoids creating a fresh workspace), then copy attributes.
        obj = type(self).__new__(self.__class__)
        obj.__dict__.update(self.__dict__)
        # Seed a new GAMS database from the current one so changes don't leak back.
        obj.database = self.database.workspace.add_database(source_database=self.database)
        obj.series = deepcopy(self.series)
        return obj
    def merge(self, other, symbol_names=None, inplace=False):
        """
        Merge two GamsPandasDatabases.
        symbol_names: list of symbol names to get from other. If None, all symbols in other are merged.
        """
        if inplace:
            db = self
        else:
            db = self.copy()
        if symbol_names is None:
            symbol_names = other.symbols
        # Sets are copied first — presumably so variables/parameters defined
        # over them can resolve their domains; confirm before reordering.
        for name in other.sets:
            if name in symbol_names:
                series = other[name].texts
                db.create_set(series.name, series.index, other[name].explanatory_text, series)
        for name in other.variables:
            if name in symbol_names:
                db.add_variable_from_series(other[name], other[name].explanatory_text)
        for name in other.parameters:
            if name in symbol_names:
                db.add_parameter_from_series(other[name], other[name].explanatory_text)
        return db
@property
def symbols(self):
"""Dictionary of all symbols in the underlying GAMS database"""
return {symbol.name: symbol for symbol in self.database}
@property
def sets(self):
"""Dictionary of all sets in the underlying GAMS database"""
return {symbol.name: symbol for symbol in self.database if isinstance(symbol, gams.GamsSet)}
@property
def variables(self):
"""Dictionary of all variables in the underlying GAMS database"""
return {symbol.name: symbol for symbol in self.database if isinstance(symbol, gams.GamsVariable)}
@property
def parameters(self):
"""Dictionary of all parameters in the underlying GAMS database"""
return {symbol.name: symbol for symbol in self.database if isinstance(symbol, gams.GamsParameter)}
@property
def equations(self):
"""Dictionary of all equations in the underlying GAMS database"""
return {symbol.name: symbol for symbol in self.database if isinstance(symbol, gams.GamsEquation)}
    def add_to_builtins(self, *args):
        """Retrieve any number of symbol names from the database and add their Pandas representations to the global namespace."""
        for identifier in args:
            # Assigning onto the builtins module makes the name visible everywhere.
            setattr(builtins, identifier, self[identifier])
    def get(self, *args):
        """Retrieve any number of symbol names and return a list of their Pandas representations."""
        return [self[i] for i in args]
    def add_parameter_from_dataframe(self, identifier, df, explanatory_text="", add_missing_domains=False,
                                     value_column_index=-1):
        """Add parameter symbol to database based on a Pandas DataFrame."""
        # Columns before the value column are domains (default: values in the
        # last column, all preceding columns are domains).
        domains = list(df.columns[:value_column_index:])
        for d in domains:
            if d not in self:
                if add_missing_domains:
                    self.create_set(name=d, index=df[d].unique(), explanatory_text="")
                else:
                    raise KeyError(
                        f"'{d}' is not a set in the database. Enable add_missing_domains or add the set to the database manually.")
        self.database.add_parameter_dc(identifier, domains, explanatory_text)
        if domains:
            series = df.set_index(domains).iloc[:, 0]
        else:
            # Scalar parameter: a single unindexed value column.
            series = df[df.columns[0]]
        self.series[identifier] = series
def add_parameter_from_series(self, series, explanatory_text="", add_missing_domains=False):
"""Add parameter symbol to database based on a Pandas series."""
if len(series) == 1 and series.index.name in [None, "*"]:
df = pd.DataFrame(series)
else:
df = series.reset_index()
self.add_parameter_from_dataframe(series.name, df, explanatory_text, add_missing_domains)
    def add_variable_from_dataframe(self, identifier, df, explanatory_text="", add_missing_domains=False,
                                    value_column_index=-1):
        """Add variable symbol to database based on a Pandas DataFrame."""
        # Columns before the value column are domains (default: values in the
        # last column, all preceding columns are domains).
        domains = list(df.columns[:value_column_index:])
        for d in domains:
            if d not in self:
                if add_missing_domains:
                    self.create_set(name=d, index=df[d].unique(), explanatory_text="")
                else:
                    raise KeyError(
                        f"'{d}' is not a set in the database. Enable add_missing_domains or add the set to the database manually.")
        self.database.add_variable_dc(identifier, gams.VarType.Free, domains, explanatory_text)
        if domains:
            series = df.set_index(domains).iloc[:, 0]
        else:
            # Scalar variable: a single unindexed value column.
            series = df[df.columns[0]]
        self.series[identifier] = series
def add_variable_from_series(self, series, explanatory_text="", add_missing_domains=False):
"""Add variable symbol to database based on a Pandas series."""
if len(series) == 1 and series.index.name in [None, "*"]:
df = pd.DataFrame(series)
else:
df = series.reset_index()
self.add_variable_from_dataframe(series.name, df, explanatory_text, add_missing_domains)
def get_index(self, x):
if x is None or isinstance(x, pd.Index):
return x
elif isinstance(x, str):
return self[x]
elif len(x) and isinstance(x[0], (pd.Index, tuple, list)):
multi_index = pd.MultiIndex.from_product(x)
multi_index.names = [getattr(i, "name", None) for i in x]
return multi_index
else:
return pd.Index(x)
    def create_set(self, name, index, explanatory_text="", texts=None, domains=None):
        """
        Add a new GAMS Set to the database and return a Pandas representation of the Set.
        :param str name: Name of the set
        :param iterable index: Iterable of record keys to be added to the set
        :param str explanatory_text: Explanatory text added to the GAMS set
        :param iterable texts: Iterable of record labels - should match the size of the index parameter.
        :param iterable domains: Names of domains that the set should be defined over
        :return: Pandas Index
        """
        # A list of Index objects defines a multi-dimensional set via their product.
        if len(index) and isinstance(index[0], pd.Index):
            multi_index = pd.MultiIndex.from_product(index)
            multi_index.names = [getattr(i, "name", None) for i in index]
            index = multi_index
        elif isinstance(index, pd.Index):
            index = index.copy()
        else:
            index = pd.Index(index)
        index.explanatory_text = explanatory_text
        if texts is None:
            # Default record labels: string representation of each key.
            texts = map_lowest_level(str, index)
        index.texts = pd.Series(texts, index=index)
        index.texts.name = index.name
        if domains is None:
            # "*" is the universal domain; used for unnamed or self-referencing levels.
            domains = ["*" if i in (None, name) else i for i in self.get_domains_from_index(index, name)]
        index.domains = domains
        index.names = domains
        index.name = name
        self.database.add_set_dc(index.name, domains, explanatory_text)
        self.series[index.name] = index
        return self[name]
    def __create_variable_or_parameter(self, symbol_type, name, index, explanatory_text, data, dtype, copy, add_missing_domains):
        """Shared implementation behind create_variable / create_parameter.

        Dispatches on how the caller supplied the data: explicit index, a
        DataFrame, a Series, a nested iterable, a flat iterable, or a scalar.
        Returns the cached Pandas representation of the new symbol.
        """
        if index is not None:
            # pd.Series positional signature: (data, index, dtype, name, copy).
            series = pd.Series(data, self.get_index(index), dtype, name, copy)
            series.explanatory_text = explanatory_text
            getattr(self, f"add_{symbol_type}_from_series")(series, explanatory_text, add_missing_domains)
        elif isinstance(data, pd.DataFrame):
            getattr(self, f"add_{symbol_type}_from_dataframe")(name, data, explanatory_text, add_missing_domains)
        elif isinstance(data, pd.Series):
            getattr(self, f"add_{symbol_type}_from_series")(data, explanatory_text, add_missing_domains)
        else:
            if is_iterable(data) and len(data) and is_iterable(data[0]):
                # Nested iterable: register an empty symbol with the right
                # dimension; records are filled by the assignment below.
                self.__add_variable_or_parameter_to_database(symbol_type, name, len(data[0]), explanatory_text)
            elif is_iterable(data):
                getattr(self, f"add_{symbol_type}_from_dataframe")(name, pd.DataFrame(data), explanatory_text, add_missing_domains)
            else:
                # Scalar (0-dimensional) symbol.
                self.__add_variable_or_parameter_to_database(symbol_type, name, 0, explanatory_text)
            self[name] = data
        return self[name]
def __add_variable_or_parameter_to_database(self, symbol_type, name, dim, explanatory_text):
assert symbol_type in ["parameter", "variable"]
if symbol_type == "parameter":
self.database.add_parameter(name, dim, explanatory_text)
elif symbol_type == "variable":
self.database.add_variable(name, dim, gams.VarType.Free, explanatory_text)
    def create_variable(self, name, index=None, explanatory_text="", data=None, dtype=None, copy=False, add_missing_domains=False):
        """Add a new GAMS variable to the database and return its Pandas representation."""
        return self.__create_variable_or_parameter("variable", name, index, explanatory_text, data, dtype, copy, add_missing_domains)
    def create_parameter(self, name, index=None, explanatory_text="", data=None, dtype=None, copy=False, add_missing_domains=False):
        """Add a new GAMS parameter to the database and return its Pandas representation."""
        return self.__create_variable_or_parameter("parameter", name, index, explanatory_text, data, dtype, copy, add_missing_domains)
    @staticmethod
    def get_domains_from_index(index, name):
        """Return the domain names of *index*, replacing None or self-references with "*"."""
        if hasattr(index, "domains"):
            # "domains" is the ad-hoc attribute attached by create_set.
            domains = index.domains
        elif hasattr(index, "name"):
            # NOTE(review): the check is for "name" but the code reads index.names.
            # Every pd.Index has both attributes, so the final branch below is
            # unreachable for pandas indexes — possibly a typo for
            # hasattr(index, "names"). Confirm intent before changing.
            domains = index.names
        else:
            domains = [index.name]
        return ["*" if i in (None, name) else i for i in domains]
    @staticmethod
    @np.vectorize
    def detuple(t):
        """Returns the iterable unchanged, except if it is a singleton, then the element is returned"""
        # NOTE: wrapped in np.vectorize, so the callable is applied element-wise
        # to array-like arguments (and scalar results come back as numpy types).
        if isinstance(t, str):
            # Strings are sized iterables too; pass them through untouched.
            return t
        try:
            if len(t) == 1:
                return t[0]
        except TypeError:
            # Non-sized objects (no len) are returned unchanged.
            pass
        return t
def series_from_symbol(self, symbol, attributes, attribute):
index_names = index_names_from_symbol(symbol)
try:
df = pd.DataFrame(
self.gams2numpy.gmdReadSymbolStr(self.database, symbol.name),
columns=[*index_names, *attributes],
)
except: # In case gams2numpy breaks, try using the regular API
df = pd.DataFrame(
[[*rec.keys, getattr(rec, attribute)] for rec in list(symbol)],
columns=[*index_names, attribute],
)
for i in index_names:
df[i] = map_to_int_where_possible(df[i])
series = df.set_index(index_names)[attribute].astype(float)
series.name = symbol.name
return series
def series_from_variable(self, symbol, attr="level"):
return self.series_from_symbol(symbol, attributes=["level", "marginal", "lower", "upper", "scale"], attribute="level")
    def series_from_parameter(self, symbol):
        """Get a GAMS parameter's values as a Pandas series."""
        return self.series_from_symbol(symbol, attributes=["value"], attribute="value")
    def __getitem__(self, item):
        """Return a Pandas representation of symbol *item*, caching on first access.

        Sets become Index objects; variables/parameters become Series (or a bare
        scalar for 0-dimensional symbols); equations are returned as-is, uncached.
        """
        if item not in self.series:
            symbol = self.symbols[item]
            if isinstance(symbol, gams.GamsSet):
                self.series[item] = index_from_symbol(symbol)
            elif isinstance(symbol, gams.GamsVariable):
                if symbol_is_scalar(symbol):
                    # Scalar variable: cache the level value itself (None if empty).
                    self.series[item] = symbol.find_record().level if len(symbol) else None
                else:
                    self.series[item] = self.series_from_variable(symbol)
                    if self.auto_sort_index:
                        self.series[item] = self.series[item].sort_index()
            elif isinstance(symbol, gams.GamsParameter):
                if symbol_is_scalar(symbol):
                    # Scalar parameter: cache the value itself (None if empty).
                    self.series[item] = symbol.find_record().value if len(symbol) else None
                else:
                    self.series[item] = self.series_from_parameter(symbol)
                    if self.auto_sort_index:
                        self.series[item] = self.series[item].sort_index()
            elif isinstance(symbol, gams.GamsEquation):
                # Equations have no Pandas representation here; return raw object.
                return symbol
        return self.series[item]
    def __setitem__(self, name, value):
        """Assign *value* to symbol *name*.

        For an existing symbol, records are written through to the GAMS database;
        a scalar assigned to a non-scalar symbol is broadcast over its index.
        For an unknown name the value is only cached locally.
        """
        if name in self.symbols:
            if not is_iterable(value) and is_iterable(self[name]):  # If assigning a scalar to all records in a series
                value = pd.Series(value, index=self[name].index)
            self.set_symbol_records(self.symbols[name], value)
            self.series[name] = value
        else:
            # Not yet in the database: keep in the local cache under *name*.
            if not value.name:
                value.name = name
            self.series[name] = value
    def items(self):
        """Return (name, GAMS symbol) pairs, like dict.items()."""
        return self.symbols.items()
    def keys(self):
        """Return the symbol names, like dict.keys()."""
        return self.symbols.keys()
    def values(self):
        """Return the GAMS symbol objects, like dict.values()."""
        return self.symbols.values()
def save_series_to_database(self, series_names=None):
"""Save Pandas series to GAMS database"""
if series_names is None:
series_names = self.series.keys()
for symbol_name in series_names:
self.set_symbol_records(self.symbols[symbol_name], self.series[symbol_name])
    def export(self, path):
        """Save changes to database and export database to GDX file."""
        self.save_series_to_database()
        # GDX export can fail transiently (e.g. the target file is locked by
        # another process); retry with a linearly increasing back-off.
        for i in range(1, 10):
            try:
                self.database.export(os.path.abspath(path))
                break
            except gams.workspace.GamsException:
                time.sleep(i)
        else:
            # Every retry failed: attempt once more outside the try so the
            # underlying exception propagates to the caller.
            self.database.export(os.path.abspath(path))
    def set_parameter_records(self, symbol, value):
        """Replace all records of a GAMS parameter with the values in *value*.

        An all-NA value just clears the symbol; a scalar symbol gets a single
        record; otherwise *value* must be a Series indexed by the domains.
        """
        symbol.clear()
        if all_na(value): pass
        elif symbol_is_scalar(symbol):
            symbol.add_record().value = value
        else:
            df = value.reset_index()
            # GAMS record keys are strings; stringify the index columns in bulk.
            df[value.index.names] = df[value.index.names].astype(str)
            self.gams2numpy.gmdFillSymbolStr(self.database, symbol, df.to_numpy())
    @staticmethod
    def set_variable_records(symbol, value):
        """Replace all records of a GAMS variable with the levels in *value*.

        An all-NA value just clears the symbol; a scalar symbol (or a singleton
        series with default integer key 0) gets a single record.
        """
        symbol.clear()
        if all_na(value): pass
        elif symbol_is_scalar(symbol):
            symbol.add_record().level = value
        elif list(value.keys()) == [0]:  # If singleton series
            symbol.add_record().level = value[0]
        else:
            # One record per series element; keys are stringified per level.
            for k, v in value.items():
                symbol.add_record(map_lowest_level(str, k)).level = v
    @staticmethod
    def set_set_records(symbol, value):
        """Replace all records of a GAMS set from an Index or Series of labels."""
        if isinstance(value, pd.Index):
            # Prefer the explicit record labels attached by create_set, if present;
            # otherwise use the stringified keys themselves as labels.
            texts = getattr(value, "texts", None)
            value = texts if texts is not None else pd.Series(map(str, value), index=value)
        symbol.clear()
        if all_na(value): pass
        elif symbol_is_scalar(symbol):
            symbol.add_record().text = str(value)
        elif list(value.keys()) == [0]:  # If singleton series
            symbol.add_record().text = value[0]
        else:
            for k, v in value.items():
                symbol.add_record(map_lowest_level(str, k)).text = v
def set_symbol_records(self, symbol, value):
"""Convert Pandas series to records in a GAMS Symbol"""
if isinstance(symbol, gams.GamsSet):
self.set_set_records(symbol, value)
elif isinstance(symbol, gams.GamsVariable):
self.set_variable_records(symbol, value)
elif isinstance(symbol, gams.GamsParameter):
self.set_parameter_records(symbol, value)
else:
TypeError(f"{type(symbol)} is not (yet) supported by gams_pandas")
    def __iter__(self):
        """Iterate over the symbol names in the database."""
        return iter(self.symbols)
    def __len__(self):
        """Return the number of symbols in the database."""
        return len(self.symbols)
    def get_text(self, name):
        """Get explanatory text of GAMS symbol.

        :param str name: Name of the symbol in the database.
        """
        return self.symbols[name].get_text()
|
<filename>population_gravity/data_prep.py
import os
import logging
import pkg_resources
import rasterio
import numpy as np
import pandas as pd
from rasterio.io import MemoryFile
import population_gravity.downscale_utilities as utils
def mask_raster(metadata, file_name, raster_object, source_window):
    """Subset the mosiac using a window of the source historical raster and write to file.
    :param metadata: Rasterio profile/metadata dict used to write the output raster
    :type metadata: dict
    :param file_name: Full path with file name and extension to the output file
    :type file_name: str
    :param raster_object: Rasterio object
    :param source_window: A rasterio.windows.Window object from the bounding box of the target for
                            the full mosaic
    :returns: The masked 2D array that was written to *file_name*
    """
    # read only the window of the mosaic to an array
    masked = raster_object.read(1, window=source_window)
    # write the mosiac raster to file
    with rasterio.open(file_name, 'w', **metadata) as dest:
        dest.write(masked, indexes=1)
    return masked
def mask_raster_memory(metadata, raster_object, source_window):
    """Subset the mosiac using a window of the source historical raster and write to memory.
    :param metadata: Rasterio profile/metadata dict used to create the in-memory raster
    :type metadata: dict
    :param raster_object: Rasterio object
    :param source_window: A rasterio.windows.Window object from the bounding box of the target for
                            the full mosaic
    :returns: An open in-memory rasterio dataset holding the masked window
    """
    # read only the window of the mosaic to an array
    masked = raster_object.read(1, window=source_window)
    # Bug fix: the MemoryFile must outlive the returned dataset. Using it as a
    # context manager closed the backing buffer on exit, invalidating the
    # dataset handed back to the caller. The caller is responsible for closing
    # the returned dataset (which releases the MemoryFile).
    memfile = MemoryFile()
    dataset = memfile.open(**metadata)
    dataset.write(masked, indexes=1)
    return dataset
def construct_file_name(state_name, prev_step, designation, scenario, output_directory, run_number='', suffix='',
                        extension='.tif'):
    """Construct output file name.
    :param state_name: Name of state
    :type state_name: str
    :param prev_step: Previous time step.
    :type prev_step: int, str
    :param designation: Either 'urban', 'rural', or 'total'
    :type designation: str
    :param scenario: Scenario name embedded in the file name
    :type scenario: str
    :param output_directory: Directory the output file will live in
    :type output_directory: str
    :param run_number: Optional run identifier; an int is separated from the
        time step with an underscore. Ignored when *suffix* is given.
    :param suffix: String to append to end of file name before extension
    :type suffix: str
    :param extension: Raster extension including the dot; default '.tif'
    :type extension: str
    :returns: Full file path for an output raster
    """
    if suffix:
        # A non-empty suffix replaces any run number.
        fle = f"{state_name}_1km_{scenario}_{designation}_{prev_step}{suffix}{extension}"
    else:
        # Integer run numbers get an underscore separator; string run numbers
        # (including the default '') are appended as-is.
        delim = '_' if isinstance(run_number, int) else ''
        fle = f"{state_name}_1km_{scenario}_{designation}_{prev_step}{delim}{run_number}{suffix}{extension}"
    return os.path.join(output_directory, fle)
def get_state_neighbors(state_name):
    """Get all neighboring states and the target state from lookup file as a list.

    :param state_name: Lower-case name of the target state
    :returns: Lower-case state names with the target state first
    """
    # Packaged lookup table of states within 150 km of each target state.
    df = pd.read_csv(pkg_resources.resource_filename('population_gravity', 'data/neighboring_states_150km.csv'))
    # get the actual state name from the near states because they are not lower case like what is being passed
    # (takes the last column of the first matching row; assumes the CSV has
    #  'target_state' and 'near_state' columns — confirm against the data file)
    state_find = df.loc[(df['target_state'] == state_name) & (df['near_state'].str.lower() == state_name)].values[0][-1]
    # extract a list of all neighboring states including the target state
    state_list = df.loc[df['target_state'] == state_name]['near_state'].to_list()
    # ensure that the target state comes first to prevent any issue with the reverse painter's algorithm for merge
    state_list.insert(0, state_list.pop(state_list.index(state_find)))
    # make all lower case
    return [i.lower() for i in state_list]
def construct_file_list(prev_step, setting, state_name, template_raster, one_dimension_indices):
    """Construct a list of arrays from rasters or arrays.

    For the target state and each of its neighbors, locate the previous time
    step's output as a GeoTIFF, 1D/2D NumPy array, or gzipped CSV (checked in
    that order) and open/convert it to a rasterio dataset.

    :param prev_step: int. Previous time step; e.g., year
    :param setting: str. Either 'urban' or 'rural'
    :param state_name: str. Target state name (lower case)
    :param template_raster: Template raster used to rebuild rasters from arrays
    :param one_dimension_indices: Indices mapping 1D arrays back into the 2D grid
    #TODO: load prev years CSV files
    """
    out_list = []
    # Get all neighboring states including the target state as a list
    neighbors = get_state_neighbors(state_name)
    for i in neighbors:
        # check for either the 'tif', 'npy', or 'csv' files in the output directory, use 'tif' first
        # NOTE(review): construct_file_name also requires `scenario` and
        # `output_directory` as positional arguments, neither of which is in
        # scope here — as written these calls raise TypeError. Confirm the
        # intended signature/arguments before relying on this function.
        tif = construct_file_name(i, prev_step, setting, extension='.tif')
        npy1d = construct_file_name(i, prev_step, setting, extension='_1d.npy')
        npy2d = construct_file_name(i, prev_step, setting, extension='_2d.npy')
        csv_gz = construct_file_name(i, prev_step, setting, extension='.csv.gz')
        if os.path.isfile(tif):
            logging.info(f"Using file '{tif}' for previous time step mosaic of neighboring states.")
            out_list.append(rasterio.open(tif))
        elif os.path.isfile(npy1d):
            logging.info(f"Using file '{npy1d}' for previous time step mosaic of neighboring states.")
            # load npy file to array
            array1d = np.load(npy1d)
            out_list.append(
                utils.array_to_raster_memory(template_raster, array1d, one_dimension_indices))
        elif os.path.isfile(npy2d):
            logging.info(f"Using file '{npy2d}' for previous time step mosaic of neighboring states.")
            # load npy file to array
            array2d = np.load(npy2d)
            out_list.append(utils.array2d_to_raster_memory(array2d, raster_profile=template_raster[4]))
        elif os.path.isfile(csv_gz):
            logging.info(f"Using file '{csv_gz}' for previous time step mosaic of neighboring states.")
            array1d = pd.read_csv(csv_gz, compression='gzip', sep=',')['value'].values
            out_list.append(
                utils.array_to_raster_memory(template_raster, array1d, one_dimension_indices))
        else:
            raise FileNotFoundError(f"No spatial file found for '{i}' for setting '{setting}' and year '{prev_step}'")
    return out_list
def mosaic_neighbors(yr, metadata, bbox):
    """Mosaic the previous time step's urban and rural outputs of the target
    state and its neighbors, then clip each mosaic to the target bounding box
    in memory.

    :param yr: Previous time step (year) whose outputs are mosaicked
    :param metadata: Rasterio profile/metadata dict for the mosaics
    :param bbox: Bounding box (left, bottom, right, top) of the target state
    :returns: (urban, rural) in-memory masked datasets

    NOTE(review): construct_file_list is called with only two of its five
    required arguments, and mask_raster_memory is called without its
    `metadata` parameter — as written both calls raise TypeError. Confirm the
    intended call signatures before use.
    """
    # build raster list for all neighboring raster outputs from the previous time step
    urban_file_list = construct_file_list(yr, 'urban')
    rural_file_list = construct_file_list(yr, 'rural')
    # build mosaics
    urban_mosaic = utils.mosaic_memory(urban_file_list, metadata.copy())
    rural_mosaic = utils.mosaic_memory(rural_file_list, metadata.copy())
    # create a rasterio.windows.Window object from the bounding box of the target for the full mosaic
    target_window = urban_mosaic.window(*bbox)
    # write the urban masked mosiac raster to memory
    urban_mask_file = mask_raster_memory(urban_mosaic, target_window)
    urban_mosaic.close()
    # write the urban masked mosiac raster to memory
    rural_mask_file = mask_raster_memory(rural_mosaic, target_window)
    rural_mosaic.close()
    return urban_mask_file, rural_mask_file
|
<reponame>Sloth6/plant_growth<gh_stars>10-100
""" Evolution utilties shared by the 3 evolution modules (local, neat & novelty)
"""
from __future__ import print_function
import os, time, math, json
from os.path import join as pjoin
import MultiNEAT as NEAT
import numpy as np
from cymesh.shape_features import d2_features, a3_features
from coral_growth.simulate import simulate_genome
def create_initial_population(Form, params):
    """Create a randomized initial NEAT population sized for *Form*'s inputs/outputs.

    :param Form: Growth-form class providing calculate_inouts(params)
    :param params: Experiment parameters; params.neat holds the NEAT settings
    :returns: A NEAT.Population seeded from a minimal genome
    """
    # Create network size based off form and parameters.
    n_inputs, n_outputs = Form.calculate_inouts(params)
    genome_prototype = NEAT.Genome(
        0, # ID
        n_inputs,
        0, # NumHidden
        n_outputs,
        False, # FS_NEAT
        NEAT.ActivationFunction.UNSIGNED_SIGMOID, # Output activation function.
        NEAT.ActivationFunction.UNSIGNED_SIGMOID, # Hidden activation function.
        1, # Seed type, must be 1 to have hidden nodes.
        params.neat,
        0 # Number of hidden layers. Each will have NumHidden nodes
    )
    pop = NEAT.Population(
        genome_prototype, # Seed genome.
        params.neat,
        True, # Randomize weights.
        1.0, # Random Range.
        int(time.time()) # Random number generator seed.
    )
    return pop
def evaluate(Form, genome, traits, params):
    """Simulate a single genome and return its fitness.

    Returns 0 when the simulation asserts (e.g. a non-finite fitness).
    Prints a progress '.' on success.
    """
    try:
        simulated = simulate_genome(Form, genome, traits, [params])
        form = simulated[0]
        fitness = form.fitness()
        assert math.isfinite(fitness), 'Not-finite'
    except AssertionError as e:
        print('Exception:', e, end='', flush=True)
        return 0
    print('.', end='', flush=True)
    return fitness
def shape_descriptor(form, n=1024*1024):
    """Return a 64-dimensional shape feature vector (D2 + A3 histograms).

    A zero vector is returned for a failed simulation (*form* is None).
    """
    if form is None:
        return np.zeros(64)
    histogram_args = dict(n_points=n, n_bins=32, hrange=(0.0, 3.0))
    d2_hist = d2_features(form.mesh, **histogram_args)
    a3_hist = a3_features(form.mesh, **histogram_args)
    return np.hstack((d2_hist, a3_hist))
def evaluate_novelty(Form, genome, traits, params):
    """Simulate a genome and return (fitness, shape feature vector).

    Returns (0, zero-features) when the simulation asserts.
    Prints a progress '.' on success.
    """
    try:
        form = simulate_genome(Form, genome, traits, [params])[0]
        fitness = form.fitness()
        features = shape_descriptor(form)
        assert math.isfinite(fitness), 'Not-finite'
    except AssertionError as e:
        print('AssertionError:', e, end='', flush=True)
        return 0, shape_descriptor(None)
    print('.', end='', flush=True)
    return fitness, features
def evaluate_genomes_novelty(Form, genomes, params, pool):
    """Evaluate every genome — in parallel when *pool* is given, serially otherwise.

    :returns: (fitness_list, feature_list) as two tuples.
    """
    jobs = [(Form, genome, genome.GetGenomeTraits(), params) for genome in genomes]
    if pool:
        results = pool.starmap(evaluate_novelty, jobs)
    else:
        results = [evaluate_novelty(*job) for job in jobs]
    fitness_list, feature_list = zip(*results)
    return fitness_list, feature_list
def simulate_and_save(Form, genome, params, out_dir, generation, fitness, meanf):
    """Persist a generation's champion and re-simulate it with export enabled.

    Creates out_dir/<generation>/ containing genome.txt and traits.json,
    appends (generation, best, mean) to out_dir/scores.txt, and returns the
    result of the exporting simulation run.
    """
    export_folder = pjoin(out_dir, str(generation))
    os.mkdir(export_folder)
    # Raw genome for later replay.
    genome.Save(pjoin(export_folder, 'genome.txt'))
    # Append this generation's best and mean fitness to the running log.
    with open(pjoin(out_dir, 'scores.txt'), 'a') as f:
        f.write("%i\t%f\t%f\n" % (generation, fitness, meanf))
    traits = genome.GetGenomeTraits()
    with open(pjoin(export_folder, 'traits.json'), 'w+') as f:
        json.dump(traits, f, indent=2)
    return simulate_genome(Form, genome, traits, [params], export_folder=export_folder)
|
<filename>src/abaqus/Interaction/ConcentratedRadiationToAmbient.py
from abaqusConstants import *
from .Interaction import Interaction
from ..Region.Region import Region
class ConcentratedRadiationToAmbient(Interaction):
    """The ConcentratedRadiationToAmbient object defines radiant heat transfer between a point
    and its nonreflecting environment.
    The ConcentratedRadiationToAmbient object is derived from the Interaction object.
    Notes
    -----
    This object can be accessed by:
    .. code-block:: python
        import interaction
        mdb.models[name].interactions[name]
    """

    def __init__(self, name: str, createStepName: str, region: Region, ambientTemperature: float,
                 ambientTemperatureAmp: str, emissivity: float, nodalArea: float = 1,
                 explicitRegionType: SymbolicConstant = LAGRANGIAN, field: str = '',
                 distributionType: SymbolicConstant = UNIFORM):
        """This method creates a ConcentratedRadiationToAmbient object.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].ConcentratedRadiationToAmbient
        Parameters
        ----------
        name
            A String specifying the repository key.
        createStepName
            A String specifying the name of the step in which the ConcentratedRadiationToAmbient
            object is created.
        region
            A Region object specifying the region to which the concentrated radiation interaction is
            applied. The interaction is applied to each node in the region.
        ambientTemperature
            A Float specifying the reference ambient temperature, θ0.
        ambientTemperatureAmp
            A String specifying the name of the Amplitude object that gives the variation of the
            ambient temperature with time.Note:Use None in an Abaqus/Standard analysis to specify
            that the reference ambient temperature is applied immediately at the beginning of the
            step or linearly over the step. Use None in an Abaqus/Explicit analysis to specify that
            the reference ambient temperature is applied throughout the step.
        emissivity
            A Float specifying the emissivity, ϵ.
        nodalArea
            A Float specifying the area associated with the node where the concentrated radiation
            interaction is applied. The default value is 1.0.
        explicitRegionType
            A SymbolicConstant specifying how the concentrated radiation is applied to the boundary
            of an adaptive mesh domain. Possible values are LAGRANGIAN, SLIDING, and EULERIAN. The
            default value is LAGRANGIAN.Note:*explicitRegionType* applies only during an
            Abaqus/Explicit analysis.
        field
            A String specifying the name of the AnalyticalField object associated with this
            interaction. The *field* argument applies only when *distributionType*=ANALYTICAL_FIELD.
            The default value is an empty string.
        distributionType
            A SymbolicConstant specifying how the radiation is defined. Possible values are UNIFORM
            and ANALYTICAL_FIELD. The default value is UNIFORM.
        Returns
        -------
        A ConcentratedRadiationToAmbient object.
        """
        super().__init__()
        # Stub: no Python-side behavior here — presumably handled by the Abaqus
        # kernel when this API is journaled/replayed.
        pass

    def setValues(self, nodalArea: float = 1, explicitRegionType: SymbolicConstant = LAGRANGIAN,
                  field: str = '', distributionType: SymbolicConstant = UNIFORM):
        """This method modifies the data for an existing ConcentratedRadiationToAmbient object in
        the step where it is created.
        Parameters
        ----------
        nodalArea
            A Float specifying the area associated with the node where the concentrated radiation
            interaction is applied. The default value is 1.0.
        explicitRegionType
            A SymbolicConstant specifying how the concentrated radiation is applied to the boundary
            of an adaptive mesh domain. Possible values are LAGRANGIAN, SLIDING, and EULERIAN. The
            default value is LAGRANGIAN.Note:*explicitRegionType* applies only during an
            Abaqus/Explicit analysis.
        field
            A String specifying the name of the AnalyticalField object associated with this
            interaction. The *field* argument applies only when *distributionType*=ANALYTICAL_FIELD.
            The default value is an empty string.
        distributionType
            A SymbolicConstant specifying how the radiation is defined. Possible values are UNIFORM
            and ANALYTICAL_FIELD. The default value is UNIFORM.
        """
        # Stub: see note in __init__.
        pass

    def setValuesInStep(self, stepName: str, nodalArea: float = 1, field: str = '',
                        distributionType: SymbolicConstant = UNIFORM):
        """This method modifies the propagating data of an existing ConcentratedRadiationToAmbient
        object in the specified step.
        Parameters
        ----------
        stepName
            A String specifying the name of the step in which the interaction is modified.
        nodalArea
            A Float specifying the area associated with the node where the concentrated radiation
            interaction is applied. The default value is 1.0.
        field
            A String specifying the name of the AnalyticalField object associated with this
            interaction. The *field* argument applies only when *distributionType*=ANALYTICAL_FIELD.
            The default value is an empty string.
        distributionType
            A SymbolicConstant specifying how the radiation is defined. Possible values are UNIFORM
            and ANALYTICAL_FIELD. The default value is UNIFORM.
        """
        # Stub: see note in __init__.
        pass
"""
API operations on User objects.
"""
import copy
import json
import logging
import re
from collections import OrderedDict
from markupsafe import escape
from sqlalchemy import (
false,
or_,
true
)
from galaxy import (
exceptions,
util,
web
)
from galaxy.exceptions import ObjectInvalid
from galaxy.managers import (
api_keys,
users
)
from galaxy.managers.context import ProvidesUserContext
from galaxy.model import User, UserAddress
from galaxy.security.validate_user_input import (
validate_email,
validate_password,
validate_publicname
)
from galaxy.tools.toolbox.filters import FilterFactory
from galaxy.util import (
docstring_trim,
listify
)
from galaxy.web import (
expose_api,
expose_api_anonymous
)
from galaxy.web.form_builder import AddressField
from galaxy.webapps.base.controller import (
BaseAPIController,
BaseUIController,
UsesFormDefinitionsMixin,
UsesTagsMixin
)
from galaxy.webapps.base.webapp import GalaxyWebTransaction
log = logging.getLogger(__name__)
class UserAPIController(BaseAPIController, UsesTagsMixin, BaseUIController, UsesFormDefinitionsMixin):
    def __init__(self, app):
        """Wire up the user managers/serializers shared by the endpoints below."""
        super().__init__(app)
        self.user_manager = users.UserManager(app)
        self.user_serializer = users.UserSerializer(app)
        self.user_deserializer = users.UserDeserializer(app)
        self.api_key_manager = api_keys.ApiKeyManager(app)
    @expose_api
    def index(self, trans: ProvidesUserContext, deleted='False', f_email=None, f_name=None, f_any=None, **kwd):
        """
        GET /api/users
        GET /api/users/deleted
        Displays a collection (list) of users.
        :param deleted: (optional) If true, show deleted users
        :type deleted: bool
        :param f_email: (optional) An email address to filter on. (Non-admin
                        users can only use this if ``expose_user_email`` is ``True`` in
                        galaxy.ini)
        :type f_email: str
        :param f_name: (optional) A username to filter on. (Non-admin users
                       can only use this if ``expose_user_name`` is ``True`` in
                       galaxy.ini)
        :type f_name: str
        :param f_any: (optional) Filter on username OR email. (Non-admin users
                      can use this, the email filter and username filter will
                      only be active if their corresponding ``expose_user_*`` is
                      ``True`` in galaxy.ini)
        :type f_any: str
        """
        rval = []
        query = trans.sa_session.query(User)
        deleted = util.string_as_bool(deleted)
        # NOTE(review): filter values are interpolated into the LIKE pattern
        # unescaped, so user-supplied '%' and '_' act as wildcards (the query
        # itself is parameterized by SQLAlchemy). Confirm this is intended.
        if f_email and (trans.user_is_admin or trans.app.config.expose_user_email):
            query = query.filter(User.email.like("%%%s%%" % f_email))
        if f_name and (trans.user_is_admin or trans.app.config.expose_user_name):
            query = query.filter(User.username.like("%%%s%%" % f_name))
        if f_any:
            # Admins match on either field; others only on fields the instance exposes.
            if trans.user_is_admin:
                query = query.filter(or_(
                    User.email.like("%%%s%%" % f_any),
                    User.username.like("%%%s%%" % f_any)
                ))
            else:
                if trans.app.config.expose_user_email and trans.app.config.expose_user_name:
                    query = query.filter(or_(
                        User.email.like("%%%s%%" % f_any),
                        User.username.like("%%%s%%" % f_any)
                    ))
                elif trans.app.config.expose_user_email:
                    query = query.filter(User.email.like("%%%s%%" % f_any))
                elif trans.app.config.expose_user_name:
                    query = query.filter(User.username.like("%%%s%%" % f_any))
        if deleted:
            # only admins can see deleted users
            if not trans.user_is_admin:
                return []
            query = query.filter(User.table.c.deleted == true())
        else:
            # special case: user can see only their own user
            # special case2: if the galaxy admin has specified that other user email/names are
            #   exposed, we don't want special case #1
            if not trans.user_is_admin and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email:
                item = trans.user.to_dict(value_mapper={'id': trans.security.encode_id})
                return [item]
            query = query.filter(User.table.c.deleted == false())
        for user in query:
            item = user.to_dict(value_mapper={'id': trans.security.encode_id})
            # If NOT configured to expose_email, do not expose email UNLESS the user is self, or
            # the user is an admin
            if user is not trans.user and not trans.user_is_admin:
                # Strip the dict down to only the keys this instance exposes.
                expose_keys = ["id"]
                if trans.app.config.expose_user_name:
                    expose_keys.append("username")
                if trans.app.config.expose_user_email:
                    expose_keys.append("email")
                new_item = {}
                for key, value in item.items():
                    if key in expose_keys:
                        new_item[key] = value
                item = new_item
            # TODO: move into api_values
            rval.append(item)
        return rval
@expose_api_anonymous
def show(self, trans: ProvidesUserContext, id, deleted='False', **kwd):
"""
GET /api/users/{encoded_id}
GET /api/users/deleted/{encoded_id}
GET /api/users/current
Displays information about a user.
"""
deleted = util.string_as_bool(deleted)
try:
# user is requesting data about themselves
if id == "current":
# ...and is anonymous - return usage and quota (if any)
if not trans.user:
item = self.anon_user_api_value(trans)
return item
# ...and is logged in - return full
else:
user = trans.user
else:
user = self.get_user(trans, id, deleted=deleted)
# check that the user is requesting themselves (and they aren't del'd) unless admin
if not trans.user_is_admin:
assert trans.user == user
assert not user.deleted
except exceptions.ItemDeletionException:
raise
except Exception:
raise exceptions.RequestParameterInvalidException('Invalid user id specified', id=id)
return self.user_serializer.serialize_to_view(user, view='detailed')
@expose_api
def create(self, trans: GalaxyWebTransaction, payload: dict, **kwd):
"""
POST /api/users
Creates a new Galaxy user.
"""
if not trans.app.config.allow_user_creation and not trans.user_is_admin:
raise exceptions.ConfigDoesNotAllowException('User creation is not allowed in this Galaxy instance')
if trans.app.config.use_remote_user and trans.user_is_admin:
user = trans.get_or_create_remote_user(remote_user_email=payload['remote_user_email'])
elif trans.user_is_admin:
username = payload['username']
email = payload['email']
password = <PASSWORD>['password']
message = "\n".join((validate_email(trans, email),
validate_password(trans, password, password),
validate_publicname(trans, username))).rstrip()
if message:
raise exceptions.RequestParameterInvalidException(message)
else:
user = self.user_manager.create(email=email, username=username, password=password)
else:
raise exceptions.NotImplemented()
item = user.to_dict(view='element', value_mapper={'id': trans.security.encode_id,
'total_disk_usage': float})
return item
    @expose_api
    def update(self, trans: ProvidesUserContext, id: str, payload: dict, **kwd):
        """
        update( self, trans, id, payload, **kwd )
        * PUT /api/users/{id}
            updates the values for the item with the given ``id``
        :type id: str
        :param id: the encoded id of the item to update
        :type payload: dict
        :param payload: a dictionary of new attribute values
        :rtype: dict
        :returns: an error object if an error occurred or a dictionary containing
            the serialized item after any changes
        """
        current_user = trans.user
        user_to_update = self.user_manager.by_id(self.decode_id(id))
        # only allow updating other users if they're admin
        editing_someone_else = current_user != user_to_update
        is_admin = self.user_manager.is_admin(current_user)
        if editing_someone_else and not is_admin:
            raise exceptions.InsufficientPermissionsException('You are not allowed to update that user', id=id)
        # Attribute validation/assignment is delegated to the deserializer.
        self.user_deserializer.deserialize(user_to_update, payload, user=current_user, trans=trans)
        return self.user_serializer.serialize_to_view(user_to_update, view='detailed')
@expose_api
def delete(self, trans, id, **kwd):
"""
DELETE /api/users/{id}
delete the user with the given ``id``
Functionality restricted based on admin status
:param id: the encoded id of the user to delete
:type id: str
:param purge: (optional) if True, purge the user
:type purge: bool
"""
user_to_update = self.user_manager.by_id(self.decode_id(id))
if trans.user_is_admin:
purge = util.string_as_bool(kwd.get('purge', False))
if purge:
log.debug("Purging user %s", user_to_update)
self.user_manager.purge(user_to_update)
else:
self.user_manager.delete(user_to_update)
else:
if trans.user == user_to_update:
self.user_manager.delete(user_to_update)
else:
raise exceptions.InsufficientPermissionsException('You may only delete your own account.', id=id)
return self.user_serializer.serialize_to_view(user_to_update, view='detailed')
    @web.require_admin
    @expose_api
    def undelete(self, trans, id, **kwd):
        """
        POST /api/users/deleted/{id}/undelete
        Undelete the user with the given ``id``
        Admin-only (enforced by the ``require_admin`` decorator).
        :param id: the encoded id of the user to be undeleted
        :type id: str
        """
        user = self.get_user(trans, id)
        self.user_manager.undelete(user)
        return self.user_serializer.serialize_to_view(user, view='detailed')
# TODO: move to more basal, common resource than this
def anon_user_api_value(self, trans):
"""Return data for an anonymous user, truncated to only usage and quota_percent"""
usage = trans.app.quota_agent.get_usage(trans)
percent = trans.app.quota_agent.get_percent(trans=trans, usage=usage)
return {'total_disk_usage': int(usage),
'nice_total_disk_usage': util.nice_size(usage),
'quota_percent': percent}
    def _get_extra_user_preferences(self, trans):
        """
        Reads the file user_preferences_extra_conf.yml to display
        admin defined user information
        (returns the parsed 'preferences' mapping from the app config)
        """
        return trans.app.config.user_preferences_extra['preferences']
def _build_extra_user_pref_inputs(self, preferences, user):
"""
Build extra user preferences inputs list.
Add values to the fields if present
"""
if not preferences:
return []
extra_pref_inputs = list()
# Build sections for different categories of inputs
for item, value in preferences.items():
if value is not None:
input_fields = copy.deepcopy(value["inputs"])
for input in input_fields:
help = input.get('help', '')
required = 'Required' if util.string_as_bool(input.get('required')) else ''
if help:
input['help'] = f"{help} {required}"
else:
input['help'] = required
field = item + '|' + input['name']
for data_item in user.extra_preferences:
if field in data_item:
input['value'] = user.extra_preferences[data_item]
extra_pref_inputs.append({'type': 'section', 'title': value['description'], 'name': item, 'expanded': True, 'inputs': input_fields})
return extra_pref_inputs
    @expose_api
    def get_information(self, trans, id, **kwd):
        """
        GET /api/users/{id}/information/inputs
        Return user details such as username, email, addresses etc.
        :param id: the encoded id of the user
        :type id: str
        """
        user = self._get_user(trans, id)
        email = user.email
        username = user.username
        inputs = list()
        inputs.append({
            'id': 'email_input',
            'name': 'email',
            'type': 'text',
            'label': 'Email address',
            'value': email,
            'help': 'If you change your email address you will receive an activation link in the new mailbox and you have to activate your account by visiting it.'})
        if trans.webapp.name == 'galaxy':
            # Galaxy proper: public name is editable, plus custom info forms,
            # addresses and admin-defined extra preferences.
            inputs.append({
                'id': 'name_input',
                'name': 'username',
                'type': 'text',
                'label': 'Public name',
                'value': username,
                'help': 'Your public name is an identifier that will be used to generate addresses for information you share publicly. Public names must be at least three characters in length and contain only lower-case letters, numbers, and the "-" character.'})
            info_form_models = self.get_all_forms(trans, filter=dict(deleted=False), form_type=trans.app.model.FormDefinition.types.USER_INFO)
            if info_form_models:
                # Build a conditional select over the available USER_INFO forms,
                # pre-selecting (and pre-filling) the form the user last used.
                info_form_id = trans.security.encode_id(user.values.form_definition.id) if user.values else None
                info_field = {
                    'type': 'conditional',
                    'name': 'info',
                    'cases': [],
                    'test_param': {
                        'name': 'form_id',
                        'label': 'User type',
                        'type': 'select',
                        'value': info_form_id,
                        'help': '',
                        'data': []
                    }
                }
                for f in info_form_models:
                    values = None
                    if info_form_id == trans.security.encode_id(f.id) and user.values:
                        values = user.values.content
                    info_form = f.to_dict(user=user, values=values, security=trans.security)
                    info_field['test_param']['data'].append({'label': info_form['name'], 'value': info_form['id']})
                    info_field['cases'].append({'value': info_form['id'], 'inputs': info_form['inputs']})
                inputs.append(info_field)
            # Repeat section for user addresses, with one cached (pre-filled)
            # copy of the inputs per stored address.
            address_inputs = [{'type': 'hidden', 'name': 'id', 'hidden': True}]
            for field in AddressField.fields():
                address_inputs.append({'type': 'text', 'name': field[0], 'label': field[1], 'help': field[2]})
            address_repeat = {'title': 'Address', 'name': 'address', 'type': 'repeat', 'inputs': address_inputs, 'cache': []}
            address_values = [address.to_dict(trans) for address in user.addresses]
            for address in address_values:
                address_cache = []
                for input in address_inputs:
                    input_copy = input.copy()
                    input_copy['value'] = address.get(input['name'])
                    address_cache.append(input_copy)
                address_repeat['cache'].append(address_cache)
            inputs.append(address_repeat)
            # Build input sections for extra user preferences
            extra_user_pref = self._build_extra_user_pref_inputs(self._get_extra_user_preferences(trans), user)
            for item in extra_user_pref:
                inputs.append(item)
        else:
            # Tool Shed: the public name is frozen once the user owns a repository.
            if user.active_repositories:
                inputs.append(dict(id='name_input', name='username', label='Public name:', type='hidden', value=username, help='You cannot change your public name after you have created a repository in this tool shed.'))
            else:
                inputs.append(dict(id='name_input', name='username', label='Public name:', type='text', value=username, help='Your public name provides a means of identifying you publicly within this tool shed. Public names must be at least three characters in length and contain only lower-case letters, numbers, and the "-" character. You cannot change your public name after you have created a repository in this tool shed.'))
        return {
            'email': email,
            'username': username,
            'addresses': [address.to_dict(trans) for address in user.addresses],
            'inputs': inputs,
        }
    @expose_api
    def set_information(self, trans, id, payload=None, **kwd):
        """
        PUT /api/users/{id}/information/inputs
        Save a user's email, username, addresses etc.
        :param id: the encoded id of the user
        :type id: str
        :param payload: data with new settings
        :type payload: dict
        """
        payload = payload or {}
        user = self._get_user(trans, id)
        # Update email
        if 'email' in payload:
            email = payload.get('email')
            message = validate_email(trans, email, user)
            if message:
                raise exceptions.RequestParameterInvalidException(message)
            if user.email != email:
                # Update user email and user's private role name which must match
                private_role = trans.app.security_agent.get_private_user_role(user)
                private_role.name = email
                private_role.description = 'Private role for ' + email
                user.email = email
                trans.sa_session.add(user)
                trans.sa_session.add(private_role)
                trans.sa_session.flush()
                if trans.app.config.user_activation_on:
                    # Deactivate the user if email was changed and activation is on.
                    user.active = False
                    # NOTE(review): when the activation email is sent successfully the
                    # message assigned here is never returned; the method falls through
                    # to the generic success message at the bottom.
                    if self.user_manager.send_activation_email(trans, user.email, user.username):
                        message = 'The login information has been updated with the changes.<br>Verification email has been sent to your new email address. Please verify it by clicking the activation link in the email.<br>Please check your spam/trash folder in case you cannot find the message.'
                    else:
                        message = 'Unable to send activation email, please contact your local Galaxy administrator.'
                        if trans.app.config.error_email_to is not None:
                            message += ' Contact: %s' % trans.app.config.error_email_to
                        raise exceptions.InternalServerError(message)
        # Update public name
        if 'username' in payload:
            username = payload.get('username')
            message = validate_publicname(trans, username, user)
            if message:
                raise exceptions.RequestParameterInvalidException(message)
            if user.username != username:
                user.username = username
        # Update user custom form: payload keys for form fields are prefixed 'info|'.
        user_info_form_id = payload.get('info|form_id')
        if user_info_form_id:
            prefix = 'info|'
            user_info_form = trans.sa_session.query(trans.app.model.FormDefinition).get(trans.security.decode_id(user_info_form_id))
            user_info_values = {}
            for item in payload:
                if item.startswith(prefix):
                    user_info_values[item[len(prefix):]] = payload[item]
            form_values = trans.model.FormValues(user_info_form, user_info_values)
            trans.sa_session.add(form_values)
            user.values = form_values
        # Update values for extra user preference items; payload keys look like
        # '<section>|<field>' where <section> is a key of extra_pref_keys.
        extra_user_pref_data = dict()
        extra_pref_keys = self._get_extra_user_preferences(trans)
        if extra_pref_keys is not None:
            for key in extra_pref_keys:
                key_prefix = key + '|'
                for item in payload:
                    if item.startswith(key_prefix):
                        # Show error message if the required field is empty
                        if payload[item] == "":
                            # Raise an exception when a required field is empty while saving the form
                            keys = item.split("|")
                            section = extra_pref_keys[keys[0]]
                            for input in section['inputs']:
                                if input['name'] == keys[1] and input['required']:
                                    raise exceptions.ObjectAttributeMissingException("Please fill the required field")
                        extra_user_pref_data[item] = payload[item]
            user.preferences["extra_user_preferences"] = json.dumps(extra_user_pref_data)
        # Update user addresses; payload keys look like 'address_<index>|<attribute>'.
        address_dicts = {}
        address_count = 0
        for item in payload:
            match = re.match(r'^address_(?P<index>\d+)\|(?P<attribute>\S+)', item)
            if match:
                groups = match.groupdict()
                index = int(groups['index'])
                attribute = groups['attribute']
                address_dicts[index] = address_dicts.get(index) or {}
                address_dicts[index][attribute] = payload[item]
                address_count = max(address_count, index + 1)
        # The user's address list is rebuilt from scratch on every save.
        # NOTE(review): indices are assumed contiguous from 0 — a payload with a
        # gap (e.g. only 'address_1|...') raises KeyError below; confirm callers.
        user.addresses = []
        for index in range(0, address_count):
            d = address_dicts[index]
            if d.get('id'):
                # Existing address: re-fetch it by its encoded id.
                try:
                    user_address = trans.sa_session.query(UserAddress).get(trans.security.decode_id(d['id']))
                except Exception as e:
                    raise exceptions.ObjectNotFound('Failed to access user address ({}). {}'.format(d['id'], e))
            else:
                user_address = UserAddress()
                trans.log_event('User address added')
            for field in AddressField.fields():
                # field is (name, label, help); help == 'required' marks mandatory fields.
                if str(field[2]).lower() == 'required' and not d.get(field[0]):
                    raise exceptions.ObjectAttributeMissingException('Address {}: {} ({}) required.'.format(index + 1, field[1], field[0]))
                setattr(user_address, field[0], str(d.get(field[0], '')))
            user_address.user = user
            user.addresses.append(user_address)
            trans.sa_session.add(user_address)
        trans.sa_session.add(user)
        trans.sa_session.flush()
        trans.log_event('User information added')
        return {'message': 'User information has been saved.'}
@expose_api
def set_favorite(self, trans, id, object_type, payload=None, **kwd):
"""Add the object to user's favorites
PUT /api/users/{id}/favorites/{object_type}
:param id: the encoded id of the user
:type id: str
:param object_type: the object type that users wants to favorite
:type object_type: str
:param object_id: the id of an object that users wants to favorite
:type object_id: str
"""
payload = payload or {}
self._validate_favorite_object_type(object_type)
user = self._get_user(trans, id)
favorites = json.loads(user.preferences['favorites']) if 'favorites' in user.preferences else {}
if object_type == 'tools':
tool_id = payload.get('object_id')
tool = self.app.toolbox.get_tool(tool_id)
if not tool:
raise exceptions.ObjectNotFound("Could not find tool with id '%s'." % tool_id)
if not tool.allow_user_access(user):
raise exceptions.AuthenticationFailed("Access denied for tool with id '%s'." % tool_id)
if 'tools' in favorites:
favorite_tools = favorites['tools']
else:
favorite_tools = []
if tool_id not in favorite_tools:
favorite_tools.append(tool_id)
favorites['tools'] = favorite_tools
user.preferences['favorites'] = json.dumps(favorites)
trans.sa_session.flush()
return favorites
@expose_api
def remove_favorite(self, trans, id, object_type, object_id, payload=None, **kwd):
"""Remove the object from user's favorites
DELETE /api/users/{id}/favorites/{object_type}/{object_id:.*?}
:param id: the encoded id of the user
:type id: str
:param object_type: the object type that users wants to favorite
:type object_type: str
:param object_id: the id of an object that users wants to remove from favorites
:type object_id: str
"""
payload = payload or {}
self._validate_favorite_object_type(object_type)
user = self._get_user(trans, id)
favorites = json.loads(user.preferences['favorites']) if 'favorites' in user.preferences else {}
if object_type == 'tools':
if 'tools' in favorites:
favorite_tools = favorites['tools']
if object_id in favorite_tools:
del favorite_tools[favorite_tools.index(object_id)]
favorites['tools'] = favorite_tools
user.preferences['favorites'] = json.dumps(favorites)
trans.sa_session.flush()
else:
raise exceptions.ObjectNotFound('Given object is not in the list of favorites')
return favorites
def _validate_favorite_object_type(self, object_type):
if object_type in ['tools']:
pass
else:
raise exceptions.ObjectAttributeInvalidException("This type is not supported. Given object_type: %s" % object_type)
@expose_api
def get_password(self, trans, id, payload=None, **kwd):
"""
Return available password inputs.
"""
payload = payload or {}
return {'inputs': [{'name': 'current', 'type': 'password', 'label': 'Current password'},
{'name': 'password', 'type': 'password', 'label': 'New password'},
{'name': 'confirm', 'type': 'password', 'label': 'Confirm password'}]}
@expose_api
def set_password(self, trans, id, payload=None, **kwd):
"""
Allows to the logged-in user to change own password.
"""
payload = payload or {}
user, message = self.user_manager.change_password(trans, id=id, **payload)
if user is None:
raise exceptions.AuthenticationRequired(message)
return {"message": "Password has been changed."}
@expose_api
def get_permissions(self, trans, id, payload=None, **kwd):
"""
Get the user's default permissions for the new histories
"""
payload = payload or {}
user = self._get_user(trans, id)
roles = user.all_roles()
inputs = []
for index, action in trans.app.model.Dataset.permitted_actions.items():
inputs.append({'type': 'select',
'multiple': True,
'optional': True,
'name': index,
'label': action.action,
'help': action.description,
'options': list({(r.name, r.id) for r in roles}),
'value': [a.role.id for a in user.default_permissions if a.action == action.action]})
return {'inputs': inputs}
@expose_api
def set_permissions(self, trans, id, payload=None, **kwd):
"""
Set the user's default permissions for the new histories
"""
payload = payload or {}
user = self._get_user(trans, id)
permissions = {}
for index, action in trans.app.model.Dataset.permitted_actions.items():
action_id = trans.app.security_agent.get_action(action.action).action
permissions[action_id] = [trans.sa_session.query(trans.app.model.Role).get(x) for x in (payload.get(index) or [])]
trans.app.security_agent.user_set_default_permissions(user, permissions)
return {'message': 'Permissions have been saved.'}
@expose_api
def get_toolbox_filters(self, trans, id, payload=None, **kwd):
"""
API call for fetching toolbox filters data. Toolbox filters are specified in galaxy.ini.
The user can activate them and the choice is stored in user_preferences.
"""
payload = payload or {}
user = self._get_user(trans, id)
filter_types = self._get_filter_types(trans)
saved_values = {}
for name, value in user.preferences.items():
if name in filter_types:
saved_values[name] = listify(value, do_strip=True)
inputs = [{
'type': 'hidden',
'name': 'helptext',
'label': 'In this section you may enable or disable Toolbox filters. Please contact your admin to configure filters as necessary.'
}]
errors = {}
factory = FilterFactory(trans.app.toolbox)
for filter_type in filter_types:
self._add_filter_inputs(factory, filter_types, inputs, errors, filter_type, saved_values)
return {'inputs': inputs, 'errors': errors}
@expose_api
def set_toolbox_filters(self, trans, id, payload=None, **kwd):
"""
API call to update toolbox filters data.
"""
payload = payload or {}
user = self._get_user(trans, id)
filter_types = self._get_filter_types(trans)
for filter_type in filter_types:
new_filters = []
for prefixed_name in payload:
if payload.get(prefixed_name) == 'true' and prefixed_name.startswith(filter_type):
prefix = filter_type + '|'
new_filters.append(prefixed_name[len(prefix):])
user.preferences[filter_type] = ','.join(new_filters)
trans.sa_session.add(user)
trans.sa_session.flush()
return {'message': 'Toolbox filters have been saved.'}
def _add_filter_inputs(self, factory, filter_types, inputs, errors, filter_type, saved_values):
filter_inputs = list()
filter_values = saved_values.get(filter_type, [])
filter_config = filter_types[filter_type]['config']
filter_title = filter_types[filter_type]['title']
for filter_name in filter_config:
function = factory.build_filter_function(filter_name)
if function is None:
errors[f'{filter_type}|{filter_name}'] = 'Filter function not found.'
short_description, description = None, None
doc_string = docstring_trim(function.__doc__)
split = doc_string.split('\n\n')
if split:
short_description = split[0]
if len(split) > 1:
description = split[1]
else:
log.warning('No description specified in the __doc__ string for %s.' % filter_name)
filter_inputs.append({
'type': 'boolean',
'name': filter_name,
'label': short_description or filter_name,
'help': description or 'No description available.',
'value': 'true' if filter_name in filter_values else 'false'
})
if filter_inputs:
inputs.append({'type': 'section', 'title': filter_title, 'name': filter_type, 'expanded': True, 'inputs': filter_inputs})
def _get_filter_types(self, trans):
return OrderedDict([('toolbox_tool_filters', {'title': 'Tools', 'config': trans.app.config.user_tool_filters}),
('toolbox_section_filters', {'title': 'Sections', 'config': trans.app.config.user_tool_section_filters}),
('toolbox_label_filters', {'title': 'Labels', 'config': trans.app.config.user_tool_label_filters})])
@expose_api
def api_key(self, trans, id, payload=None, **kwd):
"""
Create API key.
"""
payload = payload or {}
user = self._get_user(trans, id)
return self.api_key_manager.create_api_key(user)
@expose_api
def get_or_create_api_key(self, trans, id, payload=None, **kwd):
"""
Unified 'get or create' for API key
"""
payload = payload or {}
user = self._get_user(trans, id)
return self.api_key_manager.get_or_create_api_key(user)
@expose_api
def get_api_key(self, trans, id, payload=None, **kwd):
"""
Get API key inputs.
"""
payload = payload or {}
user = self._get_user(trans, id)
return self._build_inputs_api_key(user)
@expose_api
def set_api_key(self, trans, id, payload=None, **kwd):
"""
Get API key inputs with new API key.
"""
payload = payload or {}
user = self._get_user(trans, id)
self.api_key_manager.create_api_key(user)
return self._build_inputs_api_key(user, message='Generated a new web API key.')
def _build_inputs_api_key(self, user, message=''):
"""
Build API key inputs.
"""
inputs = [{'name': 'api-key',
'type': 'text',
'label': 'Current API key:',
'value': user.api_keys[0].key if user.api_keys else 'Not available.',
'readonly': True,
'help': ' An API key will allow you to access via web API. Please note that this key acts as an alternate means to access your account and should be treated with the same care as your login password.'}]
return {'message': message, 'inputs': inputs}
    @expose_api
    def get_custom_builds(self, trans, id, payload=None, **kwd):
        """
        GET /api/users/{id}/custom_builds
        Returns collection of custom builds.
        :param id: the encoded id of the user
        :type id: str
        """
        payload = payload or {}
        user = self._get_user(trans, id)
        # Custom builds are stored as a JSON blob in the 'dbkeys' preference.
        dbkeys = json.loads(user.preferences['dbkeys']) if 'dbkeys' in user.preferences else {}
        valid_dbkeys = {}
        update = False
        for key, dbkey in dbkeys.items():
            if 'count' not in dbkey and 'linecount' in dbkey:
                # Build has no chromosome count yet: try to fill it in from the
                # 'linecount' converted dataset, but only once that dataset is OK.
                chrom_count_dataset = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(dbkey['linecount'])
                if chrom_count_dataset and not chrom_count_dataset.deleted and chrom_count_dataset.state == trans.app.model.HistoryDatasetAssociation.states.OK:
                    chrom_count = int(open(chrom_count_dataset.file_name).readline())
                    dbkey['count'] = chrom_count
                    valid_dbkeys[key] = dbkey
                    update = True
                # NOTE(review): builds whose linecount dataset is deleted or not
                # yet ready are silently omitted from the listing (and dropped
                # from the stored preference if any other build triggered an
                # update) — confirm this pruning is intended.
            else:
                valid_dbkeys[key] = dbkey
        if update:
            # Persist newly computed counts (and the pruned mapping).
            user.preferences['dbkeys'] = json.dumps(valid_dbkeys)
        # Flatten to a list, exposing each build's key as its 'id'.
        dbkey_collection = []
        for key, attributes in valid_dbkeys.items():
            attributes['id'] = key
            dbkey_collection.append(attributes)
        return dbkey_collection
    @expose_api
    def add_custom_builds(self, trans, id, key, payload=None, **kwd):
        """
        PUT /api/users/{id}/custom_builds/{key}
        Add new custom build.
        :param id: the encoded id of the user
        :type id: str
        :param key: custom build key
        :type key: str
        :param payload: data with new build details; must contain 'name',
            'len|type' (one of 'file', 'fasta', 'text') and 'len|value'
        :type payload: dict
        """
        payload = payload or {}
        user = self._get_user(trans, id)
        dbkeys = json.loads(user.preferences['dbkeys']) if 'dbkeys' in user.preferences else {}
        name = payload.get('name')
        len_type = payload.get('len|type')
        len_value = payload.get('len|value')
        if len_type not in ['file', 'fasta', 'text'] or not len_value:
            raise exceptions.RequestParameterInvalidException('Please specify a valid data source type.')
        if not name or not key:
            raise exceptions.RequestParameterMissingException('You must specify values for all the fields.')
        elif key in dbkeys:
            raise exceptions.DuplicatedIdentifierException('There is already a custom build with that key. Delete it first if you want to replace it.')
        else:
            # Have everything needed; create new build.
            build_dict = {'name': name}
            if len_type in ['text', 'file']:
                # Create new len file
                new_len = trans.app.model.HistoryDatasetAssociation(extension='len', create_dataset=True, sa_session=trans.sa_session)
                trans.sa_session.add(new_len)
                new_len.name = name
                new_len.visible = False
                new_len.state = trans.app.model.Job.states.OK
                new_len.info = 'custom build .len file'
                try:
                    trans.app.object_store.create(new_len.dataset)
                except ObjectInvalid:
                    raise exceptions.InternalServerError('Unable to create output dataset: object store is full.')
                trans.sa_session.flush()
                counter = 0
                lines_skipped = 0
                with open(new_len.file_name, 'w') as f:
                    # LEN files have format:
                    #   <chrom_name><tab><chrom_length>
                    for line in len_value.split('\n'):
                        # Splits at the last whitespace in the line
                        lst = line.strip().rsplit(None, 1)
                        if not lst or len(lst) < 2:
                            # Blank line or no separable length column.
                            lines_skipped += 1
                            continue
                        chrom, length = lst[0], lst[1]
                        try:
                            length = int(length)
                        except ValueError:
                            # Length column is not an integer.
                            lines_skipped += 1
                            continue
                        if chrom != escape(chrom):
                            # Chromosome name contains markup characters; reject it.
                            build_dict['message'] = 'Invalid chromosome(s) with HTML detected and skipped.'
                            lines_skipped += 1
                            continue
                        counter += 1
                        f.write(f'{chrom}\t{length}\n')
                build_dict['len'] = new_len.id
                build_dict['count'] = counter
            else:
                # 'fasta' source: derive len/linecount via dataset converters.
                build_dict['fasta'] = trans.security.decode_id(len_value)
                dataset = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(build_dict['fasta'])
                try:
                    new_len = dataset.get_converted_dataset(trans, 'len')
                    new_linecount = new_len.get_converted_dataset(trans, 'linecount')
                    build_dict['len'] = new_len.id
                    build_dict['linecount'] = new_linecount.id
                except Exception:
                    raise exceptions.ToolExecutionError('Failed to convert dataset.')
            dbkeys[key] = build_dict
            user.preferences['dbkeys'] = json.dumps(dbkeys)
            trans.sa_session.flush()
        return build_dict
@expose_api
def delete_custom_builds(self, trans, id, key, payload=None, **kwd):
"""
DELETE /api/users/{id}/custom_builds/{key}
Delete a custom build.
:param id: the encoded id of the user
:type id: str
:param id: custom build key to be deleted
:type id: str
"""
payload = payload or {}
user = self._get_user(trans, id)
dbkeys = json.loads(user.preferences['dbkeys']) if 'dbkeys' in user.preferences else {}
if key and key in dbkeys:
del dbkeys[key]
user.preferences['dbkeys'] = json.dumps(dbkeys)
trans.sa_session.flush()
return {'message': 'Deleted %s.' % key}
else:
raise exceptions.ObjectNotFound('Could not find and delete build (%s).' % key)
def _get_user(self, trans, id):
user = self.get_user(trans, id)
if not user:
raise exceptions.RequestParameterInvalidException('Invalid user (%s).' % id)
if user != trans.user and not trans.user_is_admin:
raise exceptions.InsufficientPermissionsException('Access denied.')
return user
|
#!/usr/bin/env python
import os, sys, re
from optparse import OptionParser
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from jobTree.src.bioio import getTempFile
from sonLib.bioio import logger
from sonLib.bioio import system
from sonLib.bioio import nameValue
from sonLib.bioio import getTempDirectory
from sonLib.bioio import setLogLevel
#Download, Unzip files & organize them to the output directory in this format:
#Outdir/
# Sample1/
# ILLUMINA/
# PAIRED/
# file1_reads1
# file1_reads2
# file2_reads1
# file2_reads2
# ...
# SINGLE/
# file1
# file2
# ...
# LS454/
# PAIRED/
# SINGLE/
class Setup(Target):
    """Root jobTree target: parse sequence.index and spawn one RunSample child per sample."""

    def __init__(self, options):
        Target.__init__(self, time=0.00025)
        self.options = options

    def run(self):
        setLogLevel("INFO")
        sample_to_files = readSeqIndex(self.options.seqIndexFile, self.options.samples)
        for sample_name in sample_to_files:
            self.addChildTarget(RunSample(self.options, sample_to_files[sample_name]))
class RunSample(Target):
    # Downloads all read files for one sample via aspera, then schedules
    # a follow-on Uncompress target to gunzip them.
    def __init__(self, options, file2info):
        Target.__init__(self, time=0.00005)
        self.options = options
        # file2info: mapping of file name -> Readfile record for this sample.
        self.file2info = file2info
    def run(self):
        filename2info = self.file2info
        for file in filename2info:
            readfile = filename2info[file]
            # Layout: <outdir>/<sample>/<platform>/<libLayout>/<file>
            samplePath = os.path.join( self.options.outdir, readfile.sample, readfile.platform, readfile.libLayout )
            system("mkdir -p %s" %(samplePath))
            samplefile = os.path.join(samplePath, readfile.name)
            sampleAddress = "%s/%s" %(self.options.ftpaddress, readfile.path)
            # NOTE(review): rstrip('.gz') strips the character set {'.','g','z'}
            # from the end, not the '.gz' suffix — names ending in g/z are
            # over-stripped, so the existence check may test the wrong path.
            if not os.path.exists( samplefile.rstrip('.gz') ):
                system( "ascp -i ~/.aspera/connect/etc/asperaweb_id_dsa.putty -Tr -Q -l 500M -L- %s %s" %(sampleAddress, samplePath) )
        #Done downloading all the files, now gunzip them:
        self.setFollowOnTarget( Uncompress(self.options, filename2info) )
        #self.addChildTarget( GetFile(self.options, readfile['sample'], readfile['platform'], readfile['libLayout'], readfile['name'], readfile['path']) )
class Uncompress(Target):
    """Follow-on target: spawn one UncompressFile child per downloaded .gz file
    whose uncompressed counterpart does not exist yet."""

    def __init__(self, options, file2info):
        Target.__init__(self, time=0.000001)
        self.options = options
        self.file2info = file2info

    def run(self):
        file2info = self.file2info
        for file in self.file2info:
            readfile = file2info[file]
            samplePath = os.path.join( self.options.outdir, readfile.sample, readfile.platform, readfile.libLayout )
            samplefile = os.path.join(samplePath, readfile.name)
            # BUG FIX: the original used samplefile.rstrip('.gz'), which strips
            # the character set {'.', 'g', 'z'} from the end rather than the
            # '.gz' suffix (e.g. 'reads_tag.gz' -> 'reads_ta'), so the
            # existence check could test the wrong path. Strip the suffix only.
            if samplefile.endswith('.gz'):
                uncompressed = samplefile[:-len('.gz')]
            else:
                uncompressed = samplefile
            if not os.path.exists( uncompressed ):
                self.addChildTarget( UncompressFile(samplefile) )
class UncompressFile(Target):
    """Child target that gunzips a single downloaded file in place."""

    def __init__(self, samplefile):
        Target.__init__(self, time=0.1)
        self.file = samplefile

    def run(self):
        system("gunzip %s" % self.file)
#class GetFile(Target):
# def __init__(self, options, sample, platform, libLayout, name, path):
# Target.__init__(self, time=0.00025)
# #Target.__init__(self)
# self.options = options
# self.sample = sample
# self.platform = platform
# self.libLayout = libLayout
# self.name = name
# self.path = path
#
# def run(self):
# #logger.info("GETFILE\n")
# samplePath = os.path.join( self.options.outdir, self.sample, self.platform, self.libLayout )
# system("mkdir -p %s" %(samplePath))
# samplefile = os.path.join(samplePath, self.name)
#
# sampleAddress = "%s/%s" %(self.options.ftpaddress, self.path)
# if not os.path.exists( samplefile.rstrip('.gz') ):
# system( "ascp -i ~/.aspera/connect/etc/asperaweb_id_dsa.putty -Tr -Q -l 500M -L- %s %s" %(sampleAddress, samplePath) )
# #system("wget -O - %s > %s" %(sampleAddress, samplefile))
# system("gunzip %s" %samplefile)
class Readfile():
    """One row of a sequence.index file (tab-separated).

    Exposes the columns this pipeline needs as attributes; all values are
    kept as strings except platform/libLayout, which are lower-cased.
    """

    def __init__(self, line):
        fields = line.strip().split('\t')
        self.path = fields[0]
        # File name is the last component of the path column.
        self.name = fields[0].split('/')[-1]
        self.sample = fields[9]
        self.platform = fields[12].lower()  # e.g. illumina, ls454, abi_solid
        self.insertSize = fields[17]
        self.libLayout = fields[18].lower()  # paired or single
        self.pairedFile = fields[19]
        self.withdrawn = fields[20]
        self.withdrawnDate = fields[21]
        self.readCount = fields[23]
        self.baseCount = fields[24]
#def readLine(line):
# readfile = {}
# items = line.strip().split('\t')
# l = items[0].split('/')
# readfile['path'] = items[0]
# readfile['name'] = l[ len(l) -1 ]
# readfile['sample'] = items[9]
# readfile['platform'] = items[12].lower() #ILLUMINA, LS454, ABI_SOLID
# readfile['insertSize'] = items[17]
# readfile['libLayout'] = items[18].lower() #PAIRED, OR SINGLE
# readfile['pairedFile'] = items[19]
# readfile['withdrawn'] = items[20]
# readfile['withdrawnDate'] = items[21]
#
# #if self.pairedFile == "":#If paired, but not mate file, then set as single
# # self.libLayout = "single"
# readfile['readCount'] = items[23]
# readfile['baseCount'] = items[24]
# return readfile
def readSeqIndex(file, samples):
    """Parse a sequence.index file into {sample: {file name: Readfile}}.

    A row is skipped when its sample is not in *samples*, when a
    paired-layout row lists no mate file, or when the row is withdrawn.

    :param file: path to the sequence.index file
    :param samples: collection of sample names to keep
    """
    file2info = {}  # key = sample, val = {filename: Readfile}
    # Use a context manager (the file is closed even on error) and iterate
    # lazily instead of materializing the whole file with readlines().
    with open(file, 'r') as infoFh:
        for line in infoFh:
            rf = Readfile(line)
            if rf.sample not in samples:
                continue
            if rf.pairedFile == "" and rf.libLayout == "paired":
                # Paired layout without a mate file: unusable, skip.
                continue
            if rf.withdrawn == '1' or rf.withdrawnDate != '':
                continue
            file2info.setdefault(rf.sample, {})[rf.name] = rf
    return file2info
def checkOptions(options, args, parser):
    """Validate positional arguments and copy them onto *options*.

    Expected args: <sequence.index> <sample1[,sample2,...]> <outdir> <ftpAddress>
    Calls parser.error() (which exits) on bad input.
    """
    if len(args) < 4:
        # BUG FIX: the message previously claimed 3 inputs were needed while
        # the check (and main's usage string) requires 4 arguments.
        parser.error("Need 4 input arguments, only %d provided\n" % (len(args)))
    if not os.path.exists(args[0]):
        parser.error("sequence.index file %s does not exist\n" % (args[0]))
    # Make sure the output directory exists before any child target runs.
    system("mkdir -p %s" % args[2])
    options.seqIndexFile = args[0]
    options.samples = args[1].split(',')
    options.outdir = args[2]
    options.ftpaddress = args[3]  # e.g. <user@host>:vol1/ftp
def main():
    # Entry point: parse CLI options, validate them, then run the jobTree
    # pipeline rooted at Setup.
    #ftpAddress: <EMAIL>:vol1/ftp
    usage = "Usage: %prog [options] <sequence.index> <sample1[,sample2,...]> <outdir> <ftpAddress>"
    parser = OptionParser( usage = usage )
    # Add jobTree's own batch-system/logging options to the parser.
    Stack.addJobTreeOptions(parser)
    #initOptions( parser )
    options, args = parser.parse_args()
    checkOptions( options, args, parser )
    # startJobTree returns the number of failed jobs (0/None on success).
    i = Stack( Setup(options) ).startJobTree(options)
    if i:
        raise RuntimeError("The jobTree contains %d failed jobs\n" %i)
if __name__ == "__main__":
    # Re-import this script under its importable package path — presumably so
    # jobTree can pickle/unpickle the Target classes by module name when
    # running as a script. TODO(review): confirm this is required by jobTree.
    from referenceViz.src.getReads import *
    main()
|
<reponame>sivaprakashniet/push_pull
from __future__ import absolute_import
from kombu import Exchange, Queue
from mock import Mock
from celery.app.amqp import Queues, TaskPublisher
from celery.five import keys
from celery.tests.case import AppCase
class test_TaskProducer(AppCase):
    """Tests for app.amqp.TaskProducer behavior (context manager, declare,
    retry handling, custom queues and event dispatching)."""
    def test__exit__(self):
        # Leaving the context manager must release the producer.
        publisher = self.app.amqp.TaskProducer(self.app.connection())
        publisher.release = Mock()
        with publisher:
            pass
        publisher.release.assert_called_with()
    def test_declare(self):
        # declare() must work both with and without an exchange name.
        publisher = self.app.amqp.TaskProducer(self.app.connection())
        publisher.exchange.name = 'foo'
        publisher.declare()
        publisher.exchange.name = None
        publisher.declare()
    def test_retry_policy(self):
        # An unknown retry_policy key must be passed through without error.
        prod = self.app.amqp.TaskProducer(Mock())
        prod.channel.connection.client.declared_entities = set()
        prod.publish_task('tasks.add', (2, 2), {},
                          retry_policy={'frobulate': 32.4})
    def test_publish_no_retry(self):
        # With retry=False the connection's ensure() wrapper must not be used.
        prod = self.app.amqp.TaskProducer(Mock())
        prod.channel.connection.client.declared_entities = set()
        prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123)
        self.assertFalse(prod.connection.ensure.call_count)
    def test_publish_custom_queue(self):
        # Publishing to a named queue must use that queue's exchange/routing key.
        prod = self.app.amqp.TaskProducer(Mock())
        self.app.amqp.queues['some_queue'] = Queue(
            'xxx', Exchange('yyy'), 'zzz',
        )
        prod.channel.connection.client.declared_entities = set()
        prod.publish = Mock()
        prod.publish_task('tasks.add', (8, 8), {}, retry=False,
                          queue='some_queue')
        self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy')
        self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz')
    def test_event_dispatcher(self):
        # A dispatcher is created lazily but starts disabled.
        prod = self.app.amqp.TaskProducer(Mock())
        self.assertTrue(prod.event_dispatcher)
        self.assertFalse(prod.event_dispatcher.enabled)
class test_TaskConsumer(AppCase):
    """Tests content-type negotiation of app.amqp.TaskConsumer."""

    def test_accept_content(self):
        expected = set(['application/json'])
        with self.app.pool.acquire(block=True) as conn:
            # The app-level setting is picked up by default...
            self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json']
            consumer = self.app.amqp.TaskConsumer(conn)
            self.assertEqual(consumer.accept, expected)
            # ...and serializer aliases ('json') are expanded to MIME types.
            aliased = self.app.amqp.TaskConsumer(conn, accept=['json'])
            self.assertEqual(aliased.accept, expected)
class test_compat_TaskPublisher(AppCase):
    """Tests for the legacy TaskPublisher compatibility shim's exchange handling."""
    def test_compat_exchange_is_string(self):
        # A string exchange is promoted to an Exchange object, defaulting to
        # type 'direct' unless exchange_type overrides it.
        producer = TaskPublisher(exchange='foo', app=self.app)
        self.assertIsInstance(producer.exchange, Exchange)
        self.assertEqual(producer.exchange.name, 'foo')
        self.assertEqual(producer.exchange.type, 'direct')
        producer = TaskPublisher(exchange='foo', exchange_type='topic',
                                 app=self.app)
        self.assertEqual(producer.exchange.type, 'topic')
    def test_compat_exchange_is_Exchange(self):
        # An Exchange instance must be used as-is.
        producer = TaskPublisher(exchange=Exchange('foo'), app=self.app)
        self.assertEqual(producer.exchange.name, 'foo')
class test_PublisherPool(AppCase):
    """Tests for the producer pool honoring BROKER_POOL_LIMIT."""
    def test_setup_nolimit(self):
        # With no broker pool limit the resource queue stays unpopulated and
        # producers can be acquired/released freely.
        self.app.conf.BROKER_POOL_LIMIT = None
        try:
            delattr(self.app, '_pool')
        except AttributeError:
            pass
        # Force the pool to be rebuilt with the new configuration.
        self.app.amqp._producer_pool = None
        pool = self.app.amqp.producer_pool
        self.assertEqual(pool.limit, self.app.pool.limit)
        self.assertFalse(pool._resource.queue)
        r1 = pool.acquire()
        r2 = pool.acquire()
        r1.release()
        r2.release()
        r1 = pool.acquire()
        r2 = pool.acquire()
    def test_setup(self):
        # With a limit of 2 the pool pre-populates its resource queue and
        # recycles released producers (LIFO: last released is acquired first).
        self.app.conf.BROKER_POOL_LIMIT = 2
        try:
            delattr(self.app, '_pool')
        except AttributeError:
            pass
        self.app.amqp._producer_pool = None
        pool = self.app.amqp.producer_pool
        self.assertEqual(pool.limit, self.app.pool.limit)
        self.assertTrue(pool._resource.queue)
        p1 = r1 = pool.acquire()
        p2 = r2 = pool.acquire()
        r1.release()
        r2.release()
        r1 = pool.acquire()
        r2 = pool.acquire()
        self.assertIs(p2, r1)
        self.assertIs(p1, r2)
        r1.release()
        r2.release()
class test_Queues(AppCase):
    """Tests for the celery.app.amqp.Queues mapping (add/select/ha-policy)."""
    def test_queues_format(self):
        # Formatting with nothing consumed from yields an empty string.
        self.app.amqp.queues._consume_from = {}
        self.assertEqual(self.app.amqp.queues.format(), '')
    def test_with_defaults(self):
        # Constructing from None behaves like an empty mapping.
        self.assertEqual(Queues(None), {})
    def test_add(self):
        # add() by name creates a Queue entry with the given routing key.
        q = Queues()
        q.add('foo', exchange='ex', routing_key='rk')
        self.assertIn('foo', q)
        self.assertIsInstance(q['foo'], Queue)
        self.assertEqual(q['foo'].routing_key, 'rk')
    def test_with_ha_policy(self):
        # ha_policy=None leaves queue_arguments untouched.
        qn = Queues(ha_policy=None, create_missing=False)
        qn.add('xyz')
        self.assertIsNone(qn['xyz'].queue_arguments)
        qn.add('xyx', queue_arguments={'x-foo': 'bar'})
        self.assertEqual(qn['xyx'].queue_arguments, {'x-foo': 'bar'})
        # A string policy sets x-ha-policy, merging with existing arguments.
        q = Queues(ha_policy='all', create_missing=False)
        q.add(Queue('foo'))
        self.assertEqual(q['foo'].queue_arguments, {'x-ha-policy': 'all'})
        qq = Queue('xyx2', queue_arguments={'x-foo': 'bari'})
        q.add(qq)
        self.assertEqual(q['xyx2'].queue_arguments, {
            'x-ha-policy': 'all',
            'x-foo': 'bari',
        })
        # A list policy becomes 'nodes' plus the node list in -params.
        q2 = Queues(ha_policy=['A', 'B', 'C'], create_missing=False)
        q2.add(Queue('foo'))
        self.assertEqual(q2['foo'].queue_arguments, {
            'x-ha-policy': 'nodes',
            'x-ha-policy-params': ['A', 'B', 'C'],
        })
    def test_select_add(self):
        # select_add() extends the consume-from set chosen by select().
        q = Queues()
        q.select(['foo', 'bar'])
        q.select_add('baz')
        self.assertItemsEqual(keys(q._consume_from), ['foo', 'bar', 'baz'])
    def test_deselect(self):
        # deselect() removes a queue from the consume-from set.
        q = Queues()
        q.select(['foo', 'bar'])
        q.deselect('bar')
        self.assertItemsEqual(keys(q._consume_from), ['foo'])
    def test_with_ha_policy_compat(self):
        q = Queues(ha_policy='all')
        q.add('bar')
        self.assertEqual(q['bar'].queue_arguments, {'x-ha-policy': 'all'})
    def test_add_default_exchange(self):
        # Queues added without an exchange inherit the default exchange.
        ex = Exchange('fff', 'fanout')
        q = Queues(default_exchange=ex)
        q.add(Queue('foo'))
        self.assertEqual(q['foo'].exchange, ex)
    def test_alias(self):
        # An aliased queue is reachable under both names.
        q = Queues()
        q.add(Queue('foo', alias='barfoo'))
        self.assertIs(q['barfoo'], q['foo'])
|
"""Final experiments, trained on the full training set.
This uses hyperparameters chosen by cross-validation from exp_* """
import os
import dill
import pickle
import numpy as np
from marseille.datasets import get_dataset_loader, load_embeds
from marseille.custom_logging import logging
from marseille.argrnn import BaselineArgumentLSTM, ArgumentLSTM
from marseille.io import load_csr
from .exp_svmstruct import fit_predict as fit_pred_pystruct
from .exp_linear import BaselineStruct
# Hyperparameters selected by cross-validation (see the exp_* scripts named in
# the module docstring), keyed as hyperparams[method][model][dataset].
#   method:  'linear' | 'linear-struct' | 'rnn' | 'rnn-struct'
#   model:   'bare' | 'full' | 'strict'
#   dataset: 'cdcp' | 'ukp'
hyperparams = {
    'linear': {
        # 'alpha': regularization strength for the linear baseline.
        'bare': {
            'cdcp': {'alpha': 0.001},
            'ukp': {'alpha': 0.01}
        },
        'full': {
            'cdcp': {'alpha': 0.001},
            'ukp': {'alpha': 0.01}
        },
        'strict': {
            'cdcp': {'alpha': 0.001},
            'ukp': {'alpha': 0.01}
        }
    },
    'linear-struct': {
        # 'C': SVM-struct regularization parameter.
        'bare': {
            'cdcp': {'C': 0.3},
            'ukp': {'C': 0.01}
        },
        'full': {
            'cdcp': {'C': 0.1},
            'ukp': {'C': 0.01}
        },
        'strict': {
            'cdcp': {'C': 0.1},
            'ukp': {'C': 0.03}
        }
    },
    'rnn': {
        # 'max_iter': training epochs; 'mlp_dropout': dropout rate in the MLP.
        'bare': {
            'cdcp': {'max_iter': 25, 'mlp_dropout': 0.15},
            'ukp': {'max_iter': 100, 'mlp_dropout': 0.05}
        },
        'full': {
            'cdcp': {'max_iter': 25, 'mlp_dropout': 0.05},
            'ukp': {'max_iter': 50, 'mlp_dropout': 0.15}
        },
        'strict': {
            'cdcp': {'max_iter': 25, 'mlp_dropout': 0.05},
            'ukp': {'max_iter': 25, 'mlp_dropout': 0.1}
        }
    },
    'rnn-struct': {
        'bare': {
            'cdcp': {'max_iter': 25, 'mlp_dropout': 0.25},
            'ukp': {'max_iter': 100, 'mlp_dropout': 0.15}
        },
        'full': {
            'cdcp': {'max_iter': 100, 'mlp_dropout': 0.25},
            'ukp': {'max_iter': 10, 'mlp_dropout': 0.05}
        },
        'strict': {
            'cdcp': {'max_iter': 10, 'mlp_dropout': 0.2},
            'ukp': {'max_iter': 10, 'mlp_dropout': 0.15}
        }
    }
}
# Whether to use exact inference at test time; also baked into the results
# file name below.
exact_test = True
if __name__ == '__main__':

    from docopt import docopt

    usage = """
    Usage:
        exp_train_test (cdcp|ukp) --method=M --model=N [--dynet-seed N --dynet-mem N]

    Options:
        --method: one of (linear, linear-struct, rnn, rnn-struct)
        --model: one of (bare, full, strict)
    """

    args = docopt(usage)

    # Which corpus to run on.
    dataset = 'cdcp' if args['cdcp'] else 'ukp'
    method = args['--method']
    model = args['--model']

    # Hyperparameters chosen by cross-validation (``hyperparams`` above).
    params = hyperparams[method][model][dataset]

    # Full train/test splits — no folds, this is the final experiment.
    load_tr, ids_tr = get_dataset_loader(dataset, split="train")
    load_te, ids_te = get_dataset_loader(dataset, split="test")

    train_docs = list(load_tr(ids_tr))
    test_docs = list(load_te(ids_te))

    logging.info("{} {} on {} ({})".format(method, model, dataset, params))

    # Common prefix for every artifact (models, vectorizers, predictions).
    filename = os.path.join('test_results',
                            'exact={}_{}_{}_{}'.format(exact_test,
                                                       dataset,
                                                       method,
                                                       model))

    if not os.path.exists('test_results'):
        os.makedirs('test_results')

    # logic for constraints and compat features
    # note that compat_features and second_order aren't used
    # if the model isn't structured, but it's more readable this way.
    if model == 'bare':
        constraints = ''
        compat_features = False
        second_order = False
    elif model == 'full':
        constraints = dataset
        compat_features = True
        second_order = True
    elif model == 'strict':
        constraints = '{}+strict'.format(dataset)
        compat_features = True
        second_order = True
    else:
        raise ValueError('Invalid model: {}'.format(model))

    # logic for which second order features to use, if any
    grandparents = second_order and dataset == 'ukp'
    coparents = second_order
    siblings = second_order and dataset == 'cdcp'

    if method == 'linear':
        # Unstructured linear baseline over precomputed sparse features.
        ds = 'erule' if dataset == 'cdcp' else 'ukp-essays'
        path = os.path.join("data", "process", ds, "folds", "traintest", "{}")

        X_tr_link, y_tr_link = load_csr(path.format('train.npz'),
                                        return_y=True)
        X_te_link, y_te_link = load_csr(path.format('test.npz'),
                                        return_y=True)
        X_tr_prop, y_tr_prop = load_csr(path.format('prop-train.npz'),
                                        return_y=True)
        X_te_prop, y_te_prop = load_csr(path.format('prop-test.npz'),
                                        return_y=True)

        baseline = BaselineStruct(alpha_link=params['alpha'],
                                  alpha_prop=params['alpha'],
                                  l1_ratio=0,
                                  exact_test=exact_test)
        baseline.fit(X_tr_link, y_tr_link, X_tr_prop, y_tr_prop)

        Y_pred = baseline.predict(X_te_link, X_te_prop, test_docs, constraints)

        with open('{}.model.pickle'.format(filename), "wb") as fp:
            pickle.dump(baseline, fp)
        # Also save raw coefficients for lightweight inspection.
        np.save('{}.model'.format(filename),
                (baseline.prop_clf_.coef_, baseline.link_clf_.coef_))

    elif method == 'linear-struct':
        # Structured SVM (pystruct) with hand-crafted features.
        clf, Y_te, Y_pred, vects = fit_pred_pystruct(train_docs, test_docs,
            dataset=dataset, class_weight='balanced',
            constraints=constraints, compat_features=compat_features,
            second_order=second_order, coparents=coparents,
            grandparents=grandparents, siblings=siblings,
            exact_test=exact_test, return_vectorizers=True, **params)

        with open('{}.vectorizers.pickle'.format(filename), "wb") as fp:
            pickle.dump(vects, fp)
        with open('{}.model.pickle'.format(filename), "wb") as fp:
            pickle.dump(clf, fp)
        np.save('{}.model'.format(filename), clf.w)

    elif method == "rnn":
        # Unstructured LSTM baseline.
        Y_train = [doc.label for doc in train_docs]
        Y_te = [doc.label for doc in test_docs]
        embeds = load_embeds(dataset)

        rnn = BaselineArgumentLSTM(lstm_dropout=0,
                                   prop_mlp_layers=2,
                                   score_at_iter=None,
                                   n_mlp=128,
                                   n_lstm=128,
                                   lstm_layers=2,
                                   link_mlp_layers=1,
                                   embeds=embeds,
                                   link_bilinear=True,
                                   constraints=constraints,
                                   exact_test=exact_test,
                                   **params)
        rnn.fit(train_docs, Y_train)

        with open('{}.model.pickle'.format(filename), "wb") as fp:
            pickle.dump(rnn, fp)
        # Dynet weights cannot be pickled; saved through the model itself.
        rnn.save('{}.model.dynet'.format(filename))

        Y_pred = rnn.predict(test_docs)

    elif method == "rnn-struct":
        # Structured LSTM with optional second-order factors.
        Y_train = [doc.label for doc in train_docs]
        Y_te = [doc.label for doc in test_docs]
        embeds = load_embeds(dataset)

        rnn = ArgumentLSTM(lstm_dropout=0,
                           prop_mlp_layers=2,
                           score_at_iter=None,
                           n_mlp=128,
                           n_lstm=128,
                           lstm_layers=2,
                           link_mlp_layers=1,
                           embeds=embeds,
                           link_bilinear=True,
                           class_weight='balanced',
                           second_order_multilinear=True,
                           exact_inference=False,
                           constraints=constraints,
                           compat_features=compat_features,
                           grandparent_layers=(1 if grandparents else 0),
                           coparent_layers=(1 if coparents else 0),
                           sibling_layers=(1 if siblings else 0),
                           exact_test=exact_test,
                           **params)
        rnn.fit(train_docs, Y_train)

        with open('{}.model.pickle'.format(filename), "wb") as fp:
            pickle.dump(rnn, fp)
        rnn.save('{}.model.dynet'.format(filename))

        Y_pred = rnn.predict(test_docs)

    # NOTE(review): predictions are dumped with dill rather than pickle —
    # presumably they can contain objects plain pickle rejects; confirm.
    with open('{}.predictions.dill'.format(filename), "wb") as f:
        dill.dump(Y_pred, f)
|
import time
import argparse
import nonRedundantResolver
import overlapResolver
import gapFiller
import twoRepeatOneBridgeSolver
import houseKeeper
import IORobot
###################################################### Starting point
def mainFlow(folderName , mummerLink, pickupname, mapcontigsname):
    # Run the FinisherSC pipeline stages in order. ``pickupname`` names an
    # intermediate FASTA to resume from; stages that already produced it
    # (or an earlier file) are skipped.
    print "Go Bears! ! !"
    print "pickupname, mapcontigsname", pickupname, mapcontigsname

    # Remove contigs embedded in other contigs (skipped when resuming from
    # noEmbed.fasta or any later intermediate).
    if not pickupname in ["noEmbed.fasta", "improved.fasta", "improved2.fasta"]:
        nonRedundantResolver.removeEmbedded(folderName , mummerLink)

    # Merge overlapping contigs into a sequence graph.
    if not pickupname in ["improved.fasta", "improved2.fasta"]:
        overlapResolver.fetchSuccessor(folderName , mummerLink)
        overlapResolver.formSeqGraph(folderName , mummerLink)

    # Gap filling; skipped only when resuming from improved2.fasta.
    if not pickupname in ["improved2.fasta"]:
        gapFiller.fillGap(folderName , mummerLink)

    # Repeat resolution via x-phasing always runs.
    twoRepeatOneBridgeSolver.xPhased(folderName , mummerLink)

    # ECReduction(folderName , mummerLink )
    # compareWithReference(folderName , mummerLink)

    # Re-insert contigs dropped along the way into each output FASTA.
    IORobot.fillInMissed(folderName, mummerLink, houseKeeper.globalContigName, "noEmbedtmp.fasta", "noEmbed.fasta")
    IORobot.fillInMissed(folderName, mummerLink, houseKeeper.globalContigName, "improvedtmp.fasta", "improved.fasta")
    IORobot.fillInMissed(folderName, mummerLink, houseKeeper.globalContigName, "improved2tmp.fasta", "improved2.fasta")
    IORobot.fillInMissed(folderName, mummerLink, houseKeeper.globalContigName, "improved3tmp.fasta", "improved3.fasta")

    # Optionally map the new contigs back onto the originals.
    if mapcontigsname != None:
        houseKeeper.performMapping(folderName, mummerLink, mapcontigsname)

    print "<3 Do cool things that matter <3"
# folderName = "S_cerivisea/"
# mummerLink = "MUMmer3.23/"

# Wall-clock start; total runtime is printed at the end.
t0 = time.time()

parser = argparse.ArgumentParser(description='FinisherSC : a repeat-aware tool to upgrade de-novo assembly with long reads')
parser.add_argument('folderName')
parser.add_argument('mummerLink')
parser.add_argument('-p', '--pickup', help='Picks up existing work (input is noEmbed.fasta, improved.fasta or improved2.fasta)', required=False)
parser.add_argument('-o', '--mapcontigs', help='Maps new contigs to old contigs(input is of the format of contigs.fasta_improved3.fasta which means improved3.fasta will be mapped back to contigs.fasta; Output can be found in mappingResults.txt in the destinedFolder;)', required=False)
parser.add_argument('-f', '--fast', help= 'Fast aligns contigs (input is True)', required=False)
parser.add_argument('-par', '--parallel', help= 'Fast aligns contigs (input is maximum number of threads)', required=False)
parser.add_argument('-l', '--large', help= 'Large number of contigs/large size of contigs (input is True)', required=False)
args = vars(parser.parse_args())
print "args", args

# Validate and normalise the working folder and MUMmer installation path.
pathExists, newFolderName, newMummerLink = houseKeeper.checkingPath(args['folderName'], args['mummerLink'])

# Global switches live as module attributes on houseKeeper.
if args['fast'] == "True":
    houseKeeper.globalFast = True
else:
    houseKeeper.globalFast = False

if args['parallel'] != None:
    houseKeeper.globalParallel = int(args['parallel'])
else:
    houseKeeper.globalParallel = 1

if args['large'] == "True":
    houseKeeper.globalLarge = True
else:
    houseKeeper.globalLarge = False

if pathExists:
    mainFlow(newFolderName, newMummerLink, args['pickup'], args['mapcontigs'])
else:
    print "Sorry. The above folders or files are missing. If you continue to have problems, please contact me(<NAME>) at <EMAIL>"

print "Time", time.time() - t0
|
from datetime import timedelta
from operator import methodcaller
import itertools
import math
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')
import os
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from datashape import dshape
from odo import odo, drop, discover
from blaze import (
data,
atan2,
by,
coalesce,
compute,
concat,
cos,
greatest,
join,
least,
radians,
sin,
sqrt,
symbol,
transform,
)
from blaze.interactive import iscorescalar
from blaze.utils import example, normalize
# Infinite supply of unique throwaway table names: tbl0, tbl1, ...
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture(scope='module')
def pg_ip():
    """Host of the postgres server under test; override with POSTGRES_IP."""
    ip = os.environ.get('POSTGRES_IP')
    return ip if ip is not None else 'localhost'
@pytest.fixture
def url(pg_ip):
    """URL template for a table in the test DB; '%s' receives the table name."""
    return 'postgresql://postgres@' + pg_ip + '/test::%s'
@pytest.yield_fixture
def sql(url):
    # Fresh two-row {A: string, B: int64} table, dropped on teardown.
    ds = dshape('var * {A: string, B: int64}')
    try:
        t = data(url % next(names), dshape=ds)
    except sa.exc.OperationalError as e:
        # No reachable postgres server: skip instead of erroring.
        pytest.skip(str(e))
    else:
        assert t.dshape == ds
        t = data(odo([('a', 1), ('b', 2)], t))
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture
def sql_with_null(url):
    """Table with NULLs in name/sex/comment, for NULL-propagation tests.

    Dropped again on teardown; skips when no postgres server is reachable.
    """
    ds = dshape(""" var * {name: ?string,
                           sex: ?string,
                           amount: int,
                           id: int,
                           comment: ?string}
                """)
    rows = [('Alice', 'F', 100, 1, 'Alice comment'),
            (None, 'M', 300, 2, None),
            ('Drew', 'F', 100, 4, 'Drew comment'),
            ('Bob', 'M', 100, 5, 'Bob comment 2'),
            ('Drew', 'M', 200, 5, None),
            ('first', None, 300, 4, 'Missing info'),
            (None, None, 300, 6, None)]
    try:
        # (leftover debug print of the URL removed)
        t = data(url % next(names), dshape=ds)
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        assert t.dshape == ds
        t = data(odo(rows, t))
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture(scope='module')
def nyc(pg_ip):
    # odoing csv -> pandas -> postgres is more robust, as it doesn't require
    # the postgres server to be on the same filesystem as the csv file.
    nyc_pd = odo(example('nyc.csv'), pd.DataFrame)
    try:
        t = odo(nyc_pd,
                'postgresql://postgres@{}/test::nyc'.format(pg_ip))
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture
def big_sql(url):
    """100-row table of ('a', i) pairs, used by sampling/relabel tests."""
    try:
        tbl = data(url % next(names), dshape='var * {A: string, B: int64}')
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        rows = zip(list('a' * 100), list(range(100)))
        tbl = odo(rows, tbl)
        try:
            yield tbl
        finally:
            drop(tbl)
@pytest.yield_fixture
def sqla(url):
    # Nullable table: one NULL in each column, for coalesce/join tests.
    try:
        t = data(url % next(names), dshape='var * {A: ?string, B: ?int32}')
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        t = odo([('a', 1), (None, 1), ('c', None)], t)
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture
def sqlb(url):
    # Non-nullable two-row counterpart of ``sqla``; join partner.
    try:
        t = data(url % next(names), dshape='var * {A: string, B: int64}')
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        t = odo([('a', 1), ('b', 2)], t)
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture
def sql_with_dts(url):
    # One datetime column covering every day of Jan 2014.
    try:
        t = data(url % next(names), dshape='var * {A: datetime}')
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        t = odo([(d,) for d in pd.date_range('2014-01-01', '2014-02-01')], t)
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture
def sql_with_timedeltas(url):
    # One timedelta column: 0..9 seconds.
    try:
        t = data(url % next(names), dshape='var * {N: timedelta}')
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        t = odo([(timedelta(seconds=n),) for n in range(10)], t)
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture
def sql_two_tables(url):
    """Two empty int32 tables with identical schema; both dropped on teardown."""
    # Renamed from ``dshape``: the original shadowed the imported datashape
    # ``dshape`` function with a plain string.
    ds = 'var * {a: int32}'
    try:
        t = data(url % next(names), dshape=ds)
        u = data(url % next(names), dshape=ds)
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield u, t
        finally:
            drop(t)
            drop(u)
@pytest.yield_fixture
def products(url):
    # Primary-key table referenced by the ``orders`` fixture.
    try:
        products = data(url % 'products',
                        dshape="""var * {
                            product_id: int64,
                            color: ?string,
                            price: float64}""",
                        primary_key=['product_id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield products
        finally:
            drop(products)
@pytest.yield_fixture
def orders(url, products):
    # Table with a foreign key (product_id) into ``products``.
    try:
        orders = data(url % 'orders',
                      dshape="""var * {
                          order_id: int64,
                          product_id: map[int64, T],
                          quantity: int64}""",
                      foreign_keys=dict(product_id=products.data.c.product_id),
                      primary_key=['order_id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield orders
        finally:
            drop(orders)
# TODO: scope these as module because I think pytest is caching sa.Table, which
# doesn't work if remove it after every run
@pytest.yield_fixture
def main(url):
    # 13-row {id, data} table; root of the pkey -> main foreign-key chain.
    try:
        main = odo([(i, int(np.random.randint(10))) for i in range(13)],
                   url % 'main',
                   dshape=dshape('var * {id: int64, data: int64}'),
                   primary_key=['id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield main
        finally:
            drop(main)
@pytest.yield_fixture
def pkey(url, main):
    """100-row {id, sym, price, main} table whose ``main`` column is a
    foreign key into the ``main`` fixture's table."""
    choices = [u'AAPL', u'HPQ', u'ORCL', u'IBM', u'DOW', u'SBUX', u'AMD',
               u'INTC', u'GOOG', u'PRU', u'MSFT', u'AIG', u'TXN', u'DELL',
               u'PEP']
    n = 100
    # Renamed from ``data``: the original shadowed the imported blaze ``data``.
    rows = list(zip(range(n),
                    np.random.choice(choices, size=n).tolist(),
                    np.random.uniform(10000, 20000, size=n).tolist(),
                    np.random.randint(main.count().scalar(), size=n).tolist()))
    try:
        pkey = odo(rows, url % 'pkey',
                   dshape=dshape('var * {id: int64, sym: string, price: float64, main: map[int64, T]}'),
                   foreign_keys=dict(main=main.c.id),
                   primary_key=['id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield pkey
        finally:
            drop(pkey)
@pytest.yield_fixture
def fkey(url, pkey):
    # 10-row table with sym_id as a foreign key into ``pkey``.
    try:
        fkey = odo([(i,
                     int(np.random.randint(pkey.count().scalar())),
                     int(np.random.randint(10000)))
                    for i in range(10)],
                   url % 'fkey',
                   dshape=dshape('var * {id: int64, sym_id: map[int64, T], size: int64}'),
                   foreign_keys=dict(sym_id=pkey.c.id),
                   primary_key=['id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield fkey
        finally:
            drop(fkey)
@pytest.yield_fixture
def sql_with_float(url):
    """Empty single-column float64 table; callers insert their own rows."""
    try:
        tbl = data(url % next(names), dshape='var * {c: float64}')
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield tbl
        finally:
            drop(tbl)
@pytest.yield_fixture(scope='module')
def nyc_csv(pg_ip):
    # Like ``nyc`` but odoes the csv straight into postgres (no pandas hop).
    try:
        t = odo(
            example('nyc.csv'),
            'postgresql://postgres@{}/test::nyc'.format(pg_ip),
        )
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield t
        finally:
            drop(t)
def test_nyc_csv(nyc_csv):
    """The csv-loaded nyc table contains at least one row."""
    s = symbol('t', discover(nyc_csv))
    nrows = compute(s.nrows, nyc_csv, return_type='core')
    assert nrows > 0
def test_postgres_create(sql):
    """Round-trip: the fixture's rows come back out of the table."""
    expected = [('a', 1), ('b', 2)]
    assert odo(sql, list) == expected
def test_postgres_isnan(sql_with_float):
    """isnan() distinguishes a NaN row from an ordinary float row."""
    rows = (1.0,), (float('nan'),)
    tbl = odo(rows, sql_with_float)
    s = symbol('s', discover(rows))
    result = compute(s.isnan(), tbl, return_type=list)
    assert result == [(False,), (True,)]
def test_insert_from_subselect(sql_with_float):
    """Inserting a sorted sub-select appends its rows, in order, to the table."""
    # Renamed from ``data``: don't shadow the imported blaze ``data``.
    df = pd.DataFrame([{'c': 2.0}, {'c': 1.0}])
    tbl = odo(df, sql_with_float)
    s = symbol('s', discover(df))
    # The original had a stray trailing comma here, wrapping this statement
    # in a pointless 1-tuple; removed.
    odo(compute(s[s.c.isin((1.0, 2.0))].sort(), tbl, return_type='native'),
        sql_with_float)
    tm.assert_frame_equal(
        odo(sql_with_float, pd.DataFrame).iloc[2:].reset_index(drop=True),
        pd.DataFrame([{'c': 1.0}, {'c': 2.0}]),
    )
def test_concat(sql_two_tables):
    # NOTE(review): the fixture yields (u, t); unpacking as (t_table, u_table)
    # swaps them — harmless here since both tables share one schema.
    t_table, u_table = sql_two_tables
    t_data = pd.DataFrame(np.arange(5), columns=['a'])
    u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
    odo(t_data, t_table)
    odo(u_data, u_table)
    t = symbol('t', discover(t_data))
    u = symbol('u', discover(u_data))
    # Concatenation of the two tables equals 0..9 once sorted.
    tm.assert_frame_equal(
        compute(concat(t, u).sort('a'), {t: t_table, u: u_table}, return_type=pd.DataFrame),
        pd.DataFrame(np.arange(10), columns=['a']),
    )
def test_concat_invalid_axis(sql_two_tables):
    # axis=1 concat is unsupported for SQL; must raise and suggest merge.
    t_table, u_table = sql_two_tables
    t_data = pd.DataFrame(np.arange(5), columns=['a'])
    u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
    odo(t_data, t_table)
    odo(u_data, u_table)
    # We need to force the shape to not be a record here so we can
    # create the `Concat` node with an axis=1.
    t = symbol('t', '5 * 1 * int32')
    u = symbol('u', '5 * 1 * int32')
    with pytest.raises(ValueError) as e:
        compute(concat(t, u, axis=1), {t: t_table, u: u_table}, return_type='native')
    # Preserve the suggestion to use merge.
    assert "'merge'" in str(e.value)
def test_timedelta_arith(sql_with_dts):
    # datetime +/- timedelta in SQL matches the same arithmetic in pandas.
    delta = timedelta(days=1)
    dates = pd.Series(pd.date_range('2014-01-01', '2014-02-01'))
    sym = symbol('s', discover(dates))
    assert (
        compute(sym + delta, sql_with_dts, return_type=pd.Series) == dates + delta
    ).all()
    assert (
        compute(sym - delta, sql_with_dts, return_type=pd.Series) == dates - delta
    ).all()
    assert (
        compute(sym - (sym - delta), sql_with_dts, return_type=pd.Series) ==
        dates - (dates - delta)
    ).all()
@pytest.mark.parametrize('func', ('var', 'std'))
def test_timedelta_stat_reduction(sql_with_timedeltas, func):
    # var/std over a timedelta column matches pandas on int64 nanoseconds.
    sym = symbol('s', discover(sql_with_timedeltas))
    expr = getattr(sym.N, func)()
    deltas = pd.Series([timedelta(seconds=n) for n in range(10)])
    # Convert ns -> s before reducing, honouring the expression's ddof.
    expected = timedelta(
        seconds=getattr(deltas.astype('int64') / 1e9, func)(ddof=expr.unbiased)
    )
    assert compute(expr, sql_with_timedeltas, return_type=timedelta) == expected
def test_coerce_bool_and_sum(sql):
    # Summing a bool column coerced to int32 equals the pandas gt/sum count.
    sql = sql.data
    n = sql.name
    t = symbol(n, discover(sql))
    expr = (t.B > 1.0).coerce(to='int32').sum()
    result = compute(expr, sql).scalar()
    expected = compute(t.B, sql, return_type=pd.Series).gt(1).sum()
    assert result == expected
def test_distinct_on(sql):
    # distinct('A') should compile to postgres DISTINCT ON and yield both rows.
    sql = sql.data
    t = symbol('t', discover(sql))
    computation = compute(t[['A', 'B']].sort('A').distinct('A'), sql, return_type='native')
    assert normalize(str(computation)) == normalize("""
        SELECT DISTINCT ON (anon_1."A") anon_1."A", anon_1."B"
        FROM (SELECT {tbl}."A" AS "A", {tbl}."B" AS "B"
        FROM {tbl}) AS anon_1 ORDER BY anon_1."A" ASC
    """.format(tbl=sql.name))
    assert odo(computation, tuple) == (('a', 1), ('b', 2))
def test_relabel_columns_over_selection(big_sql):
    """Relabelling a column on top of a filter keeps the new column name."""
    t = symbol('t', discover(big_sql))
    selected = t[t['B'] == 2].relabel(B=u'b')
    result = compute(selected, big_sql, return_type=pd.DataFrame)
    expected = pd.DataFrame([['a', 2]], columns=[u'A', u'b'])
    tm.assert_frame_equal(result, expected)
def test_auto_join_field(orders):
    # Walking through a foreign key to a single field emits an implicit join.
    t = symbol('t', discover(orders))
    expr = t.product_id.color
    result = compute(expr, orders, return_type='native')
    expected = """SELECT
        products.color
    FROM products, orders
    WHERE orders.product_id = products.product_id
    """
    assert normalize(str(result)) == normalize(expected)
def test_auto_join_projection(orders):
    # Projecting several fields through a foreign key also joins implicitly.
    t = symbol('t', discover(orders))
    expr = t.product_id[['color', 'price']]
    result = compute(expr, orders, return_type='native')
    expected = """SELECT
        products.color,
        products.price
    FROM products, orders
    WHERE orders.product_id = products.product_id
    """
    assert normalize(str(result)) == normalize(expected)
@pytest.mark.xfail
@pytest.mark.parametrize('func', ['max', 'min', 'sum'])
def test_foreign_key_reduction(orders, products, func):
    # Reductions over a foreign-key field should compile via a CTE (xfail).
    t = symbol('t', discover(orders))
    expr = methodcaller(func)(t.product_id.price)
    result = compute(expr, orders, return_type='native')
    expected = """WITH alias as (select
        products.price as price
    from
        products, orders
    where orders.product_id = products.product_id)
    select {0}(alias.price) as price_{0} from alias
    """.format(func)
    assert normalize(str(result)) == normalize(expected)
def test_foreign_key_chain(fkey):
    # Two-hop foreign-key traversal (fkey -> pkey -> main) joins all three.
    t = symbol('t', discover(fkey))
    expr = t.sym_id.main.data
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        main.data
    FROM main, fkey, pkey
    WHERE fkey.sym_id = pkey.id and pkey.main = main.id
    """
    assert normalize(str(result)) == normalize(expected)
@pytest.mark.xfail(raises=AssertionError,
                   reason='CTE mucks up generation here')
@pytest.mark.parametrize('grouper', ['sym', ['sym']])
def test_foreign_key_group_by(fkey, grouper):
    # Grouping by a field reached through a foreign key (known-broken: xfail).
    t = symbol('fkey', discover(fkey))
    expr = by(t.sym_id[grouper], avg_price=t.sym_id.price.mean())
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        pkey.sym,
        avg(pkey.price) AS avg_price
    FROM pkey, fkey
    WHERE fkey.sym_id = pkey.id
    GROUP BY pkey.sym
    """
    assert normalize(str(result)) == normalize(expected)
@pytest.mark.parametrize('grouper', ['sym_id', ['sym_id']])
def test_group_by_map(fkey, grouper):
    # Grouping by the raw foreign-key column itself needs no join.
    t = symbol('fkey', discover(fkey))
    expr = by(t[grouper], id_count=t.size.count())
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        fkey.sym_id,
        count(fkey.size) AS id_count
    FROM fkey
    GROUP BY fkey.sym_id
    """
    assert normalize(str(result)) == normalize(expected)
def test_foreign_key_isin(fkey):
    # isin on a foreign-key column compiles to a plain IN clause, no join.
    t = symbol('fkey', discover(fkey))
    expr = t.sym_id.isin([1, 2])
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        fkey.sym_id IN (%(sym_id_1)s, %(sym_id_2)s) AS anon_1
    FROM fkey
    """
    assert normalize(str(result)) == normalize(expected)
@pytest.mark.xfail(raises=AssertionError, reason='Not yet implemented')
def test_foreign_key_merge_expression(fkey):
    # merge() of fields from different foreign-key hops (not implemented).
    from blaze import merge
    t = symbol('fkey', discover(fkey))
    expr = merge(t.sym_id.sym, t.sym_id.main.data)
    expected = """
        select pkey.sym, main.data
        from
            fkey, pkey, main
        where
            fkey.sym_id = pkey.id and pkey.main = main.id
    """
    result = compute(expr, fkey, return_type='native')
    assert normalize(str(result)) == normalize(expected)
def test_join_type_promotion(sqla, sqlb):
    # Joining a nullable table with a non-nullable one keeps NULL rows intact.
    t, s = symbol(sqla.name, discover(sqla)), symbol(sqlb.name, discover(sqlb))
    expr = join(t, s, 'B', how='inner')
    result = set(map(tuple, compute(expr, {t: sqla, s: sqlb}, return_type='native').execute().fetchall()))
    expected = set([(1, 'a', 'a'), (1, None, 'a')])
    assert result == expected
@pytest.mark.parametrize(['n', 'column'],
                         [(1, 'A'), (-1, 'A'),
                          (1, 'B'), (-1, 'B'),
                          (0, 'A'), (0, 'B')])
def test_shift_on_column(n, column, sql):
    # shift(n) on a single column matches pandas Series.shift for n in {-1,0,1}.
    sql = sql.data
    t = symbol('t', discover(sql))
    expr = t[column].shift(n)
    result = compute(expr, sql, return_type=pd.Series)
    expected = odo(sql, pd.DataFrame)[column].shift(n)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('n', [-1, 0, 1])
def test_shift_arithmetic(sql, n):
    """t.B - t.B.shift(n) agrees with the equivalent pandas computation."""
    t = symbol('t', discover(sql))
    result = compute(t.B - t.B.shift(n), sql, return_type=pd.Series)
    frame = odo(sql, pd.DataFrame)
    expected = frame.B - frame.B.shift(n)
    tm.assert_series_equal(result, expected)
def test_dist(nyc):
    # Haversine distance built from blaze math expressions; the max of the
    # SQL-computed column must equal the max of the materialized series.
    def distance(lat1, lon1, lat2, lon2, R=3959):
        # http://andrew.hedges.name/experiments/haversine/
        dlon = radians(lon2 - lon1)
        dlat = radians(lat2 - lat1)
        a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
        return R * 2 * atan2(sqrt(a), sqrt(1 - a))

    t = symbol('t', discover(nyc))

    # Keep only plausible NYC coordinates and small passenger counts.
    filtered = t[
        (t.pickup_latitude >= 40.477399) &
        (t.pickup_latitude <= 40.917577) &
        (t.dropoff_latitude >= 40.477399) &
        (t.dropoff_latitude <= 40.917577) &
        (t.pickup_longitude >= -74.259090) &
        (t.pickup_longitude <= -73.700272) &
        (t.dropoff_longitude >= -74.259090) &
        (t.dropoff_longitude <= -73.700272) &
        (t.passenger_count < 6)
    ]
    dist = distance(filtered.pickup_latitude, filtered.pickup_longitude,
                    filtered.dropoff_latitude, filtered.dropoff_longitude)
    transformed = transform(filtered, dist=dist)
    assert (
        compute(transformed.dist.max(), nyc, return_type=float) ==
        compute(transformed.dist, nyc, return_type=pd.Series).max()
    )
def test_multiple_columns_in_transform(nyc):
    # transform() with two derived columns, one depending on the other's input.
    t = symbol('t', discover(nyc))
    # Same NYC bounding-box filter as test_dist.
    t = t[
        (t.pickup_latitude >= 40.477399) &
        (t.pickup_latitude <= 40.917577) &
        (t.dropoff_latitude >= 40.477399) &
        (t.dropoff_latitude <= 40.917577) &
        (t.pickup_longitude >= -74.259090) &
        (t.pickup_longitude <= -73.700272) &
        (t.dropoff_longitude >= -74.259090) &
        (t.dropoff_longitude <= -73.700272) &
        (t.passenger_count < 6)
    ]
    hours = t.trip_time_in_secs.coerce('float64') / 3600.0
    avg_speed_in_mph = t.trip_distance / hours
    d = transform(t, avg_speed_in_mph=avg_speed_in_mph, mycol=avg_speed_in_mph + 1)
    df = compute(d[d.avg_speed_in_mph <= 200], nyc, return_type=pd.DataFrame)
    assert not df.empty
def test_coerce_on_select(nyc):
    # coerce() applied on top of a filtered/transformed select statement.
    t = symbol('t', discover(nyc))
    t = t[
        (t.pickup_latitude >= 40.477399) &
        (t.pickup_latitude <= 40.917577) &
        (t.dropoff_latitude >= 40.477399) &
        (t.dropoff_latitude <= 40.917577) &
        (t.pickup_longitude >= -74.259090) &
        (t.pickup_longitude <= -73.700272) &
        (t.dropoff_longitude >= -74.259090) &
        (t.dropoff_longitude <= -73.700272) &
        (t.passenger_count < 6)
    ]
    t = transform(t, pass_count=t.passenger_count + 1)
    result = compute(t.pass_count.coerce('float64'), nyc, return_type='native')
    s = odo(result, pd.Series)
    expected = compute(t, nyc, return_type=pd.DataFrame) \
        .passenger_count.astype('float64') + 1.0
    assert list(s) == list(expected)
def test_interactive_len(sql):
    """len() of an interactive expression equals its SQL count()."""
    expr = data(sql)
    assert len(expr) == int(expr.count())
def test_sample_n(nyc):
    """sample(n=k) returns exactly k rows."""
    t = symbol('t', discover(nyc))
    sampled = compute(t.sample(n=14), nyc, return_type=pd.DataFrame)
    assert len(sampled) == 14
def test_sample_bounded(nyc):
    """Asking for more rows than exist caps the sample at the table size."""
    t = symbol('t', discover(nyc))
    total = compute(t.nrows, nyc, return_type=int)
    sampled = compute(t.sample(n=2 * total), nyc, return_type=pd.DataFrame)
    assert len(sampled) == total
def test_sample_frac(nyc):
    # sample(frac=0.5) returns round-half-up(nrows * 0.5) rows.
    t = symbol('t', discover(nyc))
    result = compute(t.sample(frac=0.5), nyc, return_type=pd.DataFrame)
    num_rows = compute(t.nrows, nyc, return_type=int)
    # *Sigh* have to do proper rounding manually; Python's round() builtin is
    # borked.
    fractional, integral = math.modf(num_rows * 0.5)
    assert int(integral + (0 if fractional < 0.5 else 1)) == len(result)
def test_sample(big_sql):
    """n-based and frac-based sampling agree on the returned row count."""
    nn = symbol('nn', discover(big_sql))
    nrows = odo(compute(nn.nrows, big_sql), int)
    by_n = compute(nn.sample(n=nrows // 2), big_sql, return_type=pd.DataFrame)
    assert len(by_n) == nrows // 2
    by_frac = compute(nn.sample(frac=0.5), big_sql, return_type=pd.DataFrame)
    assert len(by_n) == len(by_frac)
@pytest.mark.parametrize("sep", [None, " -- "])
def test_str_cat_with_null(sql_with_null, sep):
    # str_cat propagates NULL: any NULL operand yields a NULL result.
    t = symbol('t', discover(sql_with_null))
    res = compute(t.name.str_cat(t.sex, sep=sep), sql_with_null,
                  return_type=list)
    res = [r[0] for r in res]
    cols = compute(t[['name', 'sex']], sql_with_null, return_type=list)
    for r, (n, s) in zip(res, cols):
        if n is None or s is None:
            assert r is None
        else:
            assert (r == n + s if sep is None else r == n + sep + s)
def test_chain_str_cat_with_null(sql_with_null):
    # Chained str_cat: a NULL anywhere in the chain nulls the whole result.
    t = symbol('t', discover(sql_with_null))
    expr = (t.name
            .str_cat(t.comment, sep=' ++ ')
            .str_cat(t.sex, sep=' -- '))
    res = compute(expr, sql_with_null, return_type=list)
    res = [r[0] for r in res]
    cols = compute(t[['name', 'comment', 'sex']], sql_with_null,
                   return_type=list)
    for r, (n, c, s) in zip(res, cols):
        if n is None or c is None or s is None:
            assert r is None
        else:
            assert (r == n + ' ++ ' + c + ' -- ' + s)
def test_str_cat_bcast(sql_with_null):
    # str_cat with a scalar symbol broadcasts the literal across every row.
    t = symbol('t', discover(sql_with_null))
    lit_sym = symbol('s', 'string')
    s = t[t.amount <= 200]
    result = compute(s.comment.str_cat(lit_sym, sep=' '),
                     {t: sql_with_null, lit_sym: '!!'},
                     return_type=pd.Series)
    df = compute(s, sql_with_null,
                 return_type=pd.DataFrame)
    expected = df.comment.str.cat(['!!'] * len(df.comment), sep=' ')
    # Compare values where defined, and NULL positions everywhere else.
    assert all(expected[~expected.isnull()] == result[~result.isnull()])
    assert all(expected[expected.isnull()].index == result[result.isnull()].index)
def test_str_cat_where_clause(sql_with_null):
    """
    Invokes the (Select, Select) path for compute_up
    """
    t = symbol('t', discover(sql_with_null))
    s = t[t.amount <= 200]
    c1 = s.comment.str_cat(s.sex, sep=' -- ')
    bres = compute(c1, sql_with_null, return_type=pd.Series)
    df_s = compute(s, sql_with_null, return_type=pd.DataFrame)
    exp = df_s.comment.str.cat(df_s.sex, ' -- ')
    # Values agree on non-null rows; NULL positions agree everywhere.
    assert all(exp[~exp.isnull()] == bres[~bres.isnull()])
    assert all(exp[exp.isnull()].index == bres[bres.isnull()].index)
def test_core_compute(nyc):
    """return_type='core' yields DataFrame / Series / scalar as appropriate."""
    t = symbol('t', discover(nyc))
    frame = compute(t, nyc, return_type='core')
    assert isinstance(frame, pd.DataFrame)
    column = compute(t.passenger_count, nyc, return_type='core')
    assert isinstance(column, pd.Series)
    scalar = compute(t.passenger_count.mean(), nyc, return_type='core')
    assert iscorescalar(scalar)
    assert isinstance(compute(t, nyc, return_type=list), list)
@pytest.fixture
def gl_data(sql_two_tables):
    # NOTE(review): sql_two_tables yields (u, t); these names follow that
    # order even though they are tables, not raw data.
    u_data, t_data = sql_two_tables
    # populate the tables with some data and return it
    return data(odo([(1,)], u_data)), data(odo([(2,)], t_data))
def test_greatest(gl_data):
    """greatest() of the two single-row tables' maxima is the larger value."""
    u, t = gl_data
    result = odo(greatest(u.a.max(), t.a.max()), int)
    assert result == 2
def test_least(gl_data):
    """least() of the two single-row tables' maxima is the smaller value."""
    u, t = gl_data
    result = odo(least(u.a.max(), t.a.max()), int)
    assert result == 1
def test_coalesce(sqla):
    # coalesce substitutes the fallback only where the column is NULL.
    t = symbol('t', discover(sqla))
    assert (
        compute(coalesce(t.B, -1), {t: sqla}, return_type=list) ==
        [(1,), (1,), (-1,)]
    )
    assert (
        compute(coalesce(t.A, 'z'), {t: sqla}, return_type=list) ==
        [('a',), ('z',), ('c',)]
    )
def test_any(sql):
    """any() is true when some row matches and false when none do."""
    s = symbol('s', discover(sql))
    for value, present in [(1, True), (2, True), (3, False)]:
        predicate = (s.B == value).any()
        assert compute(predicate, {s: sql}, return_type='core') == present
def test_all(sql):
    """all() is true only when every row satisfies the predicate."""
    s = symbol('s', discover(sql))
    assert compute(s.B.isin({1, 2}).all(), {s: sql}, return_type='core')
    for value in (1, 2, 3):
        assert compute(~(s.B == value).all(), {s: sql}, return_type='core')
|
# Yellow_Pages_Angleterre/unit_tests.py
import time
from bs4 import BeautifulSoup
import requests
import pymysql.cursors
import unittest
class UnitTestsDataMinerYellowPagesAngleterre(unittest.TestCase):
def test_extract_one_email_from_one_result(self):
    """Scrape a single scoot.co.uk listing and derive an 'info@<domain>'
    address from its website link (prints the result; no assertions)."""
    url = "https://www.scoot.co.uk/England/-/London/The-Glenlyn-Hotel-500001032719.html"
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103'
    }
    # Throttle to be polite to the remote server.
    time.sleep(3)
    # Request the content of a page from the url
    html = requests.get(url, headers=headers)
    # Parse the content of html_doc
    soup = BeautifulSoup(html.content, 'html.parser')
    if soup.find('a', {'data-yext-click': 'website'}) is not None:
        # Strip scheme and www. from the site URL, keep the bare domain.
        email = "info@" + soup.find('a', {'data-yext-click': 'website'}) \
            .get('href') \
            .replace('www.', '') \
            .replace("https://", "") \
            .replace("http://", "") \
            .split('/')[0]
        print("email : " + email)
    else:
        print("no email business")
def test_extract_each_email_from_one_page_of_results_for_one_activity_and_one_capital(self):
    """For every listing on one search-results page, fetch the detail page
    and derive an 'info@<domain>' address (prints results; no assertions)."""
    print("test_extract_each_email_from_one_page_of_results_for_one_activity_and_one_capital")
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103'
    }
    url = "https://www.scoot.co.uk/find/hotel-in-london"
    time.sleep(2)
    # Request the content of a page from the url
    html = requests.get(url, headers=headers)
    # Parse the content of html_doc
    soup = BeautifulSoup(html.content, 'html.parser')
    if soup.find('a', {'data-yext-click': 'name'}) is not None:
        # One anchor per business on the results page.
        all_single_product = soup.find_all('a', {'data-yext-click': 'name'})
        for single_product in all_single_product:
            url = 'https://www.scoot.co.uk' + single_product.get('href')
            time.sleep(2)
            # Request the content of a page from the url
            html = requests.get(url, headers=headers)
            # Parse the content of html_doc
            soup = BeautifulSoup(html.content, 'html.parser')
            if soup.find('a', {'data-yext-click': 'website'}) is not None:
                # Strip scheme and www. from the site URL, keep the domain.
                email = "info@" + soup.find('a', {'data-yext-click': 'website'}) \
                    .get('href') \
                    .replace('www.', '') \
                    .replace("https://", "") \
                    .replace("http://", "") \
                    .split('/')[0]
                print("email : " + email)
            else:
                print("no email business")
    else:
        print("no div class single-product")
def test_extract_each_email_from_all_pages_of_results_for_one_activity_and_one_capital(self):
    """Scrape every result page for one activity/city pair and print a
    guessed ``info@<domain>`` email for each listed business.

    Fixes vs. the original:
    - The page count was derived by inspecting the first decimal digit of
      ``results / 20`` as a string, which mis-rounded exact halves
      (``round(2.5) == 2`` under Python 3 banker's rounding) and produced an
      extra, empty page for exact multiples of 20. An integer ceiling
      division is used instead.
    - The identical per-page scraping loop, previously duplicated in both
      branches, is factored into a local helper.
    """
    print("test_extract_each_email_from_all_pages_of_results_for_one_activity_and_one_capital")
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103'
    }

    def scrape_result_page(soup, counter):
        """Print an email guess for every business linked from *soup*.

        Returns the updated running business counter.
        """
        links = soup.find_all('a', {'data-yext-click': 'name'})
        if not links:
            print("no div class single-product")
            return counter
        for link in links:
            counter += 1
            detail_url = 'https://www.scoot.co.uk' + link.get('href')
            time.sleep(2)
            detail = BeautifulSoup(requests.get(detail_url, headers=headers).content, 'html.parser')
            website = detail.find('a', {'data-yext-click': 'website'})
            if website is not None:
                # Guess the contact address as info@<website domain>.
                domain = website.get('href')
                for junk in ('www.', "https://", "http://"):
                    domain = domain.replace(junk, '')
                email = "info@" + domain.split('/')[0]
                print(str(counter) + " email : " + email)
            else:
                print(str(counter) + " no email business")
        return counter

    activity = "hotel"
    city = "london"
    number_of_pages = 0
    url_page = "https://www.scoot.co.uk/find/" + activity + "-in-" + city
    time.sleep(2)
    soup = BeautifulSoup(requests.get(url_page, headers=headers).content, 'html.parser')
    results_span = soup.find("span", {"class": "result-header-results"})
    if results_span is not None:
        total_results = int(results_span.text.replace(" ", "").replace("results", ""))
        # 20 results per page -> integer ceiling division.
        number_of_pages = -(-total_results // 20)
        print('number_of_pages : ' + str(number_of_pages))
    else:
        print("error pages")

    counter = 0
    if number_of_pages > 1:
        # NOTE(review): only the first 10 pages are fetched, matching the
        # original's `if i <= 10` guard (presumably a site browsing limit).
        # "&page=" assumes url_page already has a query string -- it does
        # not; "?page=" may be intended. TODO confirm against the site.
        for page in range(1, min(number_of_pages, 10) + 1):
            url = url_page + "&page=" + str(page)
            print(url)
            time.sleep(2)
            page_soup = BeautifulSoup(requests.get(url, headers=headers).content, 'html.parser')
            counter = scrape_result_page(page_soup, counter)
    else:
        # Zero or one page: scrape the listing page already downloaded.
        scrape_result_page(soup, counter)
def test_extract_each_email_from_all_pages_of_results_for_all_activities_and_all_capitals(self):
    """Scrape every result page for every (activity, city) combination,
    print a guessed ``info@<domain>`` email per business and store it in
    the ``contacts_professionnels.emails`` MySQL table.

    Fixes vs. the original:
    - Page count now uses integer ceiling division instead of string-digit
      rounding (which mis-rounded exact halves under Python 3 and added a
      phantom page for exact multiples of 20).
    - The ~100-line scrape/store sequence, previously duplicated in both
      the multi-page and single-page branches, is factored into local
      helpers; the SQL insert stays parameterized.
    """
    print("test_extract_each_email_from_all_pages_of_results_for_all_activities_and_all_capitals")
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103'
    }

    def store_email(counter, activity_id, capital_id, email):
        """Insert one scraped email; duplicate rows are reported, not fatal."""
        try:
            # NOTE(review): blank user/password are placeholders -- supply
            # real credentials via configuration, never hard-code them.
            connection = pymysql.connect(
                host='localhost',
                port=3306,
                user='',
                password='',
                db='contacts_professionnels',
                charset='utf8mb4',
                cursorclass=pymysql.cursors.DictCursor
            )
            with connection.cursor() as cursor:
                try:
                    sql = "INSERT INTO `emails` (" \
                          "`id_activite`, " \
                          "`id_capitale_du_monde`, " \
                          "`email`) VALUE (%s, %s, %s)"
                    cursor.execute(sql, (activity_id, capital_id, email))
                    connection.commit()
                    print(str(counter)
                          + " The record is stored : "
                          + email)
                    connection.close()
                except Exception as e:
                    # Typically a duplicate-key violation on the email.
                    print(str(counter)
                          + " The record already exists : "
                          + email
                          + " " + str(e))
                    connection.close()
        except Exception as e:
            print(str(counter) + " An error with the email : " + email + " " + str(e))

    def scrape_result_page(soup, counter, activity_id, capital_id):
        """Scrape every business linked from *soup*; returns updated counter."""
        links = soup.find_all('a', {'data-yext-click': 'name'})
        if not links:
            print("no div class single-product")
            return counter
        for link in links:
            counter += 1
            detail_url = 'https://www.scoot.co.uk' + link.get('href')
            time.sleep(2)
            detail = BeautifulSoup(requests.get(detail_url, headers=headers).content, 'html.parser')
            website = detail.find('a', {'data-yext-click': 'website'})
            if website is not None:
                # Guess the contact address as info@<website domain>.
                domain = website.get('href')
                for junk in ('www.', "https://", "http://"):
                    domain = domain.replace(junk, '')
                email = "info@" + domain.split('/')[0]
                print(str(counter) + " email : " + email)
                store_email(counter, activity_id, capital_id, email)
            else:
                print(str(counter) + " no email business")
        return counter

    activites = [
        {'id': '1', 'url': 'employment'},  # Temporary employment agencies
        {'id': '2', 'url': 'real-estate'},  # Real estate
        {'id': '3', 'url': 'recruitment'},  # Recruiter
        {'id': '4', 'url': 'software'},  # software
        {'id': '5', 'url': 'hotel'},  # hotel
        {'id': '6', 'url': 'social'},  # social landlord
        {'id': '7', 'url': 'cleaning'},  # cleaning
        {'id': '8', 'url': 'charity'},  # charity
        {'id': '9', 'url': 'financial'},  # financial
        {'id': '10', 'url': 'restaurant'},  # restaurant
        {'id': '11', 'url': 'building'},  # building
        {'id': '12', 'url': 'hairdresser'},  # hairdresser
        {'id': '13', 'url': 'florist'},  # florist
        {'id': '14', 'url': 'locksmith'},  # locksmith
        {'id': '15', 'url': 'bakery'},  # bakery
        {'id': '16', 'url': 'insurance'},  # insurance
        {'id': '17', 'url': 'pharmacy'},  # pharmacy
        {'id': '18', 'url': 'mover'},  # mover
        {'id': '19', 'url': 'electricity'},  # electricity
        {'id': '20', 'url': 'plumbing'},  # plumbing
        {'id': '21', 'url': 'security'},  # security
        {'id': '22', 'url': 'attorney'},  # attorney
        {'id': '23', 'url': 'bank'},  # bank
        {'id': '24', 'url': 'garage'},  # garage
        {'id': '25', 'url': 'dentist'},  # dentist
        {'id': '26', 'url': 'doctor'},  # doctor
        {'id': '27', 'url': 'accountant'},  # accountant
        {'id': '28', 'url': 'grocery'},  # grocery stores
        {'id': '29', 'url': 'notary'},  # notary
        {'id': '30', 'url': 'jewellery'},  # jewellery
        {'id': '31', 'url': 'tailor'},  # tailor
        {'id': '32', 'url': 'butcher'},  # butcher
        {'id': '33', 'url': 'library'},  # library
        {'id': '34', 'url': 'architect'},  # architect
        {'id': '36', 'url': 'cement'},  # cement
        {'id': '37', 'url': 'heating'},  # heating
        {'id': '38', 'url': 'boat'},  # boat
        {'id': '39', 'url': 'cold'},  # cold
        {'id': '41', 'url': 'steel'},  # steel
        {'id': '42', 'url': 'chemical'},  # chemical
        {'id': '43', 'url': 'gas'},  # gas
        {'id': '44', 'url': 'gold'}  # gold
    ]
    capitales_du_monde = [
        {'id': '820', 'nom': 'aberdeenshire', 'pays': 'united kingdom'},
        {'id': '821', 'nom': 'aberdeen', 'pays': 'united kingdom'},
        {'id': '822', 'nom': 'argyll', 'pays': 'united kingdom'},
        {'id': '823', 'nom': 'ards', 'pays': 'united kingdom'},
        {'id': '824', 'nom': 'antrim', 'pays': 'united kingdom'},
        {'id': '825', 'nom': 'angus', 'pays': 'united kingdom'},
        {'id': '826', 'nom': 'bath', 'pays': 'united kingdom'},
        {'id': '827', 'nom': 'blackburn-with-darwen', 'pays': 'united kingdom'},
        {'id': '828', 'nom': 'bournemouth', 'pays': 'united kingdom'},
        {'id': '829', 'nom': 'bedford', 'pays': 'united kingdom'},
        {'id': '830', 'nom': 'brighton', 'pays': 'united kingdom'},
        {'id': '831', 'nom': 'blackpool', 'pays': 'united kingdom'},
        {'id': '832', 'nom': 'bracknell-forest', 'pays': 'united kingdom'},
        {'id': '833', 'nom': 'bradford', 'pays': 'united kingdom'},
        {'id': '834', 'nom': 'bristol', 'pays': 'united kingdom'},
        {'id': '835', 'nom': 'bury', 'pays': 'united kingdom'},
        {'id': '836', 'nom': 'cheshire', 'pays': 'united kingdom'},
        {'id': '837', 'nom': 'clackmannanshire', 'pays': 'united kingdom'},
        {'id': '838', 'nom': 'calderdale', 'pays': 'united kingdom'},
        {'id': '839', 'nom': 'cumbria', 'pays': 'united kingdom'},
        {'id': '840', 'nom': 'carmarthenshire', 'pays': 'united kingdom'},
        {'id': '841', 'nom': 'cornwall', 'pays': 'united kingdom'},
        {'id': '842', 'nom': 'coventry', 'pays': 'united kingdom'},
        {'id': '843', 'nom': 'cardiff', 'pays': 'united kingdom'},
        {'id': '844', 'nom': 'croydon', 'pays': 'united kingdom'},
        {'id': '845', 'nom': 'conwy', 'pays': 'united kingdom'},
        {'id': '846', 'nom': 'darlington', 'pays': 'united kingdom'},
        {'id': '847', 'nom': 'derbyshire', 'pays': 'united kingdom'},
        {'id': '848', 'nom': 'denbighshire', 'pays': 'united kingdom'},
        {'id': '849', 'nom': 'derby', 'pays': 'united kingdom'},
        {'id': '850', 'nom': 'devon', 'pays': 'united kingdom'},
        {'id': '851', 'nom': 'dumfries', 'pays': 'united kingdom'},
        {'id': '852', 'nom': 'doncaster', 'pays': 'united kingdom'},
        {'id': '853', 'nom': 'dundee', 'pays': 'united kingdom'},
        {'id': '854', 'nom': 'dorset', 'pays': 'united kingdom'},
        {'id': '855', 'nom': 'ayrshire', 'pays': 'united kingdom'},
        {'id': '856', 'nom': 'edinburgh ', 'pays': 'united kingdom'},
        {'id': '857', 'nom': 'dunbartonshire', 'pays': 'united kingdom'},
        {'id': '858', 'nom': 'lothian', 'pays': 'united kingdom'},
        {'id': '859', 'nom': 'eilean-siar', 'pays': 'united kingdom'},
        {'id': '860', 'nom': 'renfrewshire', 'pays': 'united kingdom'},
        {'id': '861', 'nom': 'falkirk', 'pays': 'united kingdom'},
        {'id': '862', 'nom': 'fife', 'pays': 'united kingdom'},
        {'id': '863', 'nom': 'glasgow', 'pays': 'united kingdom'},
        {'id': '864', 'nom': 'highland', 'pays': 'united kingdom'},
        {'id': '865', 'nom': 'inverclyde', 'pays': 'united kingdom'},
        {'id': '866', 'nom': 'kirklees', 'pays': 'united kingdom'},
        {'id': '867', 'nom': 'knowsley', 'pays': 'united kingdom'},
        {'id': '868', 'nom': 'lancashire', 'pays': 'united kingdom'},
        {'id': '869', 'nom': 'leicester', 'pays': 'united kingdom'},
        {'id': '870', 'nom': 'leeds', 'pays': 'united kingdom'},
        {'id': '871', 'nom': 'liverpool', 'pays': 'united kingdom'},
        {'id': '872', 'nom': 'luton', 'pays': 'united kingdom'},
        {'id': '873', 'nom': 'manchester', 'pays': 'united kingdom'},
        {'id': '874', 'nom': 'middlesbrough', 'pays': 'united kingdom'},
        {'id': '875', 'nom': 'medway', 'pays': 'united kingdom'},
        {'id': '876', 'nom': 'midlothian', 'pays': 'united kingdom'},
        {'id': '877', 'nom': 'moray', 'pays': 'united kingdom'},
        {'id': '878', 'nom': 'newcastle', 'pays': 'united kingdom'},
        {'id': '879', 'nom': 'nottingham', 'pays': 'united kingdom'},
        {'id': '880', 'nom': 'lanarkshire ', 'pays': 'united kingdom'},
        {'id': '881', 'nom': 'lincolnshire', 'pays': 'united kingdom'},
        {'id': '882', 'nom': 'somerset', 'pays': 'united kingdom'},
        {'id': '883', 'nom': 'tyneside', 'pays': 'united kingdom'},
        {'id': '884', 'nom': 'oldham', 'pays': 'united kingdom'},
        {'id': '885', 'nom': 'orkney', 'pays': 'united kingdom'},
        {'id': '886', 'nom': 'oxfordshire', 'pays': 'united kingdom'},
        {'id': '887', 'nom': 'pembrokeshire', 'pays': 'united kingdom'},
        {'id': '888', 'nom': 'perth', 'pays': 'united kingdom'},
        {'id': '889', 'nom': 'plymouth', 'pays': 'united kingdom'},
        {'id': '890', 'nom': 'portsmouth', 'pays': 'united kingdom'},
        {'id': '891', 'nom': 'peterborough', 'pays': 'united kingdom'},
        {'id': '892', 'nom': 'redcar', 'pays': 'united kingdom'},
        {'id': '893', 'nom': 'rochdale', 'pays': 'united kingdom'},
        {'id': '894', 'nom': 'reading', 'pays': 'united kingdom'},
        {'id': '895', 'nom': 'renfrewshire', 'pays': 'united kingdom'},
        {'id': '896', 'nom': 'rotherham', 'pays': 'united kingdom'},
        {'id': '897', 'nom': 'rutland', 'pays': 'united kingdom'},
        {'id': '898', 'nom': 'sandwell', 'pays': 'united kingdom'},
        {'id': '899', 'nom': 'suffolk', 'pays': 'united kingdom'},
        {'id': '900', 'nom': 'sefton', 'pays': 'united kingdom'},
        {'id': '901', 'nom': 'gloucestershire', 'pays': 'united kingdom'},
        {'id': '902', 'nom': 'sheffield', 'pays': 'united kingdom'},
        {'id': '903', 'nom': 'stockport', 'pays': 'united kingdom'},
        {'id': '904', 'nom': 'salford', 'pays': 'united kingdom'},
        {'id': '905', 'nom': 'slough', 'pays': 'united kingdom'},
        {'id': '906', 'nom': 'lanarkshire', 'pays': 'united kingdom'},
        {'id': '907', 'nom': 'sunderland', 'pays': 'united kingdom'},
        {'id': '908', 'nom': 'solihull', 'pays': 'united kingdom'},
        {'id': '909', 'nom': 'stirling', 'pays': 'united kingdom'},
        {'id': '910', 'nom': 'southampton', 'pays': 'united kingdom'},
        {'id': '911', 'nom': 'swindon', 'pays': 'united kingdom'},
        {'id': '912', 'nom': 'tameside', 'pays': 'united kingdom'},
        {'id': '913', 'nom': 'telford', 'pays': 'united kingdom'},
        {'id': '914', 'nom': 'thurrock', 'pays': 'united kingdom'},
        {'id': '915', 'nom': 'torbay', 'pays': 'united kingdom'},
        {'id': '916', 'nom': 'trafford', 'pays': 'united kingdom'},
        {'id': '917', 'nom': 'berkshire', 'pays': 'united kingdom'},
        {'id': '918', 'nom': 'dunbartonshire', 'pays': 'united kingdom'},
        {'id': '919', 'nom': 'wigan', 'pays': 'united kingdom'},
        {'id': '920', 'nom': 'wakefield', 'pays': 'united kingdom'},
        {'id': '921', 'nom': 'walsall', 'pays': 'united kingdom'},
        {'id': '922', 'nom': 'lothian', 'pays': 'united kingdom'},
        {'id': '923', 'nom': 'wolverhampton', 'pays': 'united kingdom'},
        {'id': '924', 'nom': 'windsor', 'pays': 'united kingdom'},
        {'id': '925', 'nom': 'wokingham', 'pays': 'united kingdom'},
        {'id': '926', 'nom': 'wirral', 'pays': 'united kingdom'},
        {'id': '927', 'nom': 'warrington', 'pays': 'united kingdom'},
        {'id': '928', 'nom': 'york', 'pays': 'united kingdom'},
        {'id': '929', 'nom': 'shetland', 'pays': 'united kingdom'},
    ]
    try:
        for capitale in capitales_du_monde:
            for activite in activites:
                activity = activite.get('url')
                city = capitale.get('nom')
                number_of_pages = 0
                url_page = "https://www.scoot.co.uk/find/" + activity + "-in-" + city
                time.sleep(2)
                soup = BeautifulSoup(requests.get(url_page, headers=headers).content, 'html.parser')
                results_span = soup.find("span", {"class": "result-header-results"})
                if results_span is not None:
                    total_results = int(results_span.text.replace(" ", "").replace("results", ""))
                    # 20 results per page -> integer ceiling division.
                    number_of_pages = -(-total_results // 20)
                    print('number_of_pages : ' + str(number_of_pages))
                else:
                    print("error pages")
                counter = 0
                if number_of_pages > 1:
                    # NOTE(review): only the first 10 pages are fetched,
                    # matching the original's `if i <= 10` guard. "&page="
                    # assumes url_page has a query string -- it does not;
                    # "?page=" may be intended. TODO confirm.
                    for page in range(1, min(number_of_pages, 10) + 1):
                        url = url_page + "&page=" + str(page)
                        print(url)
                        time.sleep(2)
                        page_soup = BeautifulSoup(requests.get(url, headers=headers).content, 'html.parser')
                        counter = scrape_result_page(
                            page_soup, counter, activite.get('id'), capitale.get('id'))
                else:
                    # Zero or one page: scrape the listing already downloaded.
                    scrape_result_page(
                        soup, counter, activite.get('id'), capitale.get('id'))
    except Exception as e:
        print("error : " + str(e))
# Run the scraping test suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
<filename>data_conversion/hapmap2vcf.py<gh_stars>10-100
#!/usr/bin/python
#!/usr/bin/env python
import sys
import random
import os.path
import subprocess
tmpath = sys.argv[0][:-13] + "../tmp/"
def printusage():
    """Print the command-line usage string for this script."""
    print("Usage python hapmap2vcf.py -i [input hapmap path] -o [output vcf path]")
# Require exactly "-i <path> -o <path>" (the two flags may be given in
# either order); anything else prints the usage text and aborts.
if len(sys.argv) < 5 or not ((sys.argv[1]=="-i" and sys.argv[3]=="-o") or (sys.argv[1]=="-o" and sys.argv[3]=="-i")):
    print("Error: Wrong input.")
    printusage()
    exit()
i_path = ""  # input HapMap file path
o_path = ""  # output VCF file path
if sys.argv[1]=="-i":
    i_path = sys.argv[2]
    o_path = sys.argv[4]
else:
    i_path = sys.argv[4]
    o_path = sys.argv[2]
# Probe the installed java and git by extracting the third whitespace-separated
# token of their version banners (awk), e.g. java's quoted "1.8.0_xx".
javaversion = subprocess.Popen('''echo $(java -version 2>&1 |awk 'NR==1{gsub(/"/,"");print $3}')''',shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)
jv = javaversion.stdout.readlines()
gitversion = subprocess.Popen('''echo $(git --version 2>&1 |awk 'NR==1{gsub(/"/,"");print $3}')''',shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)
gv = gitversion.stdout.readlines()
# Exactly one line of output is expected; anything else means the tool is absent.
if len(jv) != 1:
    print("Error: Java does not detected.")
    exit()
if len(gv) != 1:
    print("Error: Git does not detected.")
    exit()
# NOTE(review): on Python 3 these stdout lines are bytes, so split(".") with a
# str separator would raise TypeError -- this code appears to target Python 2;
# confirm the interpreter before reuse.
js = jv[0]
jl = js.split(".")
gs = gv[0]
gl = gs.split(".")
# A well-formed version string has at least three dotted components.
if len(jl) < 3:
    print("Error: Java does not detected.")
    exit()
if len(gl) < 3:
    print("Error: Git does not detected.")
    exit()
# The middle component (e.g. "1.8.0" -> 8) selects the Tassel release below.
try:
    i = int(jl[1])
except:
    print("Error: Java does not detected.")
    exit()
try:
    gi = int(gl[1])
except:
    print("Error: Git does not detected.")
    exit()
def run_tassel(dirname, git_url, version_label):
    """Clone the given Tassel standalone distribution into the temporary
    directory, run its pipeline to convert the HapMap input (i_path) to a
    VCF (o_path), then remove the clone again.
    """
    opt = subprocess.Popen("git clone " + git_url + " " + tmpath + dirname, shell=True, close_fds=True)
    # Bug fix: the Java-6 branch previously created this Popen without
    # stdout=PIPE and then read `opt.stdout.readlines()` (AttributeError on
    # None), and never waited for the clone to finish before checking for
    # the directory. All branches now simply wait().
    opt.wait()
    if not os.path.exists(tmpath + dirname):
        print("Error: Failed to download Tassel " + version_label + ".")
        exit()
    opt = subprocess.Popen(tmpath + dirname + "/run_pipeline.pl -Xms10g -Xmx100g -h " + i_path + " -sortPositions -export " + o_path + " -exportType VCF", shell=True, close_fds=True)
    opt.wait()
    # Clean up the clone once the conversion is done.
    opt = subprocess.Popen("rm -rf " + tmpath + dirname + "/", shell=True, close_fds=True)
    opt.wait()

# Tassel 3 / 4 / 5 require Java 6 / 7 / 8+ respectively; pick the release
# matching the detected Java minor version. (Also removes an unused `rand`
# variable from the original Java-6 branch.)
if i == 6:
    run_tassel("tassel3-standalone", "git://git.code.sf.net/p/tassel/tassel3-standalone", "3")
elif i == 7:
    run_tassel("tassel4-standalone", "git://git.code.sf.net/p/tassel/tassel4-standalone", "4")
elif i >= 8:
    run_tassel("tassel-5-standalone", "https://bitbucket.org/tasseladmin/tassel-5-standalone.git", "5")
else:
    print("Error: Java version too old to use Tassel.")
    exit()
print("Finished.")
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of a woob module.
#
# This woob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This woob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this woob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from woob.browser.pages import HTMLPage, JsonPage
from woob.browser.elements import ItemElement, ListElement, DictElement, method
from woob.browser.filters.json import Dict
from woob.browser.filters.standard import (Currency, Format, CleanText,
Regexp, CleanDecimal, Date, Env,
BrowserURL)
from woob.browser.filters.html import Attr, XPath, CleanHTML
from woob.capabilities.housing import (Housing, HousingPhoto, City,
UTILITIES, ENERGY_CLASS, POSTS_TYPES,
ADVERT_TYPES, HOUSE_TYPES)
from woob.capabilities.base import NotAvailable, NotLoaded
from woob.tools.capabilities.housing.housing import PricePerMeterFilter
from woob.tools.compat import urljoin
class CitiesPage(JsonPage):
    """JSON location-autocomplete endpoint; yields City objects."""

    @method
    class get_cities(DictElement):
        # Matches are nested under each top-level entry's "children" list.
        item_xpath = '*/children'

        class item(ItemElement):
            klass = City

            def condition(self):
                # Skip entries without a real parent (lct_parent_id == '0').
                return Dict('lct_parent_id')(self) != '0'

            # Combine id and administrative level so the id is unique.
            obj_id = Format('%s_%s', Dict('lct_id'), Dict('lct_level'))
            obj_name = Format('%s %s', Dict('lct_name'), Dict('lct_post_code'))
class PhonePage(HTMLPage):
    """Page returned by the contact endpoint, containing a phone number."""

    def get_phone(self):
        """Return the text of the phone block (direct text only)."""
        extract_phone = CleanText('//div[has-class("phone")]', children=False)
        return extract_phone(self.doc)
class HousingPage(HTMLPage):
    """Detail page of a single housing advert."""

    @method
    class get_housing(ItemElement):
        klass = Housing

        obj_id = Env('_id')

        def obj_type(self):
            # The transaction type is only encoded in the advert URL
            # (colocation / location / vente).
            url = BrowserURL('housing', _id=Env('_id'))(self)
            if 'colocation' in url:
                return POSTS_TYPES.SHARING
            elif 'location' in url:
                isFurnished = False
                # Look for a "meublé" criteria row to distinguish furnished
                # from unfurnished rentals.
                for li in XPath('//ul[@itemprop="description"]/li')(self):
                    label = CleanText('./span[has-class("criteria-label")]')(li)
                    if label.lower() == "meublé":
                        isFurnished = (
                            CleanText('./span[has-class("criteria-value")]')(li).lower() == 'oui'
                        )
                if isFurnished:
                    return POSTS_TYPES.FURNISHED_RENT
                else:
                    return POSTS_TYPES.RENT
            elif 'vente' in url:
                return POSTS_TYPES.SALE
            return NotAvailable

        obj_advert_type = ADVERT_TYPES.PROFESSIONAL

        def obj_house_type(self):
            # Map the French housing category label to HOUSE_TYPES.
            house_type = CleanText('.//h2[@class="offerMainFeatures"]/div')(self).lower()
            if house_type == "appartement":
                return HOUSE_TYPES.APART
            elif house_type == "maison":
                return HOUSE_TYPES.HOUSE
            elif house_type == "terrain":
                return HOUSE_TYPES.LAND
            elif house_type == "parking":
                return HOUSE_TYPES.PARKING
            else:
                return HOUSE_TYPES.OTHER

        obj_title = Attr('//meta[@property="og:title"]', 'content')
        obj_area = CleanDecimal(
            CleanText(
                '//p[@class="offerArea"]/span',
            ),
            default=NotAvailable
        )
        # Rooms and bedrooms share the same span; distinguished by the
        # "p."/"ch." suffix in the text.
        obj_rooms = CleanDecimal(
            Regexp(
                CleanText('//p[@class="offerRooms"]/span'),
                '(\d) p.',
                default=NotAvailable
            ),
            default=NotAvailable
        )
        obj_bedrooms = CleanDecimal(
            Regexp(
                CleanText('//p[@class="offerRooms"]/span'),
                '(\d) ch.',
                default=NotAvailable
            ),
            default=NotAvailable
        )
        obj_cost = CleanDecimal('//*[@itemprop="price"]', default=0)
        obj_currency = Currency(
            '//*[@itemprop="price"]'
        )

        def obj_utilities(self):
            # "charges comprises" means the monthly rent includes utilities.
            notes = CleanText('//p[@class="offer-description-notes"]')(self)
            if "Loyer mensuel charges comprises" in notes:
                return UTILITIES.INCLUDED
            else:
                return UTILITIES.UNKNOWN

        obj_price_per_meter = PricePerMeterFilter()
        obj_date = Date(Regexp(CleanText('//div[@class="offer-description-notes"]'),
                               u'.* Mis à jour: (\d{2}/\d{2}/\d{4}).*'),
                        dayfirst=True)
        obj_text = CleanHTML('//p[@class="descrProperty"]')
        obj_location = CleanText('//em[@class="infoAdresse"]')
        obj_station = CleanText(
            '//div[has-class("offer-description-metro")]',
            default=NotAvailable
        )
        obj_url = BrowserURL('housing', _id=Env('_id'))

        def obj_photos(self):
            # Thumbnails are upscaled by rewriting the size segment of the
            # image URL; SVG placeholders are skipped.
            photos = []
            for img in XPath('//ul[@class="thumbsContainer"]//img/@src')(self):
                if img.endswith('.svg'):
                    continue
                url = u'%s' % img.replace('182x136', '800x600')
                url = urljoin(self.page.url, url)  # Ensure URL is absolute
                photos.append(HousingPhoto(url))
            return photos

        def obj_DPE(self):
            # Energy performance class; data-class looks like "DPE<letter>",
            # keep just the letter.
            energy_value = CleanText(
                '//ul[@class="energyInfosDPE"]//li[@class="energyInfos"]/span/@data-class',
                default=""
            )(self)
            if len(energy_value):
                energy_value = energy_value.replace("DPE", "").strip()[0]
            return getattr(ENERGY_CLASS, energy_value, NotAvailable)

        def obj_GES(self):
            # Greenhouse-gas emission class; same encoding with a "GES" prefix.
            greenhouse_value = CleanText(
                '//ul[@class="energyInfosGES"]//li[@class="energyInfos"]/span/@data-class',
                default=""
            )(self)
            if len(greenhouse_value):
                greenhouse_value = greenhouse_value.replace("GES", "").strip()[0]
            return getattr(ENERGY_CLASS, greenhouse_value, NotAvailable)

        def obj_details(self):
            details = {}
            details["creationDate"] = Date(
                Regexp(
                    CleanText(
                        '//div[@class="offer-description-notes"]'
                    ),
                    u'.*Mis en ligne: (\d{2}/\d{2}/\d{4}).*'
                ),
                dayfirst=True
            )(self)
            # Agency fees, when displayed next to the price.
            honoraires = CleanText(
                (
                    '//div[has-class("offer-price")]/span[has-class("lbl-agencyfees")]'
                ),
                default=None
            )(self)
            if honoraires:
                details["Honoraires"] = (
                    "{} (TTC, en sus)".format(
                        honoraires.split(":")[1].strip()
                    )
                )
            # Expose every criteria label/value pair verbatim.
            for li in XPath('//ul[@itemprop="description"]/li')(self):
                label = CleanText('./span[has-class("criteria-label")]')(li)
                value = CleanText('./span[has-class("criteria-value")]')(li)
                details[label] = value
            return details

    def get_phone_url_datas(self):
        # Collect the data-* attributes the contact modal endpoint expects
        # from the "show phone" button.
        a = XPath('//button[has-class("js-show-phone-offer-sale-bottom")]')(self.doc)[0]
        urlcontact = 'http://www.logic-immo.com/modalMail'
        params = {}
        params['universe'] = CleanText('./@data-univers')(a)
        params['source'] = CleanText('./@data-source')(a)
        params['pushcontact'] = CleanText('./@data-pushcontact')(a)
        params['mapper'] = CleanText('./@data-mapper')(a)
        params['offerid'] = CleanText('./@data-offerid')(a)
        params['offerflag'] = CleanText('./@data-offerflag')(a)
        params['campaign'] = CleanText('./@data-campaign')(a)
        params['xtpage'] = CleanText('./@data-xtpage')(a)
        params['offertransactiontype'] = CleanText('./@data-offertransactiontype')(a)
        params['aeisource'] = CleanText('./@data-aeisource')(a)
        params['shownumber'] = CleanText('./@data-shownumber')(a)
        params['corail'] = 1
        return urlcontact, params
class SearchPage(HTMLPage):
    """Search-results page; flat-sharing and regular adverts use
    different markup, hence the two iterators."""

    @method
    class iter_sharing(ListElement):
        item_xpath = '//article[has-class("offer-block")]'

        class item(ItemElement):
            klass = Housing

            obj_id = Format('colocation-%s', CleanText('./div/header/@id', replace=[('header-offer-', '')]))
            obj_type = POSTS_TYPES.SHARING
            obj_advert_type = ADVERT_TYPES.PROFESSIONAL
            obj_title = CleanText(CleanHTML('./div/header/section/p[@class="property-type"]/span/@title'))
            obj_area = CleanDecimal('./div/header/section/p[@class="offer-attributes"]/a/span[@class="offer-area-number"]',
                                    default=0)
            obj_cost = CleanDecimal('./div/header/section/p[@class="price"]', default=0)
            obj_currency = Currency(
                './div/header/section/p[@class="price"]'
            )
            obj_utilities = UTILITIES.UNKNOWN
            obj_text = CleanText(
                './div/div[@class="content-offer"]/section[has-class("content-desc")]/p/span[has-class("offer-text")]/@title',
                default=NotLoaded
            )
            obj_date = Date(Regexp(CleanText('./div/header/section/p[has-class("update-date")]'),
                                   ".*(\d{2}/\d{2}/\d{4}).*"))
            obj_location = CleanText(
                '(./div/div[@class="content-offer"]/section[has-class("content-desc")]/p)[1]/span/@title',
                default=NotLoaded
            )

    @method
    class iter_housings(ListElement):
        item_xpath = '//div[has-class("offer-list")]//div[has-class("offer-block")]'

        class item(ItemElement):
            # Common XPath prefix shared by several fields below.
            offer_details_wrapper = (
                './/div[has-class("offer-details-wrapper")]'
            )
            klass = Housing

            obj_id = Format(
                '%s-%s',
                Regexp(Env('type'), '(.*)-.*'),
                CleanText('./@id', replace=[('header-offer-', '')])
            )
            obj_type = Env('query_type')
            obj_advert_type = ADVERT_TYPES.PROFESSIONAL

            def obj_house_type(self):
                # Map the French housing category label to HOUSE_TYPES.
                house_type = CleanText('.//div[has-class("offer-details-caracteristik")]/meta[@itemprop="name"]/@content')(self).lower()
                if house_type == "appartement":
                    return HOUSE_TYPES.APART
                elif house_type == "maison":
                    return HOUSE_TYPES.HOUSE
                elif house_type == "terrain":
                    return HOUSE_TYPES.LAND
                elif house_type == "parking":
                    return HOUSE_TYPES.PARKING
                else:
                    return HOUSE_TYPES.OTHER

            obj_title = CleanText('.//div[has-class("offer-details-type")]/a/@title')
            # NOTE(review): the first string literal below is continued with a
            # backslash inside the literal, so the XPath reads
            # ".../offer-link"]/@data-orpi"; preserved as-is.
            obj_url = Format(u'%s%s',
                             CleanText('.//div/a[@class="offer-link"]/@href'),
                             CleanText('.//div/a[@class="offer-link"]/\
@data-orpi', default=""))
            obj_area = CleanDecimal(
                (
                    offer_details_wrapper +
                    '/div/div/div[has-class("offer-details-second")]' +
                    '/div/h3[has-class("offer-attributes")]/span' +
                    '/span[has-class("offer-area-number")]'
                ),
                default=NotLoaded
            )
            obj_rooms = CleanDecimal(
                (
                    offer_details_wrapper +
                    '/div/div/div[has-class("offer-details-second")]' +
                    '/div/h3[has-class("offer-attributes")]' +
                    '/span[has-class("offer-rooms")]' +
                    '/span[has-class("offer-rooms-number")]'
                ),
                default=NotAvailable
            )
            # Price text ends with a currency sign; strip it before parsing.
            obj_cost = CleanDecimal(
                Regexp(
                    CleanText(
                        (
                            offer_details_wrapper +
                            '/div/p[@class="offer-price"]/span'
                        ),
                        default=NotLoaded
                    ),
                    '(.*) [%s%s%s]' % (u'€', u'$', u'£'),
                    default=NotLoaded
                ),
                default=NotLoaded
            )
            obj_currency = Currency(
                offer_details_wrapper + '/div/p[has-class("offer-price")]/span'
            )
            obj_price_per_meter = PricePerMeterFilter()
            obj_utilities = UTILITIES.UNKNOWN
            obj_text = CleanText(
                offer_details_wrapper + '/div/div/div/p[has-class("offer-description")]/span'
            )
            obj_location = CleanText(
                offer_details_wrapper + '/div[@class="offer-details-location"]',
                replace=[('Voir sur la carte','')]
            )

            def obj_photos(self):
                # At most one thumbnail per result; upscale it by rewriting
                # the size segment of its URL.
                photos = []
                url = None
                try:
                    url = Attr(
                        './/div[has-class("offer-picture")]//img',
                        'src'
                    )(self)
                except:
                    # No picture block for this result: return no photos.
                    pass
                if url:
                    url = url.replace('335x253', '800x600')
                    url = urljoin(self.page.url, url)  # Ensure URL is absolute
                    photos.append(HousingPhoto(url))
                return photos

            def obj_details(self):
                details = {}
                # Agency fees, when shown in the result block.
                honoraires = CleanText(
                    (
                        self.offer_details_wrapper +
                        '/div/div/p[@class="offer-agency-fees"]'
                    ),
                    default=None
                )(self)
                if honoraires:
                    details["Honoraires"] = (
                        "{} (TTC, en sus)".format(
                            honoraires.split(":")[1].strip()
                        )
                    )
                return details
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,too-many-locals,too-many-arguments,too-many-lines
import pytest
from raiden.utils import publickey_to_address
from raiden.transfer import channel
from raiden.transfer.events import ContractSendChannelClose
from raiden.transfer.mediated_transfer import mediator
from raiden.transfer.mediated_transfer.state import (
MediationPairState,
MediatorTransferState,
)
from raiden.transfer.mediated_transfer.state_change import (
ActionInitMediator,
ReceiveSecretReveal,
)
from raiden.transfer.mediated_transfer.events import (
EventUnlockFailed,
EventUnlockSuccess,
SendBalanceProof,
SendMediatedTransfer,
SendRefundTransfer,
SendRevealSecret,
)
from raiden.transfer.state import (
CHANNEL_STATE_CLOSED,
CHANNEL_STATE_SETTLED,
TransactionExecutionStatus,
)
from raiden.transfer.state_change import Block
from raiden.tests.utils import factories
from raiden.tests.utils.factories import (
ADDR,
HOP1,
HOP1_KEY,
HOP1_TIMEOUT,
HOP2,
HOP2_KEY,
HOP3,
HOP3_KEY,
HOP4,
HOP4_KEY,
HOP5_KEY,
UNIT_HASHLOCK,
UNIT_REVEAL_TIMEOUT,
UNIT_SECRET,
UNIT_TOKEN_ADDRESS,
UNIT_TRANSFER_AMOUNT,
UNIT_TRANSFER_IDENTIFIER,
UNIT_TRANSFER_INITIATOR,
UNIT_TRANSFER_SENDER,
UNIT_TRANSFER_TARGET,
)
def make_transfer_pair(
        payee,
        initiator,
        target,
        amount,
        expiration,
        secret,
        reveal_timeout=UNIT_REVEAL_TIMEOUT):
    """Build a mediation pair whose payee-side lock expires one reveal
    timeout earlier than the payer-side lock."""
    payer_transfer = factories.make_signed_transfer(
        amount,
        initiator,
        target,
        expiration,
        secret=secret,
    )
    payee_transfer = factories.make_transfer(
        amount,
        initiator,
        target,
        expiration - reveal_timeout,
        secret=secret,
    )
    return MediationPairState(payer_transfer, payee, payee_transfer)
def make_transfers_pair(privatekeys, amount):
    """Create a chain of mediated transfer pairs, one per consecutive hop.

    For every (payer, payee) pair of addresses derived from *privatekeys*
    a receive channel and a pay channel are created, the payer transfer is
    registered on the receive channel, and the corresponding payee
    transfer is sent on the pay channel.

    Returns ``(channelmap, transfers_pair)`` where *channelmap* maps
    channel identifiers to their channel state.
    """
    transfers_pair = list()
    channelmap = dict()
    # large enough so every hop can decrement its lock expiration by
    # UNIT_REVEAL_TIMEOUT and still end up with a positive expiration
    initial_expiration = (2 * len(privatekeys) + 1) * UNIT_REVEAL_TIMEOUT
    next_expiration = initial_expiration
    addresses = list()
    for pkey in privatekeys:
        pubkey = pkey.public_key.format(compressed=False)
        address = publickey_to_address(pubkey)
        addresses.append(address)
    key_address = list(zip(privatekeys, addresses))
    # each iteration mediates between one payer and the following payee
    for (payer_key, payer_address), payee_address in zip(key_address[:-1], addresses[1:]):
        assert next_expiration > 0
        receive_channel = factories.make_channel(
            our_address=factories.HOP1,
            our_balance=amount,
            partner_balance=amount,
            partner_address=payer_address,
            token_address=UNIT_TOKEN_ADDRESS,
        )
        pay_channel = factories.make_channel(
            our_address=factories.HOP1,
            our_balance=amount,
            partner_balance=amount,
            partner_address=payee_address,
            token_address=UNIT_TOKEN_ADDRESS,
        )
        received_transfer = factories.make_signed_transfer(
            amount,
            UNIT_TRANSFER_INITIATOR,
            UNIT_TRANSFER_TARGET,
            next_expiration,
            UNIT_SECRET,
            channel_identifier=receive_channel.identifier,
            pkey=payer_key,
            sender=payer_address,
        )
        is_valid, msg = channel.handle_receive_mediatedtransfer(
            receive_channel,
            received_transfer,
        )
        assert is_valid, msg
        mediatedtransfer_event = channel.send_mediatedtransfer(
            pay_channel,
            UNIT_TRANSFER_INITIATOR,
            UNIT_TRANSFER_TARGET,
            amount,
            UNIT_TRANSFER_IDENTIFIER,
            received_transfer.lock.expiration - UNIT_REVEAL_TIMEOUT,
            UNIT_HASHLOCK,
        )
        assert mediatedtransfer_event
        sent_transfer = mediatedtransfer_event.transfer
        pair = MediationPairState(
            received_transfer,
            mediatedtransfer_event.recipient,
            sent_transfer,
        )
        transfers_pair.append(pair)
        channelmap[receive_channel.identifier] = receive_channel
        channelmap[pay_channel.identifier] = pay_channel
        # assumes that the node sending the refund will follow the protocol and
        # decrement the expiration for its lock
        next_expiration = next_expiration - UNIT_REVEAL_TIMEOUT
    return channelmap, transfers_pair
def test_is_lock_valid():
    """ A hash time lock is valid up to the expiration block. """
    lock_expiration = 10
    # valid strictly before and at the expiration block itself
    assert mediator.is_lock_valid(lock_expiration, 5) is True
    assert mediator.is_lock_valid(lock_expiration, 10) is True, 'lock is expired at the next block'
    # one block past the expiration the lock is no longer valid
    assert mediator.is_lock_valid(lock_expiration, 11) is False
def test_is_safe_to_wait():
    """ It's safe to wait for a secret while there are more than reveal timeout
    blocks until the lock expiration.
    """
    expiration = 40
    reveal_timeout = 10
    # strictly more than reveal_timeout blocks left: safe to keep waiting
    # (29 leaves exactly reveal_timeout + 1 blocks, the last safe point)
    for block_number in (10, 20, 29):
        assert mediator.is_safe_to_wait(expiration, reveal_timeout, block_number) is True
    # reveal_timeout blocks or fewer remaining: waiting is no longer safe
    for block_number in (30, 40, 50):
        assert mediator.is_safe_to_wait(expiration, reveal_timeout, block_number) is False
def test_is_channel_close_needed_unpaid():
    """ Don't close the channel if the payee transfer is not paid. """
    amount = 10
    expiration = 10
    reveal_timeout = 5
    safe_block = expiration - reveal_timeout - 1
    unsafe_block = expiration - reveal_timeout
    channel_state = factories.make_channel(reveal_timeout=reveal_timeout)
    # even if the secret is known by the payee, the transfer is paid only if a
    # withdraw on-chain happened or if the mediator has sent a balance proof
    for unpaid_state in ('payee_pending', 'payee_secret_revealed', 'payee_refund_withdraw'):
        unpaid_pair = make_transfer_pair(
            payee=HOP2,
            initiator=HOP3,
            target=HOP4,
            amount=amount,
            expiration=expiration,
            reveal_timeout=reveal_timeout,
            secret=UNIT_SECRET,
        )
        # FIX: the loop iterates *payee* states, so they must be assigned to
        # payee_state. The original assigned payer_state, which left
        # payee_state at its default and made the loop a no-op (compare the
        # sibling tests, which all set payee_state).
        unpaid_pair.payee_state = unpaid_state
        assert mediator.is_channel_close_needed(channel_state, unpaid_pair, safe_block) is False
        assert mediator.is_channel_close_needed(channel_state, unpaid_pair, unsafe_block) is False
def test_is_channel_close_needed_paid():
    """ Close the channel if the payee transfer is paid but the payer has not paid. """
    amount = 10
    expiration = 10
    reveal_timeout = 5
    # last block where waiting is still safe vs. the first unsafe block
    safe_block = expiration - reveal_timeout - 1
    unsafe_block = expiration - reveal_timeout
    channel_state = factories.make_channel(reveal_timeout=reveal_timeout)
    # both states mean the payee has effectively received the token
    for paid_state in ('payee_contract_withdraw', 'payee_balance_proof'):
        paid_pair = make_transfer_pair(
            payee=HOP2,
            initiator=HOP3,
            target=HOP4,
            amount=amount,
            expiration=expiration,
            reveal_timeout=reveal_timeout,
            secret=UNIT_SECRET,
        )
        paid_pair.payee_state = paid_state
        # closing is only required once waiting becomes unsafe
        assert mediator.is_channel_close_needed(channel_state, paid_pair, safe_block) is False
        assert mediator.is_channel_close_needed(channel_state, paid_pair, unsafe_block) is True
def test_is_channel_close_needed_channel_closing():
    """ If the channel is already closing the answer is always no. """
    amount = 10
    expiration = 10
    reveal_timeout = 5
    safe_block = expiration - reveal_timeout - 1
    unsafe_block = expiration - reveal_timeout
    channel_state = factories.make_channel(reveal_timeout=reveal_timeout)
    # a close transaction is already in flight (started at block 5, not mined)
    channel_state.close_transaction = TransactionExecutionStatus(5, None, None)
    for state in MediationPairState.valid_payee_states:
        pair = make_transfer_pair(
            payee=HOP2,
            initiator=HOP3,
            target=HOP4,
            amount=amount,
            expiration=expiration,
            reveal_timeout=reveal_timeout,
            secret=UNIT_SECRET,
        )
        pair.payee_state = state
        # no payee state may trigger a second close attempt
        assert mediator.is_channel_close_needed(channel_state, pair, safe_block) is False
        assert mediator.is_channel_close_needed(channel_state, pair, unsafe_block) is False
def test_is_channel_close_needed_channel_closed():
    """ If the channel is already closed the answer is always no. """
    amount = 10
    expiration = 10
    reveal_timeout = 5
    safe_block = expiration - reveal_timeout - 1
    unsafe_block = expiration - reveal_timeout
    channel_state = factories.make_channel(reveal_timeout=reveal_timeout)
    # the close transaction already succeeded (mined at block 5)
    channel_state.close_transaction = TransactionExecutionStatus(
        None,
        5,
        TransactionExecutionStatus.SUCCESS,
    )
    for state in MediationPairState.valid_payee_states:
        pair = make_transfer_pair(
            payee=HOP2,
            initiator=HOP3,
            target=HOP4,
            amount=amount,
            expiration=expiration,
            reveal_timeout=reveal_timeout,
            secret=UNIT_SECRET,
        )
        pair.payee_state = state
        # a closed channel never needs another close, regardless of state
        assert mediator.is_channel_close_needed(channel_state, pair, safe_block) is False
        assert mediator.is_channel_close_needed(channel_state, pair, unsafe_block) is False
def test_is_channel_close_needed_closed():
    # NOTE(review): despite the name, the channel here is open; this is a
    # single-pair variant of the paid test: a 'payee_balance_proof' pair
    # requires an on-chain close once waiting becomes unsafe.
    amount = 10
    expiration = 10
    reveal_timeout = 5
    safe_block = expiration - reveal_timeout - 1
    unsafe_block = expiration - reveal_timeout
    channel_state = factories.make_channel(reveal_timeout=reveal_timeout)
    paid_pair = make_transfer_pair(
        payee=HOP2,
        initiator=HOP3,
        target=HOP4,
        amount=amount,
        expiration=expiration,
        reveal_timeout=reveal_timeout,
        secret=UNIT_SECRET,
    )
    paid_pair.payee_state = 'payee_balance_proof'
    assert mediator.is_channel_close_needed(channel_state, paid_pair, safe_block) is False
    assert mediator.is_channel_close_needed(channel_state, paid_pair, unsafe_block) is True
def test_is_valid_refund():
    """ A refund is valid only if its lock expiration is strictly lower than
    the original transfer's.
    """
    amount = 30
    expiration = 50
    transfer = factories.make_transfer(
        amount,
        UNIT_TRANSFER_INITIATOR,
        UNIT_TRANSFER_TARGET,
        expiration,
        UNIT_SECRET,
    )
    refund_lower_expiration = factories.make_signed_transfer(
        amount,
        UNIT_TRANSFER_INITIATOR,
        UNIT_TRANSFER_TARGET,
        expiration - 1,
        UNIT_SECRET,
    )
    assert mediator.is_valid_refund(transfer, refund_lower_expiration) is True
    # an equal expiration leaves the refunding node no time margin
    refund_same_expiration = factories.make_signed_transfer(
        amount,
        UNIT_TRANSFER_INITIATOR,
        UNIT_TRANSFER_TARGET,
        expiration,
        UNIT_SECRET,
    )
    assert mediator.is_valid_refund(transfer, refund_same_expiration) is False
def test_refund_from_target_is_invalid():
    """ The target of a transfer must not be able to refund it back. """
    amount = 30
    expiration = 50
    # make the transfer's target the same address the refund will come from
    target = UNIT_TRANSFER_SENDER
    transfer = factories.make_transfer(
        amount,
        UNIT_TRANSFER_INITIATOR,
        target,
        expiration,
        UNIT_SECRET,
    )
    refund_from_target = factories.make_signed_transfer(
        amount,
        UNIT_TRANSFER_INITIATOR,
        UNIT_TRANSFER_TARGET,
        expiration - 1,
        UNIT_SECRET,
    )
    # target cannot refund
    assert not mediator.is_valid_refund(transfer, refund_from_target)
def test_get_timeout_blocks():
    """ The timeout is bounded by the smallest of the lock expiration, the
    settlement period, and (when closed) the close block, each reduced by
    TRANSIT_BLOCKS.
    """
    settle_timeout = 30
    block_number = 5
    not_closed = None
    # lock expires before the settlement period ends
    early_expire = 10
    early_block = mediator.get_timeout_blocks(
        settle_timeout,
        not_closed,
        early_expire,
        block_number,
    )
    assert early_block == 5 - mediator.TRANSIT_BLOCKS, 'must use the lock expiration'
    equal_expire = 30
    equal_block = mediator.get_timeout_blocks(
        settle_timeout,
        not_closed,
        equal_expire,
        block_number,
    )
    assert equal_block == 25 - mediator.TRANSIT_BLOCKS
    # This is the fix for test_lock_timeout_lower_than_previous_channel_settlement_period
    large_expire = 70
    large_block = mediator.get_timeout_blocks(
        settle_timeout,
        not_closed,
        large_expire,
        block_number,
    )
    assert large_block == 30 - mediator.TRANSIT_BLOCKS, 'must use the settle timeout'
    # with a closed channel the settlement countdown started at the close block
    closed_block_number = 2
    large_block = mediator.get_timeout_blocks(
        settle_timeout,
        closed_block_number,
        large_expire,
        block_number,
    )
    assert large_block == 27 - mediator.TRANSIT_BLOCKS, 'must use the close block'
    # the computed timeout may be negative, in which case the calling code must /not/ use it
    negative_block_number = large_expire
    negative_block = mediator.get_timeout_blocks(
        settle_timeout,
        not_closed,
        large_expire,
        negative_block_number,
    )
    assert negative_block == -mediator.TRANSIT_BLOCKS
def test_next_route_amount():
    """ Routes that dont have enough available_balance must be ignored. """
    # NOTE: the original bound `amount = 10` here and immediately shadowed it
    # below; the dead store was removed.
    reveal_timeout = 30
    timeout_blocks = reveal_timeout + 10
    amount = UNIT_TRANSFER_AMOUNT
    channel1 = factories.make_channel(our_balance=amount)
    channel2 = factories.make_channel(our_balance=0)
    channel3 = factories.make_channel(our_balance=amount)
    channelmap = {
        channel1.identifier: channel1,
        channel2.identifier: channel2,
        channel3.identifier: channel3,
    }
    # the first available route should be used
    available_routes = [factories.route_from_channel(channel1)]
    chosen_channel = mediator.next_channel_from_routes(
        available_routes,
        channelmap,
        amount,
        timeout_blocks,
    )
    assert chosen_channel.identifier == channel1.identifier
    # additional routes do not change the order
    available_routes = [
        factories.route_from_channel(channel1),
        factories.route_from_channel(channel2),
    ]
    chosen_channel = mediator.next_channel_from_routes(
        available_routes,
        channelmap,
        amount,
        timeout_blocks,
    )
    assert chosen_channel.identifier == channel1.identifier
    available_routes = [
        factories.route_from_channel(channel3),
        factories.route_from_channel(channel1),
    ]
    chosen_channel = mediator.next_channel_from_routes(
        available_routes,
        channelmap,
        amount,
        timeout_blocks,
    )
    assert chosen_channel.identifier == channel3.identifier
    # a channel without capacity must be skipped
    available_routes = [
        factories.route_from_channel(channel2),
        factories.route_from_channel(channel1),
    ]
    chosen_channel = mediator.next_channel_from_routes(
        available_routes,
        channelmap,
        amount,
        timeout_blocks,
    )
    assert chosen_channel.identifier == channel1.identifier
def test_next_route_reveal_timeout():
    """ Routes with a larger reveal timeout than timeout_blocks must be ignored. """
    amount = 10
    balance = 20
    timeout_blocks = 10
    # only channel3 and channel4 fit within timeout_blocks; channel3 comes
    # first in the route list, so it must be chosen
    channel1 = factories.make_channel(our_balance=balance, reveal_timeout=timeout_blocks * 2)
    channel2 = factories.make_channel(our_balance=balance, reveal_timeout=timeout_blocks + 1)
    channel3 = factories.make_channel(our_balance=balance, reveal_timeout=timeout_blocks // 2)
    channel4 = factories.make_channel(our_balance=balance, reveal_timeout=timeout_blocks)
    channelmap = {
        channel1.identifier: channel1,
        channel2.identifier: channel2,
        channel3.identifier: channel3,
        channel4.identifier: channel4,
    }
    available_routes = [
        factories.route_from_channel(channel1),
        factories.route_from_channel(channel2),
        factories.route_from_channel(channel3),
        factories.route_from_channel(channel4),
    ]
    chosen_channel = mediator.next_channel_from_routes(
        available_routes,
        channelmap,
        amount,
        timeout_blocks,
    )
    assert chosen_channel.identifier == channel3.identifier
def test_next_transfer_pair():
    """ The next pair must forward the payer transfer unchanged except for a
    strictly lower lock expiration.
    """
    timeout_blocks = 47
    block_number = 3
    balance = 10
    initiator = HOP1
    target = ADDR
    expiration = 50
    secret = UNIT_SECRET
    payer_transfer = factories.make_signed_transfer(
        balance,
        initiator,
        target,
        expiration,
        secret,
    )
    channel1 = factories.make_channel(our_balance=balance, token_address=UNIT_TOKEN_ADDRESS)
    channelmap = {channel1.identifier: channel1}
    available_routes = [factories.route_from_channel(channel1)]
    pair, events = mediator.next_transfer_pair(
        payer_transfer,
        available_routes,
        channelmap,
        timeout_blocks,
        block_number,
    )
    # the pair links the incoming payer transfer to the outgoing payee one
    assert pair.payer_transfer == payer_transfer
    assert pair.payee_address == channel1.partner_state.address
    assert pair.payee_transfer.lock.expiration < pair.payer_transfer.lock.expiration
    assert isinstance(events[0], SendMediatedTransfer)
    send_transfer = events[0]
    assert send_transfer.recipient == pair.payee_address
    # every field except the lock expiration is carried over unchanged
    transfer = send_transfer.transfer
    assert transfer.identifier == payer_transfer.identifier
    assert transfer.token == payer_transfer.token
    assert transfer.initiator == payer_transfer.initiator
    assert transfer.target == payer_transfer.target
    assert transfer.lock.amount == payer_transfer.lock.amount
    assert transfer.lock.hashlock == payer_transfer.lock.hashlock
    assert transfer.lock.expiration < payer_transfer.lock.expiration
def test_set_payee():
    """ Only the pair whose *payee* matches the given address may change
    state; payer addresses must be ignored.
    """
    amount = 10
    _, transfers_pair = make_transfers_pair(
        [
            HOP2_KEY,
            HOP3_KEY,
            HOP4_KEY,
        ],
        amount,
    )
    # assert pre conditions
    assert transfers_pair[0].payer_state == 'payer_pending'
    assert transfers_pair[0].payee_state == 'payee_pending'
    assert transfers_pair[1].payer_state == 'payer_pending'
    assert transfers_pair[1].payee_state == 'payee_pending'
    # HOP2 is the payer of the first pair, not a payee of any pair
    mediator.set_payee_state_and_check_reveal_order(
        transfers_pair,
        HOP2,
        'payee_secret_revealed',
    )
    # payer address was used, no payee state should change
    assert transfers_pair[0].payer_state == 'payer_pending'
    assert transfers_pair[0].payee_state == 'payee_pending'
    assert transfers_pair[1].payer_state == 'payer_pending'
    assert transfers_pair[1].payee_state == 'payee_pending'
    # HOP3 is the payee of the first pair
    mediator.set_payee_state_and_check_reveal_order(
        transfers_pair,
        HOP3,
        'payee_secret_revealed',
    )
    # only the transfer where the address is a payee should change
    assert transfers_pair[0].payer_state == 'payer_pending'
    assert transfers_pair[0].payee_state == 'payee_secret_revealed'
    assert transfers_pair[1].payer_state == 'payer_pending'
    assert transfers_pair[1].payee_state == 'payee_pending'
def test_set_expired_pairs():
    """ The transfer pair must switch to expired at the right block. """
    amount = 10
    _, transfers_pair = make_transfers_pair(
        [HOP2_KEY, HOP3_KEY],
        amount,
    )
    pair = transfers_pair[0]
    # do not generate events if the secret is not known
    first_unsafe_block = pair.payer_transfer.lock.expiration - UNIT_REVEAL_TIMEOUT
    mediator.set_expired_pairs(
        transfers_pair,
        first_unsafe_block,
    )
    assert pair.payee_state == 'payee_pending'
    assert pair.payer_state == 'payer_pending'
    # edge case for the payee lock expiration
    payee_expiration_block = pair.payee_transfer.lock.expiration
    mediator.set_expired_pairs(
        transfers_pair,
        payee_expiration_block,
    )
    assert pair.payee_state == 'payee_pending'
    assert pair.payer_state == 'payer_pending'
    # payee lock expired
    mediator.set_expired_pairs(
        transfers_pair,
        payee_expiration_block + 1,
    )
    assert pair.payee_state == 'payee_expired'
    assert pair.payer_state == 'payer_pending'
    # edge case for the payer lock expiration
    # (the payer lock expires later than the payee's, so the payer side
    # is still pending at this point)
    payer_expiration_block = pair.payer_transfer.lock.expiration
    mediator.set_expired_pairs(
        transfers_pair,
        payer_expiration_block,
    )
    assert pair.payee_state == 'payee_expired'
    assert pair.payer_state == 'payer_pending'
    # payer lock has expired
    mediator.set_expired_pairs(
        transfers_pair,
        payer_expiration_block + 1,
    )
    assert pair.payee_state == 'payee_expired'
    assert pair.payer_state == 'payer_expired'
def test_events_for_refund():
    """ A refund transfer is only sent when there are enough blocks left to
    safely decrement the lock expiration.
    """
    amount = 10
    expiration = 30
    timeout_blocks = expiration
    block_number = 1
    refund_channel = factories.make_channel(
        our_balance=amount,
        partner_balance=amount,
        partner_address=UNIT_TRANSFER_SENDER,
    )
    received_transfer = factories.make_signed_transfer(
        amount,
        UNIT_TRANSFER_INITIATOR,
        UNIT_TRANSFER_TARGET,
        expiration,
        UNIT_SECRET,
        channel_identifier=refund_channel.identifier,
    )
    is_valid, msg = channel.handle_receive_mediatedtransfer(
        refund_channel,
        received_transfer,
    )
    assert is_valid, msg
    refund_transfer = factories.make_transfer(
        amount,
        UNIT_TRANSFER_INITIATOR,
        UNIT_TRANSFER_TARGET,
        expiration,
        UNIT_SECRET,
    )
    # a timeout equal to the reveal timeout leaves no room for a refund
    small_timeout_blocks = refund_channel.reveal_timeout
    small_refund_events = mediator.events_for_refund_transfer(
        refund_channel,
        refund_transfer,
        small_timeout_blocks,
        block_number,
    )
    assert not small_refund_events
    events = mediator.events_for_refund_transfer(
        refund_channel,
        refund_transfer,
        timeout_blocks,
        block_number,
    )
    # the refund mirrors the original lock with a decremented expiration
    assert events
    assert events[0].lock.expiration < block_number + timeout_blocks
    assert events[0].lock.amount == amount
    assert events[0].lock.hashlock == refund_transfer.lock.hashlock
    assert events[0].recipient == refund_channel.partner_state.address
def test_events_for_revealsecret():
    """ The secret is revealed backwards to the payer once the payee sent the
    SecretReveal.
    """
    our_address = ADDR
    amount = 10
    _, transfers_pair = make_transfers_pair(
        [HOP2_KEY, HOP3_KEY, HOP4_KEY],
        amount,
    )
    # NOTE(review): here our_address is passed where the later calls pass
    # UNIT_SECRET; with no payee in a revealed state no event is produced
    # either way, but the argument looks like a leftover — confirm against
    # the events_for_revealsecret signature.
    events = mediator.events_for_revealsecret(
        transfers_pair,
        our_address,
    )
    # the secret is known by this node, but no other payee is at a secret known
    # state, do nothing
    assert not events
    first_pair = transfers_pair[0]
    last_pair = transfers_pair[1]
    last_pair.payee_state = 'payee_secret_revealed'
    events = mediator.events_for_revealsecret(
        transfers_pair,
        UNIT_SECRET,
    )
    # the last known hop sent a secret reveal message. This node learned the
    # secret and now must reveal it to the payer node from the transfer pair
    assert len(events) == 1
    assert events[0].secret == UNIT_SECRET
    assert events[0].receiver == last_pair.payer_transfer.balance_proof.sender
    assert last_pair.payer_state == 'payer_secret_revealed'
    events = mediator.events_for_revealsecret(
        transfers_pair,
        our_address,
    )
    # the payeee from the first_pair did not send a secret reveal message, do
    # nothing
    assert not events
    first_pair.payee_state = 'payee_secret_revealed'
    events = mediator.events_for_revealsecret(
        transfers_pair,
        UNIT_SECRET,
    )
    assert len(events) == 1
    assert events[0].secret == UNIT_SECRET
    assert events[0].receiver == first_pair.payer_transfer.balance_proof.sender
    assert first_pair.payer_state == 'payer_secret_revealed'
def test_events_for_revealsecret_secret_unknown():
    """ When the secret is not known there is nothing to do. """
    amount = 10
    _, transfers_pair = make_transfers_pair(
        [HOP2_KEY, HOP3_KEY, HOP4_KEY],
        amount,
    )
    # no payee is in a secret-known state, so no reveal is emitted
    events = mediator.events_for_revealsecret(
        transfers_pair,
        ADDR,
    )
    assert not events
def test_events_for_revealsecret_all_states():
    """ The secret must be revealed backwards to the payer if the payee knows
    the secret.
    """
    # every payee state that implies knowledge of the secret
    payee_secret_known = (
        'payee_secret_revealed',
        'payee_refund_withdraw',
        'payee_contract_withdraw',
        'payee_balance_proof',
    )
    amount = 10
    for state in payee_secret_known:
        _, transfers_pair = make_transfers_pair(
            [HOP2_KEY, HOP3_KEY],
            amount,
        )
        pair = transfers_pair[0]
        pair.payee_state = state
        events = mediator.events_for_revealsecret(
            transfers_pair,
            UNIT_SECRET,
        )
        # the reveal goes backwards to HOP2, the payer of the pair
        assert events[0].secret == UNIT_SECRET
        assert events[0].receiver == HOP2
def test_events_for_balanceproof():
    """ Test the simple case where the last hop has learned the secret and sent
    it to the mediator node.
    """
    amount = 10
    channelmap, transfers_pair = make_transfers_pair(
        [HOP1_KEY, HOP2_KEY],
        amount,
    )
    last_pair = transfers_pair[-1]
    last_pair.payee_state = 'payee_secret_revealed'
    # the lock has not expired yet
    block_number = last_pair.payee_transfer.lock.expiration
    events = mediator.events_for_balanceproof(
        channelmap,
        transfers_pair,
        block_number,
        UNIT_SECRET,
        UNIT_HASHLOCK,
    )
    # exactly one balance proof plus the matching unlock-success event
    assert len(events) == 2
    balance_proof = next(e for e in events if isinstance(e, SendBalanceProof))
    unlock = next(e for e in events if isinstance(e, EventUnlockSuccess))
    assert unlock
    assert balance_proof
    assert balance_proof.receiver == last_pair.payee_address
    assert last_pair.payee_state == 'payee_balance_proof'
def test_events_for_balanceproof_channel_closed():
    """ Balance proofs are useless if the channel is closed/settled. The payee
    needs to go on-chain and use the latest known balance proof which includes
    this lock in the locksroot.
    """
    amount = 10
    block_number = 5
    for invalid_state in (CHANNEL_STATE_CLOSED, CHANNEL_STATE_SETTLED):
        channelmap, transfers_pair = make_transfers_pair(
            [HOP2_KEY, HOP3_KEY],
            amount,
        )
        last_pair = transfers_pair[-1]
        # force the payee-side channel into the invalid state under test
        channel_identifier = last_pair.payee_transfer.balance_proof.channel_address
        last_channel = channelmap[channel_identifier]
        if invalid_state == CHANNEL_STATE_CLOSED:
            channel.set_closed(last_channel, block_number)
        else:
            channel.set_settled(last_channel, block_number)
        last_pair.payee_state = 'payee_secret_revealed'
        events = mediator.events_for_balanceproof(
            channelmap,
            transfers_pair,
            block_number,
            UNIT_SECRET,
            UNIT_HASHLOCK,
        )
        assert not events
def test_events_for_balanceproof_middle_secret():
    """ Even though the secret should only propagate from the end of the chain
    to the front, if there is a payee node in the middle that knows the secret
    the Balance Proof is nevertheless sent.
    This can be done safely because the secret is known to the mediator and
    there is `reveal_timeout` blocks to withdraw the lock on-chain with the payer.
    """
    amount = 10
    channelmap, transfers_pair = make_transfers_pair(
        [HOP2_KEY, HOP3_KEY, HOP4_KEY, HOP5_KEY],
        amount,
    )
    block_number = 1
    # only a middle pair (not the last hop) learned the secret
    middle_pair = transfers_pair[1]
    middle_pair.payee_state = 'payee_secret_revealed'
    events = mediator.events_for_balanceproof(
        channelmap,
        transfers_pair,
        block_number,
        UNIT_SECRET,
        UNIT_HASHLOCK,
    )
    balance_proof = next(e for e in events if isinstance(e, SendBalanceProof))
    assert len(events) == 2
    assert any(isinstance(e, EventUnlockSuccess) for e in events)
    assert balance_proof.receiver == middle_pair.payee_address
    assert middle_pair.payee_state == 'payee_balance_proof'
def test_events_for_balanceproof_secret_unknown():
    """ Nothing to do if the secret is not known. """
    block_number = 1
    amount = 10
    channelmap, transfers_pair = make_transfers_pair(
        [HOP2_KEY, HOP3_KEY, HOP4_KEY],
        amount,
    )
    # the secret is not known, so no event should be used
    # (no payee state was moved to a secret-known state above)
    events = mediator.events_for_balanceproof(
        channelmap,
        transfers_pair,
        block_number,
        UNIT_SECRET,
        UNIT_HASHLOCK,
    )
    assert not events
def test_events_for_balanceproof_lock_expired():
    """ The balance proof should not be sent if the lock has expired. """
    amount = 10
    channelmap, transfers_pair = make_transfers_pair(
        [HOP2_KEY, HOP3_KEY, HOP4_KEY, HOP5_KEY],
        amount,
    )
    last_pair = transfers_pair[-1]
    last_pair.payee_state = 'payee_secret_revealed'
    # one block past the last pair's payee lock expiration
    block_number = last_pair.payee_transfer.lock.expiration + 1
    # the lock has expired, do not send a balance proof
    events = mediator.events_for_balanceproof(
        channelmap,
        transfers_pair,
        block_number,
        UNIT_SECRET,
        UNIT_HASHLOCK,
    )
    assert not events
    # the middle pair's lock expires later, so its balance proof may be sent
    middle_pair = transfers_pair[-2]
    middle_pair.payee_state = 'payee_secret_revealed'
    # Even though the last node did not receive the payment we should send the
    # balance proof to the middle node to avoid unnecessarily closing the
    # middle channel. This state should not be reached under normal operation.
    # The last hop needs to choose a proper reveal_timeout and must go on-chain
    # to withdraw the token before the lock expires.
    events = mediator.events_for_balanceproof(
        channelmap,
        transfers_pair,
        block_number,
        UNIT_SECRET,
        UNIT_HASHLOCK,
    )
    balance_proof = next(e for e in events if isinstance(e, SendBalanceProof))
    assert len(events) == 2
    assert any(isinstance(e, EventUnlockSuccess) for e in events)
    assert balance_proof.receiver == middle_pair.payee_address
    assert middle_pair.payee_state == 'payee_balance_proof'
def test_events_for_close():
    """ The node must close to unlock on-chain if the payee was paid. """
    amount = 10
    for payee_state in ('payee_balance_proof', 'payee_contract_withdraw'):
        channelmap, transfers_pair = make_transfers_pair(
            [HOP2_KEY, HOP3_KEY],
            amount,
        )
        pair = transfers_pair[0]
        pair.payee_state = payee_state
        channel_identifier = pair.payer_transfer.balance_proof.channel_address
        channel_state = channelmap[channel_identifier]
        # first block where waiting for the payer is no longer safe
        block_number = (
            pair.payer_transfer.lock.expiration - channel_state.reveal_timeout
        )
        events = mediator.events_for_close(
            channelmap,
            transfers_pair,
            block_number,
        )
        # the close targets the payer channel and marks the pair accordingly
        assert isinstance(events[0], ContractSendChannelClose)
        assert events[0].channel_identifier == pair.payer_transfer.balance_proof.channel_address
        assert pair.payer_state == 'payer_waiting_close'
def test_events_for_close_hold_for_unpaid_payee():
    """ If the secret is known but the payee transfer has not been paid the
    node must not settle on-chain, otherwise the payee can burn tokens to
    force the mediator to close a channel.
    """
    amount = 10
    channelmap, transfers_pair = make_transfers_pair(
        [HOP2_KEY, HOP3_KEY],
        amount,
    )
    pair = transfers_pair[0]
    for channel_state in channelmap.values():
        channel.register_secret(channel_state, UNIT_SECRET, UNIT_HASHLOCK)
    # preconditions
    assert pair.payee_state not in mediator.STATE_TRANSFER_PAID
    # do not generate events if the secret is known AND the payee is not paid
    channel_identifier = pair.payer_transfer.balance_proof.channel_address
    channel_state = channelmap[channel_identifier]
    first_unsafe_block = pair.payer_transfer.lock.expiration - channel_state.reveal_timeout
    events = mediator.events_for_close(
        channelmap,
        transfers_pair,
        first_unsafe_block,
    )
    assert not events
    assert pair.payee_state not in mediator.STATE_TRANSFER_PAID
    assert pair.payer_state not in mediator.STATE_TRANSFER_PAID
    # still no close at the payer lock expiration block itself
    payer_expiration_block = pair.payer_transfer.lock.expiration
    events = mediator.events_for_close(
        channelmap,
        transfers_pair,
        payer_expiration_block,
    )
    assert not events
    assert pair.payee_state not in mediator.STATE_TRANSFER_PAID
    assert pair.payer_state not in mediator.STATE_TRANSFER_PAID
    # nor once the payer lock is expired (the original redundantly
    # recomputed payer_expiration_block here; the value cannot change)
    events = mediator.events_for_close(
        channelmap,
        transfers_pair,
        payer_expiration_block + 1,
    )
    assert not events
    assert pair.payee_state not in mediator.STATE_TRANSFER_PAID
    assert pair.payer_state not in mediator.STATE_TRANSFER_PAID
def test_secret_learned():
    """ Once the secret is learned from the payee, the pair moves to
    payee_balance_proof / payer_secret_revealed and both a SendRevealSecret
    (backwards) and a SendBalanceProof are emitted.
    """
    amount = UNIT_TRANSFER_AMOUNT
    target = HOP2
    from_expiration = HOP1_TIMEOUT
    from_channel = factories.make_channel(
        partner_balance=amount,
        partner_address=UNIT_TRANSFER_SENDER,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    from_route = factories.route_from_channel(from_channel)
    from_transfer = factories.make_signed_transfer_for(
        from_channel,
        amount,
        HOP1,
        target,
        from_expiration,
        UNIT_SECRET,
    )
    channel1 = factories.make_channel(
        our_balance=amount,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    available_routes = [factories.route_from_channel(channel1)]
    channelmap = {
        from_channel.identifier: from_channel,
        channel1.identifier: channel1,
    }
    block_number = 1
    payment_network_identifier = factories.make_address()
    # first run the init transition to build the mediator state
    init_state_change = ActionInitMediator(
        payment_network_identifier,
        available_routes,
        from_route,
        from_transfer,
    )
    initial_state = None
    iteration = mediator.state_transition(
        initial_state,
        init_state_change,
        channelmap,
        block_number,
    )
    # then simulate the payee revealing the secret
    iteration = mediator.secret_learned(
        iteration.new_state,
        channelmap,
        block_number,
        UNIT_SECRET,
        UNIT_HASHLOCK,
        channel1.partner_state.address,
        'payee_secret_revealed',
    )
    transfer_pair = iteration.new_state.transfers_pair[0]
    assert from_transfer.lock.expiration > transfer_pair.payee_transfer.lock.expiration
    assert mediator.is_send_transfer_almost_equal(transfer_pair.payee_transfer, from_transfer)
    assert transfer_pair.payee_address == available_routes[0].node_address
    assert transfer_pair.payer_transfer.balance_proof.sender == from_route.node_address
    assert transfer_pair.payer_transfer == from_transfer
    assert iteration.new_state.secret == UNIT_SECRET
    assert transfer_pair.payee_state == 'payee_balance_proof'
    assert transfer_pair.payer_state == 'payer_secret_revealed'
    reveal_events = [e for e in iteration.events if isinstance(e, SendRevealSecret)]
    assert len(reveal_events) == 1
    balance_events = [e for e in iteration.events if isinstance(e, SendBalanceProof)]
    assert len(balance_events) == 1
def test_mediate_transfer():
    """ A payer transfer is forwarded on the first usable route, identical
    except for a decremented lock expiration.
    """
    amount = 10
    block_number = 5
    target = HOP2
    expiration = 30
    payer_channel = factories.make_channel(
        partner_balance=amount,
        partner_address=UNIT_TRANSFER_SENDER,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    payer_transfer = factories.make_signed_transfer_for(
        payer_channel,
        amount,
        HOP1,
        target,
        expiration,
        UNIT_SECRET,
    )
    channel1 = factories.make_channel(
        our_balance=amount,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    channelmap = {
        channel1.identifier: channel1,
        payer_channel.identifier: payer_channel,
    }
    possible_routes = [factories.route_from_channel(channel1)]
    mediator_state = MediatorTransferState(UNIT_HASHLOCK)
    iteration = mediator.mediate_transfer(
        mediator_state,
        possible_routes,
        payer_channel,
        channelmap,
        payer_transfer,
        block_number,
    )
    # a single outgoing mediated transfer, no other events
    assert len(iteration.events) == 1
    send_transfer = iteration.events[0]
    assert isinstance(send_transfer, SendMediatedTransfer)
    transfer = send_transfer.transfer
    assert transfer.identifier == payer_transfer.identifier
    assert transfer.token == payer_transfer.token
    assert transfer.lock.amount == payer_transfer.lock.amount
    assert transfer.lock.hashlock == payer_transfer.lock.hashlock
    assert transfer.target == payer_transfer.target
    assert payer_transfer.lock.expiration > transfer.lock.expiration
    assert send_transfer.recipient == channel1.partner_state.address
def test_init_mediator():
    """ ActionInitMediator with a valid route must create the mediator state
    and emit exactly one mediated transfer.
    """
    amount = UNIT_TRANSFER_AMOUNT
    target = HOP2
    from_expiration = HOP1_TIMEOUT
    from_channel = factories.make_channel(
        partner_balance=amount,
        partner_address=UNIT_TRANSFER_SENDER,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    from_route = factories.route_from_channel(from_channel)
    from_transfer = factories.make_signed_transfer_for(
        from_channel,
        amount,
        HOP1,
        target,
        from_expiration,
        UNIT_SECRET,
    )
    channel1 = factories.make_channel(
        our_balance=amount,
        partner_address=HOP2,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    available_routes = [factories.route_from_channel(channel1)]
    channelmap = {
        from_channel.identifier: from_channel,
        channel1.identifier: channel1,
    }
    block_number = 1
    payment_network_identifier = factories.make_address()
    init_state_change = ActionInitMediator(
        payment_network_identifier,
        available_routes,
        from_route,
        from_transfer,
    )
    # the transition starts from an empty state
    mediator_state = None
    iteration = mediator.state_transition(
        mediator_state,
        init_state_change,
        channelmap,
        block_number,
    )
    assert isinstance(iteration.new_state, MediatorTransferState)
    assert iteration.new_state.transfers_pair[0].payer_transfer == from_transfer
    msg = 'we have a valid route, the mediated transfer event must be emitted'
    assert iteration.events, msg
    mediated_transfers = [e for e in iteration.events if isinstance(e, SendMediatedTransfer)]
    assert len(mediated_transfers) == 1, 'mediated_transfer should /not/ split the transfer'
    send_transfer = mediated_transfers[0]
    mediated_transfer = send_transfer.transfer
    assert mediated_transfer.token == from_transfer.token, 'transfer token address mismatch'
    assert mediated_transfer.lock.amount == from_transfer.lock.amount, 'transfer amount mismatch'
    msg = 'transfer expiration mismatch'
    assert mediated_transfer.lock.expiration < from_transfer.lock.expiration, msg
    assert mediated_transfer.lock.hashlock == from_transfer.lock.hashlock, 'wrong hashlock'
def test_no_valid_routes():
    """When no candidate route has enough capacity, the mediator must refund
    the payer and end without creating a new mediator state."""
    amount = UNIT_TRANSFER_AMOUNT
    target = HOP2
    from_expiration = HOP1_TIMEOUT
    from_channel = factories.make_channel(
        our_balance=amount,
        partner_balance=amount,
        partner_address=UNIT_TRANSFER_SENDER,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    from_route = factories.route_from_channel(from_channel)
    from_transfer = factories.make_signed_transfer_for(
        from_channel,
        amount,
        HOP1,
        target,
        from_expiration,
        UNIT_SECRET,
    )
    # Both candidate routes lack capacity: one is short by a single token,
    # the other is completely unfunded.
    channel1 = factories.make_channel(
        our_balance=amount - 1,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    channel2 = factories.make_channel(
        our_balance=0,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    available_routes = [
        factories.route_from_channel(channel1),
        factories.route_from_channel(channel2),
    ]
    channelmap = {
        from_channel.identifier: from_channel,
        channel1.identifier: channel1,
        channel2.identifier: channel2,
    }
    block_number = 1
    payment_network_identifier = factories.make_address()
    init_state_change = ActionInitMediator(
        payment_network_identifier,
        available_routes,
        from_route,
        from_transfer,
    )
    mediator_state = None
    iteration = mediator.state_transition(
        mediator_state,
        init_state_change,
        channelmap,
        block_number,
    )
    # The transfer cannot be mediated: the only event is a refund to the payer.
    assert iteration.new_state is None
    assert len(iteration.events) == 1
    assert isinstance(iteration.events[0], SendRefundTransfer)
def test_lock_timeout_lower_than_previous_channel_settlement_period():
    """The forwarded lock's timeout must fit inside the payer channel's
    settlement window; otherwise an attacker controlling both endpoints can
    settle the payer channel before the mediator can use the secret."""
    # For a path A-B-C, B cannot forward a mediated transfer to C with
    # a lock timeout larger than the settlement timeout of the A-B
    # channel.
    #
    # Consider that an attacker controls both nodes A and C:
    #
    # Channels A <-> B and B <-> C have a settlement=10 and B has a
    # reveal_timeout=5
    #
    # (block=1) A -> B [T1 expires=20]
    # (block=1) B -> C [T2 expires=20-5]
    # (block=1) A close channel A-B
    # (block=5) C close channel B-C (waited until lock_expiration=settle_timeout)
    # (block=11) A call settle on channel A-B (settle_timeout is over)
    # (block=12) C call unlock on channel B-C (lock is still valid)
    #
    # If B used min(lock.expiration, previous_channel.settlement)
    #
    # (block=1) A -> B [T1 expires=20]
    # (block=1) B -> C [T2 expires=min(20,10)-5]
    # (block=1) A close channel A-B
    # (block=4) C close channel B-C (waited all possible blocks)
    # (block=5) C call unlock on channel B-C (C is forced to unlock)
    # (block=6) B learns the secret
    # (block=7) B call unlock on channel A-B (settle_timeout is over)
    amount = UNIT_TRANSFER_AMOUNT
    target = HOP2
    # Incoming expiration (20) deliberately exceeds the settlement window (10).
    high_from_expiration = 20
    low_reveal_timeout = 5
    low_settlement_expiration = 10
    from_channel = factories.make_channel(
        our_balance=amount,
        partner_balance=amount,
        partner_address=UNIT_TRANSFER_SENDER,
        token_address=UNIT_TOKEN_ADDRESS,
        reveal_timeout=low_reveal_timeout,
        settle_timeout=low_settlement_expiration,
    )
    from_route = factories.route_from_channel(from_channel)
    from_transfer = factories.make_signed_transfer_for(
        from_channel,
        amount,
        HOP1,
        target,
        high_from_expiration,
        UNIT_SECRET,
    )
    # Assert the precondition for the test. The message is still valid, and the
    # receiver cannot control the received lock expiration
    assert from_transfer.lock.expiration >= from_channel.settle_timeout
    channel1 = factories.make_channel(
        our_balance=amount,
        token_address=UNIT_TOKEN_ADDRESS,
        reveal_timeout=low_reveal_timeout,
        settle_timeout=low_settlement_expiration,
    )
    available_routes = [
        factories.route_from_channel(channel1),
    ]
    channelmap = {
        from_channel.identifier: from_channel,
        channel1.identifier: channel1,
    }
    block_number = 1
    payment_network_identifier = factories.make_address()
    init_state_change = ActionInitMediator(
        payment_network_identifier,
        available_routes,
        from_route,
        from_transfer,
    )
    mediator_state = None
    iteration = mediator.state_transition(
        mediator_state,
        init_state_change,
        channelmap,
        block_number,
    )
    assert isinstance(iteration.new_state, MediatorTransferState)
    assert iteration.events
    send_mediated = next(e for e in iteration.events if isinstance(e, SendMediatedTransfer))
    assert send_mediated
    msg = 'transfer expiration must be lower than the funding channel settlement window'
    assert send_mediated.transfer.lock.expiration < low_settlement_expiration, msg
def test_do_not_withdraw_an_almost_expiring_lock_if_a_payment_didnt_occur():
    """The mediator must never close the payer channel to withdraw a lock
    on-chain unless the payee was actually paid (balance proof received)."""
    # For a path A1-B-C-A2, an attacker controlling A1 and A2 should not be
    # able to force B-C to close the channel by burning token.
    #
    # The attack would be as follows:
    #
    # - Attacker uses two nodes to open two really cheap channels A1 <-> B and
    #   node A2 <-> C
    # - Attacker sends a mediated message with the lowest possible token
    #   amount from A1 through B and C to A2
    # - Since the attacker controls A1 and A2 it knows the secret, she can choose
    #   when the secret is revealed
    # - The secret is held back until the hash time lock B->C has *expired*,
    #   then it's revealed (meaning that the attacker is losing token, that's why
    #   it's using the lowest possible amount)
    # - C wants the token from B. It will reveal the secret and close the channel
    #   (because it must assume the balance proof won't make in time and it needs
    #   to unlock on-chain)
    #
    # Mitigation:
    #
    # - C should only close the channel B-C if he has paid A2, since this may
    #   only happen if the lock for the transfer C-A2 has not yet expired then C
    #   has enough time to follow the protocol without closing the channel B-C.
    amount = UNIT_TRANSFER_AMOUNT
    block_number = 1
    from_expiration = HOP1_TIMEOUT
    # C's channel with the Attacker node A2
    attacked_channel = factories.make_channel(
        our_balance=amount,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    target_attacker2 = attacked_channel.partner_state.address
    bc_channel = factories.make_channel(
        our_balance=amount,
        partner_balance=amount,
        partner_address=UNIT_TRANSFER_SENDER,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    from_route = factories.route_from_channel(bc_channel)
    from_transfer = factories.make_signed_transfer_for(
        bc_channel,
        amount,
        HOP1,
        target_attacker2,
        from_expiration,
        UNIT_SECRET,
    )
    available_routes = [
        factories.route_from_channel(attacked_channel),
    ]
    channelmap = {
        bc_channel.identifier: bc_channel,
        attacked_channel.identifier: attacked_channel,
    }
    payment_network_identifier = factories.make_address()
    init_state_change = ActionInitMediator(
        payment_network_identifier,
        available_routes,
        from_route,
        from_transfer,
    )
    mediator_state = None
    iteration = mediator.state_transition(
        mediator_state,
        init_state_change,
        channelmap,
        block_number,
    )
    # From this block on the mediator can no longer safely wait off-chain.
    attack_block_number = from_transfer.lock.expiration - attacked_channel.reveal_timeout
    assert not mediator.is_safe_to_wait(
        from_transfer.lock.expiration,
        attacked_channel.reveal_timeout,
        attack_block_number,
    )
    # Wait until it's not safe to wait for the off-chain unlock for B-C (and expire C-A2)
    new_iteration = iteration
    for new_block_number in range(block_number, attack_block_number + 1):
        new_iteration = mediator.state_transition(
            new_iteration.new_state,
            Block(new_block_number),
            channelmap,
            new_block_number,
        )
        # Only lock-expiry bookkeeping is allowed while blocks tick by.
        assert not any(
            event
            for event in new_iteration.events
            if not isinstance(event, EventUnlockFailed)
        )
    # and reveal the secret
    receive_secret = ReceiveSecretReveal(
        UNIT_SECRET,
        target_attacker2,
    )
    attack_iteration = mediator.state_transition(
        new_iteration.new_state,
        receive_secret,
        channelmap,
        attack_block_number,
    )
    # The payee was never paid, so the channel must not be closed on-chain.
    assert not any(
        isinstance(event, ContractSendChannelClose)
        for event in attack_iteration.events
    )
    # don't go on-chain since the balance proof was not received
    for new_block_number in range(block_number, from_transfer.lock.expiration + 1):
        new_iteration = mediator.state_transition(
            new_iteration.new_state,
            Block(new_block_number),
            channelmap,
            new_block_number,
        )
        assert not any(
            event
            for event in new_iteration.events
            if not isinstance(event, EventUnlockFailed)
        )
@pytest.mark.xfail(reason='Not implemented. Issue: #382')
def test_mediate_transfer_payee_timeout_must_be_lower_than_settlement_and_payer_timeout():
    """Placeholder for issue #382.

    BUG FIX: the function previously lacked the ``test_`` prefix, so pytest
    never collected it and the xfail marker was dead code.

    Test:
    - the current payer route/transfer is the reference, not the from_route / from_transfer
    - the lowest value from blocks_until_settlement and lock expiration must be used
    """
    raise NotImplementedError()
def test_payee_timeout_must_be_lower_than_payer_timeout_minus_reveal_timeout():
    """The payee expiration must sit TRANSIT_BLOCKS below
    payer.expiration - reveal_timeout, so a block arriving between the secret
    reveal and the payment cannot force an on-chain close (race below)."""
    # The payee could reveal the secret on its lock expiration block, the
    # mediator node will respond with a balance-proof to the payee since the
    # lock is valid and the mediator can safely get the token from the payer.
    # The secret is known and if there are no additional blocks the mediator
    # will be at risk of not being able to withdraw on-chain, so the channel
    # will be closed to safely withdraw.
    #
    # T2.expiration cannot be equal to T1.expiration - reveal_timeout:
    #
    #           v- T1.expiration - reveal_timeout
    # T1 |------****|
    # T2 |--****|   ^- T1.expiration
    #           ^- T2.expiration
    #
    # Race:
    #  1> Secret is learned
    #  2> balance-proof is sent to payee (payee transfer is paid)
    #  3! New block is mined and Raiden learns about it
    #  4> Now the secret is known, the payee is paid, and the current block is
    #     equal to the payer.expiration - reveal-timeout -> withdraw on chain
    #
    # The race is depending on the handling of 3 before 4.
    #
    block_number = 5
    expiration = 30
    payer_channel = factories.make_channel(
        partner_balance=UNIT_TRANSFER_AMOUNT,
        partner_address=UNIT_TRANSFER_SENDER,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    payer_transfer = factories.make_signed_transfer_for(
        payer_channel,
        UNIT_TRANSFER_AMOUNT,
        HOP1,
        UNIT_TRANSFER_TARGET,
        expiration,
        UNIT_SECRET,
    )
    channel1 = factories.make_channel(
        our_balance=UNIT_TRANSFER_AMOUNT,
        token_address=UNIT_TOKEN_ADDRESS,
    )
    channelmap = {
        channel1.identifier: channel1,
        payer_channel.identifier: payer_channel,
    }
    possible_routes = [factories.route_from_channel(channel1)]
    mediator_state = MediatorTransferState(UNIT_HASHLOCK)
    iteration = mediator.mediate_transfer(
        mediator_state,
        possible_routes,
        payer_channel,
        channelmap,
        payer_transfer,
        block_number,
    )
    send_mediated = next(e for e in iteration.events if isinstance(e, SendMediatedTransfer))
    assert isinstance(send_mediated, SendMediatedTransfer)
    # The payee expiration must back off an extra TRANSIT_BLOCKS to absorb the
    # block that may be mined between steps 2 and 4 of the race above.
    race_block = payer_transfer.lock.expiration - channel1.reveal_timeout - mediator.TRANSIT_BLOCKS
    assert mediator.TRANSIT_BLOCKS > 0
    assert send_mediated.transfer.lock.expiration == race_block
|
<filename>visgeom/plot.py
import visgeom.utils
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
def axis_equal(ax):
    """Give a 3D axes an equal aspect ratio by forcing a cubic bounding box.

    Matplotlib does not support ax.axis('equal') for 3D axes, so the x/y/z
    limits are recentred on their midpoints and all expanded to the same
    half-width (half of the largest current span).

    :param ax: Current axes
    """
    ax.set_box_aspect([1, 1, 1])
    spans = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
    centres = spans.mean(axis=1)
    half_width = 0.5 * np.abs(spans[:, 1] - spans[:, 0]).max()
    ax.set_xlim3d([centres[0] - half_width, centres[0] + half_width])
    ax.set_ylim3d([centres[1] - half_width, centres[1] + half_width])
    ax.set_zlim3d([centres[2] - half_width, centres[2] + half_width])
def plot_pose(ax, pose, **kwargs):
    """Plot the pose (R, t) in the global frame.

    Keyword Arguments
        * *alpha* -- Alpha value (transparency), default 1
        * *axis_colors* -- List of colors for each axis, default ('r', 'g', 'b')
        * *scale* -- Scale factor, default 1.0
        * *text* -- Text description plotted at pose origin, default ''

    :param ax: Current axes
    :param pose: The pose (R, t) of the local frame relative to the global frame,
        where R is a 3x3 rotation matrix and t is a 3D column vector.
    :param kwargs: See above
    :return: List of artists.
    """
    R, t = pose
    alpha = kwargs.get('alpha', 1)
    axis_colors = kwargs.get('axis_colors', ('r', 'g', 'b'))
    scale = kwargs.get('scale', 1)
    text = kwargs.get('text', '')
    artists = []
    # The columns of a valid rotation matrix are the local orthonormal basis
    # vectors in the global frame, so axis i is the segment t -> t + scale*R[:, i].
    for axis_index in range(3):
        endpoints = np.column_stack((t, t + scale * R[:, axis_index, np.newaxis]))
        artists += ax.plot(
            endpoints[0, :], endpoints[1, :], endpoints[2, :],
            axis_colors[axis_index] + '-', alpha=alpha,
        )
    if text:
        artists.append(ax.text(t[0, 0], t[1, 0], t[2, 0], text))
    return artists
def plot_camera_frustum(ax, K, pose_w_c, **kwargs):
    """Plot a camera frustum in the global "world" frame.

    Keyword Arguments
        * *alpha* -- Alpha value (transparency), default 1
        * *edgecolor* -- Frustum color, default 'k'
        * *img_size* -- Size of image in pixels, default (2*K[0, 2], 2*K[1, 2])
        * *scale* -- Scale factor, default 1.0
        * *text* -- Text description plotted at camera origin, default ''

    :param ax: Current axes
    :param K: Camera calibration matrix (3x3 upper triangular matrix)
    :param pose_w_c: The pose (R, t) of the camera frame relative to the world frame,
        where R is a 3x3 rotation matrix and t is a 3D column vector.
    :param kwargs: See above
    :return: List of artists.
    """
    R_w_c, t_w_c = pose_w_c
    alpha = kwargs.get('alpha', 1)
    edgecolor = kwargs.get('edgecolor', 'k')
    img_size = kwargs.get('img_size', (2 * K[0, 2], 2 * K[1, 2]))
    scale = kwargs.get('scale', 1)
    text = kwargs.get('text', '')
    # Homogeneous pixel coordinates of the four image corners.
    corners_uh = np.array([
        [0, img_size[0], img_size[0], 0],
        [0, 0, img_size[1], img_size[1]],
        np.ones(4),
    ])
    # Back-project the corners onto the normalised image plane.
    corners_xn = np.linalg.inv(K) @ corners_uh
    # Camera-frame frustum: optical centre followed by the four corners.
    frustum_c = np.hstack((np.zeros((3, 1)), corners_xn))
    # Scale and transform the frustum into the world frame.
    frustum_w = R_w_c @ (scale * np.identity(3)) @ frustum_c + t_w_c
    # One polyline that traverses every edge of the pyramid.
    order = (0, 4, 3, 0, 1, 4, 0, 3, 2, 0, 2, 1, 0)
    artists = ax.plot(frustum_w[0, order], frustum_w[1, order], frustum_w[2, order], edgecolor + '-', alpha=alpha)
    if text:
        artists.append(ax.text(t_w_c[0, 0], t_w_c[1, 0], t_w_c[2, 0], text))
    return artists
def plot_camera_image_plane(ax, K, pose_w_c, **kwargs):
    """Plot a camera image plane in the global "world" frame.

    Keyword Arguments
        * *alpha* -- Alpha value (transparency), default 0.25
        * *edgecolor* -- Color of edge around image plane, default 'k'
        * *facecolor* -- Image plane color, default 'b'
        * *img_size* -- Size of image in pixels, default (2*K[0, 2], 2*K[1, 2])
        * *scale* -- Scale factor, default 1.0

    :param ax: Current axes
    :param K: Camera calibration matrix (3x3 upper triangular matrix)
    :param pose_w_c: The pose (R, t) of the camera frame relative to the world frame,
        where R is a 3x3 rotation matrix and t is a 3D column vector.
    :param kwargs: See above
    :return: List of artists.
    """
    R_w_c, t_w_c = pose_w_c
    alpha = kwargs.get('alpha', 0.25)
    edgecolor = kwargs.get('edgecolor', 'k')
    facecolor = kwargs.get('facecolor', 'b')
    img_size = kwargs.get('img_size', (2 * K[0, 2], 2 * K[1, 2]))
    scale = kwargs.get('scale', 1)
    # Homogeneous pixel coordinates of the four image corners.
    corners_uh = np.array([
        [0, img_size[0], img_size[0], 0],
        [0, 0, img_size[1], img_size[1]],
        np.ones(4),
    ])
    # Back-project the corners onto the normalised image plane.
    corners_xn = np.linalg.inv(K) @ corners_uh
    # Scale and transform the plane corners into the world frame.
    plane_w = R_w_c @ (scale * np.identity(3)) @ corners_xn + t_w_c
    # Render the plane as a single filled polygon.
    polygon = Poly3DCollection([plane_w.T], alpha=alpha, facecolor=facecolor, edgecolor=edgecolor)
    return [ax.add_collection(polygon)]
def plot_covariance_ellipsoid(ax, mean, covariance, chi2_val=11.345, **kwargs):
    """Plot a 3D covariance ellipsoid.

    Keyword Arguments
        * *alpha* -- Alpha value (transparency), default 0.2
        * *color* -- Ellipsoid surface color, default 'r'
        * *n* -- Granularity of the ellipsoid, default 20

    :param ax: Current axes
    :param mean: The mean, a 3D column vector.
    :param covariance: The covariance, a 3x3 matrix.
    :param chi2_val: The chi-square distribution value for the ellipsoid scale. Default 11.345 corresponds to 99%
    :param kwargs: See above
    :return: List of artists.
    """
    alpha = kwargs.get('alpha', 0.2)
    color = kwargs.get('color', 'r')
    n = kwargs.get('n', 20)
    # The SVD of the covariance yields the principal directions (u) and the
    # variances (s); sqrt(chi2 * variance) gives each half-axis length.
    u, s, _ = np.linalg.svd(covariance)
    half_axes = np.sqrt(chi2_val * s)
    x, y, z = visgeom.utils.generate_ellipsoid(n, pose=(u, mean), scale=half_axes)
    return [ax.plot_surface(x, y, z, alpha=alpha, color=color)]
|
<reponame>ssavrim/curator
import elasticsearch
import curator
import os
import json
import click
import string, random, tempfile
from click import testing as clicktest
from mock import patch, Mock, MagicMock
from . import CuratorTestCase
from . import testvars as testvars
import logging
# Module-level logger for these integration tests.
logger = logging.getLogger(__name__)
# Target Elasticsearch instance; override via TEST_ES_SERVER=host:port.
host, port = os.environ.get('TEST_ES_SERVER', 'localhost:9200').split(':')
# NOTE(review): `port` is empty only for values ending in ':'; a bare host with
# no colon makes the unpack above raise ValueError — confirm that is acceptable.
port = int(port) if port else 9200
class TestLoggingModules(CuratorTestCase):
    """Verify the repo manager CLI works when the logging back-end module
    provides no ``NullHandler`` attribute."""
    def test_logger_without_null_handler(self):
        # Substitute a mock `logger` module (and its NullHandler) into
        # sys.modules for the duration of the CLI invocation.
        mock = Mock()
        modules = {'logger': mock, 'logger.NullHandler': mock.module}
        self.write_config(
            self.args['configfile'],
            testvars.client_conf_logfile.format(host, port, os.devnull)
        )
        with patch.dict('sys.modules', modules):
            self.create_repository()
            test = clicktest.CliRunner()
            result = test.invoke(
                curator.repo_mgr_cli,
                [
                    '--config', self.args['configfile'],
                    'show'
                ]
            )
            # 'show' should still print exactly the one repository created.
            self.assertEqual(self.args['repository'], result.output.rstrip())
class TestCLIRepositoryCreate(CuratorTestCase):
    """Integration tests for the ``create`` subcommand of the repo manager CLI."""
    def test_create_fs_repository_success(self):
        """Creating a filesystem repository must succeed and register it."""
        self.write_config(
            self.args['configfile'],
            testvars.client_conf_logfile.format(host, port, os.devnull)
        )
        test = clicktest.CliRunner()
        result = test.invoke(
            curator.repo_mgr_cli,
            [
                '--config', self.args['configfile'],
                'create',
                'fs',
                '--repository', self.args['repository'],
                '--location', self.args['location']
            ]
        )
        # BUG FIX: the original used `self.assertTrue(1, len(...))`, which
        # always passes because the second argument is only the failure
        # message.  The intent was to compare the repository count.
        self.assertEqual(1, len(self.client.snapshot.get_repository(repository=self.args['repository'])))
        self.assertEqual(0, result.exit_code)
    def test_create_fs_repository_fail(self):
        """Creating a filesystem repository at an invalid location must fail."""
        self.write_config(
            self.args['configfile'],
            testvars.client_conf_logfile.format(host, port, os.devnull)
        )
        test = clicktest.CliRunner()
        result = test.invoke(
            curator.repo_mgr_cli,
            [
                '--config', self.args['configfile'],
                'create',
                'fs',
                '--repository', self.args['repository'],
                '--location', os.devnull
            ]
        )
        self.assertEqual(1, result.exit_code)
    def test_create_s3_repository_fail(self):
        """Creating an S3 repository must fail in this test environment."""
        self.write_config(
            self.args['configfile'],
            testvars.client_conf_logfile.format(host, port, os.devnull)
        )
        test = clicktest.CliRunner()
        result = test.invoke(
            curator.repo_mgr_cli,
            [
                '--config', self.args['configfile'],
                'create',
                's3',
                '--bucket', 'mybucket',
                '--repository', self.args['repository'],
            ]
        )
        self.assertEqual(1, result.exit_code)
class TestCLIDeleteRepository(CuratorTestCase):
    """Integration tests for the ``delete`` subcommand of the repo manager CLI."""
    def test_delete_repository_success(self):
        # Create a repository first so there is something to delete.
        self.create_repository()
        self.write_config(
            self.args['configfile'],
            testvars.client_conf_logfile.format(host, port, os.devnull)
        )
        test = clicktest.CliRunner()
        _ = test.invoke(
            curator.repo_mgr_cli,
            [
                '--config', self.args['configfile'],
                'delete',
                '--yes',  # This ensures no prompting will happen
                '--repository', self.args['repository']
            ]
        )
        self.assertFalse(
            curator.repository_exists(self.client, self.args['repository'])
        )
    def test_delete_repository_notfound(self):
        # Deleting a repository that was never created must exit non-zero.
        self.write_config(
            self.args['configfile'],
            testvars.client_conf_logfile.format(host, port, os.devnull)
        )
        test = clicktest.CliRunner()
        result = test.invoke(
            curator.repo_mgr_cli,
            [
                '--config', self.args['configfile'],
                'delete',
                '--yes',  # This ensures no prompting will happen
                '--repository', self.args['repository']
            ]
        )
        self.assertEqual(1, result.exit_code)
class TestCLIShowRepositories(CuratorTestCase):
    """Integration tests for the ``show`` subcommand of the repo manager CLI."""
    def test_show_repository(self):
        # Create one repository; 'show' should print exactly its name.
        self.create_repository()
        self.write_config(
            self.args['configfile'],
            testvars.client_conf_logfile.format(host, port, os.devnull)
        )
        test = clicktest.CliRunner()
        result = test.invoke(
            curator.repo_mgr_cli,
            [
                '--config', self.args['configfile'],
                'show'
            ]
        )
        self.assertEqual(self.args['repository'], result.output.rstrip())
|
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import os
import re
import ast
from os import path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# NOTE(review): CURDIR duplicates `here` (same directory, computed with the
# os.path alias instead) — candidates for consolidation.
CURDIR = os.path.abspath(os.path.dirname(__file__))
def get_version():
    """Extract ``__version__`` from src/eventisc/__init__.py without importing it.

    Falls back to the literal string "unknown" when no assignment is found.

    :return: The version as a plain string.
    """
    init_path = os.path.join(CURDIR, "src/eventisc", "__init__.py")
    version_pattern = re.compile(r"__version__\s+=\s+(?P<version>.*)")
    with open(init_path, "r", encoding="utf8") as init_file:
        found = version_pattern.search(init_file.read())
    raw = found.group("version") if found is not None else '"unknown"'
    return str(ast.literal_eval(raw))
# Package metadata and build configuration for the event-isc distribution.
setup(
    name='event-isc',  # Required
    version=get_version(),  # Required
    description="Library for inter-service event-based communication",
    long_description=long_description,  # Optional
    long_description_content_type='text/markdown',  # Optional (see note above)
    # NOTE(review): this URL uses 'radiocut' while project_urls below use
    # 'radiocutfm' — one of the two organisations is probably wrong; confirm.
    url='https://github.com/radiocut/event-isc/',  # Optional
    # NOTE(review): author fields still contain scrubbed placeholders.
    author='<NAME>',  # Optional
    author_email='<EMAIL>',  # Optional
    # For a list of valid classifiers, see https://pypi.org/classifiers/
    classifiers=[  # Optional
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        # Pick your license as you wish
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        # These classifiers are *not* checked by 'pip install'. See instead
        # 'python_requires' below.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords='event inter-service-communication celery microservices',  # Optional
    package_dir={'': 'src'},  # Optional
    packages=find_packages(where='src'),  # Required
    # Specify which Python versions you support. In contrast to the
    # 'Programming Language' classifiers above, 'pip install' will check this
    # and refuse to install the project if the version does not match. If you
    # do not support Python 2, you can simplify this to '>=3.5' or similar, see
    # https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
    python_requires='>=3.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
    install_requires=['environs', 'pyyaml'],  # Optional
    # List additional groups of dependencies here (e.g. development
    # dependencies). Users will be able to install these using the "extras"
    # syntax, for example:
    #
    #   $ pip install sampleproject[dev]
    #
    # Similar to `install_requires` above, these must be valid existing
    # projects.
    extras_require={  # Optional
        'requests': ['requests'],
        'celery': ['celery'],
        'pika': ['pika'],
        'dev': ['check-manifest', 'responses', 'celery', 'requests', 'pika'],
        'test': ['coverage', 'responses', 'celery', 'requests', 'pika'],
    },
    # List additional URLs that are relevant to your project as a dict.
    #
    # This field corresponds to the "Project-URL" metadata fields:
    # https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
    #
    # Examples listed include a pattern for specifying where the package tracks
    # issues, where the source is hosted, where to say thanks to the package
    # maintainers, and where to support the project financially. The key is
    # what's used to render the link text on PyPI.
    project_urls={  # Optional
        'Bug Reports': 'https://github.com/radiocutfm/event-isc/issues',
        'Funding': 'https://donate.pypi.org',
        'Say Thanks!': 'https://saythanks.io/to/guillermo.narvaja%40radiocut.fm',
        'Source': 'https://github.com/radiocutfm/event-isc/',
    },
)
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
import json
import board
import busio
import audioio
import storage
import audiomp3
import neopixel
import digitalio
import adafruit_sdcard
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
import displayio
from adafruit_display_text.label import Label
from adafruit_bitmap_font import bitmap_font
# Bitmap font used for all on-screen quote text.
font = bitmap_font.load_font("/fonts/OstrichSans-Heavy-18.bdf")
display = board.DISPLAY
# Root display group: background bitmap behind, quote text group in front.
splash = displayio.Group(max_size=2)
bg_group = displayio.Group(max_size=1)
quote = displayio.Group(max_size=1, x=48, y=100)
splash.append(bg_group)
splash.append(quote)
### WiFi ###
# Get wifi details and more from a secrets.py file
try:
    from secrets import secrets
except ImportError:
    print("WiFi secrets are kept in secrets.py, please add them there!")
    raise
# If you are using a board with pre-defined ESP32 Pins:
esp32_cs = digitalio.DigitalInOut(board.ESP_CS)
esp32_ready = digitalio.DigitalInOut(board.ESP_BUSY)
esp32_reset = digitalio.DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
# Verify nina-fw version >= 1.4.0
# NOTE(review): this parses one character of the version string, so versions
# like "1.10.0" or "2.0.0" would be mis-handled — confirm acceptable.
assert (
    int(bytes(esp.firmware_version).decode("utf-8")[2]) >= 4
), "Please update nina-fw to >=1.4.0."
# On-board NeoPixel doubles as the WiFi manager's status light.
status_light = neopixel.NeoPixel(
    board.NEOPIXEL, 1, brightness=0.2
)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Enable the speaker
speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
speaker_enable.direction = digitalio.Direction.OUTPUT
speaker_enable.value = True
a = audioio.AudioOut(board.SPEAKER)
# Mount the SD card (shares the SPI bus with the ESP32) at /sd for MP3 caching.
print("Init SD Card")
sdcard = None
sd_cs = digitalio.DigitalInOut(board.SD_CS)
sdcard = adafruit_sdcard.SDCard(spi, sd_cs)
vfs = storage.VfsFat(sdcard)
storage.mount(vfs, "/sd")
def displayQuote(text):
    """Show `text` over the quote background bitmap for at least one second.

    The file must stay open while the OnDiskBitmap is displayed, so all the
    display work happens inside the `with` block.

    :param text: Quote text rendered with the preloaded bitmap font.
    """
    with open("/images/quote_bg.bmp", "rb") as bitmap_file:
        bitmap = displayio.OnDiskBitmap(bitmap_file)
        tile_grid = displayio.TileGrid(bitmap, pixel_shader=displayio.ColorConverter())
        # Replace whatever background / quote label was previously shown.
        if bg_group:
            bg_group.pop()
        bg_group.append(tile_grid)
        status_label = Label(font, text=text, color=0xFFFFFF, max_glyphs=300)
        status_label.x = 0
        status_label.y = 0
        if quote:
            quote.pop()
        quote.append(status_label)
        display.show(splash)
        # BUG FIX: replace the CPU-spinning time.monotonic() busy-wait with a
        # sleep; behaviourally equivalent (blocks ~1 s) but lets the board idle.
        time.sleep(1.0)
def postToAPI(endpoint, data):
    """POST `data` as JSON to `endpoint` using the shared API key.

    :param endpoint: URL of the synthesis API.
    :param data: JSON-serialisable request payload.
    :return: The decoded JSON response body.
    """
    headers = {"x-api-key": secrets['x-api-key']}
    response = wifi.post(endpoint, json=data, headers=headers, timeout=30)
    data = response.json()
    print("JSON Response: ", data)
    response.close()
    return data
def downloadfile(url, filename):
    """Stream `url` to `filename` in chunks.

    :param url: HTTP(S) URL to fetch; the server must send content-length.
    :param filename: Destination path (e.g. on the mounted SD card).
    """
    chunk_size = 4096
    r = wifi.get(url, stream=True)
    # BUG FIX: the original leaked the file handle and the response if a chunk
    # read failed mid-transfer; `with`/`finally` guarantee both are released.
    try:
        content_length = int(r.headers["content-length"])
        remaining = content_length
        print("Downloading file as ", filename)
        with open(filename, "wb") as file:
            for i in r.iter_content(min(remaining, chunk_size)):
                print(remaining)
                remaining -= len(i)
                file.write(i)
    finally:
        r.close()
    print("Download done")
def playMP3(filename):
    """Play an MP3 file through the on-board speaker, blocking until finished.

    :param filename: Path to the MP3 file (e.g. /sd/cache.mp3).
    """
    # BUG FIX: the original never closed the file handle; `with` guarantees it
    # is released after the decoder is deinitialised.
    with open(filename, "rb") as data:
        mp3 = audiomp3.MP3Decoder(data)
        a.play(mp3)
        while a.playing:
            pass
        mp3.deinit()
def synthesizeSpeech(data):
    """Request speech synthesis from the API, then display and play the result.

    :param data: Request payload; either {"text": ..., "voiceId": ...} or
        {"tags": ..., "voiceId": ...} (see speakText / speakQuote).
    """
    response = postToAPI(secrets['endpoint'], data)
    # Cache the synthesised audio on the SD card, show the spoken text, play it.
    downloadfile(response['url'], '/sd/cache.mp3')
    displayQuote(response['text'])
    playMP3("/sd/cache.mp3")
def speakText(text, voice):
    """Synthesise and speak a literal text string with the given voice id."""
    synthesizeSpeech({"text": text, "voiceId": voice})
def speakQuote(tags, voice):
    """Synthesise and speak a quote matching `tags` with the given voice id."""
    synthesizeSpeech({"tags": tags, "voiceId": voice})
# Connect to WiFi
print("Connecting to WiFi...")
wifi.connect()
print("Connected!")
displayQuote("Ready!")
speakText('Hello world! I am an Adafruit PyPortal running Circuit Python speaking to you using AWS Serverless', 'Joanna')
# Main loop: speak a tagged quote, then sleep `interval` minutes between runs.
while True:
    speakQuote('equality, humanity', 'Joanna')
    time.sleep(60*secrets['interval'])
|
<filename>script_utils.py
from math import hypot, inf
import math
import time
import cv2
import numpy
import mss
from colors import BANK_TEXT_COLOR, get_mask
from auto_gui import click
from settings import MONITOR, INVENTORY_SLOT_RECTS_ABSOLUTE, CENTER_OF_SCREEN_RELATIVE, DEBUG
from script_random import rsleep, random_point_near_center_of_rect
def get_screenshot(monitor=MONITOR):
    """Grab `monitor` with mss and return it as a numpy array (BGRA — see
    the BGRA2BGR conversion in get_screenshot_bgr)."""
    with mss.mss() as screenshot:
        return numpy.array(screenshot.grab(monitor))
def get_screenshot_bgr(monitor=MONITOR):
    """Grab `monitor` and convert the BGRA capture to 3-channel BGR for cv2."""
    screenshot = get_screenshot(monitor)
    return cv2.cvtColor(screenshot, cv2.COLOR_BGRA2BGR)
def is_image_on_screen(image, threshold=0.80):
    """Return True when `image` template-matches somewhere on the current
    screen with a score above `threshold`."""
    current_screen = get_screenshot_bgr()
    match_map = cv2.matchTemplate(current_screen, image, cv2.TM_CCOEFF_NORMED)
    _, best_score, _, _ = cv2.minMaxLoc(match_map)
    return best_score > threshold
def get_rectangle(mask, color_name):
    """Return the bounding box of the single contour in `mask`.

    :param mask: Binary mask (as produced by get_mask).
    :param color_name: Name used only in warning/error messages.
    :return: ((x1, y1), (x2, y2)) corners, or None when more than one contour
        was found.  Exits the program when no contour exists at all.
    """
    contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 1:
        print(f"Warning! More than one rect for {color_name}")
        return None
    try:
        x, y, w, h = cv2.boundingRect(contours[0])
    # BUG FIX: the exception object was captured but never used, and the
    # builtin exit() is provided by the `site` module and is not guaranteed
    # outside interactive use — raise SystemExit directly instead.
    except IndexError:
        print(f"Couldn't find {color_name}")
        raise SystemExit
    top_left = (x, y)
    bottom_right = (x + w, y + h)
    return (top_left, bottom_right)
def get_closest_rectangle_to_center(color, image=None):
    """Gets the closest `color` rectangle to the center of the screen (settings.CENTER_OF_SCREEN_RELATIVE).
    This is because the player is almost always in the center.

    :param color: Color passed to get_mask to isolate matching pixels.
    :param image: Optional debug image to draw the candidate rectangles onto.
    :return: ((x1, y1), (x2, y2)) of the closest rectangle, or None when no
        contours matched.
    """
    frame = get_screenshot()
    mask = get_mask(frame, color)
    contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    rectangles = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        top_left = (x, y)
        bottom_right = (x + w, y + h)
        rectangles.append((top_left, bottom_right))
    closest_rect = None
    closest_distance = inf
    for rect in rectangles:
        center_x, center_y = random_point_near_center_of_rect(*rect)
        # BUG FIX: only draw the debug rectangle when a debug image was
        # actually supplied; cv2.rectangle(None, ...) raises with the
        # default image=None.
        if image is not None:
            debug_rectangle(image, rect[0], rect[1])
        distance = hypot(
            center_x - CENTER_OF_SCREEN_RELATIVE[0],
            center_y - CENTER_OF_SCREEN_RELATIVE[1],
        )
        if DEBUG:
            print(f"{center_x},{center_y}: {distance}")
        if distance < closest_distance:
            closest_rect = rect
            closest_distance = distance
    return closest_rect
def wait_for_bank(click_coor=None):
    """Block until the bank interface is open (BANK_TEXT_COLOR is visible).

    :param click_coor: Optional (x, y) to keep clicking while waiting.
    """
    inside_bank = False
    while not inside_bank:
        if click_coor:
            click(click_coor[0], click_coor[1])
            rsleep(0.25)
        frame = get_screenshot()
        bank_amount_mask = get_mask(frame, BANK_TEXT_COLOR)
        inside_bank = numpy.any(bank_amount_mask)
        if not click_coor:
            time.sleep(0.1)  # Benchmarked at .033 without
def wait_for_deposit_all(click_coor):
    """Click the deposit-all button and block until the inventory is empty.

    :param click_coor: (x, y) of the deposit-all button.
    """
    empty_inventory_pic = cv2.imread("pics/empty_inventory.png", cv2.IMREAD_COLOR)
    start = time.time()
    empty_inventory = False
    click(*click_coor)
    while not empty_inventory:
        # NOTE(review): `start` is never reset, so after 1.5 s this re-clicks
        # on every loop iteration — confirm that is intended.
        if time.time() - start >= 1.5:
            click(*click_coor)
        empty_inventory = is_image_on_screen(empty_inventory_pic)
        # time.sleep(0.1)  # Benchmarked at ~0.18 without
def reset_xp_tracker():
    """Reset the on-screen XP tracker via its right-click context menu."""
    # Hard-coded absolute coordinates of the XP tracker widget and its menu
    # entry — assumes a fixed client window position; TODO confirm layout.
    click(825, 325)
    click(825, 325, button="right")
    click(840, 362)
def drop_all(slots_to_click, time_to_move=(0.12, 0.03)):
    """left click only at the moment

    :param slots_to_click: 1-based inventory slot indices to click.
    :param time_to_move: (mean, deviation) seconds for each mouse move.
    """
    for slot_index in slots_to_click:
        x, y = random_point_near_center_of_rect(*INVENTORY_SLOT_RECTS_ABSOLUTE[slot_index - 1], absolute=True)
        # time_to_move=(0.05, 0.01) is WAY too fast
        # The default of time_to_move=(0.15, 0.05) feels good for nearly all activities except dropping
        # your inventory quickly. A human doing it hundreds of times is a fair bit faster.
        click(x, y, time_to_move=time_to_move)
def debug_rectangle(image, top_left, bottom_right):
    """Draw a 2px red (BGR 0,0,255) rectangle onto `image` in place."""
    cv2.rectangle(image, top_left, bottom_right, [0, 0, 255], 2)
def debug_point(image, x, y, width=5):
    """Draw a filled cyan (BGR 255,255,0) dot of radius `width` at (x, y)."""
    cv2.circle(image, (x, y), radius=width, color=(255, 255, 0), thickness=-1)
def debug_line(image, start_point, end_point, width=5):
    """Draw a `width`-px cyan (BGR 255,255,0) line between the two points."""
    cv2.line(image, start_point, end_point, color=(255, 255, 0), thickness=width)
# Minimum normalised template-match score for an image to count as "found".
THRESHOLD = 0.85
def debug_images_on_screen(screenshot, images: list):
    """
    Draws red boxes around the list of images on the screen.
    Draws them onto screenshot. Expects something else to imshow screenshot.
    """
    for image in images:
        top_left, bottom_right = get_image_on_screen(screenshot, image, threshold=THRESHOLD)
        # BUG FIX: get_image_on_screen returns (None, None) when a template
        # is not found; the original passed those straight to cv2.rectangle
        # and crashed. Skip missing images instead.
        if top_left is None:
            continue
        debug_rectangle(screenshot, top_left, bottom_right)
def debug_points_on_screen(screenshot, points: list, width=5):
    """
    Draws cyan circles where each of the points are.
    Draws them onto screenshot. Expects something else to imshow screenshot.
    """
    for pt in points:
        debug_point(screenshot, pt[0], pt[1], width)
def get_image_on_screen(screenshot, image, threshold=0.85):
    """Template-match `image` inside `screenshot`.

    :param screenshot: image (ndarray) to search in
    :param image: template image (ndarray)
    :param threshold: minimum TM_CCOEFF_NORMED score to accept a match
    :return: ((x, y) top-left, (x, y) bottom-right) of the first match,
        or (None, None) when nothing scores above the threshold
    """
    result = cv2.matchTemplate(screenshot, image, cv2.TM_CCOEFF_NORMED)
    # BUG FIX: ndarray.shape is (rows, cols) = (height, width); the original
    # `w, h = image.shape[:2]` swapped the box size for non-square templates.
    h, w = image.shape[:2]
    matching_points = numpy.where(result >= threshold)
    # np.where returns (row_idxs, col_idxs); reversing gives (x, y) pairs.
    all_matches = zip(*matching_points[::-1])
    # Throwing away all but one match. There could be multiple here.
    try:
        top_left = list(all_matches)[0]
    except IndexError:
        print("Couldn't find inventory")
        return None, None
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return top_left, bottom_right
def display_debug_screenshot(screenshot, monitor=MONITOR, refresh_rate_ms=1000):
    """
    params:
        monitor: Where on the screen to screenshot
        refresh_rate_ms: milliseconds to wait between refresh
    """
    # Resize the screenshot back to the captured region's size so the
    # imshow window matches the thing that was screenshot (otherwise the
    # preview is fullscreen).
    preview = cv2.resize(screenshot, (monitor["width"], monitor["height"]))
    cv2.imshow("Game Preview", preview)
    cv2.waitKey(delay=refresh_rate_ms)
def get_inventory_corner_points(screenshot):
    """Locate the four inventory corner markers in `screenshot`.

    Each corner is template-matched independently; a corner that cannot be
    found is returned as None (a (None, None) match raises inside the try
    and is caught the same way).

    :return: (top_left, top_right, bottom_left, bottom_right) (x, y) points,
        any of which may be None
    """
    # Top Left of Inventory
    try:
        top_left_im = cv2.imread("pics/inventory_top_left.png", cv2.IMREAD_COLOR)
        top_left_invent_point, _ = get_image_on_screen(screenshot, top_left_im)
    except Exception as e:
        print("Couldn't find top_left corner", e)
        top_left_invent_point = None
    # Top Right of Inventory
    try:
        top_right_im = cv2.imread("pics/inventory_top_right.png", cv2.IMREAD_COLOR)
        # BUG FIX: shape is (rows, cols) = (height, width); the original
        # `w, _ = shape[:2]` used the template's HEIGHT as the x offset.
        w = top_right_im.shape[1]
        top_left, _ = get_image_on_screen(screenshot, top_right_im)
        top_right_invent_point = (top_left[0] + w, top_left[1])
    except Exception as e:
        print("Couldn't find top_right corner", e)
        top_right_invent_point = None
    # Bottom Left of Inventory
    try:
        bottom_left_im = cv2.imread("pics/inventory_bottom_left.png", cv2.IMREAD_COLOR)
        # BUG FIX: same swap — use shape[0] (height) for the y offset.
        h = bottom_left_im.shape[0]
        top_left, _ = get_image_on_screen(screenshot, bottom_left_im)
        bottom_left_invent_point = (top_left[0], top_left[1] + h)
    except Exception as e:
        print("Couldn't find bottom_left corner", e)
        bottom_left_invent_point = None
    # Bottom Right of Inventory
    try:
        bottom_right_im = cv2.imread("pics/inventory_bottom_right.png", cv2.IMREAD_COLOR)
        _, bottom_right_invent_point = get_image_on_screen(screenshot, bottom_right_im)
    except Exception as e:
        print("Couldn't find bottom_right corner", e)
        bottom_right_invent_point = None
    return top_left_invent_point, top_right_invent_point, bottom_left_invent_point, bottom_right_invent_point
def get_inventory_slots(monitor):
    """Screenshot `monitor`, find the inventory corners, and return the 28
    slot points — or None when any corner could not be located."""
    corners = get_inventory_corner_points(get_screenshot_bgr(monitor))
    if not all(corners):
        return None
    return calculate_inventory_slots(*corners)
def calculate_inventory_slots(top_left, top_right, bottom_left, bottom_right):
    """Compute the 28 (4 columns x 7 rows) inventory slot points from the
    four detected corner points of the inventory panel.

    The maths is heuristic: the panel is stretched slightly, then cut into
    5 column / 8 row bands whose interior boundaries land near slot centres.
    (Dead local reassignments from the original were removed; results are
    unchanged.)

    :param top_left: (x, y) of the panel's top-left corner
    :param top_right: (x, y) of the top-right corner
    :param bottom_left: (x, y) of the bottom-left corner
    :param bottom_right: (x, y) of the bottom-right corner
    :return: list of 28 (x, y) points, row-major (left-to-right, top-down)
    """
    # x positions: widen by 3% each side, take the 4 interior band boundaries.
    LEFT_RIGHT_ADJUST_FRACTION = 0.03
    COLUMN_WIDTH_DIVIDE = 5
    width = top_right[0] - top_left[0]
    LEFT_RIGHT_ADJUST = math.floor(width * LEFT_RIGHT_ADJUST_FRACTION)
    left = top_left[0] - LEFT_RIGHT_ADJUST
    right = bottom_right[0] + LEFT_RIGHT_ADJUST
    column_width = (right - left) // COLUMN_WIDTH_DIVIDE
    x_values = [math.floor(left + column_width * col) for col in range(1, 5)]
    # y positions: stretch vertically, take the 7 interior band boundaries.
    TOP_BOTTOM_ADJUST_FRACTION = 0.02
    ROW_WIDTH_DIVIDE = 8
    height = bottom_left[1] - top_left[1]
    TOP_BOTTOM_ADJUST = math.floor(height * TOP_BOTTOM_ADJUST_FRACTION)
    top = top_left[1] - TOP_BOTTOM_ADJUST
    bottom = bottom_right[1] + TOP_BOTTOM_ADJUST * 2
    row_width = (bottom - top) // ROW_WIDTH_DIVIDE
    inventory_points = []
    for row in range(1, 8):
        y = math.floor(top + row_width * row)
        for x in x_values:
            inventory_points.append((x, y))
    return inventory_points
def get_osrs_windows():
    """Return the kCGWindowBounds dict of every on-screen RuneLite window.

    macOS only (uses Quartz; imported lazily so other platforms can still
    import this module).
    """
    from Quartz import (
        CGWindowListCopyWindowInfo,
        kCGWindowListOptionOnScreenOnly,
        kCGNullWindowID,
    )

    options = kCGWindowListOptionOnScreenOnly
    windowList = CGWindowListCopyWindowInfo(options, kCGNullWindowID)
    osrs_windows = []
    for window in windowList:
        title = window.get("kCGWindowOwnerName")
        # BUG FIX: kCGWindowOwnerName can be missing, so `title` may be None
        # and the original crashed on None.lower().
        if title is not None and str(title).lower() == "runelite":
            osrs_windows.append(window.get("kCGWindowBounds"))
    return osrs_windows
|
<reponame>Kenny-Z/IOT-Message-Board-with-Face-Recognition
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'display.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated layout for the fixed 1024x600 message-board screen.

    NOTE(review): generated from 'display.ui' by the PyQt5 UI code generator
    5.13.2 — regenerating the form will discard edits, including comments.
    """

    def setupUi(self, Form):
        # Fixed-size window (min == max == 1024x600) with a background image.
        Form.setObjectName("Form")
        Form.resize(1024, 600)
        Form.setMinimumSize(QtCore.QSize(1024, 600))
        Form.setMaximumSize(QtCore.QSize(1024, 600))
        Form.setStyleSheet("QWidget#Form{\n"
"    border-image: url(:/login/main.jpg);\n"
"}")
        # Large text area that holds the displayed messages.
        self.textEdit = QtWidgets.QTextEdit(Form)
        self.textEdit.setGeometry(QtCore.QRect(80, 190, 851, 231))
        self.textEdit.setStyleSheet("font: 75 22pt \"MS Shell Dlg 2\";")
        self.textEdit.setObjectName("textEdit")
        # "Knock Knock!!" button — wired to Form.image below.
        self.pushButton_4 = QtWidgets.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(300, 460, 200, 80))
        self.pushButton_4.setStyleSheet("QPushButton { \n"
" \n"
"    background-color: rgb(255, 255, 255);\n"
"    font: 16pt \"MV Boli\";\n"
"    border-radius: 25px; \n"
"    color: rgb(0, 0, 0);\n"
"    border: 2px solid rgb(0,0,0);\n"
"\n"
"}\n"
"QPushButton:hover {\n"
" \n"
"    background-color: rgb(0, 0, 0);\n"
"    color: rgb(255, 255, 255);\n"
"    border: 2px solid rgb(255,255,255);\n"
" \n"
"\n"
"}\n"
"QPushButton:pressed{\n"
"    background-color: rgb(255, 255, 255);\n"
"    color: rgb(0, 0, 0);\n"
"    border-color: rgb(255, 255, 255);\n"
"    border: 2px solid rgb(0,0,0);\n"
"}\n"
"")
        self.pushButton_4.setObjectName("pushButton_4")
        # "Login" button (sic: object name "diaplay_login" kept as generated).
        self.diaplay_login = QtWidgets.QPushButton(Form)
        self.diaplay_login.setGeometry(QtCore.QRect(520, 460, 200, 80))
        self.diaplay_login.setStyleSheet("QPushButton { \n"
" \n"
"    background-color: rgb(255, 255, 255);\n"
"    font: 16pt \"MV Boli\";\n"
"    border-radius: 25px; \n"
"    color: rgb(0, 0, 0);\n"
"    border: 2px solid rgb(0,0,0);\n"
"\n"
"}\n"
"QPushButton:hover {\n"
" \n"
"    background-color: rgb(0, 0, 0);\n"
"    color: rgb(255, 255, 255);\n"
"    border: 2px solid rgb(255,255,255);\n"
" \n"
"\n"
"}\n"
"QPushButton:pressed{\n"
"    background-color: rgb(255, 255, 255);\n"
"    color: rgb(0, 0, 0);\n"
"    border-color: rgb(255, 255, 255);\n"
"    border: 2px solid rgb(0,0,0);\n"
"}\n"
"")
        self.diaplay_login.setCheckable(False)
        self.diaplay_login.setObjectName("diaplay_login")
        # "Refresh" button — reloads the message list.
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(780, 130, 150, 50))
        self.pushButton.setStyleSheet("QPushButton { \n"
" \n"
"    background-color: rgb(255, 255, 255);\n"
"    font: 12pt \"MV Boli\";\n"
"    border-radius: 25px; \n"
"    color: rgb(0, 0, 0);\n"
"    border: 2px solid rgb(0,0,0);\n"
"\n"
"}\n"
"QPushButton:hover {\n"
" \n"
"    background-color: rgb(0, 0, 0);\n"
"    color: rgb(255, 255, 255);\n"
"    border: 2px solid rgb(255,255,255);\n"
" \n"
"\n"
"}\n"
"QPushButton:pressed{\n"
"    background-color: rgb(255, 255, 255);\n"
"    color: rgb(0, 0, 0);\n"
"    border-color: rgb(255, 255, 255);\n"
"    border: 2px solid rgb(0,0,0);\n"
"}\n"
"")
        self.pushButton.setObjectName("pushButton")

        self.retranslateUi(Form)
        # Wire the buttons to slots expected on the owning Form widget.
        self.diaplay_login.clicked.connect(Form.show_login_GUI)
        self.pushButton_4.clicked.connect(Form.image)
        self.pushButton.clicked.connect(Form.refresh_msgs)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # All user-visible strings share the "Form" translation context.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.pushButton_4.setText(_translate("Form", "Knock Knock!!"))
        self.diaplay_login.setText(_translate("Form", "Login"))
        self.pushButton.setText(_translate("Form", "Refresh"))
import image_rc
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utils to setup scope for a Block."""
import inspect
import logging
from contextlib import ExitStack, contextmanager
from typing import Any, Iterator, List, Optional
import popart
logger = logging.getLogger(__name__)
class Scope:
    """Describes the popart scopes (namescope, virtual graph, execution phase
    or pipeline stage) that a block of ops should be built under.

    `execution_phase` and `pipeline_stage` are mutually exclusive; the guard
    lives in `__setattr__` so later assignments are also rejected.
    """

    def __init__(self,
                 name: str,
                 vgid: Optional[int] = None,
                 execution_phase: Optional[int] = None,
                 pipeline_stage: Optional[int] = None,
                 additional_scopes: Optional[List] = None):
        if (execution_phase is not None) and (pipeline_stage is not None):
            raise ValueError(
                'Cannot set both `execution_phase` and `pipeline_stage`.')
        if execution_phase is not None:
            self.execution_phase: int = execution_phase
        if pipeline_stage is not None:
            self.pipeline_stage: int = pipeline_stage
        self.name: str = name
        # vgid is always assigned (possibly None).
        self.vgid: Optional[int] = vgid
        # Extra context managers to enter alongside the builder scopes.
        self.additional_scopes = additional_scopes

    def __setattr__(self, name: str, value: Any):
        # Enforce mutual exclusion on every assignment, not just in __init__.
        if name == 'execution_phase':
            if hasattr(self, 'pipeline_stage'):
                raise ValueError(
                    'Cannot set `execution_phase` when `pipeline_stage` exists.'
                )
        if name == 'pipeline_stage':
            if hasattr(self, 'execution_phase'):
                raise ValueError(
                    'Cannot set `pipeline_stage` when `execution_phase` exists.'
                )
        super(Scope, self).__setattr__(name, value)

    def __repr__(self) -> str:
        if hasattr(self, 'execution_phase'):
            return f'Namescope: {self.name}, Execution phase: {self.execution_phase}, VGID: {self.vgid}'
        elif hasattr(self, 'pipeline_stage'):
            return f'Namescope: {self.name}, Pipeline stage: {self.pipeline_stage}, VGID: {self.vgid}'
        # BUG FIX: __init__ always sets `vgid` (possibly None), so the
        # original `hasattr(self, 'vgid')` was always true, the bare fallback
        # below was unreachable, and reprs printed "VGID: None". Test the
        # value instead of the attribute's existence.
        if self.vgid is not None:
            return f'Namescope: {self.name}, VGID: {self.vgid}'
        return f'Namescope: {self.name}'
class ScopeProvider():
    """Utility that book-keeps scopes.

    This class tracks scope index assignment used in phased_execution mode.

    Attributes:
        phased_execution_type: 'DUAL' or 'SINGLE' denoting number of devices
            per replica in phased execution mode.
    """

    def __init__(self, phased_execution_type="DUAL"):
        # DUAL advances one phase at a time; SINGLE advances by two.
        if phased_execution_type == "DUAL":
            self.scope_increment = 1
            start_phase = -1
        else:
            self.scope_increment = 2
            start_phase = -2
        self.prev_phase = start_phase
        self.phased_execution_type = phased_execution_type

    def scope_provider(self, builder: popart.Builder,
                       scope: Scope) -> ExitStack:
        """Build an ExitStack of entered builder contexts for `scope`.

        Args:
            builder (popart.Builder): Builder used to create popart model.
            scope: Scope

        Returns:
            ExitStack: stack of entered builder contexts; the caller is
            responsible for closing it. (The original docstring claimed this
            yields — it returns.)
        """
        context = ExitStack()
        if hasattr(scope, 'name'):
            context.enter_context(builder.nameScope(scope.name))
        if hasattr(scope, 'execution_phase'):
            # Warn when phases are skipped relative to the last one handed out.
            if (scope.execution_phase - self.prev_phase) > 1:
                logger.warning('Skipping phased_execution scope: {0} -> {1}'.format(
                    self.prev_phase, scope.execution_phase))
            context.enter_context(builder.executionPhase(scope.execution_phase))
            self.prev_phase = max(self.prev_phase, scope.execution_phase)
        if hasattr(scope, 'pipeline_stage'):
            context.enter_context(builder.pipelineStage(scope.pipeline_stage))
        if scope.vgid is not None:
            context.enter_context(builder.virtualGraph(scope.vgid))
        if scope.additional_scopes:
            # FIX: the original rebound `scope` as the loop variable here,
            # shadowing the parameter mid-iteration; use a distinct name.
            for extra_scope in scope.additional_scopes:
                context.enter_context(extra_scope)
        return context

    @contextmanager
    def __call__(self, builder, scope):
        """Context manager that enters all builder scopes for `scope`."""
        logger.debug(scope)
        context = self.scope_provider(builder, scope)
        yield context
        context.close()

    def get_next_phase(self) -> int:
        """Get next execution phase.

        Returns:
            int: Next execution phase.
        """
        self.prev_phase += self.scope_increment
        return self.prev_phase

    def get_prev_phase(self) -> int:
        """Get last execution phase.

        Returns:
            int: Previous execution phase.
        """
        return self.prev_phase

    def get_scope(self, name, execution_phase=None, skip_scope=False, additional_scopes=None):
        """Create a Scope, resolving the 'next'/'prev' phase keywords.

        Args:
            name: namescope string.
            execution_phase: None, an int, 'next' or 'prev'.
            skip_scope: with 'next', burn one phase index first (leaves a gap).
            additional_scopes: extra context managers to enter with the scope.
        """
        # Scopes must be fixed at layer definition time, not during forward().
        if inspect.stack()[1].function == 'forward':
            raise ValueError(
                'Scoping must be assigned during layer definition, before the forward pass.'
            )
        if execution_phase is None:
            return Scope(name, additional_scopes=additional_scopes)
        if execution_phase == 'next':
            if skip_scope:
                self.get_next_phase()
            execution_phase = self.get_next_phase()
        if execution_phase == 'prev':
            execution_phase = self.get_prev_phase()
        # Even phases map to virtual graph 0, odd phases to 1.
        vgid = execution_phase % 2
        return Scope(name, vgid, execution_phase, additional_scopes=additional_scopes)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################
# import dependencies
##############################
import os
import csv
import argparse
import datetime
import numpy as np
from glob import glob
from keras import backend
from keras import layers, models
from keras.models import Model
from keras.optimizers import Adam
from keras.utils import plot_model
from keras.models import Sequential
from keras.layers import Dense, Activation, concatenate
from keras.layers import LSTM, Dropout, CuDNNLSTM, Input
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.backend.tensorflow_backend import clear_session
from keras.callbacks import EarlyStopping, ModelCheckpoint
##############################
# define functions
##############################
def load_data(subtype="words"):
    """Load the pre-processed train/valid/test arrays for a model subtype.

    :param subtype: "words", "char", or "all" (X arrays become
        (words, char) tuples)
    :return: X_train, X_valid, X_test, y_train, y_valid, y_test
    :raises ValueError: on an unknown subtype (the original fell through
        to a NameError instead)
    """
    # Pass allow_pickle explicitly instead of monkey-patching np.load
    # globally: the original never restored np.load if a load raised.
    def _load(path):
        return np.load(path, allow_pickle=True)

    if subtype == "words":
        X_train = _load("./data/rnn/words/X_train.npy")
        X_valid = _load("./data/rnn/words/X_valid.npy")
        X_test = _load("./data/rnn/words/X_test.npy")
    elif subtype == "char":
        X_train = _load("./data/rnn/char/X_train.npy")
        X_valid = _load("./data/rnn/char/X_valid.npy")
        X_test = _load("./data/rnn/char/X_test.npy")
    elif subtype == "all":
        X_train = (_load("./data/rnn/words/X_train.npy"),
                   _load("./data/rnn/char/X_train.npy"))
        X_valid = (_load("./data/rnn/words/X_valid.npy"),
                   _load("./data/rnn/char/X_valid.npy"))
        X_test = (_load("./data/rnn/words/X_test.npy"),
                  _load("./data/rnn/char/X_test.npy"))
    else:
        raise ValueError(
            "subtype must be 'words', 'char' or 'all', got %r" % (subtype,))
    y_train = _load("./data/rnn/y_train.npy")
    y_valid = _load("./data/rnn/y_valid.npy")
    y_test = _load("./data/rnn/y_test.npy")
    return X_train, X_valid, X_test, y_train, y_valid, y_test
def getCurrentTime():
    """Return the current local time formatted 'YYYY_MM_DD_HH_MM_SS'
    (used to name run/log directories)."""
    now = datetime.datetime.now()
    return now.strftime("%Y_%m_%d_%H_%M_%S")
def getModel(embedding_matrix_words,
             embedding_matrix_char,
             subtype="words",
             embedding_vector_size=300,
             droprate=0.2,
             vocab_size_tokens=5000,
             max_text_length_tokens=500,
             vocab_size_char=65,
             max_text_length_char=1000):
    """Build the CNN+LSTM classifier for the chosen input representation.

    :param embedding_matrix_words: pre-trained word-embedding weights or None
    :param embedding_matrix_char: pre-trained char-embedding weights or None
    :param subtype: "words", "char", or "all" (two-branch concatenated model)
    :param embedding_vector_size: embedding dimensionality (must match the
        supplied matrices)
    :param droprate: dropout rate after the conv stack and the LSTM
    :return: an uncompiled keras model
    :raises ValueError: on unknown subtype (original returned None silently)
    """
    if subtype in ["words", "char"]:
        model = Sequential()
        if subtype == "words":
            if embedding_matrix_words is not None:
                model.add(
                    Embedding(vocab_size_tokens,
                              embedding_vector_size,
                              input_length=max_text_length_tokens,
                              weights=[embedding_matrix_words]))
            else:
                model.add(
                    Embedding(vocab_size_tokens,
                              embedding_vector_size,
                              input_length=max_text_length_tokens))
        elif subtype == "char":
            # BUG FIX: this branch previously tested embedding_matrix_words,
            # so pre-trained character embeddings were silently ignored.
            if embedding_matrix_char is not None:
                model.add(
                    Embedding(vocab_size_char,
                              embedding_vector_size,
                              input_length=max_text_length_char,
                              weights=[embedding_matrix_char]))
            else:
                model.add(
                    Embedding(vocab_size_char,
                              embedding_vector_size,
                              input_length=max_text_length_char))
        model.add(Conv1D(filters=32, kernel_size=3, padding='same'))
        model.add(Conv1D(filters=64, kernel_size=3, padding='same'))
        model.add(Activation("relu"))
        model.add(Dropout(droprate))
        model.add(MaxPooling1D(pool_size=2))
        # CuDNNLSTM has no recurrent_dropout, hence the separate Dropout layer.
        if len(backend.tensorflow_backend._get_available_gpus()) > 0:
            model.add(CuDNNLSTM(100))
            model.add(Dropout(droprate))
        else:
            model.add(LSTM(100, recurrent_dropout=droprate))
        model.add(Dense(50))
        model.add(Activation("relu"))
        model.add(Dense(1))
        model.add(Activation("sigmoid"))
        return model
    elif subtype == "all":
        # define token-based model
        input_tokens = Input(shape=(max_text_length_tokens, ))
        if embedding_matrix_words is not None:
            tokens = Embedding(vocab_size_tokens,
                               embedding_vector_size,
                               input_length=max_text_length_tokens,
                               weights=[embedding_matrix_words])(input_tokens)
        else:
            tokens = Embedding(
                vocab_size_tokens,
                embedding_vector_size,
                input_length=max_text_length_tokens)(input_tokens)
        tokens = Conv1D(filters=32, kernel_size=3, padding='same')(tokens)
        tokens = Conv1D(filters=64, kernel_size=3, padding='same')(tokens)
        tokens = Activation("relu")(tokens)
        tokens = Dropout(droprate)(tokens)
        tokens = MaxPooling1D(pool_size=2)(tokens)
        if len(backend.tensorflow_backend._get_available_gpus()) > 0:
            tokens = CuDNNLSTM(100)(tokens)
            tokens = Dropout(droprate)(tokens)
        else:
            tokens = LSTM(100, recurrent_dropout=droprate)(tokens)
        tokens = Model(inputs=input_tokens, outputs=tokens)
        # define character-based model
        input_char = Input(shape=(max_text_length_char, ))
        if embedding_matrix_char is not None:
            char = Embedding(vocab_size_char,
                             embedding_vector_size,
                             input_length=max_text_length_char,
                             weights=[embedding_matrix_char])(input_char)
        else:
            char = Embedding(vocab_size_char,
                             embedding_vector_size,
                             input_length=max_text_length_char)(input_char)
        char = Conv1D(filters=32, kernel_size=3, padding='same')(char)
        char = Conv1D(filters=64, kernel_size=3, padding='same')(char)
        char = Activation("relu")(char)
        char = Dropout(droprate)(char)
        char = MaxPooling1D(pool_size=2)(char)
        if len(backend.tensorflow_backend._get_available_gpus()) > 0:
            char = CuDNNLSTM(100)(char)
            char = Dropout(droprate)(char)
        else:
            char = LSTM(100, recurrent_dropout=droprate)(char)
        char = Model(inputs=input_char, outputs=char)
        # merge both branches into one classifier head
        combined = concatenate([tokens.output, char.output])
        combined = Dense(50)(combined)
        combined = Activation("relu")(combined)
        combined = Dense(1)(combined)
        combined = Activation("sigmoid")(combined)
        model = Model(inputs=[tokens.input, char.input], outputs=combined)
        return model
    raise ValueError("subtype must be 'words', 'char' or 'all'")
def singleRun(subtype="words", pre_trained_embeddings=True):
    """Train one model with default hyperparameters and print test accuracy.

    :param subtype: "words", "char" or "all" (dual-input model)
    :param pre_trained_embeddings: load the 300d GloVe matrices when True
    :return: the trained keras model
    """
    X_train, X_valid, X_test, y_train, y_valid, y_test = load_data(subtype)
    # Resolve which (if any) pre-trained embedding matrices to load:
    # words -> word matrix only, char -> char matrix only, otherwise both.
    embedding_matrix_words = None
    embedding_matrix_char = None
    if pre_trained_embeddings:
        if subtype != "char":
            embedding_matrix_words = np.load(
                "./data/glove/glove.6B.300d_word_emb.npy")
        if subtype != "words":
            embedding_matrix_char = np.load(
                "./data/glove/glove.6B.300d_char_emb.npy")
    model = getModel(embedding_matrix_words, embedding_matrix_char, subtype)
    model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=['accuracy'])
    if subtype in ["words", "char"]:
        train_in, valid_in, test_in = X_train, X_valid, X_test
    else:
        # The dual-input model expects [word_array, char_array] lists.
        train_in = [X_train[0], X_train[1]]
        valid_in = [X_valid[0], X_valid[1]]
        test_in = [X_test[0], X_test[1]]
    model.fit(train_in,
              y_train,
              epochs=3,
              batch_size=256,
              validation_data=(valid_in, y_valid),
              shuffle=True)
    scores = model.evaluate(test_in, y_test, verbose=1)
    print("Accuracy: " + str(scores[1] * 100) + "%")
    return model
def gridSearch(subtype="words", pre_trained_embeddings=True):
    """Grid-search over embedding size, dropout, batch size and learning rate.

    Every configuration is appended to pickles/<timestamp>/log.csv; model
    checkpoints are pruned so only the best-test-accuracy run's file is kept.

    :param subtype: "words", "char" or "all" (dual-input model)
    :param pre_trained_embeddings: load GloVe matrices matching each grid
        embedding size when True, else train embeddings from scratch
    :return: 0 on completion
    """
    # load data into memory
    X_train, X_valid, X_test, y_train, y_valid, y_test = load_data(subtype)
    # create log directory and info csv
    if pre_trained_embeddings:
        current_time = getCurrentTime() + "_rnn_" + subtype + "_glove_embed"
    else:
        current_time = getCurrentTime() + "_rnn_" + subtype + "_random_embed"
    os.makedirs("pickles/" + current_time)
    # NOTE(review): csvfile stays open if an exception escapes the loops;
    # a with-block would be safer.
    csvfile = open('pickles/' + current_time + '/' + 'log.csv', 'w')
    fieldnames = [
        "model", "pre_trained_embeddings", "embedding_size", "droprate",
        "batch_size", "learning_rate", "best_train", "best_val", "best_test"
    ]
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    csvfile.flush()
    # define grid parameters
    embedding_size = [50, 100, 200, 300]
    droprate = np.linspace(0.1, 0.3, 3)
    batch_size = [128, 256]
    learning_rate = np.linspace(0.001, 0.006, 3)
    counter = 0
    record_test = 0
    # run grid-search
    for e in embedding_size:
        # save np.load
        np_load_old = np.load
        # modify the default parameters of np.load
        np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
        if subtype == "words":
            if pre_trained_embeddings:
                embedding_matrix_words = np.load("./data/glove/glove.6B." +
                                                 str(e) + "d_word_emb.npy")
            else:
                embedding_matrix_words = None
            embedding_matrix_char = None
        elif subtype == "char":
            if pre_trained_embeddings:
                embedding_matrix_char = np.load("./data/glove/glove.6B." +
                                                str(e) + "d_char_emb.npy")
            else:
                embedding_matrix_char = None
            embedding_matrix_words = None
        else:
            if pre_trained_embeddings:
                embedding_matrix_words = np.load("./data/glove/glove.6B." +
                                                 str(e) + "d_word_emb.npy")
                embedding_matrix_char = np.load("./data/glove/glove.6B." +
                                                str(e) + "d_char_emb.npy")
            else:
                embedding_matrix_words = None
                embedding_matrix_char = None
        np.load = np_load_old
        # move into grid-search loop
        for d in droprate:
            for b in batch_size:
                for l in learning_rate:
                    clear_session()
                    model = getModel(embedding_matrix_words,
                                     embedding_matrix_char,
                                     subtype,
                                     embedding_vector_size=e,
                                     droprate=d)
                    # Early-stop on validation accuracy and checkpoint the
                    # best epoch of this configuration.
                    callbacks = [
                        EarlyStopping(monitor='val_acc',
                                      patience=5,
                                      restore_best_weights=True),
                        ModelCheckpoint(filepath='./pickles/' + current_time +
                                        '/best_model_' + str(counter) + '.h5',
                                        monitor='val_acc',
                                        save_best_only=True)
                    ]
                    model.compile(optimizer=Adam(lr=l),
                                  loss="binary_crossentropy",
                                  metrics=['accuracy'])
                    if subtype in ["words", "char"]:
                        history = model.fit(X_train,
                                            y_train,
                                            epochs=50,
                                            batch_size=b,
                                            validation_data=(X_valid, y_valid),
                                            shuffle=True,
                                            callbacks=callbacks)
                        scores = model.evaluate(X_test, y_test, verbose=1)
                        print("Accuracy: " + str(scores[1] * 100) + "%")
                    elif subtype == "all":
                        history = model.fit(
                            [X_train[0], X_train[1]],
                            y_train,
                            epochs=50,
                            batch_size=b,
                            validation_data=([X_valid[0],
                                              X_valid[1]], y_valid),
                            shuffle=True,
                            callbacks=callbacks)
                        scores = model.evaluate([X_test[0], X_test[1]],
                                                y_test,
                                                verbose=1)
                        print("Accuracy: " + str(scores[1] * 100) + "%")
                    max_index = np.argmax(history.history["val_acc"])
                    best_test = scores[1]
                    # Keep only the checkpoint of the best-so-far test score;
                    # delete all other saved checkpoints.
                    if best_test >= record_test:
                        record_test = best_test
                        todel = [
                            el for el in glob("./pickles/" + current_time +
                                              "/best_model*")
                            if 'best_model_' + str(counter) + '.h5' not in el
                        ]
                        if len(todel) > 0:
                            for el in todel:
                                os.remove(el)
                    else:
                        os.remove('./pickles/' + current_time +
                                  '/best_model_' + str(counter) + '.h5')
                    # write to csv file in loop
                    writer.writerow({
                        "model":
                        str(counter),
                        "pre_trained_embeddings":
                        str(pre_trained_embeddings),
                        "embedding_size":
                        str(e),
                        "droprate":
                        str(d),
                        "batch_size":
                        str(b),
                        "learning_rate":
                        str(l),
                        "best_train":
                        str(history.history["acc"][max_index]),
                        "best_val":
                        str(history.history["val_acc"][max_index]),
                        "best_test":
                        str(best_test)
                    })
                    csvfile.flush()
                    counter += 1
                    # clear memory
                    del model
                    del callbacks
                    del history
    csvfile.close()
    return 0
def plot_K_model(name, subtype="words"):
    """Render the model architecture image to ../img/<name>.png.

    Sequential models are rebuilt as functional ones first so plot_model
    renders them properly (code adopted from
    https://github.com/keras-team/keras/issues/10386).
    """
    if subtype in ["words", "char"]:
        clear_session()
        sequential = getModel(None, None, subtype)
        inp = layers.Input(batch_shape=sequential.layers[0].input_shape)
        out = inp
        for keras_layer in sequential.layers:
            out = keras_layer(out)
        funcmodel = models.Model([inp], [out])
    elif subtype == "all":
        funcmodel = getModel(None, None, subtype)
    plot_model(funcmodel, to_file='../img/' + name + '.png', show_shapes=True)
###############################
# main command call
###############################
if __name__ == "__main__":
    # Command-line entry point: choose one of --plot / --single-run /
    # --grid-search; grid-search is the fall-through default.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--subtype",
        type=str,
        default="words",
        help=
        "which model subtype to use; either 'words', 'char' or 'all' <default:'words'>"
    )
    parser.add_argument(
        "--pre-trained-embeddings",
        default=False,
        action="store_true",
        help=
        "option to use pre-trained word/character embeddings, disabled by default"
    )
    # NOTE(review): default=True with action="store_true" makes this flag a
    # no-op — gridSearch runs whenever neither --plot nor --single-run is
    # given, with or without --grid-search on the command line.
    parser.add_argument(
        "--grid-search",
        default=True,
        action="store_true",
        help="option to conduct grid-search, enabled by default")
    parser.add_argument(
        "--single-run",
        default=False,
        action="store_true",
        help=
        "option to conduct single run based on default hyperparameters, disabled by default"
    )
    parser.add_argument(
        "--plot",
        default=False,
        action="store_true",
        help="option for plotting keras model, disabled by default")
    parser.add_argument(
        "--name",
        type=str,
        default="model",
        help=
        "if --plot option is chosen, this provides name of the model image <default:'model'>"
    )
    args = parser.parse_args()
    assert args.subtype in ["words", "char", "all"]
    # Dispatch order: plot > single-run > grid-search (the default).
    if args.plot:
        plot_K_model(args.name, args.subtype)
    elif args.single_run:
        singleRun(args.subtype, args.pre_trained_embeddings)
    elif args.grid_search:
        gridSearch(args.subtype, args.pre_trained_embeddings)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author : ningchao
# @Time : 20/11/7 15:58
import utils
import logging
import numpy as np
from typing import *
from tqdm import tqdm
from category import Category
from sklearn.feature_extraction.text import CountVectorizer
def preliminary_labeling(category_tree: Category, segs: List[List[str]]):
    """Pre-label documents via their initial category keywords and build the
    document term-frequency matrix.

    TODO: should support custom pre-labeling rules, or loading existing
    pre-labeling results.

    Each document is assigned to the category matching its first keyword hit.

    :param category_tree: root node of the category tree
    :param segs: tokenised documents (list of token lists)
    :return: (word index table {word: index},
              term-frequency csr_matrix of shape (documents_size, vocab_size))
    """
    # The default token_pattern drops single-character tokens, hence the
    # custom pattern (matters for Chinese text).
    cv = CountVectorizer(analyzer="word", max_df=0.8, min_df=0.00001, token_pattern=r"(?u)\b\w+\b")
    logging.info("初始化文档词频矩阵")
    document_vectors = cv.fit_transform([" ".join(seg) for seg in segs])  # csr_matrix
    vocabulary = cv.vocabulary_
    logging.info("词典大小: {}".format(len(vocabulary)))
    logging.info("文档预标注")
    for doc_index, seg in tqdm(enumerate(segs)):
        for token in seg:
            matched_category = category_tree.find_category_by_word(token)
            if matched_category is not None:
                # First keyword hit decides the document's pre-label.
                matched_category.add_document(doc_index)
                break
    return vocabulary, document_vectors
def init_bayes_model(category_tree: Category, documents_size: int, vocab_size: int):
    """Initialise the model parameters from the pre-labeling.

    :param category_tree: root node of the category tree
    :param documents_size: number of documents
    :param vocab_size: vocabulary size
    :return: P(C) -> (category_size, )
             P(C|D) -> (category_size, documents_size)
             P(W|C) -> (vocab_size, category_size)
    """
    category_list = category_tree.get_category_list()
    category_size = len(category_list)
    # Class prior P(C), Laplace-smoothed from the pre-label counts below.
    category_prior_probability = np.zeros(category_size)
    # Document membership P(C|D), one-hot from the pre-labeling.
    category_document_cond_probability = np.zeros((documents_size, category_size))
    logging.info("参数初始化")
    for col, category in tqdm(enumerate(category_list)):
        node = category_tree.find_category(category.split("/"))
        labeled_documents = node.get_documents()
        for document_index in labeled_documents:
            category_document_cond_probability[document_index, col] = 1.0
        # Laplace smoothing.
        category_prior_probability[col] = (1.0 + len(labeled_documents)) / (category_size + documents_size)
    # Transpose to (category_size, documents_size) for later matrix products.
    category_document_cond_probability = category_document_cond_probability.T
    word_category_cond_probability = np.zeros([vocab_size, len(category_list)])
    logging.info("预标注比例: {}/{}".format(int(category_document_cond_probability.sum()), documents_size))
    return category_prior_probability, category_document_cond_probability, word_category_cond_probability
def maximization_step(document_vectors, p_c, p_c_d, p_w_c):
    """M-step: after the E-step updated P(C|D), refresh P(W|C) (eq. 1) and
    P(C) (eq. 2), both Laplace-smoothed and updated in place.

    :param document_vectors: (documents_size, vocab_size) term counts,
        dense ndarray or scipy sparse matrix
    :param p_c: (category_size,) class priors — updated in place
    :param p_c_d: (category_size, documents_size) document posteriors
    :param p_w_c: (vocab_size, category_size) word likelihoods — updated in place
    """
    logging.info("Horizontal M-step")
    # Expected per-category word counts; asarray normalises the np.matrix
    # that a sparse matmul can return.
    category_vectors = np.asarray(p_c_d @ document_vectors)  # (category_size, vocab_size)
    category_size = p_c.shape[0]
    documents_size = document_vectors.shape[0]  # in the paper: |D| + |H|
    vocab_size = document_vectors.shape[1]
    # Vectorised replacement of the original per-element Python loops.
    category_totals = category_vectors.sum(axis=1)  # (category_size,)
    p_w_c[:, :] = (1.0 + category_vectors.T) / (vocab_size + category_totals)
    p_c[:] = (1.0 + p_c_d.sum(axis=1)) / (category_size + documents_size)
def maximization_step_with_shrinkage(category_tree: Category, document_vectors, p_c, p_c_d, p_w_c, p_w_c_k, lambda_matrix, beta_matrix, iter: int):
    # After the E-step updated P(C|D), the M-step updates the per-level word
    # distributions P^{alpha}(W|C) (eq. 1), mixes them into P(W|C) (eq. 4)
    # via the shrinkage weights lambda, and refreshes P(C) (eq. 2).
    documents_size, vocab_size = document_vectors.shape
    category_size, lambda_size = lambda_matrix.shape
    category_list = category_tree.get_category_list()
    # vertical M: update lambda from beta (skipped on the first iteration,
    # before beta has been estimated)
    if iter > 0:
        shrinkage_maximization_step(lambda_matrix, beta_matrix, p_c_d)
    # horizontal M
    # update P^{alpha}(w|c)
    logging.info("Horizontal M-step")
    for c in tqdm(range(category_size)):
        category_path = category_list[c].split("/")
        dep_list = []
        category_depth = len(category_path)
        for k in range(category_depth):
            # level 0 is the category itself, then walk up the hierarchy
            # towards ROOT (ROOT itself excluded); dep_list accumulates the
            # ancestors seen so far
            dep_list.append(category_list.index("/".join(category_path)))
            category_vectors = p_c_d[dep_list] @ document_vectors  # only the involved categories
            if category_vectors.ndim == 1:
                category_vectors = category_vectors.reshape(1, -1)
            category_vector_hierarchy = category_vectors.sum(axis=0)  # parents' documents also count towards the child
            category_vector_hierarchy_sum = category_vector_hierarchy.sum()
            for v in range(vocab_size):
                p_w_c_k[v, c, k] = (1.0 + category_vector_hierarchy[v]) / (vocab_size + category_vector_hierarchy_sum)
            category_path = category_path[:-1]
    # level -2 is the ROOT distribution over the whole corpus
    category_vector_root = document_vectors.sum(axis=0)
    category_vector_root_sum = document_vectors.sum()
    for v in range(vocab_size):
        p_w_c_k[v, :, -2] = (1.0 + category_vector_root[0, v]) / (vocab_size + category_vector_root_sum)  # category_vector_root.ndim=2
    # level -1 is the uniform 1/|V| distribution
    p_w_c_k[:, :, -1] = 1.0 / vocab_size
    # update p_w_c (function 4)
    for v in range(vocab_size):
        p_w_c[v] = (lambda_matrix * p_w_c_k[v]).sum(axis=1)
    # update p_c (function 2)
    for c in range(category_size):
        p_c[c] = (1 + p_c_d[c].sum()) / (category_size + documents_size)
def expectation_step_with_shrinkage(document_vectors, p_c, p_w_c, p_w_c_k, lambda_matrix, beta_matrix):
    """E-step: after the M-step updated P(W|C) and P(C), refresh P(C|D)
    (eq. 3) and return it."""
    # vertical E: update beta from the per-level word distributions
    shrinkage_expectation_step(document_vectors, lambda_matrix, beta_matrix, p_w_c_k)
    # horizontal E
    logging.info("Horizontal E-step")
    # Work in log space so the product in eq. (3) becomes a sum.
    log_likelihood = document_vectors @ np.log(p_w_c)  # (documents_size, category_size)
    log_posterior = np.log(p_c).reshape(-1, 1) + log_likelihood.T  # (category_size, documents_size)
    return utils.softmax(log_posterior)
def hierarchical_shrinkage_init(category_tree: Category, document_vectors):
    """
    Shrinkage exploits the category hierarchy to mitigate feature sparsity:
        1/|V|(λ4) <- ROOT(λ3) <- news(λ2) <- world news(λ1) <- economy news(λ0)
    Word probabilities of ancestor categories are mixed (weighted) into each
    leaf category along the path to ROOT.
    :param category_tree: root node of the category tree
    :param document_vectors: document term-frequency matrix
    :return: λ -> (category_size, max_depth + 2)
             β -> (documents_size, category_size, max_depth + 2)
             P^{α}(W|C) -> (vocab_size, category_size, max_depth + 2)
    """
    logging.info("初始化shrinkage参数")
    max_depth = Category.get_max_depth(category_tree)
    category_list = category_tree.get_category_list()
    category_size = len(category_list)
    # +2 extra mixture slots: one for ROOT, one for the uniform 1/|V| model.
    lambda_size = max_depth + 2
    lambda_matrix = np.zeros([category_size, lambda_size])
    for c, path in enumerate(category_list):
        category_node = category_tree.find_category(path.split("/"))
        depth = category_node.get_depth()
        # Uniform init: the node's own levels plus ROOT and uniform slots
        # each get 1/(depth + 2), so each category's weights sum to 1.
        init_lambda_val = 1.0 / (depth + 2)
        for k in range(depth):
            lambda_matrix[c, k] = init_lambda_val
        lambda_matrix[c, max_depth] = init_lambda_val
        lambda_matrix[c, max_depth+1] = init_lambda_val
    # init β (filled by the vertical E-step)
    documents_size, vocab_size = document_vectors.shape
    beta_matrix = np.zeros([documents_size, category_size, lambda_size])
    # init P^{α}(W|C) (filled by the horizontal M-step)
    p_w_c_k = np.zeros([vocab_size, category_size, lambda_size])
    return lambda_matrix, beta_matrix, p_w_c_k
def shrinkage_maximization_step(lambda_matrix, beta_matrix, p_c_d):
    """Vertical M-step: update the shrinkage mixture weights λ (function 6).

    :param lambda_matrix: (category_size, lambda_size) weights, updated in place
    :param beta_matrix: (documents_size, category_size, lambda_size) level posteriors β
    :param p_c_d: (category_size, documents_size) class posteriors P(c|d)
    """
    logging.info("Vertical M-step")
    # Vectorised form of the original per-(c, k) Python loop:
    #   lambda[c, k] = (sum_d beta[d, c, k] * p_c_d[c, d]) / sum_d p_c_d[c, d]
    # Same arithmetic, one einsum instead of category_size * lambda_size
    # dot products (and no per-category progress bar needed).
    numerator = np.einsum("dck,cd->ck", beta_matrix, p_c_d)
    lambda_matrix[:] = numerator / p_c_d.sum(axis=1, keepdims=True)
def shrinkage_expectation_step(document_vectors, lambda_matrix, beta_matrix, p_w_c_k):
    """Vertical E-step: update the level posteriors β in place (function 5)."""
    logging.info("Vertical E-step")
    documents_size = document_vectors.shape[0]
    # Collect all nonzero indices up front: per-element indexing into a
    # csr_matrix is comparatively slow.
    document_vectors_nonzero = document_vectors.nonzero()
    # Number of distinct nonzero terms per document (normalisation below).
    document_vectors_nonzero_count = np.bincount(document_vectors_nonzero[0], minlength=documents_size)
    for d, v in tqdm(zip(*document_vectors_nonzero)):
        p_w_c_alpha = lambda_matrix * p_w_c_k[v]  # shape = (category_size, lambda_size)
        # Normalise each category's row over the lambda levels.
        p_w_c_alpha = p_w_c_alpha / p_w_c_alpha.sum(axis=1).reshape(-1, 1)
        beta_matrix[d] += p_w_c_alpha
    # NOTE: the evaluation order changed from an older version; results can
    # deviate slightly due to floating-point rounding.
    for d in range(documents_size):
        if document_vectors_nonzero_count[d] > 0:
            beta_matrix[d] /= document_vectors_nonzero_count[d]  # the Σk denominator of formula 6
def main(word_file: str, cropus_file: str, result_file: str, model_save_path=None, max_iters=5):
    """Train the EM text classifier and write per-document predictions.

    :param word_file: path to the seed-keyword file
    :param cropus_file: path to the corpus of documents to classify
    :param result_file: path where classification results are written
    :param model_save_path: optional path to save the trained model
    :param max_iters: number of EM iterations
    :return: None
    """
    category_tree = utils.load_seed_keywords(word_file)
    datas = utils.load_data(cropus_file)
    segs = utils.word_segment(datas)
    vocabulary, document_vectors = preliminary_labeling(category_tree, segs)
    del segs  # free the segmented corpus; only the vectors are needed now
    p_c, p_c_d, p_w_c = init_bayes_model(category_tree, documents_size=len(datas), vocab_size=len(vocabulary))
    lambda_matrix, beta_matrix, p_w_c_k = hierarchical_shrinkage_init(category_tree, document_vectors)
    for _i in range(max_iters):
        logging.info("EM迭代进度: {}/{}".format(_i + 1, max_iters))
        maximization_step_with_shrinkage(category_tree, document_vectors, p_c, p_c_d, p_w_c, p_w_c_k, lambda_matrix, beta_matrix, _i)
        p_c_d = expectation_step_with_shrinkage(document_vectors, p_c, p_w_c, p_w_c_k, lambda_matrix, beta_matrix)
    category_list = category_tree.get_category_list()
    # Context manager so the result file is closed even if a write fails
    # (the original leaked the handle on error).
    with open(result_file, "w", encoding="utf-8") as fw:
        for i in range(len(datas)):
            prob = p_c_d[:, i]
            predict_category = category_list[prob.argmax()]
            fw.write(datas[i] + "\t" + predict_category + "\n")
    if model_save_path is not None:
        utils.save_model(model_save_path, vocabulary, p_c, p_w_c, category_list)
# Script entry point: train on the Toutiao news corpus with default settings.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    main(word_file="resources/dict/words_toutiao_news.txt",
         cropus_file="resources/cropus/toutiao_cat_data.txt",
         result_file="resources/cropus/toutiao_cat_data_result.txt",
         model_save_path="resources/model/toutiao_news_model",
         max_iters=5)
|
<reponame>ghsecuritylab/mini-client<filename>mbed-client/factory-configurator-client/devenv/update-license/replace_license_header.py
#!/usr/local/bin/python3
# ----------------------------------------------------------------------------
# Copyright 2016-2017 ARM Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import sys
import re
import getopt
import shutil
files_ext_list = [ 'c' , 'cpp', 'h' , 'py' , 'java' , 'mk', 'lua', 'sh', 'groovy', 'json' ]
copyright_max_size=40
ext_copyright = list()
debug_flag=0
def remove_bad_ascii(file_path):
    """Strip all non-7-bit-ASCII bytes from *file_path* in place.

    Bytes >= 127 (including DEL) are dropped, matching the original
    ``num < 127`` filter. Returns 1 if the file was modified, 0 otherwise.
    """
    with open(file_path, 'rb') as f:
        data = f.read()
    # Filter at the bytes level in one pass (the original built a str with
    # quadratic += concatenation).
    filtered_data = bytes(b for b in data if b < 127)
    changed = 1 if len(filtered_data) != len(data) else 0
    if changed:
        print("Changing ", file_path)
        # Write back in binary mode: the original wrote in text mode, which
        # applied locale encoding and newline translation to the content.
        with open(file_path, 'wb') as f:
            f.write(filtered_data)
    return changed
def get_copyright_offset(file_data):
    """Return the line index where the copyright comment should start.

    A leading shebang line must stay first, so the header is pushed down
    to line 1 in that case; otherwise it starts at line 0.
    """
    has_shebang = bool(file_data) and file_data[0].startswith("#!")
    return 1 if has_shebang else 0
def prepare_copyright(copyright_file, file_type):
    """Render the copyright text with the comment syntax of *file_type*.

    Exits the process when the copyright file cannot be opened or the
    file type is unknown. Always returns text with exactly one trailing
    newline; XML output is wrapped in a single ``<!-- ... -->`` comment.
    """
    result = ""
    first_line_remove = 0
    # First line handling: XML opens a block comment whose first four
    # characters replace the first four of the copyright text.
    if file_type == "xml":
        result += "<!--"
        first_line_remove = 4
    try:
        fh = open(copyright_file, 'r')
    except IOError as e:
        print("I/O error opening %s: %s (%d)" % (copyright_file, e.strerror, e.errno))
        sys.exit(1)
    # Middle lines: prefix every line with the language's comment marker.
    for line_no, line in enumerate(fh, start=1):
        if line_no == 1:
            line = line[first_line_remove:]
        if file_type == "xml":
            result += line
        elif file_type in ("c", "cpp", "h", "java", "groovy", "json"):
            result += "// " + line
        elif file_type in ("py", "mk", "sh"):
            result += "# " + line
        elif file_type == "lua":
            result += "-- " + line
        else:
            print("Unknown file type: %s" % file_type)
            sys.exit(1)
    # Last line handling: close the XML block comment.
    if file_type == "xml":
        result = result[:-4] + "-->"
    # Normalise to exactly one trailing newline.
    result = result.rstrip("\n")
    result += "\n"
    fh.close()
    return result
def get_file_ext(file_name):
    """Return the text after the last '.' (the whole name when there is none)."""
    return file_name.rpartition(".")[2]
def get_file_ext_index(file_ext):
    # Index of *file_ext* inside the module-level files_ext_list; raises
    # ValueError for unknown extensions (callers gate on file_valid() first).
    return files_ext_list.index(file_ext)
def file_valid(file_name):
    """Return 1 when the file's extension is one we process, else 0."""
    return 1 if get_file_ext(file_name) in files_ext_list else 0
def file_hidden(file_name):
    """Return 1 for dot-files (hidden on Unix-like systems), else 0."""
    return 1 if file_name.startswith('.') else 0
def find_copyright_header(file_data):
    """Locate an existing copyright block inside *file_data* (list of lines).

    Returns ``(start_line, end_line)`` where ``end_line`` is one past the
    last header line, or ``(-1, -1)`` when no known header is found.
    Three formats are tried in order: the new ARM banner (dashed-line
    delimited), the old ARM Apache notice, and the legacy Discretix block.
    """
    # Search for new ARM copyright message.
    i=0
    start_line = 0
    for line in file_data:
        char_num = line.find("----------------------------------------------------------------------------")
        if not char_num == -1:
            start_line = i
            break
        i=i+1
    i=0
    for line in file_data:
        i=i+1
        char_num = line.find("Copyright 2016-2017 ARM Ltd")
        if not char_num == -1:
            # Header ends at the next dashed line after the copyright line.
            for line in file_data[i:]:
                i=i+1
                char_num = line.find("----------------------------------------------------------------------------")
                if not char_num == -1:
                    return start_line, i
    # Search for old ARM copyright message.
    #First old copyright message
    i=0
    start_line = 0
    for line in file_data:
        char_num = line.find("Copyright 2017 ARM Ltd.")
        if not char_num == -1:
            start_line = i
            break
        i=i+1
    i=0
    for line in file_data:
        i=i+1
        char_num = line.find("limitations under the License.")
        if not char_num == -1:
            # NOTE(review): if the "Copyright 2017" probe did not match,
            # start_line is still 0 here, so a stray Apache closing line
            # would report a header starting at line 0 — verify intended.
            return start_line, i
    #Second old copyright message
    i=0
    start_line = 0
    for line in file_data:
        char_num = line.find("* Copyright 2014 (c) Discretix Technologies Ltd.")
        if not char_num == -1:
            start_line = i-1 #in this case Copyright is the second line
            break
        i=i+1
    i=0
    for line in file_data:
        i=i+1
        char_num = line.find("***************************************************************/")
        if not char_num == -1:
            return start_line, i
    return -1,-1
def replace_copyright_header(curr_path, file_name):
    """Rewrite *file_name* under *curr_path* with the new copyright header.

    Keeps a leading shebang first, replaces any recognised old header with
    the pre-rendered entry from the module-level ``ext_copyright`` list,
    and writes through a temporary file.
    """
    # For debugging purposes, this can be uncommented -
    # print("The path is " + curr_path)
    # (Py3 fix: the old uncommented `print "..."` statement was a SyntaxError.)
    full_file_name = curr_path + "/" + file_name
    tmp_file_name = "tmp.txt"
    try:
        file = open(full_file_name, 'r')
    except IOError:
        print("Error: file not found ",full_file_name)
        exit(1)
    try:
        file_data = file.readlines()
    except UnicodeDecodeError:
        # Non-ASCII bytes break text decoding: scrub them and retry once.
        print("Fixing bad unicode in ",full_file_name)
        file.close()
        remove_bad_ascii(full_file_name)
        file = open(full_file_name, 'r')
        file_data = file.readlines()
    file.close()
    header_first_line, header_last_line = find_copyright_header(file_data[:copyright_max_size])
    if header_last_line == -1:
        print("Header not found: ",full_file_name)
        header_last_line = 0
        header_first_line = 0
    file_ext_index = get_file_ext_index(get_file_ext(file_name))
    out_file = open(tmp_file_name, 'w')
    file_offset = get_copyright_offset(file_data)
    # If the file starts after a shebang line, avoid duplicating it.
    if file_offset > header_last_line:
        header_last_line = file_offset
    # Write the existing shebang line, if exists.
    out_file.writelines(file_data[0:file_offset])
    # Write the new copyright message.
    out_file.writelines(ext_copyright[file_ext_index])
    # Write whatever lines came before the original copyright message.
    out_file.writelines(file_data[file_offset:header_first_line])
    # Write everything that came after the original copyright message.
    out_file.writelines(file_data[header_last_line:])
    out_file.close()
    try:
        shutil.copyfile(tmp_file_name, full_file_name)
    except IOError:
        print("Error: unable to copy file (file might be non-writeable...) ",full_file_name)
        print("Trying again, setting it to world writable first.")
        try:
            stat_obj = os.stat(full_file_name)
            # Py3 fix: octal literal must be 0o777 (bare 0777 is a SyntaxError).
            os.chmod(full_file_name, 0o777)
            shutil.copyfile(tmp_file_name, full_file_name)
            os.chmod(full_file_name, stat_obj.st_mode)
        except Exception:
            print("Error: still can't copy file ",full_file_name)
            exit(1)
    return 0
def verify_copyright_header(curr_path, file_name):
    """Check that *file_name* carries exactly the expected copyright header.

    Prints a side-by-side report when the header differs from the entry in
    the module-level ``ext_copyright`` list. Returns 1 when the file cannot
    be decoded, 0 otherwise.
    """
    full_file_name = curr_path + "/" + file_name
    try:
        file = open(full_file_name, 'r')
    except IOError:
        print("Error: file not found ",full_file_name)
        exit(1)
    try:
        file_data = file.readlines()
    except UnicodeDecodeError:
        print("Error: reading from ",full_file_name)
        file.close()
        return 1
    file.close()
    header_first_line, header_last_line = find_copyright_header(file_data[:copyright_max_size])
    if header_last_line == -1:
        print("Header not found: ",full_file_name)
        header_last_line = 0
        header_first_line = 0
    file_ext_index = get_file_ext_index(get_file_ext(file_name))
    # (Removed a leftover debug print of file_ext_index here.)
    lines = "".join(file_data[header_first_line:header_last_line])
    # Compare ignoring the trailing newline of both sides (the stored
    # copyright ends with "\n"; [:-2] drops it plus the comment newline).
    if lines[:-1] != ext_copyright[file_ext_index][:-2]:
        print("Error in file ", full_file_name)
        print(lines[:-1])
        print("-----")
        print(ext_copyright[file_ext_index][:-2])
        print("-----")
    return 0
def run_verify(curr_path):
    """Recursively verify copyright headers under *curr_path* (read-only)."""
    for file_name in os.listdir(curr_path):
        full_file_name = curr_path + "/" + file_name
        if os.path.isdir(full_file_name) and not file_name.startswith('.'):
            # Recurse with run_verify, NOT dir_iter: dir_iter REWRITES
            # headers, which must never happen during a verify-only pass.
            run_verify(full_file_name)
        else:
            if file_valid(file_name) and not file_hidden(file_name):
                verify_copyright_header(curr_path, file_name)
def dir_iter(curr_path):
    """Recursively replace the license header in every supported file."""
    print("Replacing source license at directory ", curr_path)
    for entry in os.listdir(curr_path):
        full_name = curr_path + "/" + entry
        # Descend into non-hidden directories.
        if os.path.isdir(full_name) and not entry.startswith('.'):
            dir_iter(full_name)
            continue
        # Skip unsupported or hidden files.
        if not file_valid(entry) or entry.startswith('.') or file_hidden(entry):
            continue
        if debug_flag == 1:
            print("Replacing header for file ", full_name)
        replace_copyright_header(curr_path, entry)
def dir_list_iter(dir_list):
    """Run the header replacement over each root directory in *dir_list*."""
    for root in dir_list:
        dir_iter(root)
def files_iter(files_list):
    """Replace headers for explicitly listed files (relative to the cwd)."""
    print(files_list)
    for file_name in files_list:
        if not file_valid(file_name):
            print("Error: unsupported file type ",file_name)
            continue
        if debug_flag == 1:
            print("Replacing ", file_name)
        replace_copyright_header(".", file_name)
def usage():
    # Print the command-line usage banner for this script.
    print("replace_license_header.py -c <copyright text file> [-v (verify only!)] -d <root directory>|-f <file 1> <file 2>...")
def parse_args():
    """Parse command-line arguments.

    Returns ``(copyright_file, dir_list, files_list, verify)``. Exits with
    an error message when the argument combination is invalid.
    """
    # Fix: the original shadowed the module-level debug_flag with a local,
    # so -D never enabled debug output in the rest of the script.
    global debug_flag
    copyright_file = ""
    dir_list = []
    files_list = []
    verify = 0
    for arg in sys.argv[1:]:
        # NOTE(review): index() finds the FIRST occurrence of the value, so
        # a repeated argument token resolves to its first position — verify
        # that callers never pass duplicates.
        idx = sys.argv.index(arg)
        if arg == "-c":
            copyright_file = sys.argv[idx+1]
        if arg == "-d":
            # empty dir - do nothing
            if idx >= len(sys.argv) - 1:
                exit(0)
            # -d consumes everything after it as directory names.
            for dirname in sys.argv[idx+1:]:
                dir_list.append(dirname)
        if arg == "-D":
            print("Debugging..........")
            debug_flag = 1
        if arg == "-f":
            # empty file list - do nothing
            if idx >= len(sys.argv) - 1:
                exit(0)
            # -f consumes everything after it as file names.
            for fname in sys.argv[idx+1:]:
                files_list.append(fname)
            break
        if arg == "-v":
            verify = 1
    if (copyright_file == ""):
        print("Error: select copyright header file")
        usage()
        exit(1)
    if (len(dir_list) == 0 and len(files_list) == 0):
        print("Error: select files list or directory")
        usage()
        exit(1)
    if (len(dir_list) > 0 and len(files_list) > 0):
        print("Error: select files list OR directory. Not both!")
        usage()
        exit(1)
    if debug_flag == 1:
        print("Arguments:")
        print("dir list: ", dir_list)
        print("files_list: ", files_list)
        print("copyright_file: ", copyright_file)
    return copyright_file, dir_list, files_list, verify
def main ():
    """Entry point: pre-render per-extension headers, then verify or replace."""
    copyright_file, dir_list, files_list, verify = parse_args()
    # Render the copyright text once per supported extension; the
    # replace/verify helpers index ext_copyright by extension position.
    for ext in files_ext_list:
        ext_copyright.append(prepare_copyright(copyright_file, ext))
    if verify == 1:
        if len(dir_list) == 0:
            print("Error: verify must run on directory")
            exit(1)
        # Verification is not wired up yet; run_verify exists but is disabled.
        print("Error: verify is not implemented!")
        #run_verify(dir_list)
    if len(dir_list) == 0:
        files_iter(files_list)
    else:
        dir_list_iter(dir_list)
#############################
# Script entry point.
if __name__ == "__main__":
    main()
|
<reponame>khooi8913/imagetagger_model
from typing import Set
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import JSONField
from django.db import models
import os
from imagetagger.users.models import Team
class Image(models.Model):
    """A single image file belonging to an :class:`ImageSet`."""
    image_set = models.ForeignKey(
        'ImageSet', on_delete=models.CASCADE, related_name='images')
    name = models.CharField(max_length=100)
    # On-disk file name; unique across ALL image sets, not just within one.
    filename = models.CharField(max_length=100, unique=True)
    time = models.DateTimeField(auto_now_add=True)
    checksum = models.BinaryField()
    width = models.IntegerField(default=800)
    height = models.IntegerField(default=600)
    metadata = JSONField(default=dict)

    def path(self):
        """Filesystem path of the file, under the set's root_path()."""
        return os.path.join(self.image_set.root_path(), self.filename)

    def relative_path(self):
        """Path of the file relative to the image storage root (set.path)."""
        return os.path.join(self.image_set.path, self.filename)

    def delete(self, *args, **kwargs):
        # Removing an image invalidates the set's cached zip archive.
        self.image_set.zip_state = ImageSet.ZipState.INVALID
        self.image_set.save(update_fields=('zip_state',))
        super(Image, self).delete(*args, **kwargs)

    def save(self, *args, **kwargs):
        # Adding/updating an image invalidates the set's cached zip archive.
        self.image_set.zip_state = ImageSet.ZipState.INVALID
        self.image_set.save(update_fields=('zip_state',))
        super(Image, self).save(*args, **kwargs)

    def __str__(self):
        return u'Image: {0}'.format(self.name)
class ImageSet(models.Model):
    """A named collection of images owned by a team."""

    class Meta:
        unique_together = [
            'name',
            'team',
        ]

    class ZipState:
        # Lifecycle of the downloadable zip archive for this set.
        INVALID = 0
        READY = 1
        PROCESSING = 2

    PRIORITIES = (
        (1, 'High'),
        (0, 'Normal'),
        (-1, 'Low'),
    )
    ZIP_STATES = (
        (ZipState.INVALID, 'invalid'),
        (ZipState.READY, 'ready'),
        (ZipState.PROCESSING, 'processing'),
    )

    path = models.CharField(max_length=100, unique=True, null=True)
    name = models.CharField(max_length=100)
    location = models.CharField(max_length=100, null=True, blank=True)
    description = models.TextField(max_length=1000, null=True, blank=True)
    time = models.DateTimeField(auto_now_add=True)
    team = models.ForeignKey(
        Team,
        on_delete=models.SET_NULL,
        related_name='image_sets',
        null=True,
    )
    creator = models.ForeignKey(settings.AUTH_USER_MODEL,
                                default=None,
                                on_delete=models.SET_NULL,
                                null=True,
                                blank=True)
    public = models.BooleanField(default=False)
    public_collaboration = models.BooleanField(default=False)
    image_lock = models.BooleanField(default=False)
    priority = models.IntegerField(choices=PRIORITIES, default=0)
    main_annotation_type = models.ForeignKey(
        to='annotations.AnnotationType',
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        default=None
    )
    pinned_by = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='pinned_sets')
    zip_state = models.IntegerField(choices=ZIP_STATES, default=ZipState.INVALID)
    metadata = JSONField(default=dict)

    def root_path(self):
        """Filesystem path of this set under settings.IMAGE_PATH."""
        return os.path.join(settings.IMAGE_PATH, self.path)

    def zip_path(self):
        """Relative path of the set's zip archive."""
        return os.path.join(self.path, self.zip_name())

    def zip_name(self):
        return "imageset_{}.zip".format(self.id)

    def tmp_zip_path(self):
        # Hidden temp name used while the archive is being (re)built.
        return os.path.join(self.path, ".tmp." + self.zip_name())

    @property
    def image_count(self):
        # Prefer an annotated aggregate when the queryset provided one,
        # avoiding an extra COUNT query per set.
        if hasattr(self, 'image_count_agg'):
            return self.image_count_agg
        return self.images.count()

    def get_perms(self, user: get_user_model()) -> Set[str]:
        """Get all permissions of the user."""
        perms = set()
        if self.team is not None:
            if self.team.is_admin(user):
                perms.update({
                    'verify',
                    'annotate',
                    'create_export',
                    'delete_annotation',
                    'delete_export',
                    'delete_set',
                    'delete_images',
                    'edit_annotation',
                    'edit_set',
                    'read',
                })
            if self.team.is_member(user):
                perms.update({
                    'verify',
                    'annotate',
                    'create_export',
                    'delete_annotation',
                    'delete_export',
                    'edit_annotation',
                    'edit_set',
                    'read',
                })
        if user == self.creator:
            perms.update({
                'verify',
                'annotate',
                'create_export',
                'delete_annotation',
                'delete_export',
                'delete_set',
                'delete_images',
                'edit_annotation',
                'edit_set',
                'read',
            })
        if self.public:
            perms.update({
                'read',
                'create_export',
            })
            if self.public_collaboration:
                perms.update({
                    'verify',
                    'annotate',
                    'delete_annotation',
                    'edit_annotation',
                })
        return perms

    def has_perm(self, permission: str, user: get_user_model()) -> bool:
        """Check whether user has specified permission."""
        return permission in self.get_perms(user)

    def __str__(self):
        return u'Imageset: {0}'.format(self.name)

    @property
    def prio_symbol(self):
        """HTML glyph marking the set's labeling priority ('' for Normal)."""
        # Fix: compare with == instead of "is". Identity checks against int
        # literals only work via CPython small-int caching and emit a
        # SyntaxWarning on Python >= 3.8.
        if self.priority == -1:
            return '<span class="glyphicon glyphicon-download" data-toggle="tooltip" data-placement="right" title="Low labeling priority"></span>'
        elif self.priority == 0:
            return ''
        elif self.priority == 1:
            return '<span class="glyphicon glyphicon-exclamation-sign" data-toggle="tooltip" data-placement="right" title="High labeling priority"></span>'
class SetTag(models.Model):
    """A free-form tag that can be attached to any number of image sets."""
    name = models.CharField(max_length=100, unique=True)
    imagesets = models.ManyToManyField(ImageSet, related_name='set_tags')
|
r"""
================
Error Middleware
================
.. versionadded:: 0.2.0
Middleware to handle errors in aiohttp applications.
.. versionchanged:: 1.0.0
Previously, ``error_middleware`` required ``default_handler`` to be passed
on initialization. However in **1.0.0** version ``aiohttp-middlewares`` ships
default error handler, which log exception traceback into
``aiohttp_middlewares.error`` logger and responds with given JSON:
.. code-block:: json
{
"detail": "str"
}
For example, if view handler raises ``ValueError("wrong value")`` the default
error handler provides 500 Server Error JSON:
.. code-block:: json
{
"detail": "wrong value"
}
In same time, it is still able to provide custom default error handler if you
need more control on error handling.
Other notable change in **1.0.0** version is allowing to ignore exception or
tuple of exceptions (as in ``try/catch`` block) from handling via middleware.
This might be helpful, when you don't want, for example, to have in Sentry
``web.HTTPNotFound`` and/or ``web.BadRequest`` errors.
Usage
=====
.. code-block:: python
import re
from aiohttp import web
from aiohttp_middlewares import (
default_error_handler,
error_context,
error_middleware,
)
# Error handler for API requests
async def api_error(request: web.Request) -> web.Response:
with error_context(request) as context:
return web.json_response(
context.data, status=context.status
)
# Basic usage (default error handler for whole application)
app = web.Application(middlewares=[error_middleware()])
# Advanced usage (multiple error handlers for different
# application parts)
app = web.Application(
middlewares=[
error_middleware(
default_handler=default_error_handler,
config={re.compile(r"^\/api"): api_error},
)
]
)
# Ignore aiohttp.web HTTP Not Found errors from handling via middleware
app = web.Application(
middlewares=[
error_middleware(ignore_exceptions=web.HTTPNotFound)
]
)
"""
import logging
from contextlib import contextmanager
from functools import partial
from typing import Dict, Iterator, Optional, Tuple, Union
import attr
from aiohttp import web
from .annotations import DictStrAny, ExceptionType, Handler, Middleware, Url
from .utils import match_path
DEFAULT_EXCEPTION = Exception("Unhandled aiohttp-middlewares exception.")
REQUEST_ERROR_KEY = "error"
Config = Dict[Url, Handler]
logger = logging.getLogger(__name__)
@attr.dataclass(frozen=True, slots=True)
class ErrorContext:
    """Context with all necessary data about the error."""
    # The original exception caught by the middleware.
    err: Exception
    # Human-readable message (falls back to str(err)).
    message: str
    # HTTP status code for the error response (falls back to 500).
    status: int
    # JSON-serialisable payload (falls back to {"detail": message}).
    data: DictStrAny
async def default_error_handler(request: web.Request) -> web.Response:
    """Default error handler to respond with JSON error details.

    If, for example, ``aiohttp.web`` view handler raises
    ``ValueError("wrong value")`` exception, default error handler will produce
    JSON response of 500 status with given content:

    .. code-block:: json

        {
            "detail": "wrong value"
        }

    And to see the whole exception traceback in logs you need to enable
    ``aiohttp_middlewares`` in logging config.

    .. versionadded:: 1.0.0
    """
    with error_context(request) as context:
        # exc_info=True attaches the traceback of the in-flight exception.
        logger.error(context.message, exc_info=True)
        return web.json_response(context.data, status=context.status)
@contextmanager
def error_context(request: web.Request) -> Iterator[ErrorContext]:
    """Context manager to retrieve error data inside of error handler (view).

    The yielded instance carries the error itself, its message (default
    ``str(err)``), its HTTP status (default ``500``) and the response data
    dict (default ``{"detail": str(err)}``).
    """
    err = get_error_from_request(request)
    message = getattr(err, "message", None) or str(err)
    yield ErrorContext(
        err=err,
        message=message,
        status=getattr(err, "status", None) or 500,
        data=getattr(err, "data", None) or {"detail": message},
    )
def error_middleware(
    *,
    default_handler: Handler = default_error_handler,
    config: Optional[Config] = None,
    ignore_exceptions: Optional[
        Union[ExceptionType, Tuple[ExceptionType, ...]]
    ] = None
) -> Middleware:
    """Middleware to handle exceptions in aiohttp applications.

    To catch all possible errors, please put this middleware on top of your
    ``middlewares`` list (**but after CORS middleware if it is used**) as:

    .. code-block:: python

        from aiohttp import web
        from aiohttp_middlewares import (
            error_middleware,
            timeout_middleware,
        )

        app = web.Application(
            middlewares=[error_middleware(...), timeout_middleware(...)]
        )

    :param default_handler:
        Default handler to be called on any error caught by this middleware.
    :param config:
        When application requires multiple error handlers, provide mapping in
        format ``Dict[Url, Handler]``, where ``Url`` can be an exact string
        to match path or regex and ``Handler`` is a handler to be called when
        ``Url`` matches current request path if any.
    :param ignore_exceptions:
        Do not process given exceptions via error middleware.
    """
    # Bind the configuration once; the inner middleware only supplies the
    # per-request arguments.
    get_response = partial(
        get_error_response,
        default_handler=default_handler,
        config=config,
        ignore_exceptions=ignore_exceptions,
    )

    @web.middleware
    async def middleware(
        request: web.Request, handler: Handler
    ) -> web.StreamResponse:
        try:
            return await handler(request)
        except Exception as err:
            return await get_response(request, err)

    return middleware
def get_error_from_request(request: web.Request) -> Exception:
    """Get previously stored error from request dict.

    Return default exception if nothing stored before.
    """
    stored = request.get(REQUEST_ERROR_KEY)
    return stored if stored else DEFAULT_EXCEPTION
def get_error_handler(
    request: web.Request, config: Optional[Config]
) -> Optional[Handler]:
    """Find error handler matching current request path if any."""
    if not config:
        return None
    path = request.rel_url.path
    # First configured URL pattern that matches the request path wins.
    return next(
        (handler for url, handler in config.items() if match_path(url, path)),
        None,
    )
async def get_error_response(
    request: web.Request,
    err: Exception,
    *,
    default_handler: Handler = default_error_handler,
    config: Optional[Config] = None,
    ignore_exceptions: Optional[
        Union[ExceptionType, Tuple[ExceptionType, ...]]
    ] = None
) -> web.StreamResponse:
    """Actual coroutine to get response for given request & error.

    .. versionadded:: 1.1.0

    This is a cornerstone of error middleware and can be reused in attempt to
    overwrite error middleware logic.

    For example, when you need to post-process response and it may result in
    extra exceptions it is useful to make ``custom_error_middleware`` as
    follows,

    .. code-block:: python

        from aiohttp import web
        from aiohttp_middlewares import get_error_response
        from aiohttp_middlewares.annotations import Handler

        @web.middleware
        async def custom_error_middleware(
            request: web.Request, handler: Handler
        ) -> web.StreamResponse:
            try:
                response = await handler(request)
                post_process_response(response)
            except Exception as err:
                return await get_error_response(request, err)
    """
    # Ignored exceptions propagate unchanged (as in a try/except tuple).
    if ignore_exceptions and isinstance(err, ignore_exceptions):
        raise err

    # Stash the error so error_context() can retrieve it inside the handler.
    set_error_to_request(request, err)
    error_handler = get_error_handler(request, config) or default_handler
    return await error_handler(request)
def set_error_to_request(request: web.Request, err: Exception) -> Exception:
    """Store the caught error in the request dict and return it."""
    request[REQUEST_ERROR_KEY] = err
    return err
|
<reponame>pulumi/pulumi-alicloud
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVbrHealthChecksResult',
'AwaitableGetVbrHealthChecksResult',
'get_vbr_health_checks',
]
@pulumi.output_type
class GetVbrHealthChecksResult:
    """
    A collection of values returned by getVbrHealthChecks.
    """
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen) — do not edit by
    # hand. The constructor only validates types and stores the raw outputs.
    def __init__(__self__, cen_id=None, checks=None, id=None, ids=None, output_file=None, vbr_instance_id=None, vbr_instance_owner_id=None, vbr_instance_region_id=None):
        if cen_id and not isinstance(cen_id, str):
            raise TypeError("Expected argument 'cen_id' to be a str")
        pulumi.set(__self__, "cen_id", cen_id)
        if checks and not isinstance(checks, list):
            raise TypeError("Expected argument 'checks' to be a list")
        pulumi.set(__self__, "checks", checks)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if ids and not isinstance(ids, list):
            raise TypeError("Expected argument 'ids' to be a list")
        pulumi.set(__self__, "ids", ids)
        if output_file and not isinstance(output_file, str):
            raise TypeError("Expected argument 'output_file' to be a str")
        pulumi.set(__self__, "output_file", output_file)
        if vbr_instance_id and not isinstance(vbr_instance_id, str):
            raise TypeError("Expected argument 'vbr_instance_id' to be a str")
        pulumi.set(__self__, "vbr_instance_id", vbr_instance_id)
        if vbr_instance_owner_id and not isinstance(vbr_instance_owner_id, int):
            raise TypeError("Expected argument 'vbr_instance_owner_id' to be a int")
        pulumi.set(__self__, "vbr_instance_owner_id", vbr_instance_owner_id)
        if vbr_instance_region_id and not isinstance(vbr_instance_region_id, str):
            raise TypeError("Expected argument 'vbr_instance_region_id' to be a str")
        pulumi.set(__self__, "vbr_instance_region_id", vbr_instance_region_id)
    @property
    @pulumi.getter(name="cenId")
    def cen_id(self) -> Optional[str]:
        """
        The ID of the Cloud Enterprise Network (CEN) instance.
        """
        return pulumi.get(self, "cen_id")
    @property
    @pulumi.getter
    def checks(self) -> Sequence['outputs.GetVbrHealthChecksCheckResult']:
        """
        A list of CEN VBR Heath Checks. Each element contains the following attributes:
        """
        return pulumi.get(self, "checks")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def ids(self) -> Sequence[str]:
        """
        A list of the CEN VBR Heath Check IDs.
        """
        return pulumi.get(self, "ids")
    @property
    @pulumi.getter(name="outputFile")
    def output_file(self) -> Optional[str]:
        return pulumi.get(self, "output_file")
    @property
    @pulumi.getter(name="vbrInstanceId")
    def vbr_instance_id(self) -> Optional[str]:
        """
        The ID of the VBR instance.
        """
        return pulumi.get(self, "vbr_instance_id")
    @property
    @pulumi.getter(name="vbrInstanceOwnerId")
    def vbr_instance_owner_id(self) -> Optional[int]:
        return pulumi.get(self, "vbr_instance_owner_id")
    @property
    @pulumi.getter(name="vbrInstanceRegionId")
    def vbr_instance_region_id(self) -> str:
        """
        The ID of the region where the VBR instance is deployed.
        """
        return pulumi.get(self, "vbr_instance_region_id")
class AwaitableGetVbrHealthChecksResult(GetVbrHealthChecksResult):
    # Generated shim that makes the result awaitable for Pulumi's async
    # invoke path; the dead "if False: yield" marks __await__ as a generator.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVbrHealthChecksResult(
            cen_id=self.cen_id,
            checks=self.checks,
            id=self.id,
            ids=self.ids,
            output_file=self.output_file,
            vbr_instance_id=self.vbr_instance_id,
            vbr_instance_owner_id=self.vbr_instance_owner_id,
            vbr_instance_region_id=self.vbr_instance_region_id)
def get_vbr_health_checks(cen_id: Optional[str] = None,
                          output_file: Optional[str] = None,
                          vbr_instance_id: Optional[str] = None,
                          vbr_instance_owner_id: Optional[int] = None,
                          vbr_instance_region_id: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVbrHealthChecksResult:
    """
    This data source provides CEN VBR Health Checks available to the user.

    > **NOTE:** Available in 1.98.0+

    :param str cen_id: The ID of the Cloud Enterprise Network (CEN) instance.
    :param str output_file: File name where to save data source results.
    :param str vbr_instance_id: The ID of the VBR instance.
    :param int vbr_instance_owner_id: The User ID (UID) of the account to which the VBR instance belongs.
    :param str vbr_instance_region_id: The ID of the region where the VBR instance is deployed.
    """
    # Marshal arguments into the camelCase names the provider expects.
    __args__ = dict()
    __args__['cenId'] = cen_id
    __args__['outputFile'] = output_file
    __args__['vbrInstanceId'] = vbr_instance_id
    __args__['vbrInstanceOwnerId'] = vbr_instance_owner_id
    __args__['vbrInstanceRegionId'] = vbr_instance_region_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the Alicloud provider; .value carries the
    # typed result object.
    __ret__ = pulumi.runtime.invoke('alicloud:cen/getVbrHealthChecks:getVbrHealthChecks', __args__, opts=opts, typ=GetVbrHealthChecksResult).value

    return AwaitableGetVbrHealthChecksResult(
        cen_id=__ret__.cen_id,
        checks=__ret__.checks,
        id=__ret__.id,
        ids=__ret__.ids,
        output_file=__ret__.output_file,
        vbr_instance_id=__ret__.vbr_instance_id,
        vbr_instance_owner_id=__ret__.vbr_instance_owner_id,
        vbr_instance_region_id=__ret__.vbr_instance_region_id)
|
from enum import Enum
from utils.wavefront import Loader as load_wavefront
from pyglet.image import load as load_image
# Animation speed for 3D cube
class Speed(Enum):
    """Turn-animation speed presets.

    Values are presumably degrees rotated per animation tick (a quarter
    turn is 90 degrees, so ``Immediate`` would finish in a single tick) —
    TODO confirm against the animation loop.
    """
    Slow = 5
    Medium = 15
    Fast = 30
    Immediate = 90
# Side length of the cube
CubeSize = 90
# Per-piece shrink factor — slightly under 1, presumably so adjacent pieces
# don't visually merge at shared boundaries (TODO confirm in the renderer).
PieceScale = 0.995
# Fraction of a piece face covered by the colored sticker.
FaceSize = 0.85
# Seconds between animation updates (24 updates per second).
AnimationTick = 1.0 / 24
# Configuration used for 3D drawing
# Each face is a quad: four indices into the corner list encoded in ``Vertices``.
Faces = [[4, 5, 6, 7], [3, 0, 4, 7], [0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [0, 1, 2, 3]]
# Flattened x, y, z coordinates of the 8 corners of a 2-unit cube (all ±1).
Vertices = [-1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1]
# Per-face sticker scaling: the two axes lying in the face's plane are scaled
# by ``FaceSize`` while the axis normal to the face keeps scale 1.
FacesScale = [[FaceSize, 1, FaceSize], [1, FaceSize, FaceSize], [FaceSize, FaceSize, 1],
              [1, FaceSize, FaceSize], [FaceSize, FaceSize, 1], [FaceSize, 1, FaceSize]]
# Color and texture configuration
# RGB triples: the first entry is dark (presumably the plastic body),
# followed by six face colors — TODO confirm the ordering against the renderer.
Colors = [[30, 30, 30], [255, 255, 255], [0, 100, 50], [150, 0, 0], [0, 50, 155], [230, 100, 0], [255, 255, 0]]
# Window clear color as RGBA components in [0, 1].
Background = [0.3, 0.3, 0.3, 1]
# Texture coordinates for the four corners of a quad.
TextureUV = [[0, 0], [0, 1.0], [1.0, 1.0], [1.0, 0]]
# Sticker mask texture and rounded-cube mesh, loaded once at import time.
TextureMask = load_image('resources/mask.png').get_texture()
RoundedCube = load_wavefront('resources/rounded_cube.obj')
# Axis, front face and rotation direction for animation
# ``axis``: rotation-axis index — L/M/R/X share axis 0, U/E/D/Y axis 1,
# F/S/B/Z axis 2.  ``face``: which layer along that axis turns (-1, 0 or +1
# in cube coordinates; ``None`` for the X/Y/Z moves, which presumably rotate
# the whole cube — TODO confirm).  ``dir``: sign of the rotation.
Animation = {
    'L': {'axis': 0, 'face': -1, 'dir': -1},
    'M': {'axis': 0, 'face': 0, 'dir': -1},
    'R': {'axis': 0, 'face': +1, 'dir': +1},
    'X': {'axis': 0, 'face': None, 'dir': -1},
    'U': {'axis': 1, 'face': +1, 'dir': +1},
    'E': {'axis': 1, 'face': 0, 'dir': +1},
    'D': {'axis': 1, 'face': -1, 'dir': -1},
    'Y': {'axis': 1, 'face': None, 'dir': +1},
    'F': {'axis': 2, 'face': +1, 'dir': +1},
    'S': {'axis': 2, 'face': 0, 'dir': +1},
    'B': {'axis': 2, 'face': -1, 'dir': -1},
    'Z': {'axis': 2, 'face': None, 'dir': +1},
}
# Front face and piece-face index for textual moves
# ``face``: face index/indices whose stickers rotate in place for the move
# (a negative value presumably encodes rotation in the opposite sense —
# TODO confirm against the move-application code); slice moves (M/E/S) have
# no such face.  ``indices``: sticker positions (0-53) listed in the order
# they cycle; whole-cube moves (X/Y/Z) carry one index ring per layer.
Moves = {
    'L': dict(face=[1], indices=[[36, 39, 42, 51, 48, 45, 24, 21, 18, 6, 3, 0]]),
    'M': dict(face=[], indices=[[37, 40, 43, 52, 49, 46, 25, 22, 19, 7, 4, 1]]),
    'R': dict(face=[3], indices=[[2, 5, 8, 20, 23, 26, 47, 50, 53, 44, 41, 38]]),
    'X': dict(face=[1, -3], indices=[
        [36, 39, 42, 51, 48, 45, 24, 21, 18, 6, 3, 0],
        [37, 40, 43, 52, 49, 46, 25, 22, 19, 7, 4, 1],
        [38, 41, 44, 53, 50, 47, 26, 23, 20, 8, 5, 2],
    ]),
    'U': dict(face=[0], indices=[[9, 10, 11, 18, 19, 20, 27, 28, 29, 38, 37, 36]]),
    'E': dict(face=[], indices=[[12, 13, 14, 21, 22, 23, 30, 31, 32, 41, 40, 39]]),
    'D': dict(face=[5], indices=[[42, 43, 44, 35, 34, 33, 26, 25, 24, 17, 16, 15]]),
    'Z': dict(face=[2, 4], indices=[
        [11, 14, 17, 45, 46, 47, 33, 30, 27, 8, 7, 6],
        [10, 13, 16, 48, 49, 50, 34, 31, 28, 5, 4, 3],
        [9, 12, 15, 51, 52, 53, 35, 32, 29, 2, 1, 0]
    ]),
    'F': dict(face=[2], indices=[[11, 14, 17, 45, 46, 47, 33, 30, 27, 8, 7, 6]]),
    'S': dict(face=[], indices=[[10, 13, 16, 48, 49, 50, 34, 31, 28, 5, 4, 3]]),
    'B': dict(face=[-4], indices=[[0, 1, 2, 29, 32, 35, 53, 52, 51, 15, 12, 9]]),
    'Y': dict(face=[0, -5], indices=[
        [9, 10, 11, 18, 19, 20, 27, 28, 29, 38, 37, 36],
        [12, 13, 14, 21, 22, 23, 30, 31, 32, 41, 40, 39],
        [15, 16, 17, 24, 25, 26, 33, 34, 35, 44, 43, 42]
    ]),
}
|
from datetime import datetime
from . import db, api
from .consts import API_HOSTNAME
class CommonModel(object):
    """Mixin giving SQLAlchemy models a JSON-friendly serialization."""

    def _serialize(self):
        """Jsonify the sql alchemy query result.

        Returns a dict mapping column names to JSON-serializable values:
        timezone-aware datetimes are normalized to UTC and formatted, and
        NULLs are coerced to a type-appropriate default (0 for numeric
        columns, None for JSON/DATETIME columns, "" otherwise).
        """
        # Optional per-column-type converters (keyed by SQLAlchemy type).
        convert = dict()
        d = dict()
        # noinspection PyUnresolvedReferences
        for c in self.__class__.__table__.columns:
            v = getattr(self, c.name)
            if c.type in convert and v is not None:
                try:
                    d[c.name] = convert[c.type](v)
                except Exception:
                    # BUG FIX: the original assigned a *tuple* here (trailing
                    # comma instead of concatenation) and misspelled
                    # "convert"; it also used a bare `except:`.
                    d[c.name] = ("Error: Failed to convert using "
                                 + str(convert[c.type]))
            elif v is None:
                if hasattr(c.type, '__visit_name__') and c.type.__visit_name__ == 'JSON':
                    d[c.name] = None
                elif "INTEGER" == str(c.type) or "NUMERIC" == str(c.type):
                    d[c.name] = 0
                elif "DATETIME" == str(c.type):
                    d[c.name] = None
                else:
                    d[c.name] = str()
            elif isinstance(v, datetime):
                # Normalize timezone-aware datetimes to UTC before formatting.
                if v.utcoffset() is not None:
                    v = v - v.utcoffset()
                d[c.name] = v.strftime('%Y-%m-%d %H:%M:%S')
            else:
                d[c.name] = v
        return d

    def json(self):
        """Public JSON representation; subclasses must implement."""
        raise NotImplementedError()
class Channel(db.Model, CommonModel):
    """A registered channel and the configuration of its dccon source."""
    __tablename__ = 'channel'

    # Supported dccon source types (the values stored in ``dccon_type``).
    DCCON_TYPE_OPEN_DCCON = 'open_dccon'
    DCCON_TYPE_OPEN_DCCON_RELATIVE_PATH = 'open_dccon_rel_path'
    DCCON_TYPE_BRIDGE_BBCC = 'bridge_bbcc'
    DCCON_TYPE_FUNZINNU = 'funzinnu'
    DCCON_TYPE_TELK = 'telk'
    DCCON_TYPE_BRIDGE_BBCC_CUSTOM_URL = 'bridge_bbcc_cu'
    DCCON_TYPES = (
        DCCON_TYPE_OPEN_DCCON,
        DCCON_TYPE_OPEN_DCCON_RELATIVE_PATH,
        DCCON_TYPE_BRIDGE_BBCC,
        DCCON_TYPE_FUNZINNU,
        DCCON_TYPE_TELK,
        DCCON_TYPE_BRIDGE_BBCC_CUSTOM_URL
    )

    id = db.Column(db.Integer, primary_key=True)
    # Unique identifier of the channel owner — presumably the streaming
    # platform user name/ID; TODO confirm against callers.
    user_id = db.Column(db.Unicode(64, collation='c'), nullable=False, unique=True)
    dccon_url = db.Column(db.Unicode(512, collation='c'), nullable=True)
    dccon_type = db.Column(db.Unicode(32, collation='c'), nullable=False, default=DCCON_TYPE_OPEN_DCCON)
    last_cache_update = db.Column(db.DateTime(), nullable=True)
    is_using_cache = db.Column(db.Boolean(), nullable=False, default=False)
    cached_dccon = db.Column(db.JSON(), nullable=True)

    def json(self):
        """Serialize for API responses, hiding internal fields.

        Drops the primary key and the (potentially large) cached payload;
        when caching is enabled, exposes URLs to the cached/proxied dccon
        endpoints instead.
        """
        channel = self._serialize()
        channel.pop('id')
        channel.pop('cached_dccon')
        if self.is_using_cache:
            channel['cached_dccon_url'] = self.cached_dccon_url()
            channel['proxy_dccon_url'] = self.proxy_dccon_url()
        return channel

    def cached_dccon_url(self):
        """Absolute URL of this channel's cached-dccon API endpoint."""
        # Local import, presumably to avoid a circular import with the API
        # modules — confirm before moving to module level.
        from .apis.channels import ApiChannelCachedDccon
        return 'https://{host}{url}'.format(
            host=API_HOSTNAME,
            url=api.url_for(ApiChannelCachedDccon, user_id=self.user_id)
        )

    def proxy_dccon_url(self):
        """Absolute URL of this channel's proxied-dccon API endpoint."""
        from .apis.channels import ApiChannelProxyDccon
        return 'https://{host}{url}'.format(
            host=API_HOSTNAME,
            url=api.url_for(ApiChannelProxyDccon, user_id=self.user_id)
        )
|
"""Bump versions in files for release.
Requirements:
- tomlkit: Format preserving TOML parser.
"""
import pathlib
import sys
import re
import argparse
import fileinput
from packaging.version import parse as parse_version
import tomlkit
"""The base oso directory."""
BASE = pathlib.Path(__file__).parent.parent
"""Regular expression for capturing version."""
VERSION_RE = r"[\w.]+"
def log(*args):
    """Print *args to stderr (stdout is reserved for FileInput rewriting)."""
    print(*args, file=sys.stderr)


def replace_version(target_version, path, match_re=None):
    """Set version to ``target_version`` in ``path``.

    :param target_version: New version string to write.
    :param path: ``pathlib.Path`` of the file to update.
    :param match_re: If provided, replace capture group 1 on every line
        matching ``match_re``; otherwise overwrite the whole file with
        ``target_version``.
    :raises AssertionError: If ``match_re`` is given but matches no line.
    """
    if match_re is None:
        log(f"{path}: {target_version}")
        with open(path, 'w') as f:
            f.write(target_version + "\n")
        return

    # Don't reuse the parameter name for the compiled pattern.
    pattern = re.compile(match_re)
    with fileinput.FileInput(files=(path.as_posix(),), inplace=True) as f:
        match_found = False
        for line in f:
            match = pattern.search(line)
            if match is None:
                # In FileInput's inplace mode, stdout writes to the file.
                sys.stdout.write(line)
                continue
            match_found = True
            start, end = match.span(1)
            new_line = line[:start] + target_version + line[end:]
            log(f"{path}: {line.strip()} => {new_line.strip()}")
            sys.stdout.write(new_line)
    # Explicit raise instead of a bare `assert` so the check survives
    # `python -O` (asserts are stripped under optimization).
    if not match_found:
        raise AssertionError(f"Match not found for {path}")
def replace_version_toml(filename, mutations):
    """Apply ``mutations`` to TOML formatted ``filename``.

    Uses tomlkit so the file's formatting and comments are preserved.

    :param filename: Path of the TOML file to rewrite in place.
    :param mutations: Mutations is a dictionary describing keys and values
        in the TOML file to update. Keys are specified as a ``.``
        separated path.
    """
    with open(filename, "r") as f:
        contents = tomlkit.parse(f.read())
    for (path, update) in mutations.items():
        parts = path.split(".")
        # Walk down to the table containing the final key.
        o = contents
        for part in parts[:-1]:
            o = o[part]
        # BUG FIX: log the file actually being updated instead of the
        # literal placeholder "(unknown)".
        log(f"{filename}: {path} => {update}")
        o[parts[-1]] = update
    with open(filename, "w") as f:
        f.write(tomlkit.dumps(contents))
def bump_oso_version(version):
    """Set the core oso version across every language's packaging files."""
    # (path relative to BASE, regex whose group 1 is the version to replace);
    # a None regex means the file contains only the version string itself.
    regex_targets = [
        ("VERSION", None),
        ("languages/java/oso/pom.xml",
         fr"<!-- oso_version --><version>({VERSION_RE})<\/version>"),
        ("docs/examples/Makefile",
         fr"JAVA_PACKAGE_JAR_PATH := .*\/oso-({VERSION_RE})\.jar"),
        ("languages/js/package.json", fr'"version": "({VERSION_RE})"'),
        ("languages/python/docs/conf.py", fr'version = "({VERSION_RE})"'),
        ("languages/python/docs/conf.py", fr'release = "({VERSION_RE})"'),
        ("languages/python/oso/oso/oso.py", fr'__version__ = "({VERSION_RE})"'),
        ("languages/ruby/Gemfile.lock", fr'oso-oso \(({VERSION_RE})\)'),
        ("languages/ruby/lib/oso/version.rb", fr"VERSION = '({VERSION_RE})'"),
    ]
    for rel_path, pattern in regex_targets:
        replace_version(version, BASE / rel_path, pattern)

    # Cargo.toml files additionally pin intra-workspace dependencies to
    # exactly this version ("=x.y.z").
    pin = f"={version}"
    toml_targets = [
        ("languages/rust/oso-derive/Cargo.toml", {"package.version": version}),
        ("languages/rust/oso/Cargo.toml", {
            "package.version": version,
            "dependencies.oso-derive.version": pin,
            "dependencies.polar-core.version": pin,
            "dev-dependencies.oso-derive.version": pin,
        }),
        ("polar-c-api/Cargo.toml", {
            "package.version": version,
            "dependencies.polar-core.version": pin,
        }),
        ("polar-core/Cargo.toml", {"package.version": version}),
        ("polar-wasm-api/Cargo.toml", {
            "package.version": version,
            "dependencies.polar-core.version": pin,
        }),
        ("polar-language-server/Cargo.toml", {
            "package.version": version,
            "dependencies.polar-core.version": pin,
        }),
    ]
    for rel_path, mutations in toml_targets:
        replace_version_toml(BASE / rel_path, mutations)

    replace_version(version,
                    BASE / ".github/workflows/publish-docs.yml",
                    fr'default: "({VERSION_RE})" # oso_version')
def oso_python_dependency_version(version):
    """Get oso version that Python dependencies should pin to.

    0.14.5 => 0.14.0
    """
    v = parse_version(version)
    return f"{v.major}.{v.minor}.0"
def bump_sqlalchemy_version(version, oso_version):
    """Bump sqlalchemy-oso to ``version`` and pin its oso dependency."""
    pkg = BASE / "languages/python/sqlalchemy-oso"
    replace_version(version,
                    pkg / "sqlalchemy_oso/__init__.py",
                    fr'__version__ = "({VERSION_RE})"')
    # The requirements pin tracks oso's major.minor series, not the patch.
    replace_version(oso_python_dependency_version(oso_version),
                    pkg / "requirements.txt",
                    fr'oso~=({VERSION_RE})')
    replace_version(version,
                    BASE / ".github/workflows/publish-docs.yml",
                    fr'default: "({VERSION_RE})" # sqlalchemy_oso_version')
def bump_flask_version(version, oso_version):
    """Bump flask-oso to ``version`` and pin its oso dependency."""
    pkg = BASE / "languages/python/flask-oso"
    replace_version(version,
                    pkg / "flask_oso/__init__.py",
                    fr'__version__ = "({VERSION_RE})"')
    # The requirements pin tracks oso's major.minor series, not the patch.
    replace_version(oso_python_dependency_version(oso_version),
                    pkg / "requirements.txt",
                    fr'oso~=({VERSION_RE})')
    replace_version(version,
                    BASE / ".github/workflows/publish-docs.yml",
                    fr'default: "({VERSION_RE})" # flask_oso_version')
def bump_django_version(version, oso_version):
    """Bump django-oso to ``version`` and pin its oso dependency."""
    pkg = BASE / "languages/python/django-oso"
    replace_version(version,
                    pkg / "django_oso/__init__.py",
                    fr'__version__ = "({VERSION_RE})"')
    # The requirements pin tracks oso's major.minor series, not the patch.
    replace_version(oso_python_dependency_version(oso_version),
                    pkg / "requirements.txt",
                    fr'oso~=({VERSION_RE})')
    replace_version(version,
                    BASE / ".github/workflows/publish-docs.yml",
                    fr'default: "({VERSION_RE})" # django_oso_version')
def bump_versions(oso_version=None, sqlalchemy_version=None,
                  flask_version=None, django_version=None):
    """Bump any subset of the project versions.

    The dependent packages (sqlalchemy-oso, flask-oso, django-oso) pin
    their oso dependency, so bumping any of them also requires
    ``oso_version``.

    :raises AssertionError: if a dependent version is given without
        ``oso_version``.
    """
    dependents = (sqlalchemy_version, flask_version, django_version)
    # Explicit raise instead of a bare `assert` so the check survives
    # `python -O`; same exception type as the original asserts.
    if any(v is not None for v in dependents) and oso_version is None:
        raise AssertionError("--oso_version must be provided")
    if oso_version is not None:
        bump_oso_version(oso_version)
    if sqlalchemy_version is not None:
        bump_sqlalchemy_version(sqlalchemy_version, oso_version)
    if flask_version is not None:
        bump_flask_version(flask_version, oso_version)
    if django_version is not None:
        bump_django_version(django_version, oso_version)
def main():
    """Parse the command-line version flags and apply the requested bumps."""
    parser = argparse.ArgumentParser()
    for flag in ('--oso_version', '--sqlalchemy_version',
                 '--flask_version', '--django_version'):
        parser.add_argument(flag)
    args = parser.parse_args()
    bump_versions(**vars(args))


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.