text stringlengths 957 885k |
|---|
#import modules
import json
from binance.client import Client
import pandas as pd
import os
from envs import env
import time
import statistics
import numpy as np
from scipy.stats import kurtosis, skew
#import classes from ./ folder
import postgresdbAccess
class tradingAccess:
    """Places fibonacci-retracement driven market-buy orders on Binance and
    records the resulting trade advice in a PostgreSQL table.

    Configuration (live-trading flag, order volume, table name, base currency
    and API credentials) is read from environment variables via envs.env().
    """

    def __init__(self):
        # Load the fibonacci retracement levels prepared in fibLvl.json.
        with open('fibLvl.json') as file:
            self.fibLvl = json.load(file)
        # Read the runtime configuration from environment variables.
        try:
            self.liveTrading = env("liveTrading", 'False').lower() in ('true', '1', 't')
            self.liveVolume = env("liveVolume")
            self.dbTable = env("dbTable")
            self.baseCurrency = env('baseCurrency')
            apiSecret = env('apiSecret')
            apiKey = env('apiKey')
        except KeyError:
            print("No env variables set.")
            # Fixed: the original called sys.exit(1) without importing sys,
            # which raised NameError instead of exiting cleanly. Raising
            # SystemExit directly is equivalent and needs no extra import.
            raise SystemExit(1)
        # Connect to Binance; the client is also used later to query balances.
        self.client = Client(apiKey, apiSecret, {'timeout': 600})

    def openTrade(self, fib, i, large, cor, tick, statisticsTools):
        """Open a trade (a live market buy when live trading is enabled) and
        store the advice on the most recent row of the database table.

        :param fib: DataFrame of fibonacci levels/bands built in runCalculation
        :param i: index of the fibonacci level that triggered the trade
        :param large: DataFrame with the raw id/askprice history
        :param cor: correlation between row id and ask price
        :param tick: Binance ticker dict for the traded symbol
        :param statisticsTools: dict with keys "stDev", "kurtosis" and "skew"
        """
        if self.liveTrading == True:
            # Wait a bit because otherwise the Binance API errors out.
            time.sleep(0.01)
            # Base-currency balance available before the buy.
            assetsBefore = float(self.client.get_asset_balance(asset=self.baseCurrency)['free'])
            # Skip rest of function if funds are not sufficient.
            if assetsBefore <= float(self.liveVolume) * 1.1:
                return
            # Buy.
            self.client.order_market_buy(symbol=tick['symbol'], quoteOrderQty=self.liveVolume)
            # Balance after the buy; wait 5 seconds so the fill is reflected.
            time.sleep(5)
            assetsAfter = float(self.client.get_asset_balance(asset=self.baseCurrency)['free'])
            # Actual cost of the position.
            positionCost = assetsBefore - assetsAfter
        else:
            positionCost = 0
        # Write the advice.
        # NOTE(review): the statement is assembled by string concatenation;
        # dbTable comes from the environment and the values from Binance, so
        # this should be migrated to a parameterized query.
        sql = ("UPDATE " + self.dbTable + " SET " +
               " takeProfit = '" + str(fib[3][i+5]) +
               "', stopLoss = '" + str(fib[2][i-7]) +
               "', corValue = '" + str(cor) +
               "', startId = '" + str(large[0].min()) +
               "', midId = '" + str(large[0].max()) +
               "', fibLevel = '" + str(fib[0][i]) +
               "', positionCost = '" + str(positionCost) +
               "', stDev = '" + str(statisticsTools["stDev"]) +
               "', kurtosis = '" + str(statisticsTools["kurtosis"]) +
               "', skew = '" + str(statisticsTools["skew"]) +
               "' WHERE id IN(SELECT max(id) FROM " + self.dbTable + ");")
        self.postgres.sqlUpdate(sql)

    def runCalculation(self, tick):
        """Evaluate the last 33 hours of ask prices for tick['symbol'] and
        open a trade when the fibonacci/statistics conditions are met."""
        self.postgres = postgresdbAccess.postgresAccess()
        sql = ("SELECT id, askprice FROM " + self.dbTable +
               " WHERE symbol LIKE '" + tick['symbol'] +
               "' AND time > NOW() - INTERVAL '33 hours';")
        largeData = pd.DataFrame(self.postgres.sqlQuery(sql))
        # Only run the calculation if data for 2000 minutes is present
        # (roughly 36 hours including calculation times) to not buy ICOs on
        # Binance (usually unprofitable at the beginning).
        if len(largeData) > 2000:
            # Convert columns id and askprice to float.
            largeData = largeData.apply(pd.to_numeric, errors='coerce', downcast='float')
            # Price range over the window.
            diff = largeData[1].max() - largeData[1].min()
            # Calculate the fibonacci retracement levels plus a +/-0.05% band
            # around each level. (The original wrapped these assignments in a
            # loop over the DataFrame columns, recomputing the same whole-column
            # result each pass; a single vectorized assignment is identical.)
            fibRetracement = pd.DataFrame(self.fibLvl)
            maxAsk = largeData[1].max()
            fibRetracement[1] = maxAsk - diff * fibRetracement[0]
            fibRetracement[2] = fibRetracement[1] * 0.9995
            fibRetracement[3] = fibRetracement[1] * 1.0005
            # See if an open trade currently exists for this symbol.
            sql = ("SELECT count(*) FROM " + self.dbTable +
                   """ WHERE takeprofit is not null and resultpercent is null and
                   symbol like '""" + tick['symbol'] + "';")
            # Correlation of id (a proxy for time) and price.
            corValue = largeData[0].corr(largeData[1])
            # Statistical parameters of the price series.
            statisticsTools = {}
            statisticsTools["stDev"] = statistics.stdev(largeData[1])
            statisticsTools["skew"] = skew(largeData[1])
            statisticsTools["kurtosis"] = kurtosis(largeData[1])
            # Trade only if no open trade exists, the skew is negative enough
            # and the price sits inside the band around the 7th fib level.
            for i in [7]:
                if (float(tick["priceChangePercent"]) <= -10 or
                        float(tick["priceChangePercent"]) >= 1):
                    if (int(self.postgres.sqlQuery(sql)[0][0]) == 0 and
                            float(statisticsTools["skew"]) <= -0.1 and
                            float(tick['askPrice']) <= fibRetracement[3][i] and
                            float(tick['askPrice']) >= fibRetracement[2][i]):
                        self.openTrade(fibRetracement, i, largeData, corValue, tick, statisticsTools)
        self.postgres.databaseClose()
|
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2020 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>3</version>
<name>TS_CMHAL_SetStartFreq</name>
<primitive_test_id/>
<primitive_test_name>CMHAL_SetStartFreq</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>Validate docsis_SetStartFreq() api by setting a new start frequency value</synopsis>
<groups_id/>
<execution_time>15</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CMHAL_110</test_case_id>
<test_objective>Validate docsis_SetStartFreq() api by setting a new start frequency value</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state of DUT that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>docsis_SetStartFreq
docsis_GetDownFreq
Device.X_CISCO_COM_CableModem.DownstreamChannelNumberOfEntries
Device.X_CISCO_COM_CableModem.DownstreamChannel.i.Frequency</api_or_interface_used>
<input_parameters>None</input_parameters>
<automation_approch>1. Load cmhal module
2. Get the current startFrequency using docsis_GetDownFreq() and save it
3. Get Device.X_CISCO_COM_CableModem.DownstreamChannelNumberOfEntries and check if it is greater than 0
4. Choose a new frequency from Device.X_CISCO_COM_CableModem.DownstreamChannel.i.Frequency, which is not equal to the initial start frequency
5. Set the chosen start frequency using docsis_SetStartFreq()
6. Wait for 60 seconds for set operation to reflect.
7. Invoke docsis_GetDownFreq() and check if previous set value is saved
8. Revert back to the initial start frequency
9. Unload cmhal module
</automation_approch>
<expected_output>docsis_SetStartFreq() api should successfully set a new start frequency value</expected_output>
<priority>High</priority>
<test_stub_interface>cmhal</test_stub_interface>
<test_script>TS_CMHAL_SetStartFreq</test_script>
<skipped>No</skipped>
<release_version>M84</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmhal","1");
obj1 = tdklib.TDKScriptingLibrary("tdkbtr181","1");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMHAL_SetStartFreq');
obj1.configureTestCase(ip,port,'TS_CMHAL_SetStartFreq');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
loadmodulestatus1 =obj1.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus1 ;
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in loadmodulestatus1.upper():
obj.setLoadModuleStatus("SUCCESS");
obj1.setLoadModuleStatus("SUCCESS");
tdkTestObj = obj.createTestStep("CMHAL_GetParamUlongValue");
tdkTestObj.addParameter("paramName","DownFreq");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
startFreq = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the start frequency";
print "EXPECTED RESULT 1: Should get the start frequency";
print "ACTUAL RESULT 1: Start frequency is %s" %startFreq;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_CISCO_COM_CableModem.DownstreamChannelNumberOfEntries");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
DSChannelCount = tdkTestObj.getResultDetails();
print "TEST STEP 2: Get the DownstreamChannelNumberOfEntries";
print "EXPECTED RESULT 2: Should get the DownstreamChannelNumberOfEntries";
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT 2: DownstreamChannelNumberOfEntries is %s" %DSChannelCount
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if int(DSChannelCount) > 1:
flag = 0
print "Find a new start frequency value from DSChannel details"
for i in range (1,3):
tdkTestObj = obj1.createTestStep('TDKB_TR181Stub_Get');
param = "Device.X_CISCO_COM_CableModem.DownstreamChannel."+str(i)+".Frequency"
tdkTestObj.addParameter("ParamName", param);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "TEST STEP 3: Get the DownstreamChannel.%d.Frequency" %i;
print "EXPECTED RESULT 3: Should get the DownstreamChannel.%d.Frequency" %i;
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT 3: DownstreamChannel.%d.Frequency is %s" %(i,details)
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if "MHz" in details:
details=details.split(" ")[0]
if '.' in details:
details=float(details)
newFreq=(int(details))*1000000
else:
newFreq = int(details)
if newFreq != int(startFreq):
flag = 1
break;
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT 3: %s" %details;
print "[TEST EXECUTION RESULT] : FAILURE";
if flag == 1:
tdkTestObj = obj.createTestStep("CMHAL_SetStartFreq");
tdkTestObj.addParameter("Value",newFreq);
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult :
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Set the Start frequency to a new value ", newFreq;
print "EXPECTED RESULT 4: Should successfully set the Start frequency to a new value";
print "ACTUAL RESULT 4: ",details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#Wait for the set operation to reflect
sleep(60)
#validate the set function using get
tdkTestObj = obj.createTestStep("CMHAL_GetParamUlongValue");
tdkTestObj.addParameter("paramName","DownFreq");
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
newStartFreq = tdkTestObj.getResultDetails();
if expectedresult in actualresult and int(newStartFreq) == newFreq:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 5: Get the start frequency and check if it became the new value set";
print "EXPECTED RESULT 5: Start frequency should change to the new value: ",newFreq;
print "ACTUAL RESULT 5: New Start frequency is %s" %newStartFreq;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 5: Get the start frequency and check if it became the new value set";
print "EXPECTED RESULT 5: Start frequency should change to the new value: ",newFreq;
print "ACTUAL RESULT 5: New Start frequency is %s" %newStartFreq;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE"
#Revert the start freq
tdkTestObj = obj.createTestStep("CMHAL_SetStartFreq");
tdkTestObj.addParameter("Value",int(startFreq));
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult :
#Wait for the set operation to reflect
sleep(60)
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP : Revert the value of Start frequency";
print "EXPECTED RESULT : Should revert the start frequency";
print "ACTUAL RESULT : ",details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP : Revert the value of Start frequency";
print "EXPECTED RESULT : Should revert the start frequency";
print "ACTUAL RESULT : ",details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Set the Start frequency to a new value", newFreq;
print "EXPECTED RESULT 4: Should successfully set the Start frequency to a new value";
print "ACTUAL RESULT 4: ",details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
print "ERROR: Failed to get new start frequency to validate set api"
tdkTestObj.setResultStatus("FAILURE");
print "[TEST EXECUTION RESULT] : FAILURE"
else:
print "ERROR: Not enough DSChannelCount. Count retreived is ", DSChannelCount
tdkTestObj.setResultStatus("FAILURE");
print "[TEST EXECUTION RESULT] : FAILURE"
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT 2: %s" %DSChannelCount
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the start frequency";
print "EXPECTED RESULT 1: Should get the start frequency";
print "ACTUAL RESULT 1: Start frequency is %s" %StartFreq;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("cmhal");
obj1.unloadModule("tdkbtr181");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
|
import abc
from ..utils import SQLite3StorageMixin, warn
# Warning text emitted when a cache delete fails right after a successful get.
RACE_CONDITION = "WerkzeugCacheTempSubscriberStorage race condition."
class SQLite3SubscriberStorageBase(SQLite3StorageMixin):
    """Shared SQL statements and helpers for SQLite-backed subscriber storage.

    Subclasses must define TABLE_NAME as a class attribute; all statements
    are rendered against that table once, at construction time.
    """

    def __init__(self, path):
        table = self.TABLE_NAME
        # expiration_time is stored as an absolute unix timestamp; the insert
        # adds the caller-supplied lifetime to "now" on the SQLite side.
        self.TABLE_SETUP_SQL = """
            create table if not exists {}(
                callback_id text primary key,
                mode text not null,
                topic_url text not null,
                hub_url text not null,
                secret text,
                lease_seconds integer,
                expiration_time integer not null
            )
        """.format(table)
        self.SETITEM_SQL = """
            insert or replace into {}(callback_id, mode, topic_url, hub_url,
                                      secret, lease_seconds, expiration_time)
            values(?, ?, ?, ?, ?, ?, ? + strftime('%s', 'now'))
        """.format(table)
        # Selects only rows that have not yet expired.
        self.GETITEM_SQL = """
            select mode, topic_url, hub_url, secret, lease_seconds from {}
            where callback_id=? and expiration_time > strftime('%s', 'now');
        """.format(table)
        self.DELITEM_SQL = """
            delete from {} where callback_id=?
        """.format(table)
        super().__init__(path)

    def pop(self, callback_id):
        """Fetch-and-delete the unexpired row stored under *callback_id*.

        Raises KeyError when no unexpired row exists.
        """
        with self.connection() as connection:
            row = connection.execute(self.GETITEM_SQL, (callback_id,)).fetchone()
            connection.execute(self.DELITEM_SQL, (callback_id,))
        if row is None:
            raise KeyError(callback_id)
        return dict(row)
# temp storage
class AbstractTempSubscriberStorage(metaclass=abc.ABCMeta):
    """Interface for short-lived storage of pending subscription requests."""

    @abc.abstractmethod
    def __setitem__(self, callback_id, subscription_request):
        """Store a new subscription request under the key callback_id. A
        subscription request is a dict-like object with the following keys:

        - mode
        - topic_url
        - hub_url
        - secret
        - lease_seconds
        - timeout: after this amount of seconds, the request itself does no
          longer have to be stored.
        """
    @abc.abstractmethod
    def pop(self, callback_id):
        """Get a subscription request as stored by __setitem__, return it, and
        remove the request from the store. Make sure the request has not
        expired!

        If there is no value for callback_id, raise a KeyError.
        """
    def cleanup(self):
        """Remove any expired subscription requests from the store. If your
        backend handles this automatically, there is no need to override this
        method.
        """
class WerkzeugCacheTempSubscriberStorage(AbstractTempSubscriberStorage):
    """Temporary subscription-request storage on top of a werkzeug-style cache."""

    def __init__(self, cache):
        """Cache should share the API of werkzeug.contrib.cache.BaseCache"""
        self.cache = cache

    def __setitem__(self, callback_id, subscription_request):
        # Copy first: popping 'timeout' must not mutate the caller's dict.
        stored = dict(subscription_request)
        timeout = stored.pop('timeout')
        self.cache.set(callback_id, stored, timeout=timeout)

    def pop(self, callback_id):
        # A race between get and delete is possible, but only if a callback_id
        # is shared by multiple hubs -- a bigger problem in itself -- so a
        # warning is sufficient.
        value = self.cache.get(callback_id)
        deleted = self.cache.delete(callback_id)
        if not value:
            raise KeyError(callback_id)
        if not deleted:
            warn(RACE_CONDITION)
        return value
class SQLite3TempSubscriberStorage(AbstractTempSubscriberStorage,
                                   SQLite3SubscriberStorageBase):
    """SQLite-backed temporary storage for pending subscription requests."""

    TABLE_NAME = 'subscriber_temp'
    # Remove rows whose expiration_time is already in the past.
    # Fixed: the original compared with '>' which deleted every row that had
    # NOT yet expired and kept the expired ones. GETITEM_SQL treats a row as
    # live while expiration_time > now, so expired means expiration_time <= now.
    CLEANUP_SQL = """
    delete from subscriber_temp where expiration_time <= strftime('%s', 'now')
    """

    def __setitem__(self, callback_id, request):
        """Store *request* under callback_id; its 'timeout' key (seconds)
        determines the row's expiration time."""
        with self.connection() as connection:
            connection.execute(self.SETITEM_SQL, (callback_id,
                                                  request['mode'],
                                                  request['topic_url'],
                                                  request['hub_url'],
                                                  request['secret'],
                                                  request['lease_seconds'],
                                                  request['timeout']))

    def cleanup(self):
        """Delete all expired subscription requests."""
        with self.connection() as connection:
            connection.execute(self.CLEANUP_SQL)

    # Reuse the SQLite base class's atomic fetch-and-delete, overriding the
    # abstract pop.
    pop = SQLite3SubscriberStorageBase.pop
class AbstractSubscriberStorage(metaclass=abc.ABCMeta):
    """Interface for long-lived storage of active subscriptions."""

    @abc.abstractmethod
    def __getitem__(self, callback_id):
        """Get a subscription by its callback_id, which is a dict-like object
        with the following keys:

        - mode
        - topic_url
        - hub_url
        - secret
        - lease_seconds
        """
    @abc.abstractmethod
    def __delitem__(self, callback_id):
        """Delete an object by its callback_id"""
    @abc.abstractmethod
    def __setitem__(self, callback_id, subscription):
        """Store a new subscription under the key callback_id. Note that a
        subscription should disappear from any queries after lease_seconds has
        passed from the moment of storage on, with the exception of
        close_to_expiration.
        """
    @abc.abstractmethod
    def close_to_expiration(self, margin_in_seconds):
        """Return an iterator of subscriptions that are near (or already past)
        their expiration time. margin_in_seconds specifies what 'near' is.
        Note that the key 'callback_id' needs to be included in the resulting
        object as well!
        """
    @abc.abstractmethod
    def pop(self, callback_id):
        """Atomic combination of __getitem__ and __delitem__."""
class SQLite3SubscriberStorage(AbstractSubscriberStorage,
                               SQLite3SubscriberStorageBase):
    """SQLite-backed storage for active (verified) subscriptions."""

    TABLE_NAME = 'subscriber'
    # Rows whose expiration falls within the given margin, or has passed.
    CLOSE_TO_EXPIRATION_SQL = """
    select callback_id, mode, topic_url, hub_url, secret, lease_seconds
    from subscriber where expiration_time < strftime('%s', 'now') + ?
    """

    def __getitem__(self, callback_id):
        """Return the unexpired subscription for callback_id, else KeyError."""
        with self.connection() as connection:
            row = connection.execute(self.GETITEM_SQL, (callback_id,)).fetchone()
        if row is None:
            raise KeyError(callback_id)
        return dict(row)

    def __delitem__(self, callback_id):
        """Remove the subscription stored under callback_id."""
        with self.connection() as connection:
            connection.execute(self.DELITEM_SQL, (callback_id,))

    def __setitem__(self, callback_id, subscription):
        """Store *subscription*; it expires lease_seconds from now."""
        values = (callback_id,
                  subscription['mode'],
                  subscription['topic_url'],
                  subscription['hub_url'],
                  subscription['secret'],
                  subscription['lease_seconds'],
                  # lease_seconds doubles as the offset added to "now" to
                  # produce the absolute expiration_time.
                  subscription['lease_seconds'])
        with self.connection() as connection:
            connection.execute(self.SETITEM_SQL, values)

    def close_to_expiration(self, margin_in_seconds):
        """Yield rows for subscriptions expiring within margin_in_seconds."""
        with self.connection() as connection:
            for row in connection.execute(self.CLOSE_TO_EXPIRATION_SQL,
                                          (margin_in_seconds,)):
                yield row

    pop = SQLite3SubscriberStorageBase.pop
|
<gh_stars>0
# Copyright 2019 Grakn Labs Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import tkinter as tk
from grakn.client import GraknClient
import datetime
def transform_to_range(val, old_min, old_max, new_min, new_max):
    """Linearly rescale *val* from the range [old_min, old_max] to the range
    [new_min, new_max].

    :return: the scaled value
    """
    old_span = old_max - old_min
    new_span = new_max - new_min
    return (((val - old_min) * new_span) / old_span) + new_min
def transform_coords(lon, lat, min_lon, max_lon, min_lat, max_lat, new_width, new_height):
    """Map grid (lon, lat) into a coordinate system that is easy to render.

    Longitude maps onto [0, new_width]; latitude maps onto [0, new_height]
    and is flipped, because canvas y grows downward while latitude grows north.

    :param lon: longitude of the coordinates to scale
    :param lat: latitude of the coordinates to scale
    :param min_lon: longitude mapped to x = 0
    :param max_lon: longitude mapped to x = new_width
    :param min_lat: latitude mapped to y = new_height (after the flip)
    :param max_lat: latitude mapped to y = 0 (after the flip)
    :param new_width: width of the target coordinate system
    :param new_height: height of the target coordinate system
    :return: (x, y) in the target coordinate system
    """
    x = transform_to_range(lon, min_lon, max_lon, 0, new_width)
    y = new_height - transform_to_range(lat, min_lat, max_lat, 0, new_height)
    return x, y
def _create_circle(self, x, y, r, **kwargs):
"""
Helper function for easily drawing circles with tkinter, rather than ovals
:param x: circle centre x-coordinate
:param y: circle centre y-coordinate
:param r: circle radius
:param kwargs:
:return:
"""
return self.create_oval(x-r, y-r, x+r, y+r, **kwargs)
def execute_and_log(query, transaction):
    """Pretty-print *query*, execute it on *transaction*, return the response."""
    # One clause per line, with a line break after 'match', for readability.
    pretty = ";\n".join(query.split(";")).replace("match", "match\n")
    print("\n" + pretty)
    response = transaction.query(query)
    print("... query complete.")
    return response
# Attach the circle helper function to tkinter so that we can use it more naturally
tk.Canvas.create_circle = _create_circle
class TubeGui:
    """Interactive tkinter visualisation of the London tube network, backed
    by a Grakn knowledge graph."""
    # Zoom attributes
    ZOOM_IN_SCALE = 2
    ZOOM_OUT_SCALE = 1/ZOOM_IN_SCALE
    # Size attributes (pixels / font points)
    RIVER_THAMES_WIDTH = 10
    STATION_FONT_SIZE = 12
    STATION_CIRCLE_RADIUS = 1
    STATION_K_CORE_MAX_RADIUS = 8
    STATION_DEGREE_MAX_RADIUS = 10
    ROUTES_DEGREE_MAX_RADIUS = 8
    TUNNEL_SHORTEST_PATH_WIDTH = 10
    # Station connections
    LINE_WIDTH = 2
    LINE_SPACING = 0.5
    # Color attributes
    RIVER_THAMES_COLOUR = "#def"
    STATION_K_CORE_COLOUR = "#AAF"
    STATION_DEGREE_COLOUR = "#FAA"
    ROUTES_DEGREE_COLOUR = "#AFA"
    TUNNEL_SHORTEST_PATH_COLOUR = "#DDD"
    # Hotkeys
    STATION_K_CORE_KEY = "k"
    STATION_DEGREE_KEY = "d"
    ROUTES_DEGREE_KEY = "r"
    CLEAR_SHORTEST_PATH_KEY = "q"
    CLEAR_ALL_KEY = "c"
    # Graql compute queries. The min/max ones are used in calculating the
    # aspect ratio of the map.
    COMPUTE_MIN_LAT = "compute min of lat, in station;"
    COMPUTE_MAX_LAT = "compute max of lat, in station;"
    COMPUTE_MIN_LON = "compute min of lon, in station;"
    COMPUTE_MAX_LON = "compute max of lon, in station;"
    COMPUTE_CENTRALITY_TUNNEL_DEGREE = "compute centrality of station, in [station, tunnel], using degree;"
    COMPUTE_CENTRALITY_TUNNEL_KCORE = "compute centrality of station, in [station, tunnel], using k-core;"
    COMPUTE_CENTRALITY_ROUTE_DEGREE = "compute centrality of station, in [station, route], using degree;"
    ANALYTICAL_QUERIES = [COMPUTE_CENTRALITY_TUNNEL_DEGREE, COMPUTE_CENTRALITY_TUNNEL_KCORE, COMPUTE_CENTRALITY_ROUTE_DEGREE]
def __init__(self, session, root=None):
    """
    Main visualisation class. Builds an interactive map of the London tube.

    :param session: Grakn session used for all queries
    :param root: optional tkinter root window; one is created when omitted.
        (The original signature used the mutable default ``root=tk.Tk()``,
        which opened a Tk window as a side effect at import time; creating
        it lazily here is backward-compatible.)
    """
    start_time = datetime.datetime.now()
    if root is None:
        root = tk.Tk()
    self._root = root
    self._session = session
    # Full-screen window sized to the display.
    self.w, self.h = self._root.winfo_screenwidth(), self._root.winfo_screenheight()
    self._root.geometry("%dx%d+0+0" % (self.w, self.h))
    self._root.focus_set()
    self._root.bind("<Escape>", lambda e: e.widget.quit())
    self._root.bind("<Key>", self._key_handler)
    self._root.title('London Tube Map')
    self._canvas = tk.Canvas(self._root)
    # Mouse-drag panning.
    self._canvas.bind("<ButtonPress-1>", self._scan_start)
    self._canvas.bind("<ButtonRelease-1>", self._scan_stop)
    self._canvas.bind("<B1-Motion>", self._scan_move)
    self._canvas.pack(fill=tk.BOTH, expand=1)  # Stretch canvas to root window size.
    # We want to scale the longitude and latitude to fit the image.
    # To do this we need the minimum and maximum of the longitude and latitude,
    # we can query for this easily in Grakn!
    with session.transaction().read() as transaction:
        self.min_lat = list(execute_and_log(self.COMPUTE_MIN_LAT, transaction))[0].number()
        self.max_lat = list(execute_and_log(self.COMPUTE_MAX_LAT, transaction))[0].number()
        self.min_lon = list(execute_and_log(self.COMPUTE_MIN_LON, transaction))[0].number()
        self.max_lon = list(execute_and_log(self.COMPUTE_MAX_LON, transaction))[0].number()
    # Aspect ratio as width over height, which is longitude over latitude.
    aspect_ratio = (self.max_lon - self.min_lon) / (self.max_lat - self.min_lat)
    self.new_width = self.w
    self.new_height = self.new_width / aspect_ratio
    self._draw_river_thames()
    # We need to associate the id of the station entity in Grakn to the rendered dot on the screen, so that we can
    # find the Grakn id of a station that is clicked on.
    self._station_point_ids = dict()
    # Also store the station coords so that we don't have to query Grakn for them again.
    self._station_canvas_coords = dict()
    self._station_centrality_points = dict()
    self._draw()
    # ===== Event state variables =====
    self._displaying_centrality = False
    self._scale = 1
    self._shortest_path_stations = []
    self._shortest_path_elements = []
    self._scan_delta = (0, 0)
    self._x_pos = 0
    self._y_pos = 0
    self._scanning = False
    end_time = datetime.datetime.now()
    print("- - - - - -\nTime taken: " + str(end_time - start_time))
@staticmethod
def get_visualisation_data(session):
    """
    Retrieve the data required for visualising the tube network.

    :param session: Grakn session used to run the match query
    :return: dict keyed by tunnel identifier; each value holds the tube
        lines using that tunnel plus lon/lat, display name and Grakn id of
        its two end stations
    """
    with session.transaction().read() as transaction:
        print("\nRetriving coordinates to draw stations and tunnels ...")
        # One answer per (tunnel, tube-line) combination; subsequent answers
        # for an already-seen tunnel only contribute their tube-line name.
        answers_iterator = execute_and_log(
            'match' +
            ' $sta1 isa station, has lon $lon1, has lat $lat1, has name $sta1-nam;' +
            ' $sta2 isa station, has lon $lon2, has lat $lat2, has name $sta2-nam;' +
            ' $tun ($sta1, $sta2, service: $sec) isa tunnel, has identifier $tun-id;' +
            ' $tul isa tube-line, has name $tul-nam;' +
            ' (section: $sec, route-operator: $tul) isa route;' +
            'get $lon1, $lat1, $lon2, $lat2, $tun-id, $tul-nam, $sta1-nam, $sta2-nam, $sta1, $sta2;', transaction
        )
        coordinates = {}
        for answer in answers_iterator:
            answer = answer.map()
            tube_line_name = answer.get("tul-nam").value()
            tunnel_id = answer.get("tun-id").value()
            if tunnel_id in list(coordinates.keys()):
                # Tunnel already recorded: register the additional tube line,
                # kept sorted for deterministic rendering.
                current_tube_lines = coordinates[tunnel_id]["tube-lines"]
                if tube_line_name not in current_tube_lines:
                    current_tube_lines.append(tube_line_name)
                    updated_tube_lines = sorted(current_tube_lines)
                    coordinates[tunnel_id]["tube-lines"] = updated_tube_lines
            else:
                lon1, lat1 = answer.get('lon1').value(), answer.get('lat1').value()
                lon2, lat2 = answer.get('lon2').value(), answer.get('lat2').value()
                coordinates[tunnel_id] = {
                    "tube-lines": [tube_line_name],
                    "from": {
                        "lon": lon1,
                        "lat": lat1,
                        # Drop the " Underground Station" suffix for display.
                        "station_name": answer.get("sta1-nam").value()[:-len(" Underground Station")],
                        "station_id": answer.get("sta1").id
                    },
                    "to": {
                        "lon": lon2,
                        "lat": lat2,
                        "station_name": answer.get("sta2-nam").value()[:-len(" Underground Station")],
                        "station_id": answer.get("sta2").id
                    }
                }
        return coordinates
@staticmethod
def find_shortest_path(session, ids):
    """
    Compute the shortest path between two stations.

    :param session: Grakn session
    :param ids: pair of station concept ids (from-station, to-station)
    :return: list of concept ids of the stations the path passes through
    """
    query = "compute path from " + ids[0] + ", to " + ids[1] + ", in [station, tunnel];"
    with session.transaction().read() as transaction:
        shortest_path_concept_list = list(execute_and_log(query, transaction))[0]
        # The response contains the different permutations for each path through stations. We are interested only in
        # which stations the path passes through.
        shortest_path_ids = []
        for shortest_path_node_id in shortest_path_concept_list.list():
            concepts_list= list(transaction.query("match $sta id " + shortest_path_node_id + "; $sta has name $nam; get;"))
            if len(concepts_list) > 0:
                concept = concepts_list[0]
                # Keep station nodes only; the path also contains tunnel relations.
                if concept.map().get("sta").type().label() == 'station':
                    shortest_path_ids.append(shortest_path_node_id)
    return shortest_path_ids
@staticmethod
def compute_centrality(session, query):
    """Run a Graql compute-centrality *query* and repackage the answer.

    :param session: Grakn session
    :param query: one of the COMPUTE_CENTRALITY_* queries
    :return: dict with "max_score" (largest measurement, used to scale the
        drawing) and "centrality_set", a list of
        {"measurement", "concept_ids"} entries
    """
    with session.transaction().read() as transaction:
        answers = list(execute_and_log(query, transaction))
        # Find the max centrality value so the visualisation can be scaled
        # up to a maximum radius.
        centrality_details = {
            "max_score": max(int(answer.measurement()) for answer in answers),
            "centrality_set": [],
        }
        for answer in answers:
            entry = {
                "measurement": answer.measurement(),
                "concept_ids": [concept_id for concept_id in answer.set()],
            }
            centrality_details["centrality_set"].append(entry)
    print(centrality_details)
    return centrality_details
def _transform_coords(self, lon, lat):
    """
    Transform grid coordinates to canvas coordinates, using the lon/lat
    bounds and canvas dimensions computed in __init__.

    :param lon: grid coordinate longitude
    :param lat: grid coordinate latitude
    :return: transformed (x, y) coordinates
    """
    return transform_coords(
        lon, lat, self.min_lon, self.max_lon, self.min_lat, self.max_lat, self.new_width, self.new_height
    )
def _draw_river_thames(self):
    """
    Draws a depiction of the River Thames, based on grid coordinates of the
    river's approximate centre-line.
    """
    # Grid coordinates (lat, lon) of a path along the centre-line of the River Thames.
    THAMES_WAYPOINTS = (
        (51.388592,-0.426814),(51.404487,-0.409858),(51.409538,-0.390457),(51.407749,-0.379153),(51.412011,-0.361944),(51.405223,-0.345663),(51.391487,-0.326768),
        (51.400803,-0.309137),(51.424900,-0.308209),(51.432526,-0.326262),(51.443253,-0.329383),(51.451718,-0.303738),(51.456028,-0.305088),(51.465068,-0.320778),(51.471164,-0.319176),
        (51.484088,-0.297243),(51.487082,-0.288217),(51.483983,-0.279444),(51.471742,-0.266622),(51.470839,-0.261951),(51.474607,-0.251973),(51.484884,-0.249098),(51.489681,-0.237854),
        (51.487997,-0.229271),(51.473779,-0.223284),(51.466401,-0.211568),(51.464329,-0.191527),(51.467697,-0.182686),(51.480180,-0.175627),(51.484764,-0.148011),(51.483788,-0.137282),
        (51.487129,-0.127519),(51.506112,-0.120438),(51.508943,-0.116210),(51.508916,-0.094474),(51.505297,-0.074797),(51.502198,-0.064648),(51.502131,-0.056944),(51.508155,-0.044370),
        (51.508035,-0.035337),(51.505244,-0.029843),(51.491952,-0.029285),(51.485566,-0.020659),(51.485272,-0.007892),(51.489935,-0.001047),(51.501490,-0.005360),(51.507260,0.001378),
        (51.506526,0.005648),(51.496922,0.021677),(51.497620,0.073815),(51.511549,0.090750),(51.516348,0.127855),(51.506580,0.167984),(51.503888,0.172763),(51.485042,0.184526),
        (51.485852,0.213678),(51.457240,0.280714)
    )
    # Map each waypoint into canvas coordinates before drawing.
    scaled_thames_coords = []
    for lat, lon in THAMES_WAYPOINTS:
        lon, lat = self._transform_coords(lon, lat)
        scaled_thames_coords.append((lon, lat))
    # A single wide, rounded polyline gives the river its shape.
    self._canvas.create_line(
        *scaled_thames_coords,
        width=self.RIVER_THAMES_WIDTH,
        fill=self.RIVER_THAMES_COLOUR,
        joinstyle=tk.ROUND
    )
def _draw(self):
"""
Draws everything in the visualiser
"""
print("\nDrawing ...")
coordinates = self.get_visualisation_data(self._session)
drawn_station_ids = []
for tunnel_id, details in coordinates.items():
TUBE_LINE_COLOURS = {
"Bakerloo": "#B36305",
"Central": "#E32017",
"Circle": "#FFD300",
"District": "#00782A",
"Hammersmith & City": "#F3A9BB",
"Jubilee": "#A0A5A9",
"Metropolitan": "#9B0056",
"Northern": "#000000",
"Piccadilly": "#003688",
"Victoria": "#0098D4",
"Waterloo & City": "#95CDBA",
}
# Draw tunnels
for i, tube_line_name in enumerate(details["tube-lines"]):
# Trigonometry to draw parallel lines with consistent distance between them
from_lon, from_lat = self._transform_coords(float(details["from"]["lon"]), float(details["from"]["lat"]))
to_lon, to_lat = self._transform_coords(float(details["to"]["lon"]), float(details["to"]["lat"]))
x = to_lon - from_lon
y = to_lat - from_lat
z = self.LINE_SPACING # desired orthogonal displacement of parallel lines
grad = y / x # gradient of the connection to draw
# The change in coordinates needed to achieve this
y = ((grad ** 2 + 1) ** -0.5) * z
x = grad * y
self._canvas.create_line(
from_lon - (i * x),
from_lat + (i * y),
to_lon - (i * x),
to_lat + (i * y),
fill=TUBE_LINE_COLOURS[tube_line_name],
width=self.LINE_WIDTH
)
# Draw stations
for station in [details["from"], details["to"]]:
station_id = station["station_id"]
if station_id not in drawn_station_ids: # draw each station only once
lon, lat = self._transform_coords(float(station["lon"]), float(station["lat"]))
# lon, lat = station["lon"], station["lat"]
stating_name = station["station_name"]
# Write label
station_label_tag = self._canvas.create_text(
lon + self.STATION_CIRCLE_RADIUS,
lat + self.STATION_CIRCLE_RADIUS,
text=stating_name,
anchor=tk.NW,
font=('Johnston', self.STATION_FONT_SIZE, 'bold'),
fill="#666"
)
# Draw circle
station_tag = self._canvas.create_circle(
lon,
lat,
self.STATION_CIRCLE_RADIUS,
fill="white",
outline="black"
)
self._station_canvas_coords[station_id] = (lon, lat)
self._station_point_ids[station_id] = station_tag
# station selection event handlers
def callback_wrapper(event, id=station_id): return self._on_station_select(id)
event_sequence = "<Shift-ButtonPress-1>"
self._canvas.tag_bind(station_tag, event_sequence, callback_wrapper)
self._canvas.tag_bind(station_label_tag, event_sequence, callback_wrapper)
drawn_station_ids.append(station_id)
print("\nDone! you can now interact with the visualiser.")
def _scan_start(self, event):
"""
Processes the start of dragging with the mouse to pan
:param event: event instance
"""
self._canvas.scan_mark(event.x, event.y)
self._scan_start_pos = event.x, event.y
self._scanning = True
def _scan_move(self, event):
"""
Processes moving the mouse during dragging to pan
:param event: event instance
"""
self._canvas.scan_dragto(event.x, event.y, gain=1)
self._scan_delta = event.x - self._scan_start_pos[0], event.y - self._scan_start_pos[1]
def _scan_stop(self, event):
"""
Processes the end of dragging with the mouse to pan
:param event: event instance
"""
self._x_pos += self._scan_delta[0]
self._y_pos += self._scan_delta[1]
self._scan_delta = (0, 0)
self._scanning = False
    def _key_handler(self, event):
        """
        Handle a key press event, dispatching to the desired behaviour
        :param event: event instance, including the character that was pressed
        """
        # Zoom bindings accept both the shifted and unshifted key variants.
        if event.char == "+" or event.char == "=":
            self.zoom("in")
        if event.char == "-" or event.char == "_":
            self.zoom("out")
        # Centrality overlays can only be enabled while none is currently shown.
        if not self._displaying_centrality:
            if event.char == self.STATION_DEGREE_KEY:
                self.display_centrality(self.COMPUTE_CENTRALITY_TUNNEL_DEGREE, self.STATION_DEGREE_MAX_RADIUS, self.STATION_DEGREE_COLOUR)
            if event.char == self.STATION_K_CORE_KEY:
                self.display_centrality(self.COMPUTE_CENTRALITY_TUNNEL_KCORE, self.STATION_K_CORE_MAX_RADIUS, self.STATION_K_CORE_COLOUR)
            if event.char == self.ROUTES_DEGREE_KEY:
                self.display_centrality(self.COMPUTE_CENTRALITY_ROUTE_DEGREE, self.ROUTES_DEGREE_MAX_RADIUS, self.ROUTES_DEGREE_COLOUR)
        # NOTE(review): source indentation was mangled; the clear bindings are placed
        # outside the centrality guard so clearing works in any state — confirm intent.
        if event.char == self.CLEAR_SHORTEST_PATH_KEY:
            self.clear_shortest_path()
        if event.char == self.CLEAR_ALL_KEY:
            self.clear_all()
    def _on_station_select(self, station_id):
        """
        To be called when the user selects a station.
        :param station_id: unique Naptan-id of the selected station
        """
        self._shortest_path_stations.append(station_id)
        # Highlight the selection with a coloured disc placed behind the station marker.
        x, y = self._get_station_point_coords(station_id)
        r = self._transform_to_current_scale(2 * self.STATION_CIRCLE_RADIUS)
        c = self._canvas.create_circle(x, y, r, fill=self.TUNNEL_SHORTEST_PATH_COLOUR, outline="")
        self._canvas.tag_lower(c, 1)  # keep the highlight behind the other map elements
        self._shortest_path_elements.append(c)
        print(self._shortest_path_stations)
        # Once at least two stations are selected, route between the last two chosen.
        if len(self._shortest_path_stations) > 1:
            shortest_path_ids = self.find_shortest_path(self._session, [self._shortest_path_stations[-2], self._shortest_path_stations[-1]])
            self.display_shortest_path(shortest_path_ids)
def display_shortest_path(self, shortest_path_ids):
"""
Renders the shortest path(s) from station to station
:param shortest_path_ids: response from Grakn server
"""
path_points = []
for station_id in shortest_path_ids:
# Add a point on the path for every station on the path
x0, y0, x1, y1 = self._canvas.coords(self._station_point_ids[station_id])
point = int((x0 + x1) / 2), int((y0 + y1) / 2)
path_points.append(point)
path = self._canvas.create_line(*path_points, width=self.TUNNEL_SHORTEST_PATH_WIDTH, fill=self.TUNNEL_SHORTEST_PATH_COLOUR, joinstyle=tk.ROUND, dash=(3, 3))
self._shortest_path_elements.append(path)
# Put the path behind the other visual elements on the map
self._canvas.tag_lower(path, 1)
def _get_station_point_coords(self, station_id):
"""
Get the canvas coordinates of a station from its ID
:param station_id: the ID of the desired station
:return: the centre-point coordinates of the circle used to represent the station
"""
x0, y0, x1, y1 = self._canvas.coords(self._station_point_ids[station_id])
point = (x0 + x1) / 2, (y0 + y1) / 2
return point
def clear_shortest_path(self):
"""
Delete from the canvas the elements being used to display shortest paths
"""
self._canvas.delete(*self._shortest_path_elements)
self._shortest_path_stations = []
    def clear_all(self):
        """Remove both the shortest-path overlay and any centrality overlay."""
        self.clear_shortest_path()
        self.hide_centrality()
def zoom(self, direction):
"""
"Zoom" the screen to magnify details. This entails scaling up the whole canvas, and some slightly complex
translation of the canvas to give the effect of zooming in on the canvas point that sits at the centre of
the window
:param direction: "in" or "out", whether to magnify or de-magnify the map
"""
if self._scanning:
print("Currently scanning. Stop scanning to zoom.")
else:
if direction == "in":
scaling = self.ZOOM_IN_SCALE
elif direction == "out":
scaling = self.ZOOM_OUT_SCALE
else:
raise ValueError("Call to zoom didn't specify a valid direction")
# First, scale up the canvas about its origin. Doing this about the canvas origin keeps adding other
# elements to the canvas simple, because then only scaling needs to be applied
self._canvas.scale('all', 0, 0, scaling, scaling)
# Update the persistent scale value
self._scale *= scaling
# Find the displacement to shift the canvas by, so that is appears to scale about the centre-point of the
# window
dx = -int((1 - scaling) * (self._x_pos - self.w / 2))
dy = -int((1 - scaling) * (self._y_pos - self.h / 2))
# Since we're shifting by this amount, also add this displacement to the persistent scan variables
self._x_pos += dx
self._y_pos += dy
# Set an anchor to drag from. I believe this point is arbitrary
self._canvas.scan_mark(0, 0)
# The canvas is being scaled about its origin, so we only need to drag the delta to centre the scaling
self._canvas.scan_dragto(dx, dy, gain=1)
def _transform_to_current_scale(self, val):
"""
Take a value, e.g. a coordinate, and scale it according to the current scaling of the canvas. This is mostly
for the benefot of adding or removing rendered elements after the map has been zoomed
:param val:
:return:
"""
return val * self._scale
def display_centrality(self, query, upper_radius, colour):
"""
Show an infographic-style visualisation of centrality, where the radius of the circles plotted corresponds to
the centrality score
:param query: graql centrality query as a string
:param upper_radius:
:param colour:
:return:
"""
centrality_details = self.compute_centrality(self._session, query)
for centrality_set in centrality_details["centrality_set"]:
radius = self._transform_to_current_scale(
(int(int(centrality_set["measurement"])) / centrality_details["max_score"]) * upper_radius
)
for concept_id in centrality_set["concept_ids"]:
print(concept_id, centrality_set["measurement"], centrality_details["max_score"])
station_element_id = self._station_point_ids[concept_id]
lon, lat = self._station_canvas_coords[concept_id]
lon = self._transform_to_current_scale(lon)
lat = self._transform_to_current_scale(lat)
centrality_element_id = self._canvas.create_circle(lon, lat, radius, fill=colour, outline="")
self._station_centrality_points[concept_id] = centrality_element_id
# Send the drawn elements to behind the station point
self._canvas.tag_lower(centrality_element_id, station_element_id)
print(self._station_centrality_points)
self._displaying_centrality = True
def hide_centrality(self):
if self._displaying_centrality:
for concept_id, point_id in self._station_centrality_points.items():
self._canvas.delete(point_id)
self._displaying_centrality = False
def init(shouldHalt):
    """Build the Tkinter app, connect it to the Grakn keyspace and optionally run the loop.

    :param shouldHalt: when True, block in Tk's main event loop
    """
    root = tk.Tk()  # Build the Tkinter application
    with GraknClient(uri="localhost:48555") as client, client.session(keyspace="tube_network") as session:
        tube_gui = TubeGui(session, root)
        if shouldHalt:
            root.mainloop()
if __name__ == "__main__":
    # Run interactively (halt in the Tk main loop) when invoked as a script.
    init(True)
|
<reponame>osukurikku/kuriso
# KrykiZZ fix for some country id's
# Maps ISO 3166-1 alpha-2 country codes to the numeric country ids used by the
# client protocol. A value of 0 marks codes with no dedicated id (BQ, AE, CW, XK, SX).
# NOTE(review): some values are shared (CD/CG both 40, SS/SD both 190) and AE maps
# to 0 — presumably intentional per the upstream "KrykiZZ" fix, but worth verifying.
countryCodes = {
    "IO": 104,
    "PS": 178,
    "LV": 132,
    "GI": 82,
    "MZ": 154,
    "BZ": 37,
    "TR": 217,
    "CV": 52,
    "BI": 26,
    "CM": 47,
    "JM": 109,
    "GU": 91,
    "CY": 54,
    "BW": 35,
    "KW": 120,
    "MY": 153,
    "SH": 193,
    "PG": 171,
    "PW": 180,
    "FM": 72,
    "HR": 97,
    "YT": 238,
    "JO": 110,
    "HK": 94,
    "MW": 151,
    "AZ": 18,
    "IQ": 105,
    "DO": 60,
    "RS": 239,
    "PK": 173,
    "BR": 31,
    "SN": 199,
    "LI": 126,
    "CD": 40,
    "MG": 137,
    "PE": 169,
    "CK": 45,
    "SJ": 195,
    "SZ": 205,
    "PM": 175,
    "LY": 133,
    "BV": 34,
    "KN": 117,
    "GR": 88,
    "CC": 39,
    "IN": 103,
    "DZ": 61,
    "SK": 196,
    "VC": 229,
    "GW": 92,
    "BQ": 0,
    "UM": 224,
    "AF": 5,
    "TZ": 221,
    "AO": 11,
    "AW": 17,
    "AE": 0,
    "PF": 170,
    "MK": 139,
    "AR": 13,
    "AQ": 12,
    "SL": 197,
    "HT": 98,
    "NF": 158,
    "SS": 190,
    "MU": 149,
    "VA": 228,
    "EC": 62,
    "LC": 125,
    "MX": 152,
    "CW": 0,
    "LT": 130,
    "GN": 85,
    "ZM": 241,
    "LU": 131,
    "NG": 159,
    "MS": 147,
    "MV": 150,
    "DJ": 57,
    "MQ": 145,
    "IE": 101,
    "CG": 40,
    "LK": 127,
    "NZ": 166,
    "KR": 119,
    "RO": 184,
    "KE": 112,
    "MF": 252,
    "SR": 201,
    "PA": 168,
    "KI": 115,
    "NL": 161,
    "DM": 59,
    "TC": 206,
    "KZ": 122,
    "CR": 50,
    "NR": 164,
    "UZ": 227,
    "GE": 79,
    "KP": 118,
    "PN": 176,
    "BY": 36,
    "NI": 160,
    "IR": 106,
    "VI": 232,
    "MA": 134,
    "NO": 162,
    "PT": 179,
    "PY": 181,
    "CU": 51,
    "SC": 189,
    "TT": 218,
    "CA": 38,
    "IT": 108,
    "GF": 80,
    "CN": 48,
    "GQ": 87,
    "LR": 128,
    "BA": 19,
    "TD": 207,
    "AU": 16,
    "MM": 141,
    "HU": 99,
    "EG": 64,
    "JE": 250,
    "IL": 102,
    "BL": 251,
    "BS": 32,
    "SE": 191,
    "MC": 135,
    "SD": 190,
    "ZA": 240,
    "IM": 249,
    "MO": 143,
    "GL": 83,
    "TV": 219,
    "FK": 71,
    "GB": 77,
    "NA": 155,
    "AM": 9,
    "WS": 236,
    "UY": 226,
    "EE": 63,
    "TL": 216,
    "BT": 33,
    "VU": 234,
    "WF": 235,
    "AX": 247,
    "TK": 212,
    "MN": 142,
    "SB": 188,
    "XK": 0,
    "BH": 25,
    "ID": 100,
    "SV": 203,
    "TG": 209,
    "BF": 23,
    "GG": 248,
    "IS": 107,
    "FJ": 70,
    "KG": 113,
    "BD": 21,
    "ZW": 243,
    "AI": 7,
    "NP": 163,
    "KH": 114,
    "BJ": 27,
    "EH": 65,
    "BE": 22,
    "SM": 198,
    "CX": 53,
    "TW": 220,
    "KM": 116,
    "AS": 14,
    "AT": 15,
    "LA": 123,
    "US": 225,
    "SY": 204,
    "SO": 200,
    "AD": 3,
    "OM": 167,
    "GT": 90,
    "CF": 41,
    "GY": 93,
    "VN": 233,
    "VE": 230,
    "PH": 172,
    "TM": 213,
    "VG": 231,
    "GP": 86,
    "CZ": 55,
    "GM": 84,
    "MR": 146,
    "TN": 214,
    "SI": 194,
    "TO": 215,
    "UG": 223,
    "SA": 187,
    "ST": 202,
    "QA": 182,
    "FI": 69,
    "CO": 49,
    "AG": 6,
    "PR": 177,
    "PL": 174,
    "GH": 81,
    "GA": 76,
    "TJ": 211,
    "SX": 0,
    "KY": 121,
    "BO": 30,
    "UA": 222,
    "MP": 144,
    "TF": 208,
    "LB": 124,
    "MT": 148,
    "FR": 74,
    "JP": 111,
    "RU": 185,
    "RW": 186,
    "NC": 156,
    "NE": 157,
    "BN": 29,
    "CI": 44,
    "TH": 210,
    "DE": 56,
    "ET": 68,
    "FO": 73,
    "YE": 237,
    "DK": 58,
    "BG": 24,
    "GS": 89,
    "HM": 95,
    "BB": 20,
    "BM": 28,
    "ML": 140,
    "SG": 192,
    "GD": 78,
    "NU": 165,
    "RE": 183,
    "LS": 129,
    "ER": 66,
    "ME": 242,
    "HN": 96,
    "AL": 8,
    "CH": 43,
    "MD": 136,
    "ES": 67,
    "CL": 46,
    "MH": 138
}
def get_country_id(code: str) -> int:
    """Return the numeric country id for a two-letter code, or 0 when unknown."""
    try:
        return countryCodes[code]
    except KeyError:
        return 0
def get_country_letters(code: int) -> str:
    """Reverse lookup: first two-letter code mapping to *code*, or "XX" if none does."""
    return next(
        (letters for letters, country_id in countryCodes.items() if country_id == code),
        "XX",
    )
|
<filename>mopidy_raspiradio/gui.py
import time
from luma.core import cmdline, error
from luma.core.interface.serial import i2c, spi
from luma.core.render import canvas
from luma.oled.device import ssd1306, ssd1322, ssd1325, ssd1331, sh1106
from PIL import ImageFont
import timers
class ProgressBar(object):
    """Horizontal track-progress widget: a baseline, a position marker and a time label."""
    # Layout constants in pixels. NOTE(review): this file targets Python 2 (see
    # dict.iteritems below), so the /2 offsets are integer divisions — confirm
    # before porting to Python 3, where they would become floats.
    __progress_padding = 2
    __progress_height = 10
    __progress_width = 5
    __progress_line_width = 2
    __progress_x_offset = __progress_width/2
    __progress_y_offset = __progress_height/2
    __time_format = '%M:%S'

    def __init__(self, y_pos, lcd_width, font):
        """
        :param y_pos: top edge of the widget on the display
        :param lcd_width: display width in pixels
        :param font: PIL font used for the elapsed/total time label
        """
        self.font = font
        y_pos += self.__progress_padding
        progress_line_y_pos = y_pos + self.__progress_y_offset
        # Baseline runs the full width, inset by half the marker width on each side.
        self.progress_line_extents = [(self.__progress_x_offset, progress_line_y_pos), (lcd_width - self.__progress_x_offset, progress_line_y_pos)]
        self.progress_marker_y_extents = (y_pos, y_pos + self.__progress_height)
        self.progress = 0  # elapsed playback time (seconds)
        self.track_length = None  # total length (seconds); None means "no track loaded"
        self.scale_factor = None  # pixels per second, derived in set_track_length()
        self.time_str = '- / -'  # label template; becomes '{} / <total>' once a track is set

    def draw(self, canvas):
        """Draw the position marker, the baseline and the time label onto *canvas*."""
        if self.track_length is None:
            progress_pos = 0
            final_time_str = '- / -'
        else:
            progress_pos = int(round(self.progress * self.scale_factor))
            final_time_str = self.time_str.format(self.format_time(self.progress))
        canvas.line([(progress_pos, self.progress_marker_y_extents[0]), (progress_pos, self.progress_marker_y_extents[1])], width=self.__progress_width)
        canvas.line(self.progress_line_extents, width=self.__progress_line_width)
        canvas.text((self.__progress_x_offset, self.progress_marker_y_extents[1]), final_time_str, font=self.font)

    def set_progress(self, progress):
        # Elapsed playback time in seconds.
        self.progress = progress

    def format_time(self, t):
        """Format *t* seconds as MM:SS."""
        return time.strftime(self.__time_format, time.gmtime(t))

    def set_track_length(self, track_length):
        """Set the total track length (seconds) and derive the pixels-per-second scale."""
        self.track_length = track_length
        self.scale_factor = float(self.progress_line_extents[1][0]) / self.track_length
        self.time_str = '{} / ' + self.format_time(track_length)
def find_center(total_width, object_width):
    """Return the left/top offset that centres an object of *object_width* in *total_width*."""
    margin = float(total_width - object_width) / 2
    return int(round(margin))
class UI(object):
    """Base class for a full-screen UI mode; concrete modes override the hooks below."""

    def __init__(self, lcd, device_args, config):
        # Defines the shared constructor signature; subclasses do the actual setup.
        pass

    def on_switch_to(self):
        # Hook: called when this UI becomes the active mode.
        pass

    def on_switch_from(self):
        # Hook: called when another UI takes over.
        pass
class Clock(UI):
    """Full-screen clock UI that redraws itself roughly once per second."""
    __clock_str = '%I:%M %p'

    def __init__(self, lcd, device_args, config):
        """
        :param lcd: luma display device to draw on
        :param device_args: parsed luma device arguments (provides width/height)
        :param config: extension config with font file/size and refresh_rate
        """
        UI.__init__(self, lcd, device_args, config)
        self.lcd = lcd
        self.font = ImageFont.truetype(font=config['clock_font_file'], size=config['clock_font_size'])
        self.update_thread = timers.UpdateInterval(1.0/config['refresh_rate'], self.tick)
        self.cur_time = time.time()
        # The vertical position only depends on the font height, so compute it once.
        _, height = self.font.getsize(self.format_time())
        self.y_pos = find_center(device_args.height, height)
        self.lcd_width = device_args.width

    def format_time(self):
        """Return the stored time formatted for display (e.g. '07:30 PM')."""
        return time.strftime(self.__clock_str, time.localtime(self.cur_time))

    def tick(self, force_redraw=False):
        """Redraw when the time changed, or unconditionally when *force_redraw* is set.

        Bug fix: force_redraw was previously accepted but ignored, so switching to
        the clock within the same second as the last tick left the screen stale.
        """
        new_time = time.time()
        if force_redraw or new_time != self.cur_time:
            self.cur_time = new_time
            self.draw()

    def start(self):
        """Start the periodic update thread."""
        self.update_thread.start()

    def stop(self):
        """Stop the periodic update thread."""
        self.update_thread.stop()

    def draw(self):
        """Render the centred time string onto the display."""
        with canvas(self.lcd) as cvs:
            time_str = self.format_time()
            width, _ = self.font.getsize(time_str)
            x_pos = find_center(self.lcd_width, width)
            cvs.text((x_pos, self.y_pos), time_str, font=self.font)

    def on_switch_to(self):
        # Force an immediate redraw so the screen is never stale, then resume ticking.
        self.tick(force_redraw=True)
        self.start()

    def on_switch_from(self):
        self.stop()
class PlaybackDisplay(UI):
    """Now-playing UI: stacked track metadata lines with a progress bar underneath."""
    __fields = ['title', 'artist', 'album']

    def __init__(self, lcd, device_args, config):
        """
        :param lcd: luma display device to draw on
        :param device_args: parsed luma device arguments (provides width)
        :param config: extension config with per-field font files/sizes
        """
        UI.__init__(self, lcd, device_args, config)
        self.lcd = lcd
        self.track_info = {}
        self.progress = 0
        self.fonts = {}
        self.fonts_y_pos = {}
        # Stack the metadata fields vertically, each with its own configured font.
        y_pos = 0
        for field in self.__fields:
            font = ImageFont.truetype(font=config[field + '_font_file'], size=config[field + '_font_size'])
            self.fonts[field] = font
            self.fonts_y_pos[field] = y_pos
            _, height = font.getsize('M')  # 'M' as a representative full-height glyph
            y_pos += height
        self.progress_bar = ProgressBar(y_pos,
                                        device_args.width,
                                        ImageFont.truetype(font=config['progress_bar_font_file'],
                                                           size=config['progress_bar_font_size']))

    def draw_trackinfo(self, draw):
        """Draw every configured metadata line.

        Bug fix: fields not yet set render as empty strings instead of raising
        KeyError when draw() runs before all of the setters have been called.
        """
        for field in self.__fields:
            draw.text((0, self.fonts_y_pos[field]), self.track_info.get(field, ''), font=self.fonts[field])

    def draw(self):
        """Render the full playback screen (metadata plus progress bar)."""
        with canvas(self.lcd) as cvs:
            self.draw_trackinfo(cvs)
            self.progress_bar.draw(cvs)

    def set_artist(self, artist):
        self.track_info['artist'] = artist

    def set_album(self, album):
        self.track_info['album'] = album

    def set_title(self, title):
        self.track_info['title'] = title

    def set_track(self, track):
        # NOTE(review): 'track' is stored but absent from __fields, so it is never drawn.
        self.track_info['track'] = track

    def set_track_length(self, length):
        self.progress_bar.set_track_length(length)

    def set_progress(self, progress):
        self.progress_bar.set_progress(progress)
class GuiModes:
    """Enumeration of the available UI modes."""
    CLOCK = 0     # idle time display
    PLAYBACK = 1  # now-playing display with progress bar
class Gui(object):
    """Owns the LCD device and the set of UI modes, and switches between them."""
    # Mode id -> UI class used to build the mode instances.
    __ui_types = { GuiModes.CLOCK: Clock,
                   GuiModes.PLAYBACK: PlaybackDisplay
                 }

    def __init__(self, config):
        """Create the LCD device from *config* and instantiate every UI mode."""
        self.mode = None
        self.uis = {}
        self.cur_ui = None
        # luma.core builds the device from a command-line-style argument string.
        parser = cmdline.create_parser('')
        device_args = parser.parse_args(config['lcd_config'].split(' '))
        try:
            lcd = cmdline.create_device(device_args)
        except error.Error as e:
            parser.error(e)  # prints the message and exits via SystemExit
        # Bug fix: dict.iteritems() is Python-2-only; items() iterates identically
        # on both Python 2 and Python 3.
        for ui_type, ui_cls in self.__ui_types.items():
            self.uis[ui_type] = ui_cls(lcd, device_args, config)

    def get_mode(self):
        """Return the currently selected GuiModes value (None before first set_mode)."""
        return self.mode

    def set_mode(self, mode):
        """Switch the active UI to *mode*, notifying the outgoing and incoming UIs."""
        self.mode = mode
        if self.cur_ui is not None:
            self.cur_ui.on_switch_from()
        self.cur_ui = self.uis[self.mode]
        self.cur_ui.on_switch_to()

    def get_ui(self):
        """Return the active UI instance."""
        return self.cur_ui
|
#!/usr/bin/env python
import sys
from cafysis.file_io.ninfo import NinfoFile
from cafysis.elements.ninfo import NinfoSet
# Command-line sanity check: exactly two ninfo files are required.
if len(sys.argv) != 3:
    print ("Usage: SCRIPT [input ninfo 1] [input ninfo 2]")
    sys.exit(2)

# Read the first ninfo file into its own NinfoSet.
nf = NinfoFile(sys.argv[1])
nf.open_to_read()
ns1 = NinfoSet()
nf.read_all(ns1)
nf.close()

# Read the second ninfo file (the file-handle variable is reused).
nf = NinfoFile(sys.argv[2])
nf.open_to_read()
ns2 = NinfoSet()
nf.read_all(ns2)
nf.close()

# Entry counts of the hbondDT lists (presumably hydrogen-bond "DT" native
# interactions — inferred from the attribute name; verify against cafysis).
nhb1 = len(ns1.hbondDTs)
nhb2 = len(ns2.hbondDTs)

# Match flags indexed by entry id. Do not use index 0 (ids are 1-based).
flag1 = [False]*(nhb1+1)
flag2 = [False]*(nhb2+1)
def value_check(x1, x2):
    """Return True when x2 lies strictly within +/- eps of x1."""
    eps = 0.01
    lower = x1 - eps
    upper = x1 + eps
    return lower < x2 < upper
# Pair each hbondDT entry of ninfo1 with its counterpart in ninfo2 — matched by
# particle pair (imp1, imp2), in either order — and report parameter mismatches.
# NOTE(review): indentation reconstructed from logic; flags are set only when the
# particle pair matched.
for ihb1 in range(nhb1):
    hb1 = ns1.hbondDTs[ihb1]
    id1 = hb1.id
    pair1 = (hb1.imp1, hb1.imp2)
    for ihb2 in range(nhb2):
        hb2 = ns2.hbondDTs[ihb2]
        id2 = hb2.id
        if pair1 in ((hb2.imp1, hb2.imp2), (hb2.imp2, hb2.imp1)):
            # Each entry may match at most once on either side; a duplicate is fatal.
            if flag1[id1]:
                print('ihb1 is already found', id1)
                sys.exit(2)
            if flag2[id2]:
                # NOTE(review): message text says 'ihb1' but this is the id2 side —
                # presumably a copy-paste slip in the message; behavior unchanged here.
                print('ihb1 is already found', id2)
                sys.exit(2)
            # dist
            if not value_check(hb1.native, hb2.native):
                print('native differ', id1, id2, hb1.native, hb2.native)
            if not value_check(hb1.factor, hb2.factor):
                print('factor differ', id1, id2, hb1.factor, hb2.factor)
            if not value_check(hb1.coef, hb2.coef):
                print('coef differ', id1, id2, hb1.coef, hb2.coef)
            # angl — the two angle terms may be stored in either order, so try
            # hb1.ang1 against hb2.ang1 first, then against hb2.ang2.
            # hb1.ang1 = hb2.ang1
            if (hb1.ang1_imp1, hb1.ang1_imp2, hb1.ang1_imp3) in ((hb2.ang1_imp1, hb2.ang1_imp2, hb2.ang1_imp3),
                                                                 (hb2.ang1_imp3, hb2.ang1_imp2, hb2.ang1_imp1)):
                if not value_check(hb1.ang1_native, hb2.ang1_native):
                    print('ang1_native differ', id1, id2, hb1.ang1_native, hb2.ang1_native)
                if not value_check(hb1.ang1_coef, hb2.ang1_coef):
                    print('ang1_coef differ', id1, id2, hb1.ang1_coef, hb2.ang1_coef)
                # hb1.ang2 must be the same as hb2.ang2
                if (hb1.ang2_imp1, hb1.ang2_imp2, hb1.ang2_imp3) not in ((hb2.ang2_imp1, hb2.ang2_imp2, hb2.ang2_imp3),
                                                                        (hb2.ang2_imp3, hb2.ang2_imp2, hb2.ang2_imp1)):
                    print('hb1.ang2 must equal to hb2.ang2')
                if not value_check(hb1.ang2_native, hb2.ang2_native):
                    print('ang2_native differ', id1, id2, hb1.ang2_native, hb2.ang2_native)
                if not value_check(hb1.ang2_coef, hb2.ang2_coef):
                    print('ang2_coef differ', id1, id2, hb1.ang2_coef, hb2.ang2_coef)
            # hb1.ang1 = hb2.ang2
            elif (hb1.ang1_imp1, hb1.ang1_imp2, hb1.ang1_imp3) in ((hb2.ang2_imp1, hb2.ang2_imp2, hb2.ang2_imp3),
                                                                   (hb2.ang2_imp3, hb2.ang2_imp2, hb2.ang2_imp1)):
                if not value_check(hb1.ang1_native, hb2.ang2_native):
                    print('ang1_native differ', id1, id2, hb1.ang1_native, hb2.ang2_native)
                if not value_check(hb1.ang1_coef, hb2.ang2_coef):
                    print('ang1_coef differ', id1, id2, hb1.ang1_coef, hb2.ang2_coef)
                # hb1.ang2 must be the same as hb2.ang1
                if (hb1.ang2_imp1, hb1.ang2_imp2, hb1.ang2_imp3) not in ((hb2.ang1_imp1, hb2.ang1_imp2, hb2.ang1_imp3),
                                                                        (hb2.ang1_imp3, hb2.ang1_imp2, hb2.ang1_imp1)):
                    print('hb1.ang2 must equal to hb2.ang1')
                if not value_check(hb1.ang2_native, hb2.ang1_native):
                    print('ang2_native differ', id1, id2, hb1.ang2_native, hb2.ang1_native)
                if not value_check(hb1.ang2_coef, hb2.ang1_coef):
                    print('ang2_coef differ', id1, id2, hb1.ang2_coef, hb2.ang1_coef)
            else:
                print("angl IDs do not match", id1, id2)
            # dihd
            # dih0 has a fixed identity; its parameters are compared unconditionally.
            if (hb1.dih0_imp1, hb1.dih0_imp2, hb1.dih0_imp3, hb1.dih0_imp4) not in ((hb2.dih0_imp1, hb2.dih0_imp2, hb2.dih0_imp3, hb2.dih0_imp4),
                                                                                   (hb2.dih0_imp4, hb2.dih0_imp3, hb2.dih0_imp2, hb2.dih0_imp1)):
                print('hb1.dih0 must equal to hb2.dih0')
            if not value_check(hb1.dih0_native, hb2.dih0_native):
                print('dih0_native differ', id1, id2, hb1.dih0_native, hb2.dih0_native)
            if not value_check(hb1.dih0_coef, hb2.dih0_coef):
                print('dih0_coef differ', id1, id2, hb1.dih0_coef, hb2.dih0_coef)
            # dih1/dih2 may be stored in either order, mirroring the angle handling.
            # hb1.dih1 = hb2.dih1
            if (hb1.dih1_imp1, hb1.dih1_imp2, hb1.dih1_imp3, hb1.dih1_imp4) in ((hb2.dih1_imp1, hb2.dih1_imp2, hb2.dih1_imp3, hb2.dih1_imp4),
                                                                                (hb2.dih1_imp4, hb2.dih1_imp3, hb2.dih1_imp2, hb2.dih1_imp1)):
                if not value_check(hb1.dih1_native, hb2.dih1_native):
                    print('dih1_native differ', id1, id2, hb1.dih1_native, hb2.dih1_native)
                if not value_check(hb1.dih1_coef, hb2.dih1_coef):
                    print('dih1_coef differ', id1, id2, hb1.dih1_coef, hb2.dih1_coef)
                # hb1.dih2 must equal to hb2.dih2
                if (hb1.dih2_imp1, hb1.dih2_imp2, hb1.dih2_imp3, hb1.dih2_imp4) not in ((hb2.dih2_imp1, hb2.dih2_imp2, hb2.dih2_imp3, hb2.dih2_imp4),
                                                                                       (hb2.dih2_imp4, hb2.dih2_imp3, hb2.dih2_imp2, hb2.dih2_imp1)):
                    print('hb1.dih2 must equal to hb2.dih2')
                if not value_check(hb1.dih2_native, hb2.dih2_native):
                    print('dih2_native differ', id1, id2, hb1.dih2_native, hb2.dih2_native)
                if not value_check(hb1.dih2_coef, hb2.dih2_coef):
                    print('dih2_coef differ', id1, id2, hb1.dih2_coef, hb2.dih2_coef)
            # hb1.dih1 = hb2.dih2
            elif (hb1.dih1_imp1, hb1.dih1_imp2, hb1.dih1_imp3, hb1.dih1_imp4) in ((hb2.dih2_imp1, hb2.dih2_imp2, hb2.dih2_imp3, hb2.dih2_imp4),
                                                                                  (hb2.dih2_imp4, hb2.dih2_imp3, hb2.dih2_imp2, hb2.dih2_imp1)):
                if not value_check(hb1.dih1_native, hb2.dih2_native):
                    print('dih1_native differ', id1, id2, hb1.dih1_native, hb2.dih2_native)
                if not value_check(hb1.dih1_coef, hb2.dih2_coef):
                    print('dih1_coef differ', id1, id2, hb1.dih1_coef, hb2.dih2_coef)
                # hb1.dih2 must equal to hb2.dih1
                if (hb1.dih2_imp1, hb1.dih2_imp2, hb1.dih2_imp3, hb1.dih2_imp4) not in ((hb2.dih1_imp1, hb2.dih1_imp2, hb2.dih1_imp3, hb2.dih1_imp4),
                                                                                       (hb2.dih1_imp4, hb2.dih1_imp3, hb2.dih1_imp2, hb2.dih1_imp1)):
                    print('hb1.dih2 must equal to hb2.dih1')
                if not value_check(hb1.dih2_native, hb2.dih1_native):
                    print('dih2_native differ', id1, id2, hb1.dih2_native, hb2.dih1_native)
                if not value_check(hb1.dih2_coef, hb2.dih1_coef):
                    print('dih2_coef differ', id1, id2, hb1.dih2_coef, hb2.dih1_coef)
            else:
                print("dih IDs do not match", id1, id2)
            # Record the successful pairing on both sides.
            flag1[id1] = True
            flag2[id2] = True
# Report entries that never found a counterpart on the other side.
for id1 in range(1, nhb1 + 1):
    if not flag1[id1]:
        print('No match found in ninfo2, but exists in ninfo1:', id1)

for id2 in range(1, nhb2 + 1):
    if not flag2[id2]:
        print('No match found in ninfo1, but exists in ninfo2:', id2)
|
#!/usr/bin/env python
"""Add an Intersight user by providing Cisco.com user ID and role via the Intersight API."""
import sys
import json
import argparse
from intersight.intersight_api_client import IntersightApiClient
from intersight.apis import iam_permission_api
from intersight.apis import iam_idp_reference_api
from intersight.apis import iam_user_api
def add_user(intersight_api_params, username, user_role='Account Administrator'):
    """Add (or verify) an Intersight user identified by a Cisco.com ID with a role.

    :param intersight_api_params: dict with api_base_uri, api_private_key_file, api_key_id
    :param username: Cisco.com user ID (matched against the user's Email attribute)
    :param user_role: Intersight permission (role) name to grant

    NOTE(review): on a successful add this mutates the module-level ``result`` dict,
    which is only defined under the __main__ guard below — importing this module and
    calling add_user() directly would raise NameError. Verify intent before reuse.
    """
    # Create Intersight API instance
    # ----------------------
    api_instance = IntersightApiClient(
        host=intersight_api_params['api_base_uri'],
        private_key=intersight_api_params['api_private_key_file'],
        api_key_id=intersight_api_params['api_key_id'],
    )

    # GET Permissions: resolve the role name to its moid.
    permissions_handle = iam_permission_api.IamPermissionApi(api_instance)
    kwargs = dict(filter="Name eq '%s'" % user_role)
    permissions_result = permissions_handle.iam_permissions_get(**kwargs)
    if permissions_result.results:
        # GET IdpReference: resolve the 'Cisco' identity provider to its moid.
        idp_reference_handle = iam_idp_reference_api.IamIdpReferenceApi(api_instance)
        idp_reference_name = 'Cisco'
        kwargs = dict(filter="Name eq '%s'" % idp_reference_name)
        idp_reference_result = idp_reference_handle.iam_idp_references_get(**kwargs)
        if idp_reference_result.results:
            user_matches = False
            # GET Users: check whether the user already exists with the same
            # permission and IdP reference.
            users_handle = iam_user_api.IamUserApi(api_instance)
            kwargs = dict(filter="Email eq '%s'" % username)
            users_result = users_handle.iam_users_get(**kwargs)
            if (
                users_result.results and
                users_result.results[0].permissions[0].moid == permissions_result.results[0].moid and
                users_result.results[0].idpreference.moid == idp_reference_result.results[0].moid
            ):
                user_matches = True
            if not user_matches:
                # POST Users with Permissions and IdpReference
                users_body = {
                    'Email': username,
                    'Idpreference': idp_reference_result.results[0].moid,
                    'Permissions': [permissions_result.results[0].moid],
                }
                users_result = users_handle.iam_users_post(users_body)
                result['changed'] = True  # see NOTE(review) in the docstring
            else:  # user exists and IdP/Permissions match
                print('User exists with requested role:', username)
        else:
            print('Could not find IdP', idp_reference_name)
    else:
        print('Invalid user role', user_role)
if __name__ == "__main__":
    # Ansible-style change-tracking dict, mutated by add_user() on success.
    result = dict(changed=False)
    try:
        # settings are pulled from the json string or JSON file passed as an arg
        parser = argparse.ArgumentParser()
        parser.add_argument('-i', '--id', required=True, help='Cisco ID of the user to add')
        roles = ['Account Administrator', 'Read-Only']
        parser.add_argument('-r', '--role', choices=roles, required=True, help='Role of the user to add')
        help_str = 'JSON file with Intersight API parameters. Default: intersight_api_params.json'
        parser.add_argument('-a', '--api_params', default='intersight_api_params.json', help=help_str)
        args = parser.parse_args()
        with open(args.api_params, 'r') as api_file:
            intersight_api_params = json.load(api_file)
        add_user(intersight_api_params, args.id, args.role)
    except Exception as err:
        # Top-level boundary: print the full traceback and exit non-zero.
        print("Exception:", str(err))
        import traceback
        print('-' * 60)
        traceback.print_exc(file=sys.stdout)
        print('-' * 60)
        sys.exit(1)
    # Emit the change-tracking result as JSON on success.
    print(json.dumps(result))
    sys.exit(0)
|
"""
General helper functions for Gabby Gums.
Function abilities include:
Functions for handling long text
Sending Error Logs to the Global error log channel
Getting Audit logs.
Check permissions on a channel.
Part of the Gabby Gums Discord Logger.
"""
import sys
import string
import asyncio
import logging
import traceback
from datetime import datetime, timedelta
from typing import Union, Optional, Dict, List, TYPE_CHECKING
import discord
from discord.ext import commands
if TYPE_CHECKING:
from bot import GGBot
log = logging.getLogger(__name__)
# Type aliases
GuildChannel = Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel]
async def send_long_msg(channel: Union[discord.TextChannel, commands.Context], message: str, code_block: bool = False, code_block_lang: str = "python"):
    """Send *message* to *channel*, splitting it into Discord-sized (<=2000 char) messages.

    :param channel: destination channel or command context (annotation fixed: the
        original used a bare list literal ``[A, B]`` where ``Union[A, B]`` was meant)
    :param message: the full text to send
    :param code_block: when True, wrap every chunk in a triple-backtick code block
    :param code_block_lang: syntax-highlight language tag for the code block
    """
    if code_block:
        if len(code_block_lang) > 0:
            code_block_lang = code_block_lang + "\n"
        code_block_start = f"```{code_block_lang}"
        code_block_end = "```"
        code_block_extra_length = len(code_block_start) + len(code_block_end)
        # Reserve room for the code-block fencing inside Discord's 2000-char limit.
        chunks = split_text(message, max_size=2000 - code_block_extra_length)
        message_chunks = [code_block_start + chunk + code_block_end for chunk in chunks]
    else:
        message_chunks = split_text(message, max_size=2000)

    for chunk in message_chunks:
        await channel.send(chunk)
def split_text(text: Union[str, List], max_size: int = 2000, delimiter: str = "\n") -> List[str]:
    """Split *text* into chunks no longer than *max_size*.

    Accepts either a single string (split on *delimiter*) or a pre-split list of
    fragments. When splitting is needed, every returned chunk ends with *delimiter*.

    :param text: string or list of fragments to split
    :param max_size: maximum length of each returned chunk
    :param delimiter: boundary used to split and rejoin fragments
    :raises ValueError: if a single fragment (plus delimiter) exceeds *max_size*
    """
    delim_length = len(delimiter)
    if isinstance(text, str):
        # Boundary fix: a string of exactly max_size fits in one message and is
        # returned as-is (previously only strictly-shorter strings were).
        if len(text) <= max_size:
            return [text]
        text = text.split(delimiter)
    else:
        if sum(len(i) for i in text) < max_size:
            # Bug fix: this branch previously joined with a hard-coded "\n" even
            # when a custom delimiter was requested.
            return [delimiter.join(text)]

    output = []
    tmp_str = ""
    count = 0
    for fragment in text:
        fragment_length = len(fragment) + delim_length
        if fragment_length > max_size:
            raise ValueError("A single line exceeded the max length. Can not split!")  # TODO: Find a better way than throwing an error.
        # Start a new chunk when the next fragment would overflow the current one.
        if count + fragment_length > max_size:
            output.append(tmp_str)
            tmp_str = ""
            count = 0
        count += fragment_length
        tmp_str += f"{fragment}{delimiter}"
    output.append(tmp_str)
    return output
async def log_error_msg(bot: 'GGBot', error_messages: Optional[Union[str, List[str], Exception]], header: Optional[str] = None, code_block: bool = False) -> bool:
    """
    Attempts to send a message to the Global Error Discord Channel.
    Returns False if the error_log_channel is not defined in the Config,
    if the error_log_channel can not be resolved to an actual channel, or if the message fails to send.
    Returns True if successful.

    :param bot: the bot instance (provides config and channel lookup)
    :param error_messages: a string, list of strings, or exception to report;
        None / empty input is treated as "nothing to log" and returns True
    :param header: optional line prepended to the message
    :param code_block: wrap the message in a code block (forced on for exceptions)
    """
    if 'error_log_channel' not in bot.config:
        return False  # No error log channel defined in config, can not log

    # Check to see if there was an error message passed and bail if there wasn't
    if error_messages is None:
        return True  # Should this be True? False isn't really accurate either....

    # If list is empty, return
    elif isinstance(error_messages, list):  # If type is list
        if len(error_messages) == 0:  # List is empty. Bail
            return True  # Should this be True? False isn't really accurate either....
        # Convert it into a single string.
        error_messages = "\n".join(error_messages)
    elif isinstance(error_messages, Exception):
        # The exception object itself is discarded; full_stack() reads the active
        # exception info from sys.exc_info(), so this must be called from an
        # except block for the traceback to be meaningful.
        error_messages = full_stack()
        code_block = True  # Override code block for exceptions.
    else:
        if error_messages == "":  # Empty
            return True  # Should this be True? False isn't really accurate either....

    # Try to get the channel from discord.py.
    error_log_channel = bot.get_channel(bot.config['error_log_channel'])
    if error_log_channel is None:
        return False

    # If the header option is used, include the header message at the front of the message
    if header is not None:
        error_messages = f"{header}\n{error_messages}"

    # Attempt to send the message
    try:
        await send_long_msg(error_log_channel, error_messages, code_block=code_block)
        return True
    except discord.DiscordException as e:
        log.exception(f"Error sending log to Global Error Discord Channel!: {e}")
        return False
def full_stack():
    """Return a formatted stack trace of the caller, including the active exception if any.

    Unlike traceback.format_exc(), the returned text also includes the frames that
    *led to* the current call site (limited to the last 5).
    """
    exc = sys.exc_info()[0]
    if exc is not None:
        # Start from the caller's frame so full_stack() itself is not in the trace.
        f = sys.exc_info()[-1].tb_frame.f_back
        stack = traceback.extract_stack(f, limit=5)
    else:
        stack = traceback.extract_stack(limit=5)[:-1]  # last one would be full_stack()
    trc = 'Traceback (most recent call last):\n'
    stackstr = trc + ''.join(traceback.format_list(stack))
    if exc is not None:
        exc_text = traceback.format_exc()
        # Bug fix: the original used str.lstrip(trc), which strips any leading
        # *characters* found in trc (not the prefix string), mangling the exception
        # text. Remove the duplicate header line as a prefix instead.
        if exc_text.startswith(trc):
            exc_text = exc_text[len(trc):]
        stackstr += '  ' + exc_text
    return stackstr
class MissingAuditLogPermissions(Exception):
    """Raised when the bot lacks the View Audit Log permission in a guild."""
    pass
async def get_audit_logs(guild: discord.Guild, audit_action: discord.AuditLogAction, target_user: Union[discord.User, discord.Member, discord.Object],
                         in_last: Optional[timedelta] = None, delay_before_fetch: int = 1) -> List[discord.AuditLogEntry]:
    """
    Fetches the audit logs from Discord API, then filters the logs to include
    only those that match the audit log action and target user in the last timedelta period specified.

    Additionally, you can specify a delay before fetching the logs using delay_before_fetch.
    This is useful for avoiding a race condition with Discord.

    Raises utils.miscUtils.MissingAuditLogPermissions if we are missing permissions to view the audit logs.

    :param guild: The Guild to fetch the audit logs from
    :param audit_action: The audit log type that we want to filter on
    :param target_user: The user we want to filter on (pass None to skip user filtering)
    :param in_last: How far back in time should we request logs for
    :param delay_before_fetch: How long should we wait (in seconds) before fetching the logs.
    :return: All the logs that match.
    """
    permissions: discord.Permissions = guild.me.guild_permissions
    if permissions.view_audit_log:
        # if in_last is None:
        #     in_last = timedelta.max
        # NOTE(review): utcnow() is naive; entry.created_at is presumably also naive
        # UTC in this discord.py version, making the comparison consistent — verify.
        after_time = datetime.utcnow() - in_last if in_last else datetime.min

        def predicate(entry: discord.AuditLogEntry):
            # Match on recency, and on the target user when one was requested.
            if target_user is not None and entry.target is not None:
                return entry.created_at > after_time and entry.target.id == target_user.id
            else:
                return entry.created_at > after_time

        await asyncio.sleep(delay_before_fetch)  # Sleep for a bit to ensure we don't hit a race condition with the Audit Log.
        audit_log_entries = await guild.audit_logs(action=audit_action, oldest_first=False).filter(predicate).flatten()
        return audit_log_entries
    else:
        raise MissingAuditLogPermissions
def prettify_permission_name(perm_name: str) -> str:
    """Convert an internal D.py permission name (e.g. ``send_tts_messages``) into a
    user-facing form (``Send TTS Messages``)."""
    pretty_perm_name = string.capwords(perm_name.replace('_', ' '))
    # capwords produces "Send Tts Messages"; restore the proper TTS acronym casing.
    if pretty_perm_name == "Send Tts Messages":
        pretty_perm_name = "Send TTS Messages"
    return pretty_perm_name
def check_permissions(channel: GuildChannel, additional_perms: Optional[Dict[str, bool]] = None) -> List[str]:
    """Checks to see if the channel has the default needed permissions (Read, Send, Embed) and the passed additional permissions.
    Returns a list of missing permissions.

    :param channel: The guild channel whose effective bot permissions are checked.
    :param additional_perms: Extra permission-name -> required-value pairs to check beyond the standard set.
    :raises TypeError: If any additional permission name is not a valid discord permission flag.
    :return: The names of all required permissions whose current value differs from the required one.
    """
    standard_perms = {'read_messages': True, 'send_messages': True, 'embed_links': True}  # Permissions that EVERY channel requires.
    additional_perms = {} if additional_perms is None else additional_perms
    missing_perms = []
    # make sure all the permissions we are checking are valid
    # BUG FIX: iterating a dict directly yields only keys, so the previous
    # `for perm, value in additional_perms:` raised ValueError for any
    # non-empty additional_perms.
    for perm in additional_perms:
        if perm not in discord.Permissions.VALID_FLAGS:
            raise TypeError('%r is not a valid permission name.' % perm)
    ch_perms: discord.Permissions = channel.guild.me.permissions_in(channel)
    for perm, value in ch_perms:
        # BUG FIX: the additional-perms branch previously compared against
        # standard_perms[perm] (KeyError for non-standard perms, and the
        # wrong expected value); it must compare the caller-requested value.
        if (perm in standard_perms and standard_perms[perm] != value) or (perm in additional_perms and additional_perms[perm] != value):
            missing_perms.append(perm)
    return missing_perms
|
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object, map, zip)
__author__ = "<NAME>"
import numpy as np
from astropy import units
from .frame_converter import convert_nuFnu_to_nuLnu_src, convert_nu_to_src
from .utils import *
__all__=['SED']
class SED(object):
    """
    Class handling the SED (spectral energy distribution).

    Observer-frame axes (``nu`` [Hz], ``nuFnu`` [erg cm^-2 s^-1]) are stored as
    astropy Quantities. When a redshift ``z`` and luminosity distance ``dl``
    are supplied, the source-frame axes (``nu_src``, ``nuLnu_src``) are derived
    via the frame_converter helpers. When ``log_log`` is True, stored values
    are log10 of the physical quantities and conversions de-log/re-log.
    """
    def __init__(self,name=None,
                 nu=None,
                 nuFnu=None,
                 nu_residuals=None,
                 residuals=None,
                 nu_src_residuals=None,
                 nuLnu_src_residuals=None,
                 dl=None,
                 z=None,
                 log_log=False):
        self.name=name
        # Fixed unit conventions for each axis.
        self._nu_units=units.Hz
        self._nuFnu_units=units.erg/units.cm**2/units.s
        self._nu_src_units=units.Hz
        self._nuLnu_src_units = units.erg/units.s
        # These assignments go through the property setters below, which
        # attach the units (None passes through unchanged).
        self.nu=(nu)
        self.nuFnu=(nuFnu)
        self._loglog=log_log
        # NOTE(review): _nu_src/_nuLnu are only created inside this branch;
        # reading nu_src/nuLnu_src on an SED built without both z and dl
        # raises AttributeError — confirm this is intended.
        if z is not None and dl is not None:
            # The nu_src setter takes the redshift, not a frequency array.
            self.nu_src= z
            #print('1')
            # The nuLnu_src setter takes a (z, dl) tuple.
            self.nuLnu_src = (z,dl)
        self.nu_residuals=nu_residuals
        self.residuals=residuals
        self.nu_src_residuals = nu_src_residuals
        self.nuLnu_src_residuals = nuLnu_src_residuals
    @property
    def nu(self):
        # Observer-frame frequencies (astropy Quantity in Hz), or None.
        return self._nu
    @nu.setter
    def nu(self,nu):
        # Attach Hz units; None is stored as-is.
        if nu is None:
            self._nu=nu
        else:
            self._nu=nu*self._nu_units
    @property
    def nuFnu(self):
        # Observer-frame fluxes (astropy Quantity in erg cm^-2 s^-1), or None.
        return self._nuFnu
    @nuFnu.setter
    def nuFnu(self,nuFnu):
        # Attach flux units; None is stored as-is.
        if nuFnu is None:
            self._nuFnu=nuFnu
        else:
            self._nuFnu = nuFnu * self._nuFnu_units
    @property
    def nu_src(self):
        # Source-frame frequencies, derived from self.nu by the setter below.
        return self._nu_src
    @nu_src.setter
    def nu_src(self, z):
        # Unconventional setter: the assigned value is the *redshift* used to
        # convert the stored observer-frame nu into the source frame.
        #print('->self._nu',self._nu)
        if self._nu is None:
            self._nu_src = self._nu
        else:
            if self._loglog is True:
                # Stored values are log10: de-log, convert frame, re-log.
                self._nu_src =np.log10( convert_nu_to_src(10**(self._nu.value),z,in_frame='obs') )* self._nu_units
            else:
                self._nu_src = convert_nu_to_src(self._nu.value,z,in_frame='obs') * self._nu_units
        #print('->self._nu_src', self._nu_src)
    @property
    def nuLnu_src(self):
        # Source-frame luminosity; note the backing attribute is named _nuLnu.
        return self._nuLnu
    @nuLnu_src.setter
    def nuLnu_src(self, t):
        # Unconventional setter: the assigned value is a (redshift, luminosity
        # distance) tuple used to convert the stored nuFnu into nuLnu_src.
        z,dl=t
        #print('2')
        #print('->',t,z,dl,self._loglog)
        if self._nuFnu is None:
            self._nuLnu = None
        else:
            if self._loglog is True:
                # Stored values are log10: de-log, convert frame, re-log.
                self._nuLnu =np.log10( convert_nuFnu_to_nuLnu_src(10**(self._nuFnu.value),z,'obs',dl)) * self._nuLnu_src_units
            else:
                self._nuLnu = convert_nuFnu_to_nuLnu_src(self._nuFnu.value, z, 'obs', dl) * self._nuLnu_src_units
        #print('->self._nuLnu', self._nuLnu.max())
    def get_model_points(self,log_log=False,frame='obs'):
        """Return (x, y) arrays for the requested frame ('obs' or 'src').

        When log_log is True, only points with y > 0 are kept and both axes
        are returned as log10 values.
        """
        check_frame(frame)
        if frame == 'obs':
            x, y = self.nu.value, self.nuFnu.value
        elif frame == 'src':
            x, y = self.nu_src.value, self.nuLnu_src.value
        else:
            # check_frame() should already have rejected anything else.
            unexpetced_behaviour()
        if log_log==True:
            msk=y>0
            x=np.log10(x[msk])
            y=np.log10(y[msk])
        return x, y
    def get_residuals(self, log_log=False):
        """Return (nu_residuals, residuals), log10-scaling residuals if log_log is True."""
        residuals = self.residuals
        nu_residuals = self.nu_residuals
        if log_log == False:
            return nu_residuals, residuals
        else:
            return nu_residuals, np.log10(residuals)
    def fill(self,nu=None,nuFnu=None,nu_residuals=None,residuals=None,log_log=False):
        """Replace the observer-frame data (and optionally the residuals) of this SED."""
        self._loglog=log_log
        #if nu is not None:
        self.nu=(nu)
        #if nuFnu is not None:
        self.nuFnu=(nuFnu)
        if residuals is not None:
            self.residuals=residuals
        if nu_residuals is not None:
            self.nu_residuals=nu_residuals
    def fill_nuLnu(self, nu_src_residuals=None, nuLnu_src_residuals=None, z=None, dl=None):
        """Recompute the source-frame data from z and dl, and optionally set source-frame residuals."""
        if z is not None and dl is not None:
            self.nu_src = (z)
            self.nuLnu_src = (z,dl)
        if nuLnu_src_residuals is not None:
            self.nuLnu_src_residuals = nuLnu_src_residuals
        if nu_src_residuals is not None:
            self.nu_src_residuals = nu_src_residuals
#
# class poly_shape(object):
# """
# Class for log-log polynomial shapes
# """
# def __init__(self,name=None,nu=None,nuFnu=None):
# self.name=name
# self.nu=nu
# self.nuFnu=nuFnu
#
# def get_model_points(self):
# return self.nu,self.nuFnu
#
#
|
<gh_stars>10-100
from attr import dataclass
import pytest
import numpy as np
import ezomero
import filecmp
import os
from omero.gateway import TagAnnotationWrapper
def test_omero_connection(conn, omero_params):
    """The fixture connection should be logged in as the configured user."""
    assert conn.getUser().getName() == omero_params[0]
# Test posts
############
def test_post_dataset(conn, project_structure, users_groups, timestamp):
    """Exercise ezomero.post_dataset: orphaned, in-project, bad project ID, and cross-group cases."""
    # Orphaned dataset, with description
    ds_test_name = 'test_post_dataset_' + timestamp
    did = ezomero.post_dataset(conn, ds_test_name, description='New test')
    assert conn.getObject("Dataset", did).getName() == ds_test_name
    assert conn.getObject("Dataset", did).getDescription() == "New test"
    # Dataset in default project, no description
    ds_test_name2 = 'test_post_dataset2_' + timestamp
    project_info = project_structure[0]
    pid = project_info[0][1]
    did2 = ezomero.post_dataset(conn, ds_test_name2, project_id=pid)
    ds = conn.getObjects("Dataset", opts={'project': pid})
    ds_names = [d.getName() for d in ds]
    assert ds_test_name2 in ds_names
    # Dataset in non-existing project ID
    ds_test_name3 = 'test_post_dataset3_' + timestamp
    pid = 99999999
    did3 = ezomero.post_dataset(conn, ds_test_name3, project_id=pid)
    assert did3 is None
    # Dataset in cross-group project, valid permissions
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    ds_test_name4 = 'test_post_dataset4_' + timestamp
    project_info = project_structure[0]
    pid = project_info[3][1] # proj3 (in test_group_2)
    did4 = ezomero.post_dataset(current_conn, ds_test_name4, project_id=pid)
    current_conn.SERVICE_OPTS.setOmeroGroup('-1')
    ds = current_conn.getObjects("Dataset", opts={'project': pid})
    ds_names = [d.getName() for d in ds]
    current_conn.close()
    assert ds_test_name4 in ds_names
    # Dataset in cross-group project, invalid permissions
    username = users_groups[1][2][0] # test_user3
    groupname = users_groups[0][1][0] # test_group_2
    current_conn = conn.suConn(username, groupname)
    ds_test_name5 = 'test_post_dataset5_' + timestamp
    project_info = project_structure[0]
    pid = project_info[1][1] # proj1 (in test_group_1)
    did5 = ezomero.post_dataset(current_conn, ds_test_name5, project_id=pid)
    current_conn.close()
    assert did5 is None
    # Dataset in cross-group project, valid permissions
    # across_groups flag unset
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    ds_test_name6 = 'test_post_dataset6_' + timestamp
    project_info = project_structure[0]
    pid = project_info[3][1] # proj3 (in test_group_2)
    did6 = ezomero.post_dataset(current_conn, ds_test_name6, project_id=pid,
                                across_groups=False)
    current_conn.close()
    assert did6 is None
    conn.deleteObjects("Dataset", [did, did2, did4], deleteAnns=True,
                       deleteChildren=True, wait=True)
def test_post_image(conn, project_structure, users_groups, timestamp,
                    image_fixture):
    """Exercise ezomero.post_image: in-dataset, orphaned, bad dataset, and cross-group cases."""
    dataset_info = project_structure[1]
    did = dataset_info[0][1]
    # Post image in dataset
    image_name = 'test_post_image_' + timestamp
    im_id = ezomero.post_image(conn, image_fixture, image_name,
                               description='This is an image',
                               dataset_id=did)
    assert conn.getObject("Image", im_id).getName() == image_name
    # Post orphaned image
    im_id2 = ezomero.post_image(conn, image_fixture, image_name)
    assert conn.getObject("Image", im_id2).getName() == image_name
    # Post image to non-existent dataset
    did3 = 999999999
    im_id3 = ezomero.post_image(conn, image_fixture, image_name,
                                description='This is an image',
                                dataset_id=did3)
    assert im_id3 is None
    # Post image cross-group, valid permissions
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    dataset_info = project_structure[1]
    did4 = dataset_info[3][1] # ds2 (in test_group_2)
    image_name = 'test_post_image_' + timestamp
    im_id4 = ezomero.post_image(current_conn, image_fixture, image_name,
                                description='This is an image',
                                dataset_id=did4)
    current_conn.SERVICE_OPTS.setOmeroGroup('-1')
    assert current_conn.getObject("Image", im_id4).getName() == image_name
    current_conn.close()
    # Post image cross-group, invalid permissions
    username = users_groups[1][2][0] # test_user3
    groupname = users_groups[0][1][0] # test_group_2
    current_conn = conn.suConn(username, groupname)
    dataset_info = project_structure[1]
    did5 = dataset_info[1][1] # ds1 (in test_group_1)
    image_name = 'test_post_image_' + timestamp
    im_id5 = ezomero.post_image(current_conn, image_fixture, image_name,
                                description='This is an image',
                                dataset_id=did5)
    current_conn.close()
    assert im_id5 is None
    # Post image cross-group, valid permissions, across_groups unset
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    dataset_info = project_structure[1]
    did6 = dataset_info[3][1] # ds2 (in test_group_2)
    image_name = 'test_post_image_' + timestamp
    im_id6 = ezomero.post_image(current_conn, image_fixture, image_name,
                                description='This is an image',
                                dataset_id=did6, across_groups=False)
    current_conn.close()
    assert im_id6 is None
    conn.deleteObjects("Image", [im_id, im_id2, im_id4], deleteAnns=True,
                       deleteChildren=True, wait=True)
def test_post_get_map_annotation(conn, project_structure, users_groups):
    """Round-trip key/value map annotations, including bad-target and cross-group cases."""
    image_info = project_structure[2]
    im_id = image_info[0][1]
    # This tests both ezomero.post_map_annotation and ezomero.get_map_annotation
    kv = {"key1": "value1",
          "key2": "value2"}
    ns = "jax.org/omeroutils/tests/v0"
    map_ann_id = ezomero.post_map_annotation(conn, "Image", im_id, kv, ns)
    kv_pairs = ezomero.get_map_annotation(conn, map_ann_id)
    assert kv_pairs["key2"] == "value2"
    # Test posting to non-existing object
    im_id2 = 999999999
    map_ann_id2 = ezomero.post_map_annotation(conn, "Image", im_id2, kv, ns)
    assert map_ann_id2 is None
    # Test posting cross-group
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id3 = image_info[2][1] # im2, in test_group_2
    map_ann_id3 = ezomero.post_map_annotation(current_conn, "Image", im_id3,
                                              kv, ns)
    kv_pairs3 = ezomero.get_map_annotation(current_conn, map_ann_id3)
    assert kv_pairs3["key2"] == "value2"
    current_conn.close()
    # Test posting to an invalid cross-group
    username = users_groups[1][2][0] # test_user3
    groupname = users_groups[0][1][0] # test_group_2
    current_conn = conn.suConn(username, groupname)
    im_id4 = image_info[1][1] # im1(in test_group_1)
    map_ann_id4 = ezomero.post_map_annotation(current_conn, "Image", im_id4,
                                              kv, ns)
    assert map_ann_id4 is None
    current_conn.close()
    # Test posting cross-group, across_groups unset
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id6 = image_info[2][1] # im2, in test_group_2
    map_ann_id6 = ezomero.post_map_annotation(current_conn, "Image", im_id6,
                                              kv, ns, across_groups=False)
    assert map_ann_id6 is None
    current_conn.close()
    conn.deleteObjects("Annotation", [map_ann_id, map_ann_id3],
                       deleteAnns=True, deleteChildren=True, wait=True)
def test_post_get_file_annotation(conn, project_structure, users_groups,
                                  tmp_path):
    """Round-trip file annotations, including bad-target and cross-group cases."""
    image_info = project_structure[2]
    im_id = image_info[0][1]
    # This tests both ezomero.post_file_annotation and
    # ezomero.get_file_annotation
    d = tmp_path / "input"
    d.mkdir()
    file_path = d / "hello.txt"
    file_path.write_text("hello world!")
    file_ann = str(file_path)
    ns = "jax.org/omeroutils/tests/v0"
    file_ann_id = ezomero.post_file_annotation(conn, "Image", im_id, file_ann,
                                               ns)
    return_ann = ezomero.get_file_annotation(conn, file_ann_id)
    assert filecmp.cmp(return_ann, file_ann)
    os.remove(return_ann)
    # Test posting to non-existing object
    im_id2 = 999999999
    file_ann_id2 = ezomero.post_file_annotation(conn, "Image", im_id2,
                                                file_ann, ns)
    assert file_ann_id2 is None
    # Test posting cross-group
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id3 = image_info[2][1] # im2, in test_group_2
    file_ann_id3 = ezomero.post_file_annotation(current_conn, "Image", im_id3,
                                                file_ann, ns)
    return_ann3 = ezomero.get_file_annotation(current_conn, file_ann_id3)
    assert filecmp.cmp(return_ann3, file_ann)
    os.remove(return_ann3)
    current_conn.close()
    # Test posting to an invalid cross-group
    username = users_groups[1][2][0] # test_user3
    groupname = users_groups[0][1][0] # test_group_2
    current_conn = conn.suConn(username, groupname)
    im_id4 = image_info[1][1] # im1(in test_group_1)
    file_ann_id4 = ezomero.post_file_annotation(current_conn, "Image", im_id4,
                                                file_ann, ns)
    assert file_ann_id4 is None
    current_conn.close()
    # Test posting cross-group, across_groups unset
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id5 = image_info[2][1] # im2, in test_group_2
    file_ann_id5 = ezomero.post_file_annotation(current_conn, "Image", im_id5,
                                                file_ann, ns,
                                                across_groups=False)
    assert file_ann_id5 is None
    current_conn.close()
    conn.deleteObjects("Annotation", [file_ann_id, file_ann_id3],
                       deleteAnns=True, deleteChildren=True, wait=True)
def test_post_roi(conn, project_structure, roi_fixture, users_groups):
    """Post an ROI with shapes/styling; check bad-image and invalid cross-group failures."""
    image_info = project_structure[2]
    im_id = image_info[0][1]
    roi_id = ezomero.post_roi(conn, im_id,
                              shapes=roi_fixture['shapes'],
                              name=roi_fixture['name'],
                              description=roi_fixture['desc'],
                              fill_color=roi_fixture['fill_color'],
                              stroke_color=roi_fixture['stroke_color'],
                              stroke_width=roi_fixture['stroke_width'])
    roi_in_omero = conn.getObject('Roi', roi_id)
    assert roi_in_omero.getName() == roi_fixture['name']
    assert roi_in_omero.getDescription() == roi_fixture['desc']
    # Test posting to a non-existing image
    im_id2 = 999999999
    with pytest.raises(Exception):  # TODO: verify which exception type
        _ = ezomero.post_roi(conn, im_id2,
                             shapes=roi_fixture['shapes'],
                             name=roi_fixture['name'],
                             description=roi_fixture['desc'],
                             fill_color=roi_fixture['fill_color'],
                             stroke_color=roi_fixture['stroke_color'],
                             stroke_width=roi_fixture['stroke_width'])
    # Test posting to an invalid cross-group
    username = users_groups[1][2][0] # test_user3
    groupname = users_groups[0][1][0] # test_group_2
    current_conn = conn.suConn(username, groupname)
    im_id4 = image_info[1][1] # im1(in test_group_1)
    with pytest.raises(Exception):  # TODO: verify which exception type
        _ = ezomero.post_roi(current_conn, im_id4,
                             shapes=roi_fixture['shapes'],
                             name=roi_fixture['name'],
                             description=roi_fixture['desc'],
                             fill_color=roi_fixture['fill_color'],
                             stroke_color=roi_fixture['stroke_color'],
                             stroke_width=roi_fixture['stroke_width'])
    current_conn.close()
    conn.deleteObjects("Roi", [roi_id], deleteAnns=True,
                       deleteChildren=True, wait=True)
def test_post_project(conn, timestamp):
    """Create projects with and without a description and verify the stored fields."""
    # No description
    new_proj = "test_post_project_" + timestamp
    pid = ezomero.post_project(conn, new_proj)
    assert conn.getObject("Project", pid).getName() == new_proj
    # With description
    new_proj2 = "test_post_project2_" + timestamp
    desc = "Now with a description"
    pid2 = ezomero.post_project(conn, new_proj2, description=desc)
    assert conn.getObject("Project", pid2).getDescription() == desc
    conn.deleteObjects("Project", [pid, pid2], deleteAnns=True,
                       deleteChildren=True, wait=True)
def test_post_screen(conn, timestamp):
    """Create screens with and without a description and verify the stored fields."""
    # No description
    new_screen = "test_post_screen_" + timestamp
    sid = ezomero.post_screen(conn, new_screen)
    assert conn.getObject("Screen", sid).getName() == new_screen
    # With description
    new_screen2 = "test_post_screen2_" + timestamp
    desc = "Now with a description"
    sid2 = ezomero.post_screen(conn, new_screen2, description=desc)
    assert conn.getObject("Screen", sid2).getDescription() == desc
    conn.deleteObjects("Screen", [sid, sid2], deleteAnns=True,
                       deleteChildren=True, wait=True)
def test_post_project_type(conn):
    """post_project must raise TypeError for non-string name/description arguments."""
    bad_calls = [
        ((conn, 123), {}),                          # non-string name
        ((conn, '123'), {'description': 1245}),     # non-string description
    ]
    for args, kwargs in bad_calls:
        with pytest.raises(TypeError):
            _ = ezomero.post_project(*args, **kwargs)
def test_post_screen_type(conn):
    """post_screen must raise TypeError for non-string name/description arguments."""
    bad_calls = [
        ((conn, 123), {}),                          # non-string name
        ((conn, '123'), {'description': 1245}),     # non-string description
    ]
    for args, kwargs in bad_calls:
        with pytest.raises(TypeError):
            _ = ezomero.post_screen(*args, **kwargs)
# Test gets
###########
def test_get_image(conn, project_structure, users_groups):
    """Exercise ezomero.get_image: defaults, bad ID, cross-group, xyzct, no_pixels, and cropping."""
    image_info = project_structure[2]
    im_id = image_info[0][1]
    # test default
    im, im_arr = ezomero.get_image(conn, im_id)
    assert im.getId() == im_id
    assert im_arr.shape == (1, 20, 201, 200, 3)
    assert im.getPixelsType() == im_arr.dtype
    # test non-existent id
    im_id2 = 999999999
    im2, im_arr2 = ezomero.get_image(conn, im_id2)
    assert im2 is None
    assert im_arr2 is None
    # test cross-group valid
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id3 = image_info[2][1] # im2, in test_group_2
    im3, im_arr3 = ezomero.get_image(current_conn, im_id3)
    assert im3.getId() == im_id3
    assert im_arr3.shape == (1, 20, 201, 200, 3)
    assert im3.getPixelsType() == im_arr3.dtype
    current_conn.close()
    # test cross-group invalid
    username = users_groups[1][2][0] # test_user3
    groupname = users_groups[0][1][0] # test_group_2
    current_conn = conn.suConn(username, groupname)
    im_id4 = image_info[1][1] # im1(in test_group_1)
    im4, im_arr4 = ezomero.get_image(current_conn, im_id4)
    assert im4 is None
    assert im_arr4 is None
    current_conn.close()
    # test cross-group valid, across_groups unset
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id5 = image_info[2][1] # im2, in test_group_2
    im5, im_arr5 = ezomero.get_image(current_conn, im_id5, across_groups=False)
    assert im5 is None
    assert im_arr5 is None
    current_conn.close()
    # test xyzct
    im, im_arr = ezomero.get_image(conn, im_id, xyzct=True)
    assert im_arr.shape == (200, 201, 20, 3, 1)
    # test no pixels
    im, im_arr = ezomero.get_image(conn, im_id, no_pixels=True)
    assert im_arr is None
    # test that IndexError comes up when pad=False
    with pytest.raises(IndexError):
        im, im_arr = ezomero.get_image(conn, im_id,
                                       start_coords=(195, 195, 18, 0, 0),
                                       axis_lengths=(10, 10, 3, 4, 3),
                                       pad=False)
    # test crop
    im, im_arr = ezomero.get_image(conn, im_id,
                                   start_coords=(101, 101, 10, 0, 0),
                                   axis_lengths=(10, 10, 3, 3, 1))
    assert im_arr.shape == (1, 3, 10, 10, 3)
    assert np.allclose(im_arr[0, 0, 0, 0, :], [0, 0, 255])
    # test crop with padding
    im, im_arr = ezomero.get_image(conn, im_id,
                                   start_coords=(195, 195, 18, 0, 0),
                                   axis_lengths=(10, 11, 3, 4, 3),
                                   pad=True)
    assert im_arr.shape == (3, 3, 11, 10, 4)
def test_get_tag_and_tag_ids(conn, project_structure):
    """Link a tag to an image, then read it back via get_tag_ids and get_tag."""
    image_info = project_structure[2]
    im_id = image_info[0][1]
    tag_ann = TagAnnotationWrapper(conn)
    tag_ann.setValue('test_tag')
    tag_ann.save()
    tag_id = tag_ann.getId()
    im = conn.getObject('Image', im_id)
    im.linkAnnotation(tag_ann)
    tag_id_from_im = ezomero.get_tag_ids(conn, 'Image', im_id)[0]
    assert tag_id_from_im == tag_id
    tag_text = ezomero.get_tag(conn, tag_id)
    assert tag_text == 'test_tag'
    conn.deleteObjects("Annotation",
                       [tag_id],
                       deleteAnns=True,
                       deleteChildren=True,
                       wait=True)
def test_get_image_ids(conn, project_structure, screen_structure,
                       users_groups):
    """Exercise ezomero.get_image_ids across project/dataset/well/plate sources and group scopes."""
    project_info = project_structure[0]
    dataset_info = project_structure[1]
    image_info = project_structure[2]
    # Based on project ID (also tests cross-group)
    proj3_id = project_info[3][1]
    im2_id = image_info[2][1] # im2, belongs to proj3/ds2
    im3_id = image_info[3][1] # im3, belongs to proj3/ds3
    im4_id = image_info[4][1] # im4, belongs to proj3/ds3
    proj3_im_ids = ezomero.get_image_ids(conn, project=proj3_id)
    assert set(proj3_im_ids) == set([im2_id, im3_id, im4_id])
    # Based on dataset ID
    ds0_id = dataset_info[0][1] # Belongs to proj0
    im0_id = image_info[0][1] # Belongs to ds0
    ds0_im_ids = ezomero.get_image_ids(conn, dataset=ds0_id)
    assert set(ds0_im_ids) == set([im0_id])
    # test cross-group valid
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    ds6_id = dataset_info[6][1] # dataset 6 in test_group_2
    im6_id = image_info[6][1] # im6, in ds6
    im7_id = image_info[7][1] # im7, in ds6
    ds6_im_ids = ezomero.get_image_ids(current_conn, dataset=ds6_id)
    assert set(ds6_im_ids) == set([im6_id, im7_id])
    current_conn.close()
    # test cross-group invalid
    username = users_groups[1][2][0] # test_user3
    groupname = users_groups[0][1][0] # test_group_2 (test_user3 is mbr)
    current_conn = conn.suConn(username, groupname)
    ds1_id = dataset_info[1][1] # ds1, in test_group1 (test_user3 not mbr)
    ds1_im_ids = ezomero.get_image_ids(current_conn, dataset=ds1_id)
    assert not ds1_im_ids
    # NOTE(review): the connection above is never closed before being rebound
    # below — possible connection leak; confirm and add current_conn.close().
    # test cross-group valid, across_groups unset
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    ds3_id = dataset_info[3][1] # ds3 in test_group_2
    ds3_im_ids = ezomero.get_image_ids(current_conn, dataset=ds3_id,
                                       across_groups=False)
    assert not ds3_im_ids
    current_conn.close()
    # Return nothing on bad input
    bad_im_ids = ezomero.get_image_ids(conn, dataset=999999)
    assert not bad_im_ids
    # Based on well ID
    well_id = screen_structure[1]
    plate_im_id1 = screen_structure[2]
    well_im_ids = ezomero.get_image_ids(conn, well=well_id)
    assert set(well_im_ids) == set([plate_im_id1])
    # Based on plate ID
    plate_id = screen_structure[0]
    plate_im_id2 = screen_structure[5]
    plate_im_ids = ezomero.get_image_ids(conn, plate=plate_id)
    assert set(plate_im_ids) == set([plate_im_id1, plate_im_id2])
def test_get_image_ids_params(conn):
    """get_image_ids must reject conflicting source arguments and wrong argument types."""
    # Mutually exclusive source containers raise ValueError.
    for conflicting in ({'project': 1, 'plate': 2}, {'dataset': 1, 'well': 2}):
        with pytest.raises(ValueError):
            _ = ezomero.get_image_ids(conn, **conflicting)
    # Non-integer IDs raise TypeError.
    for wrong_type in ({'dataset': 'test'}, {'project': 'test'},
                       {'well': 'test'}, {'plate': 'test'}):
        with pytest.raises(TypeError):
            _ = ezomero.get_image_ids(conn, **wrong_type)
def test_get_map_annotation_ids(conn, project_structure):
    """get_map_annotation_ids must return only annotations in the requested namespace."""
    kv = {"key1": "value1",
          "key2": "value2"}
    ns = "jax.org/omeroutils/tests/v0"
    image_info = project_structure[2]
    im_id = image_info[0][1]
    map_ann_id = ezomero.post_map_annotation(conn, "Image", im_id, kv, ns)
    map_ann_id2 = ezomero.post_map_annotation(conn, "Image", im_id, kv, ns)
    map_ann_id3 = ezomero.post_map_annotation(conn, "Image", im_id, kv, ns)
    ns2 = "different namespace"
    map_ann_id4 = ezomero.post_map_annotation(conn, "Image", im_id, kv, ns2)
    map_ann_ids = ezomero.get_map_annotation_ids(conn, "Image", im_id, ns=ns)
    good_ids = [map_ann_id, map_ann_id2, map_ann_id3]
    assert all([mid in map_ann_ids for mid in good_ids])
    assert map_ann_id4 not in map_ann_ids
    conn.deleteObjects("Annotation",
                       [map_ann_id, map_ann_id2, map_ann_id3, map_ann_id4],
                       deleteAnns=True,
                       deleteChildren=True,
                       wait=True)
def test_get_file_annotation_ids(conn, project_structure, tmp_path):
    """get_file_annotation_ids must return only annotations in the requested namespace."""
    image_info = project_structure[2]
    im_id = image_info[0][1]
    d = tmp_path / "input"
    d.mkdir()
    file_path = d / "hello.txt"
    file_path.write_text("hello world!")
    file_ann = str(file_path)
    ns = "jax.org/omeroutils/tests/v0"
    file_ann_id = ezomero.post_file_annotation(conn, "Image", im_id,
                                               file_ann, ns)
    file_ann_id2 = ezomero.post_file_annotation(conn, "Image", im_id,
                                                file_ann, ns)
    file_ann_id3 = ezomero.post_file_annotation(conn, "Image", im_id,
                                                file_ann, ns)
    ns2 = "different namespace"
    file_ann_id4 = ezomero.post_file_annotation(conn, "Image", im_id,
                                                file_ann, ns2)
    file_ann_ids = ezomero.get_file_annotation_ids(conn, "Image", im_id, ns=ns)
    good_ids = [file_ann_id, file_ann_id2, file_ann_id3]
    assert all([mid in file_ann_ids for mid in good_ids])
    assert file_ann_id4 not in file_ann_ids
    conn.deleteObjects("Annotation",
                       [file_ann_id, file_ann_id2, file_ann_id3, file_ann_id4],
                       deleteAnns=True,
                       deleteChildren=True,
                       wait=True)
def test_get_well_id(conn, screen_structure):
    """Resolve well IDs by (row, column) within a plate, including a non-existent position."""
    plate_id = screen_structure[0]
    well_id = screen_structure[1]
    well2_id = screen_structure[4]
    well_id_result = ezomero.get_well_id(conn, plate_id, row=1, column=1)
    well2_id_result = ezomero.get_well_id(conn, plate_id, row=2, column=2)
    assert well_id == well_id_result
    assert well2_id == well2_id_result
    assert ezomero.get_well_id(conn, plate_id, row=5, column=9) is None
def test_get_well_id_params(conn):
    """get_well_id must raise ValueError for wrongly-typed plate/row/column arguments."""
    invalid_args = (
        ("Plate name", 0, 0),   # plate must be an integer ID
        (9999, 'A', 0),         # row must be an integer index
        (9999, 0, 'B'),         # column must be an integer index
    )
    for plate, row, column in invalid_args:
        with pytest.raises(ValueError):
            _ = ezomero.get_well_id(conn, plate, row=row, column=column)
def test_get_group_id(conn):
    """The three built-in OMERO groups resolve to their well-known IDs."""
    expected_ids = {'system': 0, 'user': 1, 'guest': 2}
    for group_name, expected_gid in expected_ids.items():
        gid = ezomero.get_group_id(conn, group_name)
        assert gid == expected_gid
def test_get_user_id(conn, users_groups):
    """Resolve user IDs by username, including bad input and cross-group lookup."""
    # test straight usage
    username = users_groups[1][0][0] # test_user1
    uid = users_groups[1][0][1]
    user = ezomero.get_user_id(conn, username)
    assert user == uid
    # test invalid input
    user = ezomero.get_user_id(conn, "9999999999")
    assert user is None
    # test cross-group
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    target_username = users_groups[1][2][0] # test_user3
    target_uid = users_groups[1][2][1]
    user = ezomero.get_user_id(current_conn, target_username)
    assert user == target_uid
    current_conn.close()
# Test puts
###########
def test_put_map_annotation(conn, project_structure, users_groups):
    """Update map annotations in place, including cross-group and non-existent-ID cases."""
    kv = {"key1": "value1",
          "key2": "value2"}
    ns = "jax.org/omeroutils/tests/v0"
    image_info = project_structure[2]
    im_id = image_info[0][1]
    map_ann_id = ezomero.post_map_annotation(conn, "Image", im_id, kv, ns)
    kv = {"key1": "changed1",
          "key2": "value2"}
    ezomero.put_map_annotation(conn, map_ann_id, kv)
    kv_pairs = ezomero.get_map_annotation(conn, map_ann_id)
    assert kv_pairs['key1'] == kv['key1']
    # test cross-group
    kv = {"key1": "value1",
          "key2": "value2"}
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id2 = image_info[2][1] # im2, in test_group_2
    map_ann_id2 = ezomero.post_map_annotation(current_conn, "Image", im_id2,
                                              kv, ns)
    print(map_ann_id2)
    kv = {"key1": "changed1",
          "key2": "value2"}
    ezomero.put_map_annotation(current_conn, map_ann_id2, kv)
    kv_pairs = ezomero.get_map_annotation(current_conn, map_ann_id2)
    assert kv_pairs['key1'] == kv['key1']
    current_conn.close()
    # test cross-group, across_groups unset
    kv = {"key1": "value1",
          "key2": "value2"}
    username = users_groups[1][0][0] # test_user1
    groupname = users_groups[0][0][0] # test_group_1
    current_conn = conn.suConn(username, groupname)
    im_id3 = image_info[2][1] # im2, in test_group_2
    map_ann_id3 = ezomero.post_map_annotation(current_conn, "Image", im_id3,
                                              kv, ns)
    print(map_ann_id3)
    kv_changed = {"key1": "changed1",
                  "key2": "value2"}
    with pytest.raises(ValueError):
        ezomero.put_map_annotation(current_conn, map_ann_id3, kv_changed,
                                   across_groups=False)
    kv_pairs = ezomero.get_map_annotation(current_conn, map_ann_id3)
    assert kv_pairs['key1'] == kv['key1']
    current_conn.close()
    # test non-existent ID
    with pytest.raises(ValueError):
        ezomero.put_map_annotation(conn, 9999999, kv)
    # NOTE(review): map_ann_id3 is not included in the cleanup below and may
    # leak a test annotation on the server — confirm and add it if so.
    conn.deleteObjects("Annotation",
                       [map_ann_id, map_ann_id2],
                       deleteAnns=True,
                       deleteChildren=True,
                       wait=True)
|
import numpy as np
import pygame
from highway_env.road.lane import LineType
from highway_env.road.road import Road
from highway_env.vehicle.graphics import VehicleGraphics
class WorldSurface(pygame.Surface):
    """A pygame Surface that maps world coordinates [m] to pixels, holding pan/zoom state."""
    _initial_scaling = 5.5        # initial pixels-per-meter scale
    _initial_centering = [0.5, 0.5]  # fraction of the window at which the tracked position sits
    _scaling_factor = 1.3         # multiplicative zoom step per key press
    _moving_factor = 0.1          # pan step per key press, as a window fraction

    def __init__(self, size, flags, surf):
        super().__init__(size, flags, surf)
        self.origin = np.array([0, 0])
        self.scaling = self._initial_scaling
        # BUG FIX: the previous code aliased the class-level list, so panning
        # (which mutates centering_position in handle_event) changed the
        # shared default for every instance. Copy it per instance instead.
        self.centering_position = list(self._initial_centering)

    def pix(self, length):
        """Convert a world length [m] to a pixel count."""
        return int(length * self.scaling)

    def pos2pix(self, x, y):
        """Convert world (x, y) [m] to pixel coordinates relative to the display origin."""
        return self.pix(x - self.origin[0]), self.pix(y - self.origin[1])

    def vec2pix(self, vec):
        """Convert a world position vector to pixel coordinates."""
        return self.pos2pix(vec[0], vec[1])

    def move_display_window_to(self, position):
        """Set the origin so that `position` lands at the configured centering point of the window."""
        self.origin = position - np.array(
            [self.centering_position[0] * self.get_width() / self.scaling,
             self.centering_position[1] * self.get_height() / self.scaling]
        )

    def handle_event(self, event):
        """Adjust zoom (l/o keys) and horizontal centering (m/k keys) from keyboard events."""
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_l:
                self.scaling *= 1 / self._scaling_factor
            if event.key == pygame.K_o:
                self.scaling *= self._scaling_factor
            if event.key == pygame.K_m:
                self.centering_position[0] -= self._moving_factor
            if event.key == pygame.K_k:
                self.centering_position[0] += self._moving_factor
class LaneGraphics:
_stripe_spacing = 5
_stripe_length = 3
_stripe_width = 0.3
@classmethod
def display(klass, lane, surface):
stripes_count = int(2 * (surface.get_height() + surface.get_width()) / (klass._stripe_spacing * surface.scaling))
s_origin, _ = lane.local_coordinates(surface.origin)
s0 = (int(s_origin) // klass._stripe_spacing - stripes_count // 2) * klass._stripe_spacing
for side in range(2):
if lane.line_types[side] == LineType.STRIPED:
klass.striped_line(lane, surface, stripes_count, s0, side)
elif lane.line_types[side] == LineType.CONTINUOUS:
klass.continuous_curve(lane, surface, stripes_count, s0, side)
elif lane.line_types[side] == LineType.CONTINUOUS_LINE:
klass.continuous_line(lane, surface, stripes_count, s0, side)
@classmethod
def striped_line(klass, lane, surface, stripes_count, longitudinal, side):
"""
Draw a striped line on one side of a lane, on a surface.
:param lane: the lane
:param surface: the pygame surface
:param stripes_count: the number of stripes to draw
:param longitudinal: the longitudinal position of the first stripe [m]
:param side: which side of the road to draw [0:left, 1:right]
"""
starts = longitudinal + np.arange(stripes_count) * klass._stripe_spacing
ends = longitudinal + np.arange(stripes_count) * klass._stripe_spacing + klass._stripe_length
lats = [(side - 0.5) * lane.width_at(s) for s in starts]
klass.draw_stripes(lane, surface, starts, ends, lats)
@classmethod
def continuous_line(klass, lane, surface, stripes_count, longitudinal, side):
"""
Draw a continuous line on one side of a lane, on a surface.
:param lane: the lane
:param surface: the pygame surface
:param stripes_count: the number of stripes that would be drawn if the line was striped
:param longitudinal: the longitudinal position of the start of the line [m]
:param side: which side of the road to draw [0:left, 1:right]
"""
starts = [longitudinal + 0 * klass._stripe_spacing]
ends = [longitudinal + stripes_count * klass._stripe_spacing + klass._stripe_length]
lats = [(side - 0.5) * lane.width_at(s) for s in starts]
klass.draw_stripes(lane, surface, starts, ends, lats)
@classmethod
def draw_stripes(klass, lane, surface, starts, ends, lats):
    """
    Draw a set of stripes along a lane.

    :param lane: the lane
    :param surface: the surface to draw on
    :param starts: a list of starting longitudinal positions for each stripe [m]
    :param ends: a list of ending longitudinal positions for each stripe [m]
    :param lats: a list of lateral positions for each stripe [m]
    """
    # keep every stripe inside the lane's longitudinal extent
    starts = np.clip(starts, 0, lane.length)
    ends = np.clip(ends, 0, lane.length)
    line_width = max(surface.pix(klass.STRIPE_WIDTH), 1)
    for begin, finish, lat in zip(starts, ends, lats):
        # skip stripes that clipping reduced to (nearly) nothing
        if abs(begin - finish) > 0.5 * klass._stripe_length:
            pygame.draw.line(surface, surface.WHITE,
                             surface.vec2pix(lane.position(begin, lat)),
                             surface.vec2pix(lane.position(finish, lat)),
                             line_width)
@classmethod
def draw_ground(klass, lane, surface, color, width, draw_surface=None):
    """
    Fill the ground area of a lane with a solid polygon.

    :param lane: the lane whose footprint is filled
    :param surface: the surface used for coordinate conversions
    :param color: the fill color
    :param width: half-width of the filled band [m]
    :param draw_surface: optional surface to draw on (defaults to `surface`)
    """
    target = draw_surface or surface
    stripes_count = int(2 * (surface.get_height() + surface.get_width())
                        / (klass._stripe_spacing * surface.scaling))
    s_origin, _ = lane.local_coordinates(surface.origin)
    s0 = (int(s_origin) // klass._stripe_spacing - stripes_count // 2) * klass._stripe_spacing
    outline = []
    for side in range(2):
        longis = np.clip(s0 + np.arange(stripes_count) * klass._stripe_spacing, 0, lane.length)
        lat = 2 * (side - 0.5) * width
        edge = [surface.vec2pix(lane.position(longi, lat)) for longi in longis]
        # walk one edge forward and the other backward to close the polygon
        if side:
            edge.reverse()
        outline.extend(edge)
    pygame.draw.polygon(target, color, outline, 0)
class RoadGraphics:
    """
    A visualization of a road lanes and vehicles.
    """

    @staticmethod
    def display(road, surface):
        """
        Display the road lanes on a surface.

        :param road: the road to be displayed
        :param surface: the pygame surface
        """
        surface.fill(surface.GREY)
        graph = road.network.graph
        for origin in graph:
            for destination in graph[origin]:
                for lane in graph[origin][destination]:
                    LaneGraphics.display(lane, surface)

    @staticmethod
    def display_traffic(road, surface, simulation_frequency=15, offscreen=False):
        """
        Display the road vehicles on a surface.

        :param road: the road to be displayed
        :param surface: the pygame surface
        :param simulation_frequency: simulation frequency
        :param offscreen: render without displaying on a screen
        """
        # fading trajectory trails first, so vehicles are drawn on top
        if road.record_history:
            for vehicle in road.vehicles:
                VehicleGraphics.display_history(vehicle, surface,
                                                simulation=simulation_frequency,
                                                offscreen=offscreen)
        for vehicle in road.vehicles:
            VehicleGraphics.display(vehicle, surface, offscreen=offscreen)
import datetime
import json
import pytest
from django.conf import settings
from django.urls import reverse
from freezegun import freeze_time
from parkings.models import Parking
from ..utils import (
ALL_METHODS, check_method_status_codes, check_required_fields, delete,
patch, post, put)
# URL of the operator parking list endpoint, resolved once at import time.
list_url = reverse('operator:v1:parking-list')


def get_detail_url(obj):
    """Return the operator API detail URL for the given parking object."""
    return reverse('operator:v1:parking-detail', kwargs={'pk': obj.pk})
@pytest.fixture
def new_parking_data():
    """Valid payload for creating a parking via the operator API."""
    location = {'coordinates': [60.16896809536978, 24.942075065834615], 'type': 'Point'}
    return {
        'zone': 3,
        'registration_number': 'JLH-247',
        'time_start': '2016-12-10T20:34:38Z',
        'time_end': '2016-12-10T23:33:29Z',
        'location': location,
    }
@pytest.fixture
def updated_parking_data():
    """Valid payload for fully replacing an existing parking via PUT."""
    location = {'coordinates': [60.16899227603715, 24.9482582558314], 'type': 'Point'}
    return {
        'zone': 2,
        'registration_number': 'VSM-162',
        'time_start': '2016-12-12T20:34:38Z',
        'time_end': '2016-12-12T23:33:29Z',
        'location': location,
    }
def check_parking_data_matches_parking_object(parking_data, parking_obj):
    """
    Check that a parking data dict and an actual Parking object match.
    """
    # string or integer valued fields should match 1:1
    for field in ('registration_number', 'zone'):
        assert parking_data[field] == getattr(parking_obj, field)

    timestamp_format = '%Y-%m-%dT%H:%M:%SZ'
    assert parking_data['time_start'] == parking_obj.time_start.strftime(timestamp_format)

    if parking_obj.time_end:
        expected_time_end = parking_obj.time_end.strftime(timestamp_format)
    else:
        expected_time_end = None
    assert parking_data['time_end'] == expected_time_end

    if parking_obj.location:
        expected_location = json.loads(parking_obj.location.geojson)
    else:
        expected_location = None
    assert parking_data['location'] == expected_location
def check_response_parking_data(posted_parking_data, response_parking_data):
    """
    Check that parking data dict in a response has the right fields and matches the posted one.
    """
    expected_keys = {
        'id', 'zone', 'registration_number',
        'terminal_number',
        'time_start', 'time_end',
        'location', 'created_at', 'modified_at',
        'status',
    }
    returned_keys = set(response_parking_data)
    assert returned_keys == expected_keys
    # every field that was posted must round-trip unchanged
    for key in returned_keys.intersection(posted_parking_data):
        assert response_parking_data[key] == posted_parking_data[key]
def test_disallowed_methods(operator_api_client, parking):
    """HTTP methods unsupported by the list/detail endpoints must return 405."""
    list_disallowed_methods = ('get', 'put', 'patch', 'delete')
    check_method_status_codes(operator_api_client, list_url, list_disallowed_methods, 405)
    detail_disallowed_methods = ('get', 'post')
    check_method_status_codes(operator_api_client, get_detail_url(parking), detail_disallowed_methods, 405)


def test_unauthenticated_and_normal_users_cannot_do_anything(api_client, user_api_client, parking):
    """Anonymous users get 401 and authenticated non-operator users get 403."""
    urls = (list_url, get_detail_url(parking))
    check_method_status_codes(api_client, urls, ALL_METHODS, 401)
    check_method_status_codes(user_api_client, urls, ALL_METHODS, 403, error_code='permission_denied')


def test_parking_required_fields(operator_api_client, parking):
    """Both endpoints must report these fields as required."""
    expected_required_fields = {'registration_number', 'time_start', 'zone'}
    check_required_fields(operator_api_client, list_url, expected_required_fields)
    check_required_fields(operator_api_client, get_detail_url(parking), expected_required_fields, detail_endpoint=True)
def test_post_parking(operator_api_client, operator, new_parking_data):
    """POSTing valid data creates a Parking owned by the requesting operator."""
    response_parking_data = post(operator_api_client, list_url, new_parking_data)
    # check data in the response
    check_response_parking_data(new_parking_data, response_parking_data)
    # check the actual object
    new_parking = Parking.objects.get(id=response_parking_data['id'])
    check_parking_data_matches_parking_object(new_parking_data, new_parking)
    # operator should be autopopulated
    assert new_parking.operator == operator


def test_post_parking_optional_fields_omitted(operator_api_client, new_parking_data):
    """Omitting the optional fields is accepted and they are stored as None."""
    new_parking_data.pop('time_end')
    new_parking_data.pop('location')
    response_parking_data = post(operator_api_client, list_url, new_parking_data)
    # the omitted fields should come back as nulls
    new_parking_data['time_end'] = None
    new_parking_data['location'] = None
    check_response_parking_data(new_parking_data, response_parking_data)
    new_parking = Parking.objects.get(id=response_parking_data['id'])
    check_parking_data_matches_parking_object(new_parking_data, new_parking)


def test_post_parking_optional_fields_null(operator_api_client, new_parking_data):
    """Explicit nulls for the optional fields are accepted and stored."""
    new_parking_data['time_end'] = None
    new_parking_data['location'] = None
    response_parking_data = post(operator_api_client, list_url, new_parking_data)
    check_response_parking_data(new_parking_data, response_parking_data)
    new_parking = Parking.objects.get(id=response_parking_data['id'])
    check_parking_data_matches_parking_object(new_parking_data, new_parking)
def test_put_parking(operator_api_client, parking, updated_parking_data):
    """PUT replaces an existing parking; response and DB row both reflect it."""
    detail_url = get_detail_url(parking)
    response_parking_data = put(operator_api_client, detail_url, updated_parking_data)
    # check data in the response
    check_response_parking_data(updated_parking_data, response_parking_data)
    # check the actual object
    parking.refresh_from_db()
    check_parking_data_matches_parking_object(updated_parking_data, parking)


def test_put_parking_optional_fields_omitted(operator_api_client, parking, updated_parking_data):
    """PUT without the optional fields resets them to None."""
    detail_url = get_detail_url(parking)
    updated_parking_data.pop('time_end')
    updated_parking_data.pop('location')
    response_parking_data = put(operator_api_client, detail_url, updated_parking_data)
    # the omitted fields should come back as nulls
    updated_parking_data['time_end'] = None
    updated_parking_data['location'] = None
    check_response_parking_data(updated_parking_data, response_parking_data)
    parking.refresh_from_db()
    check_parking_data_matches_parking_object(updated_parking_data, parking)


def test_put_parking_optional_fields_null(operator_api_client, parking, updated_parking_data):
    """PUT with explicit nulls for the optional fields stores None."""
    detail_url = get_detail_url(parking)
    updated_parking_data['time_end'] = None
    updated_parking_data['location'] = None
    response_parking_data = put(operator_api_client, detail_url, updated_parking_data)
    check_response_parking_data(updated_parking_data, response_parking_data)
    parking.refresh_from_db()
    check_parking_data_matches_parking_object(updated_parking_data, parking)
def test_patch_parking(operator_api_client, parking):
    """PATCH updates a single field without touching the rest."""
    detail_url = get_detail_url(parking)
    # pick a zone value that differs from the current one (zones cycle 1..3)
    new_zone = parking.zone % 3 + 1
    response_parking_data = patch(operator_api_client, detail_url, {'zone': new_zone})
    # check data in the response
    check_response_parking_data({'zone': new_zone}, response_parking_data)
    # check the actual object
    parking.refresh_from_db()
    assert parking.zone == new_zone


def test_delete_parking(operator_api_client, parking):
    """DELETE removes the parking row from the database."""
    detail_url = get_detail_url(parking)
    delete(operator_api_client, detail_url)
    assert not Parking.objects.filter(id=parking.id).exists()


def test_operator_cannot_be_set(operator_api_client, operator, operator_2, new_parking_data, updated_parking_data):
    """The operator field is always the authenticated one; it cannot be overridden."""
    new_parking_data['operator'] = str(operator_2.id)
    # POST
    response_parking_data = post(operator_api_client, list_url, new_parking_data)
    new_parking = Parking.objects.get(id=response_parking_data['id'])
    assert new_parking.operator == operator
    # PUT
    detail_url = get_detail_url(new_parking)
    put(operator_api_client, detail_url, updated_parking_data)
    new_parking.refresh_from_db()
    assert new_parking.operator == operator
    # PATCH
    patch(operator_api_client, detail_url, {'operator': str(operator_2.id)})
    new_parking.refresh_from_db()
    assert new_parking.operator == operator


def test_cannot_modify_other_than_own_parkings(operator_2_api_client, parking, new_parking_data):
    """Another operator's parkings look like 404 (not 403) to hide their existence."""
    detail_url = get_detail_url(parking)
    put(operator_2_api_client, detail_url, new_parking_data, 404)
    patch(operator_2_api_client, detail_url, new_parking_data, 404)
    delete(operator_2_api_client, detail_url, 404)
def test_cannot_modify_parking_after_modify_period(operator_api_client, new_parking_data, updated_parking_data):
    """After the editable grace period, PUT and non-time_end PATCHes return 403."""
    start_time = datetime.datetime(2010, 1, 1, 12, 00)
    error_message = 'Grace period has passed. Only "time_end" can be updated via PATCH.'
    error_code = 'grace_period_over'
    with freeze_time(start_time):
        response_parking_data = post(operator_api_client, list_url, new_parking_data)
    new_parking = Parking.objects.get(id=response_parking_data['id'])
    # jump to just past the configured editable window
    end_time = start_time + settings.PARKKIHUBI_TIME_PARKINGS_EDITABLE + datetime.timedelta(minutes=1)
    with freeze_time(end_time):
        # PUT
        error_data = put(operator_api_client, get_detail_url(new_parking), updated_parking_data, 403)
        assert error_message in error_data['detail']
        assert error_data['code'] == error_code
        # PATCH other fields than 'time_end'
        for field_name in updated_parking_data:
            if field_name == 'time_end':
                continue
            parking_data = {field_name: updated_parking_data[field_name]}
            error_data = patch(operator_api_client, get_detail_url(new_parking), parking_data, 403)
            assert error_message in error_data['detail']
            assert error_data['code'] == error_code


def test_can_modify_time_end_after_modify_period(operator_api_client, new_parking_data):
    """time_end alone stays PATCHable even after the grace period."""
    start_time = datetime.datetime(2010, 1, 1, 12, 00)
    with freeze_time(start_time):
        response_parking_data = post(operator_api_client, list_url, new_parking_data)
    new_parking = Parking.objects.get(id=response_parking_data['id'])
    end_time = start_time + settings.PARKKIHUBI_TIME_PARKINGS_EDITABLE + datetime.timedelta(minutes=1)
    with freeze_time(end_time):
        parking_data = {'time_end': '2016-12-12T23:33:29Z'}
        patch(operator_api_client, get_detail_url(new_parking), parking_data, 200)
    new_parking.refresh_from_db()
    assert new_parking.time_end.day == 12  # old day was 10


def test_time_start_cannot_be_after_time_end(operator_api_client, parking, new_parking_data):
    """All write methods reject payloads where time_start is after time_end."""
    new_parking_data['time_start'] = '2116-12-10T23:33:29Z'
    detail_url = get_detail_url(parking)
    error_message = '"time_start" cannot be after "time_end".'
    # POST
    error_data = post(operator_api_client, list_url, new_parking_data, status_code=400)
    assert error_message in error_data['non_field_errors']
    # PUT
    error_data = put(operator_api_client, detail_url, new_parking_data, status_code=400)
    assert error_message in error_data['non_field_errors']
    # PATCH
    patch_data = {'time_start': '2116-12-10T23:33:29Z'}
    error_data = patch(operator_api_client, detail_url, patch_data, status_code=400)
    assert error_message in error_data['non_field_errors']


def test_parking_registration_number_special_chars(operator_api_client, new_parking_data):
    """Non-ASCII registration numbers survive the POST round trip."""
    new_parking_data['registration_number'] = 'ÅÄÖÆØ-:'
    response_parking_data = post(operator_api_client, list_url, new_parking_data)
    check_response_parking_data(new_parking_data, response_parking_data)
    new_parking = Parking.objects.get(id=response_parking_data['id'])
    check_parking_data_matches_parking_object(new_parking_data, new_parking)
|
#!/usr/bin/python
#####################################################################
# Cloud Routes Management Scripts: Get Stats
# ------------------------------------------------------------------
# Description:
# ------------------------------------------------------------------
# Pull newly created users from the database and automatically
# subscribe them to the MailChimp mailing list.
# ------------------------------------------------------------------
# Original Author: <NAME> (themetric)
# Maintainers:
# - <NAME> (themetric)
# - <NAME> (madflojo)
#####################################################################
# Imports
# ------------------------------------------------------------------
# Clean Paths for All
import sys
import yaml
import rethinkdb as r
from rethinkdb.errors import RqlDriverError, RqlRuntimeError
import requests
import json
import pprint
from runbookdb import RunbookDB
# Load Configuration
# ------------------------------------------------------------------
# Require the config file path as the single CLI argument.
if len(sys.argv) < 2:
    print("Hey, thats not how you launch this...")
    # Fix: '%' must be applied to the format string *inside* print();
    # the original `print("...") % sys.argv[0]` applied it to print()'s
    # return value (None) and raised TypeError before showing the usage.
    print("%s <config file>" % sys.argv[0])
    sys.exit(1)

configfile = sys.argv[1]
with open(configfile, 'r') as cfh:
    config = yaml.safe_load(cfh)

# Open External Connections
# ------------------------------------------------------------------
# RethinkDB Server
# [DONE] TODO move default connection into module
db = RunbookDB(configfile)
conn = db.connect()
# Helper Functions
# ------------------------------------------------------------------

def query_or_exit(query):
    """Run a RethinkDB query, exiting the script on driver/runtime errors."""
    try:
        return query.run(conn)
    except (RqlDriverError, RqlRuntimeError) as e:
        # Fix: '%' belongs inside print(); the original
        # `print("...") % e.message` raised TypeError (None % str), and
        # Python 3 exceptions have no `.message` attribute either.
        print("Got error while performing query: %s" % e)
        print("Exiting...")
        sys.exit(1)


def add_stat(name, value):
    """Append one StatHat EZ data point, prefixing the environment name."""
    msg['data'].append({
        'stat': "[%s] %s" % (config['envname'], name),
        "value": value
    })


# Gather Stats
# ------------------------------------------------------------------
msg = {
    "ezkey": config['stathat_key'],
    "data": []
}

# Get user count
add_stat("Total Users", query_or_exit(r.table('users').count()))

# Get upgraded user count and monitor count
result = query_or_exit(r.table('users').filter({'acttype': 'pro'}))

total_up_users = {'monthly': 0, 'yearly': 0, 'total': 0}
total_up_mons = {'monthly': 0, 'yearly': 0, 'total': 0}
for user in result:
    total_up_users['total'] = total_up_users['total'] + 1
    total_up_mons['total'] = total_up_mons['total'] + user['subplans']
    if "monthly" in user['subscription']:
        total_up_users['monthly'] = total_up_users['monthly'] + 1
        total_up_mons['monthly'] = total_up_mons['monthly'] + user['subplans']
    elif "yearly" in user['subscription']:
        total_up_users['yearly'] = total_up_users['yearly'] + 1
        total_up_mons['yearly'] = total_up_mons['yearly'] + user['subplans']

add_stat("Total Upgraded Users", total_up_users['total'])
add_stat("Total Purchased Monitors", total_up_mons['total'])
add_stat("Total Upgraded Users - Monthly Subscription", total_up_users['monthly'])
add_stat("Total Purchased Monitors - Monthly Subscription", total_up_mons['monthly'])
add_stat("Total Upgraded Users - Yearly Subscription", total_up_users['yearly'])
add_stat("Total Purchased Monitors - Yearly Subscription", total_up_mons['yearly'])

# Get monitor count
add_stat("Total Monitors", query_or_exit(r.table('monitors').count()))

# Get reaction count
add_stat("Total Reactions", query_or_exit(r.table('reactions').count()))

# Ship everything to StatHat in one request.
pprint.pprint(msg)
payload = json.dumps(msg)
headers = {'Content-Type': 'application/json'}
req = requests.post(url="http://api.stathat.com/ez", headers=headers, data=payload)
if 200 <= req.status_code <= 299:
    print("Successfully sent stats to stathat")
|
from __future__ import absolute_import
import re
from lxml import etree
from datetime import datetime
from datetime import timedelta
class TempestTestcaseList(object):
    """One entry from ``tempest run --list-tests``, renderable as tempest-style XML."""

    _FULL_CLASSNAME = re.compile(r'^(\w|\.)*')
    _TEST_PARAMETERS = re.compile(r'\[(.*)\]')
    _TEMPEST_UUID_RGX = re.compile(r'(\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b)')
    # the highest degree of accuracy that qtest will accept (no micro seconds)
    _date_time_format = '%Y-%m-%dT%H:%M:%SZ'

    def __init__(self, test_entry_string):
        """Creates a new Tempest Test Execution

        Args:
            test_entry_string: (str) A single line from 'tempest run --list-tests'
        """
        self._test_entry_string = test_entry_string.strip()
        self._xml_element = None  # built lazily and cached by `xml_element`
        self._time = '0.00'
        self._date_time_now = datetime.utcnow()  # use same time for all operations

    @property
    def xml_element(self):
        """Produces xml in the format produced by tempest (cached on first access).

        Returns:
            etree.Element : an xml testcase element
        """
        if self._xml_element is None:
            testcase = etree.Element('testcase')
            attributes = {
                'name': self._xml_name,
                'classname': self.classname,
                'time': self.time,
            }
            for attrib_name, attrib_value in attributes.items():
                if attrib_value:  # only add attribute if there is a value for it
                    testcase.attrib[attrib_name] = attrib_value
            properties = etree.Element('properties')
            for prop_name, prop_value in (('start_time', self.start_time),
                                          ('end_time', self.end_time),
                                          ('test_id', self.idempotent_id),
                                          ('test_step', 'false')):
                properties.append(etree.Element('property', {'name': prop_name, 'value': prop_value}))
            testcase.append(properties)
            self._xml_element = testcase
        return self._xml_element

    @property
    def start_time(self):
        """Gets the start time

        Returns:
            str: the start date of this test execution
        """
        return self._date_time_now.strftime(self._date_time_format)

    @property
    def end_time(self):
        """Gets the end time

        Returns:
            str: the end date of this test execution
        """
        begin = datetime.strptime(self.start_time, self._date_time_format)
        # if time is a fraction of a second round up to one second
        seconds = 1 if float(self.time) < 1 else self.time
        return (begin + timedelta(seconds=float(seconds))).strftime(self._date_time_format)

    @property
    def _xml_name(self):
        """The name as it appears in the XML

        not to be confused with `name`, which is the bare test name

        Returns:
            str: the value for name as it would appear in tempest xml
        """
        tags = ["id-{}".format(self.idempotent_id)] + list(self.test_tags)
        return "{name}[{params}]".format(name=self.name, params=','.join(tags))

    @property
    def name(self):
        """The name

        Returns:
            str: The name of the test
            None
        """
        try:
            full_name = self._FULL_CLASSNAME.match(self._test_entry_string).group(0)
        except AttributeError:
            return None
        return full_name.split('.')[-1]

    @property
    def classname(self):
        """The classname

        Returns:
            str: The classname of the test
            None
        """
        try:
            full_name = self._FULL_CLASSNAME.match(self._test_entry_string).group(0)
        except AttributeError:
            return None
        return '.'.join(full_name.split('.')[:-1])

    @property
    def idempotent_id(self):
        """The idempotent ID

        Returns:
            str: The UUID of the test
            None
        """
        uuid_match = self._TEMPEST_UUID_RGX.search(self._test_entry_string)
        return uuid_match.group(0) if uuid_match is not None else None

    @property
    def time(self):
        """The elapsed time of the test case

        Returns:
            str: the time in string format
        """
        return self._time

    @time.setter
    def time(self, value):
        """Sets the time property

        Args:
            value: (str) the time in a string
        """
        self._time = value

    @property
    def test_tags(self):
        """The tags associated with this test

        Returns:
            list: A list of strings (UUID-bearing params are excluded)
            None
        """
        param_match = self._TEST_PARAMETERS.search(self._test_entry_string)
        if param_match is None:
            return None
        params = param_match.group(1).split(',')
        return [param for param in params if self._TEMPEST_UUID_RGX.search(param) is None]

    @property
    def xml_failure_elements(self):
        """The xml child elements with the tag failure

        Returns:
            list: list of failure elements
        """
        return self.xml_element.findall('failure')

    @property
    def xml_error_elements(self):
        """The xml child elements with the tag error

        Returns:
            list: list of error elements
        """
        return self.xml_element.findall('error')
|
#!/usr/bin/env python
# This plan contains tests that demonstrate failures as well.
"""
This example shows how to display various data modelling techniques and their
associated statistics in Testplan. The models used are:
* linear regression
* classification
* clustering
"""
import os
import sys
from testplan import test_plan
from testplan.testing.multitest import MultiTest
from testplan.testing.multitest.suite import testsuite, testcase
from testplan.report.testing.styles import Style
from testplan.common.utils.timing import Timer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plot
import numpy as np
# Create a Matplotlib scatter plot.
def create_scatter_plot(title, x, y, label, c=None):
    """Render a scatter of (x, y) on the current Matplotlib figure with fixed axes."""
    plot.scatter(x, y, c=c, label=label)
    plot.grid()
    for label_setter, axis_text in ((plot.xlabel, "x"), (plot.ylabel, "y")):
        label_setter(axis_text)
    plot.xlim((0, 1))
    plot.ylim((-2, 2))
    plot.title(title)
# Use the original docstring, formatting
# it using kwargs via string interpolation.
# e.g. `foo: {foo}, bar: {bar}`.format(foo=2, bar=5)` -> 'foo: 2, bar: 5'
def interpolate_docstring(docstring, kwargs):
    """Interpolate parametrization values into a testcase docstring.

    e.g. ``"foo: {foo}, bar: {bar}"`` with ``{"foo": 2, "bar": 5}``
    becomes ``"foo: 2, bar: 5"``.
    """
    interpolated = docstring.format(**kwargs)
    return interpolated
@testsuite
class ModelExamplesSuite(object):
    """Suite fitting polynomial regression models of varying degree to cosine data."""

    def setup(self, env, result):
        """
        Load the raw data from the CSV file.
        Log this data as a table in the report.
        """
        # Load the raw cosine data from the CSV file.
        self.x, self.y = np.loadtxt(
            os.path.join(os.path.dirname(__file__), "cos_data.csv"),
            delimiter=",",
            unpack=True,
            skiprows=1,
        )
        # Evenly spaced points on [0, 1] used later for plotting predictions.
        self.x_test = np.linspace(0, 1, 100)
        # Log it to display in the report, this will show the first 5 and last 5
        # rows if there are more than 10 rows.
        data = [["X", "y"]] + [
            [self.x[i], self.y[i]] for i in range(len(self.x))
        ]
        result.table.log(data, description="Raw cosine data")

    @testcase(
        parameters={"degrees": [2, 3, 4, 5, 10, 15]},
        docstring_func=interpolate_docstring,
    )
    def polynomial_regression(self, env, result, degrees):
        """
        Create and train a polynomial regression function with {degrees} degrees
        of freedom. Check if the Mean Square Error (MSE) and time to train the
        model are within their thresholds. Display the train data and the model
        on a plot.
        """
        # This example was based on
        # http://scikit-learn.org/stable/auto_examples/model_selection/plot_underfitting_overfitting.html
        # Create the pipeline to train a polynomial regression with varying
        # degrees of freedom.
        polynomial_features = PolynomialFeatures(
            degree=degrees, include_bias=False
        )
        pipeline = Pipeline(
            [
                ("polynomial_features", polynomial_features),
                ("linear_regression", LinearRegression()),
            ]
        )
        # Train the model and record how long this takes.
        timer = Timer()
        with timer.record("train_model"):
            pipeline.fit(self.x[:, np.newaxis], self.y)
        # 10-fold cross-validated MSE (sklearn reports it negated).
        scores = cross_val_score(
            pipeline,
            self.x[:, np.newaxis],
            self.y,
            scoring="neg_mean_squared_error",
            cv=10,
        )
        # Check the Mean Square Error (MSE) and time to train the model are
        # within their thresholds.
        result.less(
            -scores.mean(),
            0.05,
            description="Mean Square Error threshold on test data",
        )
        result.less(
            timer["train_model"].elapsed,
            1,
            description="How long did the model take to train?",
        )
        # Display the train data and the model on a plot.
        create_scatter_plot(
            title="{} degrees of freedom model & Train data".format(degrees),
            x=self.x,
            y=self.y,
            label="Samples",
            c="black",
        )
        y_test = pipeline.predict(self.x_test[:, np.newaxis])
        plot.plot(self.x_test, y_test, label="Model")
        plot.legend(loc="best")
        result.matplot(plot)
# Hard-coding `pdf_path` and 'pdf_style' so that the downloadable example gives
# meaningful and presentable output. NOTE: this programmatic arguments passing
# approach will cause Testplan to ignore any command line arguments related to
# that functionality.
@test_plan(
    name="Basic Data Modelling Example",
    pdf_path=os.path.join(os.path.dirname(__file__), "report.pdf"),
    pdf_style=Style(passing="assertion-detail", failing="assertion-detail"),
)
def main(plan):
    """
    Testplan decorated main function to add and execute MultiTests.

    :return: Testplan result object.
    :rtype: :py:class:`~testplan.base.TestplanResult`
    """
    plan.add(MultiTest(name="Model Examples", suites=[ModelExamplesSuite()]))
if __name__ == "__main__":
    # exit code 0 when the plan passed (main() truthy), non-zero otherwise
    sys.exit(not main())
|
<gh_stars>0
from typing import Dict, List, Any, Union
import numpy as np
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from utils.text_processing_utils import Embedder
from utils.utils import load_obj
class WSDLightning(pl.LightningModule):
    """LightningModule wrapping a WSD sequence tagger.

    Builds the model and metric from the experiment config and implements
    the train/validation/test steps plus epoch-end aggregation.
    """

    def __init__(self, hparams: Dict[str, float], conf: DictConfig, tag_to_idx: Dict, embedder: Embedder,
                 num_steps: int = 0):
        """
        Args:
            hparams: hyperparameters recorded by Lightning.
            conf: full experiment configuration.
            tag_to_idx: mapping from tag name to index; must contain 'PAD'.
            embedder: callable turning raw sentences into embeddings.
            num_steps: steps per epoch, used to size warmup schedules.
        """
        super().__init__()
        self.conf = conf
        self.hparams = hparams
        self.tag_to_idx = tag_to_idx
        self.embedder = embedder
        self.num_steps = num_steps
        self.model = load_obj(self.conf.model.model_class)(embeddings_dim=self.conf.data.embedding_shape,
                                                           tag_to_idx=self.tag_to_idx, **self.conf.model.params)
        # if the metric we are using is a class (not a plain function), instantiate it once
        if self.conf.training.metric.functional is False:
            self.metric = load_obj(self.conf.training.metric.metric_class)(**self.conf.training.metric.params)

    def forward(self, texts, lengths, *args, **kwargs):
        """Run the underlying model on already-embedded texts."""
        return self.model(texts, lengths)

    def configure_optimizers(
            self
    ):
        """Build the optimizer and LR scheduler described in the config."""
        optimizer = load_obj(self.conf.training.optimizer.name)(self.model.parameters(),
                                                                **self.conf.training.optimizer.params)
        # NOTE(review): the condition reads `conf.training.scheduler.name` while both
        # branches read `conf.train_setup.scheduler.name` — confirm which config
        # section is canonical; behavior intentionally kept as-is.
        if 'transformers.get_linear_schedule_with_warmup' not in self.conf.training.scheduler.name:
            scheduler = load_obj(self.conf.train_setup.scheduler.name)(
                optimizer, **self.conf.train_setup.scheduler.params
            )
            scheduler_dict = {
                'scheduler': scheduler,
                'interval': self.conf.train_setup.scheduler.step,
                'monitor': self.conf.train_setup.scheduler.monitor,
                'name': 'scheduler',
            }
        else:
            # linear warmup: ~10% of the (padded) total number of training steps
            num_train_steps = self.num_steps * (self.conf.trainer.min_epochs + 7)
            num_warm = round(num_train_steps * 0.1)
            scheduler = load_obj(self.conf.train_setup.scheduler.name)(
                optimizer, num_training_steps=num_train_steps, num_warmup_steps=num_warm
            )
            scheduler_dict = {'scheduler': scheduler, 'name': 'scheduler'}
        return [optimizer], [scheduler_dict]

    def _compute_metric(self, tags, tag_preds, tag_preds_list):
        """Compute the configured metric, ignoring PAD positions.

        Shared by the train/validation/test steps (it was duplicated in each).

        Returns:
            tuple: (metric score as a tensor, PAD-filtered tags). The filtered
            tags are a plain list for functional metrics, a tensor otherwise.
        """
        pad_idx = self.tag_to_idx['PAD']
        if self.conf.training.metric.functional:
            # flatten to a plain list for sklearn-style functional metrics
            filtered_tags = [t for t in tags.flatten().tolist() if t != pad_idx]
            score = load_obj(self.conf.training.metric.metric_class)(filtered_tags, tag_preds_list,
                                                                     **self.conf.training.metric.params)
            return torch.tensor(score), filtered_tags
        filtered_tags = tags.flatten()
        filtered_tags = filtered_tags[filtered_tags != pad_idx]
        return self.metric(tag_preds, filtered_tags), filtered_tags

    def training_step(self, batch, *args, **kwargs):
        """One optimization step: embed, run the model, compute loss and metric."""
        sentences, lengths, tags = batch
        embeddings = self.embedder(sentences)
        tag_preds, loss, tag_preds_list = self.model(embeddings, lengths, tags)
        metric_score, _ = self._compute_metric(tags, tag_preds, tag_preds_list)
        log = {'train_metric': metric_score.item(), 'loss': loss.item()}
        # metric to be logged to a progress bar
        prog_log = {'train_metric': metric_score.item()}
        return {'loss': loss, 'log': log, 'progress_bar': prog_log}

    def validation_step(self, batch, *args, **kwargs):
        """Validation step; carries predictions and filtered tags to epoch end."""
        sentences, lengths, tags = batch
        embeddings = self.embedder(sentences)
        tag_preds, loss, tag_preds_list = self.model(embeddings, lengths, tags)
        metric_score, tags = self._compute_metric(tags, tag_preds, tag_preds_list)
        log = {'valid_loss': loss.item()}
        return {'valid_loss': loss, 'log': log, 'step_metric': metric_score, 'predicted_list': tag_preds_list,
                'predicted_seq': tag_preds, 'true_seq': tags}

    def validation_epoch_end(self, outputs: List[Any]) -> Dict:
        """Aggregate per-batch validation outputs into epoch-level scores."""
        mean_loss = np.stack([x['valid_loss'] for x in outputs]).mean()
        mean_metric = np.stack([x['step_metric'] for x in outputs]).mean()
        # Computing values for a metric
        if self.conf.training.metric.functional:
            # flatten batches of sequences (renamed to stop shadowing builtin `list`)
            y_true = [tag for x in outputs for tag in x['true_seq']]
            y_pred = [tag for x in outputs for tag in x['predicted_list']]
            valid_score = load_obj(self.conf.training.metric.metric_class)(y_true, y_pred,
                                                                           **self.conf.training.metric.params)
            valid_score = torch.tensor(valid_score)
        else:
            y_true = torch.cat([x['true_seq'] for x in outputs])
            y_pred = torch.cat([x['predicted_seq'] for x in outputs])
            valid_score = self.metric(y_pred.reshape(-1, 1), y_true.reshape(-1, 1))
        tensorboard_logs = {'valid_score': valid_score, 'valid_score_mean': mean_metric, 'valid_mean_loss': mean_loss}
        return {'validation_loss': mean_loss, 'log': tensorboard_logs, 'progress_bar': tensorboard_logs}

    def test_step(self, batch, *args, **kwargs):
        """Test step; mirrors validation_step with test-prefixed keys."""
        sentences, lengths, tags = batch
        embeddings = self.embedder(sentences)
        tag_preds, loss, tag_preds_list = self.model(embeddings, lengths, tags)
        metric_score, tags = self._compute_metric(tags, tag_preds, tag_preds_list)
        log = {'test_loss': loss.item()}
        return {'test_loss': loss, 'log': log, 'step_metric_test': metric_score, 'predicted_list': tag_preds_list,
                'predicted_seq': tag_preds, 'true_seq': tags}

    def test_epoch_end(self, outputs: List[Any]) -> Dict:
        """Aggregate per-batch test outputs into epoch-level scores."""
        mean_loss = np.stack([x['test_loss'] for x in outputs]).mean()
        # Computing values for a metric
        if self.conf.training.metric.functional:
            y_true = [tag for x in outputs for tag in x['true_seq']]
            y_pred = [tag for x in outputs for tag in x['predicted_list']]
            test_score = load_obj(self.conf.training.metric.metric_class)(y_true, y_pred,
                                                                          **self.conf.training.metric.params)
            test_score = torch.tensor(test_score)
        else:
            y_true = torch.cat([x['true_seq'] for x in outputs])
            y_pred = torch.cat([x['predicted_seq'] for x in outputs])
            test_score = self.metric(y_pred.reshape(-1, 1), y_true.reshape(-1, 1))
        # PytorchLightning doesn't like not one-element tensors in the output
        y_true = np.array(y_true).astype(int)
        y_pred = np.array(y_pred).astype(int)
        # Bug fix: 'predicted' previously carried y_true and 'true' carried y_pred.
        return {'mean_test_loss': mean_loss, 'test_score': test_score, 'predicted': y_pred, 'true': y_true}
|
<filename>-Telecom-Churn-Prediction-with-Boosting-/code.py
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split

# path - Path of file
# Code starts here
# Load the telecom churn dataset and carve out a 70/30 train/test split.
df = pd.read_csv(path)
# Drop the row identifier and the target column from the feature matrix.
X = df.drop(columns=['customerID', 'Churn'])
y = df['Churn']
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder

# Code starts here
# 'TotalCharges' arrives as strings with ' ' placeholders; coerce to float.
X_train['TotalCharges'] = X_train['TotalCharges'].replace(' ', np.nan)
X_test['TotalCharges'] = X_test['TotalCharges'].replace(' ', np.nan)
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
# BUG FIX (data leakage): impute missing values on BOTH splits with the TRAIN
# mean. The original filled the test split with the test split's own mean,
# leaking test-set statistics into preprocessing.
train_mean = X_train['TotalCharges'].mean()
X_train['TotalCharges'] = X_train['TotalCharges'].fillna(train_mean)
X_test['TotalCharges'] = X_test['TotalCharges'].fillna(train_mean)
print(X_train.isnull().sum())
cols = X_train.columns
# Public API instead of the private DataFrame._get_numeric_data().
numeric_cols = X_train.select_dtypes(include=np.number).columns
cat_cols = list(set(cols) - set(numeric_cols))
# BUG FIX (train/test inconsistency): fit one encoder per column on TRAIN
# only and reuse it on TEST. The original called fit_transform on the test
# split, which can assign different integer codes to the same category when
# the splits' category sets differ.
for col in cat_cols:
    le = LabelEncoder()
    X_train[col] = le.fit_transform(X_train[col])
    # Raises on categories unseen in training -- an explicit failure beats a
    # silently inconsistent encoding.
    X_test[col] = le.transform(X_test[col])
# Binary-encode the target labels.
y_train = y_train.replace({'No': 0, 'Yes': 1})
y_test = y_test.replace({'No': 0, 'Yes': 1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

# Code starts here
# Quick sanity peek at the prepared splits before modelling.
print(X_train.head(), X_test.head(), y_train.head(), y_test.head())

# Baseline boosting model: AdaBoost with default hyper-parameters.
ada_model = AdaBoostClassifier(random_state=0)
ada_model.fit(X_train, y_train)

# Evaluate on the held-out split: accuracy, confusion matrix, full report.
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test, y_pred)
ada_cm = confusion_matrix(y_test, y_pred)
ada_cr = classification_report(y_test, y_pred)
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV

# Parameter list
parameters = {'learning_rate': [0.1, 0.15, 0.2, 0.25, 0.3],
              'max_depth': range(1, 3)}

# Code starts here
# Un-tuned XGBoost baseline.
xgb_model = XGBClassifier(random_state=0)
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
xgb_score = accuracy_score(y_test, y_pred)
xgb_cm = confusion_matrix(y_test, y_pred)
xgb_cr = classification_report(y_test, y_pred)
print(xgb_score, xgb_cm, xgb_cr)

# Grid search over learning rate and tree depth, using the baseline
# estimator's configuration as the template.
clf_model = GridSearchCV(estimator=xgb_model, param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred = clf_model.predict(X_test)
clf_score = accuracy_score(y_test, y_pred)
clf_cm = confusion_matrix(y_test, y_pred)
clf_cr = classification_report(y_test, y_pred)
print(clf_score, clf_cm, clf_cr)
|
import numpy as np

# AES forward S-box (FIPS-197): aes_sbox[x] is SubBytes(x) for byte x.
aes_sbox = np.array([
    0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
    0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
    0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
    0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
    0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
    0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
    0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
    0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
    0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
    0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
    0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
    0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
    0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
    0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
    0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
    0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
])

# Inverse S-box: invert the permutation with a single vectorized fancy-index
# assignment. This replaces the original element-by-element Python loop, whose
# `& 0xfF` mask was redundant (every S-box entry is already in 0..255).
aes_sbox_inv = np.zeros(256, dtype=np.uint8)
aes_sbox_inv[aes_sbox] = np.arange(256, dtype=np.uint8)
|
<reponame>pulumi/pulumi-alicloud
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['InstanceArgs', 'Instance']
@pulumi.input_type
class InstanceArgs:
    """Constructor-argument bag for the RabbitMQ (AMQP) `Instance` resource.

    Machine-generated by the Pulumi Terraform bridge: every attribute is
    stored via ``pulumi.set`` and read via ``pulumi.get``, as the
    ``@pulumi.input_type`` decorator requires.
    """

    def __init__(__self__, *,
                 instance_type: pulumi.Input[str],
                 max_tps: pulumi.Input[str],
                 payment_type: pulumi.Input[str],
                 queue_capacity: pulumi.Input[str],
                 support_eip: pulumi.Input[bool],
                 instance_name: Optional[pulumi.Input[str]] = None,
                 logistics: Optional[pulumi.Input[str]] = None,
                 max_eip_tps: Optional[pulumi.Input[str]] = None,
                 modify_type: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 renewal_duration: Optional[pulumi.Input[int]] = None,
                 renewal_duration_unit: Optional[pulumi.Input[str]] = None,
                 renewal_status: Optional[pulumi.Input[str]] = None,
                 storage_size: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Instance resource.
        :param pulumi.Input[str] instance_type: The Instance Type. Valid values: `professional`, `vip`.
        :param pulumi.Input[str] max_tps: The peak TPS traffic. The smallest valid value is 1000 and the largest value is 100,000.
        :param pulumi.Input[str] payment_type: The payment type. Valid values: `Subscription`.
        :param pulumi.Input[str] queue_capacity: The queue capacity. The smallest value is 50 and the step size 5.
        :param pulumi.Input[bool] support_eip: Whether to support EIP.
        :param pulumi.Input[str] instance_name: The instance name.
        :param pulumi.Input[str] logistics: Undocumented upstream -- presumably hardware/logistics info; confirm against the provider schema.
        :param pulumi.Input[str] max_eip_tps: The max eip tps. It is valid when `support_eip` is true. The valid value is [128, 45000] with the step size 128.
        :param pulumi.Input[str] modify_type: The modify type. Valid values: `Downgrade`, `Upgrade`. It is required when updating other attributes.
        :param pulumi.Input[int] period: The period. Valid values: `1`, `12`, `2`, `24`, `3`, `6`.
        :param pulumi.Input[int] renewal_duration: RenewalDuration. Valid values: `1`, `12`, `2`, `3`, `6`.
        :param pulumi.Input[str] renewal_duration_unit: Auto-Renewal Cycle Unit Values Include: Month: Month. Year: Years. Valid values: `Month`, `Year`.
        :param pulumi.Input[str] renewal_status: Whether to renew an instance automatically or not. Default to "ManualRenewal".
               - `AutoRenewal`: Auto renewal.
               - `ManualRenewal`: Manual renewal.
               - `NotRenewal`: No renewal any longer. After you specify this value, Alibaba Cloud stop sending notification of instance expiry, and only gives a brief reminder on the third day before the instance expiry.
        :param pulumi.Input[str] storage_size: The storage size. It is valid when `instance_type` is vip.
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "instance_type", instance_type)
        pulumi.set(__self__, "max_tps", max_tps)
        pulumi.set(__self__, "payment_type", payment_type)
        pulumi.set(__self__, "queue_capacity", queue_capacity)
        pulumi.set(__self__, "support_eip", support_eip)
        # Optional arguments are only stored when supplied, so absent values
        # stay unset rather than becoming explicit Nones.
        if instance_name is not None:
            pulumi.set(__self__, "instance_name", instance_name)
        if logistics is not None:
            pulumi.set(__self__, "logistics", logistics)
        if max_eip_tps is not None:
            pulumi.set(__self__, "max_eip_tps", max_eip_tps)
        if modify_type is not None:
            pulumi.set(__self__, "modify_type", modify_type)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if renewal_duration is not None:
            pulumi.set(__self__, "renewal_duration", renewal_duration)
        if renewal_duration_unit is not None:
            pulumi.set(__self__, "renewal_duration_unit", renewal_duration_unit)
        if renewal_status is not None:
            pulumi.set(__self__, "renewal_status", renewal_status)
        if storage_size is not None:
            pulumi.set(__self__, "storage_size", storage_size)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Input[str]:
        """
        The Instance Type. Valid values: `professional`, `vip`.
        """
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter(name="maxTps")
    def max_tps(self) -> pulumi.Input[str]:
        """
        The peak TPS traffic. The smallest valid value is 1000 and the largest value is 100,000.
        """
        return pulumi.get(self, "max_tps")

    @max_tps.setter
    def max_tps(self, value: pulumi.Input[str]):
        pulumi.set(self, "max_tps", value)

    @property
    @pulumi.getter(name="paymentType")
    def payment_type(self) -> pulumi.Input[str]:
        """
        The payment type. Valid values: `Subscription`.
        """
        return pulumi.get(self, "payment_type")

    @payment_type.setter
    def payment_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "payment_type", value)

    @property
    @pulumi.getter(name="queueCapacity")
    def queue_capacity(self) -> pulumi.Input[str]:
        """
        The queue capacity. The smallest value is 50 and the step size 5.
        """
        return pulumi.get(self, "queue_capacity")

    @queue_capacity.setter
    def queue_capacity(self, value: pulumi.Input[str]):
        pulumi.set(self, "queue_capacity", value)

    @property
    @pulumi.getter(name="supportEip")
    def support_eip(self) -> pulumi.Input[bool]:
        """
        Whether to support EIP.
        """
        return pulumi.get(self, "support_eip")

    @support_eip.setter
    def support_eip(self, value: pulumi.Input[bool]):
        pulumi.set(self, "support_eip", value)

    @property
    @pulumi.getter(name="instanceName")
    def instance_name(self) -> Optional[pulumi.Input[str]]:
        """
        The instance name.
        """
        return pulumi.get(self, "instance_name")

    @instance_name.setter
    def instance_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_name", value)

    @property
    @pulumi.getter
    def logistics(self) -> Optional[pulumi.Input[str]]:
        """
        Undocumented upstream -- presumably hardware/logistics information;
        confirm against the provider schema before relying on it.
        """
        return pulumi.get(self, "logistics")

    @logistics.setter
    def logistics(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logistics", value)

    @property
    @pulumi.getter(name="maxEipTps")
    def max_eip_tps(self) -> Optional[pulumi.Input[str]]:
        """
        The max eip tps. It is valid when `support_eip` is true. The valid value is [128, 45000] with the step size 128.
        """
        return pulumi.get(self, "max_eip_tps")

    @max_eip_tps.setter
    def max_eip_tps(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_eip_tps", value)

    @property
    @pulumi.getter(name="modifyType")
    def modify_type(self) -> Optional[pulumi.Input[str]]:
        """
        The modify type. Valid values: `Downgrade`, `Upgrade`. It is required when updating other attributes.
        """
        return pulumi.get(self, "modify_type")

    @modify_type.setter
    def modify_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "modify_type", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The period. Valid values: `1`, `12`, `2`, `24`, `3`, `6`.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter(name="renewalDuration")
    def renewal_duration(self) -> Optional[pulumi.Input[int]]:
        """
        RenewalDuration. Valid values: `1`, `12`, `2`, `3`, `6`.
        """
        return pulumi.get(self, "renewal_duration")

    @renewal_duration.setter
    def renewal_duration(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "renewal_duration", value)

    @property
    @pulumi.getter(name="renewalDurationUnit")
    def renewal_duration_unit(self) -> Optional[pulumi.Input[str]]:
        """
        Auto-Renewal Cycle Unit Values Include: Month: Month. Year: Years. Valid values: `Month`, `Year`.
        """
        return pulumi.get(self, "renewal_duration_unit")

    @renewal_duration_unit.setter
    def renewal_duration_unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "renewal_duration_unit", value)

    @property
    @pulumi.getter(name="renewalStatus")
    def renewal_status(self) -> Optional[pulumi.Input[str]]:
        """
        Whether to renew an instance automatically or not. Default to "ManualRenewal".
        - `AutoRenewal`: Auto renewal.
        - `ManualRenewal`: Manual renewal.
        - `NotRenewal`: No renewal any longer. After you specify this value, Alibaba Cloud stop sending notification of instance expiry, and only gives a brief reminder on the third day before the instance expiry.
        """
        return pulumi.get(self, "renewal_status")

    @renewal_status.setter
    def renewal_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "renewal_status", value)

    @property
    @pulumi.getter(name="storageSize")
    def storage_size(self) -> Optional[pulumi.Input[str]]:
        """
        The storage size. It is valid when `instance_type` is vip.
        """
        return pulumi.get(self, "storage_size")

    @storage_size.setter
    def storage_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_size", value)
@pulumi.input_type
class _InstanceState:
    """State bag used when looking up / filtering existing `Instance` resources.

    Machine-generated by the Pulumi Terraform bridge; every field is optional
    because lookups may match on any subset of properties. Attributes are
    stored via ``pulumi.set`` and read via ``pulumi.get``.
    """

    def __init__(__self__, *,
                 instance_name: Optional[pulumi.Input[str]] = None,
                 instance_type: Optional[pulumi.Input[str]] = None,
                 logistics: Optional[pulumi.Input[str]] = None,
                 max_eip_tps: Optional[pulumi.Input[str]] = None,
                 max_tps: Optional[pulumi.Input[str]] = None,
                 modify_type: Optional[pulumi.Input[str]] = None,
                 payment_type: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 queue_capacity: Optional[pulumi.Input[str]] = None,
                 renewal_duration: Optional[pulumi.Input[int]] = None,
                 renewal_duration_unit: Optional[pulumi.Input[str]] = None,
                 renewal_status: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 storage_size: Optional[pulumi.Input[str]] = None,
                 support_eip: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering Instance resources.
        :param pulumi.Input[str] instance_name: The instance name.
        :param pulumi.Input[str] instance_type: The Instance Type. Valid values: `professional`, `vip`.
        :param pulumi.Input[str] logistics: Undocumented upstream -- presumably hardware/logistics info; confirm against the provider schema.
        :param pulumi.Input[str] max_eip_tps: The max eip tps. It is valid when `support_eip` is true. The valid value is [128, 45000] with the step size 128.
        :param pulumi.Input[str] max_tps: The peak TPS traffic. The smallest valid value is 1000 and the largest value is 100,000.
        :param pulumi.Input[str] modify_type: The modify type. Valid values: `Downgrade`, `Upgrade`. It is required when updating other attributes.
        :param pulumi.Input[str] payment_type: The payment type. Valid values: `Subscription`.
        :param pulumi.Input[int] period: The period. Valid values: `1`, `12`, `2`, `24`, `3`, `6`.
        :param pulumi.Input[str] queue_capacity: The queue capacity. The smallest value is 50 and the step size 5.
        :param pulumi.Input[int] renewal_duration: RenewalDuration. Valid values: `1`, `12`, `2`, `3`, `6`.
        :param pulumi.Input[str] renewal_duration_unit: Auto-Renewal Cycle Unit Values Include: Month: Month. Year: Years. Valid values: `Month`, `Year`.
        :param pulumi.Input[str] renewal_status: Whether to renew an instance automatically or not. Default to "ManualRenewal".
               - `AutoRenewal`: Auto renewal.
               - `ManualRenewal`: Manual renewal.
               - `NotRenewal`: No renewal any longer. After you specify this value, Alibaba Cloud stop sending notification of instance expiry, and only gives a brief reminder on the third day before the instance expiry.
        :param pulumi.Input[str] status: The status of the resource.
        :param pulumi.Input[str] storage_size: The storage size. It is valid when `instance_type` is vip.
        :param pulumi.Input[bool] support_eip: Whether to support EIP.
        """
        # Every field is optional; only store the ones the caller supplied.
        if instance_name is not None:
            pulumi.set(__self__, "instance_name", instance_name)
        if instance_type is not None:
            pulumi.set(__self__, "instance_type", instance_type)
        if logistics is not None:
            pulumi.set(__self__, "logistics", logistics)
        if max_eip_tps is not None:
            pulumi.set(__self__, "max_eip_tps", max_eip_tps)
        if max_tps is not None:
            pulumi.set(__self__, "max_tps", max_tps)
        if modify_type is not None:
            pulumi.set(__self__, "modify_type", modify_type)
        if payment_type is not None:
            pulumi.set(__self__, "payment_type", payment_type)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if queue_capacity is not None:
            pulumi.set(__self__, "queue_capacity", queue_capacity)
        if renewal_duration is not None:
            pulumi.set(__self__, "renewal_duration", renewal_duration)
        if renewal_duration_unit is not None:
            pulumi.set(__self__, "renewal_duration_unit", renewal_duration_unit)
        if renewal_status is not None:
            pulumi.set(__self__, "renewal_status", renewal_status)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if storage_size is not None:
            pulumi.set(__self__, "storage_size", storage_size)
        if support_eip is not None:
            pulumi.set(__self__, "support_eip", support_eip)

    @property
    @pulumi.getter(name="instanceName")
    def instance_name(self) -> Optional[pulumi.Input[str]]:
        """
        The instance name.
        """
        return pulumi.get(self, "instance_name")

    @instance_name.setter
    def instance_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_name", value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> Optional[pulumi.Input[str]]:
        """
        The Instance Type. Valid values: `professional`, `vip`.
        """
        return pulumi.get(self, "instance_type")

    @instance_type.setter
    def instance_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_type", value)

    @property
    @pulumi.getter
    def logistics(self) -> Optional[pulumi.Input[str]]:
        """
        Undocumented upstream -- presumably hardware/logistics information;
        confirm against the provider schema before relying on it.
        """
        return pulumi.get(self, "logistics")

    @logistics.setter
    def logistics(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "logistics", value)

    @property
    @pulumi.getter(name="maxEipTps")
    def max_eip_tps(self) -> Optional[pulumi.Input[str]]:
        """
        The max eip tps. It is valid when `support_eip` is true. The valid value is [128, 45000] with the step size 128.
        """
        return pulumi.get(self, "max_eip_tps")

    @max_eip_tps.setter
    def max_eip_tps(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_eip_tps", value)

    @property
    @pulumi.getter(name="maxTps")
    def max_tps(self) -> Optional[pulumi.Input[str]]:
        """
        The peak TPS traffic. The smallest valid value is 1000 and the largest value is 100,000.
        """
        return pulumi.get(self, "max_tps")

    @max_tps.setter
    def max_tps(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_tps", value)

    @property
    @pulumi.getter(name="modifyType")
    def modify_type(self) -> Optional[pulumi.Input[str]]:
        """
        The modify type. Valid values: `Downgrade`, `Upgrade`. It is required when updating other attributes.
        """
        return pulumi.get(self, "modify_type")

    @modify_type.setter
    def modify_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "modify_type", value)

    @property
    @pulumi.getter(name="paymentType")
    def payment_type(self) -> Optional[pulumi.Input[str]]:
        """
        The payment type. Valid values: `Subscription`.
        """
        return pulumi.get(self, "payment_type")

    @payment_type.setter
    def payment_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "payment_type", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The period. Valid values: `1`, `12`, `2`, `24`, `3`, `6`.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter(name="queueCapacity")
    def queue_capacity(self) -> Optional[pulumi.Input[str]]:
        """
        The queue capacity. The smallest value is 50 and the step size 5.
        """
        return pulumi.get(self, "queue_capacity")

    @queue_capacity.setter
    def queue_capacity(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "queue_capacity", value)

    @property
    @pulumi.getter(name="renewalDuration")
    def renewal_duration(self) -> Optional[pulumi.Input[int]]:
        """
        RenewalDuration. Valid values: `1`, `12`, `2`, `3`, `6`.
        """
        return pulumi.get(self, "renewal_duration")

    @renewal_duration.setter
    def renewal_duration(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "renewal_duration", value)

    @property
    @pulumi.getter(name="renewalDurationUnit")
    def renewal_duration_unit(self) -> Optional[pulumi.Input[str]]:
        """
        Auto-Renewal Cycle Unit Values Include: Month: Month. Year: Years. Valid values: `Month`, `Year`.
        """
        return pulumi.get(self, "renewal_duration_unit")

    @renewal_duration_unit.setter
    def renewal_duration_unit(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "renewal_duration_unit", value)

    @property
    @pulumi.getter(name="renewalStatus")
    def renewal_status(self) -> Optional[pulumi.Input[str]]:
        """
        Whether to renew an instance automatically or not. Default to "ManualRenewal".
        - `AutoRenewal`: Auto renewal.
        - `ManualRenewal`: Manual renewal.
        - `NotRenewal`: No renewal any longer. After you specify this value, Alibaba Cloud stop sending notification of instance expiry, and only gives a brief reminder on the third day before the instance expiry.
        """
        return pulumi.get(self, "renewal_status")

    @renewal_status.setter
    def renewal_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "renewal_status", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the resource.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter(name="storageSize")
    def storage_size(self) -> Optional[pulumi.Input[str]]:
        """
        The storage size. It is valid when `instance_type` is vip.
        """
        return pulumi.get(self, "storage_size")

    @storage_size.setter
    def storage_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_size", value)

    @property
    @pulumi.getter(name="supportEip")
    def support_eip(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to support EIP.
        """
        return pulumi.get(self, "support_eip")

    @support_eip.setter
    def support_eip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "support_eip", value)
class Instance(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
instance_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
logistics: Optional[pulumi.Input[str]] = None,
max_eip_tps: Optional[pulumi.Input[str]] = None,
max_tps: Optional[pulumi.Input[str]] = None,
modify_type: Optional[pulumi.Input[str]] = None,
payment_type: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
queue_capacity: Optional[pulumi.Input[str]] = None,
renewal_duration: Optional[pulumi.Input[int]] = None,
renewal_duration_unit: Optional[pulumi.Input[str]] = None,
renewal_status: Optional[pulumi.Input[str]] = None,
storage_size: Optional[pulumi.Input[str]] = None,
support_eip: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides a RabbitMQ (AMQP) Instance resource.
For information about RabbitMQ (AMQP) Instance and how to use it, see [What is Instance](https://www.alibabacloud.com/help/doc-detail/101631.htm).
> **NOTE:** Available in v1.128.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
professional = alicloud.amqp.Instance("professional",
instance_type="professional",
max_eip_tps="128",
max_tps="1000",
payment_type="Subscription",
period=1,
queue_capacity="50",
support_eip=True)
vip = alicloud.amqp.Instance("vip",
instance_type="vip",
max_eip_tps="128",
max_tps="5000",
payment_type="Subscription",
period=1,
queue_capacity="50",
storage_size="700",
support_eip=True)
```
## Import
RabbitMQ (AMQP) Instance can be imported using the id, e.g.
```sh
$ pulumi import alicloud:amqp/instance:Instance example <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] instance_name: The instance name.
:param pulumi.Input[str] instance_type: The Instance Type. Valid values: `professional`, `vip`.
:param pulumi.Input[str] max_eip_tps: The max eip tps. It is valid when `support_eip` is true. The valid value is [128, 45000] with the step size 128.
:param pulumi.Input[str] max_tps: The peak TPS traffic. The smallest valid value is 1000 and the largest value is 100,000.
:param pulumi.Input[str] modify_type: The modify type. Valid values: `Downgrade`, `Upgrade`. It is required when updating other attributes.
:param pulumi.Input[str] payment_type: The payment type. Valid values: `Subscription`.
:param pulumi.Input[int] period: The period. Valid values: `1`, `12`, `2`, `24`, `3`, `6`.
:param pulumi.Input[str] queue_capacity: The queue capacity. The smallest value is 50 and the step size 5.
:param pulumi.Input[int] renewal_duration: RenewalDuration. Valid values: `1`, `12`, `2`, `3`, `6`.
:param pulumi.Input[str] renewal_duration_unit: Auto-Renewal Cycle Unit Values Include: Month: Month. Year: Years. Valid values: `Month`, `Year`.
:param pulumi.Input[str] renewal_status: Whether to renew an instance automatically or not. Default to "ManualRenewal".
- `AutoRenewal`: Auto renewal.
- `ManualRenewal`: Manual renewal.
- `NotRenewal`: No renewal any longer. After you specify this value, Alibaba Cloud stop sending notification of instance expiry, and only gives a brief reminder on the third day before the instance expiry.
:param pulumi.Input[str] storage_size: The storage size. It is valid when `instance_type` is vip.
:param pulumi.Input[bool] support_eip: Whether to support EIP.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InstanceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a RabbitMQ (AMQP) Instance resource.
For information about RabbitMQ (AMQP) Instance and how to use it, see [What is Instance](https://www.alibabacloud.com/help/doc-detail/101631.htm).
> **NOTE:** Available in v1.128.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
professional = alicloud.amqp.Instance("professional",
instance_type="professional",
max_eip_tps="128",
max_tps="1000",
payment_type="Subscription",
period=1,
queue_capacity="50",
support_eip=True)
vip = alicloud.amqp.Instance("vip",
instance_type="vip",
max_eip_tps="128",
max_tps="5000",
payment_type="Subscription",
period=1,
queue_capacity="50",
storage_size="700",
support_eip=True)
```
## Import
RabbitMQ (AMQP) Instance can be imported using the id, e.g.
```sh
$ pulumi import alicloud:amqp/instance:Instance example <id>
```
:param str resource_name: The name of the resource.
:param InstanceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InstanceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
instance_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
logistics: Optional[pulumi.Input[str]] = None,
max_eip_tps: Optional[pulumi.Input[str]] = None,
max_tps: Optional[pulumi.Input[str]] = None,
modify_type: Optional[pulumi.Input[str]] = None,
payment_type: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
queue_capacity: Optional[pulumi.Input[str]] = None,
renewal_duration: Optional[pulumi.Input[int]] = None,
renewal_duration_unit: Optional[pulumi.Input[str]] = None,
renewal_status: Optional[pulumi.Input[str]] = None,
storage_size: Optional[pulumi.Input[str]] = None,
support_eip: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InstanceArgs.__new__(InstanceArgs)
__props__.__dict__["instance_name"] = instance_name
if instance_type is None and not opts.urn:
raise TypeError("Missing required property 'instance_type'")
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["logistics"] = logistics
__props__.__dict__["max_eip_tps"] = max_eip_tps
if max_tps is None and not opts.urn:
raise TypeError("Missing required property 'max_tps'")
__props__.__dict__["max_tps"] = max_tps
__props__.__dict__["modify_type"] = modify_type
if payment_type is None and not opts.urn:
raise TypeError("Missing required property 'payment_type'")
__props__.__dict__["payment_type"] = payment_type
__props__.__dict__["period"] = period
if queue_capacity is None and not opts.urn:
raise TypeError("Missing required property 'queue_capacity'")
__props__.__dict__["queue_capacity"] = queue_capacity
__props__.__dict__["renewal_duration"] = renewal_duration
__props__.__dict__["renewal_duration_unit"] = renewal_duration_unit
__props__.__dict__["renewal_status"] = renewal_status
__props__.__dict__["storage_size"] = storage_size
if support_eip is None and not opts.urn:
raise TypeError("Missing required property 'support_eip'")
__props__.__dict__["support_eip"] = support_eip
__props__.__dict__["status"] = None
super(Instance, __self__).__init__(
'alicloud:amqp/instance:Instance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
instance_name: Optional[pulumi.Input[str]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
logistics: Optional[pulumi.Input[str]] = None,
max_eip_tps: Optional[pulumi.Input[str]] = None,
max_tps: Optional[pulumi.Input[str]] = None,
modify_type: Optional[pulumi.Input[str]] = None,
payment_type: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
queue_capacity: Optional[pulumi.Input[str]] = None,
renewal_duration: Optional[pulumi.Input[int]] = None,
renewal_duration_unit: Optional[pulumi.Input[str]] = None,
renewal_status: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
storage_size: Optional[pulumi.Input[str]] = None,
support_eip: Optional[pulumi.Input[bool]] = None) -> 'Instance':
"""
Get an existing Instance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] instance_name: The instance name.
:param pulumi.Input[str] instance_type: The Instance Type. Valid values: `professional`, `vip`.
:param pulumi.Input[str] max_eip_tps: The max eip tps. It is valid when `support_eip` is true. The valid value is [128, 45000] with the step size 128.
:param pulumi.Input[str] max_tps: The peak TPS traffic. The smallest valid value is 1000 and the largest value is 100,000.
:param pulumi.Input[str] modify_type: The modify type. Valid values: `Downgrade`, `Upgrade`. It is required when updating other attributes.
:param pulumi.Input[str] payment_type: The payment type. Valid values: `Subscription`.
:param pulumi.Input[int] period: The period. Valid values: `1`, `12`, `2`, `24`, `3`, `6`.
:param pulumi.Input[str] queue_capacity: The queue capacity. The smallest value is 50 and the step size 5.
:param pulumi.Input[int] renewal_duration: RenewalDuration. Valid values: `1`, `12`, `2`, `3`, `6`.
:param pulumi.Input[str] renewal_duration_unit: Auto-Renewal Cycle Unit Values Include: Month: Month. Year: Years. Valid values: `Month`, `Year`.
:param pulumi.Input[str] renewal_status: Whether to renew an instance automatically or not. Default to "ManualRenewal".
- `AutoRenewal`: Auto renewal.
- `ManualRenewal`: Manual renewal.
- `NotRenewal`: No renewal any longer. After you specify this value, Alibaba Cloud stop sending notification of instance expiry, and only gives a brief reminder on the third day before the instance expiry.
:param pulumi.Input[str] status: The status of the resource.
:param pulumi.Input[str] storage_size: The storage size. It is valid when `instance_type` is vip.
:param pulumi.Input[bool] support_eip: Whether to support EIP.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _InstanceState.__new__(_InstanceState)
__props__.__dict__["instance_name"] = instance_name
__props__.__dict__["instance_type"] = instance_type
__props__.__dict__["logistics"] = logistics
__props__.__dict__["max_eip_tps"] = max_eip_tps
__props__.__dict__["max_tps"] = max_tps
__props__.__dict__["modify_type"] = modify_type
__props__.__dict__["payment_type"] = payment_type
__props__.__dict__["period"] = period
__props__.__dict__["queue_capacity"] = queue_capacity
__props__.__dict__["renewal_duration"] = renewal_duration
__props__.__dict__["renewal_duration_unit"] = renewal_duration_unit
__props__.__dict__["renewal_status"] = renewal_status
__props__.__dict__["status"] = status
__props__.__dict__["storage_size"] = storage_size
__props__.__dict__["support_eip"] = support_eip
return Instance(resource_name, opts=opts, __props__=__props__)
    # --- Read-only accessors for the resource's output state. ---

    @property
    @pulumi.getter(name="instanceName")
    def instance_name(self) -> pulumi.Output[Optional[str]]:
        """
        The instance name.
        """
        return pulumi.get(self, "instance_name")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> pulumi.Output[str]:
        """
        The Instance Type. Valid values: `professional`, `vip`.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter
    def logistics(self) -> pulumi.Output[Optional[str]]:
        # NOTE(review): no description is provided for this field anywhere in
        # this file; the value is surfaced from resource state unchanged.
        return pulumi.get(self, "logistics")

    @property
    @pulumi.getter(name="maxEipTps")
    def max_eip_tps(self) -> pulumi.Output[Optional[str]]:
        """
        The max eip tps. It is valid when `support_eip` is true. The valid value is [128, 45000] with the step size 128.
        """
        return pulumi.get(self, "max_eip_tps")

    @property
    @pulumi.getter(name="maxTps")
    def max_tps(self) -> pulumi.Output[str]:
        """
        The peak TPS traffic. The smallest valid value is 1000 and the largest value is 100,000.
        """
        return pulumi.get(self, "max_tps")

    @property
    @pulumi.getter(name="modifyType")
    def modify_type(self) -> pulumi.Output[Optional[str]]:
        """
        The modify type. Valid values: `Downgrade`, `Upgrade`. It is required when updating other attributes.
        """
        return pulumi.get(self, "modify_type")

    @property
    @pulumi.getter(name="paymentType")
    def payment_type(self) -> pulumi.Output[str]:
        """
        The payment type. Valid values: `Subscription`.
        """
        return pulumi.get(self, "payment_type")

    @property
    @pulumi.getter
    def period(self) -> pulumi.Output[Optional[int]]:
        """
        The period. Valid values: `1`, `12`, `2`, `24`, `3`, `6`.
        """
        return pulumi.get(self, "period")

    @property
    @pulumi.getter(name="queueCapacity")
    def queue_capacity(self) -> pulumi.Output[str]:
        """
        The queue capacity. The smallest value is 50 and the step size 5.
        """
        return pulumi.get(self, "queue_capacity")

    @property
    @pulumi.getter(name="renewalDuration")
    def renewal_duration(self) -> pulumi.Output[Optional[int]]:
        """
        RenewalDuration. Valid values: `1`, `12`, `2`, `3`, `6`.
        """
        return pulumi.get(self, "renewal_duration")

    @property
    @pulumi.getter(name="renewalDurationUnit")
    def renewal_duration_unit(self) -> pulumi.Output[Optional[str]]:
        """
        Auto-Renewal Cycle Unit Values Include: Month: Month. Year: Years. Valid values: `Month`, `Year`.
        """
        return pulumi.get(self, "renewal_duration_unit")

    @property
    @pulumi.getter(name="renewalStatus")
    def renewal_status(self) -> pulumi.Output[str]:
        """
        Whether to renew an instance automatically or not. Default to "ManualRenewal".
        - `AutoRenewal`: Auto renewal.
        - `ManualRenewal`: Manual renewal.
        - `NotRenewal`: No renewal any longer. After you specify this value, Alibaba Cloud stop sending notification of instance expiry, and only gives a brief reminder on the third day before the instance expiry.
        """
        return pulumi.get(self, "renewal_status")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of the resource.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="storageSize")
    def storage_size(self) -> pulumi.Output[Optional[str]]:
        """
        The storage size. It is valid when `instance_type` is vip.
        """
        return pulumi.get(self, "storage_size")

    @property
    @pulumi.getter(name="supportEip")
    def support_eip(self) -> pulumi.Output[bool]:
        """
        Whether to support EIP.
        """
        return pulumi.get(self, "support_eip")
|
<gh_stars>1-10
'''
Writes result into the file
Author: <NAME>
'''
import os
import logging
import numpy as np
import torchtext
from torchtext import data
from torchtext import vocab
import torch
import torch.nn as nn
from tqdm import tqdm, tqdm_notebook, tnrange
tqdm.pandas(desc='Progress')
import utility.conlleval_perl as e
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score, roc_curve, auc
from sklearn.preprocessing import label_binarize
class Evaluator():
    """Runs a trained model over the test split, writes predictions to a
    results file, supports single-example inference, and computes
    precision/recall/F1/AUC metrics.
    """

    def __init__(self, config, logger, model, dataloader, model_name):
        """
        :param config: experiment config; must expose batch_size, results_dir,
            average, train_type and device
        :param logger: logger used for progress messages
        :param model: trained torch model under evaluation
        :param dataloader: project dataloader exposing load_data() plus the
            txt/at/ac/ss fields and their vocabularies
        :param model_name: prefix for the results file name
        """
        self.config = config
        self.logger = logger
        self.model = model
        self.model_name = model_name
        self.dataloader = dataloader
        # shuffle=False keeps the written results aligned with dataset order.
        self.train_dl, self.val_dl, self.test_dl = dataloader.load_data(batch_size=config.batch_size, shuffle=False)
        self.results_dir = config.results_dir
        ts_file = self.model_name+'_test.txt'
        self.test_file = os.path.join(self.results_dir, ts_file)
        self.average = config.average

    def numpy_to_sent(self, tensor):
        '''
        Returns the corresponding TEXT of given Predictions
        Returns chunks of string
        '''
        return ' '.join([self.dataloader.txt_field.vocab.itos[i] for i in tensor.cpu().data.numpy()[0]]).split()

    def numpy_to_at(self, tensor):
        '''
        Returns the corresponding ASPECT TERM of given Predictions
        Returns chunks of string
        '''
        return ' '.join([self.dataloader.at_field.vocab.itos[i] for i in tensor.cpu().data.numpy()[0]]).split()

    def numpy_to_ac(self, tensor):
        '''
        Returns the corresponding ASPECT CATEGORY of given Predictions
        (the original docstring said ASPECT TERM - copy/paste slip)
        Returns a space-joined string
        '''
        return ' '.join([self.dataloader.ac_field.vocab.itos[i] for i in tensor])

    def pred_to_tag(self, predictions):
        '''
        Returns the corresponding LABEL of given Predictions
        Returns chunks of string
        '''
        # train types 3/4 predict sentiment labels (ss_field); all other
        # train types predict aspect categories (ac_field).
        if self.config.train_type == 3 or self.config.train_type == 4:
            return ' '.join([self.dataloader.ss_field.vocab.itos[i] for i in predictions])
        else:
            return ' '.join([self.dataloader.ac_field.vocab.itos[i] for i in predictions])

    def write_results(self):
        """
        Writes the result into the file
        """
        self.model.eval()
        # BUG FIX: the original ``with torch.no_grad() and open(...)`` only
        # entered the file context (``and`` evaluates to its right operand),
        # so gradient tracking stayed on.  Enter both context managers.
        with torch.no_grad(), open(self.test_file, 'w', encoding='utf-8') as rtst:
            self.logger.info('Writing in file: {0}'.format(self.test_file))
            tt = tqdm(iter(self.test_dl), leave=False)
            for ((y, ac, at, X), v) in tt:
                pred = self.model(X, at, ac)
                for i in range(X.shape[0]):
                    # Re-add the batch dimension for the per-example helpers.
                    txt = X[i].unsqueeze(0)
                    aterm = at[i].unsqueeze(0)
                    acat = ac[i].unsqueeze(0)
                    gold = y[i].unsqueeze(0)
                    predicted = pred[i].unsqueeze(0)
                    sent = ' '.join(self.numpy_to_sent(txt))
                    aspect_cat = self.numpy_to_ac(acat)
                    aspect = ' '.join(self.numpy_to_at(aterm))
                    y_true_val = gold.squeeze(1).data.cpu().numpy()
                    true_tag = self.pred_to_tag(y_true_val)
                    y_pred_val = predicted.argmax(dim = 1, keepdim = True).squeeze(1).data.cpu().numpy()
                    pred_tag = self.pred_to_tag(y_pred_val)
                    rtst.write(sent+'\t'+aspect+'\t'+aspect_cat+'\t'+true_tag+'\t'+pred_tag+'\n')
                rtst.write('\n')
        # The context manager closes the file; the old explicit close() is gone.

    def infer(self, sent, aspect_term, aspect_cat):
        """
        Runs the model on a single (sentence, aspect term, aspect category)
        triple and returns the predicted tag string.
        """
        # Tokenize the sentence and aspect terms
        sent_tok = self.dataloader.tokenizer(sent)
        at_tok = self.dataloader.tokenizer(aspect_term)
        # Get index from vocab
        X = [self.dataloader.txt_field.vocab.stoi[t] for t in sent_tok]
        at = [self.dataloader.at_field.vocab.stoi[t] for t in at_tok]
        ac = [self.dataloader.ac_field.vocab.stoi[aspect_cat]]
        # Convert into torch and reshape into [batch, sent_length]
        X = torch.LongTensor(X).to(self.config.device)
        X = X.unsqueeze(0)
        at = torch.LongTensor(at).to(self.config.device)
        at = at.unsqueeze(0)
        ac = torch.LongTensor(ac).to(self.config.device)
        ac = ac.unsqueeze(0)
        # Get predictions
        pred = self.model(X, at, ac)
        pred_idx = pred.argmax(dim = 1)
        y_pred_val = pred_idx.cpu().data.numpy()
        pred_tag = self.pred_to_tag(y_pred_val)
        return pred_tag

    def prec_rec_f1(self, gold_list, pred_list):
        """
        Calculates the precision, recall, f1 score and ROC-AUC
        :param gold_list: gold labels
        :param pred_list: predicted labels
        :return: precision, recall, f1 score, auc
        """
        prec, rec, f1, _ = precision_recall_fscore_support(gold_list, pred_list, average=self.average)
        gold_list = np.array(gold_list)
        pred_list = np.array(pred_list)
        # BUG FIX: size the one-hot encoding by the largest label in EITHER
        # list; using only the gold labels raised IndexError whenever the
        # model predicted a label absent from the gold set.
        n_values = max(np.max(gold_list), np.max(pred_list)) + 1
        # create one hot encoding for auc calculation
        gold_list = np.eye(n_values)[gold_list]
        pred_list = np.eye(n_values)[pred_list]
        auc = roc_auc_score(gold_list, pred_list, average=self.average)
        return prec, rec, f1, auc
|
from .constants import Utf8
from .attributes import Attribute
# From: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html
##########################################################################
# 4.6. Methods
##########################################################################
# Each method, including each instance initialization method (§2.9) and the
# class or interface initialization method (§2.9), is described by a method_info
# structure. No two methods in one class file may have the same name and
# descriptor (§4.3.3).
class Method:
    """A ``method_info`` structure from a Java class file (JVM spec §4.6).

    Binary layout:
        u2 access_flags;
        u2 name_index;
        u2 descriptor_index;
        u2 attributes_count;
        attribute_info attributes[attributes_count];

    Each method, including instance/class initialization methods (§2.9),
    is described by one of these.  No two methods in one class file may
    have the same name and descriptor (§4.3.3).
    """

    # Table 4.5. Method access and property flags
    ACC_PUBLIC = 0x0001  # Declared public; may be accessed from outside its package.
    ACC_PRIVATE = 0x0002  # Declared private; accessible only within the defining class.
    ACC_PROTECTED = 0x0004  # Declared protected; may be accessed within subclasses.
    ACC_STATIC = 0x0008  # Declared static.
    ACC_FINAL = 0x0010  # Declared final; must not be overridden (§5.4.5).
    ACC_SYNCHRONIZED = 0x0020  # Declared synchronized; invocation is wrapped by a monitor use.
    ACC_BRIDGE = 0x0040  # A bridge method, generated by the compiler.
    ACC_VARARGS = 0x0080  # Declared with variable number of arguments.
    ACC_NATIVE = 0x0100  # Declared native; implemented in a language other than Java.
    ACC_ABSTRACT = 0x0400  # Declared abstract; no implementation is provided.
    ACC_STRICT = 0x0800  # Declared strictfp; floating-point mode is FP-strict.
    ACC_SYNTHETIC = 0x1000  # Declared synthetic; not present in the source code.

    def __init__(
        self, name, descriptor,
        public=True, private=False, protected=False, static=False,
        final=False, synchronized=False, bridge=False, varargs=False,
        native=False, abstract=False, strict=False, synthetic=False,
        attributes=None
    ):
        """Build a method description.

        ``name`` must be one of the special names <init>/<clinit> (§2.9) or a
        valid unqualified method name (§4.2.2); ``descriptor`` must be a valid
        method descriptor (§4.3.3).  Both are stored as constant-pool UTF-8
        entries.
        """
        self.name = Utf8(name)
        self.descriptor = Utf8(descriptor)
        # At most one of ACC_PRIVATE, ACC_PROTECTED and ACC_PUBLIC may be set
        # (JLS §8.4.3): private wins over protected, which wins over public.
        # BUG FIX: the previous implementation forced ``public`` to True
        # whenever private and protected were both False, which made the
        # legal all-flags-clear (package-private) state impossible to
        # round-trip through read()/write().  The ``public`` argument is now
        # honoured, subject to the mutual-exclusion rule.
        self.private = private
        self.protected = protected and not private
        self.public = public and not private and not self.protected
        self.static = static
        self.final = final
        self.synchronized = synchronized
        self.bridge = bridge
        self.varargs = varargs
        self.native = native
        self.abstract = abstract
        self.strict = strict
        self.synthetic = synthetic
        # Optional attribute structures (§4.7): Code, Exceptions, Signature,
        # annotation attributes, etc.  A JVM must silently ignore attributes
        # it does not recognize.
        self.attributes = attributes if attributes else []

    def __repr__(self):
        return '<Method access:0x%04x name:%s, descriptor:%s>' % (self.access_flags, self.name, self.descriptor)

    @staticmethod
    def read(reader, dump=None):
        """Read one method_info structure from ``reader``.

        ``dump``, when not None, is the indentation level for debug output.
        """
        access_flags = reader.read_u2()
        # name_index / descriptor_index resolve through the constant pool to
        # modified-UTF-8 strings (§4.4.7).
        name = reader.constant_pool[reader.read_u2()].bytes.decode('mutf-8')
        descriptor = reader.constant_pool[reader.read_u2()].bytes.decode('mutf-8')
        attributes_count = reader.read_u2()

        if dump is not None:
            reader.debug(" " * dump, 'Method %s %s' % (name, descriptor))
            access_description = ', '.join(f for f in [
                    flag if access_flags & mask else None
                    for flag, mask in [
                        ('public', Method.ACC_PUBLIC),
                        ('private', Method.ACC_PRIVATE),
                        ('protected', Method.ACC_PROTECTED),
                        ('static', Method.ACC_STATIC),
                        ('final', Method.ACC_FINAL),
                        ('synchronized', Method.ACC_SYNCHRONIZED),
                        ('bridge', Method.ACC_BRIDGE),
                        ('varargs', Method.ACC_VARARGS),
                        ('native', Method.ACC_NATIVE),
                        ('abstract', Method.ACC_ABSTRACT),
                        ('strict', Method.ACC_STRICT),
                        ('synthetic', Method.ACC_SYNTHETIC),
                    ]
                ] if f)
            # BUG FIX: the old format string applied % twice
            # ('0x%04x%s' % (flags, ' (%s)') % desc), which printed a
            # dangling " ()" when no flags were set.  Only append the
            # parenthesized description when there is one.
            reader.debug(" " * dump, '    Flags: 0x%04x%s' % (
                access_flags,
                ' (%s)' % access_description if access_description else ''
            ))
            reader.debug(" " * dump, '    Attributes: (%s)' % attributes_count)

        attributes = []
        for i in range(0, attributes_count):
            attributes.append(
                Attribute.read(reader, dump=dump + 2 if dump is not None else dump)
            )

        return Method(
            name=name,
            descriptor=descriptor,
            public=bool(access_flags & Method.ACC_PUBLIC),
            private=bool(access_flags & Method.ACC_PRIVATE),
            protected=bool(access_flags & Method.ACC_PROTECTED),
            static=bool(access_flags & Method.ACC_STATIC),
            final=bool(access_flags & Method.ACC_FINAL),
            synchronized=bool(access_flags & Method.ACC_SYNCHRONIZED),
            bridge=bool(access_flags & Method.ACC_BRIDGE),
            varargs=bool(access_flags & Method.ACC_VARARGS),
            native=bool(access_flags & Method.ACC_NATIVE),
            abstract=bool(access_flags & Method.ACC_ABSTRACT),
            strict=bool(access_flags & Method.ACC_STRICT),
            synthetic=bool(access_flags & Method.ACC_SYNTHETIC),
            attributes=attributes,
        )

    def write(self, writer):
        """Serialize this method_info; name/descriptor must already be in
        the writer's constant pool (see resolve())."""
        writer.write_u2(self.access_flags)
        writer.write_u2(writer.constant_pool.index(self.name))
        writer.write_u2(writer.constant_pool.index(self.descriptor))
        writer.write_u2(self.attributes_count)
        for attribute in self.attributes:
            attribute.write(writer)

    def resolve(self, constant_pool):
        """Register this method's constant-pool entries (name, descriptor,
        and those of every attribute)."""
        constant_pool.add(self.name)
        constant_pool.add(self.descriptor)
        for attribute in self.attributes:
            attribute.resolve(constant_pool)

    @property
    def attributes_count(self):
        # Derived, so it can never disagree with the attributes list.
        return len(self.attributes)

    @property
    def access_flags(self):
        """The u2 access_flags bitmask assembled from the boolean fields.

        A specific instance initialization method (§2.9) may have at most one
        of its ACC_PRIVATE, ACC_PROTECTED, and ACC_PUBLIC flags set, and may
        also have its ACC_STRICT, ACC_VARARGS and ACC_SYNTHETIC flags set.
        Bits not assigned in Table 4.5 are reserved and written as zero.
        """
        return (
            (self.ACC_PUBLIC if self.public else 0) |
            (self.ACC_PRIVATE if self.private else 0) |
            (self.ACC_PROTECTED if self.protected else 0) |
            (self.ACC_STATIC if self.static else 0) |
            (self.ACC_FINAL if self.final else 0) |
            (self.ACC_SYNCHRONIZED if self.synchronized else 0) |
            (self.ACC_BRIDGE if self.bridge else 0) |
            (self.ACC_VARARGS if self.varargs else 0) |
            (self.ACC_NATIVE if self.native else 0) |
            (self.ACC_ABSTRACT if self.abstract else 0) |
            (self.ACC_STRICT if self.strict else 0) |
            (self.ACC_SYNTHETIC if self.synthetic else 0)
        )
|
<gh_stars>10-100
import random
from typing import Type, Union
import habitat
from habitat import Config, Env, RLEnv, VectorEnv, make_dataset
from habitat_baselines.common.env_utils import make_env_fn
from habitat.core.logging import logger
from robo_vln_baselines.common.environments import VLNCEDaggerEnv
class SimpleRLEnv(habitat.RLEnv):
    """Minimal RLEnv wrapper: constant zero reward within a nominal
    [-1, 1] range; done/info delegate to the wrapped ``habitat_env``.
    """

    def get_reward_range(self):
        # Declared bounds for get_reward(); the actual reward below is always 0.
        return [-1, 1]

    def get_reward(self, observations):
        # No task reward is defined for this environment.
        return 0

    def get_done(self, observations):
        # Episode ends exactly when the underlying habitat episode is over.
        return self.habitat_env.episode_over

    def get_info(self, observations):
        # Expose the underlying environment's metrics unchanged.
        return self.habitat_env.get_metrics()
def construct_env(
    config: Config
) -> Env:
    r"""Create a single VLNCEDaggerEnv from ``config``.

    Scenes are split across ``config.NUM_PROCESSES`` cloned configs exactly
    as in :func:`construct_envs`, but only ONE environment — built from the
    last clone — is returned.  (The original docstring, copied from
    ``construct_envs``, documented ``env_class``/``auto_reset_done``
    parameters this function does not take.)

    Args:
        config: config that contains NUM_PROCESSES as well as information
            necessary to create an individual environment.

    Returns:
        Env object created according to specification.
    """
    num_processes = config.NUM_PROCESSES
    configs = []
    dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE)
    scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)

    if num_processes > 1:
        if len(scenes) == 0:
            raise RuntimeError(
                "No scenes to load, multiple process logic relies on being able to split scenes uniquely between processes"
            )
        if len(scenes) < num_processes:
            raise RuntimeError(
                "reduce the number of processes as there "
                "aren't enough number of scenes"
            )

    # Shuffle, then deal scenes round-robin so every split gets a
    # near-equal share.
    random.shuffle(scenes)
    scene_splits = [[] for _ in range(num_processes)]
    for idx, scene in enumerate(scenes):
        scene_splits[idx % len(scene_splits)].append(scene)
    assert sum(map(len, scene_splits)) == len(scenes)

    for i in range(num_processes):
        new_config = config.clone()
        task_config = new_config.TASK_CONFIG.clone()
        task_config.defrost()
        if len(scenes) > 0:
            task_config.DATASET.CONTENT_SCENES = scene_splits[i]
        # GPUs are assigned round-robin from the configured id list.
        task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = (
            config.SIMULATOR_GPU_ID[i % len(config.SIMULATOR_GPU_ID)]
        )
        logger.info(
            f"Simulator GPU ID {config.SIMULATOR_GPU_ID}")
        logger.info(
            f"Simulator GPU ID {task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID}")
        task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS
        task_config.freeze()
        new_config.defrost()
        new_config.TASK_CONFIG = task_config
        new_config.freeze()
        configs.append(new_config)

    # Log each clone's GPU id.  The original loop variable shadowed the
    # ``config`` parameter and relied on its leaked value afterwards;
    # dead commented-out duplication of the clone loop removed.
    for proc_config in configs:
        logger.info(
            f"[construct_envs] Using GPU ID {proc_config.TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID}")

    env = VLNCEDaggerEnv(configs[-1] if configs else config)
    return env
def construct_envs(
    config: Config, env_class: Type[Union[Env, RLEnv]], auto_reset_done: bool = True
) -> VectorEnv:
    r"""Create VectorEnv object with specified config and env class type.
    To allow better performance, dataset are split into small ones for
    each individual env, grouped by scenes.

    Args:
        config: configs that contain num_processes as well as information
            necessary to create individual environments.
        env_class: class type of the envs to be created.
        auto_reset_done: Whether or not to automatically reset the env on done

    Returns:
        VectorEnv object created according to specification.
    """
    num_processes = config.NUM_PROCESSES
    configs = []
    env_classes = [env_class for _ in range(num_processes)]
    dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE)
    scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)

    if num_processes > 1:
        if len(scenes) == 0:
            raise RuntimeError(
                "No scenes to load, multiple process logic relies on being able to split scenes uniquely between processes"
            )
        if len(scenes) < num_processes:
            raise RuntimeError(
                "reduce the number of processes as there "
                "aren't enough number of scenes"
            )

    # Shuffle, then deal scenes round-robin so every process gets a
    # near-equal share.
    random.shuffle(scenes)
    scene_splits = [[] for _ in range(num_processes)]
    for idx, scene in enumerate(scenes):
        scene_splits[idx % len(scene_splits)].append(scene)
    assert sum(map(len, scene_splits)) == len(scenes)

    for i in range(num_processes):
        new_config = config.clone()
        task_config = new_config.TASK_CONFIG.clone()
        task_config.defrost()
        if len(scenes) > 0:
            task_config.DATASET.CONTENT_SCENES = scene_splits[i]
        # GPUs are assigned round-robin from the configured id list.
        task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = (
            config.SIMULATOR_GPU_ID[i % len(config.SIMULATOR_GPU_ID)]
        )
        task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS
        task_config.freeze()
        new_config.defrost()
        new_config.TASK_CONFIG = task_config
        new_config.freeze()
        configs.append(new_config)

    # Log each clone's GPU id without rebinding the ``config`` parameter
    # (the original loop variable shadowed it); dead commented-out
    # duplication of the clone loop removed.
    for proc_config in configs:
        logger.info(
            f"[construct_envs] Using GPU ID {proc_config.TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID}")

    envs = habitat.VectorEnv(
        make_env_fn=make_env_fn,
        # tuple(zip(...)) is already a tuple of (config, env_class) pairs;
        # the original wrapped it in a second, redundant tuple() call.
        env_fn_args=tuple(zip(configs, env_classes)),
        auto_reset_done=auto_reset_done,
    )
    return envs
def construct_envs_auto_reset_false(
    config: Config, env_class: Type[Union[Env, RLEnv]]
) -> VectorEnv:
    """Build a VectorEnv whose environments do NOT auto-reset on done.

    Convenience wrapper around :func:`construct_envs` with
    ``auto_reset_done=False``.
    """
    return construct_envs(config, env_class, auto_reset_done=False)
|
# Copyright 2021 ONDEWO GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [AUTO-GENERATED FILE]
from abc import ABCMeta, abstractmethod
from typing import Iterator
import grpc
from google.protobuf.empty_pb2 import Empty
from ondewo.nlu import session_pb2
from ondewo.nlu.client import Client
from ondewo.nlu.session_pb2_grpc import SessionsServicer
from ondewo.logging.logger import logger
class AutoSessionsServicer(SessionsServicer):
"""
[AUTO-GENERATED CLASS]
generated by: grpc_auto_coder.py
DO NOT ALTER CODE UNLESS YOU WANT TO DO IT EVERY TIME YOU GENERATE IT!
used to relay endpoints to the functions defined in:
>> ./ondewo-nlu-client-python/ondewo/nlu/services/sessions.py
any child class is expected to have a .client attribute to send the service calls to (metaclass-enforced)
all function/endpoint calls are logged
override functions if other functionality than a client call is needed
[original docstring]
A session represents an interaction with a user. You retrieve user input
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def client(self) -> Client:
pass
def DetectIntent(self, request: session_pb2.DetectIntentRequest, context: grpc.ServicerContext) -> session_pb2.DetectIntentResponse:
"""
[AUTO-GENERATED FUNCTION]
Processes a natural language query and returns structured, actionable data
"""
logger.info("relaying DetectIntent() to nlu-client...")
response = self.client.services.sessions.detect_intent(request=request)
return response
def StreamingDetectIntent(self, request_iterator: Iterator[session_pb2.StreamingDetectIntentRequest], context: grpc.ServicerContext) -> Iterator[session_pb2.StreamingDetectIntentResponse]:
"""
[AUTO-GENERATED FUNCTION]
Processes a natural language query in audio format in a streaming fashion
"""
logger.info("relaying StreamingDetectIntent() to nlu-client...")
response = self.client.services.sessions.streaming_detect_intent(request_iterator=request_iterator)
return response
def ListSessions(self, request: session_pb2.ListSessionsRequest, context: grpc.ServicerContext) -> session_pb2.ListSessionsResponse:
"""
[AUTO-GENERATED FUNCTION]
*** SESSION RELATED ENDPOINTS *** //
"""
logger.info("relaying ListSessions() to nlu-client...")
response = self.client.services.sessions.list_sessions(request=request)
return response
def GetSession(self, request: session_pb2.GetSessionRequest, context: grpc.ServicerContext) -> session_pb2.Session:
"""
[AUTO-GENERATED FUNCTION]
GetSession: returns a session(=conversation) from ondewo-kb
"""
logger.info("relaying GetSession() to nlu-client...")
response = self.client.services.sessions.get_session(request=request)
return response
def TrackSessionStep(self, request: session_pb2.TrackSessionStepRequest, context: grpc.ServicerContext) -> session_pb2.Session:
"""
[AUTO-GENERATED FUNCTION]
TrackSessionStep: append to an existing session; creates it if not existing
"""
logger.info("relaying TrackSessionStep() to nlu-client...")
response = self.client.services.sessions.track_session_step(request=request)
return response
def DeleteSession(self, request: session_pb2.DeleteSessionRequest, context: grpc.ServicerContext) -> Empty:
"""
[AUTO-GENERATED FUNCTION]
DeleteSession: delete a session(=conversation) from ondewo-kb (for testing only)
"""
logger.info("relaying DeleteSession() to nlu-client...")
response = self.client.services.sessions.delete_session(request=request)
return response
def ListSessionLabels(self, request: session_pb2.ListSessionLabelsRequest, context: grpc.ServicerContext) -> session_pb2.ListSessionLabelsResponse:
"""
[AUTO-GENERATED FUNCTION]
*** SESSION-LABEL RELATED ENDPOINTS *** //
"""
logger.info("relaying ListSessionLabels() to nlu-client...")
response = self.client.services.sessions.list_session_labels(request=request)
return response
def AddSessionLabels(self, request: session_pb2.AddSessionLabelsRequest, context: grpc.ServicerContext) -> session_pb2.Session:
"""
[AUTO-GENERATED FUNCTION]
Missing associated documentation comment in .proto file.
"""
logger.info("relaying AddSessionLabels() to nlu-client...")
response = self.client.services.sessions.add_session_labels(request=request)
return response
def RemoveSessionLabels(self, request: session_pb2.RemoveSessionLabelsRequest, context: grpc.ServicerContext) -> session_pb2.Session:
"""
[AUTO-GENERATED FUNCTION]
Missing associated documentation comment in .proto file.
"""
logger.info("relaying RemoveSessionLabels() to nlu-client...")
response = self.client.services.sessions.remove_session_labels(request=request)
return response
def ListSessionReviews(self, request: session_pb2.ListSessionReviewsRequest, context: grpc.ServicerContext) -> session_pb2.ListSessionReviewsResponse:
    """Relay ListSessionReviews to the NLU client (session-review endpoint)."""
    logger.info("relaying ListSessionReviews() to nlu-client...")
    return self.client.services.sessions.list_session_reviews(request=request)
def GetSessionReview(self, request: session_pb2.GetSessionReviewRequest, context: grpc.ServicerContext) -> session_pb2.SessionReview:
    """Relay GetSessionReview to the NLU client."""
    logger.info("relaying GetSessionReview() to nlu-client...")
    return self.client.services.sessions.get_session_review(request=request)
def GetLatestSessionReview(self, request: session_pb2.GetLatestSessionReviewRequest, context: grpc.ServicerContext) -> session_pb2.SessionReview:
    """Relay GetLatestSessionReview to the NLU client."""
    logger.info("relaying GetLatestSessionReview() to nlu-client...")
    return self.client.services.sessions.get_latest_session_review(request=request)
def CreateSessionReview(self, request: session_pb2.CreateSessionReviewRequest, context: grpc.ServicerContext) -> session_pb2.SessionReview:
    """Relay CreateSessionReview to the NLU client."""
    logger.info("relaying CreateSessionReview() to nlu-client...")
    return self.client.services.sessions.create_session_review(request=request)
# [make flake8 shut up]
|
import twitter
from django.apps import AppConfig
from django.db.models.signals import post_save
def truncate_headline(headline, n_char):
    """Shorten *headline* by at least ``n_char`` characters plus room for an
    ellipsis, backing up to the nearest word boundary, and append "...".
    """
    separators = " ,.;:"
    cut = -n_char - 3
    boundary = headline[cut]      # first character just beyond the hard cut
    headline = headline[:cut]
    pos = len(headline)
    # Walk left until the cut sits right after a separator (or we hit the start).
    while boundary not in separators and pos:
        pos -= 1
        boundary = headline[pos]
    if pos != -1:                 # kept from the original; pos is never negative
        headline = headline[:pos]
    return headline + "..."
# Compiled once at import time: the Django template used to render tweets.
_twitter_template = Template(settings.MICROBLOG_TWITTER_MESSAGE_TEMPLATE)
def post_update_on_twitter(sender, instance, created, **kwargs):
    """post_save handler: announce a newly published post on Twitter.

    Skips drafts, posts in languages not listed in
    MICROBLOG_TWITTER_LANGUAGES, and posts that were already tweeted
    (deduplicated via Spam rows with method='t').
    """
    if settings.MICROBLOG_TWITTER_LANGUAGES is not None and instance.language not in settings.MICROBLOG_TWITTER_LANGUAGES:
        return
    post = instance.post
    if not post.is_published():
        return
    # Resolve the public URL: the mangler setting is either a callable or a
    # dotted "module.attr" path to one.
    try:
        if not isinstance(settings.MICROBLOG_TWITTER_POST_URL_MANGLER, str):
            url = settings.MICROBLOG_TWITTER_POST_URL_MANGLER(instance)
        else:
            module, attr = settings.MICROBLOG_TWITTER_POST_URL_MANGLER.rsplit('.', 1)
            mod = import_module(module)
            url = getattr(mod, attr)(instance)
    except Exception, e:
        message = 'Post: "%s"\n\nCannot retrieve the url: "%s"' % (instance.headline, str(e))
        mail.mail_admins('[blog] error preparing the tweet', message)
        return
    # Accounts already notified for this post (Spam acts as a delivery log).
    existent = set(( x.value for x in Spam.objects.filter(post=post, method='t') ))
    recipients = set((settings.MICROBLOG_TWITTER_USERNAME,)) - existent
    if not recipients:
        return
    context = Context({
        'content': instance,
        'headline': instance.headline,
        'url': url,
    })
    status = _twitter_template.render(context)
    # Twitter's 140-char limit: on overflow, re-render with a truncated headline.
    diff_len = len(status) - 140
    if diff_len > 0:
        context = Context({
            'content': instance,
            'headline': truncate_headline(instance.headline, diff_len),
            'url': url,
        })
        status = _twitter_template.render(context)
    if settings.MICROBLOG_TWITTER_DEBUG:
        # Debug mode: only print the would-be tweet, never post it.
        print 'Tweet for', instance.headline.encode('utf-8')
        print status
        print '--------------------------------------------'
        return
    log.info('"%s" tweet on "%s"', instance.headline.encode('utf-8'), settings.MICROBLOG_TWITTER_USERNAME)
    try:
        api = twitter.Api(settings.MICROBLOG_TWITTER_USERNAME, settings.MICROBLOG_TWITTER_PASSWORD)
        api.PostUpdate(status)
        # Record the tweet so the same post is never announced twice.
        s = Spam(post=post, method='t', value=settings.MICROBLOG_TWITTER_USERNAME)
        s.save()
    except Exception, e:
        message = 'Post: "%s"\n\nCannot post status update: "%s"' % (instance.headline, str(e))
        mail.mail_admins('[blog] error tweeting the new status', message)
        return
def post_update_on_email(sender, instance, created, **kwargs):
    """post_save handler: send a newly published post by email.

    Skips drafts, posts in languages not listed in
    MICROBLOG_EMAIL_LANGUAGES, and recipients who were already mailed
    (deduplicated via Spam rows with method='e').
    """
    if settings.MICROBLOG_EMAIL_LANGUAGES is not None and instance.language not in settings.MICROBLOG_EMAIL_LANGUAGES:
        return
    post = instance.post
    if not post.is_published():
        return
    # Recipients already notified for this post (Spam acts as a delivery log).
    existent = set(( x.value for x in Spam.objects.filter(post=post, method='e') ))
    recipients = set(settings.MICROBLOG_EMAIL_RECIPIENTS) - existent
    if not recipients:
        return
    ctx = Context({
        'content': instance,
    })
    from django.utils.html import strip_tags
    from lxml import html
    from lxml.html.clean import clean_html
    subject = strip_tags(_email_templates['subject'].render(ctx))
    try:
        hdoc = html.fromstring(_email_templates['body'].render(ctx))
    except Exception, e:
        message = 'Post: "%s"\n\nCannot parse as html: "%s"' % (subject, str(e))
        mail.mail_admins('[blog] error while sending mail', message)
        return
    # From the lxml docs:
    # The module lxml.html.clean provides a Cleaner class for cleaning up
    # HTML pages. It supports removing embedded or script content, special
    # tags, CSS style annotations and much more.
    #
    # We are not defending against malicious code here, but given how email
    # clients render HTML we strip embeds, javascript and iframes anyway;
    # they are almost never rendered properly.
    hdoc = clean_html(hdoc)
    # Make every link absolute so it keeps working inside a mail client.
    hdoc.make_links_absolute(dsettings.DEFAULT_URL_PREFIX)
    body_html = html.tostring(hdoc)
    # Plain-text alternative for mail clients without HTML support.
    import html2text
    h = html2text.HTML2Text()
    h.ignore_images = True
    body_text = h.handle(body_html)
    for r in recipients:
        log.info('"%s" email to "%s"', instance.headline.encode('utf-8'), r)
        email = mail.EmailMultiAlternatives(subject, body_text, dsettings.DEFAULT_FROM_EMAIL, [r])
        email.attach_alternative(body_html, 'text/html')
        email.send()
        # Record delivery so this recipient is not mailed again.
        s = Spam(post=post, method='e', value=r)
        s.save()
class MicroblogConfig(AppConfig):
    """App config that wires the twitter/email announcement signals."""
    name = 'microblog'
    verbose_name = "Microblog"

    def ready(self):
        """Register post_save handlers once Django's app registry is ready.

        Bugfix: the assignment to ``_email_templates`` previously created a
        *local* variable here, so the module-level ``_email_templates`` read
        by ``post_update_on_email`` stayed undefined and the handler would
        raise NameError at send time. Declaring it ``global`` fixes that.
        """
        global _email_templates
        import moderation
        if settings.MICROBLOG_EMAIL_INTEGRATION:
            _email_templates = {
                'subject': Template(settings.MICROBLOG_EMAIL_SUBJECT_TEMPLATE),
                'body': Template(settings.MICROBLOG_EMAIL_BODY_TEMPLATE),
            }
            post_save.connect(post_update_on_email, sender=PostContent)
        if settings.MICROBLOG_TWITTER_INTEGRATION:
            post_save.connect(post_update_on_twitter, sender=PostContent)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time, urllib, csv, re, time, sys
from operator import itemgetter
import MySQLdb as mdb
import numpy as np
import datetime as dt
# Location of the secured configuration.
# NOTE(review): `os` is never imported in this file, so this line raises
# NameError as written; lib_path is also unused afterwards (presumably meant
# for a sys.path.append) - confirm.
lib_path = os.path.abspath('/secure/conf')
import ccoin_conf as cconf
# Connect to MySQL with credentials from the secure ccoin config; the
# VERSION() probe doubles as a connectivity check.
try:
    cconf.__sec_initialize(token="<PASSWORD>")
    # NOTE(review): `cconf.pass` is a SyntaxError ('pass' is a Python
    # keyword); this presumably should access the password attribute under
    # another name or via getattr - confirm against ccoin_conf.
    con = mdb.connect(cconf.host, cconf.user, cconf.pass, cconf.db)
    con.autocommit(True)
    cur = con.cursor()
    cur.execute("SELECT VERSION()")
    ver = cur.fetchone()
except mdb.Error, e:
    print "Error %d: %s" % (e.args[0], e.args[1])
    # NOTE(review): emergency_email_warning is not defined in this file -
    # presumably provided elsewhere; confirm.
    emergency_email_warning("MYSQL connection error! Error: %d, %s" % (e.args[0], e.args[1]))
    sys.exit(1)
# Global flag toggled by the --debug CLI switch below.
debug_mode = 0
def column(matrix, i):
    """Return the i-th entry of every row of *matrix* as a list."""
    return list(map(itemgetter(i), matrix))
def find(l, elem):
    """Return the index of the first row of *l* whose first item equals
    *elem*, or -1 when no such row exists."""
    for idx, row in enumerate(l):
        try:
            if row[0] == elem:
                return idx
        except ValueError:
            continue
    return -1
if __name__ == '__main__':
    # CLI driver: compute 10/30-day moving-average crossovers for one fund
    # and simulate a BUY/SELL strategy against buy-and-hold.
    comp_id = ""
    if len(sys.argv) <2:
        print("Usage: ./execute --tag '' --limit <num>[--debug]")
        sys.exit(0)
    tag = ""
    limit = 0
    # Minimal hand-rolled argument parsing.
    for ai in range(0,len(sys.argv)):
        if sys.argv[ai] == "--debug":
            debug_mode=1
        if sys.argv[ai] == "--tag":
            tag = sys.argv[ai+1]
        if sys.argv[ai] == "--limit":
            limit = int(sys.argv[ai+1])
    if tag == "" and limit == 0:
        print("Missing tagparameter")
        sys.exit(0)
    # NOTE(review): tag/limit are interpolated straight into SQL -
    # acceptable only because they come from the operator's own CLI.
    cur.execute("SELECT fundname, isin_no, label, currency, funded_since from fundproduct where prod_id="+tag)
    _product = cur.fetchall()
    if (len(_product) == 0): exit(0)
    product = _product[0]
    # print product
    # Price history, newest first.
    cur.execute("SELECT date,asset_price from assets where prod_id="+tag+" order by date desc limit "+str(limit))
    x_matrix = cur.fetchall()
    if (len(x_matrix) <615):
        print "Not enought historical data to analyse it!"
    _matrix = []
    #Calculate moving averages
    # Iterate oldest-to-newest; each row becomes [date, price, MA10, MA30].
    for row, i in reversed(list(enumerate(x_matrix))):
        _c_ma10 = 0.0
        _c_ma30 = 0.0
        if (len(x_matrix)-row >= 10):
            for j in range(0,10): _c_ma10 += float(x_matrix[row+j][1])
        if (len(x_matrix)-row >= 30):
            for j in range(0,30): _c_ma30 += float(x_matrix[row+j][1])
        _matrix.append([x_matrix[row][0], float(x_matrix[row][1]), _c_ma10/10.0, _c_ma30/30.0])
    _rawcrosses = []
    _realcrosses = []
    # The simulation always starts with a BUY at the oldest data point.
    _realcrosses.append([_matrix[0][0],"BUY",_matrix[0][1]])
    # _gdir tracks which MA is on top (10 = MA10 above MA30).
    if (_matrix[30][2]>_matrix[30][3]):
        _gdir = 10
    else:
        _gdir = 30
    _exit=0
    _skip=0
    for row, i in enumerate(_matrix):
        if (_skip >0):
            _skip -= 1
            continue
        if (row>=30):
            _ldir = 0
            if (_matrix[row][2]>_matrix[row][3]):
                _ldir = 10
            else:
                _ldir = 30
            if (_ldir != _gdir): #cross!
                _false = 0
                _type=0 # 1:golden, 2: death
                # Re-entry after an exit: buy back unless the price is still
                # near the previous sell within 30 days.
                if (_exit == 1):
                    if not (_matrix[row+1][1] < _realcrosses[len(_realcrosses)-1][2]*1.01 and _realcrosses[len(_realcrosses)-1][0]+dt.timedelta(days=30) > _matrix[row][0]):
                        _realcrosses.append([_matrix[row+1][0],"BUY",_matrix[row+1][1]])
                        _skip=3
                        _exit=0
                if (_matrix[row][1] > _matrix[row][3]):
                    _type=1
                else:
                    _type=2
                #Flapping detection
                if (len(_rawcrosses)>0):
                    if (_rawcrosses[len(_rawcrosses)-1][0]+dt.timedelta(days=20) > _matrix[row][0]):
                        _false = 1
                #Looped or rapidly changed Death Cross detection
                for k in range(0,7):
                    _loop=0
                    if (_ldir == 30 and _matrix[row-k-1][2]-_matrix[row-k-1][3] < _matrix[row-k][2]-_matrix[row-k][3]): _loop+=1
                    if (_ldir == 10 and _matrix[row-k-1][3]-_matrix[row-k-1][2] < _matrix[row-k][3]-_matrix[row-k][2]): _loop+=1
                    if (_loop >2): _false = 1
                #Closed Scisor detection
                if(_ldir == 30):
                    if (_matrix[row-6][2]*0.99 < _matrix[row-6][3]): _false=1
                if(_ldir == 10):
                    if (_matrix[row-6][3]*0.99 < _matrix[row-6][2]): _false=1
                # A cross that passed all the false-positive filters
                # triggers a SELL.
                if (_false == 0 and _exit ==0):
                    _exit=1
                    _skip=3
                    _realcrosses.append([_matrix[row+1][0],"SELL",_matrix[row+1][1]])
                _rawcrosses.append([_matrix[row][0],_type,_false,_matrix[row][1],_matrix[row][2],_matrix[row][3]])
                _gdir = _ldir
    print "Natural increment[%s]: (%.06f -> %.06f) %.03f%%"%(_matrix[0][0],_matrix[0][1],_matrix[len(_matrix)-1][1],(_matrix[len(_matrix)-1][1]/_matrix[0][1]-1.0)*100)
    # Replay the BUY/SELL signals with a notional 100k starting capital.
    _money = 100000.0
    _units = _money / _matrix[0][1]
    if (len(_realcrosses) >0):
        for i in range (1,len(_realcrosses)):
            if (_realcrosses[i][1] == "SELL"):
                _money = _units * _realcrosses[i][2]
                print _realcrosses[i][0],"SELL",_realcrosses[i][2],_money,_units
            if (_realcrosses[i][1] == "BUY"):
                _units = _money / _realcrosses[i][2]
                print _realcrosses[i][0],"BUY",_realcrosses[i][2],_money,_units
        print _realcrosses[len(_realcrosses)-1]
        # Still invested at the end: mark to the latest price.
        if (_realcrosses[len(_realcrosses)-1][1] == "BUY"):
            _money = _units * _matrix[len(_matrix)-1][1]
    print "Analysed increment: (%.06f -> %.06f) %.03f%%"%(100000,_money,((_money/100000)-1)*100)
|
import panflute as pf
import pandocfilters, json
def test_all():
    """Run the idempotency and stringify checks over every fixture pair."""
    cases = [
        ('./tests/%d/api%d/benchmark.json' % (case, api),
         './tests/%d/api%d/panflute.json' % (case, api))
        for case in (1, 2, 3, 4)
        for api in (117, 118)
    ]
    for input_fn, output_fn in cases:
        print()
        print('TESTING:', input_fn)
        print(64 * '<')
        inner_test_idempotent(input_fn, output_fn)
        inner_test_stringify(input_fn, output_fn)
        print(64 * '>')
        print()
    print('DONE!')
def empty_test(element, doc):
    """No-op panflute filter action: returns None so every element is kept."""
    return None
def inner_test_filter(element, doc):
    """Filter action: drop Header elements, append '!!' to every Str.

    Idiom fix: use isinstance() instead of ``type(x) == T`` for the type
    checks, and ``+=`` for the in-place string append.
    """
    if isinstance(element, pf.Header):
        return []  # empty list removes the element
    if isinstance(element, pf.Str):
        element.text += '!!'
        return element
def inner_test_idempotent(input_fn, output_fn):
    """Load, dump and re-compare a document; then repeat after a no-op walk.

    Asserts that a pf.load / pf.dump round-trip (with or without a trivial
    filter pass) reproduces the input JSON byte for byte.
    """
    def read_text(fn):
        # Helper: slurp a file as UTF-8 text.
        with open(fn, encoding='utf-8') as fh:
            return fh.read()

    def dump_doc(document):
        # Helper: serialize the doc plus the trailing newline pandoc emits.
        with open(output_fn, mode='w', encoding='utf-8') as fh:
            pf.dump(document, fh)
            fh.write('\n')

    print('\nLoading JSON...')
    with open(input_fn, encoding='utf-8') as fh:
        doc = pf.load(fh)
    print('Dumping JSON...')
    dump_doc(doc)
    print(' - Done!')
    print('\nComparing...')
    input_data = read_text(input_fn)
    output_data = read_text(output_fn)
    print('Are both files the same?')
    print(' - Length:', len(input_data) == len(output_data), len(input_data), len(output_data))
    print(' - Content:', input_data == output_data)
    print('\nApplying trivial filter...')
    doc = doc.walk(action=empty_test, doc=doc)
    print(' - Done!')
    print(' - Dumping JSON...')
    dump_doc(doc)
    print(' - Done!')
    print(' - Comparing...')
    input_data = read_text(input_fn)
    output_data = read_text(output_fn)
    print(' - Are both files the same?')
    print(' - Length:', len(input_data) == len(output_data), len(input_data), len(output_data))
    print(' - Content:', input_data == output_data)
    assert input_data == output_data
def inner_test_stringify(input_fn, output_fn):
    """Stringify the input with panflute and pandocfilters, writing each
    result to a temp file for manual comparison."""
    benchmark_path = './tests/temp_benchmark.txt'
    panflute_path = './tests/temp_panflute.txt'
    print('Testing stringify()')
    # panflute: parse into a Doc, then stringify.
    with open(input_fn, encoding='utf-8') as src:
        text = pf.stringify(pf.load(src))
    with open(panflute_path, encoding='utf-8', mode='w') as dst:
        dst.write(text)
    # pandocfilters: operates on the raw JSON tree.
    with open(input_fn, encoding='utf-8') as src:
        text = pandocfilters.stringify(json.load(src))
    with open(benchmark_path, encoding='utf-8', mode='w') as dst:
        dst.write(text)
# Allow running the whole suite directly, without a test runner.
if __name__ == "__main__":
    test_all()
|
from django.db import models
from django.urls import reverse
from django.utils.timezone import now
from markdown import markdown
class Comic(models.Model):
    """A webcomic: identity, per-comic theming assets and social links."""
    title = models.CharField(max_length=128)
    # URL identifier; used by get_absolute_url and the reader views.
    slug = models.CharField(max_length=128, unique=True)
    author = models.CharField(max_length=128, blank=True)
    # Optional theming assets.
    header_image = models.ImageField(blank=True)
    hr_image = models.ImageField(blank=True)
    post_border_image = models.ImageField(blank=True)
    navigation_spritesheet = models.ImageField(blank=True)
    spinner_image = models.ImageField(
        blank=True, help_text="A square image that can spin about its center. Ideally 120x120px.")
    font = models.FileField(blank=True)
    background = models.TextField(
        default="white",
        help_text="a valid CSS `background` configuration")
    # NOTE(review): help_text appears copy-pasted from `background`;
    # presumably this styles the overflow area - confirm intent.
    overflow = models.TextField(
        default="white",
        help_text="a valid CSS `background` configuration")
    genre = models.CharField(max_length=64, blank=True)

    # Social Links
    patreon_link = models.URLField(blank=True)
    discord_link = models.URLField(blank=True)
    reddit_link = models.URLField(blank=True)
    twitter_link = models.URLField(blank=True)
    instagram_link = models.URLField(blank=True)

    def get_absolute_url(self):
        """Canonical URL: the reader redirect for this comic."""
        return reverse("reader-redirect", kwargs={"comic": self.slug})

    def __str__(self):
        return self.title
class TagType(models.Model):
    """A category of tags belonging to a single comic."""
    comic = models.ForeignKey(Comic, on_delete=models.CASCADE, related_name="tag_types")
    title = models.CharField(max_length=16)  # TODO: Make sure this is URL-safe?
    # Fallback icon used by Tag.icon_url when a tag has no icon of its own.
    default_icon = models.ImageField(blank=True, help_text="Tags without an image will use this instead.")

    def __str__(self):
        return f"{self.title} ({self.comic})"

    class Meta:
        unique_together = (('comic', 'title'), )
        ordering = ('title', )

    def get_absolute_url(self):
        return reverse("tagtype", kwargs={
            "comic": self.comic.slug,
            "type": self.title
        })
class Tag(models.Model):
    """A label attached to pages, grouped under a TagType."""
    icon = models.ImageField(
        blank=True, null=True, help_text="This image needs to be a 1:1 aspect ratio.")  # TODO: Recommended pixel size
    title = models.CharField(max_length=32)  # TODO: Make sure this is URL-safe?
    type = models.ForeignKey(TagType, on_delete=models.CASCADE, related_name="tags")
    post = models.TextField(blank=True, help_text="Accepts Markdown")

    def __str__(self):
        return self.title

    class Meta:
        unique_together = (('type', 'title'), )
        ordering = ('type', 'title', )

    @property
    def icon_url(self):
        """URL of this tag's icon, falling back to the type's default icon
        (or None when neither image is set)."""
        if self.icon:
            return self.icon.url
        elif self.type.default_icon:
            return self.type.default_icon.url
        else:
            return None

    def get_absolute_url(self):
        return reverse("tag", kwargs={
            "comic": self.type.comic.slug,
            "type": self.type.title,
            "tag": self.title,
        })
class PageQuerySet(models.QuerySet):
    """Custom queryset for Page with publication-aware helpers."""

    def active(self):
        """Pages whose posted_at is not in the future, i.e. currently visible."""
        cutoff = now()
        return self.filter(posted_at__lte=cutoff)
class Page(models.Model):
    """A single comic page, optionally scheduled for future publication."""
    comic = models.ForeignKey(Comic, on_delete=models.PROTECT, related_name="pages")
    slug = models.CharField(max_length=32)
    title = models.CharField(max_length=128)
    # Float so pages can be inserted between existing ones without renumbering.
    ordering = models.FloatField()
    posted_at = models.DateTimeField(
        default=now, help_text="If this is in the future, it won't be visible until that time")
    post = models.TextField(blank=True, help_text="Accepts Markdown")
    transcript = models.TextField(blank=True, help_text="Accepts Markdown")
    image = models.ImageField()
    alt_text = models.CharField(max_length=150, blank=True)
    tags = models.ManyToManyField(Tag, blank=True, related_name="pages")
    # Custom manager exposing .active() for scheduled-publication filtering.
    objects = PageQuerySet.as_manager()

    class Meta:
        unique_together = (("comic", "slug"), ("comic", "ordering"), )
        ordering = ('ordering', )

    def get_absolute_url(self):
        return reverse("reader", kwargs={
            "comic": self.comic.slug,
            "page": self.slug,
        })

    def __str__(self):
        return f'{self.comic} | {self.title}'

    def transcript_html(self):
        """Render the Markdown transcript to HTML."""
        return markdown(self.transcript)
class Ad(models.Model):
    """A banner advertisement shown for a comic, linking to an external URL."""
    comic = models.ForeignKey(Comic, on_delete=models.PROTECT, related_name="ads")
    image = models.ImageField()
    url = models.URLField()
    # Toggle to retire an ad without deleting it.
    active = models.BooleanField(default=True)

    def __str__(self):
        return f'{self.comic} | {self.url}'
|
# Source repository: vnep-approx/vnep-approx
import pytest
from alib import datamodel
from vnep_approx.extendedcactusgraph import ExtendedCactusGraph, ExtendedCactusGraphError
class TestExtendedCactusGraph:
def setup(self):
    """Build the substrate/request fixtures shared by the tests.

    Creates the paper example (triangle substrate u->v->w->u plus a
    cactus request rooted at "j"), a two-node substrate, a simple cycle
    request and a single-edge request.
    """
    # Triangle substrate with ample node and edge capacities.
    self.substrate = datamodel.Substrate("paper_example_substrate")
    self.substrate.add_node("u", ["universal"], {"universal": 1000}, {"universal": 0.0})
    self.substrate.add_node("v", ["universal"], {"universal": 1000}, {"universal": 0.0})
    self.substrate.add_node("w", ["universal"], {"universal": 1000}, {"universal": 0.0})
    self.substrate.add_edge("u", "v", capacity=1000, bidirected=False)
    self.substrate.add_edge("v", "w", capacity=1000, bidirected=False)
    self.substrate.add_edge("w", "u", capacity=1000, bidirected=False)
    # Cactus request from the paper: a cycle j-k-l-m-j with attached paths.
    self.request = datamodel.Request("paper_example_request")
    self.request.add_node("i", 0.0, "universal", ["w"])
    self.request.add_node("j", 0.0, "universal", ["v", "w"])
    self.request.add_node("k", 0.0, "universal", ["u"])
    self.request.add_node("l", 0.0, "universal", ["u", "w"])
    self.request.add_node("m", 0.0, "universal", ["u", "v"])
    self.request.add_node("n", 0.0, "universal", ["u", "v"])
    self.request.add_node("p", 0.0, "universal", ["v"])
    self.request.add_node("q", 0.0, "universal", ["u", "w"])
    self.request.add_edge("i", "j", 0.0)
    self.request.add_edge("j", "k", 0.0)
    self.request.add_edge("k", "l", 0.0)
    self.request.add_edge("l", "m", 0.0)
    self.request.add_edge("m", "j", 0.0)
    self.request.add_edge("m", "p", 0.0)
    self.request.add_edge("p", "n", 0.0)
    self.request.add_edge("p", "q", 0.0)
    self.request.graph["root"] = "j"
    # Minimal substrate with a single directed edge u -> v.
    self.single_edge_sub = datamodel.Substrate("simple_substrate")
    self.single_edge_sub.add_node("u", ["universal"], {"universal": 1000}, {"universal": 0.0})
    self.single_edge_sub.add_node("v", ["universal"], {"universal": 1000}, {"universal": 0.0})
    self.single_edge_sub.add_edge("u", "v", capacity=1000, bidirected=False)
    # Request that is a single 3-cycle i-j-k-i.
    self.simple_cycle_req = datamodel.Request("simple_cycle_request")
    self.simple_cycle_req.add_node("i", 0.0, "universal", ["u"])
    self.simple_cycle_req.add_node("j", 0.0, "universal", ["v"])
    self.simple_cycle_req.add_node("k", 0.0, "universal", ["w"])
    self.simple_cycle_req.add_edge("i", "j", 0.0)
    self.simple_cycle_req.add_edge("j", "k", 0.0)
    self.simple_cycle_req.add_edge("k", "i", 0.0)
    self.simple_cycle_req.graph["root"] = "i"
    # Request that is a single edge i -> j.
    self.single_edge_req = datamodel.Request("simple_path_request")
    self.single_edge_req.add_node("i", 0.0, "universal", ["u", "w"])
    self.single_edge_req.add_node("j", 0.0, "universal", ["w", "v"])
    self.single_edge_req.add_edge("i", "j", 0.0)
    self.single_edge_req.graph["root"] = "i"
def test_preprocessing(self):
    """Check edge reorientation and path/cycle decomposition on the paper example."""
    ecg = ExtendedCactusGraph(self.request, self.substrate)
    reversed_edges = ecg.reversed_request_edges
    # Edges that must be flipped so everything points away from root "j".
    reversed_edges_theory = [("i", "j"), ("m", "j"), ("l", "m")]
    for e in reversed_edges_theory:
        assert e in reversed_edges, "{} - {}".format(reversed_edges, reversed_edges_theory)
    assert len(reversed_edges) == len(reversed_edges_theory)
    assert len(ecg._nodes_to_explore) == 0
    expected_paths = [[("j", "i")], [("m", "p")], [("p", "n")], [("p", "q")]]
    for path in expected_paths:
        assert path in ecg._paths
    assert len(expected_paths) == len(ecg._paths)
    # The single cycle must be decomposed into its two branches; the
    # branch assignment (left/right) is not fixed, only the partition is.
    expected_cycle = [[("j", "k"), ("k", "l")], [("j", "m"), ("m", "l")]]
    for branch in expected_cycle:
        found_cycle = ecg._cycles[0]
        left, right = found_cycle[0], found_cycle[1]
        either_left_or_right = (all(e in left for e in branch) and not any(e in right for e in branch) or
                                all(e in right for e in branch) and not any(e in left for e in branch))
        assert either_left_or_right
    assert 1 == len(ecg._cycles)
    assert ecg.cycle_branch_nodes == {"m"}
def test_correct_topology_for_single_edge_request(self):
    """A one-edge request yields sources -> one substrate layer -> sinks."""
    ecg = ExtendedCactusGraph(self.single_edge_req, self.substrate)
    u_i_source = ExtendedCactusGraph._super_node_name("i", "u", "source")
    w_i_source = ExtendedCactusGraph._super_node_name("i", "w", "source")
    u_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "u", "layer")
    v_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "v", "layer")
    w_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "w", "layer")
    v_j_sink = ExtendedCactusGraph._super_node_name("j", "v", "sink")
    w_j_sink = ExtendedCactusGraph._super_node_name("j", "w", "sink")
    expected_nodes = [u_i_source, w_i_source, u_ij, v_ij, w_ij, v_j_sink, w_j_sink]
    expected_edges = [
        (w_i_source, w_ij), (u_i_source, u_ij),  # inflow
        (u_ij, v_ij), (v_ij, w_ij), (w_ij, u_ij),  # layer
        (v_ij, v_j_sink), (w_ij, w_j_sink)  # outflow
    ]
    assert set(expected_nodes) == ecg.nodes
    assert set(expected_edges) == ecg.edges
    # check that nodes are correctly mapped
    assert "i" in ecg.source_nodes
    allowed_nodes = self.single_edge_req.get_allowed_nodes("i")
    for u in allowed_nodes:
        assert u in ecg.source_nodes["i"]
    assert len(allowed_nodes) == len(ecg.source_nodes["i"])
    assert "j" in ecg.sink_nodes
    allowed_nodes = self.single_edge_req.get_allowed_nodes("j")
    for u in allowed_nodes:
        assert u in ecg.sink_nodes["j"]
    assert len(allowed_nodes) == len(ecg.sink_nodes["j"])
def test_changing_request_edge_orientation_reverses_substrate_orientation_in_layer(self):
    """Re-rooting the request must flip the substrate edges inside the layer."""
    # check that the ecg rooted at i contains the substrate in its original orientation:
    ecg = ExtendedCactusGraph(self.single_edge_req, self.substrate)
    u_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "u", "layer")
    v_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "v", "layer")
    w_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "w", "layer")
    expected_layer_edges = {(u_ij, v_ij), (v_ij, w_ij), (w_ij, u_ij)}  # u -> v, v -> w, w -> u
    assert expected_layer_edges <= ecg.edges
    # check that the ecg rooted at j contains the substrate in reversed orientation:
    self.single_edge_req.graph["root"] = "j"
    ecg = ExtendedCactusGraph(self.single_edge_req, self.substrate)
    u_ij = ExtendedCactusGraph._super_node_name(("j", "i"), "u", "layer")
    v_ij = ExtendedCactusGraph._super_node_name(("j", "i"), "v", "layer")
    w_ij = ExtendedCactusGraph._super_node_name(("j", "i"), "w", "layer")
    expected_layer_edges = {(v_ij, u_ij), (w_ij, v_ij), (u_ij, w_ij)}  # v -> u, w -> v, u -> w
    assert expected_layer_edges <= ecg.edges
def test_correct_topology_for_simple_cycle_request(self):
    """A 3-cycle request yields two cycle branches sharing source and sink."""
    ecg = ExtendedCactusGraph(self.simple_cycle_req, self.substrate)
    u_source = ExtendedCactusGraph._super_node_name("i", "u", "source")
    # All cycle layer nodes carry the branch substrate node "w".
    u_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "u", "layer_cycle", "w")
    v_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "v", "layer_cycle", "w")
    w_ij = ExtendedCactusGraph._super_node_name(("i", "j"), "w", "layer_cycle", "w")
    u_ik = ExtendedCactusGraph._super_node_name(("i", "k"), "u", "layer_cycle", "w")
    v_ik = ExtendedCactusGraph._super_node_name(("i", "k"), "v", "layer_cycle", "w")
    w_ik = ExtendedCactusGraph._super_node_name(("i", "k"), "w", "layer_cycle", "w")
    u_jk = ExtendedCactusGraph._super_node_name(("j", "k"), "u", "layer_cycle", "w")
    v_jk = ExtendedCactusGraph._super_node_name(("j", "k"), "v", "layer_cycle", "w")
    w_jk = ExtendedCactusGraph._super_node_name(("j", "k"), "w", "layer_cycle", "w")
    w_k_sink = ExtendedCactusGraph._super_node_name("k", "w", "sink")
    expected_nodes = {
        u_source, w_k_sink,
        u_ij, v_ij, w_ij,
        u_ik, v_ik, w_ik,
        u_jk, v_jk, w_jk
    }
    expected_edges = {
        (u_source, u_ij), (u_source, u_ik),
        (u_ij, v_ij), (v_ij, w_ij), (w_ij, u_ij),  # layer i -> j
        (v_ij, v_jk),  # inter-layer edge
        (u_jk, v_jk), (v_jk, w_jk), (w_jk, u_jk),  # layer j -> k
        (v_ik, u_ik), (w_ik, v_ik), (u_ik, w_ik),  # layer i -> k. Note the reversed edge orientation
        (w_ik, w_k_sink), (w_jk, w_k_sink)
    }
    assert ecg.nodes == expected_nodes
    assert ecg.edges == expected_edges
    ecg_cycle = ecg.ecg_cycles[0]
    assert ecg_cycle.start_node == "i"
    assert len(ecg_cycle.ext_graph_branches) == 2
def test_can_discover_a_path_above_cycle(self):
    """A chain node hanging above the cycle must be classified as a path."""
    req = self.simple_cycle_req
    req.add_node("l", 0.0, "universal", ["v"])
    req.add_edge("l", "i", 0.0)
    req.graph["root"] = "l"
    graph = ExtendedCactusGraph(req, self.substrate)
    assert len(graph._paths) == 1
    assert [("l", "i")] in graph._paths
def test_can_discover_a_cycle_next_to_a_cycle(self):
    """Two adjacent cycles must both be found, whichever node is the root."""
    req = self.simple_cycle_req
    req.add_node("l", 0.0, "universal", ["v"])
    req.add_node("n", 0.0, "universal", ["u"])
    for tail, head in (("n", "i"), ("n", "l"), ("l", "i")):
        req.add_edge(tail, head, 0.0)
    for root in req.nodes:
        req.graph["root"] = root
        graph = ExtendedCactusGraph(req, self.substrate)
        assert len(graph._cycles) == 2
def test_correct_node_edge_count_for_example_from_paper(self):
    """Sanity check: total node/edge counts of the paper example's graph."""
    ecg = ExtendedCactusGraph(self.request, self.substrate)
    # print "\n"
    # print util.get_graph_viz_string(ecg)  # to verify the topology
    # print "\n"
    assert len(ecg.nodes) == 49  # 49 = 36 layer nodes + 5 sources + 8 sinks
    assert len(ecg.edges) == 66  # 66 = 36 layer edges + 14 source edges + 10 sink edges + 6 inter layer edges
def test_bug_request(self):
    """Regression: this request shape used to break graph construction."""
    req = datamodel.Request("test")
    for node in ("n1", "n2", "n3", "n4", "n5", "n6", "n7"):
        req.add_node(node, 0.0, "universal", ["u", "w"])
    for tail, head in (("n1", "n2"), ("n2", "n3"), ("n3", "n7"),
                       ("n3", "n6"), ("n2", "n4"), ("n4", "n5"),
                       ("n5", "n6")):
        req.add_edge(tail, head, 0.0)
    req.graph["root"] = "n1"
    # Construction itself is the assertion: it must not raise.
    ExtendedCactusGraph(req, self.substrate)
def test_exclude_edge_mappings_with_insufficient_resources(self):
    """Layer edges whose substrate capacity is too small must be omitted."""
    sub = datamodel.Substrate("paper_example_substrate")
    sub.add_node("u", ["universal"], {"universal": 100}, {"universal": 0.0})
    sub.add_node("v", ["universal"], {"universal": 100}, {"universal": 0.0})
    sub.add_node("w", ["universal"], {"universal": 100}, {"universal": 0.0})
    # (u, v) deliberately has capacity 1 < the demand of 10 below.
    sub.add_edge("u", "v", capacity=1, bidirected=False)
    sub.add_edge("v", "w", capacity=1000, bidirected=False)
    sub.add_edge("w", "u", capacity=1000, bidirected=False)
    req = datamodel.Request("test")
    req.add_node("n1", 0.0, "universal", ["u"])
    req.add_node("n2", 0.0, "universal", ["v"])
    req.add_node("n3", 0.0, "universal", ["w"])
    req.add_edge("n1", "n2", 10.0)
    req.add_edge("n2", "n3", 0.0)
    req.graph["root"] = "n1"
    insufficient_ext_edge = (
        ExtendedCactusGraph._super_node_name(("n1", "n2"), "u", "layer"),
        ExtendedCactusGraph._super_node_name(("n1", "n2"), "v", "layer")
    )
    ok_ext_edge = (
        ExtendedCactusGraph._super_node_name(("n2", "n3"), "u", "layer"),
        ExtendedCactusGraph._super_node_name(("n2", "n3"), "v", "layer")
    )
    eg = ExtendedCactusGraph(req, sub)
    assert insufficient_ext_edge not in eg.edges, "Extended graph contained edge corresponding to infeasible edge mapping!"
    assert ok_ext_edge in eg.edges, "Extended graph did not contain edge corresponding to feasible edge mapping!"
def test_exclude_node_mappings_with_insufficient_resources(self):
    """Sink edges onto substrate nodes with too little capacity must be omitted."""
    sub = datamodel.Substrate("paper_example_substrate")
    sub.add_node("u", ["t1", "t2"], {"t1": 100, "t2": 100}, {"t1": 0.0, "t2": 0.0})
    # "v" has zero t2 capacity, so n2 (demand 10 of t2) cannot map there.
    sub.add_node("v", ["t1", "t2"], {"t1": 100, "t2": 0.0}, {"t1": 0.0, "t2": 0.0})
    sub.add_node("w", ["t1", "t2"], {"t1": 100, "t2": 100}, {"t1": 0.0, "t2": 0.0})
    sub.add_edge("u", "v", capacity=1000, bidirected=False)
    sub.add_edge("v", "w", capacity=1000, bidirected=False)
    sub.add_edge("w", "u", capacity=1000, bidirected=False)
    req = datamodel.Request("test")
    req.add_node("n1", 10.0, "t1", allowed_nodes=["u"])
    req.add_node("n2", 10.0, "t2", allowed_nodes=["v", "w"])
    req.add_node("n3", 10.0, "t1", allowed_nodes=["w"])
    req.add_edge("n1", "n2", 10.0)
    req.add_edge("n2", "n3", 0.0)
    req.graph["root"] = "n1"
    should_not_exist = (
        ExtendedCactusGraph._super_node_name(("n1", "n2"), "v", "layer"),
        ExtendedCactusGraph._super_node_name("n2", "v", "sink")
    )
    should_exist = (
        ExtendedCactusGraph._super_node_name(("n1", "n2"), "w", "layer"),
        ExtendedCactusGraph._super_node_name("n2", "w", "sink")
    )
    eg = ExtendedCactusGraph(req, sub)
    # Debug output (Python 2 print statements, kept as-is).
    for u in eg.nodes: print u
    for e in eg.edges: print e
    assert should_not_exist not in eg.edges, "Extended graph contained edge corresponding to infeasible node mapping in path"
    assert should_exist in eg.edges, "Extended graph did not contain edge corresponding to feasible node mapping"
def test_exclude_node_mappings_with_insufficient_resources_cycle(self):
    """Same exclusion logic as above, but for nodes inside a request cycle."""
    # check for a cycle
    sub = datamodel.Substrate("test_substrate")
    sub.add_node("u", ["t1", "t2"], {"t1": 100, "t2": 100}, {"t1": 0.0, "t2": 0.0})
    # "v" has zero t2 capacity; "x" has only 1.0 of t1 (< demand 10).
    sub.add_node("v", ["t1", "t2"], {"t1": 100, "t2": 0.0}, {"t1": 0.0, "t2": 0.0})
    sub.add_node("w", ["t1", "t2"], {"t1": 100, "t2": 100}, {"t1": 0.0, "t2": 0.0})
    sub.add_node("x", ["t1"], {"t1": 1.0}, {"t1": 0.0})
    sub.add_edge("u", "v", capacity=1000, bidirected=False)
    sub.add_edge("v", "w", capacity=1000, bidirected=False)
    sub.add_edge("w", "u", capacity=1000, bidirected=False)
    sub.add_edge("w", "x", capacity=1000, bidirected=False)
    req = datamodel.Request("test_request")
    req.graph["root"] = "n1"
    req.add_node("n1", 10.0, "t1", allowed_nodes=["u"])
    req.add_node("n2", 10.0, "t2", allowed_nodes=["v", "w"])
    req.add_node("n3", 10.0, "t1", allowed_nodes=["w"])
    req.add_node("target", 10.0, "t1", allowed_nodes=["w", "x"])
    req.add_edge("n1", "n2", 10.0)
    req.add_edge("n2", "target", 10.0)
    req.add_edge("n1", "n3", 10.0)
    req.add_edge("n3", "target", 10.0)
    eg = ExtendedCactusGraph(req, sub)
    should_not_exist = (
        ExtendedCactusGraph._super_node_name(("n1", "n2"), "v", "layer_cycle", branch_substrate_node="w"),
        ExtendedCactusGraph._super_node_name(("n2", "target"), "v", "layer_cycle", branch_substrate_node="w")
    )
    should_exist = (
        ExtendedCactusGraph._super_node_name(("n1", "n2"), "w", "layer_cycle", branch_substrate_node="w"),
        ExtendedCactusGraph._super_node_name(("n2", "target"), "w", "layer_cycle", branch_substrate_node="w")
    )
    # Debug output (Python 2 print statements, kept as-is).
    print should_not_exist
    print should_exist
    for u in eg.nodes: print u
    for e in eg.edges: print e
    assert should_exist in eg.edges, "Extended graph did not contain edge corresponding to feasible node mapping"
    assert should_not_exist not in eg.edges, "Extended graph contained edge corresponding to infeasible node mapping in cycle"
    # The cycle must not branch at "x" (insufficient t1 capacity for target).
    should_not_exist = ExtendedCactusGraph._super_node_name(("n1", "n2"), "v", "layer_cycle", branch_substrate_node="x")
    should_exist = ExtendedCactusGraph._super_node_name(("n1", "n2"), "v", "layer_cycle", branch_substrate_node="w")
    assert should_not_exist not in eg.nodes, "Extended graph contained edge corresponding to infeasible node mapping in cycle"
    def test_exclude_edge_mappings_with_insufficient_resources_cycle(self):
        """Edges of the extended cycle graph must respect substrate edge
        capacities: virtual edges demanding more bandwidth than a substrate
        edge offers may not produce super-edges over it.
        """
        # Substrate: directed 3-cycle with asymmetric capacities.
        # w->u offers only 10.0, v->w 50.0, u->v 100.0.
        sub = datamodel.Substrate("test_substrate")
        sub.add_node("u", ["t1", "t2"], {"t1": 100, "t2": 100}, {"t1": 0.0, "t2": 0.0})
        sub.add_node("v", ["t1", "t2"], {"t1": 100, "t2": 100}, {"t1": 0.0, "t2": 0.0})
        sub.add_node("w", ["t1", "t2"], {"t1": 100, "t2": 100}, {"t1": 0.0, "t2": 0.0})
        sub.add_edge("w", "u", capacity=10.0, bidirected=False)
        sub.add_edge("v", "w", capacity=50.0, bidirected=False)
        sub.add_edge("u", "v", capacity=100.0, bidirected=False)
        # Request: triangle n1-n2-n3 whose edge demands (1, 50, 100) are
        # chosen to rule out specific substrate edges per virtual edge.
        req = datamodel.Request("test_request")
        req.graph["root"] = "n1"
        req.add_node("n1", 10.0, "t1", allowed_nodes=["u", "v", "w"])
        req.add_node("n2", 10.0, "t2", allowed_nodes=["u", "v", "w"])
        req.add_node("n3", 10.0, "t1", allowed_nodes=["w"])
        req.add_edge("n1", "n2", 1.0)
        req.add_edge("n2", "n3", 50.0)
        req.add_edge("n1", "n3", 100.0)
        eg = ExtendedCactusGraph(req, sub)
        from alib.util import get_graph_viz_string
        # Debug output of the generated extended graph (Python 2 print).
        print get_graph_viz_string(eg)
        # Super-edges that would route demand over under-provisioned
        # substrate edges (e.g. (n2,n3) with demand 50 over w->u cap 10).
        should_not_exist = [
            (
                ExtendedCactusGraph._super_node_name(("n2", "n3"), "w", "layer_cycle", branch_substrate_node="w"),
                ExtendedCactusGraph._super_node_name(("n2", "n3"), "u", "layer_cycle", branch_substrate_node="w")
            ),
            (
                ExtendedCactusGraph._super_node_name(("n1", "n3"), "w", "layer_cycle", branch_substrate_node="w"),
                ExtendedCactusGraph._super_node_name(("n1", "n3"), "u", "layer_cycle", branch_substrate_node="w")
            ),
            (
                ExtendedCactusGraph._super_node_name(("n1", "n3"), "v", "layer_cycle", branch_substrate_node="w"),
                ExtendedCactusGraph._super_node_name(("n1", "n3"), "w", "layer_cycle", branch_substrate_node="w")
            ),
        ]
        # Super-edges over sufficiently provisioned substrate edges.
        should_exist = [
            (
                ExtendedCactusGraph._super_node_name(("n2", "n3"), "u", "layer_cycle", branch_substrate_node="w"),
                ExtendedCactusGraph._super_node_name(("n2", "n3"), "v", "layer_cycle", branch_substrate_node="w")
            ),
            (
                ExtendedCactusGraph._super_node_name(("n2", "n3"), "v", "layer_cycle", branch_substrate_node="w"),
                ExtendedCactusGraph._super_node_name(("n2", "n3"), "w", "layer_cycle", branch_substrate_node="w")
            ),
            (
                ExtendedCactusGraph._super_node_name(("n1", "n3"), "u", "layer_cycle", branch_substrate_node="w"),
                ExtendedCactusGraph._super_node_name(("n1", "n3"), "v", "layer_cycle", branch_substrate_node="w")
            ),
        ]
        for e in should_exist:
            assert e in eg.edges
        for e in should_not_exist:
            assert e not in eg.edges
def test_multiple_compenents_raise_exception(self):
request = datamodel.Request("foo")
request.add_node("i1", 1, "universal", {"u"})
request.add_node("i2", 1, "universal", {"u"})
request.add_node("i3", 1, "universal", {"u"})
request.add_node("i4", 1, "universal", {"u"})
request.add_edge("i1", "i2", 1)
request.add_edge("i3", "i4", 1)
with pytest.raises(
ExtendedCactusGraphError) as excinfo:
ExtendedCactusGraph(request, self.substrate)
assert excinfo.match("Request graph may have multiple components:")
|
import numpy as np
import torch
import collections
from base.baseagent import BaseAgent
from core.console import Progbar
import core.math as m_utils
import core.utils as U
from Option import OptionTRPO
import core.console as C
import gc
class GateTRPO(BaseAgent):
    """Hierarchical TRPO agent: a gate policy selects among sub-option
    policies (``OptionTRPO``); the gate is updated by a trust-region step
    with a mutual-information regulariser weighted by ``MI_lambda``.

    NOTE(review): several parts of this class look work-in-progress --
    ``calculate_losses`` compares a policy against itself, ``KLRIM`` does
    not return anything, and ``calculate_surr`` references undefined names.
    See per-method comments.
    """
    name = "GateTRPO"

    def __init__(self, env, gatepolicy, policy_func, value_func, n_options, option_len=3,
                 timesteps_per_batch=1000,
                 gamma=0.99, lam=0.97, MI_lambda=1e-3,
                 gate_max_kl=1e-2,
                 option_max_kl=1e-2,
                 cg_iters=10,
                 cg_damping=1e-2,
                 vf_iters=2,
                 max_train=1000,
                 ls_step=0.5,
                 checkpoint_freq=50):
        """Build the gate policy, a frozen copy of it for KL/ratio
        computations, and one OptionTRPO learner per option.
        """
        super(GateTRPO, self).__init__(name=env.name)
        self.n_options = n_options
        #self.name = self.name
        self.env = env
        self.gamma = gamma                      # discount factor
        self.lam = lam                          # GAE lambda
        self.MI_lambda = MI_lambda              # weight of the MI penalty
        self.current_option = 0
        self.timesteps_per_batch = timesteps_per_batch
        self.gate_max_kl = gate_max_kl          # trust-region size for the gate
        self.cg_iters = cg_iters
        self.cg_damping = cg_damping
        self.max_train = max_train
        self.ls_step = ls_step                  # line-search backtracking factor
        self.checkpoint_freq = checkpoint_freq
        self.policy = gatepolicy(env.observation_space.shape, env.action_space.n)
        # Frozen snapshot of the gate policy (updated via copy() each batch).
        self.oldpolicy = gatepolicy(env.observation_space.shape, env.action_space.n, verbose=0)
        self.oldpolicy.disable_grad()
        self.progbar = Progbar(self.timesteps_per_batch)
        self.path_generator = self.roller()
        # Rolling stats over the last 5 finished episodes.
        self.episodes_reward = collections.deque([], 5)
        self.episodes_len = collections.deque([], 5)
        self.done = 0                           # number of training iterations done
        self.functions = [self.policy]
        self.options = [OptionTRPO(env.name, i,
                                   env, policy_func, value_func,
                                   gamma, lam, option_len,
                                   option_max_kl, cg_iters, cg_damping, vf_iters, ls_step,
                                   self.logger, checkpoint_freq) for i in range(n_options)]

    def act(self, state, train=True):
        """Pick an option index: stochastic sample when training,
        greedy action otherwise.
        """
        if train:
            return self.policy.sample(state)
        return self.policy.act(state)

    def calculate_losses(self, states, options, actions, advantages):
        """Assemble the surrogate gain and helper closures from KLRIM.

        NOTE(review): ``pi`` and ``old_pi`` both read the SAME key
        ("old_log_pi_oia_s"), so ``ratio`` is identically 1 -- the new
        policy's log-probs were presumably intended for ``pi``.
        NOTE(review): keys "MI", "MI_get" are not visibly produced by
        KLRIM in this file -- confirm against the full source.
        """
        RIM = self.KLRIM(states, options, actions)
        old_pi = RIM["old_log_pi_oia_s"]
        pi = RIM["old_log_pi_oia_s"]
        ratio = torch.exp(m_utils.logp(pi, actions) - m_utils.logp(old_pi, actions))  # advantage * pnew / pold
        surrogate_gain = (ratio * advantages).mean()
        optimization_gain = surrogate_gain - self.MI_lambda * RIM["MI"]

        def surr_get(grad=False):
            # Re-evaluate the MI-regularised surrogate (optionally with grad).
            Id, pid = RIM["MI_get"](grad)
            return (torch.exp(m_utils.logp(pid, actions) - m_utils.logp(old_pi, actions)) * advantages).mean() - self.MI_lambda * Id

        RIM["gain"] = optimization_gain
        RIM["surr_get"] = surr_get
        return RIM

    def train(self):
        """Outer loop: roll a batch, snapshot old policies, run one TRPO
        update, checkpoint every ``checkpoint_freq`` iterations.
        """
        while self.done < self.max_train:
            print("=" * 40)
            print(" " * 15, self.done, "\n")
            self.logger.step()
            path = self.path_generator.__next__()
            # Freeze current policies as the "old" references for ratios/KL.
            self.oldpolicy.copy(self.policy)
            for p in self.options:
                p.oldpolicy.copy(p.policy)
            self._train(path)
            self.logger.display()
            if not self.done % self.checkpoint_freq:
                self.save()
                for p in self.options:
                    p.save()
            self.done = self.done + 1
        self.done = 0

    def _train(self, path):
        """One natural-gradient (TRPO) update of the gate policy followed by
        per-option updates.

        NOTE(review): losses["gate_meankl"], ["KL_gate_get"], ["KL_get"]
        are consumed here but not visibly produced upstream -- confirm.
        """
        states = U.torchify(path["states"])
        options = U.torchify(path["options"]).long()
        actions = U.torchify(path["actions"]).long()
        advantages = U.torchify(path["baseline"])
        tdlamret = U.torchify(path["tdlamret"])
        vpred = U.torchify(path["vf"])  # predicted value function before udpate
        #advantages = (advantages - advantages.mean()) / advantages.std() # standardized advantage function estimate
        losses = self.calculate_losses(states, options, actions, advantages)
        kl = losses["gate_meankl"]
        optimization_gain = losses["gain"]
        loss_grad = self.policy.flaten.flatgrad(optimization_gain, retain=True)
        grad_kl = self.policy.flaten.flatgrad(kl, create=True, retain=True)
        theta_before = self.policy.flaten.get()
        self.log("Init param sum", theta_before.sum())
        # NOTE(review): conventional explained variance is
        # 1 - var(err)/var(y); this logs var(err)/var(y) directly.
        self.log("explained variance", (vpred - tdlamret).var() / tdlamret.var())
        if np.allclose(loss_grad.detach().cpu().numpy(), 0, atol=1e-19):
            print("Got zero gradient. not updating")
        else:
            with C.timeit("Conjugate Gradient"):
                # Solve F x = g approximately for the natural-gradient step.
                stepdir = m_utils.conjugate_gradient(self.Fvp(grad_kl), loss_grad, cg_iters=self.cg_iters)
            self.log("Conjugate Gradient in s", C.elapsed)
            assert stepdir.sum() != float("Inf")
            # Scale the step to the trust-region boundary: sqrt(sFs / 2*max_kl).
            shs = .5 * stepdir.dot(self.Fvp(grad_kl)(stepdir))
            lm = torch.sqrt(shs / self.gate_max_kl)
            self.log("lagrange multiplier:", lm)
            self.log("gnorm:", np.linalg.norm(loss_grad.cpu().detach().numpy()))
            fullstep = stepdir / lm
            expected_improve = loss_grad.dot(fullstep)
            surrogate_before = losses["gain"].detach()
            with C.timeit("Line Search"):
                # Backtracking line search on the surrogate under the KL cap.
                stepsize = 1.0
                for i in range(10):
                    theta_new = theta_before + fullstep * stepsize
                    self.policy.flaten.set(theta_new)
                    surr = losses["surr_get"]()
                    improve = surr - surrogate_before
                    kl = losses["KL_gate_get"]()
                    if surr == float("Inf") or kl == float("Inf"):
                        C.warning("Infinite value of losses")
                    elif kl > self.gate_max_kl:
                        C.warning("Violated KL")
                    elif improve < 0:
                        stepsize *= self.ls_step
                    else:
                        self.log("Line Search", "OK")
                        break
                else:
                    # No acceptable step found: revert to the old parameters.
                    improve = 0
                    self.log("Line Search", "NOPE")
                    self.policy.flaten.set(theta_before)
            # Update each option's policy with the (regularised) gain.
            for op in self.options:
                losses["gain"] = losses["surr_get"](grad=True)
                op.train(states, options, actions, advantages, tdlamret, losses)
            surr = losses["surr_get"]()
            improve = surr - surrogate_before
            self.log("Expected", expected_improve)
            self.log("Actual", improve)
            self.log("Line Search in s", C.elapsed)
            self.log("LS Steps", i)
            self.log("KL", kl)
            self.log("MI", -losses["MI"])
            self.log("MI improve", -losses["MI_get"]()[0] + losses["MI"])
            self.log("Surrogate", surr)
            self.log("Gate KL", losses["KL_gate_get"]())
            self.log("HRL KL", losses["KL_get"]())
            self.log("TDlamret mean", tdlamret.mean())
            del(improve, surr, kl)
        self.log("Last %i rolls mean rew" % len(self.episodes_reward), np.mean(self.episodes_reward))
        self.log("Last %i rolls mean len" % len(self.episodes_len), np.mean(self.episodes_len))
        # Drop tensors and force GC to release GPU/CPU memory between batches.
        del(losses, states, options, actions, advantages, tdlamret, vpred, optimization_gain, loss_grad, grad_kl)
        for _ in range(10):
            gc.collect()

    def roller(self):
        """Generator yielding fixed-size rollout batches.

        Re-selects an option whenever the current one reports finished;
        records per-step state/option/action/reward/value/terminal flags.
        """
        state = self.env.reset()
        path = {"states": np.array([state for _ in range(self.timesteps_per_batch)]),
                "options": np.zeros(self.timesteps_per_batch).astype(int),
                "actions": np.zeros(self.timesteps_per_batch).astype(int),
                "rewards": np.zeros(self.timesteps_per_batch),
                "terminated": np.zeros(self.timesteps_per_batch),
                "vf": np.zeros(self.timesteps_per_batch)}
        self.current_option = self.act(state)
        self.options[self.current_option].select()
        ep_rews = 0
        ep_len = 0
        t = 0
        done = True
        rew = 0.0
        self.progbar.__init__(self.timesteps_per_batch)
        while True:
            if self.options[self.current_option].finished:
                # Option terminated: let the gate pick the next one.
                self.current_option = self.act(state)
            action = self.options[self.current_option].act(state)
            vf = self.options[self.current_option].value_function.predict(state)
            if t > self.timesteps_per_batch - 1:
                # Batch full: bootstrap with the next value (zero if terminal),
                # compute GAE targets, and hand the batch to the trainer.
                path["next_vf"] = vf * (1 - done * 1.0)
                self.add_vtarg_and_adv(path)
                yield path
                t = 0
                self.progbar.__init__(self.timesteps_per_batch)
            path["states"][t] = state
            state, rew, done, _ = self.env.step(action)
            path["options"][t] = self.options[self.current_option].option_n
            path["actions"][t] = action
            path["rewards"][t] = rew
            path["vf"][t] = vf
            path["terminated"][t] = done * 1.0
            ep_rews += rew
            ep_len += 1
            t += 1
            self.progbar.add(1)
            if done:
                state = self.env.reset()
                self.episodes_reward.append(ep_rews)
                self.episodes_len.append(ep_len)
                ep_rews = 0
                ep_len = 0

    def add_vtarg_and_adv(self, path):
        # General Advantage Estimation
        # Backward recursion: A_t = delta_t + gamma*lam*A_{t+1} (cut at terminals).
        terminal = np.append(path["terminated"], 0)
        vpred = np.append(path["vf"], path["next_vf"])
        T = len(path["rewards"])
        path["advantage"] = np.empty(T, 'float32')
        lastgaelam = 0
        for t in reversed(range(T)):
            nonterminal = 1 - terminal[t + 1]
            delta = path["rewards"][t] + self.gamma * vpred[t + 1] * nonterminal - vpred[t]
            path["advantage"][t] = lastgaelam = delta + self.gamma * self.lam * nonterminal * lastgaelam
        # TD(lambda) return targets and standardized advantages ("baseline").
        path["tdlamret"] = (path["advantage"] + path["vf"]).reshape(-1, 1)
        path["baseline"] = (path["advantage"] - np.mean(path["advantage"])) / np.std(path["advantage"])

    def Fvp(self, grad_kl):
        """Return a Fisher-vector-product closure (Hessian of KL times v),
        damped by ``cg_damping`` for numerical stability.
        """
        def fisher_product(v):
            kl_v = (grad_kl * v).sum()
            grad_grad_kl = self.policy.flaten.flatgrad(kl_v, retain=True)
            return grad_grad_kl + v * self.cg_damping
        return fisher_product

    def KLRIM(self, states, options, actions):
        r"""
        pg : \pi_g
        pi_a_so : \pi(a|s,o)
        pi_oa_s : \pi(o,a|s)
        pi_o_as : \pi(o|a,s)
        pi_a_s : \pi(a|s)
        old : \tilde(\pi)
        """
        # Joint old log-probs over (option, action) given state.
        old_log_pi_a_so = torch.cat([p.oldpolicy.logsoftmax(states).unsqueeze(1).detach() for p in self.options], dim=1)
        old_log_pg_o_s = self.oldpolicy.logsoftmax(states).detach()
        old_log_pi_oa_s = old_log_pi_a_so + old_log_pg_o_s.unsqueeze(-1)
        old_log_pi_a_s = old_log_pi_oa_s.exp().sum(1).log()
        old_log_pi_oia_s = old_log_pi_oa_s[np.arange(states.shape[0]), options]
        # NOTE(review): no return statement -- callers (calculate_losses)
        # expect a dict; this method appears unfinished in this revision.

    def calculate_surr(self, states, options, actions, advantages, grad=False):
        """Compute MI terms and a surrogate gain for the current policies.

        NOTE(review): ``pi`` and ``old_pi`` below are undefined in this
        scope (NameError if executed) and the method never returns --
        clearly work-in-progress; the commented block mirrors
        ``calculate_losses``.
        """
        if grad:
            log_pi_a_so = torch.cat([p.policy.logsoftmax(states).unsqueeze(1) for p in self.options], dim=1)
            log_pg_o_s = self.policy.logsoftmax(states)
        else:
            with torch.set_grad_enabled(False):
                log_pi_a_so = torch.cat([p.policy.logsoftmax(states).unsqueeze(1) for p in self.options], dim=1)
                log_pg_o_s = self.policy.logsoftmax(states)
        log_pi_oa_s = log_pi_a_so + log_pg_o_s.unsqueeze(-1)
        log_pi_a_s = log_pi_oa_s.exp().sum(1).log()
        log_pi_o_as = log_pi_oa_s - log_pi_a_s.unsqueeze(1)
        H_O_AS = -(log_pi_a_s.exp() * (log_pi_o_as * log_pi_o_as.exp()).sum(1)).sum(-1).mean()
        H_O = m_utils.entropy_logits(log_pg_o_s).mean()
        log_pi_o_ais = log_pi_o_as[np.arange(states.shape[0]), :, actions].exp().mean(0).log()
        log_pi_oi_ais = log_pi_o_as[np.arange(states.shape[0]), options, actions]
        log_pi_oia_s = log_pi_oa_s[np.arange(states.shape[0]), options]
        MI = m_utils.entropy_logits(log_pi_o_ais) - m_utils.entropy_logits(log_pi_oi_ais)
        ratio = torch.exp(m_utils.logp(pi, actions) - m_utils.logp(old_pi, actions))
        surrogate_gain = (ratio * advantages).mean()
        optimization_gain = surrogate_gain - self.MI_lambda * MI
        # def surr_get(self,grad=False):
        #     Id,pid = RIM["MI_get"](grad)
        #     return (torch.exp(m_utils.logp(pid,actions) - m_utils.logp(old_pi,actions))*advantages).mean() - self.MI_lambda*Id
        #
        # RIM["gain"] = optimization_gain
        # RIM["surr_get"] = surr_get
        # return RIM
        #
        # return MI
        #
        # log_pi_a_so = torch.cat([p.policy.logsoftmax(states).unsqueeze(1) for p in self.options],dim=1)
        # log_pg_o_s = self.policy.logsoftmax(states)
        # log_pi_oa_s = log_pi_a_so+log_pg_o_s.unsqueeze(-1)
        # log_pi_a_s = log_pi_oa_s.exp().sum(1).log()
        # log_pi_o_as = log_pi_oa_s - log_pi_a_s.unsqueeze(1)
        #
        #
        # log_pi_o_ais = log_pi_o_as[np.arange(states.shape[0]),:,actions].exp().mean(0).log()
        # log_pi_oi_ais = log_pi_o_as[np.arange(states.shape[0]),options,actions]

    def mean_HKL(self, states, old_log_pi_a_s, grad=False):
        """Mean KL between the old and current marginal action
        distributions pi(a|s) of the full hierarchy.
        """
        if grad:
            log_pi_a_so = torch.cat([p.policy.logsoftmax(states).unsqueeze(1) for p in self.options], dim=1)
            log_pg_o_s = self.policy.logsoftmax(states)
        else:
            log_pi_a_so = torch.cat([p.policy.logsoftmax(states).detach().unsqueeze(1) for p in self.options], dim=1)
            log_pg_o_s = self.policy.logsoftmax(states).detach()
        log_pi_a_s = (log_pi_a_so + log_pg_o_s.unsqueeze(-1)).exp().sum(1).log()
        mean_kl_new_old = m_utils.kl_logits(old_log_pi_a_s, log_pi_a_s).mean()
        return mean_kl_new_old

    def mean_KL_gate(self, states, old_log_pg_o_s, grad=False):
        """Mean KL between the old and current gate distributions pi_g(o|s)."""
        if grad:
            log_pg_o_s = self.policy.logsoftmax(states)
        else:
            log_pg_o_s = self.policy.logsoftmax(states).detach()
        return m_utils.kl_logits(old_log_pg_o_s, log_pg_o_s).mean()

    def load(self):
        """Restore the gate policy and every option from checkpoints."""
        super(GateTRPO, self).load()
        for p in self.options:
            p.load()
|
<gh_stars>0
from discord.ext import commands
from discord.ext import menus
import discord
import random
import asyncio
import aiohttp
import json
hid = 666317117154525185
async def req(a=0):
    """Fetch status JSON from one of four monitored endpoints.

    :param a: endpoint selector 0-3; any other value falls back to 0
              (matching the original if-chain's default).
    :returns: the decoded JSON payload.
    """
    # Dispatch table replaces the original if-chain; unknown selectors
    # fall back to endpoint 0, exactly as before.
    urls = {0: "URL1", 1: "URL2", 2: "URL3", 3: "URL4"}
    url = urls.get(a, urls[0])
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers={'auth': 'PASSWORD'}) as resp:
            txt = await resp.text()
    return json.loads(txt)
def cen(ip):
    """Censor the two middle octets of a dotted-quad IP string by
    replacing every ASCII digit in them with 'x'.
    """
    octets = ip.split(".")
    for idx in (1, 2):
        octets[idx] = ''.join('x' if ch in '0123456789' else ch for ch in octets[idx])
    return '.'.join(octets)
class MySource(menus.ListPageSource):
    """Paginated embed source rendering the status JSON returned by req().

    Pages 0-8: system, CPU, memory, swap, network, I/O, Python, other
    versions, .NET. Relies on the module-level ``client`` global set by
    ``setup()``.
    """
    def __init__(self, data, json):
        # NOTE: parameter ``json`` shadows the stdlib module inside this
        # class; here it is the decoded status payload, not the module.
        super().__init__(data, per_page=1)
        max = 9  # total page count (shadows builtin ``max`` locally)
        # Only the 'justh' host exposes the extra .NET page.
        if json['sys']['sys']['node'] == 'justh':
            self._max_pages = max
        if json['sys']['sys']['node'] != 'justh':
            self._max_pages = max - 1
        self.json = json

    async def format_page(self, menu, entries):
        """Build the embed for the page currently shown by ``menu``."""
        dat = self.json
        # NOTE(review): eval() on client.info['color'] executes
        # configuration as code -- int(color, 16) would be safer.
        if menu.current_page == 0:
            # Page 0: host/system information.
            zer = dat['sys']['sys']
            msg = discord.Embed(title="System Info",description=f"**{zer['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='OS',value=f"{zer['os']}",inline=False)
            msg.add_field(name='Node',value=f"{zer['node']}",inline=False)
            msg.add_field(name='Release',value=f"{zer['release']}",inline=False)
            msg.add_field(name='Version',value=f"{zer['ver']}",inline=False)
            msg.add_field(name='Architecture',value=f"{zer['arch']}",inline=False)
            msg.add_field(name='Boot Time',value=f"{zer['start']}",inline=False)
            return msg
        if menu.current_page == 1:
            # Page 1: CPU frequency/core counts/usage.
            zer = dat['sys']['cpu']
            msg = discord.Embed(title="Cpu Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='Current Frequency',value=f"{zer['curfreq']}",inline=False)
            msg.add_field(name='Number of Physical Cores',value=f"{zer['phys']}",inline=False)
            msg.add_field(name='Number of Toal Cores',value=f"{zer['total']}",inline=False)
            msg.add_field(name='Percent Used',value=f"{zer['use']}",inline=False)
            return msg
        if menu.current_page == 2:
            # Page 2: memory usage.
            zer = dat['sys']['mem']
            msg = discord.Embed(title="Memory Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='Total Storage',value=f"{zer['total']}",inline=False)
            msg.add_field(name='Avaliable Storage',value=f"{zer['avaliable']}",inline=False)
            msg.add_field(name='Used Storage',value=f"{zer['used']}",inline=False)
            msg.add_field(name='Percent Free',value=f"{zer['percnt']}",inline=False)
            return msg
        if menu.current_page == 3:
            # Page 3: swap usage.
            zer = dat['sys']['mem']['swap']
            msg = discord.Embed(title="Swap Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='Total Space',value=f"{zer['total']}",inline=False)
            msg.add_field(name='Free Space',value=f"{zer['free']}",inline=False)
            msg.add_field(name='Used Space',value=f"{zer['used']}",inline=False)
            msg.add_field(name='Percent Used',value=f"{zer['percnt']}",inline=False)
            return msg
        if menu.current_page == 4:
            # Page 4: network interface; IPs are partially censored via cen().
            zer = dat['sys']['net']
            msg = discord.Embed(title="Network Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='Interface Name',value=f"{zer['name']}",inline=False)
            msg.add_field(name='IP',value=f"{cen(zer['ip'])}",inline=False)
            msg.add_field(name='NetMask',value=f"{zer['mask']}",inline=False)
            msg.add_field(name='Broadcast IP',value=f"{cen(zer['bip'])}",inline=False)
            return msg
        if menu.current_page == 5:
            # Page 5: network I/O counters.
            zer = dat['sys']['io']
            msg = discord.Embed(title="I/O Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='Sent',value=f"{zer['sent']}",inline=False)
            msg.add_field(name='Recived',value=f"{zer['rcved']}",inline=False)
            return msg
        if menu.current_page == 6:
            # Page 6: Python interpreter info.
            zer = dat['py']
            msg = discord.Embed(title="Python Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='Version',value=f"{zer['ver']}",inline=False)
            msg.add_field(name='Version info',value=f"```py\n{zer['verinf']}```",inline=False)
            return msg
        if menu.current_page == 7:
            # Page 7: other installed tool versions; the 'justh' host has a
            # richer set than other nodes.
            zer = dat['other-versions']
            if dat['sys']['sys']['node'] == 'justh':
                msg = discord.Embed(title="Other Version Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
                msg.set_thumbnail(url=f"{client.info['avatar']}")
                msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
                msg.add_field(name='Ruby',value=f"{zer['ruby']}",inline=True)
                msg.add_field(name='Julia',value=f"{zer['julia']}",inline=True)
                msg.add_field(name='PHP',value=f"{zer['php']}",inline=True)
                msg.add_field(name='GoLang',value=f"{zer['go']}",inline=True)
                msg.add_field(name='JavaScript',value=f"{zer['js']}",inline=True)
                msg.add_field(name='Lua',value=f"{zer['lua']}",inline=True)
                msg.add_field(name='Rust',value=f"{zer['rust']}",inline=True)
                msg.add_field(name='Crystal',value=f"{zer['crystal']}",inline=True)
                msg.add_field(name='Dart',value=f"{zer['dart']}",inline=True)
                msg.add_field(name='Elixir',value=f"{zer['elixir']}",inline=True)
                msg.add_field(name='Nginx',value=f"{zer['nginx']}",inline=True)
                msg.add_field(name='Docker',value=f"{zer['docker']}",inline=True)
                msg.add_field(name='Docker Compose',value=f"{zer['docker-compose']}",inline=True)
                msg.add_field(name='Apt',value=f"{zer['apt']}",inline=True)
                msg.add_field(name='Nano',value=f"{zer['nano']}",inline=True)
            if dat['sys']['sys']['node'] != 'justh':
                msg = discord.Embed(title="Other Version Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
                msg.set_thumbnail(url=f"{client.info['avatar']}")
                msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
                # Package managers differ per distro: probe apt/dnf keys.
                try: zer['apt']
                except KeyError: zer['apt'] = None
                if zer['apt'] is not None:
                    msg.add_field(name='Apt',value=f"{zer['apt']}",inline=False)
                try: zer['dnf']
                except KeyError: zer['dnf'] = None
                if zer['dnf'] is not None:
                    msg.add_field(name='Dnf',value=f"{zer['dnf']}",inline=False)
                msg.add_field(name='Nginx',value=f"{zer['nginx']}",inline=False)
                msg.add_field(name='Nano',value=f"{zer['nano']}",inline=False)
            return msg
        if menu.current_page == 8:
            # Page 8: .NET toolchain (only reachable on the 'justh' host).
            zer = dat['other-versions']['dotnet']
            msg = discord.Embed(title=".NET Info",description=f"**{dat['sys']['sys']['node']}**\n*page {menu.current_page+1}/{self.get_max_pages()}*",color=eval(f"0x{client.info['color']}"))
            msg.set_thumbnail(url=f"{client.info['avatar']}")
            msg.set_footer(text=f'created by @{client.get_user(hid)} <{hid}>', icon_url=client.get_user(hid).avatar_url)
            msg.add_field(name='Version',value=f"{zer['ver']}",inline=False)
            msg.add_field(name='Runtimes',value=f"{zer['runtimes']}",inline=False)
            msg.add_field(name='SDKs',value=f"{zer['sdks']}",inline=False)
            return msg
def menuobj(inp):
    """Wrap the status payload ``inp`` in a reaction-driven paginator.

    The data list is ten placeholder entries; MySource overrides the page
    count itself based on the payload.
    """
    placeholders = ['.'] * 10
    return menus.MenuPages(source=MySource(placeholders, inp), clear_reactions_after=True)
class hreqs(commands.Cog):
    """Owner-only cog exposing the ``req`` status command."""

    @commands.command()
    async def req(self, ctx):
        """Fetch a host status report and show it as a paginated menu.

        Only the owner (``hid``) may invoke this; everyone else gets a
        refusal message.
        """
        if ctx.message.author.id != hid:
            await ctx.message.channel.send(content=f"only <@!{hid}> can do that!")
            return
        loading = await ctx.message.channel.send(content="loading...")
        # Strip the command prefix ("?req " is 6 chars) and any spaces.
        arg = ctx.message.content[6:].replace(" ", "")
        if arg in ("", "bot", "bots", "0"):
            endpoint = 0
        elif arg in ("chem", "chemistry", "computertime", "1"):
            endpoint = 1
        elif arg in ("misc", "cent", "hmisc", "2"):
            endpoint = 2
        elif arg in ("site", "sites", "hsites", "3"):
            endpoint = 3
        else:
            # Previously an unrecognised arg left `resp` unbound and
            # raised NameError; fall back to endpoint 0, matching the
            # global req()'s own default for out-of-range selectors.
            endpoint = 0
        resp = await req(endpoint)  # module-level req(), not this method
        pages = menuobj(resp)
        await pages.start(ctx)
        await loading.delete()
def setup(bot):
    """Extension entry point called by discord.py's load_extension.

    Publishes the bot instance as the module-level ``client`` global
    (used throughout MySource) and registers the cog.
    """
    global client
    client = bot
    bot.add_cog(hreqs())
<reponame>AIandSocialGoodLab/learningplan<gh_stars>1-10
import pandas as pd, copy, numpy as np, mdptoolbox, math
# Read the two problem-size parameters from settings.txt: the number of
# knowledge components (KCs) and the number of proficiency levels per KC.
# A context manager guarantees the file handle is closed even on error.
with open("settings.txt", 'r') as settings:
    NUM_KC = int(settings.readline())
    NUM_PLEVELS = int(settings.readline())
def str2list(s):
    """Parse a string like "[1, 2, 3]" into a list of ints.

    Handles the empty list "[]" (the original raised ValueError on it
    because ''.split(', ') yields ['']).
    """
    body = s.strip('[]')
    if not body:
        return []
    return [int(i) for i in body.split(', ')]
def dict2list(s):
    """Parse a dict-string "{kc: level, ...}" into a length-NUM_KC list,
    with -1 marking knowledge components not present in the dict.

    Entries are split on ':' so multi-digit KC indices and levels parse
    correctly (the old entry[0]/entry[-1] indexing silently mis-parsed
    anything beyond a single digit).
    """
    res = [-1] * NUM_KC
    if s != "{}":
        for entry in s.strip('{}').split(', '):
            key, _, val = entry.partition(':')
            res[int(key)] = int(val)
    return res
def gen_all_between(lo, hi, relate_kc):
    """Enumerate every proficiency state between ``lo`` and ``hi``
    (inclusive) that varies only on the KC indices in ``relate_kc``.

    Recurses over ``relate_kc``; the base case returns the single state
    ``lo`` unchanged.
    """
    if not relate_kc:
        return [lo]
    kc = relate_kc[0]
    tails = gen_all_between(lo, hi, relate_kc[1:])
    states = []
    for level in range(lo[kc], hi[kc] + 1):
        # Deep-copy the sub-results so each level gets its own lists.
        for inst in copy.deepcopy(tails):
            inst[kc] = level
            states.append(inst)
    return states
def state_match(s1, s2):
    """True iff ``s2`` agrees with ``s1`` on every KC where ``s1`` is
    specified (entries of -1 in ``s1`` are wildcards).
    """
    return all(s1[i] == -1 or s1[i] == s2[i] for i in range(NUM_KC))
def reach_goal(s1, s2):
    """True iff state ``s1`` meets or exceeds the goal state ``s2`` on
    every knowledge component.
    """
    return all(s1[i] >= s2[i] for i in range(NUM_KC))
def action_match(lo, state, action):
    """True iff every KC that improved beyond ``lo`` in ``state`` is one
    the given action can affect (per the global ACTIONS mapping).
    """
    return all(state[i] <= lo[i] or i in ACTIONS[action] for i in range(NUM_KC))
def encode_state(state):
    """Encode a proficiency vector as a single base-NUM_PLEVELS integer
    (Horner's scheme, most-significant KC first).
    """
    code = 0
    for level in state:
        code = code * NUM_PLEVELS + level
    return code
def decode_state(num):
    """Decode a base-NUM_PLEVELS integer back into a length-NUM_KC
    proficiency vector (inverse of encode_state).

    Uses floor division (``//``): the original ``/`` produced floats on
    Python 3, turning every decoded entry after the first into a float
    and corrupting round-trips through encode_state.
    """
    res = [0] * NUM_KC
    for i in range(NUM_KC):
        res[-1 - i] = num % NUM_PLEVELS
        num = num // NUM_PLEVELS
    return res
def gen_all_paths(trace, actions, action_related_kcs):
    """Expand a partially-observed trajectory into all fully-specified
    state paths consistent with it.

    :param trace: list of states; trace[0] is fully known, later states
                  may contain -1 (unobserved) entries.
    :param actions: one action per transition in ``trace``.
    :param action_related_kcs: maps each action to the KC indices it can
                               change (bounds the enumeration).
    :returns: list of candidate paths (lists of concrete states).
    """
    lo = trace[0]
    hi = [-1] * NUM_KC
    between = trace[1:]
    all_paths = []
    action = actions[0]
    if len(between) <= 1:
        # Nothing left to fill in: the trace itself is the only path.
        return [trace]
    else:
        # Upper bound per KC: the first later observation of that KC.
        for state in between:
            count = 0  # KCs newly bounded by THIS state (resets each state)
            for i in range(NUM_KC):
                if hi[i] == -1 and state[i] != -1:
                    hi[i] = state[i]
                    count += 1
                if count == NUM_KC:
                    break
        # Candidate concrete successors reachable from lo via this action.
        all_choices = gen_all_between(lo, hi, action_related_kcs[action])
        #print(lo, hi, action, all_choices)
        for state in all_choices:
            if state_match(between[0], state):
                # Pin the next state and recurse on the remaining suffix.
                cur_trace = copy.deepcopy(between)
                cur_trace[0] = state
                cur_all_paths = gen_all_paths(cur_trace, actions[1:], action_related_kcs)
                all_paths += [[lo] + path for path in cur_all_paths]
        return all_paths
def generate_transition_matrix(history, actions, all_actions, action_related_kcs):
    """Estimate the MDP transition tensor P[action, s, s'] from student
    trajectories, distributing each trajectory's weight uniformly over
    all concrete paths consistent with its partial observations.

    The last state index is an absorbing sink; the last two action slots
    are reserved (both map every state to the sink).
    """
    num_actions = len(all_actions)
    P = np.zeros((num_actions + 2, NUM_PLEVELS**NUM_KC + 1, NUM_PLEVELS**NUM_KC + 1))
    # Reserved actions: deterministic transition into the sink state.
    mat = np.zeros((NUM_PLEVELS**NUM_KC + 1, NUM_PLEVELS**NUM_KC + 1))
    mat[-1] = np.ones(NUM_PLEVELS**NUM_KC + 1)
    P[-1] = mat.T
    P[-2] = mat.T
    count = np.zeros(num_actions)  # accumulated weight per action (for averaging)
    for i in range(len(history)):
        print(i)  # progress output; expansion below can be slow
        cur_history = history[i]
        cur_actions = actions[i]
        paths = gen_all_paths(cur_history, cur_actions, action_related_kcs)
        cur_weights = dict()
        for action in cur_actions:
            cur_weights[action] = np.zeros((NUM_PLEVELS**NUM_KC + 1, NUM_PLEVELS**NUM_KC + 1))
        # Each consistent path shares the trajectory's unit weight equally.
        single_w = 1.0 / len(paths)
        for path in paths:
            encode_path = []
            for state in path:
                encode_path.append(encode_state(state))
            for j in range(len(cur_actions)):
                cur_action = cur_actions[j]
                (s1, s2) = (encode_path[j], encode_path[j + 1])
                cur_weights[cur_action][s1, s2] += single_w
        # Fold this trajectory into the running weighted average for
        # each action it used.
        for action in cur_weights:
            ind = all_actions.index(action)
            P[ind] = (count[ind] * P[ind] + cur_weights[action]) / (count[ind] + np.sum(cur_weights[action]))
            count[ind] += np.sum(cur_weights[action])
    # Make the sink state absorbing under every action.
    for i in range(len(P)):
        P[i, -1, -1] = 1
    return P
def main():
    """Load the cached transition matrix P if present; otherwise rebuild
    it from the raw trajectory datasheet and cache it to P.npy.
    """
    try:
        # Fast path: reuse a previously computed transition matrix.
        P = np.load("P.npy")
        actionset = pd.read_csv("action.csv")
        ACTIONS = dict()
        for row in range(actionset.shape[0]):
            ACTIONS[actionset["action"][row]] = str2list(actionset["related_kc"][row])
        ACTION_LIST = sorted(ACTIONS)
        print(ACTION_LIST)
        print("Successfully Load P!")
    # `except Exception:` replaces the original bare `except:` so that
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    except Exception:
        # Slow path: rebuild P from the raw data.
        datasheet = pd.read_csv('MDPdatasheet.csv')
        actionset = pd.read_csv("action.csv")
        # NOTE(review): ACTIONS is local here, but action_match() reads a
        # module-level ACTIONS global -- confirm intended scoping.
        ACTIONS = dict()
        for row in range(actionset.shape[0]):
            ACTIONS[actionset["action"][row]] = str2list(actionset["related_kc"][row])
        # Truncate datasheet to only include the trajectories
        # marked as training and validation.
        with open("test_train_split.txt", "r") as test_train_split:
            train_percent = int(test_train_split.readline())
            validation_percent = int(test_train_split.readline())
        trajectory_count = int(datasheet.iloc[-1]["Student_ID"]) + 1
        train_num = math.floor(trajectory_count * (train_percent + validation_percent) / 100)
        history = []
        actions = []
        trajectories_completed = 0
        for row in range(datasheet.shape[0]):
            if trajectories_completed >= train_num:
                break
            cur_act = datasheet["Action_Types"][row]
            # Proficiency column mixes list-strings and dict-strings; try
            # the list parser first, fall back to the dict parser.
            try:
                cur_hist = str2list(datasheet["Cur_Proficiency"][row])
            except Exception:
                cur_hist = dict2list(datasheet["Cur_Proficiency"][row])
            if cur_act == "Prior Assessment Test":
                # Start of a trajectory.
                cur_history = [cur_hist]
                cur_actions = []
            elif cur_act == "Final Exam":
                # End of a trajectory: the exam state replaces the last
                # intermediate observation.
                cur_history[-1] = cur_hist
                history.append(cur_history)
                actions.append(cur_actions)
                trajectories_completed += 1
            else:
                cur_actions.append(cur_act)
                cur_history.append(cur_hist)
        ACTION_LIST = sorted(ACTIONS)
        print(ACTION_LIST)
        P = generate_transition_matrix(history, actions, ACTION_LIST, ACTIONS)
        # Re-normalise each (action, state) row so it sums to exactly 1.
        for i in range(len(P)):
            for j in range(len(P[i])):
                dif = 1.0 - np.sum(P[i, j])
                if dif > 0.1:
                    # Effectively unvisited pair: make the state absorbing.
                    P[i, j, j] = 1.0
                dif = 1.0 - np.sum(P[i, j])
                if dif != 0:
                    # Dump residual rounding error on the first nonzero entry.
                    for k in range(len(P[i, j])):
                        if P[i, j, k] != 0:
                            P[i, j, k] = P[i, j, k] + dif
                            break
        np.save('P.npy', P)
        print("Generate and Save P!")
        print(P)
# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#
# volumeopts.py - Defines the VolumeOpts class.
#
# Author: <NAME> <<EMAIL>>
#
"""This module defines the :class:`VolumeOpts` class."""
import copy
import logging
import numpy as np
import fsl.data.image as fslimage
import fsleyes_props as props
import fsleyes.gl as fslgl
import fsleyes.colourmaps as fslcm
from . import colourmapopts as cmapopts
from . import volume3dopts as vol3dopts
from . import niftiopts
log = logging.getLogger(__name__)
class VolumeOpts(cmapopts.ColourMapOpts,
vol3dopts.Volume3DOpts,
niftiopts.NiftiOpts):
"""The ``VolumeOpts`` class defines options for displaying :class:`.Image`
instances as regular 3D volumes.
"""
channel = props.Choice(('R', 'G', 'B', 'A'))
"""For images with the NIfTI ``RGB24`` or ``RGBA32`` data type,
this property controls the channel that gets displayed.
"""
clipImage = props.Choice()
"""Clip voxels according to the values in another image. By default, voxels
are clipped by the values in the image itself - this property allows the
user to choose another image by which voxels are to be clipped. Any image
which is in the :class:`.OverlayList` can be selected for clipping. The
:attr:`.ColourMapOpts.clippingRange` property dictates the values outside
of which voxels are clipped.
"""
modulateImage = props.Choice()
"""Modulate alapha (opacity) by the intensity of values in the selected
image, instead of in this image. Only relevant when
:attr:`.ColourMapOpts.modulateAlpha` is active.
"""
interpolation = props.Choice(('none', 'linear', 'spline'))
"""How the value shown at a real world location is derived from the
corresponding data value(s). ``none`` is equivalent to nearest neighbour
interpolation.
"""
@classmethod
def getInitialDisplayRange(cls):
"""This class method returns a tuple containing ``(low, high)``
percentile values which are used to set the initial values for the
:attr:`.ColourMapOpts.displayRange` and
:attr:`.ColourMapOpts.clippingRange` properties. If the initial
display range has not yet been set (via the
:meth:`setInitialDisplayRange` method), ``None`` is returned.
"""
try:
return cls.__initialDisplayRange
except AttributeError:
return None
@classmethod
def setInitialDisplayRange(cls, drange):
"""Sets the initial values for the :attr:`.ColourMapOpts.displayRange`
and :attr:`.ColourMapOpts.clippingRange` to be used for new
:class:`VolumeOpts` instances.
:arg drange: A tuple containing ``(low, high)`` display range values
as percentiles of the image data range. May be ``None``,
in which case the initial display range will be set to the
image data range.
"""
if drange is not None:
low, high = drange
if not all((low < high,
low >= 0,
low <= 100,
high >= 0,
high <= 100)):
raise ValueError('Invalid initial display '
'range: {}'.format(drange))
cls.__initialDisplayRange = drange
def __init__(self,
             overlay,
             display,
             overlayList,
             displayCtx,
             **kwargs):
    """Create a :class:`VolumeOpts` instance for the specified ``overlay``,
    assumed to be an :class:`.Image` instance.

    All arguments are passed through to the :class:`.DisplayOpts`
    constructor.

    :arg overlay:     The :class:`.Image` being displayed.
    :arg display:     The associated :class:`.Display` instance.
    :arg overlayList: The :class:`.OverlayList`.
    :arg displayCtx:  The :class:`.DisplayContext`.
    """

    # We need GL >= 2.1 for
    # spline interpolation
    if float(fslgl.GL_COMPATIBILITY) < 2.1:
        interp = self.getProp('interpolation')
        interp.removeChoice('spline', instance=self)
        interp.updateChoice('linear', instance=self, newAlt=['spline'])

    # Interpolation cannot be unbound
    # between VolumeOpts instances. This is
    # primarily to reduce memory requirement
    # - if interpolation were different
    # across different views, we would have
    # to create multiple 3D image textures
    # for the same image. Same goes for
    # clip/mod images
    nounbind = kwargs.get('nounbind', [])
    nounbind.append('interpolation')
    nounbind.append('clipImage')
    nounbind.append('modulateImage')
    kwargs['nounbind'] = nounbind

    # Some FSL tools will set the nifti aux_file
    # field to the name of a colour map - Check
    # to see if this is the case (do this before
    # calling __init__, so we don't clobber any
    # existing values).
    cmap = str(overlay.header.get('aux_file', 'none')).lower()
    if cmap == 'mgh-subcortical': cmap = 'subcortical'
    if cmap == 'mgh-cortical':    cmap = 'cortical'

    if cmap in fslcm.getColourMaps():
        self.cmap = cmap

    niftiopts.NiftiOpts.__init__(self,
                                 overlay,
                                 display,
                                 overlayList,
                                 displayCtx,
                                 **kwargs)

    # Some things only happen
    # on the parent instance
    self.__registered = self.getParent() is None

    # Check whether the data range for this
    # image is silly. If it is, and we are
    # on a platform that cannot use floating
    # point textures, we turn on the override
    # data range option.
    if self.__registered and np.issubdtype(overlay.dtype, np.floating):
        import fsleyes.gl.textures.data as texdata
        if not texdata.canUseFloatTextures()[0]:

            dmin, dmax = overlay.dataRange

            # a range of greater than 10e7
            # is defined as being "silly"
            # (note that 10e7 == 1e8)
            if abs(dmax - dmin) > 10e7:
                # NOTE(review): 'sample' is only assigned for 3D/4D
                # images - presumably images always are at this
                # point; confirm against the Image class.
                if overlay.ndim == 3:   sample = overlay[:]
                elif overlay.ndim == 4: sample = overlay[..., 0]
                drange = np.percentile(sample[sample != 0], [1, 99])
                self.overrideDataRange       = drange
                self.enableOverrideDataRange = True

    # Configure the initial display
    # range for new images, from the
    # initialDisplayRange percentiles.
    # We do this before ColourMapOpts.init
    drange = VolumeOpts.getInitialDisplayRange()

    if self.__registered and drange is not None:
        if overlay.ndim == 3:   sample = overlay[:]
        elif overlay.ndim == 4: sample = overlay[..., 0]
        # convert the configured percentiles into data values,
        # ignoring zeros
        drange = np.percentile(sample[sample != 0], drange)
        crange = [drange[0], overlay.dataRange[1]]

        self.displayRange  = drange
        self.modulateRange = drange
        self.clippingRange = crange

    # If this is not a RGB(A) image, disable
    # the channel property. If it's a RGB
    # image, remove the "A" option from
    # the channel property.
    if self.__registered:
        nchannels = self.overlay.nvals
        if nchannels == 1:
            self.disableProperty('channel')
        elif nchannels == 3:
            prop = self.getProp('channel')
            prop.removeChoice('A', self)

    cmapopts .ColourMapOpts.__init__(self)
    vol3dopts.Volume3DOpts .__init__(self)

    # Both parent and child VolumeOpts instances
    # listen for Image dataRange changes. The data
    # range for large images may be calculated
    # asynchronously on a separate thread, meaning
    # that data range updates may occur at random
    # times.
    #
    # If parent instances did not listen for data
    # range updates and, at startup, the following
    # sequence of events occurs:
    #
    #   1. Parent VolumeOpts instance created
    #
    #   2. Image.dataRange updated
    #
    #   3. Child VolumeOpts instance created
    #
    # The known parent data range will be 0-0,
    # the child will not receive any notification
    # about the data range change, and the child's
    # data range will be clobbered by the parent's.
    # This ugly situation is avoided simply by
    # having the parent track changes to the data
    # range in addition to all children.
    overlay.register(self.name,
                     self.__dataRangeChanged,
                     'dataRange',
                     runOnIdle=True)

    # We need to listen for changes to clipImage
    # and to [enable]overrideDataRange, as they
    # will change the display data range. These
    # cannot be unbound between parent/children,
    # so only the parent needs to listen.
    if self.__registered:
        overlayList.addListener('overlays',
                                self.name,
                                self.__overlayListChanged)
        self.addListener('clipImage',
                         self.name,
                         self.__clipImageChanged)
        self.addListener('modulateImage',
                         self.name,
                         self.__modulateImageChanged)
        self.addListener('enableOverrideDataRange',
                         self.name,
                         self.__enableOverrideDataRangeChanged)
        self.addListener('overrideDataRange',
                         self.name,
                         self.__overrideDataRangeChanged)

        # Sync initial state with the current overlay list /
        # clip / modulate image selections.
        self.__overlayListChanged()
        self.__clipImageChanged(    updateDataRange=False)
        self.__modulateImageChanged(updateDataRange=False)
def destroy(self):
    """Removes property listeners, and calls the :meth:`NiftiOpts.destroy`
    method.
    """
    self.overlay.deregister(self.name, 'dataRange')

    # These listeners are only added on the parent instance
    # (see __init__).
    if self.__registered:
        self.overlayList.removeListener('overlays', self.name)
        for propName in ('clipImage',
                         'modulateImage',
                         'enableOverrideDataRange',
                         'overrideDataRange'):
            self.removeListener(propName, self.name)

    cmapopts .ColourMapOpts.destroy(self)
    vol3dopts.Volume3DOpts .destroy(self)
    niftiopts.NiftiOpts    .destroy(self)
def getDataRange(self):
    """Overrides :meth:`.ColourMapOpts.getDataRange`. Returns the
    :attr:`overrideDataRange` when it is enabled, otherwise the
    :attr:`.Image.dataRange` of the image.
    """
    if self.enableOverrideDataRange:
        return self.overrideDataRange
    return self.overlay.dataRange
def getClippingRange(self):
    """Overrides :meth:`.ColourMapOpts.getClippingRange`.

    Returns the data range of the :attr:`clipImage` when one is selected,
    otherwise defers to the base class implementation (which returns
    ``None``).
    """
    clipImage = self.clipImage
    if clipImage is not None:
        return clipImage.dataRange
    return cmapopts.ColourMapOpts.getClippingRange(self)
def getModulateRange(self):
    """Overrides :meth:`.ColourMapOpts.getModulateRange`.

    Returns the data range of the :attr:`modulateImage` when one is
    selected, otherwise defers to the base class implementation (which
    returns ``None``).
    """
    modImage = self.modulateImage
    if modImage is not None:
        return modImage.dataRange
    return cmapopts.ColourMapOpts.getModulateRange(self)
def __dataRangeChanged(self, *a):
    """Called when the :attr:`.Image.dataRange` property changes.
    Calls :meth:`.ColourMapOpts.updateDataRange`.

    The three positional ``False`` flags suppress range resets -
    presumably resetDR/resetCR/resetMR, matching the keywords used
    elsewhere in this class; confirm against ColourMapOpts.
    """
    self.updateDataRange(False, False, False)
def __enableOverrideDataRangeChanged(self, *a):
    """Called when the :attr:`enableOverrideDataRange` property changes.
    Calls :meth:`.ColourMapOpts.updateDataRange`, as toggling the
    override changes the value returned by :meth:`getDataRange`.
    """
    self.updateDataRange()
def __overrideDataRangeChanged(self, *a):
    """Called when the :attr:`overrideDataRange` property changes.
    Calls :meth:`.ColourMapOpts.updateDataRange`, as the override range
    feeds directly into :meth:`getDataRange`.
    """
    self.updateDataRange()
def __overlayListChanged(self, *a):
    """Called when the :class:`.OverlayList` changes. Updates the
    options of the :attr:`clipImage` and :attr:`modulateImage`
    properties, preserving the current selections when still valid.
    """
    clipProp = self.getProp('clipImage')
    oldClip  = self.clipImage
    modProp  = self.getProp('modulateImage')
    oldMod   = self.modulateImage

    # Every other Image in the list is a valid choice;
    # None means "clip/modulate by this image itself".
    options = [None] + [
        o for o in self.displayCtx.getOrderedOverlays()
        if o is not self.overlay and isinstance(o, fslimage.Image)]

    clipProp.setChoices(options, instance=self)
    modProp .setChoices(options, instance=self)

    self.clipImage     = oldClip if oldClip in options else None
    self.modulateImage = oldMod  if oldMod  in options else None
def __clipImageChanged(self, *a, **kwa):
    """Called when the :attr:`clipImage` property is changed. Updates
    the range of the :attr:`clippingRange` property.

    :arg updateDataRange: Defaults to ``True``. If ``False``, the
                          :meth:`.ColourMapOpts.updateDataRange` method
                          is not called.
    """
    updateDR = kwa.get('updateDataRange', True)

    if self.clipImage is None:
        self.enableProperty('linkLowRanges')
        self.enableProperty('linkHighRanges')

    # If the clipping range is based on another
    # image, it makes no sense to link the low/
    # high display/clipping ranges, as they are
    # probably different. So if a clip image is
    # selected, we disable the link range
    # properties.
    elif self.propertyIsEnabled('linkLowRanges'):
        self.linkLowRanges  = False
        self.linkHighRanges = False
        self.disableProperty('linkLowRanges')
        self.disableProperty('linkHighRanges')

    log.debug('Clip image changed for %s: %s',
              self.overlay, self.clipImage)

    if updateDR:
        self.updateDataRange(resetDR=False, resetMR=False)
def __modulateImageChanged(self, *a, **kwa):
    """Called when the :attr:`modulateImage` property is changed. Updates
    the range of the :attr:`modulateRange` property.

    :arg updateDataRange: Defaults to ``True``. If ``False``, the
                          :meth:`.ColourMapOpts.updateDataRange` method
                          is not called.
    """
    log.debug('Modulate image changed for %s: %s',
              self.overlay, self.modulateImage)

    if kwa.get('updateDataRange', True):
        self.updateDataRange(resetDR=False, resetCR=False)
class VolumeRGBOpts(niftiopts.NiftiOpts):
    """The ``VolumeRGBOpts`` class is intended for displaying
    :class:`.Image` instances containing RGB(A) data.
    """

    rColour = props.Colour(default=(1, 0, 0))
    """Colour to use for the red channel. """

    gColour = props.Colour(default=(0, 1, 0))
    """Colour to use for the green channel. """

    bColour = props.Colour(default=(0, 0, 1))
    """Colour to use for the blue channel. """

    suppressR = props.Boolean(default=False)
    """Suppress the R channel. """

    suppressG = props.Boolean(default=False)
    """Suppress the G channel. """

    suppressB = props.Boolean(default=False)
    """Suppress the B channel. """

    suppressA = props.Boolean(default=False)
    """Suppress the A channel. """

    suppressMode = props.Choice(('white', 'black', 'transparent'))
    """How colours should be suppressed. """

    interpolation = copy.copy(VolumeOpts.interpolation)
    """See :attr:`VolumeOpts.interpolation`. """

    def __init__(self,
                 overlay,
                 display,
                 overlayList,
                 displayCtx,
                 **kwargs):
        """Create a :class:`VolumeRGBOpts` instance for the specified
        ``overlay``, assumed to be an :class:`.Image` instance with type
        ``NIFTI_TYPE_RGB24`` or ``NIFTI_TYPE_RGBA32``.

        All arguments are passed through to the :class:`.DisplayOpts`
        constructor.
        """

        # We need GL >= 2.1 for
        # spline interpolation
        # (same check as in VolumeOpts.__init__)
        if float(fslgl.GL_COMPATIBILITY) < 2.1:
            interp = self.getProp('interpolation')
            interp.removeChoice('spline', instance=self)
            interp.updateChoice('linear', instance=self, newAlt=['spline'])

        niftiopts.NiftiOpts.__init__(self,
                                     overlay,
                                     display,
                                     overlayList,
                                     displayCtx,
                                     **kwargs)
class ComplexOpts(VolumeOpts):
    """The ``ComplexOpts`` class is a specialisation of :class:`VolumeOpts` for
    images with a complex data type.
    """

    component = props.Choice(('real', 'imag', 'mag', 'phase'))
    """How to display the complex data:

      - ``'real'``  - display the real component
      - ``'imag'``  - display the imaginary component
      - ``'mag'``   - display the magnitude
      - ``'phase'`` - display the phase
    """

    def __init__(self, *args, **kwargs):
        """Create a ``ComplexOpts``. All arguments are passed through to
        the :class:`VolumeOpts` constructor.
        """
        # Cache of (min, max) data ranges, keyed by component name -
        # see getDataRange.
        self.__dataRanges = {}
        VolumeOpts.__init__(self, *args, **kwargs)
        self.addListener('component', self.name, self.__componentChanged)

    def destroy(self):
        """Must be called when this ``ComplexOpts`` is no longer needed. """
        VolumeOpts.destroy(self)

    def getDataRange(self):
        """Overrides :meth:`.ColourMapOpts.getDataRange`. Calculates, caches
        and returns the data range of the current :attr:`component`.
        """
        comp = self.component
        if comp not in self.__dataRanges:
            data = self.getComponent(self.overlay[:])
            self.__dataRanges[comp] = (np.nanmin(data), np.nanmax(data))
        return self.__dataRanges[comp]

    def getComponent(self, data):
        """Calculates and returns the current :attr:`component` from the given
        data, assumed to be complex.
        """
        extract = {'real'  : self.getReal,
                   'imag'  : self.getImaginary,
                   'mag'   : self.getMagnitude,
                   'phase' : self.getPhase}.get(self.component)
        if extract is not None:
            return extract(data)

    @staticmethod
    def getReal(data):
        """Return the real component of the given complex data. """
        return data.real

    @staticmethod
    def getImaginary(data):
        """Return the imaginary component of the given complex data. """
        return data.imag

    @staticmethod
    def getMagnitude(data):
        """Return the magnitude of the given complex data. """
        return (data.real ** 2 + data.imag ** 2) ** 0.5

    @staticmethod
    def getPhase(data):
        """Return the phase of the given complex data. """
        return np.arctan2(data.imag, data.real)

    def __componentChanged(self, *a):
        """Called when the :attr:`component` changes. Calls
        :meth:`.ColourMapOpts.updateDataRange` so that the displayed range
        reflects the newly selected component.
        """
        self.updateDataRange()
|
from typing import Optional, Set, List, Dict
from bionorm.common.SieveBased.models import SieveBasedEntity
from bionorm.common.SieveBased.processing import Terminology
from bionorm.common.SieveBased.processing.sieves.base_sieve import BaseSieve
class PartialMatchNCBISieve(BaseSieve):
    """Partial Match sieve.

    Looks for the best candidate in terminology which has minimal length and
    maximum of the common words with alias (shortest and most similar entity).
    """

    def __init__(self, terminology: Terminology):
        super(PartialMatchNCBISieve, self).__init__(terminology)

    @property
    def name(self) -> str:
        return "Partial Match Sieve"

    def apply(self, entity: SieveBasedEntity) -> Optional[str]:
        """Return the CUI of the best partial match for ``entity``, or
        ``None``. Falls back to a stemmed version of the entity text when
        the raw text yields no match.
        """
        f_id = self._partial_match(entity.text, entity.text.split())
        if f_id is None:
            stemmed = self.text_processor.get_stemmed_phrase(entity.text)
            return self._partial_match(stemmed, stemmed.split())
        return f_id

    def _init_candidate_maps(self) -> None:
        """Reset the per-query candidate bookkeeping maps."""
        # number of tokens shared with the query phrase, per candidate CUI
        self.cui_candidate_matching_tokens_count_map: Dict[str, int] = {}
        # word count of the best (shortest) candidate name seen, per CUI
        self.cui_candidate_length_map: Dict[str, int] = {}

    def _partial_match(self, phrase: str, tokens: List[str]) -> Optional[str]:
        """Score all terminology names sharing a non-stopword token with
        ``phrase`` and return the winning CUI (see :meth:`_get_cui`).
        """
        partial_matched_phrases: Set[str] = set()
        self._init_candidate_maps()
        for token in tokens:
            if token in self.text_processor.stopwords:
                continue
            candidate_phrases = self.terminology.token_to_name_map.get(token)
            if candidate_phrases is None:
                continue
            # BUG FIX: the previous implementation called
            # candidate_phrases.difference_update(partial_matched_phrases),
            # which mutated the set stored inside
            # terminology.token_to_name_map and permanently removed names
            # from the shared terminology, corrupting later queries.
            # Compute the difference into a new set instead.
            new_candidates = candidate_phrases - partial_matched_phrases
            self._ncbi_partial_match(phrase, new_candidates, partial_matched_phrases)
        return self._get_cui()

    def _ncbi_partial_match(self, phrase: str, candidate_phrases: Set[str],
                            partial_matched_phrases: Set[str]):
        """Score each candidate name, keeping per CUI the candidate with
        the most matching tokens, breaking ties by shorter name.
        """
        for candidate in candidate_phrases:
            partial_matched_phrases.add(candidate)
            count = self.text_processor.get_matching_tokens_count(phrase, candidate)
            cui = next(iter(self.terminology.name_to_cui_map[candidate]))
            length = len(candidate.split())

            old_count = self.cui_candidate_matching_tokens_count_map.get(cui)
            # Accept the candidate if the CUI is new, matches more tokens,
            # or matches equally many tokens with a shorter name.
            if (old_count is None
                    or count > old_count
                    or (count == old_count
                        and length < self.cui_candidate_length_map[cui])):
                self.cui_candidate_matching_tokens_count_map[cui] = count
                self.cui_candidate_length_map[cui] = length

    def _get_cui(self) -> Optional[str]:
        """Return the CUI with the most matching tokens; ties are broken by
        the shortest candidate name, preferring D- over C- identifiers.
        Returns ``None`` when no candidate was recorded.
        """
        if not self.cui_candidate_matching_tokens_count_map:
            return None

        best_count = max(self.cui_candidate_matching_tokens_count_map.values())
        max_cui_set: Set[str] = {
            cui
            for cui, count in self.cui_candidate_matching_tokens_count_map.items()
            if count == best_count
        }

        if len(max_cui_set) == 1:
            return next(iter(max_cui_set))

        cui = None
        min_candidate_length = 1000
        for candidate_cui in max_cui_set:
            length = self.cui_candidate_length_map[candidate_cui]
            # Prefer D- over C- on equal length (last D- wins; set
            # iteration order is otherwise unspecified, as before)
            if (length < min_candidate_length
                    or (length == min_candidate_length
                        and candidate_cui.startswith('D'))):
                min_candidate_length = length
                cui = candidate_cui
        return cui
|
# -*- coding: utf-8 -*-
"""
This module defines the functions to configure and interact with Maestral from the
command line. Some imports are deferred to the functions that required them in order to
reduce the startup time of individual CLI commands.
"""
# system imports
import sys
import os
import os.path as osp
import functools
import time
from typing import Optional, Dict, List, Tuple, Callable, Union, cast, TYPE_CHECKING
# external imports
import click
# local imports
from . import __version__
from .utils import cli
if TYPE_CHECKING:
from click.shell_completion import CompletionItem
from datetime import datetime
from .main import Maestral
from .daemon import MaestralProxy
# ======================================================================================
# CLI dialogs and helper functions
# ======================================================================================
# Pre-styled status tags appended to progress messages on the command line.
OK = click.style("[OK]", fg="green")
FAILED = click.style("[FAILED]", fg="red")
KILLED = click.style("[KILLED]", fg="red")
def stop_daemon_with_cli_feedback(config_name: str) -> None:
    """Wrapper around :meth:`daemon.stop_maestral_daemon_process`
    with command line feedback."""

    from .daemon import stop_maestral_daemon_process, Stop

    click.echo("Stopping Maestral...", nl=False)
    res = stop_maestral_daemon_process(config_name)

    # Map each stop result onto its command line message.
    feedback = {
        Stop.Ok: "\rStopping Maestral... " + OK,
        Stop.NotRunning: "\rMaestral daemon is not running.",
        Stop.Killed: "\rStopping Maestral... " + KILLED,
        Stop.Failed: "\rStopping Maestral... " + FAILED,
    }
    message = feedback.get(res)
    if message is not None:
        click.echo(message)
def select_dbx_path_dialog(
    config_name: str, default_dir_name: Optional[str] = None, allow_merge: bool = False
) -> str:
    """
    A CLI dialog to ask for a local Dropbox folder location.

    :param config_name: The configuration to use for the default folder name.
    :param default_dir_name: The default directory name. Defaults to
        "Dropbox ({config_name})" if not given.
    :param allow_merge: If ``True``, allows the selection of an existing folder without
        deleting it. Defaults to ``False``.
    :returns: Path given by user.
    """

    from .utils.path import delete

    default_dir_name = default_dir_name or f"Dropbox ({config_name.capitalize()})"

    # Loop until the user picks a usable location.
    while True:
        res = cli.select_path(
            "Please choose a local Dropbox folder:",
            default=f"~/{default_dir_name}",
            files_allowed=False,
        )
        # strip a trailing path separator before expanding '~'
        res = res.rstrip(osp.sep)

        dropbox_path = osp.expanduser(res)

        if osp.exists(dropbox_path):
            if allow_merge:
                text = (
                    "Directory already exists. Do you want to replace it "
                    "or merge its content with your Dropbox?"
                )
                choice = cli.select(text, options=["replace", "merge", "cancel"])
            else:
                text = (
                    "Directory already exists. Do you want to replace it? "
                    "Its content will be lost!"
                )
                replace = cli.confirm(text)
                # map yes/no onto the replace (0) / cancel (2) choices
                choice = 0 if replace else 2

            if choice == 0:
                err = delete(dropbox_path)
                if err:
                    cli.warn(
                        "Could not write to selected location. "
                        "Please make sure that you have sufficient permissions."
                    )
                else:
                    cli.ok("Replaced existing folder")
                    return dropbox_path
            elif choice == 1:
                cli.ok("Merging with existing folder")
                return dropbox_path
            # choice == 2 ("cancel"): loop and ask for a new path
        else:
            # Path does not exist yet - accept it as given.
            return dropbox_path
def link_dialog(m: Union["MaestralProxy", "Maestral"]) -> None:
"""
A CLI dialog for linking a Dropbox account.
:param m: Proxy to Maestral daemon.
"""
authorize_url = m.get_auth_url()
cli.info(f"Linking new account for '{m.config_name}' config")
cli.info("Retrieving auth code from Dropbox")
choice = cli.select(
"How would you like to you link your account?",
options=["Open Dropbox website", "Print auth URL to console"],
)
if choice == 0:
click.launch(authorize_url)
else:
cli.info("Open the URL below to retrieve an auth code:")
cli.info(authorize_url)
res = -1
while res != 0:
auth_code = cli.prompt("Enter the auth code:")
auth_code = auth_code.strip()
res = m.link(auth_code)
if res == 0:
email = m.get_state("account", "email")
cli.ok(f"Linked to {email}")
elif res == 1:
cli.warn("Invalid token, please try again")
elif res == 2:
cli.warn("Could not connect to Dropbox, please try again")
def check_for_updates() -> None:
    """
    Checks if updates are available by reading the cached release number from the
    config file and notifies the user. Prints an update note to the command line.
    """
    from packaging.version import Version
    from .config import MaestralConfig, MaestralState

    conf = MaestralConfig("maestral")
    state = MaestralState("maestral")

    interval = conf.get("app", "update_notification_interval")
    last_update_check = state.get("app", "update_notification_last")
    latest_release = state.get("app", "latest_release")

    # Notifications disabled, or we already notified recently.
    if interval == 0 or time.time() - last_update_check < interval:
        return

    if Version(__version__) < Version(latest_release):
        cli.echo(
            f"Update available v{__version__} → v{latest_release}. "
            f"Please use your package manager to update."
        )
def check_for_fatal_errors(m: Union["MaestralProxy", "Maestral"]) -> bool:
"""
Checks the given Maestral instance for fatal errors such as revoked Dropbox access,
deleted Dropbox folder etc. Prints a nice representation to the command line.
:param m: Proxy to Maestral daemon or Maestral instance.
:returns: True in case of fatal errors, False otherwise.
"""
import textwrap
maestral_err_list = m.fatal_errors
if len(maestral_err_list) > 0:
width = cli.get_term_width()
err = maestral_err_list[0]
err_title = cast(str, err["title"])
err_msg = cast(str, err["message"])
wrapped_msg = textwrap.fill(err_msg, width=width)
click.echo("")
click.secho(err_title, fg="red")
click.secho(wrapped_msg, fg="red")
click.echo("")
return True
else:
return False
def convert_api_errors(func: Callable) -> Callable:
    """
    Decorator that catches a MaestralApiError and prints a formatted error message to
    stdout before exiting. Calls ``sys.exit(1)`` after printing the error to stdout.
    """
    from .errors import MaestralApiError

    @functools.wraps(func)
    def _api_error_guard(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except MaestralApiError as exc:
            cli.warn(f"{exc.title}. {exc.message}")
            sys.exit(1)

    return _api_error_guard
def _datetime_from_iso_str(time_str: str) -> "datetime":
"""
Converts an ISO 8601 time string such as '2015-05-15T15:50:38Z' to a timezone aware
datetime object in the local time zone.
"""
from datetime import datetime
# replace Z with +0000, required for Python 3.6 compatibility
time_str = time_str.replace("Z", "+0000")
return datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%S%z").astimezone()
# ======================================================================================
# Custom parameter types
# ======================================================================================
# A custom parameter:
# * needs a name
# * needs to pass through None unchanged
# * needs to convert from a string
# * needs to convert its result type through unchanged (eg: needs to be idempotent)
# * needs to be able to deal with param and context being None. This can be the case
# when the object is used with prompt inputs.
class DropboxPath(click.ParamType):
    """A command line parameter representing a Dropbox path

    :param file_okay: Controls if a file is a possible value.
    :param dir_okay: Controls if a directory is a possible value.
    """

    name = "Dropbox path"
    envvar_list_splitter = osp.pathsep

    def __init__(self, file_okay: bool = True, dir_okay: bool = True) -> None:
        self.file_okay = file_okay
        self.dir_okay = dir_okay

    def convert(
        self,
        value: Optional[str],
        param: Optional[click.Parameter],
        ctx: Optional[click.Context],
    ) -> Optional[str]:
        # Pass None through unchanged; otherwise normalise to an
        # absolute Dropbox path with a leading "/".
        if value is None:
            return value
        if not value.startswith("/"):
            value = "/" + value
        return value

    def shell_complete(
        self,
        ctx: Optional[click.Context],
        param: Optional[click.Parameter],
        incomplete: str,
    ) -> List["CompletionItem"]:
        """Completes Dropbox paths from the local Dropbox folder and from
        the list of excluded items. Imports are deferred to keep CLI
        startup fast.
        """

        from click.shell_completion import CompletionItem
        from .utils import removeprefix
        from .config import MaestralConfig

        matches: List[str] = []
        completions: List[CompletionItem] = []

        # check if we have been given an absolute path
        absolute = incomplete.startswith("/")
        incomplete = incomplete.lstrip("/")

        # get the Maestral config for which to complete paths
        config_name = ctx.params.get("config_name", "maestral") if ctx else "maestral"

        # get all matching paths in our local Dropbox folder
        # TODO: query from server if not too slow
        config = MaestralConfig(config_name)
        dropbox_dir = config.get("sync", "path")
        local_incomplete = osp.join(dropbox_dir, incomplete)
        local_dirname = osp.dirname(local_incomplete)

        try:
            with os.scandir(local_dirname) as it:
                for entry in it:
                    if entry.path.startswith(local_incomplete):
                        if self.file_okay and entry.is_file():
                            dbx_path = removeprefix(entry.path, dropbox_dir)
                            matches.append(dbx_path)
                        if self.dir_okay and entry.is_dir():
                            dbx_path = removeprefix(entry.path, dropbox_dir)
                            matches.append(dbx_path)
        except OSError:
            # Directory may not exist or be unreadable - simply offer
            # no local completions in that case.
            pass

        # get all matching excluded items
        for dbx_path in config.get("sync", "excluded_items"):
            if dbx_path.startswith("/" + incomplete):
                matches.append(dbx_path)

        for match in matches:
            # Only keep the leading "/" when the user typed one.
            if not absolute:
                match = match.lstrip("/")
            completions.append(CompletionItem(match))

        return completions
class ConfigKey(click.ParamType):
    """A command line parameter representing a config key"""

    name = "key"

    def shell_complete(
        self,
        ctx: Optional[click.Context],
        param: Optional[click.Parameter],
        incomplete: str,
    ) -> List["CompletionItem"]:
        from click.shell_completion import CompletionItem
        from .config.main import KEY_SECTION_MAP as KEYS

        # Offer every known config key that matches the typed prefix.
        matching = (k for k in KEYS if k.startswith(incomplete))
        return [CompletionItem(k) for k in matching]
class ConfigName(click.ParamType):
    """A command line parameter representing a Maestral configuration name.

    :param existing: If ``True`` require an existing config, otherwise create a new
        config on demand.
    """

    name = "config"

    def __init__(self, existing: bool = True) -> None:
        self.existing = existing

    def convert(
        self,
        value: Optional[str],
        param: Optional[click.Parameter],
        ctx: Optional[click.Context],
    ) -> Optional[str]:
        if value is None:
            return value

        from .config import validate_config_name, list_configs

        if self.existing:
            # accept only existing config names
            if value not in list_configs():
                raise cli.CliException(
                    f"Configuration '{value}' does not exist. "
                    f"Use 'maestral config-files' to list all configurations."
                )
            return value

        # accept all valid config names
        try:
            return validate_config_name(value)
        except ValueError:
            raise cli.CliException(
                "Configuration name may not contain any whitespace"
            )

    def shell_complete(
        self,
        ctx: Optional[click.Context],
        param: Optional[click.Parameter],
        incomplete: str,
    ) -> List["CompletionItem"]:
        from click.shell_completion import CompletionItem
        from .config import list_configs

        return [
            CompletionItem(conf)
            for conf in list_configs()
            if conf.startswith(incomplete)
        ]
# ======================================================================================
# Command groups
# ======================================================================================
class OrderedGroup(click.Group):
    """Click command group with customizable order of help output."""

    def _register_with_section(self, factory, args, kwargs) -> Callable:
        # Shared implementation of :meth:`command` and :meth:`group`:
        # capture the "section" keyword before handing the rest to click.
        section = kwargs.pop("section", "Commands")

        def decorator(f):
            cmd = factory(*args, **kwargs)(f)
            cmd.section = section
            self.add_command(cmd)
            return cmd

        return decorator

    def command(self, *args, **kwargs) -> Callable:
        """Behaves the same as :meth:`click.Group.command()` except captures a section
        name for listing command names in help.
        """
        from click.decorators import command

        return self._register_with_section(command, args, kwargs)

    def group(self, *args, **kwargs) -> Callable:
        """Behaves the same as :meth:`click.Group.group()` except captures a section
        name for listing command names in help.
        """
        from click.decorators import group

        return self._register_with_section(group, args, kwargs)

    def format_commands(
        self, ctx: click.Context, formatter: click.HelpFormatter
    ) -> None:
        """Extra format methods for multi methods that adds all the commands
        after the options, grouped by their captured section name.
        """
        visible = []
        for name in self.commands:
            cmd = self.get_command(ctx, name)
            # What is this, the tool lied about a command. Ignore it.
            # Hidden commands are skipped as well.
            if cmd is None or cmd.hidden:
                continue
            visible.append((name, cmd))

        if not visible:
            return

        # allow for 3 times the default spacing
        max_len = max(len(name) for name, cmd in visible)
        limit = formatter.width - 6 - max_len  # type: ignore

        # group commands into sections, preserving first-seen order
        sections: Dict[str, List[Tuple[str, click.Command]]] = {}
        for name, cmd in visible:
            sections.setdefault(cmd.section, []).append((name, cmd))  # type: ignore

        # format sections individually
        for section, cmds in sections.items():
            rows = [
                (name.ljust(max_len), cmd.get_short_help_str(limit))
                for name, cmd in cmds
            ]
            if rows:
                with formatter.section(section):
                    formatter.write_dl(rows)
# Top-level click group: every Maestral command hangs off this entry point.
@click.group(cls=OrderedGroup, help="Dropbox client for Linux and macOS.")
@click.version_option(version=__version__, message=__version__)
def main():
    """Entry point for the ``maestral`` command line interface."""
    pass
# ======================================================================================
# Core commands
# ======================================================================================
# Reusable "--config-name" option. This variant accepts any valid name
# (existing=False), so a new configuration can be created on demand.
config_option = click.option(
    "-c",
    "--config-name",
    default="maestral",
    type=ConfigName(existing=False),
    is_eager=True,
    expose_value=True,
    help="Run command with the given configuration.",
)

# Same option, but restricted to configurations that already exist.
existing_config_option = click.option(
    "-c",
    "--config-name",
    default="maestral",
    type=ConfigName(),
    is_eager=True,
    expose_value=True,
    help="Run command with the given configuration.",
)
@main.command(section="Core Commands", help="Start the sync daemon.")
@click.option(
    "--foreground",
    "-f",
    is_flag=True,
    default=False,
    help="Start Maestral in the foreground.",
)
@click.option(
    "--verbose",
    "-v",
    is_flag=True,
    default=False,
    help="Print log messages to stderr.",
)
@config_option
@convert_api_errors
def start(foreground: bool, verbose: bool, config_name: str) -> None:
    """Starts the sync daemon, running the interactive setup dialog on
    first use (account linking, Dropbox folder and folder selection).
    """
    import threading
    from .daemon import (
        MaestralProxy,
        start_maestral_daemon,
        start_maestral_daemon_process,
        wait_for_startup,
        is_running,
        Start,
        CommunicationError,
    )

    check_for_updates()

    if is_running(config_name):
        click.echo("Daemon is already running.")
        return

    @convert_api_errors
    def startup_dialog():
        # Runs the first-time setup against the (possibly still starting)
        # daemon. Bails out silently if the daemon cannot be reached.
        try:
            wait_for_startup(config_name)
        except CommunicationError:
            return

        m = MaestralProxy(config_name)

        if m.pending_link:
            link_dialog(m)

        if m.pending_dropbox_folder:
            path = select_dbx_path_dialog(config_name, allow_merge=True)

            # Retry until the folder can actually be created.
            while True:
                try:
                    m.create_dropbox_directory(path)
                    break
                except OSError:
                    cli.warn(
                        "Could not create folder. Please make sure that you have "
                        "permissions to write to the selected location or choose a "
                        "different location."
                    )

            include_all = cli.confirm("Would you like sync all folders?")

            if not include_all:
                # get all top-level Dropbox folders
                cli.info("Loading...")
                entries = m.list_folder("/", recursive=False)
                names = [
                    cast(str, e["name"])
                    for e in entries
                    if e["type"] == "FolderMetadata"
                ]
                choices = cli.select_multiple(
                    "Choose which folders to include", options=names
                )
                # everything that was not selected becomes excluded
                excluded_paths = [
                    f"/{name}"
                    for index, name in enumerate(names)
                    if index not in choices
                ]
                m.excluded_items = excluded_paths

            cli.ok("Setup completed. Starting sync.")

        m.start_sync()

    if foreground:
        # Run the setup dialog on a background thread while the daemon
        # blocks this process in the foreground.
        setup_thread = threading.Thread(target=startup_dialog, daemon=True)
        setup_thread.start()
        start_maestral_daemon(config_name, log_to_stderr=verbose)
    else:
        cli.echo("Starting Maestral...", nl=False)
        res = start_maestral_daemon_process(config_name)
        if res == Start.Ok:
            cli.echo("\rStarting Maestral... " + OK)
        elif res == Start.AlreadyRunning:
            cli.echo("\rStarting Maestral... " + "Already running.")
        else:
            cli.echo("\rStarting Maestral... " + FAILED)
            cli.echo("Please check logs for more information.")
        # NOTE(review): run in this process after spawning the daemon; if
        # startup failed, wait_for_startup raises and the dialog returns
        # early - confirm this placement against daemon semantics.
        startup_dialog()
@main.command(section="Core Commands", help="Stop the sync daemon.")
@existing_config_option
def stop(config_name: str) -> None:
    """Stops the sync daemon for the given configuration, with CLI feedback."""
    stop_daemon_with_cli_feedback(config_name)
@main.command(section="Core Commands", help="Run the GUI if installed.")
@config_option
def gui(config_name: str) -> None:
    """Locates an installed Maestral GUI via the "maestral_gui" entry point
    group, verifies its requirements, and launches it.
    """
    from packaging.version import Version
    from packaging.requirements import Requirement

    # importlib.metadata is stdlib from Python 3.8; fall back to the
    # backport on older interpreters.
    try:
        from importlib.metadata import entry_points, requires, version  # type: ignore
    except ImportError:
        from importlib_metadata import entry_points, requires, version  # type: ignore

    # find all "maestral_gui" entry points registered by other packages
    gui_entry_points = entry_points().get("maestral_gui")

    if not gui_entry_points or len(gui_entry_points) == 0:
        raise cli.CliException(
            "No maestral GUI installed. Please run 'pip3 install maestral[gui]'."
        )

    # check if 1st party defaults "maestral_cocoa" or "maestral_qt" are installed
    default_gui = "maestral_cocoa" if sys.platform == "darwin" else "maestral_qt"
    default_entry_point = next(
        (e for e in gui_entry_points if e.name == default_gui), None
    )

    if default_entry_point:
        # check gui requirements
        requirements = [Requirement(r) for r in requires("maestral")]  # type: ignore

        for r in requirements:
            # only check requirements that belong to the "gui" extra
            if r.marker and r.marker.evaluate({"extra": "gui"}):
                version_str = version(r.name)
                if not r.specifier.contains(Version(version_str), prereleases=True):
                    raise cli.CliException(
                        f"{r.name}{r.specifier} required but you have {version_str}"
                    )

        # load entry point
        run = default_entry_point.load()
    else:
        # load any 3rd party GUI
        fallback_entry_point = next(iter(gui_entry_points))
        run = fallback_entry_point.load()

    run(config_name)
@main.command(section="Core Commands", help="Pause syncing.")
@existing_config_option
def pause(config_name: str) -> None:
    """Ask the running daemon to stop syncing; notice if it is not running."""
    from .daemon import MaestralProxy, CommunicationError

    try:
        with MaestralProxy(config_name) as proxy:
            proxy.stop_sync()
        cli.ok("Syncing paused.")
    except CommunicationError:
        cli.echo("Maestral daemon is not running.")
@main.command(section="Core Commands", help="Resume syncing.")
@existing_config_option
def resume(config_name: str) -> None:
    """Ask the running daemon to resume syncing unless fatal errors exist."""
    from .daemon import MaestralProxy, CommunicationError

    try:
        with MaestralProxy(config_name) as proxy:
            # Bail out early when fatal errors prevent syncing.
            if check_for_fatal_errors(proxy):
                return
            proxy.start_sync()
            cli.ok("Syncing resumed.")
    except CommunicationError:
        cli.echo("Maestral daemon is not running.")
@main.group(section="Core Commands", help="Link, unlink and view the Dropbox account.")
def auth():
    """Command group for account management; subcommands do the work."""
    pass
@auth.command(name="link", help="Link a new Dropbox account.")
@click.option(
    "--relink",
    "-r",
    is_flag=True,
    default=False,
    help="Relink to the existing account. Keeps the sync state.",
)
@config_option
@convert_api_errors
def auth_link(relink: bool, config_name: str) -> None:
    """Run the interactive link dialog unless the config is already linked."""
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as proxy:
        # Nothing to do when linked and no relink was requested.
        if not (relink or proxy.pending_link):
            cli.echo(
                "Maestral is already linked. Use '-r' to relink to the same "
                "account or specify a new config name with '-c'."
            )
            return
        link_dialog(proxy)
@auth.command(
    name="unlink",
    help="""
Unlink your Dropbox account.
If Maestral is running, it will be stopped before unlinking.
""",
)
@click.option(
    "--yes", "-Y", is_flag=True, default=False, help="Skip confirmation prompt."
)
@existing_config_option
@convert_api_errors
def auth_unlink(yes: bool, config_name: str) -> None:
    """Unlink the Dropbox account for the given config.

    Stops a running daemon first, then revokes the stored credentials.
    A confirmation prompt is shown unless ``--yes`` was passed.
    """
    if not yes:
        # Fix: prompt previously read "want unlink" (missing "to").
        yes = cli.confirm("Are you sure you want to unlink your account?", default=False)

    if yes:
        from .main import Maestral

        stop_daemon_with_cli_feedback(config_name)
        m = Maestral(config_name)
        m.unlink()

        cli.ok("Unlinked Maestral.")
@auth.command(name="status", help="View authentication status.")
@existing_config_option
def auth_status(config_name: str) -> None:
    """Print the linked account's email, type and Dropbox ID."""
    from .config import MaestralConfig, MaestralState

    config = MaestralConfig(config_name)
    state = MaestralState(config_name)

    account_id = config.get("auth", "account_id")
    account_email = state.get("account", "email")
    account_kind = state.get("account", "type").capitalize()

    cli.echo("")
    cli.echo(f"Email: {account_email}")
    cli.echo(f"Account type: {account_kind}")
    cli.echo(f"Dropbox ID: {account_id}")
    cli.echo("")
@main.group(section="Core Commands", help="Create and manage shared links.")
def sharelink():
    """Command group for shared links; subcommands do the work."""
    pass
@sharelink.command(name="create", help="Create a shared link for a file or folder.")
@click.argument("dropbox_path", type=DropboxPath())
@click.option(
    "-p",
    "--password",
    help="Optional password for the link.",
)
@click.option(
    "-e",
    "--expiry",
    metavar="DATE",
    type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M", "%Y-%m-%d %H:%M"]),
    help="Expiry time for the link (e.g. '2025-07-24 20:50').",
)
@existing_config_option
@convert_api_errors
def sharelink_create(
    dropbox_path: str,
    password: str,
    expiry: Optional["datetime"],
    config_name: str,
) -> None:
    """Create a shared link and print its URL.

    A password makes the link password-protected; an expiry date converts
    to a POSIX timestamp for the daemon API.
    """
    from .daemon import MaestralProxy

    expiry_dt: Optional[float] = expiry.timestamp() if expiry else None
    visibility = "password" if password else "public"

    with MaestralProxy(config_name, fallback=True) as proxy:
        link_info = proxy.create_shared_link(dropbox_path, visibility, password, expiry_dt)

    cli.echo(link_info["url"])
@sharelink.command(name="revoke", help="Revoke a shared link.")
@click.argument("url")
@existing_config_option
@convert_api_errors
def sharelink_revoke(url: str, config_name: str) -> None:
    """Revoke the shared link with the given URL."""
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as proxy:
        proxy.revoke_shared_link(url)

    cli.ok("Revoked shared link.")
@sharelink.command(
    name="list", help="List shared links for a path or all shared links."
)
@click.argument("dropbox_path", required=False, type=DropboxPath())
@existing_config_option
@convert_api_errors
def sharelink_list(dropbox_path: Optional[str], config_name: str) -> None:
    """Print a table of shared links (URL, item name, visibility, expiry).

    When no path is given, all shared links of the account are listed.
    """
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as m:
        links = m.list_shared_links(dropbox_path)

    link_table = cli.Table(["URL", "Item", "Access", "Expires"])

    for link in links:
        url = cast(str, link["url"])
        file_name = cast(str, link["name"])
        visibility = cast(str, link["link_permissions"]["resolved_visibility"][".tag"])

        dt_field: cli.Field

        # Links without an "expires" entry never expire; show a dash.
        if "expires" in link:
            expires = cast(str, link["expires"])
            dt_field = cli.DateField(_datetime_from_iso_str(expires))
        else:
            dt_field = cli.TextField("-")

        link_table.append([url, file_name, visibility, dt_field])

    cli.echo("")
    link_table.echo()
    cli.echo("")
# ======================================================================================
# Information commands
# ======================================================================================
@main.command(section="Information", help="Show the status of the daemon.")
@existing_config_option
@convert_api_errors
def status(config_name: str) -> None:
    """Print account info, sync status and sync errors of the daemon.

    Falls back to a short notice when the daemon is not running.
    """
    from .daemon import MaestralProxy, CommunicationError

    check_for_updates()

    try:
        with MaestralProxy(config_name) as m:

            email = m.get_state("account", "email")
            account_type = m.get_state("account", "type").capitalize()
            usage = m.get_state("account", "usage")
            status_info = m.status

            # "--" stands in for missing account info (e.g. not yet linked).
            account_str = f"{email} ({account_type})" if email else "--"
            usage_str = usage or "--"

            n_errors = len(m.sync_errors)
            color = "red" if n_errors > 0 else "green"
            n_errors_str = click.style(str(n_errors), fg=color)

            cli.echo("")
            cli.echo(f"Account: {account_str}")
            cli.echo(f"Usage: {usage_str}")
            cli.echo(f"Status: {status_info}")
            cli.echo(f"Sync errors: {n_errors_str}")
            cli.echo("")

            check_for_fatal_errors(m)

            sync_errors = m.sync_errors

            # List individual sync errors in a two-column table.
            if len(sync_errors) > 0:

                path_column = cli.Column(title="Path")
                message_column = cli.Column(title="Error", wraps=True)

                for error in sync_errors:
                    path_column.append(error["dbx_path"])
                    message_column.append("{title}. {message}".format(**error))

                table = cli.Table([path_column, message_column])

                table.echo()
                cli.echo("")

    except CommunicationError:
        cli.echo("Maestral daemon is not running.")
@main.command(
    section="Information",
    help="""
Show the sync status of a local file or folder.
Returned value will be 'uploading', 'downloading', 'up to date', 'error', or 'unwatched'
(for files outside of the Dropbox directory). This will always be 'unwatched' if syncing
is paused. This command can be used, for instance, to query information for a plugin to
a file-manager.
""",
)
@click.argument("local_path", type=click.Path(exists=True, resolve_path=True))
@existing_config_option
def filestatus(local_path: str, config_name: str) -> None:
    """Print the sync status of a single local path.

    Prints "unwatched" when no daemon is running, mirroring the behavior
    for paths outside the Dropbox folder.
    """
    from .daemon import MaestralProxy, CommunicationError

    try:
        with MaestralProxy(config_name) as m:
            stat = m.get_file_status(local_path)
            cli.echo(stat)
    except CommunicationError:
        cli.echo("unwatched")
@main.command(section="Information", help="Live view of all items being synced.")
@existing_config_option
@convert_api_errors
def activity(config_name: str) -> None:
    """Show a live, curses-based view of the items currently being synced.

    Refreshes roughly once per second; pressing 'q' quits the view.
    Prints a notice instead when the daemon is not running.
    """
    import curses

    from .utils import natural_size
    from .daemon import MaestralProxy, CommunicationError

    try:
        with MaestralProxy(config_name) as m:

            if check_for_fatal_errors(m):
                return

            def curses_loop(screen) -> None:  # no type hints for screen provided yet
                curses.use_default_colors()  # don't change terminal background
                screen.nodelay(1)  # sets `screen.getch()` to non-blocking

                while True:

                    # width is unused; getmaxyx always returns both.
                    height, width = screen.getmaxyx()

                    # create header
                    lines = [
                        f"Status: {m.status}, Sync errors: {len(m.sync_errors)}",
                        "",
                    ]

                    # create table
                    filenames = []
                    states = []
                    col_len = 4

                    # limit=height - 3 leaves room for the two header lines.
                    for event in m.get_activity(limit=height - 3):

                        dbx_path = cast(str, event["dbx_path"])
                        direction = cast(str, event["direction"])
                        state = cast(str, event["status"])
                        size = cast(int, event["size"])
                        completed = cast(int, event["completed"])

                        filename = os.path.basename(dbx_path)
                        filenames.append(filename)

                        arrow = "↓" if direction == "down" else "↑"

                        if completed > 0:
                            # Show transfer progress, e.g. "1.2MB/3.4MB ↓".
                            done_str = natural_size(completed, sep=False)
                            todo_str = natural_size(size, sep=False)
                            states.append(f"{done_str}/{todo_str} {arrow}")
                        else:
                            if state == "syncing" and direction == "up":
                                states.append("uploading")
                            elif state == "syncing" and direction == "down":
                                states.append("downloading")
                            else:
                                states.append(state)

                        col_len = max(len(filename), col_len)

                    for name, state in zip(filenames, states):  # create rows
                        lines.append(name.ljust(col_len + 2) + state)

                    # print to console screen
                    screen.clear()
                    try:
                        screen.addstr("\n".join(lines))
                    except curses.error:
                        # Ignore drawing errors, e.g. if the terminal is too small.
                        pass
                    screen.refresh()

                    # abort when user presses 'q', refresh otherwise
                    key = screen.getch()
                    if key == ord("q"):
                        break
                    elif key < 0:
                        time.sleep(1)

            # enter curses event loop
            curses.wrapper(curses_loop)

    except CommunicationError:
        cli.echo("Maestral daemon is not running.")
@main.command(section="Information", help="Show recently changed or added files.")
@existing_config_option
@convert_api_errors
def history(config_name: str) -> None:
    """Print a table of recent sync events: path, change type and time."""
    from datetime import datetime
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as proxy:
        events = proxy.get_history()

    table = cli.Table(
        [
            cli.Column("Path", elide=cli.Elide.Leading),
            cli.Column("Change"),
            cli.Column("Time"),
        ]
    )

    for event in events:
        path = cast(str, event["dbx_path"])
        change = cast(str, event["change_type"])
        timestamp = cast(float, event["change_time_or_sync_time"])
        table.append([path, change, datetime.fromtimestamp(timestamp)])

    cli.echo("")
    table.echo()
    cli.echo("")
@main.command(section="Information", help="List contents of a Dropbox directory.")
@click.argument("dropbox_path", type=DropboxPath(), default="")
@click.option(
    "-l",
    "--long",
    is_flag=True,
    default=False,
    help="Show output in long format with metadata.",
)
@click.option(
    "-d",
    "--include-deleted",
    is_flag=True,
    default=False,
    help="Include deleted items in listing.",
)
@existing_config_option
@convert_api_errors
def ls(long: bool, dropbox_path: str, include_deleted: bool, config_name: str) -> None:
    """List a Dropbox folder.

    Default output is a compact grid of names; ``--long`` prints a table
    with type, size, sharing info, sync status and modification time.
    """
    from .utils import natural_size
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as m:

        # "\r" lets the table overwrite the "Loading..." text later.
        cli.echo("Loading...\r", nl=False)

        entries_iter = m.list_folder_iterator(
            dropbox_path,
            recursive=False,
            include_deleted=include_deleted,
        )

        if long:

            to_short_type = {
                "FileMetadata": "file",
                "FolderMetadata": "folder",
                "DeletedMetadata": "deleted",
            }

            table = cli.Table(
                columns=[
                    cli.Column("Name"),
                    cli.Column("Type"),
                    cli.Column("Size", align=cli.Align.Right),
                    cli.Column("Shared"),
                    cli.Column("Syncing"),
                    cli.Column("Last Modified"),
                ]
            )

            for entries in entries_iter:
                for entry in entries:

                    item_type = to_short_type[cast(str, entry["type"])]
                    name = cast(str, entry["name"])
                    path_lower = cast(str, entry["path_lower"])

                    # Grey out private items; color included ones green.
                    text = "shared" if "sharing_info" in entry else "private"
                    color = "bright_black" if text == "private" else None
                    shared_field = cli.TextField(text, fg=color)

                    excluded_status = m.excluded_status(path_lower)
                    color = "green" if excluded_status == "included" else None
                    text = "✓" if excluded_status == "included" else excluded_status
                    excluded_field = cli.TextField(text, fg=color)

                    # Folders and deleted entries have no "size".
                    if "size" in entry:
                        size = natural_size(cast(float, entry["size"]))
                    else:
                        size = "-"

                    dt_field: cli.Field

                    if "client_modified" in entry:
                        cm = cast(str, entry["client_modified"])
                        dt_field = cli.DateField(_datetime_from_iso_str(cm))
                    else:
                        dt_field = cli.TextField("-")

                    table.append(
                        [name, item_type, size, shared_field, excluded_field, dt_field]
                    )

            # Blank padding clears the remainder of the "Loading..." line.
            cli.echo(" " * 15)
            table.echo()
            cli.echo(" " * 15)

        else:

            grid = cli.Grid()

            for entries in entries_iter:
                for entry in entries:
                    name = cast(str, entry["name"])
                    color = "blue" if entry["type"] == "DeletedMetadata" else None

                    grid.append(cli.TextField(name, fg=color))

            grid.echo()
@main.command(section="Information", help="List all configured Dropbox accounts.")
@click.option(
    "--clean",
    is_flag=True,
    default=False,
    help="Remove config files without a linked account.",
)
def config_files(clean: bool) -> None:
    """List all Maestral configs, or remove stale ones with ``--clean``.

    A config is stale when it has no linked account ID and no daemon is
    currently running for it.
    """
    from .daemon import is_running
    from .config import (
        MaestralConfig,
        MaestralState,
        list_configs,
        remove_configuration,
    )

    if clean:

        # Clean up stale config files.
        for name in list_configs():
            conf = MaestralConfig(name)
            dbid = conf.get("auth", "account_id")

            if dbid == "" and not is_running(name):
                remove_configuration(name)
                cli.echo(f"Removed: {conf.config_path}")

    else:
        # Display config files.
        names = list_configs()
        emails = []
        paths = []

        for name in names:
            conf = MaestralConfig(name)
            state = MaestralState(name)

            emails.append(state.get("account", "email"))
            paths.append(conf.config_path)

        table = cli.Table(
            [
                cli.Column("Config name", names),
                cli.Column("Account", emails),
                cli.Column("Path", paths, elide=cli.Elide.Leading),
            ]
        )

        cli.echo("")
        table.echo()
        cli.echo("")
# ======================================================================================
# Settings
# ======================================================================================
@main.command(
    section="Settings",
    help="""
Automatically start the sync daemon on login.
A systemd or launchd service will be created to start a sync daemon for the given
configuration on user login.
""",
)
@click.option("--yes", "-Y", is_flag=True, default=False)
@click.option("--no", "-N", is_flag=True, default=False)
@existing_config_option
def autostart(yes: bool, no: bool, config_name: str) -> None:
    """Enable, disable or show the autostart-on-login setting."""
    from .autostart import AutoStart

    auto_start = AutoStart(config_name)

    # Bail out on platforms without a supported service manager.
    if not auto_start.implementation:
        cli.echo(
            "Autostart is currently not supported for your platform.\n"
            "Autostart requires systemd on Linux or launchd on macOS."
        )
        return

    if yes:
        auto_start.enable()
        cli.ok("Enabled start on login.")
    elif no:
        auto_start.disable()
        cli.ok("Disabled start on login.")
    elif auto_start.enabled:
        cli.echo("Autostart is enabled. Use -N to disable.")
    else:
        cli.echo("Autostart is disabled. Use -Y to enable.")
@main.group(section="Settings", help="View and manage excluded folders.")
def excluded():
    """Command group for selective sync exclusions; subcommands do the work."""
    pass
@excluded.command(name="list", help="List all excluded files and folders.")
@existing_config_option
def excluded_list(config_name: str) -> None:
    """Print all excluded items, sorted alphabetically."""
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as proxy:
        items = sorted(proxy.excluded_items)

        if not items:
            cli.echo("No excluded files or folders.")
        else:
            for item in items:
                cli.echo(item)
@excluded.command(
    name="add",
    help="Add a file or folder to the excluded list and re-sync.",
)
@click.argument("dropbox_path", type=DropboxPath())
@existing_config_option
@convert_api_errors
def excluded_add(dropbox_path: str, config_name: str) -> None:
    """Exclude a Dropbox path from syncing; the root cannot be excluded."""
    from .daemon import MaestralProxy

    if dropbox_path == "/":
        raise cli.CliException("Cannot exclude the root directory.")

    with MaestralProxy(config_name, fallback=True) as proxy:
        proxy.exclude_item(dropbox_path)
        cli.ok(f"Excluded '{dropbox_path}'.")
@excluded.command(
    name="remove",
    help="""
Remove a file or folder from the excluded list and re-sync.
It is safe to call this method with items which have already been included, they will
not be downloaded again. If the given path lies inside an excluded folder, the parent
folder will be included as well (but no other items inside it).
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@existing_config_option
@convert_api_errors
def excluded_remove(dropbox_path: str, config_name: str) -> None:
    """Include a previously excluded path and start downloading it.

    Requires a running daemon, since the download happens immediately.
    """
    from .daemon import MaestralProxy, CommunicationError

    # The root is always included; nothing to do.
    if dropbox_path == "/":
        return cli.echo("The root directory is always included")

    try:
        with MaestralProxy(config_name) as m:
            m.include_item(dropbox_path)
            cli.ok(f"Included '{dropbox_path}'. Now downloading...")

    except CommunicationError:
        raise cli.CliException("Daemon must be running to download folders.")
@main.group(section="Settings", help="Manage desktop notifications.")
def notify():
    """Command group for desktop notifications; subcommands do the work."""
    pass
@notify.command(
    name="level",
    help="Get or set the level for desktop notifications.",
)
@click.argument(
    "level_name",
    required=False,
    type=click.Choice(["ERROR", "SYNCISSUE", "FILECHANGE"], case_sensitive=False),
)
@existing_config_option
def notify_level(level_name: str, config_name: str) -> None:
    """Set the notification level when an argument is given, else print it."""
    from . import notify as _notify
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as m:
        if level_name:
            # Translate the CLI name to the daemon's numeric level.
            m.notification_level = _notify.level_name_to_number(level_name)
            cli.ok(f"Notification level set to {level_name}.")
        else:
            level_name = _notify.level_number_to_name(m.notification_level)
            cli.echo(f"Notification level: {level_name}.")
@notify.command(
    name="snooze",
    help="Snooze desktop notifications of file changes.",
)
@click.argument("minutes", type=click.IntRange(min=0))
@existing_config_option
def notify_snooze(minutes: int, config_name: str) -> None:
    """Snooze file-change notifications for the given number of minutes."""
    from .daemon import MaestralProxy, CommunicationError

    try:
        with MaestralProxy(config_name) as proxy:
            proxy.notification_snooze = minutes
    except CommunicationError:
        cli.echo("Maestral daemon is not running.")
        return

    if minutes > 0:
        cli.ok(
            f"Notifications snoozed for {minutes} min. Set snooze to 0 to reset."
        )
    else:
        cli.ok("Notifications enabled.")
# ======================================================================================
# Maintenance
# ======================================================================================
@main.command(section="Maintenance", help="Move the local Dropbox folder.")
@click.argument("new_path", required=False, type=click.Path(writable=True))
@existing_config_option
def move_dir(new_path: str, config_name: str) -> None:
    """Move the local Dropbox folder, prompting for a path if none given."""
    from .daemon import MaestralProxy

    target = new_path or select_dbx_path_dialog(config_name)

    with MaestralProxy(config_name, fallback=True) as proxy:
        proxy.move_dropbox_directory(target)
        cli.ok(f"Dropbox folder moved to {target}.")
@main.command(
    section="Maintenance",
    help="""
Rebuild the sync index.
Rebuilding may take several minutes, depending on the size of your Dropbox.
""",
)
@click.option(
    "--yes", "-Y", is_flag=True, default=False, help="Skip confirmation prompt."
)
@existing_config_option
@convert_api_errors
def rebuild_index(yes: bool, config_name: str) -> None:
    """Trigger a full rebuild of the sync index after user confirmation.

    If the daemon is not running, the rebuild is scheduled for the next
    startup instead of starting immediately.
    """
    import textwrap

    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as m:

        width = cli.get_term_width()

        msg = textwrap.fill(
            "Rebuilding the index may take several minutes, depending on the size of "
            "your Dropbox. Any changes to local files will be synced once rebuilding "
            "has completed. If you stop the daemon during the process, rebuilding will "
            "start again on the next launch.\nIf the daemon is not currently running, "
            "a rebuild will be scheduled for the next startup.",
            width=width,
        )

        cli.echo(msg + "\n")

        if yes or cli.confirm("Do you want to continue?", default=False):
            m.rebuild_index()

            # NOTE(review): reads the proxy's private `_is_fallback` to tell a
            # live daemon from the in-process fallback — no public API for this.
            if m._is_fallback:
                cli.ok("Daemon is not running. Rebuilding scheduled for next startup.")
            else:
                cli.ok("Rebuilding now. Run 'maestral status' to view progress.")
@main.command(section="Maintenance", help="List old file revisions.")
@click.argument("dropbox_path", type=DropboxPath())
@click.option(
    "-l",
    "--limit",
    help="Maximum number of revs to list.",
    show_default=True,
    type=click.IntRange(min=1, max=100),
    default=10,
)
@existing_config_option
@convert_api_errors
def revs(dropbox_path: str, limit: int, config_name: str) -> None:
    """Print a table of revisions (ID and modification time) for a path."""
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as proxy:
        entries = proxy.list_revisions(dropbox_path, limit=limit)

    table = cli.Table(["Revision", "Modified Time"])

    for entry in entries:
        revision = cast(str, entry["rev"])
        modified = _datetime_from_iso_str(cast(str, entry["client_modified"]))
        table.append([cli.TextField(revision), cli.DateField(modified)])

    cli.echo("")
    table.echo()
    cli.echo("")
@main.command(
    section="Maintenance",
    help="""
Compare two revisions of a file.
If no revs are passed to the command, you can select the revisions interactively. If
only one rev is passed, it is compared to the local version of the file. The diff is
shown via a pager if longer than 30 lines.
Warning: The specified revisions will be downloaded to temp files and loaded into memory
to generate the diff. Depending on the file size, this may use significant disk space
and memory.
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@click.option(
    "-v",
    "--rev",
    help="Revisions to compare (multiple allowed).",
    multiple=True,
    default=[],
)
@click.option("--no-color", help="Don't use colors for the diff.", is_flag=True)
@click.option("--no-pager", help="Don't use a pager for output.", is_flag=True)
@click.option(
    "-l",
    "--limit",
    help="Maximum number of revs to list.",
    show_default=True,
    type=click.IntRange(min=1, max=100),
    default=10,
)
@convert_api_errors
@existing_config_option
def diff(
    dropbox_path: str,
    rev: List[str],
    no_color: bool,
    no_pager: bool,
    limit: int,
    config_name: str,
) -> None:
    """Print a unified diff between two revisions of a Dropbox file.

    With no ``--rev`` options, the revisions are chosen interactively.
    With one, it is compared against the local file (rev ``None``).
    Fix: help text previously read "if longer 30 lines" (missing "than").
    """
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as m:

        # Ask for user input if revs are not provided as CLI arguments.
        if len(rev) == 0:
            entries = m.list_revisions(dropbox_path, limit=limit)

            # Attach a human-readable date description to each entry.
            for entry in entries:
                cm = cast(str, entry["client_modified"])
                field = cli.DateField(_datetime_from_iso_str(cm))
                entry["desc"] = field.format(40)[0]

            dbx_path = cast(str, entries[0]["path_display"])
            local_path = m.to_local_path(dbx_path)

            if osp.isfile(local_path):
                # prepend local version as an option
                entries.insert(0, {"desc": "local version", "rev": None})

            index_base = cli.select(
                message="New revision:",
                options=[e["desc"] for e in entries],
                hint="(↓ to see more)" if len(entries) > 6 else "",
            )

            # The oldest revision has nothing older to compare against.
            if index_base == len(entries) - 1:
                cli.warn(
                    "Oldest revision selected, unable to find anything to compare."
                )
                return

            # Only revisions older than the selected one are comparable.
            comparable_versions = entries[index_base + 1 :]
            index_new = cli.select(
                message="Old revision:",
                options=[e["desc"] for e in comparable_versions],
                hint="(↓ to see more)" if len(comparable_versions) > 6 else "",
            )

            old_rev = entries[index_new + index_base + 1]["rev"]
            new_rev = entries[index_base]["rev"]
        elif len(rev) == 1:
            old_rev = rev[0]
            new_rev = None  # None means the local version of the file
        elif len(rev) == 2:
            old_rev = rev[0]
            new_rev = rev[1]
        elif len(rev) > 2:
            cli.warn("You can only compare two revisions at a time.")
            return

        # Download up to two revisions to a local temporary folder
        # and compare them with a 'diff'. Only text files are supported.
        # If an unknown file type was found, everything that doesn't match
        # 'text/*', an error message gets printed.
        click.echo("Loading ...\r", nl=False)
        diff_output = m.get_file_diff(old_rev, new_rev)

        if len(diff_output) == 0:
            click.echo("There are no changes between the two revisions.")
            return

        def color(ind: int, line: str) -> str:
            """
            Color diff lines.
            Inspiration for colors was taken from the
            well known command 'git diff'.
            """

            if ind < 2:
                line = click.style(line, bold=True)
            elif line.startswith("+"):
                line = click.style(line, fg="green")
            elif line.startswith("-"):
                line = click.style(line, fg="red")
            # Don't highlight these in the intro.
            elif line.startswith("@@ "):
                line = click.style(line, fg="cyan")
            return line

        # Color the lines.
        if not no_color:
            diff_output = [color(i, l) for i, l in enumerate(diff_output)]

        # Enter pager if diff is too long
        if len(diff_output) > 30 and not no_pager:
            click.echo_via_pager("".join(diff_output))
        else:
            click.echo("".join(diff_output))
@main.command(
    section="Maintenance",
    help="""
Restore a previous version of a file.
If no revision number is given, old revisions will be listed.
""",
)
@click.argument("dropbox_path", type=DropboxPath())
@click.option("-v", "--rev", help="Revision to restore.", default="")
@click.option(
    "-l",
    "--limit",
    help="Maximum number of revs to list.",
    show_default=True,
    type=click.IntRange(min=1, max=100),
    default=10,
)
@existing_config_option
@convert_api_errors
def restore(dropbox_path: str, rev: str, limit: int, config_name: str) -> None:
    """Restore a file to an older revision.

    When no revision is passed, an interactive list of recent revisions
    (by modification date) is offered for selection.
    """
    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as m:

        if not rev:
            # Interactive selection: show formatted modification dates.
            cli.echo("Loading...\r", nl=False)
            entries = m.list_revisions(dropbox_path, limit=limit)
            dates = []
            for entry in entries:
                cm = cast(str, entry["client_modified"])
                field = cli.DateField(_datetime_from_iso_str(cm))
                dates.append(field.format(40)[0])

            index = cli.select(
                message="Select a version to restore:",
                options=dates,
                hint="(↓ to see more)" if len(entries) > 6 else "",
            )

            rev = cast(str, entries[index]["rev"])

        m.restore(dropbox_path, rev)

        cli.ok(f'Restored {rev} to "{dropbox_path}"')
@main.group(section="Maintenance", help="View and manage the log.")
def log():
    """Command group for log management; subcommands do the work."""
    pass
@log.command(name="show", help="Print logs to the console.")
@click.option(
    "--external", "-e", is_flag=True, default=False, help="Open in external program."
)
@existing_config_option
def log_show(external: bool, config_name: str) -> None:
    """Show the log file, either via a pager or an external program."""
    from .utils.appdirs import get_log_path

    log_file = get_log_path("maestral", config_name + ".log")

    if external:
        status = click.launch(log_file)
    else:
        status = 0
        try:
            with open(log_file) as f:
                contents = f.read()
            click.echo_via_pager(contents)
        except OSError:
            status = 1

    if status > 0:
        raise cli.CliException(f"Could not open log file at '{log_file}'")
@log.command(name="clear", help="Clear the log files.")
@existing_config_option
def log_clear(config_name: str) -> None:
    """Truncate all log files for the given config (including rotated ones)."""
    from .utils.appdirs import get_log_path

    log_dir = get_log_path("maestral")
    log_name = config_name + ".log"

    # Collect the main log plus rotated files (name.log, name.log.1, ...).
    log_files = []

    for file_name in os.listdir(log_dir):
        if file_name.startswith(log_name):
            log_files.append(os.path.join(log_dir, file_name))

    try:
        for file in log_files:
            # Opening in "w" mode truncates the file to zero length.
            open(file, "w").close()
        cli.ok("Cleared log files.")
    except FileNotFoundError:
        # A file vanished concurrently: treat as success (nothing to clear).
        cli.ok("Cleared log files.")
    except OSError:
        raise cli.CliException(
            f"Could not clear log at '{log_dir}'. " f"Please try to delete it manually"
        )
@log.command(name="level", help="Get or set the log level.")
@click.argument(
    "level_name",
    required=False,
    type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"], case_sensitive=False),
)
@existing_config_option
def log_level(level_name: str, config_name: str) -> None:
    """Set the daemon's log level when an argument is given, else print it."""
    import logging

    from .daemon import MaestralProxy

    with MaestralProxy(config_name, fallback=True) as proxy:
        if not level_name:
            level_name = logging.getLevelName(proxy.log_level)
            cli.echo(f"Log level: {level_name}")
        else:
            # Map the name (e.g. "DEBUG") to the numeric logging constant.
            proxy.log_level = cast(int, getattr(logging, level_name))
            cli.ok(f"Log level set to {level_name}.")
@main.group(
    section="Maintenance",
    help="""
Direct access to config values.
Warning: Changing some config values must be accompanied by maintenance tasks. For
example, changing the config value for the Dropbox location needs to be accompanied by
actually moving the folder. This command only gets / sets the value in the config file.
Most changes will also require a restart of the daemon to become effective.
Use the commands from the Settings section instead wherever possible. They will take
effect immediately, perform accompanying tasks for you, and never leave the daemon in an
inconsistent state.
Currently available config keys are:
\b
- path: the location of the local Dropbox folder
- excluded_items: list of files or folders excluded by selective sync
- account_id: the ID of the linked Dropbox account
- notification_level: the level for desktop notifications
- log_level: the log level.
- update_notification_interval: interval in secs to check for updates
- keyring: the keyring backend to use (full path of the class)
- reindex_interval: the interval in seconds for full reindexing
- max_cpu_percent: maximum CPU usage target per core
- keep_history: the sync history to keep in seconds
- upload: if upload sync is enabled
- download: if download sync is enabled
""",
)
def config():
    """Command group for raw config access; subcommands do the work."""
    pass
@config.command(name="get", help="Print the value of a given configuration key.")
@click.argument("key", type=ConfigKey())
@config_option
def config_get(key: str, config_name: str) -> None:
    """Print a config value, via the daemon if running, else from disk."""
    from .config import MaestralConfig
    from .config.main import KEY_SECTION_MAP
    from .daemon import MaestralProxy, CommunicationError

    # Check if the config key exists in any section.
    section = KEY_SECTION_MAP.get(key, "")

    if not section:
        raise cli.CliException(f"'{key}' is not a valid configuration key.")

    try:
        with MaestralProxy(config_name) as proxy:
            value = proxy.get_conf(section, key)
    except CommunicationError:
        # No daemon: read the value directly from the config file.
        value = MaestralConfig(config_name).get(section, key)

    cli.echo(value)
@config.command(
    name="set",
    help="""
Update configuration with a value for the given key.
Values will be cast to the proper type, raising an error where this is not possible. For
instance, setting a boolean config value to 1 will actually set it to True.
""",
)
@click.argument("key", type=ConfigKey())
@click.argument("value")
@config_option
@convert_api_errors
def config_set(key: str, value: str, config_name: str) -> None:
    """Set a config value, coercing the string input to the expected type.

    Non-string defaults are parsed with ``ast.literal_eval``; values that
    fail to parse are passed through unchanged and validated downstream.
    Fix: help text previously read "not possibly" instead of "not possible".
    """
    import ast

    from .config.main import KEY_SECTION_MAP, DEFAULTS_CONFIG
    from .daemon import MaestralProxy

    section = KEY_SECTION_MAP.get(key, "")

    if not section:
        raise cli.CliException(f"'{key}' is not a valid configuration key.")

    default_value = DEFAULTS_CONFIG[section][key]

    if isinstance(default_value, str):
        py_value = value
    else:
        try:
            py_value = ast.literal_eval(value)
        except (SyntaxError, ValueError):
            # Leave unparsable input as a string; set_conf may still reject it.
            py_value = value

    try:
        with MaestralProxy(config_name, fallback=True) as m:
            m.set_conf(section, key, py_value)
    except ValueError as e:
        cli.warn(e.args[0])
@config.command(name="show", help="Show all config keys and values")
@click.option("--no-pager", help="Don't use a pager for output.", is_flag=True)
@config_option
def config_show(no_pager: bool, config_name: str) -> None:
    """Dump the whole config file, paged unless ``--no-pager`` is given."""
    import io

    from .config import MaestralConfig

    # Serialize the config into an in-memory buffer.
    buffer = io.StringIO()
    MaestralConfig(config_name).write(buffer)
    text = buffer.getvalue()
    buffer.close()

    output = click.echo if no_pager else click.echo_via_pager
    output(text)
@main.command(
    section="Maintenance",
    help="""
Generate completion script for your shell.
This command can generate shell completion scripts for bash, zsh or fish. Follow the
instructions below for your shell to load the resulting script. The exact config file
locations might vary based on your system. Make sure to restart your
shell before testing whether completions are working.
### bash
You can enable shell completion for all users by generating and saving the script as
follows:
\b
maestral completion bash > /usr/share/bash-completion/completions/maestral
To enable shell completion for the current user only, save the script in a location of
your choice, for example `~/.local/completions/maestral`, and source it in `~/.bashrc`
by adding the line:
\b
. ~/.local/completions/maestral
### zsh
Generate a `_maestral` completion script and put it somewhere in your `$fpath`. For
example:
\b
maestral completion zsh > /usr/local/share/zsh/site-functions/_maestral
You can also save the completion script in a location of your choice and source it
in `~/.zshrc`. Ensure that the following is present in your `~/.zshrc`:
\b
autoload -Uz compinit && compinit
### fish
Generate and save a `maestral.fish` completion script as follows. For all users:
\b
maestral completion fish > /usr/share/fish/vendor_completions.d/maestral.fish
For the current user only:
\b
maestral completion fish > ~/.config/fish/completions/maestral.fish
""",
)
@click.argument("shell", type=click.Choice(["bash", "zsh", "fish"]))
def completion(shell: str) -> None:
    """Print a shell-completion script for the chosen shell to stdout."""
    from click.shell_completion import get_completion_class

    comp_cls = get_completion_class(shell)

    if comp_cls is None:
        cli.warn(f"{shell} shell is currently not supported")
        return

    comp = comp_cls(main, {}, "maestral", "_MAESTRAL_COMPLETE")

    try:
        click.echo(comp.source())
    except RuntimeError as exc:
        # source() may raise e.g. for unsupported shell versions.
        cli.warn(exc.args[0])
|
<filename>preprocessy/outliers/_handleoutlier.py<gh_stars>0
import warnings
import pandas as pd
from ..exceptions import ArgumentsError
class HandleOutlier:
    """Remove or flag outliers in numeric dataframe columns.

    Outliers are defined per column as values outside the
    [first_quartile, third_quartile] quantile cut-offs computed on the
    *training* dataframe (defaults: 0.05 / 0.95).

    Private methods
    _ _ _ _ _ _ _ _ _ _

    __return_quartiles() : caches the (rounded) quantile cut-offs for one
                           column of train_df.

    Public Methods
    _ _ _ _ _ _ _ _ _

    handle_outliers() : validates the supplied params dict, then either drops
                        out-of-range rows (remove_outliers=True) or overwrites
                        them with -999 (replace=True). Categorical and target
                        columns are never touched; processed dataframes are
                        written back into the params dict.
    """

    def __init__(self):
        """Initialise the default configuration.

        All of these can be overridden through the ``params`` dict passed to
        ``handle_outliers``:

        - train_df        : training dataframe (required)
        - test_df         : optional test dataframe, filtered with the
                            cut-offs computed on train_df
        - cat_cols        : categorical columns that should not be touched
        - cols            : explicit subset of columns to process
        - target          : the target column (never touched)
        - remove_outliers : drop out-of-range rows (default True)
        - replace         : overwrite out-of-range rows with -999 instead of
                            removing them (default False)
        - first_quartile  : start of the kept range, 0 < q < 1 (default 0.05)
        - third_quartile  : end of the kept range, 0 < q < 1 (default 0.95)
        """
        self.train_df = None
        self.test_df = None
        self.cat_cols = []
        self.cols = []
        self.target = []
        self.remove_outliers = True
        self.replace = False
        self.quartiles = {}
        self.first_quartile = 0.05
        self.third_quartile = 0.95

    def __validate_input(self):
        """Validate the types and value ranges of the configured parameters.

        Raises ValueError / TypeError / KeyError / ArgumentsError with a
        descriptive message on the first invalid parameter found.
        """
        if self.train_df is None:
            raise ValueError("Train dataframe should not be of None type")
        # not adding validation for whether test_df is included or not since
        # user choice
        if not isinstance(self.train_df, pd.core.frame.DataFrame):
            raise TypeError(
                "Train dataframe is not a valid dataframe.\nExpected object"
                f" type: pandas.core.frame.DataFrame\n Received type {type(self.train_df)} of dataframe"
            )
        if self.test_df is not None and not isinstance(
            self.test_df, pd.core.frame.DataFrame
        ):
            raise TypeError(
                "Test dataframe is not a valid datafram.\nExpected Object"
                f" type: pandas.core.frame.DataFrame\n Received type {type(self.test_df)} of dataframe"
            )
        if not isinstance(self.cols, list):
            raise TypeError(
                f"'cols' should be of type list. Received {self.cols} of"
                f" type {type(self.cols)}"
            )
        else:
            for c in self.cols:
                if not isinstance(c, str):
                    raise TypeError(
                        f"'column' should be of type str. Received {c} of"
                        f" type {type(c)}"
                    )
                elif c not in self.train_df.columns:
                    raise KeyError(f" '{c}' column is not present in train_df")
        if not isinstance(self.remove_outliers, bool):
            raise TypeError(
                f"'remove_outliers' should be of type bool. Received {self.remove_outliers} of"
                f" type {type(self.remove_outliers)}"
            )
        if not isinstance(self.replace, bool):
            raise TypeError(
                f"'replace' should be of type bool. Received {self.replace} of"
                f" type {type(self.replace)}"
            )
        # The two strategies are mutually exclusive.
        if self.remove_outliers and self.replace:
            raise ArgumentsError(
                "Both remove_outliers and replace arguments cannot be true"
            )
        if (not self.remove_outliers) and (not self.replace):
            warnings.warn(
                "remove_outliers and replace both are False, thus no operation will be performed on"
                " dataframe, please specify either of the argument as True ",
                UserWarning,
            )
        if not isinstance(self.first_quartile, float):
            raise TypeError(
                f"'first_quartile' should be of type float. Received {self.first_quartile} of"
                f" type {type(self.first_quartile)}"
            )
        if not isinstance(self.third_quartile, float):
            raise TypeError(
                f"'third_quartile' should be of type float. Received {self.third_quartile} of"
                f" type {type(self.third_quartile)}"
            )
        if self.first_quartile >= 1 or self.first_quartile <= 0:
            raise ValueError(
                f"Value of first quartile must range between 0-1(exclusive).\n Rececived value {self.first_quartile}"
            )
        if (self.third_quartile >= 1) or (self.third_quartile <= 0):
            raise ValueError(
                f"Value of third quartile must range between 0-1(exclusive).\n Rececived value {self.third_quartile}"
            )
        if self.first_quartile > self.third_quartile:
            raise ValueError(
                "Value of first quartile should not be greater than value of third quartile"
            )

    def __repr__(self):
        return f"HandleOutlier(remove_outliers={self.remove_outliers}, replace={self.replace}, first_quartile={self.first_quartile}, third_quartile={self.third_quartile})"

    def __return_quartiles(self, col):
        """Cache the rounded lower/upper quantile cut-offs for ``col``.

        NOTE: cut-offs are rounded to the nearest integer, which assumes
        integer-like column values.
        """
        q1 = round(self.train_df[col].quantile(self.first_quartile))
        q3 = round(self.train_df[col].quantile(self.third_quartile))
        self.quartiles[col] = [q1, q3]

    def handle_outliers(self, params):
        """Drop or replace outliers in ``params['train_df']`` (and test_df).

        Recognised keys in ``params``: train_df, test_df, target, cat_cols,
        cols, remove_outliers, replace, first_quartile, third_quartile.
        The processed dataframes are written back into ``params`` in place.
        """
        if "train_df" in params.keys():
            self.train_df = params["train_df"]
        if "test_df" in params.keys():
            self.test_df = params["test_df"]
        if "target" in params.keys():
            self.target.append(params["target"])
        if "cat_cols" in params.keys():
            self.cat_cols = params["cat_cols"]
        if "remove_outliers" in params.keys():
            self.remove_outliers = params["remove_outliers"]
        if "replace" in params.keys():
            self.replace = params["replace"]
        if "first_quartile" in params.keys():
            self.first_quartile = params["first_quartile"]
        if "third_quartile" in params.keys():
            self.third_quartile = params["third_quartile"]
        # Bug fix: assign the user-supplied columns *before* validation so
        # that __validate_input actually checks them (previously it only ever
        # validated the default empty list).
        if "cols" in params.keys():
            self.cols = params["cols"]
        self.__validate_input()
        # Never process categorical or target columns.
        if "cols" in params.keys():
            self.cols = [
                item
                for item in self.cols
                if item not in self.cat_cols and item not in self.target
            ]
        else:
            self.cols = [
                item
                for item in self.train_df.columns
                if item not in self.cat_cols and item not in self.target
            ]
        if self.remove_outliers:
            if len(self.cols) >= 1:
                for col in self.cols:
                    self.__return_quartiles(col)
                for col in self.cols:
                    q1, q3 = self.quartiles[col]
                    self.train_df = self.train_df[(self.train_df[col] >= q1)]
                    self.train_df = self.train_df[(self.train_df[col] <= q3)]
                    if self.test_df is not None:
                        # Bug fix: keep the in-range rows. The original
                        # comparisons were inverted for test_df
                        # (<= q1 and >= q3), which left it empty.
                        self.test_df = self.test_df[(self.test_df[col] >= q1)]
                        self.test_df = self.test_df[(self.test_df[col] <= q3)]
        # if removeoutliers = False and replace=True i.e. user wants outliers
        # replaced by a value to indicate these are outliers
        elif self.replace:
            if len(self.cols) >= 1:
                for col in self.cols:
                    self.__return_quartiles(col)
                for col in self.cols:
                    q1, q3 = self.quartiles[col]
                    # Row-wise replacement: every cell of an outlier row is
                    # overwritten with the -999 sentinel.
                    self.train_df[(self.train_df[col] < q1)] = -999
                    self.train_df[(self.train_df[col] > q3)] = -999
                    if self.test_df is not None:
                        # Consistency fix: use the same strict comparisons as
                        # train_df so boundary values are treated identically.
                        self.test_df[(self.test_df[col] < q1)] = -999
                        self.test_df[(self.test_df[col] > q3)] = -999
        params["train_df"] = self.train_df
        if self.test_df is not None:
            params["test_df"] = self.test_df
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# On Heroku the key is taken from the environment; the hard-coded fallback
# below is a redacted placeholder for local development only.
if 'DJANGO_SECRET_KEY' in os.environ: # running on heroku
    SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
else:
    SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is True unconditionally, including on Heroku — confirm
# it is overridden (or gated on an env var) for production deploys.
DEBUG = True
# Hosts this site may be served from: Heroku app, local dev, custom domain.
ALLOWED_HOSTS = ['python-in-edu.herokuapp.com', '127.0.0.1', 'education.python.org']
# Application definition
INSTALLED_APPS = [
    # Django built-ins
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party registration workflow
    'django_registration',
    # project apps
    'mysite',
    'resources',
    'django.contrib.admin',
    'multiselectfield',
    # spirit forum apps
    'spirit.core',
    'spirit.admin',
    'spirit.search',
    'spirit.user',
    'spirit.user.admin',
    'spirit.user.auth',
    'spirit.category',
    'spirit.category.admin',
    'spirit.topic',
    'spirit.topic.admin',
    'spirit.topic.favorite',
    'spirit.topic.moderate',
    'spirit.topic.notification',
    'spirit.topic.private',
    'spirit.topic.unread',
    'spirit.comment',
    'spirit.comment.bookmark',
    'spirit.comment.flag',
    'spirit.comment.flag.admin',
    'spirit.comment.history',
    'spirit.comment.like',
    'spirit.comment.poll',
    # Spirit dependencies: dynamic config, search backend, template helpers
    'djconfig',
    'haystack',
    'django.contrib.humanize',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Static-file serving (Heroku).
    # NOTE(review): WhiteNoise is conventionally placed directly after
    # SecurityMiddleware — confirm this late position is intentional.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    # 'spirit.core.middleware.XForwardedForMiddleware',
    # Spirit forum middleware: user tracking, private-forum gating, djconfig.
    'spirit.user.middleware.TimezoneMiddleware',
    'spirit.user.middleware.LastIPMiddleware',
    'spirit.user.middleware.LastSeenMiddleware',
    'spirit.user.middleware.ActiveUserMiddleware',
    'spirit.core.middleware.PrivateForumMiddleware',
    'djconfig.middleware.DjConfigMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # added for spirit
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'djconfig.context_processors.config',
            ],
        },
    },
]
# Database-backed caches required by Spirit (create with ``createcachetable``).
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
        'LOCATION': 'spirit_cache',
    },
    # Spirit rate limiting; TIMEOUT=None means entries never expire.
    'st_rate_limit': {
        'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
        'LOCATION': 'spirit_rl_cache',
        'TIMEOUT': None
    }
}
# Allow login by either username or e-mail (Spirit auth backends).
AUTHENTICATION_BACKENDS = [
    'spirit.user.auth.backends.UsernameAuthBackend',
    'spirit.user.auth.backends.EmailAuthBackend',
]
# NOTE(review): Spirit site URL points at local dev — confirm it is
# overridden for the Heroku/production deployment.
ST_SITE_URL = 'http://127.0.0.1:8000/'
# Whoosh-based full-text search index for Spirit, updated in real time.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(BASE_DIR, 'st_search'),
    },
}
HAYSTACK_SIGNAL_PROCESSOR = 'spirit.search.signals.RealtimeSignalProcessor'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Heroku supplies DATABASE_URL (Postgres); local development uses SQLite.
if 'DATABASE_URL' in os.environ: # running on heroku
    DATABASES = {}
    DATABASES['default'] = dj_database_url.config(conn_max_age=600)
else: # running locally
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': BASE_DIR / 'db.sqlite3',
        }
    }
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static_build')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static_source'),
)
# Django Registration
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = 'login'
# Default SMTP port matches a local debugging SMTP server; replaced below
# when a real EMAIL_HOST is configured in the environment.
EMAIL_PORT = 1025
if 'EMAIL_HOST' in os.environ: # running on heroku, probably
    EMAIL_HOST = os.environ.get('EMAIL_HOST')
    EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
    EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
    EMAIL_PORT = int(os.environ.get('EMAIL_PORT'))
    EMAIL_USE_TLS = True
    DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
else:
    # Redacted placeholder address for local development.
    DEFAULT_FROM_EMAIL = "<EMAIL>"
|
<reponame>esikachev/sahara-backup<filename>sahara/tests/unit/utils/test_keymgr.py
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.tests.unit import base
from sahara.utils import keymgr
class TestKeymgrUtils(base.SaharaTestCase):
    """Tests for sahara.utils.keymgr with and without an external key manager.

    Without ``use_external_key_manager``, get/store are pass-throughs; with
    it, they round-trip keys through a (mocked) Barbican client.
    """

    def setUp(self):
        super(TestKeymgrUtils, self).setUp()

    @mock.patch('sahara.utils.openstack.barbican.client_for_admin')
    def test_keymgr_delete_with_external(self, client_for_admin):
        self.override_config('use_external_key_manager', True)
        keyref = 'test_key_reference'
        manager = mock.Mock()
        manager.delete = mock.Mock()
        client_for_admin.return_value = mock.Mock(secrets=manager)
        keymgr.delete(keyref)
        manager.delete.assert_called_with(keyref)

    def test_keymgr_get_no_external(self):
        # with no external key manager, get should return the argument
        actual_key = 'test_key_super_secret'
        self.assertEqual(actual_key, keymgr.get(actual_key))

    @mock.patch('sahara.utils.openstack.barbican.client_for_admin')
    def test_keymgr_get_with_external(self, client_for_admin):
        self.override_config('use_external_key_manager', True)
        actual_key = 'test_key_super_secret'
        keyref = 'test_key_reference'
        manager = mock.Mock()
        manager.get = mock.Mock(return_value=mock.Mock(payload=actual_key))
        client_for_admin.return_value = mock.Mock(secrets=manager)
        # with external key manager, get should return a key from a reference
        resolved = keymgr.get(keyref)
        manager.get.assert_called_with(keyref)
        self.assertEqual(actual_key, resolved)

    def test_keymgr_store_no_external(self):
        # with no external key manager, store should return the argument
        actual_key = 'test_key_super_secret'
        self.assertEqual(actual_key, keymgr.store(actual_key))

    @mock.patch('sahara.utils.openstack.barbican.client_for_admin')
    def test_keymgr_store_with_external(self, client_for_admin):
        self.override_config('use_external_key_manager', True)
        key = 'test_key_super_secret'
        actual_keyref = 'test_key_reference'
        secret = mock.Mock()
        secret.store = mock.Mock(return_value=actual_keyref)
        manager = mock.Mock()
        manager.create = mock.Mock(return_value=secret)
        client_for_admin.return_value = mock.Mock(secrets=manager)
        # with external key manager, store should return a key reference
        self.assertEqual(actual_keyref, keymgr.store(key))
        manager.create.assert_called_with(
            payload=key, payload_content_type='text/plain')
        secret.store.assert_called_once_with()
|
<reponame>banboooo044/natural-language-sentiment-anaysis
import os,sys
sys.path.append('../')
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, f1_score
import torchtext
from torchtext.data import Field, Dataset, Example
from gensim.models import KeyedVectors
from src.model import Model
from src.util import Util
class PositionalEncoding(nn.Module):
    """Add sinusoidal position information to a (seq, batch, d_model) tensor.

    Implements the fixed sine/cosine table of "Attention Is All You Need":
    even channels carry sin(pos / 10000^(2i/d)), odd channels the cosine.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Pre-compute the encoding table once for all positions up to max_len.
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Shape (max_len, 1, d_model) so it broadcasts over the batch dim.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Add the positional table to ``x`` and apply dropout."""
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerClassifier(nn.Module):
    """Transformer encoder over token ids with a linear vocabulary head.

    Input: LongTensor of token ids, shape (seq_len, batch).
    Output: logits of shape (seq_len, batch, ntoken).
    A causal (no-peeking-ahead) mask is cached on ``self.src_mask`` and
    rebuilt whenever the incoming sequence length changes.
    """

    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerClassifier, self).__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer
        self.model_type = 'Transformer'
        self.src_mask = None  # built lazily in forward()
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        layer = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(layer, nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)   # token id -> vector
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)      # vector -> vocab logits
        self.init_weights()

    def _generate_square_subsequent_mask(self, sz):
        """Return an (sz, sz) float mask: 0.0 on/below the diagonal, -inf above."""
        allowed = torch.tril(torch.ones(sz, sz)) == 1
        return (allowed.float()
                .masked_fill(~allowed, float('-inf'))
                .masked_fill(allowed, 0.0))

    def init_weights(self):
        """Uniform(-0.1, 0.1) init for embedding and head weights; zero bias."""
        bound = 0.1
        self.encoder.weight.data.uniform_(-bound, bound)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-bound, bound)

    def forward(self, src):
        # Rebuild the cached causal mask if the sequence length changed.
        if self.src_mask is None or self.src_mask.size(0) != len(src):
            self.src_mask = self._generate_square_subsequent_mask(len(src)).to(src.device)
        # Scale embeddings by sqrt(d_model) as in Vaswani et al.
        embedded = self.encoder(src) * np.sqrt(self.ninp)
        encoded = self.transformer_encoder(self.pos_encoder(embedded), self.src_mask)
        return self.decoder(encoded)
class TorchDataset(torchtext.data.Dataset):
    """In-memory torchtext Dataset built from parallel text/label lists.

    Parameters
    ----------
    text_list : list[str]
        Raw text samples.
    label_list : list
        Labels aligned with ``text_list``.
    fields : list[tuple[str, torchtext.data.Field]]
        Field definitions, e.g. ``[('Text', TEXT), ('Label', LABEL)]``.
    """

    def __init__(self, text_list, label_list, fields):
        examples = [
            Example.fromlist([text, label], fields)
            for text, label in zip(text_list, label_list)
        ]
        # Bug fix: let the base Dataset store both examples and fields.
        # The original assigned self.examples directly and never called
        # super().__init__, so ``self.fields`` was left unset, which breaks
        # torchtext iterators/build_vocab that rely on it.
        super().__init__(examples, fields)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
class ModelTransformer(Model):
    # NOTE(review): this class appears unfinished — several names used below
    # are undefined (see inline notes). It looks like a partial port from a
    # Keras implementation plus the PyTorch language-model tutorial.
    def __init__(self, run_fold_name, **params):
        # Delegate run naming and hyper-parameter storage to the base Model.
        super().__init__(run_fold_name, params)
    def train(self, tr_x, tr_y, va_x=None, va_y=None):
        """
        tr_x : List[str] (example.) [ "I am happy", "hello" ]
        tr_y : List[label]
        embedding_model : gensim.models.KeyedVectors Object
        """
        validation = va_x is not None
        nb_classes = 5
        batch_size = int(self.params['batch_size'])
        embedding_vector = self.params['embedding_vector']
        use_pre_embedding = not (embedding_vector is None)
        self.max_len = 70
        # Comma-tokenised text field, padded/truncated to max_len.
        self.TEXT = torchtext.data.Field(sequential=True, tokenize=lambda x: x.split(","),use_vocab=True,
                            fix_length=self.max_len, batch_first=True,include_lengths=True)
        self.LABEL = torchtext.data.Field(sequential=False, use_vocab=False)
        fields = [('Text', self.TEXT), ('Label', self.LABEL)]
        train_ds = TorchDataset(tr_x, tr_y, fields)
        if validation:
            val_ds = TorchDataset(va_x, va_y, fields)
        ## DEBUG
        print(len(train_ds))
        print(vars(train_ds[0]))
        # Build the vocabulary, optionally seeded with pre-trained vectors.
        if validation:
            if use_pre_embedding:
                self.TEXT.build_vocab(train_ds, val_ds, vectors=embedding_vector)
            else:
                self.TEXT.build_vocab(train_ds, val_ds)
        else:
            if use_pre_embedding:
                self.TEXT.build_vocab(train_ds, vectors=embedding_vector)
            else:
                self.TEXT.build_vocab(train_ds)
        ## DEBUG
        print(self.TEXT.vocab.stoi)
        # Iterators over the datasets (translated from Japanese: "parameters").
        train_dl = torchtext.data.Iterator(train_ds, batch_size=batch_size, train=True)
        if validation:
            val_dl = torchtext.data.Iterator(val_ds, batch_size=batch_size, train=False, sort=False)
            ## DEBUG
            batch = next(iter(val_dl))
            print(batch.Text)
            print(batch.Label)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print("Use : ", device)
        # NOTE(review): ``TEXT`` is undefined here — this should almost
        # certainly be ``self.TEXT``; as written this line raises NameError.
        ntokens = len(TEXT.vocab.stoi) # the size of vocabulary
        emsize = 200 # embedding dimension
        nhid = 200 # the dimension of the feedforward network model in nn.TransformerEncoder
        nlayers = 2 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
        nhead = 2 # the number of heads in the multiheadattention models
        dropout = 0.2 # the dropout value
        epoch = 10
        model = TransformerClassifier(ntokens, emsize, nhead, nhid, nlayers, dropout).to(device)
        criterion = nn.CrossEntropyLoss()
        lr = 5.0 # learning rate
        optimizer = torch.optim.SGD(model.parameters(), lr=lr)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
        import time
        model.train() # Turn on the train mode
        total_loss = 0.
        start_time = time.time()
        # NOTE(review): the loop below is copied from the PyTorch language
        # model tutorial — ``train_data``, ``bptt`` and ``get_batch`` are not
        # defined anywhere in this file; it presumably should iterate over
        # ``train_dl`` built above. Confirm before use.
        for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
            data, targets = get_batch(train_data, i)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output.view(-1, ntokens), targets)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
            optimizer.step()
            total_loss += loss.item()
            log_interval = 200
            if batch % log_interval == 0 and batch > 0:
                cur_loss = total_loss / log_interval
                elapsed = time.time() - start_time
                print('| epoch {:3d} | {:5d}/{:5d} batches | '
                      'lr {:02.2f} | ms/batch {:5.2f} | '
                      'loss {:5.2f} | ppl {:8.2f}'.format(
                        epoch, batch, len(train_data) // bptt, scheduler.get_lr()[0],
                        elapsed * 1000 / log_interval,
                        cur_loss, np.exp(cur_loss)))
                total_loss = 0
                start_time = time.time()
    def predict(self, te_x):
        # NOTE(review): Keras-era leftovers — ``self.token``,
        # ``pad_sequences`` and ``self.model`` are never defined in this
        # class, so predict raises AttributeError/NameError as written.
        xtest_seq = self.token.texts_to_sequences(te_x)
        te_x = pad_sequences(xtest_seq, maxlen=self.max_len)
        y_pred = self.model.predict(te_x)
        return y_pred
    def score(self, te_x, te_y):
        # Sample-averaged F1 over one-hot encodings of the 5 classes.
        y_pred = self.predict(te_x)
        return f1_score(np.identity(5)[te_y], np.identity(5)[np.argmax(y_pred, axis=1)], average='samples')
    def save_model(self, feature):
        model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.h5')
        # NOTE(review): scaler_path is computed but never used.
        scaler_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}-scaler.pkl')
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        # NOTE(review): torch modules have no ``.save`` method (Keras API);
        # this should likely be torch.save(self.model.state_dict(), model_path).
        self.model.save(model_path)
    def load_model(self, feature):
        model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.h5')
        scaler_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}-scaler.pkl')
        # NOTE(review): ``load_model`` (the Keras loader) is undefined here.
        self.model = load_model(model_path)
|
<reponame>PawelMlyniec/Dail-a-ride
import argparse
import time
import sys
import os
import json
import csv
import matplotlib.pyplot as plt
import numpy as np
import math
import random
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from collections import namedtuple
from itertools import count
from utils import get_device, label2heatmap, visualize, indice_map2image, indice2image_coordonates, indices2image
from instances import PixelInstance
from models import NNPixelDataset
from models import UpAE, CNN1, CNN2, CNN3, UpCNN1, SeqFC1, NoPoolCNN1, SkipCNN1, CoCNN1, FC1, FC2, DQN
from transformer_model import Trans1
from rl_environment import DarEnv
def parse_args(args):
    """Parse the command-line arguments of a training run.

    Parameters
    ----------
    args : list[str]
        Raw argument vector (typically ``sys.argv[1:]``).

    Returns
    -------
    argparse.Namespace
        The recognised arguments; unknown arguments are silently ignored
        because ``parse_known_args`` is used.
    """

    def _str2bool(value):
        # Bug fix: ``type=bool`` is a classic argparse pitfall — any
        # non-empty string (including "False") is truthy, so the flag could
        # never be turned off. Parse the common spellings explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError(f"Boolean value expected, got {value!r}")

    parser = argparse.ArgumentParser(
        description="Parse argument used when running a train.",
        epilog="python train.py --epochs INT")
    # required input parameters
    parser.add_argument(
        '--epochs', type=int, default=1,
        help='Number of Epochs the train is going to last for !')
    parser.add_argument(
        '--alias', type=str, default='testing',
        help='Nickname you give to this experimentation range')
    parser.add_argument(
        '--lr', type=float, default=0.001,
        help='Learning Rate for training aim')
    parser.add_argument(
        '--batch_size', type=int, default=128,
        help='Batch size')
    parser.add_argument(
        '--shuffle', type=_str2bool, default=True,
        help='Should the data be shuffled when used in the data loader ?')
    parser.add_argument(
        '--optimizer', type=str, default='Adam', choices=['Adam', 'SGD'],
        help='optimizer used for the training process')
    parser.add_argument(
        '--criterion', type=str, default='MSE', choices=['MSE', 'l1', 'crossentropy'],
        help='How should the loss be calculated')
    parser.add_argument(
        '--model', type=str, default='CNN1',
        help='Model defined in models.py that should be used :). For ex: CNN1, FC1, ect.')
    parser.add_argument(
        '--checkpoint_type', type=str, default='best', choices=['all', 'best', 'none'],
        help='Model defined in models.py that should be used :)')
    parser.add_argument(
        '--milestones', nargs='+', type=int, default=[50],
        help='List of milestones needed to decay the LR by a factor of gamma')
    parser.add_argument(
        '--gamma', type=float, default=0.1,
        help='Decay factor for the learning rate')
    parser.add_argument(
        '--patience', type=int, default=5,
        help='Number of step without decreasing of the loss before \
        reducing the learning rate')
    parser.add_argument(
        '--scheduler', type=str, default='plateau', choices=['plateau', 'step'],
        help='Type of learning rate scheduer')
    parser.add_argument(
        '--checkpoint_dir', type=str, default='',
        help='Directory for loading the checkpoint')
    parser.add_argument(
        '--input_type', type=str, default='map',
        help='Type of the data input in the model')
    parser.add_argument(
        '--output_type', type=str, default='coord',
        help='Type of the data output in the model')
    parser.add_argument(
        '--layers', type=int, default=128,
        help='If needed, this value gives the size of hidden layers')
    return parser.parse_known_args(args)[0]
class ReplayMemory(object):
    """Fixed-capacity circular buffer of transitions for experience replay."""

    def __init__(self, capacity, transition):
        self.capacity = capacity   # maximum number of stored transitions
        self.memory = []           # backing store; grows until capacity
        self.position = 0          # next slot to (over)write
        self.transi = transition   # namedtuple-like transition factory

    def push(self, *args):
        """Store one transition, overwriting the oldest entry once full."""
        slot = self.position
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[slot] = self.transi(*args)
        self.position = (slot + 1) % self.capacity

    def sample(self, batch_size):
        """Return ``batch_size`` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class RLTrainer():
def __init__(self, flags, sacred=None):
''' Inintialisation of the trainner:
Entends to load all the correct set up, ready to train
'''
# Incorporate arguments to the object parameters
for key in flags:
setattr(self, key, flags[key])
self.sacred = sacred
# Create saving experient dir
if self.sacred :
self.path_name = '/'.join([self.sacred.experiment_info['base_dir'], self.file_dir, str(self.sacred._id)])
else :
self.path_name = './data/experiments/' + self.alias + time.strftime("%d-%H-%M")
print(' ** Saving train path: ', self.path_name)
if not os.path.exists(self.path_name):
os.makedirs(self.path_name)
else :
print(' Already such a path.. adding random seed')
self.path_name = self.path_name + '#' + np.randint(100, 1000)
# Save parameters
if self.sacred :
pass
else :
with open(self.path_name + '/parameters.json', 'w') as f:
json.dump(vars(self), f)
self.device = get_device()
if self.input_type:
self.transform = transforms.Compose([])
else :
self.transform = transforms.Compose(
[transforms.ToPILImage(),
transforms.ToTensor()
])
# What would be of the ?
self.GAMMA = 0.999
self.eps_start = 0.5 #0.9
self.eps_end = 0.05
self.eps_decay = 5000 #200
self.model_update = 10
self.step = 0
# RL elements
self.env = DarEnv(size=self.image_size,
target_population=self.nb_target,
driver_population=self.nb_drivers)
self.transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
self.memory = ReplayMemory(10000, self.transition)
# Define NN
try :
if self.model=='FC1':
self.model = globals()[self.model](self.image_size, self.layers).to(self.device)
elif self.model=='FC2':
self.model = globals()[self.model](self.image_size).to(self.device)
elif self.model=='SeqFC1':
self.model = globals()[self.model](4).to(self.device)
elif self.model=='UpCNN1':
self.model = globals()[self.model](2).to(self.device)
elif self.model=='UpAE':
self.model = globals()[self.model](self.image_size,
self.upscale_factor,
self.layers,
self.channels).to(self.device)
elif self.model=='Trans1':
self.model = globals()[self.model](src_vocab_size=self.image_size**2+1,
trg_vocab_size=self.image_size**2+1,
max_length=self.nb_target+1,
src_pad_idx=self.image_size,
trg_pad_idx=self.image_size,
dropout=self.dropout,
device=self.device).to(self.device)
elif self.model=='DQN':
self.model = globals()[self.model](size=self.image_size,
layer_size=self.layers).to(self.device)
else :
self.model = globals()[self.model]().to(self.device)
except:
raise "The model name has not been found !"
# loss
if self.criterion == 'MSE':
self.criterion = nn.MSELoss()
elif self.criterion == 'l1':
self.criterion = nn.L1Loss()
elif self.criterion == 'crossentropy':
self.criterion = nn.CrossEntropyLoss()
else :
raise "Not found criterion"
# optimizer
if self.optimizer == 'Adam':
self.opti = optim.Adam(self.model.parameters(), lr=self.lr)
elif self.optimizer == 'SGD':
self.opti = optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.95)
elif self.optimizer == 'RMSprop' :
self.opti = optim.RMSprop(policy_net.parameters())
else :
raise "Not found optimizer"
# Scheduler
if self.scheduler == 'plateau' :
self.scheduler = ReduceLROnPlateau(self.opti, mode='min', patience=self.patience, factor=self.gamma)
elif self.scheduler == 'step':
self.scheduler = MultiStepLR(self.opti, milestones=self.milestones, gamma=self.gamma)
# Loading model from dir
if self.checkpoint_dir :
self.model.load_state_dict(torch.load(self.checkpoint_dir + '/best_model.pt'), strict=False)
#'./data/experiments/' + self.checkpoint_dir + '/best_model.pt'))
# number of elements passed throgh the model for each epoch
self.testing_size = self.batch_size * (10000 // self.batch_size) #About 10k
self.training_size = self.batch_size * (100000 // self.batch_size) #About 100k
self.statistics = {
'reward': [],
'duration': [],
'accuracy': [],
'loss': [],
'epsylon': []
}
print(' *// What is this train about //* ')
for item in vars(self):
print(item, ':', vars(self)[item])
def save_model(self, epoch):
if self.checkpoint_type == 'best':
name = 'best_model.pt'
else : name = 'model_t=' + time.strftime("%d-%H-%M") + '_e=' + str(epoch) + '.pt'
torch.save(self.model.state_dict(), '/'.join([self.path_name,name]))
print(' - Done with saving ! - ')
def plot_statistics(self, epoch, verbose=True, show=False):
# Print them
if verbose:
print('\t ->[Epoch %d]<- loss: %.3f' % (epoch + 1, self.statistics['loss'][-1]))
print('\t * Accuracy : %0.3f %%' % (self.statistics['accuracy'][-1]))
print('\t * Reward : %0.3f' % (self.statistics['reward'][-1]))
# Create plot of the statiscs, saved in folder
colors = [plt.cm.tab20(0),plt.cm.tab20(1),plt.cm.tab20c(2),
plt.cm.tab20c(3), plt.cm.tab20c(4),
plt.cm.tab20c(5),plt.cm.tab20c(6),plt.cm.tab20c(7)]
fig, (axis) = plt.subplots(1, len(self.statistics), figsize=(20, 10))
fig.suptitle(' - Training: ' + self.path_name)
for i, key in enumerate(self.statistics):
# Sacred (The one thing to keep here)
if self.sacred :
self.sacred.log_scalar(key, self.statistics[key][-1], len(self.statistics[key]))
axis[i].plot(self.statistics[key], color=colors[i])
axis[i].set_title(' Plot of ' + key)
if show :
fig.show()
fig.savefig(self.path_name + '/result_figure.png')
fig.clf()
plt.close(fig)
# Save the statistics as CSV file
if not self.sacred:
try:
with open(self.path_name + '/statistics.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=statistics.keys())
writer.writeheader()
# for key in statistics
writer.writerow(self.statistics)
except IOError:
print("I/O error")
def forward_data(self, data):
    """Run one forward pass for a batch and return (outputs, labels).

    data[0] is the input batch, data[1] auxiliary 'caracteristics'; both are
    moved to self.device. The branch taken depends on self.input_type and
    self.output_type.

    NOTE(review): 'labels' and 'anonym_neighbors' are only assigned in the
    commented-out lines below, yet they are referenced by the 'map+coord'
    and 'coord' input branches and by every output branch — those paths
    raise NameError as written. Confirm the intended preprocessing before
    enabling them.
    """
    # Move both batch tensors to the compute device; non_blocking pairs
    # with pinned-memory data loading.
    inputs, caracteristics = data[0].to(self.device, non_blocking=True), data[1].to(self.device, non_blocking=True)
    # labels = neighbors[:,0]
    # shuffled_indexes = torch.randperm(neighbors.shape[1])
    # anonym_neighbors = neighbors[:,shuffled_indexes].to(self.device)
    if self.input_type=='map':
        outputs = self.model(inputs)
    elif self.input_type=='flatmap':
        # Per-element sentinel index (image_size**2) — presumably a start/stop
        # token for the flattened representation; TODO confirm.
        target = torch.tensor([[self.image_size**2] for _ in range(inputs.shape[0])]).to(self.device).type(torch.LongTensor)
        outputs = self.model(inputs.to(self.device).type(torch.LongTensor),
                             target,
                             caracteristics)
    elif self.input_type=='map+coord':
        outputs = self.model(inputs, anonym_neighbors)
    elif self.input_type=='coord':
        outputs = self.model(anonym_neighbors)
    if self.output_type=='map':
        # Convert coordinate labels to a heatmap, then to class indices.
        labels = label2heatmap(labels, self.image_size).to(self.device)
        labels = torch.argmax(labels, 1)
    elif self.output_type=='flatmap':
        labels = label2heatmap(labels, self.image_size).to(self.device)
        labels = torch.argmax(labels, 1)
        outputs = torch.squeeze(outputs[:, :, :-1]) #Remove inexistant [-1] for start
    else :
        # Regression-style output: compare against float labels directly.
        labels = labels.float()
    return outputs, labels
def compile_stats(self, labels, outputs, loss, data):
    """Derive per-batch accuracy counters from model predictions.

    Returns a (correct, nearest_accuracy, pointing_accuracy) triple. The
    'loss' argument is accepted but unused in this body.

    NOTE(review): if self.output_type is neither 'coord' nor in
    ('map', 'flatmap'), none of the returned names get assigned and the
    final return raises UnboundLocalError — confirm the allowed values.
    """
    if self.output_type=='coord':
        # Snap continuous coordinate predictions onto the pixel grid.
        rounded = torch.round(outputs)
        rx, ry = rounded.reshape(2, labels.size(0))
        lx, ly = labels.reshape(2, labels.size(0))
        # Exact hits: both coordinates match the label.
        correct = ((rx == lx) & (ry == ly)).sum().item()
        # Distance from the rounded prediction to each candidate point.
        distance_pred2points = list(map(lambda x: np.linalg.norm(rounded.cpu() - x, axis=1),
                                    data[1].to(self.device, non_blocking=True).cpu()[0]))
        # Case where the model aims perfectly to one pixel
        pointing_accuracy = (np.sum(np.min(distance_pred2points, axis=0) == 0))
        # Case where the nearest pixel to prediction is the nearest_neighbors
        nearest_accuracy = np.sum(np.argmin(distance_pred2points, axis=0) == 0)
    elif self.output_type in ['map', 'flatmap']:
        # Pixel-classification output: compare argmax class with the labels.
        predictions = torch.argmax(outputs, 1)
        correct = (predictions == labels).float().sum()
        # TODO : better metrics then 0 would be welcome !!
        nearest_accuracy = 0
        pointing_accuracy = 0
    return correct, nearest_accuracy, pointing_accuracy
def save_visuals(self, epoch, data, outputs, labels, txt='test'):
    ''' Saving some examples of input -> output to see how the model behave.

    Renders up to 10 (input map, normalised output map) pairs into a single
    figure written as example_epoch<epoch>.png, and attaches it to the
    sacred run when configured. The 'txt' and 'labels' parameters are
    currently unused in this body.
    '''
    print(' - Saving some examples - ')
    number_i = min(self.batch_size, 10)  # cap the number of rendered rows
    # print('\t \t + epoch::', epoch)
    # print('\t \t + data:', data[0].shape, data[0][:number_i])
    # print('\t \t + outputs:', outputs.shape, outputs[:number_i])
    # print('\t \t + labels:', labels.shape, labels[:number_i])
    plt.figure()
    fig, axis = plt.subplots(number_i, 2, figsize=(10, 50)) #2 rows for input, output
    fig.tight_layout()
    fig.suptitle(' - examples of network - ')
    for i in range(min(self.batch_size, number_i)):
        # Left column: the raw input indices rendered as an image.
        input_map = indices2image(data[0][i], self.image_size)
        axis[i, 0].imshow(input_map)
        # Right column: the model output, min-max normalised to [0, 1].
        im = indice_map2image(outputs[i], self.image_size).cpu().numpy()
        normalized = (im - im.min() ) / (im.max() - im.min())
        axis[i, 1].imshow(normalized)
    img_name = self.path_name + '/example_epoch' + str(epoch) + '.png'
    plt.savefig(img_name)
    plt.close()
    if self.sacred :
        # Attach the rendered figure to the sacred run as an artifact.
        self.sacred.add_artifact(img_name, content_type='image')
def testing(self, testloader, epoch):
    """Evaluate the model on testloader and append metrics to statistics.

    NOTE(review): the visible __init__ only creates the keys 'reward',
    'duration', 'accuracy', 'loss' and 'epsylon' in self.statistics; the
    'test_*' keys appended below must be initialised elsewhere — confirm.
    NOTE(review): self.testing_size counts samples while 'i' counts
    batches, so the early break fires far later than ~10k samples.
    """
    loss = 0
    total = 0
    correct = nearest_accuracy = pointing_accuracy = 0
    self.model.eval()  # evaluation mode (disables dropout / BN updates)
    with torch.no_grad():  # no gradients needed during evaluation
        for i, data in enumerate(testloader):
            outputs, labels = self.forward_data(data)
            loss += self.criterion(outputs, labels)
            total += labels.size(0)
            c, n, p = self.compile_stats(labels, outputs, loss, data)
            correct += c ; pointing_accuracy += p ; nearest_accuracy += n
            if (i >= self.testing_size): break
    # Keep a visual sample of the last evaluated batch.
    self.save_visuals(epoch, data, outputs, labels)
    # Averages over all evaluated samples, expressed as percentages.
    self.statistics['test_accuracy'].append(100*(correct / total).cpu().item())
    self.statistics['test_loss'].append((loss / total).cpu().item())
    self.statistics['test_nearest_acc'].append(100*(nearest_accuracy / total))
    self.statistics['test_pointing_acc'].append(100*(pointing_accuracy / total))
def train(self, trainloader, testloader):
    """Supervised training loop: one pass over trainloader per epoch, then
    evaluation, plotting and (conditional) checkpointing.

    NOTE(review): self.training_size counts samples while 'i' counts
    batches, so the early break fires far later than ~100k samples.
    NOTE(review): the 'train_*' statistics keys are not created in the
    visible __init__ — confirm where they are initialised.
    """
    print(' - Start Training - ')
    max_test_accuracy = 0  # NOTE(review): assigned but never read/updated below
    for epoch in range(self.epochs):
        running_loss = 0
        total = 0
        correct = nearest_accuracy = pointing_accuracy = 0
        self.model.train()  # training mode (enables dropout / BN updates)
        for i, data in enumerate(trainloader):
            # set the parameter gradients to zero
            self.opti.zero_grad()
            outputs, labels = self.forward_data(data)
            loss = self.criterion(outputs, labels)
            loss.backward()
            # update the gradients
            self.opti.step()
            total += labels.size(0)
            running_loss += loss.item()
            # Compile statistics
            c, n, p = self.compile_stats(labels, outputs, loss, data)
            correct += c ; pointing_accuracy += p ; nearest_accuracy += n
            if (i >= self.training_size): break
        # Compile results (per-epoch averages, accuracies as percentages)
        self.statistics['train_loss'].append(running_loss / total)
        self.statistics['train_accuracy'].append(100*(correct / total).cpu().item())
        self.statistics['train_pointing_acc'].append(100*(pointing_accuracy / total))
        self.statistics['train_nearest_acc'].append(100*(nearest_accuracy / total))
        # Start Testings
        self.testing(testloader, epoch)
        # Stats treatment and update
        self.plot_statistics(epoch)
        # Scheduler is driven by the accumulated training loss.
        self.scheduler.step(running_loss)
        # Checkpoint when test accuracy hits a new maximum.
        # NOTE(review): both checkpoint_type branches do the same thing here;
        # presumably 'all' was meant to save every epoch unconditionally.
        if self.statistics['test_accuracy'][-1] == np.max(self.statistics['test_accuracy']) :
            if self.checkpoint_type == 'best':
                self.save_model(epoch=epoch)
            elif self.checkpoint_type == 'all':
                self.save_model(epoch=epoch)
    print(' - Done Training - ')
def select_action(self, observation):
sample = np.random.random()
eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * \
math.exp(-1. * self.step / self.eps_decay)
self.step += 1
if self.step > 1000 :
eps_threshold = 0.1
else :
eps_threshold = 0.9
self.statistics['epsylon'].append(eps_threshold)
if sample > eps_threshold:
with torch.no_grad():
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the larger expected reward.
observation = np.ascontiguousarray(observation, dtype=np.float32) / 255
state = self.transform(torch.from_numpy(observation)).unsqueeze(0).to(self.device)
return self.model(state).max(1)[1].view(1, 1)
else:
return torch.tensor([[np.random.randint(self.env.action_space.n)]], device=self.device, dtype=torch.long)
def optimize_model(self):
    """One DQN optimization step over a replay-memory minibatch.

    Samples batch_size transitions, computes the Huber loss between
    Q(s_t, a_t) and the Bellman target r + GAMMA * max_a Q(s_{t+1}, a),
    then applies a gradient step with element-wise gradient clamping.
    Returns early (no-op) while the memory holds fewer than batch_size
    transitions.
    """
    if len(self.memory) < self.batch_size:
        return  # not enough experience collected yet
    transitions = self.memory.sample(self.batch_size)
    # Transpose the list of Transitions into one Transition of batched fields.
    batch = self.transition(*zip(*transitions))
    # Mask of transitions whose episode did not terminate at this step.
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                            batch.next_state)), device=self.device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state
                                       if s is not None])
    # BUG FIX: removed debug prints; 'batch.state' is a tuple, so the former
    # 'print(batch.state.shape)' raised AttributeError on every call.
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken. These are the actions which would've been taken
    # for each batch state according to policy_net
    state_action_values = self.model(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states: best reward with max(1)[0],
    # merged via the mask so final states contribute 0.
    next_state_values = torch.zeros(self.batch_size, device=self.device)
    next_state_values[non_final_mask] = self.model(non_final_next_states).max(1)[0].detach()
    # Compute the expected Q values (Bellman target)
    expected_state_action_values = (next_state_values * self.GAMMA) + reward_batch
    # Compute Huber loss
    loss = self.criterion(state_action_values, expected_state_action_values.unsqueeze(1))
    # Optimize the model.
    # BUG FIX: 'opti' was an undefined bare name (NameError); the optimizer
    # lives on self.opti (see train()).
    self.opti.zero_grad()
    loss.backward()
    # Clamp gradients element-wise to [-1, 1] to stabilise training.
    for param in self.model.parameters():
        param.grad.data.clamp_(-1, 1)
    self.opti.step()
def train_rl(self):
    """DQN episode loop: act, store transitions, optimize after every step.

    NOTE(review): 'obs' comes from env.reset() and is later passed through
    obs.cpu() — this only works if the environment returns torch tensors;
    a plain numpy observation would raise AttributeError. Confirm the
    environment's observation type.
    NOTE(review): 'next' shadows the Python builtin of the same name.
    """
    for i_episode in range(self.epochs):
        # Initialize the environment and state
        obs = self.env.reset()
        for t in count():
            # Select and perform an action
            action = self.select_action(obs)
            next_obs, reward, done, _ = self.env.step(action.item())
            reward = torch.tensor([reward], device=self.device)
            # Normalise observations to [0, 1] and apply the input transform
            # before storing them in replay memory.
            obs = np.ascontiguousarray(obs.cpu(), dtype=np.float32) / 255
            obs = self.transform(torch.from_numpy(obs)).unsqueeze(0).to(self.device)
            next = np.ascontiguousarray(next_obs, dtype=np.float32) / 255
            next = self.transform(torch.from_numpy(next)).unsqueeze(0).to(self.device)
            # Store the transition in memory
            self.memory.push(obs, action, next, reward)
            # Move to the next state
            obs = next_obs
            # Perform one step of the optimization (on the target network)
            self.optimize_model()
            if done:
                # Episode over: record duration/reward; loss and accuracy are
                # zero placeholders so the plotting keys stay aligned.
                self.statistics['duration'].append(t + 1)
                self.statistics['reward'].append(self.env.cumulative_reward)
                self.statistics['loss'].append(0)
                self.statistics['accuracy'].append(0)
                self.plot_statistics(i_episode)
                break
        # # Update the target network, copying all weights and biases in DQN
        # if i_episode % self.model_update == 0:
        #     target_net.load_state_dict(policy_net.state_dict())
def run(self):
    """Entry point: report the action-space size, then launch RL training."""
    # Number of discrete actions exposed by the gym action space.
    action_count = self.env.action_space.n
    print('Sizie of the action space: ', action_count)
    self.train_rl()
# Script entry point: parse CLI arguments, build the trainer, start training.
if __name__ == '__main__':
    # Get params
    parameters = parse_args(sys.argv[1:])
    # Get the trainer object
    trainer = RLTrainer(parameters)
    # Start a train
    trainer.run()
|
from unittest import TestCase
import responses
from requests import Session
from ...controllers import CommentController, CommentsController
from ...models import Comment
from ...utils.response import DoccanoAPIError
from .mock_api_responses import bad
from .mock_api_responses import comments as mocks
class CommentControllerTest(TestCase):
    """Unit tests for CommentController URL construction."""

    def setUp(self):
        # A minimal comment and the controller under test, built from
        # explicit keyword arguments for readability.
        self.comment_a = Comment(text="my text")
        controller_kwargs = {
            "comment": self.comment_a,
            "id": 43,
            "username": "kenobi",
            "created_at": "sometimestamp",
            "example": 11,
            "comments_url": "http://my_comments_url",
            "client_session": Session(),
        }
        self.comment_controller_a = CommentController(**controller_kwargs)

    def test_urls(self):
        # comment_url is the collection URL with the comment id appended.
        expected = "http://my_comments_url/43"
        self.assertEqual(self.comment_controller_a.comment_url, expected)
class CommentsControllerTest(TestCase):
    """Tests for CommentsController URL building and the all() iterator,
    exercised against mocked API responses for both an example-rooted and
    a project-rooted controller."""

    def setUp(self):
        # One controller rooted at an example, one at a project.
        self.comments_controller_from_example = CommentsController(
            "http://my_comments_url/v1/projects/23/examples/11", Session()
        )
        self.comments_controller_from_project = CommentsController(
            "http://my_comments_url/v1/projects/23", Session()
        )

    def test_controller_urls(self):
        self.assertEqual(
            self.comments_controller_from_example.comments_url,
            "http://my_comments_url/v1/projects/23/examples/11/comments",
        )
        self.assertEqual(
            self.comments_controller_from_project.comments_url,
            "http://my_comments_url/v1/projects/23/comments",
        )

    def _assert_all_comments(self, comments_controller):
        """Shared assertions for all(): every mocked comment is yielded
        exactly once, with matching text and the controller's session.

        (Refactor: this body was previously duplicated line-for-line in
        test_all_from_example and test_all_from_project.)
        """
        comment_controllers = comments_controller.all()
        total_comments = 0
        expected_comment_id_dict = {
            comment_json["id"]: comment_json for comment_json in mocks.comments_get_json
        }
        for comment_controller in comment_controllers:
            self.assertIn(comment_controller.id, expected_comment_id_dict)
            self.assertEqual(
                comment_controller.comment.text,
                expected_comment_id_dict[comment_controller.id]["text"],
            )
            self.assertIs(
                comment_controller.client_session,
                comments_controller.client_session,
            )
            total_comments += 1
        self.assertEqual(total_comments, len(mocks.comments_get_json))

    @responses.activate
    def test_all_with_no_comments_from_example(self):
        responses.add(mocks.comments_get_empty_response)
        comment_controllers = self.comments_controller_from_example.all()
        self.assertEqual(len(list(comment_controllers)), 0)

    @responses.activate
    def test_all_with_no_comments_from_project(self):
        responses.add(mocks.comments_get_empty_response)
        comment_controllers = self.comments_controller_from_project.all()
        self.assertEqual(len(list(comment_controllers)), 0)

    @responses.activate
    def test_all_from_example(self):
        responses.add(mocks.comments_get_response)
        self._assert_all_comments(self.comments_controller_from_example)

    @responses.activate
    def test_all_from_project(self):
        responses.add(mocks.comments_get_response)
        self._assert_all_comments(self.comments_controller_from_project)

    @responses.activate
    def test_all_with_bad_response(self):
        # A non-2xx payload must surface as DoccanoAPIError for both roots.
        responses.add(bad.bad_get_response)
        with self.assertRaises(DoccanoAPIError):
            list(self.comments_controller_from_example.all())
        with self.assertRaises(DoccanoAPIError):
            list(self.comments_controller_from_project.all())
|
<gh_stars>1-10

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/OralAndWrittenPatterns/Oral-and-Written-Patterns.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
%%html
<script>
function code_toggle() {
if (code_shown){
$('div.input').hide('500');
$('#toggleButton').val('Show Code')
} else {
$('div.input').show('500');
$('#toggleButton').val('Hide Code')
}
code_shown = !code_shown
}
$( document ).ready(function(){
code_shown=false;
$('div.input').hide()
});
</script>
<p> Code is hidden for ease of viewing. Click the Show/Hide button to see. </p>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
# Patterns: Oral and written
## Introduction
Patterns appear all around us in everyday life, and in nature.
Here are some examples of patterns in action:
from IPython.core.display import HTML
HTML('''
<html>
<head>
<style>
* {
box-sizing: border-box;
}
.column {
float: left;
width: 33.33%;
padding: 5px;
}
/* Clearfix (clear floats) */
.row::after {
content: "";
clear: both;
display: table;
}
</style>
</head>
<body>
<div class="row">
<div class="column">
<p style="text-align: center;"> <b>Nature</b></p>
<img src="https://i.redd.it/f3y77ihpx5cy.gif" alt="drawing" style="width: 400px;"/>
</div>
<div class="column">
<p style="text-align: center;"><b>Greek Mythology</b>
<img src="http://www.gifmania.co.uk/Fantasy-Animated-Gifs/Animated-Fantasy-Animals/Hydra/Jason-Vs-Hydra-87071.gif" alt="drawing" style="width: 400px;"/>
</div>
<div class="column">
<p style="text-align: center;"><b>Building</b>
<img src="https://media1.giphy.com/media/3o85xkpCruOJPGtBMQ/giphy.gif" alt="drawing" style="width: 400px;"/>
</div>
</div>
</body>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
</html>''')
One of the fun parts of math is discovering how we can use math to understand the world around us. Today we're going to learn how math can help us understand patterns!
## Background
Today we're going to look at patterns that we can describe using numbers. All of the patterns we're going to talk about today are called **Sequences**, where there is an ordering to the pattern, imagine how the alphabet has the ordering: A, B, C, D, and so on. A is the *first* item in our sequence, and Z is the *last* element in our sequence. Not all sequences have an end, some of then keep going, and going, and going! Another simple sequence is the positive number line, where the next number is the previous number plus one.
### The Alphabet as a sequence

If we wanted to represent the sequence in a table as we often do with sequences, we'll use n to represent when the term appears in the sequence, and x will represent the term itself:
| $n$| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 |
|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
| $x$| A | B | C | D | E | F| G | H | I | J | K | L|M | N | O | P | Q | R| S | T | U | V | W | X| Y| Z|
### The Counting Numbers

## Examples
There is a Greek tale where the hero Hercules has to fight a three-headed Hydra, which can regrow its heads. In fact, every time it loses a head, TWO grow in its place. How can we think of this as a pattern? Well, to start, let's say it has just one head; after Hercules removes this head, now there are two in its place. Next there are three heads, then four heads, and so on! For this example, let's break it down and look at some small cases first.
<ul>
<li>The first number in the pattern: 3 <- The number of heads the hydra has to start </li>
<li>The second number in the pattern: 4 <- After one head is cutoff</li>
<li>The third number in the pattern: 5 <- After another head</li>
<li>The fourth number in the pattern: 6 <- Another one</li>
</ul>
Does this sound familiar? That's because the **rule** for the hydra pattern is similar to the number line we saw above. A rule lets us accurately describe a sequence in a formula. We call it a rule because every item in our sequence will follow the rule. We want to try and make a rule as simple as possible. Then if we wanted to know how many heads the hydra had after 25 cuts we can know right away without counting from 1 cut up to 25 cuts.
Let's think about it in words: first we have the amount from the previous step, then we take away one head which Hercules chose to cut off, and then we add two heads which grow in its place.
If we wanted to write it as a rule, we can do it like this:
$ x_n = n + 2 $
This is saying the n'th number ($x_n$) in the pattern is equal to $n + 2$ because there were initially 3 heads. Adding a number to n will change the starting point of our sequence; think about how if you're asked to count starting at 1 or starting at 5, you count the same way, just starting at a different number! Keep reading to find out how we figured out this rule. Remember that n will always start at 1, and increment from there. Remember we start at the first term, so that means n=1.
## Arithmetic Sequences
Above we defined what a **rule** is, now lets talk about types of sequences.
**Arithmetic Sequences** increase by a fixed number which we call the **difference**, as it is the difference between two consecutive terms in the sequence! This means the gap between the 99th and the 100th term is the same as between the 1st and the 2nd.
Let's do an example, here are two representations of an arithmetic sequence:
<center> ** Number Line** </center>
<img src="images/ArithmeticSequence.svg" width=600px>
<center>**Table**</center>
| $n$ | $x$ |
|:-------------:|:-------------:|
| 1 | 1 |
| 2 | 3 |
| 3 | 5 |
| 4 | 7 |
| 5 | 9 |
| 6 | 11|
Try to find out what the difference, starting value and rule are! Once you're ready, hover below to see the answers:
<!---Could it be worthwhile to hide the solutions, best options?-->
The above difference is 2, and the starting value is 1, one **rule** for this arithmetic sequence is this:
$ x_n = 2(n-1) + 1 = 2n - 2 + 1 = 2n -1 $
In general, the rule of an arithmetic sequence has the form:
$ x_n = a(n-1) + b $
where a is the difference, and b is the first value
### Arithmetic Sequence Visualization
HTML('''
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>'''
)
HTML('''
<form id="frm1_diff" action="/action_page.php">
Enter the <b>difference</b> for the arithmetic sequence:
<input type="number" name="quantity" value="3" min="1" max="5">
</form>
<form id="frm2_initArith" action="/action_page.php">
Enter the <b>initial value</b> for the arithmetic sequence:
<input type="number" name="quantity" value="1" min="0" max="50">
</form>
<div id="arithButton">
<i>Run the cell before plotting</i>
<input name="Plot sequence"
type="button"
value="Plot sequence"
/>
</div>
<svg width="1100" height="120" id="ARITH">
<line x1="0" y1="50" x2="1000" y2="50" stroke="black"/>
<rect x="0" y="30" width="2" height="40" fill="black"/>
<rect x="983" y="30" width="2" height="40" fill="black"/>
<text x="970" y="85" fill="black">50</text>
<text x="0" y="85" fill="black">0</text>
</svg>
<script src="./scripts/ArithmeticNumberLine.js"></script>
''')
## Geometric Sequences
**Geometric Sequences** can increase very quickly compared to arithmetic sequences. To get the next term in a geometric sequence, we **multiply** the previous term by a constant, this is different from the arithmetic sequence which **adds** a constant to the previous term. Let's do another example:
<center> ** Number Line** </center>
<img src="images/GeometricSequence.svg" width=600px>
<center>**Table**</center>
| $n$ | $x$ |
|:-------------:|:-------------:|
| 1 | 1 |
| 2 | 2 |
| 3 | 4 |
| 4 | 8 |
| 5 | 16 |
| 6 | 32|
In the above sequence, we can see that each term appears to be double the previous term, for example the ratio of $2:4$ is the same as the ratio of $4:8$, or $8:16$ and so on. These ratios can all be simplified to $1:2$. The term **common ratio** refers to the simplified right-hand side of the ratio of any two consecutive terms. So for example, the common ratio of this sequence is $2$.
If we look at the table, we can see that each term is twice as large, that means that we can imagine it's just multiplied by two. So thinking about what the 5th term would be, it should have been doubled 4 times from the first term, so that means
$\text{first term} * 2 * 2 * 2 * 2 = \text{5th term}$
**Exponents** give us an easier way to represent this, as remember that $2^4 = 2*2*2*2$, since exponents represent multiplying the **base**, in this case 2, 4 times. Sometimes people would say this is "two raised to the power of 4". Not only does it keep showing your work simpler, think about saying $2^{12}$ as
>*"Two raised to the power of 12"*
versus
>*"Two times two times two times two times two times two times two times two times two times two times two times two times"*
So using exponents, we can express the fifth term as the first term times $2^4$. Remember, because the first term hasn't been doubled yet, we only want to raise 2 to the power of 4, **not** 5. It's easy to get tripped up on this! Let's look at how this works in general.
For our example above, a general rule to find any term is:
$x_n = 2^{n-1}$
See how we're raising 2 to the (n-1)th power? That's because, as we observed above, the first term specifically hasn't been doubled yet. How does that work out? $x_1 = 2^{1-1} = 2^{0} = 1$. In case you haven't encountered 0 powers yet, just remember that any number to the power 0 is equal to 1.
Any geometric sequence can be described using this exponent rule:
$x_n = ar^{n-1}$
Where a is the first term in the sequence, and r represents the common ratio of the geometric sequence.
Try out some different common ratios and initial values to see what sort of geometric sequences you can produce!
### Geometric Sequence Visualization
HTML('''
<form id="frm1_ratio" action="/action_page.php">
Enter the <b>common ratio</b> for the geometric sequence:
<input type="number" name="quantity" value="3" min="1" max="5">
</form>
<form id="frm2_initGeo" action="/action_page.php">
Enter the <b>initial value</b> for the geometric sequence:
<input type="number" name="quantity" value="1" min="0" max="50">
</form>
<div id="geoButton">
<i>Run the cell before plotting</i>
<input name="Plot sequence"
type="button"
value="Plot sequence"
/>
</div>
<svg width="1000" height="120" id="GEO">
<line x1="0" y1="50" x2="1000" y2="50" stroke="black"/>
<rect x="0" y="30" width="2" height="40" fill="black"/>
<text x="0" y="85" fill="black">0</text>
<rect x="983" y="30" width="2" height="40" fill="black"/>
<text x="970" y="85" fill="black">50</text>
</svg>
<script src="./scripts/GeometricNumberLine.js"></script>''')
### Arithmetic and Geometric Questions
* Given the arithmetic sequence below, what is the initial value and difference for this sequence? You may use the visualization tool above to help you find the answers.
| $n$| 1 | 2 | 3 | 4 | 5 | 6 |
|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
| $x$|2 | 6 | 10 | 14 | 18 | 22|
* What would happen if the difference for a sequence was a negative number? Remember that the difference describes the gap between two terms. Could we have a negative initial value as well?
* What if the common ratio was a proper fraction? What about if it was a negative number? How can we understand a negative ratio?
* It's possible to explain a sequence using more than one rule. Try to come up with a sequence which can be explained using a geometric rule AND an arithmetic rule!
## The Fibonacci Sequence

We have just scratched the surface of patterns. While arithmetic and geometric sequences are quite useful, there are also other unique patterns which are quite interesting to look at. One of the most famous patterns is called the Fibonacci Sequence. We can find examples of it all throughout nature. That green gif at the beginning of this notebook is a Romanesco Broccoli, and the number of spirals on one head is a Fibonacci number. A **Fibonacci number** is any number which appears in the Fibonacci sequence. More locally, the Fibonacci sequence arises if we look at the cones of a pine tree. The rule for the Fibonacci sequence is a bit more complex than the previous sequences we've looked at, as it's not increasing by a constant or a constant ratio. Instead, to get the next term in the sequence, we add the previous two terms together. Here's the rule expressed mathematically:
$x_n = x_{n-1} + x_{n-2} \text{ ( if n is bigger than 2 )}$
$x_1 = 1 \textit{ and } x_2 = 1$
Let's put it to work and figure out the first few numbers in the sequence, starting with 1 and 1:
$1 + 1 = 2 = x_3$
$1 + 2 = 3 = x_4$
$2 + 3 = 5 = x_5$
$3 + 5 = 8 = x_6$
$5 + 8 = 13 = x_7$
Here's a video by ViHart talking about spirals and the Fibonacci sequence in nature:
from IPython.display import YouTubeVideo
YouTubeVideo('ahXIMUkSXX0')
## Triangle and Square Numbers
There are even interesting patterns we can find in real life, and model them using rules! For example, if you've heard of the square of a number, lets say n, the square is $n^2 = n * n$. This also can be interpreted as a square with side length n. We can also visualize it as dots on a grid, and the square of n is how many dots are in the square with n dots as a side.

The rule to determine how many dots we need is then simply:
$x_n = n^2$
What if instead of drawing squares, we wanted to draw triangles? This is where things get a little more tricky.

The rule for the triangle numbers is a little trickier than for the square numbers. Realize that just as in the square numbers, n is the side length of the triangle. It is as follows:
$x_n = \frac{n(n+1)}{2}$
Trying this rule we can see that it is correct for the first few cases:
$x_1 = \frac{1(1+1)}{2} = \frac{1(2)}{2} = 1$
$x_2 = \frac{2(2+1)}{2} = \frac{2(3)}{2} = 3$
$x_3 = \frac{3(3+1)}{2} = \frac{3(4)}{2} = 6$
At first you may think perhaps at some point there should be a fraction since we're dividing by 2. However the whole numbers alternate between odd and even, so that means between $n$ and $(n+1)$ in the numerator, one of these terms *must* be even so there will be a cancellation possible.
The rule for the triangle numbers arises in many places, for example if you wanted to build a house of cards, or determine how many logs are in a triangular stack, you could save a lot of headache by using the rule and counting the side length instead of counting up every individual piece.
### More questions
* Can you draw a pinecone and count the spirals like in the video? Click this [link](http://www.maths.surrey.ac.uk/hosted-sites/R.Knott/Fibonacci/fibnat.html#section4.2) and go to section 4.2
to look at some other pinecones and their spirals.
* What if we changed the starting two terms in the Fibonacci sequence for other numbers? Try substituting in different numbers and drawing the new sequence in a graph, number line or writing it in table. It won't be the Fibonacci sequence but a new sequence you came up with, what are you going to name your new discovery?
* How many dots are in the following triangle? Remember that we can use the rule described above to save a lot of time and frustration counting! (Hint: Start by figuring out what your n value is, what does n mean? )

## Conclusion
What did you learn about patterns in this notebook? We discussed the concepts of arithmetic and geometric sequences, and more patterns found in real life. We learned ways to concisely express complicated patterns using simple language and mathematics. Here is a brief review of the terms and concepts we touched on:
A **rule** describes a sequence or pattern in a concise way, possibly using a formula. To determine the nth term, we would look at the rule for the case of $x_n$.
The **initial value** is another way to say *the first term in the sequence*.
**Arithmetic sequences** increase by a fixed amount between terms; this fixed amount is called the **difference**.
The standard rule for arithmetic sequences is:
$ x_n = a(n-1) + b $
Where a is the difference and b is the initial value.
**Geometric sequences** increase by a **common ratio**, which we can think of as the ratio between two sequential terms.
The standard rule for geometric sequences is:
$x_n = ar^{n-1}$
Where a is the first term in the sequence, and r represents the common ratio of the geometric sequence.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md) |
<filename>api/generator.py<gh_stars>1-10
import datetime
from datetime import date
from sys import stderr
from custom_errors import *
""" ####################################################################
used to generate the combinations of queries for the selected predicates
#################################################################### """
class Generator:
    """Generate query variants by substituting alternative predicate values.

    For every selective predicate, alternative literal values are taken from
    the predicate's histogram bounds and substituted into the original SQL,
    producing one candidate query per combination of substitutions.

    Fixes vs. previous revision: bare ``except:`` clauses (which also caught
    KeyboardInterrupt/SystemExit and discarded the traceback) are narrowed to
    ``except Exception`` with exception chaining, and CustomError is
    re-raised as-is instead of being re-wrapped.
    """

    def generate_plans(
        self, arr, original_sql
    ):  # takes in array of selectivities per predicate, from get_selectivities
        """Return all query variants for the given predicate descriptors.

        Args:
            arr: list of predicate descriptors; each has an "attribute" key
                and a "conditions" dict mapping operator ->
                {"queried_selectivity", "histogram_bounds"}.
            original_sql: the SQL text whose predicate values get replaced.

        Returns:
            A list of ``[sql_text, predicate_selectivities]`` pairs, where
            ``predicate_selectivities`` is a list of
            ``(attribute, operator, old_val, new_val, queried_sel, sel)``
            tuples describing every substitution applied to that variant.

        Raises:
            CustomError: if plan generation fails for any reason.
        """
        try:
            res = []

            def helper(
                index, path, predicate_selectivities
            ):  # predicate_selectivities is like (predicate0 , 0.93) * (predicate1, 0.78) * 1.2...
                # Depth-first expansion: one recursion level per predicate.
                if index == len(arr):
                    res.append([path, predicate_selectivities])
                    return
                if len(arr[index]["conditions"]) == 1:  # only one comparator
                    for operator, v in arr[index]["conditions"].items():
                        queried_selectivity = v["queried_selectivity"]
                        for selectivity, val in v["histogram_bounds"].items():
                            # The currently-queried value is the one replaced.
                            old_val = v["histogram_bounds"][queried_selectivity]
                            selectivity_data = (
                                arr[index]["attribute"],
                                operator,
                                old_val,
                                val,
                                queried_selectivity,
                                selectivity,
                            )
                            helper(
                                index + 1,
                                self.find_and_replace(
                                    arr[index]["attribute"],
                                    operator,
                                    old_val,
                                    val,
                                    path,
                                ),
                                predicate_selectivities + [selectivity_data],
                            )
                elif len(arr[index]["conditions"]) == 2:  # range
                    count = 0
                    lessthan_histogram_bounds, morethan_histogram_bounds = [], []
                    operators = []
                    for operator, v in arr[index]["conditions"].items():
                        queried_selectivity = v["queried_selectivity"]
                        old_val = v["histogram_bounds"][queried_selectivity]
                        count += 1
                        # NOTE(review): assumes the first condition listed is
                        # the "<" bound and the second the ">" bound — the
                        # dict's insertion order carries that contract.
                        if count == 1:  # < type
                            lessthan_histogram_bounds = [
                                (val, selectivity, queried_selectivity, old_val)
                                for selectivity, val in v["histogram_bounds"].items()
                            ]
                            operators.append(operator)
                        elif count == 2:  # > type
                            morethan_histogram_bounds = [
                                (val, selectivity, queried_selectivity, old_val)
                                for selectivity, val in v["histogram_bounds"].items()
                            ]
                            operators.append(operator)
                    for less_than, more_than in self.generate_ranges(
                        lessthan_histogram_bounds, morethan_histogram_bounds
                    ):  # ((val_less, sel_less, queried_sel), (val_more, sel_more, queried_sel))
                        # Replace the ">" bound first, then the "<" bound.
                        more_than_path = self.find_and_replace(
                            arr[index]["attribute"],
                            operators[1],
                            more_than[3],
                            more_than[0],
                            path,
                        )
                        both_replaced_path = self.find_and_replace(
                            arr[index]["attribute"],
                            operators[0],
                            less_than[3],
                            less_than[0],
                            more_than_path,
                        )
                        selectivity_data = [
                            (
                                arr[index]["attribute"],
                                operators[0],
                                less_than[3],
                                less_than[0],
                                less_than[2],
                                less_than[1],
                            ),
                            (
                                arr[index]["attribute"],
                                operators[1],
                                more_than[3],
                                more_than[0],
                                more_than[2],
                                more_than[1],
                            ),
                        ]
                        helper(
                            index + 1,
                            both_replaced_path,
                            predicate_selectivities + selectivity_data,
                        )

            helper(0, original_sql, [])
            return res
        except CustomError:
            # Already a domain error — propagate unchanged (keeps message
            # and traceback; previously it was re-wrapped via str(e)).
            raise
        except Exception as e:
            raise CustomError(
                "Error in generate_plans() - Unable to generate plans for required selectivity variations."
            ) from e

    def generate_ranges(
        self, lessthan_histogram_bounds, morethan_histogram_bounds
    ):  # for selectivities with more than 2 conditions (i.e. range)
        """Return all (less-than, more-than) bound pairs forming a valid range.

        A pair is valid when the "<" bound's value is strictly greater than
        the ">" bound's value.

        Raises:
            CustomError: if pairing the histogram bounds fails.
        """
        try:
            # less than should always have a greater value than the more than
            # all possible permutations
            res = [
                (x, y)
                for x in lessthan_histogram_bounds
                for y in morethan_histogram_bounds
                if x[0] > y[0]
            ]
            return res
        except CustomError:
            raise
        except Exception as e:
            raise CustomError(
                "Error in generate_ranges() - Unable to generate the required histogram bounds."
            ) from e

    def find_and_replace(self, predicate, operator, old_val, new_val, sql_query):
        """Return sql_query with `predicate operator old_val` rewritten to use new_val.

        Date values are rendered as quoted ISO strings so they match the SQL
        literal form before the textual replacement.

        Raises:
            CustomError: if the replacement fails.
        """
        try:
            if isinstance(new_val, datetime.date):
                new_val = "'{}'".format(date.isoformat(new_val))
            if isinstance(old_val, datetime.date):
                old_val = "'{}'".format(date.isoformat(old_val))
            new_query = sql_query.replace(
                "{} {} {}".format(predicate, operator, old_val),
                "{} {} {}".format(predicate, operator, new_val),
            )
            return new_query
        except CustomError:
            raise
        except Exception as e:
            raise CustomError(
                "Error in find_and_replace() - Unable to replace the original attribute value with the new one."
            ) from e
|
import random
from typing import Any, Callable, Generator, Generic, List
from uuid import uuid4
from fipy.ngsi.entity import BoolAttr, Entity, FloatAttr, TextAttr
def float_attr_close_to(base: float) -> FloatAttr:
    """Build a `FloatAttr` whose value lies within 1 of `base`.

    The generated value `x` satisfies `abs(x - base) <= 1`.

    Args:
        base: reference value the attribute value stays close to.

    Returns:
        A `FloatAttr` holding `base` plus a random offset in [0, 1].
    """
    offset = random.uniform(0, 1)
    value = base + offset
    return FloatAttr.new(value)
def text_attr_from_one_of(choices: List[str]) -> TextAttr:
    """Build a `TextAttr` whose value is drawn at random from `choices`.

    Args:
        choices: list of candidate values.

    Returns:
        A `TextAttr` wrapping one randomly picked element of `choices`.
    """
    selected = random.choice(choices)
    return TextAttr.new(selected)
def bool_attr() -> BoolAttr:
"""Pick randomly between `True` and `False` to instantiate a `BoolAttr`.
Returns:
A `BoolAttr` instance with randomly picked truth value.
"""
pick = random.choice([True, False])
return BoolAttr.new(pick)
EntityGenerator = Callable[[], Entity]
"""A function to generate an NGSI entity with possibly random attribute
values.
The function implementation must make sure
- every call returns a new `Entity` object---i.e. the Python type of
the returned objects is the same across calls and is a subclass of
`BaseEntity`;
- every call returns an object with the same NGSI type---i.e. the
value of the `type` field is the same across calls.
The function should set the `id` field to an empty string since the
classes and utils in this module override the ID value.
"""
class EntityFactory(Generic[Entity]):
    """Use an `EntityGenerator` to make NGSI entities all having IDs in the
    format `urn:ngsi-ld:T:S` where `T` is the NGSI type of the entities and
    `S` a string suffix drawn from a given list.
    """

    def __init__(self, generator: EntityGenerator, suffixes: List[Any] = None):
        """Create a new instance.

        Args:
            generator: creates NGSI entities.
            suffixes: non-empty list of suffixes to append to entity IDs.
                Values will be converted to their string representation.
        """
        # Fix: the default used to be `[Any]` — a one-element list holding
        # the typing.Any sentinel (almost certainly a typo for a type
        # annotation) — which silently produced entity IDs ending in
        # "typing.Any". Suffixes must now be supplied explicitly.
        assert generator is not None
        assert suffixes is not None and len(suffixes) > 0
        self._suffixes = [str(s) for s in suffixes]
        self._generator = generator

    def new_entity(self, suffix_index: int) -> Entity:
        """Create a new entity with an entity ID ending with the suffix
        associated to the given index.

        Args:
            suffix_index: a valid index for the suffix list passed in when
                creating the factory.

        Returns:
            The NGSI entity.
        """
        suffix = self._suffixes[suffix_index]
        entity = self._generator()
        entity.set_id_with_type_prefix(suffix)
        return entity

    def new_batch(self) -> List[Entity]:
        """Create an entity for each entity ID suffix passed in when creating
        the factory.

        Returns:
            A list of entities where the first one has an entity ID ending
            with the first suffix, the second has an ID ending with the
            second suffix, and so on to the last one that's got an ID ending
            with the last suffix passed in when creating the factory.
        """
        return [self.new_entity(k) for k in range(len(self._suffixes))]

    def entity_id(self, suffix_index: int) -> str:
        """Generate an NGSI entity ID in the format used for the entities
        this factory creates.

        The ID is in the format `urn:ngsi-ld:T:S` where `T` is the NGSI type
        of the entities and `S` the string suffix at `suffix_index` in the
        list passed in when creating the factory.

        Args:
            suffix_index: a valid index for the suffix list passed in when
                creating the factory.

        Returns:
            The NGSI ID.
        """
        return self.new_entity(suffix_index).id

    @staticmethod
    def with_numeric_suffixes(how_many: int, generator: EntityGenerator) \
            -> 'EntityFactory':
        """Create an `EntityFactory` with the given generator and the entity
        ID suffix list `[1, .., how_many]`.

        (Doc fix: the suffix list has exactly `how_many` entries, not
        `how_many + 1` as previously stated.)

        Args:
            how_many: number of numeric suffixes to use.
            generator: creates NGSI entities.

        Returns:
            The factory.
        """
        assert how_many > 0
        suffixes = list(range(1, how_many + 1))
        return EntityFactory(generator, suffixes)

    @staticmethod
    def with_uuid_suffixes(how_many: int, generator: EntityGenerator) \
            -> 'EntityFactory':
        """Create an `EntityFactory` with the given generator and a list of
        `how_many` random UUID entity ID suffixes.

        Args:
            how_many: number of UUID suffixes to use.
            generator: creates NGSI entities.

        Returns:
            The factory.
        """
        assert how_many > 0
        suffixes = [uuid4() for _ in range(how_many)]
        return EntityFactory(generator, suffixes)
def entity_batch(pool: EntityFactory[Entity]) \
        -> Generator[List[Entity], None, None]:
    """Yield an endless stream of entity batches.

    Each yielded list comes from one call to the factory's `new_batch`
    method, so the stream never terminates on its own.
    """
    while True:
        batch = pool.new_batch()
        yield batch
|
<reponame>tomacorp/thermapythia
#!/Users/toma/python278i/bin/python
from PyTrilinos import Epetra, AztecOO
def main():
    """Solve a small resistor network with Modified Nodal Analysis (MNA).

    Builds a 5x5 sparse Epetra matrix for a six-resistor circuit driven by
    a 10V voltage source (into node 0) and a 1A current source (into node
    3), then solves A x = b with AztecOO and prints the node voltages plus
    the source current (x[4]).

    NOTE(review): this file is Python 2 (print statements) — it will not
    run under Python 3 without conversion.
    """
    # define the communicator (Serial or parallel, depending on your configure
    # line), then initialize a distributed matrix of size 4. The matrix is empty,
    # `0' means to allocate for 0 elements on each row (better estimates make the
    # code faster). `NumMyElements' is the number of rows that are locally hosted
    # by the calling processor; `MyGlobalElements' is the global ID of locally
    # hosted rows.
    Comm = Epetra.PyComm()
    NumGlobalElements = 5
    Map = Epetra.Map(NumGlobalElements, 0, Comm)
    A = Epetra.CrsMatrix(Epetra.Copy, Map, 0)
    NumMyElements = Map.NumMyElements()
    MyGlobalElements = Map.MyGlobalElements()
    print "MyGlobalElements =", MyGlobalElements
    # Solution for voltage source: 10, 4.1803, 1.7213, 1.475
    # Solution for current source: 0, 1.475, 5.901, 9.344
    # Total solution: 10, 5.656, 7.6133, 10.819
    # Resistor values in ohms.
    R1 = 10.0
    R2 = 10.0
    R3 = 15.0
    R4 = 15.0
    R5 = 5.0
    R6 = 30.0
    # A is the problem matrix
    # Modified nodal analysis
    # http://www.swarthmore.edu/NatSci/echeeve1/Ref/mna/MNA2.html
    # Diagonal terms: sum of conductances (1/R) attached to each node.
    # node 0
    A[0, 0] = 1/R1
    # node 1
    A[1, 1] = 1/R1 + 1/R2 + 1/R3
    # node 2
    A[2, 2] = 1/R3 + 1/R4 + 1/R5
    # node 3
    A[3, 3] = 1/R5 + 1/R6
    # Common node impedances
    A[0, 1] = -1/R1
    A[1, 0] = -1/R1
    A[1, 2] = -1/R3
    A[2, 1] = -1/R3
    A[2, 3] = -1/R5
    A[3, 2] = -1/R5
    # Independent voltage source into node 0
    # (row/column 4 is the MNA auxiliary variable for the source current)
    A[0, 4] = 1
    A[4, 0] = 1
    # b is the RHS
    b = Epetra.Vector(Map)
    b[0] = 0
    b[1] = 0
    b[2] = 0
    # This is the only term for a 1A current source injected into node 3
    b[3] = 1
    # This is the 10V voltage source going into node 0.
    # Current going in to the arrow of this source is in the solution as x[4]
    # In this example, the current flows in the direction of the arrow on the current source,
    # so the solution to the current is negative.
    b[4] = 10
    # x are the unknowns to be solved.
    x = Epetra.Vector(Map)
    A.FillComplete()
    solver = AztecOO.AztecOO(A, x, b)
    # This loads x with the solution to the problem
    solver.Iterate(1550, 1e-5)
    Comm.Barrier()
    for i in MyGlobalElements:
        print "PE%d: %d %e" % (Comm.MyPID(), i, x[i])
    # synchronize processors
    Comm.Barrier()
    if Comm.MyPID() == 0: print "End Result: TEST PASSED"
# This is a standard Python construct. Put the code to be executed in a
# function [typically main()] and then use the following logic to call the
# function if the script has been called as an executable from the UNIX
# command
# line. This also allows, for example, this file to be imported from a python
# debugger and main() called from there.
if __name__ == "__main__":
    main()
|
<filename>viper/__init__.py<gh_stars>0
import http.server
import sys
import os
import logging
import urllib.parse
import cgi
import random
import collections
import json
log = logging.getLogger(__name__)

# TODO:
# module design
# more sophisticated UI segments
# integration of Bootstrap
# Dashboard as a subclass of UI

logging.basicConfig(level=logging.INFO)

# Default TCP port the viper HTTP server listens on.
DEFAULT_PORT = 1711
# Fallback root page served when no UI has been registered.
DEFAULT_ROOT = b'''<html>
<body>
Viper server started successfully.
</body>
</html>'''
# File-extension -> MIME type mapping for static file serving.
MIME_TYPES = {
    'html' : 'text/html'
}
# Entropy (in bits) of the random part of generated element IDs.
RAND_BITS = 128
# Prefix for generated JavaScript function identifiers.
FX_PREFIX = '_viper_'
FORM_MIME_TYPE = 'application/x-www-form-urlencoded'

def generateID(ref, no=0):
    """Return a unique element ID of the form '<ref>-<randomhex>-<no>'."""
    nonce = random.getrandbits(RAND_BITS)
    return '{}-{:0x}-{}'.format(ref, nonce, no)
class HTTP:
    """Thin wrapper around `http.server.HTTPServer` bound to a handler class."""

    def __init__(self, handler=None, port=DEFAULT_PORT, directory=None):
        """Create a server on `port` using `handler` (defaults to Handler)."""
        self.port = port
        if handler:
            self._handler = handler
        else:
            self._handler = Handler
        if directory:
            # NOTE(review): sets a class attribute on the handler class, so
            # the directory is shared by all instances — presumably intended.
            self.handler.directory = directory
        address = ('', self.port)
        self.server = http.server.HTTPServer(address, self.handler)

    @property
    def handler(self):
        """The request-handler class used by the server."""
        return self._handler

    def run(self):
        """Serve forever; exit the process on Ctrl-C or SystemExit."""
        try:
            log.info('server starting up')
            self.server.serve_forever()
        except (KeyboardInterrupt, SystemExit):
            log.info('server shutting down by operator interrupt')
            sys.exit()
class Handler(http.server.BaseHTTPRequestHandler):
    """Route HTTP requests to registered listeners, the root page, or files.

    Listeners are registered at class level via `listen()`, so every server
    using this handler class shares the same routing table.
    """

    # Class-level (shared) routing tables and static-file base directory.
    listeners = {}  # request name -> callable returning response bytes
    mimes = {}      # request name -> MIME type of that callable's response
    directory = os.getcwd()

    def do_GET(self):
        """Dispatch a GET: query string -> listener, '/' -> root, '*.*' -> file."""
        req = urllib.parse.urlparse(self.path)
        if req.query:
            # Decode 'a=1&b=2' into {'a': '1', 'b': '2'} and dispatch by path.
            self.handleRequest(req.path[1:], dict(qc.split("=") for qc in req.query.split("&")))
        elif req.path == '/':
            self.sendRoot()
        elif '.' in req.path:
            self.sendFile(req.path[1:])
        else:
            log.info('unknown GET %s', self.path)
            self.send_error(404)

    def do_POST(self):
        """Dispatch a POST: form-encoded bodies via cgi, anything else as JSON."""
        ctype = self.headers['Content-Type']
        if ctype == FORM_MIME_TYPE:
            # NOTE(review): the cgi module is deprecated (removed in Python
            # 3.13) — this branch needs a replacement on newer interpreters.
            cgiFields = cgi.FieldStorage(
                fp=self.rfile,
                headers=self.headers,
                environ={
                    'REQUEST_METHOD' : 'POST',
                    'CONTENT_TYPE' : self.headers['Content-Type'],
                }
            )
            data = dict((k, cgiFields[k].value) for k in cgiFields.keys())
        else:
            data = json.loads(self.rfile.read(int(self.headers['Content-Length'])).decode('utf8'))
        self.handleRequest(self.path[1:], data)

    def sendRoot(self):
        """Serve the registered root page as text/html."""
        log.info('serving root page')
        self.sendOKHeaders('text/html')
        self.wfile.write(self.getRoot())

    def sendFile(self, path):
        """Serve a static file relative to `directory`; 404 on IOError."""
        try:
            ext = os.path.splitext(path)[1][1:]
            with open(os.path.join(self.directory, path), 'rb') as ffile:
                log.info('serving file %s', path)
                self.sendOKHeaders(MIME_TYPES.get(ext, 'text/plain'))
                self.wfile.write(ffile.read())
        except IOError:
            log.info('unknown file %s', path)
            self.send_error(404)

    def handleRequest(self, name, content):
        """Invoke the listener registered under `name` with `content` as kwargs."""
        if name in self.listeners:
            log.info('serving request %s: %s', name, content)
            self.sendOKHeaders(self.mimes[name])
            self.wfile.write(self.listeners[name](**content))
        else:
            log.info('unknown request %s', name)
            self.send_error(404)

    def sendOKHeaders(self, mimeType):
        """Send a 200 response header with the given Content-Type."""
        self.send_response(200)
        self.send_header('Content-Type', mimeType)
        self.end_headers()

    @staticmethod
    def defaultGetRoot():
        """Fallback root page used until `root()` registers a real one."""
        return DEFAULT_ROOT

    # Overridable hook producing the root page bytes (replaced via root()).
    getRoot = staticmethod(defaultGetRoot)

    @classmethod
    def listen(cls, name, listener, mime='application/json'):
        """Register `listener` to handle requests named `name`."""
        cls.listeners[name] = listener
        cls.mimes[name] = mime

    @classmethod
    def root(cls, fx):
        """Register `fx` as the producer of the root page bytes."""
        cls.getRoot = staticmethod(fx)
class Application:
    """Bundle a UI and a Server and wire them to an HTTP front end."""

    def __init__(self, ui, server):
        """Remember the UI (page builder) and server (request logic)."""
        self.ui = ui
        self.server = server

    def run(self):
        """Start an HTTP server serving the UI root and the server's gates."""
        self.http = HTTP()
        self.http.handler.root(self.ui.root)
        for gate_name, callback, mime_type in self.server.gates():
            self.http.handler.listen(gate_name, callback, mime_type)
        self.http.run()
class UI:
    """Compose UI segments into one HTML page with their JavaScript glue.

    The page loads d3 for DOM access and AJAX; each segment contributes its
    markup (``html()``) and a JS function (``code()``) registered in the
    client-side ``_viperFunctions`` table.
    """

    # Static page skeleton surrounding the generated content.
    start = '''<html>
<head>
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/4.8.0/d3.min.js"></script>
</head>
<body>'''
    end = '</body></html>'

    def __init__(self, *items):
        """Collect the UI segments (inputs/outputs) making up the page."""
        self.items = items

    def root(self):
        """Render the full page (markup plus script block) as UTF-8 bytes."""
        return (self.start +
            '\n\n'.join(item.html() for item in self.items) +
            self.code(self.items) +
            self.end).encode('utf8')

    def code(self, items):
        """Wrap the framework JS and each item's JS in a <script> element."""
        return '<script type="text/javascript">' + self.ownCode() + '\n\n'.join(self.codeFor(item) for item in items) + '</script>'

    def ownCode(self):
        """Return the shared client-side framework code.

        `_viperSendInput` POSTs {"input": {id: value}} to the server and
        feeds the JSON response to `_viperUpdateOutputs`, which calls the
        registered updater function for each returned output ID.
        """
        return '''
_viperFunctions = {};
function _viperSendInput(id, value) {
console.log(id);
console.log(value);
var request = {
"type" : "request",
"request" : "input",
"input" : {}
};
request.input[id] = value;
d3.request("input").mimeType("application/json").post(
JSON.stringify(request),
function (error, response) {
if (error) console.warn(error);
else _viperUpdateOutputs(JSON.parse(response.response));
}
);
}
function _viperUpdateOutputs(json) {
if (json.output != null) {
for (var id in json.output) {
_viperFunctions[id](json.output[id]);
}
}
}
'''

    def codeFor(self, item):
        """Return the JS registering one item's function in _viperFunctions."""
        return '''// code for %s
%s = %s;
_viperFunctions["%s"] = %s;''' % (
            item.ref, item.fid, item.code(), item.ref, item.fid
        )
class UISegment:
    """Base class for page elements, pairing an HTML id with JS glue code."""

    # HTML attributes that users may never set directly (overridable below).
    DEFAULT_BLOCKED_ATTRS = ['onchange']
    BLOCKED_ATTRS = []

    def __init__(self, ref):
        """Store the logical name `ref` and derive a unique DOM id from it."""
        self.ref = ref
        self.id = generateID(ref)

    @property
    def fid(self):
        """A JS-safe function identifier derived from the DOM id."""
        return FX_PREFIX + self.id.replace('-', '')

    def processDirectAttrributes(self, fmtargs):
        """Sanitize user-supplied HTML attributes in place and return them.

        Blocked attributes and attributes with falsy values are removed;
        a value of `True` is rewritten to the attribute's own name (the
        HTML boolean-attribute convention, e.g. checked="checked").

        Fix: the removal list (`toremove`) was previously never initialized,
        so any blocked or falsy attribute raised NameError and nothing was
        ever actually removed.
        """
        toremove = set()
        for attr in self.DEFAULT_BLOCKED_ATTRS + self.BLOCKED_ATTRS:
            if attr in fmtargs:
                toremove.add(attr)
        for key, val in fmtargs.items():
            if not val:
                toremove.add(key)
            elif val is True:
                fmtargs[key] = key
        for key in toremove:
            del fmtargs[key]
        return fmtargs

    @staticmethod
    def formatAttributes(attrs):
        """Render an attribute dict as 'key="value"' pairs joined by spaces."""
        return ' '.join('{}="{}"'.format(key, val) for key, val in attrs.items())
class Input(UISegment):
    """Base class for UI segments that send a value to the server."""

    def code(self):
        """Return the JS function that posts this input's current value."""
        return '''function() {{_viperSendInput("{}", {});}}'''.format(
            self.id, self.getterCode()
        )

    def callCode(self):
        """Return a JS statement invoking this input's sender function."""
        return '{}();'.format(self.fid)
class TextInput(Input):
    """A single-line text box whose value is sent to the server."""

    BLOCKED_ATTRS = ['type', 'name']
    REQUIRED_ATTRS = ['value']

    def __init__(self, ref, label=None, updateMode='onchange', **fmtattrs):
        """Create the input; extra kwargs become sanitized HTML attributes."""
        super().__init__(ref)
        self.label = label
        self.attrs = self.processDirectAttrributes(fmtattrs)
        # Wire the chosen DOM event (e.g. onchange) to the JS sender call.
        self.attrs[updateMode] = self.callCode()

    def html(self):
        """Render the (optional) label followed by the <input> element."""
        attr_text = self.formatAttributes(self.attrs)
        field = '<input type="text" id="{id}" name="{id}" {attrs}>'.format(
            id=self.id, attrs=attr_text
        )
        return self.labelPartHTML() + field

    def labelPartHTML(self):
        """Hook for label markup; no label is rendered by default."""
        return ''

    def getterCode(self):
        """Return the JS expression reading this text box's value."""
        return 'd3.select("#{}").node().value'.format(self.id)
class Output(UISegment):
    """Marker base class for UI segments that display server-produced values."""
    pass
class TextOutput(Output):
    """A <span> whose text is replaced by values pushed from the server."""

    def html(self):
        """Render the empty placeholder span."""
        return '''<span id="{}"></span>'''.format(self.id)

    def code(self):
        """Return the JS updater that writes a value into the span."""
        return '''function(value) {{d3.select("#{}").text(value);}}'''.format(self.id)
class Server:
    """Map UI input IDs to output-producing callbacks.

    `bindings` maps an output ID to a tuple `(method, inputs)` where
    `inputs` is one input ID (or a list of IDs) whose values are passed,
    in order, as positional arguments to `method`.

    Fixes vs. previous revision: mutable default arguments replaced with
    None sentinels, debug `print` calls removed from the request path, and
    the builtin-shadowing local `id` renamed.
    """

    def __init__(self, bindings=None):
        """Register the given output bindings (defaults to no bindings)."""
        self._inputBindings = collections.defaultdict(list)
        self._outputMethods = {}
        self._outputArguments = {}
        self._registerBindings(bindings if bindings is not None else {})

    def gates(self):
        """Return the (name, handler, mime) endpoints this server exposes."""
        return [
            ('input', self.processInput, 'application/json')
        ]

    def processInput(self, input=None, **rest):
        """Handle an 'input' request: recompute and return affected outputs.

        Args:
            input: dict of {elementID: value}; element IDs look like
                'ref-<randomhex>-<no>' and only the leading 'ref' part keys
                the bindings. (The name shadows the builtin, but it is part
                of the wire protocol — requests carry an "input" key that is
                expanded into keyword arguments.)
            rest: ignored extra request fields (e.g. "type", "request").

        Returns:
            UTF-8 encoded JSON bytes: {"output": {outputID: value, ...}}.
        """
        input = {} if input is None else input
        inputs = {}
        outputsToUpdate = set()
        for key in input:
            # Strip the random-ID suffix to recover the logical input name.
            trueKey = key.split('-')[0]
            outputsToUpdate.update(self._inputBindings[trueKey])
            inputs[trueKey] = input[key]
        outputs = {}
        for output_id in outputsToUpdate:
            args = [inputs[key] for key in self._outputArguments[output_id]]
            outputs[output_id] = self._outputMethods[output_id](*args)
        return json.dumps({'output' : outputs}).encode('utf8')

    def _registerBindings(self, bindings):
        """Index bindings: input -> affected outputs, output -> method/args."""
        for output in bindings:
            method, inputs = bindings[output]
            if isinstance(inputs, str):
                inputs = [inputs]
            for inp in inputs:
                self._inputBindings[inp].append(output)
            self._outputMethods[output] = method
            self._outputArguments[output] = inputs
|
<filename>dtool_lookup_gui/main.py
#
# Copyright 2021-2022 <NAME>
# 2021 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import asyncio
import glob
import json
import logging
import os
import sys
import dtoolcore
import dtool_lookup_api.core.config
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('GtkSource', '4')
from gi.repository import GLib, GObject, Gio, Gtk, GtkSource, GdkPixbuf
import gbulb
gbulb.install(gtk=True)
from .views.main_window import MainWindow
from .utils.logging import _log_nested
# The following imports are need to register widget types with the GObject type system
import dtool_lookup_gui.widgets.base_uri_list_box
import dtool_lookup_gui.widgets.dataset_list_box
import dtool_lookup_gui.widgets.graph_widget
import dtool_lookup_gui.widgets.transfer_popover_menu
import dtool_lookup_gui.widgets.progress_chart
import dtool_lookup_gui.widgets.progress_popover_menu
logger = logging.getLogger(__name__)
from . import __version__
appid = f'de.uni-freiburg.dtool-lookup-gui.{__version__}'
# Windows taskbar icons fix
try:
from ctypes import windll # Only exists on Windows.
windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)
except ImportError:
pass
# adapted from https://python-gtk-3-tutorial.readthedocs.io/en/latest/popover.html#menu-popover
class Application(Gtk.Application):
    """Main GTK application: wires CLI handling, logging and config actions.

    Fix vs. previous revision: two log calls used str.format-style
    placeholders/eager formatting with the %-style logging API; they are
    now lazy %-style calls.
    """

    __gsignals__ = {
        'dtool-config-changed': (GObject.SIGNAL_RUN_FIRST, None, ())
    }

    def __init__(self, *args, loop=None, **kwargs):
        """Create the application; `loop` is the gbulb/asyncio event loop."""
        super().__init__(*args,
                         application_id='de.uni-freiburg.dtool-lookup-gui',
                         flags=Gio.ApplicationFlags.HANDLES_COMMAND_LINE, **kwargs)
        self.loop = loop
        self.args = None

    def do_activate(self):
        """Build (if needed) and present the main window."""
        logger.debug("do_activate")
        # https://pyinstaller.readthedocs.io/en/latest/runtime-information.html
        if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
            logger.debug('running in a PyInstaller bundle')
        else:
            logger.debug('running in a normal Python process')
        win = self.props.active_window
        if not win:
            # Windows are associated with the application
            # when the last one is closed the application shuts down
            # self.window = AppWindow(application=self, title="Main Window")
            logger.debug("Build GUI.")
            win = MainWindow(application=self)
            glob_pattern = os.path.join(os.path.dirname(__file__), os.pardir, 'data','icons','*','dtool_logo_small.xpm')
            icon_file_list = glob.glob(glob_pattern)
            if len(icon_file_list) > 0:
                icon_list = [GdkPixbuf.Pixbuf.new_from_file(icon_file) for icon_file in icon_file_list]
                win.set_icon_list(icon_list)
                logger.debug("Loaded %d icons from:", len(icon_file_list))
                # Fix: logging uses %-style lazy formatting; the previous
                # "{}" placeholder was emitted literally, never interpolated.
                logger.debug("%s", icon_file_list)
            else:
                logger.warning("Could not load app icons.")
            win.connect('destroy', lambda _: self.loop.stop())
            self.loop.call_soon(win.refresh)  # Populate widgets after event loop starts
        logger.debug("Present main window.")
        win.present()

    # adapted from http://fedorarules.blogspot.com/2013/09/how-to-handle-command-line-options-in.html
    # and https://python-gtk-3-tutorial.readthedocs.io/en/latest/application.html#example
    def do_command_line(self, args):
        """Handle command line options from within Gtk Application.

        Gtk.Application command line handler called if
        Gio.ApplicationFlags.HANDLES_COMMAND_LINE set.
        Must call self.activate() to get the application up and running."""
        Gtk.Application.do_command_line(self, args)  # call the default commandline handler
        # in order to have both:
        # * preformatted help text and ...
        # * automatic display of defaults
        class ArgumentDefaultsAndRawDescriptionHelpFormatter(
                argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
            pass
        parser = argparse.ArgumentParser(prog=self.get_application_id(),
                                         description=__doc__,
                                         formatter_class=ArgumentDefaultsAndRawDescriptionHelpFormatter)
        parser.add_argument('--verbose', '-v', action='count', dest='verbose',
                            default=0, help='Make terminal output more verbose')
        parser.add_argument('--debug', action='store_true',
                            help='Print debug info')
        parser.add_argument('--quiet','-q', action='store_true',
                            help='Print debug info')
        parser.add_argument('--log', required=False, nargs='?', dest="log",
                            default=None, const='out.log', metavar='LOG',
                            help='Write out.log, optionally specify log file name')
        # parse the command line stored in args, but skip the first element (the filename)
        self.args = parser.parse_args(args.get_arguments()[1:])
        loglevel = logging.WARNING
        logformat = "%(levelname)s: %(message)s"
        if self.args.quiet:
            loglevel = logging.ERROR
        if self.args.verbose > 0:
            loglevel = logging.INFO
        if self.args.debug or (self.args.verbose > 1):
            loglevel = logging.DEBUG
        if self.args.verbose > 2:
            logformat = (
                "[%(asctime)s - pid %(process)d - thread id %(thread)d - %(funcName)s - %(pathname)s:%(lineno)s]"
                " %(levelname)s: %(message)s"
            )
        # explicitly modify the root logger
        logging.basicConfig(level=loglevel, format=logformat)
        self.activate_action('set-loglevel', GLib.Variant.new_uint16(loglevel))
        if self.args.log:
            self.activate_action('set-logfile', GLib.Variant.new_string(self.args.log))
        # Fix: lazy %-style args avoid eager formatting when DEBUG is off.
        logger.debug("Parsed CLI options %s", self.args)
        self.activate()
        return 0

    def do_startup(self):
        """Runs before anything else, create custom actions here."""
        logger.debug("do_startup")
        root_logger = logging.getLogger()
        string_variant = GLib.Variant.new_string("dummy")
        # toggle-logging
        toggle_logging_variant = GLib.Variant.new_boolean(True)
        toggle_logging_action = Gio.SimpleAction.new_stateful(
            "toggle-logging", None, toggle_logging_variant
        )
        toggle_logging_action.connect("change-state", self.do_toggle_logging)
        self.add_action(toggle_logging_action)
        # set-loglevel
        loglevel_variant = GLib.Variant.new_uint16(root_logger.level)
        loglevel_action = Gio.SimpleAction.new_stateful(
            "set-loglevel", loglevel_variant.get_type(), loglevel_variant
        )
        loglevel_action.connect("change-state", self.do_set_loglevel)
        self.add_action(loglevel_action)
        # set-logfile
        logfile_variant = GLib.Variant.new_string('none')
        logfile_action = Gio.SimpleAction.new_stateful(
            "set-logfile", logfile_variant.get_type(), logfile_variant
        )
        logfile_action.connect("change-state", self.do_set_logfile)
        self.add_action(logfile_action)
        # reset-config action
        reset_config_action = Gio.SimpleAction.new("reset-config")
        reset_config_action.connect("activate", self.do_reset_config)
        self.add_action(reset_config_action)
        # import-config action
        import_config_action = Gio.SimpleAction.new("import-config", string_variant.get_type())
        import_config_action.connect("activate", self.do_import_config)
        self.add_action(import_config_action)
        # export-config action
        export_config_action = Gio.SimpleAction.new("export-config", string_variant.get_type())
        export_config_action.connect("activate", self.do_export_config)
        self.add_action(export_config_action)
        Gtk.Application.do_startup(self)

    # custom application-scoped actions
    def do_toggle_logging(self, action, value):
        """Enable or disable (below WARNING) all logging output."""
        action.set_state(value)
        if value.get_boolean():
            logger.debug("Return to default logging configuration.")
            logging.disable(logging.NOTSET)
            logger.debug("Returned to default logging configuration.")
        else:
            logger.debug("Disable all logging below WARNING.")
            logging.disable(logging.WARNING)
            logger.debug("Disabled all logging below WARNING.")

    def do_set_loglevel(self, action, value):
        """Set the root logger's level from the action's uint16 state."""
        loglevel = value.get_uint16()
        if action.get_state().get_uint16() == loglevel:
            logger.debug("Desired loglevel and current log level are equivalent.")
            return
        root_logger = logging.getLogger()
        root_logger.setLevel(loglevel)
        action.set_state(value)

    def do_set_logfile(self, action, value):
        """Attach a FileHandler mirroring the root logger to `value`'s path."""
        logfile = value.get_string()
        if action.get_state().get_string() == logfile:
            logger.debug(f"Desired log file {logfile} and current log file are equivalent.")
            return
        fh = logging.FileHandler(logfile)
        root_logger = logging.getLogger()
        fh.setLevel(root_logger.level)
        fh.setFormatter(root_logger.handlers[0].formatter)
        root_logger.addHandler(fh)
        action.set_state(value)

    # action handlers
    def do_reset_config(self, action, value):
        """Empties config. All settings lost."""
        fpath = dtoolcore.utils.DEFAULT_CONFIG_PATH
        logger.debug(f"Remove config file '{fpath}'.")
        try:
            os.remove(fpath)
        except FileNotFoundError as exc:
            logger.warning(str(exc))
        else:
            # reinitialize config object underlying dtool_lookup_api,
            # this must disappear here and move into dtool_lookup_api
            dtool_lookup_api.core.config.Config = dtool_lookup_api.core.config.DtoolLookupAPIConfig(interactive=False)
            self.emit('dtool-config-changed')

    def do_import_config(self, action, value):
        """Import config from file. No sanity checking."""
        config_file = value.get_string()
        logger.debug(f"Import config from '{config_file}':")
        with open(config_file, 'r') as f:
            config = json.load(f)
        _log_nested(logger.debug, config)
        for key, value in config.items():
            dtoolcore.utils.write_config_value_to_file(key, value)
        # reinitialize config object underlying dtool_lookup_api,
        # this must disappear here and move into dtool_lookup_api
        dtool_lookup_api.core.config.Config = dtool_lookup_api.core.config.DtoolLookupAPIConfig(interactive=False)
        self.emit('dtool-config-changed')

    def do_export_config(self, action, value):
        """Export config to file."""
        config_file = value.get_string()
        logger.debug(f"Export config to '{config_file}':")
        config = dtoolcore.utils._get_config_dict_from_file()
        _log_nested(logger.debug, config)
        with open(config_file, 'w') as f:
            json.dump(config, f, indent=4)

    # object method handlers for own signals
    def do_dtool_config_changed(self):
        """Doesn't do anything, just documents how GTK calls this method
        first when emitting dtool-config-changed signal."""
        logger.debug("method handler for 'dtool-config-changed' called.")
def run_gui():
    """Entry point: register GTK types, build the app, and run the loop."""
    # GtkSource.View must be registered with the GObject type system before
    # any UI that references it is constructed.
    GObject.type_register(GtkSource.View)
    loop = asyncio.get_event_loop()
    app = Application(loop=loop)
    # Fix: the previous message said "do_startup" — a copy-paste from the
    # Application method of that name — which made the log misleading.
    logger.debug("run_gui")
    # see https://github.com/beeware/gbulb#gapplicationgtkapplication-event-loop
    loop.run_forever(application=app, argv=sys.argv)
|
#!/usr/bin/env python3
import ast
from typing import List, Dict
from collections import OrderedDict
from pathlib import Path
from pprint import pprint
import sys
import argparse
# From https://docs.python.org/3/library/functions.html
# No idea how this changed over time
# Names of the Python built-in functions, in the same order as the docs table.
BUILTINS: List[str] = """
abs all any ascii bin bool breakpoint bytearray bytes callable
chr classmethod compile complex delattr dict dir divmod enumerate eval
exec filter float format frozenset getattr globals hasattr hash help
hex id input int isinstance issubclass iter len list locals
map max memoryview min next object oct open ord pow
print property range repr reversed round set setattr slice sorted
staticmethod str sum super tuple type vars zip __import__
""".split()
# Literals referred from https://greentreesnakes.readthedocs.io/en/latest/nodes.html#literals
# NOTE(review): ast.Str, ast.Num, ast.Bytes, ast.Ellipsis and ast.NameConstant
# are deprecated aliases folded into ast.Constant since Python 3.8; kept here
# for compatibility with trees produced by older parsers. ast.Str appears
# twice below — harmless for isinstance() checks, but redundant.
AST_LITERALS = (
    ast.Constant,
    ast.Str,
    ast.Num,
    ast.Str,
    ast.FormattedValue,
    ast.JoinedStr,
    ast.Bytes,
    ast.List,
    ast.Tuple,
    ast.Set,
    ast.Dict,
    ast.Ellipsis,
    ast.NameConstant,
)
class ImportTracker(ast.NodeVisitor):
    """Collect a lookup from locally bound names to their source modules."""

    def __init__(self):
        super().__init__()
        # Maps a locally visible name (or alias) to where it comes from.
        self.libs: Dict[str, str] = {}

    def visit_Import(self, node):
        """Record `import x` / `import x as y` statements.

        Each alias maps the locally bound name to the imported module name,
        so functions used later can be traced back to their module.
        """
        for alias in node.names:
            assert isinstance(alias, ast.alias)
            bound = alias.asname if alias.asname is not None else alias.name
            self.libs[bound] = alias.name
        # Descend so imports nested inside other nodes are seen too.
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        """Record `from m import x [as y]` statements.

        Aliased imports store two entries (alias -> symbol, symbol -> module)
        so the chain alias -> symbol -> module can be followed. Wildcard
        imports cannot be resolved and are mapped to "<unknown>".
        """
        for alias in node.names:
            assert isinstance(alias, ast.alias)
            symbol, local = alias.name, alias.asname
            if local is not None:
                self.libs[local] = symbol
                self.libs[symbol] = node.module
            elif symbol == "*":
                print(f"Warning: wild card import found involving: `{node.module}`")
                self.libs[symbol] = "<unknown>"
            else:
                self.libs[symbol] = node.module
        # Descend so imports nested inside other nodes are seen too.
        self.generic_visit(node)
class FunctionDefTracker(ast.NodeVisitor):
    """Collect the names of all function definitions in a parsed module."""

    def __init__(self):
        super().__init__()
        # Names of every `def` encountered, in visit (document) order.
        self.functiondefs: List[str] = []

    def visit_FunctionDef(self, node):
        """Record the function's name, then descend for nested defs."""
        self.functiondefs.append(node.name)
        self.generic_visit(node)
class CallTracker(ast.NodeVisitor):
    """Collect the top-level (leftmost) name of every call in a module."""

    def __init__(self):
        super().__init__()
        # Rooting names of every call seen, e.g. "os" for os.path.join(...).
        self.calls: List[str] = []

    def visit_Call(self, node):
        """Record the name a call is rooted at, then descend into children."""
        # Take the node.func object, which is either ast.Name or ast.Attribute
        if isinstance(node.func, ast.Name):
            # Plain call: f(...)
            self.calls.append(node.func.id)
        elif isinstance(node.func, ast.Attribute):
            # Dotted call: a.b.c(...) — record the leftmost name "a".
            toplvlname = self.find_top_lvl_name(node.func)
            self.calls.append(toplvlname)
        else:
            # e.g. func is itself a Call (f()(...)); the inner call is still
            # reached via generic_visit below.
            pass
        # Call self.generic_visit(node) to include child nodes
        self.generic_visit(node)

    def find_top_lvl_name(self, func):
        """Walk attribute/call/subscript chains down to the rooting name.

        Returns the leftmost identifier, "<built-in>" for chains rooted at
        a literal, or "<unknown>" when the chain cannot be resolved.
        """
        # Wade through the first ast.Attribute of each layer until an ast.Name is found
        current_layer = func
        for _ in range(10):  # no such thing as 10 nested attributes!
            if isinstance(current_layer, ast.Name):
                return current_layer.id
            elif isinstance(current_layer, ast.Attribute):
                current_layer = current_layer.value
            elif isinstance(current_layer, ast.Call):
                # Fix: descend into the callee. Previously this branch was
                # `pass`, which spun the loop without progress and fell out
                # returning None implicitly.
                current_layer = current_layer.func
            elif isinstance(current_layer, ast.Subscript):
                current_layer = current_layer.value
            elif isinstance(current_layer, ast.BinOp):
                # Choose left as a guess by human writing convention
                current_layer = current_layer.left
            elif isinstance(current_layer, AST_LITERALS):
                return "<built-in>"
            else:
                return "<unknown>"
        # Fix: exhausting the loop used to return None implicitly.
        return "<unknown>"
class AssignTracker(ast.NodeVisitor):
    """Maps assigned names to the top-level name of the call producing
    them, e.g. ``a = np.array(...)`` records ``{"a": "np"}``.
    """

    def __init__(self):
        super().__init__()
        # assigned variable name -> name of its origin
        # FIX: was `Dict[str]` -- Dict takes a key and a value type.
        self.assigns: Dict[str, str] = {}

    def visit_Assign(self, node):
        # node.targets is a list of target nodes; node.value is the rhs.
        # Most often targets are ast.Names and values are Calls, but not always.
        for target in node.targets:
            if isinstance(target, ast.Name):
                name = target.id
            else:
                # FIX: Attribute/Subscript targets previously stored raw AST
                # nodes as dict keys, which can never match the string lookups
                # performed downstream -- inert garbage.  BinOp, literal and
                # tuple targets are untraceable without evaluating them.
                # Skip all of these.
                continue
            if isinstance(node.value, ast.Call):
                # Function-call value: track the origin of the assigned object
                traced_origin = self.find_top_lvl_name(node.value.func)
                if name != traced_origin:
                    # Refrain from creating a self-referential loop when the
                    # origin resolves to the name itself
                    self.assigns[name] = traced_origin
            # TODO: trace the origin of non-call values too
        # Call self.generic_visit(node) to include child nodes
        self.generic_visit(node)

    # TODO: Merge duplicated with CallTracker.find_top_lvl_name
    def find_top_lvl_name(self, func):
        """Walk down the first child of each layer until an ast.Name is
        found; return its id, or a sentinel string otherwise."""
        current_layer = func
        for _ in range(10):  # no such thing as 10 nested attributes!
            if isinstance(current_layer, ast.Name):
                return current_layer.id
            elif isinstance(current_layer, ast.Attribute):
                current_layer = current_layer.value
            elif isinstance(current_layer, ast.Call):
                # BUG FIX: descend into the callee (was `pass`, which spun
                # the loop without progress for chained calls).
                current_layer = current_layer.func
            elif isinstance(current_layer, ast.Subscript):
                current_layer = current_layer.value
            elif isinstance(current_layer, ast.BinOp):
                # Choose left as a guess by human writing convention
                current_layer = current_layer.left
            elif isinstance(current_layer, AST_LITERALS):
                return "<built-in>"
            else:
                return "<unknown>"
        # BUG FIX: previously fell through and implicitly returned None.
        return "<unknown>"
class LibSum(
    ImportTracker, FunctionDefTracker, CallTracker, AssignTracker, ast.NodeVisitor
):
    """Aggregate visitor combining the individual trackers.

    Each component lives in its own class; this mixin merely merges them
    so a single ``visit`` pass fills all four collections
    (libs, functiondefs, calls, assigns).
    """
def count_libs(text):
    """Parse *text* (Python source) and count calls per library.

    Returns a dict mapping library name -- or one of the sentinels
    ``<user-defined>``, ``<built-in>``, ``<unknown>`` -- to the number of
    calls attributed to it, or an empty dict when no calls were found.
    """
    tree: ast.Module = ast.parse(text)
    obj: LibSum = LibSum()
    obj.visit(tree)
    # obj now holds the 4 collections: assigns, calls, functiondefs, libs
    # Pre-populate so every known library has a bucket.
    # FIX: annotation was `Dict[int]`; Dict takes a key and a value type.
    final_count: Dict[str, int] = {i: 0 for i in obj.libs.values()}
    final_count["<user-defined>"] = 0
    final_count["<unknown>"] = 0
    final_count["<built-in>"] = 0
    for i in obj.calls:
        # If the call target is an assigned object, chase the assignment
        # chain back to its origin
        if i in obj.assigns:
            j = i
            for _ in range(30):
                # Probably nothing is nested 30 layers deep?  The budget
                # guards against circular references.
                if j in obj.assigns:
                    j = obj.assigns.get(j)
                else:
                    i = j
                    break
            else:
                # 30 hops without resolving: treat as a circular reference
                print(f"Warning: Circular reference for {i}, assigning as unknown")
                final_count["<unknown>"] += 1
                continue
        # A function defined in the analysed source itself
        if i in obj.functiondefs:
            final_count["<user-defined>"] += 1
        # A function directly traceable to one of the imported libraries
        elif i in obj.libs:
            final_count[obj.libs.get(i)] += 1
        # One of the builtins
        elif i in BUILTINS:
            final_count["<built-in>"] += 1
        # Lineage could not be determined
        else:
            final_count["<unknown>"] += 1
    total_calls = sum(final_count.values())
    if total_calls == 0:
        # No functions called at all
        return {}
    return final_count
def _print_report(counts):
    """Print *counts* as percentages of total calls, largest first,
    followed by a courtesy blank line.

    Extracted helper: this sort-and-report logic previously appeared
    three times verbatim inside main().
    """
    sorted_res = OrderedDict(
        {i: j for i, j in sorted(counts.items(), key=lambda x: x[1], reverse=True)}
    )
    total_calls = sum(sorted_res.values())
    final_count = {i: (j * 100 / total_calls) for i, j in sorted_res.items()}
    for i, j in final_count.items():
        print(f"{i}: {j:.2f} %")
    print()


def main():
    """CLI entry point: summarize library usage per file or in aggregate."""
    ## Argument handling
    parser = argparse.ArgumentParser(
        description="Summarizes libraries used in a Python script/repo"
    )
    parser.add_argument(
        "path", nargs="*", help="Path to file / folders to inspect", default=None
    )
    parser.add_argument(
        "-l",
        "--long",
        help="Shows results on individual file basis, else sums across files",
        action="store_true",
    )
    args = parser.parse_args()
    # If no dir names were passed, display a meaningful example
    if len(args.path) == 0:
        print("Call signature: `pylibsum <INSERT DIRNAME>`")
        print("Example: Given contents of sample.py below:")
        # NOTE: the sample must stay unindented -- it is fed to ast.parse
        text = """
import numpy as np
from plotnine import *
from sklearn.metrics import mean_squared_error
import scipy
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
c = b.mean()
mean_squared_error(a, b)
isinstance(10, list)
scipy.linalg.svd(a)"""
        print("\n\t| ".join(text.split("\n")))
        print()
        print("Outcome of running `pylibsum sample.py`:")
        print()
        # count_libs does the heavy lifting; then report sorted percentages
        res = count_libs(text)
        _print_report(res)
    # We have args passed
    else:
        # Vet the arguments down to Python sources only
        fnames = []
        for i in args.path:
            fp = Path(i)
            if fp.is_dir():
                # Include *.py, *.pyi files
                fnames.extend(list(fp.glob("*.py")))
                fnames.extend(list(fp.glob("*.pyi")))
            elif fp.suffix == ".py":
                fnames.append(fp)
        file_libs = {}
        # Run counts for each file
        for fn in fnames:
            with open(fn, "r") as f:
                text = f.read()
            file_libs[fn] = count_libs(text)
        # If aggregating (default behaviour)
        if args.long is False:
            # Collate counts across all files
            agg_libs = {}
            for _, file_lib in file_libs.items():
                for lib, count in file_lib.items():
                    agg_libs[lib] = agg_libs.get(lib, 0) + count
            _print_report(agg_libs)
        # If not aggregating, report per file
        else:
            for fn, file_lib in file_libs.items():
                # Print the file name first, then its breakdown
                print(fn)
                print()
                _print_report(file_lib)
        # Exit when done with loop
        sys.exit()


if __name__ == "__main__":
    main()
|
import typing
from typing import List
import osi3.osi_lane_pb2 as osi_lane
from sqlalchemy.sql.base import NO_ARG
from . import osidb
from .common import Identifier, Vector3d
from geoalchemy2.shape import to_shape
class BoundaryPoint:
    """
    A single point of a lane boundary.
    """
    position: Vector3d
    width: float
    height: float

    def __init__(self, position: Vector3d = None, width: float = None, height: float = None) -> None:
        # Only store the attributes that were supplied; write_pb relies on
        # hasattr() to decide which protobuf fields to fill.
        for attr, value in (("position", position), ("width", width), ("height", height)):
            if value is not None:
                setattr(self, attr, value)

    def write_pb(self, bp_pb: osi_lane._LANEBOUNDARY_BOUNDARYPOINT) -> None:
        """Copy every set attribute onto the protobuf boundary point."""
        if hasattr(self, 'position'):
            # Nested message: delegate to the Vector3d writer
            self.position.write_pb(bp_pb.position)
        if hasattr(self, 'width'):
            bp_pb.width = self.width
        if hasattr(self, 'height'):
            bp_pb.height = self.height
class LaneBoundaryClassification:
    """
    Classification of a lane boundary.
    """
    type_: int
    color: int
    limiting_structure_id: typing.List[Identifier]

    # Custom mapping from the OpenDRIVE standard to the PMSF OSI visualizer
    map_type = {"CURB": 12, "BROKEN": 4}  # Type_Dashed_Line == 4
    map_color = {"STANDARD": 3}

    def __init__(self, type_: int = None, color: int = None, limiting_structure_id: typing.List[Identifier] = None) -> None:
        # Only store the attributes that were supplied (hasattr pattern)
        for attr, value in (
            ("type_", type_),
            ("color", color),
            ("limiting_structure_id", limiting_structure_id),
        ):
            if value is not None:
                setattr(self, attr, value)

    @staticmethod
    def from_sql(lb_sql: osidb.LaneBoundary) -> 'LaneBoundaryClassification':
        """Build a classification from a SQL row.

        .get() yields None for road-marking values missing from the custom
        maps, so unmapped markings leave the attribute unset.
        """
        return LaneBoundaryClassification(
            type_=LaneBoundaryClassification.map_type.get(lb_sql.opendrive_roadMarking_type),
            color=LaneBoundaryClassification.map_color.get(lb_sql.opendrive_roadMarking_color),
            limiting_structure_id=None  # not available
        )

    def write_pb(self, lbc_pb: osi_lane._LANEBOUNDARY_CLASSIFICATION) -> None:
        """Copy every set attribute onto the protobuf classification."""
        if hasattr(self, 'type_'):
            lbc_pb.type = self.type_
        if hasattr(self, 'color'):
            lbc_pb.color = self.color
        if hasattr(self, 'limiting_structure_id'):
            for element in self.limiting_structure_id:
                # TODO MUST be checked
                element.write_pb(lbc_pb.limiting_structure_id.add())
class LaneBoundary:
    """
    A lane boundary defining the border of a lane.

    The left and right lane boundary define the width of the lane.
    Additionally, free markings can be defined, e.g. at construction sites.
    Free markings across multiple lanes may be defined multiple times for all affected lanes.
    """
    id_: Identifier
    # BUG FIX: was `boundary_line: typing.List[BoundaryPoint] = []`.
    # A class-level mutable default is shared by every instance (one append
    # would leak into all of them) and it also defeats the hasattr() pattern
    # used by write_pb, since the attribute then always resolves.  A bare
    # annotation matches the sibling classes in this module.
    boundary_line: typing.List[BoundaryPoint]
    classification: LaneBoundaryClassification

    def __init__(self, id_: Identifier = None, boundary_line: typing.List[BoundaryPoint] = None, classification: LaneBoundaryClassification = None) -> None:
        # Only store the attributes that were supplied (hasattr pattern)
        if id_ is not None:
            self.id_ = id_
        if boundary_line is not None:
            self.boundary_line = boundary_line
        if classification is not None:
            self.classification = classification

    @staticmethod
    def from_sql(lb_sql : osidb.LaneBoundary) -> 'LaneBoundary':
        """Build a LaneBoundary from a SQL row.

        Each coordinate of the stored geometry becomes a BoundaryPoint
        carrying the road-marking width; None when no geometry exists.
        """
        return LaneBoundary(
            id_=Identifier(lb_sql.id),
            boundary_line=list(map(lambda c: BoundaryPoint(position=Vector3d(x=c[0], y=c[1], z=c[2]), width=lb_sql.opendrive_roadMarking_width), list(to_shape(lb_sql.geom).coords))) if lb_sql.geom is not None else None,  # BoundaryPoint
            classification=LaneBoundaryClassification.from_sql(lb_sql)
        )

    def write_pb(self, lb_pb: osi_lane._LANEBOUNDARY) -> None:
        """Copy every set attribute onto the protobuf lane boundary."""
        if hasattr(self, 'id_'):
            self.id_.write_pb(lb_pb.id)
        if hasattr(self, 'boundary_line'):
            for element in self.boundary_line:
                element.write_pb(lb_pb.boundary_line.add())
        if hasattr(self, 'classification'):
            self.classification.write_pb(lb_pb.classification)
class LanePairing:
    """
    The lane ID pairings of antecessor and successor lanes.
    """
    antecessor_lane_id : Identifier
    successor_lane_id : Identifier

    def __init__(self, antecessor_lane_id : Identifier = None, successor_lane_id : Identifier = None) -> None:
        # Only store the identifiers that were supplied (hasattr pattern)
        for attr, value in (
            ("antecessor_lane_id", antecessor_lane_id),
            ("successor_lane_id", successor_lane_id),
        ):
            if value is not None:
                setattr(self, attr, value)

    def write_pb(self, lp_pb : osi_lane._LANE_CLASSIFICATION_LANEPAIRING) -> None:
        """Copy every set identifier onto the protobuf pairing."""
        for attr in ("antecessor_lane_id", "successor_lane_id"):
            if hasattr(self, attr):
                getattr(self, attr).write_pb(getattr(lp_pb, attr))
class RoadCondition:
    """
    The condition of the road surface
    """
    surface_temperature : float
    surface_water_film : float
    surface_freezing_point : float
    surface_ice : float
    surface_roughness : float
    surface_texture : float

    # All scalar fields share the same store-if-given / copy-if-set logic;
    # attribute names match the protobuf field names exactly.
    _FIELDS = (
        "surface_temperature",
        "surface_water_film",
        "surface_freezing_point",
        "surface_ice",
        "surface_roughness",
        "surface_texture",
    )

    def __init__(self, surface_temperature : float = None, surface_water_film : float = None,
                 surface_freezing_point : float = None, surface_ice : float = None, surface_roughness : float = None,
                 surface_texture : float = None) -> None:
        values = (surface_temperature, surface_water_film, surface_freezing_point,
                  surface_ice, surface_roughness, surface_texture)
        # Only store the attributes that were supplied (hasattr pattern)
        for attr, value in zip(self._FIELDS, values):
            if value is not None:
                setattr(self, attr, value)

    def write_pb(self, rc_pb : osi_lane._LANE_CLASSIFICATION_ROADCONDITION) -> None:
        """Copy every set surface attribute onto the protobuf message."""
        for attr in self._FIELDS:
            if hasattr(self, attr):
                setattr(rc_pb, attr, getattr(self, attr))
class LaneClassification:
    """
    Classification of a lane.
    """
    type_ : int
    is_host_vehicle_lane : bool
    centerline : typing.List[Vector3d]
    centerline_is_driving_direction : bool
    left_adjacent_lane_id: typing.List[Identifier]
    right_adjacent_lane_id : typing.List[Identifier]
    lane_pairing : typing.List[LanePairing]
    right_lane_boundary_id: typing.List[Identifier]
    left_lane_boundary_id : typing.List[Identifier]
    free_lane_boundary_id : typing.List[Identifier]
    road_condition : RoadCondition

    @staticmethod
    def get_type_custom(opendrive_lane_type: str, opendrive_road_junction: str) -> int:
        """Custom OpenDRIVE -> OSI lane-type mapping.

        DRIVING lanes map to 2, or to 4 when part of a junction; every
        other lane type maps to 0.
        """
        if opendrive_lane_type != "DRIVING":
            return 0
        return 2 if opendrive_road_junction is None else 4

    def __init__(self,
                 type_ : int = None,
                 is_host_vehicle_lane : bool = None,
                 centerline : typing.List[Vector3d] = None,
                 centerline_is_driving_direction : bool = None,
                 left_adjacent_lane_id: typing.List[Identifier] = None,
                 right_adjacent_lane_id : typing.List[Identifier] = None,
                 lane_pairing : typing.List[LanePairing] = None,
                 right_lane_boundary_id: typing.List[Identifier] = None,
                 left_lane_boundary_id : typing.List[Identifier] = None,
                 free_lane_boundary_id : typing.List[Identifier] = None,
                 road_condition : RoadCondition = None
                 ) -> None:
        if type_ is not None:
            # Clamp unexpected values to 0 -- only 0..4 are accepted here
            self.type_ = type_ if type_ in range(5) else 0
        # Remaining attributes: only store what was supplied (hasattr pattern)
        for attr, value in (
            ("is_host_vehicle_lane", is_host_vehicle_lane),
            ("centerline", centerline),
            ("centerline_is_driving_direction", centerline_is_driving_direction),
            ("left_adjacent_lane_id", left_adjacent_lane_id),
            ("right_adjacent_lane_id", right_adjacent_lane_id),
            ("lane_pairing", lane_pairing),
            ("right_lane_boundary_id", right_lane_boundary_id),
            ("left_lane_boundary_id", left_lane_boundary_id),
            ("free_lane_boundary_id", free_lane_boundary_id),
            ("road_condition", road_condition),
        ):
            if value is not None:
                setattr(self, attr, value)

    @staticmethod
    def from_sql(l_sql : osidb.Lane) -> 'LaneClassification':
        """Build a classification from a SQL lane row."""
        return LaneClassification(
            type_=LaneClassification.get_type_custom(l_sql.opendrive_lane_type, l_sql.opendrive_road_junction),
            centerline=list(map(lambda c: Vector3d(x=c[0], y=c[1], z=c[2]), list(to_shape(l_sql.geom).coords))) if l_sql.geom is not None else None,
            left_lane_boundary_id=[Identifier(l_sql.left_lane_boundary_id)],  # TODO right now only one id is available in materialized view
            right_lane_boundary_id=[Identifier(l_sql.right_lane_boundary_id)]
        )

    def write_pb(self, lc_pb : osi_lane._LANE_CLASSIFICATION) -> None:
        """Copy every set attribute onto the protobuf classification."""
        # Scalar fields (note: attribute `type_` maps to protobuf `type`)
        if hasattr(self, 'type_'):
            lc_pb.type = self.type_
        if hasattr(self, 'is_host_vehicle_lane'):
            lc_pb.is_host_vehicle_lane = self.is_host_vehicle_lane
        if hasattr(self, 'centerline_is_driving_direction'):
            lc_pb.centerline_is_driving_direction = self.centerline_is_driving_direction
        # Repeated message fields: add one entry per element and delegate
        # the write to the element itself.
        for attr in (
            "centerline",
            "left_adjacent_lane_id",
            "right_adjacent_lane_id",
            "lane_pairing",
            "right_lane_boundary_id",
            "left_lane_boundary_id",
            "free_lane_boundary_id",
        ):
            if hasattr(self, attr):
                repeated = getattr(lc_pb, attr)
                for element in getattr(self, attr):
                    element.write_pb(repeated.add())
        # Nested message field
        if hasattr(self, 'road_condition'):
            self.road_condition.write_pb(lc_pb.road_condition)
class Lane:
    """
    A lane in the road network.

    A lane is part of a road and mainly characterized by its center line.
    It also knows about any adjacent lanes, antecessor and successor lanes.
    """
    classification : LaneClassification
    id_ : Identifier

    def __init__(self, classification : LaneClassification = None, id_ : Identifier = None) -> None:
        # Only store the attributes that were supplied (hasattr pattern)
        for attr, value in (("classification", classification), ("id_", id_)):
            if value is not None:
                setattr(self, attr, value)

    @staticmethod
    def from_sql(l_sql : osidb.Lane) -> 'Lane':
        """Build a Lane (id + classification) from a SQL lane row."""
        return Lane(
            id_=Identifier(l_sql.id),
            classification=LaneClassification.from_sql(l_sql)
        )

    def write_pb(self, ln_pb : osi_lane._LANE) -> None:
        """Copy every set attribute onto the protobuf lane message."""
        # attribute name -> protobuf field name (id_ avoids shadowing id())
        for attr, field in (("classification", "classification"), ("id_", "id")):
            if hasattr(self, attr):
                getattr(self, attr).write_pb(getattr(ln_pb, field))
|
<filename>src/Users.py
import urllib, urllib2, socket, cookielib, requests
from requests.auth import AuthBase
import json
import re
class KongUser:
    """A Kongregate user profile accessed over the public (unauthenticated) API.

    Construction eagerly fetches the user's public info and caches it in
    ``self._userInfo``.
    """

    USER_INFO_URL = 'http://www.kongregate.com/api/user_info.json?username='
    ACCOUNT_URL = 'http://www.kongregate.com/accounts/'

    def __init__(self, username):
        self._username = username
        # One session per user so cookies/headers persist across calls
        self._session = requests.Session()
        self.loadInfo()

    # Return username
    def username(self):
        return self._username

    # Return the cached user info payload (populated by loadInfo)
    def userInfo(self):
        return self._userInfo

    # Return the numeric user id from the cached user info
    def userId(self):
        return self.userInfo()['user_id']

    # Get user info (without auth) and cache it
    def loadInfo(self):
        url = KongUser.USER_INFO_URL + self.username()
        req = self._session.get(url)
        self._userInfo = req.json()
        return self._userInfo

    # Get this profile's shouts as parsed JSON
    def getShouts(self, params=None):
        # FIX: identity comparison with None, not `== None`
        if params is None:
            params = {'format': 'json'}
        url = KongUser.ACCOUNT_URL + self.username() + '/messages.json'
        req = self._session.get(url, params=params)
        return req.json()

    # Get user's badges as parsed JSON
    def getBadges(self):
        url = KongUser.ACCOUNT_URL + self.username() + '/badges.json'
        req = self._session.get(url)
        return req.json()
class KongAuthUser(KongUser):
    """An authenticated Kongregate user: can shout, whisper, friend and rate.

    ``login`` must be called first; it scrapes a CSRF token and stores it
    in ``self._authToken`` for later POST requests.
    """

    HTML_SCRAP_URL = 'http://www.kongregate.com/community'
    LOGIN_URL = 'https://www.kongregate.com/session'
    GAME_RATING_URL = 'http://www.kongregate.com/game_ratings.json'

    # Log the user in, provided a password, creating a new Kongregate session
    def login(self, password):
        self._authToken = self.__getAuthToken()
        data = {
            'utf8': '%E2%9C%93',
            'authenticity_token': self._authToken,
            'from_welcome_box': 'false',
            'username': self.username(),
            'password': password
        }
        resp = self._session.post(KongAuthUser.LOGIN_URL, data=data)
        return resp.json()

    # Retrieve the CSRF authenticity token by scraping the community page
    def __getAuthToken(self):
        conn = self._session.get(KongAuthUser.HTML_SCRAP_URL)
        response = conn.text
        m = re.search('<meta content="(.*)" name="csrf-token"', response)
        return m.group(1)

    # Install the AJAX/CSRF headers required before sending a POST request
    def __setHeader(self, data):
        self._session.headers.update({
            'X-Requested-With': 'XMLHttpRequest',
            'X-Prototype-Version': '1.7_rc3',
            'X-CSRF-Token': self._authToken,
            # FIX: header values must be strings; len() returns an int,
            # which requests rejects/mishandles as a header value.
            'Content-Length': str(len(data))
        })

    # Send a shout to a profile
    def shout(self, to, msg):
        url = KongUser.ACCOUNT_URL + to + '/messages.json'
        data = 'utf8=%E2%9C%93&authenticity_token='\
            + self._authToken + '&shout%5Bcontent%5D='\
            + msg + '&_='
        self.__setHeader(data)
        resp = self._session.post(url, data=data)
        return resp.text

    # Send a private whisper
    def whisper(self, to, msg):
        url = KongUser.ACCOUNT_URL + to + '/messages.json'
        data = 'utf8=%E2%9C%93&authenticity_token='\
            + self._authToken + '&shout%5Bprivate%5D=true'\
            + '&shout%5Bcontent%5D=' + msg
        self.__setHeader(data)
        resp = self._session.post(url, data=data)
        return resp.text

    # Delete a message by ID in the specified profile (own profile if None)
    def deleteMessage(self, msgId, username=None):
        # FIX: identity comparison with None, not `== None`
        if username is None:
            username = self.username()
        url = KongUser.ACCOUNT_URL + username + '/messages/' + str(msgId) + '.js'
        data = 'utf8=%E2%9C%93&authenticity_token=' + self._authToken\
            + '&_method=delete&_='
        self.__setHeader(data)
        resp = self._session.post(url, data=data)
        return resp.text

    # Get this user's whispers
    def getWhispers(self, params=None):
        if params is None:
            params = {'format': 'json', 'authenticity_token': self._authToken}
        url = KongUser.ACCOUNT_URL + self.username() + '/private_messages.json'
        req = self._session.get(url, params=params)
        return req.json()

    # Get whispers sent and received with another username
    def getWhispersWith(self, username, params=None):
        if params is None:
            params = {'format': 'json', 'authenticity_token': self._authToken}
        url = KongUser.ACCOUNT_URL + username + '/private_messages.json'
        req = self._session.get(url, params=params)
        return req.json()

    # Get messages this user sent
    def getSentMessages(self, params=None):
        if params is None:
            params = {'format': 'json', 'authenticity_token': self._authToken}
        url = KongUser.ACCOUNT_URL + self.username() + '/sent_messages.json'
        req = self._session.get(url, params=params)
        return req.json()

    # Add a friend
    def friend(self, username):
        url = KongUser.ACCOUNT_URL + self.username() + '/friends/' + username
        data = 'authenticity_token=' + self._authToken + '&_method=put&_='
        url += '?friend_from=new_profile&masthead=true'
        self.__setHeader(data)
        req = self._session.post(url, data=data)
        return req.text

    # Remove a friend
    def unfriend(self, username):
        url = KongUser.ACCOUNT_URL + self.username() + '/friends/' + username
        data = 'authenticity_token=' + self._authToken + '&_method=put&_='
        url += '?masthead=true&unfriend_from=new_profile'
        self.__setHeader(data)
        req = self._session.post(url, data=data)
        return req.text

    # Rate a game
    def rate(self, gameId, rating):
        data = 'user_id=' + str(self.userId()) + '&game_id=' + str(gameId)\
            + '&rating=' + str(rating)
        self._session.headers.update({
            # FIX: header value must be a string (see __setHeader)
            'Content-Length': str(len(data))
        })
        req = self._session.post(KongAuthUser.GAME_RATING_URL, data=data)
        return req.json()
|
<filename>uf/modeling/uda.py
# coding:=utf-8
# Copyright 2021 Tencent. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Unsupervised Data Augmentation for Consistency Training (UDA).
Code revised from Google's implementation.
See `https://github.com/google-research/uda`.
'''
from uf.tools import tf
from .base import BaseDecoder
from . import util
class UDADecoder(BaseDecoder):
    """Classification decoder for Unsupervised Data Augmentation (UDA)
    consistency training.

    The batch is expected to mix three kinds of rows, flagged per-row by
    `is_supervised` / `is_expanded` (cast to float 0/1 masks below):
      * supervised labeled rows          -> cross-entropy loss (optionally
        masked by a TSA schedule),
      * unsupervised original rows       -> stop-gradient KL targets,
      * unsupervised augmented rows      -> KL predictions.
    `self.total_loss` is the sum of the supervised and unsupervised terms.
    """

    def __init__(self,
                 is_training,
                 input_tensor,
                 is_supervised,
                 is_expanded,
                 label_ids,
                 label_size=2,
                 sample_weight=None,
                 scope='cls/seq_relationship',
                 hidden_dropout_prob=0.1,
                 initializer_range=0.02,
                 trainable=True,
                 global_step=None,
                 num_train_steps=None,
                 uda_softmax_temp=-1,
                 uda_confidence_thresh=-1,
                 tsa_schedule='linear',
                 **kwargs):
        super().__init__(**kwargs)

        # Turn the per-row flags into float 0/1 masks for tf.boolean_mask.
        # NOTE(review): assumes both flags arrive as boolean/0-1 tensors
        # aligned with the batch axis -- confirm against the caller.
        is_supervised = tf.cast(is_supervised, tf.float32)
        is_expanded = tf.cast(is_expanded, tf.float32)

        # Linear classification head on top of the encoder output
        hidden_size = input_tensor.shape.as_list()[-1]
        with tf.variable_scope(scope):
            output_weights = tf.get_variable(
                'output_weights',
                shape=[label_size, hidden_size],
                initializer=util.create_initializer(initializer_range),
                trainable=trainable)
            output_bias = tf.get_variable(
                'output_bias',
                shape=[label_size],
                initializer=tf.zeros_initializer(),
                trainable=trainable)

            # Dropout only at training time
            output_layer = util.dropout(
                input_tensor, hidden_dropout_prob if is_training else 0.0)
            logits = tf.matmul(output_layer, output_weights, transpose_b=True)
            logits = tf.nn.bias_add(logits, output_bias)
            log_probs = tf.nn.log_softmax(logits, axis=-1)

        with tf.variable_scope('sup_loss'):
            # Select the non-augmented rows, then the supervised subset
            sup_ori_log_probs = tf.boolean_mask(
                log_probs, mask=(1.0 - is_expanded), axis=0)
            sup_log_probs = tf.boolean_mask(
                sup_ori_log_probs, mask=is_supervised, axis=0)
            sup_label_ids = tf.boolean_mask(
                label_ids, mask=is_supervised, axis=0)

            # Predictions are reported for all original (non-augmented) rows
            self.preds['preds'] = tf.argmax(sup_ori_log_probs, axis=-1)

            # Standard cross-entropy on the supervised rows
            one_hot_labels = tf.one_hot(
                sup_label_ids, depth=label_size, dtype=tf.float32)
            per_example_loss = - tf.reduce_sum(
                one_hot_labels * sup_log_probs, axis=-1)

            # Training Signal Annealing: progressively drop examples the
            # model already predicts confidently (prob above the schedule
            # threshold) to avoid overfitting the small labeled set.
            loss_mask = tf.ones_like(per_example_loss, dtype=tf.float32)
            correct_label_probs = tf.reduce_sum(
                one_hot_labels * tf.exp(sup_log_probs), axis=-1)

            if is_training and tsa_schedule:
                tsa_start = 1.0 / label_size
                tsa_threshold = get_tsa_threshold(
                    tsa_schedule, global_step, num_train_steps,
                    tsa_start, end=1)

                larger_than_threshold = tf.greater(
                    correct_label_probs, tsa_threshold)
                loss_mask = loss_mask * (
                    1 - tf.cast(larger_than_threshold, tf.float32))

            # The mask itself must not receive gradients
            loss_mask = tf.stop_gradient(loss_mask)
            per_example_loss = per_example_loss * loss_mask
            if sample_weight is not None:
                sup_sample_weight = tf.boolean_mask(
                    sample_weight, mask=is_supervised, axis=0)
                per_example_loss *= tf.cast(
                    sup_sample_weight, dtype=tf.float32)
            # Normalize by the number of examples kept by TSA (min 1)
            sup_loss = (tf.reduce_sum(per_example_loss) /
                        tf.maximum(tf.reduce_sum(loss_mask), 1))
            self.losses['supervised'] = per_example_loss

        with tf.variable_scope('unsup_loss'):
            # Unsupervised originals: non-augmented AND non-supervised rows;
            # augmented counterparts are the rows flagged is_expanded.
            ori_log_probs = tf.boolean_mask(
                sup_ori_log_probs, mask=(1.0 - is_supervised), axis=0)
            aug_log_probs = tf.boolean_mask(
                log_probs, mask=is_expanded, axis=0)
            sup_ori_logits = tf.boolean_mask(
                logits, mask=(1.0 - is_expanded), axis=0)
            ori_logits = tf.boolean_mask(
                sup_ori_logits, mask=(1.0 - is_supervised), axis=0)

            unsup_loss_mask = 1
            # Optional prediction sharpening: re-softmax the original
            # logits at a lower temperature (-1 disables it).
            if uda_softmax_temp != -1:
                tgt_ori_log_probs = tf.nn.log_softmax(
                    ori_logits / uda_softmax_temp, axis=-1)
                tgt_ori_log_probs = tf.stop_gradient(tgt_ori_log_probs)
            else:
                tgt_ori_log_probs = tf.stop_gradient(ori_log_probs)

            # Optional confidence masking: only keep examples whose max
            # predicted probability exceeds the threshold (-1 disables it).
            if uda_confidence_thresh != -1:
                largest_prob = tf.reduce_max(tf.exp(ori_log_probs), axis=-1)
                unsup_loss_mask = tf.cast(tf.greater(
                    largest_prob, uda_confidence_thresh), tf.float32)
                unsup_loss_mask = tf.stop_gradient(unsup_loss_mask)

            # Consistency loss: KL(target original || augmented)
            per_example_loss = kl_for_log_probs(
                tgt_ori_log_probs, aug_log_probs) * unsup_loss_mask
            if sample_weight is not None:
                unsup_sample_weight = tf.boolean_mask(
                    sample_weight, mask=(1.0 - is_supervised), axis=0)
                per_example_loss *= tf.cast(
                    unsup_sample_weight, dtype=tf.float32)
            unsup_loss = tf.reduce_mean(per_example_loss)
            self.losses['unsupervised'] = per_example_loss

        self.total_loss = sup_loss + unsup_loss
def get_tsa_threshold(tsa_schedule, global_step, num_train_steps, start, end):
    """Training-signal-annealing threshold for the current step.

    The raw schedule value grows from ~0 to 1 over training and is then
    rescaled into the interval [start, end].

    Raises ValueError for an unknown `tsa_schedule`.
    """
    training_progress = tf.to_float(global_step) / tf.to_float(num_train_steps)
    scale = 5
    schedules = {
        # linear ramp
        'linear': lambda p: p,
        # [exp(-5), exp(0)] = [1e-2, 1]
        'exp': lambda p: tf.exp((p - 1) * scale),
        # [1 - exp(0), 1 - exp(-5)] = [0, 0.99]
        'log': lambda p: 1 - tf.exp((-p) * scale),
    }
    if tsa_schedule not in schedules:
        raise ValueError(
            'Invalid value for `tsa_schedule`: %s. Pick one from `linear`, '
            '`exp` or `log`.' % (tsa_schedule))
    threshold = schedules[tsa_schedule](training_progress)
    return threshold * (end - start) + start
def kl_for_log_probs(log_p, log_q):
    """KL(p || q) along the last axis, computed from log-probabilities.

    KL = sum(p * log p) - sum(p * log q), i.e. negative entropy minus
    negative cross-entropy.
    """
    p = tf.exp(log_p)
    neg_ent = tf.reduce_sum(p * log_p, axis=-1)
    neg_cross_ent = tf.reduce_sum(p * log_q, axis=-1)
    return neg_ent - neg_cross_ent
|
import numpy as np
import pandas as pd
from fklearn.causal.effects import linear_effect
from fklearn.causal.validation.curves import (effect_by_segment, cumulative_effect_curve, cumulative_gain_curve,
relative_cumulative_gain_curve, effect_curves)
def test_effect_by_segment():
    """Linear effect computed per prediction segment."""
    data = pd.DataFrame({
        "t": [1, 1, 1, 2, 2, 2, 3, 3, 3],
        "x": [1, 2, 3, 1, 2, 3, 1, 2, 3],
        "y": [1, 1, 1, 2, 3, 4, 3, 5, 7],
    })
    result = effect_by_segment(data, prediction="x", outcome="y", treatment="t",
                               segments=3, effect_fn=linear_effect)
    expected = pd.Series([1.0, 2.0, 3.0], index=result.index)
    pd.testing.assert_series_equal(result, expected)
def test_cumulative_effect_curve():
    """Cumulative effect over increasing prediction-sorted subsets."""
    data = pd.DataFrame({
        "t": [1, 1, 1, 2, 2, 2, 3, 3, 3],
        "x": [1, 2, 3, 1, 2, 3, 1, 2, 3],
        "y": [1, 1, 1, 2, 3, 4, 3, 5, 7],
    })
    result = cumulative_effect_curve(data, prediction="x", outcome="y", treatment="t",
                                     min_rows=3, steps=data.shape[0],
                                     effect_fn=linear_effect)
    expected = np.array([3., 3., 2.92857143, 2.5, 2.5, 2.46153846, 2.])
    np.testing.assert_allclose(expected, result, rtol=1e-07)
def test_cumulative_gain_curve():
    """Cumulative gain (effect scaled by sample fraction)."""
    data = pd.DataFrame({
        "t": [1, 1, 1, 2, 2, 2, 3, 3, 3],
        "x": [1, 2, 3, 1, 2, 3, 1, 2, 3],
        "y": [1, 1, 1, 2, 3, 4, 3, 5, 7],
    })
    result = cumulative_gain_curve(data, prediction="x", outcome="y", treatment="t",
                                   min_rows=3, steps=data.shape[0],
                                   effect_fn=linear_effect)
    expected = np.array([1., 1.33333333, 1.62698413, 1.66666667, 1.94444444, 2.18803419, 2.])
    np.testing.assert_allclose(expected, result, rtol=1e-07)
def test_relative_cumulative_gain_curve():
    """Cumulative gain relative to the random model baseline."""
    data = pd.DataFrame({
        "t": [1, 1, 1, 2, 2, 2, 3, 3, 3],
        "x": [1, 2, 3, 1, 2, 3, 1, 2, 3],
        "y": [1, 1, 1, 2, 3, 4, 3, 5, 7],
    })
    result = relative_cumulative_gain_curve(data, prediction="x", outcome="y",
                                            treatment="t", min_rows=3,
                                            steps=data.shape[0], effect_fn=linear_effect)
    expected = np.array([0.33333333, 0.44444444, 0.51587302, 0.33333333, 0.38888889, 0.41025641, 0.])
    np.testing.assert_allclose(expected, result, rtol=1e-07)
def test_effect_curves():
    """All curves combined into one summary frame."""
    data = pd.DataFrame({
        "t": [1, 1, 1, 2, 2, 2, 3, 3, 3],
        "x": [1, 2, 3, 1, 2, 3, 1, 2, 3],
        "y": [1, 1, 1, 2, 3, 4, 3, 5, 7],
    })
    result = effect_curves(data, prediction="x", outcome="y", treatment="t",
                           min_rows=3, steps=data.shape[0], effect_fn=linear_effect)
    expected = pd.DataFrame({
        "samples_count": [3, 4, 5, 6, 7, 8, 9],
        "cumulative_effect_curve": [3., 3., 2.92857143, 2.5, 2.5, 2.46153846, 2.],
        "samples_fraction": [0.3333333, 0.4444444, 0.5555555, 0.6666666, 0.7777777, 0.8888888, 1.],
        "cumulative_gain_curve": [1., 1.33333333, 1.62698413, 1.66666667, 1.94444444, 2.18803419, 2.],
        "random_model_cumulative_gain_curve": [0.6666666, 0.8888888, 1.1111111, 1.3333333, 1.5555555, 1.7777777, 2.],
        "relative_cumulative_gain_curve": [0.33333333, 0.44444444, 0.51587302, 0.33333333, 0.38888889, 0.41025641, 0.],
    })
    pd.testing.assert_frame_equal(result, expected, atol=1e-07)
|
<gh_stars>0
import numpy as np
import copy
from constant import *
from functools import reduce
def softmax(x):
    """Return the softmax of ``x`` as a probability vector.

    The maximum is subtracted before exponentiating, which keeps the
    computation numerically stable for large inputs without changing
    the result.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
class TreeNode(object):
    """A node in the MCTS search tree.

    Each node tracks its parent, its expanded children (action -> TreeNode),
    a visit count, the running mean action value Q, the PUCT exploration
    bonus u, and the prior probability P assigned by the policy network.
    """
    def __init__(self, parent, prior_p, state, action):
        # parent: TreeNode or None (None marks the root).
        # prior_p: prior probability P(s, a) from the policy network.
        # state / action: payload describing how this node was reached
        #   (state may be None; see expand()).
        self._parent = parent
        self._children = {} # action -> child TreeNode
        self._n_visits = 0
        self._state = state
        self._Q = 0
        self._u = 0
        self._P = prior_p
        self._action = action
        # self._game = game
    def expand(self, action_priors, is_selfplay):
        """Create a child for every (action, prob) pair in ``action_priors``.

        At the root node the priors are mixed with Dirichlet noise
        (0.8 * prior + 0.2 * noise) to encourage exploration.
        NOTE(review): ``is_selfplay`` is accepted but currently unused here —
        the noise is applied at the root unconditionally; confirm intent.
        """
        duplicated_node = False
        parent_node = None
        parent_state = None
        action_priors = list(action_priors)
        #action, prob = zip(*action_priors)
        #prob = np.asarray(prob)
        noise = np.random.dirichlet(0.3 * np.ones(len(action_priors)))
        #prob = prob * 0.8 + noise * 0.2
        for i, (action, prob) in enumerate(action_priors):
            """
            if action < 12:
                # Code for restrict dummy expand
                duplicated_node = False
                # copy game - step action - get state after step(action) end
                c_game = copy.deepcopy(game)
                c_game.step(action)
                next_state = c_game.state()
                # if 'self' is not root node
                if self._parent is not None:
                    parent_node = self._parent # get parent node
                    parent_state = parent_node._state # get parent node state
                # Compare all states in nodes and next state
                while parent_node is not None:
                    if np.array_equal(parent_state, next_state):
                        duplicated_node = True
                        break
                    else:
                        # get parent-parent node and parent-parent node state
                        parent_node = parent_node._parent
                        if parent_node is not None:
                            parent_state = parent_node._state
                if not duplicated_node and action not in self._children:
                    self._children[action] = TreeNode(self, prob, next_state, action)
            """
            if self._parent is None:
                prob = 0.8 * prob + 0.2 * noise[i]
            if action not in self._children:
                self._children[action] = TreeNode(self, prob, None, action)
    def select(self, c_puct):
        """Return the (action, child) pair maximizing Q + u (see get_value)."""
        #if np.random.random_sample() < 0.7:
        #    return reduce(lambda x, y: x if (x[0] < 2 and y[0] >= 2) else x if ((x[0] < 2 and y[0] < 2) and (x[1].get_value(c_puct) > y[1].get_value(c_puct))) else x if (x[1].get_value(c_puct) > y[1].get_value(c_puct)) else y, self._children.items())
        return max(self._children.items(), key=lambda act_node: act_node[1].get_value(c_puct))
    def update(self, leaf_value):
        """Bump the visit count and fold ``leaf_value`` into the running mean Q."""
        self._n_visits += 1
        self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits
    def update_recursive(self, reward):
        # Back up to the root first, flipping the sign at each ply
        # (two-player zero-sum perspective alternates between levels).
        if self._parent:
            self._parent.update_recursive(-reward)
        self.update(reward)
    def get_value(self, c_puct):
        """Return Q + u, where u is the PUCT exploration bonus scaled by c_puct."""
        self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)
        return self._Q + self._u
    def is_leaf(self):
        """Return True when this node has no expanded children."""
        return self._children == {}
    def is_root(self):
        # The root is the only node without a parent.
        return self._parent is None
    def get_parent(self):
        # Accessor for the parent node (None for the root).
        return self._parent
class MCTS(object):
    """Monte Carlo Tree Search driven by a policy-value function."""
    def __init__(self, policy_value_fn, c_puct=5, n_playout=1800):
        """
        policy_value_fn: callable(game) -> (iterable of (action, prob), leaf_value).
        c_puct: exploration constant used in TreeNode.get_value.
        n_playout: number of simulations run per move.
        """
        self._root = TreeNode(None, 1.0, None, None)
        self._policy = policy_value_fn
        self._c_puct = c_puct
        self._n_playout = n_playout
    # Fix : get current_player param info when the first simulation started.
    def _playout(self, game, is_selfplay):
        """Run one simulation from the root.

        Mutates ``game`` in place while descending (callers pass a deep copy;
        see get_move_probs), expands the reached leaf with the policy priors,
        and backs the leaf value up the visited path with alternating signs.
        """
        node = self._root
        while (1):
            if node.is_leaf():
                break
            action, node = node.select(self._c_puct)
            game.step(action) # advance the (copied) game along the selected edge
        # state = game.state()
        action_probs, leaf_value = self._policy(game)
        end, winner = game.has_a_winner()
        if not end:
            # Add an incompleted code to make pawn avoid dead-end section.
            """
            if np.sum(game.actions()[:4]) <= 1:
                leaf_value = -1.0 if game.get_current_player == current_player else 1.0
            else:
            """
            node.expand(action_probs, is_selfplay)
        else:
            leaf_value = 1.0 if winner == game.get_current_player() else -1.0 # Fix bug that all winners are current player
        # print("call update")
        node.update_recursive(-leaf_value)
    def get_move_probs(self, game, temp=1e-3, time_step=0, is_selfplay=0):
        """Run n_playout simulations and return (actions, probabilities).

        Probabilities are a softmax over log visit counts scaled by 1/temp,
        so a small ``temp`` concentrates mass on the most-visited action.
        """
        for n in range(self._n_playout):
            # Deep-copy so _playout can mutate the game freely.
            game_copy = copy.deepcopy(game)
            # state = game.state()
            # state_copy = copy.deepcopy(state)
            self._playout(game_copy, is_selfplay)
        act_visits = [(act, node._n_visits) for act, node in self._root._children.items()]
        acts, visits = zip(*act_visits)
        act_probs = softmax(1.0 / temp * np.log(np.array(visits) + 1e-10))
        # NOTE(review): this reassignment is only read by the disabled block below.
        visits = np.array(visits)
        """
        if time_step < TAU_THRES:
            act_probs = visits / visits.sum()
        else:
            act_probs = np.zeros(len(visits))
            max_idx = np.argwhere(visits == visits.max())
            action_index = max_idx[np.random.choice(len(max_idx))]
            act_probs[action_index] = 1
        """
        # q_vals = [node._Q for act, node in self._root._children.items()]
        # print("-" * 30)
        # print("q_vals : ", q_vals)
        # print("-" * 30)
        return acts, act_probs
    def update_with_move(self, last_move, state):
        # Reuse the subtree under last_move when possible; otherwise start a
        # fresh root (e.g. when the opponent played an unexplored move).
        if last_move in self._root._children:
            self._root = self._root._children[last_move]
            self._root._parent = None
        else:
            self._root = TreeNode(None, 1.0, state, last_move)
    def __str__(self):
        return "MCTS"
class MCTSPlayer(object):
    # AI player that picks moves with MCTS guided by a policy-value network.
    def __init__(self, policy_value_function, c_puct=5, n_playout=2000, is_selfplay=1):
        self.mcts = MCTS(policy_value_function, c_puct, n_playout)
        self._is_selfplay = is_selfplay
    # Assign the player index used by the game loop.
    def set_player_ind(self, p):
        self.player = p
    # Discard the search tree (start from a fresh root).
    def reset_player(self):
        self.mcts.update_with_move(-1, None)
    # Choose an action during the play
    def choose_action(self, game, temp=1e-3, return_prob=0, time_step=0):
        sensible_moves = game.actions() # get all currently legal moves
        move_probs = np.zeros(12 + (BOARD_SIZE - 1) ** 2 * 2) # probability vector over the full action space, from the network
        if len(sensible_moves) > 0: # while the board is not exhausted
            acts, probs = self.mcts.get_move_probs(game, temp, time_step, self._is_selfplay) # actions and their visit-derived probabilities
            move_probs[list(acts)] = probs # scatter the probabilities into move_probs
            state = game.state()
            if self._is_selfplay:
                # Sample for exploration during self-play.
                # probs = 0.8 * probs + 0.2 * np.random.dirichlet(0.3 * np.ones(len(probs)))
                # move = acts[np.argmax(probs)]
                move = np.random.choice(acts, p=probs)
                self.mcts.update_with_move(move, state) # advance the root and reuse the subtree
            else:
                # Play greedily against an opponent and rebuild the tree.
                move = acts[np.argmax(probs)]
                # move = np.random.choice(acts, p=probs)
                self.mcts.update_with_move(-1, state)
            if return_prob:
                return move, move_probs
            else:
                return move
        else:
            # NOTE(review): implicitly returns None here — callers must handle it.
            print("WARNING: the board is full")
    def __str__(self):
        return "MCTS {}".format(self.player)
|
<filename>Polynomial Regression.py
# House Pricing Prediction
# Polynomial Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
training_set = pd.read_csv('Data/train.csv')
X_train = training_set.iloc[:, :-1]
y = training_set.iloc[:, -1].values
X_test = pd.read_csv('Data/test.csv')
# Stack train and test so imputation / dummies / scaling see identical columns.
# NOTE(review): DataFrame.append is removed in pandas >= 2.0; pd.concat is the replacement.
X = X_train.append(X_test)
continuous_features = ['LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1',
                       'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF',
                       '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'GarageArea',
                       'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch',
                       '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal']
categorical_features = ['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape',
                        'LandContour', 'Utilities', 'LotConfig', 'LandSlope',
                        'Neighborhood', 'Condition1', 'Condition2', 'BldgType',
                        'HouseStyle', 'OverallQual', 'OverallCond', 'YearBuilt',
                        'YearRemodAdd', 'RoofStyle', 'RoofMatl', 'Exterior1st',
                        'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond',
                        'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure',
                        'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC',
                        'CentralAir', 'Electrical', 'BsmtFullBath', 'BsmtHalfBath',
                        'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr',
                        'KitchenQual', 'TotRmsAbvGrd', 'Functional', 'Fireplaces',
                        'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish',
                        'GarageCars', 'GarageQual', 'GarageCond', 'PavedDrive',
                        'PoolQC', 'Fence', 'MiscFeature', 'MoSold', 'YrSold',
                        'SaleType', 'SaleCondition']
# Filling null values
# Continuous features: impute with the column mean; two categoricals with the mode.
X[continuous_features] = X[continuous_features].fillna(X[continuous_features].mean())
X.loc[:, 'Alley'] = X.loc[:, 'Alley'].fillna(X.loc[:, 'Alley'].value_counts().idxmax()) # Lots of NaN's, consider removing
X.loc[:, 'Electrical'] = X.loc[:, 'Electrical'].fillna(X.loc[:, 'Electrical'].value_counts().idxmax())
# All other NaN's are actual values, so they need to be converted
X = X.fillna('i')
X.loc[:, 'GarageYrBlt'] = X.loc[:, 'GarageYrBlt'].replace('i', 0)
X.loc[:, 'MasVnrArea'] = X.loc[:, 'MasVnrArea'].replace('i', 0)
# One-hot encode the categoricals (drop_first avoids the dummy trap).
X = pd.get_dummies(X, drop_first=True, columns=categorical_features)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
X[continuous_features] = StandardScaler().fit_transform(X[continuous_features])
# Splitting into training examples and test examples
X_train = X.iloc[:1460, :]
X_test = X.iloc[1460:, :]
# Removing Id column from the training features.
# BUG FIX: DataFrame.drop(..., inplace=True) returns None, so the original
# X_train.update(X.drop('Id', axis=1, inplace=True)) crashed at runtime and
# also mutated the shared frame X. Drop the column non-destructively instead.
# NOTE(review): X_test intentionally keeps 'Id' — the submission block at the
# end of the script reads X_test['Id']; any model fitted on X_train must be
# given test features with matching columns (i.e. with 'Id' dropped as well).
X_train = X_train.drop('Id', axis=1)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train1, X_train2, y_train1, y_train2 = train_test_split(X_train, y, test_size = 0.2, random_state = 0)
# Work with plain numpy arrays from here on.
X_train1 = X_train1.values
X_train2 = X_train2.values
# Fitting SVR
from sklearn.svm import SVR
import datetime
print(datetime.datetime.now())
svr_poly = SVR(kernel='poly', degree=3)
svr_rbf = SVR(kernel='rbf')
print(datetime.datetime.now())
y_poly1 = svr_poly.fit(X_train1, y_train1).predict(X_train1)
y_poly2 = svr_poly.fit(X_train1, y_train1).predict(X_train2)
# NOTE(review): fits on column 5 but predicts from column 2 — almost certainly
# a mismatch; the same column should be used on both sides.
y_rbf2 = svr_rbf.fit(X_train1[:, 5].reshape(-1, 1), y_train1).predict(X_train2[:, 2].reshape(-1, 1))
# NOTE(review): X_train1/X_train2 were converted to numpy arrays above, so the
# .loc accessor on the next two lines raises AttributeError — these lines only
# work if run before the .values conversion.
y_rbf1 = svr_rbf.fit(X_train1.loc[:, 'GarageArea'].to_frame(), y_train1).predict(X_train1.loc[:, 'GarageArea'].to_frame())
y_rbf1 = svr_rbf.fit(X_train1.loc[:, 'LotFrontage'].to_frame(), y_train1).predict(X_train1.loc[:, 'LotFrontage'].to_frame())
y_rbf2 = svr_rbf.fit(X_train1, y_train1).predict(X_train2)
"""
SVR siempre devuelve 163000, ¿qué pasa?
"""
# Fitting Decision Tree Regressor
from sklearn import tree
clf = tree.DecisionTreeRegressor()
clf = clf.fit(X_train1, y_train1)
a = clf.predict(X_train1)
b = clf.predict(X_train2)
# Fitting Random Forest Regressor
from sklearn.ensemble import RandomForestRegressor
regr_rf = RandomForestRegressor().fit(X_train1, y_train1)
y_pred1 = regr_rf.predict(X_train1)
y_pred2 = regr_rf.predict(X_train2)
# R^2 of 0.86 on the held-out split
# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train1, y_train1)
y_pred1 = lin_reg.predict(X_train1)
y_pred2 = lin_reg.predict(X_train2)
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 2) # Performs very poorly
# Idea: polynomially expand only the continuous variables (in progress) — how do the discrete ones get added back afterwards?
X_poly = poly_reg.fit_transform(X.loc[:, continuous_features])
X_poly = X.loc[:, categorical_features].join(pd.DataFrame(X_poly))
X_poly = pd.get_dummies(X_poly, drop_first=True, columns=categorical_features)
X_poly_train = X_poly[:1460]
X_poly_test = X_poly[1460:]
X_poly1, X_poly2, y_train1, y_train2 = train_test_split(X_poly_train, y, test_size = 0.2, random_state = 0)
# NOTE(review): this fit on already-transformed data looks redundant — the
# regression below is what actually uses X_poly1.
poly_reg.fit(X_poly1, y_train1)
lin_reg = LinearRegression()
lin_reg.fit(X_poly1, y_train1)
y_pred1 = lin_reg.predict(X_poly1)
y_pred2 = lin_reg.predict(X_poly2)
plt.scatter(np.arange(0, 2919), y_pred1)
plt.scatter(np.arange(0, 1168), y_train1, edgecolors='red')
plt.ylim(0, 500000)
plt.show()
# Fitting XGBoost to the Training set
import os
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-5.3.0-posix-seh-rt_v4-rev0\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
# NOTE(review): XGBClassifier on a continuous price target treats every price
# as a class — XGBRegressor is presumably the intended estimator; confirm.
from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X_train1, y_train1) # Never finishes training
# Predicting the Test set results
y_pred = classifier.predict(X_train1)
y_pred2 = classifier.predict(X_train2)
# Evaluating the model
from sklearn.metrics import r2_score
R_sq1 = r2_score(y_train1, y_rbf1)
R_sq2 = r2_score(y_train2, y_rbf2)
# NOTE(review): the two lines below overwrite the SVR scores just computed.
R_sq1 = r2_score(y_train1, y_pred1)
R_sq2 = r2_score(y_train2, y_pred2)
""" Diario de resultados:
La regresión lineal es el algoritmo que mejor funciona: R^2 para test de 0.3865
Ni SVR con kernel rbf ni regresiones polinómicas se ajustan siquiera a los ejemplos de entrenamiento
¿Qué va mal?
---
Decision Tree Regressor: R^2 de 0.719. No está mal, pero mucho margen de mejora
"""
# Visualising the Polynomial Regression results
plt.scatter(training_set.loc[:, 'LotFrontage'], y)
plt.scatter(training_set.loc[:, 'LotArea'], y)
# Writting the predictions
submission = pd.DataFrame(columns=['Id', 'SalePrice'])
submission.iloc[:, 0] = X_test.loc[:, 'Id']
regr_rf = RandomForestRegressor().fit(X_train, y) # Training on all the examples gives worse results!!!
submission.iloc[:, 1] = regr_rf.predict(X_test)
df_csv = submission.to_csv('Random Forest Regressor Submission.csv', index=False)
|
<gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
# Load the tab-separated housing file and drop the header and trailing blank row.
# NOTE(review): Python 2 only — splitting bytes from an 'rb' read with a str,
# and the xrange call below, both fail under Python 3.
raw=open('housing.dat','rb').read().split('\n')
raw=[x.split('\t') for x in raw]
raw=raw[1:-1]
# Skip the first 42 rows; keep columns 1 and 3 as (year, price) floats.
dataset = raw[42:]
dataset = [[float(x[1]),float(x[3])] for x in dataset]
dataset = np.array(dataset)
states = [x[0] for x in raw]
states = sorted(list(set(states)))
plt.plot(dataset[:,0],dataset[:,1],'o')
plt.show()
# One line per state: each state occupies a contiguous block of 42 rows.
for i in xrange(51): plt.plot(dataset[(i*42):(42*(i+1)-1),0],dataset[(i*42):(42*(i+1)-1),1])
i=4 #california
plt.plot(dataset[(i*42):(42*(i+1)-1),0],dataset[(i*42):(42*(i+1)-1),1])
plt.plot(dataset[(i*42):(42*(i+1)-1),0],dataset[(i*42):(42*(i+1)-1),1],'o')
plt.show()
#simple linear
# Ordinary least squares via the normal equations: betahat = (X'X)^-1 X'Y,
# on the 42 rows of the currently selected state (i set above).
X=np.ones((42,2))
X[:,1]=dataset[(i*42):(42*(i+1)),0]
Y=dataset[(i*42):(42*(i+1)),1]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(2000,2012,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq)
plt.show()
#quadratic
# Same OLS fit with an added squared-year column.
X=np.ones((42,3))
X[:,1]=dataset[(i*42):(42*(i+1)),0]
X[:,2]=X[:,1]**2
Y=dataset[(i*42):(42*(i+1)),1]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(2000,2012,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq+betahat[2]*xseq**2)
plt.show()
#cubic
# Cubic OLS fit on year offsets (year - 2000), which keeps the design matrix
# better conditioned than raw years.
# BUG FIX: the design matrix was allocated with only 2 columns
# (np.ones((42,2))) while columns 2 and 3 are assigned below, raising an
# IndexError; a cubic fit needs 4 columns (intercept, x, x^2, x^3).
X=np.ones((42,4))
X[:,1]=dataset[(i*42):(42*(i+1)),0] - 2000
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
Y=dataset[(i*42):(42*(i+1)),1]
# Normal equations, as in the fits above.
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(3,12,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq+betahat[2]*xseq**2+betahat[3]*xseq**3)
plt.show()
#### plot 1
# Synthetic scatter with a fitted OLS line, for illustration.
xx=np.random.normal(5,1.5,20)
yy=np.random.normal(xx,1)
plt.plot(xx,yy,'o')
plt.xlim([0,10])
plt.ylim([0,10])
xmat=np.ones((len(xx),2))
xmat[:,1]=xx
betahat = np.linalg.inv(xmat.T.dot(xmat)).dot(xmat.T.dot(yy))
plotx=np.arange(0,11)
plt.plot(plotx,betahat[0]+betahat[1]*plotx,'r-')
plt.show()
###plot 2
# NOTE(review): column roles look swapped relative to the fits above
# (there column 0 held the year) — confirm which column holds the price.
plt.plot(dataset[:,1]+2000,dataset[:,0],'o')
plt.ylim([0,600000])
plt.title('California Median House Price')
plt.show()
###plot 3 - logit
# The logit (log-odds) function over (0, 1).
plotp = np.arange(.00001,1,.00001)
plotlogit = np.log(plotp/(1-plotp))
plt.plot(plotp,plotlogit)
plt.xlim([-.01,1.01])
plt.ylim([-10,10])
plt.xlabel('x')
plt.ylabel('logit(x)')
plt.show()
###plot 4 - logistic regression
# A hand-made logistic curve overlaid on toy binary outcomes.
xxx = np.arange(-7,8,1)
yyy = np.array([0,0,0,0,1,0,0,1,0,1, 1,1,1,1,1])
plotp = np.arange(.001,1,.001)
plotlogit = np.log(plotp/(1-plotp))
plt.plot(plotlogit,plotp)
plt.plot(xxx,yyy,'o')
plt.xlabel('X')
plt.ylabel('Probability of Event (Y=1)')
plt.show()
########################## makes defaulting data for logistic regression.
# Column 0: default label; columns 1-4: synthetic covariates.
logitdat = np.ones((80,5))
logitdat[:,1]=np.random.poisson(8,80)*50000 + np.random.poisson(20,80)*1000 #loan size
#np.median(logitdat[:,1])
logitdat[:,2]=np.random.poisson(10,80)*10000 + np.random.poisson(10,80)*1000 #income
logitdat[:,3]=np.random.binomial(1,.3,80) #marital status
logitdat[:,4]=10*(np.random.poisson(30,80)+np.random.poisson(30,80)) #credit score
# Linear predictor -> sigmoid -> Bernoulli draw for the label.
xb=(1/100000.)*logitdat[:,1]-3*logitdat[:,3]- (1/100.)*logitdat[:,4]
ppp = np.exp(xb)/(1+np.exp(xb))
logitdat[:,0] = np.random.binomial(1,ppp)
np.save('defaulting.npy',logitdat)
#########################
# Reload and fit a logistic regression on the synthetic data.
logitdat=np.load('defaulting.npy')
Y=logitdat[:,0]
X=logitdat[:,1:]
from sklearn import linear_model
logreg = linear_model.LogisticRegression()
logreg.fit(X,Y)
##########################################
#Challenger data
# Parse the tab-separated O-ring file (CRLF line endings), dropping the
# header and trailing blank line.
bob=open('challenger.txt').read().split('\r\n')
bob=[x.split('\t') for x in bob]
bob=bob[1:-1]
bob=[[float(y) for y in x] for x in bob]
bob=np.array(bob)
# NOTE(review): manual corrections of the damage flag for two rows — confirm
# against the original dataset.
bob[17,1]=1
bob[0,1]=1
np.save('challenger.npy',bob)
###Logreg plot #1
dat = np.load('challenger.npy')
plt.plot(dat[:,0],dat[:,1],'o')
plt.xlim(30,100)
plt.xlabel('Ambient Temperature (F)')
plt.ylim(-0.5,1.5)
plt.ylabel('O-ring Damage Present')
plt.title('Potential for Shuttle Damage - With Cubic Approximation')
# Cubic OLS approximation of the 0/1 damage indicator vs temperature.
X=np.ones((dat.shape[0],4))
X[:,1]=dat[:,0]
Y=dat[:,1]
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
xseq=np.arange(30,100,.5)
plt.plot(xseq,betahat[0]+betahat[1]*xseq+betahat[2]*xseq**2+betahat[3]*xseq**3)
plt.show()
###Logreg plot #2
plt.plot(dat[:,0],dat[:,1],'o')
plt.xlim(30,100)
plt.xlabel('Ambient Temperature (F)')
plt.ylim(-0.5,1.5)
plt.ylabel('O-ring Damage Present')
plt.title('Potential for Shuttle Damage - With Logistic Regression Prediction')
#X=np.ones((dat.shape[0],2))
#X[:,1]=dat[:,0]
# Single-feature logistic regression; very large C effectively disables
# the l2 penalty so the fit matches an unregularized MLE.
X=dat[:,0].reshape([23,1])
Y=dat[:,1]#.reshape([23,1])
from sklearn import linear_model
logreg = linear_model.LogisticRegression(C=1000000,penalty="l2")
logreg.fit(X,Y)
coef=logreg.coef_[0]
xseq=np.arange(30,100,.5)
#xseqmat=np.ones((len(xseq),2))
#xseqmat[:,1]=xseq
xB=logreg.intercept_[0]+logreg.coef_[0][0]*xseq
#plt.plot(xseq,1/(np.exp(-xB)+1))
# BUG FIX: the original called predict_proba(xseq2), but xseq2 was never
# defined (NameError). predict_proba needs a 2-D (n_samples, 1) array shaped
# like the training X, so reshape xseq.
plt.plot(xseq,logreg.predict_proba(xseq.reshape(-1, 1))[:,1])
plt.show()
# Predicted probability of damage at 31 F (launch-day temperature),
# computed by hand from the fitted coefficients.
xB = logreg.coef_*31 + logreg.intercept_
1/(np.exp(-xB)+1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
Utilities for generating strings for use in homoglyph attacks.
Single-character
--------------------------
To get a list of homoglyphs for a character, invoke :func:list_alternates
on the character.
>>> list_alternates("a")[:4]
['U+120042', 'U+120250', 'U+120458', 'U+119886']
Multi-character / strings
--------------------------
To get a list of alternate strings, with each character replaced by an
alternate glyph, call :func:alternates_for_string on the string.
>>> alternates_for_string("abc")[1:4]
['𝖺bc', '𝚊bc', '𝑎bc']
"""
from sys import argv, exit as sysexit
from string import printable
from unicodedata import normalize
__all__ = ['alternate_glyphs', 'unicode_forms', 'list_alternates',
'alternates_for_string']
# unicode normalization forms
unicode_forms = ("NFC", "NFD", "NFKC", "NFKD")
# builtin list of glyphs that canonicalize to characters in string.printable
alternate_glyphs = {
' ': [ 'U+8195',
'U+8197',
'U+8202',
'U+8239',
'U+8200',
'U+8193',
'U+8201',
'U+8194',
'U+8192',
'U+8196',
'U+8198',
'U+12288',
'U+8287',
'U+8199',
'U+160'],
'!': ['U+65045', 'U+65281', 'U+65111'],
'"': ['U+65282'],
'#': ['U+65283', 'U+65119'],
'$': ['U+65129', 'U+65284'],
'%': ['U+65285', 'U+65130'],
'&': ['U+65120', 'U+65286'],
"'": ['U+65287'],
'(': ['U+65113', 'U+65077', 'U+8317', 'U+8333', 'U+65288'],
')': ['U+8318', 'U+8334', 'U+65078', 'U+65114', 'U+65289'],
'*': ['U+65121', 'U+65290'],
'+': ['U+8314', 'U+65291', 'U+65122', 'U+8330', 'U+64297'],
',': ['U+65104', 'U+65040', 'U+65292'],
'-': ['U+65123', 'U+65293'],
'.': ['U+8228', 'U+65294', 'U+65106'],
'/': ['U+65295'],
'0': [ 'U+120822',
'U+65296',
'U+8304',
'U+9450',
'U+120792',
'U+120782',
'U+120812',
'U+130032',
'U+120802',
'U+8320'],
'1': [ 'U+65297',
'U+120783',
'U+120813',
'U+130033',
'U+120793',
'U+9312',
'U+120803',
'U+120823',
'U+8321',
'U+185'],
'2': [ 'U+120814',
'U+65298',
'U+8322',
'U+120784',
'U+130034',
'U+120804',
'U+120794',
'U+120824',
'U+9313',
'U+178'],
'3': [ 'U+9314',
'U+120815',
'U+120805',
'U+120825',
'U+120785',
'U+179',
'U+8323',
'U+130035',
'U+120795',
'U+65299'],
'4': [ 'U+9315',
'U+130036',
'U+120786',
'U+120796',
'U+120806',
'U+120826',
'U+8324',
'U+120816',
'U+65300',
'U+8308'],
'5': [ 'U+9316',
'U+120797',
'U+120817',
'U+8309',
'U+120807',
'U+8325',
'U+120787',
'U+120827',
'U+65301',
'U+130037'],
'6': [ 'U+8310',
'U+120828',
'U+120808',
'U+120788',
'U+130038',
'U+9317',
'U+65302',
'U+120798',
'U+8326',
'U+120818'],
'7': [ 'U+120829',
'U+65303',
'U+120819',
'U+9318',
'U+120809',
'U+120789',
'U+8311',
'U+130039',
'U+8327',
'U+120799'],
'8': [ 'U+120800',
'U+65304',
'U+120820',
'U+120810',
'U+120830',
'U+130040',
'U+120790',
'U+9319',
'U+8312',
'U+8328'],
'9': [ 'U+120791',
'U+8329',
'U+9320',
'U+130041',
'U+120801',
'U+120831',
'U+120811',
'U+65305',
'U+8313',
'U+120821'],
':': ['U+65306', 'U+65043', 'U+65109'],
';': ['U+894', 'U+65044', 'U+65108', 'U+65307'],
'<': ['U+65124', 'U+65308'],
'=': ['U+65309', 'U+65126', 'U+8332', 'U+8316'],
'>': ['U+65125', 'U+65310'],
'?': ['U+65311', 'U+65046', 'U+65110'],
'@': ['U+65131', 'U+65312'],
'A': [ 'U+7468',
'U+120432',
'U+120120',
'U+120172',
'U+120276',
'U+119860',
'U+119912',
'U+127280',
'U+120224',
'U+65313',
'U+120328',
'U+120068',
'U+120380',
'U+120016',
'U+119964',
'U+9398',
'U+119808'],
'B': [ 'U+8492',
'U+120121',
'U+120381',
'U+120433',
'U+119809',
'U+119861',
'U+119913',
'U+9399',
'U+65314',
'U+120173',
'U+7470',
'U+120069',
'U+120329',
'U+120017',
'U+120225',
'U+120277',
'U+127281'],
'C': [ 'U+120018',
'U+127282',
'U+119914',
'U+8450',
'U+120226',
'U+8493',
'U+120330',
'U+119810',
'U+120382',
'U+9400',
'U+120434',
'U+127275',
'U+120278',
'U+120174',
'U+8557',
'U+119966',
'U+119862',
'U+65315'],
'D': [ 'U+119863',
'U+120123',
'U+7472',
'U+119811',
'U+120071',
'U+120383',
'U+127283',
'U+65316',
'U+8517',
'U+8558',
'U+120279',
'U+119967',
'U+120435',
'U+120227',
'U+120175',
'U+9401',
'U+119915',
'U+120331',
'U+120019'],
'E': [ 'U+120228',
'U+65317',
'U+120384',
'U+120436',
'U+120124',
'U+119916',
'U+120072',
'U+120280',
'U+127284',
'U+119812',
'U+9402',
'U+120176',
'U+120020',
'U+7473',
'U+120332',
'U+119864',
'U+8496'],
'F': [ 'U+9403',
'U+120125',
'U+120281',
'U+120177',
'U+120073',
'U+120333',
'U+120437',
'U+8497',
'U+120021',
'U+119917',
'U+120385',
'U+65318',
'U+120229',
'U+119813',
'U+119865',
'U+127285'],
'G': [ 'U+120178',
'U+119814',
'U+119970',
'U+9404',
'U+7475',
'U+120074',
'U+120282',
'U+119866',
'U+120022',
'U+120334',
'U+120438',
'U+120230',
'U+65319',
'U+119918',
'U+120386',
'U+120126',
'U+127286'],
'H': [ 'U+8461',
'U+119919',
'U+8460',
'U+120023',
'U+127287',
'U+119867',
'U+7476',
'U+119815',
'U+120439',
'U+9405',
'U+120283',
'U+8459',
'U+65320',
'U+120179',
'U+120231',
'U+120335',
'U+120387'],
'I': [ 'U+8544',
'U+9406',
'U+119816',
'U+7477',
'U+120128',
'U+120440',
'U+120232',
'U+120024',
'U+119868',
'U+65321',
'U+119920',
'U+120284',
'U+8465',
'U+120388',
'U+120180',
'U+120336',
'U+127288',
'U+8464'],
'J': [ 'U+120285',
'U+120389',
'U+120337',
'U+65322',
'U+119973',
'U+127289',
'U+120077',
'U+120233',
'U+119817',
'U+119869',
'U+120441',
'U+7478',
'U+119921',
'U+9407',
'U+120181',
'U+120025',
'U+120129'],
'K': [ 'U+120442',
'U+8490',
'U+120078',
'U+120234',
'U+120390',
'U+119974',
'U+7479',
'U+119818',
'U+65323',
'U+120182',
'U+120026',
'U+120130',
'U+120338',
'U+120286',
'U+119870',
'U+127290',
'U+119922',
'U+9408'],
'L': [ 'U+120391',
'U+120131',
'U+119923',
'U+120183',
'U+120287',
'U+120443',
'U+65324',
'U+8466',
'U+120027',
'U+120079',
'U+7480',
'U+127291',
'U+120235',
'U+119871',
'U+9409',
'U+119819',
'U+8556',
'U+120339'],
'M': [ 'U+120028',
'U+119872',
'U+120340',
'U+127292',
'U+120236',
'U+65325',
'U+8499',
'U+120184',
'U+7481',
'U+119820',
'U+119924',
'U+120132',
'U+8559',
'U+120392',
'U+120080',
'U+120288',
'U+120444',
'U+9410'],
'N': [ 'U+8469',
'U+120185',
'U+120029',
'U+120289',
'U+119925',
'U+119873',
'U+120393',
'U+9411',
'U+7482',
'U+119977',
'U+120341',
'U+65326',
'U+127293',
'U+120445',
'U+120237',
'U+120081',
'U+119821'],
'O': [ 'U+120446',
'U+119874',
'U+9412',
'U+120030',
'U+120342',
'U+119926',
'U+120394',
'U+119822',
'U+7484',
'U+127294',
'U+120238',
'U+120082',
'U+120290',
'U+119978',
'U+65327',
'U+120134',
'U+120186'],
'P': [ 'U+119979',
'U+120083',
'U+8473',
'U+65328',
'U+9413',
'U+119927',
'U+120395',
'U+7486',
'U+120187',
'U+120447',
'U+120291',
'U+119875',
'U+120343',
'U+127295',
'U+119823',
'U+120239',
'U+120031'],
'Q': [ 'U+120084',
'U+120188',
'U+120344',
'U+120292',
'U+120396',
'U+119980',
'U+120448',
'U+127296',
'U+9414',
'U+120032',
'U+120240',
'U+65329',
'U+119928',
'U+8474',
'U+119876',
'U+119824'],
'R': [ 'U+120449',
'U+119929',
'U+120397',
'U+127276',
'U+7487',
'U+8477',
'U+120345',
'U+120189',
'U+65330',
'U+119825',
'U+9415',
'U+119877',
'U+127297',
'U+120033',
'U+8476',
'U+8475',
'U+120241',
'U+120293'],
'S': [ 'U+119982',
'U+9416',
'U+120138',
'U+65331',
'U+120450',
'U+119878',
'U+120034',
'U+120086',
'U+120294',
'U+120398',
'U+120346',
'U+120242',
'U+119930',
'U+127298',
'U+119826',
'U+120190'],
'T': [ 'U+120087',
'U+9417',
'U+119827',
'U+120295',
'U+119983',
'U+120035',
'U+120139',
'U+127299',
'U+120347',
'U+120191',
'U+7488',
'U+120399',
'U+120243',
'U+120451',
'U+65332',
'U+119879',
'U+119931'],
'U': [ 'U+65333',
'U+120348',
'U+120400',
'U+7489',
'U+119984',
'U+120192',
'U+120296',
'U+119828',
'U+127300',
'U+119932',
'U+120244',
'U+120140',
'U+120452',
'U+9418',
'U+119880',
'U+120036',
'U+120088'],
'V': [ 'U+119933',
'U+120037',
'U+120141',
'U+120245',
'U+127301',
'U+120453',
'U+11389',
'U+120349',
'U+120401',
'U+120193',
'U+8548',
'U+119985',
'U+119829',
'U+65334',
'U+120297',
'U+120089',
'U+119881',
'U+9419'],
'W': [ 'U+127302',
'U+120194',
'U+120350',
'U+119986',
'U+65335',
'U+119934',
'U+120038',
'U+120090',
'U+120454',
'U+120298',
'U+120142',
'U+120246',
'U+120402',
'U+9420',
'U+119830',
'U+119882',
'U+7490'],
'X': [ 'U+120299',
'U+120247',
'U+119831',
'U+120195',
'U+120351',
'U+8553',
'U+65336',
'U+119987',
'U+120039',
'U+9421',
'U+120403',
'U+120091',
'U+120143',
'U+120455',
'U+119883',
'U+119935',
'U+127303'],
'Y': [ 'U+119936',
'U+120040',
'U+119832',
'U+120144',
'U+120456',
'U+65337',
'U+120404',
'U+120300',
'U+120352',
'U+120248',
'U+127304',
'U+9422',
'U+120196',
'U+120092',
'U+119988',
'U+119884'],
'Z': [ 'U+119885',
'U+120405',
'U+120197',
'U+119989',
'U+120457',
'U+119937',
'U+119833',
'U+8488',
'U+9423',
'U+120353',
'U+8484',
'U+120041',
'U+127305',
'U+120301',
'U+120249',
'U+65338'],
'[': ['U+65339', 'U+65095'],
'\\': ['U+65128', 'U+65340'],
']': ['U+65096', 'U+65341'],
'^': ['U+65342'],
'_': ['U+65076', 'U+65103', 'U+65343', 'U+65102', 'U+65075', 'U+65101'],
'`': ['U+8175', 'U+65344'],
'a': [ 'U+120042',
'U+120250',
'U+120458',
'U+119886',
'U+120146',
'U+119938',
'U+120094',
'U+119834',
'U+119990',
'U+120406',
'U+120198',
'U+9424',
'U+170',
'U+7491',
'U+65345',
'U+120302',
'U+8336',
'U+120354'],
'b': [ 'U+120199',
'U+120459',
'U+65346',
'U+120303',
'U+119887',
'U+119939',
'U+120147',
'U+119835',
'U+120251',
'U+120407',
'U+120043',
'U+7495',
'U+120095',
'U+119991',
'U+9425',
'U+120355'],
'c': [ 'U+8573',
'U+119888',
'U+120148',
'U+120304',
'U+120200',
'U+119992',
'U+7580',
'U+65347',
'U+120044',
'U+120356',
'U+120460',
'U+119940',
'U+120096',
'U+119836',
'U+9426',
'U+120252',
'U+120408'],
'd': [ 'U+65348',
'U+120201',
'U+120461',
'U+8518',
'U+119889',
'U+120149',
'U+119837',
'U+119941',
'U+8574',
'U+120097',
'U+120045',
'U+7496',
'U+119993',
'U+120357',
'U+120305',
'U+120253',
'U+120409',
'U+9427'],
'e': [ 'U+120254',
'U+8495',
'U+65349',
'U+119942',
'U+120098',
'U+8519',
'U+120046',
'U+119890',
'U+8337',
'U+120150',
'U+120306',
'U+120202',
'U+120358',
'U+9428',
'U+7497',
'U+120410',
'U+120462',
'U+119838'],
'f': [ 'U+120047',
'U+7584',
'U+120151',
'U+120255',
'U+120359',
'U+119839',
'U+119891',
'U+119995',
'U+120307',
'U+120411',
'U+9429',
'U+119943',
'U+65350',
'U+120099',
'U+120203',
'U+120463'],
'g': [ 'U+119840',
'U+120308',
'U+119892',
'U+65351',
'U+120360',
'U+120048',
'U+119944',
'U+120256',
'U+120100',
'U+120412',
'U+9430',
'U+120464',
'U+120204',
'U+7501',
'U+120152',
'U+8458'],
'h': [ 'U+9431',
'U+65352',
'U+119945',
'U+688',
'U+120205',
'U+120257',
'U+120413',
'U+119841',
'U+120361',
'U+120101',
'U+119997',
'U+120309',
'U+8341',
'U+120153',
'U+120049',
'U+8462',
'U+120465'],
'i': [ 'U+120414',
'U+8520',
'U+120154',
'U+119998',
'U+8560',
'U+120050',
'U+119946',
'U+120310',
'U+9432',
'U+65353',
'U+8505',
'U+119894',
'U+7522',
'U+120362',
'U+120466',
'U+120206',
'U+8305',
'U+119842',
'U+120102',
'U+120258'],
'j': [ 'U+119947',
'U+119999',
'U+120311',
'U+9433',
'U+120259',
'U+690',
'U+120415',
'U+120207',
'U+11388',
'U+119843',
'U+120051',
'U+65354',
'U+120103',
'U+120155',
'U+120467',
'U+119895',
'U+120363',
'U+8521'],
'k': [ 'U+120000',
'U+120156',
'U+120260',
'U+120468',
'U+119844',
'U+120364',
'U+9434',
'U+120052',
'U+120312',
'U+120416',
'U+119948',
'U+120104',
'U+120208',
'U+65355',
'U+8342',
'U+119896',
'U+7503'],
'l': [ 'U+737',
'U+65356',
'U+8467',
'U+120469',
'U+120157',
'U+120365',
'U+119845',
'U+120105',
'U+120053',
'U+8572',
'U+119949',
'U+119897',
'U+120001',
'U+120261',
'U+8343',
'U+120417',
'U+9435',
'U+120209',
'U+120313'],
'm': [ 'U+120158',
'U+120210',
'U+120366',
'U+120262',
'U+120314',
'U+8344',
'U+120418',
'U+120470',
'U+119846',
'U+119898',
'U+65357',
'U+9436',
'U+120002',
'U+120054',
'U+8575',
'U+7504',
'U+119950',
'U+120106'],
'n': [ 'U+120003',
'U+120315',
'U+120471',
'U+9437',
'U+120055',
'U+119899',
'U+65358',
'U+120263',
'U+119847',
'U+119951',
'U+8319',
'U+120159',
'U+120419',
'U+120107',
'U+8345',
'U+120211',
'U+120367'],
'o': [ 'U+8338',
'U+8500',
'U+7506',
'U+120420',
'U+9438',
'U+119900',
'U+120264',
'U+186',
'U+120056',
'U+120160',
'U+120212',
'U+120316',
'U+120472',
'U+120108',
'U+119848',
'U+119952',
'U+120368',
'U+65359'],
'p': [ 'U+119901',
'U+119953',
'U+119849',
'U+7510',
'U+120005',
'U+120057',
'U+120109',
'U+120213',
'U+120317',
'U+65360',
'U+9439',
'U+120161',
'U+120265',
'U+120369',
'U+120473',
'U+120421',
'U+8346'],
'q': [ 'U+119850',
'U+120370',
'U+120058',
'U+120474',
'U+119902',
'U+120214',
'U+120162',
'U+120110',
'U+120006',
'U+65361',
'U+120318',
'U+120422',
'U+120266',
'U+119954',
'U+9440'],
'r': [ 'U+120059',
'U+120007',
'U+120215',
'U+691',
'U+119955',
'U+120163',
'U+120423',
'U+65362',
'U+120111',
'U+120475',
'U+9441',
'U+119903',
'U+119851',
'U+7523',
'U+120371',
'U+120267',
'U+120319'],
's': [ 'U+119956',
'U+65363',
'U+120424',
'U+119852',
'U+738',
'U+120164',
'U+120372',
'U+120476',
'U+9442',
'U+120216',
'U+120320',
'U+119904',
'U+8347',
'U+120112',
'U+120268',
'U+120060',
'U+120008',
'U+383'],
't': [ 'U+8348',
'U+120009',
'U+120217',
'U+120477',
'U+119905',
'U+120113',
'U+120165',
'U+9443',
'U+7511',
'U+120061',
'U+120425',
'U+120321',
'U+120269',
'U+119957',
'U+119853',
'U+120373',
'U+65364'],
'u': [ 'U+120114',
'U+120062',
'U+120270',
'U+120166',
'U+7512',
'U+120478',
'U+119958',
'U+65365',
'U+120010',
'U+120322',
'U+119906',
'U+120218',
'U+7524',
'U+120374',
'U+9444',
'U+120426',
'U+119854'],
'v': [ 'U+120271',
'U+65366',
'U+120323',
'U+120375',
'U+120479',
'U+120167',
'U+9445',
'U+119907',
'U+120427',
'U+7515',
'U+120011',
'U+120115',
'U+119855',
'U+120219',
'U+119959',
'U+120063',
'U+8564',
'U+7525'],
'w': [ 'U+119856',
'U+120220',
'U+120116',
'U+120480',
'U+9446',
'U+120064',
'U+65367',
'U+119908',
'U+120376',
'U+120428',
'U+695',
'U+120272',
'U+120168',
'U+119960',
'U+120012',
'U+120324'],
'x': [ 'U+8339',
'U+119857',
'U+9447',
'U+120273',
'U+120169',
'U+8569',
'U+120117',
'U+120481',
'U+119909',
'U+65368',
'U+119961',
'U+120325',
'U+120377',
'U+120065',
'U+120429',
'U+120221',
'U+120013',
'U+739'],
'y': [ 'U+9448',
'U+120014',
'U+120118',
'U+120222',
'U+120430',
'U+120482',
'U+119962',
'U+120066',
'U+120326',
'U+119910',
'U+696',
'U+120378',
'U+120170',
'U+119858',
'U+65369',
'U+120274'],
'z': [ 'U+120327',
'U+120483',
'U+119911',
'U+119859',
'U+120171',
'U+120223',
'U+120275',
'U+120119',
'U+65370',
'U+9449',
'U+120431',
'U+7611',
'U+120067',
'U+120379',
'U+119963',
'U+120015'],
'{': ['U+65115', 'U+65371', 'U+65079'],
'|': ['U+65372'],
'}': ['U+65373', 'U+65080', 'U+65116'],
'~': ['U+65374']
}
def _compute_alternate_glyphs(form: str = "NFKD") -> dict:
    """Build the alternate-glyph table for every character in string.printable.

    You probably don't need to call this — read the module-level
    ``alternate_glyphs`` table directly instead.

    >>> data = _compute_alternate_glyphs()
    >>> data["a"][:4]
    ['U+120042', 'U+120250', 'U+120458', 'U+119886']"""
    if form not in unicode_forms:
        raise Exception("Invalid normalization form")
    # One full code-point scan per printable character.
    return {character: list_alternates(character, form=form)
            for character in printable}
def list_alternates(character: str, form: str = "NFKD") -> list:
    """Return a list of unicode points that canonicalize to the supplied
    character in NFKD form.

    NOTE: code points are returned in this module's internal notation
    ``U+<decimal>`` (decimal, not the conventional hex) — consumers decode
    them with ``chr(int(glyph[2:]))``.

    :param character: single character to find look-alikes for.
    :param form: Unicode normalization form (must be in ``unicode_forms``).
    :raises ValueError: if *form* is not a recognised normalization form.

    >>> list_alternates("a")[:4]
    ['U+120042', 'U+120250', 'U+120458', 'U+119886']
    """
    # Fast path: use the precomputed table for the default (NFKD) form.
    if character in alternate_glyphs and form == "NFKD":
        return alternate_glyphs[character]
    if form not in unicode_forms:
        # ValueError is more precise than a bare Exception and still
        # backward-compatible for callers catching Exception.
        raise ValueError("Invalid normalization form")
    glyphs = []
    for potential in range(0x110000):  # all valid code points
        alternate_glyph = normalize(form, chr(potential))
        if alternate_glyph == character:
            if chr(potential) != character:  # skip the input character
                glyphs.append(f"U+{potential}")
    return glyphs
def alternates_for_string(input_string: str) -> list:
    """Return strings derived from *input_string* where exactly one character
    has been swapped for a look-alike glyph that canonicalizes back to it.

    >>> alternates_for_string("abc")[1:4]
    ['𝖺bc', '𝚊bc', '𝑎bc']
    """
    alt_string_list = []
    # For each position, substitute every known alternate glyph in turn.
    for idx, character in enumerate(input_string):
        prefix = input_string[:idx]
        suffix = input_string[idx + 1:]
        for glyph in list_alternates(character):
            # "U+1234" -> chr(1234): this module stores code points in decimal.
            substituted = chr(int(glyph[2:]))
            alt_string_list.append(prefix + substituted + suffix)
    return alt_string_list
if __name__ == "__main__":
    # CLI entry point: print every one-character homoglyph variant of the
    # single string argument.
    if len(argv) == 2:
        for alt in alternates_for_string(argv[1]):
            print(alt)
    else:
        print(f"Usage: {argv[0]} <string>")
        # BUG FIX: `sysexit` was an undefined name (NameError at runtime).
        # SystemExit needs no import and behaves like sys.exit(1).
        raise SystemExit(1)
|
<gh_stars>0
import statistics
from boundary.BinaryBoundary import BinaryBoundary
from boundary.BinaryBoundaryWithFeatures import BinaryBoundaryWithFeatures
from boundary.HistogramBoundary import HistogramBoundary
from boundary.KDEBoundary import KDEBoundary
from database.session import Session
def boundary_rating():
    """
    Calculates the different scores separated for each survey rating and prints the mean / standard deviation per rating.
    Also attempts to see if a score can be a predictor for a survey rating (spoiler: doesn't seem like it), but
    this was not used further.
    :return: None (all results are printed)
    """
    # Per-method flat score lists over all (user, playlist, track) triples.
    scores = {
        "binary": [],
        "features": [],
        "kde": [],
        "histogram": [],
    }
    # Per-method scores bucketed by the 1-5 survey rating.
    rating_comparison = {
        "binary": {1: [], 2: [], 3: [], 4: [], 5: []},
        "features": {1: [], 2: [], 3: [], 4: [], 5: []},
        "kde": {1: [], 2: [], 3: [], 4: [], 5: []},
        "histogram": {1: [], 2: [], 3: [], 4: [], 5: []},
    }
    rating_key = "like_rating_specific"
    # (score, rating) pairs, later sorted by score to test score -> rating prediction.
    reverse_object = {
        "binary": [],
        "features": [],
        "kde": [],
        "histogram": []
    }
    # Index 6 doubles as the total count of rated tracks.
    rating_histogram = {
        0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0
    }
    for user, session in Session.get_users_with_surveys():
        binary_boundary = BinaryBoundary(user)
        features_boundary = BinaryBoundaryWithFeatures(user)
        kde_boundary = KDEBoundary(user)
        histogram_boundary = HistogramBoundary(user)
        survey = user.get_survey()
        for playlist_index, playlist in enumerate(session.recommendations):
            ratings = survey[f"playlist{playlist_index + 1}"][rating_key]
            for track_index, track in enumerate(playlist["tracks"]):
                score_binary, _ = binary_boundary.get_boundary_score(track)
                score_features, _ = features_boundary.get_boundary_score(track)
                score_kde, _ = kde_boundary.get_boundary_score(track)
                score_histogram, _ = histogram_boundary.get_boundary_score(track)
                scores["binary"].append(score_binary)
                scores["features"].append(score_features)
                scores["kde"].append(score_kde)
                scores["histogram"].append(score_histogram)
                rating = int(ratings[f'Song{track_index + 1}'])
                rating_histogram[rating] += 1
                rating_histogram[6] += 1
                rating_comparison["binary"][rating].append(score_binary)
                rating_comparison["features"][rating].append(score_features)
                rating_comparison["kde"][rating].append(score_kde)
                rating_comparison["histogram"][rating].append(score_histogram)
                reverse_object["binary"].append({
                    "boundary": score_binary,
                    "rating": rating
                })
                reverse_object["features"].append({
                    "boundary": score_features,
                    "rating": rating
                })
                reverse_object["kde"].append({
                    "boundary": score_kde,
                    "rating": rating
                })
                reverse_object["histogram"].append({
                    "boundary": score_histogram,
                    "rating": rating
                })
    # Print mean / stdev of each method's scores per rating bucket.
    # BUG FIX: the loop variable used to be named `scores`, shadowing the
    # outer dict; renamed to `bin_scores`.  Also guard sparse buckets:
    # statistics.mean raises on empty data and stdev needs >= 2 points.
    for method, bins in rating_comparison.items():
        parts = []
        for rating_bin, bin_scores in bins.items():
            if len(bin_scores) >= 2:
                parts.append(f"{rating_bin}: {statistics.mean(bin_scores):.3f}, {statistics.stdev(bin_scores):.3f}")
            elif bin_scores:
                parts.append(f"{rating_bin}: {statistics.mean(bin_scores):.3f}, n/a")
            else:
                parts.append(f"{rating_bin}: n/a, n/a")
        print(f"{method:9s} -> " + ", ".join(parts))
    print(rating_histogram)
    # Sort (score, rating) pairs by score; if the score predicted the rating,
    # consecutive slices (sized by the rating histogram) would hold matching ratings.
    reverse_object["features"].sort(key=lambda x: x["boundary"])
    reverse_object["histogram"].sort(key=lambda x: x["boundary"])
    reverse_object["kde"].sort(key=lambda x: x["boundary"])
    # Cumulative (start, end) index ranges: one slice per rating value 1..5.
    steps = []
    previous_value = 0
    for i in range(1, 6):
        steps.append((
            previous_value, rating_histogram[i] + previous_value
        ))
        previous_value += rating_histogram[i]
    for i in range(5):
        lower_bound = steps[i][0]
        upper_bound = steps[i][1]
        if upper_bound == lower_bound:
            # No track received this rating; nothing to evaluate.
            continue
        slice_features = reverse_object["features"][lower_bound:upper_bound]
        slice_histogram = reverse_object["histogram"][lower_bound:upper_bound]
        slice_kernel = reverse_object["kde"][lower_bound:upper_bound]
        print([x['rating'] for x in slice_features])
        ratings_features = [t["rating"] for t in slice_features]
        ratings_histogram = [t["rating"] for t in slice_histogram]
        ratings_kernel = [t["rating"] for t in slice_kernel]
        # BUG FIX: previously `sum(...)` over the list of matching ratings was
        # divided by the slice size, which sums the rating values rather than
        # counting them; the fraction correct needs the *count* of matches.
        amount_correct_features = sum(1 for x in ratings_features if x == (i + 1))
        amount_correct_histogram = sum(1 for x in ratings_histogram if x == (i + 1))
        amount_correct_kernel = sum(1 for x in ratings_kernel if x == (i + 1))
        print(f"{lower_bound} - {upper_bound} -> "
              f"({statistics.mean(ratings_features):.2f}, {statistics.mean(ratings_histogram):.2f}, {statistics.mean(ratings_kernel):.2f}) "
              f"{amount_correct_features / (upper_bound - lower_bound):.2f} "
              f"{amount_correct_histogram / (upper_bound - lower_bound):.2f} "
              f"{amount_correct_kernel / (upper_bound - lower_bound):.2f}")
|
from models.User import User
from global_data import r_envoy
import json
class AnalyticsController:
    """Stateless analytics helpers for lecture-attention tracking.

    All aggregate state lives in redis (``r_envoy``) under the ``"statistics"``
    JSON blob keyed by session_id -> email; every read-modify-write cycle is
    serialized through the shared ``'my_lock'`` redis lock.

    BUG FIX: these methods took ``data`` as their first positional argument
    with no ``self``, so they only worked when called on the class itself and
    would silently misbind when called on an instance.  They are now declared
    ``@staticmethod``, which keeps class-level calls working unchanged and
    makes instance-level calls safe as well.
    """

    @staticmethod
    def analyze_hand_result(data):
        """Record a hand-raise detection and return optional positive feedback.

        :param data: dict with keys "hand_result", "session_id", "email".
        :return: dict with "feedback_message" and "hand_raised" flags.
        """
        hand_result = data["hand_result"]
        session_id = data["session_id"]
        email = data["email"]
        positive_feedback_message = ""
        hand_raised = False
        # Result codes 1 and 5 both count as a raised hand.
        if hand_result == 1 or hand_result == 5:
            with r_envoy.lock('my_lock'):
                stat = json.loads(r_envoy.get("statistics"))
                stat[session_id][email]["hand_results"].append(True)
                hand_raised = True
                r_envoy.set("statistics", json.dumps(stat))
                # Encourage the student on every 3rd recorded hand raise.
                if len(stat[session_id][email]["hand_results"]) % 3 == 0:
                    positive_feedback_message = "You are actively participating to the lecture, keep going!"
        return {"feedback_message": positive_feedback_message, "hand_raised": hand_raised}

    @staticmethod
    def analyze_head_result(data):
        """Record a head-pose sample; emit distraction feedback at threshold.

        :param data: dict with "head_pose_result", "session_id", "email", "timestamp".
        :return: dict with "feedback_message" and "distraction_type".
        """
        head_pose_result = data["head_pose_result"]
        session_id = data["session_id"]
        email = data["email"]
        timestamp = data["timestamp"]
        pose_data = {
            "distracted": False,
            "timestamp": timestamp
        }
        feedback_message = ""
        # Any non-straight horizontal pose counts as a distracted sample.
        if head_pose_result["horizontal"] != "straight":
            pose_data["distracted"] = True
        with r_envoy.lock('my_lock'):
            stat = json.loads(r_envoy.get("statistics"))
            user = stat[session_id][email]
            user["head_poses"].append(pose_data)
            if len(user["head_poses"]) == user["head_threshold"]:  # If head poses reached threshold
                distracted_count = 0
                for pose in user["head_poses"]:
                    if pose["distracted"]:
                        distracted_count += 1
                # Majority of the window distracted -> record and notify.
                if (user["head_threshold"] / 2) <= distracted_count:
                    time = user["head_poses"][-1]["timestamp"]
                    user["head_distracted"].append(time)
                    # EMIT HEAD POSE DISTRACTED MESSAGE TO FRONT END
                    feedback_message = "You seem to be distracted, is everything okay?"
                # Reset the sliding window either way.
                user["head_poses"] = []
            r_envoy.set("statistics", json.dumps(stat))
        return {"feedback_message": feedback_message, "distraction_type": "head_pose"}

    @staticmethod
    def analyze_object(data):
        """Record phone/person detections; emit feedback when thresholds hit.

        :param data: dict with "object_result", "session_id", "email", "timestamp".
        :return: dict with "feedback_message" and "distraction_type".
        """
        object_result = data["object_result"]
        session_id = data["session_id"]
        email = data["email"]
        timestamp = data["timestamp"]
        phone_data = {
            "phone": False,
            "timestamp": timestamp
        }
        person_data = {
            "away": False,
            "timestamp": timestamp
        }
        if object_result["phone"]:
            phone_data["phone"] = True
        # person == 0 means nobody was detected in frame.
        if object_result["person"] == 0:
            person_data["away"] = True
        feedback_message = ""
        with r_envoy.lock('my_lock'):
            stat = json.loads(r_envoy.get("statistics"))
            user = stat[session_id][email]
            user["phone_result"].append(phone_data)
            user["person_result"].append(person_data)
            if len(user["phone_result"]) == user["phone_threshold"]:  # If phone reached threshold
                distracted_count = 0
                for result in user["phone_result"]:
                    if result["phone"]:
                        distracted_count += 1
                # Phone visible in at least half of the window -> notify.
                if (float(user["phone_threshold"]) / 2) <= distracted_count:
                    time = user["phone_result"][-1]["timestamp"]
                    user["phone_distracted"].append(time)
                    feedback_message = "Looking at your phone often can distract you."
                user["phone_result"] = []
            if len(user["person_result"]) == user["person_threshold"]:  # If person window reached threshold
                away_count = 0
                for result in user["person_result"]:
                    if result["away"]:
                        away_count += 1
                # Away for the *entire* window (stricter than the phone check).
                if away_count == user["person_threshold"]:
                    time = user["person_result"][-1]["timestamp"]
                    user["person_away"].append(time)
                    feedback_message = "Are you there?"
                user["person_result"] = []
            r_envoy.set("statistics", json.dumps(stat))
        return {"feedback_message": feedback_message, "distraction_type": "object"}
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import cv2
from src.aexpansion import make_expansion, show_segmentation, construct_segmentation
from src.base_segmentation import kmeans
from src.graph import add_data_edges, build_base_graph, compute_energy
from src.utils import read_image, resize_image
from src.base_segmentation import maincolors
def minimum_cut_networkx(graph: nx.Graph, source: str, target: str):
    """Return the (reachable, non-reachable) node partition of the minimum
    s-t cut between *source* and *target*; the cut value is discarded."""
    return nx.algorithms.minimum_cut(graph, source, target)[1]
def run_expansion(
    base_graph: nx.Graph,
    distributions: List[Dict[str, np.ndarray]],
    alpha: int,
    image: np.ndarray,
    original_parition: Dict[int, set],
    params: dict,
    verbose: bool = True,
) -> Tuple[Dict[int, set], nx.Graph]:
    """Run one full alpha-expansion step for label *alpha*.

    Builds the step graph for the current label, solves the binary min-cut
    between the two terminal nodes "-1" and "-2", and folds the binary
    result back into a full multi-label partition.

    Args:
        base_graph (nx.Graph): Base graph
        distributions (List[Dict[str, np.ndarray]]): Main colors distributions
        alpha (int): Alpha iteration label
        image (np.ndarray): Image
        original_parition (Dict[int, set]): Starting partition
        params (dict): Params
        verbose (bool): Print progress messages
    Returns:
        Tuple[Dict[int, set], nx.Graph]: New partition and new graph
    """
    if verbose:
        print("Build step graph...")
    step_graph, aux_nodes = add_data_edges(
        base_graph, distributions, alpha, image, original_parition, params, verbose
    )
    if verbose:
        print("Solving minimum cut algorithm...")
    cut_partition = minimum_cut_networkx(step_graph, "-1", "-2")
    expanded_partition = make_expansion(
        original_parition, cut_partition, alpha, aux_nodes
    )
    return expanded_partition, step_graph
def segment_image(
    image: np.ndarray, params: dict, verbose: bool = True, resize: bool = True
) -> dict:
    """Run full alpha-expansion segmentation on an image.

    Args:
        image (np.ndarray): Input image
        params (dict): Params; "method" selects the base segmentation
            ("maincolors" or "kmeans")
        verbose (bool): Print progress messages
        resize (bool): Work on a resized copy, then upscale the result
    Returns:
        dict: "energies" contains all energies computed during the segmentation,
            "segmented_image" / "original_segmented_image" the rendered label
            maps, and "final_partition" / "original_partition" the raw
            partitions they were built from.
    Raises:
        ValueError: if params["method"] is not a known segmentation method.
    """
    resized_image = resize_image(image, resize)
    if params["method"] == "maincolors":
        distributions, original_partition = maincolors.segmentation(
            resized_image, params, verbose
        )
    elif params["method"] == "kmeans":
        distributions, original_partition = kmeans.segmentation(
            resized_image, params, verbose
        )
    else:
        # BUG FIX: an unknown method previously fell through and raised a
        # confusing NameError on `distributions` further down.
        raise ValueError(f"Unknown segmentation method: {params['method']}")
    if verbose:
        print("Building base graph...")
    base_graph = build_base_graph(resized_image, verbose)
    if verbose:
        print("Compute original energy...")
    energy = compute_energy(
        distributions, resized_image, original_partition, params, verbose
    )
    if verbose:
        print(
            f"Original data cost: {energy['data_cost']}, Original smooth code: {energy['smooth_cost']}"
        )
    partitions = [original_partition]
    energies = [energy]
    # One expansion pass per label.
    for alpha in range(len(distributions)):
        if verbose:
            print("Alpha:", alpha)
        final_partition, new_graph = run_expansion(
            base_graph,
            distributions,
            alpha,
            resized_image,
            partitions[-1],
            params,
            verbose,
        )
        if verbose:
            print("Computing new energy...")
        energy = compute_energy(
            distributions, resized_image, final_partition, params, verbose
        )
        if verbose:
            print(
                f"Data cost: {energy['data_cost']}, Smooth code: {energy['smooth_cost']}"
            )
        # Only keep the expansion if it strictly lowered the total energy.
        if not (energy["data_cost"] + energy["smooth_cost"]) >= (
            energies[-1]["data_cost"] + energies[-1]["smooth_cost"]
        ):
            energies.append(energy)
            partitions.append(final_partition)
    segmented_image = construct_segmentation(partitions[-1], resized_image.shape[:2])
    original_segmented_image = construct_segmentation(
        partitions[0], resized_image.shape[:2]
    )
    if resize:
        segmented_image = cv2.resize(segmented_image, image.shape[:2][::-1]).astype(int)
        original_segmented_image = cv2.resize(
            original_segmented_image, image.shape[:2][::-1]
        ).astype(int)
    # Also expose the raw partitions (backward-compatible addition): the
    # __main__ demo below reads "original_partition" / "final_partition".
    return dict(
        energies=energies,
        segmented_image=segmented_image,
        original_segmented_image=original_segmented_image,
        original_partition=partitions[0],
        final_partition=partitions[-1],
    )
def segment_image_star(args):
    """Unpack *args* into segment_image(...) — adapter for pool.map-style
    APIs that pass a single argument tuple."""
    return segment_image(*args)
if __name__ == "__main__":
    # Demo: segment a sample image with k-means initialisation.
    dummy_image = read_image("250")
    params = {
        "method": "kmeans",
        "bins": 5,
        "n_clusters": 3,
        "lambda": 1,       # smoothness weight
        "epsilon": 20,     # smoothness tolerance
    }
    output = segment_image(dummy_image, params)
    # NOTE(review): segment_image returns keys "energies", "segmented_image"
    # and "original_segmented_image" — the two lookups below raise KeyError
    # unless segment_image is extended to also return the raw partitions.
    # Confirm what show_segmentation expects (a partition or a rendered map).
    show_segmentation(dummy_image, output["original_partition"])
    show_segmentation(dummy_image, output["final_partition"])
    plt.show()
|
<gh_stars>10-100
import jittor as jt
from jittor import nn
from jittor import Module
from jittor import init
from jittor.contrib import concat
class NormLayer(Module):
    """GroupNorm wrapper whose group count scales with channel width
    (1 group below 64 channels, up to 32 groups at 512+)."""
    def __init__(self, num_features):
        # BUG FIX: super().__init__() was missing here while every other
        # Module subclass in this file calls it; added for consistency and
        # correct Module initialisation.
        super().__init__()
        G = 1
        if num_features >= 512:
            G = 32
        elif num_features >= 256:
            G = 16
        elif num_features >= 128:
            G = 8
        elif num_features >= 64:
            G = 4
        self.norm = nn.GroupNorm(num_groups=G, num_channels=num_features)

    def execute(self, x):
        return self.norm(x)
# Large Receptive Field resnet backbone
class PatchMergeBlock(Module):
    """Downsample by *patch_size* with a strided conv (expanding channels by
    patch_size**2), then project to *out_dim* with a 1x1 conv."""
    def __init__(self, in_dim, out_dim, patch_size):
        super().__init__()
        hidden_dim = in_dim * patch_size * patch_size
        self.block = nn.Sequential(
            nn.Conv(in_dim, hidden_dim, kernel_size=patch_size, stride=patch_size, bias=False),
            NormLayer(hidden_dim),
            nn.ReLU(),
            nn.Conv(hidden_dim, out_dim, 1, bias=False),
            NormLayer(out_dim),
            nn.ReLU(),
        )

    def execute(self, x):
        return self.block(x)
class PatchSplitBlock(Module):
    """Upsample by *patch_size*: expand channels with a 1x1 conv, then a
    transposed conv that splits each position into a patch_size x patch_size
    patch of *out_dim* channels."""
    def __init__(self, in_dim, out_dim, patch_size, expansion=None):
        super().__init__()
        # Idiom fix: `expansion == None` -> `expansion is None`.
        if expansion is None:
            expansion = patch_size * patch_size
        self.block = nn.Sequential(
            nn.Conv(in_dim, in_dim * expansion, 1, bias=False),
            NormLayer(in_dim * expansion),
            nn.ReLU(),
            nn.ConvTranspose(in_dim * expansion, out_dim, kernel_size=patch_size, stride=patch_size, bias=False),
            NormLayer(out_dim),
            nn.ReLU())

    def execute(self, x):
        return self.block(x)
class AggregationBlock(Module):
    """Residual block: dilated 3x3 conv expands channels by *expansion*,
    a 1x1 conv projects back, with a skip connection around the pair."""
    def __init__(self, dim, dilation=1, expansion=2):
        super().__init__()
        self.nonlinearity = nn.ReLU()
        self.block = self._make_block(dim, dilation, expansion)

    def _make_block(self, c, d, e):
        # nonlinear neighbor aggregate between expand and project convs
        return nn.Sequential(
            nn.Conv(c, c * e, 3, padding=d, dilation=d, bias=False),
            NormLayer(c * e),
            self.nonlinearity,
            nn.Conv(c * e, c, 1, bias=False),
            NormLayer(c),
        )

    def execute(self, x):
        out = self.block(x)
        out += x  # residual connection
        return self.nonlinearity(out)
class LRFResNet(Module):
    """Large-receptive-field ResNet backbone: alternating patch-merge
    downsampling stages and dilated aggregation stages, ending at 512
    channels.  Only output stride 16 is supported."""
    def __init__(self, layers, output_stride):
        super().__init__()
        if output_stride != 16:
            raise NotImplementedError
        stages = [
            # stem: patch merge to 64 channels at stride 4
            PatchMergeBlock(3, 64, patch_size=2),
            AggregationBlock(64),
            PatchMergeBlock(64, 64, patch_size=2),
            # layer1
            self._make_layer(64, dilations=layers[0]),
            PatchMergeBlock(64, 128, patch_size=2),
            # layer2
            self._make_layer(128, dilations=layers[1]),
            PatchMergeBlock(128, 256, patch_size=2),
            # layer3
            self._make_layer(256, dilations=layers[2]),
            # channel lift to 512 before the final dilated stage
            nn.Conv(256, 512, 1, bias=False),
            NormLayer(512),
            nn.ReLU(),
            # layer4
            self._make_layer(512, dilations=layers[3]),
        ]
        self.net = nn.Sequential(*stages)

    def _make_layer(self, feature_dim, dilations):
        """One AggregationBlock per entry in *dilations*."""
        return nn.Sequential(
            *[AggregationBlock(feature_dim, dilation=d) for d in dilations]
        )

    def execute(self, x):
        return self.net(x)
def LRFresnetS(output_stride):
    """Small LRF-ResNet: fixed per-stage dilation schedule."""
    dilation_schedule = [
        [1] * 5,
        [1] * 5,
        [1, 1, 2] * 6,
        [1, 2, 4, 8] * 1,
    ]
    return LRFResNet(dilation_schedule, output_stride)
# EANet
class External_attention(Module):
    '''
    External attention head (EANet-style): attends each spatial position
    against a small learned external memory of self.k = 64 units instead of
    against the other positions.

    Arguments:
        c (int): The input and output channel number.
    '''
    def __init__(self, c):
        super().__init__()
        self.conv1 = nn.Conv2d(c, c, 1)
        # size of the external memory (number of attention units)
        self.k = 64
        self.linear_0 = nn.Conv1d(c, self.k, 1, bias=False)
        self.linear_1 = nn.Conv1d(self.k, c, 1, bias=False)
        self.conv2 = nn.Sequential(nn.Conv2d(c, c, 1, bias=False), NormLayer(c))
        self.relu = nn.ReLU()

    def execute(self, x):
        # keep the input for the residual connection at the end
        idn = x
        x = self.conv1(x)
        b, c, h, w = x.size()
        n = h*w  # NOTE(review): `n` is computed but never used below
        x = x.view(b, c, h*w)  # b * c * n (flatten spatial dims)
        attn = self.linear_0(x)  # b, k, n
        # double normalization: softmax over positions, then rescale so each
        # position's weights over the k memory units sum to ~1
        attn = nn.softmax(attn, dim=-1)  # b, k, n
        attn = attn / (1e-9 + attn.sum(dim=1, keepdims=True))  # b, k, n
        x = self.linear_1(attn)  # b, c, n
        x = x.view(b, c, h, w)
        x = self.conv2(x)
        x = x + idn  # residual
        x = self.relu(x)
        return x
class LRFEANet(Module):
    """Segmentation network: LRF-ResNet backbone + external-attention head,
    followed by patch-split upsampling and a transposed-conv classifier;
    the logits are bilinearly resized back to the input resolution."""
    def __init__(self, num_classes=2, output_stride=16):
        super().__init__()
        self.backbone = LRFresnetS(output_stride)
        self.head = External_attention(512)
        self.split = nn.Sequential(
            PatchSplitBlock(512, 256, patch_size=2),
            nn.Dropout(p=0.1))
        self.fc = nn.ConvTranspose(256, num_classes, kernel_size=2, stride=2, bias=True)

    def execute(self, x):
        imsize = x.shape
        x = self.backbone(x)
        x = self.head(x)
        x = self.split(x)
        x = self.fc(x)
        # restore full input spatial resolution
        x = nn.resize(x, size=(imsize[2], imsize[3]), mode='bilinear')
        return x

    def get_head(self):
        """Return the non-backbone (head) modules.

        BUG FIX: previously referenced self.fc0, self.fc1 and self.fc2,
        none of which exist on this class (AttributeError at runtime);
        the actual head modules are head, split and fc.
        """
        return [self.head, self.split, self.fc]
def main():
    """Smoke-test LRFEANet on a dummy batch and report parameter counts."""
    model = LRFEANet(num_classes=2)
    dummy_input = jt.ones([2, 3, 512, 512])
    y = model(dummy_input)
    print (y.shape)
    _ = y.data  # force lazy execution

    # Find total parameters and trainable parameters
    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    trainable = [p.numel() for p in model.parameters() if p.requires_grad]
    total_trainable_params = sum(trainable)
    print(f'{total_trainable_params:,} training parameters.')

# Reference counts with a resnet50 backbone:
#   34,773,954 total parameters.
#   34,718,274 training parameters.

if __name__ == '__main__':
    main()
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import time
import intelliflow.api_ext as flow
from intelliflow.api_ext import *
import logging
from intelliflow.core.application.core_application import ApplicationState
from intelliflow.utils.test.hook import GenericRoutingHookImpl, OnExecBeginHookImpl
flow.init_basic_logging()
from pyodinhttp import odin_material_retrieve
def poll(app, materialized_node, expect_failure=False, duration=3600):
    """Poll *app* for the materialized node's execution records until they
    appear, asserting the success/failure outcome matches *expect_failure*.
    Fails the test if nothing materializes within *duration* seconds."""
    if isinstance(materialized_node, MarshalingView):
        node = materialized_node.marshaler_node
    elif isinstance(materialized_node, MarshalerNode):
        node = materialized_node
    else:
        node = None
    start = time.time()
    while True:
        path, records = app.poll(materialized_node)
        if records is not None:
            # A truthy path means the execution succeeded.
            if expect_failure:
                assert not path, f"Expected failure but the node {node.bound!r} yielded success."
            else:
                assert path, f"Expected success but the node {node.bound!r} yielded failure."
            return path, records
        time.sleep(10)
        elapsed_time_in_secs = time.time() - start
        assert elapsed_time_in_secs < duration, f"Test failed due to timeout while polling on {node.bound!r}"
# Integration test for routing/execution hooks: builds a fresh AWS
# application, wires three data nodes with hook callbacks, then injects
# events and asserts which hooks fired.
odin_ms = "com.amazon.access.DEXMLAWSDevAccount-IntelliFlowAdmin-1"
app = AWSApplication(app_name="hook-app-dev",
                     region="us-east-1",
                     access_id=odin_material_retrieve(odin_ms, 'Principal').decode('utf-8'),
                     access_key=odin_material_retrieve(odin_ms, 'Credential').decode('utf-8'))
# Start from a clean slate: terminate any active app, then re-create it.
if app.state != ApplicationState.INACTIVE:
    app.terminate()
app = AWSApplication(app_name="hook-app-dev",
                     region="us-east-1",
                     access_id=odin_material_retrieve(odin_ms, 'Principal').decode('utf-8'),
                     access_key=odin_material_retrieve(odin_ms, 'Credential').decode('utf-8'))
ducsi_data = app.marshal_external_data(
    GlueTable("booker", "d_unified_cust_shipment_items", partition_keys=["region_id", "ship_day"])
    , "DEXML_DUCSI"
    # Dimension spec: 'ship_day' is nested under 'region_id' — presumably a
    # sub-dimension of it; verify against the marshal_external_data contract.
    , {
        'region_id': {
            'type': DimensionType.LONG,
            'ship_day': {
                'type': DimensionType.DATETIME,
                'format': '%Y-%m-%d'
            }
        }
    }
    , {
        '1': {
            '*': {
                'timezone': 'PST'
            }
        }
    },
    SignalIntegrityProtocol("FILE_CHECK", {"file": ["SNAPSHOT"]})
)
# add a dimensionless table (important corner-case)
ship_options = app.glue_table(database="dexbi", table_name="d_ship_option")
# Hook fixtures whose .verify(app) reports whether they were invoked.
on_exec_begin_hook = OnExecBeginHookImpl()
on_exec_skipped_hook = GenericRoutingHookImpl()
on_compute_success_hook = GenericRoutingHookImpl()
on_success_hook = GenericRoutingHookImpl()
exec_checkpoints = [RouteCheckpoint(checkpoint_in_secs=60, slot=GenericRoutingHookImpl()),
                    RouteCheckpoint(checkpoint_in_secs=4 * 60, slot=GenericRoutingHookImpl())]
# Node 1: succeeds — exercises begin/skip/success hooks and exec checkpoints.
repeat_ducsi = app.create_data(id="REPEAT_DUCSI",
                               inputs={
                                   "DEXML_DUCSI": ducsi_data,
                               },
                               compute_targets="output=DEXML_DUCSI.limit(100)",
                               execution_hook=RouteExecutionHook(on_exec_begin=on_exec_begin_hook,
                                                                 on_exec_skipped=on_exec_skipped_hook,
                                                                 on_compute_success=on_compute_success_hook,
                                                                 on_success=on_success_hook,
                                                                 checkpoints=exec_checkpoints)
                               )
on_compute_failure_hook = GenericRoutingHookImpl()
on_failure_hook = GenericRoutingHookImpl()
# we will be using this second node for failure checks
failed_ducsi = app.create_data(id="FAIL_DUCSI",
                               inputs={
                                   "DEXML_DUCSI": ducsi_data
                               },
                               # bad Glue ETL code (intentionally non-compiling)
                               compute_targets="boot me, boot me, boot me",
                               execution_hook=RouteExecutionHook(on_compute_failure=on_compute_failure_hook,
                                                                 on_failure=on_failure_hook),
                               )
on_pending_node_created_hook = GenericRoutingHookImpl()
on_expiration_hook = GenericRoutingHookImpl()
pending_node_checkpoints = [RouteCheckpoint(checkpoint_in_secs=60, slot=GenericRoutingHookImpl())]
# we will be using this third node for Pending Node checks mostly
# (it needs both inputs, so a single DUCSI event leaves it pending until TTL)
app.create_data(id="DUCSI_WITH_SO",
                inputs={
                    "DEXML_DUCSI": ducsi_data,
                    "SHIP_OPTIONS": ship_options
                },
                compute_targets="output=DEXML_DUCSI.limit(100).join(SHIP_OPTIONS, DEXML_DUCSI.customer_ship_option == SHIP_OPTIONS.ship_option)",
                pending_node_hook=RoutePendingNodeHook(on_pending_node_created=on_pending_node_created_hook,
                                                       on_expiration=on_expiration_hook,
                                                       checkpoints=pending_node_checkpoints),
                pending_node_expiration_ttl_in_secs=3 * 60
                )
app.activate()
start = time.time()
# 1- Inject DUCSI event to trigger execution on the first node/route and create a pending node on the second.
app.process(ducsi_data[1]['2020-12-25'],
            # use the remote processor)
            with_activated_processor=True,
            # make it sync so that the following assertions won't fail due to the delay in event propagation.
            is_async=False)
time.sleep(5)
# check if the first exec hook has been hit and done with its own logic
assert on_exec_begin_hook.verify(app)
# nothing else should have fired this early
assert not any([c.slot.verify(app) for c in exec_checkpoints])
assert not any([c.slot.verify(app) for c in pending_node_checkpoints])
assert not on_exec_skipped_hook.verify(app)
assert not on_compute_failure_hook.verify(app)
assert not on_compute_success_hook.verify(app)
assert not on_success_hook.verify(app)
assert not on_failure_hook.verify(app)
# check the pending node hooks registered on the second route.
assert on_pending_node_created_hook.verify(app)
assert not on_expiration_hook.verify(app)
# check idempotency: re-processing the same event must be skipped.
app.process(ducsi_data[1]['2020-12-25'], with_activated_processor=True, is_async=False)
time.sleep(5)
# now we can check the skipped hook due to idempotency related call above
assert on_exec_skipped_hook.verify(app)
# wait till first execution succeeds
poll(app, repeat_ducsi[1]['2020-12-25'])
# wait till second execution on failed_ducsi fails
poll(app, failed_ducsi[1]['2020-12-25'], expect_failure=True)
assert on_compute_success_hook.verify(app)
assert on_success_hook.verify(app)
elapsed = time.time() - start
if elapsed < 4 * 60:
    # not very likely but just make sure that executions did not take less than last checkpoint's mark.
    time.sleep((4 * 60) - elapsed)
app.update_active_routes_status()
assert all([c.slot.verify(app) for c in exec_checkpoints])
assert on_compute_failure_hook.verify(app)
assert on_failure_hook.verify(app)
# we now only have pending node and it must have checked all of its checkpoints and finally gotten expired.
assert on_expiration_hook.verify(app)
assert all([c.slot.verify(app) for c in pending_node_checkpoints])
# clean up all cloud resources
app.terminate()
app.delete()
|
<reponame>DTUWindEnergy/TopFarm2<filename>topfarm/tests/deprecated_tests/test_topfarm_problems/test_nested_problems.py
from topfarm import TurbineTypeOptimizationProblem,\
TurbineXYZOptimizationProblem, InitialXYZOptimizationProblem
from openmdao.drivers.doe_generators import FullFactorialGenerator,\
ListGenerator
from openmdao.drivers.doe_driver import DOEDriver
from topfarm.cost_models.dummy import DummyCost, DummyCostPlotComp
from topfarm.plotting import NoPlot
import numpy as np
from topfarm.easy_drivers import EasyScipyOptimizeDriver
from topfarm.constraint_components.boundary_component import BoundaryComp
from topfarm.tests import npt
# Target optimum used by both nested-optimization tests: (x, y, z, type) per turbine.
optimal = [(0, 2, 4, 1), (4, 2, 1, 0)]


def get_boundary_comp():
    """Boundary for 2 turbines: square 4x4 xy area, z in [0, 4]."""
    square_corners = [(0, 0), (4, 4)]
    return BoundaryComp(
        2,
        xy_boundary=square_corners,
        z_boundary=(0, 4),
        xy_boundary_type='square',
    )
def test_turbineType_and_XYZ_optimization():
    """Outer full-factorial DOE over turbine types wrapping an inner
    SLSQP XYZ optimization; the combined optimum must reach cost 0."""
    # NOTE(review): the plot component is constructed and then immediately
    # replaced by NoPlot — this reads as a debug toggle left in place.
    plot_comp = DummyCostPlotComp(optimal)
    plot_comp = NoPlot()
    cost_comp = DummyCost(
        optimal_state=optimal,
        inputs=['x', 'y', 'z', 'type'])
    xyz_opt_problem = TurbineXYZOptimizationProblem(
        cost_comp,
        turbineXYZ=[(0, 0, 0), (1, 1, 1)],
        min_spacing=2,
        boundary_comp=get_boundary_comp(),
        plot_comp=plot_comp,
        driver=EasyScipyOptimizeDriver(disp=False))
    # Outer problem: enumerate all type combinations (full factorial, 2 levels).
    tf = TurbineTypeOptimizationProblem(
        cost_comp=xyz_opt_problem,
        turbineTypes=[0, 0], lower=0, upper=1,
        driver=DOEDriver(FullFactorialGenerator(2)))
    cost = tf.optimize()[0]
    npt.assert_almost_equal(cost, 0)
def test_turbine_Type_multistart_XYZ_optimization():
    """Three-level nesting: type DOE -> multistart initial-XYZ DOE ->
    COBYLA XYZ optimization; the best recorded run must match the
    returned cost."""
    # NOTE(review): the plot component is constructed and then immediately
    # replaced by NoPlot — this reads as a debug toggle left in place.
    plot_comp = DummyCostPlotComp(optimal, delay=.5)
    plot_comp = NoPlot()
    xyz = [(0, 0, 0), (1, 1, 1)]
    p1 = DummyCost(optimal_state=optimal,
                   inputs=['x', 'y', 'z', 'type'])
    p2 = TurbineXYZOptimizationProblem(
        cost_comp=p1,
        turbineXYZ=xyz,
        min_spacing=2,
        boundary_comp=get_boundary_comp(),
        plot_comp=plot_comp,
        driver=EasyScipyOptimizeDriver(disp=True, optimizer='COBYLA', maxiter=10))
    # Multistart layer: a single explicit initial (x, y, z) case.
    p3 = InitialXYZOptimizationProblem(
        cost_comp=p2,
        turbineXYZ=xyz, min_spacing=2,
        boundary_comp=get_boundary_comp(),
        driver=DOEDriver(ListGenerator([[('x', [0, 4]), ('y', [2, 2]), ('z', [4, 1])]])))
    tf = TurbineTypeOptimizationProblem(
        cost_comp=p3,
        turbineTypes=[0, 0], lower=0, upper=1,
        driver=DOEDriver(FullFactorialGenerator(1)))
    case_gen = tf.driver.options['generator']
    cost, state, recorder = tf.optimize()
    print(cost)
    # print (state)
    print(recorder.get('type'))
    print(recorder.get('cost'))
    # Drill into the nested recorders of the best outer case and verify the
    # innermost final cost equals the overall optimum.
    best_index = np.argmin(recorder.get('cost'))
    initial_xyz_recorder = recorder['recorder'][best_index]
    xyz_recorder = initial_xyz_recorder.get('recorder')[0]
    npt.assert_almost_equal(xyz_recorder['cost'][-1], cost)


if __name__ == '__main__':
    test_turbine_Type_multistart_XYZ_optimization()
|
# Author: KTH dESA Last modified by <NAME>
# Date: 26 November 2018
# Python version: 3.7
import os
import logging
import pandas as pd
from math import ceil, pi, exp, log, sqrt, radians, cos, sin, asin
from pyproj import Proj
import numpy as np
from collections import defaultdict
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)
# General
LHV_DIESEL = 9.9445485 # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760
# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country' # This cannot be changed, lots of code will break
SET_X = 'X' # Coordinate in kilometres
SET_Y = 'Y' # Coordinate in kilometres
SET_X_DEG = 'X_deg' # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop' # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartYear' # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopEndYear' # Project future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent' # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan' # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist' # Distance in km from road network
SET_NIGHT_LIGHTS = 'NightLights' # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours' # Travel time to large city in hours
SET_GHI = 'GHI' # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel' # Wind velocity in m/s
SET_WINDCF = 'WindCF' # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower' # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist' # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID'  # the unique tag for each hydropower site, to not over-utilise
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation' # in metres
SET_SLOPE = 'Slope' # in degrees
SET_LAND_COVER = 'LandCover'
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban' # Whether the site is urban (0 or 1)
SET_ENERGY_PER_CELL = 'EnergyPerSettlement'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart' # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'Elec_Status' # If the site has the potential to be 'easily' electrified in future
SET_ELEC_FUTURE_GRID = "Elec_Initial_Status_Grid"
SET_ELEC_FUTURE_OFFGRID = "Elec_Init_Status_Offgrid"
SET_ELEC_FUTURE_ACTUAL = "Actual_Elec_Status_"
SET_ELEC_FINAL_GRID = "GridElecIn"
SET_ELEC_FINAL_OFFGRID = "OffGridElecIn"
SET_NEW_CONNECTIONS = 'NewConnections' # Number of new people with electricity connections
SET_MIN_GRID_DIST = 'MinGridDist'
SET_LCOE_GRID = 'Grid_extension' # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_GRID_LCOE_Round1 = "Grid_lcoe_PreElec"
SET_MIN_OFFGRID = 'Minimum_Tech_Off_grid' # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall' # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'Minimum_LCOE_Off_grid' # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE' # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode' # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory' # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity' # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost' # The investment cost in USD
SET_INVESTMENT_COST_OFFGRID = "InvestmentOffGrid"
SET_CONFLICT = "Conflict"
SET_ELEC_ORDER = "ElectrificationOrder"
SET_DYNAMIC_ORDER = "Electrification_Wave"
SET_LIMIT = "ElecStatusIn"
SET_GRID_REACH_YEAR = "GridReachYear"
SET_MIN_OFFGRID_CODE = "Off_Grid_Code"
SET_ELEC_FINAL_CODE = "FinalElecCode"
SET_DIST_TO_TRANS = "TransformerDist"
SET_TOTAL_ENERGY_PER_CELL = "TotalEnergyPerCell" # all previous + current timestep
SET_RESIDENTIAL_DEMAND = "ResidentialDemand"
SET_AGRI_DEMAND = "AgriDemand"
SET_HEALTH_DEMAND = "HealthDemand"
SET_EDU_DEMAND = "EducationDemand"
SET_COMMERCIAL_DEMAND = "CommercialDemand"
SET_GRID_CELL_AREA = 'GridCellArea'
# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'PopStartYear' # The actual population in the base year
SPE_URBAN = 'UrbanRatioStartYear' # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'PopEndYear'
SPE_URBAN_FUTURE = 'UrbanRatioEndYear'
SPE_URBAN_MODELLED = 'UrbanRatioModelled' # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff'  # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth'  # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth' # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow' # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh' # Same, with a high forecast var
SPE_GRID_PRICE = 'GridPrice' # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost' # grid capacity investments costs from TEMBA USD/kW
SPE_GRID_LOSSES = 'GridLosses' # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak' # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual' # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled' # The modelled version after calibration (for comparison)
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
SPE_CAP_COST_MG_PV = "Cap_Cost_MG_PV"
SPE_ELEC_LIMIT = "ElecLimit"
SPE_INVEST_LIMIT = "InvestmentLimit"
SPE_DIST_TO_TRANS = "DistToTrans"
SPE_START_YEAR = "StartYear"
SPE_END_YEAR = "EndYEar"
SPE_TIMESTEP = "TimeStep"
class Technology:
    """
    Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
    input parameters.

    Run-wide defaults live as class attributes and are normally replaced once
    per model run via :meth:`set_default_values`; per-technology parameters are
    given to ``__init__``. ``get_lcoe`` assumes ``set_default_values`` has been
    called first (it reads ``self.base_year``, which only exists afterwards).
    """

    # Class-level defaults (units in trailing comments); overridden per run by
    # set_default_values().
    discount_rate = 0.12
    # grid_cell_area = 0.01 # in km2, normally 1km2
    mv_line_cost = 9000  # USD/km
    lv_line_cost = 5000  # USD/km
    mv_line_capacity = 50  # kW/line
    lv_line_capacity = 10  # kW/line
    lv_line_max_length = 30  # km
    hv_line_cost = 120000  # USD/km
    mv_line_max_length = 50  # km
    hv_lv_transformer_cost = 3500  # USD/unit
    mv_increase_rate = 0.1  # percentage
    existing_grid_cost_ratio = 0.1  # percentage

    def __init__(self,
                 tech_life,  # in years
                 base_to_peak_load_ratio,
                 distribution_losses=0,  # percentage
                 connection_cost_per_hh=0,  # USD/hh
                 om_costs=0.0,  # OM costs as percentage of capital costs
                 capital_cost=0,  # USD/kW
                 capacity_factor=0.9,  # percentage
                 grid_penalty_ratio=1,  # multiplier
                 efficiency=1.0,  # percentage
                 diesel_price=0.0,  # USD/litre
                 grid_price=0.0,  # USD/kWh for grid electricity
                 standalone=False,
                 existing_grid_cost_ratio=0.1,  # percentage
                 grid_capacity_investment=0.0,  # USD/kW for on-grid capacity investments (excluding grid itself)
                 diesel_truck_consumption=0,  # litres/hour
                 diesel_truck_volume=0,  # litres
                 om_of_td_lines=0):  # percentage
        """Store the per-technology cost and performance parameters.

        Only ``tech_life`` and ``base_to_peak_load_ratio`` are required; every
        other parameter defaults to a neutral value so that a technology only
        sets the fields relevant to it (e.g. ``diesel_price`` > 0 marks a
        diesel technology, ``grid_price`` > 0 marks the grid).
        """
        self.distribution_losses = distribution_losses
        self.connection_cost_per_hh = connection_cost_per_hh
        self.base_to_peak_load_ratio = base_to_peak_load_ratio
        self.tech_life = tech_life
        self.om_costs = om_costs
        self.capital_cost = capital_cost
        self.capacity_factor = capacity_factor
        self.grid_penalty_ratio = grid_penalty_ratio
        self.efficiency = efficiency
        self.diesel_price = diesel_price
        self.grid_price = grid_price
        self.standalone = standalone
        self.existing_grid_cost_ratio = existing_grid_cost_ratio
        self.grid_capacity_investment = grid_capacity_investment
        self.diesel_truck_consumption = diesel_truck_consumption
        self.diesel_truck_volume = diesel_truck_volume
        self.om_of_td_lines = om_of_td_lines

    @classmethod
    def set_default_values(cls, base_year, start_year, end_year, discount_rate, mv_line_cost, lv_line_cost,
                           mv_line_capacity, lv_line_capacity, lv_line_max_length, hv_line_cost, mv_line_max_length,
                           hv_lv_transformer_cost, mv_increase_rate):
        """Set run-wide defaults shared by every Technology instance.

        Must be called once before ``get_lcoe`` is used: ``base_year``,
        ``start_year`` and ``end_year`` exist only after this call.
        """
        cls.base_year = base_year
        cls.start_year = start_year
        cls.end_year = end_year
        cls.discount_rate = discount_rate
        # cls.grid_cell_area = grid_cell_area
        cls.mv_line_cost = mv_line_cost
        cls.lv_line_cost = lv_line_cost
        cls.mv_line_capacity = mv_line_capacity
        cls.lv_line_capacity = lv_line_capacity
        cls.lv_line_max_length = lv_line_max_length
        cls.hv_line_cost = hv_line_cost
        cls.mv_line_max_length = mv_line_max_length
        cls.hv_lv_transformer_cost = hv_lv_transformer_cost
        cls.mv_increase_rate = mv_increase_rate

    def get_lcoe(self, energy_per_cell, people, num_people_per_hh, start_year, end_year, new_connections,
                 total_energy_per_cell, prev_code, grid_cell_area, conf_status=0, additional_mv_line_length=0,
                 capacity_factor=0, grid_penalty_ratio=1, mv_line_length=0, travel_hours=0, elec_loop=0,
                 get_investment_cost=False, get_investment_cost_lv=False, get_investment_cost_mv=False,
                 get_investment_cost_hv=False, get_investment_cost_transformer=False,
                 get_investment_cost_connection=False):
        """
        Calculates the LCOE depending on the parameters. Optionally calculates the investment cost instead.

        The only required parameters are energy_per_cell, people and num_people_per_hh
        additional_mv_line_length required for grid
        capacity_factor required for PV and wind
        mv_line_length required for hydro
        travel_hours required for diesel

        When one of the ``get_investment_cost*`` flags is set, the
        corresponding (undiscounted, except ``get_investment_cost``) cost
        component is returned instead of the LCOE in USD/kWh.
        """
        if people == 0:
            # If there are no people, the investment cost is zero.
            if get_investment_cost:
                return 0
            # Otherwise we set the people low (prevent div/0 error) and continue.
            else:
                people = 0.00001
        if energy_per_cell == 0:
            # If there is no demand, the investment cost is zero.
            if get_investment_cost:
                return 0
            # Otherwise we set the demand low (prevent div/0 error) and continue.
            else:
                energy_per_cell = 0.000000000001
        if grid_penalty_ratio == 0:
            grid_penalty_ratio = self.grid_penalty_ratio
        # If a new capacity factor isn't given, use the class capacity factor (for hydro, diesel etc)
        if capacity_factor == 0:
            capacity_factor = self.capacity_factor

        def distribution_network(people, energy_per_cell):
            # Size the HV/MV/LV network needed to serve `people` consuming
            # `energy_per_cell` kWh/year in this cell. Returns line lengths
            # (km), transformer count, annual generation (kWh) and peak load
            # (kW). Uses module-level sqrt and HOURS_PER_YEAR.
            if energy_per_cell <= 0:
                energy_per_cell = 0.0001
            if people <= 0:
                people = 0.0001
            consumption = energy_per_cell  # kWh/year
            average_load = consumption / (1 - self.distribution_losses) / HOURS_PER_YEAR  # kW
            peak_load = average_load / self.base_to_peak_load_ratio  # kW
            no_mv_lines = peak_load / self.mv_line_capacity
            no_lv_lines = peak_load / self.lv_line_capacity
            # LV networks per MV line, limited by both capacity and reach.
            lv_networks_lim_capacity = no_lv_lines / no_mv_lines
            lv_networks_lim_length = ((grid_cell_area / no_mv_lines) / (self.lv_line_max_length / sqrt(2))) ** 2
            actual_lv_lines = min([people / num_people_per_hh, max([lv_networks_lim_capacity, lv_networks_lim_length])])
            hh_per_lv_network = (people / num_people_per_hh) / (actual_lv_lines * no_mv_lines)
            lv_unit_length = sqrt(grid_cell_area / (people / num_people_per_hh)) * sqrt(2) / 2
            lv_lines_length_per_lv_network = 1.333 * hh_per_lv_network * lv_unit_length
            total_lv_lines_length = no_mv_lines * actual_lv_lines * lv_lines_length_per_lv_network
            line_reach = (grid_cell_area / no_mv_lines) / (2 * sqrt(grid_cell_area / no_lv_lines))
            total_length_of_lines = min([line_reach, self.mv_line_max_length]) * no_mv_lines
            # Extra HV lines only when MV reach can't cover the cell.
            additional_hv_lines = max([0, round(sqrt(grid_cell_area) /
                                                (2 * min([line_reach, self.mv_line_max_length])) / 10, 3) - 1])
            hv_lines_total_length = (sqrt(grid_cell_area) / 2) * additional_hv_lines * sqrt(grid_cell_area)
            num_transformers = additional_hv_lines + no_mv_lines + (no_mv_lines * actual_lv_lines)
            generation_per_year = average_load * HOURS_PER_YEAR
            return hv_lines_total_length, total_length_of_lines, total_lv_lines_length, \
                num_transformers, generation_per_year, peak_load

        # Cells that were already (partially) electrified in a previous step
        # (prev_code 1/4/5/6/7) and gain new connections: size the network for
        # the full demand and subtract the already-built part, so only the
        # incremental infrastructure is costed.
        if people != new_connections and (prev_code == 1 or prev_code == 4 or prev_code == 5 or
                                          prev_code == 6 or prev_code == 7):
            hv_lines_total_length1, total_length_of_lines1, total_lv_lines_length1, \
                num_transformers1, generation_per_year1, peak_load1 = distribution_network(people,
                                                                                          total_energy_per_cell)
            hv_lines_total_length2, total_length_of_lines2, total_lv_lines_length2, \
                num_transformers2, generation_per_year2, peak_load2 = \
                distribution_network(people=(people - new_connections),
                                     energy_per_cell=(total_energy_per_cell - energy_per_cell))
            hv_lines_total_length3, total_length_of_lines3, total_lv_lines_length3, \
                num_transformers3, generation_per_year3, peak_load3 = \
                distribution_network(people=new_connections,
                                     energy_per_cell=energy_per_cell)
            # Incremental network = (total) - (already served). The *3 values
            # (network sized for the new load alone) are computed but unused —
            # they were an alternative approach left for comparison.
            hv_lines_total_length = hv_lines_total_length1 - hv_lines_total_length2
            total_length_of_lines = total_length_of_lines1 - total_length_of_lines2
            total_lv_lines_length = total_lv_lines_length1 - total_lv_lines_length2
            num_transformers = num_transformers1 - num_transformers2
            generation_per_year = generation_per_year1 - generation_per_year2
            peak_load = peak_load1 - peak_load2
        else:
            hv_lines_total_length, total_length_of_lines, total_lv_lines_length, \
                num_transformers, generation_per_year, peak_load = distribution_network(people, energy_per_cell)

        # Grid-infrastructure cost multiplier by conflict status (0 = none .. 4 = severe).
        conf_grid_pen = {0: 1, 1: 1.18, 2: 1.39, 3: 1.6, 4: 2}
        # The investment and O&M costs are different for grid and non-grid solutions
        if self.grid_price > 0:
            # Grid technology: T&D infrastructure only; generation is bought at grid_price.
            td_investment_cost = hv_lines_total_length * (
                self.hv_line_cost * conf_grid_pen[conf_status]) * \
                (1 + self.existing_grid_cost_ratio * elec_loop) + total_length_of_lines * \
                (self.mv_line_cost * conf_grid_pen[conf_status]) * \
                (1 + self.existing_grid_cost_ratio * elec_loop) + \
                total_lv_lines_length * (self.lv_line_cost * conf_grid_pen[conf_status]) + \
                num_transformers * self.hv_lv_transformer_cost + \
                (new_connections / num_people_per_hh) * self.connection_cost_per_hh + \
                (1 + self.existing_grid_cost_ratio * elec_loop) * additional_mv_line_length * (
                    (self.mv_line_cost * conf_grid_pen[conf_status]) * (
                        1 + self.mv_increase_rate) ** ((additional_mv_line_length / 5) - 1))
            td_investment_cost = td_investment_cost * grid_penalty_ratio
            td_om_cost = td_investment_cost * self.om_of_td_lines
            total_investment_cost = td_investment_cost
            total_om_cost = td_om_cost
            fuel_cost = self.grid_price
        else:
            # Off-grid: standalone systems carry no conflict penalty (all 1s),
            # mini-grids use the same multipliers as the grid.
            conflict_sa_pen = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1}
            conflict_mg_pen = {0: 1, 1: 1.18, 2: 1.39, 3: 1.6, 4: 2}
            # Standalone systems need no LV network; mini-grids are assumed to
            # need 75% of the grid-sized LV length.
            total_lv_lines_length *= 0 if self.standalone else 0.75
            mv_total_line_cost = self.mv_line_cost * mv_line_length * conflict_sa_pen[
                conf_status] if self.standalone \
                else self.mv_line_cost * mv_line_length * conflict_mg_pen[conf_status]
            lv_total_line_cost = self.lv_line_cost * total_lv_lines_length * conflict_sa_pen[
                conf_status] if self.standalone \
                else self.lv_line_cost * total_lv_lines_length * conflict_mg_pen[conf_status]
            installed_capacity = peak_load / capacity_factor
            capital_investment = installed_capacity * self.capital_cost * conflict_sa_pen[
                conf_status] if self.standalone \
                else installed_capacity * self.capital_cost * conflict_mg_pen[conf_status]
            td_investment_cost = mv_total_line_cost + lv_total_line_cost + (
                new_connections / num_people_per_hh) * self.connection_cost_per_hh
            td_om_cost = td_investment_cost * self.om_of_td_lines * conflict_sa_pen[conf_status] if self.standalone \
                else td_investment_cost * self.om_of_td_lines * conflict_mg_pen[conf_status]
            total_investment_cost = td_investment_cost + capital_investment
            # NOTE(review): the conflict penalty is applied twice here
            # (capital_cost * pen * om_costs * pen) — confirm this squaring is
            # intended before changing.
            total_om_cost = td_om_cost + (self.capital_cost * conflict_sa_pen[conf_status] * self.om_costs *
                                          conflict_sa_pen[conf_status] * installed_capacity) if self.standalone \
                else td_om_cost + (self.capital_cost * conflict_mg_pen[conf_status] * self.om_costs *
                                   conflict_mg_pen[conf_status] * installed_capacity)

            # If a diesel price has been passed, the technology is diesel
            # And we apply the Szabo formula to calculate the transport cost for the diesel
            # p = (p_d + 2*p_d*consumption*time/volume)*(1/mu)*(1/LHVd)
            # Otherwise it's hydro/wind etc with no fuel cost
            conf_diesel_pen = {0: 1, 1: 1.18, 2: 1.39, 3: 1.6, 4: 2}
            if self.diesel_price > 0:
                fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * (
                    travel_hours * conf_diesel_pen[conf_status]) /
                    self.diesel_truck_volume) / LHV_DIESEL / self.efficiency
            else:
                fuel_cost = 0

        # Perform the time-value LCOE calculation.
        # self.base_year exists only after set_default_values() has been called.
        project_life = end_year - self.base_year + 1
        reinvest_year = 0
        step = start_year - self.base_year
        # If the technology life is less than the project life, we will have to invest twice to buy it again
        if self.tech_life + step < project_life:
            reinvest_year = self.tech_life + step

        year = np.arange(project_life)
        el_gen = generation_per_year * np.ones(project_life)
        el_gen[0:step] = 0  # no generation before this technology is built
        # NOTE(review): every branch assigns the same 0.12 rate — the intended
        # conflict-adjusted rates survive only in the trailing comments — and
        # the assignment shadows the class attribute with an instance
        # attribute, which then sticks for later calls on this instance.
        if conf_status == 1:
            self.discount_rate = 0.12  # 0.133
            discount_factor = (1 + self.discount_rate) ** year
        elif conf_status == 2:
            self.discount_rate = 0.12  # 0.145
            discount_factor = (1 + self.discount_rate) ** year
        elif conf_status == 3:
            self.discount_rate = 0.12  # 0.158
            discount_factor = (1 + self.discount_rate) ** year
        elif conf_status == 4:
            self.discount_rate = 0.12  # 0.171
            discount_factor = (1 + self.discount_rate) ** year
        else:
            discount_factor = (1 + self.discount_rate) ** year
        investments = np.zeros(project_life)
        investments[step] = total_investment_cost
        # Calculate the year of re-investment if tech_life is smaller than project life
        if reinvest_year:
            investments[reinvest_year] = total_investment_cost
        # Calculate salvage value if tech_life is bigger than project life
        salvage = np.zeros(project_life)
        if reinvest_year > 0:
            # Remaining value of the second asset bought at reinvest_year.
            used_life = (project_life - step) - self.tech_life
        else:
            used_life = project_life - step - 1
        salvage[-1] = total_investment_cost * (1 - used_life / self.tech_life)
        operation_and_maintenance = total_om_cost * np.ones(project_life)
        operation_and_maintenance[0:step] = 0
        fuel = el_gen * fuel_cost
        fuel[0:step] = 0
        # Optionally return a cost component instead of the LCOE.
        if get_investment_cost:
            discounted_investments = investments / discount_factor
            return np.sum(discounted_investments) + (self.grid_capacity_investment * peak_load)
        elif get_investment_cost_lv:
            return total_lv_lines_length * (self.lv_line_cost * conf_grid_pen[conf_status])
        elif get_investment_cost_mv:
            return total_length_of_lines * (self.mv_line_cost * conf_grid_pen[conf_status]) * \
                (1 + self.existing_grid_cost_ratio * elec_loop) + \
                (1 + self.existing_grid_cost_ratio * elec_loop) * additional_mv_line_length * (
                    (self.mv_line_cost * conf_grid_pen[conf_status]) * (
                        1 + self.mv_increase_rate) ** ((additional_mv_line_length / 5) - 1))
        elif get_investment_cost_hv:
            return hv_lines_total_length * (self.hv_line_cost * conf_grid_pen[conf_status]) * \
                (1 + self.existing_grid_cost_ratio * elec_loop)
        elif get_investment_cost_transformer:
            return num_transformers * self.hv_lv_transformer_cost
        elif get_investment_cost_connection:
            return (new_connections / num_people_per_hh) * self.connection_cost_per_hh
        else:
            # LCOE = discounted lifetime costs / discounted lifetime generation.
            discounted_costs = (investments + operation_and_maintenance + fuel - salvage) / discount_factor
            discounted_generation = el_gen / discount_factor
            return np.sum(discounted_costs) / np.sum(discounted_generation)
class SettlementProcessor:
"""
Processes the dataframe and adds all the columns to determine the cheapest option and the final costs and summaries
"""
def __init__(self, path):
try:
self.df = pd.read_csv(path)
except FileNotFoundError:
print('You need to first split into a base directory and prep!')
raise
def condition_df(self, country):
    """
    Do any initial data conditioning that may be required.

    Coerces the GIS-derived columns to numeric, adds bookkeeping columns,
    zero-fills missing values and sorts by country/Y/X.
    """
    logging.info('Ensure that columns that are supposed to be numeric are numeric')
    numeric_columns = [SET_GHI, SET_WINDVEL, SET_NIGHT_LIGHTS, SET_ELEVATION, SET_SLOPE,
                       SET_LAND_COVER, SET_SUBSTATION_DIST, SET_ROAD_DIST, SET_HYDRO_DIST,
                       SET_HYDRO]
    for column in numeric_columns:
        # errors='coerce' turns unparseable entries into NaN (zero-filled below).
        self.df[column] = pd.to_numeric(self.df[column], errors='coerce')

    logging.info('Add column with country name')
    self.df['Country'] = country

    logging.info('Adding column "ElectrificationOrder"')
    self.df['ElectrificationOrder'] = 0

    # Placeholder, filled in later by the demand calculations.
    self.df['PerCapitaDemand'] = 0

    logging.info('Replace null values with zero')
    self.df.fillna(0, inplace=True)

    logging.info('Sort by country, Y and X')
    self.df.sort_values(by=[SET_COUNTRY, SET_Y, SET_X], inplace=True)
def grid_penalties(self):
    """
    Add a grid penalty factor to increase the grid cost in areas that higher road distance, higher substation
    distance, unsuitable land cover, high slope angle or high elevation.

    Each driver is mapped to a 1-5 class, the classes are combined as a
    weighted average (SET_COMBINED_CLASSIFICATION), and the combined class is
    turned into a multiplicative penalty stored in SET_GRID_PENALTY.

    NOTE(review): the threshold classifiers return 5 for the most favourable
    conditions, yet set_penalty grows with the class value — confirm the
    intended orientation of the 1-5 scale before changing any weights.
    """

    def classify_road_dist(row):
        # Road distance (km) -> 1-5 class; nearer roads score higher.
        road_dist = row[SET_ROAD_DIST]
        if road_dist <= 5:
            return 5
        elif road_dist <= 10:
            return 4
        elif road_dist <= 25:
            return 3
        elif road_dist <= 50:
            return 2
        else:
            return 1

    def classify_substation_dist(row):
        # Substation distance (km) -> 1-5 class; nearer substations score higher.
        substation_dist = row[SET_SUBSTATION_DIST]
        if substation_dist <= 0.5:
            return 5
        elif substation_dist <= 1:
            return 4
        elif substation_dist <= 5:
            return 3
        elif substation_dist <= 10:
            return 2
        else:
            return 1

    # Land-cover code -> class. A dict lookup replaces the former if/elif
    # chain, which silently returned None for any unlisted code (e.g. > 16)
    # and only failed later with a cryptic TypeError in the weighted sum.
    land_cover_classes = {0: 1, 1: 3, 2: 4, 3: 3, 4: 4, 5: 3, 6: 2, 7: 5,
                          8: 2, 9: 5, 10: 5, 11: 1, 12: 3, 13: 3, 14: 5,
                          15: 3, 16: 5}

    def classify_land_cover(row):
        land_cover = row[SET_LAND_COVER]
        try:
            return land_cover_classes[land_cover]
        except KeyError:
            # Fail fast with a clear message instead of propagating None.
            raise ValueError('Unknown land cover code: {}'.format(land_cover))

    def classify_elevation(row):
        # Elevation (m) -> 1-5 class; lower elevation scores higher.
        elevation = row[SET_ELEVATION]
        if elevation <= 500:
            return 5
        elif elevation <= 1000:
            return 4
        elif elevation <= 2000:
            return 3
        elif elevation <= 3000:
            return 2
        else:
            return 1

    def classify_slope(row):
        # Slope (degrees) -> 1-5 class; flatter terrain scores higher.
        slope = row[SET_SLOPE]
        if slope <= 10:
            return 5
        elif slope <= 20:
            return 4
        elif slope <= 30:
            return 3
        elif slope <= 40:
            return 2
        else:
            return 1

    def set_penalty(row):
        # Exponential penalty on the distance of the combined class from 1.
        classification = row[SET_COMBINED_CLASSIFICATION]
        return 1 + (exp(0.85 * abs(1 - classification)) - 1) / 100

    logging.info('Classify road dist')
    self.df[SET_ROAD_DIST_CLASSIFIED] = self.df.apply(classify_road_dist, axis=1)

    logging.info('Classify substation dist')
    self.df[SET_SUBSTATION_DIST_CLASSIFIED] = self.df.apply(classify_substation_dist, axis=1)

    logging.info('Classify land cover')
    self.df[SET_LAND_COVER_CLASSIFIED] = self.df.apply(classify_land_cover, axis=1)

    logging.info('Classify elevation')
    self.df[SET_ELEVATION_CLASSIFIED] = self.df.apply(classify_elevation, axis=1)

    logging.info('Classify slope')
    self.df[SET_SLOPE_CLASSIFIED] = self.df.apply(classify_slope, axis=1)

    logging.info('Combined classification')
    self.df[SET_COMBINED_CLASSIFICATION] = (0.15 * self.df[SET_ROAD_DIST_CLASSIFIED] +
                                            0.20 * self.df[SET_SUBSTATION_DIST_CLASSIFIED] +
                                            0.20 * self.df[SET_LAND_COVER_CLASSIFIED] +
                                            0.15 * self.df[SET_ELEVATION_CLASSIFIED] +
                                            0.30 * self.df[SET_SLOPE_CLASSIFIED])

    logging.info('Grid penalty')
    self.df[SET_GRID_PENALTY] = self.df.apply(set_penalty, axis=1)
def calc_wind_cfs(self):
    """
    Calculate the wind capacity factor based on the average wind velocity.

    Extrapolates the measured wind speed to hub height, integrates the turbine
    power curve over a Rayleigh wind-speed distribution and writes the
    resulting capacity factor into SET_WINDCF.
    """
    availability = 0.97  # availability factor
    hours = 8760
    rated_power = 600
    hub_height = 55  # hub height
    meas_height = 80  # velocity measurement height
    losses = 0.85  # losses in wind electricity
    speed_bins = range(1, 26)
    power_curve = [0, 0, 0, 0, 30, 77, 135, 208, 287, 371, 450, 514, 558,
                   582, 594, 598, 600, 600, 600, 600, 600, 600, 600, 600, 600]

    def get_wind_cf(row):
        u_meas = row[SET_WINDVEL]
        if u_meas == 0:
            return 0
        # Adjust for the correct hub height (log-law shear exponent).
        alpha = (0.37 - 0.088 * log(u_meas)) / (1 - 0.088 * log(meas_height / 10))
        u_hub = u_meas * (hub_height / meas_height) ** alpha
        # Rayleigh probability weight for each 1 m/s speed bin.
        rayleigh = [(pi / 2) * (u / u_hub ** 2) * exp((-pi / 4) * (u / u_hub) ** 2)
                    for u in speed_bins]
        energy = sum(availability * losses * hours * p * r
                     for p, r in zip(power_curve, rayleigh))
        return energy / (rated_power * hours)

    logging.info('Calculate Wind CF')
    self.df[SET_WINDCF] = self.df.apply(get_wind_cf, axis=1)
def calibrate_pop_and_urban(self, pop_actual, pop_future, urban_current, urban_future, urban_cutoff,
                            start_year, end_year, time_step):
    """
    Calibrate the actual current population, the urban split and forecast the future population.

    Returns a tuple ``(urban_cutoff, urban_modelled)``.

    NOTE: may prompt on stdin when the GIS layer already contains an urban
    classification (max of SET_URBAN == 2).

    Fix: ``pop_urb``/``urban_modelled`` were previously computed only inside
    the ``if calibrate:`` branch, so answering "use GIS layer definition"
    raised NameError at the return statement. They are now always computed.
    """
    logging.info('Calibrate current population')
    # Assumes end_year > start_year; project_life == 0 would divide by zero below.
    project_life = end_year - start_year
    # Calculate the ratio between the actual population and the total population from the GIS layer
    pop_ratio = pop_actual / self.df[SET_POP].sum()
    # And use this ratio to calibrate the population in a new column
    self.df[SET_POP_CALIB] = self.df.apply(lambda row: row[SET_POP] * pop_ratio, axis=1)

    if max(self.df[SET_URBAN]) == 2:
        calibrate = True if 'n' in input('Use urban definition from GIS layer <y/n> (n=model calibration):') else False
    else:
        calibrate = True

    if calibrate:
        # Calculate the urban split, by calibrating the cutoff until the target ratio is achieved
        logging.info('Calibrate urban split')
        sorted_pop = self.df.copy()
        sorted_pop['Density'] = sorted_pop[SET_POP_CALIB]  # / sorted_pop['GridCellArea']
        sorted_pop.sort_values(by=['Density'], inplace=True)
        # Walk up the density-sorted cells until the rural share (1 - urban_current)
        # of the calibrated population is reached; the density at that point is
        # the urban cutoff.
        urban_pop_break = (1 - urban_current) * self.df[SET_POP_CALIB].sum()
        cumulative_urban_pop = 0
        ii = 0
        while cumulative_urban_pop < urban_pop_break:
            cumulative_urban_pop += sorted_pop[SET_POP_CALIB].iloc[ii]
            ii += 1
        urban_cutoff = sorted_pop['Density'].iloc[ii - 1]
        # Assign the urban (2) / rural (0) values to each cell
        self.df[SET_URBAN] = self.df.apply(
            lambda row: 2 if ((row[SET_POP_CALIB] > urban_cutoff) & (row['GridCellArea'] > 0)) else 0, axis=1)

    # Get the calculated urban ratio — always computed so the return value is
    # defined even when the GIS urban definition is kept (see docstring).
    pop_urb = self.df.loc[self.df[SET_URBAN] > 1, SET_POP_CALIB].sum()
    urban_modelled = pop_urb / pop_actual
    logging.info('The modelled urban ratio is {}. '
                 'In case this is not acceptable please revise this part of the code'.format(urban_modelled))

    # Project future population, with separate growth rates for urban and rural
    logging.info('Project future population')
    if calibrate:
        urban_growth = (urban_future * pop_future) / (urban_current * pop_actual)
        rural_growth = ((1 - urban_future) * pop_future) / ((1 - urban_current) * pop_actual)
        yearly_urban_growth_rate = urban_growth ** (1 / project_life)
        yearly_rural_growth_rate = rural_growth ** (1 / project_life)
    else:
        # GIS urban definition kept: apply the national growth rate to both.
        urban_growth = pop_future / pop_actual
        rural_growth = pop_future / pop_actual
        yearly_urban_growth_rate = urban_growth ** (1 / project_life)
        yearly_rural_growth_rate = rural_growth ** (1 / project_life)

    self.df[SET_POP_FUTURE] = self.df.apply(lambda row: row[SET_POP_CALIB] * urban_growth
                                            if row[SET_URBAN] > 1
                                            else row[SET_POP_CALIB] * rural_growth,
                                            axis=1)

    # Hard-coded single analysis year; the commented alternative generates the
    # full time-step range instead.
    yearsofanalysis = [2030]
    # yearsofanalysis = list(range((start_year + time_step), end_year + 1, time_step))
    for year in yearsofanalysis:
        self.df[SET_POP + "{}".format(year)] = self.df.apply(
            lambda row: row[SET_POP_CALIB] * (yearly_urban_growth_rate ** (year - start_year))
            if row[SET_URBAN] > 1
            else row[SET_POP_CALIB] * (yearly_rural_growth_rate ** (year - start_year)),
            axis=1)
    self.df[SET_POP + "{}".format(start_year)] = self.df.apply(lambda row: row[SET_POP_CALIB], axis=1)

    return urban_cutoff, urban_modelled
def elec_current_and_future(self, elec_actual, pop_cutoff, dist_to_trans, min_night_lights,
                            max_grid_dist, max_road_dist, pop_tot, pop_cutoff2, start_year):
    """
    Calibrate the current electrification status, and future 'pre-electrification' status.

    Marks cells as currently electrified (SET_ELEC_CURRENT) from grid-line
    distance, night lights and population, then seeds the start-year
    grid/off-grid/actual status columns and the final-code column.

    Returns (min_night_lights, dist_to_trans, max_grid_dist, max_road_dist,
    elec_modelled, pop_cutoff, pop_cutoff2, rural_elec_ratio, urban_elec_ratio).
    Several of these are returned unchanged — the iterative calibration that
    used to adjust them is disabled (see the NOTE on the loop below).
    """
    urban_pop = (self.df.loc[self.df[SET_URBAN] == 2, SET_POP_CALIB].sum())  # urban population, calibrated
    rural_pop = (self.df.loc[self.df[SET_URBAN] == 0, SET_POP_CALIB].sum())  # rural population, calibrated
    total_pop = self.df[SET_POP_CALIB].sum()
    total_elec_ratio = elec_actual
    # Hard-coded urban/rural electrification split, rescaled below so the
    # implied national rate matches elec_actual.
    # NOTE(review): 0.89 / 0.11 look country-specific — confirm before reuse.
    urban_elec_ratio = 0.89
    rural_elec_ratio = 0.11
    factor = (total_pop * total_elec_ratio) / (urban_pop * urban_elec_ratio + rural_pop * rural_elec_ratio)
    urban_elec_ratio *= factor
    rural_elec_ratio *= factor
    logging.info('Calibrate current electrification')
    # The variables below belong to the disabled iterative calibration (see
    # NOTE on the while loop); they are kept so re-enabling it is easy.
    is_round_two = False
    grid_cutoff2 = 10
    road_cutoff2 = 10
    count = 0
    prev_vals = []
    accuracy = 0.01
    max_iterations_one = 30
    max_iterations_two = 60
    self.df[SET_ELEC_CURRENT] = 0
    # A transformer/MV-line based prioritisation used to exist here; the model
    # currently always falls back to HV-line distance (priority 2).
    self.df['GridDistCalibElec'] = self.df['CurrentHVLineDist']
    priority = 2
    condition = 0
    # NOTE: condition is set to 1 at the end of the first pass, so this loop
    # executes exactly once — the iterative night-lights/grid-dist calibration
    # that used to run here has been removed.
    while condition == 0:
        # Assign the 1 (electrified)/0 (un-electrified) values to each cell
        urban_electrified = urban_pop * urban_elec_ratio
        rural_electrified = rural_pop * rural_elec_ratio
        if priority == 1:
            # Transformer/MV data available: tighter distance and population thresholds.
            self.df.loc[(self.df['GridDistCalibElec'] < 10) & (self.df[SET_NIGHT_LIGHTS] > 0) & (self.df[SET_POP_CALIB] > 50), SET_ELEC_CURRENT] = 1
            urban_elec_ratio = urban_electrified / (self.df.loc[(self.df[SET_ELEC_CURRENT] == 1) & (self.df[SET_URBAN] == 2), SET_POP_CALIB].sum())
            rural_elec_ratio = rural_electrified / (self.df.loc[(self.df[SET_ELEC_CURRENT] == 1) & (self.df[SET_URBAN] < 2), SET_POP_CALIB].sum())
            pop_elec = self.df.loc[self.df[SET_ELEC_CURRENT] == 1, SET_POP_CALIB].sum()
            elec_modelled = pop_elec / pop_tot
        else:
            # HV-line distance only: looser threshold, night lights OR population.
            self.df.loc[(self.df['GridDistCalibElec'] < 15) & ((self.df[SET_NIGHT_LIGHTS] > 0) | (self.df[SET_POP_CALIB] > 130)), SET_ELEC_CURRENT] = 1
            # NOTE(review): these ratios are inverted relative to the
            # priority == 1 branch (electrified/target vs target/electrified)
            # — confirm which orientation downstream consumers expect.
            urban_elec_ratio = (self.df.loc[(self.df[SET_ELEC_CURRENT] == 1) & (
                self.df[SET_URBAN] == 2), SET_POP_CALIB].sum()) / urban_electrified
            rural_elec_ratio = (self.df.loc[(self.df[SET_ELEC_CURRENT] == 1) & (
                self.df[SET_URBAN] == 0), SET_POP_CALIB].sum()) / rural_electrified
            pop_elec = self.df.loc[self.df[SET_ELEC_CURRENT] == 1, SET_POP_CALIB].sum()
            elec_modelled = pop_elec / pop_tot
        # (An ~80-line commented-out iterative calibration of pop_cutoff,
        # min_night_lights, max_grid_dist, max_road_dist and pop_cutoff2 was
        # removed here; recover it from version control if ever needed.)
        logging.info('The modelled electrification rate achieved is {}, with urban ratio:{} and rural ratio: {}. '
                     'If this is not acceptable please revise this part of the algorithm'.format(elec_modelled,urban_elec_ratio,rural_elec_ratio))
        condition = 1
    # Seed the start-year status columns from the calibrated current status.
    self.df[SET_ELEC_FUTURE_GRID + "{}".format(start_year)] = \
        self.df.apply(lambda row: 1 if row[SET_ELEC_CURRENT] == 1 else 0, axis=1)
    self.df[SET_ELEC_FUTURE_OFFGRID + "{}".format(start_year)] = self.df.apply(lambda row: 0, axis=1)
    self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(start_year)] = \
        self.df.apply(lambda row: 1 if row[SET_ELEC_FUTURE_GRID + "{}".format(start_year)] == 1 or
                                       row[SET_ELEC_FUTURE_OFFGRID + "{}".format(start_year)] == 1 else 0, axis=1)
    # 99 marks "not yet decided" in the final-code column.
    self.df[SET_ELEC_FINAL_CODE + "{}".format(start_year)] = \
        self.df.apply(lambda row: 1 if row[SET_ELEC_CURRENT] == 1 else 99, axis=1)

    return min_night_lights, dist_to_trans, max_grid_dist, max_road_dist, elec_modelled, pop_cutoff, pop_cutoff2, rural_elec_ratio, urban_elec_ratio
@staticmethod
def separate_elec_status(elec_status):
"""
Separate out the electrified and unelectrified states from list.
"""
electrified = []
unelectrified = []
for i, status in enumerate(elec_status):
if status:
electrified.append(i)
else:
unelectrified.append(i)
return electrified, unelectrified
@staticmethod
def get_2d_hash_table(x, y, unelectrified, distance_limit):
"""
Generates the 2D Hash Table with the unelectrified locations hashed into the table for easy O(1) access.
"""
hash_table = defaultdict(lambda: defaultdict(list))
for unelec_row in unelectrified:
hash_x = int(x[unelec_row] / distance_limit)
hash_y = int(y[unelec_row] / distance_limit)
hash_table[hash_x][hash_y].append(unelec_row)
return hash_table
@staticmethod
def get_unelectrified_rows(hash_table, elec_row, x, y, distance_limit):
"""
Returns all the unelectrified locations close to the electrified location
based on the distance boundary limit specified by asking the 2D hash table.
"""
unelec_list = []
hash_x = int(x[elec_row] / distance_limit)
hash_y = int(y[elec_row] / distance_limit)
unelec_list.extend(hash_table.get(hash_x, {}).get(hash_y, []))
unelec_list.extend(hash_table.get(hash_x, {}).get(hash_y - 1, []))
unelec_list.extend(hash_table.get(hash_x, {}).get(hash_y + 1, []))
unelec_list.extend(hash_table.get(hash_x + 1, {}).get(hash_y, []))
unelec_list.extend(hash_table.get(hash_x + 1, {}).get(hash_y - 1, []))
unelec_list.extend(hash_table.get(hash_x + 1, {}).get(hash_y + 1, []))
unelec_list.extend(hash_table.get(hash_x - 1, {}).get(hash_y, []))
unelec_list.extend(hash_table.get(hash_x - 1, {}).get(hash_y - 1, []))
unelec_list.extend(hash_table.get(hash_x - 1, {}).get(hash_y + 1, []))
return unelec_list
def pre_electrification(self, grid_calc, grid_price, year, time_step, start_year):
"""" ... """
logging.info('Define the initial electrification status')
# Update electrification status based on already existing
if (year - time_step) == start_year:
self.df[SET_ELEC_FUTURE_GRID + "{}".format(year)] = \
self.df.apply(lambda row: 1 if row[SET_ELEC_FUTURE_GRID + "{}".format(year - time_step)] == 1
else 0,
axis=1)
else:
self.df[SET_ELEC_FUTURE_GRID + "{}".format(year)] = \
self.df.apply(lambda row: 1 if row[SET_ELEC_FUTURE_GRID + "{}".format(year - time_step)] == 1 or
(row[SET_ELEC_FINAL_CODE + "{}".format(year - time_step)] == 1 and
row[SET_LIMIT + "{}".format(year - time_step)] == 1)
else 0,
axis=1)
if (year - time_step) == start_year:
self.df[SET_ELEC_FUTURE_OFFGRID + "{}".format(year)] = \
self.df.apply(lambda row: 1 if row[SET_ELEC_FUTURE_OFFGRID + "{}".format(year - time_step)] == 1
else 0,
axis=1)
else:
self.df[SET_ELEC_FUTURE_OFFGRID + "{}".format(year)] = \
self.df.apply(lambda row: 1 if (row[SET_ELEC_FUTURE_OFFGRID + "{}".format(year - time_step)] == 1 and
row[SET_ELEC_FUTURE_GRID + "{}".format(year)] != 1) or
(row[SET_ELEC_FINAL_CODE + "{}".format(year - time_step)] != 1 and
row[SET_LIMIT + "{}".format(year - time_step)] == 1)
else 0,
axis=1)
if (year - time_step) == start_year:
self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year)] = \
self.df.apply(lambda row: 1 if row[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 1
else 0,
axis=1)
else:
self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year)] = \
self.df.apply(lambda row: 1 if (row[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 1) or
(row[SET_ELEC_FUTURE_GRID + "{}".format(year)] == 1) or
(row[SET_ELEC_FUTURE_OFFGRID + "{}".format(year)] == 1)
else 0,
axis=1)
self.df[SET_LCOE_GRID + "{}".format(year)] = \
self.df.apply(lambda row: grid_price if row[SET_ELEC_FUTURE_GRID + "{}".format(year)] == 1
else 99,
axis=1)
def elec_extension(self, grid_calc, max_dist, year, start_year, end_year, timestep, grid_cap_gen_limit):
"""
Iterate through all electrified settlements and find which settlements can be economically connected to the grid
Repeat with newly electrified settlements until no more are added
"""
new_grid_capacity = 0
grid_capacity_limit = grid_cap_gen_limit # kW per 5 years
# x = (self.df[SET_X]/1000).tolist()
# y = (self.df[SET_Y]/1000).tolist()
x = (self.df[SET_X_DEG]).tolist()
y = (self.df[SET_Y_DEG]).tolist()
pop = self.df[SET_POP + "{}".format(year)].tolist()
# prev_pop = self.df[SET_POP + "{}".format(year - timestep)].tolist()
confl = self.df[SET_CONFLICT].tolist()
travl = self.df[SET_TRAVEL_HOURS].tolist()
enerperhh = self.df[SET_ENERGY_PER_CELL + "{}".format(year)]
nupppphh = self.df[SET_NUM_PEOPLE_PER_HH]
grid_cell_area = self.df['GridCellArea']
prev_code = self.df[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)]
new_connections = self.df[SET_NEW_CONNECTIONS + "{}".format(year)]
total_energy_per_cell = self.df[SET_TOTAL_ENERGY_PER_CELL]
if year-timestep == start_year:
elecorder = self.df[SET_ELEC_ORDER].tolist()
else:
elecorder = self.df[SET_ELEC_ORDER + "{}".format(year - timestep)].tolist()
# urban = self.df[SET_URBAN].tolist()
grid_penalty_ratio = self.df[SET_GRID_PENALTY].tolist()
status = self.df[SET_ELEC_FUTURE_GRID + "{}".format(year)].tolist()
min_code_lcoes = self.df[SET_MIN_OFFGRID_LCOE + "{}".format(year)].tolist()
new_lcoes = self.df[SET_LCOE_GRID + "{}".format(year)].tolist()
grid_reach = self.df[SET_GRID_REACH_YEAR].tolist()
urban_initially_electrified = sum(self.df.loc[
(self.df[SET_ELEC_FUTURE_GRID + "{}".format(year - timestep)] == 1) & (
self.df[SET_URBAN] == 2)][
SET_ENERGY_PER_CELL + "{}".format(year)])
rural_initially_electrified = sum(self.df.loc[
(self.df[SET_ELEC_FUTURE_GRID + "{}".format(year - timestep)] == 1) & (
self.df[SET_URBAN] < 2)][
SET_ENERGY_PER_CELL + "{}".format(year)])
consumption = rural_initially_electrified + urban_initially_electrified
average_load = consumption / (1 - grid_calc.distribution_losses) / HOURS_PER_YEAR # kW
peak_load = average_load / grid_calc.base_to_peak_load_ratio # kW
grid_capacity_limit -= peak_load
cell_path_real = list(np.zeros(len(status)).tolist())
cell_path_adjusted = list(np.zeros(len(status)).tolist())
electrified, unelectrified = self.separate_elec_status(status)
close = []
elec_nodes2 = []
changes = []
for elec in electrified:
elec_nodes2.append((x[elec], y[elec]))
elec_nodes2 = np.asarray(elec_nodes2)
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 6371 # Radius of earth in kilometers. Use 3956 for miles
return c * r
def closest_elec(unelec_node, elec_nodes):
# R = 3959.87433 # this is in miles. For Earth radius in kilometers use 6372.8 km
#
# # Convert decimal degrees to Radians:
# lon1 = np.radians(unelec_node_lon.values)
# lat1 = np.radians(unelec_node_lat.values)
# lon2 = np.radians(elec_nodes_lon.values)
# lat2 = np.radians(elec_nodes_lat.values)
#
# # Implementing Haversine Formula:
# dlon = np.subtract(lon2, lon1)
# dlat = np.subtract(lat2, lat1)
#
# a = np.add(np.power(np.sin(np.divide(dlat, 2)), 2),
# np.multiply(np.cos(lat1),
# np.multiply(np.cos(lat2),
# np.power(np.sin(np.divide(dlon, 2)), 2))))
# c = np.multiply(2, np.arcsin(np.sqrt(a)))
# r = 6371
#
# dist_2 = c * r
deltas = elec_nodes - unelec_node
dist_2 = np.einsum('ij,ij->i', deltas, deltas)
min_dist = np.argmin(dist_2)
return min_dist
logging.info('Initially {} electrified'.format(len(electrified)))
loops = 1
for unelec in unelectrified:
grid_lcoe = 99
if year >= grid_reach[unelec]:
consumption = enerperhh[unelec] # kWh/year
average_load = consumption / (1 - grid_calc.distribution_losses) / HOURS_PER_YEAR # kW
peak_load = average_load / grid_calc.base_to_peak_load_ratio # kW
node = (x[unelec], y[unelec])
closest_elec_node = closest_elec(node, elec_nodes2)
dist = haversine(x[electrified[closest_elec_node]], y[electrified[closest_elec_node]], x[unelec], y[unelec])
# dist = sqrt((x[electrified[closest_elec_node]] - x[unelec]) ** 2
# + (y[electrified[closest_elec_node]] - y[unelec]) ** 2)
dist_adjusted = grid_penalty_ratio[unelec] * dist
if dist <= max_dist:
if year-timestep == start_year:
elec_loop_value = 0
else:
elec_loop_value = elecorder[electrified[closest_elec_node]] + 1
grid_lcoe = grid_calc.get_lcoe(energy_per_cell=enerperhh[unelec],
start_year=year-timestep,
end_year=end_year,
people=pop[unelec],
new_connections=new_connections[unelec],
total_energy_per_cell=total_energy_per_cell[unelec],
prev_code=prev_code[unelec],
num_people_per_hh=nupppphh[unelec],
grid_cell_area=grid_cell_area[unelec],
conf_status=confl[unelec],
travel_hours=travl[unelec],
additional_mv_line_length=dist_adjusted,
elec_loop=elec_loop_value)
if grid_lcoe < min_code_lcoes[unelec]:
if (grid_lcoe < new_lcoes[unelec]) and (new_grid_capacity + peak_load < grid_capacity_limit):
new_lcoes[unelec] = grid_lcoe
cell_path_real[unelec] = dist
cell_path_adjusted[unelec] = dist_adjusted
new_grid_capacity += peak_load
elecorder[unelec] = elec_loop_value
if unelec not in changes:
changes.append(unelec)
else:
close.append(unelec)
else:
close.append(unelec)
else:
close.append(unelec)
electrified = changes[:]
unelectrified = close
while len(electrified) > 0:
logging.info('Electrification loop {} with {} electrified'.format(loops, len(electrified)))
loops += 1
hash_table = self.get_2d_hash_table(x, y, electrified, max_dist)
elec_nodes2 = []
for elec in electrified:
elec_nodes2.append((x[elec], y[elec]))
elec_nodes2 = np.asarray(elec_nodes2)
changes = []
if len(elec_nodes2) > 0:
for unelec in unelectrified:
grid_lcoe = 99
if year >= grid_reach[unelec]:
consumption = enerperhh[unelec] # kWh/year
average_load = consumption / (1 - grid_calc.distribution_losses) / HOURS_PER_YEAR # kW
peak_load = average_load / grid_calc.base_to_peak_load_ratio # kW
node = (x[unelec], y[unelec])
closest_elec_node = closest_elec(node, elec_nodes2)
dist = haversine(x[electrified[closest_elec_node]], y[electrified[closest_elec_node]], x[unelec], y[unelec])
# dist = sqrt((x[electrified[closest_elec_node]] - x[unelec]) ** 2
# + (y[electrified[closest_elec_node]] - y[unelec]) ** 2)
dist_adjusted = grid_penalty_ratio[unelec] * dist
prev_dist = cell_path_real[closest_elec_node]
if dist + prev_dist < max_dist:
grid_lcoe = grid_calc.get_lcoe(energy_per_cell=enerperhh[unelec],
start_year=year - timestep,
end_year=end_year,
people=pop[unelec],
new_connections=new_connections[unelec],
total_energy_per_cell=total_energy_per_cell[unelec],
prev_code=prev_code[unelec],
num_people_per_hh=nupppphh[unelec],
grid_cell_area=grid_cell_area[unelec],
conf_status=confl[unelec],
travel_hours=travl[unelec],
additional_mv_line_length=dist_adjusted,
elec_loop=elecorder[electrified[closest_elec_node]] + 1)
if grid_lcoe < min_code_lcoes[unelec]:
if (grid_lcoe < new_lcoes[unelec]) and \
(new_grid_capacity + peak_load < grid_capacity_limit):
new_lcoes[unelec] = grid_lcoe
cell_path_real[unelec] = dist + prev_dist
cell_path_adjusted[unelec] = dist_adjusted
elecorder[unelec] = elecorder[electrified[closest_elec_node]] + 1
new_grid_capacity += peak_load
if unelec not in changes:
changes.append(unelec)
elif new_grid_capacity + peak_load < grid_capacity_limit:
electrified_hashed = self.get_unelectrified_rows(hash_table, unelec, x, y, max_dist)
grid_capacity_addition_loop = 0
for elec in electrified_hashed:
grid_lcoe = 99
prev_dist = cell_path_real[elec]
dist = haversine(x[elec], y[elec], x[unelec], y[unelec])
# dist = sqrt((x[elec] - x[unelec]) ** 2 + (y[elec] - y[unelec]) ** 2)
dist_adjusted = grid_penalty_ratio[unelec] * dist
if prev_dist + dist < max_dist:
grid_lcoe = grid_calc.get_lcoe(energy_per_cell=enerperhh[unelec],
start_year=year - timestep,
end_year=end_year,
people=pop[unelec],
new_connections=new_connections[unelec],
total_energy_per_cell=total_energy_per_cell[unelec],
prev_code=prev_code[unelec],
num_people_per_hh=nupppphh[unelec],
grid_cell_area=grid_cell_area[unelec],
conf_status=confl[unelec],
travel_hours=travl[unelec],
additional_mv_line_length=dist_adjusted,
elec_loop=elecorder[elec] + 1)
if grid_lcoe < min_code_lcoes[unelec]:
if grid_lcoe < new_lcoes[unelec]:
new_lcoes[unelec] = grid_lcoe
cell_path_real[unelec] = dist + prev_dist
cell_path_adjusted[unelec] = dist_adjusted
elecorder[unelec] = elecorder[elec] + 1
if grid_capacity_addition_loop == 0:
new_grid_capacity += peak_load
grid_capacity_addition_loop += 1
if unelec not in changes:
changes.append(unelec)
electrified = changes[:]
unelectrified = set(unelectrified).difference(electrified)
return new_lcoes, cell_path_adjusted, elecorder
def run_elec(self, grid_calc, max_dist, year, start_year, end_year, timestep, grid_cap_gen_limit):
"""
Runs the grid extension algorithm
"""
logging.info('Electrification algorithm starts running')
self.df[SET_LCOE_GRID + "{}".format(year)], self.df[SET_MIN_GRID_DIST + "{}".format(year)], self.df[SET_ELEC_ORDER + "{}".format(year)] = self.elec_extension(grid_calc, max_dist, year, start_year, end_year, timestep, grid_cap_gen_limit)
    def set_scenario_variables(self, year, num_people_per_hh_rural, num_people_per_hh_urban, time_step, start_year,
                               urban_elec_ratio, rural_elec_ratio):
        """
        Set the basic scenario parameters that differ based on urban/rural
        so that they are in the table and can be read directly to calculate LCOEs.

        Computes per settlement: new connections for *year*, household size,
        per-capita demand (interactively chosen World Bank tier plus optional
        productive-use add-ons) and the resulting energy-demand columns.
        NOTE(review): this method calls input() and therefore requires an
        interactive session when PerCapitaDemand is not yet populated.
        """
        # self.df['GridCellArea'] = 1
        logging.info('Calculate new connections')
        # Calculate new connections for grid related purposes
        if year - time_step == start_year:
            # First step: already-electrified settlements only add the growth
            # beyond the electrified share of last step's population
            # (urban and rural electrification ratios differ).
            self.df.loc[(self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 1) & (self.df[SET_URBAN] == 2),
                        SET_NEW_CONNECTIONS + "{}".format(year)] = \
                (self.df[SET_POP + "{}".format(year)] - urban_elec_ratio * self.df[SET_POP + "{}".format(year - time_step)])
            self.df.loc[(self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 1) & (self.df[SET_URBAN] < 2),
                        SET_NEW_CONNECTIONS + "{}".format(year)] = \
                (self.df[SET_POP + "{}".format(year)] - rural_elec_ratio * self.df[SET_POP + "{}".format(year - time_step)])
            # Unelectrified settlements must connect their whole population.
            self.df.loc[self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 0,
                        SET_NEW_CONNECTIONS + "{}".format(year)] = self.df[SET_POP + "{}".format(year)]
            # Shrinking populations cannot yield negative connections.
            self.df.loc[self.df[SET_NEW_CONNECTIONS + "{}".format(year)] < 0,
                        SET_NEW_CONNECTIONS + "{}".format(year)] = 0
        else:
            # Later steps: electrified settlements add only population growth.
            self.df.loc[self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 1,
                        SET_NEW_CONNECTIONS + "{}".format(year)] = \
                (self.df[SET_POP + "{}".format(year)] - self.df[SET_POP + "{}".format(year - time_step)])
            # Never electrified: whole population counts as new connections.
            self.df.loc[(self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 0) & (self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(start_year)] == 0),
                        SET_NEW_CONNECTIONS + "{}".format(year)] = self.df[SET_POP + "{}".format(year)]
            # Electrified at start but not at the previous step: subtract the
            # initially electrified share (urban/rural ratios differ).
            self.df.loc[(self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 0) & (self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(start_year)] == 1) & (self.df[SET_URBAN] == 2),
                        SET_NEW_CONNECTIONS + "{}".format(year)] = self.df[SET_POP + "{}".format(year)] - urban_elec_ratio * self.df[SET_POP + "{}".format(start_year)]
            self.df.loc[(self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year - time_step)] == 0) & (self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(start_year)] == 1) & (self.df[SET_URBAN] < 2),
                        SET_NEW_CONNECTIONS + "{}".format(year)] = self.df[SET_POP + "{}".format(year)] - rural_elec_ratio * self.df[SET_POP + "{}".format(start_year)]
            self.df.loc[self.df[SET_NEW_CONNECTIONS + "{}".format(year)] < 0,
                        SET_NEW_CONNECTIONS + "{}".format(year)] = 0

        logging.info('Setting electrification demand as per target per year')

        # Only prompt for demand tiers when PerCapitaDemand has not been set.
        if max(self.df['PerCapitaDemand']) == 0:
            # Residential demand per tier (kWh/person/year), World Bank tiers 1-5.
            wb_tiers_all = {1: 7.738, 2: 43.8, 3: 160.6, 4: 423.4, 5: 598.6}
            print("""\nWorld Bank Tiers of Electricity Access
                      1: {} kWh/person/year
                      2: {} kWh/person/year
                      3: {} kWh/person/year
                      4: {} kWh/person/year
                      5: {} kWh/person/year
                      6: Customized kWh/person/year""".format(wb_tiers_all[1], wb_tiers_all[2], wb_tiers_all[3],
                                                              wb_tiers_all[4], wb_tiers_all[5]))
            # Urban centers (code 2) only exist in 3-level urban classifications.
            if max(self.df[SET_URBAN]) == 2:
                wb_tier_urban_centers = int(input('Enter the tier number for urban centers: '))
                wb_tier_urban_clusters = int(input('Enter the tier number for urban clusters: '))
                wb_tier_rural = int(input('Enter the tier number for rural: '))
            else:
                wb_tier_urban_clusters = int(input('Enter the tier number for urban: '))
                wb_tier_rural = int(input('Enter the tier number for rural: '))
                wb_tier_urban_centers = 5
            # Tier 6 maps to the 'Custom' residential-demand column suffix.
            if wb_tier_urban_centers == 6:
                wb_tier_urban_centers = 'Custom'
            if wb_tier_urban_clusters == 6:
                wb_tier_urban_clusters = 'Custom'
            if wb_tier_rural == 6:
                wb_tier_rural = 'Custom'
            self.df['PerCapitaDemand'] = 0

        # NOTE(review): wb_tier_rural / wb_tier_urban_centers are only bound inside
        # the "if max(...) == 0" branch above; if PerCapitaDemand is pre-populated,
        # the column lookups below raise NameError — confirm the intended call
        # pattern before relying on a pre-filled PerCapitaDemand.
        # Define if a settlement is Urban or Rural
        self.df.loc[self.df[SET_URBAN] == 0, SET_NUM_PEOPLE_PER_HH] = num_people_per_hh_rural
        #self.df.loc[self.df[SET_URBAN] == 1, SET_NUM_PEOPLE_PER_HH] = num_people_per_hh_urban
        self.df.loc[self.df[SET_URBAN] == 2, SET_NUM_PEOPLE_PER_HH] = num_people_per_hh_urban

        # Define per capita residential demand
        # self.df['PerCapitaDemand'] = self.df['ResidentialDemandTier1.' + str(wb_tier_urban)]
        self.df.loc[self.df[SET_URBAN] == 0, 'PerCapitaDemand'] = self.df['ResidentialDemandTier' + str(wb_tier_rural)]
        #self.df.loc[self.df[SET_URBAN] == 1, 'PerCapitaDemand'] = self.df['ResidentialDemandTier' + str(wb_tier_urban_clusters)]
        self.df.loc[self.df[SET_URBAN] == 2, 'PerCapitaDemand'] = self.df['ResidentialDemandTier' + str(wb_tier_urban_centers)]
        # if max(self.df[SET_URBAN]) == 2:
        #     self.df.loc[self.df[SET_URBAN] == 2, 'PerCapitaDemand'] = self.df['ResidentialDemandTier1.' + str(wb_tier_urban_center)]

        # Add commercial demand
        # (prompt text below is runtime output and intentionally unchanged,
        # including the 'agrcultural' spelling)
        agri = True if 'y' in input('Include agrcultural demand? <y/n> ') else False
        if agri:
            self.df['PerCapitaDemand'] += self.df['AgriDemand']

        commercial = True if 'y' in input('Include commercial demand? <y/n> ') else False
        if commercial:
            self.df['PerCapitaDemand'] += self.df['CommercialDemand']

        health = True if 'y' in input('Include health demand? <y/n> ') else False
        if health:
            self.df['PerCapitaDemand'] += self.df['HealthDemand']

        edu = True if 'y' in input('Include educational demand? <y/n> ') else False
        if edu:
            self.df['PerCapitaDemand'] += self.df['EducationDemand']

        # Energy demand of this year's new connections, by urban class.
        self.df.loc[self.df[SET_URBAN] == 0, SET_ENERGY_PER_CELL + "{}".format(year)] = \
            self.df['PerCapitaDemand'] * self.df[SET_NEW_CONNECTIONS + "{}".format(year)]
        self.df.loc[self.df[SET_URBAN] == 1, SET_ENERGY_PER_CELL + "{}".format(year)] = \
            self.df['PerCapitaDemand'] * self.df[SET_NEW_CONNECTIONS + "{}".format(year)]
        self.df.loc[self.df[SET_URBAN] == 2, SET_ENERGY_PER_CELL + "{}".format(year)] = \
            self.df['PerCapitaDemand'] * self.df[SET_NEW_CONNECTIONS + "{}".format(year)]

        # Total energy demand of the whole settlement population.
        # if year - time_step == start_year:
        self.df.loc[self.df[SET_URBAN] == 0, SET_TOTAL_ENERGY_PER_CELL] = \
            self.df['PerCapitaDemand'] * self.df[SET_POP + "{}".format(year)]
        self.df.loc[self.df[SET_URBAN] == 1, SET_TOTAL_ENERGY_PER_CELL] = \
            self.df['PerCapitaDemand'] * self.df[SET_POP + "{}".format(year)]
        self.df.loc[self.df[SET_URBAN] == 2, SET_TOTAL_ENERGY_PER_CELL] = \
            self.df['PerCapitaDemand'] * self.df[SET_POP + "{}".format(year)]
        # self.df[SET_TOTAL_ENERGY_PER_CELL] = self.df[SET_ENERGY_PER_CELL + "{}".format(year)]
        # else:
        #     self.df[SET_TOTAL_ENERGY_PER_CELL] += self.df[SET_ENERGY_PER_CELL + "{}".format(year)]
def grid_reach_estimate(self, start_year, gridspeed):
""" Estimates the year of grid arrival based on geospatial characteristics
and grid expansion speed in km/year"""
# logging.info('Estimate year of grid reach')
# self.df[SET_GRID_REACH_YEAR] = 0
# self.df.loc[self.df[SET_ELEC_FUTURE_GRID + "{}".format(start_year)] == 0, SET_GRID_REACH_YEAR] = \
# self.df['PlannedHVLineDist'] * self.df[SET_GRID_PENALTY] / gridspeed
self.df[SET_GRID_REACH_YEAR] = \
self.df.apply(lambda row: int(start_year +
row['PlannedHVLineDist'] * row[SET_COMBINED_CLASSIFICATION] / gridspeed)
if row[SET_ELEC_FUTURE_GRID + "{}".format(start_year)] == 0
else start_year,
axis=1)
    def calculate_off_grid_lcoes(self, mg_hydro_calc, mg_wind_calc, mg_pv_calc,
                                 sa_pv_calc, mg_diesel_calc, sa_diesel_calc, year, start_year, end_year, timestep):
        """
        Calculate the LCOEs for all off-grid technologies, and calculate the minimum, so that the electrification
        algorithm knows where the bar is before it becomes economical to electrify.

        Writes one LCOE column per technology for *year*, then the minimum
        off-grid technology name, its LCOE and its numeric code (2-7).
        """
        # A df with all hydropower sites, to ensure that they aren't assigned more capacity than is available
        hydro_used = 'HydropowerUsed'  # the amount of the hydro potential that has been assigned
        hydro_df = self.df[[SET_HYDRO_FID, SET_HYDRO]].drop_duplicates(subset=SET_HYDRO_FID)
        hydro_df[hydro_used] = 0
        hydro_df = hydro_df.set_index(SET_HYDRO_FID)

        max_hydro_dist = 5  # the max distance in km to consider hydropower viable

        def hydro_lcoe(row):
            """LCOE of a mini-grid hydro connection for one settlement row.

            Mutates the enclosing hydro_df to track claimed capacity, so the
            result depends on the order in which rows are applied: earlier
            rows claim capacity first, later rows may find the site exhausted.
            """
            if row[SET_HYDRO_DIST] < max_hydro_dist:
                # calculate the capacity that would be added by the settlement
                additional_capacity = ((row[SET_NEW_CONNECTIONS + "{}".format(year)] *
                                        row[SET_ENERGY_PER_CELL + "{}".format(year)]) /
                                       (HOURS_PER_YEAR * mg_hydro_calc.capacity_factor *
                                        mg_hydro_calc.base_to_peak_load_ratio))
                # and add it to the tracking df
                hydro_df.loc[row[SET_HYDRO_FID], hydro_used] += additional_capacity
                # if it exceeds the available capacity, it's not an option
                if hydro_df.loc[row[SET_HYDRO_FID], hydro_used] > hydro_df.loc[row[SET_HYDRO_FID], SET_HYDRO]:
                    return 99
                else:
                    return mg_hydro_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
                                                  start_year=year-timestep,
                                                  end_year=end_year,
                                                  people=row[SET_POP + "{}".format(year)],
                                                  new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
                                                  total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                                                  prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
                                                  num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                                  grid_cell_area=row['GridCellArea'],
                                                  conf_status=row[SET_CONFLICT],
                                                  mv_line_length=row[SET_HYDRO_DIST])
            else:
                # Site too far away: 99 acts as an "infinite" LCOE sentinel.
                return 99

        logging.info('Calculate minigrid hydro LCOE')
        self.df[SET_LCOE_MG_HYDRO + "{}".format(year)] = self.df.apply(hydro_lcoe, axis=1)

        num_hydro_limited = hydro_df.loc[hydro_df[hydro_used] > hydro_df[SET_HYDRO]][SET_HYDRO].count()
        logging.info('{} potential hydropower sites were utilised to maximum capacity'.format(num_hydro_limited))

        # MG PV is only considered where GHI exceeds 1000; otherwise LCOE = 99.
        logging.info('Calculate minigrid PV LCOE')
        self.df[SET_LCOE_MG_PV + "{}".format(year)] = self.df.apply(
            lambda row: mg_pv_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
                                            start_year=year - timestep,
                                            end_year=end_year,
                                            people=row[SET_POP + "{}".format(year)],
                                            new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
                                            total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                                            prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
                                            num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                            grid_cell_area=row['GridCellArea'],
                                            conf_status=row[SET_CONFLICT],
                                            capacity_factor=row[SET_GHI] / HOURS_PER_YEAR)
            if row[SET_GHI] > 1000
            else 99, axis=1)

        # MG wind is only considered where the capacity factor exceeds 0.1.
        logging.info('Calculate minigrid wind LCOE')
        self.df[SET_LCOE_MG_WIND + "{}".format(year)] = self.df.apply(
            lambda row: mg_wind_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
                                              start_year=year - timestep,
                                              end_year=end_year,
                                              people=row[SET_POP + "{}".format(year)],
                                              new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
                                              total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                                              prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
                                              num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                              grid_cell_area=row['GridCellArea'],
                                              conf_status=row[SET_CONFLICT],
                                              capacity_factor=row[SET_WINDCF])
            if row[SET_WINDCF] > 0.1 else 99,
            axis=1)

        logging.info('Calculate minigrid diesel LCOE')
        self.df[SET_LCOE_MG_DIESEL + "{}".format(year)] = self.df.apply(
            lambda row: mg_diesel_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
                                                start_year=year - timestep,
                                                end_year=end_year,
                                                people=row[SET_POP + "{}".format(year)],
                                                new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
                                                total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                                                prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
                                                num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                                grid_cell_area=row['GridCellArea'],
                                                conf_status=row[SET_CONFLICT],
                                                travel_hours=row[SET_TRAVEL_HOURS]), axis=1)

        logging.info('Calculate standalone diesel LCOE')
        self.df[SET_LCOE_SA_DIESEL + "{}".format(year)] = self.df.apply(
            lambda row: sa_diesel_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
                                                start_year=year - timestep,
                                                end_year=end_year,
                                                people=row[SET_POP + "{}".format(year)],
                                                new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
                                                total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                                                prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
                                                num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                                grid_cell_area=row['GridCellArea'],
                                                conf_status=row[SET_CONFLICT],
                                                travel_hours=row[SET_TRAVEL_HOURS]), axis=1)

        # Standalone PV has the same GHI > 1000 viability threshold as MG PV.
        logging.info('Calculate standalone PV LCOE')
        self.df[SET_LCOE_SA_PV + "{}".format(year)] = self.df.apply(
            lambda row: sa_pv_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
                                            start_year=year - timestep,
                                            end_year=end_year,
                                            people=row[SET_POP + "{}".format(year)],
                                            new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
                                            total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                                            prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
                                            num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                            grid_cell_area=row['GridCellArea'],
                                            conf_status=row[SET_CONFLICT],
                                            capacity_factor=row[SET_GHI] / HOURS_PER_YEAR) if row[SET_GHI] > 1000
            else 99,
            axis=1)

        # Column name of the cheapest off-grid technology per settlement.
        logging.info('Determine minimum technology (off-grid)')
        self.df[SET_MIN_OFFGRID + "{}".format(year)] = self.df[[SET_LCOE_SA_DIESEL + "{}".format(year),
                                                                SET_LCOE_SA_PV + "{}".format(year),
                                                                SET_LCOE_MG_WIND + "{}".format(year),
                                                                SET_LCOE_MG_DIESEL + "{}".format(year),
                                                                SET_LCOE_MG_PV + "{}".format(year),
                                                                SET_LCOE_MG_HYDRO + "{}".format(year)]].T.idxmin()

        logging.info('Determine minimum tech LCOE')
        self.df[SET_MIN_OFFGRID_LCOE + "{}".format(year)] = \
            self.df.apply(lambda row: (row[row[SET_MIN_OFFGRID + "{}".format(year)]]), axis=1)

        # Numeric technology codes (grid itself is 1, handled elsewhere).
        codes = {SET_LCOE_MG_HYDRO + "{}".format(year): 7,
                 SET_LCOE_MG_WIND + "{}".format(year): 6,
                 SET_LCOE_MG_PV + "{}".format(year): 5,
                 SET_LCOE_MG_DIESEL + "{}".format(year): 4,
                 SET_LCOE_SA_DIESEL + "{}".format(year): 2,
                 SET_LCOE_SA_PV + "{}".format(year): 3}

        self.df.loc[self.df[SET_MIN_OFFGRID + "{}".format(year)] == SET_LCOE_MG_HYDRO + "{}".format(
            year), SET_MIN_OFFGRID_CODE + "{}".format(year)] = codes[SET_LCOE_MG_HYDRO + "{}".format(year)]
        self.df.loc[self.df[SET_MIN_OFFGRID + "{}".format(year)] == SET_LCOE_SA_PV + "{}".format(
            year), SET_MIN_OFFGRID_CODE + "{}".format(year)] = codes[SET_LCOE_SA_PV + "{}".format(year)]
        self.df.loc[self.df[SET_MIN_OFFGRID + "{}".format(year)] == SET_LCOE_MG_WIND + "{}".format(
            year), SET_MIN_OFFGRID_CODE + "{}".format(year)] = codes[SET_LCOE_MG_WIND + "{}".format(year)]
        self.df.loc[self.df[SET_MIN_OFFGRID + "{}".format(year)] == SET_LCOE_MG_PV + "{}".format(
            year), SET_MIN_OFFGRID_CODE + "{}".format(year)] = codes[SET_LCOE_MG_PV + "{}".format(year)]
        self.df.loc[self.df[SET_MIN_OFFGRID + "{}".format(year)] == SET_LCOE_MG_DIESEL + "{}".format(
            year), SET_MIN_OFFGRID_CODE + "{}".format(year)] = codes[SET_LCOE_MG_DIESEL + "{}".format(year)]
        self.df.loc[self.df[SET_MIN_OFFGRID + "{}".format(year)] == SET_LCOE_SA_DIESEL + "{}".format(
            year), SET_MIN_OFFGRID_CODE + "{}".format(year)] = codes[SET_LCOE_SA_DIESEL + "{}".format(year)]
def results_columns(self, mg_hydro_calc, mg_wind_calc, mg_pv_calc,
sa_pv_calc, mg_diesel_calc, sa_diesel_calc, grid_calc, year):
"""
Once the grid extension algorithm has been run, determine the minimum overall option, and calculate the
capacity and investment requirements for each settlement
"""
logging.info('Determine minimum overall')
self.df[SET_MIN_OVERALL + "{}".format(year)] = self.df[[SET_LCOE_GRID + "{}".format(year),
SET_LCOE_SA_DIESEL + "{}".format(year),
SET_LCOE_SA_PV + "{}".format(year),
SET_LCOE_MG_WIND + "{}".format(year),
SET_LCOE_MG_DIESEL + "{}".format(year),
SET_LCOE_MG_PV + "{}".format(year),
SET_LCOE_MG_HYDRO + "{}".format(year)]].T.idxmin()
logging.info('Determine minimum overall LCOE')
self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)] = \
self.df.apply(lambda row: (row[row[SET_MIN_OVERALL + "{}".format(year)]]), axis=1)
logging.info('Add technology codes')
codes = {SET_LCOE_GRID + "{}".format(year): 1,
SET_LCOE_MG_HYDRO + "{}".format(year): 7,
SET_LCOE_MG_WIND + "{}".format(year): 6,
SET_LCOE_MG_PV + "{}".format(year): 5,
SET_LCOE_MG_DIESEL + "{}".format(year): 4,
SET_LCOE_SA_DIESEL + "{}".format(year): 2,
SET_LCOE_SA_PV + "{}".format(year): 3}
self.df.loc[self.df[SET_MIN_OVERALL + "{}".format(year)] == SET_LCOE_GRID + "{}".format(year),
SET_MIN_OVERALL_CODE + "{}".format(year)] = codes[SET_LCOE_GRID + "{}".format(year)]
self.df.loc[self.df[SET_MIN_OVERALL + "{}".format(year)] == SET_LCOE_MG_HYDRO + "{}".format(year),
SET_MIN_OVERALL_CODE + "{}".format(year)] = codes[SET_LCOE_MG_HYDRO + "{}".format(year)]
self.df.loc[self.df[SET_MIN_OVERALL + "{}".format(year)] == SET_LCOE_SA_PV + "{}".format(year),
SET_MIN_OVERALL_CODE + "{}".format(year)] = codes[SET_LCOE_SA_PV + "{}".format(year)]
self.df.loc[self.df[SET_MIN_OVERALL + "{}".format(year)] == SET_LCOE_MG_WIND + "{}".format(year),
SET_MIN_OVERALL_CODE + "{}".format(year)] = codes[SET_LCOE_MG_WIND + "{}".format(year)]
self.df.loc[self.df[SET_MIN_OVERALL + "{}".format(year)] == SET_LCOE_MG_PV + "{}".format(year),
SET_MIN_OVERALL_CODE + "{}".format(year)] = codes[SET_LCOE_MG_PV + "{}".format(year)]
self.df.loc[self.df[SET_MIN_OVERALL + "{}".format(year)] == SET_LCOE_MG_DIESEL + "{}".format(year),
SET_MIN_OVERALL_CODE + "{}".format(year)] = codes[SET_LCOE_MG_DIESEL + "{}".format(year)]
self.df.loc[self.df[SET_MIN_OVERALL + "{}".format(year)] == SET_LCOE_SA_DIESEL + "{}".format(year),
SET_MIN_OVERALL_CODE + "{}".format(year)] = codes[SET_LCOE_SA_DIESEL + "{}".format(year)]
def calculate_investments(self, mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc,
sa_diesel_calc, grid_calc, year, end_year, timestep):
def res_investment_cost(row):
min_code = row[SET_MIN_OVERALL_CODE + "{}".format(year)]
if min_code == 2:
return sa_diesel_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
travel_hours=row[SET_TRAVEL_HOURS],
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 3:
return sa_pv_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
capacity_factor=row[SET_GHI] / HOURS_PER_YEAR,
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 6:
return mg_wind_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
capacity_factor=row[SET_WINDCF],
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 4:
return mg_diesel_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
travel_hours=row[SET_TRAVEL_HOURS],
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 5:
return mg_pv_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
capacity_factor=row[SET_GHI] / HOURS_PER_YEAR,
conf_status=row[SET_CONFLICT],
get_investment_cost=True)
elif min_code == 7:
return mg_hydro_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
mv_line_length=row[SET_HYDRO_DIST],
get_investment_cost=True)
elif min_code == 1:
return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + "{}".format(year)],
start_year=year - timestep,
end_year=end_year,
people=row[SET_POP + "{}".format(year)],
new_connections=row[SET_NEW_CONNECTIONS + "{}".format(year)],
total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
prev_code=row[SET_ELEC_FINAL_CODE + "{}".format(year - timestep)],
num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
grid_cell_area=row['GridCellArea'],
conf_status=row[SET_CONFLICT],
additional_mv_line_length=row[SET_MIN_GRID_DIST + "{}".format(year)],
elec_loop=row[SET_ELEC_ORDER + "{}".format(year)],
get_investment_cost=True)
else:
return 0
logging.info('Calculate investment cost')
self.df[SET_INVESTMENT_COST + "{}".format(year)] = self.df.apply(res_investment_cost, axis=1)
def apply_limitations(self, eleclimit, year, timestep):
    """Flag which settlements may be electrified in *year*.

    Sets SET_LIMIT<year> to 1 (allowed) / 0 (deferred) per settlement so
    that the electrified population share approaches *eleclimit* (a
    fraction in [0, 1]). `choice` below is hard-coded to 2, i.e. grid
    intensification is served first and any remaining headroom is filled
    in order of lowest investment per capita.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — confirm nesting (especially the choice-4 loop) against the
    original file.
    """
    logging.info('Determine electrification limits')
    if eleclimit == 1:
        # No cap: every settlement may be electrified this year.
        self.df[SET_LIMIT + "{}".format(year)] = 1
        elecrate = 1
    else:
        choice = 2  # hard-coded strategy selector; only branch 2 is active
        if choice == 1:  # Lowest investment/capita
            elecrate = 0
            min_investment = 0
            self.df['InvestmentCapita' + "{}".format(year)] = self.df[SET_INVESTMENT_COST + "{}".format(year)] / self.df[SET_NEW_CONNECTIONS + "{}".format(year)]
            # Raise the investment/capita threshold (in steps of 1) until the
            # population below it reaches ~99.9% of the limit.
            while elecrate < eleclimit:
                elecrate = sum(self.df[self.df['InvestmentCapita' + "{}".format(year)] < min_investment][SET_POP + "{}".format(year)]) / self.df[SET_POP + "{}".format(year)].sum()
                if elecrate < 0.999 * eleclimit:
                    min_investment += 1
                else:
                    break
            self.df.loc[self.df['InvestmentCapita' + "{}".format(year)] <= min_investment, SET_LIMIT + "{}".format(year)] = 1
            self.df.loc[self.df['InvestmentCapita' + "{}".format(year)] > min_investment, SET_LIMIT + "{}".format(year)] = 0
        elif choice == 2:  # Prioritize grid intensification then lowest investment/capita
            elecrate = 0
            min_investment = 0
            self.df['InvestmentCapita' + "{}".format(year)] = self.df[SET_INVESTMENT_COST + "{}".format(year)] / self.df[SET_NEW_CONNECTIONS + "{}".format(year)]
            # Population share already reachable through grid intensification
            # (settlements grid-connected in the previous step).
            if sum(self.df[self.df[SET_ELEC_FUTURE_GRID + "{}".format(year-timestep)] == 1][SET_POP + "{}".format(year)]) / self.df[SET_POP + "{}".format(year)].sum() < eleclimit:
                # Intensification alone does not reach the limit: reserve its
                # share, then fill the remainder cheapest-first among
                # settlements not already on the grid.
                eleclimit -= sum(self.df[self.df[SET_ELEC_FUTURE_GRID + "{}".format(year-timestep)] == 1][SET_POP + "{}".format(year)]) / self.df[SET_POP + "{}".format(year)].sum()
                while elecrate < 0.999 * eleclimit:
                    elecrate = sum(self.df[(self.df['InvestmentCapita' + "{}".format(year)] < min_investment) & (self.df[SET_ELEC_FUTURE_GRID + "{}".format(year-timestep)] == 0)][SET_POP + "{}".format(year)]) / self.df[SET_POP + "{}".format(year)].sum()
                    if elecrate < 0.999 * eleclimit:
                        min_investment += 1
                    else:
                        # Report the total achieved rate including the
                        # intensification share.
                        elecrate += sum(self.df[self.df[SET_ELEC_FUTURE_GRID + "{}".format(year-timestep)] == 1][SET_POP + "{}".format(year)]) / self.df[SET_POP + "{}".format(year)].sum()
                        break
                self.df.loc[(self.df[SET_ELEC_FUTURE_GRID + "{}".format(year - timestep)] == 1), SET_LIMIT + "{}".format(year)] = 1
                self.df.loc[(self.df['InvestmentCapita' + "{}".format(year)] <= min_investment) & (self.df[SET_ELEC_FUTURE_GRID + "{}".format(year-timestep)] == 0), SET_LIMIT + "{}".format(year)] = 1
                self.df.loc[(self.df['InvestmentCapita' + "{}".format(year)] > min_investment) & (self.df[SET_ELEC_FUTURE_GRID + "{}".format(year-timestep)] == 0), SET_LIMIT + "{}".format(year)] = 0
            else:
                # Intensification alone already exceeds the limit: cap within
                # the intensification candidates by investment/capita.
                while elecrate < eleclimit:
                    elecrate = sum(self.df[(self.df['InvestmentCapita' + "{}".format(year)] < min_investment) & (self.df[SET_ELEC_FUTURE_GRID + "{}".format(year-timestep)] == 1)][SET_POP + "{}".format(year)]) / self.df[SET_POP + "{}".format(year)].sum()
                    if elecrate < 0.999 * eleclimit:
                        min_investment += 1
                    else:
                        break
                self.df.loc[(self.df[SET_ELEC_FUTURE_GRID + "{}".format(year - timestep)] == 1) & (self.df['InvestmentCapita' + "{}".format(year)] <= min_investment), SET_LIMIT + "{}".format(year)] = 1
                self.df.loc[(self.df[SET_ELEC_FUTURE_GRID + "{}".format(year - timestep)] == 1) & (self.df['InvestmentCapita' + "{}".format(year)] > min_investment), SET_LIMIT + "{}".format(year)] = 0
                self.df.loc[(self.df[SET_ELEC_FUTURE_GRID + "{}".format(year - timestep)] == 0), SET_LIMIT + "{}".format(year)] = 0
        elif choice == 3:  # Prioritize lowest LCOE (Not tested)
            elecrate = 1
            min_lcoe = 0
            # Raise the LCOE threshold in 0.001 steps until the share drops
            # to within ~0.1% above the limit.
            while elecrate >= eleclimit:
                elecrate = sum(self.df[self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)] < min_lcoe][SET_POP + "{}".format(year)]) / self.df[SET_POP + "{}".format(year)].sum()
                if elecrate > 1.001 * eleclimit:
                    min_lcoe += 0.001
                else:
                    break
            self.df.loc[self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)] <= min_lcoe, SET_LIMIT + "{}".format(year)] = 1
            self.df.loc[self.df[SET_MIN_OVERALL_LCOE + "{}".format(year)] > min_lcoe, SET_LIMIT + "{}".format(year)] = 0
        elif choice == 4:  # Old method
            self.df[SET_LIMIT + "{}".format(year)] = \
                self.df.apply(lambda row: 1 if row[SET_ELEC_FUTURE_ACTUAL + "{}".format(year)] == 1 else 0, axis=1)
            conflictlimit = self.df[SET_CONFLICT].min()
            mintraveldistance = self.df[SET_TRAVEL_HOURS].min()
            # max_loop = self.df[SET_ELEC_ORDER + "{}".format(year)].max()
            max_loop = 0
            iteration = 0
            elecrate = sum(self.df[self.df[SET_LIMIT + "{}".format(year)] == 1][SET_POP + "{}".format(year)]) / \
                self.df[SET_POP + "{}".format(year)].sum()
            if elecrate < eleclimit:
                still_looking = True
            else:
                still_looking = False
                print("The investment cap does not allow further electrification expansion in year:{}".format(year))
            elec_loop = 99
            # Progressively relax the travel-time and conflict limits until
            # enough population is admitted.
            while still_looking:
                if elec_loop <= max_loop:
                    self.df.loc[(self.df[SET_LCOE_GRID + "{}".format(year)] < 99) &
                                (self.df[SET_ELEC_ORDER + "{}".format(year)] == elec_loop) &
                                (self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year)] != 1) &
                                (self.df[SET_TRAVEL_HOURS] < mintraveldistance) &
                                (self.df[SET_CONFLICT] <= conflictlimit), SET_LIMIT + "{}".format(year)] = 1
                else:
                    self.df.loc[(self.df[SET_ELEC_FUTURE_ACTUAL + "{}".format(year)] != 1) &
                                (self.df[SET_TRAVEL_HOURS] < mintraveldistance) &
                                (self.df[SET_CONFLICT] <= conflictlimit), SET_LIMIT + "{}".format(year)] = 1
                elecrate = sum(self.df[self.df[SET_LIMIT + "{}".format(year)] == 1][SET_POP + "{}".format(year)]) / \
                    self.df[SET_POP + "{}".format(year)].sum()
                iteration += 1
                if elecrate < 0.9999 * eleclimit:
                    mintraveldistance += 0.05
                    if iteration > 100:
                        mintraveldistance += 0.05
                    if iteration > 200:
                        mintraveldistance += 0.95
                    if iteration > 300:
                        # Restart the travel-time search with a relaxed
                        # conflict limit.
                        iteration = 0
                        conflictlimit += 1
                        mintraveldistance = self.df[SET_TRAVEL_HOURS].min()
                        if conflictlimit > 0:
                            elec_loop += 1
                else:
                    still_looking = False
            print("The electrification rate achieved is {}".format(elecrate))
    # NOTE: a commented-out vectorized "fast method" draft (sort by
    # per-capita investment and take the cumulative-population prefix up to
    # the limit) previously sat here as dead code; removed.
def final_decision(self, mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc,
                   sa_diesel_calc, grid_calc, year, end_year, timestep):
    """Resolve the final electrification technology per settlement for *year*.

    Combines the grid/off-grid pre-selection with the electrification
    limit (SET_LIMIT<year>) to fix SET_ELEC_FINAL_CODE<year>, then derives
    the new generation capacity and the investment cost — total, per grid
    component (LV/MV/HV/transformer/connection) and per connected
    household.

    Parameters
    ----------
    mg_hydro_calc .. grid_calc :
        Pre-configured technology cost calculators exposing ``get_lcoe``,
        ``capacity_factor``, ``base_to_peak_load_ratio`` and
        ``distribution_losses``.
    year, end_year, timestep : int
        Analysis year, end of the modelling horizon and timestep length.
    """
    logging.info('Determine final electrification decision')
    yr = "{}".format(year)
    prev_yr = "{}".format(year - timestep)

    # Grid: already grid-connected, or admitted by the limit with grid as the
    # least-cost option and physically reachable by this year.
    self.df[SET_ELEC_FINAL_GRID + yr] = self.df.apply(
        lambda row: 1 if (row[SET_ELEC_FUTURE_GRID + yr] == 1) or
                         (row[SET_LIMIT + yr] == 1 and
                          row[SET_MIN_OVERALL_CODE + yr] == 1 and
                          row[SET_GRID_REACH_YEAR] <= year)
        else 0, axis=1)

    # Off-grid: previously off-grid (and grid is not now cheapest), or
    # admitted settlements not taken by the grid — including those whose
    # cheapest option is the grid but which the grid cannot reach in time.
    self.df[SET_ELEC_FINAL_OFFGRID + yr] = self.df.apply(
        lambda row: 1 if (row[SET_ELEC_FUTURE_OFFGRID + yr] == 1 and
                          row[SET_MIN_OVERALL_CODE + yr] != 1) or
                         (row[SET_LIMIT + yr] == 1 and
                          row[SET_ELEC_FINAL_GRID + yr] == 0) or
                         (row[SET_LIMIT + yr] == 1 and
                          row[SET_MIN_OVERALL_CODE + yr] == 1 and
                          row[SET_GRID_REACH_YEAR] > year)
        else 0, axis=1)

    # Final technology code: 1 = grid, 2..7 = off-grid tech, 99 = none.
    self.df.loc[(self.df[SET_LIMIT + yr] == 1) &
                (self.df[SET_ELEC_FINAL_GRID + yr] == 1),
                SET_ELEC_FINAL_CODE + yr] = 1
    self.df.loc[(self.df[SET_LIMIT + yr] == 1) &
                (self.df[SET_ELEC_FINAL_OFFGRID + yr] == 1),
                SET_ELEC_FINAL_CODE + yr] = self.df[SET_MIN_OFFGRID_CODE + yr]
    self.df.loc[(self.df[SET_LIMIT + yr] == 0) &
                (self.df[SET_ELEC_FINAL_GRID + yr] == 0) &
                (self.df[SET_ELEC_FINAL_OFFGRID + yr] == 0),
                SET_ELEC_FINAL_CODE + yr] = 99

    logging.info('Calculate new capacity')
    # code -> (calculator, capacity factor); PV uses the settlement GHI and
    # wind the settlement wind capacity factor (Series); others are scalars.
    tech_calcs = {1: grid_calc, 7: mg_hydro_calc, 5: mg_pv_calc, 6: mg_wind_calc,
                  4: mg_diesel_calc, 2: sa_diesel_calc, 3: sa_pv_calc}
    cap_factors = {1: grid_calc.capacity_factor,
                   7: mg_hydro_calc.capacity_factor,
                   5: self.df[SET_GHI] / HOURS_PER_YEAR,
                   6: self.df[SET_WINDCF],
                   4: mg_diesel_calc.capacity_factor,
                   2: sa_diesel_calc.capacity_factor,
                   3: self.df[SET_GHI] / HOURS_PER_YEAR}
    for code, calc in tech_calcs.items():
        self.df.loc[self.df[SET_ELEC_FINAL_CODE + yr] == code, SET_NEW_CAPACITY + yr] = (
            (self.df[SET_ENERGY_PER_CELL + yr]) /
            (HOURS_PER_YEAR * cap_factors[code] * calc.base_to_peak_load_ratio *
             (1 - calc.distribution_losses)))
    self.df.loc[self.df[SET_ELEC_FINAL_CODE + yr] == 99, SET_NEW_CAPACITY + yr] = 0

    def res_investment_cost(row):
        # Investment cost for the settlement's chosen technology; 0 when
        # nothing is built (code 99).
        min_code = row[SET_ELEC_FINAL_CODE + yr]
        # Keyword arguments shared by every technology's get_lcoe call.
        common = dict(energy_per_cell=row[SET_ENERGY_PER_CELL + yr],
                      start_year=year - timestep,
                      end_year=end_year,
                      people=row[SET_POP + yr],
                      new_connections=row[SET_NEW_CONNECTIONS + yr],
                      total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                      prev_code=row[SET_ELEC_FINAL_CODE + prev_yr],
                      num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                      grid_cell_area=row['GridCellArea'],
                      conf_status=row[SET_CONFLICT],
                      get_investment_cost=True)
        if min_code == 2:
            return sa_diesel_calc.get_lcoe(travel_hours=row[SET_TRAVEL_HOURS], **common)
        if min_code == 3:
            return sa_pv_calc.get_lcoe(capacity_factor=row[SET_GHI] / HOURS_PER_YEAR, **common)
        if min_code == 6:
            return mg_wind_calc.get_lcoe(capacity_factor=row[SET_WINDCF], **common)
        if min_code == 4:
            return mg_diesel_calc.get_lcoe(travel_hours=row[SET_TRAVEL_HOURS], **common)
        if min_code == 5:
            return mg_pv_calc.get_lcoe(capacity_factor=row[SET_GHI] / HOURS_PER_YEAR, **common)
        if min_code == 7:
            return mg_hydro_calc.get_lcoe(mv_line_length=row[SET_HYDRO_DIST], **common)
        if min_code == 1:
            return grid_calc.get_lcoe(additional_mv_line_length=row[SET_MIN_GRID_DIST + yr],
                                      elec_loop=row[SET_ELEC_ORDER + yr], **common)
        return 0

    logging.info('Calculate investment cost')
    self.df[SET_INVESTMENT_COST + yr] = self.df.apply(res_investment_cost, axis=1)

    def grid_component_cost(row, cost_flag):
        # Investment cost of one grid component for grid-connected
        # settlements (code 1); 0 otherwise. *cost_flag* is the get_lcoe
        # keyword selecting the component (e.g. 'get_investment_cost_lv').
        if row[SET_ELEC_FINAL_CODE + yr] != 1:
            return 0
        return grid_calc.get_lcoe(energy_per_cell=row[SET_ENERGY_PER_CELL + yr],
                                  start_year=year - timestep,
                                  end_year=end_year,
                                  people=row[SET_POP + yr],
                                  new_connections=row[SET_NEW_CONNECTIONS + yr],
                                  total_energy_per_cell=row[SET_TOTAL_ENERGY_PER_CELL],
                                  prev_code=row[SET_ELEC_FINAL_CODE + prev_yr],
                                  num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                  grid_cell_area=row['GridCellArea'],
                                  conf_status=row[SET_CONFLICT],
                                  additional_mv_line_length=row[SET_MIN_GRID_DIST + yr],
                                  elec_loop=row[SET_ELEC_ORDER + yr],
                                  **{cost_flag: True})

    # One pass per grid component; replaces five near-identical closures.
    for column, message, cost_flag in (
            ('InvestmentCostLV', 'Calculate LV investment cost', 'get_investment_cost_lv'),
            ('InvestmentCostMV', 'Calculate MV investment cost', 'get_investment_cost_mv'),
            ('InvestmentCostHV', 'Calculate HV investment cost', 'get_investment_cost_hv'),
            ('InvestmentCostTransformer', 'Calculate transformer investment cost',
             'get_investment_cost_transformer'),
            ('InvestmentCostConnection', 'Calculate connection investment cost',
             'get_investment_cost_connection')):
        logging.info(message)
        # cost_flag bound as a default to avoid late-binding in the lambda.
        self.df[column + yr] = self.df.apply(
            lambda row, flag=cost_flag: grid_component_cost(row, flag), axis=1)

    def infrastructure_cost(row):
        # Grid infrastructure cost per newly connected household; 0 for
        # non-grid settlements or those without new connections.
        if row[SET_NEW_CONNECTIONS + yr] > 0 and row[SET_ELEC_FINAL_CODE + yr] == 1:
            households = row[SET_NEW_CONNECTIONS + yr] / row[SET_NUM_PEOPLE_PER_HH]
            return (row['InvestmentCostLV' + yr]
                    + row['InvestmentCostMV' + yr] + row['InvestmentCostHV' + yr]
                    + row['InvestmentCostTransformer' + yr]
                    + row['InvestmentCostConnection' + yr]) / households
        return 0

    logging.info('Calculating average infrastructure cost for grid connection')
    self.df['InfrastructureCapitaCost' + yr] = self.df.apply(infrastructure_cost, axis=1)

    # Update the actual electrification column with results
    self.df[SET_ELEC_FUTURE_ACTUAL + yr] = self.df[SET_LIMIT + yr]
def delete_redundant_columns(self, year):
    """Drop the intermediate per-year working columns from ``self.df``.

    The final technology code is preserved under 'ResultsNoTimestep'
    before its per-year column is removed along with the rest. Replaces
    26 copy-pasted ``del`` statements with a loop over the column-name
    prefixes.
    """
    yr = "{}".format(year)
    self.df['ResultsNoTimestep'] = self.df[SET_ELEC_FINAL_CODE + yr]
    redundant_prefixes = (
        SET_ELEC_FINAL_CODE, SET_LCOE_MG_HYDRO, SET_LCOE_MG_PV,
        SET_LCOE_MG_WIND, SET_LCOE_MG_DIESEL, SET_LCOE_SA_DIESEL,
        SET_LCOE_SA_PV, SET_MIN_OFFGRID, SET_MIN_OFFGRID_LCOE,
        SET_MIN_OFFGRID_CODE, SET_ELEC_FUTURE_GRID, SET_ELEC_FUTURE_OFFGRID,
        SET_ELEC_FUTURE_ACTUAL, SET_LCOE_GRID, SET_MIN_GRID_DIST,
        SET_ELEC_ORDER, SET_MIN_OVERALL, SET_MIN_OVERALL_LCOE,
        SET_MIN_OVERALL_CODE, SET_LIMIT, SET_ELEC_FINAL_GRID,
        SET_ELEC_FINAL_OFFGRID, SET_NEW_CAPACITY, SET_INVESTMENT_COST,
        SET_NEW_CONNECTIONS, SET_ENERGY_PER_CELL)
    for prefix in redundant_prefixes:
        del self.df[prefix + yr]
def calc_summaries(self, df_summary, sumtechs, year):
    """Fill ``df_summary[year]`` with per-technology totals for *year*.

    For each metric — population served, new connections, new capacity,
    investment cost — and each final technology code 1..7, sum the metric
    over the settlements assigned that code and admitted by the limit.
    ``sumtechs`` must hold 28 labels ordered metric-major (7 population
    entries, then 7 connection, 7 capacity, 7 investment entries),
    matching the 28 explicit assignments this loop replaces.
    """
    logging.info('Calculate summaries')
    yr = "{}".format(year)
    within_limit = self.df[SET_LIMIT + yr] == 1
    metric_columns = [SET_POP + yr,               # Population summaries
                      SET_NEW_CONNECTIONS + yr,   # New-connection summaries
                      SET_NEW_CAPACITY + yr,      # Capacity summaries
                      SET_INVESTMENT_COST + yr]   # Investment summaries
    index = 0
    for column in metric_columns:
        for code in range(1, 8):
            chosen = (self.df[SET_ELEC_FINAL_CODE + yr] == code) & within_limit
            df_summary[year][sumtechs[index]] = sum(self.df.loc[chosen][column])
            index += 1
|
#!/usr/bin/env python3
import sys; assert sys.version_info[0] >= 3, "Python 3 required."
import os
from binascii import unhexlify, hexlify
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from .utils import bebs2ip, i2bebsp, beos2ip, bebs2osp, cldiv
# <NAME>
# NIST Special Publication 800-38G
# Recommendation for Block Cipher Modes of Operation: Methods for Format-Preserving Encryption
# <http://dx.doi.org/10.6028/NIST.SP.800-38G>
# specialized to the parameters below and a single-block PRF; unoptimized
# Domain parameters: this FF1 instance is specialized to a binary alphabet
# (radix 2) and a fixed input length of 88 bits.
radix = 2
minlen = maxlen = 88
maxTlen = 255  # maximum tweak length in bytes

# Parameter constraints required by the FF1 specification.
assert 2 <= radix and radix < 256
assert radix**minlen >= 100
assert 2 <= minlen and minlen <= maxlen and maxlen < 256

# The spec's NUM_radix / STR_radix conversions for radix 2:
# bit-list -> integer and (length, integer) -> bit-list.
NUM_2 = bebs2ip
STR_2 = i2bebsp
def ff1_aes256_encrypt(key, tweak, x):
    """FF1 encryption (NIST SP 800-38G) specialized to radix 2, 88-bit
    inputs, and a single-block AES-256 CBC-MAC as the round PRF.

    key   -- 32-byte AES-256 key.
    tweak -- byte string of length <= maxTlen (255).
    x     -- plaintext as a list of 88 bits (ints 0/1).
    Returns the ciphertext as a list of 88 bits.
    """
    n = len(x)
    t = len(tweak)
    assert minlen <= n and n <= maxlen
    assert t <= maxTlen
    u = n//2; v = n-u
    assert u == v  # even split only, so no u/v alternation between rounds
    A = x[:u]; B = x[u:]
    assert radix == 2
    b = cldiv(v, 8)        # bytes needed to represent a half-block
    d = 4*cldiv(b, 4) + 4  # bytes of PRF output consumed per round
    assert d <= 16         # one AES block suffices -> single-block PRF
    # Fixed PRF-input prefix P: version/method bytes, radix (3 bytes),
    # round count 10, u mod 256, n (4 bytes), t (4 bytes); n, t < 256 here.
    P = bytes([1, 2, 1, 0, 0, radix, 10, u % 256, 0, 0, 0, n, 0, 0, 0, t])
    for i in range(10):
        # Q: tweak, zero padding to a 16-byte boundary, round number, B.
        Q = tweak + b'\0'*((-t-b-1) % 16) + bytes([i]) + bebs2osp(B)
        y = beos2ip(aes_cbcmac(key, P + Q)[:d])
        c = (NUM_2(A)+y) % (1<<u)  # radix**u == 2**u
        C = STR_2(u, c)
        # Feistel swap.
        A = B
        B = C
    return A + B
# This is not used except by tests.
def ff1_aes256_decrypt(key, tweak, x):
    """FF1 decryption: inverse of :func:`ff1_aes256_encrypt`.

    Same parameters/specialization as the encryptor; rounds run in
    reverse order with the Feistel roles of A and B swapped and the
    round value subtracted instead of added.
    """
    n = len(x)
    t = len(tweak)
    assert minlen <= n and n <= maxlen
    assert t <= maxTlen
    u = n//2; v = n-u
    assert u == v  # even split only
    A = x[:u]; B = x[u:]
    assert radix == 2
    b = cldiv(v, 8)        # bytes needed to represent a half-block
    d = 4*cldiv(b, 4) + 4  # bytes of PRF output consumed per round
    assert d <= 16         # single-block PRF
    # Same fixed PRF-input prefix as encryption.
    P = bytes([1, 2, 1, 0, 0, radix, 10, u % 256, 0, 0, 0, n, 0, 0, 0, t])
    for i in range(9, -1, -1):  # rounds in reverse
        Q = tweak + b'\0'*((-t-b-1) % 16) + bytes([i]) + bebs2osp(A)
        y = beos2ip(aes_cbcmac(key, P + Q)[:d])
        c = (NUM_2(B)-y) % (1<<u)  # subtract to undo the encrypt round
        C = STR_2(u, c)
        # Inverse Feistel swap.
        B = A
        A = C
    return A + B
def test_ff1():
    """Known-answer and round-trip tests for the specialized FF1 instance.

    Expected ciphertexts are consistent with the Java implementation at
    <https://git.code.sf.net/p/format-preserving-encryption/code>.
    """
    def bits(s):
        # Bit-string -> list of ints.
        return list(map(int, s))

    def check(key, tweak, pt, expected=None):
        # Encrypt, optionally compare against a known answer, and verify
        # that decryption round-trips. Returns the ciphertext bits.
        ct = ff1_aes256_encrypt(key, tweak, pt)
        if expected is not None:
            assert ''.join(map(str, ct)) == expected, ct
        pt2 = ff1_aes256_decrypt(key, tweak, ct)
        assert pt2 == pt, (ct, pt2)
        return ct

    fixed_key = unhexlify("2B7E151628AED2A6ABF7158809CF4F3CEF4359D8D580AA4F7F036D6F04FC6A94")

    ct_zero = "0000100100110101011101111111110011000001101100111110011101110101011010100100010011001111"
    check(fixed_key, b'', [0]*88, ct_zero)
    check(fixed_key, b'', bits(ct_zero),
          "1101101011010001100011110000010011001111110110011101010110100001111001000101011111011000")
    alternating = [0, 1]*44
    check(fixed_key, b'', alternating,
          "0000111101000001111011010111011111110001100101000000001101101110100010010111001100100110")
    check(fixed_key, bytes(range(maxTlen)), alternating,
          "0111110110001000000111010110000100010101101000000011100111100100100010101101111010100011")

    # Round-trip only, under random key and tweak.
    random_key = os.urandom(32)
    check(random_key, b'', alternating)
    check(random_key, os.urandom(maxTlen), alternating)
def aes_cbcmac(key, input):
    """CBC-MAC of *input* under AES with an all-zero IV.

    Returns the final 16-byte ciphertext block; *input* length must be a
    multiple of the AES block size.
    """
    cipher = Cipher(algorithms.AES(key), modes.CBC(bytes(16)), backend=default_backend())
    enc = cipher.encryptor()
    ciphertext = enc.update(input) + enc.finalize()
    return ciphertext[-16:]
def test_aes():
    """Verify aes_cbcmac really computes AES-256, using NIST CAVP vectors.

    <https://csrc.nist.gov/Projects/Cryptographic-Algorithm-Validation-Program/Block-Ciphers>
    <https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Algorithm-Validation-Program/documents/aes/aesmct.zip>
    """
    # ECBVarTxt256.rsp COUNT = 0 — single-block check. (On its own this
    # wouldn't catch a byte-order error in the key.)
    zero_key = unhexlify("0000000000000000000000000000000000000000000000000000000000000000")
    plaintext = unhexlify("80000000000000000000000000000000")
    expected = unhexlify("ddc6bf790c15760d8d9aeb6f9a75fd4e")
    assert aes_cbcmac(zero_key, plaintext) == expected
    # ECBMCT256.rsp COUNT = 0 — Monte-Carlo chain of 1000 encryptions.
    mct_key = unhexlify("f9e8389f5b80712e3886cc1fa2d28a3b8c9cd88a2d4a54c6aa86ce0fef944be0")
    state = unhexlify("b379777f9050e2a818f2940cbbd9aba4")
    final = unhexlify("6893ebaf0a1fccc704326529fdfb60db")
    for _ in range(1000):
        state = aes_cbcmac(mct_key, state)
    assert state == final, hexlify(state)
if __name__ == '__main__':
    # Run the self-tests when executed directly (not on import).
    test_aes()
    test_ff1()
|
import tkinter as tk
from tkinter import messagebox
import stockclick as sc
import time
import os
import pyperclip
import subprocess
import datetime
import stockmodule as m
import datetime
import s0056
def check_pw():
    """Password gate for the stock panel: on success, fetch and list stock info.

    Reads the password entry (pwvar), appends two status messages from
    s0056.send_info, then either lists sc.sclick() results or shows a
    reminder dialog.
    """
    global stop
    stop = False  # NOTE(review): `stop` is not read in this chunk — confirm it is used elsewhere
    _, msg1, msg2 = s0056.send_info(2)
    listbox.insert(tk.END, str(msg1))
    listbox.insert(tk.END, str(msg2))
    # SECURITY: password is hard-coded in source; consider moving it out of the script.
    if pwvar.get() == 'joejoe':
        stockinfo = sc.sclick()
        for stock_item in stockinfo:
            listbox.insert(tk.END, str(stock_item))
    else:
        # Fixed typo in the user-facing message ("Pleae" -> "Please").
        messagebox.askyesno("Reminder", "Please enter the password")
def athenz():
    # Launch athenz-user-cert in a new iTerm2 window (fire-and-forget; exit status ignored).
    os.system('~/mypython/iterm2run.sh ~/Desktop/athenz-user-cert')
def yinit():
    # Run the 'ub' command in a new iTerm2 window (fire-and-forget; exit status ignored).
    os.system('~/mypython/iterm2run.sh ub')
def kubelogin():
    # Run 'kubectl plugin login' in a new iTerm2 window (fire-and-forget; exit status ignored).
    os.system("~/mypython/iterm2run.sh 'kubectl plugin login'")
def clean():
    """Remove every entry from the output listbox."""
    listbox.delete(0, tk.END)
def okta():
    """Copy the okta password from a local file to the clipboard (password gated)."""
    if pwvar.get() == 'joejoe':
        status, result = subprocess.getstatusoutput('cat okta.passwd')
        if status == 0:
            pyperclip.copy(result)
        else:
            # Fixed NameError: 'ssagebox' -> 'messagebox'.
            messagebox.askyesno("Reminder", "Put your password in file and check it")
    else:
        # Fixed typo in the user-facing message ("Pleae" -> "Please").
        messagebox.askyesno("Reminder", "Please enter the password")
def bouncer():
    """Copy the bouncer password from a local file to the clipboard (password gated)."""
    if pwvar.get() == 'joejoe':
        status, result = subprocess.getstatusoutput('cat bouncer.passwd')
        if status == 0:
            pyperclip.copy(result)
        else:
            messagebox.askyesno("Reminder", "Put your password in file and check it")
    else:
        # Fixed typo in the user-facing message ("Pleae" -> "Please").
        messagebox.askyesno("Reminder", "Please enter the password")
def sanitize():
    """Refresh the listbox with the Athenz cert timestamp; reschedules itself."""
    # ls the cert file and reformat its date fields into a status line.
    status, result = subprocess.getstatusoutput('ls -al ~/.athenz/cert |\
 cut -d" " -f9-12 |\
 xargs -I % echo "Athenz started at %"')
    listbox.delete(0, 'end')
    listbox.insert(tk.END, result)
    # Re-run every 300000 ms (5 minutes).
    window.after(300000, sanitize)
def stock_when_to_buy():
    """On weekday market hours, poll stock advice and append it to the listbox."""
    now = datetime.datetime.now()
    # Monday (1) .. Friday (5) only.
    if datetime.date.today().isoweekday() <= 5:
        # NOTE(review): window is 09:00-13:59 in local time -- confirm market timezone.
        if now.hour < 14 and now.hour >= 9:
            stock_buy_info = m.check_stock_send()
            # NOTE(review): send_info(2) is unpacked into 2 values here but into
            # 3 values in check_pw -- verify its return arity.
            _, msg = s0056.send_info(2)
            for item in stock_buy_info:
                listbox.insert(tk.END, str(item))
            listbox.insert(tk.END, str(msg))
    # Re-check every 180000 ms (3 minutes), also outside market hours.
    window.after(180000, stock_when_to_buy)
# ---------------------------------------------------------------------------
# GUI construction (module-level script): password-gated toolbox window.
# ---------------------------------------------------------------------------
#main window
window = tk.Tk()
window.geometry('480x320')
window.title('<NAME>')
#upper frame (tool buttons + output listbox)
upper_fm = tk.Frame(window, bg='green', width=480, height=320-100)
upper_fm.pack()
#lower frame (password prompt)
below_fm = tk.Frame(window, bg='red', width=480, height=100)
below_fm.pack()
#label for password
lb = tk.Label(below_fm, text='Enter Your Password', bg='red', fg='white',
              font=('細明體', 20))
lb.place(rely=0.25, relx=0.5, anchor='center')
#Entry (masked field backing pwvar, read by the button callbacks)
pwvar = tk.StringVar()
entry = tk.Entry(below_fm, width=15, textvariable=pwvar, show='*')
entry.place(rely=0.5, relx=0.5, anchor='center')
#Stock button
stock_btn = tk.Button(upper_fm, bg='#FFD700', fg='black', text='click',
                      command=check_pw, font=('細明體', 20))
stock_btn.place(rely=0.1, relx=0.2, anchor='center')
#Athenz Button
athenz_btn = tk.Button(upper_fm, bg='#FFD700', fg='black', text='athenz-user-cert',
                       command=athenz, font=('細明體', 20))
athenz_btn.place(rely=0.1, relx=0.5, anchor='center')
#yinit Button
yinit_btn = tk.Button(upper_fm, bg='#FFD700', fg='black', text='yinit',
                      command=yinit, font=('細明體', 20))
yinit_btn.place(rely=0.1, relx=0.8, anchor='center')
#Kubelogin Button
kubelogin_btn = tk.Button(upper_fm, bg='#FFD700', fg='black', text='kubelogin',
                          command=kubelogin, font=('細明體', 20))
kubelogin_btn.place(rely=0.3, relx=0.2, anchor='center')
#clean Button
clean_btn = tk.Button(upper_fm, bg='red', fg='black', text='clean',
                      command=clean, font=('細明體', 20))
clean_btn.place(rely=0.3, relx=0.4, anchor='center')
#okta Button (previously this widget reused the clean_btn name)
okta_btn = tk.Button(upper_fm, bg='red', fg='black', text='okta',
                     command=okta, font=('細明體', 20))
okta_btn.place(rely=0.3, relx=0.6, anchor='center')
#bouncer Button (previously this widget reused the clean_btn name)
bouncer_btn = tk.Button(upper_fm, bg='red', fg='black', text='bouncer',
                        command=bouncer, font=('細明體', 20))
bouncer_btn.place(rely=0.3, relx=0.8, anchor='center')
#listbox for stock information
listbox = tk.Listbox(upper_fm, width=27, height=5)
listbox.place(rely=0.7, relx=0.5, anchor='center')
#Scrollbar
sbar = tk.Scrollbar(upper_fm)
sbar.place(rely=0.7, relx=0.735, anchor='center')
#Scrollbar & listbox wiring
sbar.config(command=listbox.yview)
listbox.config(yscrollcommand=sbar.set)
#kick off the periodic jobs one second after startup
window.after(1000, sanitize)
window.after(1000, stock_when_to_buy)
#main loop blocks until the window is closed
window.mainloop()
|
import math
import numpy as np
import numpy.matlib
import time
import uuid
import os
import sqlite3
import datetime
import threading
import multiprocessing
import time
import random
import sys
sys.path.append("../src/")
import plantsKin as pk
import baseToolbox as bt
from math import pi
# --- Simulation configuration ------------------------------------------------
threaded = True                          # run replicates through a process pool
nThreads = 10                            # pool size when threaded is True
parametersName = 'PlantsParameters.db'   # sqlite db holding the parameter sweep
dbName = 'PlantsSimulation.db'           # sqlite db recording finished runs
dbVideo = 'PlantsVideo.db'               # sqlite db mapping run ids to video urls
tMax = 1e2                               # simulated time budget -- NOTE(review): unused in this chunk, confirm
xMax = 20                                # domain extent -- NOTE(review): unused in this chunk, confirm
lockDB = False                           # best-effort write flag -- NOTE(review): module global, not shared across pool processes
replicate = 1                            # replicates per parameter set
path = '/mnt/d/Plants-Kinematics/data/'  # default data directory (shadowed locally in pather)
def FirstGen():
    """One-time schema setup: create the parameter table and the results table.

    The simulation table repeats every parameter column and adds a replicate
    id (repId) and a timestamp (date).
    """
    # Parameter sweep definitions, one row per sweep point, keyed by uuid.
    conn = sqlite3.connect(parametersName)
    c = conn.cursor()
    c.execute('''CREATE TABLE parameters (id text,
        N integer, dx real,dt real,
        theta0 real,
        nElements integer,
        growth text, growthRate real, growthZone real,
        tropismIntensity real, tropismDirection real,
        apicalTropismIntensity real,apicalTropismDirection real,
        collectiveTropism text,
        collectiveTropismAttractionZone real,collectiveTropismRepulsionZone real,
        collectiveTropismAttractionIntensity real,collectiveTropismRepulsionIntensity real,
        proprioception real)''')
    conn.commit()
    conn.close()
    # Finished-run records: same columns plus replicate id and date.
    conn = sqlite3.connect(dbName)
    c = conn.cursor()
    c.execute('''CREATE TABLE simulation (id text, repId text,date text,
        N integer, dx real,dt real,
        theta0 real,
        nElements integer,
        growth text, growthRate real, growthZone real,
        tropismIntensity real, tropismDirection real,
        apicalTropismIntensity real,apicalTropismDirection real,
        collectiveTropism text,
        collectiveTropismAttractionZone real,collectiveTropismRepulsionZone real,
        collectiveTropismAttractionIntensity real,collectiveTropismRepulsionIntensity real,
        proprioception real)''')
    conn.commit()
    conn.close()
def SecondGen():
    """Create the video lookup table (run id -> video url) in the video database."""
    connection = sqlite3.connect(dbVideo)
    cursor = connection.cursor()
    cursor.execute('''CREATE TABLE video (id text, url text)''')
    connection.commit()
    connection.close()
def dbFiller():
    """Populate the parameters table with one row per combination of the sweep values.

    The original seventeen-deep loop nest is flattened with itertools.product,
    which yields the exact same combinations in the same order. Unused locals
    (xMax, v0, tMax) were removed.
    """
    from itertools import product  # stdlib; local import keeps the file's import block untouched
    dt = 0.01
    Nrange = [2, 5, 10, 20, 50]
    dxRange = [0, 0.1, 0.2, 0.5, 1.0, 2.0]
    theta0Range = [0]
    nElementsRange = [1000]
    growthRange = ['None', 'Apical', 'Exponential']
    growthZones = [1.0]
    growthRates = [1.0]
    tropismRange = [0]
    tropismDirections = [0]
    apicalTropismRange = -np.array([0.0])
    apicalTropismDirections = [0]
    proprioceptionRange = -np.array([0.0])
    collectiveTropisms = ['Apical']
    collectiveTropismAttractionZoneRange = [2.0]
    collectiveTropismRepulsionZoneRange = [1.0]
    collectiveTropismAttractionIntensityRange = -np.array([0, 1.0, 10.0])
    collectiveTropismRepulsionIntensityRange = -np.array([0, 1.0, 10.0])
    conn = sqlite3.connect(parametersName)
    c = conn.cursor()
    # product() iterates in the same outer-to-inner order as the original nest.
    for (N, nElements, theta0, dx, growth, growthZone, growthRate,
         tropism, tropismDirection, apicalTropism, apicalTropismDirection,
         proprioception, collectiveTropism,
         collectiveTropismAttractionZone, collectiveTropismRepulsionZone,
         collectiveTropismAttractionIntensity, collectiveTropismRepulsionIntensity) in product(
            Nrange, nElementsRange, theta0Range, dxRange, growthRange,
            growthZones, growthRates, tropismRange, tropismDirections,
            apicalTropismRange, apicalTropismDirections, proprioceptionRange,
            collectiveTropisms, collectiveTropismAttractionZoneRange,
            collectiveTropismRepulsionZoneRange,
            collectiveTropismAttractionIntensityRange,
            collectiveTropismRepulsionIntensityRange):
        expId = str(uuid.uuid4())
        # Column order must match the parameters table schema.
        values = [expId, N, dx, dt, theta0, nElements,
                  growth, growthRate, growthZone,
                  tropism, tropismDirection,
                  apicalTropism, apicalTropismDirection,
                  collectiveTropism,
                  collectiveTropismAttractionZone, collectiveTropismRepulsionZone,
                  collectiveTropismAttractionIntensity, collectiveTropismRepulsionIntensity,
                  proprioception]
        c.execute("INSERT INTO parameters VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", values)
    conn.commit()
    conn.close()
def checkExpParam(expId):
    """Fetch all sweep parameters for one experiment id.

    Returns an 18-tuple in the same order the callers unpack:
    (N, dx, dt, theta0, nElements, growth, growthRate, growthZone,
     tropismIntensity, tropismDirection, apicalTropismIntensity,
     apicalTropismDirection, collectiveTropism,
     collectiveTropismAttractionZone, collectiveTropismRepulsionZone,
     collectiveTropismAttractionIntensity, collectiveTropismRepulsionIntensity,
     proprioception)

    Improvement: one SELECT instead of the original eighteen round trips.
    """
    connParam = sqlite3.connect(parametersName, check_same_thread=False)
    cursorParam = connParam.cursor()
    # Column order matches the documented return order exactly.
    cursorParam.execute(
        "SELECT N, dx, dt, theta0, nElements, "
        "growth, growthRate, growthZone, "
        "tropismIntensity, tropismDirection, "
        "apicalTropismIntensity, apicalTropismDirection, "
        "collectiveTropism, "
        "collectiveTropismAttractionZone, collectiveTropismRepulsionZone, "
        "collectiveTropismAttractionIntensity, collectiveTropismRepulsionIntensity, "
        "proprioception FROM parameters WHERE id = ?", (expId,))
    row = cursorParam.fetchone()
    connParam.close()
    # An unknown id still raises (TypeError here, IndexError before) rather
    # than silently returning a partial result.
    return tuple(row)
def pather(expId):
    """Return (creating it if needed) the per-experiment data directory.

    The original check-then-create pattern raced when several pool workers
    created the same directory; makedirs(..., exist_ok=True) is atomic for
    this purpose and also creates the parent directory in one call.
    """
    base = '/mnt/c/Users/renaud/Documents/Plants-Kinematics/data/'
    expPath = base + expId + '/'
    os.makedirs(expPath, exist_ok=True)
    return expPath
def rootsSim(N,dx,dt,theta0,nElements,\
    growth,growthRate,growthZone,\
    tropismIntensity , tropismDirection ,\
    apicalTropismIntensity ,apicalTropismDirection ,\
    collectiveTropism ,
    collectiveTropismAttractionZone,collectiveTropismRepulsionZone ,\
    collectiveTropismAttractionIntensity,collectiveTropismRepulsionIntensity,\
    proprioception,expId):
    """Build one multi-root system and register its interaction terms.

    NOTE(review): the time-stepping loop and the CSV output below are
    commented out, so this currently only constructs the pk.Roots object
    and attaches interactions -- confirm whether that is intentional.
    """
    #dataPath = pather(expId[1])
    roots = pk.Roots(N,dx,theta0 =theta0,growth=growth,nElements = nElements,dt=dt,growthRate=growthRate,growthZone = growthZone)
    # Individual-plant responses.
    roots.addInteractions(name = 'ApicalTropism' ,intensity=apicalTropismIntensity,direction = apicalTropismDirection)
    roots.addInteractions(name = 'Tropism' ,intensity=tropismIntensity,direction = tropismDirection)
    roots.addInteractions(name = 'Proprioception' ,intensity=proprioception)
    # Plant-to-plant attraction/repulsion.
    roots.addCollectiveInteraction(name =collectiveTropism,attractionZone=collectiveTropismAttractionZone,repulsionZone=collectiveTropismRepulsionZone ,\
    attractionIntensity=collectiveTropismAttractionIntensity,repulsionIntensity=collectiveTropismRepulsionIntensity)
    #for j in range(0,len(roots.roots)):
    #bt.writeCsvRoots(roots.roots[j].x,'root'+str(j).zfill(3)+'.csv',dataPath)
    #for t in range(0,int(tMax/dt)):
    #roots.update()
    #for j in range(0,len(roots.roots)):
    #bt.writeCsvRoots(roots.roots[j].x,'root'+str(j).zfill(3)+'.csv',dataPath,writeMode = 1)
def startSimulation(expId):
    """Run one simulation replicate and record it in the results database.

    expId is a [parameterId, replicateId] pair produced by main().
    """
    global lockDB
    try:
        print("The following experiment is analyzed : " + str(expId[0]))
        print("The following replicate is analyzed : " + str(expId[1]))
        # Stagger the workers slightly so they do not all open the sqlite
        # parameter file at the same instant.
        time.sleep(random.random())
        N, dx, dt, theta0, nElements, \
            growth, growthRate, growthZone, \
            tropismIntensity, tropismDirection, \
            apicalTropismIntensity, apicalTropismDirection, \
            collectiveTropism, \
            collectiveTropismAttractionZone, collectiveTropismRepulsionZone, \
            collectiveTropismAttractionIntensity, collectiveTropismRepulsionIntensity, \
            proprioception = checkExpParam(expId[0])
        print([checkExpParam(expId[0])])  # debug echo of the parameter row
        rootsSim(N, dx, dt, theta0, nElements,
                 growth, growthRate, growthZone,
                 tropismIntensity, tropismDirection,
                 apicalTropismIntensity, apicalTropismDirection,
                 collectiveTropism,
                 collectiveTropismAttractionZone, collectiveTropismRepulsionZone,
                 collectiveTropismAttractionIntensity, collectiveTropismRepulsionIntensity,
                 proprioception, expId)
        # NOTE(review): lockDB is a module global; with multiprocessing.Pool
        # each worker holds its own copy, so this busy-wait does NOT serialize
        # writers across processes. Kept as-is for behavior compatibility;
        # consider a multiprocessing.Lock.
        while lockDB:
            time.sleep(random.random())
        lockDB = True
        conn = sqlite3.connect(dbName, check_same_thread=False)
        c = conn.cursor()
        values = [expId[0], expId[1], datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), N, dx, dt, theta0, nElements,
                  growth, growthRate, growthZone,
                  tropismIntensity, tropismDirection,
                  apicalTropismIntensity, apicalTropismDirection,
                  collectiveTropism,
                  collectiveTropismAttractionZone, collectiveTropismRepulsionZone,
                  collectiveTropismAttractionIntensity, collectiveTropismRepulsionIntensity,
                  proprioception]
        print('----- writing in database')
        #c.execute("INSERT INTO simulation VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",values)
        #conn.commit()
        conn.close()
        print('----- wrote in database')
        lockDB = False
    except ValueError as err:
        # Fixed: the original printed the ValueError *class*, not the instance.
        print(err)
def main():
    """Queue every missing replicate of every parameter set and run them.

    For each parameter id, counts how many replicates are already recorded
    in the simulation table and schedules the remainder, then dispatches
    the list either through a process pool or sequentially.
    """
    print('Starting')
    connParam = sqlite3.connect(parametersName, check_same_thread=False)
    cursorParam = connParam.cursor()
    cursorParam.execute("Select id from parameters")
    expIds = cursorParam.fetchall()
    connSim = sqlite3.connect(dbName, check_same_thread=False)
    cursorSim = connSim.cursor()
    parametersList = []
    print('checking the ids')
    exp = 0
    print('making ' + str(replicate) + ' replicates')
    for expId in expIds:
        # How many replicates of this parameter set already ran?
        cursorSim.execute("Select * from simulation where id = ?", (str(expId[0]),))
        n = len(cursorSim.fetchall())
        # Schedule only the missing ones (range is empty when n >= replicate).
        for _ in range(replicate - n):
            repId = str(uuid.uuid4())
            parametersList.append([expId[0], repId])
            exp = exp + 1
    print('experiments type : ' + str(len(expIds)))
    print('experiments todo : ' + str(len(expIds) * replicate))
    print('experiments left : ' + str(exp))
    connParam.close()
    connSim.close()
    if threaded:
        pool = multiprocessing.Pool(processes=nThreads)
        pool.map_async(startSimulation, parametersList)
        pool.close()
        pool.join()
    else:
        # Fixed typo: 'parmater' -> 'parameter'; also dropped the unused
        # 'running' local from the original.
        for parameter in parametersList:
            startSimulation(parameter)
if __name__ == "__main__":
    # Entry point: schedule and run all outstanding replicates.
    main()
|
# repo: ninatu/anomaly_detection (10-100 stars)
"""
Extract tumor patches from tumor slides
"""
import openslide
import os
from tqdm import tqdm
import pandas as pd
import numpy as np
import skimage.io
import skimage.transform
import argparse
import sys
sys.path.append('./')
from utils import get_tissue_mask, preprocess_tumor_mask_decrease_thrice, \
sample_tissue_pixels, PATCH_NAME_FORMAT, TUMOR_LABEL
# maximum number of patches extracted from one WSI
MAX_PATCHES_PER_IMAGE = 50
def sample_patches_from_tumor_image(image_filename, image_path, annotation_path, max_count, output_dir):
    """Sample up to max_count tumor patches from one WSI and save them as images.

    Args:
        image_filename: bare file name of the slide (used to name the patches).
        image_path: path to the WSI, readable by openslide.
        annotation_path: path to the rasterized tumor-annotation mask (image file).
        max_count: maximum number of patches to extract.
        output_dir: directory the patch images are written into.
    """
    slide = openslide.OpenSlide(image_path)
    # Locate tumor tissue on a heavily downscaled thumbnail.
    level = 8
    # Derived from level instead of the duplicated literal int(2 ** 8), so the
    # two can no longer drift apart. NOTE(review): assumes a dyadic pyramid
    # (downsample factor 2**level), as the original code already did.
    down_scale = 2 ** level
    slide_w, slide_h = slide.dimensions
    down_img = slide.read_region((0, 0), level, (slide_w // down_scale, slide_h // down_scale)).convert('RGB')
    tissue_mask = get_tissue_mask(down_img)
    tissue_mask = preprocess_tumor_mask_decrease_thrice(tissue_mask)
    annotation_mask = skimage.io.imread(annotation_path)
    # The tissue mask was shrunk 3x above; shrink the annotation mask to match.
    annotation_mask = skimage.transform.downscale_local_mean(annotation_mask, (3, 3)).astype(np.uint8)
    tumor_mask = tissue_mask * (annotation_mask == TUMOR_LABEL) > 0
    tumor_coords = sample_tissue_pixels(tumor_mask, max_count)
    for w, h in tumor_coords:
        # Map thumbnail coordinates back to level-0 pixels (x3 compensates
        # the thrice-decreased masks).
        shift_w, shift_h = w * down_scale * 3, h * down_scale * 3
        img = slide.read_region((shift_w, shift_h), 0, (3 * down_scale, 3 * down_scale)).convert('RGB')
        image_name = os.path.splitext(image_filename)[0]
        name = PATCH_NAME_FORMAT.format(image_name=image_name,
                                        crop_type='tumor',
                                        x=shift_h,
                                        y=shift_w,
                                        w=3 * down_scale,
                                        h=3 * down_scale)
        out_path = os.path.join(output_dir, name)
        img.save(out_path)
def process_all_images(image_dir, masks_dir, image_filenames, output_dir):
    """Extract tumor patches from every WSI named in image_filenames."""
    for filename in tqdm(image_filenames):
        wsi_path = os.path.join(image_dir, filename)
        mask_name = os.path.splitext(filename)[0] + '.png'
        mask_path = os.path.join(masks_dir, mask_name)
        sample_patches_from_tumor_image(filename, wsi_path, mask_path,
                                        MAX_PATCHES_PER_IMAGE, output_dir)
if __name__ == "__main__":
    # CLI: sample tumor patches from the CAMELYON16 train and test WSI sets.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--tumor_train_dir",
                        type=str,
                        default='/data/camelyon16_original/training/tumor',
                        help='path to train tumor WSI')
    parser.add_argument("--test_dir",
                        type=str,
                        default='/data/camelyon16_original/testing/images',
                        help='path to test WSI')
    parser.add_argument("--test_reference_path",
                        type=str,
                        default='/data/camelyon16_original/testing/reference.csv',
                        help='path to references.csv file containing information (normal/tumor) about test samples')
    parser.add_argument("--train_masks_dir",
                        type=str,
                        default='/data/camelyon16/masks/train',
                        help='directory with tumor masks')
    parser.add_argument("--test_masks_dir",
                        type=str,
                        default='/data/camelyon16/masks/test',
                        help='directory with tumor masks')
    parser.add_argument("--output_tumor_patches_train_dir",
                        type=str,
                        default='/data/camelyon16/train/tumor_patches_x40',
                        help='directory for saving train tumor patches')
    parser.add_argument("--output_tumor_patches_test_dir",
                        type=str,
                        default='/data/camelyon16/test/tumor_patches_x40',
                        help='directory for saving test tumor patches')
    args = parser.parse_args()
    # Process train WSI's
    print("Starting to sample 768x768 patches from the train tumor images ... ")
    train_filenames = os.listdir(args.tumor_train_dir)
    os.makedirs(args.output_tumor_patches_train_dir, exist_ok=True)
    # Fixed stray space in the attribute access ('args. train_masks_dir').
    process_all_images(args.tumor_train_dir, args.train_masks_dir, train_filenames, args.output_tumor_patches_train_dir)
    print("Done!")
    # Process test WSI's
    print("Starting to sample 768x768 patches from test tumor images ... ")
    test_info = pd.read_csv(args.test_reference_path, usecols=[0, 1], names=['filename', 'type'])
    test_tumor_filenames = test_info[test_info['type'] == 'Tumor']['filename'].tolist()
    test_tumor_filenames = [filename + '.tif' for filename in test_tumor_filenames]
    # Skip test_114: per the dataset README it "does not have exhaustive annotations".
    # Guarded so a reference file without that row no longer crashes with ValueError.
    if 'test_114.tif' in test_tumor_filenames:
        test_tumor_filenames.remove('test_114.tif')
    os.makedirs(args.output_tumor_patches_test_dir, exist_ok=True)
    process_all_images(args.test_dir, args.test_masks_dir, test_tumor_filenames, args.output_tumor_patches_test_dir)
    print('Done!')
|
# repo: pwqbot/eoj3
import logging
import os
import subprocess
import traceback
from django.conf import settings
from django.shortcuts import redirect
from django.urls import reverse
from django.views import View
from django.views.generic import ListView
from django_q.tasks import async_task
from contest.models import ContestProblemPlag
from polygon.contest.views import PolygonContestMixin
from submission.util import SubmissionStatus
from utils import random_string
logger = logging.getLogger(__name__)
class JPlagManager:
    """Prepares submissions and runs JPlag for one contest-problem plagiarism check."""

    def __init__(self, plag: ContestProblemPlag):
        self.plag = plag
        self.contest = plag.contest
        base = os.path.join(settings.GENERATE_DIR, "jplag", str(self.contest.pk), plag.fingerprint)
        self.workspace = base
        self.code_dir = os.path.join(base, "code")
        self.result_dir = os.path.join(base, "result")
        for directory in (self.workspace, self.code_dir, self.result_dir):
            os.makedirs(directory, exist_ok=True)

    def code_ready(self):
        """Dump every accepted submission for the target problem into code_dir."""
        problem_id = self.contest.contestproblem_set.get(identifier=self.plag.identifier).problem_id
        submissions = self.contest.submission_set.filter(status=SubmissionStatus.ACCEPTED, problem_id=problem_id)
        if self.contest.contest_type != 1:
            submissions = submissions.filter(contest_time__isnull=False)
        for submission in submissions:
            filename = "%d_%d.cpp" % (submission.pk, submission.author_id)
            with open(os.path.join(self.code_dir, filename), "w", encoding="utf-8") as f:
                f.write(submission.code)

    def run(self):
        """Invoke the JPlag jar; record status 0 on success, 1 on failure."""
        stdout_path = os.path.join(self.result_dir, "stdout")
        stderr_path = os.path.join(self.result_dir, "stderr")
        with open(stdout_path, "w") as stdout_file, \
                open(stderr_path, "w") as stderr_file:
            retcode = subprocess.call(
                ["java", "-jar", os.path.join(settings.BASE_DIR, "polygon/assets/jplag-2.11.9_SNAPSHOT.jar"),
                 "-vq", "-m", str(self.plag.keep_match), "-l", self.plag.language,
                 "-r", self.result_dir, self.code_dir], timeout=300,
                stdout=stdout_file, stderr=stderr_file)
        self.plag.status = 1 if retcode else 0
        self.plag.save()
def start_jplag(plags):
    """Run the JPlag pipeline for each pending check.

    A failure on one check is recorded (status=1, traceback appended to its
    stderr file) and the loop continues with the next one.
    """
    for plag in plags:
        manager = JPlagManager(plag)
        try:
            manager.code_ready()
            manager.run()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # can still stop the worker; the failure is logged per check.
            plag.status = 1
            with open(os.path.join(manager.result_dir, "stderr"), "a", encoding="utf-8") as f:
                print(traceback.format_exc(), file=f)
            logger.error(traceback.format_exc())
            plag.save()
class JPlagHistoryView(PolygonContestMixin, ListView):
    """List past plagiarism checks for a contest, newest first."""
    template_name = 'polygon/contest/anticheat.jinja2'
    context_object_name = 'plag_list'

    def get_queryset(self):
        # Most recently created checks first.
        return self.contest.contestproblemplag_set.all().order_by("-create_time")
class JPlagCreateView(PolygonContestMixin, View):
    """Create one plagiarism-check record per contest problem and queue the JPlag run."""

    def post(self, request, *args, **kwargs):
        # NOTE(review): the POST value arrives as a string and is stored as
        # keep_match unchanged -- presumably the model field coerces it to an
        # int; confirm. An empty string falls back to 1000 below.
        keep = request.POST.get("answer", 1000)
        if not keep:
            keep = 1000
        plags = []
        for problem in self.contest.contest_problem_list:
            # status=-1 marks "queued"; the worker sets 0 (ok) or 1 (failed).
            plags.append(ContestProblemPlag.objects.create(contest=self.contest, fingerprint=random_string(64),
                                                           identifier=problem.identifier, keep_match=keep, status=-1))
        # Run asynchronously via django-q so the request returns immediately.
        async_task(start_jplag, plags)
        return redirect(reverse("polygon:contest_plag", kwargs={"pk": self.contest.pk}))
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.active_directory_entry import ActiveDirectoryEntry
from cohesity_management_sdk.models.list_centrify_zone import ListCentrifyZone
from cohesity_management_sdk.models.domain_controllers import DomainControllers
from cohesity_management_sdk.models.active_directory_principal import ActiveDirectoryPrincipal
from cohesity_management_sdk.models.added_active_directory_principal import AddedActiveDirectoryPrincipal
from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException
class ActiveDirectoryController(BaseController):
    """A Controller to access Endpoints in the cohesity_management_sdk API."""

    def __init__(self, config=None, client=None, call_back=None):
        """Store the SDK configuration and set up a module-named logger."""
        super(ActiveDirectoryController, self).__init__(client, call_back)
        self.logger = logging.getLogger(__name__)
        # Provides the base URI and auth material used by every endpoint method.
        self.config = config
def delete_active_directory_entry(self, body):
"""Does a DELETE request to /public/activeDirectory.
Deletes the join of the Cohesity Cluster to the specified
Active Directory domain. After the deletion, the Cohesity Cluster
no longer has access to the principals on the Active Directory.
For example, you can no longer log in to the Cohesity Cluster
with a user defined in a principal group of the Active Directory
domain.
Args:
body (ActiveDirectoryEntry): Request to delete a join with an
Active Directory.
Returns:
void: Response from the API. No Content
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('delete_active_directory_entry called.')
# Validate required parameters
self.logger.info(
'Validating required parameters for delete_active_directory_entry.'
)
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info(
'Preparing query URL for delete_active_directory_entry.')
_url_path = '/public/activeDirectory'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info(
'Preparing headers for delete_active_directory_entry.')
_headers = {'content-type': 'application/json; charset=utf-8'}
# Prepare and execute request
self.logger.info(
'Preparing and executing request for delete_active_directory_entry.'
)
_request = self.http_client.delete(
_query_url,
headers=_headers,
parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request, self.config)
_context = self.execute_request(
_request, name='delete_active_directory_entry')
# Endpoint and global error handling using HTTP status codes.
self.logger.info(
'Validating response for delete_active_directory_entry.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def get_active_directory_entry(self,
domains=None,
tenant_ids=None,
all_under_hierarchy=None):
"""Does a GET request to /public/activeDirectory.
After a Cohesity Cluster has been joined to an Active Directory
domain,
the users and groups in the domain can be authenticated on the
Cohesity Cluster
using their Active Directory credentials.
NOTE: The userName and password fields are not populated by this
operation.
Args:
domains (list of string, optional): Specifies the domains to fetch
active directory entries.
tenant_ids (list of string, optional): TenantIds contains ids of
the tenants for which objects are to be returned.
all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
if objects of all the tenants under the hierarchy of the
logged in user's organization should be returned.
Returns:
list of ActiveDirectoryEntry: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_active_directory_entry called.')
# Prepare query URL
self.logger.info(
'Preparing query URL for get_active_directory_entry.')
_url_path = '/public/activeDirectory'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'domains': domains,
'tenantIds': tenant_ids,
'allUnderHierarchy': all_under_hierarchy
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder, _query_parameters,
Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info(
'Preparing headers for get_active_directory_entry.')
_headers = {'accept': 'application/json'}
# Prepare and execute request
self.logger.info(
'Preparing and executing request for get_active_directory_entry.'
)
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request, self.config)
_context = self.execute_request(_request,
name='get_active_directory_entry')
# Endpoint and global error handling using HTTP status codes.
self.logger.info(
'Validating response for get_active_directory_entry.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(
_context.response.raw_body,
ActiveDirectoryEntry.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def create_active_directory_entry(self, body):
"""Does a POST request to /public/activeDirectory.
After a Cohesity Cluster has been joined to an Active Directory
domain,
the users and groups in the domain can be authenticated on the
Cohesity Cluster
using their Active Directory credentials.
Args:
body (CreateActiveDirectoryEntryParams): Request to join an Active
Directory.
Returns:
ActiveDirectoryEntry: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_active_directory_entry called.')
# Validate required parameters
self.logger.info(
'Validating required parameters for create_active_directory_entry.'
)
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info(
'Preparing query URL for create_active_directory_entry.')
_url_path = '/public/activeDirectory'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info(
'Preparing headers for create_active_directory_entry.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info(
'Preparing and executing request for create_active_directory_entry.'
)
_request = self.http_client.post(
_query_url,
headers=_headers,
parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request, self.config)
_context = self.execute_request(
_request, name='create_active_directory_entry')
# Endpoint and global error handling using HTTP status codes.
self.logger.info(
'Validating response for create_active_directory_entry.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(
_context.response.raw_body,
ActiveDirectoryEntry.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def list_centrify_zones(self, domain_name=None):
"""Does a GET request to /public/activeDirectory/centrifyZones.
Fetches the list centrify zones of an active directory domain.
Args:
domain_name (string, optional): Specifies the fully qualified
domain name (FQDN) of an Active Directory.
Returns:
list of ListCentrifyZone: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('list_centrify_zones called.')
# Prepare query URL
self.logger.info('Preparing query URL for list_centrify_zones.')
_url_path = '/public/activeDirectory/centrifyZones'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {'domainName': domain_name}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder, _query_parameters,
Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for list_centrify_zones.')
_headers = {'accept': 'application/json'}
# Prepare and execute request
self.logger.info(
'Preparing and executing request for list_centrify_zones.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request, self.config)
_context = self.execute_request(_request,
name='list_centrify_zones')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for list_centrify_zones.')
if _context.response.status_code == 0:
raise RequestErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body,
ListCentrifyZone.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def get_active_directory_domain_controllers(self, domain_name=None):
    """List the domain controllers for a domain.

    Does a GET request to /public/activeDirectory/domainControllers.

    Args:
        domain_name (string, optional): Specifies the domain name

    Returns:
        DomainControllers: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info('get_active_directory_domain_controllers called.')

        # Assemble the fully-qualified query URL in one expression.
        self.logger.info(
            'Preparing query URL for get_active_directory_domain_controllers.'
        )
        _query_url = APIHelper.clean_url(
            APIHelper.append_url_with_query_parameters(
                self.config.get_base_uri() + '/public/activeDirectory/domainControllers',
                {'domainName': domain_name},
                Configuration.array_serialization))

        # Issue the authenticated GET request.
        self.logger.info(
            'Preparing headers for get_active_directory_domain_controllers.'
        )
        _headers = {'accept': 'application/json'}
        self.logger.info(
            'Preparing and executing request for get_active_directory_domain_controllers.'
        )
        _request = self.http_client.get(_query_url, headers=_headers)
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='get_active_directory_domain_controllers')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for get_active_directory_domain_controllers.'
        )
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into the model type.
        return APIHelper.json_deserialize(
            _context.response.raw_body, DomainControllers.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def search_active_directory_principals(self,
                                       domain=None,
                                       object_class=None,
                                       search=None,
                                       sids=None,
                                       include_computers=None):
    """Search for Active Directory principals.

    Does a GET request to /public/activeDirectory/principals. Optionally
    limit the search results by specifying security identifiers (SIDs), an
    object class (user or group) or a substring. You can specify SIDs or a
    substring but not both.

    Args:
        domain (string, optional): Domain name of the principals to search
            (an Active Directory domain joined by the Cluster, one of its
            trusted domains, or the LOCAL domain). If not specified, all
            the domains are searched.
        object_class (ObjectClassSearchActiveDirectoryPrincipalsEnum,
            optional): Filter by a principal object class: 'kUser',
            'kGroup', 'kComputer' or 'kWellKnownPrincipal'. If not
            specified, both group and user principals are returned.
        search (string, optional): Filter by a name or sAMAccountName
            substring. Must not be combined with 'sids'.
        sids (list of string, optional): Filter by a list of security
            identifiers (SIDs) found in the specified domain. Must not be
            combined with 'search'.
        include_computers (bool, optional): Specifies if Computer/GMSA
            accounts need to be included in this search.

    Returns:
        list of ActiveDirectoryPrincipal: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info('search_active_directory_principals called.')

        # Assemble the fully-qualified query URL in one expression.
        self.logger.info(
            'Preparing query URL for search_active_directory_principals.')
        _query_url = APIHelper.clean_url(
            APIHelper.append_url_with_query_parameters(
                self.config.get_base_uri() + '/public/activeDirectory/principals',
                {
                    'domain': domain,
                    'objectClass': object_class,
                    'search': search,
                    'sids': sids,
                    'includeComputers': include_computers
                },
                Configuration.array_serialization))

        # Issue the authenticated GET request.
        self.logger.info(
            'Preparing headers for search_active_directory_principals.')
        _headers = {'accept': 'application/json'}
        self.logger.info(
            'Preparing and executing request for search_active_directory_principals.'
        )
        _request = self.http_client.get(_query_url, headers=_headers)
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='search_active_directory_principals')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for search_active_directory_principals.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into model objects.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            ActiveDirectoryPrincipal.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def add_active_directory_principals(self, body=None):
    """Add Active Directory groups or users to the Cohesity Cluster.

    Does a POST request to /public/activeDirectory/principals.

    After a group or user has been added to a Cohesity Cluster, the
    referenced Active Directory principal can be used by the Cohesity
    Cluster. This operation also maps Cohesity roles with a group or user,
    defining the privileges allowed on the Cohesity Cluster for the group
    or user. For example, if a 'management' group is created on the
    Cohesity Cluster for the Active Directory 'management' principal group
    and is associated with the Cohesity 'View' role, all users in that
    group can log in to the Cohesity Dashboard but only have view-only
    privileges.

    NOTE: Local Cohesity users and groups cannot be created by this
    operation; they are created directly in the default LOCAL domain and
    have no associated Active Directory principals.

    Args:
        body (list of ActiveDirectoryPrincipalsAddParameters, optional):
            Request to add groups or users to the Cohesity Cluster.

    Returns:
        list of AddedActiveDirectoryPrincipal: Response from the API.
            Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info('add_active_directory_principals called.')

        # Assemble the fully-qualified request URL.
        self.logger.info(
            'Preparing query URL for add_active_directory_principals.')
        _query_url = APIHelper.clean_url(
            self.config.get_base_uri() + '/public/activeDirectory/principals')

        # Issue the authenticated POST with a JSON-encoded body.
        self.logger.info(
            'Preparing headers for add_active_directory_principals.')
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }
        self.logger.info(
            'Preparing and executing request for add_active_directory_principals.'
        )
        _request = self.http_client.post(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='add_active_directory_principals')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for add_active_directory_principals.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into model objects.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            AddedActiveDirectoryPrincipal.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def create_enable_trusted_domain_discovery(self, body, name):
    """Update the state of trusted domains discovery.

    Does a POST request to
    /public/activeDirectory/{name}/enableTrustedDomainState.

    Args:
        body (UpdateTrustedDomainEnableParams): Request to update enable
            trusted domains state of an Active Directory.
        name (string): Specifies the Active Directory Domain Name.

    Returns:
        ActiveDirectoryEntry: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info('create_enable_trusted_domain_discovery called.')

        # Both parameters are mandatory for this endpoint.
        self.logger.info(
            'Validating required parameters for create_enable_trusted_domain_discovery.'
        )
        self.validate_parameters(body=body, name=name)

        # Substitute the domain name into the URL template.
        self.logger.info(
            'Preparing query URL for create_enable_trusted_domain_discovery.'
        )
        _url_path = APIHelper.append_url_with_template_parameters(
            '/public/activeDirectory/{name}/enableTrustedDomainState',
            {'name': name})
        _query_url = APIHelper.clean_url(self.config.get_base_uri() + _url_path)

        # Issue the authenticated POST with a JSON-encoded body.
        self.logger.info(
            'Preparing headers for create_enable_trusted_domain_discovery.'
        )
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }
        self.logger.info(
            'Preparing and executing request for create_enable_trusted_domain_discovery.'
        )
        _request = self.http_client.post(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='create_enable_trusted_domain_discovery')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for create_enable_trusted_domain_discovery.'
        )
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into the model type.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            ActiveDirectoryEntry.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def update_active_directory_id_mapping(self, body, name):
    """Update the user id mapping info of an Active Directory.

    Does a PUT request to /public/activeDirectory/{name}/idMappingInfo.

    Args:
        body (IdMappingInfo): Request to update user id mapping of an
            Active Directory.
        name (string): Specifies the Active Directory Domain Name.

    Returns:
        ActiveDirectoryEntry: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info('update_active_directory_id_mapping called.')

        # Both parameters are mandatory for this endpoint.
        self.logger.info(
            'Validating required parameters for update_active_directory_id_mapping.'
        )
        self.validate_parameters(body=body, name=name)

        # Substitute the domain name into the URL template.
        self.logger.info(
            'Preparing query URL for update_active_directory_id_mapping.')
        _url_path = APIHelper.append_url_with_template_parameters(
            '/public/activeDirectory/{name}/idMappingInfo',
            {'name': name})
        _query_url = APIHelper.clean_url(self.config.get_base_uri() + _url_path)

        # Issue the authenticated PUT with a JSON-encoded body.
        self.logger.info(
            'Preparing headers for update_active_directory_id_mapping.')
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }
        self.logger.info(
            'Preparing and executing request for update_active_directory_id_mapping.'
        )
        _request = self.http_client.put(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='update_active_directory_id_mapping')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for update_active_directory_id_mapping.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into the model type.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            ActiveDirectoryEntry.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def update_active_directory_ignored_trusted_domains(self, body, name):
    """Update the trusted domains ignored during trusted domain discovery.

    Does a PUT request to
    /public/activeDirectory/{name}/ignoredTrustedDomains.

    Args:
        body (UpdateIgnoredTrustedDomainsParams): Request to update the
            list of ignored trusted domains of an AD.
        name (string): Specifies the Active Directory Domain Name.

    Returns:
        ActiveDirectoryEntry: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info(
            'update_active_directory_ignored_trusted_domains called.')

        # Both parameters are mandatory for this endpoint.
        self.logger.info(
            'Validating required parameters for update_active_directory_ignored_trusted_domains.'
        )
        self.validate_parameters(body=body, name=name)

        # Substitute the domain name into the URL template.
        self.logger.info(
            'Preparing query URL for update_active_directory_ignored_trusted_domains.'
        )
        _url_path = APIHelper.append_url_with_template_parameters(
            '/public/activeDirectory/{name}/ignoredTrustedDomains',
            {'name': name})
        _query_url = APIHelper.clean_url(self.config.get_base_uri() + _url_path)

        # Issue the authenticated PUT with a JSON-encoded body.
        self.logger.info(
            'Preparing headers for update_active_directory_ignored_trusted_domains.'
        )
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }
        self.logger.info(
            'Preparing and executing request for update_active_directory_ignored_trusted_domains.'
        )
        _request = self.http_client.put(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request,
            name='update_active_directory_ignored_trusted_domains')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for update_active_directory_ignored_trusted_domains.'
        )
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into the model type.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            ActiveDirectoryEntry.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def update_active_directory_ldap_provider(self, body, name):
    """Update the LDAP provider Id for an Active Directory domain.

    Does a PUT request to /public/activeDirectory/{name}/ldapProvider.

    Args:
        body (UpdateLdapProviderParams): Request to update the LDAP
            provider info.
        name (string): Specifies the Active Directory Domain Name.

    Returns:
        ActiveDirectoryEntry: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info('update_active_directory_ldap_provider called.')

        # Both parameters are mandatory for this endpoint.
        self.logger.info(
            'Validating required parameters for update_active_directory_ldap_provider.'
        )
        self.validate_parameters(body=body, name=name)

        # Substitute the domain name into the URL template.
        self.logger.info(
            'Preparing query URL for update_active_directory_ldap_provider.'
        )
        _url_path = APIHelper.append_url_with_template_parameters(
            '/public/activeDirectory/{name}/ldapProvider',
            {'name': name})
        _query_url = APIHelper.clean_url(self.config.get_base_uri() + _url_path)

        # Issue the authenticated PUT with a JSON-encoded body.
        self.logger.info(
            'Preparing headers for update_active_directory_ldap_provider.')
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }
        self.logger.info(
            'Preparing and executing request for update_active_directory_ldap_provider.'
        )
        _request = self.http_client.put(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='update_active_directory_ldap_provider')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for update_active_directory_ldap_provider.'
        )
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into the model type.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            ActiveDirectoryEntry.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def update_active_directory_machine_accounts(self, body, name):
    """Update the machine accounts of an Active Directory.

    Does a POST request to /public/activeDirectory/{name}/machineAccounts.

    Args:
        body (UpdateMachineAccountsParams): Request to update machine
            accounts of an Active Directory.
        name (string): Specifies the Active Directory Domain Name.

    Returns:
        ActiveDirectoryEntry: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info(
            'update_active_directory_machine_accounts called.')

        # Both parameters are mandatory for this endpoint.
        self.logger.info(
            'Validating required parameters for update_active_directory_machine_accounts.'
        )
        self.validate_parameters(body=body, name=name)

        # Substitute the domain name into the URL template.
        self.logger.info(
            'Preparing query URL for update_active_directory_machine_accounts.'
        )
        _url_path = APIHelper.append_url_with_template_parameters(
            '/public/activeDirectory/{name}/machineAccounts',
            {'name': name})
        _query_url = APIHelper.clean_url(self.config.get_base_uri() + _url_path)

        # Issue the authenticated POST with a JSON-encoded body.
        self.logger.info(
            'Preparing headers for update_active_directory_machine_accounts.'
        )
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }
        self.logger.info(
            'Preparing and executing request for update_active_directory_machine_accounts.'
        )
        _request = self.http_client.post(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='update_active_directory_machine_accounts')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for update_active_directory_machine_accounts.'
        )
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into the model type.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            ActiveDirectoryEntry.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def update_preferred_domain_controllers(self, body, name):
    """Update the preferred domain controllers of an Active Directory.

    Does a PUT request to
    /public/activeDirectory/{name}/preferredDomainControllers.

    Args:
        body (list of PreferredDomainController): Request to update
            preferred domain controllers of an Active Directory.
        name (string): Specifies the Active Directory Domain Name.

    Returns:
        ActiveDirectoryEntry: Response from the API. Success

    Raises:
        APIException: If the remote API reports an error; carries the HTTP
            status code, an error message, and the response body.

    """
    try:
        self.logger.info('update_preferred_domain_controllers called.')

        # Both parameters are mandatory for this endpoint.
        self.logger.info(
            'Validating required parameters for update_preferred_domain_controllers.'
        )
        self.validate_parameters(body=body, name=name)

        # Substitute the domain name into the URL template.
        self.logger.info(
            'Preparing query URL for update_preferred_domain_controllers.')
        _url_path = APIHelper.append_url_with_template_parameters(
            '/public/activeDirectory/{name}/preferredDomainControllers',
            {'name': name})
        _query_url = APIHelper.clean_url(self.config.get_base_uri() + _url_path)

        # Issue the authenticated PUT with a JSON-encoded body.
        self.logger.info(
            'Preparing headers for update_preferred_domain_controllers.')
        _headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8'
        }
        self.logger.info(
            'Preparing and executing request for update_preferred_domain_controllers.'
        )
        _request = self.http_client.put(
            _query_url,
            headers=_headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(_request, self.config)
        _context = self.execute_request(
            _request, name='update_preferred_domain_controllers')

        # Surface transport-level failures, then run the shared validator.
        self.logger.info(
            'Validating response for update_preferred_domain_controllers.')
        if _context.response.status_code == 0:
            raise RequestErrorErrorException('Error', _context)
        self.validate_response(_context)

        # Deserialize the JSON payload into the model type.
        return APIHelper.json_deserialize(
            _context.response.raw_body,
            ActiveDirectoryEntry.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
# <reponame>a-vishar/azure-cli
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=too-many-lines
import itertools
from enum import Enum
from azure.mgmt.sql.models import (
Database,
ElasticPool,
ElasticPoolPerDatabaseSettings,
ImportExtensionRequest,
ExportRequest,
ManagedDatabase,
ManagedInstance,
ManagedInstanceAdministrator,
Server,
ServerAzureADAdministrator,
Sku,
AuthenticationType,
BlobAuditingPolicyState,
CatalogCollationType,
CreateMode,
DatabaseLicenseType,
ElasticPoolLicenseType,
SampleName,
SecurityAlertPolicyState,
SecurityAlertPolicyEmailAccountAdmins,
ServerConnectionType,
ServerKeyType,
StorageKeyType,
TransparentDataEncryptionStatus
)
from azure.cli.core.commands.parameters import (
get_three_state_flag,
get_enum_type,
get_resource_name_completion_list,
get_location_type,
tags_type,
resource_group_name_type
)
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group
)
from knack.arguments import CLIArgumentType, ignore_type
from .custom import (
ClientAuthenticationType,
ClientType,
ComputeModelType,
DatabaseCapabilitiesAdditionalDetails,
ElasticPoolCapabilitiesAdditionalDetails,
FailoverPolicyType
)
from ._validators import (
create_args_for_complex_type,
validate_managed_instance_storage_size,
validate_subnet
)
#####
# SizeWithUnitConverter - consider moving to common code (azure.cli.core.commands.parameters)
#####
class SizeWithUnitConverter(): # pylint: disable=too-few-public-methods
def __init__(
self,
unit='kB',
result_type=int,
unit_map=None):
self.unit = unit
self.result_type = result_type
self.unit_map = unit_map or dict(B=1, kB=1024, MB=1024 * 1024, GB=1024 * 1024 * 1024,
TB=1024 * 1024 * 1024 * 1024)
def __call__(self, value):
numeric_part = ''.join(itertools.takewhile(str.isdigit, value))
unit_part = value[len(numeric_part):]
try:
uvals = (self.unit_map[unit_part] if unit_part else 1) / \
(self.unit_map[self.unit] if self.unit else 1)
return self.result_type(uvals * self.result_type(numeric_part))
except KeyError:
raise ValueError()
def __repr__(self):
return 'Size (in {}) - valid units are {}.'.format(
self.unit,
', '.join(sorted(self.unit_map, key=self.unit_map.__getitem__)))
#####
# Reusable param type definitions
#####

# Argument-group titles shown in `az ... --help` output.
sku_arg_group = 'Performance Level'
sku_component_arg_group = 'Performance Level (components)'
serverless_arg_group = 'Serverless offering'

# Help-text fragments reused by several parameter definitions below.
server_configure_help = 'You can configure the default using `az configure --defaults sql-server=<name>`'
time_format_help = 'Time should be in following format: "YYYY-MM-DDTHH:MM:SS".'
def get_location_type_with_default_from_resource_group(cli_ctx):
    """Location argument type that is optional and, when omitted, is
    defaulted from the resource group's location by the validator."""
    location_arg_type = get_location_type(cli_ctx)
    return CLIArgumentType(
        arg_type=location_arg_type,
        required=False,
        validator=get_default_location_from_resource_group)
# --server/-s: resolves against Microsoft.SQL/servers and honors the
# `sql-server` configured default.
server_param_type = CLIArgumentType(
    options_list=['--server', '-s'],
    configured_default='sql-server',
    help='Name of the Azure SQL server. ' + server_configure_help,
    completer=get_resource_name_completion_list('Microsoft.SQL/servers'),
    # Allow --ids command line argument. id_part=name is 1st name in uri
    id_part='name')

available_param_type = CLIArgumentType(
    options_list=['--available', '-a'],
    help='If specified, show only results that are available in the specified region.')

# Individual sku components (tier/capacity/family); grouped together in help.
tier_param_type = CLIArgumentType(
    arg_group=sku_component_arg_group,
    options_list=['--tier', '--edition', '-e'])

capacity_param_type = CLIArgumentType(
    arg_group=sku_component_arg_group,
    options_list=['--capacity', '-c'])

# Same as capacity but also accepts the legacy --dtu spelling.
capacity_or_dtu_param_type = CLIArgumentType(
    arg_group=sku_component_arg_group,
    options_list=['--capacity', '-c', '--dtu'])

family_param_type = CLIArgumentType(
    arg_group=sku_component_arg_group,
    options_list=['--family', '-f'])

elastic_pool_id_param_type = CLIArgumentType(
    arg_group=sku_arg_group,
    options_list=['--elastic-pool'])

# Serverless-offering parameters.
compute_model_param_type = CLIArgumentType(
    arg_group=serverless_arg_group,
    options_list=['--compute-model'],
    help='The compute model of the database.',
    arg_type=get_enum_type(ComputeModelType))

auto_pause_delay_param_type = CLIArgumentType(
    arg_group=serverless_arg_group,
    options_list=['--auto-pause-delay'],
    help='Time in minutes after which database is automatically paused. '
    'A value of -1 means that automatic pause is disabled.')

min_capacity_param_type = CLIArgumentType(
    arg_group=serverless_arg_group,
    options_list=['--min-capacity'],
    help='Minimal capacity that database will always have allocated, if not paused')

# Accepts e.g. '250GB' or a raw byte count; converted to an integer in bytes.
max_size_bytes_param_type = CLIArgumentType(
    options_list=['--max-size'],
    type=SizeWithUnitConverter('B', result_type=int),
    help='The max storage size. If no unit is specified, defaults to bytes (B).')

zone_redundant_param_type = CLIArgumentType(
    options_list=['--zone-redundant', '-z'],
    help='Specifies whether to enable zone redundancy',
    arg_type=get_three_state_flag())

managed_instance_param_type = CLIArgumentType(
    options_list=['--managed-instance', '--mi'],
    help='Name of the Azure SQL managed instance.')

kid_param_type = CLIArgumentType(
    options_list=['--kid', '-k'],
    help='The Azure Key Vault key identifier of the server key. An example key identifier is '
    '"https://YourVaultName.vault.azure.net/keys/YourKeyName/01234567890123456789012345678901"')

server_key_type_param_type = CLIArgumentType(
    options_list=['--server-key-type', '-t'],
    help='The type of the server key',
    arg_type=get_enum_type(ServerKeyType))

# Accepts e.g. '32GB' or a raw GB count; converted to an integer number of GB
# (note the unit_map rescales byte-denominated suffixes into GB).
storage_param_type = CLIArgumentType(
    options_list=['--storage'],
    type=SizeWithUnitConverter('GB', result_type=int, unit_map=dict(B=1.0 / (1024 * 1024 * 1024),
                                                                    kB=1.0 / (1024 * 1024),
                                                                    MB=1.0 / 1024,
                                                                    GB=1,
                                                                    TB=1024)),
    help='The storage size. If no unit is specified, defaults to gigabytes (GB).',
    validator=validate_managed_instance_storage_size)

grace_period_param_type = CLIArgumentType(
    help='Interval in hours before automatic failover is initiated '
    'if an outage occurs on the primary server. '
    'This indicates that Azure SQL Database will not initiate '
    'automatic failover before the grace period expires. '
    'Please note that failover operation with --allow-data-loss option '
    'might cause data loss due to the nature of asynchronous synchronization.')

allow_data_loss_param_type = CLIArgumentType(
    help='Complete the failover even if doing so may result in data loss. '
    'This will allow the failover to proceed even if a primary database is unavailable.')

# Azure AD administrator identification.
aad_admin_login_param_type = CLIArgumentType(
    options_list=['--display-name', '-u'],
    help='Display name of the Azure AD administrator user or group.')

aad_admin_sid_param_type = CLIArgumentType(
    options_list=['--object-id', '-i'],
    help='The unique ID of the Azure AD administrator.')

read_scale_param_type = CLIArgumentType(
    options_list=['--read-scale'],
    help='If enabled, connections that have application intent set to readonly '
    'in their connection string may be routed to a readonly secondary replica. '
    'This property is only settable for Premium and Business Critical databases.',
    arg_type=get_enum_type(['Enabled', 'Disabled']))

read_replicas_param_type = CLIArgumentType(
    options_list=['--read-replicas'],
    type=int,
    help='The number of readonly replicas to provision for the database. '
    'Only settable for Hyperscale edition.')

# Example service objectives quoted in --service-objective help text.
db_service_objective_examples = 'Basic, S0, P1, GP_Gen4_1, BC_Gen5_2, GP_Gen5_S_8.'
dw_service_objective_examples = 'DW100, DW1000c'
###############################################
# sql db #
###############################################
class Engine(Enum):  # pylint: disable=too-few-public-methods
    """SQL RDBMS engine type."""
    # Regular Azure SQL database.
    db = 'db'
    # Azure SQL Data Warehouse.
    dw = 'dw'
def _configure_db_create_params(
        arg_ctx,
        engine,
        create_mode):
    """
    Configures params for db/dw create/update commands.

    The PUT database REST API has many parameters and many modes (`create_mode`) that control
    which parameters are valid. To make it easier for CLI users to get the param combinations
    correct, these create modes are separated into different commands (e.g.: create, copy,
    restore, etc).

    On top of that, some create modes and some params are not allowed if the database edition is
    DataWarehouse. For this reason, regular database commands are separated from datawarehouse
    commands (`db` vs `dw`.)

    As a result, the param combination matrix is a little complicated. This function configures
    which params are ignored for a PUT database command based on a command's SQL engine type and
    create mode.

    arg_ctx: the argument context being configured (azure-cli argument context)
    engine: Engine enum value (e.g. `db`, `dw`)
    create_mode: Valid CreateMode enum value (e.g. `default`, `copy`, etc)
    """
    # DW does not support all create modes. Check that engine and create_mode are consistent.
    if engine == Engine.dw and create_mode not in [
            CreateMode.default,
            CreateMode.point_in_time_restore,
            CreateMode.restore]:
        raise ValueError('Engine {} does not support create mode {}'.format(engine, create_mode))

    # Create args that will be used to build up the Database object
    create_args_for_complex_type(
        arg_ctx, 'parameters', Database, [
            'catalog_collation',
            'collation',
            'elastic_pool_id',
            'license_type',
            'max_size_bytes',
            'name',
            'restore_point_in_time',
            'sample_name',
            'sku',
            'source_database_deletion_date',
            'tags',
            'zone_redundant',
            'auto_pause_delay',
            'min_capacity',
            'compute_model',
            'read_scale',
            'read_replica_count'
        ])

    # Create args that will be used to build up the Database's Sku object
    create_args_for_complex_type(
        arg_ctx, 'sku', Sku, [
            'capacity',
            'family',
            'name',
            'tier',
        ])

    arg_ctx.argument('name',  # Note: this is sku name, not database name
                     options_list=['--service-objective'],
                     arg_group=sku_arg_group,
                     required=False,
                     help='The service objective for the new database. For example: ' +
                     (db_service_objective_examples if engine == Engine.db else dw_service_objective_examples))

    arg_ctx.argument('elastic_pool_id',
                     arg_type=elastic_pool_id_param_type,
                     help='The name or resource id of the elastic pool to create the database in.')

    arg_ctx.argument('compute_model',
                     arg_type=compute_model_param_type)

    arg_ctx.argument('auto_pause_delay',
                     arg_type=auto_pause_delay_param_type)

    arg_ctx.argument('min_capacity',
                     arg_type=min_capacity_param_type)

    arg_ctx.argument('read_scale',
                     arg_type=read_scale_param_type)

    # NOTE(review): the Database complex type above lists 'read_replica_count'
    # while the argument configured here is 'read_replicas' — presumably
    # mapped in custom.py; verify the two names stay in sync.
    arg_ctx.argument('read_replicas',
                     arg_type=read_replicas_param_type)

    # Only applicable to default create mode. Also only applicable to db.
    if create_mode != CreateMode.default or engine != Engine.db:
        arg_ctx.ignore('sample_name')
        arg_ctx.ignore('catalog_collation')

    # Only applicable to point in time restore or deleted restore create mode.
    if create_mode not in [CreateMode.restore, CreateMode.point_in_time_restore]:
        arg_ctx.ignore('restore_point_in_time', 'source_database_deletion_date')

    # 'collation', 'tier', and 'max_size_bytes' are ignored (or rejected) when creating a copy
    # or secondary because their values are determined by the source db.
    if create_mode in [CreateMode.copy, CreateMode.secondary]:
        arg_ctx.ignore('collation', 'tier', 'max_size_bytes')

    # collation and max_size_bytes are ignored when restoring because their values are determined by
    # the source db.
    if create_mode in [CreateMode.restore, CreateMode.point_in_time_restore]:
        arg_ctx.ignore('collation', 'max_size_bytes')

    if engine == Engine.dw:
        # Elastic pool is only for SQL DB.
        arg_ctx.ignore('elastic_pool_id')

        # Edition is always 'DataWarehouse'
        arg_ctx.ignore('tier')

        # License types do not yet exist for DataWarehouse
        arg_ctx.ignore('license_type')

        # Family is not applicable to DataWarehouse
        arg_ctx.ignore('family')

        # Provisioning with capacity is not applicable to DataWarehouse
        arg_ctx.ignore('capacity')

        # Serverless offerings are not applicable to DataWarehouse
        arg_ctx.ignore('auto_pause_delay')
        arg_ctx.ignore('min_capacity')
        arg_ctx.ignore('compute_model')

        # ReadScale properties are not valid for DataWarehouse
        arg_ctx.ignore('read_scale')
        arg_ctx.ignore('read_replicas')
# pylint: disable=too-many-statements
def load_arguments(self, _):
    """Register command-line arguments for every `az sql` command group.

    Each `argument_context` block below attaches argument metadata (option
    names, help text, enum types, id parts) to the parameters of the
    corresponding command group. `create_args_for_complex_type` maps flat CLI
    arguments onto the listed fields of an SDK model object.
    """
    with self.argument_context('sql') as c:
        c.argument('location_name', arg_type=get_location_type(self.cli_ctx))
        c.argument('usage_name', options_list=['--usage', '-u'])
        c.argument('tags', arg_type=tags_type)
        c.argument('allow_data_loss',
                   help='If specified, the failover operation will allow data loss.')

    with self.argument_context('sql db') as c:
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('database_name',
                   options_list=['--name', '-n'],
                   help='Name of the Azure SQL Database.',
                   # Allow --ids command line argument. id_part=child_name_1 is 2nd name in uri
                   id_part='child_name_1')
        c.argument('max_size_bytes',
                   arg_type=max_size_bytes_param_type)

        creation_arg_group = 'Creation'

        c.argument('collation',
                   arg_group=creation_arg_group)
        c.argument('catalog_collation',
                   arg_group=creation_arg_group,
                   arg_type=get_enum_type(CatalogCollationType))

        # WideWorldImportersStd and WideWorldImportersFull cannot be successfully created.
        # AdventureWorksLT is the only sample name that is actually supported.
        c.argument('sample_name',
                   arg_group=creation_arg_group,
                   arg_type=get_enum_type([SampleName.adventure_works_lt]))
        c.argument('license_type',
                   arg_type=get_enum_type(DatabaseLicenseType))
        c.argument('read_scale',
                   arg_type=read_scale_param_type)
        c.argument('read_replica_count',
                   arg_type=read_replicas_param_type)
        c.argument('zone_redundant',
                   arg_type=zone_redundant_param_type)
        c.argument('tier',
                   arg_type=tier_param_type,
                   help='The edition component of the sku. Allowed values include: Basic, Standard, '
                   'Premium, GeneralPurpose, BusinessCritical.')
        c.argument('capacity',
                   arg_type=capacity_param_type,
                   arg_group=sku_component_arg_group,
                   help='The capacity component of the sku in integer number of DTUs or vcores.')
        c.argument('family',
                   arg_type=family_param_type,
                   help='The compute generation component of the sku (for vcore skus only). '
                   'Allowed values include: Gen4, Gen5.')

    with self.argument_context('sql db create') as c:
        _configure_db_create_params(c, Engine.db, CreateMode.default)

    with self.argument_context('sql db copy') as c:
        _configure_db_create_params(c, Engine.db, CreateMode.copy)

        c.argument('dest_name',
                   help='Name of the database that will be created as the copy destination.')
        c.argument('dest_resource_group_name',
                   options_list=['--dest-resource-group'],
                   help='Name of the resource group to create the copy in.'
                   ' If unspecified, defaults to the origin resource group.')
        c.argument('dest_server_name',
                   options_list=['--dest-server'],
                   help='Name of the server to create the copy in.'
                   ' If unspecified, defaults to the origin server.')

    with self.argument_context('sql db rename') as c:
        c.argument('new_name',
                   help='The new name that the database will be renamed to.')

    with self.argument_context('sql db restore') as c:
        _configure_db_create_params(c, Engine.db, CreateMode.point_in_time_restore)

        c.argument('dest_name',
                   help='Name of the database that will be created as the restore destination.')

        restore_point_arg_group = 'Restore Point'

        c.argument('restore_point_in_time',
                   options_list=['--time', '-t'],
                   arg_group=restore_point_arg_group,
                   help='The point in time of the source database that will be restored to create the'
                   ' new database. Must be greater than or equal to the source database\'s'
                   ' earliestRestoreDate value. Either --time or --deleted-time (or both) must be specified. ' +
                   time_format_help)
        c.argument('source_database_deletion_date',
                   options_list=['--deleted-time'],
                   arg_group=restore_point_arg_group,
                   help='If specified, restore from a deleted database instead of from an existing database.'
                   ' Must match the deleted time of a deleted database in the same server.'
                   ' Either --time or --deleted-time (or both) must be specified. ' +
                   time_format_help)

    with self.argument_context('sql db show') as c:
        # Service tier advisors and transparent data encryption are not included in the first batch
        # of GA commands.
        c.ignore('expand')

    with self.argument_context('sql db list') as c:
        c.argument('elastic_pool_name',
                   options_list=['--elastic-pool'],
                   help='If specified, lists only the databases in this elastic pool')

    with self.argument_context('sql db list-editions') as c:
        c.argument('show_details',
                   options_list=['--show-details', '-d'],
                   help='List of additional details to include in output.',
                   nargs='+',
                   arg_type=get_enum_type(DatabaseCapabilitiesAdditionalDetails))
        c.argument('available', arg_type=available_param_type)

        search_arg_group = 'Search'

        # We could used get_enum_type here, but that will validate the inputs which means there
        # will be no way to query for new editions/service objectives that are made available after
        # this version of CLI is released.
        c.argument('edition',
                   arg_type=tier_param_type,
                   arg_group=search_arg_group,
                   help='Edition to search for. If unspecified, all editions are shown.')
        c.argument('service_objective',
                   arg_group=search_arg_group,
                   help='Service objective to search for. If unspecified, all service objectives are shown.')
        c.argument('dtu',
                   arg_group=search_arg_group,
                   help='Number of DTUs to search for. If unspecified, all DTU sizes are shown.')
        c.argument('vcores',
                   arg_group=search_arg_group,
                   help='Number of vcores to search for. If unspecified, all vcore sizes are shown.')

    with self.argument_context('sql db update') as c:
        c.argument('service_objective',
                   arg_group=sku_arg_group,
                   help='The name of the new service objective. If this is a standalone db service'
                   ' objective and the db is currently in an elastic pool, then the db is removed from'
                   ' the pool.')
        c.argument('elastic_pool_id',
                   arg_type=elastic_pool_id_param_type,
                   help='The name or resource id of the elastic pool to move the database into.')
        c.argument('max_size_bytes', help='The new maximum size of the database expressed in bytes.')
        c.argument('compute_model',
                   arg_type=compute_model_param_type)
        c.argument('auto_pause_delay',
                   arg_type=auto_pause_delay_param_type)
        c.argument('min_capacity',
                   arg_type=min_capacity_param_type)

    with self.argument_context('sql db export') as c:
        # Create args that will be used to build up the ExportRequest object
        create_args_for_complex_type(
            c, 'parameters', ExportRequest, [
                'administrator_login',
                'administrator_login_password',
                'authentication_type',
                'storage_key',
                'storage_key_type',
                'storage_uri',
            ])

        c.argument('administrator_login',
                   options_list=['--admin-user', '-u'])
        c.argument('administrator_login_password',
                   options_list=['--admin-password', '-p'])
        c.argument('authentication_type',
                   options_list=['--auth-type', '-a'],
                   arg_type=get_enum_type(AuthenticationType))
        c.argument('storage_key_type',
                   arg_type=get_enum_type(StorageKeyType))

    with self.argument_context('sql db import') as c:
        # Create args that will be used to build up the ImportExtensionRequest object
        create_args_for_complex_type(c, 'parameters', ImportExtensionRequest, [
            'administrator_login',
            'administrator_login_password',
            'authentication_type',
            'storage_key',
            'storage_key_type',
            'storage_uri'
        ])

        c.argument('administrator_login',
                   options_list=['--admin-user', '-u'])
        c.argument('administrator_login_password',
                   options_list=['--admin-password', '-p'])
        c.argument('authentication_type',
                   options_list=['--auth-type', '-a'],
                   arg_type=get_enum_type(AuthenticationType))
        c.argument('storage_key_type',
                   arg_type=get_enum_type(StorageKeyType))

        # The parameter name '--name' is used for 'database_name', so we need to give a different name
        # for the import extension 'name' parameter to avoid conflicts. This parameter is actually not
        # needed, but we still need to avoid this conflict.
        c.argument('name', options_list=['--not-name'], arg_type=ignore_type)

    with self.argument_context('sql db show-connection-string') as c:
        c.argument('client_provider',
                   options_list=['--client', '-c'],
                   help='Type of client connection provider.',
                   arg_type=get_enum_type(ClientType))

        auth_group = 'Authentication'

        c.argument('auth_type',
                   options_list=['--auth-type', '-a'],
                   arg_group=auth_group,
                   help='Type of authentication.',
                   arg_type=get_enum_type(ClientAuthenticationType))

    #####
    #           sql db op
    #####
    with self.argument_context('sql db op') as c:
        c.argument('database_name',
                   options_list=['--database', '-d'],
                   required=True,
                   help='Name of the Azure SQL Database.')
        c.argument('operation_id',
                   options_list=['--name', '-n'],
                   required=True,
                   help='The unique name of the operation to cancel.')

    #####
    #           sql db replica
    #####
    with self.argument_context('sql db replica create') as c:
        _configure_db_create_params(c, Engine.db, CreateMode.secondary)

        c.argument('partner_resource_group_name',
                   options_list=['--partner-resource-group'],
                   help='Name of the resource group to create the new replica in.'
                   ' If unspecified, defaults to the origin resource group.')
        c.argument('partner_server_name',
                   options_list=['--partner-server'],
                   help='Name of the server to create the new replica in.')

    with self.argument_context('sql db replica set-primary') as c:
        c.argument('database_name',
                   help='Name of the database to fail over.')
        c.argument('server_name',
                   help='Name of the server containing the secondary replica that will become'
                   ' the new primary. ' + server_configure_help)
        c.argument('resource_group_name',
                   help='Name of the resource group containing the secondary replica that'
                   ' will become the new primary.')

    with self.argument_context('sql db replica delete-link') as c:
        c.argument('partner_server_name',
                   options_list=['--partner-server'],
                   help='Name of the server that the other replica is in.')
        c.argument('partner_resource_group_name',
                   options_list=['--partner-resource-group'],
                   help='Name of the resource group that the other replica is in. If unspecified,'
                   ' defaults to the first database\'s resource group.')

    #####
    #           sql db audit-policy & threat-policy
    #####
    def _configure_security_policy_storage_params(arg_ctx):
        # Shared storage-related arguments for both audit-policy and threat-policy.
        storage_arg_group = 'Storage'

        arg_ctx.argument('storage_account',
                         options_list=['--storage-account'],
                         arg_group=storage_arg_group,
                         help='Name of the storage account.')
        arg_ctx.argument('storage_account_access_key',
                         options_list=['--storage-key'],
                         arg_group=storage_arg_group,
                         help='Access key for the storage account.')
        arg_ctx.argument('storage_endpoint',
                         arg_group=storage_arg_group,
                         help='The storage account endpoint.')

    with self.argument_context('sql db audit-policy update') as c:
        _configure_security_policy_storage_params(c)

        policy_arg_group = 'Policy'

        c.argument('state',
                   arg_group=policy_arg_group,
                   help='Auditing policy state',
                   arg_type=get_enum_type(BlobAuditingPolicyState))
        c.argument('audit_actions_and_groups',
                   options_list=['--actions'],
                   arg_group=policy_arg_group,
                   help='List of actions and action groups to audit.',
                   nargs='+')
        c.argument('retention_days',
                   arg_group=policy_arg_group,
                   help='The number of days to retain audit logs.')

    with self.argument_context('sql db threat-policy update') as c:
        _configure_security_policy_storage_params(c)

        policy_arg_group = 'Policy'
        notification_arg_group = 'Notification'

        c.argument('state',
                   arg_group=policy_arg_group,
                   help='Threat detection policy state',
                   arg_type=get_enum_type(SecurityAlertPolicyState))
        c.argument('retention_days',
                   arg_group=policy_arg_group,
                   help='The number of days to retain threat detection logs.')
        c.argument('disabled_alerts',
                   arg_group=policy_arg_group,
                   options_list=['--disabled-alerts'],
                   help='List of disabled alerts.',
                   nargs='+')
        c.argument('email_addresses',
                   arg_group=notification_arg_group,
                   options_list=['--email-addresses'],
                   help='List of email addresses that alerts are sent to.',
                   nargs='+')
        c.argument('email_account_admins',
                   arg_group=notification_arg_group,
                   options_list=['--email-account-admins'],
                   help='Whether the alert is sent to the account administrators.',
                   arg_type=get_enum_type(SecurityAlertPolicyEmailAccountAdmins))
        # TODO: use server default

    #####
    #           sql db transparent-data-encryption
    #####
    with self.argument_context('sql db tde') as c:
        c.argument('database_name',
                   options_list=['--database', '-d'],
                   required=True,
                   help='Name of the Azure SQL Database.')

    with self.argument_context('sql db tde set') as c:
        c.argument('status',
                   options_list=['--status'],
                   required=True,
                   help='Status of the transparent data encryption.',
                   arg_type=get_enum_type(TransparentDataEncryptionStatus))

    ###############################################
    #                sql dw                       #
    ###############################################
    with self.argument_context('sql dw') as c:
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('database_name',
                   options_list=['--name', '-n'],
                   help='Name of the data warehouse.',
                   # Allow --ids command line argument. id_part=child_name_1 is 2nd name in uri
                   id_part='child_name_1')
        c.argument('max_size_bytes',
                   arg_type=max_size_bytes_param_type)
        c.argument('service_objective',
                   help='The service objective of the data warehouse. For example: ' +
                   dw_service_objective_examples)
        c.argument('collation',
                   help='The collation of the data warehouse.')

    with self.argument_context('sql dw create') as c:
        _configure_db_create_params(c, Engine.dw, CreateMode.default)

    with self.argument_context('sql dw show') as c:
        # Service tier advisors and transparent data encryption are not included in the first batch
        # of GA commands.
        c.ignore('expand')

    # Data Warehouse restore will not be included in the first batch of GA commands
    # (list_restore_points also applies to db, but it's not very useful. It's
    # mainly useful for dw.)
    # with ParametersContext(command='sql dw restore-point') as c:
    #     c.register_alias('database_name', ('--database', '-d'))

    ###############################################
    #                sql elastic-pool             #
    ###############################################
    with self.argument_context('sql elastic-pool') as c:
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('elastic_pool_name',
                   options_list=['--name', '-n'],
                   help='The name of the elastic pool.',
                   # Allow --ids command line argument. id_part=child_name_1 is 2nd name in uri
                   id_part='child_name_1')

        # --db-dtu-max and --db-dtu-min were the original param names, which is consistent with the
        # 2014-04-01 REST API.
        # --db-max-dtu and --db-min-dtu are aliases which are consistent with the `sql elastic-pool
        # list-editions --show-details db-max-dtu db-min-dtu` parameter values. These are more
        # consistent with other az sql commands, but the original can't be removed due to
        # compatibility.
        c.argument('max_capacity',
                   options_list=['--db-dtu-max', '--db-max-dtu', '--db-max-capacity'],
                   help='The maximum capacity (in DTUs or vcores) any one database can consume.')
        c.argument('min_capacity',
                   options_list=['--db-dtu-min', '--db-min-dtu', '--db-min-capacity'],
                   help='The minimum capacity (in DTUs or vcores) each database is guaranteed.')

        # --storage was the original param name, which is consistent with the underlying REST API.
        # --max-size is an alias which is consistent with the `sql elastic-pool list-editions
        # --show-details max-size` parameter value and also matches `sql db --max-size` parameter name.
        c.argument('max_size_bytes',
                   arg_type=max_size_bytes_param_type,
                   options_list=['--max-size', '--storage'])
        c.argument('license_type',
                   arg_type=get_enum_type(ElasticPoolLicenseType))
        c.argument('zone_redundant',
                   arg_type=zone_redundant_param_type)
        c.argument('tier',
                   arg_type=tier_param_type,
                   help='The edition component of the sku. Allowed values include: Basic, Standard, '
                   'Premium, GeneralPurpose, BusinessCritical.')
        c.argument('capacity',
                   arg_type=capacity_or_dtu_param_type,
                   help='The capacity component of the sku in integer number of DTUs or vcores.')
        c.argument('family',
                   arg_type=family_param_type,
                   help='The compute generation component of the sku (for vcore skus only). '
                   'Allowed values include: Gen4, Gen5.')

    with self.argument_context('sql elastic-pool create') as c:
        # Create args that will be used to build up the ElasticPool object
        create_args_for_complex_type(
            c, 'parameters', ElasticPool, [
                'license_type',
                'max_size_bytes',
                'name',
                'per_database_settings',
                'tags',
                'zone_redundant',
            ])

        # Create args that will be used to build up the ElasticPoolPerDatabaseSettings object
        create_args_for_complex_type(
            c, 'per_database_settings', ElasticPoolPerDatabaseSettings, [
                'max_capacity',
                'min_capacity',
            ])

        # Create args that will be used to build up the ElasticPool Sku object
        create_args_for_complex_type(
            c, 'sku', Sku, [
                'capacity',
                'family',
                'name',
                'tier',
            ])

        c.ignore('name')  # Hide sku name

    with self.argument_context('sql elastic-pool list-editions') as c:
        # Note that `ElasticPoolCapabilitiesAdditionalDetails` intentionally match param names to
        # other commands, such as `sql elastic-pool create --db-max-dtu --db-min-dtu --max-size`.
        c.argument('show_details',
                   options_list=['--show-details', '-d'],
                   help='List of additional details to include in output.',
                   nargs='+',
                   arg_type=get_enum_type(ElasticPoolCapabilitiesAdditionalDetails))
        c.argument('available',
                   arg_type=available_param_type)

        search_arg_group = 'Search'

        # We could used 'arg_type=get_enum_type' here, but that will validate the inputs which means there
        # will be no way to query for new editions that are made available after
        # this version of CLI is released.
        c.argument('edition',
                   arg_type=tier_param_type,
                   arg_group=search_arg_group,
                   help='Edition to search for. If unspecified, all editions are shown.')
        c.argument('dtu',
                   arg_group=search_arg_group,
                   help='Number of DTUs to search for. If unspecified, all DTU sizes are shown.')
        c.argument('vcores',
                   arg_group=search_arg_group,
                   help='Number of vcores to search for. If unspecified, all vcore sizes are shown.')

    with self.argument_context('sql elastic-pool update') as c:
        c.argument('database_dtu_max',
                   help='The maximum DTU any one database can consume.')
        c.argument('database_dtu_min',
                   help='The minimum DTU all databases are guaranteed.')
        c.argument('storage_mb',
                   help='Storage limit for the elastic pool in MB.')

    #####
    #           sql elastic-pool op
    #####
    with self.argument_context('sql elastic-pool op') as c:
        c.argument('elastic_pool_name',
                   options_list=['--elastic-pool'],
                   help='Name of the Azure SQL Elastic Pool.')
        c.argument('operation_id',
                   options_list=['--name', '-n'],
                   help='The unique name of the operation to cancel.')

    ###############################################
    #             sql failover-group              #
    ###############################################
    with self.argument_context('sql failover-group') as c:
        c.argument('failover_group_name', options_list=['--name', '-n'], help="The name of the Failover Group")
        c.argument('server_name', arg_type=server_param_type)
        c.argument('partner_server', help="The name of the partner server of a Failover Group")
        c.argument('partner_resource_group', help="The name of the resource group of the partner server")
        c.argument('failover_policy', help="The failover policy of the Failover Group",
                   arg_type=get_enum_type(FailoverPolicyType))
        c.argument('grace_period',
                   arg_type=grace_period_param_type)
        c.argument('add_db', nargs='+',
                   help='List of databases to add to Failover Group')
        c.argument('remove_db', nargs='+',
                   help='List of databases to remove from Failover Group')
        c.argument('allow_data_loss',
                   arg_type=allow_data_loss_param_type)

    ###############################################
    #                sql server                   #
    ###############################################
    with self.argument_context('sql server') as c:
        c.argument('server_name',
                   arg_type=server_param_type,
                   options_list=['--name', '-n'])
        c.argument('administrator_login',
                   options_list=['--admin-user', '-u'])
        c.argument('administrator_login_password',
                   options_list=['--admin-password', '-p'])
        # NOTE(review): underscore option name '--assign_identity' is inconsistent with
        # 'sql server create' ('--assign-identity'); kept as-is for backward compatibility.
        c.argument('assign_identity',
                   options_list=['--assign_identity', '-i'],
                   help='Generate and assign an Azure Active Directory Identity for this server'
                   'for use with key management services like Azure KeyVault.')

    with self.argument_context('sql server create') as c:
        c.argument('location',
                   arg_type=get_location_type_with_default_from_resource_group(self.cli_ctx))

        # Create args that will be used to build up the Server object
        create_args_for_complex_type(
            c, 'parameters', Server, [
                'administrator_login',
                'administrator_login_password',
                'location'
            ])

        c.argument('administrator_login',
                   required=True)
        c.argument('administrator_login_password',
                   required=True)
        c.argument('assign_identity',
                   options_list=['--assign-identity', '-i'],
                   help='Generate and assign an Azure Active Directory Identity for this server'
                   'for use with key management services like Azure KeyVault.')

    with self.argument_context('sql server update') as c:
        c.argument('administrator_login_password',
                   help='The administrator login password.')

    #####
    #           sql server ad-admin
    ######
    with self.argument_context('sql server ad-admin') as c:
        # The options list should be ['--server', '-s'], but in the originally released version it was
        # ['--server-name'] which we must keep for backward compatibility - but we should deprecate it.
        c.argument('server_name',
                   options_list=['--server-name', '--server', '-s'])
        c.argument('login',
                   arg_type=aad_admin_login_param_type)
        c.argument('sid',
                   arg_type=aad_admin_sid_param_type)
        c.ignore('tenant_id')

    with self.argument_context('sql server ad-admin create') as c:
        # Create args that will be used to build up the ServerAzureADAdministrator object
        create_args_for_complex_type(
            c, 'properties', ServerAzureADAdministrator, [
                'login',
                'sid',
            ])

    #####
    #           sql server conn-policy
    #####
    with self.argument_context('sql server conn-policy') as c:
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('connection_type',
                   options_list=['--connection-type', '-t'],
                   arg_type=get_enum_type(ServerConnectionType))

    #####
    #           sql server dns-alias
    #####
    with self.argument_context('sql server dns-alias') as c:
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('dns_alias_name',
                   options_list=('--name', '-n'),
                   help='Name of the DNS alias.')
        c.argument('original_server_name',
                   options_list=('--original-server'),
                   help='The name of the server to which alias is currently pointing')
        c.argument('original_resource_group_name',
                   options_list=('--original-resource-group'),
                   help='Name of the original resource group.')
        c.argument('original_subscription_id',
                   options_list=('--original-subscription-id'),
                   help='ID of the original subscription.')

    #####
    #           sql server firewall-rule
    #####
    with self.argument_context('sql server firewall-rule') as c:
        # Help text needs to be specified because 'sql server firewall-rule update' is a custom
        # command.
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('firewall_rule_name',
                   options_list=['--name', '-n'],
                   help='The name of the firewall rule.',
                   # Allow --ids command line argument. id_part=child_name_1 is 2nd name in uri
                   id_part='child_name_1')
        c.argument('start_ip_address',
                   options_list=['--start-ip-address'],
                   help='The start IP address of the firewall rule. Must be IPv4 format. Use value'
                   ' \'0.0.0.0\' to represent all Azure-internal IP addresses.')
        c.argument('end_ip_address',
                   options_list=['--end-ip-address'],
                   help='The end IP address of the firewall rule. Must be IPv4 format. Use value'
                   ' \'0.0.0.0\' to represent all Azure-internal IP addresses.')

    #####
    #           sql server key
    #####
    with self.argument_context('sql server key') as c:
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('key_name',
                   options_list=['--name', '-n'])
        c.argument('kid',
                   arg_type=kid_param_type,
                   required=True)

    #####
    #           sql server tde-key
    #####
    with self.argument_context('sql server tde-key') as c:
        c.argument('server_name',
                   arg_type=server_param_type)

    with self.argument_context('sql server tde-key set') as c:
        c.argument('kid',
                   arg_type=kid_param_type)
        c.argument('server_key_type',
                   arg_type=server_key_type_param_type)

    #####
    #           sql server vnet-rule
    #####
    with self.argument_context('sql server vnet-rule') as c:
        # Help text needs to be specified because 'sql server vnet-rule create' is a custom
        # command.
        c.argument('server_name',
                   arg_type=server_param_type)
        c.argument('virtual_network_rule_name',
                   options_list=['--name', '-n'])
        c.argument('virtual_network_subnet_id',
                   options_list=['--subnet'],
                   help='Name or ID of the subnet that allows access to an Azure Sql Server. '
                   'If subnet name is provided, --vnet-name must be provided.')
        c.argument('ignore_missing_vnet_service_endpoint',
                   options_list=['--ignore-missing-endpoint', '-i'],
                   help='Create firewall rule before the virtual network has vnet service endpoint enabled',
                   arg_type=get_three_state_flag())

    with self.argument_context('sql server vnet-rule create') as c:
        c.extra('vnet_name',
                options_list=['--vnet-name'],
                help='The virtual network name')

    ###############################################
    #           sql managed instance              #
    ###############################################
    with self.argument_context('sql mi') as c:
        c.argument('managed_instance_name',
                   help='The managed instance name',
                   options_list=['--name', '-n'],
                   # Allow --ids command line argument. id_part=name is 1st name in uri
                   id_part='name')
        c.argument('tier',
                   arg_type=tier_param_type,
                   help='The edition component of the sku. Allowed values: GeneralPurpose, BusinessCritical.')
        c.argument('family',
                   arg_type=family_param_type,
                   help='The compute generation component of the sku. '
                   'Allowed values include: Gen4, Gen5.')
        c.argument('storage_size_in_gb',
                   options_list=['--storage'],
                   arg_type=storage_param_type,
                   help='The storage size of the managed instance. '
                   'Storage size must be specified in increments of 32 GB')
        c.argument('license_type',
                   arg_type=get_enum_type(DatabaseLicenseType),
                   help='The license type to apply for this managed instance.')
        c.argument('vcores',
                   arg_type=capacity_param_type,
                   help='The capacity of the managed instance in vcores.')
        c.argument('collation',
                   help='The collation of the managed instance.')
        c.argument('proxy_override',
                   arg_type=get_enum_type(ServerConnectionType),
                   help='The connection type used for connecting to the instance.')
        c.argument('public_data_endpoint_enabled',
                   arg_type=get_three_state_flag(),
                   help='Whether or not the public data endpoint is enabled for the instance.')
        c.argument('timezone_id',
                   help='The time zone id for the instance to set. '
                   'A list of time zone ids is exposed through the sys.time_zone_info (Transact-SQL) view.')

    with self.argument_context('sql mi create') as c:
        c.argument('location',
                   arg_type=get_location_type_with_default_from_resource_group(self.cli_ctx))

        # Create args that will be used to build up the ManagedInstance object
        create_args_for_complex_type(
            c, 'parameters', ManagedInstance, [
                'administrator_login',
                'administrator_login_password',
                'license_type',
                'virtual_network_subnet_id',
                'vcores',
                'storage_size_in_gb',
                'collation',
                'proxy_override',
                'public_data_endpoint_enabled',
                'timezone_id',
            ])

        # Create args that will be used to build up the Managed Instance's Sku object
        create_args_for_complex_type(
            c, 'sku', Sku, [
                'family',
                'name',
                'tier',
            ])

        c.ignore('name')  # Hide sku name

        c.argument('administrator_login',
                   options_list=['--admin-user', '-u'],
                   required=True)
        # Option restored from corrupted source; '-p' matches the short form used by
        # 'sql db export/import' and 'sql server' for the same parameter.
        c.argument('administrator_login_password',
                   options_list=['--admin-password', '-p'],
                   required=True)

        c.extra('vnet_name',
                options_list=['--vnet-name'],
                help='The virtual network name',
                validator=validate_subnet)

        c.argument('virtual_network_subnet_id',
                   options_list=['--subnet'],
                   required=True,
                   help='Name or ID of the subnet that allows access to an Azure Sql Managed Instance. '
                   'If subnet name is provided, --vnet-name must be provided.')

        c.argument('assign_identity',
                   options_list=['--assign-identity', '-i'],
                   help='Generate and assign an Azure Active Directory Identity for this managed instance '
                   'for use with key management services like Azure KeyVault.')

    with self.argument_context('sql mi update') as c:
        # Create args that will be used to build up the ManagedInstance object
        create_args_for_complex_type(
            c, 'parameters', ManagedInstance, [
                'administrator_login_password',
            ])

        # Option restored from corrupted source; '-p' matches the short form used elsewhere.
        c.argument('administrator_login_password',
                   options_list=['--admin-password', '-p'])
        c.argument('assign_identity',
                   options_list=['--assign-identity', '-i'],
                   help='Generate and assign an Azure Active Directory Identity for this managed instance '
                   'for use with key management services like Azure KeyVault. '
                   'If identity is already assigned - do nothing.')

    #####
    #           sql managed instance key
    #####
    with self.argument_context('sql mi key') as c:
        c.argument('managed_instance_name',
                   arg_type=managed_instance_param_type)
        c.argument('key_name',
                   options_list=['--name', '-n'])
        c.argument('kid',
                   arg_type=kid_param_type,
                   required=True)

    #####
    #           sql managed instance ad-admin
    ######
    with self.argument_context('sql mi ad-admin') as c:
        c.argument('managed_instance_name',
                   arg_type=managed_instance_param_type)
        c.argument('login',
                   arg_type=aad_admin_login_param_type)
        c.argument('sid',
                   arg_type=aad_admin_sid_param_type)

    with self.argument_context('sql mi ad-admin create') as c:
        # Create args that will be used to build up the ManagedInstanceAdministrator object
        create_args_for_complex_type(
            c, 'properties', ManagedInstanceAdministrator, [
                'login',
                'sid',
            ])

    with self.argument_context('sql mi ad-admin update') as c:
        # Create args that will be used to build up the ManagedInstanceAdministrator object
        create_args_for_complex_type(
            c, 'properties', ManagedInstanceAdministrator, [
                'login',
                'sid',
            ])

    #####
    #           sql server tde-key
    #####
    with self.argument_context('sql mi tde-key') as c:
        c.argument('managed_instance_name',
                   arg_type=managed_instance_param_type)

    with self.argument_context('sql mi tde-key set') as c:
        c.argument('kid',
                   arg_type=kid_param_type)
        c.argument('server_key_type',
                   arg_type=server_key_type_param_type)

    ###############################################
    #                sql managed db               #
    ###############################################
    with self.argument_context('sql midb') as c:
        c.argument('managed_instance_name',
                   arg_type=managed_instance_param_type,
                   # Allow --ids command line argument. id_part=name is 1st name in uri
                   id_part='name')
        c.argument('database_name',
                   options_list=['--name', '-n'],
                   help='The name of the Azure SQL Managed Database.',
                   # Allow --ids command line argument. id_part=child_name_1 is 2nd name in uri
                   id_part='child_name_1')

    with self.argument_context('sql midb create') as c:
        create_args_for_complex_type(
            c, 'parameters', ManagedDatabase, [
                'collation',
            ])

        c.argument('collation',
                   required=False,
                   help='The collation of the Azure SQL Managed Database collation to use, '
                   'e.g.: SQL_Latin1_General_CP1_CI_AS or Latin1_General_100_CS_AS_SC')

    with self.argument_context('sql midb restore') as c:
        create_args_for_complex_type(
            c, 'parameters', ManagedDatabase, [
                'target_managed_database_name',
                'target_managed_instance_name',
                'restore_point_in_time'
            ])

        c.argument('target_managed_database_name',
                   options_list=['--dest-name'],
                   required=True,
                   help='Name of the managed database that will be created as the restore destination.')

        c.argument('target_managed_instance_name',
                   options_list=['--dest-mi'],
                   help='Name of the managed instance to restore managed database to. '
                   'This can be same managed instance, or another managed instance on same subscription. '
                   'When not specified it defaults to source managed instance.')

        c.argument('target_resource_group_name',
                   options_list=['--dest-resource-group'],
                   help='Name of the resource group of the managed instance to restore managed database to. '
                   'When not specified it defaults to source resource group.')

        restore_point_arg_group = 'Restore Point'

        c.argument('restore_point_in_time',
                   options_list=['--time', '-t'],
                   arg_group=restore_point_arg_group,
                   required=True,
                   help='The point in time of the source database that will be restored to create the'
                   ' new database. Must be greater than or equal to the source database\'s'
                   ' earliestRestoreDate value. ' + time_format_help)

    with self.argument_context('sql midb list') as c:
        c.argument('managed_instance_name', id_part=None)

    ###############################################
    #                sql virtual cluster          #
    ###############################################
    with self.argument_context('sql virtual-cluster') as c:
        c.argument('virtual_cluster_name',
                   help='The virtual cluster name',
                   options_list=['--name', '-n'],
                   # Allow --ids command line argument. id_part=name is 1st name in uri
                   id_part='name')
        c.argument('resource_group_name', arg_type=resource_group_name_type)

    ###############################################
    #             sql instance failover-group     #
    ###############################################
    with self.argument_context('sql instance-failover-group') as c:
        c.argument('failover_group_name',
                   options_list=['--name', '-n'],
                   help="The name of the Instance Failover Group")
        c.argument('managed_instance',
                   arg_type=managed_instance_param_type,
                   options_list=['--source-mi', '--mi'])
        c.argument('partner_managed_instance',
                   help="The name of the partner managed instance of a Instance Failover Group",
                   options_list=['--partner-mi'])
        c.argument('partner_resource_group',
                   help="The name of the resource group of the partner managed instance")
        c.argument('failover_policy',
                   help="The failover policy of the Instance Failover Group",
                   arg_type=get_enum_type(FailoverPolicyType))
        c.argument('grace_period',
                   arg_type=grace_period_param_type)
        c.argument('allow_data_loss',
                   arg_type=allow_data_loss_param_type)

    ###################################################
    #             sql sensitivity classification      #
    ###################################################
    with self.argument_context('sql db classification') as c:
        c.argument('schema_name',
                   required=True,
                   help='The name of the schema.',
                   options_list=['--schema'])
        c.argument('table_name',
                   required=True,
                   help='The name of the table.',
                   options_list=['--table'])
        c.argument('column_name',
                   required=True,
                   help='The name of the column.',
                   options_list=['--column'])
        c.argument('information_type',
                   required=False,
                   help='The information type.')
        c.argument('label_name',
                   required=False,
                   help='The label name.',
                   options_list=['--label'])

    with self.argument_context('sql db classification recommendation list') as c:
        c.ignore('skip_token')

# generate_epitope_combinations.py
from __future__ import division
import pandas as pd
import random
import os
import sys
import csv
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import time
import argparse
from os import path
from datetime import datetime
import itertools
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_arguments(tcl_file, bcl_file, r=10):
    """
    Build the parameter mapping consumed by generate_combinations().

    parameters:
        - tcl_file (csv file): file having the T-cell epitopes
        - bcl_file (csv file): file having the B-cell epitopes
        - r (int): number of random epitope shuffles to generate
    returns:
        - params (dict): single-entry dict mapping the shuffle count ``r``
          to a list of the absolute paths of the two input files
    """
    absolute_paths = [os.path.abspath(name) for name in (tcl_file, bcl_file)]
    return {r: absolute_paths}
def generate_combinations(params, tcell_linker='AAY', bcell_linker='GPGPG'):
    """
    generate a specified number of random combinations of epitopes given
    input files (both T-cell and B-cell epitope csv files) and write them
    out as FASTA files
    parameters:
        - params (dict): dictionary with random size as key and values as
          a list of the two input files [tcell_csv, bcell_csv]
        - tcell_linker (str): string characters for linking the T-cell epitopes
        - bcell_linker (str): string characters for linking the B-cell epitopes
    returns:
        - None: results are written under <BASE_DIR>/output/ as
          *_raw_shuffled_epitopes.fa, *_linked_shuffled_epitopes.fa and
          btcell_epitopes.fa (note: the function has no return statement,
          even though it used to advertise returning the output directory)
    """
    tcl_dict = dict() # dictionary to store random t-cell epitope combinations
    bcl_dict = dict() # dictionary to store random b-cell epitope combinations
    epitope_dict = dict()
    epitopes_list = list()
    # read the input files and extract epitopes as a list
    # NOTE(review): 'randomsize' leaks out of this loop and is reused below;
    # this only works because params is expected to hold exactly one entry
    # (as produced by get_arguments()).
    for randomsize, files in params.items():
        for f in files:
            data = pd.read_csv(f)
            # epitope sequences are taken from the second column of the csv
            epitopes = data.iloc[:,1].tolist()
            # assumes every epitope in a file has the same length -- TODO confirm
            epi_len = int(len(''.join(epitopes))/len(epitopes)) # length of each epitope
            celltype = os.path.splitext(os.path.basename(f))[0]
            # key encodes both the cell type (file basename) and the epitope length
            epitope_dict[celltype+'_'+str(epi_len)] = epitopes
    # randomly shuffle the epitope sequences using the random size
    headers_list = list()
    for ct, ep in epitope_dict.items():
        for i in range(randomsize):
            # NOTE(review): 'non_random_seq' is computed but never used
            non_random_seq = ''.join(ep)
            # in-place shuffle of the epitope order, then concatenate
            random.shuffle(ep)
            epitope = ''.join(ep)
            epitopes_list.append(epitope)
            headers_list.append(ct)
    # first 'randomsize' entries came from the T-cell file, the rest from the
    # B-cell file (relies on params/file ordering -- TODO confirm)
    tcell_epitopes = epitopes_list[0:randomsize]
    tcell_headers = headers_list[0:randomsize]
    bcell_epitopes = epitopes_list[randomsize:]
    bcell_headers = headers_list[randomsize:]
    # group shuffles by sequence: identical shuffles collapse into one entry
    # that accumulates several FASTA headers
    for i, j in enumerate(zip(tcell_headers, tcell_epitopes)):
        seqid = '>'+''+j[0]+'_'+str(i+1)
        epitope = j[1]
        if not epitope in tcl_dict:
            tcl_dict[epitope] = [seqid]
        else:
            tcl_dict[epitope].append(seqid)
        # create output directory
        # NOTE(review): 'tcell_bs' (and loop variable 'j') leak out of this loop
        tcell_bs = j[0].rsplit('_',2)[0]
    outdir = os.path.join(BASE_DIR, 'output')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    tcell_out = os.path.join(outdir, tcell_bs)+'_raw_shuffled_epitopes.fa'
    tcell_linked = os.path.join(outdir, tcell_bs)+'_linked_shuffled_epitopes.fa'
    tcell_list = list()
    # NOTE(review): 'id' below shadows the builtin id(); it is the list of
    # FASTA headers for one shuffled sequence -- ''.join(id) writes them all
    # on a single header line when duplicates collided (verify intended)
    with open(tcell_out, 'w') as f_obj, open(tcell_linked, 'w') as l_obj:
        for seq, id in tcl_dict.items():
            # epitope length was encoded in the header suffix by the reader loop
            epi_len = int(id[0].rsplit('_',2)[1])
            # re-split the concatenated shuffle into epi_len-sized chunks
            sub = list(map(''.join, zip(*[iter(seq)]*epi_len)))
            linked_seq = "{}".format(tcell_linker).join(sub)
            tcell_list.append(linked_seq)
            print("writing {} epitopes to {}".format(''.join(id), os.path.basename(tcell_out)))
            f_obj.write(''.join(id))
            f_obj.write('\n')
            if len(seq) > 1:
                # wrap FASTA body at 60 characters per line
                f_obj.write('\n'.join(seq[i:i+60] for i in range(0,len(seq), 60)))
                f_obj.write('\n')
            else:
                f_obj.write(seq)
                f_obj.write('\n')
            # write linked epitopes
            print("\nwriting {} epitopes to {}".format(''.join(id), os.path.basename(tcell_linked)))
            l_obj.write(''.join(id))
            l_obj.write('\n')
            if len(linked_seq) > 1:
                l_obj.write('\n'.join(linked_seq[i:i+60] for i in range(0,len(linked_seq), 60)))
                l_obj.write('\n')
            else:
                l_obj.write(linked_seq)
                l_obj.write('\n')
    # same grouping as above, for the B-cell shuffles
    for i, j in enumerate(zip(bcell_headers, bcell_epitopes)):
        seqid = '>'+''+j[0]+'_'+str(i+1)
        epitope = j[1]
        if not epitope in bcl_dict:
            bcl_dict[epitope] = [seqid]
        else:
            bcl_dict[epitope].append(seqid)
        bcell_bs = j[0].rsplit('_',2)[0]
    bcell_out = os.path.join(outdir, bcell_bs)+'_raw_shuffled_epitopes.fa'
    bcell_linked = os.path.join(outdir, bcell_bs)+'_linked_shuffled_epitopes.fa'
    bcell_list = list()
    with open(bcell_out, 'w') as f_obj, open(bcell_linked, 'w') as l_obj:
        for seq, id in bcl_dict.items():
            epi_len = int(id[0].rsplit('_',2)[1])
            sub = list(map(''.join, zip(*[iter(seq)]*epi_len)))
            linked_seq = "{}".format(bcell_linker).join(sub)
            bcell_list.append(linked_seq)
            print("writing {} epitopes to {}".format(''.join(id), os.path.basename(bcell_out)))
            f_obj.write(''.join(id))
            f_obj.write('\n')
            if len(seq) > 1:
                f_obj.write('\n'.join(seq[i:i+60] for i in range(0,len(seq), 60)))
                f_obj.write('\n')
            else:
                f_obj.write(seq)
                f_obj.write('\n')
            # write linked epitopes
            print("\nwriting {} epitopes to {}".format(''.join(id), os.path.basename(bcell_linked)))
            l_obj.write(''.join(id))
            l_obj.write('\n')
            if len(linked_seq) > 1:
                l_obj.write('\n'.join(linked_seq[i:i+60] for i in range(0,len(linked_seq), 60)))
                l_obj.write('\n')
            else:
                l_obj.write(linked_seq)
                l_obj.write('\n')
    # concatenate the B-cell and T-cell epitopes using linkers
    linker = 'AKFVAAWTLKAAAEAAAK'
    out = os.path.join(outdir, 'btcell_epitopes.fa')
    with open(out, 'w') as f_obj:
        for i, epitope in enumerate(zip(bcell_list, tcell_list)):
            # fused construct: <linker><bcell part><tcell_linker><tcell part>
            # NOTE(review): tcell_linker (not bcell_linker) joins the two
            # halves -- confirm this is intentional
            linked_btcell = "{}".format(linker)+epitope[0]+"{}".format(tcell_linker)+(epitope[1])
            seqid = '>'+str(i+1)
            f_obj.write(seqid)
            f_obj.write('\n')
            if len(linked_btcell) > 1:
                f_obj.write('\n'.join(linked_btcell[i:i+60] for i in range(0,len(linked_btcell), 60)))
                f_obj.write('\n')
            else:
                f_obj.write(linked_btcell)
                f_obj.write('\n')
# argument parsers / command line options
parser=argparse.ArgumentParser()
# NOTE(review): helpstr is defined but never passed to ArgumentParser
# (e.g. as usage=helpstr), so it has no effect
helpstr = """python generate_epitope_combinations.py [options]"""
required_group = parser.add_argument_group('required arguments')
required_group.add_argument('-t', '--tcell', help="T-cell epitope file path")
required_group.add_argument('-b', '--bcell', help="B-cell epitope file path")
parser.add_argument('-r', '--randomsize', default=10, type=int, help="number of randomized epitopes")
parser.add_argument('-tcl', '--tclinker', default='AAY', type=str, help="T-cell epitopes linker")
parser.add_argument('-bcl', '--bclinker', default='GPGPG', type=str, help="B-cell epitopes linker")
args=parser.parse_args()
# open input file:
if args.tcell != None:
    tcell_f = args.tcell
else:
    print("Please specify the T-cell epitopes file!\n")
    sys.exit(2)
if args.bcell != None:
    bcell_f = args.bcell
else:
    print("Please specify the B-cell epitopes file!\n")
    sys.exit(2)
# NOTE(review): the three else-branches below only print and fall through;
# if one were ever taken, 'size'/'tlinker'/'blinker' would be undefined and
# the calls at the bottom would raise NameError. In practice the argparse
# defaults make these branches unreachable.
if args.randomsize != None and type(args.randomsize) == int:
    size = args.randomsize
else:
    print("specify the random size number\n")
if args.tclinker != None and type(args.tclinker) == str:
    tlinker = args.tclinker.upper()
else:
    print("specify the T-cell epitopes linker\n")
if args.bclinker != None and type(args.bclinker) == str:
    blinker = args.bclinker.upper()
else:
    print("specify the B-cell epitopes linker\n")
# run the function
fs = get_arguments(tcell_f, bcell_f, size)
# NOTE(review): generate_combinations has no return statement, so g is None
g = generate_combinations(fs, tlinker, blinker)
|
# filename: src/quantum/azext_quantum/vendored_sdks/azure_quantum/quantum_client.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.jobs_operations import JobsOperations
from .operations.providers_operations import ProvidersOperations
from .operations.storage_operations import StorageOperations
from .operations.quotas_operations import QuotasOperations
from . import models
class QuantumClientConfiguration(AzureConfiguration):
    """Configuration for QuantumClient

    All parameters used to create this instance are saved as instance
    attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The Azure subscription ID. This is a
     GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000)
    :type subscription_id: str
    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param workspace_name: Name of the workspace.
    :type workspace_name: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, resource_group_name, workspace_name, base_url=None):
        # Reject missing required parameters up front, in declaration order,
        # so the first None encountered is the one reported.
        required = (
            ('credentials', credentials),
            ('subscription_id', subscription_id),
            ('resource_group_name', resource_group_name),
            ('workspace_name', workspace_name),
        )
        for param_name, param_value in required:
            if param_value is None:
                raise ValueError("Parameter '{}' must not be None.".format(param_name))

        # Fall back to the public service endpoint when no URL is supplied.
        super(QuantumClientConfiguration, self).__init__(base_url or 'https://quantum.azure.com')

        for agent in ('quantumclient/{}'.format(VERSION), 'Azure-SDK-For-Python'):
            self.add_user_agent(agent)

        # Persist every constructor argument as an instance attribute.
        self.credentials = credentials
        self.subscription_id = subscription_id
        self.resource_group_name = resource_group_name
        self.workspace_name = workspace_name
class QuantumClient(SDKClient):
    """Azure Quantum REST API client

    :ivar config: Configuration for client.
    :vartype config: QuantumClientConfiguration

    :ivar jobs: Jobs operations
    :vartype jobs: azure.quantum.operations.JobsOperations
    :ivar providers: Providers operations
    :vartype providers: azure.quantum.operations.ProvidersOperations
    :ivar storage: Storage operations
    :vartype storage: azure.quantum.operations.StorageOperations
    :ivar quotas: Quotas operations
    :vartype quotas: azure.quantum.operations.QuotasOperations

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: The Azure subscription ID. This is a
     GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000)
    :type subscription_id: str
    :param resource_group_name: Name of an Azure resource group.
    :type resource_group_name: str
    :param workspace_name: Name of the workspace.
    :type workspace_name: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, resource_group_name, workspace_name, base_url=None):
        self.config = QuantumClientConfiguration(
            credentials, subscription_id, resource_group_name, workspace_name, base_url)
        super(QuantumClient, self).__init__(self.config.credentials, self.config)

        # Only actual classes from the generated models module participate
        # in (de)serialization.
        model_classes = {
            name: obj for name, obj in models.__dict__.items() if isinstance(obj, type)}
        self.api_version = '2019-11-04-preview'
        self._serialize = Serializer(model_classes)
        self._deserialize = Deserializer(model_classes)

        # Wire up one operation group attribute per service area.
        operation_groups = (
            ('jobs', JobsOperations),
            ('providers', ProvidersOperations),
            ('storage', StorageOperations),
            ('quotas', QuotasOperations),
        )
        for attr_name, operations_cls in operation_groups:
            setattr(self, attr_name, operations_cls(
                self._client, self.config, self._serialize, self._deserialize))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build a neural machine translation model with soft attention
"""
import copy
import sys
from collections import OrderedDict
import ipdb
import numpy
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from initializers import norm_weight
from layers import get_layer_param, shared_dropout_layer, get_layer_constr
from theano_util import concatenate, embedding_name
from alignment_util import get_alignments
profile = False
# Nematus relies on numpy.log(-numpy.inf) for suppressing unknowns
# make sure numpy will not raise an exception because of nan
numpy.seterr(divide='warn', over='warn', under='ignore', invalid='warn')
# batch preparation
def prepare_data(seqs_x, seqs_y, maxlen=None):
    """Pad a minibatch of source/target sequences into numpy arrays.

    parameters:
        seqs_x: list of source sentences; each sentence is a list of words,
            each word a list/tuple of factor ids (all sentences are assumed
            to use the same number of factors)
        seqs_y: list of target sentences (lists of word ids)
        maxlen: if given, drop sentence pairs where either side has
            length >= maxlen (strict '<' comparison below)
    returns:
        x:      int64 array (n_factors, maxlen_x, n_samples)
        x_mask: float32 array (maxlen_x, n_samples); 1. on real tokens plus
                one trailing position (implicit EOS), 0. on padding
        y:      int64 array (maxlen_y, n_samples)
        y_mask: float32 array (maxlen_y, n_samples), same convention
        or (None, None, None, None) when maxlen filtering leaves no pairs
    """
    # x: a list of sentences
    lengths_x = [len(s) for s in seqs_x]
    lengths_y = [len(s) for s in seqs_y]

    if maxlen is not None:
        new_seqs_x = []
        new_seqs_y = []
        new_lengths_x = []
        new_lengths_y = []
        for l_x, s_x, l_y, s_y in zip(lengths_x, seqs_x, lengths_y, seqs_y):
            if l_x < maxlen and l_y < maxlen:
                new_seqs_x.append(s_x)
                new_lengths_x.append(l_x)
                new_seqs_y.append(s_y)
                new_lengths_y.append(l_y)
        lengths_x = new_lengths_x
        seqs_x = new_seqs_x
        lengths_y = new_lengths_y
        seqs_y = new_seqs_y

        if len(lengths_x) < 1 or len(lengths_y) < 1:
            return None, None, None, None

    n_samples = len(seqs_x)
    n_factors = len(seqs_x[0][0])
    # +1 reserves one extra position for the implicit EOS marked by the masks
    maxlen_x = numpy.max(lengths_x) + 1
    maxlen_y = numpy.max(lengths_y) + 1

    x = numpy.zeros((n_factors, maxlen_x, n_samples)).astype('int64')
    y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
    x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
    y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
    for idx, [s_x, s_y] in enumerate(zip(seqs_x, seqs_y)):
        # zip(*s_x) transposes word-major factors into factor-major rows;
        # list() so the assignment also works on Python 3, where zip()
        # returns a lazy iterator that numpy cannot assign from directly
        x[:, :lengths_x[idx], idx] = list(zip(*s_x))
        x_mask[:lengths_x[idx]+1, idx] = 1.
        y[:lengths_y[idx], idx] = s_y
        y_mask[:lengths_y[idx]+1, idx] = 1.

    return x, x_mask, y, y_mask
# initialize all parameters
def init_params(options):
    """Allocate every model parameter as a numpy value in an OrderedDict.

    Insertion order is deliberately kept identical to the original layout
    (embeddings, encoder, encoder_r, ff_state, decoder, readout layers),
    since the parameter dict is ordered.
    """
    params = OrderedDict()

    # one source embedding matrix per input factor, then the target embedding
    for factor_idx in range(options['factors']):
        params[embedding_name(factor_idx)] = norm_weight(
            options['n_words_src'], options['dim_per_factor'][factor_idx])
    params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])

    # encoder: bidirectional RNN (forward direction + reversed direction)
    params = get_layer_param(options['encoder'])(
        options, params, prefix='encoder',
        nin=options['dim_word'], dim=options['dim'])
    params = get_layer_param(options['encoder'])(
        options, params, prefix='encoder_r',
        nin=options['dim_word'], dim=options['dim'])
    # context vectors concatenate both encoder directions
    context_dim = 2 * options['dim']

    # feed-forward layer mapping the mean context to the decoder init state
    params = get_layer_param('ff')(
        options, params, prefix='ff_state',
        nin=context_dim, nout=options['dim'])

    # conditional decoder with attention over the context
    params = get_layer_param(options['decoder'])(
        options, params, prefix='decoder',
        nin=options['dim_word'], dim=options['dim'], dimctx=context_dim)

    # readout: combine decoder state, previous word embedding and context
    params = get_layer_param('ff')(
        options, params, prefix='ff_logit_lstm',
        nin=options['dim'], nout=options['dim_word'], ortho=False)
    params = get_layer_param('ff')(
        options, params, prefix='ff_logit_prev',
        nin=options['dim_word'], nout=options['dim_word'], ortho=False)
    params = get_layer_param('ff')(
        options, params, prefix='ff_logit_ctx',
        nin=context_dim, nout=options['dim_word'], ortho=False)
    params = get_layer_param('ff')(
        options, params, prefix='ff_logit',
        nin=options['dim_word'], nout=options['n_words'])

    return params
# bidirectional RNN encoder: take input x (optionally with mask), and produce sequence of context vectors (ctx)
def _build_encoder(tparams, options, trng, use_noise, x_mask=None, sampling=False):
    """Build the symbolic bidirectional-RNN encoder graph.

    Creates the source input variable ``x`` (int64 tensor3; per the test
    value below the axes are (factor, timestep, sample)), embeds it, runs a
    forward and a backward recurrent layer, and returns ``(x, ctx)`` where
    ``ctx`` concatenates both directions per timestep. This is Theano graph
    construction only -- nothing is evaluated here.
    """
    x = tensor.tensor3('x', dtype='int64')
    x.tag.test_value = (numpy.random.rand(1, 5, 10)*100).astype('int64')

    # for the backward rnn, we just need to invert x
    xr = x[:,::-1]
    if x_mask is None:
        xr_mask = None
    else:
        xr_mask = x_mask[::-1]

    n_timesteps = x.shape[1]
    n_samples = x.shape[2]

    if options['use_dropout']:
        # retention probabilities (1 - dropout rate) for each dropout site
        retain_probability_emb = 1-options['dropout_embedding']
        retain_probability_hidden = 1-options['dropout_hidden']
        retain_probability_source = 1-options['dropout_source']
        if sampling:
            if options['model_version'] < 0.1:
                # legacy models (< 0.1) trained with unscaled dropout: at
                # sampling time, rescale activations by the retention prob
                rec_dropout = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))
                rec_dropout_r = theano.shared(numpy.array([retain_probability_hidden]*2, dtype='float32'))
                emb_dropout = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))
                emb_dropout_r = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))
                source_dropout = theano.shared(numpy.float32(retain_probability_source))
            else:
                # newer models use scaled dropout: no rescaling when sampling
                rec_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))
                rec_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))
                emb_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))
                emb_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))
                source_dropout = theano.shared(numpy.float32(1.))
        else:
            # training graph: draw dropout masks (scaled for newer models)
            if options['model_version'] < 0.1:
                scaled = False
            else:
                scaled = True
            rec_dropout = shared_dropout_layer((2, n_samples, options['dim']), use_noise, trng, retain_probability_hidden, scaled)
            rec_dropout_r = shared_dropout_layer((2, n_samples, options['dim']), use_noise, trng, retain_probability_hidden, scaled)
            emb_dropout = shared_dropout_layer((2, n_samples, options['dim_word']), use_noise, trng, retain_probability_emb, scaled)
            emb_dropout_r = shared_dropout_layer((2, n_samples, options['dim_word']), use_noise, trng, retain_probability_emb, scaled)
            source_dropout = shared_dropout_layer((n_timesteps, n_samples, 1), use_noise, trng, retain_probability_source, scaled)
            # tile so whole word embedding vectors are dropped, not single units
            source_dropout = tensor.tile(source_dropout, (1,1,options['dim_word']))
    else:
        # dropout disabled: all-ones vectors leave downstream layers unchanged
        rec_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))
        rec_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))
        emb_dropout = theano.shared(numpy.array([1.]*2, dtype='float32'))
        emb_dropout_r = theano.shared(numpy.array([1.]*2, dtype='float32'))

    # word embedding for forward rnn (source)
    # one embedding lookup per factor, concatenated along the feature axis;
    # assumes the per-factor dims sum to options['dim_word'] -- TODO confirm
    emb = []
    for factor in range(options['factors']):
        emb.append(tparams[embedding_name(factor)][x[factor].flatten()])
    emb = concatenate(emb, axis=1)
    emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
    if options['use_dropout']:
        emb *= source_dropout
    proj = get_layer_constr(options['encoder'])(tparams, emb, options,
                                                prefix='encoder',
                                                mask=x_mask,
                                                emb_dropout=emb_dropout,
                                                rec_dropout=rec_dropout,
                                                profile=profile)

    # word embedding for backward rnn (source)
    embr = []
    for factor in range(options['factors']):
        embr.append(tparams[embedding_name(factor)][xr[factor].flatten()])
    embr = concatenate(embr, axis=1)
    embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
    if options['use_dropout']:
        if sampling:
            embr *= source_dropout
        else:
            # we drop out the same words in both directions
            embr *= source_dropout[::-1]
    projr = get_layer_constr(options['encoder'])(tparams, embr, options,
                                                 prefix='encoder_r',
                                                 mask=xr_mask,
                                                 emb_dropout=emb_dropout_r,
                                                 rec_dropout=rec_dropout_r,
                                                 profile=profile)

    # context will be the concatenation of forward and backward rnns
    # (backward output re-reversed so both align per timestep)
    ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)

    return x, ctx
# build a training model
def build_model(tparams, options):
    """Build the symbolic training graph.

    Wires the bidirectional encoder, the attention decoder and the softmax
    readout, returning
    (trng, use_noise, x, x_mask, y, y_mask, opt_ret, per_sent_neg_log_prob)
    where ``per_sent_neg_log_prob`` is the per-sentence negative
    log-likelihood (masked sum over target positions) and ``opt_ret``
    carries the attention weights under 'dec_alphas'. Theano graph
    construction only -- nothing is evaluated here.
    """
    opt_ret = dict()

    # fixed seed: dropout noise is reproducible across runs
    trng = RandomStreams(1234)
    # shared on/off switch for dropout noise; starts at 0. (off) --
    # presumably flipped to 1. by the training loop (confirm in caller)
    use_noise = theano.shared(numpy.float32(0.))

    x_mask = tensor.matrix('x_mask', dtype='float32')
    x_mask.tag.test_value = numpy.ones(shape=(5, 10)).astype('float32')
    y = tensor.matrix('y', dtype='int64')
    y.tag.test_value = (numpy.random.rand(8, 10)*100).astype('int64')
    y_mask = tensor.matrix('y_mask', dtype='float32')
    y_mask.tag.test_value = numpy.ones(shape=(8, 10)).astype('float32')

    x, ctx = _build_encoder(tparams, options, trng, use_noise, x_mask, sampling=False)
    n_samples = x.shape[2]
    n_timesteps_trg = y.shape[0]

    if options['use_dropout']:
        retain_probability_emb = 1-options['dropout_embedding']
        retain_probability_hidden = 1-options['dropout_hidden']
        retain_probability_target = 1-options['dropout_target']
        if options['model_version'] < 0.1:
            scaled = False
        else:
            scaled = True
        rec_dropout_d = shared_dropout_layer((5, n_samples, options['dim']), use_noise, trng, retain_probability_hidden, scaled)
        emb_dropout_d = shared_dropout_layer((2, n_samples, options['dim_word']), use_noise, trng, retain_probability_emb, scaled)
        ctx_dropout_d = shared_dropout_layer((4, n_samples, 2*options['dim']), use_noise, trng, retain_probability_hidden, scaled)
        target_dropout = shared_dropout_layer((n_timesteps_trg, n_samples, 1), use_noise, trng, retain_probability_target, scaled)
        # drop whole target word embeddings, not individual units
        target_dropout = tensor.tile(target_dropout, (1,1,options['dim_word']))
    else:
        rec_dropout_d = theano.shared(numpy.array([1.]*5, dtype='float32'))
        emb_dropout_d = theano.shared(numpy.array([1.]*2, dtype='float32'))
        ctx_dropout_d = theano.shared(numpy.array([1.]*4, dtype='float32'))

    # mean of the context (across time) will be used to initialize decoder rnn
    ctx_mean = (ctx * x_mask[:, :, None]).sum(0) / x_mask.sum(0)[:, None]

    # or you can use the last state of forward + backward encoder rnns
    # ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)

    if options['use_dropout']:
        ctx_mean *= shared_dropout_layer((n_samples, 2*options['dim']), use_noise, trng, retain_probability_hidden, scaled)

    # initial decoder state
    init_state = get_layer_constr('ff')(tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')

    # word embedding (target), we will shift the target sequence one time step
    # to the right. This is done because of the bi-gram connections in the
    # readout and decoder rnn. The first target will be all zeros and we will
    # not condition on the last output.
    emb = tparams['Wemb_dec'][y.flatten()]
    emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
    emb_shifted = tensor.zeros_like(emb)
    emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
    emb = emb_shifted

    if options['use_dropout']:
        emb *= target_dropout

    # decoder - pass through the decoder conditional gru with attention
    proj = get_layer_constr(options['decoder'])(tparams, emb, options,
                                            prefix='decoder',
                                            mask=y_mask, context=ctx,
                                            context_mask=x_mask,
                                            one_step=False,
                                            init_state=init_state,
                                            emb_dropout=emb_dropout_d,
                                            ctx_dropout=ctx_dropout_d,
                                            rec_dropout=rec_dropout_d,
                                            profile=profile)
    # hidden states of the decoder gru
    proj_h = proj[0]

    # weighted averages of context, generated by attention module
    ctxs = proj[1]

    if options['use_dropout']:
        proj_h *= shared_dropout_layer((n_samples, options['dim']), use_noise, trng, retain_probability_hidden, scaled)
        emb *= shared_dropout_layer((n_samples, options['dim_word']), use_noise, trng, retain_probability_emb, scaled)
        ctxs *= shared_dropout_layer((n_samples, 2*options['dim']), use_noise, trng, retain_probability_hidden, scaled)

    # weights (alignment matrix) #####LIUCAN: this is where the attention vector is.
    opt_ret['dec_alphas'] = proj[2]

    # compute word probabilities: sum of three linear projections, squashed
    # by tanh, then a final linear layer into vocabulary size
    logit_lstm = get_layer_constr('ff')(tparams, proj_h, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer_constr('ff')(tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer_constr('ff')(tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)

    if options['use_dropout']:
        logit *= shared_dropout_layer((n_samples, options['dim_word']), use_noise, trng, retain_probability_hidden, scaled)

    logit = get_layer_constr('ff')(tparams, logit, options,
                               prefix='ff_logit', activ='linear')
    logit_shp = logit.shape
    # flatten (time, batch) into one axis so softmax runs row-wise
    probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1],
                                               logit_shp[2]]))

    # cost: pick out the probability assigned to each gold target word
    y_flat = y.flatten()
    y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
    per_sent_neg_log_prob = -tensor.log(probs.flatten()[y_flat_idx])
    per_sent_neg_log_prob = per_sent_neg_log_prob.reshape([y.shape[0], y.shape[1]])
    per_sent_neg_log_prob = (per_sent_neg_log_prob * y_mask).sum(0) # note: y_mask is float, but only stores 0. or 1.

    #print "Print out in build_model()"
    #print opt_ret

    return trng, use_noise, x, x_mask, y, y_mask, opt_ret, per_sent_neg_log_prob
# build a batched sampler
def build_sampler(tparams, options, use_noise, trng, return_alignment=False):
    """Build and compile the two sampling functions.

    f_init(x, x_mask) -> [init_state, ctx]
    f_next(y, ctx, init_state, x_mask)
        -> [next_probs, next_sample, next_state] (+ dec_alphas when
           return_alignment is set)

    NOTE(review): the f_next compiled here takes 4 inputs (including
    x_mask), while gen_sample() in this module calls f_next with only 3 --
    presumably gen_sample is paired with a differently-built sampler
    (gen_par_sample's docstring matches this signature); confirm callers.
    """
    x_mask = tensor.matrix('x_mask', dtype='float32')
    x_mask.tag.test_value = numpy.ones(shape=(5, 10)).astype('float32')

    if options['use_dropout'] and options['model_version'] < 0.1:
        # legacy unscaled dropout: rescale activations by retention probs
        retain_probability_emb = 1-options['dropout_embedding']
        retain_probability_hidden = 1-options['dropout_hidden']
        # retain_probability_source = 1-options['dropout_source'] # todo: should this be used??
        retain_probability_target = 1-options['dropout_target']
        rec_dropout_d = theano.shared(numpy.array([retain_probability_hidden]*5, dtype='float32'))
        emb_dropout_d = theano.shared(numpy.array([retain_probability_emb]*2, dtype='float32'))
        ctx_dropout_d = theano.shared(numpy.array([retain_probability_hidden]*4, dtype='float32'))
        target_dropout = theano.shared(numpy.float32(retain_probability_target))
    else:
        rec_dropout_d = theano.shared(numpy.array([1.]*5, dtype='float32'))
        emb_dropout_d = theano.shared(numpy.array([1.]*2, dtype='float32'))
        ctx_dropout_d = theano.shared(numpy.array([1.]*4, dtype='float32'))

    x, ctx = _build_encoder(tparams, options, trng, use_noise, x_mask=x_mask, sampling=True)

    # get the input for decoder rnn initializer mlp
    ctx_mean = ctx.mean(0)
    # ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)

    if options['use_dropout'] and options['model_version'] < 0.1:
        ctx_mean *= retain_probability_hidden

    init_state = get_layer_constr('ff')(tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')

    print >>sys.stderr, 'Building f_init...',
    outs = [init_state, ctx]
    f_init = theano.function([x, x_mask], outs, name='f_init', profile=profile)
    print >>sys.stderr, 'Done'

    # x: 1 x 1
    y = tensor.vector('y_sampler', dtype='int64')
    init_state = tensor.matrix('init_state', dtype='float32')

    # if it's the first word, emb should be all zero and it is indicated by -1
    emb = tensor.switch(y[:, None] < 0,
                        tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
                        tparams['Wemb_dec'][y])

    if options['use_dropout'] and options['model_version'] < 0.1:
        emb *= target_dropout

    # apply one step of conditional gru with attention
    proj = get_layer_constr(options['decoder'])(tparams, emb, options,
                                            prefix='decoder',
                                            mask=None, context=ctx,
                                            context_mask=x_mask,
                                            one_step=True,
                                            init_state=init_state,
                                            emb_dropout=emb_dropout_d,
                                            ctx_dropout=ctx_dropout_d,
                                            rec_dropout=rec_dropout_d,
                                            profile=profile)
    # get the next hidden state
    next_state = proj[0]

    # get the weighted averages of context for this target word y
    ctxs = proj[1]

    # alignment matrix (attention model)
    dec_alphas = proj[2]

    if options['use_dropout'] and options['model_version'] < 0.1:
        next_state_up = next_state * retain_probability_hidden
        emb *= retain_probability_emb
        ctxs *= retain_probability_hidden
    else:
        next_state_up = next_state

    # same readout structure as build_model: three projections -> tanh -> logits
    logit_lstm = get_layer_constr('ff')(tparams, next_state_up, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer_constr('ff')(tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer_constr('ff')(tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)

    if options['use_dropout'] and options['model_version'] < 0.1:
        logit *= retain_probability_hidden

    logit = get_layer_constr('ff')(tparams, logit, options,
                               prefix='ff_logit', activ='linear')

    # compute the softmax probability
    next_probs = tensor.nnet.softmax(logit)

    # sample from softmax distribution to get the sample
    next_sample = trng.multinomial(pvals=next_probs).argmax(1)

    # compile a function to do the whole thing above, next word probability,
    # sampled word for the next target, next hidden state to be used
    print >>sys.stderr, 'Building f_next..',
    inps = [y, ctx, init_state, x_mask]
    outs = [next_probs, next_sample, next_state]

    if return_alignment:
        outs.append(dec_alphas)

    f_next = theano.function(inps, outs, name='f_next', profile=profile)
    print >>sys.stderr, 'Done'

    return f_init, f_next
# generate sample, either with stochastic sampling or beam search. Note that,
# this function iteratively calls f_init and f_next functions.
def gen_sample(f_init, f_next, x, trng=None, k=1, maxlen=30,
               stochastic=True, argmax=False, return_alignment=False, suppress_unk=False,
               return_hyp_graph=False):
    """Generate a translation for one source input.

    Either stochastic sampling (greedy when ``argmax``) or beam search over
    an ensemble of models; iteratively calls f_init and f_next.

    :param f_init: *list* of f_init functions. Each: state0, ctx0 = f_init(x)
    :param f_next: *list* of f_next functions. Each: next_prob, next_word, next_state = f_next(word, ctx, state)
    :param x: source input passed straight through to f_init (word ids followed by 0, the eos id)
    :param trng: theano RandomStreams (unused here; sampling happens inside f_next)
    :param k: beam width (must be 1 when ``stochastic``)
    :param maxlen: max length of a generated sentence
    :param stochastic: bool, do stochastic sampling instead of beam search
    :param argmax: bool, with ``stochastic``, follow the ensemble argmax word instead of the sampled one
    :param return_alignment: also collect per-hypothesis attention weights (requires f_next to return them)
    :param suppress_unk: forbid the UNK token (word id 1)
    :param return_hyp_graph: also build and return a HypGraph of all expansions
    :return: (sample, sample_score, sample_word_probs, alignment, hyp_graph)

    Portability: uses range (not xrange) and floor division so the function
    runs identically on Python 2 and Python 3.
    """
    # k is the beam size we have
    if k > 1:
        assert not stochastic, \
            'Beam search does not support stochastic sampling'

    sample = []
    sample_score = []
    sample_word_probs = []
    alignment = []
    hyp_graph = None
    if stochastic:
        # stochastic mode accumulates a single scalar score
        sample_score = 0
    if return_hyp_graph:
        from hypgraph import HypGraph
        hyp_graph = HypGraph()

    live_k = 1
    dead_k = 0

    # independent empty lists per hypothesis (the original [[]] * live_k
    # aliased one list; harmless since hypotheses are never mutated in
    # place, but fresh lists are safer)
    hyp_samples = [[] for _ in range(live_k)]
    word_probs = [[] for _ in range(live_k)]
    hyp_scores = numpy.zeros(live_k).astype('float32')
    hyp_states = []
    if return_alignment:
        hyp_alignment = [[] for _ in range(live_k)]

    # for ensemble decoding, we keep track of states and probability distribution
    # for each model in the ensemble
    num_models = len(f_init)
    next_state = [None]*num_models
    ctx0 = [None]*num_models
    next_p = [None]*num_models
    dec_alphas = [None]*num_models
    # get initial state of decoder rnn and encoder context
    for i in range(num_models):
        ret = f_init[i](x)
        next_state[i] = ret[0]
        ctx0[i] = ret[1]
    next_w = -1 * numpy.ones((1,)).astype('int64')  # bos indicator

    # x is a sequence of word ids followed by 0, eos id
    for ii in range(maxlen):
        for i in range(num_models):
            # replicate the (fixed) context once per live hypothesis
            ctx = numpy.tile(ctx0[i], [live_k, 1])
            inps = [next_w, ctx, next_state[i]]
            ret = f_next[i](*inps)
            # dimension of dec_alpha (k-beam-size, number-of-input-hidden-units)
            next_p[i], next_w_tmp, next_state[i] = ret[0], ret[1], ret[2]
            if return_alignment:
                dec_alphas[i] = ret[3]

            if suppress_unk:
                # word id 1 is UNK; -inf log-prob removes it from contention
                next_p[i][:,1] = -numpy.inf
        if stochastic:
            # single-hypothesis decoding: follow argmax of the ensemble
            # average, or the word sampled by the first model
            if argmax:
                nw = sum(next_p)[0].argmax()
            else:
                nw = next_w_tmp[0]
            sample.append(nw)
            sample_score += numpy.log(next_p[0][0, nw])
            if nw == 0:
                break
        else:
            # beam search: expand every live hypothesis by every vocab word;
            # cost = accumulated negative log-prob (lower is better)
            cand_scores = hyp_scores[:, None] - sum(numpy.log(next_p))
            probs = sum(next_p)/num_models
            cand_flat = cand_scores.flatten()
            probs_flat = probs.flatten()
            # indices of the (k - dead_k) lowest-cost candidates (unsorted)
            ranks_flat = cand_flat.argpartition(k-dead_k-1)[:(k-dead_k)]

            # averaging the attention weights accross models
            if return_alignment:
                mean_alignment = sum(dec_alphas)/num_models

            voc_size = next_p[0].shape[1]
            # index of each k-best hypothesis; floor division (//) keeps the
            # indices integral on Python 3 ('/' would yield floats there)
            trans_indices = ranks_flat // voc_size
            word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat]
            new_hyp_samples = []
            new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
            new_word_probs = []
            new_hyp_states = []
            if return_alignment:
                # holds the history of attention weights for each time step for each of the surviving hypothesis
                # dimensions (live_k * target_words * source_hidden_units]
                # at each time step we append the attention weights corresponding to the current target word
                new_hyp_alignment = [[] for _ in range(k-dead_k)]

            # ti -> index of k-best hypothesis
            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                new_hyp_samples.append(hyp_samples[ti]+[wi])
                new_word_probs.append(word_probs[ti] + [probs_flat[ranks_flat[idx]].tolist()])
                new_hyp_scores[idx] = copy.copy(costs[idx])
                new_hyp_states.append([copy.copy(next_state[i][ti]) for i in range(num_models)])
                if return_alignment:
                    # get history of attention weights for the current hypothesis
                    new_hyp_alignment[idx] = copy.copy(hyp_alignment[ti])
                    # extend the history with current attention weights
                    new_hyp_alignment[idx].append(mean_alignment[ti])

            # check the finished samples
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []
            word_probs = []
            if return_alignment:
                hyp_alignment = []

            # sample and sample_score hold the k-best translations and their scores
            for idx in range(len(new_hyp_samples)):
                if return_hyp_graph:
                    word, history = new_hyp_samples[idx][-1], new_hyp_samples[idx][:-1]
                    score = new_hyp_scores[idx]
                    word_prob = new_word_probs[idx][-1]
                    hyp_graph.add(word, history, word_prob=word_prob, cost=score)
                if new_hyp_samples[idx][-1] == 0:
                    # hypothesis emitted eos: move it to the finished pool
                    sample.append(new_hyp_samples[idx])
                    sample_score.append(new_hyp_scores[idx])
                    sample_word_probs.append(new_word_probs[idx])
                    if return_alignment:
                        alignment.append(new_hyp_alignment[idx])
                    dead_k += 1
                else:
                    new_live_k += 1
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
                    word_probs.append(new_word_probs[idx])
                    if return_alignment:
                        hyp_alignment.append(new_hyp_alignment[idx])
            hyp_scores = numpy.array(hyp_scores)
            live_k = new_live_k

            if new_live_k < 1:
                break
            if dead_k >= k:
                break

            # feed the last word of each surviving hypothesis back in, and
            # restack the per-model states row-wise
            next_w = numpy.array([w[-1] for w in hyp_samples])
            next_state = [numpy.array(state) for state in zip(*hyp_states)]

    if not stochastic:
        # dump every remaining one
        if live_k > 0:
            for idx in range(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])
                sample_word_probs.append(word_probs[idx])
                if return_alignment:
                    alignment.append(hyp_alignment[idx])

    if not return_alignment:
        alignment = [None for i in range(len(sample))]

    return sample, sample_score, sample_word_probs, alignment, hyp_graph
# Calculate the log probabilities on a given corpus using the translation model.
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True, normalize=False, alignweights=False):
    """
    Score a parallel corpus with a compiled log-probability function.

    f_log_probs  -- compiled function: (x, x_mask, y, y_mask) -> per-sentence
                    scores (plus attention matrices when alignweights is True)
    prepare_data -- callable that pads/masks a raw (x, y) minibatch
    options      -- model option dict; only 'factors' is read here
    iterator     -- yields (x, y) minibatches
    normalize    -- divide each score by its target length (count_nonzero of
                    the sentence's mask column)
    alignweights -- also collect per-sentence alignment JSON via get_alignments

    Returns (numpy array of scores, list of alignment JSON objects).
    """
    probs = []
    n_done = 0
    alignments_json = []
    for x, y in iterator:
        # ensure consistency in number of factors between settings and corpus
        if len(x[0][0]) != options['factors']:
            sys.stderr.write('Error: mismatch between number of factors in settings ({0}), and number in validation corpus ({1})\n'.format(options['factors'], len(x[0][0])))
            sys.exit(1)
        n_done += len(x)
        x, x_mask, y, y_mask = prepare_data(x, y)
        #n_words_src=options['n_words_src'],
        #n_words=options['n_words'])
        # in optional save-weights mode, f_log_probs also returns attention
        if alignweights:
            pprobs, attention = f_log_probs(x, x_mask, y, y_mask)
            for jdata in get_alignments(attention, x_mask, y_mask):
                alignments_json.append(jdata)
        else:
            pprobs = f_log_probs(x, x_mask, y, y_mask)
        # normalize scores according to output length
        if normalize:
            lengths = numpy.array([numpy.count_nonzero(s) for s in y_mask.T])
            pprobs /= lengths
        for pp in pprobs:
            probs.append(pp)
        # drop into the debugger if any score went non-finite
        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()
        if verbose:
            print >>sys.stderr, '%d samples computed' % n_done
    return numpy.array(probs), alignments_json
# Generate samples with beam search for a whole batch at once. Note that this
# function iteratively calls the f_init and f_next functions.
def gen_par_sample(f_init, f_next, x, x_mask, k=1, maxlen=30, suppress_unk=False):
    """
    :param f_init: *list* of f_init functions (one per ensemble model).
                   Each: state0, ctx0 = f_init(x, X_MASK)
    :param f_next: *list* of f_next functions. Each:
                   next_prob, next_word, next_state = f_next(word, ctx, state, X_MASK)
    :param x: a BATCHED sequence of word ids, each terminated by 0 (0 = eos id)
    :param x_mask: source-side mask aligned with x
    :param k: beam width (per sentence)
    :param maxlen: max length of a sentence
    :param suppress_unk: if True, assign -inf score to vocabulary index 1
                         (presumably the UNK id -- confirm against the vocab)
    :return: (sample, sample_score, sample_word_probs), each a list with one
             entry per sentence in the batch
    """
    # k is the beam size we have
    batch_size = x.shape[2]
    # per-sentence result accumulators (finished hypotheses land here)
    sample = [[] for i in range(batch_size)]
    sample_score = [[] for i in range(batch_size)]
    sample_word_probs = [[] for i in range(batch_size)]
    live_k = [1] * batch_size   # open hypotheses per sentence
    dead_k = [0] * batch_size   # num completed sentences
    # Live hypotheses are kept FLAT across the batch, ordered by sentence;
    # live_k records how many belong to each sentence.
    hyp_samples = [[]] * batch_size * 1  # wrote 1 explicitly to denote 1 live_k per sent
    word_probs = [[]] * batch_size * 1
    hyp_scores = numpy.zeros(batch_size * 1).astype('float32')
    # for ensemble decoding, we keep track of states and probability distribution
    # for each model in the ensemble
    num_models = len(f_init)
    next_state = [None]*num_models
    ctx0 = [None]*num_models  # initial context
    next_ps = [None]*num_models
    # get initial state of decoder rnn and encoder context
    for i in xrange(num_models):
        ret = f_init[i](x, x_mask)
        next_state[i] = ret[0]
        ctx0[i] = ret[1]
    next_w = -1 * numpy.ones((batch_size,)).astype('int64')  # bos (beginning of sent) indicator
    # x is a sequence of word ids followed by 0, eos id
    for ii in xrange(maxlen):
        for i in xrange(num_models):
            # Encoder context does not change; we just need one column per live
            # hypothesis. Tile each sentence's context live_k[sent] times, then
            # concat so columns line up with the flat hypothesis order.
            ctx = numpy.concatenate([numpy.tile(ctx0[i][:,sent_idx:sent_idx+1], [live_k_per_sent, 1]) for sent_idx, live_k_per_sent in enumerate(live_k)], axis = 1)
            mask = numpy.concatenate([numpy.tile(x_mask[:,sent_idx:sent_idx+1], [1, live_k_per_sent]) for sent_idx, live_k_per_sent in enumerate(live_k)], axis = 1)
            inps = [next_w, ctx, next_state[i], mask]  # prepare parameters for f_next
            ret = f_next[i](*inps)
            # ret[1] (sampled words) is unused: beam search picks words below
            next_ps[i], next_ws_tmp, next_state[i] = ret[0], ret[1], ret[2]
            if suppress_unk:
                next_ps[i][:, 1] = -numpy.inf
        # Same flat structures as single-sentence search; only the
        # interpretation (per-sentence slices) differs.
        voc_size = next_ps[0].shape[1]  # should be constant across models
        # ensemble: candidate score = running cost + summed negative log-probs;
        # probs is the model-averaged distribution (used for word_probs output)
        cand_scores = hyp_scores[:, None] - sum(numpy.log(next_ps))
        probs = sum(next_ps)/num_models
        cand_flat = cand_scores.flatten()
        probs_flat = probs.flatten()
        # argpartition each sentence's slice separately so beams never mix
        # across sentence boundaries.
        sent_boundaries = numpy.cumsum([0] + live_k) * voc_size  # mult by vocab size because the softmaxes are flattened too
        # start, end = flat start/end index for one sentence (end exclusive);
        # add 'start' back because argpartition indexes the small sub-array
        ranks_flat = numpy.concatenate([start + cand_flat[start:end].argpartition(k - dead_per_sent-1)[:(k-dead_per_sent)]
                                        for start, end, dead_per_sent in zip(sent_boundaries[:-1], sent_boundaries[1:], dead_k)], axis = 0)
        # index of each k-best hypothesis
        trans_indices = ranks_flat / voc_size  # which flat hypothesis it came from (Py2 int division; use // if ported to Py3)
        word_indices = ranks_flat % voc_size   # and which word id was chosen
        costs = cand_flat[ranks_flat]          # the selected cumulative costs
        new_hyp_samples = []
        new_hyp_scores = numpy.zeros(len(ranks_flat)).astype('float32')
        new_word_probs = []
        new_hyp_states = []
        # ti -> index of k-best hypothesis
        for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
            # hyps proceed in order, since ranks_flat goes in order of sentences
            new_hyp_samples.append(hyp_samples[ti]+[wi])  # extend the source hypothesis with the new word
            new_word_probs.append(word_probs[ti] + [probs_flat[ranks_flat[idx]].tolist()])  # parallel per-word probability history
            new_hyp_scores[idx] = copy.copy(costs[idx])   # total cost with the new word added
            new_hyp_states.append([copy.copy(next_state[i][ti]) for i in xrange(num_models)])  # copy the state over too
        # check the finished samples
        new_live_k = [0] * batch_size
        hyp_samples = []
        hyp_scores = []
        hyp_states = []
        word_probs = []
        # In the flat hypothesis arrays, markers between sentences follow from
        # the cumulative number of still-open beams per sentence.
        live_k_tmp = [k-dead_k_sent for dead_k_sent in dead_k]
        sample_sent_boundaries = numpy.cumsum(live_k_tmp)
        sent_idx = 0
        for idx in xrange(len(new_hyp_samples)):
            # advance sent_idx until idx falls inside this sentence's slice
            while idx >= sample_sent_boundaries[sent_idx]:
                sent_idx += 1
            if new_hyp_samples[idx][-1] == 0:  # eos: hypothesis finished
                sample[sent_idx].append(new_hyp_samples[idx])
                sample_score[sent_idx].append(new_hyp_scores[idx])
                sample_word_probs[sent_idx].append(new_word_probs[idx])
                dead_k[sent_idx] += 1  # finished the sent
            else:
                new_live_k[sent_idx] += 1  # count live k's
                # live ones stay flat
                hyp_samples.append(new_hyp_samples[idx])
                hyp_scores.append(new_hyp_scores[idx])
                hyp_states.append(new_hyp_states[idx])
                word_probs.append(new_word_probs[idx])
        hyp_scores = numpy.array(hyp_scores)
        live_k = new_live_k
        # Conservative break conditions: stop only when no beam anywhere is
        # open, or every sentence has collected its k finished hypotheses.
        if sum(new_live_k) < 1:
            break
        if min(dead_k) >= k:
            break
        next_w = numpy.array([w[-1] for w in hyp_samples])
        next_state = [numpy.array(state) for state in zip(*hyp_states)]
    # dump every remaining (unfinished) hypothesis into the results
    sample_sent_boundaries = numpy.cumsum(live_k)
    sent_idx = 0
    for idx in xrange(len(hyp_samples)):
        while idx >= sample_sent_boundaries[sent_idx]:
            sent_idx += 1
        sample[sent_idx].append(hyp_samples[idx])
        sample_score[sent_idx].append(hyp_scores[idx])
        sample_word_probs[sent_idx].append(word_probs[idx])
    return sample, sample_score, sample_word_probs
# My notes: doing it in par:
# All of this stuff, topk etc, is done in numpy. Only f_next() etc are done in theano/GPU.
# So just need some extra handling steps after-the-fact.
# It doesn't look like there is any conflict with sending different sentences in, for f_next()
|
<gh_stars>0
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Lab 1 - Driving in Shapes
"""
########################################################################################
# Imports
########################################################################################
import sys
sys.path.insert(1, "../../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
rc = racecar_core.create_racecar()  # interface to the (real or simulated) car

# A queue of driving steps to execute.
# Each entry is a mutable list [time_remaining, speed, angle]; update()
# decrements slot 0 in place each frame and pops the step when it expires.
queue = []
########################################################################################
# Functions
########################################################################################
def start():
    """
    Runs once each time the start button is pressed: halt the car, reset the
    instruction queue, and print the control reference for the driver.
    """
    # Start from rest with no queued shape
    rc.drive.stop()
    queue.clear()
    # Show the available controls
    # TODO (main challenge): add a line explaining what the Y button does
    print(
        ">> Lab 1 - Driving in Shapes\n"
        "\n"
        "Controls:\n"
        "    Right trigger = accelerate forward\n"
        "    Left trigger = accelerate backward\n"
        "    Left joystick = turn front wheels\n"
        "    A button = drive in a circle\n"
        "    B button = drive in a square\n"
        "    X button = drive in a figure eight\n"
    )
def update():
    """
    After start() is run, this function is run every frame until the back
    button is pressed: queue shapes on button presses, compute manual
    speed/angle from the triggers and joystick, and execute queued steps.
    """
    global queue

    # When the A button is pressed, add instructions to drive in a circle
    if rc.controller.was_pressed(rc.controller.Button.A):
        drive_circle()

    # When the B button is pressed, add instructions to drive in a square
    if rc.controller.was_pressed(rc.controller.Button.B):
        drive_square()

    # When the X button is pressed, add instructions to drive in a figure eight
    if rc.controller.was_pressed(rc.controller.Button.X):
        drive_figure_eight()

    # TODO: Drive in a shape of your choice when the Y button is pressed

    # Calculate speed from triggers
    right_trigger = rc.controller.get_trigger(rc.controller.Trigger.RIGHT)
    left_trigger = rc.controller.get_trigger(rc.controller.Trigger.LEFT)
    speed = right_trigger - left_trigger

    # Calculate angle from left joystick
    angle = rc.controller.get_joystick(rc.controller.Joystick.LEFT)[0]

    # If the triggers or joystick were pressed, clear the queue to cancel the
    # current shape and allow for manual driving.
    # BUG FIX: the original tested right_trigger twice (so the left trigger
    # never cancelled the shape) and used `angle > 0`, which ignored left
    # (negative) joystick deflection.
    if right_trigger > 0 or left_trigger > 0 or angle != 0:
        queue.clear()

    # If the queue is not empty, follow the current drive instruction
    if len(queue) > 0:
        speed = queue[0][1]
        angle = queue[0][2]
        queue[0][0] -= rc.get_delta_time()
        if queue[0][0] <= 0:
            queue.pop(0)

    rc.drive.set_speed_angle(speed, angle)

# TODO (main challenge): Drive in a shape of your choice when the Y button
# is pressed
def drive_circle():
    """
    Replace the queue with a single instruction: full throttle, wheels hard
    right, held long enough for the car to trace one complete circle.
    """
    global queue
    # Tune this duration until the car closes the loop exactly
    FULL_CIRCLE_SECONDS = 6
    queue.clear()
    queue.append([FULL_CIRCLE_SECONDS, 1, 1])
def drive_square():
    """
    Queue four (straight, right-turn) pairs so the car traces a square.
    """
    global queue
    # Tune these durations until the car completes a clean square
    EDGE_SECONDS = 1.5
    CORNER_SECONDS = 1.2
    queue.clear()
    for _ in range(4):
        # Append fresh lists each pass: update() mutates the remaining-time
        # slot of each entry in place, so entries must not be shared.
        queue.append([EDGE_SECONDS, 1, 0])
        queue.append([CORNER_SECONDS, 1, 1])
def drive_figure_eight():
    """
    Queue straight/right/straight/left steps that trace a figure eight.
    """
    global queue
    # Tune these durations until the eight closes cleanly; the first straight
    # is longer to give the car time to accelerate from rest.
    ACCEL_STRAIGHT_SECONDS = 3
    CRUISE_STRAIGHT_SECONDS = 2
    LOOP_TURN_SECONDS = 3.5
    queue.clear()
    queue.append([ACCEL_STRAIGHT_SECONDS, 1, 0])
    queue.append([LOOP_TURN_SECONDS, 1, 1])
    queue.append([CRUISE_STRAIGHT_SECONDS, 1, 0])
    queue.append([LOOP_TURN_SECONDS, 1, -1])
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
    # Register the lifecycle callbacks and hand control to the racecar framework
    rc.set_start_update(start, update)
    rc.go()
|
# -*- coding: utf-8 -*-
import optparse
import os
import sys
import getpass
import json
import hashlib
import smtplib
import commands
import subprocess
import shutil
import re
from pbxproj import XcodeProject
from pbxproj.pbxextensions.ProjectFiles import FileOptions
# Keychain settings (used to unlock the login keychain for codesigning)
keychainPath="~/Library/Keychains/login.keychain-db"
keychainPassword="<PASSWORD>"  # NOTE(review): placeholder -- must be supplied securely, never committed
# Directory where Xcode expects installed provisioning profiles
mobileprovision_path="/Users/"+getpass.getuser()+"/Library/MobileDevice/Provisioning Profiles"
class XCodeBuild(object):
#自动重载
def __init__(self,*args):
if len(args)==1:
self.initTest(args[0])
elif len(args)==8:
self.initConfig(args[0],args[1],args[2],args[3],args[4],args[5],args[6],args[7])
pass
def initTest(self,xcodeProjectRootPath):
self.xcodeProjectRootPath = xcodeProjectRootPath
    def initTestProject(self):
        """
        Test-mode helper: load the hard-coded project_test.xcodeproj and print
        its target names (also touching each target's configuration set).
        """
        # initialisation -- paths are fixed to the 'project_test' fixture
        pbxproj=self.xcodeProjectRootPath+'/project_test.xcodeproj/project.pbxproj'
        infoPlistPath=self.xcodeProjectRootPath+'/project_test/Info.plist'  # NOTE(review): computed but unused here
        self.project = XcodeProject.load(pbxproj)
        targetsNames=self.project.get_targets_names()
        for targetName in targetsNames:
            # get_configSet_By_Target appears to be a project-local pbxproj
            # extension; its return value is discarded -- confirm intent
            self.project.get_configSet_By_Target(targetName)
        print targetsNames
        pass
    def initConfig(self,xcodeProjectRootPath,infoPlistFilePath,isWorkSpace,targetName,configurationSet,certificateName,provisioning_profile_file,base_exportOptionPlist):
        """
        Full configuration for a real build.

        xcodeProjectRootPath      -- directory containing the .xcodeproj/.xcworkspace
        infoPlistFilePath         -- path to the target's Info.plist
        isWorkSpace               -- True to build via the .xcworkspace
        targetName                -- target / scheme name
        configurationSet          -- build configuration (e.g. Release)
        certificateName           -- codesigning identity name
        provisioning_profile_file -- path to a .mobileprovision file
        base_exportOptionPlist    -- template exportOptions.plist copied per build
        """
        self.xcodeProjectRootPath = xcodeProjectRootPath
        self.infoPlistFilePath=infoPlistFilePath
        self.isWorkSpace=isWorkSpace
        self.targetName=targetName
        self.exportOptionPlist=base_exportOptionPlist
        # build configuration
        self.configuration_set=configurationSet
        # codesigning certificate name
        self.certificateName =certificateName
        # Extract the UUID from the .mobileprovision file: `security cms -D`
        # decodes it to a plist, PlistBuddy reads the UUID key from stdin
        (status, provisioning_profile_temp) = commands.getstatusoutput("/usr/libexec/PlistBuddy -c 'Print UUID' /dev/stdin <<< $(/usr/bin/security cms -D -i %s)" %(provisioning_profile_file))
        self.provisioning_profile=provisioning_profile_temp
        print 'self.provisioning_profile :'
        print self.provisioning_profile
        print "开始打开.mobileprovision文件位置"
        # os.system("open %s"%(provisioning_profile_file))
    def updateExportOptionPlistData(self):
        """
        Create a per-build copy of the template exportOptions.plist and record
        the bundle-id -> provisioning-profile mapping inside it. Stores the
        copy's path in self.current_exportOption_plist.
        """
        # Copy the template so each (display name, bundle id) pair gets its own plist
        new_exportOption_plist= '%s_%s'%(self.app_display_name,self.app_bundle_id)+'_'+self.exportOptionPlist.split('/')[-1]
        # Place the copy in the Xcode project root used for packaging
        new_exportOption_plist=os.path.join(self.xcodeProjectRootPath,new_exportOption_plist)
        shutil.copyfile(self.exportOptionPlist,new_exportOption_plist)
        self.current_exportOption_plist=new_exportOption_plist
        # Add then Set: Add creates the key if missing, Set overwrites if it existed
        cmd='/usr/libexec/PlistBuddy -c "Add :provisioningProfiles:%s string %s" %s'%(self.app_bundle_id,self.provisioning_profile,new_exportOption_plist)
        os.system(cmd)
        os.system('/usr/libexec/PlistBuddy -c "Set :provisioningProfiles:%s %s" %s'%(self.app_bundle_id,self.provisioning_profile,new_exportOption_plist))
        pass
    def cleanPro(self):
        """Run `xcodebuild clean` for the workspace/target, then delete the build directory."""
        if self.isWorkSpace:
            os.system('cd %s;xcodebuild -workspace %s.xcworkspace -scheme %s clean'%(self.xcodeProjectRootPath,self.targetName,self.targetName))
        else:
            os.system('cd %s;xcodebuild -target %s clean'%(self.xcodeProjectRootPath,self.targetName))
        build_path=self.xcodeProjectRootPath+"/build"
        # Best-effort removal of leftover build products (rm failure is ignored)
        cleanCmd = "rm -r %s" %(build_path)
        process = subprocess.Popen(cleanCmd, shell = True)
        process.wait()
        return
def clearPbxproj(self):
path = "%s/%s.xcodeproj/project.pbxproj"%(self.xcodeProjectRootPath,self.targetName)
print path;
file_object = open(path)
try:
all_the_text=file_object.readlines()
for text in all_the_text:
if 'PROVISIONING_PROFILE' in text:
all_the_text.remove(text)
finally:
file_object.close()
file_object = open(path,'w')
try:
for text in all_the_text:
file_object.write(text)
finally:
file_object.close()
return
def cerateIPA(self):
# return;
# os.system ("cd %s;rm -r -f %s-%s.ipa"%(mainPath,targetName,configuration_set))
# build_path="build"
# if isWorkSpace:
# build_path="build/Build/Products"
# else:
# build_path="build"
# os.system ("cd %s;xcrun -sdk iphoneos PackageApplication -v %s/%s/%s-iphoneos/%s.app -o %s/%s-%s.ipa CODE_SIGN_IDENTITY='%s'"%(mainPath,mainPath,build_path,configuration_set,targetName,mainPath,targetName,configuration_set,certificateName))
# app_path="%s/%s/%s-iphoneos/%s.app" %(mainPath,build_path,configuration_set,targetName);
# device_udid="booted";
# unInstallApp(device_udid,app_path)
# installApp(device_udid,app_path)
return
    def exportArchive(self,archivePath):
        """
        Export an .ipa from the given .xcarchive via `xcodebuild -exportArchive`.

        Returns (code, exportDirectory): (0, path) on success, (-1, None) on
        failure. Temporary build artefacts are deleted either way.
        """
        result_exportDirectory=None
        exportDirectory=self.buildExportDirectory()
        exportCmd = "xcodebuild -exportArchive -archivePath %s -exportPath %s PROVISIONING_PROFILE='%s' CODE_SIGN_IDENTITY='%s' -exportOptionsPlist %s" %(archivePath, exportDirectory,self.provisioning_profile,self.certificateName,self.current_exportOption_plist)
        process = subprocess.Popen(exportCmd, shell=True)
        (stdoutdata, stderrdata) = process.communicate()
        signReturnCode = process.returncode
        code=None
        # After packaging, delete the temporary exportOptions plist, the
        # .xcarchive and the build directory (best effort: errors only printed)
        try:
            os.remove(self.current_exportOption_plist)
            cleanCmd = "rm -r %s" %(archivePath)
            os.system(cleanCmd)
            build_path=self.xcodeProjectRootPath+"/build"
            cleanCmd = "rm -r %s" %(build_path)
            process = subprocess.Popen(cleanCmd, shell = True)
            process.wait()
        except Exception,e:
            print e
        if signReturnCode != 0:
            code=-1
            print 'ipa打包失败!'
        else:
            result_exportDirectory=exportDirectory
            code=0
            print 'ipa打包成功,路径在:%s'%(exportDirectory)
            # os.system('open %s'%(exportDirectory))
        return code,result_exportDirectory
def buildArchivePath(self,tempName):
archiveName = "%s.xcarchive" %(tempName)
archivePath = self.xcodeProjectRootPath + '/' + archiveName
cleanCmd = "rm -r %s" %(archivePath)
process = subprocess.Popen(cleanCmd, shell = True)
process.wait()
return archivePath
    def updateMobileProvisionProfile(self,file_path):
        """
        Install file_path into ~/Library/MobileDevice/Provisioning Profiles,
        renamed to <UUID>.mobileprovision (the layout Xcode reads).
        """
        # Read the profile UUID to build the canonical file name
        (status, provisioning_profile_temp) = commands.getstatusoutput("/usr/libexec/PlistBuddy -c 'Print UUID' /dev/stdin <<< $(/usr/bin/security cms -D -i %s)" %(file_path))
        newname=provisioning_profile_temp+'.mobileprovision'
        # oldname=file_path.split('/')[-1]
        newfile=os.path.join(mobileprovision_path,newname)
        # Only copy when not already installed (idempotent)
        if not os.path.exists(newfile):
            shutil.copyfile(file_path,newfile)
def buildExportDirectory(self):
dateCmd = 'date "+%Y-%m-%d_%H-%M-%S"'
process = subprocess.Popen(dateCmd, stdout=subprocess.PIPE, shell=True)
(stdoutdata, stderrdata) = process.communicate()
exportDirectory = "%s/%s" %(self.xcodeProjectRootPath, stdoutdata.strip())
return exportDirectory
def checkWorkSpace(self):
if os.path.exists("%s/%s.xcworkspace"%(self.xcodeProjectRootPath,self.targetName)):
self.isWorkSpace = True
else:
self.isWorkSpace = False
return
    def unInstallApp(self,device_udid,app_bundle_id):
        """Uninstall the app (by bundle id) from the simulator identified by device_udid."""
        (status,results)=commands.getstatusoutput("xcrun simctl uninstall %s '%s'" %(device_udid,app_bundle_id));
        print status, results
    def installApp(self,device_udid,app_path):
        """Install the .app bundle at app_path onto the simulator identified by device_udid."""
        (status,results)=commands.getstatusoutput("xcrun simctl install %s %s" %(device_udid,app_path));
        print 'status:%s,results:%s' %(status,results)
        return
        # Unreachable legacy variant kept for reference:
        # process=subprocess.Popen("xcrun simctl install %s %s" %(device_udid,app_path),shell=True)
        # process.wait()
        # (stdoutdata, stderrdata) = process.communicate()
        # signReturnCode = process.returncode
        # if signReturnCode!=0:
        #     print 'error result:%s,stdoutdata:%s'%(stderrdata,stdoutdata)
        # else:
        #     print 'install app success!'
    # def startApp(device_udid,app_bundle_id):
    #     (status,results)=commands.getstatusoutput("sudo xcrun simctl launch %s '%s'" %(device_udid,app_bundle_id));
    #     print status, results
def isNone(self,para):
if para == None or len(para) == 0:
return True
else:
return False
def allowFinder(self):
os.system("chmod -R 777 %s"%(self.xcodeProjectRootPath))
return
def eachFile(self,filepath,postfix):
datas = []
fileNames = os.listdir(filepath)
for file in fileNames:
newDir = filepath + '/' + file
if os.path.isfile(newDir):
if os.path.splitext(newDir)[1] == postfix:
datas.append(newDir)
else:
eachFile(newDir)
return datas
def scan_files(self,directory,postfix):
files_list=[]
for root, sub_dirs, files in os.walk(directory):
for special_file in sub_dirs:
if special_file.endswith(postfix):
files_list.append(os.path.join(root,special_file))
return files_list
def isFinderExists(self):
return os.path.exists(self.xcodeProjectRootPath)
def allowKeychain(self):
# print "security unlock-keychain -p '%s' %s"%(keychainPassword,keychainPath)
os.system("security unlock-keychain -p '%s' %s"%(keychainPassword,keychainPath))
return
    def buildApp(self):
        """
        Archive the target with `xcodebuild archive` and, on success, export
        the .ipa via exportArchive().

        Returns (code, resultMsg): (0, export directory) on success,
        (-1, None) on build failure.
        """
        # Collect every .xcodeproj under the root, then drop the main target's
        # own project. NOTE(review): files_list is never used afterwards --
        # looks like a leftover of an older multi-project flow (see the
        # commented loop below); confirm before removing.
        files_list=self.scan_files(self.xcodeProjectRootPath,".xcodeproj")
        temp = -1
        for k in range(len(files_list)):
            if files_list[k] == self.xcodeProjectRootPath + "/" + self.targetName + ".xcodeproj":
                temp = k
        if temp >= 0:
            files_list.pop(temp)
        archivePath=self.buildArchivePath(self.targetName)
        # for target in files_list:
        #     target=target.replace(".xcodeproj","")
        #     tmpList=target.split('/')
        #     name=tmpList[len(tmpList)-1]
        #     path=target.replace(name,"")
        #     path=path[0:len(path)-1]
        #     os.system("cd %s;xcodebuild -target %s -configuration '%s' PROVISIONING_PROFILE='%s' CODE_SIGN_IDENTITY='%s' archive -archivePath %s -destination generic/platform=iOS"%(path,name,self.configuration_set,self.provisioning_profile,self.certificateName,archivePath))
        # -arch arm64 -arch armv7s -arch armv7
        if self.isWorkSpace:
            buildCmd="cd %s;xcodebuild -workspace %s.xcworkspace -configuration '%s' -scheme %s PROVISIONING_PROFILE='%s' CODE_SIGN_IDENTITY='%s' -derivedDataPath build/ archive -archivePath %s -destination generic/platform=iOS"%(self.xcodeProjectRootPath,self.targetName,self.configuration_set,self.targetName,self.provisioning_profile,self.certificateName,archivePath)
        else:
            buildCmd="cd %s;xcodebuild -target %s -configuration '%s' PROVISIONING_PROFILE='%s' CODE_SIGN_IDENTITY='%s' archive -archivePath %s -destination generic/platform=iOS -scheme %s"%(self.xcodeProjectRootPath,self.targetName,self.configuration_set,self.provisioning_profile,self.certificateName,archivePath,self.targetName)
        process = subprocess.Popen(buildCmd, shell=True)
        (stdoutdata, stderrdata) = process.communicate()
        signReturnCode = process.returncode
        resultMsg=None
        code=None
        if signReturnCode != 0:
            print 'ipabuild失败!'
            code=-1
        else:
            code,resultMsg=self.exportArchive(archivePath)
        return code,resultMsg
# 启用沙盒文件访问
def setUIFileSharingEnabled(self):
os.system('/usr/libexec/PlistBuddy -c "Add :UIFileSharingEnabled bool True" %s'%(self.infoPlistFilePath))
os.system('/usr/libexec/PlistBuddy -c "Set :UIFileSharingEnabled True" %s'%(self.infoPlistFilePath))
# 禁用沙盒文件访问
def setUIFileSharingDisabled(self):
os.system('/usr/libexec/PlistBuddy -c "Add :UIFileSharingEnabled bool False" %s'%(self.infoPlistFilePath))
os.system('/usr/libexec/PlistBuddy -c "Set :UIFileSharingEnabled False" %s'%(self.infoPlistFilePath))
# 设置参与编译的.m文件的compiler-flag为'-fno-objc-arc'
def modifyXCodeFileCompilerFlag(self,filePath):
strongFileOptions=FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
addFileReference= self.project.add_file(filePath, force=False, file_options=strongFileOptions, parent=self.frameworksGroupID, tree='SDKROOT')
files=self.project.get_build_files_for_file(addFileReference[0].fileRef)
for f in files:
f.add_compiler_flags('-fno-objc-arc')
    # Whitelist a domain (ATS exception) so plain-HTTP traffic passes review
    def setAdapteHttps(self,domainAddress):
        """
        Add NSExceptionDomains entries for domainAddress in Info.plist:
        subdomains included, temporary insecure HTTP loads allowed.
        Each key is Add-ed first (creates it) then Set (overwrites it).
        """
        os.system('/usr/libexec/PlistBuddy -c "Add :NSAppTransportSecurity:NSExceptionDomains:%s:NSIncludesSubdomains bool True" %s'%(domainAddress,self.infoPlistFilePath))
        os.system('/usr/libexec/PlistBuddy -c "Set :NSAppTransportSecurity:NSExceptionDomains:%s:NSIncludesSubdomains True" %s'%(domainAddress,self.infoPlistFilePath))
        os.system(
            '/usr/libexec/PlistBuddy -c "Add :NSAppTransportSecurity:NSExceptionDomains:%s:NSTemporaryExceptionAllowsInsecureHTTPLoads bool True" %s' % (domainAddress,
            self.infoPlistFilePath))
        os.system(
            '/usr/libexec/PlistBuddy -c "Set :NSAppTransportSecurity:NSExceptionDomains:%s:NSTemporaryExceptionAllowsInsecureHTTPLoads True" %s' % (domainAddress,
            self.infoPlistFilePath))
def addNSAppTransportSecurity(self):
os.system('/usr/libexec/PlistBuddy -c "Add :NSAppTransportSecurity:NSAllowsArbitraryLoads bool True" %s'%(infoPlistFilePath))
os.system('/usr/libexec/PlistBuddy -c "Set :NSAppTransportSecurity:NSAllowsArbitraryLoads True" %s'%(infoPlistFilePath))
def updateAppBundleId(self,app_bundle_id):
self.app_bundle_id=app_bundle_id
os.system('/usr/libexec/PlistBuddy -c "Set :CFBundleIdentifier %s" %s'%(app_bundle_id,self.infoPlistFilePath))
pass
def updateAppDisplayName(self,displayName):
self.app_display_name=displayName
os.system('/usr/libexec/PlistBuddy -c "Set :CFBundleName %s" %s'%(displayName,self.infoPlistFilePath))
os.system('/usr/libexec/PlistBuddy -c "Add :CFBundleDisplayName string %s" %s'%(displayName,self.infoPlistFilePath))
os.system('/usr/libexec/PlistBuddy -c "Set :CFBundleDisplayName %s" %s'%(displayName,self.infoPlistFilePath))
def setProjectAppDisplayName(self,displayName):
updateAppDisplayName(displayName)
    # NOTE(review): defined inside the class but WITHOUT a `self` parameter --
    # if invoked as an instance method, `filePath` receives the instance.
    # Presumably it is called unbound or was moved from module scope; confirm
    # the call sites before changing the signature.
    def automaticIntegrationCodeInDidFinishLaunchingWithOptions(filePath,insert_header_file_code='#import "SDK.h"',insert_code=u'[[SDK sharedInstance] show:@\"%s\" withWindow:self.window];\n' %("123")):
        """
        Inject SDK bootstrap code into an AppDelegate implementation file:
        inserts `insert_code` just before the `return YES;` of
        application:didFinishLaunchingWithOptions:, and `insert_header_file_code`
        at the top of the file. Idempotent (skips lines already present).
        Returns 0 on success, 1 on any error.
        """
        try:
            # Force utf-8 default encoding (Python 2 only; reload exposes setdefaultencoding)
            default_encoding = 'utf-8'
            if sys.getdefaultencoding() != default_encoding:
                reload(sys)
                sys.setdefaultencoding(default_encoding)
            lines = []
            f = open(filePath, 'r')
            index = 0
            findindex = -1
            spacecount = 0
            isfinddidFinishLaunchingWithOptions = -1
            returnYESIndex = -1
            for line in f:
                index += 1
                # Find the target method; remember its 1-based line index
                if 'didFinishLaunchingWithOptions' in line:
                    findindex = index
                    isfinddidFinishLaunchingWithOptions = 1
                # Remember the index of the method's `return YES;` line
                if isfinddidFinishLaunchingWithOptions == 1 and 'return YES;' in line:
                    returnYESIndex = index
                # Count the leading whitespace of the line two below the
                # method signature (taken as the first body line)
                if index == findindex + 2:
                    for c in line:
                        if c.isspace():
                            spacecount += 1
                        else:
                            break
                lines.append(line)
            f.close()
            # The measured indent is then overridden to a fixed 4 spaces
            spacecount = 4
            splitstr = spacecount * ' '
            insertstr = splitstr+insert_code
            # Only insert the bootstrap call if it is not already present
            if insertstr not in lines:
                lines.insert(returnYESIndex - 1, insertstr)
            headerIncludeStr = u'%s \n'%(insert_header_file_code)
            if headerIncludeStr not in lines:
                lines.insert(0, headerIncludeStr)
            # Rewrite the file in place
            s = ''.join(lines)
            f = open(filePath, 'w++')  # NOTE(review): 'w++' is not a standard mode string -- verify (likely meant 'w+')
            f.write(s)
            f.close()
            del lines[:]
            print 'Automatic integration Success!'
            return 0
        except Exception as e:
            print "please check the error", e
            return 1
    def addSystemFrameworkOrDylib(self,project):
        """
        Link the standard set of system dylibs and frameworks into `project`
        under the frameworks group. Most are linked required (strong); Social,
        CoreTelephony, StoreKit, Passkit and AVKit use the weak/optional
        FileOptions defined below.
        """
        strongFileOptions=FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
        frameworksGroupID=self.frameworksGroupID
        # C dylibs
        project.add_file('usr/lib/libxml2.dylib', force=False, file_options=strongFileOptions, parent=frameworksGroupID, tree='SDKROOT')
        project.add_file('usr/lib/libsqlite3.dylib', force=False, file_options=strongFileOptions, parent=frameworksGroupID, tree='SDKROOT')
        project.add_file('usr/lib/libz.dylib', force=False, file_options=strongFileOptions, parent=frameworksGroupID, tree='SDKROOT')
        project.add_file('usr/lib/libc++.dylib', force=False, file_options=strongFileOptions, parent=frameworksGroupID, tree='SDKROOT')
        project.add_file('usr/lib/libsqlite3.0.dylib', force=False, file_options=strongFileOptions, parent=frameworksGroupID, tree='SDKROOT')
        # project.add_file('usr/lib/libstdc++.6.dylib', parent=frameworksGroupID, tree='SDKROOT')
        # project.add_file('usr/lib/libstdc++.dylib', parent=frameworksGroupID, tree='SDKROOT')
        # project.add_file('usr/lib/libstdc++.6.0.9.dylib', parent=frameworksGroupID, tree='SDKROOT')
        project.add_file('usr/lib/libxml2.2.dylib', force=False, file_options=strongFileOptions, parent=frameworksGroupID, tree='SDKROOT')
        # System frameworks (required)
        project.add_file('System/Library/Frameworks/CoreBluetooth.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/GLKit.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/AudioToolbox.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreFoundation.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/ImageIO.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/AdSupport.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/AVFoundation.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreMedia.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/Foundation.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/Security.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/UIKit.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreVideo.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CFNetwork.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/MobileCoreServices.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreData.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreMotion.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/EventKitUI.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/EventKit.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/MessageUI.framework', file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        # Weak (optional) link options for frameworks not present on all OS versions
        file_options = FileOptions(weak=True,embed_framework=False,code_sign_on_copy=False)
        project.add_file('System/Library/Frameworks/Social.framework', force=False, parent=frameworksGroupID,file_options=file_options,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/Twitter.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreGraphics.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreLocation.framework',file_options=strongFileOptions, force=False, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/CoreTelephony.framework', force=False, parent=frameworksGroupID,file_options=file_options,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/MediaPlayer.framework', force=False, file_options=strongFileOptions, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/QuartzCore.framework', force=False, file_options=strongFileOptions, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/StoreKit.framework', force=False, parent=frameworksGroupID,file_options=file_options,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/SystemConfiguration.framework',force=False,file_options=strongFileOptions, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/AdSupport.framework', force=False, file_options=strongFileOptions, parent=frameworksGroupID,tree='SDKROOT')
        # NOTE(review): 'Passkit.framework' and 'Mapkit.framework' below do not
        # match the canonical PassKit/MapKit casing -- verify on a
        # case-sensitive filesystem.
        project.add_file('System/Library/Frameworks/Passkit.framework', force=False, parent=frameworksGroupID,file_options=file_options,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/Mapkit.framework', force=False, file_options=strongFileOptions, parent=frameworksGroupID, tree='SDKROOT')
        project.add_file('System/Library/Frameworks/WebKit.framework', force=False, file_options=strongFileOptions, parent=frameworksGroupID,tree='SDKROOT')
        project.add_file('System/Library/Frameworks/AVKit.framework', force=False, parent=frameworksGroupID,file_options=file_options,tree='SDKROOT')
def add_Adcolony(self):
parent_path = os.path.dirname(self.xcodeProjectRootPath)
sDKResourcePath= os.path.join(parent_path, 'sdks/Adcolony')
#库文件.framework
file_options = FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
self.project.add_file(sDKResourcePath+'/Adcolony.framework', force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
self.project.add_framework_search_paths([sDKResourcePath],recursive=True)
pass
def add_Adview(self):
parent_path = os.path.dirname(self.xcodeProjectRootPath)
sDKResourcePath= os.path.join(parent_path, 'sdks/Adview')
#库文件a
file_options = FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
self.project.add_file(sDKResourcePath+'/libAdCompViewSDK.a', force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
# self.project.add_flags('_SEARCH_PATHS',sDKResourcePath+'/**',self.targetName)
self.project.add_library_search_paths([sDKResourcePath],recursive=True)
#资源文件.png
files=self.eachFile(sDKResourcePath+'/res',postfix=".png")
files_temp=self.scan_files(sDKResourcePath+'/res',postfix=".png")
for file in files:
self.project.add_file(file, force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
#源代码文件
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,"TouchJSON/CDataScanner.m"))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/Experimental/CFilteringJSONSerializer.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/Experimental/CJSONDeserializer_BlocksExtensions.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/Experimental/CJSONSerialization.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/Experimental/CJSONSerializedData.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/Extensions/CDataScanner_Extensions.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/Extensions/NSDictionary_JSONExtensions.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/JSON/CJSONDeserializer.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/JSON/CJSONScanner.m'))
self.modifyXCodeFileCompilerFlag(os.path.join(sDKResourcePath,'TouchJSON/JSON/CJSONSerializer.m'))
self.project.add_header_search_paths([sDKResourcePath],recursive=True)
pass
def updateProjectSettings(self):
self.project.add_other_ldflags("-ObjC")
self.project.add_flags('ENABLE_BITCODE',
u'NO', self.targetName)
def add_Facebook(self):
parent_path = os.path.dirname(self.xcodeProjectRootPath)
sDKResourcePath= os.path.join(parent_path, 'sdks/Facebook')
file_options = FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
self.project.add_file(sDKResourcePath+'/FBAudienceNetwork.framework', force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
self.project.add_flags('FRAMEWORK_SEARCH_PATHS',sDKResourcePath+'/**',self.targetName)
pass
def add_Youmi(self):
parent_path = os.path.dirname(self.xcodeProjectRootPath)
sDKResourcePath= os.path.join(parent_path, 'sdks/Youmi')
#库文件a
file_options = FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
self.project.add_file(sDKResourcePath+'/libUMVideoSDK.a', force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
self.project.add_library_search_paths([sDKResourcePath],recursive=True)
#资源文件.png
self.project.add_file(sDKResourcePath+'/UMVideo.bundle', force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
pass
#获取SDK的所有相关文件
def get_all_sdk_files(self,dir):
files_ = []
list = os.listdir(dir)
for i in range(0, len(list)):
path = os.path.join(dir, list[i])
if os.path.isdir(path):
#.bundle,.framework会被识别成目录,这里需要做下判断
if path.endswith('bundle') or path.endswith('framework'):
files_.append(path)
continue
else:
files_.extend(self.get_all_sdk_files(path))
if os.path.isfile(path):
files_.append(path)
return files_
    # Inspect the libraries/resources under an SDK folder and update the
    # project configuration accordingly.
    def updateProjectSetsForSDK(self,sdk_path):
        """Add every file under *sdk_path* to the project and fix search paths.

        Bundles and plain resources become file references; frameworks are
        probed with `file` to decide embedding/code-signing; .m sources that
        look like MRC code get -fno-objc-arc; framework/library/header search
        paths are collected (deduplicated) and appended at the end.
        """
        sdk_file_path=self.get_all_sdk_files(sdk_path)
        framework_search_path=[]
        library_search_path=[]
        header_search_path=[]
        for temp_path in sdk_file_path:
            if temp_path.endswith('bundle'):
                # Resource bundle: plain reference, never embedded.
                file_options = FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
                self.project.add_file(temp_path, force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
                pass
            elif temp_path.endswith('framework'):
                framework_search_path_temp=os.path.dirname(temp_path)
                if not framework_search_path_temp in framework_search_path:
                    framework_search_path.append(framework_search_path_temp)
                # Framework (.framework): the Mach-O binary inside is named
                # after the framework itself.
                mach_file_name=temp_path.split('/')[-1].split('.')[-2]
                file_path= os.path.join(temp_path,mach_file_name)
                # Probe with `file` to tell dynamic from static frameworks;
                # only dynamic ones are embedded and code-signed on copy.
                # NOTE(review): Python 2 code -- under Python 3 stdoutdata is
                # bytes and `'dynamically' in stdoutdata` would raise.
                process=subprocess.Popen('file %s'%(file_path),shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
                (stdoutdata, stderrdata) = process.communicate()
                if 'dynamically' in stdoutdata:
                    embed_framework_val=True
                    code_sign_on_copy_val=True
                else:
                    embed_framework_val=False
                    code_sign_on_copy_val=False
                file_options = FileOptions(weak=False,embed_framework=embed_framework_val,code_sign_on_copy=code_sign_on_copy_val)
                self.project.add_file(temp_path, force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
                pass
            elif temp_path.endswith('h'):
                # Header: only record its directory for HEADER_SEARCH_PATHS.
                # NOTE(review): endswith('h') also matches e.g. '.sh'/'.pch'.
                header_search_path_temp=os.path.dirname(temp_path)
                if not header_search_path_temp in header_search_path:
                    header_search_path.append(header_search_path_temp)
                pass
            elif temp_path.endswith('m'):
                # Objective-C source file.
                strongFileOptions=FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
                addFileReference= self.project.add_file(temp_path, force=False, file_options=strongFileOptions, parent=self.frameworksGroupID, tree='SDKROOT')
                # Heuristic MRC detection: scan the source for 'release]' to
                # decide whether it was written without ARC.
                fin = open(temp_path,'r')
                result=None
                is_not_arc=False
                for eachLine in fin:
                    if 'release]'in eachLine:
                        is_not_arc=True
                        break
                fin.close()
                if is_not_arc==True:
                    # MRC sources must be compiled with -fno-objc-arc.
                    files=self.project.get_build_files_for_file(addFileReference[0].fileRef)
                    for f in files:
                        f.add_compiler_flags('-fno-objc-arc')
                pass
            elif temp_path.endswith('a'):
                # Static library: record its dir for LIBRARY_SEARCH_PATHS.
                library_search_path_temp=os.path.dirname(temp_path)
                if not library_search_path_temp in library_search_path:
                    library_search_path.append(library_search_path_temp)
                file_options = FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
                self.project.add_file(temp_path, force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
                pass
            # Anything else (images and other resources) is added as-is.
            else:
                file_options = FileOptions(weak=False,embed_framework=False,code_sign_on_copy=False)
                self.project.add_file(temp_path, force=False, parent=self.frameworksGroupID,file_options=file_options,tree='SDKROOT')
                pass
            pass
        # Apply the deduplicated search paths collected above.
        for temp_path in framework_search_path:
            self.project.add_framework_search_paths([temp_path],recursive=True)
        for temp_path in library_search_path:
            self.project.add_library_search_paths([temp_path],recursive=True)
        for temp_path in header_search_path:
            self.project.add_header_search_paths([temp_path],recursive=True)
        self.addSystemFrameworkOrDylib(self.project)
        self.updateProjectSettings()
        self.project.save()
        pass
    def getAllBuildConfig(self):
        """Placeholder: build-configuration enumeration is not implemented."""
        pass
# 转化.pbxproj文件数据,以便更可读!
def parsePbxprojFileData(self,output_json_file,pbxproj_path):
os.system('plutil -convert json -s -r -o %s %s'%(output_json_file,pbxproj_path))
pass
    def initProject(self):
        """Reset the working project from the shell-project templates and load it.

        Copies pristine project.pbxproj / Info.plist from the CWD over the
        working test project, loads it, and resolves the 'Frameworks' group.
        """
        # Initialization: paths inside the working test project.
        pbxproj=self.xcodeProjectRootPath+'/project_test.xcodeproj/project.pbxproj'
        infoPlistPath=self.xcodeProjectRootPath+'/project_test/Info.plist'
        # Copy the backed-up shell-project sources in before loading, so each
        # run starts from a clean template.
        try:
            src_path=os.path.join(os.getcwd(),'project.pbxproj')
            infPlist_Src_Path=os.path.join(os.getcwd(),'Info.plist')
            shutil.copy(src_path,pbxproj)
            shutil.copy(infPlist_Src_Path,infoPlistPath)
        except Exception,e:
            # Best-effort: continue with whatever project files already exist.
            print e
        self.project = XcodeProject.load(pbxproj)
        frameworksGroupID = None
        # Locate the PBXGroup id of the 'Frameworks' group by regex.
        textfile = open(pbxproj, 'r')
        filetext = textfile.read()
        textfile.close()
        matches = re.findall("([0-9A-F]*) /\* Frameworks \*/ = \{\n\s*isa = PBXGroup;", filetext)
        print "matches:",matches
        try:
            frameworksGroupID = matches[0]
        except:
            pass
        # NOTE(review): the regex result above is never used afterwards; the
        # group is resolved via get_or_create_group -- confirm the dead code.
        self.frameworksGroupID =self. project.get_or_create_group('Frameworks')
def embedAssignSDK(self,sdk_name):
# 引入不同SDK,涉及框架引入,代码文件引入,资源文件引入
sdk_functions={
'Adcolony':lambda:self.add_Adcolony(),
'Adview':lambda:self.add_Adview(),
'Facebook':lambda:self.add_Facebook(),
'Youmi':lambda:self.add_Youmi()
}
func=sdk_functions[sdk_name]
func()
self.addSystemFrameworkOrDylib(self.project)
self.updateProjectSettings()
self.project.save()
pass
return
if __name__=='__main__':
    # Manual smoke test against a local sample project.
    # NOTE(review): calls initTestProject(), which is not defined in this
    # chunk -- confirm it exists (initProject() is the visible initializer).
    xcode_build=XCodeBuild("/Users/star.liao/Desktop/Git/Python-Tools/xcode_build/project_test")
    xcode_build.initTestProject()
|
# contentcuration/contentcuration/views/admin.py
import ast
import base64
import cStringIO as StringIO
import csv
import json
import locale
import os
import sys
import time
from itertools import chain
import django_filters
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import SuspiciousOperation
from django.db.models import Case
from django.db.models import CharField
from django.db.models import Count
from django.db.models import F
from django.db.models import IntegerField
from django.db.models import Max
from django.db.models import Sum
from django.db.models import Value
from django.db.models import When
from django.db.models.functions import Concat
from django.http import FileResponse
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseNotFound
from django.http import StreamingHttpResponse
from django.shortcuts import render
from django.template import Context
from django.template.loader import get_template
from django.template.loader import render_to_string
from django.views.decorators.cache import cache_page
from django.views.decorators.http import condition
from django_filters.rest_framework import DjangoFilterBackend
from le_utils.constants import content_kinds
from PIL import Image
from raven.contrib.django.raven_compat.models import client
from rest_framework import generics
from rest_framework.authentication import BasicAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import api_view
from rest_framework.decorators import authentication_classes
from rest_framework.decorators import permission_classes
from rest_framework.filters import OrderingFilter
from rest_framework.filters import SearchFilter
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAdminUser
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from xhtml2pdf import pisa
from contentcuration.decorators import browser_is_supported
from contentcuration.decorators import is_admin
from contentcuration.models import Channel
from contentcuration.models import generate_file_on_disk_name
from contentcuration.models import Invitation
from contentcuration.models import User
from contentcuration.serializers import AdminChannelListSerializer
from contentcuration.serializers import AdminUserListSerializer
from contentcuration.serializers import CurrentUserSerializer
from contentcuration.serializers import UserChannelListSerializer
from contentcuration.utils.messages import get_messages
# Python 2 hack: re-expose setdefaultencoding and force UTF-8 so implicit
# str/unicode coercions in this module don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('UTF8')
# Use the system default locale for the strftime calls below.
locale.setlocale(locale.LC_TIME, '')
DEFAULT_ADMIN_PAGE_SIZE = 2
# Placeholders offered in the admin "custom email" composer; the values are
# str.format fields filled per-recipient in send_custom_email().
EMAIL_PLACEHOLDERS = [
    {"name": "First Name", "value": "{first_name}"},
    {"name": "Last Name", "value": "{last_name}"},  # was a redacted "<NAME>" artifact
    {"name": "Email", "value": "{email}"},
    {"name": "Current Date", "value": "{current_date}"},
    {"name": "Current Time", "value": "{current_time}"},
]
def send_custom_email(request):
    """Send a templated email to a list of users (admin bulk-mail endpoint)."""
    if request.method != 'POST':
        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
    payload = json.loads(request.body)
    try:
        subject = render_to_string('registration/custom_email_subject.txt', {'subject': payload["subject"]})
        for user in User.objects.filter(email__in=payload["emails"]).distinct():
            # Fill per-recipient placeholders ({first_name}, {email}, ...)
            # plus the current date/time.
            body = payload["message"].format(
                current_date=time.strftime("%A, %B %d"),
                current_time=time.strftime("%H:%M %Z"),
                **user.__dict__
            )
            message = render_to_string('registration/custom_email.txt', {'message': body})
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, )
    except KeyError:
        raise ObjectDoesNotExist("Missing attribute from data: {}".format(payload))
    return HttpResponse(json.dumps({"success": True}))
@login_required
@browser_is_supported
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@is_admin
def administration(request):
    """Render the admin dashboard page."""
    context = {
        "current_user": JSONRenderer().render(CurrentUserSerializer(request.user).data),
        "default_sender": settings.DEFAULT_FROM_EMAIL,
        "placeholders": json.dumps(EMAIL_PLACEHOLDERS, ensure_ascii=False),
        "messages": get_messages(),
    }
    return render(request, 'administration.html', context)
@cache_page(60 * 10)  # cache the full listing for 10 minutes
@login_required
@api_view(['GET'])
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@permission_classes((IsAdminUser,))
def get_all_channels(request):
    """Return every channel serialized for the admin list."""
    if not request.user.is_admin:
        raise SuspiciousOperation("You are not authorized to access this endpoint")
    serializer = AdminChannelListSerializer(Channel.get_all_channels(), many=True)
    return Response(serializer.data)
@login_required
@api_view(['GET'])
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@permission_classes((IsAdminUser,))
def get_channel_kind_count(request, channel_id):
    """Return per-kind resource counts and total size for one channel."""
    if not request.user.is_admin:
        raise SuspiciousOperation("You are not authorized to access this endpoint")
    details = Channel.objects.get(pk=channel_id).main_tree.get_details()
    payload = {
        "counts": details['kind_count'],
        "size": details['resource_size'],
    }
    return HttpResponse(json.dumps(payload))
class ChannelUserListPagination(PageNumberPagination):
    """Shared pagination for the admin channel/user list endpoints."""
    page_size = DEFAULT_ADMIN_PAGE_SIZE
    page_size_query_param = 'page_size'
    max_page_size = 500

    def get_paginated_response(self, data):
        """Wrap *data* with next/previous links and page counts."""
        paginator = self.page.paginator
        payload = {
            'links': {
                'next': self.get_next_link(),
                'previous': self.get_previous_link()
            },
            'count': paginator.count,
            'total_pages': paginator.num_pages,
            'results': data
        }
        return Response(payload)
class AdminChannelListFilter(django_filters.FilterSet):
    """Filterset for the admin channel list."""
    # Channel is "published" when its main tree is.
    published = django_filters.BooleanFilter(
        name='main_tree__published',
    )
    staged = django_filters.BooleanFilter(
        name='staging_tree'
    )
    # Channels with a ricecooker_version were created by a chef script.
    ricecooker_version__isnull = django_filters.rest_framework.BooleanFilter(
        name='ricecooker_version',
        lookup_expr='isnull'
    )

    class Meta:
        model = Channel
        # Fix: 'deleted' was listed twice in the original tuple.
        fields = (
            'name',
            'id',
            'editors__id',
            'deleted',
            'public',
            'staging_tree',
            'staged',
            'ricecooker_version',
            'published'
        )
class AdminChannelListView(generics.ListAPIView):
    """Paginated, filterable, searchable channel list for the admin UI."""
    serializer_class = AdminChannelListSerializer
    filter_backends = (DjangoFilterBackend, OrderingFilter, SearchFilter)
    filter_class = AdminChannelListFilter
    pagination_class = ChannelUserListPagination
    authentication_classes = (SessionAuthentication, BasicAuthentication, TokenAuthentication,)
    permission_classes = (IsAdminUser,)
    search_fields = (
        'name',
        '=id',
        'editors__first_name',
        'editors__last_name',
        '=editors__email',
    )
    ordering_fields = (
        'name',
        'id',
        'priority',
        'editors_count',
        'viewers_count',
        'resource_count',
        'modified',
        'created',
    )
    ordering = ('name',)

    def get_queryset(self):
        """Annotate channels with editor/viewer/resource counts.

        Deleted channels are hidden unless ?deleted=True or ?all=True.
        """
        queryset = Channel.objects
        if not (self.request.GET.get('deleted') == 'True' or self.request.GET.get('all') == 'True'):
            queryset = queryset.exclude(deleted=True)
        # resource_count via the MPTT right pointer: (rght / 2) - 1 equals the
        # number of descendants of main_tree -- presumably; confirm for trees
        # with deleted interior nodes.
        queryset = queryset.select_related('main_tree').prefetch_related('editors', 'viewers')\
            .annotate(editors_count=Count('editors'))\
            .annotate(viewers_count=Count('viewers'))\
            .annotate(resource_count=F("main_tree__rght")/2 - 1)\
            .annotate(created=F('main_tree__created'))
        if self.request.GET.get('can_edit') == 'True':
            # Fix: the original used editors__contains, which is not a valid
            # lookup on a related (M2M) field; membership is plain equality.
            queryset = queryset.filter(editors=self.request.user)
        return queryset.all()
class AdminUserListFilter(django_filters.FilterSet):
    """Filterset for the admin user list."""
    # chef_channels_count is an annotation added by AdminUserListView.
    chef_channels_count = django_filters.NumberFilter(name='chef_channels_count')
    chef_channels_count__gt = django_filters.NumberFilter(name='chef_channels_count', lookup_expr='gt')

    class Meta:
        model = User
        fields = (
            'email',
            'first_name',
            'last_name',
            'id',
            'is_admin',
            'is_active',
            'is_staff',
            'date_joined',
            'disk_space',
        )
class AdminUserListView(generics.ListAPIView):
    """Paginated, filterable, searchable user list for the admin UI."""
    serializer_class = AdminUserListSerializer
    filter_backends = (DjangoFilterBackend, OrderingFilter, SearchFilter)
    filter_class = AdminUserListFilter
    pagination_class = ChannelUserListPagination
    authentication_classes = (SessionAuthentication, BasicAuthentication, TokenAuthentication,)
    permission_classes = (IsAdminUser,)
    search_fields = (
        'first_name',
        'last_name',
        'email',
        '=editable_channels__id',
        'editable_channels__name',
    )
    ordering_fields = (
        'first_name',
        'last_name',
        'date_joined',
        'email',
        'editable_channels_count',
        'chef_channels_count'
    )
    ordering = ('email',)

    def get_queryset(self):
        """Annotate users with editable- and chef-channel counts.

        A "chef" channel is an editable channel whose ricecooker_version is
        neither NULL nor empty.
        """
        queryset = User.objects.prefetch_related('editable_channels')\
            .annotate(editable_channels_count=Count('editable_channels'))\
            .annotate(chef_channels_count=Sum(
                Case(
                    # Fix: the original had both isnull=True and =None
                    # branches, which compile to the same IS NULL SQL;
                    # the duplicate was removed.
                    When(editable_channels__ricecooker_version__isnull=True, then=0),
                    When(editable_channels__ricecooker_version='', then=0),
                    default=1, output_field=IntegerField()
                )
            ))
        return queryset.all()
@login_required
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@permission_classes((IsAdminUser,))
def make_editor(request):
    """Promote a viewer to editor on a channel (admin action)."""
    if not request.user.is_admin:
        raise SuspiciousOperation("You are not authorized to access this endpoint")
    if request.method != 'POST':
        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
    data = json.loads(request.body)
    try:
        user = User.objects.get(pk=data["user_id"])
        channel = Channel.objects.get(pk=data["channel_id"])
        # Swap view-only access for edit access, then clear any pending invite.
        channel.viewers.remove(user)
        channel.editors.add(user)
        channel.save()
        Invitation.objects.filter(invited=user, channel=channel).delete()
        return HttpResponse(json.dumps({"success": True}))
    except ObjectDoesNotExist:
        return HttpResponseNotFound('Channel with id {} not found'.format(data["channel_id"]))
@login_required
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@permission_classes((IsAdminUser,))
def remove_editor(request):
    """Revoke a user's edit access on a channel (admin action)."""
    if not request.user.is_admin:
        raise SuspiciousOperation("You are not authorized to access this endpoint")
    if request.method != 'POST':
        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
    data = json.loads(request.body)
    try:
        user = User.objects.get(pk=data["user_id"])
        channel = Channel.objects.get(pk=data["channel_id"])
        channel.editors.remove(user)
        channel.save()
        return HttpResponse(json.dumps({"success": True}))
    except ObjectDoesNotExist:
        return HttpResponseNotFound('Channel with id {} not found'.format(data["channel_id"]))
@login_required
@api_view(['GET'])
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@permission_classes((IsAuthenticated,))
def get_editors(request, channel_id):
    """List a channel's editors ordered by first name."""
    editors = Channel.objects.get(pk=channel_id).editors.all().order_by("first_name")
    serializer = UserChannelListSerializer(list(editors), many=True)
    return Response(serializer.data)
def sizeof_fmt(num, suffix='B'):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.5KB'."""
    value = float(num)
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Beyond zetta: fall back to the 'Yi' prefix.
    return "%.1f%s%s" % (value, 'Yi', suffix)
def pluralize_kind(kind, number):
    """Return a human-readable count of a content kind, e.g. '2 Videos'.

    The 'html5' kind is displayed as 'HTML app'; other kinds are capitalized.
    """
    # Fix: the original ran .capitalize() over the replaced text, mangling
    # 'HTML app' into 'Html app'.
    display = "HTML app" if kind == "html5" else kind.capitalize()
    return "{} {}{}".format(number, display, "s" if number != 1 else "")
def generate_thumbnail(channel):
    """Return a base64 data URI for *channel*'s thumbnail, or None.

    Preference order: pre-encoded icon, stored thumbnail encoding, then a
    freshly scaled copy of the thumbnail file on disk.
    """
    THUMBNAIL_DIMENSION = 200  # max width/height of the generated preview
    if channel.icon_encoding:
        return channel.icon_encoding
    elif channel.thumbnail_encoding:
        # thumbnail_encoding is stored as a stringified dict; parse safely.
        return ast.literal_eval(channel.thumbnail_encoding).get('base64')
    elif channel.thumbnail:
        try:
            checksum, ext = os.path.splitext(channel.thumbnail)
            filepath = generate_file_on_disk_name(checksum, channel.thumbnail)
            buffer = StringIO.StringIO()
            with Image.open(filepath) as image:
                width, height = image.size
                # Never upscale: clamp to the smallest of 200/width/height.
                dimension = min([THUMBNAIL_DIMENSION, width, height])
                image.thumbnail((dimension, dimension), Image.ANTIALIAS)
                image.save(buffer, image.format)
            return "data:image/{};base64,{}".format(ext[1:], base64.b64encode(buffer.getvalue()))
        except IOError:
            # Report to Sentry but don't fail the export; callers treat the
            # implicit None return as "no thumbnail".
            client.captureMessage("Failed to generate thumbnail for channel id={}, filepath={}".format(
                channel.id, filepath))
            pass
def get_channel_data(channel, site, default_thumbnail=None):
    """Build the export row (CSV/PDF) describing one channel.

    Args:
        channel: the Channel to describe.
        site: current site, used to build the edit URL.
        default_thumbnail: optional base64 data URI; when given, a thumbnail
            is generated per channel with this value as the fallback.

    Returns:
        dict of display-ready fields.
    """
    import time
    start = time.time()
    print("Starting " + channel.name.encode('utf-8'))
    data = {
        "name": channel.name,
        "id": channel.id,
        "public": "Yes" if channel.public else "No",
        "description": channel.description,
        "language": channel.language and channel.language.readable_name,
        # Only generate a thumbnail when a fallback was supplied (PDF export);
        # the and/or chain falls back to default_thumbnail when generation
        # returns a falsy value.
        "generated_thumbnail": default_thumbnail is not None and generate_thumbnail(channel) or default_thumbnail,
        "url": "http://{}/channels/{}/edit".format(site, channel.id)
    }
    descendants = channel.main_tree.get_descendants().prefetch_related('children', 'files', 'tags')\
        .select_related('license', 'language')
    resources = descendants.exclude(kind=content_kinds.TOPIC)
    # Sample pathway: the ancestor chain of one deepest resource.
    max_level = resources.aggregate(max_level=Max('level'))['max_level']
    deepest_node = resources.filter(level=max_level).first()
    if deepest_node:
        pathway = deepest_node.get_ancestors(include_self=True)\
            .exclude(pk=channel.main_tree.pk)\
            .annotate(name=Concat('title', Value(' ('), 'kind_id', Value(')'), output_field=CharField()))\
            .values_list('name', flat=True)
        data["sample_pathway"] = " -> ".join(pathway)
    else:
        data["sample_pathway"] = "Channel is empty"
    # Channel-level metadata: tokens, editors, tags.
    tokens = channel.secret_tokens.values_list('token', flat=True)
    data["tokens"] = ", ".join(["{}-{}".format(t[:5], t[5:]) for t in tokens if t != channel.id])
    data["editors"] = ", ".join(list(channel.editors.annotate(name=Concat('first_name', Value(' '),
                                                                          'last_name', Value(' ('), 'email', Value(')'),
                                                                          output_field=CharField()))
                                     .values_list('name', flat=True)))
    data["tags"] = ", ".join(channel.tags.exclude(tag_name=None).values_list('tag_name', flat=True).distinct())
    # Languages used by nodes/files other than the channel's own language.
    node_languages = descendants.exclude(language=None).values_list('language__readable_name', flat=True).distinct()
    file_languages = descendants.exclude(files__language=None).values_list('files__language__readable_name', flat=True)
    # Fixes vs original: compare with != (not the identity test `is not`),
    # apply the comma replacement once instead of twice, and do NOT clobber
    # the computed value with "" afterwards.
    language_list = sorted(l.replace(",", " -")
                           for l in set(chain(node_languages, file_languages))
                           if l is not None and l != data['language'])
    data["languages"] = ", ".join(language_list)
    # Per-kind counts and total (deduplicated) file size.
    kind_list = list(descendants.values('kind_id').annotate(count=Count('kind_id')).order_by('kind_id'))
    data["kind_counts"] = ", ".join([pluralize_kind(k['kind_id'], k['count']) for k in kind_list])
    data["total_size"] = sizeof_fmt(resources.values('files__checksum', 'files__file_size').distinct(
    ).aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0)
    print(channel.name.encode('utf-8') + " time:", time.time() - start)
    return data
class Echo:
    """File-like object whose write() returns the value instead of buffering.

    Passed to csv.writer so each writerow() yields the serialized row text,
    enabling streamed CSV responses.
    """
    def write(self, value):
        """Echo *value* back to the caller rather than storing it."""
        return value
def get_default_thumbnail():
    """Return the Kolibri placeholder image as a base64 data URI."""
    filepath = os.path.join(settings.STATIC_ROOT, 'img', 'kolibri_placeholder.png')
    _, ext = os.path.splitext(filepath)
    with open(filepath, 'rb') as image_file:
        encoded = base64.b64encode(image_file.read())
    return "data:image/{};base64,{}".format(ext[1:], encoded)
def stream_csv_response_generator(request):
    """Yield CSV rows (header first) for all public, non-deleted channels."""
    channels = Channel.objects.prefetch_related('editors', 'secret_tokens', 'tags')\
        .select_related('main_tree')\
        .exclude(deleted=True)\
        .filter(public=True)\
        .distinct()\
        .order_by('name')
    site = get_current_site(request)
    # csv.writer writes through Echo, so each writerow() returns the row text
    # and nothing is buffered -- suitable for StreamingHttpResponse.
    writer = csv.writer(Echo())
    header = ['Channel', 'ID', 'Public', 'Description', 'Tokens', 'Kind Counts',
              'Total Size', 'Language', 'Other Languages', 'Tags', 'Editors', 'Sample Pathway']
    yield writer.writerow(header)
    for channel in channels:
        row = get_channel_data(channel, site)
        yield writer.writerow([row['name'], row['id'], row['public'], row['description'], row['tokens'],
                               row['kind_counts'], row['total_size'], row['language'], row['languages'],
                               row['tags'], row['editors'], row['sample_pathway']])
@login_required
@condition(etag_func=None)
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@permission_classes((IsAdminUser,))
def download_channel_csv(request):
    """Stream the public-channel listing as a CSV attachment."""
    if not request.user.is_admin:
        raise SuspiciousOperation("You are not authorized to access this endpoint")
    response = StreamingHttpResponse(stream_csv_response_generator(request), content_type="text/csv")
    response['Content-Disposition'] = 'attachment; filename="channels.csv"'
    return response
@login_required
@authentication_classes((SessionAuthentication, BasicAuthentication, TokenAuthentication))
@permission_classes((IsAdminUser,))
def download_channel_pdf(request):
    """Render the public-channel listing to PDF and return it as a download."""
    import time
    start = time.time()
    template = get_template('export/channels_pdf.html')
    channels = Channel.objects.prefetch_related('editors', 'secret_tokens', 'tags')\
        .select_related('main_tree')\
        .filter(public=True, deleted=False)\
        .distinct()\
        .order_by('name')
    print("Channel query time:", time.time() - start)
    site = get_current_site(request)
    default_thumbnail = get_default_thumbnail()
    channel_list = [get_channel_data(c, site, default_thumbnail) for c in channels]
    context = Context({
        "channels": channel_list
    })
    html = template.render(context)
    result = StringIO.StringIO()
    pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")), result, encoding='UTF-8', path=settings.STATIC_ROOT)
    if pdf.err:
        # Fix: the original fell through and returned None here, which Django
        # rejects ("view didn't return an HttpResponse").
        return HttpResponse("Failed to generate PDF", status=500)
    response = FileResponse(result.getvalue())
    response['Content-Type'] = 'application/pdf'
    response['Content-disposition'] = 'attachment;filename=channels.pdf'
    response['Set-Cookie'] = "fileDownload=true; path=/"
    print("\n\n\nTotal time:", time.time() - start, "\n\n\n")
    return response
|
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
class FormatShape:
    """Format final imgs shape to the given input_format.

    Required keys are "imgs", "num_clips" and "clip_len"; added or modified
    keys are "imgs" and "input_shape".

    Args:
        input_format (str): Final imgs layout; one of 'NCTHW', 'NCHW',
            'NCHW_Flow' or 'NPTCHW'.
        collapse (bool): Collapse the leading N axis (NCTHW to CTHW, etc.)
            when N is 1. Should be set to True when training and testing
            detectors. Default: False.
    """

    def __init__(self, input_format, collapse=False):
        self.input_format = input_format
        self.collapse = collapse
        # Fail fast on unsupported layouts.
        if self.input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
            raise ValueError(
                f'The input format {self.input_format} is invalid.')

    def __call__(self, results):
        """Perform the FormatShape formatting.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        if not isinstance(results['imgs'], np.ndarray):
            results['imgs'] = np.array(results['imgs'])
        imgs = results['imgs']
        # imgs: [M x H x W x C] with M = 1 * N_crops * N_clips * L
        if self.collapse:
            # Collapsing only makes sense for a single clip.
            assert results['num_clips'] == 1
        if self.input_format == 'NCTHW':
            num_clips = results['num_clips']
            clip_len = results['clip_len']
            imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
            # N_crops x N_clips x L x H x W x C
            imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
            # N_crops x N_clips x C x L x H x W
            imgs = imgs.reshape((-1, ) + imgs.shape[2:])
            # M' x C x L x H x W, with M' = N_crops x N_clips
        elif self.input_format == 'NCHW':
            imgs = np.transpose(imgs, (0, 3, 1, 2))
            # M x C x H x W
        elif self.input_format == 'NCHW_Flow':
            num_clips = results['num_clips']
            clip_len = results['clip_len']
            imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
            # N_crops x N_clips x L x H x W x C
            imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4))
            # N_crops x N_clips x L x C x H x W
            imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) +
                                imgs.shape[4:])
            # M' x C' x H x W, with M' = N_crops x N_clips and C' = L x C
        elif self.input_format == 'NPTCHW':
            num_proposals = results['num_proposals']
            num_clips = results['num_clips']
            clip_len = results['clip_len']
            imgs = imgs.reshape((num_proposals, num_clips * clip_len) +
                                imgs.shape[1:])
            # P x M x H x W x C, with M = N_clips x L
            imgs = np.transpose(imgs, (0, 1, 4, 2, 3))
            # P x M x C x H x W
        if self.collapse:
            # Drop the singleton leading axis (e.g. NCTHW -> CTHW).
            assert imgs.shape[0] == 1
            imgs = imgs.squeeze(0)
        results['imgs'] = imgs
        results['input_shape'] = imgs.shape
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f"(input_format='{self.input_format}')"
        return repr_str
|
import logging
from typing import TYPE_CHECKING, Callable, Dict, Optional, Tuple, Type, Union
from django.contrib.auth import get_user_model
from django.db import models
from snitch.emails import TemplateEmailMessage
from snitch.settings import ENABLED_SEND_NOTIFICATIONS
if TYPE_CHECKING:
from push_notifications.models import APNSDevice, GCMDevice
from snitch.handlers import EventHandler
from snitch.models import Event, Notification
# Module-level logger for the notification backends.
logger = logging.getLogger(__name__)
# Resolve the active user model once at import time.
User = get_user_model()
if TYPE_CHECKING:
from django.contrib.auth.models import User as AuthUser
from django.db import models
from push_notifications.models import APNSDevice, GCMDevice
from snitch.handlers import EventHandler
from snitch.models import Event, Notification
class AbstractBackend:
    """Base class for notification delivery backends.

    A backend is constructed either from a Notification, or from an Event
    plus the target user; the event handler is resolved accordingly.
    """

    def __init__(
        self,
        notification: Optional["Notification"] = None,
        event: Optional["Event"] = None,
        user: Optional["AuthUser"] = None,
    ):
        assert notification is not None or (
            event is not None and user is not None
        ), "You should provide a notification or an event and an user."
        self.notification: Optional["Notification"] = notification
        self.event: Optional["Event"] = event
        self.user: Optional["AuthUser"] = user
        # Resolve the handler from whichever source object was supplied; a
        # notification also carries its own target user.
        if notification:
            self.handler: "EventHandler" = notification.handler()
            self.user = notification.user
        elif event:
            self.handler = event.handler()

    def send(self):
        """Deliver the notification; concrete backends must override this."""
        raise NotImplementedError
class PushNotificationBackend(AbstractBackend):
    """A backend class to send push notifications depending on the platform."""

    default_batch_sending: bool = True

    def __init__(self, *args, **kwargs):
        """Adds attributes for the push notification from the handler.

        Accepts an optional ``batch_sending`` keyword; defaults to
        ``default_batch_sending``.
        """
        # BUG FIX: pop batch_sending *before* delegating to the parent —
        # AbstractBackend.__init__ does not accept it, so forwarding it
        # raised TypeError and the option could never actually be used.
        self.batch_sending = kwargs.pop("batch_sending", self.default_batch_sending)
        super().__init__(*args, **kwargs)
        self.action_type: str = self.handler.get_action_type()
        self.action_id: str = self.handler.get_action_id()

    def extra_data(self) -> Dict:
        """Gets the extra data to add to the push, to be hooked if needed. It tries to
        get an initial dict from the handler.
        """
        return self.handler.get_extra_data()

    def get_devices(
        self, device_class: Union[Type["GCMDevice"], Type["APNSDevice"]]
    ) -> "models.QuerySet":
        """Gets the devices of the backend's user using the given class."""
        return device_class.objects.filter(user=self.user)

    def pre_send(
        self, device: Optional[Union["GCMDevice", "APNSDevice"]] = None
    ) -> None:
        """Actions previous to build the message and send, like activate translations if
        needed.
        """
        return None

    def post_send(
        self, device: Optional[Union["GCMDevice", "APNSDevice"]] = None
    ) -> None:
        """Actions post to sent the message, like deactivate translations if
        needed.
        """
        return None

    def _build_gcm_message(self) -> Tuple[Optional[str], Dict]:
        """Creates the message for GCM: plain text body plus an extra dict."""
        message: Optional[str] = self.handler.get_text()
        extra = {}
        title: Optional[str] = self.handler.get_title()
        if title:
            extra["title"] = title
        if self.action_type:
            extra["action_type"] = self.action_type
        if self.action_id:
            extra["action_id"] = self.action_id
        extra_data = self.extra_data()
        if extra_data:
            extra.update(extra_data)
        return message, extra

    def _build_apns_message(self) -> Tuple[Union[Optional[str], Dict], Dict]:
        """Creates the message for APNS.

        When a title is available the message becomes an alert dict
        (``{"title": ..., "body": ...}``); otherwise it is the plain text.
        """
        text: Optional[str] = self.handler.get_text()
        message: Union[Optional[str], Dict] = text
        extra: Dict = {}
        title: Optional[str] = self.handler.get_title()
        if title:
            message = {"title": title, "body": text}
        if self.action_type:
            extra["action_type"] = self.action_type
        if self.action_id:
            extra["action_id"] = self.action_id
        extra_data = self.extra_data()
        if extra_data:
            extra.update(extra_data)
        return message, extra

    def _send_to_devices(self, devices: "models.QuerySet", building_method: Callable):
        """Sends a batch of pushes, or one push per device when
        ``batch_sending`` is disabled. Errors are logged, never raised.
        """
        try:
            from push_notifications.apns import APNSError
            from push_notifications.gcm import GCMError
        except ImportError:
            # push_notifications is an optional dependency.
            return None
        if self.batch_sending:
            self.pre_send()
            message, extra = building_method()
            try:
                devices.send_message(message=message, extra=extra)
            except GCMError:
                logger.warning("Error sending a batch GCM push message")
            except APNSError:
                logger.warning("Error sending a batch APNS push message")
            self.post_send()
        else:
            for device in devices:
                # Build per device so pre_send hooks (e.g. translations)
                # can affect the message content.
                self.pre_send(device=device)
                message, extra = building_method()
                try:
                    device.send_message(message=message, extra=extra)
                except GCMError:
                    logger.warning("Error sending a single GCM push message")
                except APNSError:
                    logger.warning("Error sending a single APNS push message")
                self.post_send(device=device)
        return None

    def _send_gcm(self):
        """Send to GCM devices."""
        try:
            from push_notifications.models import GCMDevice
        except ImportError:
            return None
        devices = self.get_devices(GCMDevice)
        self._send_to_devices(devices=devices, building_method=self._build_gcm_message)
        return None

    def _send_apns(self):
        """Send to APNS devices."""
        try:
            from push_notifications.models import APNSDevice
        except ImportError:
            return None
        devices = self.get_devices(APNSDevice)
        self._send_to_devices(devices=devices, building_method=self._build_apns_message)
        return None

    def send(self):
        """Send message for each platform."""
        if ENABLED_SEND_NOTIFICATIONS:
            self._send_gcm()
            self._send_apns()
class EmailNotificationBackend(AbstractBackend):
    """Backend for using the email app to send emails.

    Hooks are looked up on the handler by the attribute names below, so a
    handler can customize kwargs, context and subject per event.
    """

    template_email_kwargs_attr: str = "template_email_kwargs"
    # NOTE(review): this value looks like a typo for "get_email_kwargs"
    # (compare the two attrs below); kept as-is to preserve behavior.
    get_email_kwargs_attr: str = "get_email_kwargs_attr"
    get_email_extra_context_attr: str = "get_email_extra_context"
    get_email_subject_attr: str = "get_email_subject"

    def __use_async(self) -> bool:
        """Check if the email can use async, False by default, because the
        notification is already sent using a task."""
        return getattr(self.handler, "template_email_async", False)

    def extra_context(self) -> Dict:
        """Gets extra context for the email if the handler provides it."""
        _missing = object()
        hook = getattr(self.handler, self.get_email_extra_context_attr, _missing)
        return {} if hook is _missing else hook()

    def subject(self) -> Optional[str]:
        """Gets the subject of the email if the handler provides it."""
        _missing = object()
        hook = getattr(self.handler, self.get_email_subject_attr, _missing)
        return None if hook is _missing else hook()

    def email_kwargs(self) -> Optional[dict]:
        """Dynamically gets the kwargs for TemplateEmailMessage."""
        if hasattr(self.handler, self.get_email_kwargs_attr):
            return getattr(self.handler, self.get_email_kwargs_attr)()
        if hasattr(self.handler, self.template_email_kwargs_attr):
            return getattr(self.handler, self.template_email_kwargs_attr)
        return None

    def send(self):
        """Builds the TemplateEmailMessage kwargs and sends the email."""
        if not ENABLED_SEND_NOTIFICATIONS:
            return
        kwargs = self.email_kwargs()
        if not kwargs:
            return
        # Resolve the recipient address from the user model's EMAIL_FIELD.
        if getattr(User, "EMAIL_FIELD", None):
            field_name = getattr(self.user, "EMAIL_FIELD")
            kwargs["to"] = getattr(self.user, field_name)
        # Handler-provided subject overrides the template kwargs.
        subject = self.subject()
        if subject:
            kwargs["subject"] = subject
        # Enrich the template context with the source objects.
        context = kwargs.get("context", {})
        if self.notification:
            context["notification"] = self.notification
        if self.event:
            context["event"] = self.event
        context.update(self.extra_context())
        kwargs["context"] = context
        TemplateEmailMessage(**kwargs).send(use_async=self.__use_async())
|
<filename>cogs/moderation.py
"""
MIT License
Copyright (c) 2021 - µYert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Optional, Union
import discord
from discord.ext import commands
class Moderation(commands.Cog):
    """ Moderation cog. All things admin! """

    def __init__(self, bot: commands.Bot) -> None:
        self.bot = bot
        # Defaults applied when a command omits days/reason.
        self.def_days = 7
        self.def_reason = "No reason given"

    @commands.command()
    @commands.has_permissions(ban_members=True)
    @commands.bot_has_permissions(ban_members=True)
    async def ban(
        self,
        ctx: commands.Context,
        target: Union[discord.Member, discord.Object],
        days: Optional[int],
        *,
        reason: Optional[str],
    ) -> None:
        """Bans the given <target> for [reason], deleting [days] of messages"""
        # BUG FIX: `days or self.def_days` silently replaced an explicit
        # `days=0` (delete nothing) with the 7-day default; compare to None.
        days = self.def_days if days is None else days
        reason = reason or self.def_reason
        await ctx.guild.ban(target, delete_message_days=days, reason=reason)

    @commands.command()
    @commands.has_permissions(kick_members=True)
    @commands.bot_has_permissions(kick_members=True)
    async def kick(
        self, ctx: commands.Context, target: discord.Member, *, reason: Optional[str]
    ) -> None:
        """Kicks the given target for a reason"""
        reason = reason or self.def_reason
        await target.kick(reason=reason)

    @commands.command()
    @commands.has_permissions(ban_members=True)
    @commands.bot_has_permissions(ban_members=True)
    async def unban(
        self, ctx: commands.Context, target: int, *, reason: Optional[str]
    ) -> None:
        """Unbans the given target"""
        reason = reason or self.def_reason
        await ctx.guild.unban(discord.Object(id=target), reason=reason)

    @commands.command()
    @commands.has_guild_permissions(mute_members=True)
    @commands.bot_has_guild_permissions(mute_members=True)
    async def mute(
        self, ctx: commands.Context, target: discord.Member, *, reason: Optional[str]
    ) -> None:
        """Mutes the given target with a reason"""
        reason = reason or self.def_reason
        await target.edit(mute=True, reason=reason)

    @commands.command()
    @commands.has_guild_permissions(mute_members=True)
    @commands.bot_has_guild_permissions(mute_members=True)
    async def unmute(
        self, ctx: commands.Context, target: discord.Member, *, reason: Optional[str]
    ) -> None:
        """ Unmutes the given target with optional reason. """
        reason = reason or self.def_reason
        await target.edit(mute=False, reason=reason)

    @commands.group(invoke_without_command=True)
    @commands.has_permissions(manage_roles=True)
    async def config(self, ctx):
        """Configuration group; shows help when no subcommand is given."""
        await ctx.send_help(ctx.command)

    @config.group(name="prefix", invoke_without_command=True)
    async def config_prefix(self, ctx):
        """Lists the prefixes configured for this guild."""
        fmt = ", ".join(await self.bot.get_prefix(ctx.message))
        await ctx.send(f"The prefixes for `{ctx.guild}` are `{fmt}`")

    @config_prefix.command(name="set")
    @commands.has_permissions(manage_roles=True)
    async def prefix_set(self, ctx, prefix):
        """Replaces the guild's prefix list with a single prefix."""
        if len(prefix) > 12:
            raise commands.BadArgument("The prefix cannot be longer than 12 characters")
        if len(prefix) <= 0:
            raise commands.BadArgument("The prefix cannot be less than 1 character")
        await self.bot.pool.execute(
            """UPDATE guild_config
            SET prefixes = $1
            WHERE guild_id = $2""",
            [prefix],
            ctx.guild.id,
        )
        # Keep the in-memory cache in sync with the database.
        self.bot.prefixes[ctx.guild.id] = [prefix]
        await ctx.send(f"Set the prefix to `{prefix}`")

    @config_prefix.command(name="add")
    @commands.has_permissions(manage_roles=True)
    async def prefix_add(self, ctx, prefix):
        """Appends a prefix to the guild's prefix list (max 7, unique)."""
        if len(self.bot.prefixes[ctx.guild.id]) >= 7:
            raise commands.BadArgument("You cannot have more than 7 prefixes")
        if len(prefix) > 12:
            raise commands.BadArgument("The prefix cannot be longer than 12 characters")
        if len(prefix) <= 0:
            raise commands.BadArgument("The prefix cannot be less than 1 character")
        if prefix in self.bot.prefixes[ctx.guild.id]:
            raise commands.BadArgument("You cannot have the same prefix twice")
        await self.bot.pool.execute(
            """UPDATE guild_config
            SET prefixes = prefixes || $1
            WHERE guild_id = $2""",
            [prefix],
            ctx.guild.id,
        )
        self.bot.prefixes[ctx.guild.id].append(prefix)
        await ctx.send(f"Added `{prefix}` to the list of prefixes")

    @config_prefix.command(name="remove")
    @commands.has_permissions(manage_roles=True)
    async def prefix_remove(self, ctx, prefix):
        """Removes one prefix from the guild's prefix list."""
        current = self.bot.prefixes[ctx.guild.id]
        if len(current) <= 1:
            raise commands.BadArgument("You cannot remove all of your prefixes")
        if prefix not in current:
            raise commands.BadArgument("That was not a prefix")
        await self.bot.pool.execute(
            """UPDATE guild_config
            SET prefixes = array_remove(prefixes, $1)
            WHERE guild_id = $2""",
            prefix,
            ctx.guild.id,
        )
        current.remove(prefix)
        await ctx.send(f"Removed `{prefix}` from the list of prefixes")
def setup(bot):
    """ Cog entrypoint. """
    cog = Moderation(bot)
    bot.add_cog(cog)
|
import sys
sys.path.append("../")
sys.path.append("/home/ray__/ssd/BERT/")
sys.path.append("/home/ray__/CS/org/etherlabs/ai-engine/pkg/")
import text_preprocessing.preprocess as tp
from extra_preprocess import preprocess_text
from filter_groups import CandidateKPExtractor
import nltk
import networkx as nx
from gpt_feat_utils import GPT_Inference
from scipy.spatial.distance import cosine
import numpy as np
from community import best_partition
from copy import deepcopy
gpt_model = GPT_Inference("/home/ray__/ssd/BERT/models/customer_service/epoch3/", device="cpu")
#gpt_model = GPT_Inference("/home/ray__/ssd/BERT/models/product/", device="cuda")
#gpt_model = GPT_Inference("/home/ray__/ssd/BERT/models/ether_v2/ether_googleJan13_groupsplit_withstop_4+w_gt3s_lr3e-5/",device="cpu")
sys.path.append("../helper_functions/")
from get_groups import call_gs
import sys
sys.path.append("../")
from filter_groups import CandidateKPExtractor
def get_ent(request, ent_fv, com_map, kp_entity_graph):
    """For each meeting group, rank candidate entities by embedding similarity.

    Pipeline per group returned by ``call_gs(request)``:
      1. Extract candidate key phrases from the group's concatenated text.
      2. Keep multi-word phrases containing a noun that is a node of
         ``kp_entity_graph``.
      3. Embed the sentences containing those phrases with ``gpt_model``.
      4. Cluster the sentence vectors (Louvain ``best_partition``) and average
         each retained cluster into one feature vector.
      5. Rank the reachable entities from ``ent_fv`` by cosine similarity to
         each cluster vector, keeping the top 10 per cluster.

    Returns a tuple ``(group, group_ent)`` where ``group_ent`` maps each group
    id to a flat list of (entity, similarity) pairs.
    NOTE(review): ``com_map`` is never used in this function — confirm whether
    it can be dropped from the signature.
    """
    kp_e = CandidateKPExtractor()
    # Map original-case graph node names to their lowercase forms.
    uncased_nodes = [ele.lower() for ele in kp_entity_graph]
    uncased_node_dict = dict(zip(list(kp_entity_graph),uncased_nodes))
    group = call_gs(request)
    group_ent = {}
    for groupid, groupobj in group.items():
        # Concatenate and clean the group's transcript text.
        seg_text = " ".join([segobj['originalText'] for segobj in groupobj])
        seg_text = " ".join(preprocess_text(seg_text))
        text_kps = kp_e.get_candidate_phrases(seg_text)
        text_kps = list(set([ele.lower() for ele in text_kps]))
        # POS-tag every sentence and collect the nouns (NN* tags).
        tagged_sents = nltk.pos_tag_sents(nltk.word_tokenize(sent) for sent in nltk.sent_tokenize(seg_text))
        text_nouns = []
        for tagged_sent in tagged_sents:
            text_nouns.extend([ele[0] for ele in list(tagged_sent) if ele[1].startswith('NN')])
        text_nouns = [ele.lower() for ele in text_nouns]
        intersecting_nouns = list(set(text_nouns)&set(kp_entity_graph))
        intersection_ctr = 0
        filtered_kps = []
        # Keep multi-word key phrases containing at least one graph noun.
        for kp in text_kps:
            if len(kp.split(' '))>1:
                kp_nouns = list(set(kp.split(' '))&set(intersecting_nouns))
                # for noun in kp_nouns:
                # rem_nouns = list(set(kp_nouns)-set([noun]))
                # if set(rem_nouns)&set(kp_entity_graph[noun])==set(rem_nouns):
                # filtered_kps.append(kp)
                # continue
                for noun in kp_nouns:
                    if noun in kp_entity_graph.nodes():
                        filtered_kps.append(kp)
                        # NOTE(review): this `continue` is a no-op; `break` was
                        # probably intended. Harmless: duplicates are removed
                        # by the set() below.
                        continue
        filtered_kps = list(set(filtered_kps))
        # Keep only the sentences that contain a surviving key phrase.
        candidate_sents = [sent.lower() for sent in nltk.sent_tokenize(seg_text)]
        filtered_sents = []
        for sent in candidate_sents:
            if any(kp in sent for kp in filtered_kps):
                filtered_sents.append(sent)
        # Map phrase tokens back to graph nodes, separating entity nodes from
        # plain noun nodes.
        noun_list = [ele.split(' ') for ele in filtered_kps]
        noun_list = sum(noun_list, [])
        noun_list = list(set(noun_list)&set([uncased_node_dict[ele] for ele in uncased_node_dict]))
        noun_node_list = [key for (key, value) in uncased_node_dict.items() if value in noun_list]
        ent_node_list = [ele for ele in noun_node_list if kp_entity_graph.nodes[ele]['node_type']=='entity']
        noun_node_list = list(set(noun_node_list)-set(ent_node_list))
        kp_Map_list = []
        kp_ent_map = []
        # Walk noun -> key-phrase edges, then key-phrase -> entity nodes.
        for noun in noun_node_list:
            kp_Map_list.extend([ele for ele in list(kp_entity_graph[noun])
                    if kp_entity_graph[noun][ele]['edge_type']=='kp_to_tok'])
        #print ("kp_Map_list: ", kp_Map_list)
        for kp in list(set(kp_Map_list)):
            kp_ent_map.extend([ele for ele in list(kp_entity_graph[kp]) if kp_entity_graph.nodes[ele]['node_type']=='entity'])
        #print ("noun_list: ", noun_list)
        #print ("noun_node_list: ", noun_node_list)
        #print ("ent_node_list: ", ent_node_list)
        #print ("noun_node_list: ", noun_node_list)
        # Add a "<ETHER>-" alias for entities flagged as ether nodes.
        kp_ent_map_intrm = deepcopy(kp_ent_map)
        for ent in kp_ent_map_intrm:
            if kp_entity_graph.nodes[ent]['is_ether_node']==True:
                #print ("-----------------------------TRUE-------------------------", ent)
                kp_ent_map.append("<ETHER>-"+ent)
        #print ("KP ENT MAP: ", kp_ent_map)
        kp_ent_map = list(set(kp_ent_map+ent_node_list))
        # Only keep entities we actually have feature vectors for.
        kp_ent_map = list(set(kp_ent_map)&set(ent_fv))
        sent_list = filtered_sents
        sent_fv = [gpt_model.get_text_feats(sent) for sent in sent_list]
        # Build a cosine-similarity graph over the sentence vectors, then
        # prune each node's edges below its own 75th-percentile weight.
        G = nx.Graph()
        G.add_nodes_from(range(len(sent_fv)))
        node_list = range(len(sent_fv))
        for index1, nodea in enumerate(range(len(sent_fv))):
            for index2, nodeb in enumerate(range(len(sent_fv))):
                if index2 >= index1:
                    c_score = 1 - cosine(sent_fv[nodea], sent_fv[nodeb])
                    #if c_score>= outlier_score:
                    G.add_edge(nodea, nodeb, weight = c_score)
            closest_connection_n = sorted(dict(G[nodea]).items(), key=lambda kv:kv[1]["weight"], reverse=True)
            weights_n = list(map(lambda kv: (kv[1]["weight"]).tolist(), closest_connection_n))
            q3 = np.percentile(weights_n, 75)
            iqr = np.subtract(*np.percentile(weights_n, [75, 25]))
            #outlier_score = q3 + (1.5 * iqr)
            # NOTE(review): outlier_score is computed but unused; the pruning
            # below thresholds on q3 — confirm which was intended.
            outlier_score = q3 + (1 * iqr)
            for nodeb, param in dict(G[nodea]).items():
                if param['weight']>=q3:
                    pass
                else:
                    G.remove_edge(nodea, nodeb)
        # Louvain community detection over the pruned similarity graph.
        comm_temp = best_partition(G, resolution=1)
        prev = 0
        comm_map = {}
        # Invert node->cluster into cluster->[nodes].
        # NOTE(review): `prev` is updated but never read afterwards.
        for ent, cls in sorted(comm_temp.items(),key=lambda kv:kv[1]):
            if prev!=cls:
                prev = cls
            if cls in comm_map.keys():
                comm_map[cls].append(ent)
            else:
                comm_map[cls] = [ent]
        agg_fv = {}
        # Ignore singleton clusters whenever any multi-sentence cluster exists.
        if True in [True if len(s_list)>1 else False for s_list in comm_map.values() ]:
            threshold = 1
        else:
            threshold = 0
        for comm, s_list in comm_map.items():
            if len(s_list)>threshold:
                temp_fv = [sent_fv[s] for s in s_list]
                agg_fv[comm] = np.mean(temp_fv, axis=0)
        # Rank reachable entities by similarity to each cluster centroid.
        dist_list = {}
        for pos, fv in agg_fv.items():
            temp_list = []
            for entity in ent_fv.keys():
                if entity in kp_ent_map:
                    temp_list.append((entity, 1-cosine(ent_fv[entity], fv)))
            dist_list[pos] = sorted(temp_list, key=lambda kv:kv[1], reverse=True)[:10]
        group_ent[groupid] = [e for e_list in dist_list.values() for e in e_list]
    return group, group_ent
<filename>src/share.py
# -*- coding: utf-8 -*-
"""
This is the share module of the game.
@Author: yanyongyu
"""
__author__ = "yanyongyu"
__all__ = ["copy", "save", "send_email"]
import re
import os
import sys
import time
import logging
import threading
from tkinter import *
from tkinter.ttk import *
from tkinter import messagebox
import smtplib
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
from PIL import Image
logging.basicConfig(level=logging.INFO)
# 复制到剪切板
if "win" in sys.platform:
import win32con
import win32clipboard
from io import BytesIO
def copy(image):
"""
Only work on Windows.
Using win32.
"""
img = Image.fromarray(image)
img = img.transpose(Image.ROTATE_270)
img = img.transpose(Image.FLIP_LEFT_RIGHT)
output = BytesIO()
img.convert("RGB").save(output, "BMP")
data = output.getvalue()[14:]
output.close()
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardData(win32con.CF_DIB, data)
win32clipboard.CloseClipboard()
logging.info("Copied successfully")
root = Tk()
root.withdraw()
messagebox.showinfo("Flappy Bird", "复制成功!")
root.destroy()
# 保存图片
def save(image):
    """Save the screenshot as a timestamped JPEG in the working directory."""
    picture = Image.fromarray(image).convert("RGB")
    picture = picture.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
    picture.save("%s.jpg" % round(time.time()))
    logging.info("Saved successfully")
    root = Tk()
    root.withdraw()
    messagebox.showinfo("Flappy Bird", "保存成功!")
    root.destroy()
def send_email(image_data, score):
    """Open the email-sharing window on a background thread."""
    start_thread(Email, image_data, score)
def start_thread(target, *args, **kw):
    """Run ``target(*args, **kw)`` on a new thread and return the Thread.

    Returning the started thread (previously the implicit None) lets callers
    join or inspect it; existing callers that ignore the return value are
    unaffected.
    """
    t = threading.Thread(target=target, args=args, kwargs=kw)
    t.start()
    return t
class AutoShowScrollbar(Scrollbar):
    """Scrollbar that hides itself when the whole content is visible."""

    def set(self, lo, hi):
        fully_visible = float(lo) <= 0.0 and float(hi) >= 1.0
        if fully_visible:
            # grid_remove is currently missing from Tkinter!
            self.tk.call("pack", "forget", self)
        else:
            self.pack(fill=Y, side=RIGHT, expand=False)
        Scrollbar.set(self, lo, hi)
class Email():
    """Tk dialog that emails the game screenshot and score via SMTP."""
    # HTML body template; the %s placeholder receives the user's message text.
    html_text = (
        '<html><body><h1>%s</h1>' + '<h2>游戏源码地址:' +
        '<a href="https://github.com/yanyongyu/FlappyBird">GitHub</a></h2>' +
        '<a><img src="cid:flappy" alt="flappy"></a>' +
        '<p>Coding Email...</p>' +
        '<p>send by <a href="http://www.python.org">Python</a> app...</p>' +
        '</body></html>')
    # SMTP servers keyed by the provider parsed from the sender address.
    smtp_servers = {
        '126': 'smtp.126.com',
        'qq': 'smtp.qq.com',
        'sina': 'smtp.sina.com.cn',
        'aliyun': 'smtp.aliyun.com',
        '163': 'smtp.163.com',
        'yahoo': 'smtp.mail.yahoo.com',
        'foxmail': 'SMTP.foxmail.com',
        'sohu': 'smtp.sohu.com',
        '139': 'SMTP.139.com',
        'china': 'smtp.china.com'
    }

    def __init__(self, image, score):
        """Prepare the screenshot and open the share window.

        image: frame array (rotated/flipped into normal orientation below).
        score: score shown in the default message text.
        """
        self.score = score
        img = Image.fromarray(image)
        img = img.transpose(Image.ROTATE_270)
        self.img = img.transpose(Image.FLIP_LEFT_RIGHT)
        # Loose address check; group(1) captures the provider for SMTP lookup.
        self.email_check = re.compile(
            r"^[\w]+\.?[\w]+@([\w]+)((\.\w{2,3}){1,3})$")
        logging.info("Show email window")
        self.show()

    def show(self):
        """Build and run the Tk window for composing the email (blocks in
        mainloop until the window is closed)."""
        self.root = Tk()
        self.root.title("email share")
        # Center the 400x250 window on the screen.
        sw = self.root.winfo_screenwidth()
        sh = self.root.winfo_screenheight()
        x = (sw - 400) / 2 - 25
        y = (sh - 250) / 2 - 25
        self.root.geometry('%dx%d+%d+%d' % (400, 250, x, y))
        self.root.resizable(False, False)
        icon_path = os.path.join(os.path.abspath("."),
                                 "assets/images/flappy.ico")
        self.root.iconbitmap(icon_path)
        # Frame holding the email header fields.
        frame1 = Frame(self.root)
        frame1.pack(fill=BOTH)
        # Give the entry column weight so it stretches.
        frame1.columnconfigure(1, weight=1)
        # Sender address row.
        label1 = Label(frame1, text="发件人邮箱:")
        label1.grid(row=0, column=0, padx=2, pady=4, sticky=W + N + S)
        self.send_email = StringVar()
        entry1 = Entry(frame1, textvariable=self.send_email)
        entry1.grid(row=0, column=1, padx=2, pady=4, sticky=E + N + S + W)
        # Sender password row (masked by default).
        label2 = Label(frame1, text="发件人密码:")
        label2.grid(row=1, column=0, padx=2, pady=4, sticky=W + N + S)
        self.send_pw = StringVar()
        self.entry2 = Entry(frame1, textvariable=self.send_pw, show='*')
        self.entry2.grid(row=1, column=1, padx=2, pady=4, sticky=E + N + S + W)
        self.v = IntVar()
        cb = Checkbutton(frame1, text='显示密码', variable=self.v)
        cb.grid(row=1, column=2, padx=2, pady=4, sticky=E + N + S)
        cb.bind('<ButtonRelease-1>', self.check_show)
        # Recipient address row.
        label3 = Label(frame1, text="收件人邮箱:")
        label3.grid(row=2, column=0, padx=2, pady=4, sticky=W + N + S)
        self.target_email = StringVar()
        entry3 = Entry(frame1, textvariable=self.target_email)
        entry3.grid(row=2, column=1, padx=2, pady=4, sticky=E + N + S + W)
        # Frame for the message body.
        frame2 = Frame(self.root)
        frame2.pack(fill=BOTH, expand=True)
        # Message body text widget, pre-filled with the score.
        self.text = Text(frame2,
                         width=40,
                         height=5,
                         borderwidth=3,
                         font=('微软雅黑', 12))
        self.text.pack(padx=2, pady=5, side=LEFT, fill=BOTH, expand=True)
        self.text.insert(1.0, "我在玩Flappy Bird小游戏,取得了%s分的好成绩哟" % self.score)
        vbar_y = AutoShowScrollbar(frame2, orient=VERTICAL)
        vbar_y.pack(fill=Y, side=RIGHT, expand=False)
        vbar_y.config(command=self.text.yview)
        self.text.configure(yscrollcommand=vbar_y.set)
        # Mouse-wheel scrolling for the text widget.
        def _scroll_text(event):
            self.text.yview_scroll(int(-event.delta / 120), 'units')
        self.text.bind('<MouseWheel>', _scroll_text)
        # Send button (sending runs on a worker thread to keep the UI alive).
        button = Button(self.root,
                        text="点击发送",
                        command=lambda: start_thread(self.send))
        button.pack(pady=4, side=BOTTOM)
        self.root.mainloop()

    def check_show(self, event):
        """Toggle password masking.

        NOTE(review): the value is read on ButtonRelease, i.e. before the
        checkbutton variable toggles — so 0 here means the box is *being*
        checked, hence show-on-0 is intentional; confirm.
        """
        show = self.v.get()
        if show == 0:
            self.entry2['show'] = ''
        else:
            self.entry2['show'] = '*'

    def _format_addr(self, s):
        """Format 'Name <addr>' with an RFC 2047-encoded display name."""
        name, addr = parseaddr(s)
        return formataddr((Header(name, 'utf-8').encode(), addr))

    def send(self):
        """Validate the addresses, build the MIME message and send it.

        Runs on a worker thread (see the send button); progress and errors
        are reported through messagebox dialogs.
        """
        logging.info("Start send email")
        top = Toplevel(self.root)
        top.geometry('100x75')
        top.resizable(False, False)
        lb = Label(top, text="正在发送...")
        lb.pack(fill=BOTH)
        from_addr = self.send_email.get()
        to_addr = self.target_email.get()
        logging.info("From email address: %s" % from_addr)
        logging.info("To email address: %s" % to_addr)
        if (not self.email_check.match(from_addr) or
                not self.email_check.match(to_addr)):
            messagebox.showerror("Flappy Bird", "请检查邮箱格式!")
            return
        # group[0] is the provider part used to pick the SMTP server.
        group = self.email_check.match(from_addr).groups()
        password = self.send_pw.get()
        try:
            smtp_server = Email.smtp_servers[group[0]]
            logging.info("SMTP server: %s" % smtp_server)
        except KeyError:
            messagebox.showerror("Flappy Bird", "该邮箱暂不支持,请联系作者!")
            return
        msg = MIMEMultipart()
        msg.attach(
            MIMEText(Email.html_text % self.text.get(1.0, END), 'html',
                     'utf-8'))
        msg['From'] = self._format_addr('Python爱好者 <%s>' % from_addr)
        msg['To'] = self._format_addr('管理员 <%s>' % to_addr)
        msg['Subject'] = Header('Flappy Bird', 'utf-8').encode()
        # Attach the screenshot as an inline JPEG (cid:flappy in the HTML).
        # NOTE(review): BytesIO is only imported in the Windows branch at the
        # top of this module — on other platforms this raises NameError;
        # confirm and hoist the import.
        logging.info("Write jpg picture into email")
        output = BytesIO()
        self.img.convert("RGB").save(output, "JPEG")
        mime = MIMEImage(output.getvalue(), _subtype="JPEG")
        output.close()
        mime.add_header('Content-ID', 'flappy')
        mime.add_header('Content-Disposition',
                        'attachment',
                        filename='%s.jpg' % round(time.time()))
        # Add the image part to the MIMEMultipart message.
        msg.attach(mime)
        try:
            logging.info("Send email")
            server = smtplib.SMTP(smtp_server, 25)
            # server.set_debuglevel(1)
            server.login(from_addr, password)
            server.sendmail(from_addr, [to_addr], msg.as_string())
            server.quit()
            logging.info("Send successfully!")
            top.destroy()
            if not messagebox.askyesno("Flappy Bird", "发送成功!是否继续发送?"):
                self.root.destroy()
        except Exception as e:
            logging.error("%s" % e)
            messagebox.showerror("Flappy Bird", "%s" % e)
if __name__ == '__main__':
    # Manual smoke test: open the share window with a dummy image and score 0.
    # NOTE(review): Image.fromarray("") will fail for a non-array input —
    # presumably only ever run with a real frame array; confirm.
    Email("", 0)
|
<reponame>machow/pins-python<gh_stars>0
from pathlib import Path
from .config import get_allow_pickle_read, PINS_ENV_INSECURE_READ
from .meta import Meta
from .errors import PinsInsecureReadError
from typing import Sequence
# TODO: move IFileSystem out of boards, to fix circular import
# from .boards import IFileSystem
# Pin types whose loading involves unpickling (opt-in only; see load_data).
UNSAFE_TYPES = frozenset(["joblib"])
# Pin types that must map to exactly one data file.
REQUIRES_SINGLE_FILE = frozenset(["csv", "joblib", "file"])


def load_data(
    meta: "Meta",
    fs,
    path_to_version: "str | None" = None,
    allow_pickle_read: "bool | None" = None,
):
    """Return loaded data, based on meta type.

    Parameters
    ----------
    meta: Meta
        Information about the stored data (e.g. its type).
    fs: IFileSystem
        An abstract filesystem with a method to .open() files.
    path_to_version:
        A filepath used as the parent directory the data to-be-loaded lives in.
    allow_pickle_read:
        Opt-in flag for pickle-based types; falls back to board/env config.

    Raises
    ------
    PinsInsecureReadError
        If the pin type requires unpickling and it was not allowed.
    ValueError
        If a single-file pin type lists more than one file.
    """
    # TODO: extandable loading with deferred importing
    if meta.type in UNSAFE_TYPES and not get_allow_pickle_read(allow_pickle_read):
        raise PinsInsecureReadError(
            f"Reading pin type {meta.type} involves reading a pickle file, so is NOT secure."
            f"Set the allow_pickle_read=True when creating the board, or the "
            f"{PINS_ENV_INSECURE_READ}=1 environment variable.\n"
            "See:\n"
            "  * https://docs.python.org/3/library/pickle.html \n"
            "  * https://scikit-learn.org/stable/modules/model_persistence.html#security-maintainability-limitations"
        )

    # Check that only a single file name was given
    fnames = [meta.file] if isinstance(meta.file, str) else meta.file
    # BUG FIX: the original compared the *builtin* ``type`` (never equal to a
    # string, never in the frozenset) instead of ``meta.type``, so the
    # single-file guard and the "table" branch below could never trigger.
    if len(fnames) > 1 and meta.type in REQUIRES_SINGLE_FILE:
        raise ValueError("Cannot load data when more than 1 file")

    # file path creation ------------------------------------------------------
    if meta.type == "table":
        # this type contains an rds and csv files named data.{ext}, so we match
        # R pins behavior and hardcode the name
        target_fname = "data.csv"
    else:
        target_fname = fnames[0]

    if path_to_version is not None:
        path_to_file = f"{path_to_version}/{target_fname}"
    else:
        path_to_file = target_fname

    # type handling -----------------------------------------------------------
    if meta.type == "csv":
        import pandas as pd

        return pd.read_csv(fs.open(path_to_file))
    elif meta.type == "table":
        import pandas as pd

        return pd.read_csv(fs.open(path_to_file))
    elif meta.type == "joblib":
        import joblib

        return joblib.load(fs.open(path_to_file))
    elif meta.type == "file":
        # TODO: update to handle multiple files
        return [str(Path(fs.open(path_to_file).name).absolute())]

    raise NotImplementedError(f"No driver for type {meta.type}")
def save_data(
    obj, fname, type=None, apply_suffix: bool = True
) -> "str | Sequence[str]":
    """Write *obj* to *fname* using the driver for *type*; return the path.

    When ``apply_suffix`` is true, ``.{type}`` is appended to ``fname``.
    Raises NotImplementedError for unknown types or unsupported objects.
    """
    # TODO: extensible saving with deferred importing; see also how saving /
    # loading options (e.g. pandas index settings) should be encoded.
    if type == "csv":
        import pandas as pd

        if apply_suffix:
            fname = f"{fname}.{type}"
        if not isinstance(obj, pd.DataFrame):
            raise NotImplementedError(
                "Currently only pandas.DataFrame can be saved to a CSV."
            )
        obj.to_csv(fname, index=False)
        return fname

    if type == "joblib":
        import joblib

        if apply_suffix:
            fname = f"{fname}.{type}"
        joblib.dump(obj, fname)
        return fname

    raise NotImplementedError(f"Cannot save type: {type}")
def default_title(obj, name):
    """Build the default pin title describing *obj*."""
    import pandas as pd

    if isinstance(obj, pd.DataFrame):
        # TODO(compat): title says CSV rather than data.frame
        # see https://github.com/machow/pins-python/issues/5
        dims = " x ".join(str(d) for d in obj.shape)
        return f"{name}: a pinned {dims} DataFrame"
    return f"{name}: a pinned {type(obj).__qualname__} object"
|
<filename>nn/augmentations.py
from math import sqrt, pi
import torch
from torch import Tensor
from torch.nn import Module, functional as F
from nn.exponential_moving_average import ExponentialMovingAverage
def t_2d(x: Tensor, y: Tensor) -> Tensor:
    """Batched 3x3 homogeneous translation matrices for offsets (x, y)."""
    assert x.ndim == y.ndim == 1
    assert x.shape == y.shape
    one = torch.ones_like(x)
    zero = torch.zeros_like(x)
    rows = (
        torch.stack((one, zero, x), dim=-1),
        torch.stack((zero, one, y), dim=-1),
        torch.stack((zero, zero, one), dim=-1),
    )
    return torch.stack(rows, dim=-2)
def t_3d(x: Tensor, y: Tensor, z: Tensor) -> Tensor:
    """Batched 4x4 homogeneous translation matrices for offsets (x, y, z)."""
    assert x.ndim == y.ndim == z.ndim == 1
    assert x.shape == y.shape == z.shape
    one = torch.ones_like(x)
    zero = torch.zeros_like(x)
    rows = (
        torch.stack((one, zero, zero, x), dim=-1),
        torch.stack((zero, one, zero, y), dim=-1),
        torch.stack((zero, zero, one, z), dim=-1),
        torch.stack((zero, zero, zero, one), dim=-1),
    )
    return torch.stack(rows, dim=-2)
def s_2d(x: Tensor, y: Tensor) -> Tensor:
    """Batched 3x3 homogeneous scaling matrices with factors (x, y)."""
    assert x.ndim == y.ndim == 1
    assert x.shape == y.shape
    one = torch.ones_like(x)
    zero = torch.zeros_like(x)
    rows = (
        torch.stack((x, zero, zero), dim=-1),
        torch.stack((zero, y, zero), dim=-1),
        torch.stack((zero, zero, one), dim=-1),
    )
    return torch.stack(rows, dim=-2)
def s_3d(x: Tensor, y: Tensor, z: Tensor) -> Tensor:
    """Batched 4x4 homogeneous scaling matrices with factors (x, y, z)."""
    assert x.ndim == y.ndim == z.ndim == 1
    assert x.shape == y.shape == z.shape
    one = torch.ones_like(x)
    zero = torch.zeros_like(x)
    rows = (
        torch.stack((x, zero, zero, zero), dim=-1),
        torch.stack((zero, y, zero, zero), dim=-1),
        torch.stack((zero, zero, z, zero), dim=-1),
        torch.stack((zero, zero, zero, one), dim=-1),
    )
    return torch.stack(rows, dim=-2)
def r_2d(t: Tensor) -> Tensor:
    """Batched 3x3 homogeneous rotation matrices for angles *t* (radians)."""
    assert t.ndim == 1
    one = torch.ones_like(t)
    zero = torch.zeros_like(t)
    cos_t = t.cos()
    sin_t = t.sin()
    rows = (
        torch.stack((cos_t, -sin_t, zero), dim=-1),
        torch.stack((sin_t, cos_t, zero), dim=-1),
        torch.stack((zero, zero, one), dim=-1),
    )
    return torch.stack(rows, dim=-2)
def r_3d(t: Tensor, v: Tensor):
    """Batched 4x4 homogeneous rotation matrices about axis *v* by angle *t*.

    Implements the axis-angle (Rodrigues) rotation matrix. ``v`` is a
    homogeneous 4-vector ``(x, y, z, 0)``; the axis is assumed unit length.
    """
    assert t.ndim == v.ndim == 1
    x, y, z, _ = v.split(1, dim=-1)
    xx = x ** 2
    yy = y ** 2
    zz = z ** 2
    xy = x * y
    xz = x * z
    yz = y * z
    a = torch.ones_like(t)
    b = torch.zeros_like(t)
    c = t.cos()
    d = 1.0 - c
    s = t.sin()
    return torch.stack(
        [
            # BUG FIX: entry [0][2] of the Rodrigues matrix is xz*d + y*s;
            # the original used xz*d - y*s, which made the matrix
            # non-orthogonal for any axis with a y component.
            torch.stack([c + xx * d, xy * d - z * s, xz * d + y * s, b], dim=-1),
            torch.stack([xy * d + z * s, c + yy * d, yz * d - x * s, b], dim=-1),
            torch.stack([xz * d - y * s, yz * d + x * s, c + zz * d, b], dim=-1),
            torch.stack([b, b, b, a], dim=-1),
        ],
        dim=-2,
    )
class Augmentations(Module):
    """Randomly applies geometric and color augmentations to image batches.

    Each enabled augmentation fires independently per sample with probability
    ``p``. Geometric transforms accumulate into batched 3x3 homogeneous
    matrices (``theta``) applied through a sampling grid; color transforms
    accumulate into batched 4x4 homogeneous matrices (``phi``) applied to the
    flattened channel values. ``apply_phi`` requires C == 3 (the 3x3 linear
    part multiplies the channel axis); pixel values are assumed in [0, 1] —
    the color path maps to [-1, 1] and back. NOTE(review): this pipeline
    resembles adaptive discriminator augmentation (StyleGAN2-ADA); confirm
    the intended reference.
    """
    def __init__(
        self,
        p: float,
        image_flip: bool = True,
        rotate: bool = True,
        translate: bool = True,
        brightness: bool = True,
        contrast: bool = True,
        luma_flip: bool = True,
        hue: bool = True,
        saturation: bool = True,
    ):
        super(Augmentations, self).__init__()
        # Trigger probability, stored as a buffer so it moves with the module.
        self.register_buffer("p", torch.tensor(p))
        self.image_flip = image_flip
        self.rotate = rotate
        self.translate = translate
        self.brightness = brightness
        self.contrast = contrast
        self.luma_flip = luma_flip
        self.hue = hue
        self.saturation = saturation
        # Reusable constants: per-sample zero, and identity matrices with a
        # leading broadcast dimension.
        self.register_buffer("z", torch.zeros(1))
        self.register_buffer("id3", torch.eye(3)[None, ...])
        self.register_buffer("id4", torch.eye(4)[None, ...])
        # Unit luma axis in homogeneous coordinates and its outer product,
        # used by the luma-flip and saturation transforms.
        v = torch.tensor([1.0, 1.0, 1.0, 0.0]) / sqrt(3)
        self.register_buffer("v", v)
        self.register_buffer("vv", v.outer(v))
        # Sampling spreads for the color augmentations.
        self.b_std = 0.2
        self.c_std = 0.5
        self.h_max = 1.0
        self.s_std = 1.0

    def sample_theta(self, input: Tensor) -> Tensor:
        """Sample per-sample 3x3 geometric transform matrices."""
        n, c, h, w = input.shape
        z = self.z.expand(n)
        theta = self.id3
        if self.image_flip:
            # Horizontal flip: x-scale is -1 when triggered, +1 otherwise.
            p = torch.rand_like(z) < self.p
            x = 1.0 - 2.0 * torch.randint_like(z, low=0, high=2).where(p, z)
            y = torch.ones_like(x)
            theta = theta @ s_2d(x, y)
        if self.rotate:
            # Rotation by a random multiple of 90 degrees.
            p = torch.rand_like(z) < self.p
            t = 0.5 * pi * torch.randint_like(z, low=0, high=4).where(p, z)
            theta = theta @ r_2d(t)
        if self.translate:
            # Translation offsets uniform in [-1, 1] (normalized grid units).
            p = torch.rand_like(z) < self.p
            x = (torch.rand_like(z) * 2.0 - 1.0).where(p, z)
            y = (torch.rand_like(z) * 2.0 - 1.0).where(p, z)
            theta = theta @ t_2d(x, y)
        return theta

    def sample_phi(self, input: Tensor) -> Tensor:
        """Sample per-sample 4x4 color transform matrices."""
        n, c, h, w = input.shape
        z = self.z.expand(n)
        phi = self.id4
        if self.brightness:
            # Additive shift along all three channels.
            p = torch.rand_like(z) < self.p
            b = (torch.randn_like(z) * self.b_std).where(p, z)
            phi = phi @ t_3d(x=b, y=b, z=b)
        if self.contrast:
            # Multiplicative scale, log2-normal distributed (1.0 when not
            # triggered: note the where(p, 1.0 - z)).
            p = torch.rand_like(z) < self.p
            q = (torch.randn_like(z) * self.c_std).exp2().where(p, 1.0 - z)
            phi = phi @ s_3d(q, q, q)
        if self.luma_flip:
            # Householder reflection I - 2vv^T about the luma axis when i == 1.
            p = torch.rand_like(z) < self.p
            i = torch.randint_like(z, low=0, high=2).where(p, z)
            phi = phi @ (self.id4 - 2.0 * self.vv * i[..., None, None])
        if self.hue:
            # Rotation about the luma axis by up to +/- pi * h_max.
            p = torch.rand_like(z) < self.p
            t = ((torch.rand_like(z) * 2.0 - 1.0) * pi * self.h_max).where(p, z)
            phi = phi @ r_3d(t, self.v)
        if self.saturation:
            # Interpolate between the luma projection (s=0) and identity (s=1),
            # extrapolating for s > 1.
            p = torch.rand_like(z) < self.p
            s = (torch.randn_like(z) * self.s_std).exp2().where(p, 1.0 - z)
            phi = phi @ (self.vv + (self.id4 - self.vv) * s[..., None, None])
        return phi

    def apply_theta(self, input: Tensor, theta: Tensor) -> Tensor:
        """Apply geometric transforms via an affine sampling grid."""
        # Drop the homogeneous row: affine_grid expects (n, 2, 3).
        theta, _ = theta.split([2, 1], dim=-2)
        grid = F.affine_grid(
            theta=theta,
            size=input.shape,
            align_corners=True,
        )
        input = F.grid_sample(
            input=input,
            # Wrap grid coordinates into [-1, 1] (toroidal padding).
            grid=((grid + 1.0) % 2.0) - 1.0,
            mode="nearest",
            padding_mode="zeros",
            align_corners=True,
        )
        return input

    def apply_phi(self, input: Tensor, phi: Tensor) -> Tensor:
        """Apply color transforms to the flattened pixels (requires C == 3)."""
        n, c, h, w = input.shape
        # Split the 4x4 homogeneous matrix into linear part A and offset b.
        phi, _ = phi.split([3, 1], dim=-2)
        A, b = phi.split([3, 1], dim=-1)
        # Map [0, 1] -> [-1, 1], transform, and map back.
        input = input * 2.0 - 1.0
        input = input.reshape(n, c, h * w)
        input = A @ input + b
        input = input.reshape(n, c, h, w)
        input = (input + 1.0) * 0.5
        return input

    def forward(
        self,
        input: Tensor,
        theta: Tensor = None,
        phi: Tensor = None,
    ) -> Tensor:
        """Augment *input*, sampling transforms unless given explicitly."""
        if theta is None:
            theta = self.sample_theta(input)
        if phi is None:
            phi = self.sample_phi(input)
        # Skip the (relatively expensive) resampling when every sampled
        # transform is the identity.
        if (theta != self.id3).any():
            input = self.apply_theta(input, theta)
        if (phi != self.id4).any():
            input = self.apply_phi(input, phi)
        return input
class AdaptiveAugmentations(Augmentations):
    """Augmentations whose probability ``p`` is tuned online (ADA-style).

    ``update`` nudges ``p`` up by ``alpha`` when the (EMA-smoothed)
    overfitting heuristic exceeds ``r_target`` and down otherwise,
    clamping to [0, 1].
    """
    def __init__(
        self,
        p: float,
        r_target: float = 0.5,
        momentum: float = 0.8,
        alpha: float = 2.5e-3,
        image_flip: bool = True,
        rotate: bool = True,
        translate: bool = True,
        brightness: bool = True,
        contrast: bool = True,
        luma_flip: bool = True,
        hue: bool = True,
        saturation: bool = True,
    ):
        super(AdaptiveAugmentations, self).__init__(
            p=p,
            image_flip=image_flip,
            rotate=rotate,
            translate=translate,
            brightness=brightness,
            contrast=contrast,
            luma_flip=luma_flip,
            hue=hue,
            saturation=saturation,
        )
        r_target = torch.tensor(r_target)
        self.register_buffer("r_target", r_target)
        # ExponentialMovingAverage is defined elsewhere in this file.
        self.ema = ExponentialMovingAverage(r_target.shape, momentum=momentum)
        self.alpha = alpha
    @torch.no_grad()
    def update(self, r_current: Tensor):
        """Step ``p`` by +/-alpha toward keeping EMA(r_current) at r_target."""
        r_current = r_current.to(dtype=self.ema.average.dtype)
        a = self.alpha * (self.ema(r_current) - self.r_target).sign()
        self.p.copy_((self.p + a).clamp(min=0.0, max=1.0))
|
import asyncio
import inspect
import math
import os
import re
import sys
import traceback
import uuid
from base64 import b64decode
from collections import OrderedDict, deque
from contextlib import redirect_stdout
from io import BytesIO, StringIO
from itertools import chain
import aiohttp
from async_timeout import timeout as Timeout
import discord
from discord.backoff import ExponentialBackoff
from discord.ext import commands
from discord.utils import get as dget
import sentry_sdk
import ujson as json
from yarl import URL
from utils import cache
from utils.converters import setup_converters
from utils.cooldown import *
class Object(object):
    """Bare attribute container used as a lightweight ad-hoc namespace."""
@cache.cache(maxsize=1024)
async def query_prefix(bot, arg, channel=False):
    """Fetch custom prefix rows for a channel id (channel=True) or a guild id.

    Results are memoised by the cache decorator. ``arg`` is a Discord
    snowflake (an int), so the f-string interpolation below is not injectable
    in practice — parameterised queries would still be safer.
    """
    if channel:
        q = await bot.mysql.cursor.execute(f"SELECT prefix,channel FROM `prefix_channel` WHERE channel={arg}")
        result = await q.fetchone()
    else:
        q = await bot.mysql.cursor.execute(f"SELECT prefix,d FROM `prefix` WHERE guild={arg}")
        result = await q.fetchall()
    return result
async def get_prefix(bot, message):
    """Resolve the list of command prefixes for *message*.

    Precedence: channel-specific prefix, then guild prefixes flagged as
    default ('d' column), then the built-in prefix. DMs also accept an empty
    prefix; mentioning the bot always works.
    """
    if bot.dev_mode:
        prefix = ','
    elif bot.self_bot:
        prefix = 'self.'
    else:
        prefix = '.'
    prefixes = [prefix]
    # ".prefix" management commands must stay reachable via the built-in prefix.
    if not bot.self_bot and message.guild and not message.content.startswith(f"{prefix}prefix"):
        prefix_set = False
        result = await query_prefix(bot, message.channel.id, channel=True)
        if result:
            if result['channel'] == message.channel.id:
                prefixes.append(result['prefix'].lower())
                prefix_set = True
        if not prefix_set:
            result = await query_prefix(bot, message.guild.id)
            for x in result:
                if x['d'] and not prefix_set:
                    prefix_set = True
                    prefixes.append(x['prefix'].lower())
        if prefix_set:
            # A custom prefix replaces the built-in one.
            del prefixes[0]
    if message.guild is None and not bot.self_bot:
        prefixes.append('')
    prefixes.extend(commands.bot.when_mentioned(bot, message))
    return prefixes
async def get_user_info(state, user_id):
    """Resolve a user by id: consult the connection state's user cache first,
    then fall back to the HTTP API. Returns None for unknown ids."""
    cached = dget(list(state._users.values()), id=user_id)
    if cached:
        return cached
    try:
        payload = await state.http.get_user(user_id)
    except discord.NotFound:
        return None
    return discord.User(state=state, data=payload)
async def replace_mentions(state, guild, content:str, limit:int=None):
    """Replace raw <@id>/<@!id> user and <@&id> role mentions in *content*
    with readable @name text."""
    match = re.findall(r"(<@!?(\d+)>)", content)
    if match:
        if limit:
            # NOTE(review): this *skips* the first `limit` matches rather than
            # capping the count — if `limit` is meant as a maximum it should
            # probably be match[:limit]; confirm against callers.
            match = match[limit:]
        for mention, i in match:
            user = await get_user_info(state, int(i))
            if user is not None:
                content = content.replace(mention, f'@{user}')
    if guild:
        match = re.findall(r"(<@&(\d+)>)", content)
        if match:
            for mention, i in match:
                role = dget(guild.roles, id=int(i))
                if role:
                    content = content.replace(mention, f'@{role}')
    return content
class LimitedDict(OrderedDict):
    """An OrderedDict that evicts its oldest entries beyond ``maxlen``.

    ``maxlen`` is consumed from kwargs (None means unbounded); the remaining
    args/kwargs initialise the dict as usual.
    """
    def __init__(self, *args, **kwargs):
        self.maxlen = kwargs.pop('maxlen', None)
        # BUG FIX: the original called super().__init__(self, *args, **kwargs),
        # passing the instance itself as the initial mapping — any positional
        # initialiser then raised TypeError ("expected at most 1 arguments").
        super().__init__(*args, **kwargs)
        self._check_size()
    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self._check_size()
    def _check_size(self):
        # Evict in FIFO (insertion) order until within bounds.
        if self.maxlen is not None:
            while len(self) > self.maxlen:
                self.popitem(last=False)
async def reaction_backoff(partial):
    """Run ``partial`` with up to 5 attempts, sleeping with exponential
    backoff between transient HTTP failures.

    Returns True on success, None when the error is permanent
    (Forbidden/NotFound) or all attempts were exhausted.
    """
    b = None
    for _ in range(5):
        try:
            await partial()
        # BUG FIX: Forbidden/NotFound subclass discord.HTTPException, so the
        # original's HTTPException handler shadowed them and permanent
        # failures were retried 5 times; they must be caught first.
        except (discord.Forbidden, discord.NotFound):
            break
        except discord.HTTPException:
            if b is None:
                b = ExponentialBackoff(base=2.0)
            await asyncio.sleep(b.delay())
            continue
        else:
            return True
# Memory-saving stand-in for discord Activity objects: only the name is kept.
class NameOnlyActivity:
    __slots__ = ('name',)

    def __init__(self, data):
        self.name = data.get('name')


def create_activity(data):
    """Build a NameOnlyActivity from a raw payload, or None when it is empty."""
    return NameOnlyActivity(data) if data else None
class FakeMessage:
    """Minimal object that quacks enough like a discord.Message to be passed
    to discord.Message.delete(); deliberately keeps no references to the real
    message or guild so it can outlive them cheaply."""
    def __init__(self, ctx, mid):
        self._state = ctx._state
        self.id = mid
        self.channel = Object()
        self.channel.id = ctx.channel.id
        # NO REFS TO MESSAGE/GUILD
        del ctx
    async def delete(self):
        # Reuse the real Message.delete implementation against this stand-in.
        return await discord.Message.delete(self)
class LimitError(Exception): pass
class Funcs(commands.Cog):
    def __init__(self, bot):
        """Wire the cog onto *bot* and build all static lookup tables.

        NOTE(review): several secrets (Sentry DSN, proxy/API hosts, tokens
        elsewhere in this cog) are hard-coded — they should live in config.
        """
        self.bot = bot
        self.loop = bot.loop
        self._release = None
        self.init_aliases()
        # Accepted static-image mime types (gif handled separately).
        self.image_mimes = (
            'image/png',
            'image/jpeg',
            'image/jpg',
            'image/webp',
            'image/bmp',
            'image/pjpeg',
            'image/x-icon',
            'image/x-ms-bmp'
        )
        # ".png", ".jpeg", ... derived from the first five mimes ("image/" is 6 chars).
        self.image_extensions = [f".{x[6:]}" for x in self.image_mimes[:5]]
        # index 0: static image extensions, index 1: gif (see image_check).
        self.extension_checks = (
            lambda url: any(URL(url).path.endswith(x) for x in self.image_extensions),
            lambda url: URL(url).path.endswith('.gif')
        )
        # Cycled by get_color() for embed colors.
        self.colors = (
            'red',
            'blue',
            'green',
            'gold',
            'dark_blue',
            'dark_gold',
            'dark_green',
            'dark_grey',
            'dark_magenta',
            'dark_orange',
            'dark_purple',
            'dark_red',
            'dark_teal',
            'darker_grey',
            'default',
            'light_grey',
            'lighter_grey',
            'magenta',
            'orange',
            'purple',
            'teal'
        )
        self.color_count = 0
        self.emote_regex = re.compile(r"<(a)?:[a-zA-Z0-9\_]+:([0-9]+)>")
        self.mention_regex = re.compile(r"<@!?([0-9]+)>")
        self.switches = {}
        self.keys = {}
        self.code_block = '```{0}\n{1}\n```'
        # rextester.com language id -> accepted aliases (see get_lang).
        self.rex_map = {
            '39': ['ada'],
            '15': ['assembly', 'asm'],
            '38': ['bash'],
            '44': ['brainfuck', 'bf'],
            '1': ['c#'],
            '7': ['c++(gcc)', 'c++', 'cpp'],
            '27': ['c++(clang)'],
            '28': ['c++(vc++)'],
            '6': ['c(gcc)', 'c'],
            '26': ['c(clang)'],
            '29': ['c(vc)'],
            '18': ['commonlisp', 'lisp'],
            '30': ['d'],
            '41': ['elixir', 'ex'],
            '40': ['erlang'],
            '3': ['f#'],
            '45': ['fortran', 'fort'],
            '20': ['go'],
            '11': ['haskell', 'hs'],
            '4': ['java'],
            '17': ['javascript', 'js'],
            '43': ['kotlin', 'kot'],
            '14': ['lua'],
            '33': ['mysql'],
            '23': ['node.js', 'node'],
            '42': ['ocaml'],
            '25': ['octave'],
            '10': ['objective-c', 'oc'],
            '35': ['oracle'],
            '9': ['pascal'],
            '13': ['perl'],
            '8': ['php', 'php7'],
            '34': ['postgresql', 'psql', 'postgres'],
            '19': ['prolog'],
            '5': ['python2', 'python2.7', 'py2.7', 'py2'],
            '24': ['python', 'python3', 'py', 'py3'],
            '31': ['r'],
            '12': ['ruby', 'rb'],
            '21': ['scala'],
            '22': ['scheme'],
            '16': ['sqlserver'],
            '37': ['swift'],
            '32': ['tcl'],
            '2': ['visualbasic', 'vb']
        }
        # Extra compiler arguments for languages that need them.
        self.rex_compiler_map = {
            '7': '-Wall -std=c++14 -O2 -o a.out source_file.cpp',
            '27': '-Wall -std=c++14 -stdlib=libc++ -O2 -o a.out source_file.cpp',
            '28': r'source_file.cpp -o a.exe /EHsc /MD /I C:\boost_1_60_0 /link /LIBPATH:C:\boost_1_60_0\stage\lib',
            '6': '-Wall -std=gnu99 -O2 -o a.out source_file.c',
            '26': '-Wall -std=gnu99 -O2 -o a.out source_file.c',
            '29': 'source_file.c -o a.exe',
            '30': 'source_file.d -ofa.out',
            '20': '-o a.out source_file.go',
            '11': '-o a.out source_file.hs'
        }
        self.offload_ip = "http://192.168.15.17"
        # Shared cooldown buckets used by check_cooldown.
        self.buckets = (
            CooldownMapping(Cooldown(1, 5)),
            CooldownMapping(Cooldown(1, 7))
        )
        self.default_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'}
        self.session = None
        self._create_session()
        self.proxies = open(self.discord_path('utils/proxies.txt')).read().split('\n')
        self.proxy = "http://proxy.wrmsr.io:3128"
        self.imis_host = "http://imis.mods.nyc/objects"
        self.fapi_host = "https://fapi.wrmsr.io"
        # Sentry
        self.dsn = "https://b4e47d43cb704468a3df7856d7da9fc8@sentry.io/1218283"
        # Auto pull from git on bot start
        self.loop.create_task(self.git_update())
        # Get the current commit hash for Sentry
        self.loop.create_task(self.update_release(True))
    def init_aliases(self):
        """Expose frequently-used helpers on the bot object and tweak the
        discord.py connection state (drops typing/pin events, disables the
        member cache) to save memory."""
        self.cursor = self.bot.mysql.cursor
        self.replace_mentions = replace_mentions
        #Globals
        bot = self.bot
        bot.funcs = self
        bot.pruned_messages = deque(maxlen=85000)
        #Utils
        bot.escape = bot.mysql.escape
        bot.get_images = self.get_images
        bot.run_process = self.run_process
        bot.get_json = self.get_json
        bot.bytes_download = self.bytes_download
        bot.command_help = self.command_help
        bot.random = self.random
        bot.get_text = self.get_text
        bot.post_data = self.post_data
        bot.get_user = self.get_user
        bot.get_member = self.get_member
        #Paths
        bot.path = Object()
        bot.path.discord = self.discord_path
        bot.path.files = self.files_path
        #converters
        setup_converters()
        #ignore events: replace the parsers with no-ops so these gateway
        #events are dropped cheaply.
        rn = lambda _: None
        bot._connection.parse_typing_start = rn
        bot._connection.parse_channel_pins_update = rn
        bot._connection._cache_members = False
    def cog_unload(self):
        # Dispose of the shared aiohttp session when the cog is unloaded.
        self._close_session()
    def _create_session(self):
        # One shared aiohttp session (browser-like UA) for all HTTP helpers.
        self.session = aiohttp.ClientSession(headers=self.default_headers, loop=self.loop)
    def _close_session(self):
        # Close asynchronously; safe to call when no session was created.
        if self.session:
            asyncio.ensure_future(self.session.close())
def discord_path(self, path):
return os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), path)
def files_path(self, path):
return self.discord_path('files/'+path)
    @cache.cache(maxsize=1024)
    async def is_off(self, guild, member):
        """True when *member* is in the mute table for *guild* (cached).

        Both arguments are snowflake ints, so the f-string SQL is not
        injectable in practice.
        """
        sql = f"SELECT user FROM `muted` WHERE guild={guild} AND user={member}"
        q = await self.cursor.execute(sql)
        result = await q.fetchone()
        return result and result['user'] == member
# TTL Cache 5 mins
@cache.cache(maxsize=300, strategy=cache.Strategy.timed)
async def query_blacklist(self, table, *args, single=True):
# Global
if table == 1:
sql = 'SELECT user FROM `global_blacklist` WHERE user=%s'
# Except
elif table == 2:
sql = "SELECT role FROM `except_blacklist` WHERE guild=%s"
# Server
elif table == 3:
sql = "SELECT user FROM `blacklist` WHERE guild=%s AND user=%s"
# Channel
elif table == 4:
sql = "SELECT channel FROM `channel_blacklist` WHERE channel=%s"
q = await self.cursor.execute(sql % args)
if single:
return await q.fetchone()
return await q.fetchall()
    async def is_blacklisted(self, message):
        """Decide whether *message* should be ignored (True = blocked).

        Bot owner and guild owner always pass. Order of checks: missing
        send/read perms -> global blacklist -> mute table -> role exceptions
        (whitelist mode) -> per-guild blacklist -> channel blacklist.
        """
        guild = message.guild
        if guild and guild.me:
            perms = message.channel.permissions_for(guild.me)
            # no perms why bother
            if not perms.send_messages or not perms.read_messages:
                return True
        author = message.author
        if await self.bot.is_owner(author) or (guild and guild.owner_id == author.id):
            return False
        q = await self.query_blacklist(1, author.id)
        if q:
            return True
        if guild is None:
            return False
        if await self.is_off(guild.id, author.id):
            return True
        if isinstance(author, discord.Member):
            result = await self.query_blacklist(2, guild.id, single=False)
            if result:
                roles = [dget(guild.roles, id=x['role']) for x in result]
                for i, role in enumerate(roles):
                    if role is None:
                        # Role no longer exists on Discord; drop the stale row.
                        await self.cursor.execute(
                            f"DELETE FROM `except_blacklist` WHERE guild={guild.id} AND role={result[i]['role']}"
                        )
                # Whitelist mode: only members holding an excepted role pass.
                return not any(x in roles for x in author.roles)
        q = await self.query_blacklist(3, guild.id, author.id)
        if q:
            return True
        q = await self.query_blacklist(4, message.channel.id)
        if q:
            # A blacklisted channel still allows blacklist-management commands.
            return 'blacklist' not in message.content
        return False
# 0 - global
# 1 - server
# 2 - channel
# 3 - user
# 4 - role
    async def command_check(self, message, command):
        """Return True when *command* is disabled for this message's context.

        Checks, in order: global disable (type 0); channel-topic overrides
        ("[cmd]"/"{+cmd}" enable, "{-cmd}" disable); then per-guild rows:
        type 1 = whole guild, 2 = channel, 3 = user, 4 = role.
        """
        if await self.bot.is_owner(message.author):
            return False
        sql = 'SELECT * FROM `command_blacklist` WHERE type=0 AND command=%s'
        q = await self.cursor.execute(sql, (command,))
        result = await q.fetchone()
        if result:
            return True
        elif message.guild is None:
            return False
        elif message.guild.owner_id == message.author.id:
            return False
        topic_match = None
        if message.channel.topic:
            command_escape = re.escape(command)
            # Matches [cmd], {cmd}, [+cmd], {-cmd}, etc. in the channel topic.
            topic_regex = re.compile(r"((\[|\{)(\+|\-)?"+command_escape+r"(\]|\}))", re.I|re.S)
            match = topic_regex.findall(message.channel.topic.lower())
            if match:
                if match[0][2] == '+' or not match[0][2]:
                    topic_match = False
                elif match[0][2] == '-':
                    topic_match = True
        if topic_match:
            return True
        q = await self.cursor.execute(f'SELECT * FROM `command_blacklist` WHERE guild={message.guild.id}')
        result = await q.fetchall()
        for s in result:
            if s['command'] != command:
                continue
            if s['type'] == 1:
                # A "+cmd" topic override re-enables a guild-wide disable.
                if topic_match is False:
                    return False
                return True
            elif s['type'] == 2:
                if s['channel'] == message.channel.id:
                    return True
            elif s['type'] == 3:
                if s['user'] == message.author.id:
                    return True
            elif s['type'] == 4:
                for role in message.author.roles:
                    if s['role'] == role.id:
                        return True
        return False
async def get_mime(self, url):
async with Timeout(10):
async with self.session.head(url, proxy=self.proxy, allow_redirects=True) as r:
r.raise_for_status()
return r.headers.get('Content-Type', '').lower().split(';')[0]
async def isimage(self, url:str):
try:
mime = await self.get_mime(url)
return mime in self.image_mimes
except:
return False
async def isgif(self, url:str):
try:
mime = await self.get_mime(url)
return mime == "image/gif"
except:
return False
# Use to keep timeout exceptions
async def _bytes_download(self, url:str, timeout:int=10, headers=None, **kwargs):
p = kwargs.pop('proxy', None)
if p:
p = self.proxy
limit = kwargs.pop('limit', None)
async with Timeout(timeout):
async with self.session.get(url, headers=headers, proxy=p, **kwargs) as r:
r.raise_for_status()
if limit:
cl = int(r.headers.get('content-length', 0))
if cl > limit:
raise LimitError(f"Content exceeds size limit (> {cl} B)")
chunk = await r.content.readexactly(cl)
if not chunk:
return False
b = BytesIO(chunk)
else:
b = BytesIO(await r.read())
b.seek(0)
return b
# Return false on all errors, including timeouts..
async def bytes_download(self, *args, **kwargs):
try:
return await self._bytes_download(*args, **kwargs)
except:
return False
async def get_json(self, url:str, timeout:int=5, headers=None, data=None, content_type='application/json'):
method = self.session.post if data else self.session.get
try:
async with Timeout(timeout):
async with method(url, headers=headers, data=data) as r:
r.raise_for_status()
load = await r.json(content_type=content_type)
return load
except:
return {}
async def get_text(self, url:str, **kwargs):
p = kwargs.pop('proxy', None)
if p:
p = self.proxy
dlimit = kwargs.pop("discord_limit", False)
try:
async with Timeout(kwargs.pop('timeout', 5)):
async with self.session.get(url, proxy=p, **kwargs) as r:
if dlimit:
# Max 6 bytes per character (emojis)
# 2000 discord char limit
chunk = await r.content.read(6 * 2000)
if not chunk:
return False
return chunk.decode("utf-8")
return await r.text()
except:
return False
async def get_cookies(self, url:str, **kwargs):
try:
async with Timeout(kwargs.pop('timeout', 5)):
async with self.session.get(url, **kwargs) as r:
return r.cookies
except:
return False
    async def post_data(self, url:str, data=None, **kwargs):
        """POST *data* to *url*.

        kwargs: timeout (default 5), headers (default self.default_headers),
        read/json/text select the response representation (True by default),
        rheaders additionally returns the response headers as a tuple.
        Returns False on failure (re-raises in dev mode).
        """
        t = kwargs.pop('timeout', 5)
        headers = kwargs.pop('headers', self.default_headers)
        try:
            async with Timeout(t):
                async with self.session.post(url, headers=headers, data=data) as resp:
                    r = True
                    if kwargs.pop('read', False):
                        r = await resp.read(), resp.status
                    elif kwargs.pop('json', False):
                        r = await resp.json()
                    elif kwargs.pop('text', False):
                        r = await resp.text(encoding='utf-8')
                    return (r, resp.headers) if kwargs.get('rheaders') else r
        except Exception as e:
            # Surface errors while developing, swallow them in production.
            if self.bot.dev_mode:
                print(e)
                raise e
            return False
    def get_proxy(self):
        # Round-robin over the proxy list loaded in __init__.
        return self.get_key('proxies', self.proxies)
async def proxy_request(self, method:str, url:str, **kwargs):
json = kwargs.pop('json', False)
text = kwargs.pop('text', False)
b = kwargs.pop('b', False)
rheaders = kwargs.pop('rheaders', False)
#try 2 times incase bad proxy
retries = kwargs.pop('max_retries', 2)
for i in range(retries):
try:
async with Timeout(kwargs.pop('timeout', 10)):
ba = aiohttp.Basic
('user2016958', 'GDJCFP23')
proxy = f"http://{self.get_proxy()}:6060"
async with getattr(self.session, method)(url, proxy=proxy, proxy_auth=ba, **kwargs) as resp:
# assert resp.status == 200
if json:
r = await resp.json(content_type=None)
elif text:
r = await resp.text()
elif b:
r = BytesIO(await resp.read())
r.seek(0)
else:
r = resp
return (r, resp.headers) if rheaders else r
except Exception as e:
if self.bot.dev_mode:
print(e)
raise e
elif i < (retries - 1):
continue
return False
    async def generic_api(self, api:str, url:str=None, **kwargs):
        """Call an endpoint on the offload service (self.offload_ip:8765).

        kwargs: json -> return parsed JSON, raw -> return text, body -> send
        this payload instead of the remaining kwargs. Default returns a
        rewound BytesIO of the response (or the error text on 500/503).
        NOTE(review): the Authorization header is the literal "<KEY>"
        placeholder — this must be configured before the service will accept
        requests.
        """
        _json = kwargs.pop('json', False)
        #text kwarg taken
        raw = kwargs.pop('raw', False)
        if url:
            kwargs['url'] = url
        data = kwargs.pop('body', None) or kwargs
        headers = {
            'Authorization': "<KEY>"
        }
        data = await self.post_data(f"{self.offload_ip}:8765/{api}", data,
                                    headers=headers, read=not _json and not raw,
                                    text=raw, json=_json, timeout=120)
        if not data:
            return False
        elif _json or raw:
            return data
        code = data[1]
        if code == 500 or code == 503:
            return data[0].decode('utf-8')
        b = BytesIO(data[0])
        b.seek(0)
        return b
    async def f_api(self, api:str, *urls:str, **kwargs):
        """Call the fAPI image service with *urls* as images and kwargs as args.

        Returns parsed JSON (json=True), raw text (raw=True), the error text
        on non-200 status, or a rewound BytesIO of the image otherwise.
        SECURITY(review): the bearer token is hard-coded — move to config.
        """
        raw = kwargs.pop('raw', False)
        _json = kwargs.pop('json', False)
        payload = {'args': {**kwargs}}
        if urls:
            payload['images'] = list(urls)
        headers = {
            'Content-Type': 'application/json',
            'User-Agent': 'NotSoBot (superior bot)',
            'Authorization': "Bearer 4e3a26d97e64093299c3afffa3d54"
        }
        payload = json.dumps(payload, ensure_ascii=False)
        data = await self.post_data(f'{self.fapi_host}/{api}', payload, headers=headers,
                                    read=not raw and not _json, text=raw, json=_json, timeout=60)
        assert data, 'API is down or took too long.'
        if _json or raw:
            return data
        code = data[1]
        if code != 200:
            return data[0].decode()
        b = BytesIO(data[0])
        b.seek(0)
        return b
    async def run_process(self, args, response=False, b=False, stdin=None, shell=False):
        """Run a subprocess with a 120s budget.

        Returns stdout bytes (b=True), decoded/rstripped text (response=True)
        or True. NOTE(review): the ``loop=`` kwarg was removed from these
        asyncio APIs in Python 3.10; ``proc`` is unbound if the timeout fires
        during process creation; and a BytesIO ``stdin`` is passed straight to
        the subprocess (no fileno) — presumably asyncio.subprocess.PIPE was
        intended there. Confirm before relying on those paths.
        """
        func = asyncio.create_subprocess_exec
        if shell:
            func = asyncio.create_subprocess_shell
            args = (' '.join(args),)
        inp = stdin.read() if isinstance(stdin, BytesIO) else stdin
        stdin = stdin or asyncio.subprocess.PIPE
        try:
            async with Timeout(120):
                proc = await func(
                    *args, stdin=stdin, stderr=asyncio.subprocess.PIPE,
                    stdout=asyncio.subprocess.PIPE, loop=self.loop
                )
                data, _ = await proc.communicate(input=inp)
        except asyncio.TimeoutError:
            proc.terminate()
            # let it cleanup
            await asyncio.sleep(5)
            if not proc.returncode: # force kill if taking too long
                proc.kill()
            assert False, 'Processing timeout exceeded.'
        if b:
            b = BytesIO(data)
            return b.read()
        elif response:
            try:
                decoded = data.decode('ascii')
            except:
                decoded = data.decode('utf-8')
            return decoded.rstrip()
        return True
async def truncate(self, channel, content, *args, embeds=None, **kwargs):
if not embeds and not content:
return
split = [[content[i:i + 1999]] for i in range(0, len(content), 1999)]
if embeds:
l = len(split)
for i, e in enumerate(embeds):
if i <= l - 1:
split[i].append(e)
else:
split.append((None, e))
files = kwargs.pop('files', None)
for i, c in enumerate(split):
if files:
if i > 0:
files = None
else:
kwargs['files'] = files
await channel.send(c[0], *args, embed=c[1] if len(c) > 1 else None, **kwargs)
# accept Attachment or Embed
    # accept Attachment or Embed
    def image_check(self, i, ret=False, check=0):
        """Validate an Attachment/Embed as a usable image.

        check: 0 = static-image extensions, 1 = gif, None = skip the
        size/extension checks. Returns the url when ret=True, else a bool.
        """
        if isinstance(i, discord.embeds.Embed):
            i = i.image or i.thumbnail
        if i:
            # Reject unsized images (unless check is None) and anything over 5000px.
            if (check is not None and not i.width) or (i.width and (i.width > 5000 or i.height > 5000)):
                return False
            url = i.url
            host = URL(url).host
            # yarl has a parser bug with discord proxy cache args
            if not re.match(r"^(.*\.)?discordapp\.(net|com)", host, re.I):
                url = i.proxy_url
            if check is not None:
                check = self.extension_checks[check]
                if not check(url.lower()):
                    return False
            return url if ret else True
        return False
# check 0 = image, 1 = gif
async def get_attachment_image(self, ctx, check=0):
async for m in ctx.channel.history(before=ctx.message, limit=25):
for i in chain(m.attachments, m.embeds):
la = self.image_check(i, True, check)
if la:
return la
    async def get_images(self, ctx, **kwargs):
        """Collect image (or gif) URLs for a command from explicit urls,
        mentions, attachments, emoji, usernames, or recent channel history.

        kwargs: urls, limit (default 8), gif, msg (whether to message the
        user on errors), scale (+ float/negative/make_negative modifiers).
        Returns a url list, or (urls, scale, scale_msg) when ``scale`` is
        set, or False after informing the user of invalid input.
        """
        message = ctx.message
        mentions = [dget(message.mentions, id=int(x)) for x in \
            self.mention_regex.findall(message.content)]
        limit = kwargs.pop('limit', 8)
        urls = kwargs.pop('urls', [])
        gif = kwargs.pop('gif', False)
        msg = kwargs.pop('msg', True)
        img_urls = []
        if gif:
            check_func = self.isgif
        else:
            check_func = self.isimage
        # Normalise urls to a list.
        if urls is None:
            urls = []
        elif isinstance(urls, str):
            urls = [urls]
        else:
            urls = list(urls)
        scale = kwargs.pop('scale', None)
        scale_msg = None
        int_scale = None
        if scale:
            float_scale = kwargs.pop('float', False)
            neg_scale = kwargs.pop('negative', False)
            scale_limit = scale
            # One extra slot since the scale argument arrives via urls.
            limit += 1
        if urls and len(urls) > limit:
            await ctx.send('\N{NO ENTRY} `Max image limit (<= {0})`'.format(limit))
            ctx.command.reset_cooldown(ctx)
            return False
        # Mentioned users contribute their avatars.
        for user in mentions:
            if user:
                if not gif:
                    img_urls.append(str(user.avatar_url_as(format='png')))
                elif user.is_avatar_animated():
                    img_urls.append(str(user.avatar_url))
        icheck = int(gif)
        for i in message.attachments:
            if self.image_check(i, False, icheck):
                img_urls.append(i.proxy_url)
        # a / b flag that a fallback source (emoji/member avatar) already matched.
        a = False
        if gif:
            b = False
        for count, url in enumerate(urls, 1):
            user = None
            if url.startswith('<@'):
                continue
            ematch = self.emote_regex.match(url)
            if not ematch and url.startswith('<http'):
                url = url.strip('<').strip('>')
            if not url.startswith('http'):
                url = f'http://{url}'
            try:
                # The scale argument arrives as a bare number that the
                # normalisation above turned into "http://<number>".
                if scale:
                    surl = url[8:] if url.startswith('https://') else url[7:]
                    fl = float(surl)
                    if kwargs.pop('make_negative', False):
                        fl = fl * -1
                    f = math.floor(fl)
                    if str(abs(f) if neg_scale else f).isdigit():
                        int_scale = fl if float_scale else f
                        assert len(surl) < 15
                        scale_msg = '`Scale: {0}`\n'.format(abs(int_scale) if neg_scale else int_scale)
                        if abs(int_scale) > abs(scale_limit) and not await self.bot.is_owner(ctx.author):
                            int_scale = scale_limit
                            scale_msg = '`Scale: {0} (Limit: <= {1})`\n'.format(abs(int_scale) if neg_scale else int_scale, scale_limit)
                        continue
            except:
                pass
            check = await check_func(url)
            if not check and not gif:
                # Wrong media type, or fall back to emoji / member avatar.
                check = await self.isgif(url)
                if check:
                    if msg:
                        await ctx.send("\N{WARNING SIGN} This command is for images, not gifs (use `gmagik` or `gascii`)!")
                    ctx.command.reset_cooldown(ctx)
                    return False
                elif not img_urls or a:
                    name = url[8:] if url.startswith('https://') else url[7:]
                    e = await self.emoji_path(name, point=True)
                    if e:
                        img_urls.append(f'http://bot.mods.nyc/twemoji/{e}.png')
                        a = True
                        continue
                    else:
                        if ematch and not ematch.group(1):
                            img_urls.append(f'https://cdn.discordapp.com/emojis/{ematch.group(2)}.png')
                            a = True
                            continue
                        else:
                            member = await self.find_member(message, name, 2)
                            if member:
                                img_urls.append(str(member.avatar_url_as(format='png')))
                                a = True
                                continue
                    if msg:
                        await ctx.send('\N{WARNING SIGN} Unable to download or verify URL is valid.')
                    ctx.command.reset_cooldown(ctx)
                    return False
                else:
                    if msg:
                        await ctx.send('\N{WARNING SIGN} Image `{0}` is Invalid!'.format(count))
                    continue
            elif gif and not check:
                check = await self.isimage(url)
                if check:
                    if msg:
                        await ctx.send("\N{WARNING SIGN} This command is for gifs, not images (use `magik`)!")
                    ctx.command.reset_cooldown(ctx)
                    return False
                elif not img_urls or b:
                    if ematch and ematch.group(1):
                        img_urls.append(f'https://cdn.discordapp.com/emojis/{ematch.group(2)}.gif')
                        continue
                    else:
                        name = url[8:] if url.startswith('https://') else url[7:]
                        member = await self.find_member(message, name, 2)
                        if member and member.avatar and member.is_avatar_animated():
                            img_urls.append(str(member.avatar_url))
                            b = True
                            continue
                    if msg:
                        await ctx.send('\N{WARNING SIGN} Unable to download or verify URL is valid.')
                    ctx.command.reset_cooldown(ctx)
                    return False
                else:
                    if msg:
                        await ctx.send('\N{WARNING SIGN} Gif `{0}` is Invalid!'.format(count))
                    continue
            img_urls.append(url)
        if not img_urls:
            # Nothing explicit: fall back to recent channel history.
            attachment_images = await self.get_attachment_image(ctx, icheck)
            if attachment_images:
                img_urls.append(attachment_images)
            else:
                if msg:
                    await ctx.send("\N{NO ENTRY} Please input url(s){0}or attachment(s).".format(', mention(s) ' if not gif else ' '))
                ctx.command.reset_cooldown(ctx)
                return False
        img_urls = list(set(img_urls))
        if scale:
            return img_urls, int_scale, scale_msg
        return img_urls or False
def get_key(self, name, keys):
if name not in self.keys:
self.keys[name] = 0
count = self.keys[name]
try:
if count == len(keys):
self.keys[name] = 0
count = 0
return keys[count]
finally:
self.keys[name] += 1
async def cleanup_code(self, content, hastebin=False):
"""Automatically removes code blocks from the code."""
if content.startswith('```') and content.endswith('```'):
clean = '\n'.join(content.split('\n')[1:-1])
else:
clean = content.strip('` \n')
if clean.startswith('http'):
clean = await self.get_text(clean)
if hastebin:
if ' -haste' in clean:
clean = clean.replace(' -haste', '')
haste = True
else:
haste = False
return clean if not hastebin else (clean, haste)
@staticmethod
def get_syntax_error(e):
return '```py\n{0.text}{1:>{0.offset}}\n{2}: {0}```'.format(e, '^', type(e).__name__)
    async def command_help(self, ctx):
        """Send the help page(s) for the invoked (sub)command, reusing the
        bot's stored default help formatter."""
        cmd = ctx.invoked_subcommand or ctx.command
        hf = self.bot.old_help_command
        await hf.prepare_help_command(ctx, cmd)
        hf.context = ctx
        try:
            hf.add_command_formatting(cmd)
            hf.paginator.close_page()
            for page in hf.paginator.pages:
                # Presumably turns the leading ``` fence into ```fix for
                # syntax highlighting — confirm against the paginator output.
                await ctx.send(page.replace("\n", "fix\n", 1))
        finally:
            hf.context = None
    @staticmethod
    async def get_last_25(message):
        """Collect the authors of roughly the last 25 messages in this
        channel, using the client-side message cache first and falling back
        to the history endpoint when the cache is thin."""
        members = set()
        count = 0
        messages = message._state._messages
        for m in reversed(messages):
            if count >= 25:
                break
            elif m.channel == message.channel:
                members.add(m.author)
                count += 1
        if count < 20:
            try:
                async for m in message.channel.history(before=message, limit=25):
                    if len(members) >= 25:
                        break
                    members.add(m.author)
                    for member in m.mentions:
                        members.add(member)
                    messages.append(m)
            except:
                # Best-effort: reading history may be forbidden here.
                pass
        # cache members
        # guild = message.guild
        # for member in members:
        #     if guild.get_member(member.id) is None:
        #         guild._add_member(member)
        return members
    async def find_member(self, message, name, steps=2):
        """Resolve *name* (mention, id, or fuzzy username) to a Member/User.

        *steps* controls how fuzzy matching may get: step 0 exact name,
        step 1 prefix match, step 2 substring (capped at the available
        checks). Returns None when nothing matches.
        """
        guild = message.guild
        state = message._state
        gc = guild is None
        if gc:
            me = message.channel.me
        member = None
        if not isinstance(name, str):
            name = str(name)
        mention = self.mention_regex.match(name)
        if mention:
            uid = int(mention.group(1))
            if gc:
                member = dget([message.channel.recipient, me], id=uid)
            else:
                member = dget(message.mentions, id=uid)
            return member or await get_user_info(state, uid)
        try:
            # Raw numeric ids: try guild fetch, then the global user lookup.
            if member is None and not self.bot.self_bot and name.isdigit() and self.is_id(name):
                uid = int(name)
                if not gc:
                    try:
                        member = await guild.fetch_member(uid)
                    except discord.NotFound:
                        pass
                if member is None:
                    member = await get_user_info(state, uid)
        except ValueError:
            pass
        if member is None:
            name = name.lower()
            if not gc:
                # give it a head start
                async def query_members():
                    try:
                        return await guild.query_members(
                            name, limit=5, cache=True
                        )
                    # TimeoutError not instance of asyncio for Tasks...
                    except: # handle timeouts if we dont reach the await below
                        pass
                query = self.loop.create_task(query_members())
            last_25 = None
            # Increasingly fuzzy predicates: exact, prefix, substring.
            checks = [lambda m: m.name.lower() == name or m.display_name.lower() == name or str(m).lower() == name,
                      lambda m: m.name.lower().startswith(name) or m.display_name.lower().startswith(name) or m.id == name,
                      lambda m: name in m.display_name.lower() or name in m.name.lower()]
            for i in range(steps if steps <= len(checks) else len(checks)):
                check = checks[i]
                if i == 2 or (gc and i == 1): # last step or DM & 2nd
                    member = discord.utils.find(checks[1], self.bot.get_all_members())
                elif not gc: # not DM/Group Chat
                    if last_25 is None:
                        last_25 = await self.get_last_25(message)
                    member = discord.utils.find(check, last_25)
                    if member is None:
                        # check cache first for previous query
                        member = discord.utils.find(check, guild.members)
                    if member is None:
                        # query is basically discords version of startswith
                        if not gc and i >= 1:
                            # lets fetch members & populate member cache with any hits
                            await query
                            member = discord.utils.find(check, guild.members)
                else:
                    member = discord.utils.find(check, [message.author, me])
                if member is not None:
                    break
        return member
def random(self, image=False, ext:str=None):
h = str(uuid.uuid4().hex)
if image:
return f"{h}.{ext or '.png'}"
return h
def get_color(self):
if self.color_count >= len(self.colors):
self.color_count = 0
color = self.colors[self.color_count]
self.color_count += 1
return getattr(discord.Color, color)
# kwargs: path=None, fmt=None, point=False, verify=False
async def emoji_path(self, e, **kwargs):
load = await self.generic_api(
"is_emoji", json=True,
e=e, **kwargs
)
return load['result'] if load else load
async def png_svg(self, paths, size):
load = await self.generic_api("svg_emoji",
paths=paths, size=size,
json=True
)
if load:
return [BytesIO(b64decode(x)) for x in load['images']]
return []
#http://stackoverflow.com/a/34084933
def get_deep_text(self, element):
try:
text = element.text or ''
for subelement in element:
text += self.get_deep_text(subelement)
text += element.tail or ''
return text
except:
return ''
    def format_code(self, txt:str, lang:str='py', truncate=True):
        """Wrap *txt* in a markdown code block, truncating oversized text to
        fit Discord's 2000-character message limit."""
        if lang is None:
            lang = ''
        if not isinstance(txt, str):
            txt = str(txt)
        if truncate and len(txt) >= 2000:
            # NOTE(review): [9:1996] also drops the first 9 characters —
            # looks like a typo for [:1996]; confirm intent before changing.
            txt = txt[9:1996]
        return self.code_block.format(lang, txt)
    async def repl(self, code):
        """Owner-only REPL: compile *code* as eval (single line) or exec,
        run it with bot globals, await the result if awaitable, and return
        captured stdout plus the result or traceback.

        SECURITY: executes arbitrary code — this must only ever be reachable
        by the bot owner.
        """
        variables = {
            'bot': self.bot,
            'last': None,
            'commands': commands,
            'discord': discord,
            'asyncio': asyncio,
            'cursor': self.cursor
        }
        cleaned = await self.cleanup_code(code)
        executor = exec
        # Single-line input is evaluated as an expression when possible.
        if cleaned.count('\n') == 0:
            try:
                code = compile(cleaned, '<repl session>', 'eval')
            except SyntaxError:
                pass
            else:
                executor = eval
        if executor is exec:
            try:
                code = compile(cleaned, '<repl session>', 'exec')
            except SyntaxError as e:
                return self.get_syntax_error(e)
        fmt = None
        stdout = StringIO()
        try:
            with redirect_stdout(stdout):
                result = executor(code, variables)
                if inspect.isawaitable(result):
                    result = await result
        except Exception as e:
            value = stdout.getvalue()
            fmt = f'{value}{traceback.format_exc()}'
        else:
            value = stdout.getvalue()
            if result is not None:
                fmt = f'{value}{result}'
                # Expose the last result to the next repl invocation.
                variables['last'] = result
            elif value:
                fmt = value
        return fmt
#DISABLE CODE REVIEWING AT THIS POINT
def get_lang(self, lang):
m = self.rex_map
key = None
if lang.isdigit() and lang in m.keys():
key = lang
else:
langs = list(chain(*m.values()))
if lang in langs:
for idx in m:
v = m[idx]
if (isinstance(v, list) and lang in v) or lang == v:
key = idx
break
if key:
cmap = self.rex_compiler_map
return (key, cmap[key]) if key in cmap.keys() else key
return False
async def code_api(self, lang:str, code:str, fmt:bool=True):
    """Run *code* on rextester.com and return its output.

    Args:
        lang: Language alias or numeric rextester key (see get_lang).
        code: Source code, possibly inside a code fence.
        fmt: Wrap the output in a local code block when True.

    Returns:
        (output, stats) on success, or (False, error message) when the
        language is unknown or the request failed.
        NOTE(review): when the request succeeds but Result/Errors/Warnings
        are all empty strings, this falls through and returns None --
        confirm callers tolerate that.
    """
    lang = self.get_lang(lang)
    if not lang:
        return False, '\N{NO ENTRY} `Invalid Language.`'
    code = await self.cleanup_code(code)
    payload = aiohttp.FormData()
    if isinstance(lang, tuple):
        # get_lang returned (key, compiler args).
        payload.add_field('CompilerArgs', lang[1])
        lang = lang[0]
    payload.add_field('LanguageChoice', str(lang))
    # payload.add_field('EditorChoiceWrapper', 1)
    payload.add_field('Program', code)
    # payload.add_field('IsInEditMode', False)
    # payload.add_field('IsLive', False)
    # payload.add_field('ShowWarnings', False)
    r = await self.proxy_request('post',
        'https://rextester.com/rundotnet/api',
        data=payload, json=True,
    )
    if r:
        if not r['Result'] and not r['Errors']:
            return False, 'Empty Response'
        # Prefer program output, then compile/runtime errors, then warnings.
        output = r['Result'] or r['Errors'] or r['Warnings']
        if output:
            #Rex/print functions add new line for results at end
            if output.endswith('\n'):
                output = output[:-1]
            if fmt:
                output = self.format_code(output)
            output = re.sub("\u202E", "", output) # get rid of known baddies from result.
            return output, r['Stats']
    else:
        return False, f'\N{WARNING SIGN} `{self.rex_map[lang][0]}` execution failed.'
#RE-ENABLE CODE REVIEWING AT THIS POINT
def save_image(self, img, ext:str='png'):
    """Serialize a PIL-style image into an in-memory buffer.

    Args:
        img: Object exposing PIL's Image.save(fp, format) interface.
        ext: Output format name passed to img.save (e.g. 'png').

    Returns:
        A BytesIO rewound to position 0 with the encoded image, or False
        when encoding fails.
    """
    b = BytesIO()
    try:
        img.save(b, ext)
    # Narrowed from a bare 'except', which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        return False
    b.seek(0)
    return b
async def merge_images(self, imgs, method=1, vertical=False):
    """Merge image URLs via the remote "merge" API family.

    Args:
        imgs: List of image URLs (JSON-encoded for the API).
        method: Merge algorithm; >1 selects the "mergeN" endpoint.
        vertical: Stack vertically when True.
    """
    endpoint = "merge" if method <= 1 else "merge" + str(method)
    return await self.generic_api(
        endpoint, urls=json.dumps(imgs),
        vertical=int(vertical)
    )
async def check_cooldown(self, bucket, ctx, msg, check=False):
    """Notify *ctx* about a cooldown bucket's state.

    Args:
        bucket: Key into self.buckets.
        ctx: Command context (used for bucketing and replying).
        msg: Message template formatted with the rate-limit value.
        check: When True notify if rate-limited; when False notify if NOT
            rate-limited.
    """
    cooldown = self.buckets[bucket].get_bucket(ctx)
    retry_after = cooldown.is_rate_limited()
    should_notify = retry_after if check else not retry_after
    if should_notify:
        await ctx.send(msg.format(retry_after))
@staticmethod
def check_perms(obj, **perms):
return all(getattr(obj, x, None) == perms[x] for x in perms)
# Imitate discord clients algo
def get_default_channel(self, user, guild, **kwargs):
    """Return the lowest-positioned text channel *user* can read in
    *guild*, or None when there is none.

    Extra keyword permissions narrow the candidate set further.
    """
    def _visible(channel):
        # Text channels only, and the user must at least read them.
        if not isinstance(channel, discord.TextChannel):
            return False
        perms = channel.permissions_for(user)
        return self.check_perms(perms, read_messages=True, **kwargs)

    candidates = [channel for channel in guild.channels if _visible(channel)]
    if candidates:
        return min(candidates, key=lambda channel: channel.position)
def get_member(self, uid:int):
    """Find a member with the given id across all guilds the bot sees."""
    all_members = self.bot.get_all_members()
    return dget(all_members, id=uid)
def get_user(self, uid:int):
    """Find a cached user object by id."""
    known_users = self.bot.users
    return dget(known_users, id=uid)
def format_time(self, obj):
    """Format a datetime as 'MM/DD/YYYY HH:MM:SS'."""
    return f"{obj:%m/%d/%Y %H:%M:%S}"
async def git_update(self):
    """Pull the latest code in /discord/ and refresh the release tag."""
    pull_output = await self.run_process(['git', '-C', '/discord/', 'pull'], True)
    print(pull_output)
    await self.update_release()
async def hastebin(self, content:str, host='hb.wrmsr.io'):
    """Upload *content* to a hastebin-compatible host.

    Returns:
        The document URL, or None when the upload failed.
    """
    load = await self.post_data(f'https://{host}/documents', content,
                                json=True, timeout=15)
    if not load or 'key' not in load:
        return None
    return f"https://{host}/{load['key']}"
@staticmethod
def get_role_color(user):
    """Return the color *user*'s name renders with.

    That is the color of the highest-positioned role with an explicit
    (non-default) color; falls back to white (0xffffff) for users without
    roles (plain User objects) or with no colored role.

    The original built a 'color_roles' list and then re-scanned a sorted
    copy of all roles for the same predicate; a single max() keeps one
    pass and one filter.
    """
    default = discord.Color.default()
    white = discord.Color(0xffffff)
    roles = getattr(user, 'roles', None)
    if not roles:
        return white
    colored = [role for role in roles if role.color != default]
    if not colored:
        return white
    # Highest role wins, imitating the Discord client.
    return max(colored, key=lambda role: role.position).color
# Check if something is similar to a snowflake (for 2018 and a while after that)
@staticmethod
def is_id(n:str):
return n.isdigit() and 15 <= len(n) <= 22
async def store_cache(self, key, data, expiration=None):
    """Pipe *data* into the imis cache under *key* and return its URL.

    Args:
        key: Cache key appended to the imis host URL.
        data: Payload to store.
        expiration: Optional time-to-live forwarded as X-Delete-After.

    Raises:
        AssertionError: When the upload did not succeed.
    """
    api = f"{self.imis_host}/{key}"
    headers = {'Authorization': 'apple_juice'}
    if expiration:
        headers['X-Delete-After'] = str(expiration)
    stored = await self.post_data(api, data, headers=headers)
    assert stored, "Piping failed, cache miss."
    return api
async def update_release(self, first_run=False):
    """Record the current git commit hash as the running release.

    Args:
        first_run: Skip Sentry re-initialization during initial boot.
    """
    commit_hash = await self.run_process([
        'git', '-C', '/discord/', 'rev-parse', 'HEAD'
    ], True)
    # It's too early during boot: THE CORE isn't loaded yet.
    if not first_run:
        sentry_sdk.init(dsn=self.dsn, release=commit_hash)
    self.release = commit_hash
# Commit hash changes as we run update commands.
@property
def release(self):
    """Current release identifier (git commit hash set by update_release)."""
    return self._release

@release.setter
def release(self, x):
    self._release = x
def setup(bot):
    """discord.py extension entry point: attach the Funcs cog to *bot*."""
    bot.add_cog(Funcs(bot))
|
<reponame>ankitkariryaa/MultiPlanarUNet<filename>mpunet/callbacks/callbacks.py
import tensorflow as tf
import psutil
import numpy as np
import os
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback
from datetime import datetime
from mpunet.logging import ScreenLogger
from mpunet.utils.plotting import (imshow_with_label_overlay, imshow,
plot_all_training_curves)
class DividerLine(Callback):
    """
    Simply prints a line to screen after each epoch
    """
    def __init__(self, logger=None):
        """
        Args:
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
        """
        super().__init__()
        self.logger = logger or ScreenLogger()

    def on_epoch_end(self, epoch, logs=None):
        divider = "-" * 45
        self.logger(divider + "\n")
class LearningCurve(Callback):
    """
    On epoch end this callback looks for all csv files matching the 'csv_regex'
    regex within the dir 'out_dir' and attempts to create a learning curve for
    each file that will be saved to 'out_dir'.

    Note: Failure to plot a learning curve based on a given csv file
          is handled in the plot_all_training_curves function and will not
          cause the LearningCurve callback to raise an exception.
    """
    def __init__(self, log_dir="logs", out_dir="logs", fname="curve.png",
                 csv_regex="*training.csv", logger=None):
        """
        Args:
            log_dir: Relative path to the directory storing the csv files
            out_dir: Directory in which the learning-curve image is saved
            fname: Filename of the saved learning-curve image
            csv_regex: Glob-style pattern matching the csv files to plot
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
        """
        super().__init__()
        out_dir = os.path.abspath(out_dir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        self.csv_regex = os.path.join(os.path.abspath(log_dir), csv_regex)
        self.save_path = os.path.join(out_dir, fname)
        self.logger = logger or ScreenLogger()

    def on_epoch_end(self, epoch, logs=None):
        # 'logs=None' replaces the original shared mutable default 'logs={}'.
        plot_all_training_curves(self.csv_regex,
                                 self.save_path,
                                 logy=True,
                                 raise_error=False,
                                 logger=self.logger)
class MemoryConsumption(Callback):
    """
    Logs this process' resident memory after each epoch and optionally stops
    training once a GiB limit is exceeded.
    """
    def __init__(self, max_gib=None, round_=2, logger=None):
        """
        Args:
            max_gib: Optional memory limit in GiB; when exceeded, training
                     is stopped.
            round_: Number of decimals for the logged GiB value.
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
        """
        # Fixes vs. original: call the Callback initializer, and give
        # 'logger' a usable default (it was left as None but is needed below).
        super().__init__()
        self.max_gib = max_gib
        self.logger = logger or ScreenLogger()
        self.round_ = round_

    def on_epoch_end(self, epoch, logs=None):
        # 'logs=None' replaces the original mutable default 'logs={}',
        # which this method mutates.
        logs = logs if logs is not None else {}
        process = psutil.Process(os.getpid())
        mem_bytes = process.memory_info().rss
        mem_gib = round(mem_bytes / (1024**3), self.round_)
        logs['memory_usage_gib'] = mem_gib
        if self.max_gib and mem_gib >= self.max_gib:
            # The original called self.warn, which exists on neither this
            # class nor tf.keras Callback -> AttributeError at the limit.
            self.logger("Stopping training from callback 'MemoryConsumption'! "
                        "Total memory consumption of {} GiB exceeds limitation"
                        " (self.max_gib = {}) ".format(mem_gib, self.max_gib))
            self.model.stop_training = True
class DelayedCallback(object):
    """
    Callback wrapper that delays the functionality of another callback by N
    number of epochs.
    """
    def __init__(self, callback, start_from=0, logger=None):
        """
        Args:
            callback: A tf.keras callback
            start_from: Delay the activity of 'callback' until this epoch
                        'start_from'
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
        """
        self.logger = logger or ScreenLogger()
        self.callback = callback
        self.start_from = start_from

    def __getattr__(self, item):
        # Delegate anything we do not override to the wrapped callback.
        return getattr(self.callback, item)

    def on_epoch_end(self, epoch, logs=None):
        if epoch < self.start_from - 1:
            self.logger("[%s] Not active at epoch %i - will be at %i" %
                        (self.callback.__class__.__name__,
                         epoch + 1, self.start_from))
            return
        self.callback.on_epoch_end(epoch, logs=logs)
class TrainTimer(Callback):
    """
    Appends train timing information to the log.
    If called prior to tf.keras.callbacks.CSVLogger this information will
    be written to disk.
    """
    def __init__(self, logger=None, max_minutes=None, verbose=1):
        """
        Args:
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
            max_minutes: Optional wall-clock budget in minutes; training is
                         stopped once exceeded.
            verbose: Print per-epoch timing when truthy.
        """
        super().__init__()
        self.logger = logger or ScreenLogger()
        self.max_minutes = int(max_minutes) if max_minutes else None
        self.verbose = bool(verbose)
        # Timing attributes
        self.train_begin_time = None
        self.prev_epoch_time = None

    @staticmethod
    def parse_dtime(tdelta, fmt):
        """Format a timedelta via days/hours/minutes/seconds format keys."""
        # https://stackoverflow.com/questions/8906926/
        # formatting-python-timedelta-objects/17847006
        d = {"days": tdelta.days}
        d["hours"], rem = divmod(tdelta.seconds, 3600)
        d["minutes"], d["seconds"] = divmod(rem, 60)
        return fmt.format(**d)

    def on_train_begin(self, logs=None):
        self.train_begin_time = datetime.now()

    def on_epoch_begin(self, epoch, logs=None):
        self.prev_epoch_time = datetime.now()

    def on_epoch_end(self, epoch, logs=None):
        """Add formatted epoch/total train times to logs; enforce budget."""
        # Keras normally passes a dict here, but the original crashed with a
        # TypeError on the 'logs=None' default when invoked manually.
        logs = logs if logs is not None else {}
        # Compute epoch execution time
        end_time = datetime.now()
        epoch_time = end_time - self.prev_epoch_time
        train_time = end_time - self.train_begin_time
        # Update attributes
        self.prev_epoch_time = end_time
        # Add to logs (written to disk when CSVLogger runs after this)
        time_fmt = "{days:02}d:{hours:02}h:{minutes:02}m:{seconds:02}s"
        logs["train_time_epoch"] = self.parse_dtime(epoch_time, time_fmt)
        logs["train_time_total"] = self.parse_dtime(train_time, time_fmt)
        if self.verbose:
            self.logger("[TrainTimer] Epoch time: %.1f minutes "
                        "- Total train time: %s"
                        % (epoch_time.total_seconds()/60,
                           logs["train_time_total"]))
        if self.max_minutes and train_time.total_seconds()/60 > self.max_minutes:
            self.logger("Stopping training. Training ran for {} minutes, "
                        "max_minutes of {} was specified on the TrainTimer "
                        "callback.".format(train_time.total_seconds()/60,
                                           self.max_minutes))
            self.model.stop_training = True
class FGBatchBalancer(Callback):
    """
    mpunet callback.
    Sets the forced FG fraction in a batch at each epoch to 1-recall over the
    validation data at the previous epoch
    """
    def __init__(self, train_data, val_data=None, logger=None):
        """
        Args:
            train_data: A mpunet.sequence object representing the
                        training data
            val_data: A mpunet.sequence object representing the
                      validation data
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
        """
        super().__init__()
        self.data = (("train", train_data), ("val", val_data))
        self.logger = logger or ScreenLogger()
        # Flips to False permanently once val_recall is found missing.
        self.active = True

    def on_epoch_end(self, epoch, logs=None):
        if not self.active:
            return None
        recall = logs.get("val_recall")
        if recall is None:
            self.logger("[FGBatchBalancer] No val_recall in logs. "
                        "Disabling callback. "
                        "Did you put this callback before the validation "
                        "callback?")
            self.active = False
            return None
        # Always at least 1 image slice
        fraction = max(0.01, 1 - recall)
        for name, dataset in self.data:
            if dataset is None:
                continue
            dataset.fg_batch_fraction = fraction
            self.logger("[FGBatchBalancer] Setting FG fraction for %s "
                        "to: %.4f - Now %s/%s" % (name,
                                                  fraction,
                                                  dataset.n_fg_slices,
                                                  dataset.batch_size))
class MeanReduceLogArrays(Callback):
    """
    On epoch end, goes through the log and replaces any array entries with
    their mean value.
    """
    def __init__(self):
        super().__init__()

    def on_epoch_end(self, epoch, logs=None):
        """Replace array/list log values with their scalar means, in place.

        Args:
            epoch: Integer epoch index (unused).
            logs: Dict of metric values; ndarray/list entries are replaced.
        """
        # 'logs=None' replaces the original mutable default 'logs={}',
        # which this method mutates.
        logs = logs if logs is not None else {}
        for key, value in logs.items():
            if isinstance(value, (np.ndarray, list)):
                logs[key] = np.mean(value)
class PrintLayerWeights(Callback):
    """
    Print the weights of a specified layer every some epoch or batch.
    """
    def __init__(self, layer, every=10, first=10, per_epoch=False, logger=None):
        """
        Args:
            layer: A tf.keras layer, or an int index into model.layers
            every: Print the weights every 'every' batch or epoch if
                   per_epoch=True
            first: Print the first 'first' elements of each weight matrix
            per_epoch: Print after 'every' epoch instead of batch
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
        """
        super().__init__()
        if isinstance(layer, int):
            # NOTE(review): Keras attaches self.model only after the
            # callback is registered, so an int 'layer' likely raises here
            # at construction time -- confirm how this is instantiated.
            self.layer = self.model.layers[layer]
        else:
            self.layer = layer
        self.first = first
        self.every = every
        self.logger = logger or ScreenLogger()
        self.per_epoch = per_epoch
        if per_epoch:
            # Apply on every epoch instead of per batches
            self.on_epoch_begin = self.on_batch_begin
            self.on_batch_begin = lambda *args, **kwargs: None
        self.log()

    def log(self):
        """Log this callback's configuration."""
        self.logger("PrintLayerWeights Callback")
        self.logger("Layer: ", self.layer)
        self.logger("Every: ", self.every)
        self.logger("First: ", self.first)
        self.logger("Per epoch: ", self.per_epoch)

    def on_batch_begin(self, batch, logs=None):
        if batch % self.every:
            return
        weights = self.layer.get_weights()
        self.logger("Weights for layer '%s'" % self.layer)
        self.logger("Weights:\n%s" % weights[0].ravel()[:self.first])
        try:
            # Fixed typo in the printed label ("Baises" -> "Biases").
            self.logger("Biases:\n%s" % weights[1].ravel()[:self.first])
        except IndexError:
            # Layer has no bias term.
            pass
class SaveOutputAs2DImage(Callback):
    """
    Save random 2D slices from the output of a given layer during training.
    """
    def __init__(self, layer, sequence, model, out_dir, every=10, logger=None):
        """
        Args:
            layer: A tf.keras layer
            sequence: A MultiPlanar.sequence object from which batches are
                      sampled and pushed through the graph to output of layer
            model: A tf.keras model object
            out_dir: Path to directory (existing or non-existing) in which
                     images will be stored
            every: Perform this operation every 'every' batches
            logger: An instance of a MultiPlanar Logger that prints to screen
                    and/or file
        """
        super().__init__()
        self.every = every
        self.seq = sequence
        self.layer = layer
        self.epoch = None  # set in on_epoch_begin
        self.model = model
        self.logger = logger or ScreenLogger()
        self.out_dir = out_dir
        if not os.path.exists(out_dir):
            os.makedirs(self.out_dir)
        self.log()

    def log(self):
        """Log this callback's configuration."""
        self.logger("Save Output as 2D Image Callback")
        self.logger("Layer: ", self.layer)
        self.logger("Every: ", self.every)

    def on_epoch_begin(self, epoch, logs=None):
        self.epoch = epoch

    def on_batch_end(self, batch, logs=None):
        if batch % self.every:
            return
        # Get output of layer
        # NOTE(review): predict_on_batch is invoked with no arguments and
        # its result discarded -- looks like a leftover; confirm intent.
        self.model.predict_on_batch()
        # TF1-style session API: fetch the layer output by feeding a batch.
        sess = tf.keras.backend.get_session()
        X, _, _ = self.seq[0]
        outs = sess.run([self.layer.output], feed_dict={self.model.input: X})[0]
        if isinstance(outs, list):
            outs = outs[0]
        for i, (model_in, layer_out) in enumerate(zip(X, outs)):
            fig = plt.figure(figsize=(12, 6))
            ax1 = fig.add_subplot(121)
            ax2 = fig.add_subplot(122)
            # Plot model input and layer outputs on each ax
            chl1, axis, slice = imshow(ax1, model_in)
            chl2, _, _ = imshow(ax2, layer_out, axis=axis, slice=slice)
            # Set labels and save figure
            ax1.set_title("Model input - Channel %i - Axis %i - Slice %i"
                          % (chl1, axis,slice), size=22)
            ax2.set_title("Layer output - Channel %i - Axis %i - Slice %i"
                          % (chl2, axis, slice), size=22)
            fig.tight_layout()
            fig.savefig(os.path.join(self.out_dir, "epoch_%i_batch_%i_im_%i" %
                                     (self.epoch, batch, i)))
            plt.close(fig)
class SavePredictionImages(Callback):
    """
    Save images after each epoch of training of the model on a batch of
    training and a batch of validation data sampled from sequence objects.

    Saves the input image with ground truth overlay as well as the predicted
    label masks.
    """
    def __init__(self, train_data, val_data, outdir='images'):
        """
        Args:
            train_data: A mpunet.sequence object from which training
                        data can be sampled via the __getitem__ method.
            val_data: A mpunet.sequence object from which validation
                      data can be sampled via the __getitem__ method.
            outdir: Path to directory (existing or non-existing) in which
                    images will be stored.
        """
        super().__init__()
        self.train_data = train_data
        self.val_data = val_data
        self.save_path = os.path.abspath(os.path.join(outdir, "pred_images_at_epoch"))
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

    def pred_and_save(self, data, subdir):
        """Sample a random batch from *data*, predict, and save the plots."""
        # Get a random batch
        X, y, _ = data[np.random.randint(len(data))]
        # Predict on the batch
        pred = self.model.predict(X)
        subdir = os.path.join(self.save_path, subdir)
        if not os.path.exists(subdir):
            os.mkdir(subdir)
        # Plot each sample in the batch
        for i, (im, lab, p) in enumerate(zip(X, y, pred)):
            fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(12, 6))
            lab = lab.reshape(im.shape[:-1] + (lab.shape[-1],))
            p = p.reshape(im.shape[:-1] + (p.shape[-1],))
            # Imshow ground truth on ax2
            # This function will determine which channel, axis and slice to
            # show and return so that we can use them for the other 2 axes.
            # ('slice_idx' renamed from 'slice', which shadowed the builtin.)
            chnl, axis, slice_idx = imshow_with_label_overlay(ax2, im, lab,
                                                              lab_alpha=1.0)
            # Imshow pred on ax3
            imshow_with_label_overlay(ax3, im, p, lab_alpha=1.0,
                                      channel=chnl, axis=axis, slice=slice_idx)
            # Imshow raw image on ax1
            # Chose the same slice, channel and axis as above
            im = im[..., chnl]
            im = np.moveaxis(im, axis, 0)
            if slice_idx is not None:
                # Only for 3D images
                im = im[slice_idx]
            ax1.imshow(im, cmap="gray")
            # Set labels
            ax1.set_title("Image", size=18)
            ax2.set_title("True labels", size=18)
            ax3.set_title("Prediction", size=18)
            fig.tight_layout()
            with np.testing.suppress_warnings() as sup:
                sup.filter(UserWarning)
                fig.savefig(os.path.join(subdir, str(i) + ".png"))
            plt.close(fig.number)

    def on_epoch_end(self, epoch, logs=None):
        # 'logs=None' replaces the original mutable default 'logs={}'.
        self.pred_and_save(self.train_data, "train_%s" % epoch)
        if self.val_data is not None:
            self.pred_and_save(self.val_data, "val_%s" % epoch)
|
<reponame>arve0/leicacam<filename>test/test_cam.py
"""Tests for cam module."""
import socket
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
from leicacam.cam import CAM, bytes_as_dict, tuples_as_bytes, tuples_as_dict
# pylint: disable=redefined-outer-name, unnecessary-pass,
def flush():
    """No-op flush used to replace CAM.flush on the fixture instance."""
    return None
@pytest.fixture
def mock_socket():
    """Return a mock echo socket.

    Uses an EchoSocket so sends are echoed back by recv; close() is
    replaced with a MagicMock so tests can assert it was called.
    """
    echo_socket = EchoSocket()
    echo_socket.close = MagicMock()
    return echo_socket
@pytest.fixture
def cam(mock_socket):
    """Yield a CAM instance with a mock socket.

    socket.socket is patched so CAM() connects to the echo socket, and
    flush is replaced with a no-op so queued test messages survive.
    """
    with patch("socket.socket") as mock_socket_class:
        mock_socket_class.return_value = mock_socket
        mock_cam = CAM()
        mock_cam.flush = flush
        yield mock_cam
class EchoSocket:
    """Dummy echo socket for mocking."""

    # Last message "sent"; echoed back by recv.
    msg = ""

    def send(self, msg):
        """Store *msg* and report its length, like socket.send."""
        self.msg = msg
        return len(msg)

    def recv(self, buffer_size):
        """Return up to *buffer_size* of the last sent message."""
        return self.msg[:buffer_size]

    def connect(self, where):
        """Pretend to connect."""

    def settimeout(self, timeout):
        """Pretend to set a timeout."""

    def close(self):
        """Pretend to close."""
# TEST
# key (here cli) overridden if defined several times
# prefix added
# types (integer, float) should be converted to strings
def test_echo(cam):
    """Prefix + command sent should be same as echoed socket message."""
    command = [
        ("cli", "custom"),
        ("cmd", "enableall"),
        ("value", "true"),
        ("integer", 1234),
        ("float", 0.00234),
    ]
    expected = tuples_as_dict(cam.prefix + command)
    cam.send(command)
    assert cam.receive()[0] == expected
def test_send_bytes(cam):
    """Test send a bytes string."""
    raw = b"/cmd:enableall /value:true"
    expected = bytes_as_dict(cam.prefix_bytes + raw)
    cam.send(raw)
    assert cam.receive()[0] == expected
def test_flush():
    """Test flush method."""
    cmd = b"/cmd:startscan\n"
    mock_recv = MagicMock()
    # First recv returns one buffered message; the second raises so
    # flush() stops draining the socket.
    mock_recv.side_effect = [cmd, socket.error()]
    with patch("socket.socket") as mock_socket_class:
        mock_socket = MagicMock()
        mock_socket_class.return_value = mock_socket
        cam = CAM()
        cam.socket.recv = mock_recv
        cam.flush()
        # flush() keeps reading until the socket raises: exactly two calls.
        assert len(mock_recv.mock_calls) == 2
        _, args, _ = mock_recv.mock_calls[0]
        # CAM reads in 1024-byte chunks.
        assert args == (1024,)
def test_receive_error(cam):
    """Test receive method when a socket error happens."""
    cam.socket.recv = MagicMock(side_effect=socket.error())
    result = cam.receive()
    assert isinstance(result, list)
    assert not result
def test_commands(cam):
    """Short hand commands should work as intended."""
    # (shorthand method, command tuples it should expand to)
    cases = [
        (cam.get_information, [("cmd", "getinfo"), ("dev", "stage")]),
        (cam.start_scan, [("cmd", "startscan")]),
        (cam.stop_scan, [("cmd", "stopscan")]),
        (cam.autofocus_scan, [("cmd", "autofocusscan")]),
        (cam.pause_scan, [("cmd", "pausescan")]),
        (cam.enable, [
            ("cmd", "enable"),
            ("slide", "0"),
            ("wellx", "1"),
            ("welly", "1"),
            ("fieldx", "1"),
            ("fieldy", "1"),
            ("value", "true"),
        ]),
        (cam.disable, [
            ("cmd", "enable"),
            ("slide", "0"),
            ("wellx", "1"),
            ("welly", "1"),
            ("fieldx", "1"),
            ("fieldy", "1"),
            ("value", "false"),
        ]),
        (cam.enable_all, [("cmd", "enableall"), ("value", "true")]),
        (cam.disable_all, [("cmd", "enableall"), ("value", "false")]),
        (cam.save_template, [
            ("sys", "0"),
            ("cmd", "save"),
            ("fil", "{ScanningTemplate}leicacam.xml"),
        ]),
    ]
    for shorthand, expected_cmd in cases:
        should_be = tuples_as_dict(cam.prefix + expected_cmd)
        assert shorthand() == should_be
def test_load(cam):
    """load_template should strip path and .xml from filename."""
    for name in ("test", "test.xml", "/path/to/{ScanningTemplate}test.xml"):
        assert cam.load_template(name)["fil"] == "{ScanningTemplate}test"
def test_wait_for_timeout(cam):
    """Test wait_for when timeout expires."""
    assert cam.wait_for("cmd", "stopscan", 0) == OrderedDict()
def test_wait_for_long_timeout(cam, mock_socket):
    """Test wait_for when timeout expires."""
    cmd = "cmd"
    value = "stopscan"
    timeout = 1
    # The socket never yields data, so wait_for polls until time runs out.
    mock_socket.recv = MagicMock()
    mock_socket.recv.return_value = b""
    # time() side effects: start at 0, poll at 0, then 120 -> expired.
    time_patch = patch("leicacam.cam.time", side_effect=[0, 0, 120])
    sleep_patch = patch("leicacam.cam.sleep")
    with sleep_patch, time_patch:
        response = cam.wait_for(cmd, value, timeout)
    assert response == OrderedDict()
def test_wait_for_any_value(cam):
    """Test wait_for a command and any value."""
    command = [("cmd", "startscan")]
    cam.send(command)
    expected = tuples_as_dict(cam.prefix + command)
    assert cam.wait_for("cmd", None) == expected
def test_close(cam, mock_socket):
    """Test closing the socket."""
    cam.close()
    close_calls = mock_socket.close.call_count
    assert close_calls == 1
def test_receive_colon_string(cam):
    """Test bytes_as_dict function receiving a string with colon."""
    command = [("relpath", "C:\\image.ome.tif")]
    cam.socket.recv = MagicMock(return_value=tuples_as_bytes(command))
    messages = cam.receive()
    assert isinstance(messages, list)
    assert all(message == OrderedDict(command) for message in messages)
def test_receive_bad_string(cam):
    """Test bytes_as_dict function receiving an incomplete command."""
    expected = OrderedDict([("cmd", "enableall")])
    cam.socket.recv = MagicMock(return_value="/cmd:enableall /value".encode())
    messages = cam.receive()
    assert isinstance(messages, list)
    for message in messages:
        assert message == expected
def test_receive_terminate_null_byte(cam):
    """Test _parse_receive function parsing a message with null byte."""
    expected = [
        OrderedDict([("cmd", "startscan")]),
        OrderedDict([("cmd", "stopscan")]),
    ]
    cam.socket.recv = MagicMock(return_value=b"/cmd:startscan\x00/cmd:stopscan\r\n")
    messages = cam.receive()
    assert isinstance(messages, list)
    assert messages == expected
|
import test_util.proxy
import test_util.runner
import http.client
import http.server
import threading
import test_util.thread_safe_counter
import random
import time
if __name__ == "__main__":
    # Thread-safe counter incremented for every request that actually
    # reaches the origin server below, so tests can assert how many
    # requests the proxy forwarded.
    request_counter = test_util.thread_safe_counter.Counter()
    # This is an HTTP 1.0 server that doesn't support persistent connections
    class Server(http.server.BaseHTTPRequestHandler):
        """Origin server exercising response bodies with/without
        Content-Length, chunked encoding, and padded chunk sizes."""

        # Send small writes immediately instead of coalescing them.
        disable_nagle_algorithm = True

        def do_HEAD(self):
            # Advertises a 5-byte body but, per HEAD semantics, sends none.
            request_counter.increment()
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.send_header("Content-Length", 5)
            self.end_headers()

        def write_chunked(self, what, padded):
            """Write *what* as randomly sized HTTP chunks followed by the
            terminating zero-length chunk."""
            remaining = what
            while len(remaining) > 0:
                chunk_length = random.randint(1, len(remaining))
                self.write_chunk(remaining[:chunk_length], padded)
                remaining = remaining[chunk_length:]
            # remaining is empty here: this emits the final 0-length chunk.
            self.write_chunk(remaining, padded)

        def write_chunk(self, chunk, padded):
            """Write one chunk; when *padded*, randomly zero-pad the hex
            size field and insert tiny delays to split packets."""
            if padded and random.randint(0, 1) == 0:
                # A leading "0" flushed on its own splits the (still valid,
                # zero-padded) size field across writes.
                self.wfile.write(b"0")
                self.wfile.flush()
                time.sleep(0.001)
            padding = random.randint(0, 5) if padded else 0
            # b"%%0%dx\r\n" % padding builds e.g. b"%03x\r\n": chunk size in
            # zero-padded hex, then the chunk data and CRLF.
            self.wfile.write(((b"%%0%dx\r\n") % padding) % len(chunk) + chunk + b"\r\n")
            self.wfile.flush()
            if random.randint(0, 1) == 0:
                time.sleep(0.001)

        def do_POST(self):
            request_counter.increment()
            content_length = int(self.headers["Content-Length"])
            # NOTE(review): falls back to str "" while read() yields bytes;
            # only len() is applied below, so the mismatch is harmless here.
            body = self.rfile.read(content_length) if content_length > 0 else ""
            body_length = len(body)
            if self.path == "/body_200_no_length/":
                # Body with neither Content-Length nor chunking: end of body
                # is signalled by connection close (HTTP/1.0 style).
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.wfile.write(b"<h1>body_200 %d</h1>" % body_length)
            elif self.path == "/body_200_length/":
                self.send_response(200)
                response = b"<h1>body_200 %d</h1>" % body_length
                self.send_header("Content-type", "text/html")
                self.send_header("Content-Length", str(len(response)))
                self.end_headers()
                self.wfile.write(response)
            elif self.path == "/body_200_chunked/":
                self.send_response(200)
                response = b"<h1>body_200 %d</h1>" % body_length
                self.send_header("Content-type", "text/html")
                self.send_header("Transfer-Encoding", "chunked")
                self.end_headers()
                self.write_chunked(response, padded=False)
            elif self.path == "/body_200_chunked_padded_length/":
                self.send_response(200)
                response = b"<h1>body_200 %d</h1>" % body_length
                self.send_header("Content-type", "text/html")
                self.send_header("Transfer-Encoding", "chunked")
                self.end_headers()
                self.write_chunked(response, padded=True)
            elif self.path == "/no_body_200_no_length/":
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
            elif self.path == "/no_body_200_length/":
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.send_header("Content-Length", "0")
                self.end_headers()

        def do_GET(self):
            # Mirrors do_POST but without echoing a request body length.
            request_counter.increment()
            if self.path == "/body_200_no_length/":
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.wfile.write(b"<h1>body_200</h1>")
            elif self.path == "/body_200_length/":
                self.send_response(200)
                response = b"<h1>body_200</h1>"
                self.send_header("Content-type", "text/html")
                self.send_header("Content-Length", str(len(response)))
                self.end_headers()
                self.wfile.write(response)
            elif self.path == "/body_200_chunked/":
                self.send_response(200)
                response = b"<h1>body_200</h1>"
                self.send_header("Content-type", "text/html")
                self.send_header("Transfer-Encoding", "chunked")
                self.end_headers()
                self.write_chunked(response, padded=False)
            elif self.path == "/body_200_chunked_padded_length/":
                self.send_response(200)
                response = b"<h1>body_200</h1>"
                self.send_header("Content-type", "text/html")
                self.send_header("Transfer-Encoding", "chunked")
                self.end_headers()
                self.write_chunked(response, padded=True)
            elif self.path == "/no_body_200_no_length/":
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
            elif self.path == "/no_body_200_length/":
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.send_header("Content-Length", "0")
                self.end_headers()
server = http.server.HTTPServer(("localhost", 0), Server)
server_port = server.server_address[1]
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True # thread dies with the program
thread.start()
expected_request_counter = 0
    def test_requests(proxy_port, behavior, send_immediately, method, suffix):
        """Drive one request through the proxy and assert the callback
        events, the response body, and the origin request count."""
        global expected_request_counter
        http_connection = http.client.HTTPConnection("127.0.0.1", proxy_port)
        http_connection.connect()
        test_util.runner.get_line_from_queue_and_assert(queue, "connection\n")
        url = "http://localhost:%d/%s/" % (server_port, suffix)
        if method == "POST":
            # We want to often test short payloads that will fit in the same
            # packet with the pre body.
            body_length = random.choice(
                [random.randint(0, 1), random.randint(2, 100000)]
            )
            body = "a" * body_length
        else:
            body_length = None
            body = None
        http_connection.request(method, url, body)
        # Expected streaming mode reported by the request_pre_body event:
        # buffering behaviors and body-triggered responses stream nowhere,
        # pre-body-triggered responses stream downstream, rest upstream.
        stream = (
            "none"
            if behavior.startswith("buffer_request_")
            or behavior
            in {
                "request_body_last_generates_response_with_body",
                "request_body_last_generates_response_without_body",
            }
            else (
                "downstream"
                if behavior
                in {
                    "request_pre_body_generates_response_with_body",
                    "request_pre_body_generates_response_without_body",
                }
                else "upstream"
            )
        )
        # Only behaviors that actually forward the request reach the origin.
        if (
            behavior.endswith("default")
            or behavior.endswith("response_pre_body_generates_response_with_body")
            or behavior.endswith("response_pre_body_generates_response_without_body")
            or behavior.endswith("response_pre_body_prepend")
            or behavior.endswith("response_body_prepend")
            or behavior.endswith("response_body_append")
        ):
            expected_request_counter += 1
        test_util.runner.get_line_from_queue_and_assert(
            queue, "request_pre_body /%s/ %s\n" % (suffix, stream)
        )
        test_util.runner.get_line_from_queue_and_assert(
            queue, "request_body_some_last /%s/\n" % suffix
        )
        # Forwarding behaviors additionally emit response-side events.
        if (
            behavior.endswith("default")
            or behavior.endswith("response_pre_body_generates_response_with_body")
            or behavior.endswith("response_pre_body_generates_response_without_body")
            or behavior.endswith("response_pre_body_prepend")
            or behavior.endswith("response_body_prepend")
            or behavior.endswith("response_body_append")
        ):
            # NOTE(review): '"none" if False else "downstream"' always
            # yields "downstream" -- looks like a leftover condition.
            stream = "none" if False else "downstream"
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_pre_body /%s/ 200 %s\n" % (suffix, stream)
            )
            test_util.runner.get_line_from_queue_and_assert(
                queue, "response_body_some_last /%s/\n" % suffix
            )
        test_util.runner.get_line_from_queue_and_assert(queue, "response_finished\n")
        response = http_connection.getresponse()
        # Work out which body the client should observe.
        if method == "HEAD":
            expected_body = b""
        elif (
            behavior
            in {
                "request_pre_body_generates_response_with_body",
                "request_body_last_generates_response_with_body",
            }
            or behavior.endswith("response_pre_body_generates_response_with_body")
        ):
            # Proxy-generated body names the active behavior.
            expected_body = b"<h1>%s</h1>" % str.encode(behavior)
        else:
            if (
                suffix
                in {
                    "no_body_200_no_length",
                    "no_body_200_length",
                }
                or behavior
                in {
                    "request_pre_body_generates_response_without_body",
                    "request_body_last_generates_response_without_body",
                }
                or behavior.endswith(
                    "response_pre_body_generates_response_without_body"
                )
            ):
                expected_body = b""
            else:
                expected_body = (
                    b"<h1>body_200</h1>"
                    if body_length is None
                    else (b"<h1>body_200 %d</h1>" % body_length)
                )
        if behavior.endswith("response_pre_body_prepend") or behavior.endswith(
            "response_body_prepend"
        ):
            expected_body = b"<h1>Pre body</h1>" + expected_body
        elif behavior.endswith("response_body_append"):
            expected_body += b"<h1>Post body</h1>"
        read_body = response.read()
        # NOTE(review): dead branch -- 'behavior' never takes these suffix
        # values, which is fortunate: the code below mixes str and bytes
        # ('read_body = ""' then '+= remaining_body[...]'), calls
        # .index("\r") on bytes, and the final reslice is missing a colon
        # (single-element index instead of a slice). Confirm and remove or
        # repair before ever enabling it.
        if behavior in {"body_200_chunked", "body_200_chunked_padded_length"}:
            remaining_body = read_body
            read_body = ""
            while True:
                carriage_return_index = remaining_body.index("\r")
                chunk_length = int(remaining_body[:carriage_return_index], 16)
                if chunk_length == 0:
                    break
                read_body += remaining_body[
                    carriage_return_index + 2 : carriage_return_index + 2 + chunk_length
                ]
                remaining_body = remaining_body[
                    carriage_return_index + 2 + chunk_length + 2
                ]
        assert read_body == expected_body, "%s body %s doesn't match %s!" % (
            url,
            read_body,
            expected_body,
        )
        http_connection.close()
        assert expected_request_counter == request_counter.value(), (
            "Unexpected request_count - expected %d was %d",
            expected_request_counter,
            request_counter.value(),
        )
        test_util.runner.get_line_from_queue_and_assert(queue, "connection_finished\n")
    # Run every proxy behavior in both send modes against all endpoints.
    for behavior in [
        "default",
        "buffer_request_default",
        "request_pre_body_generates_response_with_body",
        "request_pre_body_generates_response_without_body",
        "request_body_last_generates_response_with_body",
        "request_body_last_generates_response_without_body",
        "response_pre_body_generates_response_with_body",
        "response_pre_body_generates_response_without_body",
        "buffer_request_response_pre_body_generates_response_with_body",
        "buffer_request_response_pre_body_generates_response_without_body",
        "response_pre_body_prepend",
        "response_body_prepend",
        "response_body_append",
        "buffer_request_response_pre_body_prepend",
        "buffer_request_response_body_prepend",
        "buffer_request_response_body_append",
    ]:
        for send_immediately in [True, False]:
            queue, proxy_process = test_util.runner.run(
                "./tests-proxy/server/switch_callbacks_proxy",
                [behavior, "immediately" if send_immediately else "collect"],
            )
            # First line printed by the proxy process is its listening port.
            proxy_port = int(queue.get().strip())
            for method in ["HEAD", "GET", "POST"]:
                for suffix in [
                    "body_200_length",
                    "body_200_no_length",
                    "body_200_chunked",
                    "body_200_chunked_padded_length",
                    "no_body_200_length",
                    "no_body_200_no_length",
                ]:
                    test_requests(
                        proxy_port, behavior, send_immediately, method, suffix
                    )
            proxy_process.kill()
|
<reponame>LSSTDESC/galsampler
"""
"""
import numpy as np
import pytest
from ..utils import crossmatch
from ..source_halo_selection import source_halo_index_selection, get_source_bin_from_target_bin
from ..host_halo_binning import halo_bin_indices
__all__ = ('test_source_halo_index_selection_no_missing_source_cells', )
# Fixed seed so the randomized test inputs are reproducible across runs.
DEFAULT_SEED = 43
def test_source_halo_index_selection_no_missing_source_cells():
    """Every target halo is matched to a source halo from its own 2d bin,
    when every cell of the (bin1, bin2) grid is populated in the source catalog.
    """
    nhalo_min = 25
    num_sources, num_target = int(1e4), int(1e5)
    num_bin1_edges, num_bin2_edges = 7, 4
    bin1 = np.linspace(0, 1, num_bin1_edges)
    bin2 = np.linspace(0, 1, num_bin2_edges)
    # Number of 2d cells is (edges-1) per axis.
    num_cells_total = (num_bin1_edges-1)*(num_bin2_edges-1)
    rng = np.random.RandomState(DEFAULT_SEED)
    # With 1e4 sources over 18 cells, every cell is populated w.h.p.
    source_halo_bin_numbers = rng.randint(0, num_cells_total, num_sources)
    target_halo_bin_numbers = rng.randint(0, num_cells_total, num_target)
    target_halo_ids = np.arange(num_target).astype('i8')
    source_indices, matching_target_halo_ids = source_halo_index_selection(
        source_halo_bin_numbers, target_halo_bin_numbers, target_halo_ids,
        nhalo_min, bin1, bin2)
    # The selected source halo must come from the same cell as its target.
    unique_target_bins = np.unique(target_halo_bin_numbers)
    for ibin in unique_target_bins:
        mask = target_halo_bin_numbers == ibin
        assert np.all(source_halo_bin_numbers[source_indices[mask]] == ibin)
def test2_source_halo_index_selection_no_missing_source_cells():
    """Same bin-consistency check as the 2d test above, for a single binning
    dimension.
    """
    nhalo_min = 25
    num_sources, num_target = int(1e4), int(1e5)
    num_bin1_edges = 7
    bin1 = np.linspace(0, 1, num_bin1_edges)
    rng = np.random.RandomState(DEFAULT_SEED)
    source_halo_bin_numbers = rng.randint(0, num_bin1_edges-1, num_sources)
    target_halo_bin_numbers = rng.randint(0, num_bin1_edges-1, num_target)
    target_halo_ids = np.arange(num_target).astype('i8')
    source_indices, matching_target_halo_ids = source_halo_index_selection(
        source_halo_bin_numbers, target_halo_bin_numbers, target_halo_ids, nhalo_min, bin1)
    # Each target bin's matches must be drawn from the same source bin.
    unique_target_bins = np.unique(target_halo_bin_numbers)
    for ibin in unique_target_bins:
        mask = target_halo_bin_numbers == ibin
        assert np.all(source_halo_bin_numbers[source_indices[mask]] == ibin)
def test_source_halo_index_selection_missing_source_cells():
    """A ValueError is raised when too many cells of the binning grid are
    under-populated in the source catalog.

    1e3 sources spread over 60 cells with nhalo_min=25 guarantees many
    deficient cells.
    """
    nhalo_min = 25
    num_sources, num_target = int(1e3), int(1e5)
    num_bins1, num_bins2 = 3, 20
    bin1 = np.linspace(0, 1, num_bins1)
    bin2 = np.linspace(0, 1, num_bins2)
    num_bins = num_bins1*num_bins2
    rng = np.random.RandomState(DEFAULT_SEED)
    source_halo_bin_numbers = rng.randint(0, num_bins, num_sources)
    target_halo_bin_numbers = rng.randint(0, num_bins, num_target)
    target_halo_ids = np.arange(num_target).astype('i8')
    with pytest.raises(ValueError) as err:
        source_indices = source_halo_index_selection(
            source_halo_bin_numbers, target_halo_bin_numbers, target_halo_ids, nhalo_min, bin1, bin2)
    # The error message should explain the sparse-source-catalog failure.
    substr = "The fraction of cells in the source catalog"
    assert substr in err.value.args[0]
def test1_get_source_bin_from_target_bin():
    """A bin with fewer than nhalo_min halos is replaced by a neighboring
    well-populated bin (bin 0 with only 3 halos falls through to bin 1).
    """
    random_state = np.random.RandomState(DEFAULT_SEED)
    shape = (25, )
    counts = random_state.randint(100, 500, 25)
    counts[0] = 3  # starve bin 0 below the nhalo_min threshold
    assert get_source_bin_from_target_bin(counts, 0, 50, shape) == 1
def test2_get_source_bin_from_target_bin():
    """Well-populated bins map to themselves; an empty bin (bin 2) maps to
    one of its populated neighbors.
    """
    edges = np.linspace(0, 1, 5)
    values = np.array((0.1, 0.1, 0.4, 0.4, 0.9, 0.9))
    occupied = halo_bin_indices(x=(values, edges))
    assert set(occupied) == {0, 1, 3}
    counts = np.array([2, 2, 0, 2])
    shape = (4, )
    # Expected result(s) per requested bin number, with nhalo_min == 2.
    expectations = {0: (0,), 1: (1,), 2: (1, 3), 3: (3,)}
    for requested_bin, allowed in expectations.items():
        assert get_source_bin_from_target_bin(counts, requested_bin, 2, shape) in allowed
def test_bin_distribution_recovery():
    """Matched source halos reproduce the target halos' log-mass values
    exactly when both catalogs sit on the same bin midpoints.
    """
    log_mhost_min, log_mhost_max, dlog_mhost = 10.5, 15.5, 0.5
    log_mhost_bins = np.arange(log_mhost_min, log_mhost_max+dlog_mhost, dlog_mhost)
    log_mhost_mids = 0.5*(log_mhost_bins[:-1] + log_mhost_bins[1:])
    # Place num_source_halos_per_bin source halos exactly at each midpoint.
    num_source_halos_per_bin = 10
    source_halo_log_mhost = np.tile(log_mhost_mids, num_source_halos_per_bin)
    source_halo_bin_number = halo_bin_indices(log_mhost=(source_halo_log_mhost, log_mhost_bins))
    # Each source halo spawns 11 target halos in the same bin.
    num_target_halos_per_source_halo = 11
    target_halo_bin_number = np.repeat(source_halo_bin_number, num_target_halos_per_source_halo)
    target_halo_log_mhost = np.repeat(source_halo_log_mhost, num_target_halos_per_source_halo)
    num_target_halos = len(target_halo_bin_number)
    target_halo_ids = np.arange(num_target_halos).astype('i8')
    nhalo_min = 5
    source_halo_selection_indices, matching_target_halo_ids = source_halo_index_selection(
        source_halo_bin_number, target_halo_bin_number, target_halo_ids, nhalo_min, log_mhost_bins)
    # Re-align matched ids with the original target ordering before comparing.
    idxA, idxB = crossmatch(matching_target_halo_ids, target_halo_ids)
    target_mass = target_halo_log_mhost[idxB]
    source_mass = source_halo_log_mhost[source_halo_selection_indices]
    assert np.allclose(target_mass, source_mass)
    # NOTE(review): the selection + comparison below is a verbatim repeat of
    # the block above — possibly an intentional repeat-call consistency check,
    # possibly accidental duplication; confirm intent before removing.
    source_halo_selection_indices, matching_target_halo_ids = source_halo_index_selection(
        source_halo_bin_number, target_halo_bin_number, target_halo_ids, nhalo_min, log_mhost_bins)
    idxA, idxB = crossmatch(matching_target_halo_ids, target_halo_ids)
    target_mass = target_halo_log_mhost[idxB]
    source_mass = source_halo_log_mhost[source_halo_selection_indices]
    assert np.allclose(target_mass, source_mass)
def test_source_vs_target_halo_mass_consistency1():
    """ Proceeding halo by halo, ensure that the target halo and its matching source halo
    have identical mass when there exists a unique mass per bin
    """
    log_mhost_min, log_mhost_max, dlog_mhost = 10.5, 15.5, 0.5
    log_mhost_bins = np.arange(log_mhost_min, log_mhost_max+dlog_mhost, dlog_mhost)
    log_mhost_mids = 0.5*(log_mhost_bins[:-1] + log_mhost_bins[1:])
    # One unique mass per bin: all halos sit exactly on the bin midpoints.
    num_source_halos_per_bin = 10
    source_halo_log_mhost = np.tile(log_mhost_mids, num_source_halos_per_bin)
    source_halo_bin_number = halo_bin_indices(log_mhost=(source_halo_log_mhost, log_mhost_bins))
    num_source_halos = len(source_halo_bin_number)
    source_halo_ids = np.arange(num_source_halos).astype('i8')
    num_target_halos_per_source_halo = 11
    target_halo_log_mhost = np.repeat(source_halo_log_mhost, num_target_halos_per_source_halo)
    target_halo_bin_number = halo_bin_indices(log_mhost=(target_halo_log_mhost, log_mhost_bins))
    num_target_halos = len(target_halo_bin_number)
    target_halo_ids = np.arange(num_target_halos).astype('i8')
    nhalo_min = 5
    source_halo_selection_indices, matching_target_halo_ids = source_halo_index_selection(
        source_halo_bin_number, target_halo_bin_number, target_halo_ids, nhalo_min, log_mhost_bins)
    selected_source_halo_ids = source_halo_ids[source_halo_selection_indices]
    # Selection must return one source halo per target halo.
    assert len(selected_source_halo_ids) == len(matching_target_halo_ids)
    # Pairwise check: every matched pair must carry the same mass.
    gen = zip(selected_source_halo_ids, matching_target_halo_ids)
    for source_id, target_id in gen:
        source_mask = source_halo_ids == source_id
        target_mask = target_halo_ids == target_id
        assert np.all(source_halo_log_mhost[source_mask] == target_halo_log_mhost[target_mask])
def test_source_vs_target_halo_mass_consistency2():
    """ Proceeding halo by halo, ensure that the target halo and its matching source halo
    have identical mass except in the lowest mass source bin, which is empty, in which case
    the mass of the source halo should be the next lowest.
    """
    log_mhost_min, log_mhost_max, dlog_mhost = 10.5, 15.5, 0.5
    log_mhost_bins = np.arange(log_mhost_min, log_mhost_max+dlog_mhost, dlog_mhost)
    log_mhost_mids = 0.5*(log_mhost_bins[:-1] + log_mhost_bins[1:])
    num_source_halos_per_bin = 10
    # Drop the lowest bin midpoint so source bin 0 is deliberately empty.
    source_halo_log_mhost = np.tile(log_mhost_mids[1:], num_source_halos_per_bin)
    source_halo_bin_number = halo_bin_indices(log_mhost=(source_halo_log_mhost, log_mhost_bins))
    num_source_halos = len(source_halo_bin_number)
    source_halo_ids = np.arange(num_source_halos).astype('i8')
    assert np.min(source_halo_bin_number) == 1
    num_target_halos_per_source_halo = 11
    num_target_halos_per_bin = num_target_halos_per_source_halo*num_source_halos_per_bin
    # Targets, by contrast, occupy every bin including the lowest one.
    target_halo_log_mhost = np.tile(log_mhost_mids, num_target_halos_per_bin)
    target_halo_bin_number = halo_bin_indices(log_mhost=(target_halo_log_mhost, log_mhost_bins))
    num_target_halos = len(target_halo_bin_number)
    target_halo_ids = np.arange(num_target_halos).astype('i8')
    assert np.min(target_halo_bin_number) == 0
    nhalo_min = 5
    source_halo_selection_indices, matching_target_halo_ids = source_halo_index_selection(
        source_halo_bin_number, target_halo_bin_number, target_halo_ids, nhalo_min, log_mhost_bins)
    selected_source_halo_ids = source_halo_ids[source_halo_selection_indices]
    assert len(selected_source_halo_ids) == len(matching_target_halo_ids)
    gen = zip(selected_source_halo_ids, matching_target_halo_ids)
    for source_id, target_id in gen:
        source_mask = source_halo_ids == source_id
        target_mask = target_halo_ids == target_id
        source_halo_mass = source_halo_log_mhost[source_mask][0]
        target_halo_mass = target_halo_log_mhost[target_mask][0]
        if target_halo_mass == log_mhost_mids[0]:
            # Lowest target bin has no source counterpart: fall-through to
            # the next-lowest populated bin is the expected behavior.
            assert source_halo_mass == log_mhost_mids[1]
        else:
            assert target_halo_mass == source_halo_mass
|
#! /usr/bin/env python
#######################################
# createPropSymbol.py
#
# A python class to create a nested proportional symbol showing three values.
#
# Used as part of the SoilSCAPE website to create symbols used in open layers to
# display soil moisture.
# See http://soilscape.usc.edu/drupal/?q=node/24
#
# Requires:
# - imagemagick - http://www.imagemagick.org
#
# Created by <NAME> (The University of Southern California)
# Copyright 2012 <NAME>. All rights reserved.
#
# Email: <EMAIL>
# Date: 21/01/2013
# Version: 1.0
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#######################################
import os, sys
# Set Scale factor, used to scale values to provide the required size, in pixels
scaleFactor = 10  # circle diameter (px) = sensor value * scaleFactor
def createSMSymbol(outDIR, outSymbolName, val1, val2, val3):
    """Create a nested proportional-symbol PNG from three sensor values.

    Each value is multiplied by the module-level `scaleFactor` to give a
    circle diameter in pixels; the three circles are composited centred,
    largest at the back, and written to outDIR/outSymbolName. Temporary
    per-circle images are removed afterwards.

    Requires ImageMagick's `convert` and `composite` on the PATH and the
    base images s1_yellow_circle_base.png / s2_orange_circle_base.png /
    s3_red_circle_base.png in the current working directory.
    """
    # Set up files
    sensor1File = os.path.join(outDIR, 's1.png')
    sensor2File = os.path.join(outDIR, 's2.png')
    sensor3File = os.path.join(outDIR, 's3.png')
    tempStack = os.path.join(outDIR, 'temp_stack.png')
    outSymbol = os.path.join(outDIR, outSymbolName)
    # Create symbols
    # NOTE(review): commands are assembled by string interpolation and run
    # through os.system, so paths containing spaces or shell metacharacters
    # will break (or execute unintended commands) — consider subprocess.run
    # with an argument list.
    command1 = '''convert -resize %i -channel rgba -alpha on s1_yellow_circle_base.png %s'''%(val1 * scaleFactor, sensor1File)
    command2 = '''convert -resize %i -channel rgba -alpha on s2_orange_circle_base.png %s'''%(val2 * scaleFactor, sensor2File)
    command3 = '''convert -resize %i -channel rgba -alpha on s3_red_circle_base.png %s'''%(val3 * scaleFactor, sensor3File)
    os.system(command1)
    os.system(command2)
    os.system(command3)
    # Sort sensors symbols according to soil moisture. 3 - largest, 1 smallest
    # (ties broken by file name so the ordering is deterministic).
    sensorDict = {sensor1File: val1, sensor2File: val2,sensor3File: val3}
    sortedFiles = sorted(iter(sensorDict.items()), key=lambda k_v: (k_v[1],k_v[0]))
    symbol3 = sortedFiles[2][0]
    symbol2 = sortedFiles[1][0]
    symbol1 = sortedFiles[0][0]
    # Composite middle circle onto the largest, then the smallest on top.
    stackCommand1 = 'composite -gravity center ' + symbol2 + ' ' + symbol3 + ' ' + tempStack
    stackCommand2 = 'composite -gravity center ' + symbol1 + ' ' + tempStack + ' ' + outSymbol
    os.system(stackCommand1)
    os.system(stackCommand2)
    # Remove temp files
    tempFiles = [sensor1File, sensor2File, sensor3File, tempStack]
    for tfile in tempFiles:
        os.remove(tfile)
# Command-line entry point: outDIR outSymbolName val1 val2 val3.
if len(sys.argv) < 6:
    # Usage error: print help and exit non-zero so callers (shell scripts,
    # cron jobs) can detect the failure. The previous `exit()` reported
    # success (status 0) even on this error path.
    print('''Not enough parameters provided.
Usage:
python createPropSymbol.py outDIR outSymbolName.png Val1 (yellow) Val2 (orange) Val3 (red)
''')
    sys.exit(1)
else:
    outDIR = sys.argv[1]
    outSymbolName = sys.argv[2]
    # Sensor values arrive as strings; symbol sizing needs integers.
    val1 = int(sys.argv[3])
    val2 = int(sys.argv[4])
    val3 = int(sys.argv[5])
    createSMSymbol(outDIR, outSymbolName, val1, val2, val3)
<gh_stars>1-10
import os
import torch
import torch.utils.data as data
import numpy as np
from PIL import Image, ImageFile
import random
from torchvision.transforms import ToTensor
from torchvision import transforms
import cv2
import torch.nn.functional as F
ImageFile.LOAD_TRUNCATED_IMAGES = True
def collate_features(batch):
    """Collate (image_tensor, coords_array) pairs into one batch.

    Tensors are concatenated along dim 0 and coordinate arrays are
    vertically stacked; returns [images, coords] as a two-element list.
    """
    images = [pair[0] for pair in batch]
    coordinates = [pair[1] for pair in batch]
    return [torch.cat(images, dim=0), np.vstack(coordinates)]
def eval_transforms(pretrained=False):
    """Build the evaluation preprocessing pipeline.

    Resizes to 224, converts to a tensor, then normalizes with ImageNet
    statistics when `pretrained` is True, otherwise with 0.5 mean/std.
    """
    if pretrained:
        # ImageNet channel statistics.
        mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
    else:
        mean, std = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
    return transforms.Compose(
        [
            transforms.Resize(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ]
    )
class GraphDataset(data.Dataset):
    """input and label image dataset

    Each id line has the form 'SITE/slide_name.ext<TAB>label'; __getitem__
    loads the slide's precomputed feature tensor ('features.pt') and spatial
    adjacency matrix ('adj_s.pt') from a site-specific directory.
    """
    def __init__(self, root, ids, target_patch_size=-1):
        super(GraphDataset, self).__init__()
        """
        Args:
            fileDir(string): directory with all the input images.
            transform(callable, optional): Optional transform to be applied on a sample
        """
        self.root = root
        self.ids = ids
        #self.target_patch_size = target_patch_size
        # Label-name -> integer class index mapping.
        self.classdict = {'normal': 0, 'luad': 1, 'lscc': 2} #
        #self.classdict = {'normal': 0, 'tumor': 1} #
        #self.classdict = {'Normal': 0, 'TCGA-LUAD': 1, 'TCGA-LUSC': 2}
        self._up_kwargs = {'mode': 'bilinear'}
    def __getitem__(self, index):
        sample = {}
        info = self.ids[index].replace('\n', '')
        # Split 'SITE/slide.ext\tlabel' into a path stem and the label.
        file_name, label = info.split('\t')[0].rsplit('.', 1)[0], info.split('\t')[1]
        site, file_name = file_name.split('/')
        # if site =='CCRCC':
        #     file_path = self.root + 'CPTAC_CCRCC_features/simclr_files'
        # LUAD and LSCC slides share one LUNG feature directory.
        # NOTE(review): file_path is only assigned in the branches below; any
        # other site value (e.g. the commented-out CCRCC case) raises
        # NameError further down — confirm the intended site set.
        if site =='LUAD' or site =='LSCC':
            site = 'LUNG'
        file_path = self.root + 'CPTAC_{}_features/simclr_files'.format(site) #_pre# with # rushin
        # For NLST only
        if site =='NLST':
            file_path = self.root + 'NLST_Lung_features/simclr_files'
        # For TCGA only
        if site =='TCGA':
            # TCGA file names keep their extension; re-split the raw path.
            file_name = info.split('\t')[0]
            _, file_name = file_name.split('/')
            file_path = self.root + 'TCGA_LUNG_features/simclr_files' #_resnet_with
        sample['label'] = self.classdict[label]
        sample['id'] = file_name
        #feature_path = os.path.join(self.root, file_name, 'features.pt')
        feature_path = os.path.join(file_path, file_name, 'features.pt')
        if os.path.exists(feature_path):
            # map_location keeps CPU loading even for GPU-saved tensors.
            features = torch.load(feature_path, map_location=lambda storage, loc: storage)
        else:
            # Missing features: warn and substitute a single zero vector.
            print(feature_path + ' not exists')
            features = torch.zeros(1, 512)
        #adj_s_path = os.path.join(self.root, file_name, 'adj_s.pt')
        adj_s_path = os.path.join(file_path, file_name, 'adj_s.pt')
        if os.path.exists(adj_s_path):
            adj_s = torch.load(adj_s_path, map_location=lambda storage, loc: storage)
        else:
            # Missing adjacency: warn and fall back to a fully-connected graph.
            print(adj_s_path + ' not exists')
            adj_s = torch.ones(features.shape[0], features.shape[0])
        #features = features.unsqueeze(0)
        sample['image'] = features
        sample['adj_s'] = adj_s #adj_s.to(torch.double)
        # return {'image': image.astype(np.float32), 'label': label.astype(np.int64)}
        return sample
    def __len__(self):
        return len(self.ids)
"""A simple but complete HTML to Abstact Syntax Tree (AST) parser.
The AST can also reproduce the HTML text.
Example::
>> text = '<div class="note"><p>text</p></div>'
>> ast = tokenize_html(text)
>> list(ast.walk(include_self=True))
[Root(''), Tag('div', {'class': 'note'}), Tag('p'), Data('text')]
>> str(ast)
'<div class="note"><p>text</p></div>'
>> str(ast[0][0])
'<p>text</p>'
Note: optional tags are not accounted for
(see https://html.spec.whatwg.org/multipage/syntax.html#optional-tags)
"""
import inspect
import itertools
from collections import abc, deque
from html.parser import HTMLParser
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Type, Union
class Attribute(dict):
    """Mapping of a tag's attributes; missing keys read as ''."""
    def __getitem__(self, key: str) -> str:
        """Return the attribute value, or the empty string when absent."""
        return self.get(key, "")
    @property
    def classes(self) -> List[str]:
        """The 'class' attribute split on whitespace."""
        return self["class"].split()
    def __str__(self) -> str:
        """Render the attributes as space-separated key="value" pairs."""
        rendered = ('{}="{}"'.format(key, value) for key, value in self.items())
        return " ".join(rendered)
class Element(abc.MutableSequence):
    """An Element of the xml/html document.
    All xml/html entities inherit from this class.

    Children are held in an internal list and managed through the
    MutableSequence interface; inserting a child sets its parent link,
    and a child may belong to at most one parent.
    """
    def __init__(self, name: str = "", attr: Optional[dict] = None) -> None:
        """Initialise the element.

        :param name: tag name (empty for root/terminal nodes)
        :param attr: optional mapping of tag attributes
        """
        self.name = name
        self.attrs: Attribute = Attribute(attr or {})
        self._parent: Optional[Element] = None
        self._children: List[Element] = []
    @property
    def parent(self) -> Optional["Element"]:
        """Return parent."""
        return self._parent
    @property
    def children(self) -> List["Element"]:
        """Return copy of children."""
        return self._children[:]
    def reset_children(self, children: List["Element"], deepcopy: bool = False):
        """Replace all children, adopting each one (or a deep copy of it).

        Raises AssertionError if an item already has a different parent.
        """
        new_children = []
        for i, item in enumerate(children):
            assert isinstance(item, Element)
            if deepcopy:
                item = item.deepcopy()
            if item._parent is None:
                item._parent = self
            elif item._parent != self:
                raise AssertionError(f"different parent already set for item {i}")
            new_children.append(item)
        self._children = new_children
    def __getitem__(self, index: int) -> "Element":  # type: ignore[override]
        return self._children[index]
    def __setitem__(self, index: int, item: "Element"):  # type: ignore[override]
        # Adopts the item; refuses one that belongs to another parent.
        assert isinstance(item, Element)
        if item._parent is not None and item._parent != self:
            raise AssertionError(f"different parent already set for: {item!r}")
        item._parent = self
        return self._children.__setitem__(index, item)
    def __delitem__(self, index: int):  # type: ignore[override]
        return self._children.__delitem__(index)
    def __len__(self) -> int:
        return self._children.__len__()
    def __iter__(self) -> Iterator["Element"]:
        for child in self._children:
            yield child
    def insert(self, index: int, item: "Element"):
        # Required by abc.MutableSequence; adopts the inserted element.
        assert isinstance(item, Element)
        if item._parent is not None and item._parent != self:
            raise AssertionError(f"different parent already set for: {item!r}")
        item._parent = self
        return self._children.insert(index, item)
    def deepcopy(self) -> "Element":
        """Recursively copy and remove parent."""
        _copy = self.__class__(self.name, self.attrs)
        for child in self:
            _copy_child = child.deepcopy()
            _copy.append(_copy_child)
        return _copy
    def __repr__(self) -> str:
        text = f"{self.__class__.__name__}({self.name!r}"
        if self.attrs:
            text += f", {self.attrs!r}"
        text += ")"
        return text
    def render(
        self,
        tag_overrides: Optional[Dict[str, Callable[["Element", dict], str]]] = None,
        **kwargs,
    ) -> str:
        """Returns a HTML string representation of the element.

        :param tag_overrides: Provide a dictionary of render function
            for specific tag names, to override the normal render format

        Subclasses implement the concrete rendering.
        """
        raise NotImplementedError
    def __str__(self) -> str:
        return self.render()
    def __eq__(self, item: Any) -> bool:
        # Identity semantics: two elements compare equal only when they are
        # the same object, even if their subtrees look identical.
        return item is self
    def walk(self, include_self: bool = False) -> Iterator["Element"]:
        """Walk through the xml/html AST (depth-first, pre-order)."""
        if include_self:
            yield self
        for child in self:
            yield child
            # NOTE: despite the loop variable's name, this yields the child's
            # descendants, not its ancestors.
            for ancestor in child.walk():
                yield ancestor
    def strip(self, inplace: bool = False, recurse: bool = False) -> "Element":
        """Return copy with all `Data` tokens
        that only contain whitespace / newlines removed.

        With inplace=True the element itself is modified and returned.
        """
        element = self
        if not inplace:
            element = self.deepcopy()
        element.reset_children(
            [
                e
                for e in element.children
                if not (isinstance(e, Data) and e.data.strip() == "")
            ]
        )
        if recurse:
            for child in element:
                child.strip(inplace=True, recurse=True)
        return element
    def find(
        self,
        identifier: Union[str, Type["Element"]],
        attrs: Optional[dict] = None,
        classes: Optional[Iterable[str]] = None,
        include_self: bool = False,
        recurse: bool = True,
    ) -> Iterator["Element"]:
        """Find all elements that match name and specific attributes.

        `identifier` may be a tag name or an Element subclass; `classes`
        must be a subset of the element's CSS classes; every key in `attrs`
        must match exactly.
        """
        iterator = self.walk() if recurse else self
        if include_self:
            iterator = itertools.chain([self], iterator)
        # Match either by class (isinstance) or by tag name.
        if inspect.isclass(identifier):
            test_func = lambda c: isinstance(c, identifier)  # type: ignore[arg-type] # noqa: E731,E501
        else:
            test_func = lambda c: c.name == identifier  # noqa: E731
        classes = set(classes) if classes is not None else classes
        for child in iterator:
            if test_func(child):
                if classes is not None and not classes.issubset(child.attrs.classes):
                    continue
                # for/else: yield only when no attribute mismatch broke out.
                for key, value in (attrs or {}).items():
                    if child.attrs[key] != value:
                        break
                else:
                    yield child
class Root(Element):
    """The root of the AST tree."""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        """Concatenate the rendered HTML of every child."""
        parts = [child.render(**kwargs) for child in self]
        return "".join(parts)
class Tag(Element):
    """Represent xml/html tags under the form: <name key="value" ...> ... </name>."""
    def render(
        self,
        tag_overrides: Optional[Dict[str, Callable[[Element, dict], str]]] = None,
        **kwargs,
    ) -> str:
        """Render start tag, children, and end tag; honour any override."""
        if tag_overrides and self.name in tag_overrides:
            return tag_overrides[self.name](self, tag_overrides)
        separator = " " if self.attrs else ""
        opening = "<" + self.name + separator + str(self.attrs) + ">"
        inner_parts = [
            child.render(tag_overrides=tag_overrides, **kwargs) for child in self
        ]
        closing = "</" + self.name + ">"
        return opening + "".join(inner_parts) + closing
class XTag(Element):
    """Represent XHTML style tags with no children, like `<img src="t.gif" />`"""
    def render(
        self,
        tag_overrides: Optional[Dict[str, Callable[[Element, dict], str]]] = None,
        **kwargs,
    ) -> str:
        """Render the self-closing tag; honour any override for this name."""
        if tag_overrides is not None and self.name in tag_overrides:
            return tag_overrides[self.name](self, tag_overrides)
        separator = " " if self.attrs else ""
        return "<" + self.name + separator + str(self.attrs) + "/>"
class VoidTag(Element):
    """Represent tags with no children, only start tag, like `<img src="t.gif" >`"""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        """Render only the start tag; void elements take no closing tag."""
        separator = " " if self.attrs else ""
        return "<" + self.name + separator + str(self.attrs) + ">"
class TerminalElement(Element):
    """Base class for leaf nodes that carry raw text and have no children."""
    def __init__(self, data: str):
        super().__init__("")
        self.data: str = data
    def __repr__(self) -> str:
        # Truncate long payloads so reprs stay readable.
        shown = self.data if len(self.data) <= 20 else self.data[:17] + "..."
        return f"{self.__class__.__name__}({shown!r})"
    def deepcopy(self) -> "TerminalElement":
        """Copy the node without its parent link."""
        return self.__class__(self.data)
class Data(TerminalElement):
    """Represent data inside xml/html documents, like raw text."""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        # Raw text is emitted verbatim; no escaping is applied.
        return self.data
class Declaration(TerminalElement):
    """Represent declarations, like `<!DOCTYPE html>`"""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        """Wrap the raw declaration text in `<!` ... `>`."""
        return "<!" + self.data + ">"
class Comment(TerminalElement):
    """Represent HTML comments"""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        """Wrap the comment text in `<!--` ... `-->`."""
        return "<!--" + self.data + "-->"
class Pi(TerminalElement):
    """Represent processing instructions like `<?xml-stylesheet ?>`"""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        """Wrap the instruction text in `<?` ... `>`."""
        return "<?" + self.data + ">"
class Char(TerminalElement):
    """Represent character codes like: `&#0`"""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        """Wrap the numeric code in `&#` ... `;`."""
        return "&#" + self.data + ";"
class Entity(TerminalElement):
    """Represent entities like `&amp;`"""
    def render(self, **kwargs) -> str:  # type: ignore[override]
        """Wrap the entity name in `&` ... `;`."""
        return "&" + self.data + ";"
class Tree(object):
    """The engine class to generate the AST tree.

    Maintains a stack of currently-open tag scopes; the top of the stack
    is the element that newly parsed nodes are appended to.
    """
    def __init__(self, name: str = ""):
        """Initialise Tree"""
        self.name = name
        self.outmost = Root(name)
        self.stack: deque = deque()
        self.stack.append(self.outmost)
    def clear(self):
        """Clear the outmost and stack for a new parsing."""
        self.outmost = Root(self.name)
        self.stack.clear()
        self.stack.append(self.outmost)
    def last(self) -> Element:
        """Return the last pointer which point to the actual tag scope."""
        return self.stack[-1]
    def nest_tag(self, name: str, attrs: dict):
        """Nest a given tag at the bottom of the tree using
        the last stack's pointer.
        """
        pointer = self.stack.pop()
        item = Tag(name, attrs)
        pointer.append(item)
        self.stack.append(pointer)
        # The new tag becomes the open scope for subsequent nodes.
        self.stack.append(item)
    def nest_xtag(self, name: str, attrs: dict):
        """Nest an XTag onto the tree."""
        # Self-closing: appended to the current scope, never pushed as a scope.
        top = self.last()
        item = XTag(name, attrs)
        top.append(item)
    def nest_vtag(self, name: str, attrs: dict):
        """Nest a VoidTag onto the tree."""
        top = self.last()
        item = VoidTag(name, attrs)
        top.append(item)
    def nest_terminal(self, klass: Type[TerminalElement], data: str):
        """Nest the data onto the tree."""
        top = self.last()
        item = klass(data)
        top.append(item)
    def enclose(self, name: str):
        """When a closing tag is found, pop the pointer's scope from the stack,
        to then point to the earlier scope's tag.
        """
        # Count scopes from the top of the stack down to (and including)
        # the nearest tag whose name matches the closing tag.
        count = 0
        for ind in reversed(self.stack):
            count = count + 1
            if ind.name == name:
                break
        else:
            # for/else: no matching open tag exists, so pop nothing —
            # a stray closing tag is silently ignored.
            count = 0
        # It pops all the items which do not match with the closing tag.
        for i in range(0, count):
            self.stack.pop()
class HtmlToAst(HTMLParser):
    """The tokenizer class.

    Translates HTMLParser callbacks into Tree-building operations.
    """
    # see https://html.spec.whatwg.org/multipage/syntax.html#void-elements
    void_elements = {
        "area",
        "base",
        "br",
        "col",
        "embed",
        "hr",
        "img",
        "input",
        "link",
        "meta",
        "param",
        "source",
        "track",
        "wbr",
    }
    def __init__(self, name: str = "", convert_charrefs: bool = False):
        # convert_charrefs=False keeps character/entity references as
        # separate Char/Entity tokens instead of folding them into Data.
        super().__init__(convert_charrefs=convert_charrefs)
        self.struct = Tree(name)
    def feed(self, source: str) -> Root:  # type: ignore[override]
        """Parse the source string."""
        self.struct.clear()
        super().feed(source)
        return self.struct.outmost
    def handle_starttag(self, name: str, attr):
        """When found an opening tag then nest it onto the tree."""
        if name in self.void_elements:
            # Void elements cannot have children, so never open a scope.
            self.struct.nest_vtag(name, attr)
        else:
            self.struct.nest_tag(name, attr)
    def handle_startendtag(self, name: str, attr):
        """When found a XHTML tag style then nest it up to the tree."""
        self.struct.nest_xtag(name, attr)
    def handle_endtag(self, name: str):
        """When found a closing tag then makes it point to the right scope."""
        # Closing tags for void elements are ignored — they never opened a scope.
        if name not in self.void_elements:
            self.struct.enclose(name)
    def handle_data(self, data: str):
        """Nest data onto the tree."""
        self.struct.nest_terminal(Data, data)
    def handle_decl(self, decl: str):
        # `<!DOCTYPE ...>`-style declarations.
        self.struct.nest_terminal(Declaration, decl)
    def unknown_decl(self, decl: str):
        # Unrecognised declarations are preserved rather than dropped.
        self.struct.nest_terminal(Declaration, decl)
    def handle_charref(self, data: str):
        # Numeric character references.
        self.struct.nest_terminal(Char, data)
    def handle_entityref(self, data: str):
        # Named entity references.
        self.struct.nest_terminal(Entity, data)
    def handle_pi(self, data: str):
        # Processing instructions.
        self.struct.nest_terminal(Pi, data)
    def handle_comment(self, data: str):
        # HTML comments.
        self.struct.nest_terminal(Comment, data)
def tokenize_html(text: str, name: str = "", convert_charrefs: bool = False) -> Root:
    """Parse an HTML string into an AST and return its Root node."""
    return HtmlToAst(name, convert_charrefs=convert_charrefs).feed(text)
|
<filename>minesweeper.py
#!/usr/bin/env python3
def dump(game):
    """
    Prints a human-readable version of a game (provided as a dictionary)
    """
    for key in sorted(game):
        value = game[key]
        # 2-D boards (lists of lists) are printed one row per line.
        looks_2d = isinstance(value, list) and value and isinstance(value[0], list)
        if looks_2d:
            print(f'{key}:')
            for row in value:
                print(f'    {row}')
        else:
            print(f'{key}:', value)
# 2-D IMPLEMENTATION
def initialize_2d_board(num_rows, num_cols):
    """
    Initializes a 2D board with 0s.
    Parameters:
       num_rows (int): number of rows
       num_cols (int): number of columns
    Returns: a 2D board of size num_rows * num_cols
    """
    # Each row is a fresh list, so mutating one row never affects another.
    return [[0] * num_cols for _ in range(num_rows)]
def initialize_2d_mask(num_rows, num_cols):
    """
    Initializes mask with False values.
    Parameters:
       num_rows (int): number of rows
       num_cols (int): number of columns
    Returns: a 2D mask of size num_rows * num_cols
    """
    # Independent row lists, all cells initially hidden.
    return [[False] * num_cols for _ in range(num_rows)]
def get_all_cells_2d(num_rows, num_cols):
    """
    Makes a set with all inbound (r, c) values in a
    num_rows * num_cols sized board
    Parameters:
       num_rows (int): number of rows
       num_cols (int): number of columns
    Returns: a set of all the valid cell coordinate tuples
    """
    # Set comprehension replaces the manual nested-loop/add version.
    return {(r, c) for r in range(num_rows) for c in range(num_cols)}
def get_neighbors_2d(num_rows, num_cols, cell):
    """
    Gets a cell's set of in-bounds neighbors {(r,c), (r,c)}.
    Parameters:
       num_rows (int): number of rows
       num_cols (int): number of columns
       cell (tuple): cell coordinates
    Returns: a set with the up-to-eight neighboring cells as tuples
    (the cell itself is excluded).
    """
    row, col = cell
    # Bounds-check each of the 8 candidates directly instead of building the
    # full O(num_rows * num_cols) cell set just to test membership.
    return {
        (r, c)
        for r in range(row - 1, row + 2)
        for c in range(col - 1, col + 2)
        if (r, c) != (row, col) and 0 <= r < num_rows and 0 <= c < num_cols
    }
def new_game_2d(num_rows, num_cols, bombs):
    """
    Start a new game.
    Return a game state dictionary, with the 'dimensions', 'state', 'board' and
    'mask' fields adequately initialized.
    Parameters:
       num_rows (int): Number of rows
       num_cols (int): Number of columns
       bombs (list): List of bombs, given in (row, column) pairs, which are
                     tuples
    Returns:
       A game state dictionary
    >>> dump(new_game_2d(2, 4, [(0, 0), (1, 0), (1, 1)]))
    board:
        ['.', 3, 1, 0]
        ['.', '.', 1, 0]
    dimensions: (2, 4)
    mask:
        [False, False, False, False]
        [False, False, False, False]
    state: ongoing
    """
    # Thin wrapper: the 2-D game is the N-dimensional game with a 2-tuple
    # of dimensions.
    return new_game_nd((num_rows, num_cols), bombs)
############ Code Prior to Making new_game_nd ############
# board = initialize_2d_board(num_rows, num_cols)
# mask = initialize_2d_mask(num_rows, num_cols)
#
#
# for b in bombs:
#
# bomb_neighbors = get_neighbors_2d(num_rows, num_cols, b)
# for n in bomb_neighbors:
# neighbor_row, neighbor_col = n
# board[neighbor_row][neighbor_col] += 1
#
#
# for r, c in bombs: # ?2?
# board[r][c] = "."
#
# return {
# 'dimensions': (num_rows, num_cols),
# 'board' : board,
# 'mask' : mask,
# 'state': 'ongoing'}
############ Issues in The Original Code ############
#*#*#*# 1. if [r,c] in bombs or (r,c) in bombs #*#*#*#
#*#*#*# 2. refactor: create mask WITH board #*#*#*#
#*#*#*# 3. refactor: no helper function for getting inbound neighbors #*#*#*#
#####################################################
def dig_2d(game, row, col):
    """
    Reveal the cell at (row, col), and, in some cases, recursively reveal its
    neighboring squares.
    Update game['mask'] to reveal (row, col). Then, if (row, col) has no
    adjacent bombs (including diagonally), then recursively reveal (dig up) its
    eight neighbors. Return an integer indicating how many new squares were
    revealed in total, including neighbors, and neighbors of neighbors, and so
    on.
    The state of the game should be changed to 'defeat' when at least one bomb
    is visible on the board after digging (i.e. game['mask'][bomb_location] ==
    True), 'victory' when all safe squares (squares that do not contain a bomb)
    and no bombs are visible, and 'ongoing' otherwise.
    Parameters:
       game (dict): Game state
       row (int): Where to start digging (row)
       col (int): Where to start digging (col)
    Returns:
       int: the number of new squares revealed
    >>> game = {'dimensions': (2, 4),
    ...         'board': [['.', 3, 1, 0],
    ...                   ['.', '.', 1, 0]],
    ...         'mask': [[False, True, False, False],
    ...                  [False, False, False, False]],
    ...         'state': 'ongoing'}
    >>> dig_2d(game, 0, 3)
    4
    >>> dump(game)
    board:
        ['.', 3, 1, 0]
        ['.', '.', 1, 0]
    dimensions: (2, 4)
    mask:
        [False, True, True, True]
        [False, False, True, True]
    state: victory
    >>> game = {'dimensions': [2, 4],
    ...         'board': [['.', 3, 1, 0],
    ...                   ['.', '.', 1, 0]],
    ...         'mask': [[False, True, False, False],
    ...                  [False, False, False, False]],
    ...         'state': 'ongoing'}
    >>> dig_2d(game, 0, 0)
    1
    >>> dump(game)
    board:
        ['.', 3, 1, 0]
        ['.', '.', 1, 0]
    dimensions: [2, 4]
    mask:
        [True, True, False, False]
        [False, False, False, False]
    state: defeat
    """
    # Thin wrapper: delegate to the N-dimensional implementation with the
    # coordinates packed into a tuple.
    return dig_nd(game, (row, col))
############ Code Prior to Making dig_nd ############
# board_rows = game['dimensions'][0]
# board_cols = game['dimensions'][1]
#
# # If game is over or won or cell has been revealed already do nothing
# if game['state'] == 'defeat' or game['state'] == 'victory' or game['mask'][row][col]:
# return 0
#
# game['mask'][row][col] = True
# revealed = 1
#
# # Base Case: If cell has a value other that zero (number or bomb)
# if game['board'][row][col] == '.':
# game['state'] = 'defeat'
# return revealed
#
# # Iterative Case: If cell has a value zero
# if game['board'][row][col] == 0:
# neighbors = get_neighbors_2d(board_rows, board_cols, (row, col))
# for n in neighbors:
# revealed += dig_2d(game, n[0], n[1])
#
# # Check for victory
# uncovered_cells = 0
# num_bombs = 0
# for r in range(board_rows):
# for c in range(board_cols):
# if game['board'][r][c] == '.':
# num_bombs += 1
# if game['board'][r][c] != '.' and game['mask'][r][c]:
# uncovered_cells += 1
#
# if uncovered_cells == (board_rows ) * (board_cols) - num_bombs:
# game['state'] = 'victory'
#
#
# return revealed
############ Issues in The Original Code ############
#*#*#*# 1. refactor: unnecessary #*#*#*#
# if game['state'] == 'defeat' or game['state'] == 'victory':
# game['state'] = game['state'] # This is unnecessary!
# return 0
#*#*#*# 2. missing code: didn't check if game['mask'][row][col] #*#*#*#
#*#*#*# 3. refactor: no helper function for getting inbound neighbors #*#*#*#
#*#*#*# 4. refactor: unnecessary code for checking for victory #*#*#*#
#####################################################
def render_2d(game, xray=False):
    """
    Prepare a game for display.
    Returns a two-dimensional array (list of lists) of '_' (hidden squares), '.'
    (bombs), ' ' (empty squares), or '1', '2', etc. (squares neighboring bombs).
    game['mask'] indicates which squares should be visible. If xray is True (the
    default is False), game['mask'] is ignored and all cells are shown.
    Parameters:
       game (dict): Game state
       xray (bool): Whether to reveal all tiles or just the ones allowed by
                    game['mask']
    Returns:
       A 2D array (list of lists)
    >>> render_2d({'dimensions': (2, 4),
    ...         'state': 'ongoing',
    ...         'board': [['.', 3, 1, 0],
    ...                   ['.', '.', 1, 0]],
    ...         'mask':  [[False, True, True, False],
    ...                   [False, False, True, False]]}, False)
    [['_', '3', '1', '_'], ['_', '_', '1', '_']]
    >>> render_2d({'dimensions': (2, 4),
    ...         'state': 'ongoing',
    ...         'board': [['.', 3, 1, 0],
    ...                   ['.', '.', 1, 0]],
    ...         'mask':  [[False, True, False, True],
    ...                   [False, False, False, True]]}, True)
    [['.', '3', '1', ' '], ['.', '.', '1', ' ']]
    """
    # A 2D board is just the N=2 case of the general renderer.
    return render_nd(game, xray=xray)
############ Code Prior to Making render_nd ############
# render = game['board'][:]
#
# for r in range(game['dimensions'][0]):
# for c in range(game['dimensions'][1]):
# if xray:
# render[r][c] = game['board'][r][c]
#
# if game['mask'][r][c] == False and not xray:
# render[r][c] = '_'
# else:
# if game['board'][r][c] == 0 and game['mask'][r][c] == True:
# render[r][c] = ' '
#
#
# # Convert to string
# render[r][c] = str(render[r][c])
# return render
def render_ascii(game, xray=False):
    """
    Render a game as ASCII art.
    Returns a string-based representation of argument 'game'. Each tile of the
    game board should be rendered as in the function 'render_2d(game)'.
    Parameters:
       game (dict): Game state
       xray (bool): Whether to reveal all tiles or just the ones allowed by
                    game['mask']
    Returns:
       A string-based representation of game
    >>> print(render_ascii({'dimensions': (2, 4),
    ...                     'state': 'ongoing',
    ...                     'board': [['.', 3, 1, 0],
    ...                               ['.', '.', 1, 0]],
    ...                     'mask':  [[True, True, True, False],
    ...                               [False, False, True, False]]}))
    .31_
    __1_
    """
    render = render_2d(game, xray)
    rows = []
    for r in range(game['dimensions'][0]):
        # '0' tiles are shown as blanks; render_2d normally emits ' ' already,
        # but keep the substitution for safety.
        rows.append("".join(" " if render[r][c] == '0' else render[r][c]
                            for c in range(game['dimensions'][1])))
    # join avoids the quadratic behavior of repeated string concatenation and
    # naturally omits a trailing newline.
    return "\n".join(rows)
# N-D IMPLEMENTATION
def initialize_tensor_board(dimensions, value):
    """
    Build an N-dimensional board (nested lists) with every cell set to value.
    Parameters:
       dimensions (tuple): dimensions of the board
       value: the fill value placed in every cell
    Returns: an nD board as nested lists
    """
    # 1D boards are a flat row of the fill value.
    if len(dimensions) == 1:
        return [value] * dimensions[0]
    # Otherwise, stack one (N-1)-dimensional board per slot of the first axis.
    return [initialize_tensor_board(dimensions[1:], value)
            for _ in range(dimensions[0])]
def get_neighbors_tensor(dimensions, cell):
    """
    Gets all of the cell's in-bound neighbors in an nD board (the cell itself
    is included, matching the original behavior).
    Parameters:
       dimensions (tuple): dimensions of the board
       cell (tuple): cell coordinates
    Returns: a list of neighbor coordinate tuples
    """
    # Candidate offsets along the first axis, clipped to the board.
    first_axis = [c for c in range(cell[0] - 1, cell[0] + 2)
                  if 0 <= c < dimensions[0]]
    # Base Case (1D board)
    if len(dimensions) == 1:
        return [(c,) for c in first_axis]
    # Recursive Case: prefix each valid first coordinate onto every neighbor
    # of the remaining axes.  (The original version reused the loop variable
    # `i` for both loops, which worked only by accident of evaluation order.)
    rest = get_neighbors_tensor(dimensions[1:], cell[1:])
    return [(c,) + tail for c in first_axis for tail in rest]
def update_cell_value_tensor(board, cell, value):
    """
    Updates a cell's value in place. Possible update values are 'bomb'
    (writes '.'), True (reveals a mask cell), and 1 (increments a count).
    Parameters:
       board (list): board of cell values
       cell (tuple): cell coordinates
       value: 'bomb', True, or 1
    Returns: nothing
    """
    # Walk down to the innermost list first.
    if len(cell) > 1:
        update_cell_value_tensor(board[cell[0]], cell[1:], value)
        return
    idx = cell[0]
    if value == 'bomb':
        # Mark a bomb on game['board'].
        board[idx] = '.'
    elif value is True:
        # Reveal a cell on game['mask'].
        board[idx] = True
    elif value == 1:
        # Bump a neighbor-bomb count on game['board'].
        board[idx] += 1
def update_render_value_tensor(board, cell, value):
    """
    Updates a render-board cell in place. 0 becomes ' ', '_' stays '_',
    and any other value is stored as its string form.
    Parameters:
       board (list): board of cell values
       cell (tuple): cell coordinates
       value: ' ', '_', or the original cell value
    Returns: nothing
    """
    # Recurse until only the final coordinate remains.
    if len(cell) > 1:
        update_render_value_tensor(board[cell[0]], cell[1:], value)
        return
    if value == 0:
        board[cell[0]] = " "
    elif value == "_":
        board[cell[0]] = "_"
    else:
        board[cell[0]] = str(value)
def get_cell_value_tensor(board, cell):
    """
    Gets a cell's value from an nD board.
    Parameters:
       board (list): board of cell values
       cell (tuple): cell coordinates
    Returns: cell value
    """
    # Descend one nesting level per coordinate, then index the final axis.
    node = board
    for idx in cell[:-1]:
        node = node[idx]
    return node[cell[-1]]
def new_game_nd(dimensions, bombs):
    """
    Start a new game.
    Return a game state dictionary, with the 'dimensions', 'state', 'board' and
    'mask' fields adequately initialized.
    Args:
       dimensions (tuple): Dimensions of the board
       bombs (list): Bomb locations as a list of lists, each an
                     N-dimensional coordinate
    Returns:
       A game state dictionary
    >>> g = new_game_nd((2, 4, 2), [(0, 0, 1), (1, 0, 0), (1, 1, 1)])
    >>> dump(g)
    board:
        [[3, '.'], [3, 3], [1, 1], [0, 0]]
        [['.', 3], [3, '.'], [1, 1], [0, 0]]
    dimensions: (2, 4, 2)
    mask:
        [[False, False], [False, False], [False, False], [False, False]]
        [[False, False], [False, False], [False, False], [False, False]]
    state: ongoing
    """
    # Fresh board of zero counts and an all-hidden mask.
    board = initialize_tensor_board(dimensions, 0)
    mask = initialize_tensor_board(dimensions, False)
    # First pass: bump the neighbor count around every bomb.  This must run
    # before any '.' markers are written, since '.' cannot be incremented.
    for bomb in bombs:
        for neighbor in get_neighbors_tensor(dimensions, bomb):
            update_cell_value_tensor(board, neighbor, 1)
    # Second pass: stamp the bombs themselves.
    for bomb in bombs:
        update_cell_value_tensor(board, bomb, 'bomb')
    return {
        'dimensions': dimensions,
        'board': board,
        'mask': mask,
        'state': 'ongoing'}
def get_all_cells_tensor(dimensions, board):
    """
    Gets all cell coordinates of an nD board recursively.
    Parameters:
       dimensions (tuple): dimensions of the board
       board (list): board of cell values (unused except for recursion shape)
    Returns: a set with all cell coordinates
    """
    # Degenerate board: no axes, no cells.
    if len(dimensions) == 0:
        return set()
    # Single axis: one coordinate tuple per slot.
    if len(dimensions) == 1:
        return {(i,) for i in range(dimensions[0])}
    # Cross every index of the first axis with every coordinate of the rest.
    tails = get_all_cells_tensor(dimensions[1:], board[1:])
    return {(head,) + tail
            for tail in tails
            for head in range(dimensions[0])}
def is_won(board, mask, cell_locs):
    """
    Checks if the game is won: every safe (non-bomb) cell is uncovered.
    Parameters:
       board (list): board of cell values
       mask (list): board of cell visibility values
       cell_locs (set): all cell coordinates
    Returns: True or False
    """
    # The original version broke out of its counting loop on the first hidden
    # safe cell and then compared partial counts; an explicit early return is
    # equivalent (the game cannot be won while a safe cell is hidden) and
    # cannot be fooled by incomplete tallies.
    for c in cell_locs:
        value = get_cell_value_tensor(board, c)
        if value != '.' and not get_cell_value_tensor(mask, c):
            return False
    # Every non-bomb cell is visible, so the game is won.
    return True
def dig_nd(game, coordinates, cell_locs = None):
    """
    Recursively dig up square at coords and neighboring squares.
    Update the mask to reveal square at coords; then recursively reveal its
    neighbors, as long as coords does not contain and is not adjacent to a
    bomb.  Return a number indicating how many squares were revealed.  No
    action should be taken and 0 returned if the incoming state of the game
    is not 'ongoing'.
    The updated state is 'defeat' when at least one bomb is visible on the
    board after digging, 'victory' when all safe squares (squares that do
    not contain a bomb) and no bombs are visible, and 'ongoing' otherwise.
    Args:
       coordinates (tuple): Where to start digging
       cell_locs (set): all cell coordinates; computed on the top-level call
           and threaded through the recursion to avoid recomputation
    Returns:
       int: number of squares revealed
    >>> g = {'dimensions': (2, 4, 2),
    ...      'board': [[[3, '.'], [3, 3], [1, 1], [0, 0]],
    ...                [['.', 3], [3, '.'], [1, 1], [0, 0]]],
    ...      'mask': [[[False, False], [False, True], [False, False], [False, False]],
    ...               [[False, False], [False, False], [False, False], [False, False]]],
    ...      'state': 'ongoing'}
    >>> dig_nd(g, (0, 3, 0))
    8
    >>> dump(g)
    board:
        [[3, '.'], [3, 3], [1, 1], [0, 0]]
        [['.', 3], [3, '.'], [1, 1], [0, 0]]
    dimensions: (2, 4, 2)
    mask:
        [[False, False], [False, True], [True, True], [True, True]]
        [[False, False], [False, False], [True, True], [True, True]]
    state: ongoing
    >>> g = {'dimensions': (2, 4, 2),
    ...      'board': [[[3, '.'], [3, 3], [1, 1], [0, 0]],
    ...                [['.', 3], [3, '.'], [1, 1], [0, 0]]],
    ...      'mask': [[[False, False], [False, True], [False, False], [False, False]],
    ...               [[False, False], [False, False], [False, False], [False, False]]],
    ...      'state': 'ongoing'}
    >>> dig_nd(g, (0, 0, 1))
    1
    >>> dump(g)
    board:
        [[3, '.'], [3, 3], [1, 1], [0, 0]]
        [['.', 3], [3, '.'], [1, 1], [0, 0]]
    dimensions: (2, 4, 2)
    mask:
        [[False, True], [False, True], [False, False], [False, False]]
        [[False, False], [False, False], [False, False], [False, False]]
    state: defeat
    """
    # Get cell locations once (top-level call only); use `is None`, not
    # `== None`, for the sentinel check.
    if cell_locs is None:
        cell_locs = get_all_cells_tensor(game['dimensions'], game['board'])
    # If game is over or won or cell has been revealed already do nothing
    if game['state'] == 'defeat' or game['state'] == 'victory' or get_cell_value_tensor(game['mask'], coordinates):
        return 0
    update_cell_value_tensor(game['mask'], coordinates, True)
    revealed = 1
    # Base Case: digging a bomb ends the game immediately.
    if get_cell_value_tensor(game['board'], coordinates) == '.':
        game['state'] = 'defeat'
        return revealed
    # Recursive Case: a zero cell floods outward to all of its neighbors.
    if get_cell_value_tensor(game['board'], coordinates) == 0:
        neighbors = get_neighbors_tensor(game['dimensions'], coordinates)
        for n in neighbors:
            revealed += dig_nd(game, n, cell_locs)
    # Check for victory
    if is_won(game['board'], game['mask'], cell_locs):
        game['state'] = 'victory'
    return revealed
def render_nd(game, xray=False):
    """
    Prepare the game for display.
    Returns an N-dimensional array (nested lists) of '_' (hidden squares),
    '.' (bombs), ' ' (empty squares), or '1', '2', etc. (squares
    neighboring bombs).  The mask indicates which squares should be
    visible.  If xray is True (the default is False), the mask is ignored
    and all cells are shown.
    Args:
       xray (bool): Whether to reveal all tiles or just the ones allowed by
                    the mask
    Returns:
       An n-dimensional array of strings (nested lists)
    >>> g = {'dimensions': (2, 4, 2),
    ...      'board': [[[3, '.'], [3, 3], [1, 1], [0, 0]],
    ...                [['.', 3], [3, '.'], [1, 1], [0, 0]]],
    ...      'mask': [[[False, False], [False, True], [True, True], [True, True]],
    ...               [[False, False], [False, False], [True, True], [True, True]]],
    ...      'state': 'ongoing'}
    >>> render_nd(g, False)
    [[['_', '_'], ['_', '3'], ['1', '1'], [' ', ' ']],
     [['_', '_'], ['_', '_'], ['1', '1'], [' ', ' ']]]
    >>> render_nd(g, True)
    [[['3', '.'], ['3', '3'], ['1', '1'], [' ', ' ']],
     [['.', '3'], ['3', '.'], ['1', '1'], [' ', ' ']]]
    """
    render = initialize_tensor_board(game['dimensions'], 0)
    for loc in get_all_cells_tensor(game['dimensions'], game['board']):
        hidden = not xray and not get_cell_value_tensor(game['mask'], loc)
        if hidden:
            # Covered cell and no xray: mask it out.
            update_render_value_tensor(render, loc, '_')
        else:
            # Visible cell: show the board value; the update helper turns a
            # numeric 0 into ' ' and stringifies everything else.
            update_render_value_tensor(render, loc,
                                       get_cell_value_tensor(game['board'], loc))
    return render
if __name__ == "__main__":
    # Test with doctests. Helpful to debug individual lab.py functions.
    import doctest
    _doctest_flags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
    # All test harness calls below are intentionally commented out; uncomment
    # the one you need while debugging a specific function.
    #doctest.testmod(optionflags=_doctest_flags) #runs ALL doctests
    #doctest.run_docstring_examples(render_2d, globals(), optionflags=_doctest_flags, verbose=False)
    # doctest.run_docstring_examples(new_game_2d, globals(), optionflags=_doctest_flags, verbose=True)
    # doctest.run_docstring_examples(dig_2d, globals(), optionflags=_doctest_flags, verbose=True)
    # doctest.run_docstring_examples(dig_nd, globals(), optionflags=_doctest_flags, verbose=False)
    # doctest.run_docstring_examples(render_nd, globals(), optionflags=_doctest_flags, verbose=False)
    #######################################################
    #### Test 1: Testing render_ascii ####
    # game = {'dimensions': (2, 4),
    #          'state': 'ongoing',
    #          'board': [['.', 3, 1, 0],
    #                    ['.', '.', 1, 0]],
    #          'mask':  [[True, True, True, False],
    #                    [False, False, True, False]]}
    # print(render_ascii(game, xray=False))
    #######################################################
    #### Test 2: Testing 2d board and mask initializers ####
    # print(initialize_2d_board(2, 4))
    # print(initialize_2d_mask(2, 4))
    #######################################################
    #### Test 3: Testing get_neighbors_2d ####
    # bomb = (0,0)
    # rows, cols = (2, 4)
    # print(get_neighbors_2d(rows, cols, bomb))
    #######################################################
    #### Test 4: Testing get_all_cells_2d ####
    # num_rows = 2
    # num_cols = 4
    # print(get_all_cells_2d(num_rows, num_cols))
    #######################################################
    #### Test 5: Testing get_neighbors_tensor ####
    # print(get_neighbors_tensor((2, 4), (0,0)))
    #######################################################
    #### Test 6: Testing get_all_cells_tensor ####
    # board = [['.', 3, 1, 0], ['.', '.', 1, 0]]
    # print(get_all_cells_tensor((2, 2), board))
    #######################################################
|
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'RegistryScanningConfigurationRuleArgs',
'RegistryScanningConfigurationRuleRepositoryFilterArgs',
'ReplicationConfigurationReplicationConfigurationArgs',
'ReplicationConfigurationReplicationConfigurationRuleArgs',
'ReplicationConfigurationReplicationConfigurationRuleDestinationArgs',
'ReplicationConfigurationReplicationConfigurationRuleRepositoryFilterArgs',
'RepositoryEncryptionConfigurationArgs',
'RepositoryImageScanningConfigurationArgs',
]
@pulumi.input_type
class RegistryScanningConfigurationRuleArgs:
    """Input arguments for a single ECR registry scanning rule.
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 repository_filters: pulumi.Input[Sequence[pulumi.Input['RegistryScanningConfigurationRuleRepositoryFilterArgs']]],
                 scan_frequency: pulumi.Input[str]):
        """
        :param pulumi.Input[Sequence[pulumi.Input['RegistryScanningConfigurationRuleRepositoryFilterArgs']]] repository_filters: One or more repository filter blocks, containing a `filter` (required string filtering repositories, see pattern regex [here](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_ScanningRepositoryFilter.html)) and a `filter_type` (required string, currently only `WILDCARD` is supported).
        :param pulumi.Input[str] scan_frequency: The frequency that scans are performed at for a private registry. Can be `SCAN_ON_PUSH`, `CONTINUOUS_SCAN`, or `MANUAL`.
        """
        pulumi.set(__self__, "repository_filters", repository_filters)
        pulumi.set(__self__, "scan_frequency", scan_frequency)
    @property
    @pulumi.getter(name="repositoryFilters")
    def repository_filters(self) -> pulumi.Input[Sequence[pulumi.Input['RegistryScanningConfigurationRuleRepositoryFilterArgs']]]:
        """
        One or more repository filter blocks, containing a `filter` (required string filtering repositories, see pattern regex [here](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_ScanningRepositoryFilter.html)) and a `filter_type` (required string, currently only `WILDCARD` is supported).
        """
        return pulumi.get(self, "repository_filters")
    @repository_filters.setter
    def repository_filters(self, value: pulumi.Input[Sequence[pulumi.Input['RegistryScanningConfigurationRuleRepositoryFilterArgs']]]):
        pulumi.set(self, "repository_filters", value)
    @property
    @pulumi.getter(name="scanFrequency")
    def scan_frequency(self) -> pulumi.Input[str]:
        """
        The frequency that scans are performed at for a private registry. Can be `SCAN_ON_PUSH`, `CONTINUOUS_SCAN`, or `MANUAL`.
        """
        return pulumi.get(self, "scan_frequency")
    @scan_frequency.setter
    def scan_frequency(self, value: pulumi.Input[str]):
        pulumi.set(self, "scan_frequency", value)
@pulumi.input_type
class RegistryScanningConfigurationRuleRepositoryFilterArgs:
    """Repository filter attached to a registry scanning rule.
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 filter: pulumi.Input[str],
                 filter_type: pulumi.Input[str]):
        """
        :param pulumi.Input[str] filter: Repository filter pattern (see the sibling rule args for the expected format).
        :param pulumi.Input[str] filter_type: Filter type; per the rule docs above, currently only `WILDCARD` is supported.
        """
        pulumi.set(__self__, "filter", filter)
        pulumi.set(__self__, "filter_type", filter_type)
    @property
    @pulumi.getter
    def filter(self) -> pulumi.Input[str]:
        return pulumi.get(self, "filter")
    @filter.setter
    def filter(self, value: pulumi.Input[str]):
        pulumi.set(self, "filter", value)
    @property
    @pulumi.getter(name="filterType")
    def filter_type(self) -> pulumi.Input[str]:
        return pulumi.get(self, "filter_type")
    @filter_type.setter
    def filter_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "filter_type", value)
@pulumi.input_type
class ReplicationConfigurationReplicationConfigurationArgs:
    """Top-level replication configuration wrapper holding a single rule block.
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 rule: pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs']):
        """
        :param pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs'] rule: The replication rules for a replication configuration. A maximum of 10 are allowed per `replication_configuration`. See Rule
        """
        pulumi.set(__self__, "rule", rule)
    @property
    @pulumi.getter
    def rule(self) -> pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs']:
        """
        The replication rules for a replication configuration. A maximum of 10 are allowed per `replication_configuration`. See Rule
        """
        return pulumi.get(self, "rule")
    @rule.setter
    def rule(self, value: pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleArgs']):
        pulumi.set(self, "rule", value)
@pulumi.input_type
class ReplicationConfigurationReplicationConfigurationRuleArgs:
    """One replication rule: required destinations plus optional repository filters.
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 destinations: pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]],
                 repository_filters: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleRepositoryFilterArgs']]]] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]] destinations: the details of a replication destination. A maximum of 25 are allowed per `rule`. See Destination.
        :param pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleRepositoryFilterArgs']]] repository_filters: filters for a replication rule. See Repository Filter.
        """
        pulumi.set(__self__, "destinations", destinations)
        # Only set the optional field when provided so the resource diff stays clean.
        if repository_filters is not None:
            pulumi.set(__self__, "repository_filters", repository_filters)
    @property
    @pulumi.getter
    def destinations(self) -> pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]]:
        """
        the details of a replication destination. A maximum of 25 are allowed per `rule`. See Destination.
        """
        return pulumi.get(self, "destinations")
    @destinations.setter
    def destinations(self, value: pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleDestinationArgs']]]):
        pulumi.set(self, "destinations", value)
    @property
    @pulumi.getter(name="repositoryFilters")
    def repository_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleRepositoryFilterArgs']]]]:
        """
        filters for a replication rule. See Repository Filter.
        """
        return pulumi.get(self, "repository_filters")
    @repository_filters.setter
    def repository_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ReplicationConfigurationReplicationConfigurationRuleRepositoryFilterArgs']]]]):
        pulumi.set(self, "repository_filters", value)
@pulumi.input_type
class ReplicationConfigurationReplicationConfigurationRuleDestinationArgs:
    """Replication destination: target region and registry account.
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 region: pulumi.Input[str],
                 registry_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] region: A Region to replicate to.
        :param pulumi.Input[str] registry_id: The account ID of the destination registry to replicate to.
        """
        pulumi.set(__self__, "region", region)
        pulumi.set(__self__, "registry_id", registry_id)
    @property
    @pulumi.getter
    def region(self) -> pulumi.Input[str]:
        """
        A Region to replicate to.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: pulumi.Input[str]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter(name="registryId")
    def registry_id(self) -> pulumi.Input[str]:
        """
        The account ID of the destination registry to replicate to.
        """
        return pulumi.get(self, "registry_id")
    @registry_id.setter
    def registry_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "registry_id", value)
@pulumi.input_type
class ReplicationConfigurationReplicationConfigurationRuleRepositoryFilterArgs:
    """Repository filter for a replication rule (prefix-match only).
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 filter: pulumi.Input[str],
                 filter_type: pulumi.Input[str]):
        """
        :param pulumi.Input[str] filter: The repository filter details.
        :param pulumi.Input[str] filter_type: The repository filter type. The only supported value is `PREFIX_MATCH`, which is a repository name prefix specified with the filter parameter.
        """
        pulumi.set(__self__, "filter", filter)
        pulumi.set(__self__, "filter_type", filter_type)
    @property
    @pulumi.getter
    def filter(self) -> pulumi.Input[str]:
        """
        The repository filter details.
        """
        return pulumi.get(self, "filter")
    @filter.setter
    def filter(self, value: pulumi.Input[str]):
        pulumi.set(self, "filter", value)
    @property
    @pulumi.getter(name="filterType")
    def filter_type(self) -> pulumi.Input[str]:
        """
        The repository filter type. The only supported value is `PREFIX_MATCH`, which is a repository name prefix specified with the filter parameter.
        """
        return pulumi.get(self, "filter_type")
    @filter_type.setter
    def filter_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "filter_type", value)
@pulumi.input_type
class RepositoryEncryptionConfigurationArgs:
    """Encryption settings for an ECR repository (AES256 or KMS).
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 encryption_type: Optional[pulumi.Input[str]] = None,
                 kms_key: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] encryption_type: The encryption type to use for the repository. Valid values are `AES256` or `KMS`. Defaults to `AES256`.
        :param pulumi.Input[str] kms_key: The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR.
        """
        # Both fields are optional; only set the ones that were provided.
        if encryption_type is not None:
            pulumi.set(__self__, "encryption_type", encryption_type)
        if kms_key is not None:
            pulumi.set(__self__, "kms_key", kms_key)
    @property
    @pulumi.getter(name="encryptionType")
    def encryption_type(self) -> Optional[pulumi.Input[str]]:
        """
        The encryption type to use for the repository. Valid values are `AES256` or `KMS`. Defaults to `AES256`.
        """
        return pulumi.get(self, "encryption_type")
    @encryption_type.setter
    def encryption_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "encryption_type", value)
    @property
    @pulumi.getter(name="kmsKey")
    def kms_key(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the KMS key to use when `encryption_type` is `KMS`. If not specified, uses the default AWS managed key for ECR.
        """
        return pulumi.get(self, "kms_key")
    @kms_key.setter
    def kms_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_key", value)
@pulumi.input_type
class RepositoryImageScanningConfigurationArgs:
    """Image-scanning toggle for an ECR repository.
    Generated by the Pulumi Terraform Bridge (tfgen) — do not edit by hand.
    """
    def __init__(__self__, *,
                 scan_on_push: pulumi.Input[bool]):
        """
        :param pulumi.Input[bool] scan_on_push: Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false).
        """
        pulumi.set(__self__, "scan_on_push", scan_on_push)
    @property
    @pulumi.getter(name="scanOnPush")
    def scan_on_push(self) -> pulumi.Input[bool]:
        """
        Indicates whether images are scanned after being pushed to the repository (true) or not scanned (false).
        """
        return pulumi.get(self, "scan_on_push")
    @scan_on_push.setter
    def scan_on_push(self, value: pulumi.Input[bool]):
        pulumi.set(self, "scan_on_push", value)
|
<reponame>osswangxining/iot-app-enabler-conversation<filename>conversationinsights-mynlu/mynlu/pipeline/__init__.py
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import os
from collections import defaultdict
import importlib
import pkg_resources
import typing
from builtins import object
import inspect
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
logger = logging.getLogger(__name__)
def validate_requirements(plugins, dev_requirements_file="dev-requirements.txt"):
    # type: (List[Text], Text) -> None
    """Ensure every python package needed by the given plugins is importable.
    Raises an Exception naming the unimportable packages; when the dev
    requirements file is readable, the exact pinned requirements are included
    in the message.
    """
    from mynlu import registry
    failed_imports = set()
    for plugin_name in plugins:
        required = registry.get_plugin(plugin_name).required_packages()
        failed_imports.update(_find_unavailable_packages(required))
    if not failed_imports:
        return
    # if available, use the development file to figure out the correct version numbers for each requirement
    all_requirements = _read_dev_requirements(dev_requirements_file)
    if all_requirements:
        missing_requirements = [r for i in failed_imports for r in all_requirements[i]]
        raise Exception("Not all required packages are installed. " +
                        "Failed to find the following imports {}. ".format(", ".join(failed_imports)) +
                        "To use this pipeline, you need to install the missing dependencies, including:\n\t" +
                        "{}".format(" ".join(missing_requirements)))
    raise Exception("Not all required packages are installed. " +
                    "To use this pipeline, you need to install the missing dependencies. " +
                    "Please install {}".format(", ".join(failed_imports)))
def _find_unavailable_packages(package_names):
# type: (List[Text]) -> Set[Text]
failed_imports = set()
for package in package_names:
try:
importlib.import_module(package)
except ImportError:
failed_imports.add(package)
return failed_imports
def _read_dev_requirements(file_name):
    """Read the packaged dev-requirements file and parse it into sections.

    Returns a defaultdict(list) mapping section headings to requirement
    lines; empty when the file cannot be read.
    """
    try:
        raw = pkg_resources.resource_string("mynlu", "../" + file_name)
        # resource_string returns bytes on Python 3; the previous code split
        # the bytes with a str separator, which raised a TypeError that the
        # broad except silently swallowed.  Decode first.
        req_lines = raw.decode("utf-8").split("\n")
    except Exception as e:
        logger.info("Couldn't read dev-requirements.txt. Error: {}".format(e))
        req_lines = []
    return _requirements_from_lines(req_lines)
def _requirements_from_lines(req_lines):
requirements = defaultdict(list)
current_name = None
for req_line in req_lines:
if req_line.startswith("#"):
current_name = req_line[1:].strip(' \n')
elif current_name is not None:
requirements[current_name].append(req_line.strip(' \n'))
return requirements
def validate_arguments(pipeline, context, allow_empty_pipeline=False):
    # type: (List[Component], Dict[Text, Any], bool) -> None
    """Validates a pipeline before it is run.

    Walks the pipeline in order, checking that each plugin's required
    properties are provided either by the initial context or by an earlier
    plugin's `provides`.
    """
    if not pipeline and not allow_empty_pipeline:
        raise ValueError("Can not train an empty pipeline. " +
                         "Make sure to specify a proper pipeline in the configuration using the `pipeline` key." +
                         "The `backend` configuration key is NOT supported anymore.")
    available = set(context.keys())
    for plugin in pipeline:
        missing = [req for req in plugin.requires if req not in available]
        if missing:
            raise Exception("Failed to validate at plugin '{}'. Missing property: '{}'".format(
                plugin.name, missing[0]))
        available.update(plugin.provides)
class MissingArgumentError(ValueError):
    """Raised when a function is called and not all parameters can be filled from the context / config.
    Attributes:
        message -- explanation of which parameter is missing
    """
    def __init__(self, message):
        # type: (Text) -> None
        # Keep the message on the instance so __str__ can echo it verbatim.
        self.message = message
        super(MissingArgumentError, self).__init__(message)
    def __str__(self):
        return self.message
|
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
from collections import OrderedDict
import json
import math
def main():
    """Parse CLI arguments, build and train a classifier, save a checkpoint."""
    # (removed a leftover `pass` stub that preceded the real body)
    arguments = get_args()
    data_dir = arguments.data_directory
    save_dir = arguments.save_dir
    arch = arguments.arch
    learning_rate = arguments.learning_rate
    epochs = arguments.epochs
    hidden_layers = arguments.hidden_units
    gpu = arguments.gpu
    outputs = arguments.outputs
    # Fall back to CPU when CUDA is unavailable even if --gpu was requested.
    if gpu:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device("cpu")
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    dataloaders, image_datasets = load_datasets(train_dir, valid_dir, test_dir)
    model = build_model(nn_type=arch, outputs=outputs, hidden_layers=hidden_layers)
    model.to(device)
    criterion = nn.NLLLoss()
    # Only the freshly-built classifier's parameters are optimized.
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    print('Training start')
    # Bug fix: validate on the validation split during training; the previous
    # code passed dataloaders['test'] here and never used the 'valid' loader,
    # leaking the test set into training-time evaluation.
    train(model, device, criterion, optimizer, dataloaders['train'], dataloaders['valid'], epochs=epochs, print_every=20)
    print('Training finish')
    save_checkpoint(model, arch, hidden_layers, image_datasets, model_name=save_dir)
def load_datasets(train_dir, valid_dir, test_dir):
    """Build the train/valid/test ImageFolder datasets and their DataLoaders.

    Training data gets augmentation (random rotation/flip/crop); validation
    and test data only get the deterministic resize/center-crop pipeline.
    Returns (dataloaders, image_datasets), both keyed by split name.
    """
    # ImageNet channel statistics used by the pretrained backbones.
    normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    eval_pipeline = transforms.Compose([transforms.Resize(255),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        normalize])
    data_transforms = {
        'train': transforms.Compose([transforms.RandomRotation(30),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.RandomResizedCrop(224),
                                     transforms.ToTensor(),
                                     normalize]),
        'valid': eval_pipeline,
        'test': eval_pipeline
    }
    split_dirs = {'train': train_dir, 'valid': valid_dir, 'test': test_dir}
    image_datasets = {
        split: datasets.ImageFolder(path, transform=data_transforms[split])
        for split, path in split_dirs.items()
    }
    batch_sizes = {'train': 64, 'valid': 32, 'test': 32}
    dataloaders = {
        split: torch.utils.data.DataLoader(image_datasets[split],
                                           batch_size=batch_sizes[split],
                                           shuffle=True)
        for split in split_dirs
    }
    return dataloaders, image_datasets
def get_in_features(model):
    """Return the input width of the first Linear layer in model.classifier.

    Returns None when the classifier contains no Linear layer.  Uses
    isinstance (rather than an exact type comparison) so Linear subclasses
    are recognized too.
    """
    for layer in model.classifier:
        if isinstance(layer, nn.Linear):
            return layer.in_features
    return None
def build_model(nn_type='vgg16', outputs=102, hidden_layers=None):
    """Load a pretrained torchvision backbone and replace its classifier.

    Args:
        nn_type: torchvision model name (e.g. 'vgg16', 'alexnet').
        outputs: number of output classes.
        hidden_layers: list of hidden layer widths; defaults to [1000].
    Returns:
        The model with a fresh Linear/ReLU/Dropout classifier ending in
        LogSoftmax (pair with nn.NLLLoss).
    """
    # Avoid the mutable-default-argument pitfall of the original signature;
    # callers passing an explicit list are unaffected.
    if hidden_layers is None:
        hidden_layers = [1000]
    model = getattr(models, nn_type)(pretrained=True)
    in_features = get_in_features(model)
    sizes = [in_features] + list(hidden_layers)
    layers = []
    # One Linear/ReLU/Dropout group per hidden layer.
    for i in range(len(sizes) - 1):
        layers.append((f'fc{i}', nn.Linear(sizes[i], sizes[i + 1])))
        layers.append((f'relu{i}', nn.ReLU()))
        layers.append((f'dropout{i}', nn.Dropout()))
    # Final projection to the class scores.
    layers.append(('fc_last', nn.Linear(sizes[-1], outputs)))
    layers.append(('output', nn.LogSoftmax(dim=1)))
    # Replace the pretrained classifier (removed the leftover debug print).
    model.classifier = nn.Sequential(OrderedDict(layers))
    return model
def validate(model, device, criterion, testing_set):
    """Evaluate *model* on *testing_set*.

    Puts the model in eval mode for the pass (restoring train mode after)
    and returns (mean loss per batch, mean accuracy per batch).
    """
    model.eval()
    total_loss = 0.0
    total_accuracy = 0.0
    with torch.no_grad():
        for inputs, labels in testing_set:
            inputs, labels = inputs.to(device), labels.to(device)
            log_probs = model.forward(inputs)
            total_loss += criterion(log_probs, labels).item()
            # Accuracy: compare the top-1 prediction against the labels.
            probabilities = torch.exp(log_probs)
            _, predicted = probabilities.topk(1, dim=1)
            matches = predicted == labels.view(*predicted.shape)
            total_accuracy += torch.mean(matches.type(torch.FloatTensor)).item()
    model.train()
    return total_loss / len(testing_set), total_accuracy / len(testing_set)
def train(model, device, criterion, optimizer, traning_set, testing_set, epochs=5, print_every = 20):
steps = 0
running_loss = 0
for epoch in range(epochs):
for inputs, labels in traning_set:
steps += 1
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
logps = model.forward(inputs)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
test_loss, accuracy = validate(model, device, criterion, testing_set)
print(f"Epoch {epoch+1}/{epochs}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Test loss: {test_loss:.3f}.. "
f"Test accuracy: {accuracy:.3f}")
running_loss = 0
def save_checkpoint(model, arch, hidden_layers, image_datasets, model_name = "checkpoint.pt"):
    """Persist the trained model plus the metadata needed to rebuild it."""
    # Remember the class-to-index mapping used by the training dataset.
    model.class_to_idx = image_datasets['train'].class_to_idx
    state = {
        'arch': arch,
        'class_to_idx': model.class_to_idx,
        'state_dict': model.state_dict(),
        'hidden_layers': hidden_layers,
    }
    torch.save(state, model_name)
    print('saving checkpoint as: {}'.format(model_name))
def _str_to_bool(value):
    """Parse a command-line boolean; accepts true/1/t/yes in any case."""
    return str(value).strip().lower() in ('true', '1', 't', 'yes')
def get_args():
    """
    Get arguments from command line.

    Returns:
        argparse.Namespace holding the training configuration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_directory", type=str, default='flowers', help="data directory containing training and testing data")
    parser.add_argument("--save_dir", type=str, default="checkpoint.pt", help="directory where to save trained model and hyperparameters")
    parser.add_argument("--arch", type=str, default="vgg16", help="pre-trained model: vgg16, alexnet")
    parser.add_argument("--epochs", type=int, default=3, help="number of epochs to train model")
    # BUG FIX: type=list split the argument string into characters;
    # nargs='+' with type=int parses "--hidden_units 768 384" correctly.
    parser.add_argument("--hidden_units", type=int, nargs='+', default=[768, 384], help="list of hidden layers")
    parser.add_argument("--learning_rate", type=float, default=0.001, help="learning rate")
    # BUG FIX: type=bool is always True for any non-empty string
    # (bool("False") is True); parse the text explicitly instead.
    parser.add_argument("--gpu", type=_str_to_bool, default=True, help="use GPU or CPU to train model: True = GPU, False = CPU")
    parser.add_argument("--outputs", type=int, default=102, help="enter output size")
    return parser.parse_args()
# Entry point: run the training pipeline only when executed as a script.
if __name__ == "__main__":
    main()
|
# Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import pickle
from .input_reader import InputReader
class Floris():
    """
    Top-level class that contains a Floris model.

    Floris is the highest level class of the Floris package. Import
    this class and instantiate it with a path to an input file to
    create a Floris model. Use the ``farm`` attribute to access other
    objects within the model.

    Args:
        input_file: A string that is the path to the json input file.
        input_dict: A dictionary as generated from the input_reader.

    Returns:
        Floris: An instantiated Floris object.
    """
    def __init__(self, input_file=None, input_dict=None):
        self.input_reader = InputReader()
        self.input_file = input_file
        self.input_dict = input_dict
        # Farms are always stored internally as a list.
        self._farm = []
        self.add_farm(
            input_file=self.input_file,
            input_dict=self.input_dict
        )
    @property
    def farm(self):
        """
        Property of the Floris object that returns the farm(s)
        contained within the object.

        Returns:
            Farm: A Farm object, or if multiple farms, a list of Farm
            objects.

        Examples:
            To get the Farm object(s) stored in a Floris object:

            >>> farms = floris.farm
        """
        # Unwrap the list when there is exactly one farm.
        if len(self._farm) == 1:
            return self._farm[0]
        else:
            return self._farm
    @farm.setter
    def farm(self, value):
        # BUG FIX: the original called hasattr() with a single argument,
        # which raises TypeError unconditionally and made this setter
        # (and import_pickle) unusable. Store farms as a list so the
        # property above can unwrap a single farm.
        if not isinstance(value, list):
            value = [value]
        self._farm = value
    def add_farm(self, input_file=None, input_dict=None):
        """
        A method that adds a farm with user-defined input file to the
        Floris object.

        Returns:
            *None* -- The :py:class:`floris.simulation.floris` object
            is updated directly.

        Examples:
            To add a farm to a Floris object using a specific input
            file:

            >>> floris.add_farm(input_file='input.json')

            To add a farm to a Floris object using the stored input
            file:

            >>> floris.add_farm()
        """
        self._farm.append(self.input_reader.read(input_file=input_file,
                                                 input_dict=input_dict))
    def list_farms(self):
        """
        A method that lists the farms and relevant farm details stored
        in Floris object.

        Returns:
            *None* -- The farm information is printed to the console.

        Examples:
            To list the current farms in Floris object:

            >>> floris.list_farms()
        """
        for num, farm in enumerate(self._farm):
            print('Farm {}'.format(num))
            print(farm)
    def export_pickle(self, pickle_file):
        """
        A method that exports a farm to a pickle file.

        Returns:
            *None* -- Creates a pickle file.

        Examples:
            To export a farm to a pickle file:

            >>> floris.export_pickle('saved_farm.p')
        """
        # Context manager so the file handle is always closed
        # (the original left the handle open).
        with open(pickle_file, "wb") as f:
            pickle.dump(self.farm, f)
    def import_pickle(self, pickle_file):
        """
        A method that imports a farm from a pickle file.

        Returns:
            *None* - Loads the farm into the
            :py:class:`floris.simulation.floris.farm` object.

        Examples:
            To load a pickled farm:

            >>> floris.import_pickle('saved_farm.p')
        """
        with open(pickle_file, "rb") as f:
            self.farm = pickle.load(f)
|
<reponame>bshishov/DeepForecasting<filename>models/lstm_conv.py
import keras
import matplotlib.pyplot as plt
import numpy as np
from keras import layers
import metrics
import processing
import utils
from layers.conv_deform_1d import ConvDeform1D, describe_deform_layer
class CustomLSTM(keras.layers.LSTM):
    # Placeholder subclass: currently identical to keras.layers.LSTM.
    # Kept as an extension point for custom recurrent behaviour.
    pass
def create_model(input_shape: tuple,
                 batch_size=1,
                 stateful: bool=True,
                 conv_units: int = 64,
                 conv_blocks: int = 1,
                 use_deform: bool = False,
                 return_sequences: bool=True):
    """Build and compile a dilated 1D-convolution forecasting model.

    NOTE(review): `stateful` is currently unused in the active code path
    (it belonged to a commented-out LSTM variant) — confirm before removing.
    """
    full_shape = (batch_size, ) + input_shape
    print('Model batch input shape: {0}'.format(full_shape))
    net = keras.models.Sequential()
    net.add(layers.InputLayer(batch_input_shape=full_shape))
    # Entry convolution with 'same' padding keeps the sequence length.
    net.add(layers.Conv1D(conv_units, kernel_size=3, padding='same', activation='relu'))
    # Stack of (optionally deformable) convolutions with doubling dilation.
    for block in range(conv_blocks):
        dilation = 2 ** block
        if use_deform:
            net.add(ConvDeform1D(conv_units,
                                 kernel_size=2,
                                 kernel_initializer=keras.initializers.RandomNormal(0, 0.001),
                                 dilation_rate=dilation))
        net.add(layers.Conv1D(conv_units,
                              kernel_size=2,
                              padding='valid',
                              dilation_rate=dilation,
                              activation='relu'))
    if return_sequences:
        # One linear prediction per time step.
        net.add(layers.TimeDistributed(layers.Dense(1, activation='linear')))
    else:
        # Single linear prediction for the whole window.
        net.add(layers.Flatten())
        net.add(layers.Dense(1, activation='linear', use_bias=True))
    net.compile(optimizer='adam', loss='mse')
    return net
def lstm_states(model: keras.models.Sequential):
    """Return the state tensors of the last LSTM layer in `model`.

    Returns None when the model contains no LSTM layer.

    BUG FIX: the original accessed `lstm_layer.states` but never
    returned the value (and raised IndexError when no LSTM was present).
    """
    lstm_layers = [l for l in model.layers if isinstance(l, keras.layers.LSTM)]
    if not lstm_layers:
        return None
    return lstm_layers[-1].states
def main():
    """Train the conv forecasting model on one Yahoo A4 benchmark series,
    plot predictions vs. ground truth, and print test-set metrics."""
    # NOTE(review): hard-coded local Windows path — adjust before running elsewhere.
    path = 'D:\\ydata-labeled-time-series-anomalies-v1_0\\A4Benchmark\\A4Benchmark-TS3.csv'
    # Experiment configuration.
    window = 64              # input window length (time steps)
    epochs = 100             # maximum number of training epochs
    stop_loss = 0.001        # early-stop threshold on mean epoch loss
    batch_size = 64
    is_stateful = True
    return_sequences = False
    window_stride = 1
    use_time_diff = False
    train_test_split = 0.6   # fraction of the series used for training
    # Raw time-series: second CSV column, header skipped.
    raw_ts = np.genfromtxt(path, delimiter=',', skip_header=1)[:, 1]
    data = processing.TimeSeries(raw_ts,
                                 window=window,
                                 window_stride=window_stride,
                                 return_sequences=return_sequences,
                                 train_test_split=train_test_split,
                                 use_time_diff=use_time_diff)
    # Create model
    model = create_model(input_shape=data.input_shape,
                         batch_size=batch_size,
                         stateful=is_stateful,
                         return_sequences=return_sequences)
    model.summary()
    # Inspect the deformable-conv layer (if any) before training.
    describe_deform_layer(model, data.x_train[:batch_size])
    model.reset_states()
    # Manual training loop so recurrent state can be reset between epochs.
    for epoch in range(epochs):
        losses = []
        for t, x_batch, y_batch in data.train_samples_generator(batch_size=batch_size, shuffle=True):
            loss = model.train_on_batch(x_batch, y_batch)
            losses.append(loss)
        epoch_loss = np.mean(losses)
        print('Epoch {0}/{1} Loss: {2:.4f}'.format(epoch, epochs, epoch_loss))
        model.reset_states()
        # Early stopping once the epoch loss is small enough.
        if epoch_loss < stop_loss:
            break
    # Inspect the deformable-conv layer again after training.
    describe_deform_layer(model, data.x_train[:batch_size])
    # Predict over the full series (train + test) batch by batch.
    y_true = []
    y_pred = []
    t_all = []
    for t, x_batch, y_batch in data.all_samples_generator(batch_size=batch_size):
        predicted_part = model.predict_on_batch(x_batch)
        y_pred += list(predicted_part)
        y_true += list(y_batch)
        t_all += list(t)
    # Undo scaling/differencing so values are in the original units.
    y_pred = data.inverse_transform_predictions(np.array(t_all), np.array(y_pred))
    y_true = data.inverse_transform_predictions(np.array(t_all), np.array(y_true))
    plt.plot(y_pred, label='Predicted')
    plt.plot(y_true, label='True')
    plt.legend()
    plt.grid()
    plt.show()
    # Compute metrics on the held-out (test) portion only.
    _, test_y = utils.split(y_true, ratio=train_test_split)
    _, test_y_pred = utils.split(y_pred, ratio=train_test_split)
    import pprint
    metric_results = metrics.evaluate_all(test_y, test_y_pred)
    pprint.pprint(metric_results)
# Script entry point.
if __name__ == '__main__':
    main()
|
<filename>cnn/models/resnet_imagenet.py
# modified from https://github.com/fastai/imagenet-fast/blob/master/imagenet_nv/models/resnet.py
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from .layers import Flatten
from .butterfly_conv import ButterflyConv2d, ButterflyConv2dBBT
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 and no bias (BatchNorm supplies the shift)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
def butterfly3x3(in_planes, planes, stride=1, structure_type='B', nblocks=1,
                 param='regular'):
    """3x3 butterfly-structured convolution: 'B' single or 'BBT' stacked."""
    if structure_type == 'B':
        return ButterflyConv2d(in_planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False, tied_weight=False,
                               ortho_init=True, param=param)
    if structure_type == 'BBT':
        return ButterflyConv2dBBT(in_planes, planes, kernel_size=3, stride=stride,
                                  padding=1, bias=False, nblocks=nblocks,
                                  tied_weight=False, ortho_init=True, param=param)
    raise ValueError("Structure type isn't supported.")
def butterfly1x1(in_planes, planes, stride=1, structure_type='B', nblocks=1,
                 param='regular'):
    """1x1 butterfly-structured convolution: 'B' single or 'BBT' stacked."""
    if structure_type == 'B':
        return ButterflyConv2d(in_planes, planes, kernel_size=1, stride=stride,
                               bias=False, tied_weight=False, ortho_init=True,
                               param=param)
    if structure_type == 'BBT':
        return ButterflyConv2dBBT(in_planes, planes, kernel_size=1, stride=stride,
                                  bias=False, nblocks=nblocks, tied_weight=False,
                                  ortho_init=True, param=param)
    raise ValueError("Structure type isn't supported.")
def bn1(planes):
    """BatchNorm1d initialized to the identity transform (weight=1, bias=0)."""
    norm = nn.BatchNorm1d(planes)
    norm.weight.data.fill_(1)
    norm.bias.data.zero_()
    return norm
def bn(planes, init_zero=False):
    """BatchNorm2d; with init_zero the scale starts at 0 (zero-init residual)."""
    norm = nn.BatchNorm2d(planes)
    initial_scale = 0 if init_zero else 1
    norm.weight.data.fill_(initial_scale)
    norm.bias.data.zero_()
    return norm
class BasicBlock(nn.Module):
    """Two-conv residual block; the convs may be butterfly-structured.

    NOTE(review): the forward order is conv -> relu -> bn, with the
    shortcut added before the final bn — this deviates from canonical
    ResNet and appears intentional in the fastai recipe this file is
    derived from.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, is_structured=False, structure_type='B', nblocks=1,
                 param='regular'):
        super().__init__()
        # First 3x3 conv (may downsample via stride).
        if is_structured:
            self.conv1 = butterfly3x3(inplanes, planes, stride=stride,
                                      structure_type=structure_type,
                                      nblocks=nblocks, param=param)
        else:
            self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = bn(planes)
        self.relu = nn.ReLU(inplace=True)
        # Second 3x3 conv keeps the spatial size.
        if is_structured:
            self.conv2 = butterfly3x3(planes, planes,
                                      structure_type=structure_type,
                                      nblocks=nblocks, param=param)
        else:
            self.conv2 = conv3x3(planes, planes)
        self.bn2 = bn(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.conv1(x)
        y = self.relu(y)
        y = self.bn1(y)
        y = self.conv2(y)
        y = y + shortcut
        y = self.relu(y)
        y = self.bn2(y)
        return y
class BottleneckFinal(nn.Module):
    """Bottleneck (1x1 -> 3x3 -> 1x1) residual block variant.

    Unlike the canonical block, the shortcut is added *before* the
    final bn3/relu, so the residual sum itself is batch-normalized.
    """
    # Output channel multiplier: the block emits planes * expansion channels.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        # 1x1 reduce
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = bn(planes)
        # 3x3 spatial conv (downsamples when stride > 1)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = bn(planes)
        # 1x1 expand back to planes * 4 channels
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = bn(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        # Project the shortcut when the shape or stride changes.
        if self.downsample is not None: residual = self.downsample(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out += residual  # shortcut added before the final bn/relu
        out = self.bn3(out)
        out = self.relu(out)
        return out
class BottleneckZero(nn.Module):
    """Bottleneck (1x1 -> 3x3 -> 1x1) residual block with zero-initialized bn3.

    Zero-initializing the last BatchNorm scale makes the block start out
    as (roughly) an identity mapping of the shortcut.
    """
    # Output channel multiplier: the block emits planes * expansion channels.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        # 1x1 reduce
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = bn(planes)
        # 3x3 spatial conv (downsamples when stride > 1)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = bn(planes)
        # 1x1 expand; bn3 weight starts at zero (see bn(init_zero=True))
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = bn(planes * 4, init_zero=True)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        # Project the shortcut when the shape or stride changes.
        if self.downsample is not None: residual = self.downsample(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out += residual  # shortcut added after bn3, before the final relu
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Standard post-activation 1x1 -> 3x3 -> 1x1 bottleneck residual block."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        # 1x1 reduce, 3x3 spatial (stride here), 1x1 expand.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = bn(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = bn(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = bn(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet backbone where the last `num_structured_layers` stages may use
    butterfly-structured convolutions.

    Args:
        block: residual block class (BasicBlock or a Bottleneck variant).
        layers: number of blocks per stage, e.g. [2, 2, 2, 2].
        num_classes: classifier output size.
        k: width multiplier applied to each stage's channel count.
        vgg_head: use a VGG-style 2x4096 FC head instead of global pooling.
        num_structured_layers: how many of the *last* stages use butterflies.

    NOTE(review): _make_layer forwards is_structured/structure_type kwargs to
    `block`; only BasicBlock accepts them, so the Bottleneck variants appear
    unusable with this constructor as written — confirm before relying on them.
    NOTE(review): 'BBTBBT' passes the assert below but is rejected by the
    butterfly factory functions.
    """
    def __init__(self, block, layers, num_classes=1000, k=1, vgg_head=False,
                 num_structured_layers=0, structure_type='B', nblocks=1, param='regular'):
        assert num_structured_layers <= 4
        assert structure_type in ['B', 'BBT', 'BBTBBT']
        super().__init__()
        # Structure only the last `num_structured_layers` of the 4 stages.
        self.is_structured = [False] * (4 - num_structured_layers) + [True] * num_structured_layers
        self.inplanes = 64
        # Stem (7x7 conv + maxpool) followed by the four residual stages.
        features = [nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
            , bn(64) , nn.ReLU(inplace=True) , nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            , self._make_layer(block, int(64*k), layers[0], is_structured=self.is_structured[0],
                               structure_type=structure_type, nblocks=nblocks, param=param)
            , self._make_layer(block, int(128*k), layers[1], stride=2, is_structured=self.is_structured[1],
                               structure_type=structure_type, nblocks=nblocks, param=param)
            # Only stacking butterflies in the 3rd layer for now
            , self._make_layer(block, int(256*k), layers[2], stride=2, is_structured=self.is_structured[2],
                               structure_type=structure_type, nblocks=nblocks, param=param)
            , self._make_layer(block, int(512*k), layers[3], stride=2, is_structured=self.is_structured[3],
                               structure_type=structure_type, nblocks=nblocks, param=param)]
        out_sz = int(512*k) * block.expansion
        if vgg_head:
            features += [nn.AdaptiveAvgPool2d(3), Flatten()
                , nn.Linear(out_sz*3*3, 4096), nn.ReLU(inplace=True), bn1(4096), nn.Dropout(0.25)
                , nn.Linear(4096, 4096), nn.ReLU(inplace=True), bn1(4096), nn.Dropout(0.25)
                , nn.Linear(4096, num_classes)]
        else: features += [nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(out_sz, num_classes)]
        self.features = nn.Sequential(*features)
        # He/Kaiming-style initialization for every plain convolution.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
    def _make_layer(self, block, planes, blocks, stride=1, is_structured=False,
                    structure_type='B', nblocks=1, param='regular'):
        """Build one stage of `blocks` residual blocks; mutates self.inplanes."""
        downsample = None
        # A projection shortcut is needed when the spatial size or the
        # channel count changes.
        if stride != 1 or self.inplanes != planes * block.expansion:
            if is_structured:
                downsample = nn.Sequential(
                    butterfly1x1(self.inplanes, planes * block.expansion, stride=stride, structure_type=structure_type,
                                 nblocks=nblocks, param=param),
                    bn(planes * block.expansion),
                )
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    bn(planes * block.expansion),
                )
        layers = []
        # First block carries the stride and the projection shortcut.
        layers.append(block(self.inplanes, planes, stride, downsample,
                            is_structured=is_structured, structure_type=structure_type,
                            nblocks=nblocks, param=param))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks): layers.append(block(self.inplanes, planes,
                                                       is_structured=is_structured, structure_type=structure_type,
                                                       nblocks=nblocks, param=param))
        return nn.Sequential(*layers)
    def forward(self, x): return self.features(x)
# resnet50 does not support currently support structure
# def resnet50(**kwargs):
# raise ValueError('resnet50
# model = ResNet(Bottleneck, [3, 4, 6, 3])
# return model
def resnet18(num_structured_layers=0, structure_type='B', nblocks=1, param='regular'):
    """Construct a ResNet-18 (BasicBlock x [2, 2, 2, 2]) with optional butterfly stages."""
    return ResNet(BasicBlock, [2, 2, 2, 2],
                  num_structured_layers=num_structured_layers,
                  structure_type=structure_type,
                  nblocks=nblocks,
                  param=param)
|
<gh_stars>1-10
from django.db import models
from django import forms
from django.utils.translation import gettext_lazy as _
from wagtail.admin.edit_handlers import TabbedInterface, ObjectList
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.images import get_image_model_string
@register_setting(icon='edit')
class Branding(BaseSetting):
    """Site-wide branding images, editable from the Wagtail settings UI."""
    # Brand logo shown across the whole site.
    logo = models.ForeignKey(
        get_image_model_string(),
        null=True, blank=True, on_delete=models.SET_NULL, related_name='logo',
        verbose_name='Logo',
        help_text=_("Brand logo per tutto il sito")
    )
    # Favicon image used in browser tabs/bookmarks.
    favicon = models.ForeignKey(
        get_image_model_string(),
        null=True, blank=True, on_delete=models.SET_NULL, related_name='favicon',
        verbose_name='Favicon'
    )
    # Admin edit panels for the two images.
    panels = [
        ImageChooserPanel("logo"),
        ImageChooserPanel("favicon"),
    ]
    class Meta:
        verbose_name = "Branding"
        verbose_name_plural = "Branding"
@register_setting(icon='link')
class SocialLinks(BaseSetting):
    """Social media profile URLs, editable from the Wagtail settings UI."""
    facebook = models.URLField(blank=True, null=True, help_text="Facebook URL")
    instagram = models.URLField(blank=True, null=True, help_text="Instagram URL")
    linkedin = models.URLField(blank=True, null=True, help_text="Linkedin URL")
    twitter = models.URLField(blank=True, null=True, help_text="Twitter URL")
    youtube = models.URLField(blank=True, null=True, help_text="YouTube Channel URL")
    class Meta:
        verbose_name = "Social Links"
        verbose_name_plural = "Social Links"
@register_setting(icon='search')
class Analytics(BaseSetting):
    """Third-party analytics/tracking snippets, editable from the Wagtail settings UI."""
    google_tag_head = models.TextField("Google Tag Manager <head>", blank=True, null=True, help_text=_("Aggiungi lo script di Google Tag Manager per il tag <head></head>"))
    google_tag_body = models.TextField("Google Tag Manager <body>", blank=True, null=True, help_text=_("Aggiungi lo script di Google Tag Manager per il tag <body></body>"))
    # Fixed typo in the user-facing admin label: "Traking" -> "Tracking".
    google_analytics = models.CharField("Google Analytics Tracking ID", max_length=20, blank=True, null=True, help_text=_('Il tuo Google Analytics tracking ID (inizia con "UA-")'))
    facebook_pixel = models.TextField("Facebook Pixel", blank=True, null=True, help_text=_("Aggiungi lo script di Facebook Pixel"))
    # Admin edit panels, grouped by provider.
    panels = [
        MultiFieldPanel([
            FieldPanel("google_tag_head", widget=forms.Textarea(attrs={'rows': 6})),
            FieldPanel("google_tag_body", widget=forms.Textarea(attrs={'rows': 6})),
        ], heading="Google Tag Manager"),
        MultiFieldPanel([
            FieldPanel("google_analytics"),
            FieldPanel("facebook_pixel", widget=forms.Textarea(attrs={'rows': 5})),
        ], heading="Analytics")
    ]
    class Meta:
        verbose_name = "Analytics"
        verbose_name_plural = "Analytics"
# @register_setting
# class WebsiteSettings(BaseSetting):
# """Website settings for our custom website."""
# # Social Links
# facebook = models.URLField(blank=True, null=True, help_text="Facebook URL")
# instagram = models.URLField(blank=True, null=True, help_text="Instagram URL")
# linkedin = models.URLField(blank=True, null=True, help_text="Linkedin URL")
# twitter = models.URLField(blank=True, null=True, help_text="Twitter URL")
# youtube = models.URLField(blank=True, null=True, help_text="YouTube Channel URL")
# # Analytics Scripts
# google_tag_head = models.TextField("Google Tag Manager <head>", blank=True, null=True, help_text=_("Aggiungi lo script di Google Tag Manager per il tag <head></head>"))
# google_tag_body = models.TextField("Google Tag Manager <body>", blank=True, null=True, help_text=_("Aggiungi lo script di Google Tag Manager per il tag <body></body>"))
# google_analytics = models.CharField("Google Analytics Traking ID", max_length=20, blank=True, null=True, help_text=_('Il tuo Google Analytics tracking ID (inizia con "UA-")'))
# facebook_pixel = models.TextField("Facebook Pixel", blank=True, null=True, help_text=_("Aggiungi lo script di Facebook Pixel"))
# # Branding
# logo = models.ForeignKey(
# get_image_model_string(),
# null=True, blank=True, on_delete=models.SET_NULL, related_name='logo',
# verbose_name='Logo',
# help_text=_("Brand logo per tutto il sito")
# )
# favicon = models.ForeignKey(
# get_image_model_string(),
# null=True, blank=True, on_delete=models.SET_NULL, related_name='favicon',
# verbose_name='Favicon'
# )
# # Tab Panels
# brand_tab_panels = [
# MultiFieldPanel([
# ImageChooserPanel("logo"),
# ImageChooserPanel("favicon"),
# ], heading="Branding"),
# ]
# social_tab_panels = [
# MultiFieldPanel([
# FieldPanel("facebook"),
# FieldPanel("instagram"),
# FieldPanel("linkedin"),
# FieldPanel("twitter"),
# FieldPanel("youtube"),
# ], heading="Social media Links")
# ]
# analytics_tab_panels = [
# MultiFieldPanel([
# FieldPanel("google_tag_head", widget=forms.Textarea(attrs={'rows': 6})),
# FieldPanel("google_tag_body", widget=forms.Textarea(attrs={'rows': 6})),
# ], heading="Google Tag Manager"),
# MultiFieldPanel([
# FieldPanel("google_analytics"),
# FieldPanel("facebook_pixel", widget=forms.Textarea(attrs={'rows': 5})),
# ], heading="Analytics")
# ]
# edit_handler = TabbedInterface([
# ObjectList(brand_tab_panels, heading='Branding'),
# ObjectList(social_tab_panels, heading='Social Media'),
# ObjectList(analytics_tab_panels, heading='Analytics'),
# ])
# class Meta:
# verbose_name = "Generale"
# verbose_name_plural = "Generale"
|
<gh_stars>1-10
import re
from .common import Void, TokenizerError, SyntaxError
from .location import Location, Source
#################
### TOKENIZER ###
#################
class Token:
    """A generic token carrying arbitrary attributes.

    Any keyword arguments become instance attributes. `location` is
    reserved and initialized to None; the tokenizer machinery fills it
    in later (or a caller may override it via a keyword argument).
    """
    def __init__(self, **attributes):
        self.location = None
        self.__dict__.update(attributes)
    def __repr__(self):
        return str(self)
    def __str__(self):
        return "Token%s" % self.__dict__
class RegexpMatcher:
    """Callable matcher wrapping a regular expression.

    BUG FIX: the original ``__call__`` body was the placeholder ``eee``,
    which raised NameError at call time. Until the matching logic is
    written, raise NotImplementedError explicitly so the failure mode
    is self-describing.
    """
    def __init__(self, regexp):
        self.regexp = regexp
    def __call__(self, text, pos, wsb, wsa):
        raise NotImplementedError("RegexpMatcher.__call__ is not implemented")
class SubTokenizer:
    """
    SubTokenizer(rules, ws_re) builds one tokenizer state from a list
    of rules and a compiled whitespace regexp.

    Each rule is a 4-element sequence::

        [chars, regexp, skip_ws, descr]

    chars: the characters that may start the rule; if True the rule is
        tried for every character.
    regexp: compiled regular expression used to extract the token.
    skip_ws: if True, whitespace is skipped before matching.
    descr: a callable invoked as
        ``descr(source, match, ws_before, ws_after)`` and expected to
        return a ``(token, endpos)`` pair, where endpos is an absolute
        position in the text.

    Rules are tried in order; the first match wins. ``read`` returns
    ``(token, nskip)`` where nskip is the number of characters to
    advance past the token. At end of input a falsy token and 0 are
    returned; if no rule matches mid-input,
    ``TokenizerError['no_token']`` is raised, so the rules should cover
    all expected inputs.

    NOTE(review): an earlier docstring described a 5-element rule
    format (with a span group and list-style descriptions) and a
    single-argument constructor; the code below unpacks 4 elements,
    takes ``ws_re``, and calls ``descr`` — this text follows the code.
    """
    def __init__(self, rules, ws_re):
        self.ws_re = ws_re
        # (pos, text, length) memo for the last whitespace measurement.
        self.ws_cache = (-1, None, 0)
        self.rules = rules
        # One dispatch table per skip_ws flavor; index 0..127 = ASCII
        # code, 128 = bucket for all non-ASCII characters.
        self.rulemap = ([[] for i in range(129)],
                        [[] for i in range(129)])
        for rulemap, skip_ws in ((self.rulemap[0], False),
                                 (self.rulemap[1], True)):
            for rule in rules:
                chars, rxp, rule_skip_ws, descr = rule
                if skip_ws == rule_skip_ws:
                    if chars is True:
                        # Rule applies regardless of the leading character.
                        for i in range(129):
                            rulemap[i].append(rule[1:])
                    else:
                        for c in chars:
                            i = min(ord(c), 128)
                            rulemap[i].append(rule[1:])
    def ws(self, text, pos):
        """Return the length of the whitespace run starting at pos (cached)."""
        cache_pos, cache_text, length = self.ws_cache
        if pos == cache_pos and text is cache_text:
            return length
        ws = self.ws_re.match(text, pos)
        s = ws.span()
        length = s[1] - s[0]
        self.ws_cache = (pos, text, length)
        return length
    def read(self, source, pos):
        """
        Read one token from `source` (a Source object) at `pos`.

        Returns (token, nskip); token is falsy at end of input.
        It is assumed that pos > 0.
        """
        text = source.text
        if pos >= len(text):
            # out of bounds
            return [False, 0]
        # we compute whitespace before once for all rules
        wsb = self.ws(text, pos)
        # the first pos past whitespace
        pos2 = pos + wsb
        # to speed up processing, self.rulemap associates each ASCII
        # character to a list of rules that can apply there; there are
        # two possible starting points: pos and pos2, depending on
        # whether the rule skips whitespace or not
        rules = self.rulemap[0][min(ord(text[pos]), 128)]
        if pos2 < len(text):
            rules = rules + self.rulemap[1][min(ord(text[pos2]), 128)]
        for rxp, skip_ws, descr in rules:
            match = rxp.match(text, pos2 if skip_ws else pos)
            if match:
                start, end = match.regs[0]
                # Whitespace after the match is handed to descr as well.
                wsa = self.ws(text, end)
                token, endpos = descr(source, match, text[pos:pos2], text[end:end + wsa])
                return token, endpos - pos
        # Only trailing whitespace remains: treat as end of input.
        if pos + wsb >= len(text):
            return False, 0
        raise TokenizerError['no_token'](
            source = source,
            pos = pos,
            subtokenizer = self)
class Tokenizer:
    """Drives a stack of sub-tokenizer states over a source.

    Iterating yields tokens. The consumer may ``send()`` back an action
    tuple — ``('push', state)`` or ``('pop',)`` — to switch the active
    sub-tokenizer mid-stream.
    """
    def __init__(self, source, subtok, initial_state = 'normal'):
        self.subtok = subtok        # mapping: state name -> sub-tokenizer
        self.source = source
        self.mark = 0               # current read position in the source
        self.stack = []             # state-name stack; bottom never popped
        self.st = None              # active sub-tokenizer
        self.push_state(initial_state)
    def install_state(self, state):
        """Make `state`'s sub-tokenizer the active one."""
        self.st = self.subtok[state]
    def push_state(self, state):
        self.stack.append(state)
        self.install_state(state)
    def pop_state(self):
        # The initial state stays active: never pop the last entry.
        if len(self.stack) > 1:
            self.stack.pop()
            self.install_state(self.stack[-1])
    def __iter__(self):
        while True:
            tok, skip = self.st.read(self.source, self.mark)
            if skip:
                self.mark += skip
            if tok:
                action = yield tok
                if action:
                    command, *args = action
                    if command == 'pop':
                        self.pop_state()
                    elif command == 'push':
                        self.push_state(args[0])
                    else:
                        # BUG FIX: the original referenced the undefined
                        # name `results[-1]` here, raising NameError
                        # instead of the intended TokenizerError.
                        raise TokenizerError["unknown_action"](
                            token = tok,
                            action = action)
            else:
                return
class TokenizerWrapper:
    """Base class for objects that decorate a tokenizer's token stream."""
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer
        # Expose the underlying source for convenience.
        self.source = tokenizer.source
class GenericTokenizerWrapper(TokenizerWrapper):
    """Wraps a tokenizer with an arbitrary stream-transforming function `f`."""
    def __init__(self, tokenizer, f):
        super().__init__(tokenizer)
        self.f = f
    def __iter__(self):
        # Delegate iteration to f applied to the wrapped tokenizer.
        for token in self.f(self.tokenizer):
            yield token
def tokenizer_wrapper(f):
    """Turn a stream-transforming function into a tokenizer-wrapper factory."""
    def make_wrapper(tokenizer):
        return GenericTokenizerWrapper(tokenizer, f)
    return make_wrapper
class FixityDisambiguator(TokenizerWrapper):
    """Buffers tokens with ambiguous fixity ("?fix") and resolves them to
    prefix, suffix or nullary based on the surrounding tokens.

    inherent_fixity: callable token -> fixity, consulted when context
        alone cannot decide.
    surround_map: maps a neighbor's fixity to a pair of booleans
        (buffered run is in suffix position, next run starts in prefix
        position).
    """
    def __init__(self, tokenizer, inherent_fixity, surround_map):
        # Tokens awaiting fixity resolution.
        self.buffer = []
        # Whether the current buffered run sits in a prefix position.
        self.buffer_pfx = True
        self.inherent_fixity = inherent_fixity
        self.surround_map = surround_map
        super().__init__(tokenizer)
    def process_buffer(self, pfx, sfx, start):
        """Resolve fixity for buffer[start:], given prefix/suffix position flags."""
        n = len(self.buffer) - start
        if n == 0:
            return
        elif pfx and sfx:
            # No operand on either side: the tokens stand alone.
            self.buffer[i].fixity = None
            self.buffer[i].type = "nullary"
        elif pfx:
            for i in range(start, len(self.buffer)):
                self.buffer[i].fixity = "prefix"
        elif sfx:
            for i in range(start, len(self.buffer)):
                self.buffer[i].fixity = "suffix"
        else:
            # Ambiguous context: let the first token decide for itself,
            # then resolve the rest of the run relative to that choice.
            tok = self.buffer[start]
            fixity = self.inherent_fixity(tok)
            self.buffer[start].fixity = fixity
            self.process_buffer(fixity in ('infix', 'prefix'),
                                sfx, start + 1)
    def __iter__(self):
        for tok in iter(self.tokenizer):
            fixity = getattr(tok, 'fixity', None)
            if fixity == "?fix":
                # Ambiguous token: hold it until context decides.
                self.buffer.append(tok)
            else:
                # An unambiguous token closes the buffered run.
                sfx, newpfx = self.surround_map.get(fixity, (False, False))
                self.process_buffer(self.buffer_pfx, sfx, 0)
                self.buffer.append(tok)
                self.buffer_pfx = newpfx
                for tok in self.buffer:
                    yield tok
                self.buffer = []
        if self.buffer:
            # End of stream counts as a suffix position.
            self.process_buffer(self.buffer_pfx, True, 0)
            for tok in self.buffer:
                yield tok
class Alternator(TokenizerWrapper):
    """Rewrites the token stream so operands and operators strictly
    alternate, inserting synthetic tokens between incompatible neighbors.

    token0: virtual token preceding the stream (may be falsy).
    sandwich_void(last, current): builds a synthetic "void" operand.
    sandwich_juxt(last, current): builds a synthetic juxtaposition operator.
    """
    def __init__(self, tokenizer, token0, sandwich_void, sandwich_juxt):
        self.token0 = token0
        self.sandwich_void = sandwich_void
        self.sandwich_juxt = sandwich_juxt
        super().__init__(tokenizer)
    def __iter__(self):
        last = self.token0
        for current in self.tokenizer:
            # Candidate fillers between `last` and `current`.
            void = self.sandwich_void(last, current)
            ws = self.sandwich_juxt(last, current)
            # Classify the pair by fixity; "id" means a plain operand.
            t1 = getattr(last, "fixity", None) or "id"
            t2 = getattr(current, "fixity", None) or "id"
            t = t1 + "/" + t2
            if t in ["id/id"]:
                # operand operand -> insert a juxtaposition operator
                yield ws
            elif t in ["prefix/infix",
                       "infix/infix",
                       "infix/suffix",
                       "infix/prefix",
                       "suffix/infix",
                       "prefix/prefix",
                       "prefix/suffix",
                       "suffix/suffix"]:
                # operator operator -> insert a void operand between them
                yield void
            elif t in ["id/prefix"]:
                # operand prefix-op -> juxtaposition, then a void operand
                yield ws
                yield self.sandwich_void(ws, current)
            elif t in ["suffix/id"]:
                # suffix-op operand -> void operand, then juxtaposition
                yield void
                yield self.sandwich_juxt(void, current)
            elif t in ["suffix/prefix"]:
                # suffix-op prefix-op -> void, juxtaposition, void
                yield void
                ws = self.sandwich_juxt(void, current)
                yield ws
                yield self.sandwich_void(ws, current)
            yield current
            last = current
        # Close a trailing operator (or an empty stream) with a final
        # void operand.
        # NOTE(review): assumes tokens expose a `type` attribute here.
        if last and (last is self.token0 or last.type == 'operator'):
            yield self.sandwich_void(last, None)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.