code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 08:44:45 2018
@author: merit
"""
import pandas as pd
import cx_Oracle
import numpy as np
import pickle
from sklearn.cluster import KMeans
import json
import time
class DataModel:
    """
    Infers primary-key / foreign-key relationships between Oracle tables.

    Pipeline (see runProcess): read the configured tables into DataFrames,
    extract per-column statistical features, classify each column as PK /
    FK / neither with a pre-trained (pickled) model, cluster the key
    candidates with KMeans, and confirm PK-FK pairs by value overlap.
    The result is returned as a JSON-serializable structure.
    """

    def getDataMap(self, config):
        """
        Read every configured table into a pandas DataFrame.

        :param config: list of connection dicts with keys 'user', 'password',
            'host', 'port', 'database' and 'tables' (list of table names)
        :return: dict mapping table name -> DataFrame (empty tables skipped)
        """
        dataMap = {}
        for i in range(len(config)):
            user = config[i]['user']
            pwd = config[i]['password']
            host = config[i]['host']
            port = config[i]['port']
            db = config[i]['database']
            # Oracle EZ-connect string: host:port/service
            url = host + ':' + port + '/' + db
            conn = cx_Oracle.connect(user, pwd, url)
            for j in range(len(config[i]['tables'])):
                key = config[i]['tables'][j]
                value = pd.read_sql('select * from ' + key, conn)
                if len(value)>0:
                    # Only keep tables that actually contain rows.
                    dataMap[key] = value
        print('读库数据结束...')  # "finished reading the database"
        return dataMap

    def contain_chinese(self, check_str):
        """Return True if *check_str* contains at least one CJK character."""
        for ch in check_str:
            if u'\u4e00' <= ch <= u'\u9fff':
                return True
        return False

    def getType(self, value):
        """
        Map a Python value to an integer type code:
        0=int, 1=float, 2=str, 3=None, 4=LOB, 5=other.
        """
        if ('int' in str(type(value))):
            return 0
        elif ('float' in str(type(value))):
            return 1
        elif ('str' in str(type(value))):
            return 2
        elif ('None' in str(type(value))):
            return 3
        elif ('LOB' in str(type(value)).upper()):
            return 4
        else:
            return 5

    # Similarity based on the longest common subsequence of two strings.
    def getLCS(self, s1, s2):
        """
        Return len(LCS(s1, s2)) / min(len(s1), len(s2)), computed with the
        classic dynamic-programming table.
        """
        len1 = len(s1)
        len2 = len(s2)
        arr = np.zeros([len2 + 1, len1 + 1])
        for p in range(1, len2 + 1):
            lineUnit = s2[p - 1]
            for q in range(1, len1 + 1):
                leftValue = arr[p, q - 1]
                topValue = arr[p - 1, q]
                cornerValue = arr[p - 1, q - 1]
                if lineUnit == s1[q - 1]:
                    cornerValue += 1
                arr[p, q] = np.max([leftValue, topValue, cornerValue])
        commonLen = arr[len2, len1]
        sim = commonLen / min([len1, len2])
        return sim

    def getValueStats(self, df, tableName, colName, size, colsMap):
        """
        Build the feature vector for one column, consumed by the classifier.

        :param df: DataFrame of the table
        :param tableName: name of the table
        :param colName: name of the column being featurized
        :param size: number of rows in *df*
        :param colsMap: dict table name -> list of its column names
        :return: list of 16 features (see getPredictFeatureDf column names)
        """
        data = []
        allCols = df.columns.tolist()
        if (size > 20):
            # Sample at most 20 rows to keep feature extraction cheap.
            data = df.sample(n=20)[colName].tolist()
        else:
            data = df[colName].tolist()
        colTypes = [self.getType(e) for e in data]
        # Default to 3 (None); keep the last non-None type code seen.
        colType = 3
        for tp in colTypes:
            if (tp != 3):
                colType = tp
        lens = []
        meanLen = 0
        if (colType == 4):
            # LOB columns get a fixed nominal length.
            meanLen = 100
        else:
            lens = [len(str(elem)) for elem in data]
            if(len(lens)>0):
                meanLen = np.mean(lens)
        # flag: does the column contain Chinese text (LOBs always flagged).
        flag = 0
        if (colType == 4):
            flag = 1
        else:
            for value in data:
                s = str(value)
                if (self.contain_chinese(s)):
                    flag = 1
        # pos: 1 iff this is the first column of the table.
        ind = allCols.index(colName)
        pos = 1
        if (ind > 0):
            pos = 0
        # interval: which third of the column list the column falls into.
        interval = 0
        colNum = len(allCols)
        if (colNum * 1.0 / 3 <= ind and ind < colNum * 2.0 / 3):
            interval = 1
        elif (ind >= colNum * 2.0 / 3):
            interval = 2
        # Name heuristics: columns starting/ending with ID or NO.
        withId = 0
        withNo = 0
        if (colType != 4):
            if (colName.upper().startswith('ID') or colName.upper().endswith('ID')):
                withId = 1
            if (colName.upper().startswith('NO') or colName.upper().endswith('NO')):
                withNo = 1
        # Similarity between the column name and its own table name.
        sim = self.getLCS(tableName, colName)
        # freq: in how many tables the same column name appears.
        freq = 0
        for key in colsMap:
            curList = colsMap[key]
            for col in curList:
                if (col == colName):
                    freq += 1
        alone = 1
        if (freq > 1):
            alone = 0
        # Best similarity between the column name and any OTHER table name.
        otherSims = []
        for key in colsMap:
            if (key != tableName):
                curSim = self.getLCS(key, colName)
                otherSims.append(curSim)
        otherMaxSim = np.max(otherSims)
        # Null statistics over the first 10 sampled values.
        noneCount = 0
        manyNone = 0
        hasNone = 0
        for v in data[0:10]:
            if (v == None):
                noneCount += 1
        if (noneCount > 7):
            manyNone = 1
        if (noneCount > 0):
            hasNone = 1
        # setRatio: fraction of distinct values in the sample (uniqueness).
        setLen = len(set(data))
        dataLen = len(data)
        setRatio = setLen / dataLen
        return [tableName, colName, meanLen, flag, colType, pos, withId, withNo, \
                sim, freq, alone, interval, otherMaxSim, manyNone, hasNone, setRatio]

    def getPredictFeatureDf(self, dataMap):
        """
        Build the feature DataFrame: one row per column of every table.

        :param dataMap: dict table name -> DataFrame (from getDataMap)
        :return: DataFrame with the 16 feature columns
        """
        colsMap = {}
        for table in dataMap:
            cols = dataMap[table].columns.tolist()
            colsMap[table] = cols
        predData = []
        for tableName in dataMap:
            curDf = dataMap[tableName]
            allCols = curDf.columns.tolist()
            curLen = len(curDf)
            for colName in allCols:
                curFeas = self.getValueStats(curDf, tableName, colName, curLen, colsMap)
                predData.append(curFeas)
        predDf = pd.DataFrame(predData, columns= \
            ['tableName', 'colName', 'valueLen', 'containChn', 'colType', 'isFirstCol', 'withId', 'withNo',
             'similarity', \
             'colFreq', 'isSingleCol', 'colPosGroup', 'otherMaxSim', 'manyNone', 'hasNone', 'setRatio'])
        return predDf

    def predict(self, modelPath, predDf):
        """
        Classify every column with the pickled model at *modelPath*.

        Prediction codes: 0 = PK, 1 = FK, 2 = neither (rows dropped).
        :return: DataFrame with columns tableName / colName / keyType
        """
        # NOTE(review): the file handle opened here is never closed.
        newModel = pickle.load(open(modelPath, 'rb'))
        allCols = predDf.columns.tolist()
        # Skip the two identifier columns; the rest are numeric features.
        features = allCols[2:len(allCols)]
        X = np.array(predDf[features])
        preds = newModel.predict(X)
        predDf['prediction'] = preds
        newDf = predDf[predDf['prediction'] != 2]
        func = lambda x: ('PK' if (x == 0) else 'FK')
        newDf.loc[:, 'keyType'] = newDf.loc[:, 'prediction'].apply(func)
        resDf = newDf[['tableName', 'colName', 'keyType']]
        return resDf

    def conv_to_json(self, dic, b_name, config):
        """
        Convert the detected relationships into a JSON-serializable dict.

        :param dic: {(table1, table2): (col1, col2, 1)} relationship map
        :param b_name: list of all table names
        :param config: connection configs (used to attach dataSourceId)
        :return: {'tables': [...], 'relationships': [...]}
        """
        data = dic
        temp = {}
        tab = {}
        tables = []
        rela = {}
        relationship = []
        for i in range(len(b_name)):
            tab = {}
            tab["id"] = i + 1
            tab["tablename"] = b_name[i]
            for j in range(len(config)):
                if b_name[i] in config[j]['tables']:
                    tab['dataSourceId'] = config[j]['dataSourceId']
            tables.append(tab)
        for i in data:
            value = data[i]
            tab = {}
            rela = {}
            # Parse the stringified "('t1', 't2')" tuple key back into the
            # two table names.
            i = str(i)
            i = i.lstrip("('").rstrip("')")
            i = i.split(",")
            i[0] = i[0].rstrip("'")
            i[1] = i[1].lstrip(" '")
            rela["sourceCol"] = i[0] + '.' + value[0]
            rela["targetCol"] = i[1] + '.' + value[1]
            relationship.append(rela)
        temp["tables"] = tables
        temp["relationships"] = relationship
        return temp

    def is_relationship1(self, a, b):
        """
        Overlap ratio of the distinct values of two columns:
        |set(a) & set(b)| / min(|set(a)|, |set(b)|), rounded to 2 decimals.
        """
        aa = a.drop_duplicates().reset_index()
        del aa['index']
        bb = b.drop_duplicates().reset_index()
        del bb['index']
        m = len(set(aa.iloc[:, 0]) & set(bb.iloc[:, 0]))
        return (round(m / min(len(aa), len(bb)), 2))

    def tz(self, x):
        """
        Append character-statistics feature columns to *x* in place.

        For each value in column 'x' this records: type code, string
        length, digit count, letter count, other-character count and
        CJK-character count.
        """
        for i in range(len(x)):
            x_ = x.loc[i, 'x']
            x_type = type(x_)  # value type
            if 'str' in str(x_type):
                x1 = 1  # string
            elif 'float' in str(x_type):
                x1 = 2  # float
                if (x_ - int(x_)) == 0:
                    # A float with no fractional part is treated as an int.
                    x_ = int(x_)
                    x1 = 3
            elif 'int' in str(x_type):
                x1 = 3  # integer
            elif 'LOB' in str(x_type):
                x1 = 4  # LOB
            else:
                x1 = 0
            x2 = len(str(x_))  # string length
            if x1 != 1:
                x_ = str(x_)
            x4 = 0
            x3 = 0
            x6 = 0
            for ch in x_:
                if '0' <= ch <= '9':
                    x4 += 1  # digit
                if 'a' <= ch <= 'z' or 'A' <= ch <= 'Z':
                    x3 += 1  # latin letter
                if '\u4e00' <= ch <= '\u9fa5':
                    x6 += 1  # CJK character
            # Everything that is not a digit/letter/CJK char.
            x5 = x2 - x3 - x4 - x6
            x.loc[i, 'type'] = x1
            x.loc[i, 'len'] = x2
            x.loc[i, 'num'] = x4
            x.loc[i, 'xyz'] = x3
            x.loc[i, 'other'] = x5
            x.loc[i, 'hanzi'] = x6
        return x

    def model(self, dataMap, key, tableNames, config):
        """
        Cluster the predicted key columns with KMeans and confirm PK-FK
        pairs by value overlap (is_relationship1 > 0.65).

        :param dataMap: dict table name -> DataFrame
        :param key: DataFrame with columns tableName / colName / keyType
        :param tableNames: list of all table names
        :param config: connection configs (passed through to conv_to_json)
        :return: JSON-serializable dict of tables and relationships
        """
        key = key.reset_index()
        del key['index']
        x = pd.DataFrame()  # len(key)
        for i in range(len(key)):
            table = dataMap[key.loc[i, 'tableName']][key.loc[i, 'colName']]
            # Sample at most 100 values per key column.
            sam = min(100, len(table))
            t = pd.DataFrame(table.sample(sam))
            t.columns = ['x']
            t = t.dropna()
            if len(t) == 0:
                # The sample was all NaN: fall back to the full column.
                t = pd.DataFrame(table)
                t.columns = ['x']
                t = t.dropna()
            if len(t) > 0:
                t['tableName'] = key.loc[i, 'tableName']
                t['colName'] = key.loc[i, 'colName']
                t = t.reset_index()
                del t['index']
                # Add character-statistics feature columns in place.
                self.tz(t)
                x = x.append(t)
        x = x.reset_index()
        del x['index']
        # Cluster on the numeric feature columns (everything after
        # x / tableName / colName).
        clf = KMeans(n_clusters=6)
        y_pred = clf.fit_predict(x.iloc[:, 3:])
        for i in range(len(x)):
            x.loc[i, 'kmeans'] = y_pred[i]
        del x['x']
        x = x.drop_duplicates().reset_index()
        del x['index']
        # y: one row per (tableName, colName) with its cluster label.
        y = x.iloc[:, :2]
        y['kmeans'] = x['kmeans']
        y = y.drop_duplicates().reset_index()
        del y['index']
        model = pd.DataFrame()
        cluser = y['kmeans']
        cluser = cluser.drop_duplicates().reset_index()
        del cluser['index']
        for i in range(len(cluser)):
            cluser_i = y[y['kmeans'] == cluser.loc[i, 'kmeans']].reset_index()
            del cluser_i['index']
            # Attach the predicted keyType to every member of the cluster.
            for j in range(len(cluser_i)):
                yw = key[key['tableName'] == cluser_i.loc[j, 'tableName']][
                    key[key['tableName'] == cluser_i.loc[j, 'tableName']]['colName'] == cluser_i.loc[j, 'colName']][
                    'keyType'].reset_index()
                cluser_i.loc[j, 'keyType'] = yw.loc[0, 'keyType']
            key_type = cluser_i['keyType'].drop_duplicates().reset_index()
            if len(key_type) > 1:
                # The cluster mixes PKs and FKs: test every PK column
                # against every FK column from a different table.
                p_key = cluser_i[cluser_i['keyType'] == 'PK'].reset_index()
                del p_key['index']
                f_key = cluser_i[cluser_i['keyType'] == 'FK'].reset_index()
                del f_key['index']
                for k in range(len(p_key)):
                    a = dataMap[p_key.loc[k, 'tableName']][p_key.loc[k, 'colName']]
                    for h in range(len(f_key)):
                        if p_key.loc[k, 'tableName'] != f_key.loc[h, 'tableName']:
                            b = dataMap[f_key.loc[h, 'tableName']][f_key.loc[h, 'colName']]
                            rela = self.is_relationship1(a, b)
                            if rela > 0.65:
                                # Enough value overlap: record the PK-FK pair.
                                re = pd.DataFrame()
                                re.loc[0, 'table1'] = p_key.loc[k, 'tableName']
                                re.loc[0, 'table2'] = f_key.loc[h, 'tableName']
                                re.loc[0, 'id1'] = p_key.loc[k, 'colName']
                                re.loc[0, 'id2'] = f_key.loc[h, 'colName']
                                model = model.append(re)
        print(time.localtime(time.time()))
        if len(model) > 0:
            model = model.drop_duplicates()
        model = model.reset_index()
        del model['index']
        model_result = {}
        for i in range(len(model)):
            model_result[model.loc[i, 'table1'], model.loc[i, 'table2']] = (model.loc[i, 'id1'], model.loc[i, 'id2'], 1)
        model_result = self.conv_to_json(model_result, tableNames, config)
        return model_result

    def runProcess(self, dbConfigMap, tableNames, modelPath):
        """
        Full pipeline: read data, extract features, classify key columns,
        confirm PK-FK relationships and return them as a JSON string.

        :param dbConfigMap: list of connection config dicts
        :param tableNames: list of all table names
        :param modelPath: path of the pickled classifier
        :return: JSON string with 'tables' and 'relationships'
        """
        dataMap = self.getDataMap(dbConfigMap)
        predDf = self.getPredictFeatureDf(dataMap)
        resDf = self.predict(modelPath, predDf)
        jsObj = json.dumps(self.model(dataMap, resDf, tableNames, dbConfigMap))
        return jsObj
'''
configMap = {}
configMap['user'] = 'C##meritdata'
configMap['password'] = '<PASSWORD>'
configMap['host'] = '192.168.3.11'
configMap['port'] = '1521'
configMap['database'] = 'orcl'
configMap['tables'] = ['ARC_S_APP_VAT', 'ARC_S_DESIGN_EXAMINE', 'ARC_S_PRC_TACTIC_SCHEME', 'ARC_S_PSP_DESIGN_VERI', 'ARC_S_PSP_DESIGN', 'ARC_S_PS_SCHEME', 'ARC_S_PRJ_INSPECT', 'ARC_S_PS_SCHEME_DRAWING', 'ARC_S_PSP_CONSTRUCT', 'ARC_S_APP_PAYMENT_RELA', 'ARC_S_MI_RELA_SCHEME', 'ARC_S_APP', 'ARC_S_APP_DATA', 'ARC_S_APP_REPLY', 'ARC_S_BILL_RELA_SCHEME', 'ARC_S_CONSPRC_SCHEME', 'ARC_S_INVESTIGATE', 'ARC_S_METER_SCHEME', 'ARC_S_SND_CIRCUIT_SCHEME', 'ARC_S_MID_CHECK', 'ARC_S_IT_SCHEME']
configMap['dataSourceId']=5
configMap2 = {}
configMap2['user'] = 'C##meritdata'
configMap2['password'] = '<PASSWORD>'
configMap2['host'] = '192.168.3.11'
configMap2['port'] = '1521'
configMap2['database'] = 'orcl'
configMap2['tables'] = ['ARC_S_PRJ_ACCEPT', 'ARC_S_APP_CERT', 'ARC_S_APP_BANK_ACCT', 'ARC_S_ELEC_DEV_SCHEME', 'ARC_S_MP_METER_RELA_SCHM', 'ARC_S_APP_NATURAL_INFO', 'ARC_S_PS_CHG_SCHEME', 'ARC_S_APP_ELEC_ADDR', 'ARC_S_MP_SCHEME', 'ARC_S_APP_CONTACT', 'ARC_S_APP_ACCT']
configMap2['dataSourceId']=6
tableNames= ['ARC_S_APP_VAT', 'ARC_S_DESIGN_EXAMINE', 'ARC_S_PRC_TACTIC_SCHEME', 'ARC_S_PSP_DESIGN_VERI', 'ARC_S_PSP_DESIGN', 'ARC_S_PS_SCHEME', 'ARC_S_PRJ_INSPECT', 'ARC_S_PS_SCHEME_DRAWING', 'ARC_S_PSP_CONSTRUCT', 'ARC_S_APP_PAYMENT_RELA', 'ARC_S_MI_RELA_SCHEME', 'ARC_S_APP', 'ARC_S_APP_DATA', 'ARC_S_APP_REPLY', 'ARC_S_BILL_RELA_SCHEME', 'ARC_S_CONSPRC_SCHEME', 'ARC_S_INVESTIGATE', 'ARC_S_METER_SCHEME', 'ARC_S_SND_CIRCUIT_SCHEME', 'ARC_S_MID_CHECK', 'ARC_S_IT_SCHEME', 'ARC_S_PRJ_ACCEPT', 'ARC_S_APP_CERT', 'ARC_S_APP_BANK_ACCT', 'ARC_S_ELEC_DEV_SCHEME', 'ARC_S_MP_METER_RELA_SCHM', 'ARC_S_APP_NATURAL_INFO', 'ARC_S_PS_CHG_SCHEME', 'ARC_S_APP_ELEC_ADDR', 'ARC_S_MP_SCHEME', 'ARC_S_APP_CONTACT', 'ARC_S_APP_ACCT']
config=[configMap,configMap2]
#print(time.localtime(time.time()))
#configMap = {}
#configMap['user'] = 'portaldb'
#configMap['password'] = '<PASSWORD>'
#configMap['host'] = '172.16.17.32'
#configMap['port'] = '1521'
#configMap['database'] = 'orcl'
#b_name=['G_SUBS','G_LINE','T_RZ_BG_SUM_DX']
modelPath = 'F:\\2018\\2月\\2.26'
data_model = DataModel()
print('**************************************')
myJson = data_model.runProcess(config, tableNames, modelPath)
print(myJson)
'''
# =====================================================
# print(time.localtime(time.time()))
# configMap = {}
# configMap['user'] = 'caiwu'
# configMap['password'] = '<PASSWORD>'
# configMap['host'] = '192.168.3.11'
# configMap['port'] = '1521'
# configMap['database'] = 'orcl'
# b_name=['TRDATADETAIL2407','TRDATADETAIL2418','TRDATADETAIL3878','TRDATADETAIL4931','TRDATADETAIL4934','TRDATADETAIL5060','TRDATADETAIL5061','TRDATADETAIL5063','T_RZ_BG_SUM_DX','T_ZJ_BALCHANGE','T_ZJ_BALCHANGEMX','XT10031YWBILL','XT167YWBILL',]
# modelPath = 'F:\\2018\\2月\\2.26'
#
# data_model = DataModel()
# print('**************************************')
# myJson = data_model.runProcess(configMap, b_name, modelPath)
# print(myJson)
# configMap = {}
# configMap['user'] = 'yangwen2'
# configMap['password'] = '<PASSWORD>'
# configMap['host'] = '192.168.3.11'
# configMap['port'] = '1521'
# configMap['database'] = 'orcl'
# configMap['tables'] = ['ARC_S_APP', 'ARC_S_APP_ACCT']
#
# configMap2 = {}
# configMap2['user'] = 'yangwen2'
# configMap2['password'] = '<PASSWORD>'
# configMap2['host'] = '192.168.3.11'
# configMap2['port'] = '1521'
# configMap2['database'] = 'orcl'
# configMap2['tables'] = ['ARC_S_APP_CUST_ADDR', 'ARC_S_APP_DATA', 'ARC_S_APP_ELEC_ADDR', 'ARC_S_APP_NATURAL_INFO']
#
# tableNames = ['ARC_S_APP', 'ARC_S_APP_ACCT', 'ARC_S_APP_CUST_ADDR', 'ARC_S_APP_DATA', 'ARC_S_APP_ELEC_ADDR',
# 'ARC_S_APP_NATURAL_INFO']
#
# config = [configMap, configMap2]
# print(time.localtime(time.time()))
# configMap = {}
# configMap['user'] = 'portaldb'
# configMap['password'] = '<PASSWORD>'
# configMap['host'] = '172.16.17.32'
# configMap['port'] = '1521'
# configMap['database'] = 'orcl'
# b_name=['G_SUBS','G_LINE','T_RZ_BG_SUM_DX']
# modelPath = 'F:\\2018\\2月\\2.26'
#
# data_model = DataModel()
# print('**************************************')
# myJson = data_model.runProcess(config, tableNames, modelPath)
# print(myJson)
| [
"sklearn.cluster.KMeans",
"numpy.mean",
"cx_Oracle.connect",
"numpy.max",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"pandas.read_sql",
"time.time"
] | [((1637, 1667), 'numpy.zeros', 'np.zeros', (['[len2 + 1, len1 + 1]'], {}), '([len2 + 1, len1 + 1])\n', (1645, 1667), True, 'import numpy as np\n'), ((4102, 4119), 'numpy.max', 'np.max', (['otherSims'], {}), '(otherSims)\n', (4108, 4119), True, 'import numpy as np\n'), ((5179, 5423), 'pandas.DataFrame', 'pd.DataFrame', (['predData'], {'columns': "['tableName', 'colName', 'valueLen', 'containChn', 'colType', 'isFirstCol',\n 'withId', 'withNo', 'similarity', 'colFreq', 'isSingleCol',\n 'colPosGroup', 'otherMaxSim', 'manyNone', 'hasNone', 'setRatio']"}), "(predData, columns=['tableName', 'colName', 'valueLen',\n 'containChn', 'colType', 'isFirstCol', 'withId', 'withNo', 'similarity',\n 'colFreq', 'isSingleCol', 'colPosGroup', 'otherMaxSim', 'manyNone',\n 'hasNone', 'setRatio'])\n", (5191, 5423), True, 'import pandas as pd\n'), ((5737, 5763), 'numpy.array', 'np.array', (['predDf[features]'], {}), '(predDf[features])\n', (5745, 5763), True, 'import numpy as np\n'), ((8756, 8770), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8768, 8770), True, 'import pandas as pd\n'), ((9656, 9676), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(6)'}), '(n_clusters=6)\n', (9662, 9676), False, 'from sklearn.cluster import KMeans\n'), ((10059, 10073), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10071, 10073), True, 'import pandas as pd\n'), ((582, 615), 'cx_Oracle.connect', 'cx_Oracle.connect', (['user', 'pwd', 'url'], {}), '(user, pwd, url)\n', (599, 615), False, 'import cx_Oracle\n'), ((740, 781), 'pandas.read_sql', 'pd.read_sql', (["('select * from ' + key)", 'conn'], {}), "('select * from ' + key, conn)\n", (751, 781), True, 'import pandas as pd\n'), ((2019, 2061), 'numpy.max', 'np.max', (['[leftValue, topValue, cornerValue]'], {}), '([leftValue, topValue, cornerValue])\n', (2025, 2061), True, 'import numpy as np\n'), ((2786, 2799), 'numpy.mean', 'np.mean', (['lens'], {}), '(lens)\n', (2793, 2799), True, 'import numpy as np\n'), ((9243, 9262), 
'pandas.DataFrame', 'pd.DataFrame', (['table'], {}), '(table)\n', (9255, 9262), True, 'import pandas as pd\n'), ((11925, 11936), 'time.time', 'time.time', ([], {}), '()\n', (11934, 11936), False, 'import time\n'), ((11514, 11528), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11526, 11528), True, 'import pandas as pd\n')] |
from pendulum.models import *
from pendulum.models import _format_accelerations
import numpy as np
import pytest
@pytest.mark.parametrize("input, exp_output", [
    ((0, 0), (0, 0)), # Stable equilibrium
    ((np.pi, 0), (0, 0)) # Unstable equilibrium
])
def test_dpendulum(input, exp_output):
    """The state derivative must vanish at both pendulum equilibria."""
    derivatives = dpendulum(input)
    assert derivatives == pytest.approx(exp_output, 1e-8), \
        'pendulum is not behaving as expected'
def test_damped_pendulum():
    """A strongly damped pendulum must come to rest at the stable equilibrium."""
    tolerance = 1e-8
    initial_state = (0, 1)            # (theta_0, omega_0)
    damping = 2
    times = np.linspace(0, 100, 100)
    solution = pendulum(initial_state, times, d=damping)
    final_theta = solution[-1, 0]
    final_omega = solution[-1, 1]
    # After a long time both the angle and the angular speed decay to zero.
    assert final_theta == pytest.approx(0.0, tolerance)
    assert final_omega == pytest.approx(0.0, tolerance)
def test_undamped_pendulum():
    """Without damping the pendulum keeps oscillating; it never settles at rest."""
    tolerance = 1e-8
    times = np.linspace(0, 100, 100)  # simulation time
    initial_state = (0, 1)            # (theta_0, omega_0)
    solution = pendulum(initial_state, times)
    final_theta = solution[-1, 0]
    final_omega = solution[-1, 1]
    # Energy is conserved, so the final state is not the rest state.
    assert final_theta != pytest.approx(0.0, tolerance)
    assert final_omega != pytest.approx(0.0, tolerance)
def test_freefall_pendulum():
    ''' Check the solution for a free-falling non-inertial pendulum

    In free fall the effective gravity on the bob vanishes, so the pendulum
    should keep its initial angle relative to the pivot.  The scenario is
    checked twice: once with the pivot trajectory given as positions and
    once with it given as accelerations.
    '''
    tol = 1e-4
    ## Set-up your problem
    g = 9.8 # Acceleration of gravity
    pos_x = lambda t : 0.0*t # Pivot's position
    pos_y = lambda t : -g/2*t**2 # Free falling
    ts = np.linspace(0, 10, 1000) # Simulation time
    yinit = (np.pi/2, 0) # Initial condition (th_0, w_0)
    ## Solve it
    sol = pendulum(yinit, ts, pos_x, pos_y, g = g)
    ## No relative movement is expected
    assert(sol[-1, 0] == pytest.approx(yinit[0], tol))
    # Repeat test in acceleration mode
    acc_x = lambda t: 0.0*t # Pivot's acceleration
    acc_y = lambda t: 0.0*t - g
    sol_2 = pendulum(yinit, ts, acc_x, acc_y, is_acceleration = True, g = g)
    ## No relative movement is expected
    assert(sol_2[-1, 0] == pytest.approx(yinit[0], tol))
def test_noninertial_pendulum_no_acceleration():
    """A pivot moving at constant velocity must not change the dynamics
    (Galileo's relativity principle)."""
    ts = np.linspace(0, 10, 1000)      # simulation time
    state0 = (0, 0)                    # (theta_0, omega_0)
    drift_x = lambda t: 1.0 * t        # pivot moves with uniform speed
    drift_y = lambda t: 2.0 * t
    # Evaluate both vector fields at the same state and time: they agree.
    assert dpendulum(state0, 0.0) == dpendulum(state0, 0.0, drift_x, drift_y)
def test_noninertial_pendulum():
    """An accelerating pivot must alter the pendulum dynamics."""
    ts = np.linspace(0, 10, 1000)      # simulation time
    state0 = (0, 0)                    # (theta_0, omega_0)
    forced_x = lambda t: 1.0 * t ** 2  # accelerated pivot motion
    forced_y = lambda t: 2.0 * t
    # The forced vector field differs from the inertial one.
    assert dpendulum(state0, 0.0) != dpendulum(state0, 0.0, forced_x, forced_y)
@pytest.mark.parametrize("input, exp_output", [
    ((0, 0, 0, 0), (0, 0, 0, 0)), # Stable equilibrium
    ((np.pi, 0, 0, 0), (0, 0, 0, 0)), # Unstable equilibria
    ((0, 0, np.pi, 0), (0, 0, 0, 0)),
    ((np.pi, 0, np.pi, 0), (0, 0, 0, 0))
])
def test_ddouble_pendulum(input, exp_output):
    ''' Test the equilibrium solutions

    All four combinations of each arm hanging down / standing up are
    equilibria of the double pendulum, so the state derivative must vanish.
    '''
    tol = 1e-8
    df = ddouble_pendulum(input, 0)
    assert(df == pytest.approx(exp_output, tol)), \
        'pendulum is not behaving as expected'
def test_ni_double_pendulum_no_acceleration():
    """A uniformly translating pivot leaves the double-pendulum dynamics
    unchanged (Galilean relativity)."""
    ts = np.linspace(0, 10, 1000)   # simulation time
    state0 = (0, 0, 0, 0)           # (th_0, w_0, th_1, w_1)
    drift_x = lambda t: 1.0 * t     # pivot moves with uniform speed
    drift_y = lambda t: 2.0 * t
    # Both vector fields evaluated at the same state and time must agree.
    assert ddouble_pendulum(state0, 0.0) == \
        ddouble_pendulum(state0, 0.0, drift_x, drift_y)
def test_freefall_double_pendulum():
    ''' Check the solution for a free-falling non-inertial pendulum

    In free fall the effective gravity vanishes, so the double pendulum
    should keep its initial configuration relative to the pivot.  Checked
    in both position mode and acceleration mode.
    '''
    tol = 1e-4
    ## Set-up your problem
    g = 9.8 # Acceleration of gravity
    pos_x = lambda t : 0.0*t # Pivot's position
    pos_y = lambda t : -g/2*t**2 # Free falling
    ts = np.linspace(0, 10, 1000) # Simulation time
    yinit = (np.pi/2, 0, np.pi/2, 0) # Initial condition (th_0, w_0)
    ## Solve it
    sol = double_pendulum(yinit, ts, pos_x, pos_y, g = g)
    ## No relative movement is expected
    assert(sol[-1, 0] == pytest.approx(yinit[0], tol))
    # Repeat test in acceleration mode
    acc_x = lambda t: 0.0*t # Pivot's acceleration
    acc_y = lambda t: 0.0*t - g
    sol_2 = double_pendulum(yinit, ts, acc_x, acc_y, is_acceleration = True, g = g)
    ## No relative movement is expected
    assert(sol_2[-1, 0] == pytest.approx(yinit[0], tol))
| [
"pytest.approx",
"pytest.mark.parametrize",
"numpy.linspace"
] | [((115, 205), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input, exp_output"""', '[((0, 0), (0, 0)), ((np.pi, 0), (0, 0))]'], {}), "('input, exp_output', [((0, 0), (0, 0)), ((np.pi, 0),\n (0, 0))])\n", (138, 205), False, 'import pytest\n'), ((3269, 3460), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input, exp_output"""', '[((0, 0, 0, 0), (0, 0, 0, 0)), ((np.pi, 0, 0, 0), (0, 0, 0, 0)), ((0, 0, np\n .pi, 0), (0, 0, 0, 0)), ((np.pi, 0, np.pi, 0), (0, 0, 0, 0))]'], {}), "('input, exp_output', [((0, 0, 0, 0), (0, 0, 0, 0)),\n ((np.pi, 0, 0, 0), (0, 0, 0, 0)), ((0, 0, np.pi, 0), (0, 0, 0, 0)), ((\n np.pi, 0, np.pi, 0), (0, 0, 0, 0))])\n", (3292, 3460), False, 'import pytest\n'), ((667, 691), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (678, 691), True, 'import numpy as np\n'), ((1023, 1047), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (1034, 1047), True, 'import numpy as np\n'), ((1608, 1632), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (1619, 1632), True, 'import numpy as np\n'), ((2299, 2323), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (2310, 2323), True, 'import numpy as np\n'), ((2849, 2873), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (2860, 2873), True, 'import numpy as np\n'), ((3891, 3915), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (3902, 3915), True, 'import numpy as np\n'), ((4674, 4698), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(1000)'], {}), '(0, 10, 1000)\n', (4685, 4698), True, 'import numpy as np\n'), ((403, 433), 'pytest.approx', 'pytest.approx', (['exp_output', 'tol'], {}), '(exp_output, tol)\n', (416, 433), False, 'import pytest\n'), ((808, 831), 'pytest.approx', 'pytest.approx', (['(0.0)', 'tol'], {}), '(0.0, tol)\n', (821, 831), False, 'import pytest\n'), 
((854, 877), 'pytest.approx', 'pytest.approx', (['(0.0)', 'tol'], {}), '(0.0, tol)\n', (867, 877), False, 'import pytest\n'), ((1243, 1266), 'pytest.approx', 'pytest.approx', (['(0.0)', 'tol'], {}), '(0.0, tol)\n', (1256, 1266), False, 'import pytest\n'), ((1289, 1312), 'pytest.approx', 'pytest.approx', (['(0.0)', 'tol'], {}), '(0.0, tol)\n', (1302, 1312), False, 'import pytest\n'), ((1842, 1870), 'pytest.approx', 'pytest.approx', (['yinit[0]', 'tol'], {}), '(yinit[0], tol)\n', (1855, 1870), False, 'import pytest\n'), ((2141, 2169), 'pytest.approx', 'pytest.approx', (['yinit[0]', 'tol'], {}), '(yinit[0], tol)\n', (2154, 2169), False, 'import pytest\n'), ((3677, 3707), 'pytest.approx', 'pytest.approx', (['exp_output', 'tol'], {}), '(exp_output, tol)\n', (3690, 3707), False, 'import pytest\n'), ((4927, 4955), 'pytest.approx', 'pytest.approx', (['yinit[0]', 'tol'], {}), '(yinit[0], tol)\n', (4940, 4955), False, 'import pytest\n'), ((5233, 5261), 'pytest.approx', 'pytest.approx', (['yinit[0]', 'tol'], {}), '(yinit[0], tol)\n', (5246, 5261), False, 'import pytest\n')] |
import numpy as np
from scipy.integrate import ode
from .name2idx import C, V
from .set_model import diffeq, param_values, initial_values
def solveode(diffeq, y0, tspan, args):
    """
    Integrate *diffeq* with the VODE/BDF solver, sampling once per time unit.

    :param diffeq: right-hand side f(t, y, args) of the ODE system
    :param y0: initial state vector
    :param tspan: time span; only tspan[0] and tspan[-1] are used
    :param args: extra parameters passed to *diffeq* as a single argument
    :return: (T, Y) arrays of sample times and states
    """
    integrator = ode(diffeq)
    integrator.set_integrator(
        'vode', method='bdf', with_jacobian=True, min_step=1e-8
    )
    integrator.set_initial_value(y0, tspan[0])
    integrator.set_f_params(args)
    times, states = [tspan[0]], [y0]
    # March forward in steps of one time unit until the end of the span
    # (or until the solver reports a failure).
    while integrator.successful() and integrator.t < tspan[-1]:
        integrator.integrate(integrator.t + 1.)
        times.append(integrator.t)
        states.append(integrator.y)
    return np.array(times), np.array(states)
def get_steady_state(diffeq, y0, tspan, args, steady_state_time=7200):
    """
    Integrate the system until *steady_state_time* and return the end point.

    :param diffeq: right-hand side f(t, y, args) of the ODE system
    :param y0: initial state vector
    :param tspan: time span; only tspan[0] is used (start time)
    :param args: extra parameters passed to *diffeq* as a single argument
    :param steady_state_time: time after which the state is taken as steady
    :return: (t_end, y_end) final time and state
    """
    integrator = ode(diffeq)
    integrator.set_integrator(
        'vode', method='bdf', with_jacobian=True, min_step=1e-8
    )
    integrator.set_initial_value(y0, tspan[0])
    integrator.set_f_params(args)
    times, states = [tspan[0]], [y0]
    # One internal solver step per iteration (step=True) until the target
    # time is reached or the solver fails.
    while integrator.successful() and integrator.t < steady_state_time:
        integrator.integrate(steady_state_time, step=True)
        times.append(integrator.t)
        states.append(integrator.y)
    return times[-1], states[-1]
class Simulation(object):
    """
    Simulates the pathway model for EGF and HRG stimulation.

    NOTE: the whole simulation runs once, at class-definition time; the
    results are stored in the class-level observable arrays below.
    """
    tspan = range(5401) # Unit time: 1 sec.
    conditions = ['EGF', 'HRG']
    t = np.array(tspan)/60. # sec. -> min. (plot_func.py)
    # Observable time courses: one column per stimulation condition.
    PMEK_cyt = np.empty((len(tspan), len(conditions)))
    PERK_cyt = np.empty((len(tspan), len(conditions)))
    PRSK_wcl = np.empty((len(tspan), len(conditions)))
    PCREB_wcl = np.empty((len(tspan), len(conditions)))
    DUSPmRNA = np.empty((len(tspan), len(conditions)))
    cFosmRNA = np.empty((len(tspan), len(conditions)))
    cFosPro = np.empty((len(tspan), len(conditions)))
    PcFos = np.empty((len(tspan), len(conditions)))
    x = param_values()
    y0 = initial_values()
    # get steady state -- preprocess: equilibrate without any ligand.
    y0[V.EGF] = 0.0
    y0[V.HRG] = 0.0
    (T_steady_state, Y_steady_state) = get_steady_state(diffeq, y0, tspan, tuple(x))
    y0 = Y_steady_state[:]
    # add ligand and simulate each condition from the steady state
    for i, condition in enumerate(conditions):
        if condition == 'EGF':
            y0[V.EGF] = 10.0
            y0[V.HRG] = 0.0
        elif condition == 'HRG':
            y0[V.EGF] = 0.0
            y0[V.HRG] = 10.0
        (T, Y) = solveode(diffeq, y0, tspan, tuple(x))
        if T[-1] < tspan[-1]:
            # The solver stopped early: leave the observables untouched.
            print('Simulation failed.')
        else:
            # Nuclear species are scaled by the volume ratio Vn/Vc to give
            # whole-cell-lysate equivalents.
            PMEK_cyt[:, i] = Y[:, V.ppMEKc]
            PERK_cyt[:, i] = Y[:, V.pERKc] + Y[:, V.ppERKc]
            PRSK_wcl[:, i] = Y[:, V.pRSKc] + Y[:, V.pRSKn]*(x[C.Vn]/x[C.Vc])
            PCREB_wcl[:, i] = Y[:, V.pCREBn]*(x[C.Vn]/x[C.Vc])
            DUSPmRNA[:, i] = Y[:, V.duspmRNAc]
            cFosmRNA[:, i] = Y[:, V.cfosmRNAc]
            cFosPro[:, i] = (Y[:, V.pcFOSn] + Y[:, V.cFOSn]) * (x[C.Vn]/x[C.Vc]) \
                            + Y[:, V.cFOSc] + Y[:, V.pcFOSc]
            PcFos[:, i] = Y[:, V.pcFOSn]*(x[C.Vn]/x[C.Vc]) + Y[:, V.pcFOSc]
"numpy.array",
"scipy.integrate.ode"
] | [((190, 201), 'scipy.integrate.ode', 'ode', (['diffeq'], {}), '(diffeq)\n', (193, 201), False, 'from scipy.integrate import ode\n'), ((647, 658), 'scipy.integrate.ode', 'ode', (['diffeq'], {}), '(diffeq)\n', (650, 658), False, 'from scipy.integrate import ode\n'), ((539, 550), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (547, 550), True, 'import numpy as np\n'), ((552, 563), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (560, 563), True, 'import numpy as np\n'), ((1161, 1176), 'numpy.array', 'np.array', (['tspan'], {}), '(tspan)\n', (1169, 1176), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# *******************************************************************
# Author: <NAME>
# Oct. 2019
# Copyright 2019, <NAME>, All rights reserved.
# *******************************************************************
import numpy as np
import sys
import trajectory
import copy
import matplotlib.pyplot as plt
# ROS libs
import rospy
import moveit_commander
import tf.transformations
# ROS messages
from geometry_msgs.msg import Pose, PoseStamped
from sensor_msgs.msg import JointState
from moveit_msgs.msg import RobotState
from std_msgs.msg import Header
class Planner:
"""
This class handles the motion planning of the robot.
"""
def __init__(self, robot_name):
"""
:param robot_name: 'j2n6s300' or 'j2s7s300'
:type robot_name: str
"""
self.n_joints = int(robot_name[3])
# trajectory in joint space
self.traj = trajectory.Trajectory(self.n_joints)
# MoveIt! objects
self.robot = None
self.move_group = None
self.scene = None
def create_test_traj(self, pos_amp, freq, start_pos, total_t, time_step=0.01):
"""
Creates a periodic trajectory for testing the controllers. The motion is generated by superimposing a series of
sinusoids functions.
:param pos_amp: amplitude of the joint positions (in Radians) - 2D array [n_func, n_joints]
:type pos_amp: np.array
:param freq: frequency of the periodic functions - 2D array [n_func, n_joints]
:type freq: np.array
:param start_pos: starting position of the trajectory (same as the current position of the robot)
:type start_pos: np.array or list
:param total_t: total duration of the trajectory
:type total_t: float or int
:param time_step: time between each waypoint
:type time_step: float
:return: trajectory
:rtype: trajectory.Trajectory
"""
t_waypoints = np.reshape(np.arange(0., total_t, time_step), (-1, 1))
waypoints = np.zeros((len(t_waypoints), self.n_joints))
omega = 2 * np.pi * freq
for index, t in np.ndenumerate(t_waypoints):
waypoints[index[0], :] = np.sum(pos_amp * np.sin(omega * t), axis=0) + np.array(start_pos)
self.traj = trajectory.Trajectory(self.n_joints, waypoints=waypoints, t_waypoints=t_waypoints, start_pos=start_pos,
step_size=time_step)
return self.traj
    def init_moveit(self):
        """
        Initializes the inverse kinematics system. Required if inverse kinematics is going to be used.

        Starts the MoveIt! commanders, tightens the pose tolerances and adds
        a flat "table" collision box under the robot so plans cannot dip
        below the ground.
        :return None
        """
        # set up MoveIt! robot commander and move moveit_group commander
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        self.move_group = moveit_commander.MoveGroupCommander("arm")
        # set up MoveIt! moveit_scene interface
        self.scene = moveit_commander.PlanningSceneInterface()
        # setting the position and orientation tolerance
        self.move_group.set_goal_position_tolerance(0.001)
        self.move_group.set_goal_orientation_tolerance(0.001)
        # Add a table to the moveit_scene acting as a ground
        # (sleep gives the scene interface time to connect before adding).
        rospy.sleep(1)
        table_position = PoseStamped()
        table_position.header.frame_id = self.robot.get_planning_frame()
        table_position.pose.position.x = 0.
        table_position.pose.position.y = 0.
        # Box is 0.03 thick, so its center sits half that below z = 0.
        table_position.pose.position.z = -.03 / 2
        self.scene.add_box("table", table_position, (2.4, 2.4, 0.03))
        rospy.loginfo("MoveIt! init successful.")
    def plan_moveit(self, position, orientation, start_joint_position=None, euler_flag=False, time_scale=1.0):
        """
        Plans the motions using the IK implemented in MoveIt!. Note that MoveIt! is not used to execute the plan on the
        robot, because the controllers are not very useful.

        :param position: list of cartesian coordinates (XYZ)
        :type position: list or np.array
        :param orientation: list of orientation elements (default: quaternion)
        :type orientation: list or np.array
        :param start_joint_position: starting joint position for the planner
        :type: list or np.array
        :param euler_flag: a flag to indicate whether the orientation is in euler or quaternions
        :type euler_flag: bool
        :param time_scale: scales time for the interpolation of raw waypoints
        :type time_scale: float
        :return the trajectory with the raw waypoints
        :rtype: trajectory.Trajectory
        """
        if start_joint_position is not None:
            # Plan from an explicit joint state instead of the current one.
            self.set_start_pose_moveit(start_joint_position)
        pose = self.create_pose(position, orientation, euler_flag=euler_flag)
        self.move_group.set_pose_target(pose)
        plan = self.move_group.plan()
        self.move_group.clear_pose_targets()
        # create the raw waypoints from the MoveIt! plan message
        raw_waypoints = np.array([p.positions for p in plan.joint_trajectory.points])
        t_raw_waypoints = np.array([self.to_sec(p.time_from_start) for p in plan.joint_trajectory.points])
        self.traj = trajectory.Trajectory(self.n_joints, raw_waypoints=raw_waypoints, t_raw_waypoints=t_raw_waypoints,
                                           time_scale=time_scale)
        return self.traj
def plan_cutting(self, start_position=None, end_position=None,
                 orientation=None, cutting_direction=None, cutting_plane=None,
                 rod_center=None, rod_radius=0.08, cut_force_k=100., cut_force_d=10., count=10):
    """
    Plans a simple cutting scenario using MoveIt! planner.
    :param start_position: starting position (default [0., -0.4, 0.5])
    :param end_position: end position (default [0., -0.7, 0.5])
    :param orientation: orientation during cutting (default [0.7071, 0., 0., .7071])
    :param cutting_direction: cutting direction (default [0., -1., 0.])
    :param cutting_plane: cutting plane (default [1, 1, 0])
    :param rod_center: center of the rod position (default [0., -0.55, 0.50])
    :param rod_radius: radius of the tree
    :type rod_radius: float
    :param cut_force_k: K of the cutting force
    :type cut_force_k: float
    :param cut_force_d: D of the cutting force
    :type cut_force_d: float
    :param count: number of cuts
    :type count: int
    :return: trajectory
    :rtype: trajectory.Trajectory
    """
    # BUG FIX: the defaults were mutable lists, which Python shares across
    # calls; resolve the documented defaults lazily instead.
    if start_position is None:
        start_position = [0., -0.4, 0.5]
    if end_position is None:
        end_position = [0., -0.7, 0.5]
    if orientation is None:
        orientation = [0.7071, 0., 0., .7071]
    if cutting_direction is None:
        cutting_direction = [0., -1., 0.]
    if cutting_plane is None:
        cutting_plane = [1, 1, 0]
    if rod_center is None:
        rod_center = [0., -0.55, 0.50]
    # start of the motion
    start = self.plan_moveit(position=start_position, orientation=orientation)
    start_joint_position = start.waypoints[-1]
    # trajectory 1: go to the starting position
    start_motion = self.create_test_traj(pos_amp=np.array([[0.] * 6]), freq=np.array([[0.2] * 6]),
                                         start_pos=start_joint_position, total_t=5)
    # trajectory 2: forward motion
    forward_motion = self.plan_moveit(position=end_position, orientation=orientation,
                                      start_joint_position=start_joint_position, time_scale=1.)
    # trajectory 3: backward motion
    backward_motion = self.plan_moveit(position=start_position, orientation=orientation,
                                      start_joint_position=forward_motion.waypoints[-1], time_scale=1.)
    # combine all trajectories
    cutting_motion = self.append_trajectory(forward_motion, backward_motion)
    cutting_traj = self.repeat_trajectory(cutting_motion, count)
    traj = self.append_trajectory(start_motion, cutting_traj)
    # set the position and radius of the wooden rod and the cutting direction
    traj.set_interaction_force_param(rod_center, rod_radius, cut_force_k, cut_force_d,
                                     cutting_direction, cutting_plane)
    return traj
def set_start_pose_moveit(self, joint_position):
    """
    Tells the MoveIt! planner to plan from `joint_position` instead of the
    robot's current state. The robot itself is not moved by this call.
    :param joint_position: starting position
    :type joint_position: list or np.array
    :return: None
    """
    # Build the joint-state message describing the desired start.
    joint_state = JointState()
    joint_state.header = Header()
    joint_state.header.stamp = rospy.Time.now()
    joint_state.name = ['j2n6s300_joint_1', 'j2n6s300_joint_2', 'j2n6s300_joint_3', 'j2n6s300_joint_4',
                        'j2n6s300_joint_5', 'j2n6s300_joint_6']
    joint_state.position = list(joint_position)
    # Wrap it into a RobotState and hand it to the planner.
    self.move_group.set_start_state(RobotState(joint_state=joint_state))
def append_trajectory(self, traj_1, traj_2):
    """
    Appends traj_2 to the end of traj_1. Warning: it is up to the user to
    check for continuity of the trajectories.
    :param traj_1: trajectory 1
    :type traj_1: trajectory.Trajectory
    :param traj_2: trajectory 2
    :type traj_2: trajectory.Trajectory
    :return: combined trajectory
    :rtype: trajectory.Trajectory
    """
    step_size = min(traj_1.step_size, traj_2.step_size)
    combined_waypoints = np.concatenate((traj_1.waypoints, traj_2.waypoints), axis=0)
    # Shift the second trajectory's timestamps past the end of the first.
    shifted_t = traj_2.t_waypoints + traj_1.total_t + step_size
    combined_t = np.concatenate((traj_1.t_waypoints, shifted_t), axis=0)
    self.traj = trajectory.Trajectory(self.n_joints, waypoints=combined_waypoints,
                                       t_waypoints=combined_t, start_pos=traj_1.start_pos,
                                       step_size=step_size)
    return self.traj
def repeat_trajectory(self, traj, repeat_count):
    """
    Repeats the trajectory by appending `repeat_count - 1` duplicates of
    it to its own end.
    :param traj: trajectory
    :type traj: trajectory.Trajectory
    :param repeat_count: number of repeats
    :type repeat_count: int
    :return: trajectory
    :rtype: trajectory.Trajectory
    """
    result = copy.copy(traj)
    for _ in range(repeat_count - 1):
        result = self.append_trajectory(result, traj)
    return result
@staticmethod
def to_sec(duration):
"""
Converts rospy.Duration to a second.
:param duration: time
:type duration: rospy.Duration
:return: converted timm in seconds
:rtype: float
"""
return duration.secs + duration.nsecs * 10**-9
@staticmethod
def create_pose(position, orientation, euler_flag=False):
    """
    Builds a Pose message for the end effector.
    :param position: list of cartesian coordinates (XYZ)
    :type position: list or np.array
    :param orientation: orientation elements, quaternion by default
    :type orientation: list or np.array
    :param euler_flag: whether `orientation` holds euler angles
    :type euler_flag: bool
    :return: the pose
    :rtype: Pose
    """
    # Euler angles are first converted to a quaternion.
    if euler_flag:
        orientation = tf.transformations.quaternion_from_euler(*orientation)
    pose = Pose()
    pose.position.x, pose.position.y, pose.position.z = position[0], position[1], position[2]
    pose.orientation.x = orientation[0]
    pose.orientation.y = orientation[1]
    pose.orientation.z = orientation[2]
    pose.orientation.w = orientation[3]
    return pose
@staticmethod
def shut_down_moveit():
    """
    Shuts down MoveIt! planner.
    :return: None
    """
    rospy.loginfo("Shutting down MoveIt!")
    # Tears down the roscpp layer started by moveit_commander at init time.
    moveit_commander.roscpp_shutdown()
if __name__ == '__main__':
    # a simple test for checking the trajectory parametrization and differentiation
    planner = Planner()
    starting_position = [4.7, 3.5, 2., 4.7, 0., 1.57]
    # trajectory 1: go to the starting position
    test_pos = np.array([[0.0, 0.0, 0.5, 0.0, 0.0, 0.0]])
    test_freq = np.array([[0.2, 0.2, 0.2, 0.2, 0.2, 0.2]])
    # BUG FIX: `test_pos` was passed twice and `test_freq` was never used;
    # the second argument of create_test_traj is the frequency amplitude.
    test_traj = planner.create_test_traj(test_pos, test_freq, starting_position, 5, time_step=0.01)
    # combine all trajectories
    test_traj = planner.repeat_trajectory(test_traj, 5)
    # Plot position, velocity and acceleration of joint 2.
    plt.subplot(3, 1, 1)
    plt.plot(test_traj.t_waypoints, test_traj.waypoints[:, 2], 'g')
    plt.subplot(3, 1, 2)
    plt.plot(test_traj.t_waypoints, test_traj.joint_vel[:, 2], 'b')
    plt.subplot(3, 1, 3)
    plt.plot(test_traj.t_waypoints, test_traj.joint_acc[:, 2], 'r')
    plt.show()
| [
"moveit_commander.PlanningSceneInterface",
"numpy.array",
"numpy.sin",
"geometry_msgs.msg.Pose",
"copy.copy",
"numpy.arange",
"moveit_msgs.msg.RobotState",
"matplotlib.pyplot.plot",
"numpy.ndenumerate",
"rospy.sleep",
"moveit_commander.RobotCommander",
"sensor_msgs.msg.JointState",
"rospy.Ti... | [((12185, 12227), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.5, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.5, 0.0, 0.0, 0.0]])\n', (12193, 12227), True, 'import numpy as np\n'), ((12244, 12286), 'numpy.array', 'np.array', (['[[0.2, 0.2, 0.2, 0.2, 0.2, 0.2]]'], {}), '([[0.2, 0.2, 0.2, 0.2, 0.2, 0.2]])\n', (12252, 12286), True, 'import numpy as np\n'), ((12479, 12499), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (12490, 12499), True, 'import matplotlib.pyplot as plt\n'), ((12504, 12567), 'matplotlib.pyplot.plot', 'plt.plot', (['test_traj.t_waypoints', 'test_traj.waypoints[:, 2]', '"""g"""'], {}), "(test_traj.t_waypoints, test_traj.waypoints[:, 2], 'g')\n", (12512, 12567), True, 'import matplotlib.pyplot as plt\n'), ((12573, 12593), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (12584, 12593), True, 'import matplotlib.pyplot as plt\n'), ((12598, 12661), 'matplotlib.pyplot.plot', 'plt.plot', (['test_traj.t_waypoints', 'test_traj.joint_vel[:, 2]', '"""b"""'], {}), "(test_traj.t_waypoints, test_traj.joint_vel[:, 2], 'b')\n", (12606, 12661), True, 'import matplotlib.pyplot as plt\n'), ((12667, 12687), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (12678, 12687), True, 'import matplotlib.pyplot as plt\n'), ((12692, 12755), 'matplotlib.pyplot.plot', 'plt.plot', (['test_traj.t_waypoints', 'test_traj.joint_acc[:, 2]', '"""r"""'], {}), "(test_traj.t_waypoints, test_traj.joint_acc[:, 2], 'r')\n", (12700, 12755), True, 'import matplotlib.pyplot as plt\n'), ((12761, 12771), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12769, 12771), True, 'import matplotlib.pyplot as plt\n'), ((914, 950), 'trajectory.Trajectory', 'trajectory.Trajectory', (['self.n_joints'], {}), '(self.n_joints)\n', (935, 950), False, 'import trajectory\n'), ((2156, 2183), 'numpy.ndenumerate', 'np.ndenumerate', (['t_waypoints'], {}), '(t_waypoints)\n', (2170, 
2183), True, 'import numpy as np\n'), ((2309, 2438), 'trajectory.Trajectory', 'trajectory.Trajectory', (['self.n_joints'], {'waypoints': 'waypoints', 't_waypoints': 't_waypoints', 'start_pos': 'start_pos', 'step_size': 'time_step'}), '(self.n_joints, waypoints=waypoints, t_waypoints=\n t_waypoints, start_pos=start_pos, step_size=time_step)\n', (2330, 2438), False, 'import trajectory\n'), ((2759, 2803), 'moveit_commander.roscpp_initialize', 'moveit_commander.roscpp_initialize', (['sys.argv'], {}), '(sys.argv)\n', (2793, 2803), False, 'import moveit_commander\n'), ((2825, 2858), 'moveit_commander.RobotCommander', 'moveit_commander.RobotCommander', ([], {}), '()\n', (2856, 2858), False, 'import moveit_commander\n'), ((2885, 2927), 'moveit_commander.MoveGroupCommander', 'moveit_commander.MoveGroupCommander', (['"""arm"""'], {}), "('arm')\n", (2920, 2927), False, 'import moveit_commander\n'), ((2998, 3039), 'moveit_commander.PlanningSceneInterface', 'moveit_commander.PlanningSceneInterface', ([], {}), '()\n', (3037, 3039), False, 'import moveit_commander\n'), ((3289, 3303), 'rospy.sleep', 'rospy.sleep', (['(1)'], {}), '(1)\n', (3300, 3303), False, 'import rospy\n'), ((3329, 3342), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (3340, 3342), False, 'from geometry_msgs.msg import Pose, PoseStamped\n'), ((3633, 3674), 'rospy.loginfo', 'rospy.loginfo', (['"""MoveIt! init successful."""'], {}), "('MoveIt! 
init successful.')\n", (3646, 3674), False, 'import rospy\n'), ((5029, 5090), 'numpy.array', 'np.array', (['[p.positions for p in plan.joint_trajectory.points]'], {}), '([p.positions for p in plan.joint_trajectory.points])\n', (5037, 5090), True, 'import numpy as np\n'), ((5219, 5344), 'trajectory.Trajectory', 'trajectory.Trajectory', (['self.n_joints'], {'raw_waypoints': 'raw_waypoints', 't_raw_waypoints': 't_raw_waypoints', 'time_scale': 'time_scale'}), '(self.n_joints, raw_waypoints=raw_waypoints,\n t_raw_waypoints=t_raw_waypoints, time_scale=time_scale)\n', (5240, 5344), False, 'import trajectory\n'), ((8321, 8333), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (8331, 8333), False, 'from sensor_msgs.msg import JointState\n'), ((8363, 8371), 'std_msgs.msg.Header', 'Header', ([], {}), '()\n', (8369, 8371), False, 'from std_msgs.msg import Header\n'), ((8407, 8423), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (8421, 8423), False, 'import rospy\n'), ((8681, 8716), 'moveit_msgs.msg.RobotState', 'RobotState', ([], {'joint_state': 'joint_state'}), '(joint_state=joint_state)\n', (8691, 8716), False, 'from moveit_msgs.msg import RobotState\n'), ((9333, 9386), 'numpy.append', 'np.append', (['traj_1.waypoints', 'traj_2.waypoints'], {'axis': '(0)'}), '(traj_1.waypoints, traj_2.waypoints, axis=0)\n', (9342, 9386), True, 'import numpy as np\n'), ((9409, 9499), 'numpy.append', 'np.append', (['traj_1.t_waypoints', '(traj_2.t_waypoints + traj_1.total_t + step_size)'], {'axis': '(0)'}), '(traj_1.t_waypoints, traj_2.t_waypoints + traj_1.total_t +\n step_size, axis=0)\n', (9418, 9499), True, 'import numpy as np\n'), ((9554, 9683), 'trajectory.Trajectory', 'trajectory.Trajectory', (['self.n_joints'], {'waypoints': 'waypoints', 't_waypoints': 't_waypoints', 'start_pos': 'start_pos', 'step_size': 'step_size'}), '(self.n_joints, waypoints=waypoints, t_waypoints=\n t_waypoints, start_pos=start_pos, step_size=step_size)\n', (9575, 9683), False, 'import 
trajectory\n'), ((10141, 10156), 'copy.copy', 'copy.copy', (['traj'], {}), '(traj)\n', (10150, 10156), False, 'import copy\n'), ((11393, 11399), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (11397, 11399), False, 'from geometry_msgs.msg import Pose, PoseStamped\n'), ((11848, 11886), 'rospy.loginfo', 'rospy.loginfo', (['"""Shutting down MoveIt!"""'], {}), "('Shutting down MoveIt!')\n", (11861, 11886), False, 'import rospy\n'), ((11895, 11929), 'moveit_commander.roscpp_shutdown', 'moveit_commander.roscpp_shutdown', ([], {}), '()\n', (11927, 11929), False, 'import moveit_commander\n'), ((1990, 2024), 'numpy.arange', 'np.arange', (['(0.0)', 'total_t', 'time_step'], {}), '(0.0, total_t, time_step)\n', (1999, 2024), True, 'import numpy as np\n'), ((2268, 2287), 'numpy.array', 'np.array', (['start_pos'], {}), '(start_pos)\n', (2276, 2287), True, 'import numpy as np\n'), ((6793, 6814), 'numpy.array', 'np.array', (['[[0.0] * 6]'], {}), '([[0.0] * 6])\n', (6801, 6814), True, 'import numpy as np\n'), ((6820, 6841), 'numpy.array', 'np.array', (['[[0.2] * 6]'], {}), '([[0.2] * 6])\n', (6828, 6841), True, 'import numpy as np\n'), ((2239, 2256), 'numpy.sin', 'np.sin', (['(omega * t)'], {}), '(omega * t)\n', (2245, 2256), True, 'import numpy as np\n')] |
"""Central module of the nodal package.
Provides, among others, the classes meant for external usage:
* Netlist: reads .csv files
* Circuit: provides solve() method to compute electrical variables
* Solution: printable object storing the computation results
Example use case:
from nodal import Circuit, Netlist
my_netlist = Netlist("path/to/netlist.csv")
my_circuit = Circuit(my_netlist, sparse=True)
my_solution = my_circuit.solve()
print(my_solution)
"""
import csv
import logging
import numpy as np
import scipy as sp
import scipy.sparse as spsp
import scipy.sparse.linalg as spspla
import nodal.constants as c
import nodal.models as models
logging.basicConfig(level=logging.ERROR)
def find_ground_node(degrees):
    """Chooses the node used as ground reference for the circuit.

    A node explicitly labeled "g" wins; otherwise the node with the
    highest degree (most connected components) is picked.
    """
    ground = "g" if "g" in degrees else max(degrees, key=degrees.get)
    logging.debug(f"ground node-> {ground}")
    return ground
def build_opmodel(data):
    """Expands an OPMODEL row into its equivalent sub-circuit.

    OPMODEL component specification:
    [name, "OPMODEL", value of feedback resistor, output terminal,
     ground terminal, non-inverting terminal, inverting terminal]

    Returns the rows of the equivalent circuit: an input resistor, an
    output resistor, a VCVS, and (unless the feedback value is "0")
    a feedback resistor.
    """
    op_name = data[c.NCOL]
    fb_value = data[c.VCOL]
    # Terminals, in specification order.
    out_node = data[c.ACOL]
    gnd_node = data[c.BCOL]
    pos_node = data[c.CCOL]
    neg_node = data[c.DCOL]
    # Internal node between the VCVS and the output resistor.
    internal = f"{op_name}_internal_node"
    equivalent = [
        [f"{op_name}_ri", "R", str(c.OPMODEL_RI), pos_node, neg_node],
        [f"{op_name}_ro", "R", str(c.OPMODEL_RO), internal, out_node],
        [f"{op_name}_vcvs", "VCVS", str(c.OPMODEL_GAIN), internal, gnd_node, pos_node, neg_node],
    ]
    if fb_value != "0":
        equivalent.append([f"{op_name}_rf", "R", fb_value, neg_node, out_node])
    else:
        # fb_value "0" means direct feedback without a resistor, so the
        # inverting input must already be tied to the output.
        assert neg_node == out_node
    return equivalent
def is_connected(netlist):
    """Returns True iff every node is reachable from the ground node.

    Builds an undirected adjacency map from the component terminals and
    traverses the graph starting at netlist.ground.
    """
    nodes = netlist.degrees.keys()
    adjacency = {node: set() for node in nodes}
    for component in netlist.components.values():
        adjacency[component.anode].add(component.bnode)
        adjacency[component.bnode].add(component.anode)
    # Graph traversal with O(1) membership tests: the previous version
    # scanned the whole open list per candidate, making it quadratic.
    visited = {netlist.ground}
    stack = [netlist.ground]
    while stack:
        node = stack.pop()
        for neighbor in adjacency[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                stack.append(neighbor)
    return len(nodes) == len(visited)
class UnconnectedCircuitError(Exception):
    """Raised by Circuit.solve() when the netlist has nodes that are not
    reachable from the ground node (floating sub-circuits)."""
    pass
class Component:
    """Builds object representing single electrical component.

    Here `data` is a row from the .csv file, as a str.
    Sets the following attributes:
        * name
        * type
        * value
        * anode
        * bnode
        * [pos_control]
        * [neg_control]
        * [driver]
    Attributes in brackets might be set to None if not applicable.
    Raises ValueError if data is malformed.
    """

    def __init__(self, data):
        # Validate the row first; check_input raises ValueError on bad input.
        self.check_input(data)
        self.name = data[c.NCOL]
        self.type = data[c.TCOL]
        self.value = float(data[c.VCOL])
        self.anode = data[c.ACOL]
        self.bnode = data[c.BCOL]
        if data[c.TCOL] in c.NODE_TYPES_DEP:
            # Dependent sources carry a controlling node pair.
            self.pos_control = data[c.CCOL]
            self.neg_control = data[c.DCOL]
            if data[c.TCOL] in c.NODE_TYPES_CC:
                # Current-controlled sources also name the driving component.
                self.driver = data[c.PCOL]
            else:
                self.driver = None
        else:
            # NOTE(review): `driver` is never set for non-dependent types;
            # code reading component.driver must guard for that.
            self.pos_control = None
            self.neg_control = None

    def check_input(self, data):
        s = len(data)
        # Comment rows and empty rows are accepted silently (skipped upstream).
        if s == 0 or data[0][0] == "#":
            return
        key = data[c.NCOL]
        assert type(key) == str
        if s < 5:
            raise ValueError(f"Missing arguments for component {key}")
        ctype = data[c.TCOL]
        if ctype not in c.NODE_TYPES:
            raise ValueError(f"Unknown type {ctype} for component {key}")
        # Each component type expects an exact number of columns.
        n = c.NODE_ARGS_NUMBER[ctype]
        if s != n:
            raise ValueError(
                f"Wrong number of arguments for component {key}: expected {n}, got {s}"
            )
        # The value column must parse as a float.
        try:
            float(data[c.VCOL])
        except ValueError:
            raise ValueError(
                "Bad input: expected a number for component value "
                f"of {key}, got {data[c.VCOL]} instead"
            )
class Netlist:
    """Reads netlist from .csv file.

    Sets the following attributes:
        * nums: dictionary keeping count of the number of
            * total electrical components
            * anomalous components, ie component of any type
              contained in NODE_TYPES_ANOM
            * branch equations
            * Kirchhoff Current Laws, ie number of nodes -1
            * opamps
        * degrees: number of connected components for each node
        * anomnum: dictionary, with the format {component.name: i} for
          all anomalous components, where i is an index starting at 0
        * components: dictionary of Component objects, using
          component.name as a key
        * component_keys: ordered list of component keys, kept since
          we will later need to iterate over components in the same
          order as it was written
        * ground: ground node
        * nodenum: dictionary, with the format {node_label: i} for
          all circuit nodes except ground, where i is an index starting
          at 0
        * opmodel_equivalents: stores the equivalent circuits generated
          by build_opmodel()
    Raises FileNotFoundError, ValueError when the netlist file
    can't be found or parsed.
    """

    def __init__(self, path):
        # All counters start empty; read_netlist() fills everything in.
        self.nums = {"components": 0, "anomalies": 0, "be": 0, "kcl": 0, "opamps": 0}
        self.degrees = {}
        self.anomnum = {}
        self.components = {}
        self.component_keys = []
        self.ground = None
        self.nodenum = {}
        self.opmodel_equivalents = []
        self.read_netlist(path)

    def process_component(self, data):
        """Builds a Component object, updates counts and attributes"""
        # Skip comments and empty lines
        if data == [] or data[0][0] == "#":
            return
        # If the current component is an OPMODEL,
        # replace it with an equivalent circuit.
        if data[c.TCOL] == "OPMODEL":
            eq = build_opmodel(data)
            # The equivalent rows are processed later, in read_netlist(),
            # after the whole file has been read.
            self.opmodel_equivalents.extend(eq)
            return
        # Otherwise, just build the component
        try:
            newcomp = Component(data)
        except ValueError:
            raise
        key = data[c.NCOL]
        # We will need to iterate over components twice
        # in the same order, so we save keys
        self.component_keys.append(key)
        self.components[key] = newcomp
        # Update the different component counts
        self.nums["components"] += 1
        curnodes = [data[c.ACOL], data[c.BCOL]]
        newnodes = [key for key in curnodes if key not in self.degrees]
        if data[c.TCOL] in c.NODE_TYPES_ANOM:
            # Anomalous components get a dedicated branch-equation index.
            self.anomnum[data[c.NCOL]] = self.nums["anomalies"]
            self.nums["anomalies"] += 1
        for node in newnodes:
            self.degrees[node] = 0
        for node in curnodes:
            self.degrees[node] += 1

    def read_netlist(self, path):
        """Iterates over netlist file to process components"""
        # Opened once only to surface (and log) FileNotFoundError early.
        try:
            infile = open(path, "r")
        except FileNotFoundError:
            logging.error(f"File '{path}' not found.")
            raise
        infile.close()
        with open(path, "r") as infile:
            reader = csv.reader(infile, skipinitialspace=True)
            # Iterate over components in the netlist file
            for data in reader:
                self.process_component(data)
        # Process the expanded OPMODEL equivalents after the file proper.
        for data in self.opmodel_equivalents:
            self.process_component(data)
        # Set ground node
        self.ground = find_ground_node(self.degrees)
        # Update node counts
        i = 0
        self.nodenum = {}
        for node in [k for k in self.degrees.keys() if k != self.ground]:
            self.nodenum[node] = i
            i += 1
        # nodenum should have an entry for all nodes except ground
        assert len(self.nodenum) == len(self.degrees) - 1
        # Update equations count
        logging.debug(f"nodenum={self.nodenum}")
        self.nums["kcl"] = len(self.nodenum)
        self.nums["be"] = self.nums["anomalies"]
        logging.debug(f"nums={self.nums}")
        logging.debug(f"anomnum={self.anomnum}")
class Circuit:
    """Builds a system of linear equations from a Netlist object.

    Main functionality is providing the solve() method, which
    returns a Solution object.
    """

    def __init__(self, netlist, sparse=False):
        if not isinstance(netlist, Netlist):
            raise TypeError("Input isn't a netlist")
        self.netlist = netlist
        self.sparse = sparse
        self.G, self.A, self.currents = self.build_model()

    def solve(self):
        """Wrapper for numpy and scipy methods.

        Raises:
            * LinAlgError: the linear system is singular. This should
              never happen.
            * UnconnectedCircuitError: there are floating nodes not
              connected to the rest of the circuit.
        """
        try:
            if self.sparse:
                e = spspla.spsolve(self.G, self.A)
            else:
                e = np.linalg.solve(self.G, self.A)
        # BUG FIX: np.linalg.linalg was a private alias removed in NumPy 2.0;
        # np.linalg.LinAlgError is the public, stable name.
        except (np.linalg.LinAlgError, sp.linalg.LinAlgError):
            if not is_connected(self.netlist):
                logging.error("Model error: unconnected circuit")
                raise UnconnectedCircuitError
            else:
                logging.error("Model error: matrix is singular")
                logging.debug(self.G)
                raise
        return Solution(e, self.netlist, self.currents)

    def build_model(self):
        """Assembles the coefficient matrix G, the source vector A and the
        bookkeeping list of currents for the nodal equations."""
        # Setup local variables
        nums = self.netlist.nums
        anomnum = self.netlist.anomnum
        components = self.netlist.components
        component_keys = self.netlist.component_keys
        ground = self.netlist.ground
        nodenum = self.netlist.nodenum
        # Initialize matrices
        n = nums["kcl"] + nums["be"]  # number of unknowns
        if self.sparse:
            # dok_matrix supports cheap incremental writes; converted to
            # CSR at the end for fast solving.
            G = spsp.dok_matrix((n, n), dtype=np.float64)
        else:
            G = np.zeros(shape=(n, n))
        A = np.zeros(n)
        currents = []
        # Iterate over components
        for key in component_keys:  # preserve order of iteration
            component = components[key]
            # Map the two terminals to equation indices; ground maps to None.
            if component.anode != ground:
                i = nodenum[component.anode]
                assert 0 <= i <= nums["kcl"]
            else:
                i = None
            if component.bnode != ground:
                j = nodenum[component.bnode]
                assert 0 <= j <= nums["kcl"]
            else:
                j = None
            args = (component, i, j, ground, G, A, currents, anomnum, nums, nodenum)
            if component.type == "R":
                models.write_R(component, i, j, ground, G)
            elif component.type == "A":
                models.write_A(component, i, j, ground, A)
            elif component.type == "E":
                models.write_E(*args)
            elif component.type == "VCCS":
                # NOTE(review): VCCS is dispatched to write_VCVS and models
                # exposes no write_VCCS; confirm this stamping is intended.
                models.write_VCVS(*args)
            elif component.type == "VCVS":
                models.write_VCVS(*args)
            elif component.type == "CCVS":
                models.write_CCVS(*args, components)
            elif component.type == "CCCS":
                models.write_CCCS(*args, components)
            elif component.type == "OPAMP":
                raise NotImplementedError
            else:
                # This should never happen, since `component` has
                # already been tested by Component.check_input()
                # BUG FIX: was component.driver.type, but `driver` is unset
                # for simple components and None for non-CC sources, which
                # would turn this report into an AttributeError.
                raise ValueError(f"Unknown component type: {component.type}")
        # Log and return
        logging.debug(f"currents={currents}")
        logging.debug(f"G=\n{G}")
        logging.debug(f"A=\n{A}")
        if self.sparse:
            G = G.tocsr()
        return [G, A, currents]
class Solution:
    """Holds the result of computation.

    Attributes:
        * result: vector, stores both node potentials and currents running
          through anomalous components. The first n elements are potentials
          (unit is volt), while the remaining are currents in ampere, where
          n = nums["kcl"]
        * other attributes are for internal use

    Printable.
    """

    def __init__(self, result, netlist, currents):
        self.result = result
        self.nodenum = netlist.nodenum
        self.nums = netlist.nums
        self.currents = currents
        self.ground = netlist.ground
        self.anomnum = netlist.anomnum

    def __str__(self):
        # Assemble the report piecewise and join once at the end.
        pieces = [f"Ground node: {self.ground}"]
        for label in sorted(self.nodenum):
            potential = self.result[self.nodenum[label]]
            pieces.append(f"\ne({label}) \t= {potential}")
        # Branch currents are stored after the node potentials.
        offset = self.nums["kcl"]
        for label in sorted(self.anomnum):
            current = self.result[offset + self.anomnum[label]]
            pieces.append(f"\ni({label}) \t= {current}")
        return "".join(pieces)
| [
"logging.basicConfig",
"scipy.sparse.linalg.spsolve",
"nodal.models.write_E",
"numpy.linalg.solve",
"nodal.models.write_VCVS",
"logging.debug",
"nodal.models.write_A",
"nodal.models.write_CCVS",
"nodal.models.write_CCCS",
"nodal.models.write_R",
"numpy.zeros",
"scipy.sparse.dok_matrix",
"csv... | [((682, 722), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.ERROR'}), '(level=logging.ERROR)\n', (701, 722), False, 'import logging\n'), ((1061, 1101), 'logging.debug', 'logging.debug', (['f"""ground node-> {ground}"""'], {}), "(f'ground node-> {ground}')\n", (1074, 1101), False, 'import logging\n'), ((10875, 10886), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (10883, 10886), True, 'import numpy as np\n'), ((12474, 12511), 'logging.debug', 'logging.debug', (['f"""currents={currents}"""'], {}), "(f'currents={currents}')\n", (12487, 12511), False, 'import logging\n'), ((12520, 12545), 'logging.debug', 'logging.debug', (['f"""G=\n{G}"""'], {}), "(f'G=\\n{G}')\n", (12533, 12545), False, 'import logging\n'), ((12554, 12579), 'logging.debug', 'logging.debug', (['f"""A=\n{A}"""'], {}), "(f'A=\\n{A}')\n", (12567, 12579), False, 'import logging\n'), ((7964, 8005), 'csv.reader', 'csv.reader', (['infile'], {'skipinitialspace': '(True)'}), '(infile, skipinitialspace=True)\n', (7974, 8005), False, 'import csv\n'), ((8731, 8771), 'logging.debug', 'logging.debug', (['f"""nodenum={self.nodenum}"""'], {}), "(f'nodenum={self.nodenum}')\n", (8744, 8771), False, 'import logging\n'), ((8886, 8920), 'logging.debug', 'logging.debug', (['f"""nums={self.nums}"""'], {}), "(f'nums={self.nums}')\n", (8899, 8920), False, 'import logging\n'), ((8933, 8973), 'logging.debug', 'logging.debug', (['f"""anomnum={self.anomnum}"""'], {}), "(f'anomnum={self.anomnum}')\n", (8946, 8973), False, 'import logging\n'), ((10768, 10809), 'scipy.sparse.dok_matrix', 'spsp.dok_matrix', (['(n, n)'], {'dtype': 'np.float64'}), '((n, n), dtype=np.float64)\n', (10783, 10809), True, 'import scipy.sparse as spsp\n'), ((10840, 10862), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n)'}), '(shape=(n, n))\n', (10848, 10862), True, 'import numpy as np\n'), ((7818, 7860), 'logging.error', 'logging.error', (['f"""File \'{path}\' not found."""'], {}), '(f"File \'{path}\' not found.")\n', 
(7831, 7860), False, 'import logging\n'), ((9803, 9833), 'scipy.sparse.linalg.spsolve', 'spspla.spsolve', (['self.G', 'self.A'], {}), '(self.G, self.A)\n', (9817, 9833), True, 'import scipy.sparse.linalg as spspla\n'), ((9872, 9903), 'numpy.linalg.solve', 'np.linalg.solve', (['self.G', 'self.A'], {}), '(self.G, self.A)\n', (9887, 9903), True, 'import numpy as np\n'), ((11540, 11582), 'nodal.models.write_R', 'models.write_R', (['component', 'i', 'j', 'ground', 'G'], {}), '(component, i, j, ground, G)\n', (11554, 11582), True, 'import nodal.models as models\n'), ((10037, 10086), 'logging.error', 'logging.error', (['"""Model error: unconnected circuit"""'], {}), "('Model error: unconnected circuit')\n", (10050, 10086), False, 'import logging\n'), ((10167, 10215), 'logging.error', 'logging.error', (['"""Model error: matrix is singular"""'], {}), "('Model error: matrix is singular')\n", (10180, 10215), False, 'import logging\n'), ((10232, 10253), 'logging.debug', 'logging.debug', (['self.G'], {}), '(self.G)\n', (10245, 10253), False, 'import logging\n'), ((11639, 11681), 'nodal.models.write_A', 'models.write_A', (['component', 'i', 'j', 'ground', 'A'], {}), '(component, i, j, ground, A)\n', (11653, 11681), True, 'import nodal.models as models\n'), ((11738, 11759), 'nodal.models.write_E', 'models.write_E', (['*args'], {}), '(*args)\n', (11752, 11759), True, 'import nodal.models as models\n'), ((11819, 11843), 'nodal.models.write_VCVS', 'models.write_VCVS', (['*args'], {}), '(*args)\n', (11836, 11843), True, 'import nodal.models as models\n'), ((11903, 11927), 'nodal.models.write_VCVS', 'models.write_VCVS', (['*args'], {}), '(*args)\n', (11920, 11927), True, 'import nodal.models as models\n'), ((11987, 12023), 'nodal.models.write_CCVS', 'models.write_CCVS', (['*args', 'components'], {}), '(*args, components)\n', (12004, 12023), True, 'import nodal.models as models\n'), ((12083, 12119), 'nodal.models.write_CCCS', 'models.write_CCCS', (['*args', 'components'], {}), '(*args, 
components)\n', (12100, 12119), True, 'import nodal.models as models\n')] |
import gym
import random
import keras
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
def create_model(state_size, action_size):
    """Builds a two-hidden-layer MLP Q-network.

    :param state_size: dimensionality of the observation vector
    :param action_size: number of discrete actions (one Q-value each)
    :return: compiled Keras model
    """
    model = Sequential()
    model.add(Dense(24, input_dim=state_size, activation='relu'))
    model.add(Dense(24, activation='relu'))
    # Linear output: raw Q-value estimates per action.
    model.add(Dense(action_size, activation='linear'))
    # BUG FIX: `lr` is deprecated (and removed in modern Keras); the
    # supported keyword is `learning_rate`.
    model.compile(loss='mse', optimizer=keras.optimizers.Adam(learning_rate=0.001))
    return model
if __name__ == "__main__":
    env = gym.make('CartPole-v1')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    batch_size = 32
    # Replay buffer: oldest transitions are dropped once 2000 are stored.
    game_history = deque(maxlen=2000)
    model = create_model(state_size, action_size)
    # Epsilon-greedy exploration schedule.
    epsilon = 1.0
    epsilon_min = 0.01
    epsilon_decay = 0.995
    for i in range(1000):
        state = env.reset()
        state = state.reshape(1, -1)
        for j in range(500):
            env.render()
            # Explore with probability epsilon, otherwise act greedily.
            if np.random.rand() <= epsilon:
                action = random.randrange(action_size)
            else:
                act_values = model.predict(state)
                action = np.argmax(act_values[0])
            next_state, reward, done, _ = env.step(action)
            if done:
                # Penalize episode termination (the pole fell over).
                reward = -10
            next_state = next_state.reshape(1, -1)
            game_history.append((state, action, reward, next_state, done))
            state = next_state
            if done:
                print("Episode : {}/{},score :{}, e :{:.2}"
                      .format(i, 1000, j, epsilon))
                break
        # Experience replay: fit the model on a random minibatch.
        if len(game_history) > batch_size:
            batch = random.sample(game_history, batch_size)
            for state, action, reward, next_state, done in batch:
                target = reward
                if not done:
                    # Bellman target with discount factor 0.95.
                    target = (reward + 0.95 * np.amax(model.predict(next_state)[0]))
                target_action = model.predict(state)
                target_action[0][action] = target
                model.fit(state, target_action, epochs=1, verbose=0)
            if epsilon > epsilon_min:
                epsilon *= epsilon_decay
    # BUG FIX: removed the dead trailer `if __name__=='_main_': main()` —
    # the misspelled '_main_' guard could never fire and main() is undefined.
"keras.optimizers.Adam",
"random.sample",
"collections.deque",
"numpy.random.rand",
"random.randrange",
"numpy.argmax",
"keras.models.Sequential",
"keras.layers.Dense",
"gym.make"
] | [((208, 220), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (218, 220), False, 'from keras.models import Sequential\n'), ((506, 529), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (514, 529), False, 'import gym\n'), ((650, 668), 'collections.deque', 'deque', ([], {'maxlen': '(2000)'}), '(maxlen=2000)\n', (655, 668), False, 'from collections import deque\n'), ((235, 285), 'keras.layers.Dense', 'Dense', (['(24)'], {'input_dim': 'state_size', 'activation': '"""relu"""'}), "(24, input_dim=state_size, activation='relu')\n", (240, 285), False, 'from keras.layers import Dense\n'), ((299, 327), 'keras.layers.Dense', 'Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (304, 327), False, 'from keras.layers import Dense\n'), ((342, 381), 'keras.layers.Dense', 'Dense', (['action_size'], {'activation': '"""linear"""'}), "(action_size, activation='linear')\n", (347, 381), False, 'from keras.layers import Dense\n'), ((421, 452), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (442, 452), False, 'import keras\n'), ((1621, 1660), 'random.sample', 'random.sample', (['game_history', 'batch_size'], {}), '(game_history, batch_size)\n', (1634, 1660), False, 'import random\n'), ((945, 961), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (959, 961), True, 'import numpy as np\n'), ((995, 1024), 'random.randrange', 'random.randrange', (['action_size'], {}), '(action_size)\n', (1011, 1024), False, 'import random\n'), ((1116, 1140), 'numpy.argmax', 'np.argmax', (['act_values[0]'], {}), '(act_values[0])\n', (1125, 1140), True, 'import numpy as np\n')] |
import numpy as np
import torch as t
import torch.nn as nn
from typing import Tuple
class NBeatsBlock(nn.Module):
    """
    N-BEATS block which takes a basis function as an argument.
    """
    def __init__(self, n_inputs: int, theta_dim: int, basis: nn.Module,
                 n_layers: int, n_hidden: int, batch_normalization: bool, dropout_prob: float):
        """
        Fully connected stack mapping an input window to basis
        coefficients (theta), which `basis` expands into a
        backcast/forecast pair.
        """
        super().__init__()
        self.batch_normalization = batch_normalization
        self.dropout_prob = dropout_prob
        # Entry layer projects the input window to the hidden width.
        stack = [nn.Linear(in_features=n_inputs, out_features=n_hidden), nn.ReLU()]
        # Remaining hidden layers, each optionally followed by
        # batch-norm and dropout.
        for _ in range(n_layers - 1):
            stack.append(nn.Linear(in_features=n_hidden, out_features=n_hidden))
            stack.append(nn.ReLU())
            if self.batch_normalization:
                stack.append(nn.BatchNorm1d(n_hidden))
            if self.dropout_prob > 0:
                stack.append(nn.Dropout(p=self.dropout_prob))
        # Final projection to the basis coefficients.
        stack.append(nn.Linear(in_features=n_hidden, out_features=theta_dim))
        self.layers = nn.Sequential(*stack)
        self.basis = basis

    def forward(self, insample_y: t.Tensor, insample_x_t: t.Tensor,
                outsample_x_t: t.Tensor) -> Tuple[t.Tensor, t.Tensor]:
        # Compute local projection weights and let the basis expand them.
        theta = self.layers(insample_y)
        return self.basis(theta, insample_x_t, outsample_x_t)
class NBeats(nn.Module):
    """
    N-Beats Model: doubly-residual stack of N-BEATS blocks.
    """
    def __init__(self, blocks: nn.ModuleList):
        super().__init__()
        self.blocks = blocks

    def forward(self, insample_y: t.Tensor, insample_x_t: t.Tensor, insample_mask: t.Tensor,
                outsample_x_t: t.Tensor) -> t.Tensor:
        # Work on time-reversed copies so the most recent observation
        # comes first in the residual window.
        residuals = insample_y.flip(dims=(-1,))
        insample_x_t = insample_x_t.flip(dims=(-1,))
        insample_mask = insample_mask.flip(dims=(-1,))
        # Seed the forecast with the last observed value (Naive1 level).
        forecast = insample_y[:, -1:]
        for block in self.blocks:
            backcast, block_forecast = block(residuals, insample_x_t, outsample_x_t)
            # Subtract what the block explained; mask out unobserved points.
            residuals = (residuals - backcast) * insample_mask
            forecast = forecast + block_forecast
        return forecast

    def decomposed_prediction(self, insample_y: t.Tensor, insample_x_t: t.Tensor, insample_mask: t.Tensor,
                              outsample_x_t: t.Tensor):
        # Same residual loop as forward(), but also returns each block's
        # individual forecast contribution.
        residuals = insample_y.flip(dims=(-1,))
        insample_x_t = insample_x_t.flip(dims=(-1,))
        insample_mask = insample_mask.flip(dims=(-1,))
        forecast = insample_y[:, -1:]  # Level with Naive1
        forecast_components = []
        for block in self.blocks:
            backcast, block_forecast = block(residuals, insample_x_t, outsample_x_t)
            residuals = (residuals - backcast) * insample_mask
            forecast = forecast + block_forecast
            forecast_components.append(block_forecast)
        return forecast, forecast_components
class IdentityBasis(nn.Module):
    """Trivial basis: theta already holds the backcast and forecast values."""
    def __init__(self, backcast_size: int, forecast_size: int):
        super().__init__()
        self.backcast_size = backcast_size
        self.forecast_size = forecast_size

    def forward(self, theta: t.Tensor, insample_x_t: t.Tensor, outsample_x_t: t.Tensor) -> Tuple[t.Tensor, t.Tensor]:
        # First `backcast_size` coefficients reconstruct the input window,
        # the last `forecast_size` coefficients are the forecast itself.
        return theta[:, :self.backcast_size], theta[:, -self.forecast_size:]
class TrendBasis(nn.Module):
    """Polynomial trend basis.

    Builds fixed (non-trainable) matrices whose rows are the powers
    ``(i / size) ** p`` for p = 0..degree_of_polynomial, so that the first
    half of theta weights the forecast polynomial and the second half the
    backcast polynomial.
    """
    def __init__(self, degree_of_polynomial: int, backcast_size: int, forecast_size: int):
        super().__init__()
        polynomial_size = degree_of_polynomial + 1
        # BUGFIX: the deprecated alias `np.float` was removed in NumPy 1.24
        # (deprecated since 1.20) and raised AttributeError; use builtin float.
        self.backcast_basis = nn.Parameter(
            t.tensor(np.concatenate([np.power(np.arange(backcast_size, dtype=float) / backcast_size, i)[None, :]
                                      for i in range(polynomial_size)]), dtype=t.float32), requires_grad=False)
        self.forecast_basis = nn.Parameter(
            t.tensor(np.concatenate([np.power(np.arange(forecast_size, dtype=float) / forecast_size, i)[None, :]
                                      for i in range(polynomial_size)]), dtype=t.float32), requires_grad=False)

    def forward(self, theta: t.Tensor, insample_x_t: t.Tensor, outsample_x_t: t.Tensor) -> Tuple[t.Tensor, t.Tensor]:
        # theta is split down the middle: first `polynomial_size` coefficients
        # drive the forecast, the rest drive the backcast.
        cut_point = self.forecast_basis.shape[0]
        backcast = t.einsum('bp,pt->bt', theta[:, cut_point:], self.backcast_basis)
        forecast = t.einsum('bp,pt->bt', theta[:, :cut_point], self.forecast_basis)
        return backcast, forecast
class SeasonalityBasis(nn.Module):
    """Harmonic (Fourier) basis with fixed, non-trainable cos/sin templates."""
    def __init__(self, harmonics: int, backcast_size: int, forecast_size: int):
        super().__init__()
        # Frequencies: a zero (constant) term followed by the scaled harmonics.
        half_waves = np.arange(harmonics, harmonics / 2 * forecast_size,
                               dtype=np.float32) / harmonics
        frequency = np.append(np.zeros(1, dtype=np.float32), half_waves)[None, :]
        backcast_grid = -2 * np.pi * (
                np.arange(backcast_size, dtype=np.float32)[:, None] / forecast_size) * frequency
        forecast_grid = 2 * np.pi * (
                np.arange(forecast_size, dtype=np.float32)[:, None] / forecast_size) * frequency

        def _template(grid):
            # Stack the cosine rows on top of the sine rows.
            cos_part = t.tensor(np.transpose(np.cos(grid)), dtype=t.float32)
            sin_part = t.tensor(np.transpose(np.sin(grid)), dtype=t.float32)
            return t.cat([cos_part, sin_part], dim=0)

        self.backcast_basis = nn.Parameter(_template(backcast_grid), requires_grad=False)
        self.forecast_basis = nn.Parameter(_template(forecast_grid), requires_grad=False)

    def forward(self, theta: t.Tensor, insample_x_t: t.Tensor, outsample_x_t: t.Tensor) -> Tuple[t.Tensor, t.Tensor]:
        # First half of theta weights the forecast templates, second half
        # the backcast templates.
        cut_point = self.forecast_basis.shape[0]
        backcast = t.einsum('bp,pt->bt', theta[:, cut_point:], self.backcast_basis)
        forecast = t.einsum('bp,pt->bt', theta[:, :cut_point], self.forecast_basis)
        return backcast, forecast
"torch.nn.ReLU",
"torch.nn.Dropout",
"numpy.arange",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"numpy.zeros",
"torch.nn.Parameter",
"torch.einsum",
"numpy.cos",
"torch.nn.Linear",
"numpy.sin",
"torch.cat"
] | [((1190, 1212), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1203, 1212), True, 'import torch.nn as nn\n'), ((4538, 4602), 'torch.einsum', 't.einsum', (['"""bp,pt->bt"""', 'theta[:, cut_point:]', 'self.backcast_basis'], {}), "('bp,pt->bt', theta[:, cut_point:], self.backcast_basis)\n", (4546, 4602), True, 'import torch as t\n'), ((4622, 4686), 'torch.einsum', 't.einsum', (['"""bp,pt->bt"""', 'theta[:, :cut_point]', 'self.forecast_basis'], {}), "('bp,pt->bt', theta[:, :cut_point], self.forecast_basis)\n", (4630, 4686), True, 'import torch as t\n'), ((5599, 5659), 'torch.cat', 't.cat', (['[backcast_cos_template, backcast_sin_template]'], {'dim': '(0)'}), '([backcast_cos_template, backcast_sin_template], dim=0)\n', (5604, 5659), True, 'import torch as t\n'), ((5879, 5939), 'torch.cat', 't.cat', (['[forecast_cos_template, forecast_sin_template]'], {'dim': '(0)'}), '([forecast_cos_template, forecast_sin_template], dim=0)\n', (5884, 5939), True, 'import torch as t\n'), ((5971, 6023), 'torch.nn.Parameter', 'nn.Parameter', (['backcast_template'], {'requires_grad': '(False)'}), '(backcast_template, requires_grad=False)\n', (5983, 6023), True, 'import torch.nn as nn\n'), ((6054, 6106), 'torch.nn.Parameter', 'nn.Parameter', (['forecast_template'], {'requires_grad': '(False)'}), '(forecast_template, requires_grad=False)\n', (6066, 6106), True, 'import torch.nn as nn\n'), ((6294, 6358), 'torch.einsum', 't.einsum', (['"""bp,pt->bt"""', 'theta[:, cut_point:]', 'self.backcast_basis'], {}), "('bp,pt->bt', theta[:, cut_point:], self.backcast_basis)\n", (6302, 6358), True, 'import torch as t\n'), ((6378, 6442), 'torch.einsum', 't.einsum', (['"""bp,pt->bt"""', 'theta[:, :cut_point]', 'self.forecast_basis'], {}), "('bp,pt->bt', theta[:, :cut_point], self.forecast_basis)\n", (6386, 6442), True, 'import torch as t\n'), ((548, 602), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'n_inputs', 'out_features': 'n_hidden'}), '(in_features=n_inputs, 
out_features=n_hidden)\n', (557, 602), True, 'import torch.nn as nn\n'), ((604, 613), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (611, 613), True, 'import torch.nn as nn\n'), ((1050, 1105), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'n_hidden', 'out_features': 'theta_dim'}), '(in_features=n_hidden, out_features=theta_dim)\n', (1059, 1105), True, 'import torch.nn as nn\n'), ((712, 766), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'n_hidden', 'out_features': 'n_hidden'}), '(in_features=n_hidden, out_features=n_hidden)\n', (721, 766), True, 'import torch.nn as nn\n'), ((801, 810), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (808, 810), True, 'import torch.nn as nn\n'), ((4894, 4923), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (4902, 4923), True, 'import numpy as np\n'), ((5435, 5456), 'numpy.cos', 'np.cos', (['backcast_grid'], {}), '(backcast_grid)\n', (5441, 5456), True, 'import numpy as np\n'), ((5530, 5551), 'numpy.sin', 'np.sin', (['backcast_grid'], {}), '(backcast_grid)\n', (5536, 5551), True, 'import numpy as np\n'), ((5715, 5736), 'numpy.cos', 'np.cos', (['forecast_grid'], {}), '(forecast_grid)\n', (5721, 5736), True, 'import numpy as np\n'), ((5810, 5831), 'numpy.sin', 'np.sin', (['forecast_grid'], {}), '(forecast_grid)\n', (5816, 5831), True, 'import numpy as np\n'), ((891, 915), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_hidden'], {}), '(n_hidden)\n', (905, 915), True, 'import torch.nn as nn\n'), ((991, 1022), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'self.dropout_prob'}), '(p=self.dropout_prob)\n', (1001, 1022), True, 'import torch.nn as nn\n'), ((4965, 5034), 'numpy.arange', 'np.arange', (['harmonics', '(harmonics / 2 * forecast_size)'], {'dtype': 'np.float32'}), '(harmonics, harmonics / 2 * forecast_size, dtype=np.float32)\n', (4974, 5034), True, 'import numpy as np\n'), ((5164, 5206), 'numpy.arange', 'np.arange', (['backcast_size'], {'dtype': 'np.float32'}), '(backcast_size, 
dtype=np.float32)\n', (5173, 5206), True, 'import numpy as np\n'), ((5299, 5341), 'numpy.arange', 'np.arange', (['forecast_size'], {'dtype': 'np.float32'}), '(forecast_size, dtype=np.float32)\n', (5308, 5341), True, 'import numpy as np\n'), ((3897, 3937), 'numpy.arange', 'np.arange', (['backcast_size'], {'dtype': 'np.float'}), '(backcast_size, dtype=np.float)\n', (3906, 3937), True, 'import numpy as np\n'), ((4167, 4207), 'numpy.arange', 'np.arange', (['forecast_size'], {'dtype': 'np.float'}), '(forecast_size, dtype=np.float)\n', (4176, 4207), True, 'import numpy as np\n')] |
import io
import csv
import logging
import requests
import glob, os
import numpy as np
import pandas as pd
from pathlib import Path
from astropy.table import Table
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
# Integer filter id -> photometric band name; inverse of the replacement
# ["g", "r", "i", "z"] -> value=[1, 2, 3, 4] applied when loading the data.
dic_filters = {1: "g", 2: "r", 3: "i", 4: "z"}
def i_minus_g(fid, mag):
    """ Compute i-g based on vectors of filters and magnitudes

    Parameters
    ----------
    fid: list of int
        Filter ids aligned with `mag` (g: 1, r: 2, i: 3, z: 4).
    mag: list of float
        Magnitudes aligned with `fid`.

    Returns the colour built from the last measurement and the most recent
    measurement taken in the other filter, or NaN when the measurements do
    not come from exactly the {g, i} pair of filters.
    """
    # BUGFIX: `list(set(fid)) == [1, 3]` relied on CPython's set iteration
    # order; set equality is order-independent and means the same thing.
    if set(fid) == {1, 3}:
        # last measurement
        last_fid = fid[-1]
        # last measurement with different filter
        # could take the mean
        index_other = np.where(np.array(fid) != last_fid)[0][-1]
        # NOTE(review): the filter-id difference gives sign = +/-2 (not
        # +/-1), so the result is scaled by 2 — kept as in the original.
        sign = np.diff([fid[index_other], last_fid])[0]
        mag = [mag[index_other], mag[-1]]
        return -1 * sign * np.diff(mag)[0]
    else:
        return np.nan
def extract_delta_color(pdf: pd.DataFrame, smooth_by: float):
    """ Extract last g-r and delta mag for each object
    Modified from Fink https://github.com/astrolabsoftware/fink-science-portal/apps/utils.py
    Adding a grouping for a given mjd interval
    Parameters
    ----------
    pdf: pandas DataFrame
        DataFrame containing magnitude, mjd, filters
        Filter band, as integer. g: 1, r: 2, i: 3, z:4
        Must also contain an integer night column "mjd_int".
    smooth_by: float
        MJD delta to smooth computation
    Returns
    ----------
    (dic_dmag, dic_dmag_mjd, dic_rate, color, color_mjd):
        dic_dmag / dic_dmag_mjd: per-filter magnitude differences between
        consecutive `smooth_by`-wide MJD bins and the (int) MJDs they refer to;
        dic_rate: per-filter magnitude differences between consecutive 1-day
        bins; color / color_mjd: i_minus_g colours per night and their MJDs
        (empty lists when fewer than two valid colours were found).
    """
    # dmag, rate for each filter; filters without enough data keep value None
    dic_dmag = dict.fromkeys([1, 2, 3, 4])
    dic_dmag_mjd = dict.fromkeys([1, 2, 3, 4])
    dic_rate = dict.fromkeys([1, 2, 3, 4])
    for fil in pdf["filter"].unique():
        subpdf = pdf[pdf["filter"] == fil]
        subpdf = subpdf.sort_values("mjd", ascending=False)
        # dmag smoothed: bin observations into `smooth_by`-wide MJD bins and
        # take the median of each bin before differencing
        min_mjd = float(int(subpdf.mjd.min()))
        max_mjd = float(int(subpdf.mjd.max()))
        bins = np.arange(min_mjd, max_mjd + 1, smooth_by)
        df_grouped = subpdf.groupby(np.digitize(subpdf.mjd, bins)).median()
        mag_grouped = df_grouped["magnitude"]
        mjd_grouped = df_grouped["mjd"]
        if len(mag_grouped) > 1:
            # only compute if more than one observation
            dmag_ = mag_grouped.diff(periods=1).values[1:]
            dmag_mjd_ = mjd_grouped.values[1:].astype(int)
            dic_dmag[fil] = dmag_
            dic_dmag_mjd[fil] = dmag_mjd_
        # Rate by day: same computation with fixed 1-day bins
        bins = np.arange(min_mjd, max_mjd + 1, 1)
        df_grouped_byday = subpdf.groupby(np.digitize(subpdf.mjd, bins)).median()
        mag_grouped_byday = df_grouped_byday["magnitude"]
        rate_ = mag_grouped_byday.diff(periods=1).values
        if len(mag_grouped_byday) > 1:
            dic_rate[fil] = rate_[1:]
    # for color (can be modified to take median mag)
    # group by night
    gpdf = pdf.groupby("mjd_int")[["filter", "mjd", "magnitude"]].agg(list)
    # take only nights with at least measurements on 2 different filters
    # (mean of the filter ids != first id  <=>  not all ids are identical)
    mask = gpdf["filter"].apply(lambda x: (len(x) > 1) & (np.sum(x) / len(x) != x[0]))
    gpdf_night = gpdf[mask]
    # compute i-g for those nights (NaN when a night isn't a {g, i} pair)
    color_tmp = [
        i_minus_g(i, j)
        for i, j in zip(gpdf_night["filter"].values, gpdf_night["magnitude"].values)
    ]
    mask = np.argwhere(~np.isnan(color_tmp)).flatten()
    # NOTE(review): `> 1` discards the case of exactly one valid colour —
    # confirm this is intended rather than `>= 1`.
    if len(mask) > 1:
        color_mjd = gpdf_night.index.values[mask]
        color = [color_tmp[k] for k in mask]
    else:
        color_mjd = []
        color = []
    return (dic_dmag, dic_dmag_mjd, dic_rate, color, color_mjd)
if __name__ == "__main__":
    # Process the forced-photometry light-curve files and extract
    # per-filter magnitude deltas, rates and i-g colours.
    inpath = "./S82sub8_59.12"
    list_files = glob.glob(f"{inpath}/*.forced.difflc.txt")

    # If daily cadence then = rate
    smooth_by = 1

    for fname in list_files[:1]:
        # read file and convert to pandas
        df_tmp = Table.read(fname, format="ascii").to_pandas()

        # variable reformatting
        t = Time(df_tmp["dateobs"].to_list(), format="isot", scale="utc")
        df_tmp["mjd"] = t.mjd.astype(float)
        df_tmp["mjd_int"] = t.mjd.astype(int)
        df_tmp["magnitude"] = df_tmp["m"].values
        df_tmp["filter"] = df_tmp["filt"].replace(
            to_replace=["g", "r", "i", "z"], value=[1, 2, 3, 4]
        )
        # drop missing magnitudes (read as the string "-" from ascii)
        df_tmp = df_tmp[df_tmp["magnitude"] != "-"]
        # BUGFIX: `np.float` was removed in NumPy 1.24; use the builtin float
        df_tmp["magnitude"] = df_tmp["magnitude"].astype(float).copy()

        dic_dmag, dic_dmag_mjd, dic_rate, color, color_mjd = extract_delta_color(
            df_tmp, smooth_by=smooth_by
        )
        # BUGFIX: removed leftover `ipdb.set_trace()` debugger breakpoint
        # that halted the script on every file.

        # # Plot to verify processing
        # from matplotlib import gridspec
        # fig = plt.figure(figsize=(14, 14))
        # gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1])
        # ax0 = plt.subplot(gs[0, 0])
        # ax1 = plt.subplot(gs[1, 0])
        # x_arr = np.arange(df_tmp.mjd.min(), df_tmp.mjd.max() + 1, 1)
        # ax1.plot(x_arr, np.zeros(len(x_arr)), color="grey", linestyle="-")
        # for fil in df_tmp["filter"].unique():
        #     sel = df_tmp[df_tmp["filter"] == fil]
        #     if len(sel) > 1:
        #         ax0.errorbar(
        #             sel.mjd,
        #             sel.magnitude,
        #             yerr=sel.dm.astype(float),
        #             fmt="o",
        #             label=dic_filters[fil],
        #         )
        #         if fil in dic_dmag_mjd.keys():
        #             ax1.scatter(dic_dmag_mjd[fil], dic_dmag[fil])
        # if len(color) > 0:
        #     ax1.scatter(color_mjd, color, color="black")
        # idx = Path(fname).stem.replace(".forced.difflc", "")
        # ax0.legend()
        # plt.savefig(f"{idx}.png")
| [
"numpy.arange",
"ipdb.set_trace",
"numpy.digitize",
"numpy.diff",
"numpy.array",
"numpy.sum",
"numpy.isnan",
"glob.glob",
"astropy.table.Table.read"
] | [((3585, 3627), 'glob.glob', 'glob.glob', (['f"""{inpath}/*.forced.difflc.txt"""'], {}), "(f'{inpath}/*.forced.difflc.txt')\n", (3594, 3627), False, 'import glob, os\n'), ((1876, 1918), 'numpy.arange', 'np.arange', (['min_mjd', '(max_mjd + 1)', 'smooth_by'], {}), '(min_mjd, max_mjd + 1, smooth_by)\n', (1885, 1918), True, 'import numpy as np\n'), ((2402, 2436), 'numpy.arange', 'np.arange', (['min_mjd', '(max_mjd + 1)', '(1)'], {}), '(min_mjd, max_mjd + 1, 1)\n', (2411, 2436), True, 'import numpy as np\n'), ((4481, 4497), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (4495, 4497), False, 'import ipdb\n'), ((736, 773), 'numpy.diff', 'np.diff', (['[fid[index_other], last_fid]'], {}), '([fid[index_other], last_fid])\n', (743, 773), True, 'import numpy as np\n'), ((847, 859), 'numpy.diff', 'np.diff', (['mag'], {}), '(mag)\n', (854, 859), True, 'import numpy as np\n'), ((3775, 3808), 'astropy.table.Table.read', 'Table.read', (['fname'], {'format': '"""ascii"""'}), "(fname, format='ascii')\n", (3785, 3808), False, 'from astropy.table import Table\n'), ((1955, 1984), 'numpy.digitize', 'np.digitize', (['subpdf.mjd', 'bins'], {}), '(subpdf.mjd, bins)\n', (1966, 1984), True, 'import numpy as np\n'), ((2479, 2508), 'numpy.digitize', 'np.digitize', (['subpdf.mjd', 'bins'], {}), '(subpdf.mjd, bins)\n', (2490, 2508), True, 'import numpy as np\n'), ((3243, 3262), 'numpy.isnan', 'np.isnan', (['color_tmp'], {}), '(color_tmp)\n', (3251, 3262), True, 'import numpy as np\n'), ((686, 699), 'numpy.array', 'np.array', (['fid'], {}), '(fid)\n', (694, 699), True, 'import numpy as np\n'), ((2993, 3002), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (2999, 3002), True, 'import numpy as np\n')] |
"""
TODO: some figure numberings (CHOICE, VERSION) were changed: make sure the current numberings are consistent with original runs
TODO: replaces previous versions 161110, 171029
TODO: how to get the grid small log lines also for x-axis?
TODO: mention that Python 3.5.2 or later is required (ideally 3.8)
Plots times for graph creation, eps_max calculation, compatibility estimation and propagation
Since graph creation takes most time, especially for large graphs, saves graphs to a file format, then loads them later again.
CHOICE is a choice of parameters and is thus included in CSV file name
VARIANT is a variant that is chosen to be plotted, is included only in Figure file name
Important (CHOICE, VARIANT) combinations:
(3,3): paper figure introduction (prop, Holdout, DCEr) with arrows
(3,2): paper figure main experiments (all methods) with arrows
(3,4): paper figure variant (prop)
(3,5): paper figure variant (prop, Holdout)
(3,6): paper figure variant (prop, Holdout, DCEr)
First version: Nov 10, 2016
This version: Jan 26, 2020
"""
import numpy as np
import datetime
import random
# import os # for displaying created PDF TODO: can be removed?
import time
import sys
sys.path.append("../sslh") # important to be able to run from command line
from fileInteraction import (save_csv_record,
save_W,
save_X,
load_W,
load_X) # TODO: Paul, why do we need to use sslh here as part of the name but below not for estimation?
from utils import (from_dictionary_beliefs,
create_parameterized_H,
replace_fraction_of_rows,
to_centering_beliefs,
eps_convergence_linbp_parameterized,
showfig)
from estimation import (estimateH,
estimateH_baseline_serial)
from graphGenerator import planted_distribution_model
from inference import linBP_symmetric_parameterized
import matplotlib as mpl
from matplotlib.ticker import LogLocator
mpl.use('Agg')  # non-interactive backend; must be set before pyplot is imported
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.max_columns', None)      # show all columns
pd.options.mode.chained_assignment = None       # default='warn'; silences SettingWithCopyWarning
# -- Determine path to data *irrespective* of where the file is run from
from os.path import abspath, dirname, join
from inspect import getfile, currentframe
current_path = dirname(abspath(getfile(currentframe())))
figure_directory = join(current_path, 'figs')          # output directory for figures
data_directory = join(current_path, 'datacache')      # output directory for CSV data/graph caches
def run(choice, variant, create_data=False, add_data=False, create_graph=False,
create_fig=True, show_plot=False, create_pdf=False, show_pdf=False, shorten_length=False, show_arrows=True):
"""main parameterized method to produce all figures.
Can be run from external jupyther notebook or method to produce all figures in PDF
"""
# -- Setup
CHOICE = choice # determines the CSV data file to use
VARIANT = variant # determines the variant of how the figures are plotted
CREATE_DATA = create_data # starts new CSV file and stores experimental timing results
ADD_DATA = add_data # adds data to existing file
CREATE_GRAPH = create_graph # creates the actual graph for experiments (stores W and X in CSV files)
SHOW_PDF = show_pdf
SHOW_PLOT = show_plot
CREATE_FIG = create_fig
CREATE_PDF = create_pdf
SHORTEN_LENGTH = shorten_length # to prune certain fraction of data to plot
SHOW_SCALING_LABELS = True # first entry in the legend is for the dashed line of scalability
SHOW_TITLE = True # show parameters in title of plot
SHOW_DCER_WITH_BOX = True # show DCER value in a extra box
LABEL_FONTSIZE = 16 # size of number labels in figure
SHOW_LINEAR = True # show dashed line for linear scaling
SHOW_ARROWS = show_arrows # show extra visual comparison of speed-up
csv_filename = 'Fig_Timing_{}.csv'.format(CHOICE) # CSV filename includes CHOICE
filename = 'Fig_Timing_{}-{}'.format(CHOICE, VARIANT) # PDF filename includes CHOICE and VARIANT
header = ['n', 'type', 'time']
if CREATE_DATA:
save_csv_record(join(data_directory, csv_filename), header, append=False)
# -- Default Graph parameters
distribution = 'powerlaw'
exponent = -0.3
k = 3
a = 1 # this value was erroneously set to 5 previously!!! TODO: fix everywhere else
# err = 0
avoidNeighbors = False
f = 0.1
est_EC = True # !!! TODO: for graph estimation
weights = 10
pyamg = False
convergencePercentage_W = None
alpha = 0
beta = 0
gamma = 0
s = 0.5
numMaxIt = 10
xtick_lab = [0.001, 0.01, 0.1, 1]
ytick_lab = np.arange(0, 1, 0.1)
xmin = 1e2
xmax = 1e8
# xmax = 1e6
ymin = 1e-3
ymax = 5e3
color_vec = ["#4C72B0", "#55A868", "#8172B2", "#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['s', '^', 'x', 'o', 'None', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 6 + ['dashed']
linewidth_vec = [3] * 3 + [4, 3, 4] + [3] * 7
SHOWMAXNUMBER = True
show_num_vec = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop', 'eps_max']
# %% -- Main Options
if CHOICE == 3:
n_vec = [100, 200, 400, 800,
1600, 3200, 6400,
12800, 25600, 51200,
102400, 204800, 409600, 819200,
1638400, 3276800, 6553600
]
# # n_vec = [1638400] # graph: 12021 sec = 3.4h, 18600 sec = 5h, 21824 sec (34000 sec old laptop)
# # n_vec = [3276800] # graph: 49481 sec = 13.8h, 68145 sec (125233 sec old laptop)
# # n_vec = [6553600] # graph: 145020 sec = 40h
h = 8
d = 5
repeat_vec_vec = [[
50, 50, 50, 50,
50, 50, 50,
20, 10, 10,
5, 5, 5, 3,
3, 3, 3
],
[
5, 5, 5, 5,
3, 3, 3,
3, 3, 1,
1
],
[
20, 20, 20, 10,
10, 10, 10,
10, 5, 5,
5, 3, 3, 1,
1, 1, 1
]
]
method_vec_vec = [['MHE', 'DHE', 'DHEr', 'LHE'],
['Holdout'],
['prop']
]
if VARIANT == 1:
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop']
show_num_vec = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
if VARIANT == 2: # version used for main paper figure
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop']
linestyle_vec = ['solid'] * 5 + ['dashed']
SHOW_ARROWS = False
if VARIANT == 3: # version used for main paper figure
method_vec_fig = ['DHEr', 'Holdout', 'prop']
label_vec = ['DCEr', 'Holdout', 'Propagation', '$\epsilon_{\mathrm{max}}$']
linestyle_vec = ['solid'] * 2 + ['dashed']
color_vec = ["#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['o', 'x', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 3 + ['dashed']
linewidth_vec = [4, 3, 4] + [3] * 7
ymin = 1e-2
SHOW_ARROWS = True
if VARIANT == 4: # figure used in slides
method_vec_fig = ['prop']
label_vec = ['Propagation']
color_vec = ['black']
marker_vec = ['None']
linestyle_vec = ['solid'] * 1
linewidth_vec = [2]
ymin = 1e-2
SHOW_ARROWS = False
SHOW_SCALING_LABELS = False
SHOW_TITLE = False
SHOW_DCER_WITH_BOX = False
LABEL_FONTSIZE = 20
SHOW_LINEAR = False
if VARIANT == 5: # figure used in slides
method_vec_fig = ['prop', 'Holdout']
label_vec = ['Propagation', 'Baseline']
color_vec = ['black', "#CCB974"]
marker_vec = ['None', '^']
linestyle_vec = ['solid'] * 2
linewidth_vec = [2, 4]
ymin = 1e-2
SHOW_ARROWS = True
SHOW_SCALING_LABELS = False
SHOW_TITLE = False
SHOW_DCER_WITH_BOX = False
LABEL_FONTSIZE = 20
SHOW_LINEAR = False
if VARIANT == 6: # figure used in slides
method_vec_fig = ['prop', 'Holdout', 'DHEr']
label_vec = ['Propagation', 'Baseline', 'Our method']
color_vec = ['black', "#CCB974", "#C44E52"]
marker_vec = ['None', '^', 'o', 'None', 'None']
linestyle_vec = ['solid'] + ['solid'] * 2
linewidth_vec = [2, 4, 4]
ymin = 1e-2
SHOW_ARROWS = True
SHOW_SCALING_LABELS = False
SHOW_TITLE = True
SHOW_DCER_WITH_BOX = False
LABEL_FONTSIZE = 20
SHOW_LINEAR = False
graph_cvs = 'Fig_Timing_SSLH_1' # re-use existing large graphs
elif CHOICE == 4:
n_vec = [200, 400, 800,
1600, 3200, 6400,
12800, 25600, 51200,
102400, 204800, 409600, 819200,
]
# n_vec = [819200] # graph: 47905 sec = 13.3h. 90562 sec = 25h (180527 sec old laptop)
h = 3
d = 25
repeat_vec_vec = [[
50, 50, 50,
50, 50, 50,
20, 10, 10,
5, 3, 3, 3,
],
[
5, 5, 5,
3, 1, 1,
1, 1, 1
],
[
20, 20, 10,
10, 10, 10,
10, 5, 5,
5, 1, 1, 1,
]
]
method_vec_vec = [['MHE', 'DHE', 'DHEr', 'LHE'],
['Holdout'],
['prop']
]
VARIANT = 2
if VARIANT == 1:
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop', 'eps_max']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop', '$\epsilon_{\mathrm{max}}$']
show_num_vec = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop', 'eps_max']
if VARIANT == 2:
method_vec_fig = ['MHE', 'LHE', 'DHE', 'DHEr', 'Holdout', 'prop']
label_vec = ['MCE', 'LCE', 'DCE', 'DCEr', 'Holdout', 'prop']
linestyle_vec = ['solid'] * 5 + ['dashed']
if VARIANT == 3:
method_vec_fig = ['DHEr', 'Holdout', 'prop']
label_vec = ['DCEr', 'Holdout', 'Propagation', '$\epsilon_{\mathrm{max}}$']
linestyle_vec = ['solid'] * 2 + ['dashed']
color_vec = ["#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['o', 'x', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 3 + ['dashed']
linewidth_vec = [4, 3, 4] + [3] * 7
ymin = 1e-2
graph_cvs = 'Fig_Timing_SSLH_2' # re-use existing large graphs
xmin = 1e3
xmax = 5e7
ymax = 1e3
elif CHOICE == 2:
# rep_Estimation = 10
# n_vec = [200, 400, 800, 1600, 3200, 6400, 12800,
# 25600, 51200, 102400, 204800, 409600, 819200]
# repeat_vec = [20, 20, 20, 20, 20, 10, 10,
# 10, 10, 10, 5, 5, 1]
# n_vec = [819200] # graph: 47905 sec = 13.3h. 90562 sec = 25h (180527 sec old laptop)
n_vec = [1638400] # !!! not done yet
repeat_vec = [1]
h = 3
d = 25
xmax = 5e7
graph_cvs = 'Fig_Timing_SSLH_2'
elif CHOICE == 10: # same as 3 but with difference bars
n_vec = [100, 200, 400, 800,
1600, 3200, 6400,
12800, 25600, 51200,
102400, 204800, 409600, 819200,
1638400, 3276800, 6553600
]
# # n_vec = [1638400] # graph: 12021 sec = 3.4h, 18600 sec = 5h, 21824 sec (34000 sec old laptop)
# # n_vec = [3276800] # graph: 49481 sec = 13.8h, 68145 sec (125233 sec old laptop)
# # n_vec = [6553600] # graph: 145020 sec = 40h
h = 8
d = 5
repeat_vec_vec = [[
50, 50, 50, 50,
50, 50, 50,
20, 10, 10,
5, 5, 5, 3,
3, 3, 3
],
[
5, 5, 5, 5,
3, 3, 3,
3, 3, 1,
1
],
[
20, 20, 20, 10,
10, 10, 10,
10, 5, 5,
5, 3, 3, 1,
1, 1, 1
]
]
method_vec_vec = [['MHE', 'DHE', 'DHEr', 'LHE'],
['Holdout'],
['prop']
]
method_vec_fig = ['DHEr', 'Holdout', 'prop']
label_vec = ['DCEr', 'Holdout', 'Propagation', '$\epsilon_{\mathrm{max}}$']
linestyle_vec = ['solid'] * 2 + ['dashed']
color_vec = ["#C44E52", "#CCB974", 'black', 'black', "#64B5CD", "black"]
marker_vec = ['o', 'x', 'None', 'None', 'None']
linestyle_vec = ['solid'] * 3 + ['dashed']
linewidth_vec = [4, 3, 4] + [3] * 7
ymin = 1e-2
graph_cvs = 'Fig_Timing_SSLH_1' # re-use existing large graphs
else:
raise Warning("Incorrect choice!")
# %% -- Common options
alpha0 = np.array([a, 1., 1.])
alpha0 = alpha0 / np.sum(alpha0)
H0 = create_parameterized_H(k, h, symmetric=True)
H0c = to_centering_beliefs(H0)
RANDOMSEED = None # For repeatability
random.seed(RANDOMSEED) # seeds some other python random generator
np.random.seed(seed=RANDOMSEED) # seeds the actually used numpy random generator; both are used and thus needed
# print("CHOICE: {}".format(CHOICE))
def save_tuple(n, label, time):
tuple = [str(datetime.datetime.now())]
text = [n, label, time]
tuple.extend(text)
print("time potential {}: {}".format(label, time))
save_csv_record(join(data_directory, csv_filename), tuple)
# %% -- Create data
if CREATE_DATA or ADD_DATA:
for repeat_vec, method_vec in zip(repeat_vec_vec, method_vec_vec):
for n, repeat in zip(n_vec, repeat_vec):
print("\nn: {}".format(n))
# repeat = repeat_vec[j]
# -- Graph
if CREATE_GRAPH:
start = time.time()
W, Xd = planted_distribution_model(n, alpha=alpha0, P=H0, m=d * n,
distribution=distribution,
exponent=exponent,
directed=False,
debug=False)
X0 = from_dictionary_beliefs(Xd)
time_graph = time.time() - start
save_W(join(data_directory, '{}_{}_W.csv'.format(graph_cvs, n)), W, saveWeights=False)
save_X(join(data_directory, '{}_{}_X.csv'.format(graph_cvs, n)), X0)
save_tuple(n, 'graph', time_graph)
else:
W, _ = load_W(join(data_directory, '{}_{}_W.csv'.format(graph_cvs, n)), skiprows=1, zeroindexing=True, n=None,
doubleUndirected=False)
X0, _, _ = load_X(join(data_directory, '{}_{}_X.csv'.format(graph_cvs, n)), n=None, k=None, skiprows=1, zeroindexing=True)
# -- Repeat loop
for i in range(repeat):
print("\n repeat: {}".format(i))
X2, ind = replace_fraction_of_rows(X0, 1 - f, avoidNeighbors=avoidNeighbors, W=W)
for method in method_vec:
if method == 'DHE':
start = time.time()
H2 = estimateH(X2, W, method='DHE', variant=1, distance=5, EC=est_EC, weights=weights)
time_est = time.time() - start
save_tuple(n, 'DHE', time_est)
elif method == 'DHEr':
start = time.time()
H2 = estimateH(X2, W, method='DHE', variant=1, distance=5, EC=est_EC, weights=weights, randomize=True)
time_est = time.time() - start
save_tuple(n, 'DHEr', time_est)
elif method == 'MHE':
start = time.time()
H2 = estimateH(X2, W, method='MHE', variant=1, distance=1, EC=est_EC, weights=None)
time_est = time.time() - start
save_tuple(n, 'MHE', time_est)
elif method == 'LHE':
start = time.time()
H2 = estimateH(X2, W, method='LHE', variant=1, distance=1, EC=est_EC, weights=None)
time_est = time.time() - start
save_tuple(n, 'LHE', time_est)
elif method == 'Holdout':
start = time.time()
H2 = estimateH_baseline_serial(X2, ind, W, numMax=numMaxIt,
numberOfSplits=1,
# EC=EC,
# weights=weight,
alpha=alpha, beta=beta, gamma=gamma)
time_est = time.time() - start
save_tuple(n, 'Holdout', time_est)
elif method == 'prop':
H2c = to_centering_beliefs(H0)
X2c = to_centering_beliefs(X2, ignoreZeroRows=True) # try without
start = time.time()
eps_max = eps_convergence_linbp_parameterized(H2c, W, method='noecho', alpha=alpha, beta=beta, gamma=gamma, X=X2,
pyamg=pyamg)
time_eps_max = time.time() - start
save_tuple(n, 'eps_max', time_eps_max)
# -- Propagate
eps = s * eps_max
try:
start = time.time()
F, actualIt, actualPercentageConverged = \
linBP_symmetric_parameterized(X2, W, H2c * eps,
method='noecho',
alpha=alpha, beta=beta, gamma=gamma,
numMaxIt=numMaxIt,
convergencePercentage=convergencePercentage_W,
debug=2)
time_prop = time.time() - start
except ValueError as e:
print(
"ERROR: {}: d={}, h={}".format(e, d, h))
else:
save_tuple(n, 'prop', time_prop)
else:
raise Warning("Incorrect choice!")
# %% -- Read, aggregate, and pivot data for all options
df1 = pd.read_csv(join(data_directory, csv_filename))
# print("\n-- df1: (length {}):\n{}".format(len(df1.index), df1.head(50)))
# Aggregate repetitions
df2 = df1.groupby(['n', 'type']).agg \
({'time': [np.mean, np.median, np.std, np.size], # Multiple Aggregates
})
df2.columns = ['_'.join(col).strip() for col in df2.columns.values] # flatten the column hierarchy
df2.reset_index(inplace=True) # remove the index hierarchy
df2.rename(columns={'time_size': 'count'}, inplace=True)
# print("\n-- df2 (length {}):\n{}".format(len(df2.index), df2.head(15)))
# Pivot table
df3 = pd.pivot_table(df2, index=['n'], columns=['type'], values=['time_mean', 'time_median']) # Pivot
# df3 = pd.pivot_table(df2, index=['n'], columns=['type'], values=['time_mean', 'time_median', 'time_std'] ) # Pivot
# print("\n-- df3 (length {}):\n{}".format(len(df3.index), df3.head(30)))
df3.columns = ['_'.join(col).strip() for col in df3.columns.values] # flatten the column hierarchy
df3.reset_index(inplace=True) # remove the index hierarchy
# df2.rename(columns={'time_size': 'count'}, inplace=True)
# print("\n-- df3 (length {}):\n{}".format(len(df3.index), df3.head(30)))
# Extract values
X = df3['n'].values # plot x values
X = X * d / 2 # calculate edges (!!! notice dividing by 2 as one edge appears twice in symmetric adjacency matrix)
Y = {}
for method in method_vec_fig:
# Y[method] = df3['time_mean_{}'.format(method)].values
Y[method] = df3['time_median_{}'.format(method)].values
if SHORTEN_LENGTH:
SHORT_FACTOR = 4 ## KEEP EVERY Nth ELEMENT
X = np.copy(X[list(range(0, len(X), SHORT_FACTOR)),])
for method in method_vec_fig:
Y[method] = np.copy(Y[method][list(range(0, len(Y[method]), SHORT_FACTOR)),])
# %% -- Figure
if CREATE_FIG:
fig_filename = '{}.pdf'.format(filename) # TODO: repeat pattern in other files
mpl.rcParams['backend'] = 'agg'
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['font.size'] = LABEL_FONTSIZE
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['axes.titlesize'] = 16
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['legend.fontsize'] = 12
mpl.rcParams['axes.edgecolor'] = '111111' # axes edge color
mpl.rcParams['grid.color'] = '777777' # grid color
mpl.rcParams['figure.figsize'] = [4, 4]
mpl.rcParams['xtick.major.pad'] = 4 # padding of tick labels: default = 4
mpl.rcParams['ytick.major.pad'] = 4 # padding of tick labels: default = 4
fig = plt.figure()
ax = fig.add_axes([0.13, 0.17, 0.8, 0.8])
# -- Draw the plots
if SHOW_LINEAR:
ax.plot([1, 1e8], [1e-5, 1e3], linewidth=1, color='gray', linestyle='dashed', label='1sec/100k edges', clip_on=True, zorder=3)
for i, (method, color, marker, linewidth, linestyle) in enumerate(zip(method_vec_fig, color_vec, marker_vec, linewidth_vec, linestyle_vec)):
ax.plot(X, Y[method], linewidth=linewidth, color=color, linestyle=linestyle, label=label_vec[i], clip_on=True, marker=marker,
markersize=6, markeredgewidth=1, markeredgecolor='black', zorder=4)
# for choice, (option, label, color, linewidth, clip_on, linestyle, marker, markersize) in \
# enumerate(zip(option_vec, labels, facecolor_vec, linewidth_vec, clip_on_vec, linestyle_vec, marker_vec, markersize_vec)):
# P = ax.plot(X_f, Y[choice], linewidth=linewidth, color=color, linestyle=linestyle, label=label, zorder=4, marker=marker,
# markersize=markersize, markeredgewidth=1, markeredgecolor='black', clip_on=clip_on)
if SHOWMAXNUMBER and method in show_num_vec:
if method == 'DHEr' and SHOW_DCER_WITH_BOX:
j = np.argmax(np.ma.masked_invalid(Y[method])) # mask nan, then get index of max element
ax.annotate(int(np.round(Y[method][j])), xy=(X[j] * 1.5, Y[method][j]), color=color, va='center',
bbox=dict(boxstyle="round,pad=0.3", fc="w"), annotation_clip=False, zorder=5)
else:
j = np.argmax(np.ma.masked_invalid(Y[method])) # mask nan, then get index of max element
ax.annotate(int(np.round(Y[method][j])), xy=(X[j] * 1.5, Y[method][j]), color=color, va='center', annotation_clip=False, zorder=5)
if SHOW_ARROWS:
dce_opt = 'DHEr'
holdout_opt = 'Holdout'
prop_opt = 'prop'
j_holdout = np.argmax(np.ma.masked_invalid(Y[holdout_opt]))
if dce_opt in Y:
j_dce = np.argmax(np.ma.masked_invalid(Y[dce_opt]))
ax.annotate(s='', xy=(X[j_dce], Y[prop_opt][j_dce]),
xytext=(X[j_dce], Y[dce_opt][j_dce]),
arrowprops=dict(arrowstyle='<->'))
ax.annotate(str(int(np.round(Y[prop_opt][j_dce] / Y[dce_opt][j_dce]))) + 'x',
xy=(X[j_dce], int(Y[prop_opt][j_dce] + Y[dce_opt][j_dce]) / 6),
color='black', va='center', fontsize=14,
# bbox = dict(boxstyle="round,pad=0.3", fc="w"),
annotation_clip=False, zorder=5)
ax.annotate(s='', xy=(X[j_holdout], Y[holdout_opt][j_holdout]),
xytext=(X[j_holdout], Y[dce_opt][j_holdout]),
arrowprops=dict(arrowstyle='<->'))
ax.annotate(str(int(np.round(Y[holdout_opt][j_holdout] / Y[dce_opt][j_holdout]))) + 'x',
xy=(X[j_holdout], int(Y[holdout_opt][j_holdout] + Y[dce_opt][j_holdout]) / 8),
color='black', va='center', fontsize=14,
# bbox = dict(boxstyle="round,pad=0.3", fc="w"),
annotation_clip=False, zorder=5)
else: # in case dce_opt not shown, then show arrow as compared to prop method
ax.annotate(s='', xy=(X[j_holdout], Y[holdout_opt][j_holdout]),
xytext=(X[j_holdout], Y[prop_opt][j_holdout]),
arrowprops=dict(arrowstyle='<->'))
ax.annotate(str(int(np.round(Y[holdout_opt][j_holdout] / Y[prop_opt][j_holdout]))) + 'x',
xy=(X[j_holdout], int(Y[holdout_opt][j_holdout] + Y[prop_opt][j_holdout]) / 8),
color='black', va='center', fontsize=14,
# bbox = dict(boxstyle="round,pad=0.3", fc="w"),
annotation_clip=False, zorder=5)
if SHOW_TITLE:
plt.title(r'$\!\!\!d\!=\!{}, h\!=\!{}$'.format(d, h))
handles, labels = ax.get_legend_handles_labels()
if not SHOW_SCALING_LABELS and SHOW_LINEAR:
handles = handles[1:]
labels = labels[1:]
legend = plt.legend(handles, labels,
loc='upper left', # 'upper right'
handlelength=2,
labelspacing=0, # distance between label entries
handletextpad=0.3, # distance between label and the line representation
borderaxespad=0.2, # distance between legend and the outer axes
borderpad=0.3, # padding inside legend box
numpoints=1, # put the marker only once
)
legend.set_zorder(3)
frame = legend.get_frame()
frame.set_linewidth(0.0)
frame.set_alpha(0.2) # 0.8
# -- Figure settings and save
plt.minorticks_on()
plt.xscale('log')
plt.yscale('log')
minorLocator = LogLocator(base=10, subs=[0.1 * n for n in range(1, 10)], numticks=40) # TODO: discuss with Paul trick that helped with grid lines last time; necessary in order to create the log locators (otherwise does now show the wanted ticks
# ax.xaxis.set_minor_locator(minorLocator)
plt.xticks([1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9])
plt.grid(True, which='both', axis='both', alpha=0.2, linestyle='-', linewidth=1, zorder=1) # linestyle='dashed', which='minor', axis='y',
# grid(b=True, which='minor', axis='x', alpha=0.2, linestyle='solid', linewidth=0.5) # linestyle='dashed', which='minor', axis='y',
plt.xlabel(r'Number of edges ($m$)', labelpad=0) # labelpad=0
plt.ylabel(r'Time [sec]', labelpad=0)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# print(ax.get_xaxis().get_minor_locator())
if CREATE_PDF:
plt.savefig(join(figure_directory, fig_filename), format='pdf',
dpi=None,
edgecolor='w',
orientation='portrait',
transparent=False,
bbox_inches='tight',
pad_inches=0.05,
# frameon=None
)
if SHOW_PDF:
showfig(join(figure_directory, fig_filename)) # shows actually created PDF
if SHOW_PLOT:
plt.show()
if __name__ == "__main__":
    # Script entry point. Positional args appear to select the experiment
    # CHOICE/VARIANT handled by the big if/elif above — TODO confirm against
    # the run() signature, which is defined earlier in this file.
    run(3, 2, create_pdf=True, show_pdf=True)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"inference.linBP_symmetric_parameterized",
"utils.to_centering_beliefs",
"utils.replace_fraction_of_rows",
"numpy.array",
"estimation.estimateH",
"estimation.estimateH_baseline_serial",
"sys.path.append",
"numpy.arange",
"pandas.pivot_table",... | [((1241, 1267), 'sys.path.append', 'sys.path.append', (['"""../sslh"""'], {}), "('../sslh')\n", (1256, 1267), False, 'import sys\n'), ((2096, 2110), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (2103, 2110), True, 'import matplotlib as mpl\n'), ((2189, 2231), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (2202, 2231), True, 'import pandas as pd\n'), ((2548, 2574), 'os.path.join', 'join', (['current_path', '"""figs"""'], {}), "(current_path, 'figs')\n", (2552, 2574), False, 'from os.path import abspath, dirname, join\n'), ((2592, 2623), 'os.path.join', 'join', (['current_path', '"""datacache"""'], {}), "(current_path, 'datacache')\n", (2596, 2623), False, 'from os.path import abspath, dirname, join\n'), ((4795, 4815), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (4804, 4815), True, 'import numpy as np\n'), ((13684, 13707), 'numpy.array', 'np.array', (['[a, 1.0, 1.0]'], {}), '([a, 1.0, 1.0])\n', (13692, 13707), True, 'import numpy as np\n'), ((13752, 13796), 'utils.create_parameterized_H', 'create_parameterized_H', (['k', 'h'], {'symmetric': '(True)'}), '(k, h, symmetric=True)\n', (13774, 13796), False, 'from utils import from_dictionary_beliefs, create_parameterized_H, replace_fraction_of_rows, to_centering_beliefs, eps_convergence_linbp_parameterized, showfig\n'), ((13807, 13831), 'utils.to_centering_beliefs', 'to_centering_beliefs', (['H0'], {}), '(H0)\n', (13827, 13831), False, 'from utils import from_dictionary_beliefs, create_parameterized_H, replace_fraction_of_rows, to_centering_beliefs, eps_convergence_linbp_parameterized, showfig\n'), ((13879, 13902), 'random.seed', 'random.seed', (['RANDOMSEED'], {}), '(RANDOMSEED)\n', (13890, 13902), False, 'import random\n'), ((13951, 13982), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'RANDOMSEED'}), '(seed=RANDOMSEED)\n', (13965, 13982), True, 'import numpy as 
np\n'), ((20594, 20685), 'pandas.pivot_table', 'pd.pivot_table', (['df2'], {'index': "['n']", 'columns': "['type']", 'values': "['time_mean', 'time_median']"}), "(df2, index=['n'], columns=['type'], values=['time_mean',\n 'time_median'])\n", (20608, 20685), True, 'import pandas as pd\n'), ((13728, 13742), 'numpy.sum', 'np.sum', (['alpha0'], {}), '(alpha0)\n', (13734, 13742), True, 'import numpy as np\n'), ((19978, 20012), 'os.path.join', 'join', (['data_directory', 'csv_filename'], {}), '(data_directory, csv_filename)\n', (19982, 20012), False, 'from os.path import abspath, dirname, join\n'), ((22664, 22676), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22674, 22676), True, 'import matplotlib.pyplot as plt\n'), ((27070, 27218), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {'loc': '"""upper left"""', 'handlelength': '(2)', 'labelspacing': '(0)', 'handletextpad': '(0.3)', 'borderaxespad': '(0.2)', 'borderpad': '(0.3)', 'numpoints': '(1)'}), "(handles, labels, loc='upper left', handlelength=2, labelspacing=\n 0, handletextpad=0.3, borderaxespad=0.2, borderpad=0.3, numpoints=1)\n", (27080, 27218), True, 'import matplotlib.pyplot as plt\n'), ((27828, 27847), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (27845, 27847), True, 'import matplotlib.pyplot as plt\n'), ((27856, 27873), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (27866, 27873), True, 'import matplotlib.pyplot as plt\n'), ((27882, 27899), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (27892, 27899), True, 'import matplotlib.pyplot as plt\n'), ((28214, 28315), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[100.0, 1000.0, 10000.0, 100000.0, 1000000.0, 10000000.0, 100000000.0, \n 1000000000.0]'], {}), '([100.0, 1000.0, 10000.0, 100000.0, 1000000.0, 10000000.0, \n 100000000.0, 1000000000.0])\n', (28224, 28315), True, 'import matplotlib.pyplot as plt\n'), ((28275, 28369), 
'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""', 'axis': '"""both"""', 'alpha': '(0.2)', 'linestyle': '"""-"""', 'linewidth': '(1)', 'zorder': '(1)'}), "(True, which='both', axis='both', alpha=0.2, linestyle='-',\n linewidth=1, zorder=1)\n", (28283, 28369), True, 'import matplotlib.pyplot as plt\n'), ((28563, 28610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of edges ($m$)"""'], {'labelpad': '(0)'}), "('Number of edges ($m$)', labelpad=0)\n", (28573, 28610), True, 'import matplotlib.pyplot as plt\n'), ((28634, 28670), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time [sec]"""'], {'labelpad': '(0)'}), "('Time [sec]', labelpad=0)\n", (28644, 28670), True, 'import matplotlib.pyplot as plt\n'), ((28680, 28700), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (28688, 28700), True, 'import matplotlib.pyplot as plt\n'), ((28709, 28729), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin', 'ymax'], {}), '(ymin, ymax)\n', (28717, 28729), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2525), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (2523, 2525), False, 'from inspect import getfile, currentframe\n'), ((4253, 4287), 'os.path.join', 'join', (['data_directory', 'csv_filename'], {}), '(data_directory, csv_filename)\n', (4257, 4287), False, 'from os.path import abspath, dirname, join\n'), ((14332, 14366), 'os.path.join', 'join', (['data_directory', 'csv_filename'], {}), '(data_directory, csv_filename)\n', (14336, 14366), False, 'from os.path import abspath, dirname, join\n'), ((29340, 29350), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29348, 29350), True, 'import matplotlib.pyplot as plt\n'), ((14164, 14187), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14185, 14187), False, 'import datetime\n'), ((24691, 24727), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['Y[holdout_opt]'], {}), '(Y[holdout_opt])\n', (24711, 24727), True, 'import numpy as 
np\n'), ((28830, 28866), 'os.path.join', 'join', (['figure_directory', 'fig_filename'], {}), '(figure_directory, fig_filename)\n', (28834, 28866), False, 'from os.path import abspath, dirname, join\n'), ((29238, 29274), 'os.path.join', 'join', (['figure_directory', 'fig_filename'], {}), '(figure_directory, fig_filename)\n', (29242, 29274), False, 'from os.path import abspath, dirname, join\n'), ((14735, 14746), 'time.time', 'time.time', ([], {}), '()\n', (14744, 14746), False, 'import time\n'), ((14775, 14913), 'graphGenerator.planted_distribution_model', 'planted_distribution_model', (['n'], {'alpha': 'alpha0', 'P': 'H0', 'm': '(d * n)', 'distribution': 'distribution', 'exponent': 'exponent', 'directed': '(False)', 'debug': '(False)'}), '(n, alpha=alpha0, P=H0, m=d * n, distribution=\n distribution, exponent=exponent, directed=False, debug=False)\n', (14801, 14913), False, 'from graphGenerator import planted_distribution_model\n'), ((15154, 15181), 'utils.from_dictionary_beliefs', 'from_dictionary_beliefs', (['Xd'], {}), '(Xd)\n', (15177, 15181), False, 'from utils import from_dictionary_beliefs, create_parameterized_H, replace_fraction_of_rows, to_centering_beliefs, eps_convergence_linbp_parameterized, showfig\n'), ((16000, 16071), 'utils.replace_fraction_of_rows', 'replace_fraction_of_rows', (['X0', '(1 - f)'], {'avoidNeighbors': 'avoidNeighbors', 'W': 'W'}), '(X0, 1 - f, avoidNeighbors=avoidNeighbors, W=W)\n', (16024, 16071), False, 'from utils import from_dictionary_beliefs, create_parameterized_H, replace_fraction_of_rows, to_centering_beliefs, eps_convergence_linbp_parameterized, showfig\n'), ((24793, 24825), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['Y[dce_opt]'], {}), '(Y[dce_opt])\n', (24813, 24825), True, 'import numpy as np\n'), ((15215, 15226), 'time.time', 'time.time', ([], {}), '()\n', (15224, 15226), False, 'import time\n'), ((23949, 23980), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['Y[method]'], {}), '(Y[method])\n', (23969, 
23980), True, 'import numpy as np\n'), ((24309, 24340), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['Y[method]'], {}), '(Y[method])\n', (24329, 24340), True, 'import numpy as np\n'), ((16200, 16211), 'time.time', 'time.time', ([], {}), '()\n', (16209, 16211), False, 'import time\n'), ((16245, 16331), 'estimation.estimateH', 'estimateH', (['X2', 'W'], {'method': '"""DHE"""', 'variant': '(1)', 'distance': '(5)', 'EC': 'est_EC', 'weights': 'weights'}), "(X2, W, method='DHE', variant=1, distance=5, EC=est_EC, weights=\n weights)\n", (16254, 16331), False, 'from estimation import estimateH, estimateH_baseline_serial\n'), ((24061, 24083), 'numpy.round', 'np.round', (['Y[method][j]'], {}), '(Y[method][j])\n', (24069, 24083), True, 'import numpy as np\n'), ((24421, 24443), 'numpy.round', 'np.round', (['Y[method][j]'], {}), '(Y[method][j])\n', (24429, 24443), True, 'import numpy as np\n'), ((16366, 16377), 'time.time', 'time.time', ([], {}), '()\n', (16375, 16377), False, 'import time\n'), ((16529, 16540), 'time.time', 'time.time', ([], {}), '()\n', (16538, 16540), False, 'import time\n'), ((16574, 16676), 'estimation.estimateH', 'estimateH', (['X2', 'W'], {'method': '"""DHE"""', 'variant': '(1)', 'distance': '(5)', 'EC': 'est_EC', 'weights': 'weights', 'randomize': '(True)'}), "(X2, W, method='DHE', variant=1, distance=5, EC=est_EC, weights=\n weights, randomize=True)\n", (16583, 16676), False, 'from estimation import estimateH, estimateH_baseline_serial\n'), ((25061, 25109), 'numpy.round', 'np.round', (['(Y[prop_opt][j_dce] / Y[dce_opt][j_dce])'], {}), '(Y[prop_opt][j_dce] / Y[dce_opt][j_dce])\n', (25069, 25109), True, 'import numpy as np\n'), ((25672, 25731), 'numpy.round', 'np.round', (['(Y[holdout_opt][j_holdout] / Y[dce_opt][j_holdout])'], {}), '(Y[holdout_opt][j_holdout] / Y[dce_opt][j_holdout])\n', (25680, 25731), True, 'import numpy as np\n'), ((26401, 26461), 'numpy.round', 'np.round', (['(Y[holdout_opt][j_holdout] / Y[prop_opt][j_holdout])'], {}), 
'(Y[holdout_opt][j_holdout] / Y[prop_opt][j_holdout])\n', (26409, 26461), True, 'import numpy as np\n'), ((16711, 16722), 'time.time', 'time.time', ([], {}), '()\n', (16720, 16722), False, 'import time\n'), ((16874, 16885), 'time.time', 'time.time', ([], {}), '()\n', (16883, 16885), False, 'import time\n'), ((16919, 16997), 'estimation.estimateH', 'estimateH', (['X2', 'W'], {'method': '"""MHE"""', 'variant': '(1)', 'distance': '(1)', 'EC': 'est_EC', 'weights': 'None'}), "(X2, W, method='MHE', variant=1, distance=1, EC=est_EC, weights=None)\n", (16928, 16997), False, 'from estimation import estimateH, estimateH_baseline_serial\n'), ((17037, 17048), 'time.time', 'time.time', ([], {}), '()\n', (17046, 17048), False, 'import time\n'), ((17199, 17210), 'time.time', 'time.time', ([], {}), '()\n', (17208, 17210), False, 'import time\n'), ((17244, 17322), 'estimation.estimateH', 'estimateH', (['X2', 'W'], {'method': '"""LHE"""', 'variant': '(1)', 'distance': '(1)', 'EC': 'est_EC', 'weights': 'None'}), "(X2, W, method='LHE', variant=1, distance=1, EC=est_EC, weights=None)\n", (17253, 17322), False, 'from estimation import estimateH, estimateH_baseline_serial\n'), ((17362, 17373), 'time.time', 'time.time', ([], {}), '()\n', (17371, 17373), False, 'import time\n'), ((17528, 17539), 'time.time', 'time.time', ([], {}), '()\n', (17537, 17539), False, 'import time\n'), ((17573, 17686), 'estimation.estimateH_baseline_serial', 'estimateH_baseline_serial', (['X2', 'ind', 'W'], {'numMax': 'numMaxIt', 'numberOfSplits': '(1)', 'alpha': 'alpha', 'beta': 'beta', 'gamma': 'gamma'}), '(X2, ind, W, numMax=numMaxIt, numberOfSplits=1,\n alpha=alpha, beta=beta, gamma=gamma)\n', (17598, 17686), False, 'from estimation import estimateH, estimateH_baseline_serial\n'), ((17985, 17996), 'time.time', 'time.time', ([], {}), '()\n', (17994, 17996), False, 'import time\n'), ((18150, 18174), 'utils.to_centering_beliefs', 'to_centering_beliefs', (['H0'], {}), '(H0)\n', (18170, 18174), False, 'from utils 
import from_dictionary_beliefs, create_parameterized_H, replace_fraction_of_rows, to_centering_beliefs, eps_convergence_linbp_parameterized, showfig\n'), ((18209, 18254), 'utils.to_centering_beliefs', 'to_centering_beliefs', (['X2'], {'ignoreZeroRows': '(True)'}), '(X2, ignoreZeroRows=True)\n', (18229, 18254), False, 'from utils import from_dictionary_beliefs, create_parameterized_H, replace_fraction_of_rows, to_centering_beliefs, eps_convergence_linbp_parameterized, showfig\n'), ((18306, 18317), 'time.time', 'time.time', ([], {}), '()\n', (18315, 18317), False, 'import time\n'), ((18356, 18476), 'utils.eps_convergence_linbp_parameterized', 'eps_convergence_linbp_parameterized', (['H2c', 'W'], {'method': '"""noecho"""', 'alpha': 'alpha', 'beta': 'beta', 'gamma': 'gamma', 'X': 'X2', 'pyamg': 'pyamg'}), "(H2c, W, method='noecho', alpha=alpha,\n beta=beta, gamma=gamma, X=X2, pyamg=pyamg)\n", (18391, 18476), False, 'from utils import from_dictionary_beliefs, create_parameterized_H, replace_fraction_of_rows, to_centering_beliefs, eps_convergence_linbp_parameterized, showfig\n'), ((18590, 18601), 'time.time', 'time.time', ([], {}), '()\n', (18599, 18601), False, 'import time\n'), ((18840, 18851), 'time.time', 'time.time', ([], {}), '()\n', (18849, 18851), False, 'import time\n'), ((18963, 19149), 'inference.linBP_symmetric_parameterized', 'linBP_symmetric_parameterized', (['X2', 'W', '(H2c * eps)'], {'method': '"""noecho"""', 'alpha': 'alpha', 'beta': 'beta', 'gamma': 'gamma', 'numMaxIt': 'numMaxIt', 'convergencePercentage': 'convergencePercentage_W', 'debug': '(2)'}), "(X2, W, H2c * eps, method='noecho', alpha=\n alpha, beta=beta, gamma=gamma, numMaxIt=numMaxIt, convergencePercentage\n =convergencePercentage_W, debug=2)\n", (18992, 19149), False, 'from inference import linBP_symmetric_parameterized\n'), ((19514, 19525), 'time.time', 'time.time', ([], {}), '()\n', (19523, 19525), False, 'import time\n')] |
"""
Functions and methods for performing solar radiation tests taken from
the BSRN Global Network recommended QC tests, V2.0
https://bsrn.awi.de
"""
import warnings
import numpy as np
import dask.array as da
from scipy.constants import Stefan_Boltzmann
from act.utils.geo_utils import get_solar_azimuth_elevation
from act.utils.data_utils import convert_units
def _calculate_solar_parameters(obj, lat_name, lon_name, solar_constant):
    """
    Compute solar zenith angles and the solar constant adjusted for the
    Earth-Sun distance over the Dataset's time range.

    Parameters
    ----------
    obj : Xarray.Dataset
        Dataset containing location and time variables
    lat_name : str
        Variable name for latitude
    lon_name : str
        Variable name for longitude
    solar_constant : float
        Solar constant in W/m^2

    Returns
    -------
    Tuple containing (solar zenith angle array, distance-adjusted solar
    constant scalar)
    """
    # Collapse latitude/longitude to scalars when they are stored as arrays.
    lat = obj[lat_name].values
    if lat.size > 1:
        lat = lat[0]
    lon = obj[lon_name].values
    if lon.size > 1:
        lon = lon[0]

    elevation, _, earth_sun_distance = get_solar_azimuth_elevation(
        latitude=lat, longitude=lon, time=obj['time'].values)

    # Mean Earth-Sun distance (in AU, presumably — confirm against
    # get_solar_azimuth_elevation), ignoring NaNs.
    mean_distance = np.nanmean(earth_sun_distance)
    adjusted_constant = solar_constant / mean_distance**2

    # Zenith angle is the complement of the elevation angle.
    zenith_angle = 90. - elevation

    return (zenith_angle, adjusted_constant)
def _find_indexes(obj, var_name, min_limit, max_limit, use_dask):
"""
Function to find array indexes where failing limit tests
Parameters
----------
obj : Xarray.Dataset
Dataset containing data to use in test
var_name : str
Variable name to inspect
min_limit : float or numpy array
Minimum limit to use for returning indexes
max_limit : float or numpy array
Maximum limit to use for returning indexes
use_dask : boolean
Option to use Dask operations instead of Numpy
Returns
-------
Tuple containing solar zenith angle array and solar constant scalar
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if use_dask and isinstance(obj[var_name].data, da.Array):
index_min = da.where(obj[var_name].data < min_limit, True, False).compute()
index_max = da.where(obj[var_name].data > max_limit, True, False).compute()
else:
index_min = np.less(obj[var_name].values, min_limit)
index_max = np.greater(obj[var_name].values, max_limit)
return (index_min, index_max)
class QCTests:
"""
This is a Mixins class used to allow using qcfilter class that is already
registered to the xarray object. All the methods in this class will be added
to the qcfilter class. Doing this to make the code spread across more files
so it is more manageable and readable. Additinal files of tests can be added
to qcfilter by creating a new class in the new file and adding to qcfilter
class declaration.
"""
def bsrn_limits_test(
self,
test='Physically Possible',
gbl_SW_dn_name=None,
glb_diffuse_SW_dn_name=None,
direct_normal_SW_dn_name=None,
direct_SW_dn_name=None,
glb_SW_up_name=None,
glb_LW_dn_name=None,
glb_LW_up_name=None,
sw_min_limit=None,
lw_min_dn_limit=None,
lw_min_up_limit=None,
lw_max_dn_limit=None,
lw_max_up_limit=None,
solar_constant=1366,
lat_name='lat',
lon_name='lon',
use_dask=False
):
"""
Method to apply BSRN limits test and add results to ancillary quality control variable.
Need to provide variable name for each measurement for the test to be performed. If no
limits provided will use default values. All data must be in W/m^2 units. Test will
provided exception if required variable name is missing.
Parameters
----------
test : str
Type of tests to apply. Options include "Physically Possible" or "Extremely Rare"
gbl_SW_dn_name : str
Variable name in the Dataset for global shortwave downwelling radiation
measured by unshaded pyranometer
glb_diffuse_SW_dn_name : str
Variable name in the Dataset for global diffuse shortwave downwelling radiation
measured by shaded pyranometer
direct_normal_SW_dn_name : str
Variable name in the Dataset for direct normal shortwave downwelling radiation
direct_SW_dn_name : str
Variable name in the Dataset for direct shortwave downwelling radiation
glb_SW_up_name : str
Variable name in the Dataset for global shortwave upwelling radiation
glb_LW_dn_name : str
Variable name in the Dataset for global longwave downwelling radiation
glb_LW_up_name : str
Variable name in the Dataset for global longwave upwelling radiation
sw_min_limit : int or float
Lower limit for shortwave radiation test
lw_min_dn_limit : int or float
Lower limit for downwelling longwave radiation test measured by a pyrgeometer
lw_min_up_limit : int or float
Lower limit for upwelling longwave radiation test measured by a pyrgeometer
lw_max_dn_limit : int or float
Upper limit for downwelling longwave radiation test measured by a pyrgeometer
lw_max_up_limit : int or float
Upper limit for upwelling longwave radiation test measured by a pyrgeometer
solar_constant : int or float
Mean solar constant used in upper limit calculation. Earth sun distance will be
calculated and applied to this value.
lat_name : str
Variable name in the Dataset for latitude
lon_name : str
Variable name in the Dataset for longitude
use_dask : boolean
Option to use Dask for processing if data is stored in a Dask array
References
----------
        Long, C. N., and E. G. Dutton. "BSRN Global Network recommended QC tests, V2.x." (2010).
Examples
--------
.. code-block:: python
ds_object = act.io.armfiles.read_netcdf(act.tests.EXAMPLE_BRS, cleanup_qc=True)
ds_object.qcfilter.bsrn_limits_test(
gbl_SW_dn_name='down_short_hemisp',
glb_diffuse_SW_dn_name='down_short_diffuse_hemisp',
direct_normal_SW_dn_name='short_direct_normal',
glb_SW_up_name='up_short_hemisp',
glb_LW_dn_name='down_long_hemisp_shaded',
glb_LW_up_name='up_long_hemisp')
"""
test_names_org = ["Physically Possible", "Extremely Rare"]
test = test.lower()
test_names = [ii.lower() for ii in test_names_org]
if test not in test_names:
raise ValueError(f"Value of '{test}' in keyword 'test' not recognized. "
f"Must a single value in options {test_names_org}")
sza, Sa = _calculate_solar_parameters(self._obj, lat_name, lon_name, solar_constant)
if test == test_names[0]:
if sw_min_limit is None:
sw_min_limit = -4.
if lw_min_dn_limit is None:
lw_min_dn_limit = 40.
if lw_min_up_limit is None:
lw_min_up_limit = 40.
if lw_max_dn_limit is None:
lw_max_dn_limit = 700.
if lw_max_up_limit is None:
lw_max_up_limit = 900.
elif test == test_names[1]:
if sw_min_limit is None:
sw_min_limit = -2.
if lw_min_dn_limit is None:
lw_min_dn_limit = 60.
if lw_min_up_limit is None:
lw_min_up_limit = 60.
if lw_max_dn_limit is None:
lw_max_dn_limit = 500.
if lw_max_up_limit is None:
lw_max_up_limit = 700.
# Global Shortwave downwelling min and max tests
if gbl_SW_dn_name is not None:
cos_sza = np.cos(np.radians(sza))
cos_sza[sza > 90.] = 0.
if test == test_names[0]:
sw_max_limit = Sa * 1.5 * cos_sza**1.2 + 100.
elif test == test_names[1]:
sw_max_limit = Sa * 1.2 * cos_sza**1.2 + 50.
index_min, index_max = _find_indexes(self._obj, gbl_SW_dn_name, sw_min_limit, sw_max_limit, use_dask)
self._obj.qcfilter.add_test(
gbl_SW_dn_name, index=index_min, test_assessment='Bad',
test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2")
self._obj.qcfilter.add_test(
gbl_SW_dn_name, index=index_max, test_assessment='Bad',
test_meaning=f"Value greater than BSRN {test.lower()} limit")
# Diffuse Shortwave downwelling min and max tests
if glb_diffuse_SW_dn_name is not None:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if test == test_names[0]:
sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza))**1.2 + 50.
elif test == test_names[1]:
sw_max_limit = Sa * 0.75 * np.cos(np.radians(sza))**1.2 + 30.
index_min, index_max = _find_indexes(self._obj, glb_diffuse_SW_dn_name, sw_min_limit,
sw_max_limit, use_dask)
self._obj.qcfilter.add_test(
glb_diffuse_SW_dn_name, index=index_min, test_assessment='Bad',
test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2")
self._obj.qcfilter.add_test(
glb_diffuse_SW_dn_name, index=index_max, test_assessment='Bad',
test_meaning=f"Value greater than BSRN {test.lower()} limit")
# Direct Normal Shortwave downwelling min and max tests
if direct_normal_SW_dn_name is not None:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if test == test_names[0]:
sw_max_limit = Sa
elif test == test_names[1]:
sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza))**0.2 + 10.
index_min, index_max = _find_indexes(self._obj, direct_normal_SW_dn_name,
sw_min_limit, sw_max_limit, use_dask)
self._obj.qcfilter.add_test(
direct_normal_SW_dn_name, index=index_min, test_assessment='Bad',
test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2")
self._obj.qcfilter.add_test(
direct_normal_SW_dn_name, index=index_max, test_assessment='Bad',
test_meaning=f"Value greater than BSRN {test.lower()} limit")
# Direct Shortwave downwelling min and max tests
if direct_SW_dn_name is not None:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if test == test_names[0]:
sw_max_limit = Sa * np.cos(np.radians(sza))
elif test == test_names[1]:
sw_max_limit = Sa * 0.95 * np.cos(np.radians(sza))**1.2 + 10
index_min, index_max = _find_indexes(self._obj, direct_SW_dn_name,
sw_min_limit, sw_max_limit, use_dask)
self._obj.qcfilter.add_test(
direct_SW_dn_name, index=index_min, test_assessment='Bad',
test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2")
self._obj.qcfilter.add_test(
direct_SW_dn_name, index=index_max, test_assessment='Bad',
test_meaning=f"Value greater than BSRN {test.lower()} limit")
# Shortwave up welling min and max tests
if glb_SW_up_name is not None:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if test == test_names[0]:
sw_max_limit = Sa * 1.2 * np.cos(np.radians(sza))**1.2 + 50
elif test == test_names[1]:
sw_max_limit = Sa * np.cos(np.radians(sza))**1.2 + 50
index_min, index_max = _find_indexes(self._obj, glb_SW_up_name,
sw_min_limit, sw_max_limit, use_dask)
self._obj.qcfilter.add_test(
glb_SW_up_name, index=index_min, test_assessment='Bad',
test_meaning=f"Value less than BSRN {test.lower()} limit of {sw_min_limit} W/m^2")
self._obj.qcfilter.add_test(
glb_SW_up_name, index=index_max, test_assessment='Bad',
test_meaning=f"Value greater than BSRN {test.lower()} limit")
# Longwave downwelling min and max tests
if glb_LW_dn_name is not None:
index_min, index_max = _find_indexes(self._obj, glb_LW_dn_name,
lw_min_dn_limit, lw_max_dn_limit, use_dask)
self._obj.qcfilter.add_test(
glb_LW_dn_name, index=index_min, test_assessment='Bad',
test_meaning=f"Value less than BSRN {test.lower()} limit of {lw_min_dn_limit} W/m^2")
self._obj.qcfilter.add_test(
glb_LW_dn_name, index=index_max, test_assessment='Bad',
test_meaning=f"Value greater than BSRN {test.lower()} limit of {lw_max_dn_limit} W/m^2")
# Longwave upwelling min and max tests
if glb_LW_up_name is not None:
index_min, index_max = _find_indexes(self._obj, glb_LW_up_name,
lw_min_up_limit, lw_max_up_limit, use_dask)
self._obj.qcfilter.add_test(
glb_LW_up_name, index=index_min, test_assessment='Bad',
test_meaning=f"Value less than BSRN {test.lower()} limit of {lw_min_up_limit} W/m^2")
self._obj.qcfilter.add_test(
glb_LW_up_name, index=index_max, test_assessment='Bad',
test_meaning=f"Value greater than BSRN {test.lower()} limit of {lw_max_up_limit} W/m^2")
def bsrn_comparison_tests(
self,
test,
gbl_SW_dn_name=None,
glb_diffuse_SW_dn_name=None,
direct_normal_SW_dn_name=None,
glb_SW_up_name=None,
glb_LW_dn_name=None,
glb_LW_up_name=None,
air_temp_name=None,
test_assessment='Indeterminate',
lat_name='lat',
lon_name='lon',
LWdn_lt_LWup_component=25.,
LWdn_gt_LWup_component=300.,
use_dask=False
):
"""
Method to apply BSRN comparison tests and add results to ancillary quality control variable.
Need to provided variable name for each measurement for the test to be performed. All radiation
data must be in W/m^2 units. Test will provided exception if required variable name is missing.
Parameters
----------
test : str
Type of tests to apply. Options include: 'Global over Sum SW Ratio', 'Diffuse Ratio',
'SW up', 'LW down to air temp', 'LW up to air temp', 'LW down to LW up'
gbl_SW_dn_name : str
Variable name in Dataset for global shortwave downwelling radiation
measured by unshaded pyranometer
glb_diffuse_SW_dn_name : str
Variable name in Dataset for global diffuse shortwave downwelling radiation
measured by shaded pyranometer
direct_normal_SW_dn_name : str
Variable name in Dataset for direct normal shortwave downwelling radiation
glb_SW_up_name : str
Variable name in Dataset for global shortwave upwelling radiation
glb_LW_dn_name : str
Variable name in Dataset for global longwave downwelling radiation
glb_LW_up_name : str
Variable name in Dataset for global longwave upwelling radiation
air_temp_name : str
Variable name in Dataset for atmospheric air temperature. Variable used
in longwave tests.
test_assessment : str
Test assessment string value appended to flag_assessments attribute of QC variable.
lat_name : str
Variable name in the Dataset for latitude
lon_name : str
Variable name in the Dataset for longitude
LWdn_lt_LWup_component : int or float
Value used in longwave down less than longwave up test.
LWdn_gt_LWup_component : int or float
Value used in longwave down greater than longwave up test.
use_dask : boolean
Option to use Dask for processing if data is stored in a Dask array
References
----------
Long, <NAME>., and <NAME>. "BSRN Global Network recommended QC tests, V2. x." (2010).
Examples
--------
.. code-block:: python
ds_object = act.io.armfiles.read_netcdf(act.tests.EXAMPLE_BRS, cleanup_qc=True)
ds_object.qcfilter.bsrn_comparison_tests(
gbl_SW_dn_name='down_short_hemisp',
glb_diffuse_SW_dn_name='down_short_diffuse_hemisp',
direct_normal_SW_dn_name='short_direct_normal',
glb_SW_up_name='up_short_hemisp',
glb_LW_dn_name='down_long_hemisp_shaded',
glb_LW_up_name='up_long_hemisp',
use_dask=True)
"""
if isinstance(test, str):
test = [test]
test_options = ['Global over Sum SW Ratio', 'Diffuse Ratio', 'SW up', 'LW down to air temp',
'LW up to air temp', 'LW down to LW up']
solar_constant = 1360.8
sza, Sa = _calculate_solar_parameters(self._obj, lat_name, lon_name, solar_constant)
# Ratio of Global over Sum SW
if test_options[0] in test:
if gbl_SW_dn_name is None or glb_diffuse_SW_dn_name is None or direct_normal_SW_dn_name is None:
raise ValueError('Must set keywords gbl_SW_dn_name, glb_diffuse_SW_dn_name, '
f'direct_normal_SW_dn_name for {test_options[0]} test.')
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if use_dask and isinstance(self._obj[glb_diffuse_SW_dn_name].data, da.Array):
sum_sw_down = (self._obj[glb_diffuse_SW_dn_name].data +
self._obj[direct_normal_SW_dn_name].data * np.cos(np.radians(sza)))
sum_sw_down[sum_sw_down < 50] = np.nan
ratio = self._obj[gbl_SW_dn_name].data / sum_sw_down
index_a = sza < 75
index_1 = da.where((ratio > 1.08) & index_a, True, False)
index_2 = da.where((ratio < 0.92) & index_a, True, False)
index_b = (sza >= 75) & (sza < 93)
index_3 = da.where((ratio > 1.15) & index_b & index_b, True, False)
index_4 = da.where((ratio < 0.85) & index_b, True, False)
index = (index_1 | index_2 | index_3 | index_4).compute()
else:
sum_sw_down = (self._obj[glb_diffuse_SW_dn_name].values +
self._obj[direct_normal_SW_dn_name].values * np.cos(np.radians(sza)))
sum_sw_down[sum_sw_down < 50] = np.nan
ratio = self._obj[gbl_SW_dn_name].values / sum_sw_down
index_a = sza < 75
index_1 = (ratio > 1.08) & index_a
index_2 = (ratio < 0.92) & index_a
index_b = (sza >= 75) & (sza < 93)
index_3 = (ratio > 1.15) & index_b
index_4 = (ratio < 0.85) & index_b
index = index_1 | index_2 | index_3 | index_4
test_meaning = "Ratio of Global over Sum shortwave larger than expected"
self._obj.qcfilter.add_test(gbl_SW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
self._obj.qcfilter.add_test(glb_diffuse_SW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
self._obj.qcfilter.add_test(direct_normal_SW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
# Diffuse Ratio
if test_options[1] in test:
if gbl_SW_dn_name is None or glb_diffuse_SW_dn_name is None:
raise ValueError('Must set keywords gbl_SW_dn_name, glb_diffuse_SW_dn_name '
f'for {test_options[1]} test.')
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if use_dask and isinstance(self._obj[glb_diffuse_SW_dn_name].data, da.Array):
ratio = self._obj[glb_diffuse_SW_dn_name].data / self._obj[gbl_SW_dn_name].data
ratio[self._obj[gbl_SW_dn_name].data < 50] = np.nan
index_a = sza < 75
index_1 = da.where((ratio >= 1.05) & index_a, True, False)
index_b = (sza >= 75) & (sza < 93)
index_2 = da.where((ratio >= 1.10) & index_b, True, False)
index = (index_1 | index_2).compute()
else:
ratio = self._obj[glb_diffuse_SW_dn_name].values / self._obj[gbl_SW_dn_name].values
ratio[self._obj[gbl_SW_dn_name].values < 50] = np.nan
index_a = sza < 75
index_1 = (ratio >= 1.05) & index_a
index_b = (sza >= 75) & (sza < 93)
index_2 = (ratio >= 1.10) & index_b
index = index_1 | index_2
test_meaning = "Ratio of Diffuse Shortwave over Global Shortwave larger than expected"
self._obj.qcfilter.add_test(gbl_SW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
self._obj.qcfilter.add_test(glb_diffuse_SW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
# Shortwave up comparison
if test_options[2] in test:
if glb_SW_up_name is None or glb_diffuse_SW_dn_name is None or direct_normal_SW_dn_name is None:
raise ValueError('Must set keywords glb_SW_up_name, glb_diffuse_SW_dn_name, '
f'direct_normal_SW_dn_name for {test_options[2]} test.')
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=RuntimeWarning)
if use_dask and isinstance(self._obj[glb_diffuse_SW_dn_name].data, da.Array):
sum_sw_down = (self._obj[glb_diffuse_SW_dn_name].data +
self._obj[direct_normal_SW_dn_name].data * np.cos(np.radians(sza)))
sum_sw_down[sum_sw_down < 50] = np.nan
index = da.where(self._obj[glb_SW_up_name].data > sum_sw_down, True, False).compute()
else:
sum_sw_down = (self._obj[glb_diffuse_SW_dn_name].values +
self._obj[direct_normal_SW_dn_name].values * np.cos(np.radians(sza)))
sum_sw_down[sum_sw_down < 50] = np.nan
index = self._obj[glb_SW_up_name].values > sum_sw_down
test_meaning = "Ratio of Shortwave Upwelling greater than Shortwave Sum"
self._obj.qcfilter.add_test(glb_SW_up_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
self._obj.qcfilter.add_test(glb_diffuse_SW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
self._obj.qcfilter.add_test(direct_normal_SW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
# Longwave down to air temperature comparison
if test_options[3] in test:
if glb_LW_dn_name is None or air_temp_name is None:
raise ValueError('Must set keywords glb_LW_dn_name, air_temp_name '
f' for {test_options[3]} test.')
air_temp = convert_units(self._obj[air_temp_name].values,
self._obj[air_temp_name].attrs['units'], 'degK')
if use_dask and isinstance(self._obj[glb_LW_dn_name].data, da.Array):
air_temp = da.array(air_temp)
conversion = da.array(Stefan_Boltzmann * air_temp**4)
index_1 = (0.4 * conversion) > self._obj[glb_LW_dn_name].data
index_2 = (conversion + 25.) < self._obj[glb_LW_dn_name].data
index = (index_1 | index_2).compute()
else:
conversion = Stefan_Boltzmann * air_temp**4
index_1 = (0.4 * conversion) > self._obj[glb_LW_dn_name].values
index_2 = (conversion + 25.) < self._obj[glb_LW_dn_name].values
index = index_1 | index_2
test_meaning = "Longwave downwelling comparison to air temperature out side of expected range"
self._obj.qcfilter.add_test(glb_LW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
# Longwave up to air temperature comparison
if test_options[4] in test:
if glb_LW_up_name is None or air_temp_name is None:
raise ValueError('Must set keywords glb_LW_up_name, air_temp_name '
f'for {test_options[3]} test.')
air_temp = convert_units(self._obj[air_temp_name].values,
self._obj[air_temp_name].attrs['units'], 'degK')
if use_dask and isinstance(self._obj[glb_LW_up_name].data, da.Array):
air_temp = da.array(air_temp)
index_1 = (Stefan_Boltzmann * (air_temp - 15)**4) > self._obj[glb_LW_up_name].data
index_2 = (Stefan_Boltzmann * (air_temp + 25)**4) < self._obj[glb_LW_up_name].data
index = (index_1 | index_2).compute()
else:
index_1 = (Stefan_Boltzmann * (air_temp - 15)**4) > self._obj[glb_LW_up_name].values
index_2 = (Stefan_Boltzmann * (air_temp + 25)**4) < self._obj[glb_LW_up_name].values
index = index_1 | index_2
test_meaning = "Longwave upwelling comparison to air temperature out side of expected range"
self._obj.qcfilter.add_test(glb_LW_up_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
# Lonwave down to longwave up comparison
if test_options[5] in test:
if glb_LW_dn_name is None or glb_LW_up_name is None:
raise ValueError('Must set keywords glb_LW_dn_name, glb_LW_up_name '
f'for {test_options[3]} test.')
if use_dask and isinstance(self._obj[glb_LW_dn_name].data, da.Array):
index_1 = da.where(self._obj[glb_LW_dn_name].data >
(self._obj[glb_LW_up_name].data + LWdn_lt_LWup_component), True, False)
index_2 = da.where(self._obj[glb_LW_dn_name].data <
(self._obj[glb_LW_up_name].data - LWdn_gt_LWup_component), True, False)
index = (index_1 | index_2).compute()
else:
index_1 = self._obj[glb_LW_dn_name].values > (self._obj[glb_LW_up_name].values + LWdn_lt_LWup_component)
index_2 = self._obj[glb_LW_dn_name].values < (self._obj[glb_LW_up_name].values - LWdn_gt_LWup_component)
index = index_1 | index_2
test_meaning = "Lonwave downwelling compared to longwave upwelling outside of expected range"
self._obj.qcfilter.add_test(glb_LW_dn_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
self._obj.qcfilter.add_test(glb_LW_up_name, index=index, test_assessment=test_assessment,
test_meaning=test_meaning)
| [
"numpy.radians",
"numpy.less",
"numpy.greater",
"act.utils.data_utils.convert_units",
"warnings.catch_warnings",
"numpy.nanmean",
"dask.array.array",
"act.utils.geo_utils.get_solar_azimuth_elevation",
"dask.array.where",
"warnings.filterwarnings"
] | [((1182, 1279), 'act.utils.geo_utils.get_solar_azimuth_elevation', 'get_solar_azimuth_elevation', ([], {'latitude': 'latitude', 'longitude': 'longitude', 'time': "obj['time'].values"}), "(latitude=latitude, longitude=longitude, time=\n obj['time'].values)\n", (1209, 1279), False, 'from act.utils.geo_utils import get_solar_azimuth_elevation\n'), ((1305, 1331), 'numpy.nanmean', 'np.nanmean', (['solar_distance'], {}), '(solar_distance)\n', (1315, 1331), True, 'import numpy as np\n'), ((2086, 2111), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2109, 2111), False, 'import warnings\n'), ((2121, 2179), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (2144, 2179), False, 'import warnings\n'), ((2460, 2500), 'numpy.less', 'np.less', (['obj[var_name].values', 'min_limit'], {}), '(obj[var_name].values, min_limit)\n', (2467, 2500), True, 'import numpy as np\n'), ((2525, 2568), 'numpy.greater', 'np.greater', (['obj[var_name].values', 'max_limit'], {}), '(obj[var_name].values, max_limit)\n', (2535, 2568), True, 'import numpy as np\n'), ((25030, 25130), 'act.utils.data_utils.convert_units', 'convert_units', (['self._obj[air_temp_name].values', "self._obj[air_temp_name].attrs['units']", '"""degK"""'], {}), "(self._obj[air_temp_name].values, self._obj[air_temp_name].\n attrs['units'], 'degK')\n", (25043, 25130), False, 'from act.utils.data_utils import convert_units\n'), ((26454, 26554), 'act.utils.data_utils.convert_units', 'convert_units', (['self._obj[air_temp_name].values', "self._obj[air_temp_name].attrs['units']", '"""degK"""'], {}), "(self._obj[air_temp_name].values, self._obj[air_temp_name].\n attrs['units'], 'degK')\n", (26467, 26554), False, 'from act.utils.data_utils import convert_units\n'), ((8230, 8245), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (8240, 8245), True, 'import numpy as np\n'), ((9127, 9152), 
'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (9150, 9152), False, 'import warnings\n'), ((9170, 9228), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (9193, 9228), False, 'import warnings\n'), ((10202, 10227), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (10225, 10227), False, 'import warnings\n'), ((10245, 10303), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (10268, 10303), False, 'import warnings\n'), ((11225, 11250), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (11248, 11250), False, 'import warnings\n'), ((11268, 11326), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (11291, 11326), False, 'import warnings\n'), ((12242, 12267), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (12265, 12267), False, 'import warnings\n'), ((12285, 12343), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (12308, 12343), False, 'import warnings\n'), ((18611, 18636), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (18634, 18636), False, 'import warnings\n'), ((18654, 18712), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (18677, 18712), False, 'import warnings\n'), ((21252, 21277), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (21275, 21277), False, 'import warnings\n'), ((21295, 21353), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (21318, 
21353), False, 'import warnings\n'), ((23210, 23235), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (23233, 23235), False, 'import warnings\n'), ((23253, 23311), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (23276, 23311), False, 'import warnings\n'), ((25272, 25290), 'dask.array.array', 'da.array', (['air_temp'], {}), '(air_temp)\n', (25280, 25290), True, 'import dask.array as da\n'), ((25320, 25362), 'dask.array.array', 'da.array', (['(Stefan_Boltzmann * air_temp ** 4)'], {}), '(Stefan_Boltzmann * air_temp ** 4)\n', (25328, 25362), True, 'import dask.array as da\n'), ((26696, 26714), 'dask.array.array', 'da.array', (['air_temp'], {}), '(air_temp)\n', (26704, 26714), True, 'import dask.array as da\n'), ((27914, 28029), 'dask.array.where', 'da.where', (['(self._obj[glb_LW_dn_name].data > self._obj[glb_LW_up_name].data +\n LWdn_lt_LWup_component)', '(True)', '(False)'], {}), '(self._obj[glb_LW_dn_name].data > self._obj[glb_LW_up_name].data +\n LWdn_lt_LWup_component, True, False)\n', (27922, 28029), True, 'import dask.array as da\n'), ((28089, 28204), 'dask.array.where', 'da.where', (['(self._obj[glb_LW_dn_name].data < self._obj[glb_LW_up_name].data -\n LWdn_gt_LWup_component)', '(True)', '(False)'], {}), '(self._obj[glb_LW_dn_name].data < self._obj[glb_LW_up_name].data -\n LWdn_gt_LWup_component, True, False)\n', (28097, 28204), True, 'import dask.array as da\n'), ((2270, 2323), 'dask.array.where', 'da.where', (['(obj[var_name].data < min_limit)', '(True)', '(False)'], {}), '(obj[var_name].data < min_limit, True, False)\n', (2278, 2323), True, 'import dask.array as da\n'), ((2358, 2411), 'dask.array.where', 'da.where', (['(obj[var_name].data > max_limit)', '(True)', '(False)'], {}), '(obj[var_name].data > max_limit, True, False)\n', (2366, 2411), True, 'import dask.array as da\n'), ((19187, 19234), 'dask.array.where', 'da.where', 
(['((ratio > 1.08) & index_a)', '(True)', '(False)'], {}), '((ratio > 1.08) & index_a, True, False)\n', (19195, 19234), True, 'import dask.array as da\n'), ((19265, 19312), 'dask.array.where', 'da.where', (['((ratio < 0.92) & index_a)', '(True)', '(False)'], {}), '((ratio < 0.92) & index_a, True, False)\n', (19273, 19312), True, 'import dask.array as da\n'), ((19398, 19455), 'dask.array.where', 'da.where', (['((ratio > 1.15) & index_b & index_b)', '(True)', '(False)'], {}), '((ratio > 1.15) & index_b & index_b, True, False)\n', (19406, 19455), True, 'import dask.array as da\n'), ((19486, 19533), 'dask.array.where', 'da.where', (['((ratio < 0.85) & index_b)', '(True)', '(False)'], {}), '((ratio < 0.85) & index_b, True, False)\n', (19494, 19533), True, 'import dask.array as da\n'), ((21689, 21737), 'dask.array.where', 'da.where', (['((ratio >= 1.05) & index_a)', '(True)', '(False)'], {}), '((ratio >= 1.05) & index_a, True, False)\n', (21697, 21737), True, 'import dask.array as da\n'), ((21823, 21870), 'dask.array.where', 'da.where', (['((ratio >= 1.1) & index_b)', '(True)', '(False)'], {}), '((ratio >= 1.1) & index_b, True, False)\n', (21831, 21870), True, 'import dask.array as da\n'), ((11416, 11431), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (11426, 11431), True, 'import numpy as np\n'), ((23673, 23740), 'dask.array.where', 'da.where', (['(self._obj[glb_SW_up_name].data > sum_sw_down)', '(True)', '(False)'], {}), '(self._obj[glb_SW_up_name].data > sum_sw_down, True, False)\n', (23681, 23740), True, 'import dask.array as da\n'), ((18968, 18983), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (18978, 18983), True, 'import numpy as np\n'), ((19799, 19814), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (19809, 19814), True, 'import numpy as np\n'), ((23567, 23582), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (23577, 23582), True, 'import numpy as np\n'), ((23938, 23953), 'numpy.radians', 'np.radians', (['sza'], {}), 
'(sza)\n', (23948, 23953), True, 'import numpy as np\n'), ((9325, 9340), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (9335, 9340), True, 'import numpy as np\n'), ((12439, 12454), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (12449, 12454), True, 'import numpy as np\n'), ((9451, 9466), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (9461, 9466), True, 'import numpy as np\n'), ((10482, 10497), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (10492, 10497), True, 'import numpy as np\n'), ((11531, 11546), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (11541, 11546), True, 'import numpy as np\n'), ((12557, 12572), 'numpy.radians', 'np.radians', (['sza'], {}), '(sza)\n', (12567, 12572), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def ks_table(y, quantiles):
    """Build a Kolmogorov-Smirnov (K-S) table for a binary classifier.

    Parameters
    ----------
    y : sequence of {0, 1}
        Binary target labels (positives encoded as 1, negatives as 0).
    quantiles : sequence
        Quantile/bucket assignment for each observation; same length as ``y``.

    Returns
    -------
    pandas.DataFrame
        Indexed by quantile with columns ``count_total``, ``count_pos``,
        ``count_neg``, ``pos_pct_cum``, ``neg_pct_cum`` and ``ks_idx``.
        ``ks_idx`` is the absolute gap between the cumulative positive and
        negative percentages; its maximum is the K-S statistic.

    Raises
    ------
    ValueError
        If ``y`` contains more than two distinct values.
    """
    if len(set(y)) > 2:
        raise ValueError('Function only defined for binary classification')
    df = pd.concat([pd.Series(y), pd.Series(quantiles)], axis=1)
    df.columns = ['y', 'quantiles']
    # Single groupby pass (original scanned the groups twice): with 0/1
    # labels the sum of y per bucket is exactly the positive count.
    grouped = df.groupby('quantiles')['y']
    count_total = grouped.count()
    count_pos = grouped.sum()
    count_neg = count_total - count_pos
    # Cumulative share (%) of positives/negatives captured up to each quantile.
    pos_pct_cum = count_pos.cumsum() / float(count_pos.sum()) * 100
    neg_pct_cum = count_neg.cumsum() / float(count_neg.sum()) * 100
    # K-S index: separation between the two cumulative distributions.
    ks_idx = np.abs(pos_pct_cum - neg_pct_cum)
    # Output table
    out_df = pd.concat([
        count_total, count_pos, count_neg, pos_pct_cum, neg_pct_cum, ks_idx
    ], axis=1)
    out_df.columns = [
        'count_total', 'count_pos', 'count_neg',
        'pos_pct_cum', 'neg_pct_cum', 'ks_idx'
    ]
    return out_df
"pandas.Series",
"numpy.abs",
"pandas.concat"
] | [((709, 742), 'numpy.abs', 'np.abs', (['(pos_pct_cum - neg_pct_cum)'], {}), '(pos_pct_cum - neg_pct_cum)\n', (715, 742), True, 'import numpy as np\n'), ((776, 868), 'pandas.concat', 'pd.concat', (['[count_total, count_pos, count_neg, pos_pct_cum, neg_pct_cum, ks_idx]'], {'axis': '(1)'}), '([count_total, count_pos, count_neg, pos_pct_cum, neg_pct_cum,\n ks_idx], axis=1)\n', (785, 868), True, 'import pandas as pd\n'), ((232, 244), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (241, 244), True, 'import pandas as pd\n'), ((246, 266), 'pandas.Series', 'pd.Series', (['quantiles'], {}), '(quantiles)\n', (255, 266), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
"""1-D FiPy script: solve for the electric potential in the gap between a
needle electrode and a water surface, given a uniform initial ion density.

Sets up a Grid1D mesh, a Poisson equation for the potential phi with charge
density rho as the source, solves it with a zero-potential constraint on the
left face, and plots the result.

NOTE(review): raw_input at the bottom is a Python 2 builtin; under Python 3
this script needs input() instead — confirm the target interpreter.
"""
from fipy import *
import math
import numpy as np
#
# Physical setup
nx = 100
L = 5.e-3 # Gap between needle and water surface (m)
e = 1.6e-19 # Coulombic charge of an electron (C)
eps = 8.85e-12 # Permittivity of free space (F/m)
#
dx = L/nx
#
mesh = Grid1D(nx=nx, dx=dx)
#
# Potential and the derived electric field on cell faces.
phi = CellVariable(name="Potential (V)", mesh=mesh, value=0.)
Efield = -phi.faceGrad
# Floor of 1e-6 keeps EfieldMag nonzero so the exp(-k/EfieldMag) ionization
# coefficient below does not divide by zero.
EfieldMag = np.fmax(np.fabs(Efield), 1.e-6)
#
# Charge carriers: electrons start empty, ions at a uniform density.
inidensity = 1.e20 # (m^-3)
electrons = CellVariable(name="electrons (m^-3)", mesh=mesh, value=0)
ions = CellVariable(name="ions", mesh=mesh, value=inidensity)
rho = e * (ions - electrons) # Charge density
#
# Transport coefficients (empirical fits; presumably from discharge
# literature — TODO confirm sources/units).
# NOTE(review): alpha, We, Wp, De, Dp are defined but not used by the
# equation solved below — likely intended for a later transport step.
alpha = .35 * np.exp(-1.65e3 / EfieldMag) # Ionization coefficient (1/m)
We = -60.6*Efield**.75 # Electron velocity vector (m/s)
Wp = 2.43 / 100 * Efield # Ion velocity vector (m/s)
De = 1800 / (100**2) # Electron diffusion coefficient
Dp = .046 / (100**2) # Ion diffusion coefficient
#
# Poisson equation: eps * laplacian(phi) + rho = 0, with phi = 0 on the
# left boundary face (the right face is left at the natural/zero-flux BC).
phi.equation = (DiffusionTerm(coeff = eps) + rho == 0)
phi.constrain(0., mesh.facesLeft)
phi.equation.solve(var=phi)
#
# Display the solved potential and wait for the user before exiting.
viewer= Viewer(vars=phi)
viewer.plot()
raw_input("Press any key to continue...")
| [
"numpy.exp",
"numpy.fabs"
] | [((386, 401), 'numpy.fabs', 'np.fabs', (['Efield'], {}), '(Efield)\n', (393, 401), True, 'import numpy as np\n'), ((634, 661), 'numpy.exp', 'np.exp', (['(-1650.0 / EfieldMag)'], {}), '(-1650.0 / EfieldMag)\n', (640, 661), True, 'import numpy as np\n')] |
import time
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import Parameter
from torch.utils.data.sampler import SubsetRandomSampler
from data_loader import libsvm_dataset
from thrift_ps.ps_service import ParameterServer
from thrift_ps.client import ps_client
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from utils.constants import Prefix, MLModel, Optimization, Synchronization
from storage.s3.s3_type import S3Storage
from model import linear_models
def handler(event, context):
start_time = time.time()
# dataset setting
file = event['file']
data_bucket = event['data_bucket']
dataset_type = event['dataset_type']
assert dataset_type == "dense_libsvm"
n_features = event['n_features']
n_classes = event['n_classes']
n_workers = event['n_workers']
worker_index = event['worker_index']
# ps setting
host = event['host']
port = event['port']
# training setting
model_name = event['model']
optim = event['optim']
sync_mode = event['sync_mode']
assert model_name.lower() in MLModel.Linear_Models
assert optim.lower() == Optimization.Grad_Avg
assert sync_mode.lower() == Synchronization.Reduce
# hyper-parameter
learning_rate = event['lr']
batch_size = event['batch_size']
n_epochs = event['n_epochs']
valid_ratio = event['valid_ratio']
print('bucket = {}'.format(data_bucket))
print("file = {}".format(file))
print('number of workers = {}'.format(n_workers))
print('worker index = {}'.format(worker_index))
print('model = {}'.format(model_name))
print('host = {}'.format(host))
print('port = {}'.format(port))
# Set thrift connection
# Make socket
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TBufferedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
t_client = ParameterServer.Client(protocol)
# Connect!
transport.open()
# test thrift connection
ps_client.ping(t_client)
print("create and ping thrift server >>> HOST = {}, PORT = {}".format(host, port))
# Read file from s3
read_start = time.time()
storage = S3Storage()
lines = storage.load(file, data_bucket).read().decode('utf-8').split("\n")
print("read data cost {} s".format(time.time() - read_start))
parse_start = time.time()
dataset = libsvm_dataset.from_lines(lines, n_features, dataset_type)
print("parse data cost {} s".format(time.time() - parse_start))
preprocess_start = time.time()
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(valid_ratio * dataset_size))
shuffle_dataset = True
random_seed = 100
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=train_sampler)
n_train_batch = len(train_loader)
validation_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=valid_sampler)
print("preprocess data cost {} s, dataset size = {}"
.format(time.time() - preprocess_start, dataset_size))
model = linear_models.get_model(model_name, n_features, n_classes)
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# register model
model_name = "w.b"
weight_shape = model.linear.weight.data.numpy().shape
weight_length = weight_shape[0] * weight_shape[1]
bias_shape = model.linear.bias.data.numpy().shape
bias_length = bias_shape[0]
model_length = weight_length + bias_length
ps_client.register_model(t_client, worker_index, model_name, model_length, n_workers)
ps_client.exist_model(t_client, model_name)
print("register and check model >>> name = {}, length = {}".format(model_name, model_length))
# Training the Model
train_start = time.time()
iter_counter = 0
for epoch in range(n_epochs):
epoch_start = time.time()
epoch_cal_time = 0
epoch_comm_time = 0
epoch_loss = 0.
for batch_idx, (items, labels) in enumerate(train_loader):
batch_comm_time = 0
batch_start = time.time()
# pull latest model
ps_client.can_pull(t_client, model_name, iter_counter, worker_index)
latest_model = ps_client.pull_model(t_client, model_name, iter_counter, worker_index)
model.linear.weight = Parameter(
torch.from_numpy(np.asarray(latest_model[:weight_length], dtype=np.float32).reshape(weight_shape)))
model.linear.bias = Parameter(
torch.from_numpy(np.asarray(latest_model[weight_length:], dtype=np.float32).reshape(bias_shape[0])))
batch_comm_time += time.time() - batch_start
# Forward + Backward + Optimize
batch_cal_start = time.time()
items = Variable(items.view(-1, n_features))
labels = Variable(labels)
optimizer.zero_grad()
outputs = model(items)
loss = criterion(outputs, labels)
epoch_loss += loss.item()
loss.backward()
# flatten and concat gradients of weight and bias
w_b_grad = np.concatenate((model.linear.weight.grad.data.double().numpy().flatten(),
model.linear.bias.grad.data.double().numpy().flatten()))
batch_cal_time = time.time() - batch_cal_start
# push gradient to PS
batch_comm_start = time.time()
ps_client.can_push(t_client, model_name, iter_counter, worker_index)
ps_client.push_grad(t_client, model_name, w_b_grad, -1. * learning_rate / n_workers,
iter_counter, worker_index)
ps_client.can_pull(t_client, model_name, iter_counter + 1, worker_index) # sync all workers
batch_comm_time += time.time() - batch_comm_start
epoch_cal_time += batch_cal_time
epoch_comm_time += batch_comm_time
if batch_idx % 10 == 0:
print('Epoch: [%d/%d], Batch: [%d/%d] >>> Time: %.4f, Loss: %.4f, epoch cost %.4f, '
'batch cost %.4f s: cal cost %.4f s and communication cost %.4f s'
% (epoch + 1, n_epochs, batch_idx + 1, n_train_batch,
time.time() - train_start, loss.data, time.time() - epoch_start,
time.time() - batch_start, batch_cal_time, batch_comm_time))
iter_counter += 1
# Test the Model
test_start = time.time()
n_test_correct = 0
n_test = 0
test_loss = 0
for items, labels in validation_loader:
items = Variable(items.view(-1, n_features))
labels = Variable(labels)
outputs = model(items)
test_loss += criterion(outputs, labels).data
_, predicted = torch.max(outputs.data, 1)
n_test += labels.size(0)
n_test_correct += (predicted == labels).sum()
test_time = time.time() - test_start
print('Epoch: [%d/%d], Batch: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f: '
'calculation cost = %.4f s, synchronization cost %.4f s, test cost %.4f s, '
'accuracy of the model on the %d test samples: %d %%, loss = %f'
% (epoch + 1, n_epochs, batch_idx + 1, n_train_batch,
time.time() - train_start, epoch_loss, time.time() - epoch_start,
epoch_cal_time, epoch_comm_time, test_time,
n_test, 100. * n_test_correct / n_test, test_loss / n_test))
end_time = time.time()
print("Elapsed time = {} s".format(end_time - start_time))
| [
"torch.nn.CrossEntropyLoss",
"torch.max",
"thrift_ps.client.ps_client.pull_model",
"thrift_ps.client.ps_client.push_grad",
"thrift_ps.client.ps_client.exist_model",
"thrift.transport.TSocket.TSocket",
"thrift.transport.TTransport.TBufferedTransport",
"thrift_ps.client.ps_client.can_pull",
"storage.s... | [((618, 629), 'time.time', 'time.time', ([], {}), '()\n', (627, 629), False, 'import time\n'), ((1824, 1851), 'thrift.transport.TSocket.TSocket', 'TSocket.TSocket', (['host', 'port'], {}), '(host, port)\n', (1839, 1851), False, 'from thrift.transport import TSocket\n'), ((1923, 1963), 'thrift.transport.TTransport.TBufferedTransport', 'TTransport.TBufferedTransport', (['transport'], {}), '(transport)\n', (1952, 1963), False, 'from thrift.transport import TTransport\n'), ((2004, 2046), 'thrift.protocol.TBinaryProtocol.TBinaryProtocol', 'TBinaryProtocol.TBinaryProtocol', (['transport'], {}), '(transport)\n', (2035, 2046), False, 'from thrift.protocol import TBinaryProtocol\n'), ((2112, 2144), 'thrift_ps.ps_service.ParameterServer.Client', 'ParameterServer.Client', (['protocol'], {}), '(protocol)\n', (2134, 2144), False, 'from thrift_ps.ps_service import ParameterServer\n'), ((2214, 2238), 'thrift_ps.client.ps_client.ping', 'ps_client.ping', (['t_client'], {}), '(t_client)\n', (2228, 2238), False, 'from thrift_ps.client import ps_client\n'), ((2368, 2379), 'time.time', 'time.time', ([], {}), '()\n', (2377, 2379), False, 'import time\n'), ((2394, 2405), 'storage.s3.s3_type.S3Storage', 'S3Storage', ([], {}), '()\n', (2403, 2405), False, 'from storage.s3.s3_type import S3Storage\n'), ((2570, 2581), 'time.time', 'time.time', ([], {}), '()\n', (2579, 2581), False, 'import time\n'), ((2596, 2654), 'data_loader.libsvm_dataset.from_lines', 'libsvm_dataset.from_lines', (['lines', 'n_features', 'dataset_type'], {}), '(lines, n_features, dataset_type)\n', (2621, 2654), False, 'from data_loader import libsvm_dataset\n'), ((2747, 2758), 'time.time', 'time.time', ([], {}), '()\n', (2756, 2758), False, 'import time\n'), ((3223, 3257), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (3242, 3257), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3278, 3310), 
'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (3297, 3310), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3330, 3417), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler'}), '(dataset, batch_size=batch_size, sampler=\n train_sampler)\n', (3357, 3417), False, 'import torch\n'), ((3569, 3656), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'valid_sampler'}), '(dataset, batch_size=batch_size, sampler=\n valid_sampler)\n', (3596, 3656), False, 'import torch\n'), ((3891, 3949), 'model.linear_models.get_model', 'linear_models.get_model', (['model_name', 'n_features', 'n_classes'], {}), '(model_name, n_features, n_classes)\n', (3914, 3949), False, 'from model import linear_models\n'), ((4066, 4093), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4091, 4093), False, 'import torch\n'), ((4458, 4547), 'thrift_ps.client.ps_client.register_model', 'ps_client.register_model', (['t_client', 'worker_index', 'model_name', 'model_length', 'n_workers'], {}), '(t_client, worker_index, model_name, model_length,\n n_workers)\n', (4482, 4547), False, 'from thrift_ps.client import ps_client\n'), ((4548, 4591), 'thrift_ps.client.ps_client.exist_model', 'ps_client.exist_model', (['t_client', 'model_name'], {}), '(t_client, model_name)\n', (4569, 4591), False, 'from thrift_ps.client import ps_client\n'), ((4734, 4745), 'time.time', 'time.time', ([], {}), '()\n', (4743, 4745), False, 'import time\n'), ((8528, 8539), 'time.time', 'time.time', ([], {}), '()\n', (8537, 8539), False, 'import time\n'), ((2911, 2947), 'numpy.floor', 'np.floor', (['(valid_ratio * dataset_size)'], {}), '(valid_ratio * dataset_size)\n', (2919, 2947), True, 'import numpy as np\n'), ((3031, 3058), 'numpy.random.seed', 'np.random.seed', 
(['random_seed'], {}), '(random_seed)\n', (3045, 3058), True, 'import numpy as np\n'), ((3067, 3093), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (3084, 3093), True, 'import numpy as np\n'), ((4823, 4834), 'time.time', 'time.time', ([], {}), '()\n', (4832, 4834), False, 'import time\n'), ((7452, 7463), 'time.time', 'time.time', ([], {}), '()\n', (7461, 7463), False, 'import time\n'), ((5040, 5051), 'time.time', 'time.time', ([], {}), '()\n', (5049, 5051), False, 'import time\n'), ((5096, 5164), 'thrift_ps.client.ps_client.can_pull', 'ps_client.can_pull', (['t_client', 'model_name', 'iter_counter', 'worker_index'], {}), '(t_client, model_name, iter_counter, worker_index)\n', (5114, 5164), False, 'from thrift_ps.client import ps_client\n'), ((5192, 5262), 'thrift_ps.client.ps_client.pull_model', 'ps_client.pull_model', (['t_client', 'model_name', 'iter_counter', 'worker_index'], {}), '(t_client, model_name, iter_counter, worker_index)\n', (5212, 5262), False, 'from thrift_ps.client import ps_client\n'), ((5716, 5727), 'time.time', 'time.time', ([], {}), '()\n', (5725, 5727), False, 'import time\n'), ((5806, 5822), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (5814, 5822), False, 'from torch.autograd import Variable\n'), ((6385, 6396), 'time.time', 'time.time', ([], {}), '()\n', (6394, 6396), False, 'import time\n'), ((6409, 6477), 'thrift_ps.client.ps_client.can_push', 'ps_client.can_push', (['t_client', 'model_name', 'iter_counter', 'worker_index'], {}), '(t_client, model_name, iter_counter, worker_index)\n', (6427, 6477), False, 'from thrift_ps.client import ps_client\n'), ((6490, 6607), 'thrift_ps.client.ps_client.push_grad', 'ps_client.push_grad', (['t_client', 'model_name', 'w_b_grad', '(-1.0 * learning_rate / n_workers)', 'iter_counter', 'worker_index'], {}), '(t_client, model_name, w_b_grad, -1.0 * learning_rate /\n n_workers, iter_counter, worker_index)\n', (6509, 6607), False, 'from 
thrift_ps.client import ps_client\n'), ((6647, 6719), 'thrift_ps.client.ps_client.can_pull', 'ps_client.can_pull', (['t_client', 'model_name', '(iter_counter + 1)', 'worker_index'], {}), '(t_client, model_name, iter_counter + 1, worker_index)\n', (6665, 6719), False, 'from thrift_ps.client import ps_client\n'), ((7658, 7674), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (7666, 7674), False, 'from torch.autograd import Variable\n'), ((7794, 7820), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (7803, 7820), False, 'import torch\n'), ((7936, 7947), 'time.time', 'time.time', ([], {}), '()\n', (7945, 7947), False, 'import time\n'), ((2524, 2535), 'time.time', 'time.time', ([], {}), '()\n', (2533, 2535), False, 'import time\n'), ((2695, 2706), 'time.time', 'time.time', ([], {}), '()\n', (2704, 2706), False, 'import time\n'), ((3831, 3842), 'time.time', 'time.time', ([], {}), '()\n', (3840, 3842), False, 'import time\n'), ((5615, 5626), 'time.time', 'time.time', ([], {}), '()\n', (5624, 5626), False, 'import time\n'), ((6289, 6300), 'time.time', 'time.time', ([], {}), '()\n', (6298, 6300), False, 'import time\n'), ((6771, 6782), 'time.time', 'time.time', ([], {}), '()\n', (6780, 6782), False, 'import time\n'), ((8307, 8318), 'time.time', 'time.time', ([], {}), '()\n', (8316, 8318), False, 'import time\n'), ((8346, 8357), 'time.time', 'time.time', ([], {}), '()\n', (8355, 8357), False, 'import time\n'), ((5341, 5399), 'numpy.asarray', 'np.asarray', (['latest_model[:weight_length]'], {'dtype': 'np.float32'}), '(latest_model[:weight_length], dtype=np.float32)\n', (5351, 5399), True, 'import numpy as np\n'), ((5500, 5558), 'numpy.asarray', 'np.asarray', (['latest_model[weight_length:]'], {'dtype': 'np.float32'}), '(latest_model[weight_length:], dtype=np.float32)\n', (5510, 5558), True, 'import numpy as np\n'), ((7223, 7234), 'time.time', 'time.time', ([], {}), '()\n', (7232, 7234), False, 'import time\n'), ((7261, 
7272), 'time.time', 'time.time', ([], {}), '()\n', (7270, 7272), False, 'import time\n'), ((7313, 7324), 'time.time', 'time.time', ([], {}), '()\n', (7322, 7324), False, 'import time\n')] |
if __name__ == '__main__':
    from crossSection import sigma_real
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import sys
    import random
    import warnings
    warnings.filterwarnings("ignore")
    plt.style.use('ja')
    from numpy.random import normal, sample

    # Incident neutrino energy sample (TeV).
    Enu_TeV = np.array([290.])
    # Coupling constants (dimensionless); presumably per-flavour couplings
    # (e / tau / mu) consumed positionally by sigma_real -- TODO confirm.
    ge = np.power(10.0, -3) * 3
    gt = np.power(10.0, -1) * 3
    gm = np.power(10.0, -2)
    # Mediator mass; mp is rescaled by 1e6 (presumably to MeV) -- TODO confirm units.
    mp = 0.5
    mp_MeV = np.power(10.0, 6) * mp
    nu_mass = 0.15
    mn = 5
    # Centre-of-mass energies for scattering off a neutrino / off the scalar.
    Ecom_nu_MeV = np.sqrt(0.5*nu_mass*Enu_TeV)
    Ecom_phi_MeV = np.sqrt(0.5*mp_MeV*Enu_TeV)
    # Mandelstam s = (2 * E_com)^2 for each target.
    s_nu_sample = np.power(Ecom_nu_MeV, 2) * 4
    s_phi_sample = np.power(Ecom_phi_MeV, 2) * 4
    # Cross sections for both scattering channels.
    sigma_nu = sigma_real(s_nu_sample, ge, gm, gt, mp, mn)
    sigma_phi = sigma_real(s_phi_sample, ge, gm, gt, mp, mn)
    print('Scattering off Neutrino:', sigma_nu[0])
    print('Scattering off Scalar:', sigma_phi[0])
| [
"numpy.sqrt",
"numpy.power",
"matplotlib.pyplot.style.use",
"crossSection.sigma_real",
"numpy.array",
"warnings.filterwarnings"
] | [((207, 240), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (230, 240), False, 'import warnings\n'), ((245, 264), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ja"""'], {}), "('ja')\n", (258, 264), True, 'import matplotlib.pyplot as plt\n'), ((323, 340), 'numpy.array', 'np.array', (['[290.0]'], {}), '([290.0])\n', (331, 340), True, 'import numpy as np\n'), ((409, 427), 'numpy.power', 'np.power', (['(10.0)', '(-2)'], {}), '(10.0, -2)\n', (417, 427), True, 'import numpy as np\n'), ((524, 556), 'numpy.sqrt', 'np.sqrt', (['(0.5 * nu_mass * Enu_TeV)'], {}), '(0.5 * nu_mass * Enu_TeV)\n', (531, 556), True, 'import numpy as np\n'), ((572, 603), 'numpy.sqrt', 'np.sqrt', (['(0.5 * mp_MeV * Enu_TeV)'], {}), '(0.5 * mp_MeV * Enu_TeV)\n', (579, 603), True, 'import numpy as np\n'), ((712, 755), 'crossSection.sigma_real', 'sigma_real', (['s_nu_sample', 'ge', 'gm', 'gt', 'mp', 'mn'], {}), '(s_nu_sample, ge, gm, gt, mp, mn)\n', (722, 755), False, 'from crossSection import sigma_real\n'), ((772, 816), 'crossSection.sigma_real', 'sigma_real', (['s_phi_sample', 'ge', 'gm', 'gt', 'mp', 'mn'], {}), '(s_phi_sample, ge, gm, gt, mp, mn)\n', (782, 816), False, 'from crossSection import sigma_real\n'), ((351, 369), 'numpy.power', 'np.power', (['(10.0)', '(-3)'], {}), '(10.0, -3)\n', (359, 369), True, 'import numpy as np\n'), ((381, 399), 'numpy.power', 'np.power', (['(10.0)', '(-1)'], {}), '(10.0, -1)\n', (389, 399), True, 'import numpy as np\n'), ((457, 474), 'numpy.power', 'np.power', (['(10.0)', '(6)'], {}), '(10.0, 6)\n', (465, 474), True, 'import numpy as np\n'), ((623, 647), 'numpy.power', 'np.power', (['Ecom_nu_MeV', '(2)'], {}), '(Ecom_nu_MeV, 2)\n', (631, 647), True, 'import numpy as np\n'), ((671, 696), 'numpy.power', 'np.power', (['Ecom_phi_MeV', '(2)'], {}), '(Ecom_phi_MeV, 2)\n', (679, 696), True, 'import numpy as np\n')] |
import numpy as np
from typing import Union
def cleanup_delete(coords_list_in: np.ndarray,
                   eps_grid: float = 1e-4,
                   cyclic_points: bool = True,
                   check_inline: bool = True,
                   ) -> np.ndarray:
    """
    Flag redundant vertices of a polygon/path for deletion.

    For each point the function compares it with its cyclic neighbours and
    flags it when it is either (a) coincident with the *next* point, or
    (b) the middle point of an axis-aligned collinear triple (only when
    ``check_inline`` is set).

    Parameters
    ----------
    coords_list_in : np.ndarray
        (n x 2) array of x-y vertex coordinates.
    eps_grid : float
        Distance below which two coordinates are considered identical.
    cyclic_points : bool
        True if the coordinates form a closed polygon (start/end points may
        be flagged). False for an open path: the first and last points are
        never flagged.
    check_inline : bool
        Whether to also flag collinear middle points.

    Returns
    -------
    np.ndarray
        Boolean array, True where the corresponding point should be deleted.
    """
    # Cyclic neighbours of every vertex.
    following = np.roll(coords_list_in, -1, axis=0)
    preceding = np.roll(coords_list_in, 1, axis=0)
    delta_next = following - coords_list_in
    delta_prev = coords_list_in - preceding

    # Absolute per-axis displacements to next / previous neighbour.
    adx_next = np.abs(delta_next[:, 0])
    ady_next = np.abs(delta_next[:, 1])
    adx_prev = np.abs(delta_prev[:, 0])
    ady_prev = np.abs(delta_prev[:, 1])

    # Point coincides with its next neighbour (within the grid tolerance).
    coincident_next = (adx_next < eps_grid) & (ady_next < eps_grid)

    if check_inline:
        coincident_prev = (adx_prev < eps_grid) & (ady_prev < eps_grid)
        # Only points distinct from BOTH neighbours can be collinear middles;
        # otherwise the coincidence rule already covers them.
        distinct_from_both = ~(coincident_next | coincident_prev)
        # Collinearity is only detected for axis-aligned triples: both
        # segments vertical (dx ~ 0) or both horizontal (dy ~ 0).
        vertical_run = (adx_next < eps_grid) & (adx_prev < eps_grid)
        horizontal_run = (ady_next < eps_grid) & (ady_prev < eps_grid)
        inline_middle = (vertical_run | horizontal_run) & distinct_from_both
    else:
        # Inline detection disabled: nothing flagged on this criterion.
        inline_middle = np.full_like(coincident_next, False)

    # Delete a point if it duplicates its next neighbour or sits inline.
    delete_array = coincident_next | inline_middle

    # Open paths keep their endpoints unconditionally.
    if not cyclic_points:
        delete_array[0] = False
        delete_array[-1] = False

    return delete_array
def coords_cleanup(coords_list: np.ndarray,
                   eps_grid: float = 1e-4,
                   cyclic_points: bool = True,
                   check_inline: bool = True,
                   ) -> np.ndarray:
    """
    Iteratively remove redundant vertices from a coordinate list.

    Repeatedly applies ``cleanup_delete`` and drops the flagged points until
    a pass flags nothing. Iteration is required because removing one vertex
    can make a previously-kept neighbour coincident/collinear with its new
    neighbours.

    Removed vertices are:
      - adjacent coincident points (closer than ``eps_grid`` in x and y)
      - middle points of collinear triples (only when ``check_inline``)

    Parameters
    ----------
    coords_list : np.ndarray
        (n x 2) array of vertex coordinates enclosing a polygon (or a path).
    eps_grid : float
        Distance below which two coordinates are treated as identical.
    cyclic_points : bool
        True [default] if the coordinates form a closed polygon; when False
        (open path) the first and last points are never removed.
    check_inline : bool
        True [default] to also remove collinear middle points.

    Returns
    -------
    np.ndarray
        The cleaned coordinate array.
    """
    while True:
        flagged = cleanup_delete(coords_list, eps_grid=eps_grid,
                                 cyclic_points=cyclic_points, check_inline=check_inline)
        if not flagged.any():
            # Fixed point reached: nothing left to delete.
            return coords_list
        coords_list = coords_list[np.logical_not(flagged)]
def create_polygon_from_path_and_width(points_list: np.ndarray,
                                       width: Union[float, int],
                                       eps: float = 1e-4
                                       ) -> np.ndarray:
    """
    Given a path (a numpy array of 2-D points) and a width (constant along the
    path), return the set of points forming the outline polygon.

    The outline is built by offsetting each path point by +/- width/2 along
    the perpendicular of the numerically estimated tangent, walking up one
    side of the path and back down the other.

    NOTE(review): an earlier version of this docstring claimed a
    radius-of-curvature check (raising when curvature < width/2 would make
    the outline self-intersect); no such check is implemented below --
    self-intersecting polygons are returned as-is.

    Does not perform any rounding/snapping of points to a grid.

    Parameters
    ----------
    points_list : np.ndarray
        A numpy array of points (n x 2) representing the center of the path.
    width : Union[float, int]
        The width of the path
    eps : float
        Grid tolerance forwarded to ``coords_cleanup`` when removing
        coincident/collinear vertices from the generated outline.

    Returns
    -------
    polygon_points : np.ndarray
        The polygon formed by the center path and width.
    """
    # Tangent estimated by central differences along the path.
    tangent_vec = np.gradient(points_list, axis=0)
    # Unit tangent scaled to half the stroke width.
    tangent_normalized_vec = \
        tangent_vec / np.tile(np.linalg.norm(tangent_vec, axis=1, keepdims=True), (1, 2)) * width/2
    # Find the points using the perpendicular to tangent line
    # (rotate the scaled tangent by +90 and -90 degrees respectively).
    pts0 = points_list + np.column_stack([-1 * tangent_normalized_vec[:, 1], tangent_normalized_vec[:, 0]])
    pts1 = points_list + np.column_stack([tangent_normalized_vec[:, 1], -1 * tangent_normalized_vec[:, 0]])
    # Concatenate into a polygon: one side forward, the other side reversed.
    points_out = np.concatenate((pts0, np.flipud(pts1)), axis=0)
    # Clean up the polygon
    polygon_points = coords_cleanup(points_out, eps_grid=eps, cyclic_points=True)
    return polygon_points
| [
"numpy.abs",
"numpy.roll",
"numpy.full_like",
"numpy.logical_and",
"numpy.flipud",
"numpy.logical_not",
"numpy.logical_or",
"numpy.column_stack",
"numpy.sum",
"numpy.linalg.norm",
"numpy.gradient"
] | [((1374, 1409), 'numpy.roll', 'np.roll', (['coords_list_in', '(-1)'], {'axis': '(0)'}), '(coords_list_in, -1, axis=0)\n', (1381, 1409), True, 'import numpy as np\n'), ((1431, 1465), 'numpy.roll', 'np.roll', (['coords_list_in', '(1)'], {'axis': '(0)'}), '(coords_list_in, 1, axis=0)\n', (1438, 1465), True, 'import numpy as np\n'), ((1721, 1736), 'numpy.abs', 'np.abs', (['dx_next'], {}), '(dx_next)\n', (1727, 1736), True, 'import numpy as np\n'), ((1755, 1770), 'numpy.abs', 'np.abs', (['dy_next'], {}), '(dy_next)\n', (1761, 1770), True, 'import numpy as np\n'), ((1789, 1804), 'numpy.abs', 'np.abs', (['dx_prev'], {}), '(dx_prev)\n', (1795, 1804), True, 'import numpy as np\n'), ((1823, 1838), 'numpy.abs', 'np.abs', (['dy_prev'], {}), '(dy_prev)\n', (1829, 1838), True, 'import numpy as np\n'), ((1859, 1921), 'numpy.logical_and', 'np.logical_and', (['(dx_next_abs < eps_grid)', '(dy_next_abs < eps_grid)'], {}), '(dx_next_abs < eps_grid, dy_next_abs < eps_grid)\n', (1873, 1921), True, 'import numpy as np\n'), ((3374, 3427), 'numpy.logical_or', 'np.logical_or', (['same_as_next', 'in_line_and_diff_from_lr'], {}), '(same_as_next, in_line_and_diff_from_lr)\n', (3387, 3427), True, 'import numpy as np\n'), ((6717, 6749), 'numpy.gradient', 'np.gradient', (['points_list'], {'axis': '(0)'}), '(points_list, axis=0)\n', (6728, 6749), True, 'import numpy as np\n'), ((1967, 2029), 'numpy.logical_and', 'np.logical_and', (['(dx_prev_abs < eps_grid)', '(dy_prev_abs < eps_grid)'], {}), '(dx_prev_abs < eps_grid, dy_prev_abs < eps_grid)\n', (1981, 2029), True, 'import numpy as np\n'), ((2989, 3026), 'numpy.logical_and', 'np.logical_and', (['in_line', 'diff_from_lr'], {}), '(in_line, diff_from_lr)\n', (3003, 3026), True, 'import numpy as np\n'), ((3161, 3194), 'numpy.full_like', 'np.full_like', (['same_as_next', '(False)'], {}), '(same_as_next, False)\n', (3173, 3194), True, 'import numpy as np\n'), ((5152, 5172), 'numpy.sum', 'np.sum', (['delete_array'], {}), '(delete_array)\n', (5158, 
5172), True, 'import numpy as np\n'), ((5377, 5405), 'numpy.logical_not', 'np.logical_not', (['delete_array'], {}), '(delete_array)\n', (5391, 5405), True, 'import numpy as np\n'), ((6969, 7056), 'numpy.column_stack', 'np.column_stack', (['[-1 * tangent_normalized_vec[:, 1], tangent_normalized_vec[:, 0]]'], {}), '([-1 * tangent_normalized_vec[:, 1], tangent_normalized_vec[\n :, 0]])\n', (6984, 7056), True, 'import numpy as np\n'), ((7077, 7164), 'numpy.column_stack', 'np.column_stack', (['[tangent_normalized_vec[:, 1], -1 * tangent_normalized_vec[:, 0]]'], {}), '([tangent_normalized_vec[:, 1], -1 * tangent_normalized_vec[\n :, 0]])\n', (7092, 7164), True, 'import numpy as np\n'), ((2068, 2109), 'numpy.logical_or', 'np.logical_or', (['same_as_next', 'same_as_prev'], {}), '(same_as_next, same_as_prev)\n', (2081, 2109), True, 'import numpy as np\n'), ((2793, 2855), 'numpy.logical_and', 'np.logical_and', (['(dx_next_abs < eps_grid)', '(dx_prev_abs < eps_grid)'], {}), '(dx_next_abs < eps_grid, dx_prev_abs < eps_grid)\n', (2807, 2855), True, 'import numpy as np\n'), ((2889, 2951), 'numpy.logical_and', 'np.logical_and', (['(dy_next_abs < eps_grid)', '(dy_prev_abs < eps_grid)'], {}), '(dy_next_abs < eps_grid, dy_prev_abs < eps_grid)\n', (2903, 2951), True, 'import numpy as np\n'), ((5640, 5660), 'numpy.sum', 'np.sum', (['delete_array'], {}), '(delete_array)\n', (5646, 5660), True, 'import numpy as np\n'), ((7233, 7248), 'numpy.flipud', 'np.flipud', (['pts1'], {}), '(pts1)\n', (7242, 7248), True, 'import numpy as np\n'), ((6811, 6861), 'numpy.linalg.norm', 'np.linalg.norm', (['tangent_vec'], {'axis': '(1)', 'keepdims': '(True)'}), '(tangent_vec, axis=1, keepdims=True)\n', (6825, 6861), True, 'import numpy as np\n')] |
"""
Name : c8_36_Fama_MecBeth_regression.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import numpy as np
import pandas as pd
import statsmodels.api as sm  # NOTE(review): imported but never used in this script
from datetime import datetime
#
# Simulation setup: n rows of daily dates (252 -- presumably one trading year)
# starting 2013-01-02; fixed seed so the random draws are reproducible.
n = 252
np.random.seed(12345)
begdate=datetime(2013, 1, 2)
dateRange = pd.date_range(begdate, periods=n)
def makeDataFrame(num_rows=None, index=None):
    """Build a DataFrame of i.i.d. standard-normal draws with columns A-G.

    Parameters
    ----------
    num_rows : int, optional
        Number of rows to generate; defaults to the module-level ``n``.
    index : pandas.Index, optional
        Row index; defaults to the module-level ``dateRange``.

    Returns
    -------
    pandas.DataFrame
        A (num_rows x 7) frame of ``np.random.randn`` draws.
    """
    if num_rows is None:
        num_rows = n
    if index is None:
        index = dateRange
    # BUG FIX: the original column list contained ' F' (note the leading
    # space), which made that column unaddressable as 'F'.
    data = pd.DataFrame(np.random.randn(num_rows, 7),
                        columns=['A', 'B', 'C', 'D', 'E', 'F', 'G'],
                        index=index)
    return data
#
# Build a panel-like dict of three random frames plus a dependent frame Y,
# then run the Fama-MacBeth two-pass regression.
# NOTE(review): pd.fama_macbeth was deprecated in pandas 0.18 and removed in
# 0.20 -- this call fails on modern pandas; the replacement lives in the
# `linearmodels` package (FamaMacBeth). Verify against the pandas version
# the book targets.
data = { 'A': makeDataFrame(), 'B': makeDataFrame(), 'C': makeDataFrame() }
Y = makeDataFrame()
print(pd.fama_macbeth(y=Y,x=data))
| [
"datetime.datetime",
"pandas.fama_macbeth",
"numpy.random.seed",
"numpy.random.randn",
"pandas.date_range"
] | [((329, 350), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (343, 350), True, 'import numpy as np\n'), ((360, 380), 'datetime.datetime', 'datetime', (['(2013)', '(1)', '(2)'], {}), '(2013, 1, 2)\n', (368, 380), False, 'from datetime import datetime\n'), ((394, 427), 'pandas.date_range', 'pd.date_range', (['begdate'], {'periods': 'n'}), '(begdate, periods=n)\n', (407, 427), True, 'import pandas as pd\n'), ((678, 706), 'pandas.fama_macbeth', 'pd.fama_macbeth', ([], {'y': 'Y', 'x': 'data'}), '(y=Y, x=data)\n', (693, 706), True, 'import pandas as pd\n'), ((473, 494), 'numpy.random.randn', 'np.random.randn', (['n', '(7)'], {}), '(n, 7)\n', (488, 494), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import pyautogui
from matplotlib import pyplot as plt
import time
# Webcam skin-colour gesture loop: thresholds an HSV skin-tone range, finds
# the largest contour, counts convexity defects, and uses the contour's arc
# length to toggle a spacebar press (ON/OFF) via pyautogui.
cap = cv2.VideoCapture(0)
# ct / ct1 are one-shot latches so each gesture fires a single key press.
ct=0
ct1=0
time.sleep(5)  # give the user time to position their hand before capture
while(1):
    # Take each frame
    _, frame = cap.read()
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV
    # NOTE(review): despite the comment, [0,48,80]-[20,255,255] is a common
    # skin-tone HSV range, not blue.
    lower_blue = np.array([0,48,80])
    upper_blue = np.array([20,255,255])
    # Threshold the HSV image to get only blue colors
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= mask)
    #res = cv2.GaussianBlur(res,(10,10),0)
    blur = cv2.GaussianBlur(mask,(15,15),0)
    #Apply threshold
    ret, thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
    cv2.imshow('frame',mask)
    #cv2.imshow('ret',ret)
    cv2.imshow('thresh',thresh)
    #img = thresh
    # NOTE(review): 3-value unpack matches OpenCV 3.x; OpenCV 4 returns
    # (contours, hierarchy) only.
    _,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        continue
    cnt = contours[0]
    # NOTE(review): len(contours) >= 0 is always true, so the else branch is
    # unreachable (harmless here since the empty case continues above).
    if(len(contours))>=0:
        c=max(contours, key=cv2.contourArea)
        (x,y),radius=cv2.minEnclosingCircle(c)
        M=cv2.moments(c)
    else:
        print("Sorry no contour found")
    cnt=c
    # Ignore small blobs (noise).
    if cv2.contourArea(cnt)<=1000:
        continue
    hull = cv2.convexHull(cnt,returnPoints = False)
    defects = cv2.convexityDefects(cnt,hull)
    #epsilon = 0.01*cv2.arcLength(cnt,True)
    #approx = cv2.approxPolyDP(cnt,epsilon,True)
    count=0;
    try:
        # defects is None when no convexity defects exist; touching .shape
        # then raises AttributeError, caught below.
        defects.shape
        for i in range(defects.shape[0]):
            s,e,f,d = defects[i,0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            cv2.line(frame,start,end,[0,255,0],2)
            cv2.circle(frame,far,5,[0,0,255],-1)
            count=count+1
        #print(str(cv2.contourArea(cnt,True)))
        # Long perimeter (open hand, presumably) -> press space once ("ON").
        # The while loops run at most one iteration: the latch flips inside.
        if cv2.arcLength(cnt,True)>2000:
            while ct==0:
                print("ON")
                pyautogui.press('space')
                ct=1
                ct1=0
        # Short perimeter (closed fist, presumably) -> press space once ("OFF").
        if cv2.arcLength(cnt,True)>500 and cv2.arcLength(cnt,True)<=1500:
            while ct1==0:
                print("OFF")
                pyautogui.press('space')
                ct1=1
                ct=0
        #if arc
    except AttributeError:
        print("shape not found")
    cv2.imshow('final',frame)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
    # NOTE(review): this second imshow/waitKey/ESC block duplicates the one
    # just above, so each frame waits twice.
    cv2.imshow('mask', mask)
    # cv2.imshow('res', res)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
| [
"time.sleep",
"cv2.imshow",
"cv2.convexityDefects",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.threshold",
"cv2.arcLength",
"cv2.line",
"cv2.contourArea",
"cv2.waitKey",
"cv2.minEnclosingCircle",
"cv2.circle",
"cv2.cvtColor",
"cv2.moments",
"cv2.GaussianBlur",
"cv2.convexHull",
"py... | [((109, 128), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (125, 128), False, 'import cv2\n'), ((143, 156), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (153, 156), False, 'import time\n'), ((2772, 2795), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2793, 2795), False, 'import cv2\n'), ((259, 297), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (271, 297), False, 'import cv2\n'), ((359, 380), 'numpy.array', 'np.array', (['[0, 48, 80]'], {}), '([0, 48, 80])\n', (367, 380), True, 'import numpy as np\n'), ((397, 421), 'numpy.array', 'np.array', (['[20, 255, 255]'], {}), '([20, 255, 255])\n', (405, 421), True, 'import numpy as np\n'), ((489, 529), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_blue', 'upper_blue'], {}), '(hsv, lower_blue, upper_blue)\n', (500, 529), False, 'import cv2\n'), ((586, 626), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (601, 626), False, 'import cv2\n'), ((683, 718), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['mask', '(15, 15)', '(0)'], {}), '(mask, (15, 15), 0)\n', (699, 718), False, 'import cv2\n'), ((759, 825), 'cv2.threshold', 'cv2.threshold', (['blur', '(127)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(blur, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (772, 825), False, 'import cv2\n'), ((931, 956), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'mask'], {}), "('frame', mask)\n", (941, 956), False, 'import cv2\n'), ((989, 1017), 'cv2.imshow', 'cv2.imshow', (['"""thresh"""', 'thresh'], {}), "('thresh', thresh)\n", (999, 1017), False, 'import cv2\n'), ((1064, 1126), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (1080, 1126), False, 'import cv2\n'), ((1465, 1504), 'cv2.convexHull', 'cv2.convexHull', (['cnt'], {'returnPoints': '(False)'}), 
'(cnt, returnPoints=False)\n', (1479, 1504), False, 'import cv2\n'), ((1521, 1552), 'cv2.convexityDefects', 'cv2.convexityDefects', (['cnt', 'hull'], {}), '(cnt, hull)\n', (1541, 1552), False, 'import cv2\n'), ((2552, 2578), 'cv2.imshow', 'cv2.imshow', (['"""final"""', 'frame'], {}), "('final', frame)\n", (2562, 2578), False, 'import cv2\n'), ((2646, 2670), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (2656, 2670), False, 'import cv2\n'), ((1284, 1309), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (1306, 1309), False, 'import cv2\n'), ((1321, 1335), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (1332, 1335), False, 'import cv2\n'), ((1407, 1427), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1422, 1427), False, 'import cv2\n'), ((2587, 2601), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (2598, 2601), False, 'import cv2\n'), ((2709, 2723), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (2720, 2723), False, 'import cv2\n'), ((1896, 1939), 'cv2.line', 'cv2.line', (['frame', 'start', 'end', '[0, 255, 0]', '(2)'], {}), '(frame, start, end, [0, 255, 0], 2)\n', (1904, 1939), False, 'import cv2\n'), ((1947, 1989), 'cv2.circle', 'cv2.circle', (['frame', 'far', '(5)', '[0, 0, 255]', '(-1)'], {}), '(frame, far, 5, [0, 0, 255], -1)\n', (1957, 1989), False, 'import cv2\n'), ((2071, 2095), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (2084, 2095), False, 'import cv2\n'), ((2173, 2197), 'pyautogui.press', 'pyautogui.press', (['"""space"""'], {}), "('space')\n", (2188, 2197), False, 'import pyautogui\n'), ((2255, 2279), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (2268, 2279), False, 'import cv2\n'), ((2287, 2311), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (2300, 2311), False, 'import cv2\n'), ((2392, 2416), 'pyautogui.press', 'pyautogui.press', (['"""space"""'], {}), "('space')\n", 
(2407, 2416), False, 'import pyautogui\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 12:46:30 2020
@author: mhayt
"""
import os
from PIL import Image
import random
import string
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
def get_random_string(length):
    '''
    Build a random string of lowercase ASCII letters.

    Parameters
    ----------
    length : int
        Number of characters in the returned string.

    Returns
    -------
    str
        Random string of `length` lowercase letters (empty for length 0).
    '''
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
def img_to_jpg(input_path, input_dir):
    """Convert every non-.jpg file in a directory to .jpg.

    Each converted file is re-saved in RGB mode next to the original
    (same stem, '.jpg' suffix) and the original file is then deleted.

    Parameters
    ----------
    input_path : string
        absolute path to the directory of the data.
    input_dir : string
        additional optional sub-directory - useful for iterating over directories.
    """
    directory = input_path + input_dir
    for entry in os.listdir(directory):
        stem, ext = os.path.splitext(entry)
        if ext == '.jpg':
            continue  # already in the target format
        source = f'{directory}/{entry}'
        image = Image.open(source)
        image.convert('RGB').save(f'{directory}/{stem}.jpg')
        image.close()
        os.remove(source)
def check_rgb(input_path, input_dir):
    """Delete every image in a directory that is not 3-channel (RGB).

    The channel count is taken from the last axis of the numpy
    representation of the image; grayscale images therefore fail the
    check and are removed, matching the original behaviour.

    Parameters
    ----------
    input_path : string
        absolute path to the directory of the data.
    input_dir : string
        additional optional sub-directory - useful for iterating over directories.
    """
    directory = input_path + input_dir
    for entry in os.listdir(directory):
        path = f'{directory}/{entry}'
        im = Image.open(path)
        channels = np.asarray(im).shape[-1]
        # Close the file handle before (possibly) deleting the file;
        # the previous version leaked the handle, which can make
        # os.remove fail on Windows.
        im.close()
        if channels != 3:
            os.remove(path)
def rename_files(fish_species, input_path, input_dir, file_already_exisits=True):
    """Rename every file in a directory to '<fish_species>_<i>.jpg'.

    When ``file_already_exisits`` is True, files are first given random
    temporary names so that the final renaming pass cannot collide with
    files that already follow the target naming scheme.

    Parameters
    ----------
    fish_species : string
        fish species / type used in the naming convention
    input_path : string
        absolute path to the directory of the data.
    input_dir : string
        additional optional sub-directory - useful for iterating over directories.
    """
    directory = input_path + input_dir
    if file_already_exisits:
        # first pass: randomised temporary names to avoid collisions
        for idx, entry in enumerate(os.listdir(directory)):
            temp_name = get_random_string(20) + str(idx) + '.jpg'
            old = directory + '/' + entry
            new = directory + '/' + temp_name
            if old != new:
                os.rename(old, new)
    # second pass: the final species_<index>.jpg names
    for idx, entry in enumerate(os.listdir(directory)):
        final_name = fish_species + '_' + str(idx) + '.jpg'
        old = directory + '/' + entry
        new = directory + '/' + final_name
        if old != new:
            os.rename(old, new)
def open_jpeg_as_np(path, image_size, vectorize=True):
    """Load a jpeg, resize it, and return it as a numpy array.

    Despite the name, ``vectorize`` only converts the image to
    grayscale ('L'); the actual flattening is done by the caller.

    Parameters
    ----------
    path : string
        path to the image.
    image_size : tuple
        2D-tuple: (width, height).
    vectorize : bool, optional
        convert to single-channel grayscale. The default is True.

    Returns
    -------
    array
        np array of the input image.
    """
    image = Image.open(path)
    image = image.resize(image_size)
    if vectorize:
        image = image.convert('L')
    return np.asarray(image)
def gen_data_array_vector(label_paths, image_size):
    """Load images as grayscale vectors stacked into a 2-D array.

    Parameters
    ----------
    label_paths : list
        relative paths to the data.
    image_size : tuple
        2D-tuple: (width, height).

    Returns
    -------
    np array
        shape (len(label_paths), width * height); one flattened
        grayscale image per row.
    """
    vector_len = image_size[0] * image_size[1]
    # pre-allocating is faster than appending row by row
    data_array = np.zeros(shape=(len(label_paths), vector_len))
    for idx, path in enumerate(label_paths):
        image = open_jpeg_as_np(path, image_size)
        data_array[idx] = np.reshape(image, vector_len)
    return data_array
def gen_data_array_image(label_paths, image_size, RGB=True):
    """Load images (unflattened) stacked into one numpy array.

    Parameters
    ----------
    label_paths : list
        relative paths to the data.
    image_size : tuple
        2D-tuple: (width, height).
    RGB : bool, optional
        Number of channels set to 3 when RGB is set to True. The default is True.

    Returns
    -------
    data_array : np array
        shape (n, height, width, 3) when RGB, else (n, height, width).
    """
    num_images = len(label_paths)
    if RGB:
        data_array_shape = (num_images, image_size[1], image_size[0], 3)
    else:
        # image_size is (width, height) but numpy returns images as
        # (height, width); the previous `(num_images,) + image_size`
        # ordering only worked for square images, so swap here too.
        data_array_shape = (num_images, image_size[1], image_size[0])
    # pre-allocating is faster than appending image by image
    data_array = np.zeros(shape=data_array_shape)
    for i, path in enumerate(label_paths):
        data_array[i] = open_jpeg_as_np(path, image_size, vectorize=False)
    return data_array
def proc_img(im_path, img_size=224):
    """Read an image file and return it as a normalised, resized tensor.

    Parameters
    ----------
    im_path : str
        path to the image.
    img_size : int, optional
        width and height of the output tensor. The default is 224.

    Returns
    -------
    tensor
        float32 image tensor resized to (img_size, img_size), 3 channels.
    """
    raw = tf.io.read_file(im_path)
    # decode to a 3-channel (RGB) uint8 tensor
    image = tf.image.decode_jpeg(raw, channels=3)
    # normalise to [0, 1] (standardisation would be the alternative)
    image = tf.image.convert_image_dtype(image, tf.float32)
    # uniform size -> every image has the same number of features
    return tf.image.resize(image, size=[img_size, img_size])
def get_methods(object, spacing=20):
    """Print every callable attribute of ``object`` with a docstring snippet.

    Parameters
    ----------
    object : any
        object for inspection.
    spacing : int, optional
        column width used to align attribute names. The default is 20.

    Returns
    -------
    None
        output is printed to the console.
    """
    method_list = []
    for method_name in dir(object):
        try:
            if callable(getattr(object, method_name)):
                method_list.append(str(method_name))
        except Exception:
            # some descriptors raise on access; record the name anyway
            method_list.append(str(method_name))
    # collapse whitespace so each docstring snippet prints on one line
    # (the original `... or (lambda s: s)` fallback was dead code: the
    # first lambda is always truthy)
    process_func = lambda s: ' '.join(s.split())
    for method in method_list:
        try:
            print(str(method.ljust(spacing)) + ' ' +
                  process_func(str(getattr(object, method).__doc__)[0:90]))
        except Exception:
            print(method.ljust(spacing) + ' ' + ' getattr() failed')
def print_tf_setup():
    """Print TensorFlow / TF-Hub versions and GPU availability.

    Returns
    -------
    None.
    """
    print(' ---------------------------------------\n TF SETUP\n')
    gpus = tf.config.list_physical_devices('GPU')
    print('TF Version:', tf.__version__, '\nTF Hub Version:', hub.__version__, '\n')
    if gpus:
        print(f'{len(gpus)} GPU is available')
    else:
        print('GPU is not available')
    print(' ---------------------------------------\n')
    return None
"os.listdir",
"PIL.Image.open",
"tensorflow.image.convert_image_dtype",
"numpy.reshape",
"tensorflow.image.resize",
"random.choice",
"os.rename",
"tensorflow.io.read_file",
"numpy.asarray",
"os.path.splitext",
"os.remove",
"numpy.zeros",
"tensorflow.config.list_physical_devices",
"tensorfl... | [((1012, 1034), 'os.listdir', 'os.listdir', (['input_full'], {}), '(input_full)\n', (1022, 1034), False, 'import os\n'), ((1956, 1978), 'os.listdir', 'os.listdir', (['input_full'], {}), '(input_full)\n', (1966, 1978), False, 'import os\n'), ((2788, 2810), 'os.listdir', 'os.listdir', (['input_full'], {}), '(input_full)\n', (2798, 2810), False, 'import os\n'), ((3152, 3174), 'os.listdir', 'os.listdir', (['input_full'], {}), '(input_full)\n', (3162, 3174), False, 'import os\n'), ((3915, 3931), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (3925, 3931), False, 'from PIL import Image\n'), ((4019, 4033), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (4029, 4033), True, 'import numpy as np\n'), ((4724, 4770), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_images, image_vector_len)'}), '(shape=(num_images, image_vector_len))\n', (4732, 4770), True, 'import numpy as np\n'), ((5850, 5882), 'numpy.zeros', 'np.zeros', ([], {'shape': 'data_array_shape'}), '(shape=data_array_shape)\n', (5858, 5882), True, 'import numpy as np\n'), ((6555, 6579), 'tensorflow.io.read_file', 'tf.io.read_file', (['im_path'], {}), '(im_path)\n', (6570, 6579), True, 'import tensorflow as tf\n'), ((6650, 6686), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['im'], {'channels': '(3)'}), '(im, channels=3)\n', (6670, 6686), True, 'import tensorflow as tf\n'), ((6813, 6857), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['im', 'tf.float32'], {}), '(im, tf.float32)\n', (6841, 6857), True, 'import tensorflow as tf\n'), ((6981, 7027), 'tensorflow.image.resize', 'tf.image.resize', (['im'], {'size': '[img_size, img_size]'}), '(im, size=[img_size, img_size])\n', (6996, 7027), True, 'import tensorflow as tf\n'), ((8159, 8197), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (8190, 8197), True, 'import tensorflow as tf\n'), ((2037, 2070), 'PIL.Image.open', 'Image.open', 
(['f"""{input_full}/{img}"""'], {}), "(f'{input_full}/{img}')\n", (2047, 2070), False, 'from PIL import Image\n'), ((2084, 2098), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (2094, 2098), True, 'import numpy as np\n'), ((3407, 3426), 'os.rename', 'os.rename', (['src', 'dst'], {}), '(src, dst)\n', (3416, 3426), False, 'import os\n'), ((4882, 4914), 'numpy.reshape', 'np.reshape', (['im', 'image_vector_len'], {}), '(im, image_vector_len)\n', (4892, 4914), True, 'import numpy as np\n'), ((560, 582), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (573, 582), False, 'import random\n'), ((1173, 1194), 'os.path.splitext', 'os.path.splitext', (['img'], {}), '(img)\n', (1189, 1194), False, 'import os\n'), ((1216, 1237), 'os.path.splitext', 'os.path.splitext', (['img'], {}), '(img)\n', (1232, 1237), False, 'import os\n'), ((1288, 1321), 'PIL.Image.open', 'Image.open', (['f"""{input_full}/{img}"""'], {}), "(f'{input_full}/{img}')\n", (1298, 1321), False, 'from PIL import Image\n'), ((1451, 1483), 'os.remove', 'os.remove', (['f"""{input_full}/{img}"""'], {}), "(f'{input_full}/{img}')\n", (1460, 1483), False, 'import os\n'), ((2212, 2244), 'os.remove', 'os.remove', (['f"""{input_full}/{img}"""'], {}), "(f'{input_full}/{img}')\n", (2221, 2244), False, 'import os\n'), ((3103, 3122), 'os.rename', 'os.rename', (['src', 'dst'], {}), '(src, dst)\n', (3112, 3122), False, 'import os\n')] |
# import sys
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling1D, AveragePooling1D
from sklearn.ensemble.forest import RandomForestClassifier
from pandas.core.frame import DataFrame
from seaborn.matrix import heatmap
# sys.path.insert(0, "/home/cirl/Amir/Human-Activity-EEG-Accelerometer")
import numpy as np
import os
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape
import time
import tensorflow as tf
import random as rn
from keras import backend as K, optimizers
from DeepEEGACC.input_preparation import build_inputs
from keras.callbacks import EarlyStopping, CSVLogger
from DeepEEG.evaluation import compute_accuracy, evalRes
from keras.utils.vis_utils import plot_model
from keras.layers.recurrent import LSTM
from keras.layers.convolutional import Conv2D, Conv1D, SeparableConv1D
# Reproducibility setup: pin the Python hash seed and every RNG seed
# before any graph is built (TF1-style session API).
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
# https://pdfs.semanticscholar.org/df0b/05d8985846e694cda62d41a04e7c85090fa6.pdf
rn.seed(12345)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs -> CPU-only run
os.environ['PYTHONHASHSEED'] = '0'  # NOTE(review): duplicate of the assignment above
np.random.seed(3)  # NOTE(review): reseeds NumPy, overriding seed(42) above
rn.seed(12345)
# single-threaded session config for deterministic op scheduling
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf.gpu_options.allow_growth = True
tf.set_random_seed(1234)
classes = 2  # NOTE(review): appears unused below (the model outputs 3 units) -- confirm
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# NOTE(review): the two lines below repeat earlier setup and have no
# further effect (the session was already created above)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.set_random_seed(1234)
# 16
def build_model(X_train, row, cell):
    """Assemble and compile the Conv1D + stacked-LSTM classifier.

    Parameters
    ----------
    X_train : array
        training data (the input shape is fixed to (330, 5) regardless).
    row, cell : int
        identifiers kept for interface compatibility; not used in the
        model construction itself.

    Returns
    -------
    keras.models.Sequential
        compiled model (categorical crossentropy, Adam, lr=0.001).
    """
    net = Sequential()
    # convolutional front-end over the (timesteps=330, channels=5) input
    net.add(Conv1D(32, 10, strides=1, data_format='channels_last',
                   input_shape=(330, 5)))
    net.add(Activation("relu"))
    net.add(BatchNormalization())
    net.add(Conv1D(64, 15, strides=4, data_format='channels_last'))
    net.add(Activation("relu"))
    net.add(BatchNormalization())
    net.add(MaxPooling1D(pool_size=16, strides=2, padding='valid'))
    net.add(Dense(512))
    net.add(Activation("tanh"))
    # the two LSTM layers share every setting except return_sequences
    lstm_kwargs = dict(activation='tanh', recurrent_activation='hard_sigmoid',
                       use_bias=True, kernel_initializer='glorot_uniform',
                       recurrent_initializer='orthogonal',
                       unit_forget_bias=True, kernel_regularizer=None,
                       recurrent_regularizer=None,
                       bias_regularizer=None, activity_regularizer=None,
                       kernel_constraint=None, recurrent_constraint=None,
                       bias_constraint=None, dropout=0.0, recurrent_dropout=0.0,
                       implementation=1, return_state=False,
                       go_backwards=False, stateful=False, unroll=False)
    net.add(LSTM(128, return_sequences=True, **lstm_kwargs))
    net.add(Dropout(0.4))
    net.add(LSTM(128, return_sequences=False, **lstm_kwargs))
    net.add(Dropout(0.4))
    net.add(Dense(3, activation="softmax"))
    opt = optimizers.adam(lr=0.001)
    net.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])
    plot_model(net, to_file='model.png', show_shapes=True)
    net.summary()
    return net
if __name__ == '__main__':
    # Load train/test splits: windows of 330 samples (matches the model's
    # fixed (330, 5) input shape).
    X_train, y_train, X_test, y_test, train_labels, test_labels = build_inputs(False, 330)
    epochs = 50  # 21
    # rforest_checker = RandomForestClassifier(random_state = 0)
    # rforest_checker.fit(X_train, y_train)
    # importances_df = DataFrame(rforest_checker.feature_importances_, columns=['Feature_Importance'],
    # index=["AF7", "AF8", "X_Axis", "Y_Axis", "Z_Axis"])
    # importances_df.sort_values(by=['Feature_Importance'], ascending=False, inplace=True)
    # # colormap = plt.cm.viridis
    #
    # plt.figure(figsize=(12,12))
    # plt.title('Correlation between Features', y=1.05, size = 15)
    # tmp = np.corrcoef(X_train)
    # heatmap(tmp,
    # linewidths=0.1,
    # vmax=1.0,
    # square=True,
    # # cmap=colormap,
    # linecolor='white',
    # annot=True)
    # print(importances_df)
    model = build_model(X_train, 0, 0)
    name = "{}-{}".format(0, 0)
    # stop once validation accuracy plateaus; log per-epoch metrics to CSV
    early_stop = EarlyStopping(monitor='val_acc', min_delta=0.1, patience=2, mode='auto')
    csv_logger = CSVLogger('res/training.csv', append=True, separator=',')
    history_callback = model.fit(X_train, y_train, epochs=epochs, batch_size=500,
                                 validation_split=0.2, verbose=1, callbacks=[csv_logger, early_stop])
    model.save_weights("model.h5")
    pred = model.predict(X_test)
    # evaluation helpers from DeepEEG.evaluation
    compute_accuracy(name, pred, test_labels, history_callback)
    evalRes(pred, test_labels, y_test, name)
| [
"keras.layers.pooling.MaxPooling1D",
"keras.utils.vis_utils.plot_model",
"tensorflow.set_random_seed",
"DeepEEG.evaluation.compute_accuracy",
"keras.layers.convolutional.Conv1D",
"numpy.random.seed",
"keras.callbacks.EarlyStopping",
"tensorflow.ConfigProto",
"tensorflow.get_default_graph",
"keras.... | [((970, 988), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (984, 988), True, 'import numpy as np\n'), ((1174, 1188), 'random.seed', 'rn.seed', (['(12345)'], {}), '(12345)\n', (1181, 1188), True, 'import random as rn\n'), ((1329, 1346), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (1343, 1346), True, 'import numpy as np\n'), ((1347, 1361), 'random.seed', 'rn.seed', (['(12345)'], {}), '(12345)\n', (1354, 1361), True, 'import random as rn\n'), ((1377, 1455), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n', (1391, 1455), True, 'import tensorflow as tf\n'), ((1501, 1525), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (1519, 1525), True, 'import tensorflow as tf\n'), ((1607, 1626), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (1620, 1626), True, 'from keras import backend as K, optimizers\n'), ((1643, 1721), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n', (1657, 1721), True, 'import tensorflow as tf\n'), ((1722, 1746), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (1740, 1746), True, 'import tensorflow as tf\n'), ((1803, 1815), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1813, 1815), False, 'from keras.models import Sequential\n'), ((3800, 3825), 'keras.optimizers.adam', 'optimizers.adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (3815, 3825), False, 'from keras import backend as K, optimizers\n'), ((3918, 3974), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': '"""model.png"""', 'show_shapes': '(True)'}), "(model, to_file='model.png', show_shapes=True)\n", (3928, 3974), False, 'from 
keras.utils.vis_utils import plot_model\n'), ((4107, 4131), 'DeepEEGACC.input_preparation.build_inputs', 'build_inputs', (['(False)', '(330)'], {}), '(False, 330)\n', (4119, 4131), False, 'from DeepEEGACC.input_preparation import build_inputs\n'), ((5055, 5127), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0.1)', 'patience': '(2)', 'mode': '"""auto"""'}), "(monitor='val_acc', min_delta=0.1, patience=2, mode='auto')\n", (5068, 5127), False, 'from keras.callbacks import EarlyStopping, CSVLogger\n'), ((5145, 5202), 'keras.callbacks.CSVLogger', 'CSVLogger', (['"""res/training.csv"""'], {'append': '(True)', 'separator': '""","""'}), "('res/training.csv', append=True, separator=',')\n", (5154, 5202), False, 'from keras.callbacks import EarlyStopping, CSVLogger\n'), ((5434, 5493), 'DeepEEG.evaluation.compute_accuracy', 'compute_accuracy', (['name', 'pred', 'test_labels', 'history_callback'], {}), '(name, pred, test_labels, history_callback)\n', (5450, 5493), False, 'from DeepEEG.evaluation import compute_accuracy, evalRes\n'), ((5498, 5538), 'DeepEEG.evaluation.evalRes', 'evalRes', (['pred', 'test_labels', 'y_test', 'name'], {}), '(pred, test_labels, y_test, name)\n', (5505, 5538), False, 'from DeepEEG.evaluation import compute_accuracy, evalRes\n'), ((1562, 1584), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1582, 1584), True, 'import tensorflow as tf\n'), ((1830, 1906), 'keras.layers.convolutional.Conv1D', 'Conv1D', (['(32)', '(10)'], {'strides': '(1)', 'data_format': '"""channels_last"""', 'input_shape': '(330, 5)'}), "(32, 10, strides=1, data_format='channels_last', input_shape=(330, 5))\n", (1836, 1906), False, 'from keras.layers.convolutional import Conv2D, Conv1D, SeparableConv1D\n'), ((1935, 1953), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1945, 1953), False, 'from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape\n'), 
((1969, 1989), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1987, 1989), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2005, 2059), 'keras.layers.convolutional.Conv1D', 'Conv1D', (['(64)', '(15)'], {'strides': '(4)', 'data_format': '"""channels_last"""'}), "(64, 15, strides=4, data_format='channels_last')\n", (2011, 2059), False, 'from keras.layers.convolutional import Conv2D, Conv1D, SeparableConv1D\n'), ((2075, 2093), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2085, 2093), False, 'from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape\n'), ((2109, 2129), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2127, 2129), False, 'from keras.layers.normalization import BatchNormalization\n'), ((2145, 2199), 'keras.layers.pooling.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(16)', 'strides': '(2)', 'padding': '"""valid"""'}), "(pool_size=16, strides=2, padding='valid')\n", (2157, 2199), False, 'from keras.layers.pooling import MaxPooling1D, AveragePooling1D\n'), ((2215, 2225), 'keras.layers.core.Dense', 'Dense', (['(512)'], {}), '(512)\n', (2220, 2225), False, 'from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape\n'), ((2241, 2259), 'keras.layers.core.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (2251, 2259), False, 'from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape\n'), ((2275, 2805), 'keras.layers.recurrent.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'recurrent_activation': '"""hard_sigmoid"""', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'recurrent_initializer': '"""orthogonal"""', 'unit_forget_bias': '(True)', 'kernel_regularizer': 'None', 'recurrent_regularizer': 'None', 'bias_regularizer': 'None', 'activity_regularizer': 'None', 'kernel_constraint': 'None', 'recurrent_constraint': 'None', 
'bias_constraint': 'None', 'dropout': '(0.0)', 'recurrent_dropout': '(0.0)', 'implementation': '(1)', 'return_sequences': '(True)', 'return_state': '(False)', 'go_backwards': '(False)', 'stateful': '(False)', 'unroll': '(False)'}), "(128, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=\n True, kernel_initializer='glorot_uniform', recurrent_initializer=\n 'orthogonal', unit_forget_bias=True, kernel_regularizer=None,\n recurrent_regularizer=None, bias_regularizer=None, activity_regularizer\n =None, kernel_constraint=None, recurrent_constraint=None,\n bias_constraint=None, dropout=0.0, recurrent_dropout=0.0,\n implementation=1, return_sequences=True, return_state=False,\n go_backwards=False, stateful=False, unroll=False)\n", (2279, 2805), False, 'from keras.layers.recurrent import LSTM\n'), ((2988, 3000), 'keras.layers.core.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2995, 3000), False, 'from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape\n'), ((3016, 3547), 'keras.layers.recurrent.LSTM', 'LSTM', (['(128)'], {'activation': '"""tanh"""', 'recurrent_activation': '"""hard_sigmoid"""', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'recurrent_initializer': '"""orthogonal"""', 'unit_forget_bias': '(True)', 'kernel_regularizer': 'None', 'recurrent_regularizer': 'None', 'bias_regularizer': 'None', 'activity_regularizer': 'None', 'kernel_constraint': 'None', 'recurrent_constraint': 'None', 'bias_constraint': 'None', 'dropout': '(0.0)', 'recurrent_dropout': '(0.0)', 'implementation': '(1)', 'return_sequences': '(False)', 'return_state': '(False)', 'go_backwards': '(False)', 'stateful': '(False)', 'unroll': '(False)'}), "(128, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=\n True, kernel_initializer='glorot_uniform', recurrent_initializer=\n 'orthogonal', unit_forget_bias=True, kernel_regularizer=None,\n recurrent_regularizer=None, bias_regularizer=None, activity_regularizer\n =None, 
kernel_constraint=None, recurrent_constraint=None,\n bias_constraint=None, dropout=0.0, recurrent_dropout=0.0,\n implementation=1, return_sequences=False, return_state=False,\n go_backwards=False, stateful=False, unroll=False)\n", (3020, 3547), False, 'from keras.layers.recurrent import LSTM\n'), ((3730, 3742), 'keras.layers.core.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (3737, 3742), False, 'from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape\n'), ((3758, 3788), 'keras.layers.core.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (3763, 3788), False, 'from keras.layers.core import Dense, Activation, Dropout, Flatten, Reshape\n')] |
"""
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: <NAME>
"""
import tensorflow as tf
import numpy as np
import tflearn
import actor
import critic
from replay_buffer import ReplayBuffer
# ==========================
# Training Parameters
# ==========================
# Max training steps
MAX_EPISODES = 50000
# Max episode length
MAX_EP_STEPS = 1000
# Base learning rate for the Actor network
ACTOR_LEARNING_RATE = 0.0001
# Base learning rate for the Critic Network
CRITIC_LEARNING_RATE = 0.001
# Discount factor
GAMMA = 0.99
# Soft target update param
TAU = 0.001
# ===========================
# Utility Parameters
# ===========================
# Render gym env during training
RENDER_ENV = True
# Use Gym Monitor
GYM_MONITOR_EN = True
# Gym environment
ENV_NAME = 'Pendulum-v0'
# Directory for storing gym results
MONITOR_DIR = './results/gym_ddpg'
# Directory for storing tensorboard summary results
SUMMARY_DIR = './results/tf_ddpg'
# Seed shared by numpy, TF, the env, and the replay buffer
RANDOM_SEED = 1234
# Size of replay buffer
BUFFER_SIZE = 10000
# Transitions sampled per training update
MINIBATCH_SIZE = 64
# ===========================
# Actor and Critic DNNs
# ===========================
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
    """Create TensorBoard scalar summaries for episode reward and max Q.

    Returns
    -------
    tuple
        (merged summary op, [reward variable, qmax variable]); feed the
        variables when evaluating the op to record a data point.
    """
    reward_var = tf.Variable(0.)
    tf.summary.scalar("Reward", reward_var)
    qmax_var = tf.Variable(0.)
    tf.summary.scalar("Qmax Value", qmax_var)
    return tf.summary.merge_all(), [reward_var, qmax_var]
# ===========================
# Agent Training
# ===========================
def train(sess, env, actor, critic):
    """Run the DDPG training loop.

    Parameters
    ----------
    sess : tf.Session
        session used for variable initialisation and summary evaluation.
    env : gym environment
        environment to interact with; the observation is reshaped to
        length 3 (hard-coded, i.e. Pendulum-v0's state size).
    actor, critic :
        network objects exposing predict/predict_target/train/
        update_target_network.  NOTE(review): these parameters shadow
        the imported ``actor`` and ``critic`` modules.
    """
    # Set up summary Ops
    summary_ops, summary_vars = build_summaries()
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph)
    # Initialize target network weights
    actor.update_target_network()
    critic.update_target_network()
    # Initialize replay memory
    replay_buffer = ReplayBuffer(BUFFER_SIZE, RANDOM_SEED)
    for i in range(MAX_EPISODES):
        s = env.reset()
        ep_reward = 0
        ep_ave_max_q = 0
        for j in range(MAX_EP_STEPS):
            # Added exploration noise; 1/(1+i) decays over episodes
            a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. + i))
            s2, r, terminal, info = env.step(a[0])
            replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
                              terminal, np.reshape(s2, (actor.s_dim,)))
            # Keep adding experience to the memory until
            # there are at least minibatch size samples
            if replay_buffer.size() > MINIBATCH_SIZE:
                s_batch, a_batch, r_batch, t_batch, s2_batch = \
                    replay_buffer.sample_batch(MINIBATCH_SIZE)
                # Calculate targets (Bellman backup; terminal transitions
                # use the raw reward only)
                target_q = critic.predict_target(
                    s2_batch, actor.predict_target(s2_batch))
                y_i = []
                for k in range(MINIBATCH_SIZE):
                    if t_batch[k]:
                        y_i.append(r_batch[k])
                    else:
                        y_i.append(r_batch[k] + GAMMA * target_q[k])
                # Update the critic given the targets
                predicted_q_value, _ = critic.train(
                    s_batch, a_batch, np.reshape(y_i, (MINIBATCH_SIZE, 1)))
                ep_ave_max_q += np.amax(predicted_q_value)
                # Update the actor policy using the sampled gradient
                a_outs = actor.predict(s_batch)
                grads = critic.action_gradients(s_batch, a_outs)
                actor.train(s_batch, grads[0])
                # Update target networks
                actor.update_target_network()
                critic.update_target_network()
            s = s2
            ep_reward += r
            if terminal:
                # NOTE(review): if terminal fires at j == 0 the float(j)
                # division below raises ZeroDivisionError -- confirm the
                # env never terminates on the first step.
                summary_str = sess.run(summary_ops, feed_dict={
                    summary_vars[0]: ep_reward,
                    summary_vars[1]: ep_ave_max_q / float(j)
                })
                writer.add_summary(summary_str, i)
                writer.flush()
                print ('| Reward: %.2i' % int(ep_reward), " | Episode", i, \
                    '| Qmax: %.4f' % (ep_ave_max_q / float(j)))
                break
def main(_):
    """Build the environment and networks, then run DDPG training.

    NOTE(review): ``gym``, ``wrappers``, ``ActorNetwork`` and
    ``CriticNetwork`` are used below but are not imported in the
    visible header (only ``import actor`` / ``import critic``) --
    confirm the missing imports exist elsewhere in the project.
    """
    with tf.Session() as sess:
        env = gym.make(ENV_NAME)
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)
        env.seed(RANDOM_SEED)
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
        action_bound = env.action_space.high
        # Ensure action bound is symmetric
        # NOTE(review): high == -low compares numpy arrays; for a
        # multi-dimensional action space this asserts on an array --
        # consider (a == b).all(). Works for Pendulum's 1-D action.
        assert (env.action_space.high == -env.action_space.low)
        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU)
        critic = CriticNetwork(sess, state_dim, action_dim,
                               CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars())
        if GYM_MONITOR_EN:
            if not RENDER_ENV:
                # record results without rendering video
                env = wrappers.Monitor(
                    env, MONITOR_DIR, video_callable=False, force=True)
            else:
                env = wrappers.Monitor(env, MONITOR_DIR, force=True)
        train(sess, env, actor, critic)
        if GYM_MONITOR_EN:
            env.monitor.close()
if __name__ == '__main__':
    # tf.app.run() parses command-line flags and then invokes main(_)
    tf.app.run()
| [
"replay_buffer.ReplayBuffer",
"actor.predict",
"tensorflow.set_random_seed",
"tensorflow.app.run",
"critic.action_gradients",
"numpy.reshape",
"actor.predict_target",
"tensorflow.Session",
"actor.train",
"numpy.random.seed",
"tensorflow.summary.scalar",
"tensorflow.summary.merge_all",
"criti... | [((1472, 1488), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), '(0.0)\n', (1483, 1488), True, 'import tensorflow as tf\n'), ((1492, 1535), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Reward"""', 'episode_reward'], {}), "('Reward', episode_reward)\n", (1509, 1535), True, 'import tensorflow as tf\n'), ((1560, 1576), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {}), '(0.0)\n', (1571, 1576), True, 'import tensorflow as tf\n'), ((1580, 1630), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Qmax Value"""', 'episode_ave_max_q'], {}), "('Qmax Value', episode_ave_max_q)\n", (1597, 1630), True, 'import tensorflow as tf\n'), ((1705, 1727), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (1725, 1727), True, 'import tensorflow as tf\n'), ((2023, 2069), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['SUMMARY_DIR', 'sess.graph'], {}), '(SUMMARY_DIR, sess.graph)\n', (2044, 2069), True, 'import tensorflow as tf\n'), ((2115, 2144), 'actor.update_target_network', 'actor.update_target_network', ([], {}), '()\n', (2142, 2144), False, 'import actor\n'), ((2149, 2179), 'critic.update_target_network', 'critic.update_target_network', ([], {}), '()\n', (2177, 2179), False, 'import critic\n'), ((2232, 2270), 'replay_buffer.ReplayBuffer', 'ReplayBuffer', (['BUFFER_SIZE', 'RANDOM_SEED'], {}), '(BUFFER_SIZE, RANDOM_SEED)\n', (2244, 2270), False, 'from replay_buffer import ReplayBuffer\n'), ((5675, 5687), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5685, 5687), True, 'import tensorflow as tf\n'), ((1975, 2008), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2006, 2008), True, 'import tensorflow as tf\n'), ((4592, 4604), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4602, 4604), True, 'import tensorflow as tf\n'), ((4656, 4683), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (4670, 4683), True, 'import numpy as 
np\n'), ((4692, 4723), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (4710, 4723), True, 'import tensorflow as tf\n'), ((5252, 5282), 'actor.get_num_trainable_vars', 'actor.get_num_trainable_vars', ([], {}), '()\n', (5280, 5282), False, 'import actor\n'), ((2612, 2641), 'numpy.reshape', 'np.reshape', (['s', '(actor.s_dim,)'], {}), '(s, (actor.s_dim,))\n', (2622, 2641), True, 'import numpy as np\n'), ((2643, 2672), 'numpy.reshape', 'np.reshape', (['a', '(actor.a_dim,)'], {}), '(a, (actor.a_dim,))\n', (2653, 2672), True, 'import numpy as np\n'), ((2717, 2747), 'numpy.reshape', 'np.reshape', (['s2', '(actor.s_dim,)'], {}), '(s2, (actor.s_dim,))\n', (2727, 2747), True, 'import numpy as np\n'), ((3662, 3688), 'numpy.amax', 'np.amax', (['predicted_q_value'], {}), '(predicted_q_value)\n', (3669, 3688), True, 'import numpy as np\n'), ((3784, 3806), 'actor.predict', 'actor.predict', (['s_batch'], {}), '(s_batch)\n', (3797, 3806), False, 'import actor\n'), ((3831, 3871), 'critic.action_gradients', 'critic.action_gradients', (['s_batch', 'a_outs'], {}), '(s_batch, a_outs)\n', (3854, 3871), False, 'import critic\n'), ((3888, 3918), 'actor.train', 'actor.train', (['s_batch', 'grads[0]'], {}), '(s_batch, grads[0])\n', (3899, 3918), False, 'import actor\n'), ((3977, 4006), 'actor.update_target_network', 'actor.update_target_network', ([], {}), '()\n', (4004, 4006), False, 'import actor\n'), ((4023, 4053), 'critic.update_target_network', 'critic.update_target_network', ([], {}), '()\n', (4051, 4053), False, 'import critic\n'), ((2488, 2509), 'numpy.reshape', 'np.reshape', (['s', '(1, 3)'], {}), '(s, (1, 3))\n', (2498, 2509), True, 'import numpy as np\n'), ((3162, 3192), 'actor.predict_target', 'actor.predict_target', (['s2_batch'], {}), '(s2_batch)\n', (3182, 3192), False, 'import actor\n'), ((3591, 3627), 'numpy.reshape', 'np.reshape', (['y_i', '(MINIBATCH_SIZE, 1)'], {}), '(y_i, (MINIBATCH_SIZE, 1))\n', (3601, 3627), True, 'import 
numpy as np\n')] |
import pandas as pd
import numpy as np
import pyrolite.geochem
from ..util.log import Handle
logger = Handle(__name__)
def phasename(phaseID):
    """
    Take a phase ID and return the name of the phase.

    Parameters
    ------------
    phaseID : :class:`str`
        ID for the particular phase (e.g. 'olivine_0')

    Returns
    --------
    :class:`str`
        Name of the phase (the part before the first underscore).
    """
    sep = phaseID.find("_")
    # An underscore at index 0 is not treated as a suffix separator.
    return phaseID[:sep] if sep > 0 else phaseID
def tuple_reindex(df, columns=["pressure", "temperature"]):
    """
    Create an index based on tuples from multiple columns.

    Parameters
    -----------
    df: :class:`pandas.DataFrame`
        Table DataFrame to reindex.
    columns : :class:`list`
        List of columns to incorporate into the tuple index.

    Returns
    -------
    :class:`pandas.DataFrame`
        Reindexed DataFrame (modified in place and returned).
    """
    # Integer-cast the key columns first so the tuples are comparable
    # across tables, then use the row tuples as the index.
    int_keys = df.loc[:, columns].astype(int)
    df.index = int_keys.itertuples(index=False)
    return df
def integrate_solid_composition(df, frac=True):
    """
    Integrate solid compositions to return a 'cumulate' like
    composition. Note that in the case of non-fractional crystallisation
    this will correspond to the solid composition.

    Parameters
    -----------
    df : :class:`pandas.DataFrame`
        DataFrame to integrate (a single-experiment table with at least
        'phase', 'mass', 'pressure', 'temperature' and 'step' columns).
    frac : :class:`bool`
        Whether the experiment is a fractional crystallisation experiment.

    Returns
    -----------
    df : :class:`pandas.DataFrame`
        DataFrame containing an integrated solid composition.
    """
    assert not "experiment" in df.columns, "Designed for single tables."
    # Solid-phase rows only; the P-T-step index defines the step ordering.
    slds = df.loc[df.phase == "solid", :]
    idx = (
        df.loc[:, ["pressure", "temperature", "step"]]
        .dropna()
        .drop_duplicates()
        .sort_values("step")
    )
    if frac:
        cumulate = pd.DataFrame(columns=slds.columns, index=idx.index)
        # solids typically don't exist for part of the history, so we need reindex here
        # rather than .loc[<index list>, :]
        cumulate["mass"] = np.nancumsum(slds["mass"].reindex(index=idx.index).values)
        # Compositional columns only, excluding volatile/total bookkeeping.
        chem = slds.reindex(
            index=idx.index,
            columns=[
                i for i in slds.pyrochem.list_compositional if i not in ["S", "H", "V"]
            ],
        )
        chem = chem.apply(pd.to_numeric, errors="coerce")
        # Mass-weight each step's composition before accumulation.
        increments = (
            slds["mass"].reindex(index=idx.index).values[:, np.newaxis] * chem.values
        )
        # NOTE(review): axis=1 accumulates across the element columns of a
        # single step; integrating over crystallisation steps would be
        # axis=0 -- confirm the intended axis.
        cumulate[chem.columns] = np.nancumsum(increments, axis=1)
        cumulate[["pressure", "temperature", "step"]] = slds.loc[
            :, ["pressure", "temperature", "step"]
        ]
    else:
        # Non-fractional: the solid composition at each step is already the
        # integrated composition.
        cumulate = slds.reindex(index=idx.index)
    cumulate.pyrochem.add_MgNo()
    return cumulate
def integrate_solid_proportions(df, frac=True):
    """
    Integrate solid proportions to return a 'cumulate' split by integrated phase
    masses. Note that in the case of non-fractional crystallisation
    this will correspond to the overall solid phase abundances.

    Parameters
    -----------
    df : :class:`pandas.DataFrame`
        DataFrame to integrate (a single-experiment table with at least
        'phaseID', 'mass', 'pressure', 'temperature' and 'step' columns).
    frac : :class:`bool`
        Whether the experiment is a fractional crystallisation experiment.

    Returns
    -----------
    df : :class:`pandas.DataFrame`
        DataFrame containing integrated solid phase proportions (in wt%).
    """
    assert not "experiment" in df.columns, "Designed for single tables."
    # another dataframe for integrated minerals
    # Non-liquid phase IDs present in the table (e.g. 'olivine_0').
    phaseIDs = sorted(
        [
            pID
            for pID in df.phaseID.unique()
            if (not pd.isnull(pID)) and ("liquid" not in pID)
        ]
    )
    idx = (
        df.loc[:, ["pressure", "temperature", "step"]]
        .dropna()
        .drop_duplicates()
        .sort_values("step")
    )
    # empty dataframe
    mindf = pd.DataFrame(
        columns=["pressure", "temperature", "step"] + phaseIDs, index=idx.index
    )
    for p in phaseIDs:  # integrate cumulate mass per phase
        # mindf should have all of the mineral index values
        mindf.loc[df.loc[df.phaseID == p, "mass"].index.values, p] = df.loc[
            df.phaseID == p, "mass"
        ].values
    mindf = mindf.loc[idx.index, :]  # sort index
    if frac:
        mindf = mindf.apply(np.nancumsum, axis=0)  # accumulate minerals
    # fractional mass of total cumulate; all-zero rows become NaN (then 0).
    mindf = mindf.div(mindf.sum(axis=1).replace(0, np.nan), axis=0) * 100.0
    PTS = idx
    mindf.loc[idx.index, ["pressure", "temperature", "step"]] = PTS
    mindf = mindf.fillna(0)
    return mindf
| [
"pandas.DataFrame",
"numpy.nancumsum",
"pandas.isnull"
] | [((3906, 3996), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "(['pressure', 'temperature', 'step'] + phaseIDs)", 'index': 'idx.index'}), "(columns=['pressure', 'temperature', 'step'] + phaseIDs, index=\n idx.index)\n", (3918, 3996), True, 'import pandas as pd\n'), ((1881, 1932), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'slds.columns', 'index': 'idx.index'}), '(columns=slds.columns, index=idx.index)\n', (1893, 1932), True, 'import pandas as pd\n'), ((2555, 2587), 'numpy.nancumsum', 'np.nancumsum', (['increments'], {'axis': '(1)'}), '(increments, axis=1)\n', (2567, 2587), True, 'import numpy as np\n'), ((3667, 3681), 'pandas.isnull', 'pd.isnull', (['pID'], {}), '(pID)\n', (3676, 3681), True, 'import pandas as pd\n')] |
from __future__ import print_function
import torch
from scipy.ndimage.filters import gaussian_filter
import numpy as np
from PIL import Image
import math
import cv2
import matplotlib.pyplot as plt
import torch.nn.functional as functional
import os
from torch.autograd import Variable
def load_heatmap(hm_path):
    """Load a ``.npy`` heatmap and return its first 18 channels channel-first.

    Parameters
    ----------
    hm_path : str
        Path to a numpy file (assumed (H, W, C) layout -- TODO confirm).

    Returns
    -------
    torch.Tensor
        Tensor of shape (18, H, W).
    """
    raw = np.load(hm_path)
    # (H, W, C) -> (C, H, W); equivalent to the original double transpose.
    channel_first = torch.from_numpy(raw).permute(2, 0, 1)
    return channel_first[:18]
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8):
    """Convert the first image of a batched (N, C, H, W) tensor in [-1, 1]
    to an (H, W, 3) numpy image of dtype `imtype`."""
    arr = image_tensor[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # Replicate a single channel to three channels.
        arr = np.tile(arr, (3, 1, 1))
    hwc = np.transpose(arr, (1, 2, 0))
    scaled = (hwc + 1) / 2.0 * 255.0
    return scaled.astype(imtype)
def hmp2pose_by_numpy(hmp_numpy):
    """Extract at most one peak per body part from an (H, W, C>=18) heatmap.

    Each of the 18 part channels is gaussian-smoothed, 4-neighbourhood local
    maxima above 0.01 are located, and only the strongest peak (scored on the
    raw channel) is kept.

    Parameters
    ----------
    hmp_numpy : np.ndarray
        Heatmap array of shape (H, W, C) with C >= 18.

    Returns
    -------
    list
        Length-18 list; entry i is ``[(x, y, score, peak_id)]`` or ``[]``
        when no peak is found for part i.
    """
    all_peaks = []
    peak_counter = 0
    for part in range(18):
        map_ori = hmp_numpy[:, :, part]
        # Smooth to suppress spurious single-pixel maxima.
        # (Renamed from `map`/`max` which shadowed the builtins.)
        smoothed = gaussian_filter(map_ori, sigma=5)

        map_left = np.zeros(smoothed.shape)
        map_left[1:, :] = smoothed[:-1, :]
        map_right = np.zeros(smoothed.shape)
        map_right[:-1, :] = smoothed[1:, :]
        map_up = np.zeros(smoothed.shape)
        map_up[:, 1:] = smoothed[:, :-1]
        map_down = np.zeros(smoothed.shape)
        map_down[:, :-1] = smoothed[:, 1:]

        # 4-neighbourhood local maxima above a small absolute threshold.
        peaks_binary = np.logical_and.reduce(
            (smoothed >= map_left, smoothed >= map_right,
             smoothed >= map_up, smoothed >= map_down, smoothed > 0.01))
        # note reverse: np.nonzero returns (rows, cols) -> stored as (x, y)
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))
        if len(peaks) > 0:
            # Keep only the peak with the highest raw-channel score
            # (first occurrence wins ties).
            best = 0
            for index, peak in enumerate(peaks):
                score = map_ori[peak[1], peak[0]]
                best_score = map_ori[peaks[best][1], peaks[best][0]]
                if score > best_score:
                    best = index
            peaks_with_score = [(peaks[best][0], peaks[best][1],
                                 map_ori[peaks[best][1], peaks[best][0]],
                                 peak_counter)]
            all_peaks.append(peaks_with_score)
            peak_counter += len(peaks_with_score)
        else:
            all_peaks.append([])
    return all_peaks
def hmp2pose(hmp_tensor):
    """Convert a batched channel-first heatmap tensor to a per-part peak list."""
    channels_last = np.transpose(hmp_tensor[0].cpu().float().numpy(), (1, 2, 0))
    return hmp2pose_by_numpy(channels_last)
def hmp2im(heatmap_tensor):
    """Render a heatmap tensor as a full-skeleton pose image."""
    return pose2im_all(hmp2pose(heatmap_tensor))
def pose2im_all(all_peaks):
    """Draw the full 17-limb skeleton (joints and limbs) for `all_peaks`."""
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
               [1, 16], [16, 18]]
    # One BGR colour per body part.
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    return pose2im(all_peaks, limbSeq, colors)
def pose2im_limb(all_peaks):
    """Draw only the 12 body limbs (no head limbs, no joint circles)."""
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
               [10, 11], [2, 12], [12, 13], [13, 14]]
    # One BGR colour per body part.
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    return pose2im(all_peaks, limbSeq, colors, _circle=False)
def pose2im_limb_filter(all_peaks, error, threshold):
    """Draw the 12 body limbs, blacking out limbs whose error exceeds `threshold`."""
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
               [10, 11], [2, 12], [12, 13], [13, 14]]
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    # High-error limbs are drawn in black so they stand out in the render.
    for limb_index, limb_error in enumerate(error):
        if limb_error > threshold:
            colors[limb_index] = [0, 0, 0]
    return pose2im(all_peaks, limbSeq, colors, _circle=False)
def pose2im(all_peaks, limbSeq, colors, _circle=True, _limb=True, imtype=np.uint8):
    """Render a pose as a 256x256 image on a white canvas.

    Parameters
    ----------
    all_peaks : list
        Per-part peak list; entry i is ``[(x, y, ...)]`` or ``[]``.
    limbSeq : list
        Pairs of 1-based part indices to connect.
    colors : list
        Per-limb/part BGR colours.
    _circle : bool
        Whether to draw a filled circle at every detected joint.
    _limb : bool
        Whether to draw the limbs as filled ellipses.
    imtype : numpy dtype
        Output dtype.

    Returns
    -------
    np.ndarray
        (256, 256, 3) canvas of dtype `imtype`.
    """
    canvas = np.zeros(shape=(256, 256, 3))
    canvas.fill(255)
    if _circle:
        for i in range(18):
            for j in range(len(all_peaks[i])):
                cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
    if _limb:
        stickwidth = 4
        for i in range(len(limbSeq)):
            limb = limbSeq[i]
            # Draw each limb on a copy and alpha-blend it back, so limbs
            # appear semi-transparent over previously drawn content.
            cur_canvas = canvas.copy()
            point1_index = limb[0] - 1
            point2_index = limb[1] - 1
            if len(all_peaks[point1_index]) > 0 and len(all_peaks[point2_index]) > 0:
                point1 = all_peaks[point1_index][0][0:2]
                point2 = all_peaks[point2_index][0][0:2]
                X = [point1[1], point2[1]]
                Y = [point1[0], point2[0]]
                mX = np.mean(X)
                mY = np.mean(Y)
                # Limb drawn as a rotated ellipse centred between the joints.
                length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
                cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
                canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
    return canvas.astype(imtype)
def pose2limb(pose):
    """Return the (dx, dy) offsets of the 12 body limbs for one pose.

    A limb with either endpoint undetected yields an empty list entry.
    """
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
               [10, 11], [2, 12], [12, 13], [13, 14]]
    limbs = []
    for start, end in limbSeq:
        joint_a = pose[start - 1]
        joint_b = pose[end - 1]
        if joint_a and joint_b:
            dx = joint_b[0][0] - joint_a[0][0]
            dy = joint_b[0][1] - joint_a[0][1]
            limbs.append([dx, dy])
        else:
            limbs.append([])
    return limbs
def distance_limb(limbs1, limbs2):
    """Per-limb Euclidean distance between two limb-offset lists.

    Parameters
    ----------
    limbs1, limbs2 : list
        Same-length lists of ``[dx, dy]`` offsets; an empty entry marks a
        missing limb.

    Returns
    -------
    np.ndarray
        Per-limb distances (``None`` where a limb is missing in either
        input), with the RMS distance over matched limbs appended last
        (``None`` when no limb matched).
    """
    assert len(limbs1) == len(limbs2)
    error_all = 0
    error_list = []
    count = 0
    for lamb_index in range(len(limbs1)):
        limb1 = limbs1[lamb_index]
        limb2 = limbs2[lamb_index]
        if len(limb1) > 1 and len(limb2) > 1:
            distance = (limb1[0] - limb2[0]) ** 2 + (limb1[1] - limb2[1]) ** 2
            error_all += distance
            count += 1
            # Squared distance stored here, square-rooted below.
            error_list.append(float(distance))
        else:
            # BUGFIX: the original appended float(None), raising TypeError
            # whenever a limb was missing from either input.
            error_list.append(None)
    for i, error in enumerate(error_list):
        if error is not None:
            error_list[i] = math.sqrt(error)
    # RMS over matched limbs; None instead of ZeroDivisionError when no
    # limb was present in both inputs.
    error_list.append(math.sqrt(error_all / count) if count else None)
    return np.array(error_list)
def distance_point(all_peaks, index1, index2):
    """Euclidean distance between the first peaks of two parts.

    Returns 0 when either part has no detected peak.
    """
    try:
        y1, x1 = all_peaks[index1][0][0], all_peaks[index1][0][1]
        y2, x2 = all_peaks[index2][0][0], all_peaks[index2][0][1]
    except IndexError:
        # One of the parts has an empty peak list.
        return 0
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
def crop_head(original_tensor, heatmap_tensor, length):
    """Crop the head region of an image tensor using head keypoints.

    Parameters
    ----------
    original_tensor : torch.Tensor
        Batched image tensor (N, C, H, W) to crop from.
    heatmap_tensor : torch.Tensor
        Batched channel-first heatmap used to locate head joints.
    length : int or None
        Half-size of the crop; when ``None`` it is estimated from the
        nose/eye/ear keypoint distances.

    Returns
    -------
    tuple
        ``(cropped_region, center)`` where center is [row, col].
    """
    ear_offset = 10
    tensor_numpy = heatmap_tensor[0].cpu().float().numpy()
    tensor_numpy = np.transpose(tensor_numpy, (1, 2, 0))
    all_peaks = hmp2pose_by_numpy(tensor_numpy)
    # Average the detected head joints (nose, eyes, ears).
    center = [0, 0]
    count = 0
    for i in [0, 14, 15, 16, 17]:
        if len(all_peaks[i]) > 0:
            center[0] += all_peaks[i][0][1]
            center[1] += all_peaks[i][0][0]
            count += 1
    # NOTE(review): raises ZeroDivisionError when no head joint is detected.
    center[0] /= count
    center[1] /= count
    # BUGFIX: estimate `length` BEFORE using it. Previously `length / 6`
    # was evaluated first, so passing length=None raised TypeError and the
    # estimation branch was unreachable.
    if length is None:
        a = distance_point(all_peaks, 0, 16) + ear_offset
        b = distance_point(all_peaks, 0, 17) + ear_offset
        c = distance_point(all_peaks, 1, 0)
        length = max(int(a), int(b), int(c))
    # Shift the crop centre down slightly relative to the joint average.
    center[0] += (length / 6)
    crop_regeion = crop_patch(original_tensor, center, length)
    return crop_regeion, center
def crop_patch(I, patch_center, patch_radius):
    """Crop a square patch (clamped to the image bounds) from a batched
    tensor ``I`` of shape (N, C, H, W).

    Parameters
    ----------
    I : torch.Tensor
        Batched image tensor.
    patch_center : sequence
        (row, col) centre of the patch.
    patch_radius : int
        Half-size of the patch.

    Returns
    -------
    torch.Tensor
        The sliced view of ``I``.
    """
    [px, py] = [patch_center[0], patch_center[1]]
    r = patch_radius
    up_boundary = int(px - r) if px - r > 0 else 0
    down_boundary = int(px + r + 1) if px + r + 1 < I.size(2) else I.size(2)
    left_boundary = int(py - r) if py - r > 0 else 0
    right_boundary = int(py + r + 1) if py + r + 1 < I.size(3) else I.size(3)
    # NOTE(review): the -1 offsets widen the crop by one pixel, but when a
    # boundary has been clamped to 0 the start index becomes -1 (which
    # Python interprets as the far edge), producing an empty/incorrect
    # slice -- confirm this is the intended behaviour.
    return I[:, :, up_boundary-1:down_boundary, left_boundary-1:right_boundary]
def paste_patch(I, patch, patch_center, patch_radius):
    """Paste `patch` back into the batched tensor ``I`` (N, C, H, W) at the
    region around `patch_center`, modifying ``I`` in place.

    Parameters
    ----------
    I : torch.Tensor
        Destination tensor (mutated).
    patch : torch.Tensor
        Patch whose spatial size must match the destination slice.
    patch_center : sequence
        (row, col) centre of the region.
    patch_radius : int
        Half-size of the region.

    Returns
    -------
    torch.Tensor
        The same tensor ``I``.
    """
    [px, py] = [patch_center[0], patch_center[1]]
    r = patch_radius
    up_boundary = int(px - r) if px - r > 0 else 0
    down_boundary = int(px + r + 1) if px + r + 1 < I.size(2) else I.size(2)
    left_boundary = int(py - r) if py - r > 0 else 0
    right_boundary = int(py + r + 1) if py + r + 1 < I.size(3) else I.size(3)
    # NOTE(review): the +1/+2 row offsets here do not mirror crop_patch's
    # -1 offsets, so a crop/paste round trip is shifted by a few pixels --
    # confirm whether this asymmetry is intentional.
    I[:, :, up_boundary+1:down_boundary+2, left_boundary-1:right_boundary] = patch[:, :, :, :]
    return I
def padRightDownCorner(img, stride, padValue):
    """Pad an (H, W, C) image on the bottom/right so both spatial sizes are
    multiples of `stride`, filling with `padValue`.

    Returns
    -------
    tuple
        ``(img_padded, pad)`` where pad is ``[up, left, down, right]``.
    """
    h = img.shape[0]
    w = img.shape[1]
    pad = [
        0,                                                  # up
        0,                                                  # left
        0 if h % stride == 0 else stride - (h % stride),    # down
        0 if w % stride == 0 else stride - (w % stride),    # right
    ]
    # Constant pad on the bottom/right only (top/left pads are zero).
    img_padded = np.pad(
        img,
        ((pad[0], pad[2]), (pad[1], pad[3]), (0, 0)),
        mode='constant',
        constant_values=padValue,
    )
    return img_padded, pad
def get_height(poses):
    """Estimate figure height, top, and bottom over a list of poses.

    The top of each pose comes from the head joints (0, 14-17) and the
    bottom from the ankle joints (10, 13). The returned top belongs to the
    tallest pose; results are padded by 20px and clamped to a 256px canvas.

    Returns
    -------
    tuple
        ``(height, top, bottom)``.
    """
    height, top, bottom = 0, 1000, 0
    for pose in poses:
        pose_top = 1000
        pose_bottom = 0
        for head_joint in [0, 14, 15, 16, 17]:
            if pose[head_joint] and pose[head_joint][0][1] < pose_top:
                pose_top = pose[head_joint][0][1]
        for foot_joint in [10, 13]:
            if pose[foot_joint] and pose[foot_joint][0][1] > pose_bottom:
                pose_bottom = pose[foot_joint][0][1]
        if pose_bottom > bottom:
            bottom = pose_bottom
        # 40px of slack around the pose; the tallest pose wins the top.
        pose_height = pose_bottom - pose_top + 40
        if pose_height > height:
            height = pose_height
            top = pose_top
    return min(height, 255), max(0, top - 20), min(bottom + 20, 255)
def get_center_from_all(poses):
    """Average the per-pose centres over all poses; returns (center_x, center_y)."""
    sum_x, sum_y, count = 0, 0, 0
    for pose in poses:
        pose_cx, pose_cy = get_center(pose)
        sum_x += pose_cx
        sum_y += pose_cy
        count += 1
    return sum_x / count, sum_y / count
def get_bounding_box(pose):
    """Vertical bounding box of all detected joints, padded by 20px and
    clamped to [0, 255]; returns (top, bottom)."""
    highest, lowest = 1000, 0
    for i in range(18):
        if pose[i]:
            y = pose[i][0][1]
            highest = min(highest, y)
            lowest = max(lowest, y)
    return max(highest - 20, 0), min(lowest + 20, 255)
def get_center(pose):
    """Centre of a pose.

    ``center_x`` is the mean second coordinate over all 18 joints;
    ``center_y`` is the mean first coordinate over torso joints (8, 11, 2, 5).
    Either value is 0 when no contributing joint was detected.
    NOTE(review): the x/y naming mirrors the (row, col) convention of the
    callers -- confirm against crop_head/offset_image.
    """
    all_vals = [pose[i][0][1] for i in range(18) if len(pose[i]) > 0]
    center_x = sum(all_vals) / len(all_vals) if all_vals else 0
    torso_vals = [pose[i][0][0] for i in [8, 11, 2, 5] if len(pose[i]) > 0]
    center_y = sum(torso_vals) / len(torso_vals) if torso_vals else 0
    return center_x, center_y
def find_cloest_joint(joint_x, joint_y, joint_index, target_poses):
    """Index of the target pose whose `joint_index` joint is closest
    (squared distance) to (joint_x, joint_y); 0 if no candidate beats the
    initial 10000 threshold."""
    best_index = 0
    best_distance = 10000
    for pose_index, pose in enumerate(target_poses):
        if not pose[joint_index]:
            continue
        target_x = pose[joint_index][0][0]
        target_y = pose[joint_index][0][1]
        sq_dist = (joint_x - target_x) ** 2 + (joint_y - target_y) ** 2
        if sq_dist < best_distance:
            best_index = pose_index
            best_distance = sq_dist
    return best_index
def find_cloest_limb(libm_index, errors):
    """Index of the error row with the smallest value at `libm_index`
    (0 if nothing beats the initial 1000 threshold)."""
    best_value = 1000
    best_index = 0
    for row_index, row in enumerate(errors):
        if row[libm_index] < best_value:
            best_index = row_index
            best_value = row[libm_index]
    return best_index
def offset_heatmap_channel(source_x, source_y, target_x, target_y, target_channel):
    """Translate `target_channel` so its content at (target_x, target_y)
    lands on (source_x, source_y), filling exposed area with the channel's
    corner value.

    NOTE(review): the crop below indexes rows with ``shape[1]`` and columns
    with ``shape[0]``; this is only correct for square channels -- confirm.
    """
    offset_x = target_x - source_x
    offset_y = target_y - source_y
    # Pad symmetrically by |offset| so the shifted crop stays in bounds;
    # the fill value is the channel's (0, 0) background value.
    target_channel_padding = np.pad(target_channel, ((abs(offset_y), abs(offset_y)), (abs(offset_x), abs(offset_x))), 'constant', constant_values=target_channel[0, 0])
    target_channel_crop = target_channel_padding[
        abs(offset_y) + offset_y: abs(offset_y) + offset_y + target_channel.shape[1],
        abs(offset_x) + offset_x: abs(offset_x) + offset_x + target_channel.shape[0]
    ]
    return target_channel_crop
def replace_heatmaps(source_heatmaps, target_heatmaps):
    """Replace every detected joint channel in `source_heatmaps` with the
    shifted channel of the nearest joint found among `target_heatmaps`.

    Parameters
    ----------
    source_heatmaps, target_heatmaps : sequence of np.ndarray
        (H, W, C) heatmaps.

    Returns
    -------
    tuple
        ``(replaced_heatmaps, refer_map)`` where refer_map[i, j] is the
        target index used for joint j of source pose i (-1 when the joint
        was not detected in the source).
    """
    source_heatmaps_clone = np.copy(source_heatmaps)
    refer_map = np.zeros(shape=(len(source_heatmaps), 18))
    print('Generating the pose from dataset...')
    source_poses = [hmp2pose_by_numpy(heatmap) for heatmap in source_heatmaps]
    target_poses = [hmp2pose_by_numpy(heatmap) for heatmap in target_heatmaps]
    print('Converted the heatmaps to poses!')
    for pose_index, pose in enumerate(source_poses):
        for joint_index, joint in enumerate(pose):
            if len(joint) > 0:
                source_x = joint[0][0]
                source_y = joint[0][1]
                # Nearest matching joint across all target poses.
                cloest_index = find_cloest_joint(source_x, source_y, joint_index, target_poses)
                target_x = target_poses[cloest_index][joint_index][0][0]
                target_y = target_poses[cloest_index][joint_index][0][1]
                target_channel = target_heatmaps[cloest_index][:, :, joint_index]
                # Shift the target channel so its peak lands on the source joint.
                source_heatmaps_clone[pose_index][:, :, joint_index] = offset_heatmap_channel(source_x, source_y, target_x, target_y, target_channel)
                refer_map[pose_index, joint_index] = cloest_index
                print('Replaced pose %d ...' % pose_index)
            else:
                refer_map[pose_index, joint_index] = -1
    return source_heatmaps_clone, refer_map
def translate_image(image, time, offset_x, offset_y):
    """Scale `image` by factor `time`, shift it by the scaled offsets on a
    500x500 white background, and return the central 256x256 crop.

    NOTE(review): `left` is derived from the width (shape[1]) but used as
    the row index, and `top` vice versa; this is only correct for square
    inputs -- confirm.
    """
    offset_x *= time
    offset_y *= time
    image_resize = cv2.resize(image, dsize=(int(image.shape[1] * time), int(image.shape[0] * time)))
    background = np.zeros(shape=(500, 500, 3))
    background.fill(255)
    # Centre the resized image, then apply the (scaled) offsets.
    left = (500 - image_resize.shape[1])/2
    top = (500 - image_resize.shape[0])/2
    left = int(left + offset_x)
    top = int(top + offset_y)
    background[left:(left+image_resize.shape[1]), top:(top+image_resize.shape[0]), :] = image_resize
    background = background[122:378, 122:378, :]
    return background
def offset_image(image, heatmap, padding=20):
    """Crop the figure from `image` using its pose heatmap, rescale it to
    fit within the canvas margins, and recentre it on a 256x256 output.

    Parameters
    ----------
    image : np.ndarray
        (H, W, 3) source image.
    heatmap : np.ndarray
        (H, W, C) heatmap used to locate the figure.
    padding : int
        Margin (in pixels) to keep free at the top and bottom.

    Returns
    -------
    np.ndarray
        (256, 256, 3) recentred image on a black background.
    """
    pose = hmp2pose_by_numpy(heatmap)
    source_top, source_bottom = get_bounding_box(pose)
    source_center_x, source_center_y = get_center(pose)
    # Centre x relative to the crop, not to the full image.
    source_center_x = source_center_x - source_top
    image_crop = image[source_top: source_bottom, :, :]
    # Shrink if the figure extends below the bottom margin.
    if source_bottom > 256 - padding:
        time = (256 - padding - source_top) / image_crop.shape[0]
        image_crop = cv2.resize(image_crop, dsize=(int(image_crop.shape[1] * time), (256 - padding - source_top)))
        source_center_x = source_center_x * time
        source_center_y = source_center_y * time
    # Shrink again if the figure starts above the top margin.
    if source_top < padding:
        time = (source_top + image_crop.shape[0] - padding) / (image_crop.shape[0])
        image_crop = cv2.resize(image_crop,
                                dsize=(int(image_crop.shape[1] * time), (source_top + image_crop.shape[0] - padding)))
        source_center_x = int(source_center_x * time)
        source_center_y = int(source_center_y * time)
    # Paste the crop on an oversized canvas centred on the figure, then
    # take the central 256x256 window.
    background = np.zeros(shape=(500, 500, 3))
    target_left = int(500 / 2 - (source_center_y))
    target_top = int(500 / 2 - 20 - (source_center_x))
    background[target_top: target_top + image_crop.shape[0], target_left:target_left + image_crop.shape[1], :] = image_crop
    background = background[122:378, 122:378, :]
    return background
def channel2image(channel):
    """Render a single heatmap channel as an RGBA image via the 'jet'
    colormap, with alpha linearly scaled from 0.2 (at the channel minimum)
    towards ~1.0 (at the maximum).

    Parameters
    ----------
    channel : np.ndarray
        2-D heatmap channel. The alpha scaling is undefined for a constant
        channel (max == min), matching the original behaviour.

    Returns
    -------
    np.ndarray
        (H, W, 4) RGBA image.
    """
    cmap = plt.get_cmap('jet')
    rgba_img = cmap(channel)
    _max = np.max(channel)
    _min = np.min(channel)
    # Vectorised alpha assignment (was an O(H*W) per-pixel Python loop).
    rgba_img[..., 3] = 0.2 + 0.799 * (channel - _min) / (_max - _min)
    return rgba_img
def heatmap2array(heatmaps, size=256):
    """Copy channel 18 of an (H, W, C) heatmap into the top-left corner of
    a `size` x `size` array pre-filled with the heatmap's global maximum.

    Parameters
    ----------
    heatmaps : np.ndarray
        Heatmap stack of shape (H, W, C) with C > 18 and H, W <= size.
    size : int
        Side length of the output square array.

    Returns
    -------
    np.ndarray
        (size, size) float array.
    """
    arrays = np.full((size, size), heatmaps.max(), dtype=float)
    h = heatmaps.shape[0]
    w = heatmaps.shape[1]
    # Vectorised copy (was a per-pixel Python loop wrapped in a pointless
    # `for channel in [18]` single-iteration loop).
    arrays[:h, :w] = heatmaps[:, :, 18]
    return arrays
def array2image(arrays):
    """Render a 2-D array as an RGBA image using the inverted 'jet' colormap.

    Parameters
    ----------
    arrays : np.ndarray
        2-D array (typically in [0, 1]); values are inverted before mapping.

    Returns
    -------
    np.ndarray
        (H, W, 4) RGBA image.
    """
    # Dead commented-out alpha loop and the unused _max/_min locals removed.
    cmap = plt.get_cmap('jet')
    return cmap(1 - arrays)
def diagnose_network(net, name='network'):
    """Print `name` and the mean absolute gradient over all parameters of
    `net` that currently hold a gradient (0.0 if none do)."""
    total = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            total += torch.mean(torch.abs(param.grad.data))
            count += 1
    mean = total / count if count > 0 else total
    print(name)
    print(mean)
def save_image(image_numpy, image_path):
    """Write a numpy image array to `image_path` via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of an array (and optionally its shape)."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
def mkdirs(paths):
    """Create each directory in `paths` (a list of paths or a single path)."""
    if isinstance(paths, list) and not isinstance(paths, str):
        path_list = paths
    else:
        path_list = [paths]
    for path in path_list:
        mkdir(path)
def mkdir(path):
    """Create directory `path` unless something already exists at that path."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def error_graph(path, start=0, end=1500, multi=1):
    """Plot per-image RMS pixel error between paired ``<i>_real_B.png`` and
    ``<i>_fake_B.png`` files under `path`, saving to 'test_all_D.png'.

    Parameters
    ----------
    path : str
        Directory of result images.
    start, end : int
        Slice of image indices to plot.
    multi : int
        Number of extra files per sample; used to infer the sample count
        (``len(files) / (multi + 2)``).
    """
    item_num = multi+2
    files = os.listdir(path)
    count = int(len(files)/item_num)
    x = list(range(count))[start:end]
    y = []
    for i in range(count)[start:end]:
        real_path = '%s/%d_real_B.png' % (path, i)
        fake_path = '%s/%d_fake_B.png' % (path, i)
        real = cv2.imread(real_path)
        fake = cv2.imread(fake_path)
        # RMS over a fixed 256x256x3 image; assumes all images are that size.
        error = np.power(np.sum(np.square((real-fake))) / (256*256*3), 0.5)
        print('%d: %s' % (i, error))
        y.append(error)
    plt.figure()
    plt.plot(x, y, 'b')
    plt.savefig('test_all_D.png')
def make_dataset(dir_list, phase):
    """Collect file paths containing `phase` under each root of `dir_list`.

    Each root's matches are sorted by the trailing integer of the filename
    stem (e.g. 'train_12.png' -> 12) before being appended to the result.
    """
    images = []
    for dataroot in dir_list:
        assert os.path.isdir(dataroot), '%s is not a valid directory' % dataroot
        matched = [
            os.path.join(root, fname)
            for root, _, fnames in sorted(os.walk(dataroot))
            for fname in fnames
            if phase in fname
        ]
        matched.sort(key=lambda p: int(p.split('/')[-1].split('.')[0].split('_')[-1]))
        images += matched
    return images
def make_test_dataset(dir_list, phase):
    """Collect file paths containing `phase` under each root of `dir_list`.

    Each root's matches are sorted by the LEADING integer of the filename
    stem (e.g. '12_test.png' -> 12) before being appended to the result.
    """
    images = []
    for dataroot in dir_list:
        assert os.path.isdir(dataroot), '%s is not a valid directory' % dataroot
        matched = [
            os.path.join(root, fname)
            for root, _, fnames in sorted(os.walk(dataroot))
            for fname in fnames
            if phase in fname
        ]
        matched.sort(key=lambda p: int(p.split('/')[-1].split('.')[0].split('_')[0]))
        images += matched
    return images
def transfer_feature_mean_and_stdv(F, new_mean, new_stdv):
    """Re-normalise feature map `F` per channel to `new_mean`/`new_stdv`."""
    normalized = (F - mean_channels(F)) / stdv_channels(F)
    return new_stdv * normalized + new_mean
def mean_channels(F):
    """Per-channel spatial mean of a 4-D (N, C, H, W) tensor, broadcast
    back to F's full shape."""
    assert (F.dim() == 4)
    num_spatial = F.size(2) * F.size(3)
    spatial_sum = F.sum(3, keepdim=True).sum(2, keepdim=True)
    return spatial_sum.expand_as(F) / num_spatial
def stdv_channels(F):
    """Per-channel spatial standard deviation (population form, with a
    1e-10 stabiliser) of a 4-D (N, C, H, W) tensor, broadcast back to
    F's full shape."""
    assert (F.dim() == 4)
    centered = F - mean_channels(F)
    variance = centered.pow(2).sum(3, keepdim=True).sum(2, keepdim=True) / (F.size(2) * F.size(3)) + 10**(-10)
    return variance.expand_as(F).pow(0.5)
def convert_m1to1_to_vgg(self, A):
    """Map an image tensor from [-1, 1] to VGG/ImageNet normalisation
    (per-channel mean/std of 0.485/0.456/0.406 and 0.229/0.224/0.225)."""
    A_0to1 = 0.5 * (A + 1)
    imagenet_mean = Variable(self.Tensor([[[[0.485]], [[0.456]], [[0.406]]]]).expand_as(A_0to1))
    imagenet_std = Variable(self.Tensor([[[[0.229]], [[0.224]], [[0.225]]]]).expand_as(A_0to1))
    return (A_0to1 - imagenet_mean) / imagenet_std
def norm_per_channels_m1to1(A, isVar=True):
    """Rescale non-negative activations per (N, C) slice to [-1, 1] using
    the spatial maximum after a ReLU."""
    if isVar == True:
        clipped = functional.relu(A)
    else:
        # Raw tensors are wrapped in a Variable for the op, then unwrapped.
        clipped = functional.relu(Variable(A)).data
    per_slice_max = clipped.max(3, keepdim=True)[0].max(2, keepdim=True)[0]
    scaled = clipped / per_slice_max.expand_as(clipped)
    return 2 * scaled - 1
# error_graph('/mnt/results/experiment10_pix2pix(withD_withoutD)/single_paired_D_loss_with_5_batchsize=16_lambda=100/test_latest/images')
| [
"scipy.ndimage.filters.gaussian_filter",
"math.sqrt",
"torch.from_numpy",
"numpy.array",
"numpy.logical_and.reduce",
"os.walk",
"os.path.exists",
"numpy.mean",
"os.listdir",
"matplotlib.pyplot.plot",
"numpy.max",
"cv2.addWeighted",
"os.path.isdir",
"numpy.concatenate",
"numpy.min",
"to... | [((328, 344), 'numpy.load', 'np.load', (['hm_path'], {}), '(hm_path)\n', (335, 344), True, 'import numpy as np\n'), ((2324, 2358), 'numpy.transpose', 'np.transpose', (['hmp_numpy', '(1, 2, 0)'], {}), '(hmp_numpy, (1, 2, 0))\n', (2336, 2358), True, 'import numpy as np\n'), ((4444, 4473), 'numpy.zeros', 'np.zeros', ([], {'shape': '(256, 256, 3)'}), '(shape=(256, 256, 3))\n', (4452, 4473), True, 'import numpy as np\n'), ((7075, 7095), 'numpy.array', 'np.array', (['error_list'], {}), '(error_list)\n', (7083, 7095), True, 'import numpy as np\n'), ((7353, 7395), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (7362, 7395), False, 'import math\n'), ((7549, 7586), 'numpy.transpose', 'np.transpose', (['tensor_numpy', '(1, 2, 0)'], {}), '(tensor_numpy, (1, 2, 0))\n', (7561, 7586), True, 'import numpy as np\n'), ((9523, 9584), 'numpy.tile', 'np.tile', (['(img_padded[0:1, :, :] * 0 + padValue)', '(pad[0], 1, 1)'], {}), '(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))\n', (9530, 9584), True, 'import numpy as np\n'), ((9598, 9642), 'numpy.concatenate', 'np.concatenate', (['(pad_up, img_padded)'], {'axis': '(0)'}), '((pad_up, img_padded), axis=0)\n', (9612, 9642), True, 'import numpy as np\n'), ((9658, 9719), 'numpy.tile', 'np.tile', (['(img_padded[:, 0:1, :] * 0 + padValue)', '(1, pad[1], 1)'], {}), '(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))\n', (9665, 9719), True, 'import numpy as np\n'), ((9733, 9779), 'numpy.concatenate', 'np.concatenate', (['(pad_left, img_padded)'], {'axis': '(1)'}), '((pad_left, img_padded), axis=1)\n', (9747, 9779), True, 'import numpy as np\n'), ((9795, 9858), 'numpy.tile', 'np.tile', (['(img_padded[-2:-1, :, :] * 0 + padValue)', '(pad[2], 1, 1)'], {}), '(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))\n', (9802, 9858), True, 'import numpy as np\n'), ((9872, 9918), 'numpy.concatenate', 'np.concatenate', (['(img_padded, pad_down)'], {'axis': '(0)'}), 
'((img_padded, pad_down), axis=0)\n', (9886, 9918), True, 'import numpy as np\n'), ((9935, 9998), 'numpy.tile', 'np.tile', (['(img_padded[:, -2:-1, :] * 0 + padValue)', '(1, pad[3], 1)'], {}), '(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))\n', (9942, 9998), True, 'import numpy as np\n'), ((10012, 10059), 'numpy.concatenate', 'np.concatenate', (['(img_padded, pad_right)'], {'axis': '(1)'}), '((img_padded, pad_right), axis=1)\n', (10026, 10059), True, 'import numpy as np\n'), ((13521, 13545), 'numpy.copy', 'np.copy', (['source_heatmaps'], {}), '(source_heatmaps)\n', (13528, 13545), True, 'import numpy as np\n'), ((15005, 15034), 'numpy.zeros', 'np.zeros', ([], {'shape': '(500, 500, 3)'}), '(shape=(500, 500, 3))\n', (15013, 15034), True, 'import numpy as np\n'), ((16934, 16963), 'numpy.zeros', 'np.zeros', ([], {'shape': '(500, 500, 3)'}), '(shape=(500, 500, 3))\n', (16942, 16963), True, 'import numpy as np\n'), ((17306, 17325), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (17318, 17325), True, 'import matplotlib.pyplot as plt\n'), ((17366, 17381), 'numpy.max', 'np.max', (['channel'], {}), '(channel)\n', (17372, 17381), True, 'import numpy as np\n'), ((17393, 17408), 'numpy.min', 'np.min', (['channel'], {}), '(channel)\n', (17399, 17408), True, 'import numpy as np\n'), ((17646, 17674), 'numpy.zeros', 'np.zeros', ([], {'shape': '(size, size)'}), '(shape=(size, size))\n', (17654, 17674), True, 'import numpy as np\n'), ((18039, 18058), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (18051, 18058), True, 'import matplotlib.pyplot as plt\n'), ((18100, 18114), 'numpy.max', 'np.max', (['arrays'], {}), '(arrays)\n', (18106, 18114), True, 'import numpy as np\n'), ((18126, 18140), 'numpy.min', 'np.min', (['arrays'], {}), '(arrays)\n', (18132, 18140), True, 'import numpy as np\n'), ((18690, 18718), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (18705, 18718), 
False, 'from PIL import Image\n'), ((19393, 19409), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (19403, 19409), False, 'import os\n'), ((19851, 19863), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19861, 19863), True, 'import matplotlib.pyplot as plt\n'), ((19868, 19887), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b"""'], {}), "(x, y, 'b')\n", (19876, 19887), True, 'import matplotlib.pyplot as plt\n'), ((19892, 19921), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test_all_D.png"""'], {}), "('test_all_D.png')\n", (19903, 19921), True, 'import matplotlib.pyplot as plt\n'), ((763, 794), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (770, 794), True, 'import numpy as np\n'), ((1066, 1099), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['map_ori'], {'sigma': '(5)'}), '(map_ori, sigma=5)\n', (1081, 1099), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((1120, 1139), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (1128, 1139), True, 'import numpy as np\n'), ((1198, 1217), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (1206, 1217), True, 'import numpy as np\n'), ((1274, 1293), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (1282, 1293), True, 'import numpy as np\n'), ((1349, 1368), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (1357, 1368), True, 'import numpy as np\n'), ((1431, 1538), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > 0.01)'], {}), '((map >= map_left, map >= map_right, map >= map_up, \n map >= map_down, map > 0.01))\n', (1452, 1538), True, 'import numpy as np\n'), ((7036, 7064), 'math.sqrt', 'math.sqrt', (['(error_all / count)'], {}), '(error_all / count)\n', (7045, 7064), False, 'import math\n'), ((19257, 19277), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (19271, 
19277), False, 'import os\n'), ((19287, 19304), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (19298, 19304), False, 'import os\n'), ((19651, 19672), 'cv2.imread', 'cv2.imread', (['real_path'], {}), '(real_path)\n', (19661, 19672), False, 'import cv2\n'), ((19688, 19709), 'cv2.imread', 'cv2.imread', (['fake_path'], {}), '(fake_path)\n', (19698, 19709), False, 'import cv2\n'), ((20041, 20064), 'os.path.isdir', 'os.path.isdir', (['dataroot'], {}), '(dataroot)\n', (20054, 20064), False, 'import os\n'), ((20581, 20604), 'os.path.isdir', 'os.path.isdir', (['dataroot'], {}), '(dataroot)\n', (20594, 20604), False, 'import os\n'), ((21978, 21996), 'torch.nn.functional.relu', 'functional.relu', (['A'], {}), '(A)\n', (21993, 21996), True, 'import torch.nn.functional as functional\n'), ((397, 423), 'torch.from_numpy', 'torch.from_numpy', (['hm_array'], {}), '(hm_array)\n', (413, 423), False, 'import torch\n'), ((6928, 6944), 'math.sqrt', 'math.sqrt', (['error'], {}), '(error)\n', (6937, 6944), False, 'import math\n'), ((20145, 20162), 'os.walk', 'os.walk', (['dataroot'], {}), '(dataroot)\n', (20152, 20162), False, 'import os\n'), ((20685, 20702), 'os.walk', 'os.walk', (['dataroot'], {}), '(dataroot)\n', (20692, 20702), False, 'import os\n'), ((814, 850), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (826, 850), True, 'import numpy as np\n'), ((4603, 4671), 'cv2.circle', 'cv2.circle', (['canvas', 'all_peaks[i][j][0:2]', '(4)', 'colors[i]'], {'thickness': '(-1)'}), '(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)\n', (4613, 4671), False, 'import cv2\n'), ((5204, 5214), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (5211, 5214), True, 'import numpy as np\n'), ((5236, 5246), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (5243, 5246), True, 'import numpy as np\n'), ((5558, 5608), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['cur_canvas', 'polygon', 'colors[i]'], {}), '(cur_canvas, polygon, 
colors[i])\n', (5576, 5608), False, 'import cv2\n'), ((5634, 5682), 'cv2.addWeighted', 'cv2.addWeighted', (['canvas', '(0.4)', 'cur_canvas', '(0.6)', '(0)'], {}), '(canvas, 0.4, cur_canvas, 0.6, 0)\n', (5649, 5682), False, 'import cv2\n'), ((18502, 18528), 'torch.abs', 'torch.abs', (['param.grad.data'], {}), '(param.grad.data)\n', (18511, 18528), False, 'import torch\n'), ((22040, 22051), 'torch.autograd.Variable', 'Variable', (['A'], {}), '(A)\n', (22048, 22051), False, 'from torch.autograd import Variable\n'), ((1573, 1597), 'numpy.nonzero', 'np.nonzero', (['peaks_binary'], {}), '(peaks_binary)\n', (1583, 1597), True, 'import numpy as np\n'), ((1602, 1626), 'numpy.nonzero', 'np.nonzero', (['peaks_binary'], {}), '(peaks_binary)\n', (1612, 1626), True, 'import numpy as np\n'), ((5387, 5423), 'math.atan2', 'math.atan2', (['(X[0] - X[1])', '(Y[0] - Y[1])'], {}), '(X[0] - X[1], Y[0] - Y[1])\n', (5397, 5423), False, 'import math\n'), ((19001, 19011), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (19008, 19011), True, 'import numpy as np\n'), ((19013, 19022), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (19019, 19022), True, 'import numpy as np\n'), ((19024, 19033), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (19030, 19033), True, 'import numpy as np\n'), ((19035, 19047), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (19044, 19047), True, 'import numpy as np\n'), ((19049, 19058), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (19055, 19058), True, 'import numpy as np\n'), ((19742, 19764), 'numpy.square', 'np.square', (['(real - fake)'], {}), '(real - fake)\n', (19751, 19764), True, 'import numpy as np\n'), ((20260, 20285), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (20272, 20285), False, 'import os\n'), ((20800, 20825), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (20812, 20825), False, 'import os\n')] |
#!/usr/bin/env python3
# coding=utf-8
from math import ceil, floor
import librosa
from librosa import display
import numpy as np
import pandas as pd
import soundfile as sf
from collections import namedtuple
import re
def load_csv(filename: str, sep=',') -> pd.DataFrame:
    """Read a delimited file into a DataFrame, skipping '#' comment lines."""
    return pd.read_csv(filename, comment='#', sep=sep)
# Lightweight record describing an audio file and its basic properties.
Signal = namedtuple(
    'Signal',
    ['filename', 'info', 'sample_rate', 'tot_samples', 'tot_duration_ms'])
def load_signal(filename: str) -> Signal:
    """Probe *filename* with soundfile and wrap its basic properties in a Signal."""
    print('- loading %s' % filename)
    meta = sf.info(filename)
    rate = meta.samplerate
    n_samples = int(meta.duration * rate)
    duration_ms = 1000 * meta.duration
    return Signal(filename, meta, rate, n_samples, duration_ms)
def show_signal(signal: Signal):
    """Print a short summary of *signal*: duration, sample count and rate."""
    template = '%s\n tot_duration: %s tot_samples: %s sample_rate: %s\n'
    print(template % (signal.filename,
                      signal.info.duration,
                      signal.tot_samples,
                      signal.sample_rate))
def plot_spectrogram(interval: np.ndarray,
                     sample_rate,
                     ax,
                     window_size=1024,
                     window_offset=512,
                     fmin=0,
                     fmax=16000,
                     cmap=None,
                     ):
    """Draw a mel-scaled dB spectrogram of *interval* onto the axes *ax*."""
    # STFT magnitude, then convert amplitude to decibels relative to the peak.
    magnitude = np.abs(librosa.stft(y=interval,
                                  n_fft=window_size,
                                  hop_length=window_offset))
    stft_db = librosa.amplitude_to_db(magnitude, ref=np.max)
    display.specshow(stft_db, y_axis='mel', x_axis='time',
                     sr=sample_rate,
                     cmap=cmap or 'Blues',
                     fmin=fmin, fmax=fmax,
                     ax=ax)
def short_term_lpc(y: np.ndarray,
                   sample_rate: float,
                   window_size: int,
                   window_offset: int,
                   num_points_per_window: int,
                   prediction_order: int,
                   ):
    """Short-term LPC: one spectral envelope (in dB) per analysis window.

    The returned array is transposed so that windows run along columns.
    """
    num_windows = 1 + (len(y) - window_size) // window_offset
    print('short_term_lpc: num_windows = {}'.format(num_windows))
    envelopes = []
    for start in range(0, num_windows * window_offset, window_offset):
        frame = y[start: start + window_size]
        coeffs = librosa.lpc(frame, prediction_order)
        # Magnitude of the LPC polynomial's frequency response, in dB.
        spectrum = np.abs(np.fft.rfft(a=coeffs, n=num_points_per_window))
        envelopes.append(librosa.amplitude_to_db(spectrum, ref=np.max))
    return np.transpose(np.array(envelopes))
def plot_lpc_spectrogram(interval: np.ndarray,
                         sample_rate,
                         prediction_order,
                         ax,
                         window_size=1024,
                         window_offset=512,
                         num_points_per_window=512,
                         cmap=None
                         ):
    """Render short-term LPC envelopes of *interval* as an image on *ax*."""
    envelope_matrix = short_term_lpc(
        y=interval,
        sample_rate=sample_rate,
        window_size=window_size,
        window_offset=window_offset,
        num_points_per_window=num_points_per_window,
        prediction_order=prediction_order)
    ax.imshow(envelope_matrix,
              origin='lower',
              aspect='auto',
              cmap=cmap or 'pink',
              )
def extract_selection_number(c12n_row: pd.core.series.Series,
                             column_name: str
                             ):
    """Extract the integer selection number from a '.seq' path in *c12n_row*.

    e.g. "data/sequences/M256/M/01971.seq" -> 1971; returns -1 on no match.
    """
    path = c12n_row.get(column_name)
    match = re.search(r'([^/]+)\.seq$', path)
    return int(match.group(1)) if match else -1
# One row of a Raven-style selection table (time/frequency bounds plus type).
Selection = namedtuple(
    'Selection',
    ['selection', 'view', 'channel', 'begin_time_s', 'end_time_s',
     'low_freq_hz', 'high_freq_hz', 'type_'])
def get_selection(segments_df: pd.DataFrame, selection_number: int) -> Selection:
    """Return the Selection whose 'Selection' column equals *selection_number*.

    Returns None (implicitly) when no row matches.
    """
    matching = segments_df.loc[segments_df['Selection'] == selection_number]
    if matching.empty:
        return None
    first = matching.iloc[0, :]
    return Selection(
        first.get('Selection'),
        first.get('View'),
        first.get('Channel'),
        first.get('Begin Time (s)'),
        first.get('End Time (s)'),
        first.get('Low Freq (Hz)'),
        first.get('High Freq (Hz)'),
        first.get('Type'),
    )
def get_selections_and_c12n(segments_df: pd.DataFrame,
                            c12n: pd.DataFrame,
                            desired_rank=None,
                            desired_class_name=None
                            ) -> [(Selection, pd.core.series.Series)]:
    """Pair each classification row with its Selection.

    Rows without a parseable selection number are skipped; optional
    *desired_rank* / *desired_class_name* filters restrict the result.
    """
    pairs = []
    for _, row in c12n.iterrows():
        sel_num = extract_selection_number(row, 'seq_filename')
        if sel_num < 0:
            continue
        if desired_rank is not None and int(desired_rank) != row.get('rank'):
            continue
        if desired_class_name is not None and \
                desired_class_name != row.get('seq_class_name'):
            continue
        pairs.append((get_selection(segments_df, sel_num), row))
    return pairs
def get_signal_interval(signal, start_time_ms, duration_ms) -> np.ndarray:
    """Read *duration_ms* worth of samples from *signal*'s file, starting at *start_time_ms*."""
    first_frame = floor(start_time_ms * signal.sample_rate / 1000)
    n_frames = ceil(duration_ms * signal.sample_rate / 1000)
    samples, _ = sf.read(signal.filename, start=first_frame, frames=n_frames)
    return samples
def get_signal_interval_from_selection(signal: Signal,
                                       selection: Selection
                                       ) -> np.ndarray or None:
    """Read the samples covered by *selection* from *signal*.

    Returns None (after printing a warning) when the selection extends
    past the end of the signal.
    """
    start_time_ms = 1000.0 * selection.begin_time_s
    end_time_ms = 1000.0 * selection.end_time_s
    duration_ms = end_time_ms - start_time_ms
    # Fix: use <= so a selection ending exactly at the signal end is accepted
    # (previously `<` rejected that boundary case).
    if start_time_ms + duration_ms <= signal.tot_duration_ms:
        return get_signal_interval(signal, start_time_ms, duration_ms)
    else:
        print('WARN: interval beyond signal length')
        return None
# A chunk of samples plus where it starts and how long it is (seconds).
SignalInterval = namedtuple(
    'SignalInterval',
    ['interval',      # np.ndarray of samples
     'begin_time_s',  # float
     'duration_s',    # float
     ])
def get_signal_interval_for_min_max_selections(signal: Signal,
                                               min_selection: Selection,
                                               max_selection: Selection,
                                               max_seconds: float
                                               ) -> SignalInterval:
    """Read the samples spanning from the start of *min_selection* to the
    end of *max_selection*, capping the span at *max_seconds*.

    NOTE(review): exits the whole process when the span falls outside the
    signal — callers cannot recover from that path.
    """
    cap_ms = 1000 * max_seconds
    start_time_ms = 1000.0 * min_selection.begin_time_s
    span_ms = 1000.0 * max_selection.end_time_s - start_time_ms
    if span_ms > cap_ms:
        print('applying max_seconds={}'.format(max_seconds))
        span_ms = cap_ms
    if start_time_ms + span_ms >= signal.tot_duration_ms:
        print('WARN: interval beyond signal length')
        exit(1)
    samples = get_signal_interval(signal, start_time_ms, span_ms)
    return SignalInterval(samples,
                          min_selection.begin_time_s,
                          span_ms / 1000.)
| [
"soundfile.info",
"librosa.amplitude_to_db",
"collections.namedtuple",
"math.ceil",
"pandas.read_csv",
"math.floor",
"librosa.lpc",
"numpy.fft.rfft",
"librosa.display.specshow",
"librosa.stft",
"soundfile.read",
"numpy.transpose",
"re.search"
] | [((411, 506), 'collections.namedtuple', 'namedtuple', (['"""Signal"""', "['filename', 'info', 'sample_rate', 'tot_samples', 'tot_duration_ms']"], {}), "('Signal', ['filename', 'info', 'sample_rate', 'tot_samples',\n 'tot_duration_ms'])\n", (421, 506), False, 'from collections import namedtuple\n'), ((4190, 4321), 'collections.namedtuple', 'namedtuple', (['"""Selection"""', "['selection', 'view', 'channel', 'begin_time_s', 'end_time_s',\n 'low_freq_hz', 'high_freq_hz', 'type_']"], {}), "('Selection', ['selection', 'view', 'channel', 'begin_time_s',\n 'end_time_s', 'low_freq_hz', 'high_freq_hz', 'type_'])\n", (4200, 4321), False, 'from collections import namedtuple\n'), ((6773, 6845), 'collections.namedtuple', 'namedtuple', (['"""SignalInterval"""', "['interval', 'begin_time_s', 'duration_s']"], {}), "('SignalInterval', ['interval', 'begin_time_s', 'duration_s'])\n", (6783, 6845), False, 'from collections import namedtuple\n'), ((324, 367), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'comment': '"""#"""', 'sep': 'sep'}), "(filename, comment='#', sep=sep)\n", (335, 367), True, 'import pandas as pd\n'), ((618, 635), 'soundfile.info', 'sf.info', (['filename'], {}), '(filename)\n', (625, 635), True, 'import soundfile as sf\n'), ((2943, 2960), 'numpy.transpose', 'np.transpose', (['res'], {}), '(res)\n', (2955, 2960), True, 'import numpy as np\n'), ((4060, 4098), 're.search', 're.search', (['"""([^/]+)\\\\.seq$"""', 'col_value'], {}), "('([^/]+)\\\\.seq$', col_value)\n", (4069, 4098), False, 'import re\n'), ((5863, 5911), 'math.floor', 'floor', (['(start_time_ms * signal.sample_rate / 1000)'], {}), '(start_time_ms * signal.sample_rate / 1000)\n', (5868, 5911), False, 'from math import ceil, floor\n'), ((5930, 5975), 'math.ceil', 'ceil', (['(duration_ms * signal.sample_rate / 1000)'], {}), '(duration_ms * signal.sample_rate / 1000)\n', (5934, 5975), False, 'from math import ceil, floor\n'), ((6075, 6139), 'soundfile.read', 'sf.read', (['signal.filename'], 
{'start': 'start_sample', 'frames': 'num_samples'}), '(signal.filename, start=start_sample, frames=num_samples)\n', (6082, 6139), True, 'import soundfile as sf\n'), ((1680, 1721), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['stft'], {'ref': 'np.max'}), '(stft, ref=np.max)\n', (1703, 1721), False, 'import librosa\n'), ((1758, 1881), 'librosa.display.specshow', 'display.specshow', (['stft'], {'y_axis': '"""mel"""', 'x_axis': '"""time"""', 'sr': 'sample_rate', 'cmap': "(cmap or 'Blues')", 'fmin': 'fmin', 'fmax': 'fmax', 'ax': 'ax'}), "(stft, y_axis='mel', x_axis='time', sr=sample_rate, cmap=\n cmap or 'Blues', fmin=fmin, fmax=fmax, ax=ax)\n", (1774, 1881), False, 'from librosa import display\n'), ((2601, 2638), 'librosa.lpc', 'librosa.lpc', (['window', 'prediction_order'], {}), '(window, prediction_order)\n', (2612, 2638), False, 'import librosa\n'), ((2797, 2841), 'librosa.amplitude_to_db', 'librosa.amplitude_to_db', (['abs_val'], {'ref': 'np.max'}), '(abs_val, ref=np.max)\n', (2820, 2841), False, 'import librosa\n'), ((1524, 1593), 'librosa.stft', 'librosa.stft', ([], {'y': 'interval', 'n_fft': 'window_size', 'hop_length': 'window_offset'}), '(y=interval, n_fft=window_size, hop_length=window_offset)\n', (1536, 1593), False, 'import librosa\n'), ((2732, 2782), 'numpy.fft.rfft', 'np.fft.rfft', ([], {'a': 'lpc_coeffs', 'n': 'num_points_per_window'}), '(a=lpc_coeffs, n=num_points_per_window)\n', (2743, 2782), True, 'import numpy as np\n')] |
import sys
import numpy as np
# Paths to the training feature (X) and label (Y) CSV files, taken from the
# command line: python <script> <X_path> <Y_path>
X_TRAIN_PATH = sys.argv[1]
Y_TRAIN_PATH = sys.argv[2]
print("Running the File", sys.argv[0])
print("Directory 1: ", X_TRAIN_PATH)
print("Directory 2: ", Y_TRAIN_PATH)
'''
For Testing
'''
'''
X_TRAIN_PATH = 'X_train'
Y_TRAIN_PATH = 'Y_train'
'''
# Load both CSVs as float arrays, skipping the single header row.
X_train = np.genfromtxt(X_TRAIN_PATH, delimiter=',', skip_header=1)
Y_train = np.genfromtxt(Y_TRAIN_PATH, delimiter=',', skip_header=1)
"""Do the normalization of the data"""
def normalizeColumn(X, specifiedColumns = None, X_mean = None, X_stdev = None):
    """Standardize the selected columns of X in place (zero mean, unit stdev).

    Parameters
    ----------
    X : np.ndarray
        2-D data matrix; the selected columns are overwritten.
    specifiedColumns : sequence of int, optional
        Column indices to normalize; defaults to all columns.
    X_mean, X_stdev : np.ndarray, optional
        Precomputed statistics of shape (1, len(specifiedColumns)).  When
        supplied (e.g. applying training-set statistics to test data) they
        are used instead of being recomputed; previously these arguments
        were accepted but silently ignored.

    Returns
    -------
    (X, X_mean, X_stdev)
    """
    # 'is None' instead of '== None': comparing an array with == broadcasts
    # and makes the truth value ambiguous.
    if specifiedColumns is None:
        specifiedColumns = np.arange(X.shape[1])
    length = len(specifiedColumns)
    if X_mean is None:
        X_mean = np.reshape(np.mean(X[:,specifiedColumns], 0), (1, length))
    if X_stdev is None:
        X_stdev = np.reshape(np.std(X[:,specifiedColumns], 0), (1, length))
    X[:,specifiedColumns] = np.divide(np.subtract(X[:, specifiedColumns], X_mean), X_stdev)
    return X, X_mean, X_stdev
'''Shuffle the data in a random order'''
def shuffle(X, Y):
    """Return X and Y permuted by one shared random order (rows stay paired)."""
    order = np.arange(len(X))
    np.random.shuffle(order)
    return X[order], Y[order]
'''Split the data into training data and validation data'''
def splitTrainAndValidationData(X, Y, validation_size = 0.1):
    """Split X, Y into a leading training part and a trailing validation part."""
    n_train = int(round(len(X) * (1 - validation_size)))
    return X[:n_train], Y[:n_train], X[n_train:], Y[n_train:]
def sigmoid(Z):
    """Logistic function, clipped to (1e-6, 1 - 1e-6) for numerical safety."""
    value = 1 / (1 + np.exp(-Z))
    return np.clip(value, 1e-6, 1 - 1e-6)
def getY(X,w,b):
    """Predicted probabilities: sigmoid(X @ w + b)."""
    logits = np.matmul(X, w) + b
    return sigmoid(logits)
def getRoundY(y):
    """Threshold probabilities at 0.5 in place: < 0.5 -> 0, otherwise 1."""
    for idx in range(len(y)):
        y[idx] = 0 if y[idx] < 0.5 else 1
    return y
def computeCrossEntropy(y, y_label):
    """Total (unnormalized) binary cross-entropy between predictions y and labels."""
    positive_term = np.dot(y_label, np.log(y))
    negative_term = np.dot(1 - y_label, np.log(1 - y))
    return -positive_term - negative_term
def getLoss(y, y_label):
    """Alias: the total cross-entropy loss of predictions *y* against *y_label*."""
    return computeCrossEntropy(y, y_label)
def getGradient(X, y_label, w, b):
    """Gradients of the mean cross-entropy with respect to w and b."""
    residual = y_label - getY(X, w, b)
    w_grad = -np.mean(np.multiply(residual.T, X.T), axis = 1)
    b_grad = -np.mean(residual)
    return w_grad, b_grad
def getAccuracy(y, y_label):
    """Fraction of predictions that exactly match the labels."""
    matches = np.count_nonzero(y == y_label)
    return matches / len(y)
def train(X, Y, method = 'GRADIENT_ADAM'):
    """Fit a logistic-regression model with mini-batch gradient descent.

    method is 'GRADIENT' (plain SGD with a 1/sqrt(step) learning-rate
    decay) or 'GRADIENT_ADAM' (Adam).  Returns
    (w, b, train_accuracy_history, validation_accuracy_history,
     train_loss_history, validation_loss_history), one entry per epoch.
    """
    # Hold out the trailing 10% of the rows as a validation set.
    validation_size = 0.1
    X_train, y_label, X_validation, y_validation = splitTrainAndValidationData(X, Y, validation_size)
    print(X_train.shape)
    print(y_label.shape)
    print(X_validation.shape)
    print(y_validation.shape)
    '''Initialize the weight and bias'''
    w = np.zeros(X_train.shape[1])
    b = np.zeros([1])
    eipsilon = 1e-8  # numerical-stability term for Adam (sic: 'epsilon')
    if method == 'GRADIENT_ADAM':
        # Adam decay rates and zero-initialized first (v_*) and second (s_*)
        # moment accumulators for the weights and the bias.
        beta1 = 0.9
        beta2 = 0.999
        v_w = np.zeros(w.shape)
        s_w = np.zeros(w.shape)
        v_b = np.zeros(b.shape)
        s_b = np.zeros(b.shape)
    max_interation = 41  # number of epochs (sic: 'iteration')
    batch_size = 25
    learningRate = 0.0001
    step = 1  # global update counter, shared across epochs
    trainAccuracy_list = []
    trainLoss_list = []
    validationAccuracy_list = []
    validationLoss_list = []
    for epoch in range(max_interation):
        # Re-shuffle the training rows each epoch, keeping X/Y rows paired.
        X_train_epoch, y_train_epoch = shuffle(X_train, y_label)
        for i in range(int(np.floor(len(X_train)) / batch_size)):
            X_train_batch = X_train_epoch[i * batch_size: (i + 1) * batch_size]
            y_train_batch = y_train_epoch[i * batch_size: (i + 1) * batch_size]
            if method == 'GRADIENT':
                # Plain SGD: learning rate decays as 1/sqrt(step).
                w_grad, b_grad = getGradient(X_train_batch, y_train_batch, w, b)
                w = w - learningRate / np.sqrt(step) * w_grad
                b = b - learningRate / np.sqrt(step) * b_grad
            elif method == 'GRADIENT_ADAM':
                w_grad, b_grad = getGradient(X_train_batch, y_train_batch, w, b)
                # Exponential moving averages of the gradient (v_*) and the
                # element-wise squared gradient (s_*).
                v_w = beta1 * v_w + (1 - beta1) * w_grad
                s_w = beta2 * s_w + (1 - beta2) * w_grad ** 2
                v_b = beta1 * v_b + (1 - beta1) * b_grad
                s_b = beta2 * s_b + (1 - beta2) * b_grad ** 2
                # Bias correction for the zero-initialized moments.
                v_w_correction = v_w / (1 - beta1 ** step)
                s_w_correction = s_w / (1 - beta2 ** step)
                v_b_correction = v_b / (1 - beta1 ** step)
                s_b_correction = s_b / (1 - beta2 ** step)
                w = w - learningRate * v_w_correction / (np.sqrt(s_w_correction) + eipsilon)
                b = b - learningRate * v_b_correction / (np.sqrt(s_b_correction) + eipsilon)
            step += 1
        # End-of-epoch bookkeeping: mean loss and accuracy on both splits.
        y_train_predicted = getY(X_train, w, b)
        trainLoss_list.append(getLoss(y_train_predicted, y_label) / len(y_train_predicted))
        y_train_predicted = getRoundY(y_train_predicted)
        trainAccuracy_list.append(getAccuracy(y_train_predicted, y_label))
        y_validation_predicted = getY(X_validation, w, b)
        validationLoss_list.append(getLoss(y_validation_predicted, y_validation) / len(y_validation_predicted))
        y_validation_predicted = getRoundY(y_validation_predicted)
        validationAccuracy_list.append(getAccuracy(y_validation_predicted, y_validation))
        print("Epoch", epoch, " Training Accuracy: ", (getAccuracy(y_train_predicted, y_label)), " Validation Accuracy: ", (getAccuracy(y_validation_predicted, y_validation)))
    return w, b, trainAccuracy_list, validationAccuracy_list, trainLoss_list, validationLoss_list
# Normalize the features in place, then fit the model with Adam.
X_train, X_mean, X_stdev = normalizeColumn(X_train)
weight, bias, trainAccList, validationAccList, trainLossList, validationLossList = train(X_train, Y_train, method = 'GRADIENT_ADAM')
'''
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(trainAccList)
plt.plot(validationAccList)
plt.figure(2)
plt.plot(trainLossList)
plt.plot(validationLossList)
plt.legend(['train', 'validation'])
plt.show()
'''
| [
"numpy.mean",
"numpy.multiply",
"numpy.sqrt",
"numpy.log",
"numpy.subtract",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.matmul",
"numpy.std",
"numpy.genfromtxt",
"numpy.arange",
"numpy.random.shuffle"
] | [((291, 348), 'numpy.genfromtxt', 'np.genfromtxt', (['X_TRAIN_PATH'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(X_TRAIN_PATH, delimiter=',', skip_header=1)\n", (304, 348), True, 'import numpy as np\n'), ((359, 416), 'numpy.genfromtxt', 'np.genfromtxt', (['Y_TRAIN_PATH'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(Y_TRAIN_PATH, delimiter=',', skip_header=1)\n", (372, 416), True, 'import numpy as np\n'), ((1045, 1075), 'numpy.random.shuffle', 'np.random.shuffle', (['randomIndex'], {}), '(randomIndex)\n', (1062, 1075), True, 'import numpy as np\n'), ((2476, 2502), 'numpy.zeros', 'np.zeros', (['X_train.shape[1]'], {}), '(X_train.shape[1])\n', (2484, 2502), True, 'import numpy as np\n'), ((2511, 2524), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (2519, 2524), True, 'import numpy as np\n'), ((598, 619), 'numpy.arange', 'np.arange', (['X.shape[1]'], {}), '(X.shape[1])\n', (607, 619), True, 'import numpy as np\n'), ((688, 722), 'numpy.mean', 'np.mean', (['X[:, specifiedColumns]', '(0)'], {}), '(X[:, specifiedColumns], 0)\n', (695, 722), True, 'import numpy as np\n'), ((761, 794), 'numpy.std', 'np.std', (['X[:, specifiedColumns]', '(0)'], {}), '(X[:, specifiedColumns], 0)\n', (767, 794), True, 'import numpy as np\n'), ((855, 898), 'numpy.subtract', 'np.subtract', (['X[:, specifiedColumns]', 'X_mean'], {}), '(X[:, specifiedColumns], X_mean)\n', (866, 898), True, 'import numpy as np\n'), ((2023, 2036), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (2030, 2036), True, 'import numpy as np\n'), ((2109, 2129), 'numpy.sum', 'np.sum', (['(y == y_label)'], {}), '(y == y_label)\n', (2115, 2129), True, 'import numpy as np\n'), ((2654, 2671), 'numpy.zeros', 'np.zeros', (['w.shape'], {}), '(w.shape)\n', (2662, 2671), True, 'import numpy as np\n'), ((2686, 2703), 'numpy.zeros', 'np.zeros', (['w.shape'], {}), '(w.shape)\n', (2694, 2703), True, 'import numpy as np\n'), ((2718, 2735), 'numpy.zeros', 'np.zeros', (['b.shape'], {}), '(b.shape)\n', (2726, 
2735), True, 'import numpy as np\n'), ((2750, 2767), 'numpy.zeros', 'np.zeros', (['b.shape'], {}), '(b.shape)\n', (2758, 2767), True, 'import numpy as np\n'), ((1518, 1533), 'numpy.matmul', 'np.matmul', (['X', 'w'], {}), '(X, w)\n', (1527, 1533), True, 'import numpy as np\n'), ((1781, 1794), 'numpy.log', 'np.log', (['(1 - y)'], {}), '(1 - y)\n', (1787, 1794), True, 'import numpy as np\n'), ((1973, 1997), 'numpy.multiply', 'np.multiply', (['loss.T', 'X.T'], {}), '(loss.T, X.T)\n', (1984, 1997), True, 'import numpy as np\n'), ((1442, 1452), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (1448, 1452), True, 'import numpy as np\n'), ((1748, 1757), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (1754, 1757), True, 'import numpy as np\n'), ((3511, 3524), 'numpy.sqrt', 'np.sqrt', (['step'], {}), '(step)\n', (3518, 3524), True, 'import numpy as np\n'), ((3573, 3586), 'numpy.sqrt', 'np.sqrt', (['step'], {}), '(step)\n', (3580, 3586), True, 'import numpy as np\n'), ((4304, 4327), 'numpy.sqrt', 'np.sqrt', (['s_w_correction'], {}), '(s_w_correction)\n', (4311, 4327), True, 'import numpy as np\n'), ((4397, 4420), 'numpy.sqrt', 'np.sqrt', (['s_b_correction'], {}), '(s_b_correction)\n', (4404, 4420), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
print_function,
division,
absolute_import)
from six.moves import xrange
# =============================================================================
# Imports
# =============================================================================
from numpy.testing import (
TestCase,
run_module_suite,
assert_,
assert_allclose,
assert_array_almost_equal_nulp,
assert_array_max_ulp,
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises,
assert_raises_regex,
assert_warns,
assert_string_equal)
import numpy as np
import os
from kamrecsys.data import EventData
from kamrecsys.datasets import SAMPLE_PATH, load_movielens_mini
# =============================================================================
# Module variables
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
def load_test_data():
    """Load the sample 'pci.event' file and wrap it in an EventData object.

    Returns the EventData instance together with the raw structured array.
    """
    sample_file = os.path.join(SAMPLE_PATH, 'pci.event')
    record_dtype = np.dtype([('event', 'U18', 2), ('score', float)])
    raw = np.genfromtxt(fname=sample_file, delimiter='\t', dtype=record_dtype)
    data = EventData(n_otypes=2, event_otypes=np.array([0, 1]))
    data.set_event(raw['event'])
    return data, raw
# =============================================================================
# Test Classes
# =============================================================================
class TestEventUtilMixin(TestCase):
    """Tests for the eid/iid conversion helpers of EventData."""

    def test_to_eid_event(self):
        """to_eid_event maps internal ids back to external ids, whole-array and per-row."""
        data, x = load_test_data()

        # whole-array conversion
        check = data.to_eid_event(data.event)
        assert_array_equal(x['event'], check)

        # per-row conversion
        check = np.empty_like(data.event, dtype=x['event'].dtype)
        for i, j in enumerate(data.event):
            check[i, :] = data.to_eid_event(j)
        assert_array_equal(x['event'], check)

    def test_to_iid_event(self):
        """to_iid_event maps external ids to internal ids, whole-array and per-row."""
        # (removed an unused `from kamrecsys.data import EventWithScoreData`
        # import that was never referenced in this test)
        data, x = load_test_data()

        # whole-array conversion
        assert_array_equal(data.event, data.to_iid_event(x['event']))

        # per-row conversion
        check = np.empty_like(x['event'], dtype=int)
        for i, j in enumerate(x['event']):
            check[i, :] = data.to_iid_event(j)
        assert_array_equal(data.event, check)
class TestEventData(TestCase):
    """Tests for EventData.filter_event."""

    def test_filter_event(self):
        from kamrecsys.data import EventWithScoreData

        # load movie_lens
        data = load_movielens_mini()

        # filter events: keep every third event.  super() is used so that
        # the base EventData.filter_event is exercised, bypassing the
        # EventWithScoreData override.
        filter_cond = np.arange(data.n_events) % 3 == 0
        filtered_data = super(
            EventWithScoreData, data).filter_event(filter_cond)
        # internal ids of the surviving (user, item) pairs
        assert_array_equal(
            filtered_data.event[:, 0], [1, 5, 3, 4, 0, 0, 0, 2, 2, 0])
        assert_array_equal(
            filtered_data.event[:, 1], [1, 3, 6, 5, 7, 6, 4, 0, 7, 2])
        # external ids must round-trip to the same values as in the
        # unfiltered data restricted by filter_cond
        assert_array_equal(
            filtered_data.to_eid(0, filtered_data.event[:, 0]),
            data.to_eid(0, data.event[filter_cond, 0]))
        assert_array_equal(
            filtered_data.to_eid(1, filtered_data.event[:, 1]),
            data.to_eid(1, data.event[filter_cond, 1]))
        # per-event features are filtered consistently
        assert_array_equal(
            filtered_data.event_feature['timestamp'],
            [875636053, 877889130, 891351328, 879362287, 878543541,
             875072484, 889751712, 883599478, 883599205, 878542960])
        # object id tables are rebuilt to cover only surviving objects
        assert_array_equal(filtered_data.eid[0], [1, 5, 6, 7, 8, 10])
        assert_array_equal(filtered_data.eid[1], [1, 2, 3, 4, 5, 7, 8, 9])
        assert_equal(
            filtered_data.iid[0],
            {1: 0, 5: 1, 6: 2, 7: 3, 8: 4, 10: 5})
        assert_equal(
            filtered_data.iid[1],
            {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 7: 5, 8: 6, 9: 7})
        # per-object features follow the surviving objects
        assert_equal(
            filtered_data.feature[0]['zip'],
            [u'85711', u'15213', u'98101', u'91344', u'05201', u'90703'])
        assert_equal(
            filtered_data.feature[1]['name'],
            [u'Toy Story (1995)', u'GoldenEye (1995)', u'Four Rooms (1995)',
             u'Get Shorty (1995)', u'Copycat (1995)', u'Twelve Monkeys (1995)',
             u'Babe (1995)', u'Dead Man Walking (1995)'])

        # dummy event data: a plain EventData built from a synthetic array
        data = EventData()
        data.set_event(np.tile(np.arange(5), (2, 2)).T)
        filtered_data = data.filter_event(
            [True, False, True, True, False, False, True, True, False, False])
        assert_equal(filtered_data.n_events, 5)
        assert_array_equal(
            filtered_data.event, [[0, 0], [2, 2], [3, 3], [1, 1], [2, 2]])
# =============================================================================
# Main Routines
# =============================================================================
# Run the whole test suite when this module is executed directly.
if __name__ == '__main__':
    run_module_suite()
| [
"kamrecsys.datasets.load_movielens_mini",
"numpy.testing.assert_equal",
"numpy.arange",
"os.path.join",
"kamrecsys.data.EventData",
"numpy.array",
"numpy.empty_like",
"numpy.testing.run_module_suite",
"numpy.dtype",
"numpy.genfromtxt",
"numpy.testing.assert_array_equal"
] | [((1162, 1200), 'os.path.join', 'os.path.join', (['SAMPLE_PATH', '"""pci.event"""'], {}), "(SAMPLE_PATH, 'pci.event')\n", (1174, 1200), False, 'import os\n'), ((1213, 1262), 'numpy.dtype', 'np.dtype', (["[('event', 'U18', 2), ('score', float)]"], {}), "([('event', 'U18', 2), ('score', float)])\n", (1221, 1262), True, 'import numpy as np\n'), ((1271, 1327), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'infile', 'delimiter': '"""\t"""', 'dtype': 'dtype'}), "(fname=infile, delimiter='\\t', dtype=dtype)\n", (1284, 1327), True, 'import numpy as np\n'), ((5050, 5068), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (5066, 5068), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((1808, 1845), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["x['event']", 'check'], {}), "(x['event'], check)\n", (1826, 1845), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((1913, 1962), 'numpy.empty_like', 'np.empty_like', (['data.event'], {'dtype': "x['event'].dtype"}), "(data.event, dtype=x['event'].dtype)\n", (1926, 1962), True, 'import numpy as np\n'), ((2061, 2098), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["x['event']", 'check'], {}), "(x['event'], check)\n", (2079, 2098), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((2408, 2444), 'numpy.empty_like', 
'np.empty_like', (["x['event']"], {'dtype': 'int'}), "(x['event'], dtype=int)\n", (2421, 2444), True, 'import numpy as np\n'), ((2543, 2580), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data.event', 'check'], {}), '(data.event, check)\n', (2561, 2580), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((2744, 2765), 'kamrecsys.datasets.load_movielens_mini', 'load_movielens_mini', ([], {}), '()\n', (2763, 2765), False, 'from kamrecsys.datasets import SAMPLE_PATH, load_movielens_mini\n'), ((2951, 3028), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['filtered_data.event[:, 0]', '[1, 5, 3, 4, 0, 0, 0, 2, 2, 0]'], {}), '(filtered_data.event[:, 0], [1, 5, 3, 4, 0, 0, 0, 2, 2, 0])\n', (2969, 3028), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((3050, 3127), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['filtered_data.event[:, 1]', '[1, 3, 6, 5, 7, 6, 4, 0, 7, 2]'], {}), '(filtered_data.event[:, 1], [1, 3, 6, 5, 7, 6, 4, 0, 7, 2])\n', (3068, 3127), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((3447, 3629), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["filtered_data.event_feature['timestamp']", '[875636053, 877889130, 891351328, 879362287, 878543541, 875072484, \n 889751712, 883599478, 883599205, 878542960]'], {}), 
"(filtered_data.event_feature['timestamp'], [875636053, \n 877889130, 891351328, 879362287, 878543541, 875072484, 889751712, \n 883599478, 883599205, 878542960])\n", (3465, 3629), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((3667, 3728), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['filtered_data.eid[0]', '[1, 5, 6, 7, 8, 10]'], {}), '(filtered_data.eid[0], [1, 5, 6, 7, 8, 10])\n', (3685, 3728), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((3737, 3803), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['filtered_data.eid[1]', '[1, 2, 3, 4, 5, 7, 8, 9]'], {}), '(filtered_data.eid[1], [1, 2, 3, 4, 5, 7, 8, 9])\n', (3755, 3803), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((3813, 3902), 'numpy.testing.assert_equal', 'assert_equal', (['filtered_data.iid[0]', '{(1): 0, (5): 1, (6): 2, (7): 3, (8): 4, (10): 5}'], {}), '(filtered_data.iid[0], {(1): 0, (5): 1, (6): 2, (7): 3, (8): 4,\n (10): 5})\n', (3825, 3902), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((3920, 4024), 'numpy.testing.assert_equal', 'assert_equal', (['filtered_data.iid[1]', '{(1): 0, (2): 1, 
(3): 2, (4): 3, (5): 4, (7): 5, (8): 6, (9): 7}'], {}), '(filtered_data.iid[1], {(1): 0, (2): 1, (3): 2, (4): 3, (5): 4,\n (7): 5, (8): 6, (9): 7})\n', (3932, 4024), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((4039, 4150), 'numpy.testing.assert_equal', 'assert_equal', (["filtered_data.feature[0]['zip']", "[u'85711', u'15213', u'98101', u'91344', u'05201', u'90703']"], {}), "(filtered_data.feature[0]['zip'], [u'85711', u'15213', u'98101',\n u'91344', u'05201', u'90703'])\n", (4051, 4150), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((4180, 4415), 'numpy.testing.assert_equal', 'assert_equal', (["filtered_data.feature[1]['name']", "[u'Toy Story (1995)', u'GoldenEye (1995)', u'Four Rooms (1995)',\n u'Get Shorty (1995)', u'Copycat (1995)', u'Twelve Monkeys (1995)',\n u'Babe (1995)', u'Dead Man Walking (1995)']"], {}), "(filtered_data.feature[1]['name'], [u'Toy Story (1995)',\n u'GoldenEye (1995)', u'Four Rooms (1995)', u'Get Shorty (1995)',\n u'Copycat (1995)', u'Twelve Monkeys (1995)', u'Babe (1995)',\n u'Dead Man Walking (1995)'])\n", (4192, 4415), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((4498, 4509), 'kamrecsys.data.EventData', 'EventData', ([], {}), '()\n', (4507, 4509), False, 'from kamrecsys.data import EventData\n'), ((4697, 4736), 'numpy.testing.assert_equal', 'assert_equal', 
(['filtered_data.n_events', '(5)'], {}), '(filtered_data.n_events, 5)\n', (4709, 4736), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((4745, 4830), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['filtered_data.event', '[[0, 0], [2, 2], [3, 3], [1, 1], [2, 2]]'], {}), '(filtered_data.event, [[0, 0], [2, 2], [3, 3], [1, 1], [2,\n 2]])\n', (4763, 4830), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_allclose, assert_array_almost_equal_nulp, assert_array_max_ulp, assert_array_equal, assert_array_less, assert_equal, assert_raises, assert_raises_regex, assert_warns, assert_string_equal\n'), ((1374, 1390), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1382, 1390), True, 'import numpy as np\n'), ((2813, 2837), 'numpy.arange', 'np.arange', (['data.n_events'], {}), '(data.n_events)\n', (2822, 2837), True, 'import numpy as np\n'), ((4541, 4553), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (4550, 4553), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from eig import Context, Bayes
from eig.battleship import BattleshipHypothesisSpace
class TestContext(unittest.TestCase):
    """Tests for eig.Context.observe."""

    def test_observe(self):
        """Each additional observation prunes the hypothesis space further."""
        hypotheses = BattleshipHypothesisSpace(grid_size=3, ship_labels=[1, 2],
                ship_sizes=[2, 3], orientations=['V', 'H'])
        prior = Bayes(hypotheses)
        context = Context(hypotheses, prior)

        # -1 marks unobserved cells; 0 is water, positive values are ships.
        board = np.zeros((3, 3)) - 1
        board[0, 0] = 0
        board[1, 0] = board[2, 0] = 1
        board[1, 1] = 2
        context.observe(board)
        self.assertEqual(len(context.valid_ids), 4)

        board[0, 1] = 2
        context.observe(board)
        self.assertEqual(len(context.valid_ids), 2)

        # label 3 is impossible with ship_labels=[1, 2] -> nothing survives
        board[2, 1] = 3
        context.observe(board)
        self.assertEqual(len(context.valid_ids), 0)
| [
"eig.Context",
"numpy.zeros",
"eig.Bayes",
"eig.battleship.BattleshipHypothesisSpace"
] | [((201, 308), 'eig.battleship.BattleshipHypothesisSpace', 'BattleshipHypothesisSpace', ([], {'grid_size': '(3)', 'ship_labels': '[1, 2]', 'ship_sizes': '[2, 3]', 'orientations': "['V', 'H']"}), "(grid_size=3, ship_labels=[1, 2], ship_sizes=[2, 3\n ], orientations=['V', 'H'])\n", (226, 308), False, 'from eig.battleship import BattleshipHypothesisSpace\n'), ((338, 347), 'eig.Bayes', 'Bayes', (['hs'], {}), '(hs)\n', (343, 347), False, 'from eig import Context, Bayes\n'), ((528, 547), 'eig.Context', 'Context', (['hs', 'belief'], {}), '(hs, belief)\n', (535, 547), False, 'from eig import Context, Bayes\n'), ((370, 386), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (378, 386), True, 'import numpy as np\n')] |
import mybayes as bayes
import numpy as np
from mybayes.influence import ProbTable, Normal
from mybayes.settings import NumberOfSample
from copy import deepcopy
class TempCache(object):
    """In-memory store that hands out integer ids for cached items.

    BUGFIX: ``data`` and ``id_top`` used to be *class* attributes, so every
    instance shared one dict and one counter. They are now per-instance
    state; the public attribute names and all methods are unchanged.
    """

    def __init__(self):
        self.data = {}   # id -> stored item
        self.id_top = 0  # last id handed out; ids start at 1

    def add(self, item):
        """Store *item* and return its freshly assigned id."""
        self.id_top += 1
        self.data[self.id_top] = item
        return self.id_top

    def get(self, id):
        """Return the item stored under *id* (KeyError if absent)."""
        return self.data[id]

    def remove(self, id):
        """Delete *id* if it is a known, non-falsy id; silently ignore otherwise."""
        if id and id in self.data:
            del self.data[id]
export_plot_node = None
class Model(object):
    """Activity-network model: activities (nodes) plus precedence arcs,
    compiled into a bayes network of es/ef/ls/lf/duration nodes per activity.
    """
    # NOTE(review): class-level defaults are shared until new_model() rebinds
    # them on the instance -- call new_model() before building a graph.
    nodes = []
    arcs = []
    def new_model(self):
        """Clear all bayes networks and start with an empty graph and plot cache."""
        bayes.remove_all_network()
        self.nodes = []
        self.arcs = []
        global export_plot_node
        export_plot_node = TempCache()
    def replace_node(self, node, new_node):
        """Swap an activity for another one (remove then add)."""
        self.remove_node(node)
        self.add_node(new_node)
    def add_node(self, node):
        # idempotent: a node is only added once
        if node not in self.nodes:
            self.nodes.append(node)
    def add_arc(self, arc):
        # idempotent: an arc is only added once
        if arc not in self.arcs:
            self.arcs.append(arc)
    def remove_node(self, node):
        """Remove an activity, given either the object itself or its node_id."""
        if isinstance(node, ActivityNodeModel):
            self.nodes.remove(node)
        else:
            id = node
            node = self.get_node(id)
            if node:
                self.nodes.remove(node)
    def remove_arc(self, arc):
        """Remove an arc, given either the object itself or its arc_id."""
        if isinstance(arc, ArcModel):
            self.arcs.remove(arc)
        else:
            id = arc
            arc = self.get_arc(id)
            if arc:
                self.arcs.remove(arc)
    def get_arc(self, id):
        # linear scan; None when no arc has this id
        return next((a for a in self.arcs if a.arc_id == id), None)
    def get_node(self, id):
        # linear scan; None when no node has this id
        return next((n for n in self.nodes if n.node_id == id), None)
    def is_node(self, id):
        """True if *id* names a known activity node."""
        nd = self.get_node(id)
        return True if nd else False
    def get_arcs_attach_node(self, node_id):
        """All arcs touching *node_id* (either end), or None when there are none."""
        arcs = [a for a in self.arcs if (
            a.start_id == node_id or a.end_id == node_id)]
        if arcs and len(arcs):
            return arcs
    def build_network(self):
        """Wire every activity's bayes nodes and connect precedence arcs.

        Returns True on success; False when the graph has no clear start or
        end node (every node has both predecessors and successors).
        """
        # print('Build network')
        # bayes.remove_network('test')
        # bayes.new_network('test')
        # create nodes
        for node in self.nodes:
            self.create_action(node)
        # populate link
        for arc in self.arcs:
            self.populate_arc(arc)
        # start and end: nodes without successors / without predecessors
        succes_set = set([arc.end_id for arc in self.arcs])
        pre_set = set([arc.start_id for arc in self.arcs])
        full_set = set([node.node_id for node in self.nodes])
        end_set = full_set - pre_set
        start_set = full_set - succes_set
        if end_set and start_set:
            for end_node_id in end_set:
                node = self.get_node(end_node_id)
                ef = node.get_bayes_node('ef')
                lf = node.get_bayes_node('lf')
                # terminal nodes: late finish is anchored to early finish
                lf.add_successors(ef)
                lf.set_add_value(0)
                # lf.set_weight([1, ])
            for start_node_id in start_set:
                node = self.get_node(start_node_id)
                es = node.get_bayes_node('es')
                # source nodes: early start is anchored to the constant 0
                es.add_successors(bayes.nfact.Constant(value=0))
                es.set_add_value(0)
                # es.set_weight([1, ])
            return True
        else:
            return False
    def run(self):
        """Propagate the current bayes network."""
        bayes.update()
    def build_and_run(self):
        """Rebuild the 'test' network from scratch, populate durations and run."""
        print('Build and run')
        bayes.remove_network('test')
        bayes.new_network('test')
        self.reset()
        success = self.build_network()
        if success:
            # populate duration first
            for act in self.nodes:
                self.build_duration(act)
            print('update')
            self.run()
        else:
            print('graph khong hop le')
    def populate_arc(self, arc):
        """Link predecessor/successor activities: end.es after start.ef and
        start.lf before end.ls (both directions of the CPM recursion)."""
        start = self.get_node(arc.start_id)
        end = self.get_node(arc.end_id)
        if start and end:
            end_es = end.get_bayes_node('es')
            start_ef = start.get_bayes_node('ef')
            end_es.add_successors(start_ef) # , bayes.nfact.Constant(value=1))
            # end_es.set_weight([1, 1])
            start_lf = start.get_bayes_node('lf')
            end_ls = end.get_bayes_node('ls')
            start_lf.add_successors(end_ls) # , bayes.nfact.Constant(value=1))
            # start_lf.set_weight([1, -1])
    def create_action(self, node):
        """Create the five bayes nodes of one activity and stash them on it as
        (es, ef, ls, lf, duration)."""
        es = bayes.nfact.MaxAddValue(add_value=1) # 5
        # duration = bayes.nfact.Gaussian(loc=loc, scale=scale) # 7
        duration = bayes.nfact.TempNode()
        ef = bayes.nfact.Equation(es, duration) # 8
        lf = bayes.nfact.MaxAddValue(add_value=-1) # 9
        ls = bayes.nfact.Equation(lf, duration) # 10
        # late start = late finish - duration
        ls.set_weight([1, -1])
        node.bayes_nodes = (es, ef, ls, lf, duration)
    def build_duration(self, activity):
        """Combine the three duration sub-models into the activity's duration
        samples: (1 + delay) * base_duration * adjustment, per sample."""
        duration = activity.duration_model
        delay_node = self.build_knowned_risk(duration, duration.get_element_by_name('knowned_risk'))
        duration_node = self.build_trade_off(duration, duration.get_element_by_name('trade_off'))
        adjust_node = self.build_unknown_factor(duration, duration.get_element_by_name('unknown_factor'))
        duration_bayes = activity.get_bayes_node('duration')
        n = NumberOfSample
        delays = delay_node.get_samples()
        durations = duration_node.get_samples()
        adjusts = adjust_node.get_samples()
        samples = [(1+delays[i])*durations[i]*adjusts[i] for i in range(n)]
        duration_bayes.set_samples(samples)
    def build_knowned_risk(self, duration, known_risk):
        """Sample the known-risk delay factor from the control / risk-event /
        impact / response tables and return a TempNode holding the samples."""
        control = known_risk.get_node('control')
        risk_event = known_risk.get_node('risk_event')
        impact = known_risk.get_node('impact')
        response = known_risk.get_node('response')
        # normalize table
        control_data = control.get_pre_calc_data()
        risk_event_data = risk_event.get_pre_calc_data()
        impact_data = impact.get_pre_calc_data()
        response_data = response.get_pre_calc_data()
        # compute the risk_event distribution (fixing control if chosen)
        risk_event_values = bayes.influence.calc_two_cpd_network(control_data, risk_event_data,
                                        control.choice_index if control.choice_index!=control.MANUAL else -1)
        # build model to run
        # risk_event_node = bayes.nfact.TableNode(values=risk_event_values)
        # risk_event_samples = risk_event_node.get_samples()
        #
        # impact_node = bayes.nfact.TableNode(values=impact.data)
        # impact_samples = impact_node.get_samples()
        #
        # response_node = bayes.nfact.TableNode(values=response.data)
        # response_samples = response_node.get_samples()
        # TODO: turn this into an input
        # calc delay from samples
        step = 1.0/(len(impact_data)+1)
        impact_real_values = [step*(i+1) for i in range(len(impact_data))] # impact value for each rank
        step = 1.0/(len(risk_event_values)-1)
        risk_event_real_values = [step*i for i in range(len(risk_event_values))] # from 0...1
        step = 1.0 / (len(response_data))
        response_real_values = [step * (i+1) for i in range(len(response_data))[::-1]] # from 1 down towards 0
        n = NumberOfSample
        response_samples = ProbTable(response_data, range(len(response_data))).generate(n)
        if impact.choice_index < 0:
            # no fixed impact: enumerate the joint impact x risk grid
            impact_risk_values=[]
            impact_risk_prob =[]
            impact_prob = impact_data
            risk_prob=risk_event_values
            for i in range(len(impact_prob)):
                for j in range(len(risk_prob)):
                    impact_risk_prob.append(impact_prob[i]*risk_prob[j])
                    impact_risk_values.append(impact_real_values[i]*risk_event_real_values[j])
            impact_risk_samples = ProbTable(impact_risk_prob, impact_risk_values).generate(n)
        else:
            # impact fixed by the user: only the risk dimension is random
            impact_real = impact_real_values[impact.choice_index]
            values = [impact_real*risk_event_real_values[i] for i in range(len(risk_event_values))]
            impact_risk_samples = ProbTable(risk_event_values, values).generate(n)
        delay = [None]*n
        for i in range(n):
            pre_delay = bayes.influence.generate_tnormal(impact_risk_samples[i],0.1,0,1)
            delay[i]= pre_delay*response_real_values[response_samples[i]]
        # create a temp node so the delay histogram can be plotted
        delay_node = bayes.nfact.TempNode(samples=delay)
        id = export_plot_node.add(('Delay', delay_node))
        known_risk.export_plot.append(id)
        known_risk.output_node = id
        return delay_node
    def build_trade_off(self, duration, trade_off):
        """Sample the base duration: a triangular distribution whose parameters
        depend on the sampled/fixed resource rank, scaled by the (sampled or
        fixed) initial estimate. Returns a TempNode of the samples."""
        n = NumberOfSample
        resources = trade_off.get_node('resources')
        initial_estimate = trade_off.get_node('initial_estimate')
        if resources.choice_index is not None:
            resources_samples = [resources.choice_index]*n
        else:
            resources_probs= resources.get_pre_calc_data()
            resources_samples = ProbTable(resources_probs, range(len(resources_probs))).generate(n)
        if initial_estimate.choice_value is not None:
            ie_samples = [initial_estimate.choice_value] * n
        else:
            ie_samples = Normal(initial_estimate.get_param('loc'),
                               initial_estimate.get_param('scale')).generate(n)
        samples =[0] * n
        for i in range(n):
            index = int(resources_samples[i])
            triangle = trade_off.triangle_param_rank[index]
            ie = ie_samples[i]
            samples[i] = np.random.triangular(triangle[0]*ie, triangle[1]*ie, triangle[2]*ie,1)[0]
        # create a temp node so the duration histogram can be plotted
        duration_node = bayes.nfact.TempNode(samples=samples)
        id = export_plot_node.add(('Duration', duration_node))
        trade_off.export_plot.append(id)
        trade_off.output_node = id
        return duration_node
    def build_unknown_factor(self, duration, unknown_factor):
        """Sample the multiplicative adjustment factor, either fixed by the
        user or drawn from a [0, 1]-truncated normal. Returns a TempNode."""
        from scipy.stats import truncnorm
        adjust = unknown_factor.get_node('adjustment_factor')
        if not adjust.choice_value is None:
            samples = [adjust.choice_value] * NumberOfSample
        else:
            samples = truncnorm.rvs(0,1,
                                  loc=adjust.get_param('loc'),
                                  scale=adjust.get_param('scale'),
                                  size = NumberOfSample)
        # create a temp node so the adjustment-factor histogram can be plotted
        adjust_node = bayes.nfact.TempNode(samples=samples)
        id = export_plot_node.add(('AdjustFactor', adjust_node))
        unknown_factor.export_plot.append(id)
        unknown_factor.output_node = id
        return adjust_node
    def dump_data(self):
        """Serialise the whole graph to a JSON-compatible dict."""
        return {
            'Model':{
                'Activities':[a.dump_data() for a in self.nodes],
                'Arcs':[arc.dump_data() for arc in self.arcs]
            }
        }
    def read_data(self, json_data):
        """Rebuild nodes and arcs from the dict produced by dump_data()."""
        activities = json_data['Model']['Activities']
        arcs = json_data['Model']['Arcs']
        self.nodes = []
        self.arcs = []
        for a in activities:
            self.nodes.append(ActivityNodeModel('').read_data(a))
        for arc in arcs:
            self.arcs.append(ArcModel().read_data(arc))
    def reset(self):
        """Drop per-run state (plot nodes, bayes nodes) of every activity."""
        for node in self.nodes:
            node.reset()
class ActivityNodeModel(object):
    """One activity of the project graph: identity, UI position, its
    DurationNodeModel and (when built) the per-activity bayes nodes."""
    # NOTE(review): class-level defaults; __init__/reset rebind per instance.
    name = ''
    node_id = None
    text_id = None
    ui_position = ()
    # (es, ef, ls, lf, duration)
    bayes_nodes = ()
    duration_model = None # type: DurationNodeModel
    def set_name(self, name):
        """Rename the activity and propagate the rename to its duration model."""
        self.name = name
        self.duration_model.activity_rename(name)
    def copy(self):
        """Return a copy with a deep-copied duration model; bayes_nodes are
        shared by reference."""
        a = ActivityNodeModel(self.name)
        a.node_id = self.node_id
        a.text_id = self.text_id
        a.ui_position = self.ui_position
        a.duration_model = deepcopy(self.duration_model)
        a.bayes_nodes = self.bayes_nodes
        return a
    def __init__(self, name):
        self.name = name
        self.duration_model = DurationNodeModel(name)
    def get_bayes_node(self, name):
        """Return the bayes node called *name* out of (es, ef, ls, lf, duration).

        Note: an unknown name falls through to the last entry ('duration'),
        since the loop only breaks on a match.
        """
        m = ('es', 'ef', 'ls', 'lf', 'duration')
        for i, v in enumerate(m):
            if v == name:
                break
        return self.bayes_nodes[i]
    def replace_duration(self, new_duration):
        self.duration_model = new_duration
    def get_export_nodes(self):
        """Collect all plottable nodes: the duration elements' registered
        histogram nodes plus (when built) the five schedule bayes nodes.
        Returns a list of (label, node) pairs."""
        export = []
        for i,k in enumerate(self.duration_model.element_names_label):
            ids = self.duration_model.get_element(i).export_plot
            for id in ids:
                tnode = export_plot_node.get(id)
                name = '%s-%s' %(k,tnode[0])
                export.append((name, tnode[1]))
        ms = ('es', 'ef', 'ls', 'lf', 'duration')
        if self.bayes_nodes:
            for m in ms:
                node = self.get_bayes_node(m)
                export.append((m,node))
        return export
    def dump_data(self):
        """Serialise to a JSON-compatible dict (text_id is not persisted)."""
        return {
            'name':self.name,
            'id':self.node_id,
            'ui_pos':self.ui_position,
            'duration':self.duration_model.dump_data(),
        }
    def read_data(self, json_dict):
        """Restore state from dump_data() output; returns self for chaining."""
        self.name = json_dict['name']
        self.node_id = int(json_dict['id'])
        self.ui_position = json_dict['ui_pos']
        self.duration_model.read_data(json_dict['duration'])
        return self
    def reset(self):
        """Drop per-run state: duration element plot nodes and bayes nodes."""
        self.duration_model.reset()
        self.bayes_nodes = ()
class ArcModel(object):
    """A directed precedence edge between two activities, held as node ids."""
    start_id = None
    end_id = None
    arc_id = None
    start_pos = None
    end_pos = None

    def dump_data(self):
        """Serialise as ``[start_id, end_id]``; UI positions are not persisted."""
        return [self.start_id, self.end_id]

    def read_data(self, ls):
        """Restore the endpoint ids from a ``[start_id, end_id]`` list.

        Returns self so calls can be chained.
        """
        self.start_id, self.end_id = ls[0], ls[1]
        return self
class DurationNodeModel(object):
    """Bundle of the three duration sub-models of one activity, addressable
    by position, label or internal name."""
    element_names_label=('Knowned Risks', 'Trade Off', 'Unknown Factor')
    element_names = ('knowned_risk', 'trade_off', 'unknown_factor')

    def __init__(self, activity_name):
        self.activity_name = activity_name
        # order mirrors element_names: known risk, trade off, unknown factor
        self.elements = [
            KnownedRiskModel(activity_name),
            TradeOffModel(activity_name),
            UnknownFactorModel(activity_name),
        ]

    def activity_rename(self, name):
        """Propagate an activity rename to every element."""
        if self.elements:
            for element in self.elements:
                element.activity_rename(name)

    def get_element_label_index(self, name):
        """Index of the element whose display label equals *name*."""
        return next(i for i, label in enumerate(self.element_names_label)
                    if label == name)

    def get_element(self, index):
        return self.elements[index]

    def get_element_by_name(self, name):
        """Element whose internal name equals *name*."""
        idx = next(i for i, key in enumerate(self.element_names) if key == name)
        return self.get_element(idx)

    def dump_data(self):
        return [element.dump_data() for element in self.elements]

    def read_data(self, ls):
        for i, blob in enumerate(ls):
            self.elements[i].read_data(blob)

    def reset(self):
        for element in self.elements:
            element.reset()
class DurationElement(object):
    """Base class for a duration sub-model: a list of named input nodes plus
    the ids of histogram nodes it registered in the global plot cache."""

    def __init__(self, activity_name):
        self.activity_name = activity_name
        self.nodes_name_label = []
        self.nodes_name = []
        self.nodes = []
        self.export_plot = []
        self.output_node = None  # plot-cache id of this element's output node

    def get_node_index_by_name(self, name):
        """Position of *name* in nodes_name (StopIteration when absent)."""
        return next(i for i, key in enumerate(self.nodes_name) if key == name)

    def set_node(self, name, node):
        self.nodes[self.get_node_index_by_name(name)] = node

    def get_node(self, name):
        return self.nodes[self.get_node_index_by_name(name)]

    def get_node_by_id(self, id):
        return self.nodes[id]

    def dump_data(self):
        return [node.dump_data() for node in self.nodes]

    def read_data(self, ls):
        for i, blob in enumerate(ls):
            self.nodes[i].read_data(blob)

    def reset(self):
        """Drop every registered histogram node (and the output node) from
        the global plot cache."""
        for plot_id in self.export_plot:
            export_plot_node.remove(plot_id)
        export_plot_node.remove(self.output_node)
        self.export_plot = []
        self.output_node = None

    def activity_rename(self, name):
        """Rewrite each node's '<activity>-<node>' name for the new activity."""
        if self.nodes:
            for node in self.nodes:
                parts = node.name.split('-')
                node.name = '%s-%s' % (name, parts[1])
class KnownedRiskModel(DurationElement):
    """Known-risk element: control, impact, risk_event and response CPD
    nodes, with risk_event conditioned on control and impact locked to a
    fixed five-level ranking."""

    def __init__(self, activity_name):
        super(KnownedRiskModel, self).__init__(activity_name)
        self.nodes_name_label = ('Control', 'Impact', 'Risk Event', 'Response')
        self.nodes_name = ('control', 'impact', 'risk_event', 'response')
        self.nodes = [NodeCpdModel("%s-%s" % (self.activity_name, key))
                      for key in self.nodes_name]
        # risk_event is conditioned on control
        self.get_node('risk_event').evidences = [self.get_node('control'), ]
        # impact carries a fixed, non-editable rank scale
        impact = self.get_node('impact')
        impact.set_labels(['Very Low', 'Low', 'Medium', 'High', 'Very Hide'])
        impact.lock_labels = True
class TradeOffModel(DurationElement):
    """Trade-off element: a ranked 'resources' CPD plus a continuous
    'initial_estimate'; each resource rank maps to a triangular
    duration-multiplier (low, mode, high)."""

    def __init__(self, activity_name):
        super(TradeOffModel, self).__init__(activity_name)
        # triangular multiplier parameters, one row per resource rank
        self.triangle_param_rank = [
            [1.4, 1.8, 2.5],
            [1, 1.3, 1.5],
            [0.9, 1, 1.2],
            [0.8, 0.9, 1],
            [0.7, 0.75, 0.9],
        ]
        self.nodes_name_label = ('Resources', 'Initial Estimate')
        self.nodes_name = ('resources', 'initial_estimate')
        full_names = ["%s-%s" % (self.activity_name, key)
                      for key in self.nodes_name]
        resources = NodeCpdModel(full_names[0])
        resources.set_labels(['Very Low', 'Low', 'Medium', 'High', 'Very Hide'])
        resources.lock_labels = True
        estimate = NodeContinuousInterval(full_names[1], 'normal')
        self.nodes = [resources, estimate]
class UnknownFactorModel(DurationElement):
    """Unknown-factor element: a single [0,1]-truncated-normal adjustment
    factor node."""

    def __init__(self, activity_name):
        super(UnknownFactorModel, self).__init__(activity_name)
        self.nodes_name_label = ('Adjustment Factor',)
        self.nodes_name = ('adjustment_factor',)
        full_name = "%s-%s" % (self.activity_name, self.nodes_name[0])
        self.nodes = [NodeContinuousInterval(full_name, 'tnormal01')]
class NodeContinuousInterval(object):
    """A continuous input node described by a named distribution and its
    parameter table; an optional fixed ``choice_value`` overrides sampling."""

    # distribution name -> ordered parameter names
    type = {'normal': ['loc', 'scale'], 'tnormal01': ['loc', 'scale']}

    def __init__(self, name, type_string):
        self.name = name
        self.type_string = type_string
        self.param_names = self.type[type_string]
        self.choice_value = None
        self.data = None  # one single-value row per parameter, param order

    def pre_calc_choice(self):
        raise NotImplementedError()

    def can_pre_choice(self):
        # a continuous node can always be pre-chosen
        return True

    def try_set_choice(self, value):
        """Fix the node to *value*, clamped into the 3-sigma bound; ``None``
        clears the choice. Returns the value actually stored."""
        if value is None:
            self.choice_value = None
        else:
            lo, hi = self.get_bound()
            clamped = value
            if clamped < lo:
                clamped = lo
            if clamped > hi:
                clamped = hi
            self.choice_value = clamped
        return self.choice_value

    def get_bound(self):
        """(loc - 3*scale, loc + 3*scale), or (0, 0) when no data is set."""
        if not self.data:
            return (0, 0)
        loc = self.get_param('loc')
        scale = self.get_param('scale')
        return (loc - 3 * scale, loc + 3 * scale)

    def get_columns_label(self):
        return ['Values', ]

    def get_rows_label(self):
        return self.param_names

    def get_param(self, name):
        """Value of the parameter called *name* (first column of its row)."""
        row = next(i for i, key in enumerate(self.param_names) if key == name)
        return self.data[row][0]

    def dump_data(self):
        return {
            'model': 'NodeContinuousInterval',
            'name': self.name,
            'type_string': self.type_string,
            'choice_value': self.choice_value,
            'data': self.data,
        }

    def read_data(self, json_dict):
        self.name = json_dict['name']
        self.type_string = json_dict['type_string']
        self.choice_value = json_dict['choice_value']
        self.data = json_dict['data']
        self.param_names = self.type[self.type_string]

    def get_type_string(self):
        return 'Distribution: %s' % self.type_string
class LabeledNodeModel(object):
    """A named node carrying an (optionally locked) list of state labels."""

    def __init__(self, name, node_id=-1):
        self.name = name
        self.node_id = node_id
        self.labels = []
        self.lock_labels = False

    def set_labels(self, labels):
        """Replace the label list, silently dropping empty entries."""
        self.labels = list(filter(None, labels))
class NodeCpdModel(LabeledNodeModel):
    """A discrete node holding a (possibly unnormalised) CPD table.

    ``choice_index`` fixes the node to one state; ``MANUAL`` (-1) means the
    state is drawn from the table instead.
    """
    MANUAL = -1

    def __init__(self, name, node_id=-1):
        super(NodeCpdModel, self).__init__(name, node_id)
        self.evidences = []  # parent NodeCpdModel instances
        self.data = []       # CPD table: one row per own state
        self.choice_index = self.MANUAL

    def get_pre_calc_data(self):
        """Return a column-normalised copy of the table; a single-column
        table is flattened to a plain probability list."""
        table = deepcopy(self.data)
        if table:
            n_cols = len(table[0])
            for col in range(n_cols):
                total = sum(row[col] for row in table)
                for row in table:
                    row[col] = row[col] / total
            if n_cols == 1:
                table = [row[0] for row in table]
        return table

    def can_pre_choice(self):
        # only root nodes (without evidences) can be pre-chosen
        return not self.evidences

    def get_table_labels(self):
        """Column labels for the CPD editor: the first parent's state labels,
        or a single 'Prob' column for root nodes."""
        if self.evidences:
            # TODO: currently only handles a single evidence node; extend
            return self.evidences[0].labels
        return ['Prob', ]

    def dump_data(self):
        # self.normalize_data()
        return {'model': 'NodeCpdModel',
                'name': self.name,
                'labels': self.labels,
                'data': self.data,
                'choice_index': self.choice_index}

    def read_data(self, json_dict):
        self.name = json_dict['name']
        self.labels = json_dict['labels']
        self.data = json_dict['data']
        self.choice_index = int(json_dict['choice_index'])

    def get_type_string(self):
        return 'CPD'
| [
"mybayes.remove_all_network",
"mybayes.nfact.Constant",
"mybayes.influence.generate_tnormal",
"numpy.random.triangular",
"mybayes.new_network",
"mybayes.remove_network",
"mybayes.nfact.MaxAddValue",
"mybayes.nfact.TempNode",
"mybayes.update",
"mybayes.influence.ProbTable",
"mybayes.influence.cal... | [((587, 613), 'mybayes.remove_all_network', 'bayes.remove_all_network', ([], {}), '()\n', (611, 613), True, 'import mybayes as bayes\n'), ((3326, 3340), 'mybayes.update', 'bayes.update', ([], {}), '()\n', (3338, 3340), True, 'import mybayes as bayes\n'), ((3410, 3438), 'mybayes.remove_network', 'bayes.remove_network', (['"""test"""'], {}), "('test')\n", (3430, 3438), True, 'import mybayes as bayes\n'), ((3447, 3472), 'mybayes.new_network', 'bayes.new_network', (['"""test"""'], {}), "('test')\n", (3464, 3472), True, 'import mybayes as bayes\n'), ((4403, 4439), 'mybayes.nfact.MaxAddValue', 'bayes.nfact.MaxAddValue', ([], {'add_value': '(1)'}), '(add_value=1)\n', (4426, 4439), True, 'import mybayes as bayes\n'), ((4533, 4555), 'mybayes.nfact.TempNode', 'bayes.nfact.TempNode', ([], {}), '()\n', (4553, 4555), True, 'import mybayes as bayes\n'), ((4569, 4603), 'mybayes.nfact.Equation', 'bayes.nfact.Equation', (['es', 'duration'], {}), '(es, duration)\n', (4589, 4603), True, 'import mybayes as bayes\n'), ((4622, 4659), 'mybayes.nfact.MaxAddValue', 'bayes.nfact.MaxAddValue', ([], {'add_value': '(-1)'}), '(add_value=-1)\n', (4645, 4659), True, 'import mybayes as bayes\n'), ((4678, 4712), 'mybayes.nfact.Equation', 'bayes.nfact.Equation', (['lf', 'duration'], {}), '(lf, duration)\n', (4698, 4712), True, 'import mybayes as bayes\n'), ((6093, 6237), 'mybayes.influence.calc_two_cpd_network', 'bayes.influence.calc_two_cpd_network', (['control_data', 'risk_event_data', '(control.choice_index if control.choice_index != control.MANUAL else -1)'], {}), '(control_data, risk_event_data, control\n .choice_index if control.choice_index != control.MANUAL else -1)\n', (6129, 6237), True, 'import mybayes as bayes\n'), ((8437, 8472), 'mybayes.nfact.TempNode', 'bayes.nfact.TempNode', ([], {'samples': 'delay'}), '(samples=delay)\n', (8457, 8472), True, 'import mybayes as bayes\n'), ((9746, 9783), 'mybayes.nfact.TempNode', 'bayes.nfact.TempNode', ([], {'samples': 
'samples'}), '(samples=samples)\n', (9766, 9783), True, 'import mybayes as bayes\n'), ((10527, 10564), 'mybayes.nfact.TempNode', 'bayes.nfact.TempNode', ([], {'samples': 'samples'}), '(samples=samples)\n', (10547, 10564), True, 'import mybayes as bayes\n'), ((11921, 11950), 'copy.deepcopy', 'deepcopy', (['self.duration_model'], {}), '(self.duration_model)\n', (11929, 11950), False, 'from copy import deepcopy\n'), ((21548, 21567), 'copy.deepcopy', 'deepcopy', (['self.data'], {}), '(self.data)\n', (21556, 21567), False, 'from copy import deepcopy\n'), ((8241, 8308), 'mybayes.influence.generate_tnormal', 'bayes.influence.generate_tnormal', (['impact_risk_samples[i]', '(0.1)', '(0)', '(1)'], {}), '(impact_risk_samples[i], 0.1, 0, 1)\n', (8273, 8308), True, 'import mybayes as bayes\n'), ((9612, 9689), 'numpy.random.triangular', 'np.random.triangular', (['(triangle[0] * ie)', '(triangle[1] * ie)', '(triangle[2] * ie)', '(1)'], {}), '(triangle[0] * ie, triangle[1] * ie, triangle[2] * ie, 1)\n', (9632, 9689), True, 'import numpy as np\n'), ((3129, 3158), 'mybayes.nfact.Constant', 'bayes.nfact.Constant', ([], {'value': '(0)'}), '(value=0)\n', (3149, 3158), True, 'import mybayes as bayes\n'), ((7840, 7887), 'mybayes.influence.ProbTable', 'ProbTable', (['impact_risk_prob', 'impact_risk_values'], {}), '(impact_risk_prob, impact_risk_values)\n', (7849, 7887), False, 'from mybayes.influence import ProbTable, Normal\n'), ((8114, 8150), 'mybayes.influence.ProbTable', 'ProbTable', (['risk_event_values', 'values'], {}), '(risk_event_values, values)\n', (8123, 8150), False, 'from mybayes.influence import ProbTable, Normal\n')] |
import numpy as np
from numba import jit
import tensorflow as tf
# RGB palette constants (float32) for the semantic label classes.
cB = np.array((0, 0, 0), dtype=np.float32)  # boundary
c0 = np.array((255, 255, 255), dtype=np.float32)  # street
c1 = np.array((0, 0, 255), dtype=np.float32)  # house
c2 = np.array((0, 255, 255), dtype=np.float32)  # low vegetation
c3 = np.array((0, 255, 0), dtype=np.float32)  # high vegetation
c4 = np.array((255, 255, 0), dtype=np.float32)  # car
c5 = np.array((255, 0, 0), dtype=np.float32)  # clutter
# Integer codes of the single-letter tags P/V/S/H
# (their semantics are not visible in this file -- TODO confirm).
C_P = ord('P')
C_V = ord('V')
C_S = ord('S')
C_H = ord('H')
# ====== from DW-tools
@jit(nopython=True)
def update_confusion_matrix(confusions, predicted_labels, reference_labels):
    """Accumulate (predicted, reference) counts into *confusions* in place.

    Pairs where either label is negative are skipped, so negative reference
    labels can mark ignored pixels.
    """
    flat_pred = np.ravel(predicted_labels)
    flat_ref = np.ravel(reference_labels)
    for pred, ref in zip(flat_pred, flat_ref):
        if pred >= 0 and ref >= 0:
            confusions[pred, ref] += 1
class mova(object):
    """Track the exponential MOVING AVERAGE of a scalar.

    Call the instance with each new observation; the first observation (or
    ``initial_value``) initialises the average.

    BUGFIX: the original used truthiness (``not initial_value`` /
    ``not self.value``) instead of ``is None``, so a legitimate value of 0
    was treated as "unset" and the average was silently re-initialised
    whenever it decayed to exactly 0.0. Both checks now compare to None.
    """

    def __init__(self, initial_value=None, momentum=0.9, print_accuracy=4):
        assert 0.0 < momentum < 1.0, "momentum has to be between 0.0 and 1.0"
        self.value = None if initial_value is None else float(initial_value)
        self.momentum = float(momentum)
        self.inc = 1.0 - momentum
        # format string for __str__, e.g. '{:.4f}'
        self.str = '{:.' + str(int(print_accuracy)) + 'f}'

    def __call__(self, other):
        """Fold *other* into the running average; returns self for chaining."""
        if self.value is None:
            self.value = float(other)
        else:
            self.value = self.value * self.momentum + other * self.inc
        return self

    def __str__(self):
        return self.str.format(self.value)
def get_confusion_metrics(confusion_matrix):
    """Derive per-class and aggregate metrics from a confusion matrix.

    Parameters
    ----------
    confusion_matrix : numpy.ndarray
        [N x N] matrix; rows are predictions, columns are reference labels.

    Returns
    -------
    dict
        Keys: 'percentages', 'precisions', 'recalls', 'f1s', 'mf1',
        'ious', 'miou', 'oa'.
    """
    true_pos = np.diag(confusion_matrix)
    col_sums = np.sum(confusion_matrix, axis=0)  # tp + fn per class
    row_sums = np.sum(confusion_matrix, axis=1)  # tp + fp per class
    total = np.sum(confusion_matrix)

    percentages = col_sums / total
    precisions = true_pos / row_sums
    recalls = true_pos / col_sums
    f1s = 2 * (precisions * recalls) / (precisions + recalls)
    ious = true_pos / (col_sums + row_sums - true_pos)

    # 0/0 f1 counts as zero; classes absent from the reference are
    # excluded from mf1 via NaN and nanmean.
    f1s[np.isnan(f1s)] = 0.0
    f1s[percentages == 0.0] = np.nan

    return {
        'percentages': percentages,
        'precisions': precisions,
        'recalls': recalls,
        'f1s': f1s,
        'mf1': np.nanmean(f1s),
        'ious': ious,
        'miou': np.nanmean(ious),
        'oa': np.trace(confusion_matrix) / total,
    }
@jit(nopython=True)
def smooth1d(a, n):
    """Smooth a 1-D array with a centred moving average of window *n*.

    Samples past either end are substituted with the edge value, so the
    output has the same length as the input.

    Parameters
    ----------
    a : 1-D array
        Input signal.
    n : int
        Window width in samples (centred on each output position).

    Returns
    -------
    b : ndarray
        Smoothed copy of *a* (dtype follows ``np.zeros_like(a)``).
    """
    d = n // 2
    b = np.zeros_like(a)
    for i in range(len(a)):
        summ = 0
        for j in range(n):
            k = i - d + j
            if k < 0:
                # clamp to the left edge
                summ += a[0]
            elif k > len(a) - 1:
                # clamp to the right edge
                summ += a[len(a) - 1]
            else:
                summ += a[k]
        b[i] = summ / n
    return b
def maxpool(input, fac):
    """Max-pool *input* with a square window and stride of size *fac*."""
    pooled = tf.layers.max_pooling2d(inputs=input, pool_size=[fac, fac], strides=fac)
    return pooled
def unpool(input, fac):
    """Upsample *input* spatially by *fac* using nearest-neighbour resizing."""
    dims = input.get_shape().as_list()
    return tf.image.resize_nearest_neighbor(input, (dims[1] * fac, dims[2] * fac))
def instance_norm(input):
    """Apply instance normalisation to *input*."""
    normalized = tf.contrib.layers.instance_norm(input)
    return normalized
def dropout(id, input, is_train, rate=0.5):
    """Dropout layer named 'dropout<id>'; only active while *is_train* is true."""
    layer_name = 'dropout' + id
    return tf.layers.dropout(input, rate=rate, training=is_train, name=layer_name)
# ============== LABEL PREPROCESSING TOOLS ========================================================
@jit(nopython=True, cache=True)
def index_to_color(patch, num_classes=6):
    """Render a 2-D class-index patch as an RGB uint8 image.

    Three palettes are supported, selected by *num_classes* (6, 8, anything
    else falls back to the 3-class palette); index -1 always renders as
    magenta ('outside').
    """
    rows, cols = patch.shape[:2]
    rgb = np.zeros((rows, cols, 3), dtype=np.uint8)
    for r in range(rows):
        for col in range(cols):
            idx = patch[r, col]
            if num_classes == 6:
                if idx == 0:
                    rgb[r, col] = (255, 255, 255)  # Impervious
                elif idx == 1:
                    rgb[r, col] = (0, 0, 255)  # Building
                elif idx == 2:
                    rgb[r, col] = (0, 255, 255)  # Low vegetation
                elif idx == 3:
                    rgb[r, col] = (0, 255, 0)  # Tree
                elif idx == 4:
                    rgb[r, col] = (255, 255, 0)  # Car
                elif idx == 5:
                    rgb[r, col] = (255, 0, 0)  # Clutter
                elif idx == -1:
                    rgb[r, col] = (255, 0, 255)  # Outside
                else:
                    assert False, 'unknown class!'
            elif num_classes == 8:
                if idx == 0:
                    rgb[r, col] = (255, 128, 0)  # Building
                elif idx == 1:
                    rgb[r, col] = (128, 128, 128)  # Sealed
                elif idx == 2:
                    rgb[r, col] = (200, 135, 70)  # Soil
                elif idx == 3:
                    rgb[r, col] = (0, 255, 0)  # Grass
                elif idx == 4:
                    rgb[r, col] = (64, 128, 0)  # Tree
                elif idx == 5:
                    rgb[r, col] = (0, 0, 255)  # Water
                elif idx == 6:
                    rgb[r, col] = (255, 0, 0)  # Car
                elif idx == 7:
                    rgb[r, col] = (128, 0, 25)  # Clutter
                elif idx == -1:
                    rgb[r, col] = (255, 0, 255)  # Outside
                else:
                    assert False, 'unknown class!'
            else:
                if idx == 0:
                    rgb[r, col] = (0, 128, 0)  # tree
                elif idx == 1:
                    rgb[r, col] = (255, 128, 0)  # building
                elif idx == 2:
                    rgb[r, col] = (255, 255, 255)  # ground
                elif idx == -1:
                    rgb[r, col] = (255, 0, 255)  # Outside
                else:
                    assert False, 'unknown class!'
    return rgb
# ================= TF wrappers
def conv(id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0, dilation=1):
    """2-D convolution wrapper named 'conv<id>'.

    Padding modes:
      - "SAME"/"VALID": handed straight to tf.layers.conv2d.
      - "REFLECT": reflection-pads by half the (odd) kernel size, then runs
        a VALID convolution.
      - "PARTIAL": partial convolution -- a ones-mask is convolved with a
        constant-one kernel to count contributing positions, the bias-free
        conv output is rescaled by kernel_area / count, the bias is added
        afterwards and the result is re-masked.

    A non-positive *init_stddev* selects variance-scaling initialisation,
    otherwise a truncated normal with that stddev is used.
    """
    # regular conv with my favorite settings :)
    assert padding in ["SAME", "VALID", "REFLECT", "PARTIAL"], 'valid paddings: "SAME", "VALID", "REFLECT", "PARTIAL"'
    if type(size) == int: size = [size, size]
    if init_stddev <= 0.0:
        init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
    else:
        init = tf.truncated_normal_initializer(stddev=init_stddev)
    if padding == "PARTIAL":
        with tf.variable_scope('mask'):
            _, h, w, _ = input.get_shape().as_list()
            slide_window = size[0] * size[1]
            mask = tf.ones(shape=[1, h, w, 1])
            # non-trainable all-ones conv counts valid positions per output pixel
            update_mask = tf.layers.conv2d(mask, filters=1, dilation_rate=(dilation, dilation), name='mask' + id,
                                           kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),
                                           strides=stride, padding="SAME", use_bias=False, trainable=False)
            # epsilon avoids division by zero where no position contributed
            mask_ratio = slide_window / (update_mask + 1e-8)
            update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
            mask_ratio = mask_ratio * update_mask
        with tf.variable_scope('parconv'):
            x = tf.layers.conv2d(input, filters=channels, name='conv' + id, kernel_size=size, kernel_initializer=init,
                                 strides=stride, padding="SAME", use_bias=False)
            x = x * mask_ratio
            if use_bias:
                # bias is added after the rescaling, as in partial convolution
                bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
            return x * update_mask
    if padding == "REFLECT":
        assert size[0] % 2 == 1 and size[1] % 2 == 1, "REFLECTION PAD ONLY WORKING FOR ODD FILTER SIZE.. " + str(size)
        pad_x = size[0] // 2
        pad_y = size[1] // 2
        input = tf.pad(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], "REFLECT")
        padding = "VALID"
    return tf.layers.conv2d(input, channels, kernel_size=size, strides=[stride, stride],
                            padding=padding, kernel_initializer=init, name='conv' + id,
                            use_bias=use_bias, dilation_rate=(dilation, dilation))
# zero mean conv
# zero mean conv
def z_conv(id, input, channels, size, stride=1, padding="SAME", use_bias=False, dilation=1):
    """2-D convolution whose filters are re-centred to zero mean over the
    (height, width, in-channel) axes before use.

    Supports the same "PARTIAL" mask-ratio rescaling as conv(); any other
    padding string is handed to tf.nn.conv2d unchanged. Bias defaults off.
    """
    if type(size) == int: size = [size, size]
    in_ch = input.get_shape().as_list()[-1]
    # init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
    init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
    filters = tf.get_variable('zero_conv_weights' + id, initializer=init, shape=[size[0], size[1], in_ch, channels])
    # subtract the per-output-channel mean so each filter sums to zero
    filters = filters - tf.reduce_mean(filters, axis=[0, 1, 2], keepdims=True)
    if padding == "PARTIAL":
        with tf.variable_scope('mask'):
            _, h, w, _ = input.get_shape().as_list()
            slide_window = size[0] * size[1]
            mask = tf.ones(shape=[1, h, w, 1])
            # non-trainable all-ones conv counts valid positions per output pixel
            update_mask = tf.layers.conv2d(mask, filters=1, name='mask' + id,
                                           kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),
                                           strides=stride, padding="SAME", use_bias=False, trainable=False,
                                           dilation_rate=(dilation, dilation))
            # epsilon avoids division by zero where no position contributed
            mask_ratio = slide_window / (update_mask + 1e-8)
            update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)
            mask_ratio = mask_ratio * update_mask
        with tf.variable_scope('parconv'):
            x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding="SAME", name='zero-conv_' + id,
                             dilations=(1, dilation, dilation, 1))
            x = x * mask_ratio
            if use_bias:
                bias = tf.get_variable("bias" + id, [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
            return x * update_mask
    x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=padding, name='zero-conv_' + id,
                     dilations=(1, dilation, dilation, 1))
    if use_bias:
        bias = tf.get_variable("bias", [channels], initializer=tf.constant_initializer(0.0))
        x = tf.nn.bias_add(x, bias)
    return x
return x
def t_conv(id, input, channels, size=3, stride=1, use_bias=True, padding="SAME", init_stddev=-1.0):
    """Plain transposed-convolution (upsampling) layer."""
    assert padding in ["SAME", "VALID"], 'valid paddings are "SAME", "VALID"'
    if type(size) == int:
        size = [size, size]
    # A non-positive stddev selects the variance-scaling default initializer.
    if init_stddev > 0.0:
        init = tf.truncated_normal_initializer(stddev=init_stddev)
    else:
        init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)
    return tf.layers.conv2d_transpose(
        input, channels, kernel_size=size, strides=[stride, stride],
        padding=padding, kernel_initializer=init,
        name='tr_conv' + id, use_bias=use_bias)
| [
"numpy.trace",
"tensorflow.get_variable",
"tensorflow.pad",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.truncated_normal_initializer",
"numpy.array",
"numpy.nanmean",
"tensorflow.reduce_mean",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.layers.conv2d",
"tens... | [((71, 108), 'numpy.array', 'np.array', (['(0, 0, 0)'], {'dtype': 'np.float32'}), '((0, 0, 0), dtype=np.float32)\n', (79, 108), True, 'import numpy as np\n'), ((136, 179), 'numpy.array', 'np.array', (['(255, 255, 255)'], {'dtype': 'np.float32'}), '((255, 255, 255), dtype=np.float32)\n', (144, 179), True, 'import numpy as np\n'), ((199, 238), 'numpy.array', 'np.array', (['(0, 0, 255)'], {'dtype': 'np.float32'}), '((0, 0, 255), dtype=np.float32)\n', (207, 238), True, 'import numpy as np\n'), ((261, 302), 'numpy.array', 'np.array', (['(0, 255, 255)'], {'dtype': 'np.float32'}), '((0, 255, 255), dtype=np.float32)\n', (269, 302), True, 'import numpy as np\n'), ((332, 371), 'numpy.array', 'np.array', (['(0, 255, 0)'], {'dtype': 'np.float32'}), '((0, 255, 0), dtype=np.float32)\n', (340, 371), True, 'import numpy as np\n'), ((404, 445), 'numpy.array', 'np.array', (['(255, 255, 0)'], {'dtype': 'np.float32'}), '((255, 255, 0), dtype=np.float32)\n', (412, 445), True, 'import numpy as np\n'), ((464, 503), 'numpy.array', 'np.array', (['(255, 0, 0)'], {'dtype': 'np.float32'}), '((255, 0, 0), dtype=np.float32)\n', (472, 503), True, 'import numpy as np\n'), ((611, 629), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (614, 629), False, 'from numba import jit\n'), ((2928, 2946), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2931, 2946), False, 'from numba import jit\n'), ((4349, 4379), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (4352, 4379), False, 'from numba import jit\n'), ((786, 812), 'numpy.ravel', 'np.ravel', (['predicted_labels'], {}), '(predicted_labels)\n', (794, 812), True, 'import numpy as np\n'), ((831, 857), 'numpy.ravel', 'np.ravel', (['reference_labels'], {}), '(reference_labels)\n', (839, 857), True, 'import numpy as np\n'), ((2147, 2172), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (2154, 2172), True, 'import numpy 
as np\n'), ((2185, 2217), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(0)'}), '(confusion_matrix, axis=0)\n', (2191, 2217), True, 'import numpy as np\n'), ((2230, 2262), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(1)'}), '(confusion_matrix, axis=1)\n', (2236, 2262), True, 'import numpy as np\n'), ((2542, 2557), 'numpy.nanmean', 'np.nanmean', (['f1s'], {}), '(f1s)\n', (2552, 2557), True, 'import numpy as np\n'), ((2569, 2585), 'numpy.nanmean', 'np.nanmean', (['ious'], {}), '(ious)\n', (2579, 2585), True, 'import numpy as np\n'), ((3452, 3468), 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), '(a)\n', (3465, 3468), True, 'import numpy as np\n'), ((3810, 3882), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'input', 'pool_size': '[fac, fac]', 'strides': 'fac'}), '(inputs=input, pool_size=[fac, fac], strides=fac)\n', (3833, 3882), True, 'import tensorflow as tf\n'), ((3960, 4033), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['input', '(shape[1] * fac, shape[2] * fac)'], {}), '(input, (shape[1] * fac, shape[2] * fac))\n', (3992, 4033), True, 'import tensorflow as tf\n'), ((4073, 4111), 'tensorflow.contrib.layers.instance_norm', 'tf.contrib.layers.instance_norm', (['input'], {}), '(input)\n', (4104, 4111), True, 'import tensorflow as tf\n'), ((4169, 4244), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['input'], {'rate': 'rate', 'training': 'is_train', 'name': "('dropout' + id)"}), "(input, rate=rate, training=is_train, name='dropout' + id)\n", (4186, 4244), True, 'import tensorflow as tf\n'), ((4462, 4497), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.uint8'}), '((h, w, 3), dtype=np.uint8)\n', (4470, 4497), True, 'import numpy as np\n'), ((8743, 8945), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['input', 'channels'], {'kernel_size': 'size', 'strides': '[stride, stride]', 'padding': 'padding', 'kernel_initializer': 'init', 'name': "('conv' + id)", 
'use_bias': 'use_bias', 'dilation_rate': '(dilation, dilation)'}), "(input, channels, kernel_size=size, strides=[stride, stride\n ], padding=padding, kernel_initializer=init, name='conv' + id, use_bias\n =use_bias, dilation_rate=(dilation, dilation))\n", (8759, 8945), True, 'import tensorflow as tf\n'), ((9283, 9337), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.02)'}), '(mean=0.0, stddev=0.02)\n', (9314, 9337), True, 'import tensorflow as tf\n'), ((9352, 9458), 'tensorflow.get_variable', 'tf.get_variable', (["('zero_conv_weights' + id)"], {'initializer': 'init', 'shape': '[size[0], size[1], in_ch, channels]'}), "('zero_conv_weights' + id, initializer=init, shape=[size[0],\n size[1], in_ch, channels])\n", (9367, 9458), True, 'import tensorflow as tf\n'), ((10780, 10924), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input', 'filters'], {'strides': '[1, stride, stride, 1]', 'padding': 'padding', 'name': "('zero-conv_' + id)", 'dilations': '(1, dilation, dilation, 1)'}), "(input, filters, strides=[1, stride, stride, 1], padding=\n padding, name='zero-conv_' + id, dilations=(1, dilation, dilation, 1))\n", (10792, 10924), True, 'import tensorflow as tf\n'), ((11564, 11743), 'tensorflow.layers.conv2d_transpose', 'tf.layers.conv2d_transpose', (['input', 'channels'], {'kernel_size': 'size', 'strides': '[stride, stride]', 'padding': 'padding', 'kernel_initializer': 'init', 'name': "('tr_conv' + id)", 'use_bias': 'use_bias'}), "(input, channels, kernel_size=size, strides=[\n stride, stride], padding=padding, kernel_initializer=init, name=\n 'tr_conv' + id, use_bias=use_bias)\n", (11590, 11743), True, 'import tensorflow as tf\n'), ((2289, 2313), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (2295, 2313), True, 'import numpy as np\n'), ((2474, 2487), 'numpy.isnan', 'np.isnan', (['f1s'], {}), '(f1s)\n', (2482, 2487), True, 'import numpy as np\n'), ((2595, 2621), 'numpy.trace', 
'np.trace', (['confusion_matrix'], {}), '(confusion_matrix)\n', (2603, 2621), True, 'import numpy as np\n'), ((2624, 2648), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (2630, 2648), True, 'import numpy as np\n'), ((7055, 7119), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (7101, 7119), True, 'import tensorflow as tf\n'), ((7145, 7196), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (7176, 7196), True, 'import tensorflow as tf\n'), ((8630, 8704), 'tensorflow.pad', 'tf.pad', (['input', '[[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]]', '"""REFLECT"""'], {}), "(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], 'REFLECT')\n", (8636, 8704), True, 'import tensorflow as tf\n'), ((9479, 9533), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['filters'], {'axis': '[0, 1, 2]', 'keepdims': '(True)'}), '(filters, axis=[0, 1, 2], keepdims=True)\n', (9493, 9533), True, 'import tensorflow as tf\n'), ((11063, 11086), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (11077, 11086), True, 'import tensorflow as tf\n'), ((11411, 11475), 'tensorflow.contrib.layers.variance_scaling_initializer', 'tf.contrib.layers.variance_scaling_initializer', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (11457, 11475), True, 'import tensorflow as tf\n'), ((11501, 11552), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': 'init_stddev'}), '(stddev=init_stddev)\n', (11532, 11552), True, 'import tensorflow as tf\n'), ((7240, 7265), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""mask"""'], {}), "('mask')\n", (7257, 7265), True, 'import tensorflow as tf\n'), ((7385, 7412), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[1, h, w, 1]'}), '(shape=[1, h, w, 1])\n', (7392, 7412), 
True, 'import tensorflow as tf\n'), ((7832, 7871), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['update_mask', '(0.0)', '(1.0)'], {}), '(update_mask, 0.0, 1.0)\n', (7848, 7871), True, 'import tensorflow as tf\n'), ((7936, 7964), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""parconv"""'], {}), "('parconv')\n", (7953, 7964), True, 'import tensorflow as tf\n'), ((7982, 8142), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['input'], {'filters': 'channels', 'name': "('conv' + id)", 'kernel_size': 'size', 'kernel_initializer': 'init', 'strides': 'stride', 'padding': '"""SAME"""', 'use_bias': '(False)'}), "(input, filters=channels, name='conv' + id, kernel_size=\n size, kernel_initializer=init, strides=stride, padding='SAME', use_bias\n =False)\n", (7998, 8142), True, 'import tensorflow as tf\n'), ((9577, 9602), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""mask"""'], {}), "('mask')\n", (9594, 9602), True, 'import tensorflow as tf\n'), ((9722, 9749), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[1, h, w, 1]'}), '(shape=[1, h, w, 1])\n', (9729, 9749), True, 'import tensorflow as tf\n'), ((10212, 10251), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['update_mask', '(0.0)', '(1.0)'], {}), '(update_mask, 0.0, 1.0)\n', (10228, 10251), True, 'import tensorflow as tf\n'), ((10316, 10344), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""parconv"""'], {}), "('parconv')\n", (10333, 10344), True, 'import tensorflow as tf\n'), ((10362, 10504), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input', 'filters'], {'strides': '[1, stride, stride, 1]', 'padding': '"""SAME"""', 'name': "('zero-conv_' + id)", 'dilations': '(1, dilation, dilation, 1)'}), "(input, filters, strides=[1, stride, stride, 1], padding='SAME',\n name='zero-conv_' + id, dilations=(1, dilation, dilation, 1))\n", (10374, 10504), True, 'import tensorflow as tf\n'), ((8348, 8371), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (8362, 8371), True, 
'import tensorflow as tf\n'), ((10712, 10735), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (10726, 10735), True, 'import tensorflow as tf\n'), ((11021, 11049), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (11044, 11049), True, 'import tensorflow as tf\n'), ((7607, 7635), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (7630, 7635), True, 'import tensorflow as tf\n'), ((9908, 9936), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (9931, 9936), True, 'import tensorflow as tf\n'), ((8298, 8326), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (8321, 8326), True, 'import tensorflow as tf\n'), ((10662, 10690), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (10685, 10690), True, 'import tensorflow as tf\n')] |
'''
(c) University of Liverpool 2020
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=no-member
from ast import literal_eval as make_tuple
import collections
from functools import partial
import json
import sys
from rdkit import Chem
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def get_data(filename, tol):
    '''Load the fragment CSV and derive match/broken-bond columns.'''
    df = pd.read_csv(filename)
    # Parse the stringified number arrays back into numpy arrays.
    for col in ('m/z', 'METFRAG_MZ'):
        df[col] = df[col].apply(_to_numpy, sep=',')
    df['METFRAG_BROKEN_BONDS'] = df['METFRAG_BROKEN_BONDS'].apply(_to_numpy_2d)
    df['match_idxs'] = df.apply(partial(_match, tol=tol), axis=1)
    df['bonds_broken'] = df.apply(_get_broken_bonds, axis=1)
    return df
def get_bonds_freq(df):
    '''Get multiple bonds frequency.'''
    # Flatten the per-spectrum 'bonds_broken' arrays into one Series and
    # count how often each broken-bond combination occurs; dropna=False
    # keeps unmatched peaks (None entries) as their own category.
    bonds_freq = pd.Series(
        [item for sublist in df['bonds_broken']
         for item in sublist]).value_counts(normalize=True, dropna=False)
    bonds_freq_df = bonds_freq.reset_index()
    # Decode each bit-packed bond combination into readable tuples.
    # NOTE(review): relies on reset_index() naming the column 'index';
    # newer pandas versions may use the Series name instead — confirm
    # against the installed pandas version.
    bonds_freq_df['index'] = bonds_freq_df['index'].apply(_decode)
    bonds_freq_df.set_index('index', inplace=True)
    bonds_freq_df.index.name = 'bonds'
    bonds_freq_df.columns = ['freq']
    return bonds_freq_df
def get_bond_freq(bonds_freq_df):
    '''Get individual bond frequency.'''
    # _get_bond_freq mutates bond_freq in place through the row-wise apply;
    # the apply's return value is intentionally discarded.
    bond_freq = collections.defaultdict(int)
    partial_bond_freq = partial(_get_bond_freq, bond_freq=bond_freq)
    bonds_freq_df.apply(partial_bond_freq, axis=1)
    data = []
    # Keys are (atom1, atom2, order, aromatic, match, precursor) tuples.
    for key, value in bond_freq.items():
        data.append(list(key) + [value])
    cols = ['atom1', 'atom2', 'order', 'aromatic', 'match', 'precursor',
            'freq']
    df = pd.DataFrame(data, columns=cols)
    # Renormalise frequencies over matched fragments only.
    matched_freq = df.loc[df['match'], 'freq']
    df.loc[df['match'], 'freq_matched'] = matched_freq / sum(matched_freq)
    return df.sort_values('freq', ascending=False)
def plot(bond_freq_df, freq_col, match, out_filename):
    '''Plot a grouped bar chart of broken-bond frequencies and save it.'''
    # One bar group per aromaticity category, one x position per bond label.
    categories, labels, data = _get_plot_data(bond_freq_df, freq_col, match)
    x = np.arange(len(labels))  # the label locations
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots()
    rects = []
    for idx, vals in enumerate(data):
        rects.append(ax.bar(x + width * idx, vals, width,
                            label=categories[idx]))
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('Frequency')
    ax.set_title('Frequencies of broken bonds of matching fragments')
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()
    for rect in rects:
        _autolabel(ax, rect)
    fig.tight_layout()
    plt.savefig(out_filename)
def _to_numpy(array_str, sep=','):
'''Convert array_str to numpy.'''
return np.fromstring(array_str[1:-1], sep=sep)
def _to_numpy_2d(array_str):
'''Convert array_str to numpy 2d array.'''
values = json.loads(array_str)
return np.array([tuple(val) for val in values])
def _match(row, tol):
'''Determine if masses match.'''
match_idxs = np.ones(row['m/z'].size, dtype=int) * -1
if row['METFRAG_MZ'].size:
abs_diff = np.abs(np.subtract.outer(
row['m/z'], row['METFRAG_MZ'])) < tol
abs_diff_match_idxs = np.where(np.any(abs_diff, axis=1))
abs_diff_idxs = np.argmax(abs_diff, axis=1)[abs_diff_match_idxs]
np.put(match_idxs, abs_diff_match_idxs, abs_diff_idxs, mode='raise')
return match_idxs
def _get_broken_bonds(row):
'''Get bonds broken.'''
broken_bonds = np.empty(row['match_idxs'].size, dtype=object)
match_idxs = np.where(row['match_idxs'] > -1)
bonds_idxs = row['match_idxs'][match_idxs]
vals = row['METFRAG_BROKEN_BONDS'][bonds_idxs]
np.put(broken_bonds, match_idxs, vals)
return broken_bonds
def _decode(value):
'''Decode.'''
if isinstance(value, tuple):
return tuple([_decode_val(val) for val in value])
return value
def _decode_val(encoded):
    '''Decode a bit-packed bond into (symbol1, symbol2, order, aromatic).

    Packing layout (from the masks/shifts below):
        bits 11-17: atomic number of atom 1
        bits 4-10 : atomic number of atom 2
        bits 1-3  : bond-order ordinal (stored 0-based)
        bit 0     : aromatic flag
    '''
    table = Chem.GetPeriodicTable()
    atomic_number_1 = (encoded & 2**18 - 1) >> 11
    atomic_number_2 = (encoded & 2**11 - 1) >> 4
    order_ordinal = (encoded & 2**4 - 1) >> 1
    aromatic = (encoded & 1) == 1
    return table.GetElementSymbol(atomic_number_1), \
        table.GetElementSymbol(atomic_number_2), \
        order_ordinal + 1, \
        aromatic
def _get_bond_freq(row, bond_freq):
    '''Get individual bond frequency for given row.

    row.name is the decoded bonds index: NaN for unmatched peaks, an empty
    tuple for the precursor, otherwise a tuple of (atom1, atom2, order,
    aromatic) bond tuples.  Keys in bond_freq carry two extra flags:
    [..., match, precursor].
    '''
    if isinstance(row.name, float) and np.isnan(row.name):
        # Special case: unmatched:
        bond_freq[
            (None, None, float('NaN'), False, False, False)] += row.freq
    elif not row.name:
        # Special case: precursor:
        bond_freq[
            (None, None, float('NaN'), False, True, True)] += row.freq
    else:
        # Split the combination's frequency evenly across its bonds.
        for bond in row.name:
            key = tuple(list(bond) + [True, False])
            bond_freq[key] += row.freq / len(row.name)
def _get_plot_data(df, freq_col, match, min_freq=0.001):
    '''Get plot data: (category names, bond labels, category x label matrix).'''
    # Keep only bonds above the minimum frequency threshold.
    data_df = df[df[freq_col] > min_freq].copy()
    if match:
        data_df = data_df[data_df['match']]
    data_df.loc[:, 'bond_label'] = data_df.apply(_get_bond_label, axis=1)
    categories = data_df['aromatic'].unique()
    labels = data_df['bond_label'].unique()
    data = np.zeros((len(categories), len(labels)))
    # _add_plot_data fills `data` in place via the row-wise apply.
    data_df.apply(partial(_add_plot_data,
                          freq_col=freq_col,
                          categories=categories,
                          labels=labels,
                          data=data), axis=1)
    return ['Aromatic' if cat else 'Non-aromatic' for cat in categories], \
        labels, data
def _get_bond_label(row):
'''Get bond label.'''
bond_chrs = {1.0: '-',
2.0: '=',
3.0: '#',
4.0: '$'}
if not row['match']:
return 'UNMATCH'
try:
return row['atom1'] + bond_chrs[row['order']] + row['atom2']
except KeyError:
return 'PREC'
def _add_plot_data(row, freq_col, categories, labels, data):
'''Add plot data.'''
category = np.argwhere(categories == row['aromatic'])[0]
label = np.argwhere(labels == row['bond_label'])[0]
data[category, label] = row[freq_col]
def _autolabel(ax, rects):
    '''Attach a text label above each bar in *rects*, displaying its height.'''
    for rect in rects:
        height = rect.get_height()
        # Zero-height bars get an empty label to avoid clutter.
        ax.annotate('%.3f' % height if height else '',
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords='offset points',
                    ha='center', va='bottom')
def _make_tuple(val):
'''Make tuple.'''
if isinstance(val, float) and np.isnan(val):
return val
return make_tuple(val)
def main(args):
    '''main method.

    args: [in_filename, tol, freq_col, match_flag].  If in_filename starts
    with 'bonds_freq', it is treated as a previously saved frequency table
    and args[1] is ignored; otherwise args[1] is the m/z match tolerance.
    '''
    in_filename = args[0]
    if in_filename.startswith('bonds_freq'):
        # Resume from a saved bonds_freq.csv: the 'bonds' column holds
        # stringified tuples that must be parsed back.
        bonds_freq_df = pd.read_csv(in_filename)
        bonds_freq_df['bonds'] = bonds_freq_df['bonds'].apply(_make_tuple)
        bonds_freq_df.set_index('bonds', inplace=True)
    else:
        df = get_data(in_filename, float(args[1]))
        bonds_freq_df = get_bonds_freq(df)
        bonds_freq_df.to_csv('bonds_freq.csv')
    bond_freq_df = get_bond_freq(bonds_freq_df)
    bond_freq_df.to_csv('bond_freq.csv', index=False)
    plot(bond_freq_df, args[2], args[3] == 'True', in_filename + '.png')
# Script entry point: pass CLI arguments (excluding the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
| [
"pandas.read_csv",
"numpy.where",
"numpy.empty",
"pandas.DataFrame",
"numpy.fromstring",
"json.loads",
"matplotlib.pyplot.savefig",
"rdkit.Chem.GetPeriodicTable",
"numpy.ones",
"numpy.argmax",
"numpy.any",
"ast.literal_eval",
"numpy.isnan",
"pandas.Series",
"numpy.subtract.outer",
"num... | [((417, 438), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (428, 438), True, 'import pandas as pd\n'), ((1355, 1383), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (1378, 1383), False, 'import collections\n'), ((1409, 1453), 'functools.partial', 'partial', (['_get_bond_freq'], {'bond_freq': 'bond_freq'}), '(_get_bond_freq, bond_freq=bond_freq)\n', (1416, 1453), False, 'from functools import partial\n'), ((1707, 1739), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (1719, 1739), True, 'import pandas as pd\n'), ((2177, 2191), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2189, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2682, 2707), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_filename'], {}), '(out_filename)\n', (2693, 2707), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2833), 'numpy.fromstring', 'np.fromstring', (['array_str[1:-1]'], {'sep': 'sep'}), '(array_str[1:-1], sep=sep)\n', (2807, 2833), True, 'import numpy as np\n'), ((2925, 2946), 'json.loads', 'json.loads', (['array_str'], {}), '(array_str)\n', (2935, 2946), False, 'import json\n'), ((3560, 3606), 'numpy.empty', 'np.empty', (["row['match_idxs'].size"], {'dtype': 'object'}), "(row['match_idxs'].size, dtype=object)\n", (3568, 3606), True, 'import numpy as np\n'), ((3624, 3656), 'numpy.where', 'np.where', (["(row['match_idxs'] > -1)"], {}), "(row['match_idxs'] > -1)\n", (3632, 3656), True, 'import numpy as np\n'), ((3759, 3797), 'numpy.put', 'np.put', (['broken_bonds', 'match_idxs', 'vals'], {}), '(broken_bonds, match_idxs, vals)\n', (3765, 3797), True, 'import numpy as np\n'), ((4035, 4058), 'rdkit.Chem.GetPeriodicTable', 'Chem.GetPeriodicTable', ([], {}), '()\n', (4056, 4058), False, 'from rdkit import Chem\n'), ((6854, 6869), 'ast.literal_eval', 'make_tuple', (['val'], {}), '(val)\n', (6864, 6869), True, 'from ast import literal_eval as 
make_tuple\n'), ((670, 694), 'functools.partial', 'partial', (['_match'], {'tol': 'tol'}), '(_match, tol=tol)\n', (677, 694), False, 'from functools import partial\n'), ((3077, 3112), 'numpy.ones', 'np.ones', (["row['m/z'].size"], {'dtype': 'int'}), "(row['m/z'].size, dtype=int)\n", (3084, 3112), True, 'import numpy as np\n'), ((3391, 3459), 'numpy.put', 'np.put', (['match_idxs', 'abs_diff_match_idxs', 'abs_diff_idxs'], {'mode': '"""raise"""'}), "(match_idxs, abs_diff_match_idxs, abs_diff_idxs, mode='raise')\n", (3397, 3459), True, 'import numpy as np\n'), ((4523, 4541), 'numpy.isnan', 'np.isnan', (['row.name'], {}), '(row.name)\n', (4531, 4541), True, 'import numpy as np\n'), ((5394, 5490), 'functools.partial', 'partial', (['_add_plot_data'], {'freq_col': 'freq_col', 'categories': 'categories', 'labels': 'labels', 'data': 'data'}), '(_add_plot_data, freq_col=freq_col, categories=categories, labels=\n labels, data=data)\n', (5401, 5490), False, 'from functools import partial\n'), ((6135, 6177), 'numpy.argwhere', 'np.argwhere', (["(categories == row['aromatic'])"], {}), "(categories == row['aromatic'])\n", (6146, 6177), True, 'import numpy as np\n'), ((6193, 6233), 'numpy.argwhere', 'np.argwhere', (["(labels == row['bond_label'])"], {}), "(labels == row['bond_label'])\n", (6204, 6233), True, 'import numpy as np\n'), ((6808, 6821), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (6816, 6821), True, 'import numpy as np\n'), ((7007, 7031), 'pandas.read_csv', 'pd.read_csv', (['in_filename'], {}), '(in_filename)\n', (7018, 7031), True, 'import pandas as pd\n'), ((863, 934), 'pandas.Series', 'pd.Series', (["[item for sublist in df['bonds_broken'] for item in sublist]"], {}), "([item for sublist in df['bonds_broken'] for item in sublist])\n", (872, 934), True, 'import pandas as pd\n'), ((3284, 3308), 'numpy.any', 'np.any', (['abs_diff'], {'axis': '(1)'}), '(abs_diff, axis=1)\n', (3290, 3308), True, 'import numpy as np\n'), ((3334, 3361), 'numpy.argmax', 'np.argmax', 
(['abs_diff'], {'axis': '(1)'}), '(abs_diff, axis=1)\n', (3343, 3361), True, 'import numpy as np\n'), ((3176, 3224), 'numpy.subtract.outer', 'np.subtract.outer', (["row['m/z']", "row['METFRAG_MZ']"], {}), "(row['m/z'], row['METFRAG_MZ'])\n", (3193, 3224), True, 'import numpy as np\n')] |
import numpy as np
from CNN.py import ConvolutionNN
def TestForwardPass():
    """Smoke-test a CNN forward pass over random filter/image size combos.

    NOTE(review): CNN_node_forward, MAX_pool_forward and relu are not
    defined in this file, and the top-of-file import only brings in
    ConvolutionNN (from `CNN.py`, which itself looks mistyped) — presumably
    these come from the CNN module; confirm the import line.
    """
    np.random.seed(0)
    data_size = np.random.randint(10, 50)
    for i in range(2,6):
        f_h, f_w = i, i # Filter height and width (m2, n2)
        padding, stride = i, i
        print('For filter with size : ({}, {})'.format(f_h, f_w), ' Padding :',padding, ' Stride :', stride, '\n')
        # input image dimensions
        for j in range(5, 12):
            val = np.random.randint(4, j)
            n_h_in, n_w_in = f_h + val, f_w + val # m1, n1
            n_c_in = np.random.randint(1, 10)
            n_c = n_c_in + np.random.randint(1, 10) # Number of kernels
            # Initializing input dataset
            A_in = np.random.randn(data_size, n_h_in, n_w_in, n_c_in)
            # Initializing weights
            W = np.random.randn(f_h, f_w, n_c_in, n_c)
            b = np.random.randn(1, 1, 1, n_c)
            Z, A1, cache_conv1 = CNN_node_forward(A_in, W, b, padding, stride, relu)
            pool_size, stride_pool = 2, 2
            P1, cache_pool1 = MAX_pool_forward(A1, pool_size, stride_pool)
            # NOTE(review): 'Dimesions' typo below is left as-is (runtime string).
            print('Input Image Dimesions :',A_in.shape,' Convoluted Image Dimensions :',A1.shape,' Image After Pooling :',P1.shape)
            print()

# Runs unconditionally on import; consider guarding with __name__ == '__main__'.
TestForwardPass()
"numpy.random.randint",
"numpy.random.randn",
"numpy.random.seed"
] | [((81, 98), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (95, 98), True, 'import numpy as np\n'), ((115, 140), 'numpy.random.randint', 'np.random.randint', (['(10)', '(50)'], {}), '(10, 50)\n', (132, 140), True, 'import numpy as np\n'), ((460, 483), 'numpy.random.randint', 'np.random.randint', (['(4)', 'j'], {}), '(4, j)\n', (477, 483), True, 'import numpy as np\n'), ((567, 591), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (584, 591), True, 'import numpy as np\n'), ((726, 776), 'numpy.random.randn', 'np.random.randn', (['data_size', 'n_h_in', 'n_w_in', 'n_c_in'], {}), '(data_size, n_h_in, n_w_in, n_c_in)\n', (741, 776), True, 'import numpy as np\n'), ((829, 867), 'numpy.random.randn', 'np.random.randn', (['f_h', 'f_w', 'n_c_in', 'n_c'], {}), '(f_h, f_w, n_c_in, n_c)\n', (844, 867), True, 'import numpy as np\n'), ((884, 913), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', '(1)', 'n_c'], {}), '(1, 1, 1, n_c)\n', (899, 913), True, 'import numpy as np\n'), ((619, 643), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (636, 643), True, 'import numpy as np\n')] |
# encoding: utf-8
# Author: <NAME>
# License: MIT
from numba import njit
import numpy as np
from .dataset import get_dataset
from polylearn.kernels import anova_kernel, homogeneous_kernel
@njit
def _all_subsets_fast(output, X, P):
    # Numba-compiled kernel for the all-subsets kernel matrix.
    # Fills output[i1, i2] in place with prod_j (1 + X[i1, j] * P[i2, j]),
    # iterating only the non-zero entries of X's rows and P's columns.
    # X and P are the dataset wrappers produced by get_dataset (row-major
    # and column-major respectively).
    n_samples_x = X.get_n_samples()
    output[:, :] = 1.0
    for i1 in range(n_samples_x):
        n_nz_x, indices_x, data_x = X.get_row(i1)
        for jj in range(n_nz_x):
            j = indices_x[jj]
            n_nz_p, indices_p, data_p = P.get_column(j)
            for ii2 in range(n_nz_p):
                i2 = indices_p[ii2]
                output[i1, i2] *= (1 + data_x[jj]*data_p[ii2])
def all_subsets_kernel(X, P):
    """Compute the all-subsets kernel matrix between rows of X and rows of P."""
    K = np.ones((X.shape[0], P.shape[0]))
    # The njit kernel fills K in place; X is read row-wise, P column-wise.
    _all_subsets_fast(K,
                      get_dataset(X, order='c'),
                      get_dataset(P, order='fortran'))
    return K
def _poly_predict(X, P, lams, kernel, degree=2):
if kernel == "anova":
K = anova_kernel(X, P, degree)
elif kernel == "poly":
K = homogeneous_kernel(X, P, degree)
elif kernel == "all-subsets":
K = all_subsets_kernel(X, P)
else:
raise ValueError(("Unsuppported kernel: {}. Use one "
"of {{'anova'|'poly'|'all-subsets'}}").format(kernel))
return np.dot(K, lams)
| [
"polylearn.kernels.anova_kernel",
"numpy.dot",
"polylearn.kernels.homogeneous_kernel",
"numpy.ones"
] | [((678, 711), 'numpy.ones', 'np.ones', (['(X.shape[0], P.shape[0])'], {}), '((X.shape[0], P.shape[0]))\n', (685, 711), True, 'import numpy as np\n'), ((1286, 1301), 'numpy.dot', 'np.dot', (['K', 'lams'], {}), '(K, lams)\n', (1292, 1301), True, 'import numpy as np\n'), ((953, 979), 'polylearn.kernels.anova_kernel', 'anova_kernel', (['X', 'P', 'degree'], {}), '(X, P, degree)\n', (965, 979), False, 'from polylearn.kernels import anova_kernel, homogeneous_kernel\n'), ((1019, 1051), 'polylearn.kernels.homogeneous_kernel', 'homogeneous_kernel', (['X', 'P', 'degree'], {}), '(X, P, degree)\n', (1037, 1051), False, 'from polylearn.kernels import anova_kernel, homogeneous_kernel\n')] |
#!/usr/bin/env python
import math,operator,random,sys
import numpy as np
#######
#Probability Tools for DNA sequence analysis
#######
def snr(observed, expected):
    """Signal-to-noise ratio: observed count divided by expected count."""
    ratio = observed / expected
    return ratio
def zscore(observed, expected):
    """Z-score of an observed count, treating sqrt(expected) as the stdev."""
    deviation = observed - expected
    return deviation / math.sqrt(expected)
def which_bin(bins, x, safe=0):
    """
    Return the index of the half-open interval of `bins` containing x.

    For boundaries [0, 5, 10, 15]:
        x < 0         -> -1
        0  <= x < 5   -> 0
        5  <= x < 10  -> 1
        10 <= x < 15  -> 2
        x >= 15       -> 3 (len(bins) - 1)

    With safe=1, x exactly equal to the last boundary returns len(bins).
    """
    if x < bins[0]: return -1
    for i in range(1, len(bins)):
        if x < bins[i]: return i - 1
    if safe and x == bins[-1]:
        # NOTE(review): len(bins) here is one past the table above — looks
        # inconsistent; confirm against callers before changing.
        return len(bins)
    # Bug fix: was `return len(i)+1`, a TypeError on the int loop index.
    return len(bins) - 1
def cumulative_sum(quality):
    """Running totals of `quality`; empty/None input is returned unchanged."""
    if not quality:
        return quality
    running = []
    total = 0
    for q in quality:
        total += q
        running.append(total)
    return running
def frequency_dic(seq):
    """Map each of A/C/G/T to its relative frequency in seq (case-insensitive)."""
    seq = seq.upper()
    length = float(len(seq))
    return {base: seq.count(base) / length for base in ('A', 'C', 'G', 'T')}
def pick_one(dic):
    """Draw one key from `dic` with probability proportional to its value.

    e.g. {'A': .18, 'C': .32, 'G': .32, 'T': .18} yields 'A' ~18% of draws.
    """
    # Bug fix: dict.items() is a non-indexable view in Python 3; materialise
    # it as a list.  (The `if 1:` debug scaffold and dead else branch are
    # removed — only the debug path ever executed.)
    items = list(dic.items())
    cums = cumulative_sum(cget(items, 1))
    x = random.uniform(0, cums[-1])
    # which_bin returns the interval index shifted down by one, hence +1.
    idx = which_bin(cums, x, safe=1)
    return items[idx + 1][0]
def pick_many(dic, n):
    """Draw n keys (with replacement) from `dic`, weighted by its values.

    e.g. {'A': .18, 'C': .32, 'G': .32, 'T': .18} yields 'A' ~18% of draws.
    """
    # Bug fix: dict.items() is a non-indexable view in Python 3.
    items = list(dic.items())
    cums = cumulative_sum(cget(items, 1))
    choices = []
    for _ in range(n):
        x = random.uniform(0, cums[-1])
        idx = which_bin(cums, x, safe=1)
        choices.append(items[idx + 1][0])
    return choices
return choices
def gaussian(x, mu, sigma):
    """
    Evaluate the normal pdf N(mu, sigma) at x, where sigma is the
    standard deviation.
    """
    # Bug fix: the old normalisation 1/sqrt(2*pi*sigma) treated sigma as the
    # variance while the exponent treats it as the std dev; the correct
    # constant is 1/(sigma*sqrt(2*pi)).
    coeff = 1.0 / (sigma * math.sqrt(2 * math.pi))
    return coeff * math.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
def make_gaussian(mu, sigma):
    """
    Build the pdf of N(mu, sigma) (sigma = standard deviation) as a callable.

    usage:
        N2_3 = make_gaussian(2, 3)
        N2_3(4)  # pdf of N(2, 3) evaluated at 4
    """
    # Same normalisation fix as gaussian(): 1/(sigma*sqrt(2*pi)).
    def pdf(x, mu=mu, sigma=sigma):
        return (1.0 / (sigma * math.sqrt(2 * math.pi))) * \
            math.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
    return pdf
def make_adder(n):
    """
    Build a function that adds the fixed offset n to its argument.

    usage:
        Add2 = make_adder(2)
        Add2(3) -> 5
    """
    def _adder(x):
        return x + n
    return _adder
#############
#Math Primitives
#############
loge_2 = math.log(2)
def avg(l, precise=0):
    """Arithmetic mean of l; returns 0 for empty/None input.

    NOTE: under Python 2 the default branch truncated for int inputs; with
    Python 3 true division both branches now return a float.
    """
    if not l:
        return 0
    # Bug fix: `reduce` is not a builtin in Python 3 (and functools is not
    # imported here); use the builtin sum().
    if precise:
        return sum(l) / float(len(l))
    return sum(l) / len(l)
def movavg(s, n):
    '''Return the n-period simple moving average of the time series s.

    s is a list ordered from oldest (index 0) to most recent (index -1);
    n is a positive integer.  Returns a numpy array of len(s) - n + 1
    window means.
    '''
    # Bug fix: the old slicing (c[n-1:] - c[:-n+1]) summed only n-1 samples
    # per window (and crashed for n == 1).  Prepending a zero to the prefix
    # sums gives true n-sample window sums.
    c = np.cumsum(np.insert(np.asarray(s), 0, 0))
    return (c[n:] - c[:-n]) / float(n)
def median(l):
    """Median of l; None for empty/None input.  Even-length input returns
    the mean of the two middle values."""
    if not l:
        return None
    # Bug fixes: `my_sort` was undefined (use sorted), and `len(l)/2` is a
    # float in Python 3 and cannot index a list (use //).
    vals = sorted(l)
    mid = len(vals) // 2
    if len(vals) % 2:
        return vals[mid]
    return (vals[mid] + vals[mid - 1]) / 2.0
def stdev(l, failfast=1):
    """Sample standard deviation: square root of variance(l, failfast)."""
    var = variance(l, failfast=failfast)
    return math.sqrt(var)
def variance(l, failfast=1):
    """Unbiased (n-1) sample variance of l.

    Raises:
        ValueError: fewer than two samples and failfast is true; otherwise
        such input returns 0.
    """
    if (not l) or len(l) == 1:
        if failfast:
            # Bug fix: raising a plain string is a TypeError in Python 3.
            raise ValueError(
                "tools.variance: Not enough samples. Need >= 2, got %s"
                % (len(l) if l else 0))
        return 0
    m = avg(l, 1)
    s = 0
    for i in l:
        s = s + (i - m) * (i - m)
    return s / (len(l) - 1)
def log2(x):
    """Base-2 logarithm via change of base: log2(x) = ln(x) / ln(2)."""
    # Bug fix (comment only): the old note had the ratio inverted —
    # log_a(b) = log_c(b) / log_c(a).  Computed inline rather than through
    # the loge_2 module global; the value is identical.
    return math.log(x) / math.log(2)
def log_k(x, k):
    """Logarithm of x in base k, via change of base."""
    numerator = math.log(x)
    denominator = math.log(k)
    return numerator / denominator
def prob2score(prob):
    """Convert a probability to a Phred-style score: -10 * log10(prob).

    e.g. 1/100 -> 20.  Returns -1 for inputs that cannot be scored
    (non-numeric, zero or negative).
    """
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt etc.;
    # only conversion/domain errors are expected here.
    try:
        return -10 * math.log10(float(prob))
    except (TypeError, ValueError):
        return -1
def p2bits(p):
    """Convert a p-value into bits of information: -log2(p)."""
    bits = log2(p)
    return -bits
def factorial(n):
    """Iterative n!; returns 1 for n <= 1 (including negatives, as before)."""
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
###########
#Poisson
###########
def poisson_expected(rate):
    # Print, for counts x = 1..49, the Poisson probability of x at `rate`
    # and that probability scaled by 12,000,000.
    # NOTE(review): the 12000000 constant is unexplained here — presumably
    # a dataset/genome size; confirm with the caller.
    for x in range(1,50,1):
        p = poisson(rate,x)
        print(f"{x}\t{p}\t{12000000*p}")
def poisson(rate, x):
    """Poisson pmf: probability of exactly x events at the given rate,
    i.e. exp(-rate) * rate**x / x!.
    """
    # math.factorial replaces the hand-rolled module-level factorial(); for
    # valid counts (x >= 0) the results are identical, and negative x now
    # raises instead of silently dividing by 1.
    return math.exp(-rate) * rate ** x / math.factorial(x)
######################
#Binomial Distribution
#######################
def binomial_likelihood_ratio(ps, k, n):
    """Compare two binomial hypotheses on k successes in n trials.

    ps[0] is the null-hypothesis success probability, ps[1] the
    alternative.  Returns sys.maxsize when the null likelihood is zero.

    NOTE(review): the returned quantity is log(L1) / L0, which matches
    neither L1/L0 nor log(L1/L0); preserved as-is pending confirmation
    with callers (see binomial_log_likelihood_ratio for the log form).
    """
    assert len(ps) == 2
    likelihoods = [binomial(p, k, n) for p in ps]
    if likelihoods[0]:
        return np.log(likelihoods[1]) / likelihoods[0]
    # Bug fixes: sys.maxint no longer exists in Python 3, and the old
    # message indexed the float loop variable (p[1]) instead of ps[1].
    print(f"Warning: likelihood ratio set to sys.maxsize. p(H1)={ps[1]}, p(H0)=0")
    return sys.maxsize
def binomial_log_likelihood_ratio(ps, k, n):
    """Log-likelihood ratio of H1 (ps[1]) over H0 (ps[0]) for k of n successes."""
    ll_alt = log_binomial(ps[1], k, n)
    ll_null = log_binomial(ps[0], k, n)
    return ll_alt - ll_null
def log_binomial(p, k, n):
    """Log-probability of exactly k successes in n Bernoulli(p) trials."""
    log_coeff = log_n_choose_k(n, k)
    return log_coeff + math.log(p) * k + math.log(1 - p) * (n - k)
def binomial(p, k, n):
    """Probability of exactly k successes in n independent trials, each
    succeeding with probability p."""
    coeff = n_choose_k(n, k)
    return coeff * p ** k * (1 - p) ** (n - k)
def cumBinomial(p, k, n):
    """Cumulative binomial probability P(X <= k) for X ~ Binomial(n, p)."""
    total = 0.0
    for successes in range(k + 1):
        total += binomial(p, successes, n)
    return total
def n_choose_k(n, k):
    """Binomial coefficient C(n, k) = n! / (k! (n-k)!).

    Computed as the running product n*(n-1)*...*(n-k+1) / (k*(k-1)*...*1)
    to avoid huge intermediate factorials.  Returns a float.
    """
    assert k <= n
    # symmetry C(n, k) == C(n, n-k) keeps the loop short
    k = min(k, n - k)
    nominator = range(n, n - k, -1)
    denominator = range(k, 0, -1)
    result = 1.0
    # Fix: the original used map(None, a, b), a Python-2-only idiom that
    # raises TypeError on Python 3; zip() is the equivalent for these
    # equal-length sequences.
    for nom, den in zip(nominator, denominator):
        result = (result * nom) / den
    return result
def log_n_choose_k(n, k):
    """Natural log of the binomial coefficient C(n, k).

    Accumulates log(nom) - log(den) over the running-product form of
    C(n, k) so very large coefficients never overflow.
    """
    assert k <= n
    # symmetry C(n, k) == C(n, n-k) keeps the loop short
    k = min(k, n - k)
    nominator = range(n, n - k, -1)
    denominator = range(k, 0, -1)
    result = 0
    # Fix: map(None, a, b) is Python-2-only (TypeError on Python 3);
    # zip() is the equivalent for these equal-length sequences.
    for nom, den in zip(nominator, denominator):
        result = (result + math.log(nom)) - math.log(den)
    return result
#################
#Dictionary Tools
#################
def cget(diclist, key, strict=1):
    """Gather the value of *key* from every dictionary (or indexable) in diclist.

    With strict (the default) every element must contain key; otherwise
    elements that are falsy or lack the key are silently skipped.
    """
    if strict:
        return [element[key] for element in diclist]
    gathered = []
    for dic in diclist:
        if dic and generic_has_key(dic, key):
            gathered.append(dic[key])
    return gathered
| [
"random.uniform",
"numpy.log",
"math.sqrt",
"math.log",
"numpy.array",
"numpy.cumsum",
"math.exp"
] | [((2661, 2672), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (2669, 2672), False, 'import math, operator, random, sys\n'), ((3102, 3113), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (3110, 3113), True, 'import numpy as np\n'), ((3122, 3134), 'numpy.cumsum', 'np.cumsum', (['s'], {}), '(s)\n', (3131, 3134), True, 'import numpy as np\n'), ((255, 274), 'math.sqrt', 'math.sqrt', (['expected'], {}), '(expected)\n', (264, 274), False, 'import math, operator, random, sys\n'), ((1365, 1392), 'random.uniform', 'random.uniform', (['(0)', 'cums[-1]'], {}), '(0, cums[-1])\n', (1379, 1392), False, 'import math, operator, random, sys\n'), ((1891, 1918), 'random.uniform', 'random.uniform', (['(0)', 'cums[-1]'], {}), '(0, cums[-1])\n', (1905, 1918), False, 'import math, operator, random, sys\n'), ((3826, 3837), 'math.log', 'math.log', (['x'], {}), '(x)\n', (3834, 3837), False, 'import math, operator, random, sys\n'), ((3880, 3891), 'math.log', 'math.log', (['x'], {}), '(x)\n', (3888, 3891), False, 'import math, operator, random, sys\n'), ((3892, 3903), 'math.log', 'math.log', (['k'], {}), '(k)\n', (3900, 3903), False, 'import math, operator, random, sys\n'), ((2174, 2204), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * sigma)'], {}), '(2 * math.pi * sigma)\n', (2183, 2204), False, 'import math, operator, random, sys\n'), ((4497, 4512), 'math.exp', 'math.exp', (['(-rate)'], {}), '(-rate)\n', (4505, 4512), False, 'import math, operator, random, sys\n'), ((4943, 4965), 'numpy.log', 'np.log', (['likelihoods[1]'], {}), '(likelihoods[1])\n', (4949, 4965), True, 'import numpy as np\n'), ((5393, 5408), 'math.log', 'math.log', (['(1 - p)'], {}), '(1 - p)\n', (5401, 5408), False, 'import math, operator, random, sys\n'), ((6729, 6742), 'math.log', 'math.log', (['den'], {}), '(den)\n', (6737, 6742), False, 'import math, operator, random, sys\n'), ((2421, 2451), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * sigma)'], {}), '(2 * math.pi * sigma)\n', (2430, 2451), False, 'import math, 
operator, random, sys\n'), ((5379, 5390), 'math.log', 'math.log', (['p'], {}), '(p)\n', (5387, 5390), False, 'import math, operator, random, sys\n'), ((6712, 6725), 'math.log', 'math.log', (['nom'], {}), '(nom)\n', (6720, 6725), False, 'import math, operator, random, sys\n'), ((1608, 1635), 'random.uniform', 'random.uniform', (['(0)', 'cums[-1]'], {}), '(0, cums[-1])\n', (1622, 1635), False, 'import math, operator, random, sys\n')] |
"""
Created on Friday 25 August 2017
Last update: Tuesday 26 December 2017
@author: <NAME>
<EMAIL>
Some functions to illustrate the convergence of the optimization algorithms
for quadratic systems
"""
import sys
sys.path.append('helpers/')
from colors import colors_list
import matplotlib.pyplot as plt
import numpy as np
#import seaborn as sns
#sns.set_style('white')
def gd_error_decomposition(eigenvalues=[0.1, 1.5, 1.6, 1.8, 2],
                x0=np.ones((5, 1)), n_steps=50, t='optimal', ax=None,
                cumulative=True):
    """
    Per-eigencomponent error of gradient descent on a quadratic system.

    For each eigenvalue lambda_i, the contribution to the suboptimality
    after k steps with step size t is (1 - t*lambda_i)^(2k) * lambda_i * x0_i^2.

    Inputs:
        - eigenvalues : eigenvalues of the quadratic's Hessian
        - x0 : initial point, one coordinate per eigenvalue
          (NOTE: both defaults are shared mutable objects; they are only
          read here, never mutated)
        - n_steps : number of gradient steps to evaluate
        - t : step size, or 'optimal' for 2 / (lambda_min + lambda_max)
        - ax : optional matplotlib axis to plot on (no plot if None)
        - cumulative : stacked fill plot (True) or per-component lines (False)
    Output:
        - error_per_comp : array of shape (len(eigenvalues), n_steps + 1)
    """
    if t=='optimal':
        # step size minimizing the worst-case contraction factor
        t = 2 / (min(eigenvalues) + max(eigenvalues))
    n = len(eigenvalues)  # (currently unused)
    ev = np.reshape(eigenvalues, (-1, 1)) # vector format
    steps = np.arange(0, n_steps + 1)
    error_per_comp = (1 - t * ev) ** (2 * steps.reshape((1, -1)))
    error_per_comp *= ev * x0**2
    if ax is not None:
        colors = iter(colors_list)
        # small floor; matches the 1e-10 lower y-limit used below
        prev = np.zeros_like(steps) + 1e-10
        current = prev + 0
        for i, val in enumerate((eigenvalues)):
            label=r'$\lambda_{}=${}'.format(i+1, val)
            if cumulative:
                current += error_per_comp[i,:]
                ax.fill_between(steps+1, prev, current,
                        color=next(colors),
                        label=label)
                prev[:] = current
            else:
                ax.plot(steps+1, error_per_comp[i,:],color=next(colors),
                        label=label, lw=2)
        ax.legend(loc=0)
        if cumulative:
            ax.set_ylabel(r'$f(\mathbf{x}^{(k)})-f(\mathbf{x}^\star)$')
            #ax.set_ylim([1e-10, 5])
            ax.set_ylim([1e-10, error_per_comp[:,0].sum()])
        else:
            ax.set_ylabel(r'$(1-t\lambda_i)^{2k}\lambda_i[\mathbf{u}_i^\intercal(\mathbf{x}^{(k)} - \mathbf{x}^\star)]^2$')
        ax.set_xlabel(r'$k+1$')
        ax.set_ylim([1e-10, error_per_comp[:,0].sum()])
    return error_per_comp
def gd_convergence_and_bound(eigenvalues=[0.1, 1.5, 1.6, 1.8, 2], n_steps=50,
                t='optimal', x0=np.ones((5, 1))):
    """Return the true GD error per step and its linear-convergence bound.

    The bound is the initial gap sum_i(x0_i^2 * lambda_i) times c**k,
    with c = 1 - 1/kappa and kappa the condition number.
    """
    kappa = max(eigenvalues) / min(eigenvalues)
    rate = 1 - 1 / kappa
    per_component = gd_error_decomposition(eigenvalues=eigenvalues,
                                           n_steps=n_steps, t=t, x0=x0)
    error = per_component.sum(0)
    initial_gap = np.sum([xi ** 2 * e for xi, e in zip(x0, eigenvalues)])
    bound = initial_gap * rate ** np.arange(0, n_steps + 1)
    return error, bound
if __name__ == '__main__':
    # choose format of the plots
    # NOTE(review): 'format' shadows the builtin of the same name
    format = 'png'
    # --- convergence plot: error decomposition, cumulative vs individual ---
    n_steps = 1000 - 1
    fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 6))
    E = gd_error_decomposition(ax=ax0, n_steps=n_steps, cumulative=True)
    ax0.semilogx()
    ax0.set_title('Error of gradient descent\n(cumulative eigencomponents)')
    gd_error_decomposition(ax=ax1, n_steps=n_steps, cumulative=False)
    ax1.set_title('Error of gradient descent\n(individual eigencomponents)')
    ax1.loglog()
    fig.tight_layout()
    fig.savefig('Chapters/01.Quadratic/Figures/convergence_decomposition.{}'.format(format))
    # --- error vs bound for several condition numbers kappa ---
    kappas = [1.1, 2.5, 5, 50, 100]
    n_steps = 10000
    colors = iter(colors_list)
    fig, ax = plt.subplots()
    steps = np.arange(0, n_steps+1)+1
    for kappa in kappas:
        color = next(colors)
        # 2-D system with eigenvalues (1, kappa) gives exactly that condition number
        error, bound = gd_convergence_and_bound(eigenvalues=[1, kappa],
                        n_steps=n_steps, x0=np.ones((2, 1)))
        ax.plot(steps, error, color=color, ls='-', label=r'$\kappa$={}'.format(kappa), lw=2)
        ax.plot(steps, bound, color=color, ls='--', lw=2)
    ax.set_ylim([1e-6, 200])
    ax.loglog()
    ax.set_title('Convergence (-) and bound (--) of GD\nfor different condition numbers')
    ax.legend(loc=0)
    ax.set_ylabel(r'$f(\mathbf{x}^{(k)})-f(\mathbf{x}^\star)$')
    ax.set_xlabel(r'$k+1$')
    fig.tight_layout()
    fig.savefig('Chapters/01.Quadratic/Figures/convergence_bound.{}'.format(format))
    plt.close('all')
| [
"numpy.reshape",
"numpy.ones",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"sys.path.append",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((215, 242), 'sys.path.append', 'sys.path.append', (['"""helpers/"""'], {}), "('helpers/')\n", (230, 242), False, 'import sys\n'), ((460, 475), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (467, 475), True, 'import numpy as np\n'), ((762, 794), 'numpy.reshape', 'np.reshape', (['eigenvalues', '(-1, 1)'], {}), '(eigenvalues, (-1, 1))\n', (772, 794), True, 'import numpy as np\n'), ((824, 849), 'numpy.arange', 'np.arange', (['(0)', '(n_steps + 1)'], {}), '(0, n_steps + 1)\n', (833, 849), True, 'import numpy as np\n'), ((2151, 2166), 'numpy.ones', 'np.ones', (['(5, 1)'], {}), '((5, 1))\n', (2158, 2166), True, 'import numpy as np\n'), ((2656, 2694), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(12, 6)'}), '(ncols=2, figsize=(12, 6))\n', (2668, 2694), True, 'import matplotlib.pyplot as plt\n'), ((3270, 3284), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3282, 3284), True, 'import matplotlib.pyplot as plt\n'), ((4019, 4035), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4028, 4035), True, 'import matplotlib.pyplot as plt\n'), ((3297, 3322), 'numpy.arange', 'np.arange', (['(0)', '(n_steps + 1)'], {}), '(0, n_steps + 1)\n', (3306, 3322), True, 'import numpy as np\n'), ((1022, 1042), 'numpy.zeros_like', 'np.zeros_like', (['steps'], {}), '(steps)\n', (1035, 1042), True, 'import numpy as np\n'), ((2457, 2482), 'numpy.arange', 'np.arange', (['(0)', '(n_steps + 1)'], {}), '(0, n_steps + 1)\n', (2466, 2482), True, 'import numpy as np\n'), ((3489, 3504), 'numpy.ones', 'np.ones', (['(2, 1)'], {}), '((2, 1))\n', (3496, 3504), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
from rlcycle.common.abstract.action_selector import ActionSelector
from rlcycle.common.utils.common_utils import np2tensor
class SACActionSelector(ActionSelector):
    """Action selector for a SAC (Soft Actor-Critic) policy.

    Samples a squashed-Gaussian action from the policy and returns it as a
    numpy array.  (Docstring previously said "DDPG", contradicting the
    class name and the tanh-squashed sampling below.)

    Attributes:
        action_dim (int): size of action space dimension
        action_min (np.ndarray): lower bound for continuous actions
        action_max (np.ndarray): upper bound for continuous actions
    """

    def __init__(self, action_dim: int, action_range: list, use_cuda: bool):
        ActionSelector.__init__(self, use_cuda)
        self.action_dim = action_dim
        # action_range is [low, high]; stored as arrays for rescale_action
        self.action_min = np.array(action_range[0])
        self.action_max = np.array(action_range[1])

    def __call__(
        self, policy: nn.Module, state: np.ndarray
    ) -> np.ndarray:
        """Generate action via policy.

        policy.sample() is expected to return (mu, sigma, z, log_pi); the
        pre-squash sample z is pushed through tanh to bound the action.
        (Return annotation fixed: a flat np.ndarray is returned, not a
        tuple of tensors.)
        """
        if state.ndim == 1:
            # promote a single observation to a batch of one
            state = state.reshape(1, -1)
        mu, sigma, z, log_pi = policy.sample(np2tensor(state, self.use_cuda))
        action = torch.tanh(z)
        action_np = action.cpu().detach().view(-1).numpy()
        return action_np

    def rescale_action(self, action: np.ndarray) -> np.ndarray:
        """Rescale actions to fit continuous action spaces.

        Affine map sending [-1, 1] onto [action_min, action_max].
        """
        action_rescaled = (
            action * (self.action_max - self.action_min) / 2.0
            + (self.action_max + self.action_min) / 2.0
        )
        return action_rescaled
| [
"rlcycle.common.abstract.action_selector.ActionSelector.__init__",
"numpy.array",
"torch.tanh",
"rlcycle.common.utils.common_utils.np2tensor"
] | [((601, 640), 'rlcycle.common.abstract.action_selector.ActionSelector.__init__', 'ActionSelector.__init__', (['self', 'use_cuda'], {}), '(self, use_cuda)\n', (624, 640), False, 'from rlcycle.common.abstract.action_selector import ActionSelector\n'), ((704, 729), 'numpy.array', 'np.array', (['action_range[0]'], {}), '(action_range[0])\n', (712, 729), True, 'import numpy as np\n'), ((756, 781), 'numpy.array', 'np.array', (['action_range[1]'], {}), '(action_range[1])\n', (764, 781), True, 'import numpy as np\n'), ((1092, 1105), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (1102, 1105), False, 'import torch\n'), ((1042, 1073), 'rlcycle.common.utils.common_utils.np2tensor', 'np2tensor', (['state', 'self.use_cuda'], {}), '(state, self.use_cuda)\n', (1051, 1073), False, 'from rlcycle.common.utils.common_utils import np2tensor\n')] |
from matplotlib import style
from matplotlib import ticker as mticker
from matplotlib.finance import candlestick_ohlc
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
import datetime as dt
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def m_hist2d(x, y, bin, norm):
    """Draw and show a 2-D histogram of (x, y) with *bin* bins.

    norm is passed through as the legacy ``normed`` flag.
    NOTE(review): ``normed`` was removed in matplotlib 3.x in favour of
    ``density`` -- confirm the pinned matplotlib version still accepts it.
    """
    plt.figure(1)
    # the histogram of the data
    plt.hist2d(x, y, bin, normed=norm)
    plt.xlabel('Events T')
    plt.ylabel('Events V')
    plt.title('Events Histogram')
    plt.grid(True)
    plt.colorbar()
    plt.show()
def m_hist(x, bin, norm, c):
    """Draw and show a 1-D histogram of x with *bin* bins and facecolor *c*.

    NOTE(review): the ``normed`` kwarg was removed in matplotlib 3.x
    (use ``density``) -- confirm the pinned matplotlib version.
    """
    plt.figure(1)
    # the histogram of the data
    n, bins, patches = plt.hist(x, bin, normed=norm, facecolor=c)
    plt.xlabel('Events')
    plt.ylabel('Probability')
    plt.title('Events Histogram')
    plt.grid(True)
    plt.show()
def m_plot2d(X, Y, factor):
    """Plot Y*factor against X (ggplot style), maximized, and return plt.

    NOTE(review): ``mng.window.showMaximized()`` assumes a Qt backend --
    verify against the deployment environment.
    """
    style.use('ggplot')
    Y = Y * factor  # scale the ordinate before plotting
    plt.figure(1)
    plt.plot(X, Y)
    plt.tight_layout()
    plt.legend()
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
    return plt
def m_scatter1(X, Y, factor):
    """Scatter Y*factor against X (ggplot style), show, and return plt."""
    style.use('ggplot')
    Y = Y * factor  # scale the ordinate before plotting
    plt.figure(1)
    plt.scatter(X, Y)
    plt.tight_layout()
    plt.legend()
    plt.show()
    return plt
def m_scatter2d(X1, Y1):
    """Labelled 2-D scatter plot of (X1, Y1); shows the figure and returns plt."""
    plt.figure(1)
    plt.scatter(X1, Y1, label="XY")
    plt.tight_layout()
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.title("Scatter Plot 2D")
    plt.legend()
    plt.grid(True)
    plt.show()
    return plt
def m_scatter3(X1, Y1, Z1):
    """2-D scatter of (X1, Y1) with Z1 as the third positional argument.

    NOTE(review): the third positional argument of plt.scatter is the
    marker size ``s``, so Z1 acts as per-point size here, not a third
    coordinate -- confirm intent (see m_scatter3d for a true 3-D scatter).
    """
    plt.figure(1)
    plt.scatter(X1, Y1, Z1)
    plt.tight_layout()
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.title("Scatter Plot 3")
    plt.legend()
    plt.grid(True)
    plt.show()
    return plt
def m_scatter2v(X1, Y1, X2, Y2):
    """Scatter plot called with four positional vectors.

    NOTE(review): plt.scatter's 3rd/4th positional arguments are size
    ``s`` and colour ``c``, so X2 and Y2 are NOT plotted as a second
    point set here -- if two vectors were intended, two scatter calls
    are needed; confirm intent.
    """
    plt.figure(1)
    plt.scatter(X1, Y1, X2, Y2)
    plt.tight_layout()
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.title("Scatter Plot 2 vectors")
    plt.legend()
    plt.grid(True)
    plt.show()
    return plt
def m_plot(X):
    """Line-plot X using the ggplot style (no explicit figure or show call).

    Cleanup: removed the unused alias ``A = X`` and the dead commented-out
    axis-formatting code.
    """
    style.use('ggplot')
    plt.plot(X)
def m_pca(df):
    """Fit a 2-component PCA on the Close/Open columns of df and return it.

    Prints the explained variance ratio as a side effect; NaN rows are
    dropped before fitting.
    """
    pca = PCA(n_components=2)
    frame = df[["Close", "Open"]]
    pca.fit(frame.dropna())
    print(pca.explained_variance_ratio_)
    return pca
def m_subplot(row, col):
    """Create a row x col grid of axes sharing both x and y; return the axes.

    Cleanup: removed the dead ``ax = list()`` assignment that was
    immediately overwritten by plt.subplots.
    """
    fig, ax = plt.subplots(row, col, sharex=True, sharey=True)
    return ax
def m_lag3d(df, lag):
    """3-D lag plot: scatter df[t] vs df[t+lag] vs df[t+2*lag]."""
    first = df[: -2 * lag]
    second = df[lag : -lag]
    third = df[2 * lag :]
    m_scatter3d(first, second, third)
def m_vector(df, lag = 1):
    """Split a series into consecutive lagged segment endpoints.

    Returns (X, Y, U, V) where (X, Y) are segment start coordinates and
    (U, V) the following coordinates -- the shape expected by the quiver
    and stream helpers below.
    """
    starts = df[: -lag]
    ends = df[lag:]
    return starts[:-1], ends[:-1], starts[1:], ends[1:]
def m_quiver(df, lag = 1):
    """Quiver plot of the lagged vector field derived from series df."""
    X, Y, U, V = m_vector(df, lag)
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.quiver(X, Y, U, V, color="blue", linewidth=0.1)
    plt.axis('equal')
    plt.grid()
    plt.title('Quive Plot, Dynamic Colours')
    plt.show() # display the plot
def m_arrow(df, lag = 1):
    """Draw the lagged series as an arrowed path.

    Assumes df is a pandas Series (uses .values) -- TODO confirm callers.
    """
    X = df[0 :-1*lag]
    Y = df[lag : ]
    draw_path(X.values, Y.values)
def distance(X, Y):
    """Square root of the summed squared segment components of polyline (X, Y).

    NOTE: this is sqrt(sum(dx^2 + dy^2)), a single root over the total --
    not the sum of per-segment Euclidean lengths.
    """
    dx = X[1:] - X[:-1]
    dy = Y[1:] - Y[:-1]
    return np.sum(dx ** 2 + dy ** 2) ** 0.5
def draw_path(X, Y):
    """Plot the polyline (X, Y) and overlay an arrow on each segment."""
    HEAD_WIDTH = 0.5
    HEAD_LEN = 0.5
    fig = plt.figure()
    axes = fig.add_subplot(111)
    x = X
    y = Y
    axes.plot(x, y)
    # per-segment heading angle
    theta = np.arctan2(y[1:] - y[:-1], x[1:] - x[:-1])
    dist = distance(X, Y) - HEAD_LEN
    # drop the last point: one arrow per segment start
    x = x[:-1]
    y = y[:-1]
    # NOTE(review): x uses sin(theta) and y uses cos(theta); for
    # theta = arctan2(dy, dx) the conventional components would be
    # cos for x and sin for y -- confirm the swap is intentional.
    ax = x + dist * 0.01 * np.sin(theta)
    ay = y + dist * 0.01 * np.cos(theta)
    for x1, y1, x2, y2 in zip(x,y,ax-x,ay-y):
        axes.arrow(x1, y1, x2, y2, head_width=HEAD_WIDTH, head_length=HEAD_LEN)
    plt.show()
def m_stream(df, lag = 1):
    """Stream plot of the lagged vector field derived from series df.

    NOTE(review): current matplotlib's plt.show() takes no figure
    argument -- the positional ``plot1`` lands on the ``block``
    parameter; confirm against the pinned matplotlib version.
    """
    X, Y, U, V = m_vector(df, lag)
    plot1 = plt.figure()
    plt.streamplot(X, Y, U, V,          # data
               color=U,                 # array that determines the colour
               cmap=plt.get_cmap('cool'),  # colour map
               linewidth=2,             # line thickness
               arrowstyle='->',         # arrow style
               arrowsize=1.5)           # arrow size
    #plt.colorbar()                     # add colour bar on the right
    plt.title('Stream Plot, Dynamic Colour')
    plt.show(plot1)                     # display the plot
def m_scatter3d(X1, Y1, Z1):
    """True 3-D scatter of (X1, Y1, Z1) on a 3-D axis; returns plt."""
    fig = plt.figure(1)
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X1, Y1, Z1)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("Scatter Plot 3D")
    plt.tight_layout()
    plt.legend()
    plt.grid(True)
    plt.show()
    return plt
def m_scatter4d(X1, Y1, Z1, C1):
    """3-D scatter with a fourth dimension C1 mapped to marker colour; returns plt."""
    fig = plt.figure(1)
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X1, Y1, Z1, c=C1, cmap=plt.get_cmap('prism'))
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("Scatter Plot 4D")
    plt.tight_layout()
    plt.legend()
    plt.grid(True)
    plt.show()
    return plt
def m_scatter5d(X1, Y1, Z1, S1, C1):
    """3-D scatter with |S1| as marker size and C1 as colour; returns plt."""
    fig = plt.figure(1)
    ax = fig.add_subplot(111, projection='3d')
    # abs() because scatter sizes must be non-negative
    ax.scatter(X1, Y1, Z1, s=np.abs(S1), c=C1, cmap=plt.get_cmap('prism'))
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("Scatter Plot 5D")
    plt.tight_layout()
    plt.legend()
    plt.grid(True)
    plt.show()
    return plt
def m_plot3d(X1, Y1, Z1):
    """3-D point plot of (X1, Y1, Z1) using markers only (no connecting line)."""
    fig = plt.figure(1)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot3D(X1, Y1, Z1, linestyle='None', marker='o')
    #ax.set_prop_cycle
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("Plot 3D")
    plt.tight_layout()
    plt.legend()
    plt.grid(True)
    plt.show()
def m_scatter_color(X1, Y1, Z1):
    """2-D scatter with |Z1|*10 as marker size and Y1 mapped to colour."""
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    # abs() because scatter sizes must be non-negative
    ax.scatter(X1, Y1, s=np.abs(Z1)*10.0, marker='o', c=Y1, cmap=plt.get_cmap('prism'))
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    plt.title("Plot Scatter Color")
    plt.tight_layout()
    plt.legend()
    plt.grid(True)
    plt.show()
def m_trisurf(X1, Y1, Z1):
    """Triangulated surface plot of scattered (X1, Y1, Z1) points.

    NOTE(review): fig.gca(projection='3d') was deprecated/removed in
    matplotlib 3.4+ -- confirm the pinned matplotlib version.
    """
    fig = plt.figure(1)
    ax = fig.gca(projection='3d')
    ax.plot_trisurf(X1, Y1, Z1, cmap=plt.get_cmap('prism'))
    #ax.set_prop_cycle
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("Plot Triple Surface")
    plt.tight_layout()
    plt.legend()
    plt.grid(True)
    plt.draw()
    plt.show()
def m_frame(X1, Y1, Z1):
    """Wireframe plot of gridded (X1, Y1, Z1) data.

    NOTE(review): fig.gca(projection='3d') was deprecated/removed in
    matplotlib 3.4+; also verify plot_wireframe accepts ``cmap`` on the
    pinned version.
    """
    fig = plt.figure(1)
    ax = fig.gca(projection='3d')
    ax.plot_wireframe(X1, Y1, Z1, cmap=plt.get_cmap('prism'))
    #ax.set_prop_cycle
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("Plot wire frame")
    plt.tight_layout()
    plt.legend()
    plt.grid(True)
    plt.draw()
    plt.show()
def m_image(H):
    """Render matrix H as an image with a vertical colorbar; returns plt."""
    fig = plt.figure(figsize=(25, 25))
    ax = fig.add_subplot(1,1,1)
    ax.set_title('colorMap')
    plt.imshow(H, cmap=plt.get_cmap('prism'))
    ax.set_aspect('equal')
    # invisible helper axes used only to position the colorbar
    cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
    cax.get_xaxis().set_visible(False)
    cax.get_yaxis().set_visible(False)
    cax.patch.set_alpha(0.5)
    cax.set_frame_on(False)
    plt.colorbar(orientation='vertical')
    plt.show()
    return plt
def m_mesh(H):
    """Render matrix H as a pseudocolor mesh, maximized; returns plt.

    NOTE(review): Z = H[X, Y] indexes with the meshgrid outputs, which
    transposes rows/columns relative to imshow-style display -- confirm
    the intended orientation.
    """
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.set_title('colorMap')
    x, y = H.shape
    X = np.arange(0, x, 1)
    Y = np.arange(0, y, 1)
    X, Y = np.meshgrid(X, Y)
    Z = H[X, Y]
    plt.pcolormesh(X, Y, Z)
    ax.set_aspect('equal')
    # invisible helper axes used only to position the colorbar
    cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
    cax.get_xaxis().set_visible(False)
    cax.get_yaxis().set_visible(False)
    cax.patch.set_alpha(0)
    cax.set_frame_on(False)
    plt.colorbar(orientation='vertical')
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
    return plt
def m_matrix(M):
    """Show matrix M via matshow with the 'prism' colormap, maximized."""
    plt.matshow(M, cmap=plt.get_cmap('prism'))
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
def m_candlestick(data):
    """Candlestick (OHLC) chart of a DataFrame with a 'Date' column.

    WARNING: mutates the caller's DataFrame in place (the 'Date' column
    is converted to float days).
    NOTE(review): matplotlib.finance was removed from matplotlib (>= 2.2,
    now the separate mpl_finance/mplfinance packages) -- confirm the
    pinned matplotlib version still ships it.
    """
    # convert the datetime64 column in the dataframe to 'float days'
    data['Date']=dates.date2num(data['Date'].astype(dt.date))
    print(data['Date'])
    fig = plt.figure()
    ax1 = plt.subplot2grid((1,1),(0,0))
    plt.ylabel('Price')
    ax1.xaxis.set_major_locator(mticker.MaxNLocator(6))
    ax1.xaxis.set_major_formatter(dates.DateFormatter('%Y-%m-%d'))
    candlestick_ohlc(ax1,data.values,width=1.0)
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
def m_surface(mat):
    """3-D surface plot of a 2-D matrix, maximized.

    NOTE(review): fig.gca(projection='3d') was deprecated/removed in
    matplotlib 3.4+ -- confirm the pinned matplotlib version.
    """
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    matrix = mat #np.array([[0, 1, 2, 3, 4], [.5, 1.5, 2.5, 3.5, 4.5], [1, 2, 3, 4, 5]])
    x, y = matrix.shape
    X = np.arange(0, x, 1)
    Y = np.arange(0, y, 1)
    X, Y = np.meshgrid(X, Y)
    Z = matrix[X, Y]
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, antialiased=False, cmap=plt.get_cmap('coolwarm'))
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.pcolormesh",
"matplotlib.style.use",
"numpy.arctan2",
"matplotlib.ticker.MaxNLocator",
"numpy.sin",
"matplotlib.pyplot.subplot2grid",
"numpy.arange",
"sklearn.decomposition.PCA",
"matplotlib.pypl... | [((372, 385), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (382, 385), True, 'import matplotlib.pyplot as plt\n'), ((424, 458), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['x', 'y', 'bin'], {'normed': 'norm'}), '(x, y, bin, normed=norm)\n', (434, 458), True, 'import matplotlib.pyplot as plt\n'), ((466, 488), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Events T"""'], {}), "('Events T')\n", (476, 488), True, 'import matplotlib.pyplot as plt\n'), ((494, 516), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Events V"""'], {}), "('Events V')\n", (504, 516), True, 'import matplotlib.pyplot as plt\n'), ((522, 551), 'matplotlib.pyplot.title', 'plt.title', (['"""Events Histogram"""'], {}), "('Events Histogram')\n", (531, 551), True, 'import matplotlib.pyplot as plt\n'), ((557, 571), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (565, 571), True, 'import matplotlib.pyplot as plt\n'), ((583, 597), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (595, 597), True, 'import matplotlib.pyplot as plt\n'), ((605, 615), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (613, 615), True, 'import matplotlib.pyplot as plt\n'), ((657, 670), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (667, 670), True, 'import matplotlib.pyplot as plt\n'), ((728, 770), 'matplotlib.pyplot.hist', 'plt.hist', (['x', 'bin'], {'normed': 'norm', 'facecolor': 'c'}), '(x, bin, normed=norm, facecolor=c)\n', (736, 770), True, 'import matplotlib.pyplot as plt\n'), ((778, 798), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Events"""'], {}), "('Events')\n", (788, 798), True, 'import matplotlib.pyplot as plt\n'), ((804, 829), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (814, 829), True, 'import matplotlib.pyplot as plt\n'), ((835, 864), 'matplotlib.pyplot.title', 'plt.title', (['"""Events Histogram"""'], {}), "('Events Histogram')\n", (844, 864), True, 
'import matplotlib.pyplot as plt\n'), ((870, 884), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (878, 884), True, 'import matplotlib.pyplot as plt\n'), ((892, 902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (900, 902), True, 'import matplotlib.pyplot as plt\n'), ((939, 958), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (948, 958), False, 'from matplotlib import style\n'), ((996, 1009), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1006, 1009), True, 'import matplotlib.pyplot as plt\n'), ((1017, 1031), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (1025, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1061), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1059, 1061), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1079), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1077, 1079), True, 'import matplotlib.pyplot as plt\n'), ((1097, 1126), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (1124, 1126), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1174), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1172, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1235, 1254), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1244, 1254), False, 'from matplotlib import style\n'), ((1292, 1305), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1302, 1305), True, 'import matplotlib.pyplot as plt\n'), ((1313, 1330), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'Y'], {}), '(X, Y)\n', (1324, 1330), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1360), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1358, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1366, 1378), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1376, 1378), True, 'import matplotlib.pyplot as 
plt\n'), ((1390, 1400), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1398, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1460, 1473), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1470, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1512), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X1', 'Y1'], {'label': '"""XY"""'}), "(X1, Y1, label='XY')\n", (1492, 1512), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1542), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1540, 1542), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1558, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1584), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1579, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1590, 1618), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter Plot 2D"""'], {}), "('Scatter Plot 2D')\n", (1599, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1624, 1636), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1634, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1656), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1650, 1656), True, 'import matplotlib.pyplot as plt\n'), ((1664, 1674), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1672, 1674), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1750), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1747, 1750), True, 'import matplotlib.pyplot as plt\n'), ((1758, 1781), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X1', 'Y1', 'Z1'], {}), '(X1, Y1, Z1)\n', (1769, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1811), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1809, 1811), True, 'import matplotlib.pyplot as plt\n'), ((1817, 1832), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", 
(1827, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1853), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1848, 1853), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1886), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter Plot 3"""'], {}), "('Scatter Plot 3')\n", (1868, 1886), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1904), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1902, 1904), True, 'import matplotlib.pyplot as plt\n'), ((1910, 1924), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1918, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1932, 1942), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1940, 1942), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2023), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2020, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2058), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X1', 'Y1', 'X2', 'Y2'], {}), '(X1, Y1, X2, Y2)\n', (2042, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2088), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2086, 2088), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2109), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (2104, 2109), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2130), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (2125, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2136, 2171), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter Plot 2 vectors"""'], {}), "('Scatter Plot 2 vectors')\n", (2145, 2171), True, 'import matplotlib.pyplot as plt\n'), ((2177, 2189), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2187, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2195, 2209), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2203, 2209), True, 'import matplotlib.pyplot as plt\n'), ((2217, 2227), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2225, 2227), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2317), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2307, 2317), False, 'from matplotlib import style\n'), ((2325, 2336), 'matplotlib.pyplot.plot', 'plt.plot', (['X'], {}), '(X)\n', (2333, 2336), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2603), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (2587, 2603), False, 'from sklearn.decomposition import PCA\n'), ((2801, 2849), 'matplotlib.pyplot.subplots', 'plt.subplots', (['row', 'col'], {'sharex': '(True)', 'sharey': '(True)'}), '(row, col, sharex=True, sharey=True)\n', (2813, 2849), True, 'import matplotlib.pyplot as plt\n'), ((3260, 3273), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3270, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3383), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (3374, 3383), True, 'import matplotlib.pyplot as plt\n'), ((3389, 3399), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3397, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3405, 3445), 'matplotlib.pyplot.title', 'plt.title', (['"""Quive Plot, Dynamic Colours"""'], {}), "('Quive Plot, Dynamic Colours')\n", (3414, 3445), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3459, 3461), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3813, 3815), True, 'import matplotlib.pyplot as plt\n'), ((3909, 3951), 'numpy.arctan2', 'np.arctan2', (['(y[1:] - y[:-1])', '(x[1:] - x[:-1])'], {}), '(y[1:] - y[:-1], x[1:] - x[:-1])\n', (3919, 3951), True, 'import numpy as np\n'), ((4254, 4264), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4262, 4264), True, 'import matplotlib.pyplot as plt\n'), ((4350, 4362), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4360, 4362), True, 'import matplotlib.pyplot as plt\n'), ((4790, 4830), 'matplotlib.pyplot.title', 'plt.title', (['"""Stream Plot, Dynamic Colour"""'], {}), "('Stream Plot, Dynamic Colour')\n", (4799, 4830), True, 'import matplotlib.pyplot as plt\n'), ((4836, 4851), 'matplotlib.pyplot.show', 'plt.show', (['plot1'], {}), '(plot1)\n', (4844, 4851), True, 'import matplotlib.pyplot as plt\n'), ((4934, 4947), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4944, 4947), True, 'import matplotlib.pyplot as plt\n'), ((5115, 5143), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter Plot 3D"""'], {}), "('Scatter Plot 3D')\n", (5124, 5143), True, 'import matplotlib.pyplot as plt\n'), ((5149, 5167), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5165, 5167), True, 'import matplotlib.pyplot as plt\n'), ((5173, 5185), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5183, 5185), True, 'import matplotlib.pyplot as plt\n'), ((5191, 5205), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5199, 5205), True, 'import matplotlib.pyplot as plt\n'), ((5213, 5223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5221, 5223), True, 'import matplotlib.pyplot as plt\n'), ((5297, 5310), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5307, 5310), True, 'import matplotlib.pyplot as plt\n'), ((5512, 5540), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter Plot 4D"""'], {}), "('Scatter Plot 4D')\n", (5521, 5540), True, 'import matplotlib.pyplot as plt\n'), ((5546, 5564), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5562, 5564), True, 'import matplotlib.pyplot as plt\n'), ((5570, 5582), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5580, 5582), True, 'import matplotlib.pyplot as plt\n'), ((5588, 5602), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5596, 5602), 
True, 'import matplotlib.pyplot as plt\n'), ((5610, 5620), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5618, 5620), True, 'import matplotlib.pyplot as plt\n'), ((5698, 5711), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5708, 5711), True, 'import matplotlib.pyplot as plt\n'), ((5927, 5955), 'matplotlib.pyplot.title', 'plt.title', (['"""Scatter Plot 5D"""'], {}), "('Scatter Plot 5D')\n", (5936, 5955), True, 'import matplotlib.pyplot as plt\n'), ((5961, 5979), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5977, 5979), True, 'import matplotlib.pyplot as plt\n'), ((5985, 5997), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5995, 5997), True, 'import matplotlib.pyplot as plt\n'), ((6003, 6017), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6011, 6017), True, 'import matplotlib.pyplot as plt\n'), ((6025, 6035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6033, 6035), True, 'import matplotlib.pyplot as plt\n'), ((6102, 6115), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (6112, 6115), True, 'import matplotlib.pyplot as plt\n'), ((6330, 6350), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot 3D"""'], {}), "('Plot 3D')\n", (6339, 6350), True, 'import matplotlib.pyplot as plt\n'), ((6356, 6374), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6372, 6374), True, 'import matplotlib.pyplot as plt\n'), ((6380, 6392), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6390, 6392), True, 'import matplotlib.pyplot as plt\n'), ((6398, 6412), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6406, 6412), True, 'import matplotlib.pyplot as plt\n'), ((6420, 6430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6428, 6430), True, 'import matplotlib.pyplot as plt\n'), ((6486, 6499), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (6496, 6499), True, 'import matplotlib.pyplot 
as plt\n'), ((6681, 6712), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot Scatter Color"""'], {}), "('Plot Scatter Color')\n", (6690, 6712), True, 'import matplotlib.pyplot as plt\n'), ((6718, 6736), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6734, 6736), True, 'import matplotlib.pyplot as plt\n'), ((6742, 6754), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6752, 6754), True, 'import matplotlib.pyplot as plt\n'), ((6760, 6774), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6768, 6774), True, 'import matplotlib.pyplot as plt\n'), ((6782, 6792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6790, 6792), True, 'import matplotlib.pyplot as plt\n'), ((6834, 6847), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (6844, 6847), True, 'import matplotlib.pyplot as plt\n'), ((7053, 7085), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot Triple Surface"""'], {}), "('Plot Triple Surface')\n", (7062, 7085), True, 'import matplotlib.pyplot as plt\n'), ((7091, 7109), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7107, 7109), True, 'import matplotlib.pyplot as plt\n'), ((7115, 7127), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7125, 7127), True, 'import matplotlib.pyplot as plt\n'), ((7133, 7147), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7141, 7147), True, 'import matplotlib.pyplot as plt\n'), ((7153, 7163), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (7161, 7163), True, 'import matplotlib.pyplot as plt\n'), ((7175, 7185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7183, 7185), True, 'import matplotlib.pyplot as plt\n'), ((7226, 7239), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (7236, 7239), True, 'import matplotlib.pyplot as plt\n'), ((7447, 7475), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot wire frame"""'], {}), "('Plot wire frame')\n", (7456, 7475), 
True, 'import matplotlib.pyplot as plt\n'), ((7481, 7499), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7497, 7499), True, 'import matplotlib.pyplot as plt\n'), ((7505, 7517), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7515, 7517), True, 'import matplotlib.pyplot as plt\n'), ((7523, 7537), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7531, 7537), True, 'import matplotlib.pyplot as plt\n'), ((7543, 7553), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (7551, 7553), True, 'import matplotlib.pyplot as plt\n'), ((7565, 7575), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7573, 7575), True, 'import matplotlib.pyplot as plt\n'), ((7611, 7639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(25, 25)'}), '(figsize=(25, 25))\n', (7621, 7639), True, 'import matplotlib.pyplot as plt\n'), ((7980, 8016), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""vertical"""'}), "(orientation='vertical')\n", (7992, 8016), True, 'import matplotlib.pyplot as plt\n'), ((8022, 8032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8030, 8032), True, 'import matplotlib.pyplot as plt\n'), ((8084, 8096), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8094, 8096), True, 'import matplotlib.pyplot as plt\n'), ((8197, 8215), 'numpy.arange', 'np.arange', (['(0)', 'x', '(1)'], {}), '(0, x, 1)\n', (8206, 8215), True, 'import numpy as np\n'), ((8225, 8243), 'numpy.arange', 'np.arange', (['(0)', 'y', '(1)'], {}), '(0, y, 1)\n', (8234, 8243), True, 'import numpy as np\n'), ((8256, 8273), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (8267, 8273), True, 'import numpy as np\n'), ((8302, 8325), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (8316, 8325), True, 'import matplotlib.pyplot as plt\n'), ((8552, 8588), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': '"""vertical"""'}), 
"(orientation='vertical')\n", (8564, 8588), True, 'import matplotlib.pyplot as plt\n'), ((8606, 8635), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (8633, 8635), True, 'import matplotlib.pyplot as plt\n'), ((8673, 8683), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8681, 8683), True, 'import matplotlib.pyplot as plt\n'), ((8791, 8820), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (8818, 8820), True, 'import matplotlib.pyplot as plt\n'), ((8858, 8868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8866, 8868), True, 'import matplotlib.pyplot as plt\n'), ((9068, 9080), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9078, 9080), True, 'import matplotlib.pyplot as plt\n'), ((9092, 9124), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 1)', '(0, 0)'], {}), '((1, 1), (0, 0))\n', (9108, 9124), True, 'import matplotlib.pyplot as plt\n'), ((9127, 9146), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (9137, 9146), True, 'import matplotlib.pyplot as plt\n'), ((9279, 9324), 'matplotlib.finance.candlestick_ohlc', 'candlestick_ohlc', (['ax1', 'data.values'], {'width': '(1.0)'}), '(ax1, data.values, width=1.0)\n', (9295, 9324), False, 'from matplotlib.finance import candlestick_ohlc\n'), ((9340, 9369), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (9367, 9369), True, 'import matplotlib.pyplot as plt\n'), ((9407, 9417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9415, 9417), True, 'import matplotlib.pyplot as plt\n'), ((9456, 9468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9466, 9468), True, 'import matplotlib.pyplot as plt\n'), ((9634, 9652), 'numpy.arange', 'np.arange', (['(0)', 'x', '(1)'], {}), '(0, x, 1)\n', (9643, 9652), True, 'import numpy as np\n'), ((9662, 9680), 'numpy.arange', 'np.arange', (['(0)', 'y', '(1)'], 
{}), '(0, y, 1)\n', (9671, 9680), True, 'import numpy as np\n'), ((9693, 9710), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (9704, 9710), True, 'import numpy as np\n'), ((9861, 9890), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (9888, 9890), True, 'import matplotlib.pyplot as plt\n'), ((9928, 9938), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9936, 9938), True, 'import matplotlib.pyplot as plt\n'), ((3662, 3715), 'numpy.sum', 'np.sum', (['((X[1:] - X[:-1]) ** 2 + (Y[1:] - Y[:-1]) ** 2)'], {}), '((X[1:] - X[:-1]) ** 2 + (Y[1:] - Y[:-1]) ** 2)\n', (3668, 3715), True, 'import numpy as np\n'), ((9180, 9202), 'matplotlib.ticker.MaxNLocator', 'mticker.MaxNLocator', (['(6)'], {}), '(6)\n', (9199, 9202), True, 'from matplotlib import ticker as mticker\n'), ((9239, 9270), 'matplotlib.dates.DateFormatter', 'dates.DateFormatter', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (9258, 9270), True, 'import matplotlib.dates as dates\n'), ((4053, 4066), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4059, 4066), True, 'import numpy as np\n'), ((4095, 4108), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4101, 4108), True, 'import numpy as np\n'), ((4510, 4530), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""cool"""'], {}), "('cool')\n", (4522, 4530), True, 'import matplotlib.pyplot as plt\n'), ((5400, 5421), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (5412, 5421), True, 'import matplotlib.pyplot as plt\n'), ((5792, 5802), 'numpy.abs', 'np.abs', (['S1'], {}), '(S1)\n', (5798, 5802), True, 'import numpy as np\n'), ((5815, 5836), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (5827, 5836), True, 'import matplotlib.pyplot as plt\n'), ((6599, 6620), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (6611, 6620), True, 'import matplotlib.pyplot as plt\n'), ((6923, 6944), 
'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (6935, 6944), True, 'import matplotlib.pyplot as plt\n'), ((7317, 7338), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (7329, 7338), True, 'import matplotlib.pyplot as plt\n'), ((7729, 7750), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (7741, 7750), True, 'import matplotlib.pyplot as plt\n'), ((8751, 8772), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""prism"""'], {}), "('prism')\n", (8763, 8772), True, 'import matplotlib.pyplot as plt\n'), ((9822, 9846), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (9834, 9846), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6569), 'numpy.abs', 'np.abs', (['Z1'], {}), '(Z1)\n', (6565, 6569), True, 'import numpy as np\n')] |
import h5py
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip
from .datautils import prepare_input
class LRW(Dataset):
    """Map-style dataset for the LRW corpus (train / val / test splits).

    Sample paths come from ``<datadir>/<dataset>.txt``; the actual frames are
    read from an HDF5 archive that is opened lazily on first access
    (presumably so each DataLoader worker gets its own file handle — confirm).
    """

    def __init__(self, dataset, datadir, h5file, wordToIx, stepSize, lrwaug):
        super(LRW, self).__init__()
        with open(datadir + "/" + dataset + ".txt", "r") as listfile:
            self.datalist = [entry.strip() for entry in listfile.readlines()]
        self.h5file = h5file
        self.dataset = dataset
        self.wordToIx = wordToIx
        self.stepSize = stepSize
        # Augmented pipeline: random crop + random horizontal flip.
        # Plain pipeline: deterministic centre crop. Same normalisation stats.
        if lrwaug:
            ops = [ToTensor(),
                   RandomCrop(112),
                   RandomHorizontalFlip(0.5),
                   Normalize(mean=[0.413621], std=[0.1700239])]
        else:
            ops = [ToTensor(),
                   CenterCrop(112),
                   Normalize(mean=[0.413621], std=[0.1700239])]
        self.transform = transforms.Compose(ops)

    def open_h5(self):
        # Opened on demand rather than in __init__ (see class docstring).
        self.h5 = h5py.File(self.h5file, "r")

    def __getitem__(self, index):
        if not hasattr(self, 'h5'):
            self.open_h5()
        # Train split: spread the raw index over a strided family of list
        # positions and sample one at random (same scheme as the pretrain
        # dataset class).
        if self.dataset == "train":
            base = self.stepSize * np.arange(int(len(self.datalist) / self.stepSize) + 1)
            candidates = base + index
            candidates = candidates[candidates < len(self.datalist)]
            if len(candidates) == 1:
                index = candidates[0]
            else:
                index = np.random.choice(candidates)
        # The word label is the third-from-last path component.
        target = self.datalist[index].split("/")[-3]
        # NOTE(review): the constants below look like offsets of the val/test
        # sections inside the shared h5 file — confirm against its layout.
        if self.dataset == "val":
            index += 488766
        elif self.dataset == "test":
            index += 513766
        inp, wordTrgt = prepare_input(index, self.h5, target, self.wordToIx, self.transform)
        return inp, wordTrgt

    def __len__(self):
        # stepSize caps one training epoch; val/test are iterated in full
        # since their sizes are smaller than stepSize.
        if self.dataset == "train":
            return self.stepSize
        return len(self.datalist)
| [
"numpy.random.choice",
"data.vision_transform.RandomCrop",
"data.vision_transform.ToTensor",
"data.vision_transform.RandomHorizontalFlip",
"h5py.File",
"data.vision_transform.CenterCrop",
"data.vision_transform.Normalize"
] | [((1259, 1286), 'h5py.File', 'h5py.File', (['self.h5file', '"""r"""'], {}), "(self.h5file, 'r')\n", (1268, 1286), False, 'import h5py\n'), ((1730, 1751), 'numpy.random.choice', 'np.random.choice', (['ixs'], {}), '(ixs)\n', (1746, 1751), True, 'import numpy as np\n'), ((839, 849), 'data.vision_transform.ToTensor', 'ToTensor', ([], {}), '()\n', (847, 849), False, 'from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip\n'), ((867, 882), 'data.vision_transform.RandomCrop', 'RandomCrop', (['(112)'], {}), '(112)\n', (877, 882), False, 'from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip\n'), ((900, 925), 'data.vision_transform.RandomHorizontalFlip', 'RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (920, 925), False, 'from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip\n'), ((943, 986), 'data.vision_transform.Normalize', 'Normalize', ([], {'mean': '[0.413621]', 'std': '[0.1700239]'}), '(mean=[0.413621], std=[0.1700239])\n', (952, 986), False, 'from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip\n'), ((1082, 1092), 'data.vision_transform.ToTensor', 'ToTensor', ([], {}), '()\n', (1090, 1092), False, 'from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip\n'), ((1110, 1125), 'data.vision_transform.CenterCrop', 'CenterCrop', (['(112)'], {}), '(112)\n', (1120, 1125), False, 'from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip\n'), ((1143, 1186), 'data.vision_transform.Normalize', 'Normalize', ([], {'mean': '[0.413621]', 'std': '[0.1700239]'}), '(mean=[0.413621], std=[0.1700239])\n', (1152, 1186), False, 'from data.vision_transform import ToTensor, Normalize, RandomCrop, CenterCrop, RandomHorizontalFlip\n')] |
#_*_coding:utf-8_*_
import pandas as pd
import numpy as np
from docxtpl import DocxTemplate,InlineImage
from docx import shared
import os
# Render one Word document per row of the accuracy spreadsheet, embedding the
# high-resolution and updated imagery for each land parcel. Rows whose imagery
# is missing on disk are reported and skipped.
file = pd.ExcelFile("data\小于400三调精度.xlsx")
df = file.parse("小于400三调精度")
x_list = np.array(df).tolist()
for values in x_list:
    # A fresh template per document: InlineImage binds to a specific template.
    tpl = DocxTemplate("插图模板.docx")
    file_name = "data\\文档\\" + str(values[0]) + ".docx"
    gaoqing_name = "data\\高清\\" + values[3] + ".tif"
    update_name = "data\\更新\\" + values[3] + ".tif"
    # Guard clauses: report the missing file path and move on.
    if not os.path.exists(gaoqing_name):
        print(gaoqing_name)
        continue
    if not os.path.exists(update_name):
        print(update_name)
        continue
    context = {
        "col_labels": ["序号", "ID", "行政区代码", "地块序号", "地块面积(平方米)",
                       "地块面积(亩)", "核查结果"],
        "infos": [InlineImage(tpl, gaoqing_name, width=shared.Cm(7.75), height=shared.Cm(7)),
                  InlineImage(tpl, update_name, width=shared.Cm(9.52), height=shared.Cm(6.8))],
        "tbl_contents": [{"cols": values}],
    }
    tpl.render(context)
    tpl.save(file_name)
| [
"os.path.exists",
"docx.shared.Cm",
"numpy.array",
"pandas.ExcelFile",
"docxtpl.DocxTemplate"
] | [((179, 215), 'pandas.ExcelFile', 'pd.ExcelFile', (['"""data\\\\小于400三调精度.xlsx"""'], {}), "('data\\\\小于400三调精度.xlsx')\n", (191, 215), True, 'import pandas as pd\n'), ((257, 269), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (265, 269), True, 'import numpy as np\n'), ((346, 371), 'docxtpl.DocxTemplate', 'DocxTemplate', (['"""插图模板.docx"""'], {}), "('插图模板.docx')\n", (358, 371), False, 'from docxtpl import DocxTemplate, InlineImage\n'), ((614, 642), 'os.path.exists', 'os.path.exists', (['gaoqing_name'], {}), '(gaoqing_name)\n', (628, 642), False, 'import os\n'), ((664, 691), 'os.path.exists', 'os.path.exists', (['update_name'], {}), '(update_name)\n', (678, 691), False, 'import os\n'), ((974, 989), 'docx.shared.Cm', 'shared.Cm', (['(7.75)'], {}), '(7.75)\n', (983, 989), False, 'from docx import shared\n'), ((998, 1010), 'docx.shared.Cm', 'shared.Cm', (['(7)'], {}), '(7)\n', (1007, 1010), False, 'from docx import shared\n'), ((1080, 1095), 'docx.shared.Cm', 'shared.Cm', (['(9.52)'], {}), '(9.52)\n', (1089, 1095), False, 'from docx import shared\n'), ((1104, 1118), 'docx.shared.Cm', 'shared.Cm', (['(6.8)'], {}), '(6.8)\n', (1113, 1118), False, 'from docx import shared\n')] |
# -*- coding: utf-8 -*-
u"""
Created on 2017-1-25
@author: cheng.li
"""
import unittest
import copy
import pickle
import tempfile
import os
import numpy as np
import pandas as pd
from PyFin.Analysis.SeriesValues import SeriesValues
class TestSecurityValues(unittest.TestCase):
    """Unit tests for ``PyFin.Analysis.SeriesValues.SeriesValues``.

    Covers construction, ranking (plain and grouped), normalisation,
    deep-copy, the arithmetic operators (+, -, *, /, ^), residualisation
    and pickling round-trips.
    """

    def testSecurityValuesInit(self):
        """Values must be retrievable by their mapped index names."""
        data = np.array([1, 2, 3])
        index = ['c', 'b', 'a']
        test = SeriesValues(data, dict(zip(index, range(len(index)))))
        expected = dict(zip(index, data))
        for name in test.index():
            self.assertEqual(test[name], expected[name])

    def testSecurityValuesRank(self):
        """rank() orders finite values ascending and leaves NaN in place."""
        data = np.array([3, 2, np.nan, np.nan, 4, 5])
        index = [1, 2, 3, 4, 5, 6]
        data = SeriesValues(data, index)
        test = data.rank()
        expected = SeriesValues(np.array([2, 1, np.nan, np.nan, 3, 4]), dict(zip(index, range(len(index)))))
        for name in test.index():
            # NaN != NaN, so NaN slots need an explicit isnan comparison.
            if np.isnan(test[name]):
                self.assertTrue(np.isnan(expected[name]))
            else:
                self.assertEqual(test[name], expected[name])

    def testSecurityValuesRankWithGroup(self):
        """Grouped rank() must match pandas' groupby().rank() on random data."""
        data = np.random.randn(3000)
        groups = np.random.randint(0, 30, 3000)
        index = list(range(3000))
        data = SeriesValues(data, index)
        groups = SeriesValues(groups, index)
        test = data.rank(groups)
        pd_series = pd.Series(data.values)
        expected = pd_series.groupby(groups.values).rank()
        np.testing.assert_array_almost_equal(test.values, expected.values)

    def testSecurityValuesUnit(self):
        """unit() scales values by the NaN-aware sum of absolute values."""
        data = np.array([3, -2, np.nan, np.nan, 4, 5])
        index = [1, 2, 3, 4, 5, 6]
        test = SeriesValues(data, index)
        test = test.unit()
        expected = SeriesValues(data / np.nansum(np.abs(data)), dict(zip(index, range(len(index)))))
        for name in test.index():
            # NaN entries stay NaN; compare them via isnan, not equality.
            if np.isnan(test[name]):
                self.assertTrue(np.isnan(expected[name]))
            else:
                self.assertEqual(test[name], expected[name])

    def testSecurityValuesDeepCopy(self):
        """copy.deepcopy must preserve both values and the name mapping."""
        data = np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test = SeriesValues(data, index)
        copied = copy.deepcopy(test)
        np.testing.assert_array_equal(test.values, copied.values)
        self.assertEqual(test.name_mapping, copied.name_mapping)

    def testSecurityValuesAdd(self):
        """+ works series/series, series/scalar and scalar/series."""
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        calculated = test1 + test2
        expected = SeriesValues(data1 + data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        calculated = test1 + 2.0
        expected = SeriesValues(data1 + 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # Reflected operand order (scalar on the left) must also work.
        calculated = 2.0 + test2
        expected = SeriesValues(2.0 + data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesSub(self):
        """- works series/series, series/scalar and scalar/series."""
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        calculated = test1 - test2
        expected = SeriesValues(data1 - data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        calculated = test1 - 2.0
        expected = SeriesValues(data1 - 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # Reflected subtraction (scalar on the left) must also work.
        calculated = 2.0 - test2
        expected = SeriesValues(2.0 - data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesMul(self):
        """* works series/series, series/scalar and scalar/series."""
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        calculated = test1 * test2
        expected = SeriesValues(data1 * data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        calculated = test1 * 2.0
        expected = SeriesValues(data1 * 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # Reflected multiplication (scalar on the left) must also work.
        calculated = 2.0 * test2
        expected = SeriesValues(2.0 * data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesXor(self):
        """^ pairs two series column-wise into a 2-column SeriesValues."""
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        calculated = test1 ^ test2
        expected = SeriesValues(np.array([data1, data2]).T, index=index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # Element access by name should return the paired row.
        for name in index:
            np.testing.assert_array_almost_equal(calculated[name], expected[name])

    def testSecurityValuesDiv(self):
        """/ works series/series, series/scalar and scalar/series."""
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        calculated = test1 / test2
        expected = SeriesValues(data1 / data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        calculated = test1 / 2.0
        expected = SeriesValues(data1 / 2.0, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # Reflected division (scalar on the left) must also work.
        calculated = 2.0 / test2
        expected = SeriesValues(2.0 / data2, index)
        np.testing.assert_array_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesRes(self):
        """res() regresses one series against another and keeps the residual."""
        data1 = np.array([3, 2, 2., 1., 4., 5.])
        data2 = -np.array([3, 2, 2., 1., 4., 5.])
        index = [1, 2, 3, 4, 5, 6]
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        # Collinear inputs: the residual must vanish entirely.
        calculated = test1.res(test2)
        expected = SeriesValues(np.zeros(len(data1)), index)
        np.testing.assert_array_almost_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)
        # Random de-meaned inputs: compare against the explicit OLS residual
        # formula data1 - (<data2, data1> / <data2, data2>) * data2.
        data1 = np.random.randn(100)
        data1 = data1 - data1.mean()
        data2 = np.random.randn(100)
        data2 = data2 - data2.mean()
        index = list(range(1, 101))
        test1 = SeriesValues(data1, index)
        test2 = SeriesValues(data2, index)
        calculated = test1.res(test2)
        expected = SeriesValues(data1 - np.dot(data2, data1) / np.dot(data2, data2) * data2, index)
        np.testing.assert_array_almost_equal(calculated.values, expected.values)
        self.assertEqual(calculated.name_mapping, expected.name_mapping)

    def testSecurityValuesPickle(self):
        """A pickle round-trip must preserve values and the name mapping."""
        data = np.array([3, 2, np.nan, np.nan, 4, 5])
        index = [1, 2, 3, 4, 5, 6]
        test = SeriesValues(data, index)
        # delete=False so the file can be reopened for reading on Windows.
        f = tempfile.NamedTemporaryFile('w+b', delete=False)
        pickle.dump(test, f)
        f.close()
        with open(f.name, 'rb') as f2:
            pickled = pickle.load(f2)
            np.testing.assert_array_equal(test.values, pickled.values)
            self.assertEqual(test.name_mapping, pickled.name_mapping)
        os.unlink(f.name)
| [
"pandas.Series",
"numpy.abs",
"numpy.testing.assert_array_almost_equal",
"pickle.dump",
"pickle.load",
"numpy.array",
"numpy.random.randint",
"tempfile.NamedTemporaryFile",
"numpy.isnan",
"os.unlink",
"PyFin.Analysis.SeriesValues.SeriesValues",
"copy.deepcopy",
"numpy.dot",
"numpy.random.r... | [((335, 354), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (343, 354), True, 'import numpy as np\n'), ((648, 686), 'numpy.array', 'np.array', (['[3, 2, np.nan, np.nan, 4, 5]'], {}), '([3, 2, np.nan, np.nan, 4, 5])\n', (656, 686), True, 'import numpy as np\n'), ((738, 763), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data', 'index'], {}), '(data, index)\n', (750, 763), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((1172, 1193), 'numpy.random.randn', 'np.random.randn', (['(3000)'], {}), '(3000)\n', (1187, 1193), True, 'import numpy as np\n'), ((1211, 1241), 'numpy.random.randint', 'np.random.randint', (['(0)', '(30)', '(3000)'], {}), '(0, 30, 3000)\n', (1228, 1241), True, 'import numpy as np\n'), ((1292, 1317), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data', 'index'], {}), '(data, index)\n', (1304, 1317), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((1335, 1362), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['groups', 'index'], {}), '(groups, index)\n', (1347, 1362), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((1418, 1440), 'pandas.Series', 'pd.Series', (['data.values'], {}), '(data.values)\n', (1427, 1440), True, 'import pandas as pd\n'), ((1508, 1574), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['test.values', 'expected.values'], {}), '(test.values, expected.values)\n', (1544, 1574), True, 'import numpy as np\n'), ((1629, 1668), 'numpy.array', 'np.array', (['[3, -2, np.nan, np.nan, 4, 5]'], {}), '([3, -2, np.nan, np.nan, 4, 5])\n', (1637, 1668), True, 'import numpy as np\n'), ((1719, 1744), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data', 'index'], {}), '(data, index)\n', (1731, 1744), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((2140, 2176), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 
1.0, 4.0, 5.0])\n', (2148, 2176), True, 'import numpy as np\n'), ((2224, 2249), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data', 'index'], {}), '(data, index)\n', (2236, 2249), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((2267, 2286), 'copy.deepcopy', 'copy.deepcopy', (['test'], {}), '(test)\n', (2280, 2286), False, 'import copy\n'), ((2296, 2353), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test.values', 'copied.values'], {}), '(test.values, copied.values)\n', (2325, 2353), True, 'import numpy as np\n'), ((2474, 2510), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (2482, 2510), True, 'import numpy as np\n'), ((2609, 2635), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data1', 'index'], {}), '(data1, index)\n', (2621, 2635), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((2652, 2678), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data2', 'index'], {}), '(data2, index)\n', (2664, 2678), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((2734, 2768), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 + data2)', 'index'], {}), '(data1 + data2, index)\n', (2746, 2768), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((2778, 2843), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (2807, 2843), True, 'import numpy as np\n'), ((2970, 3002), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 + 2.0)', 'index'], {}), '(data1 + 2.0, index)\n', (2982, 3002), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((3012, 3077), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (3041, 3077), True, 'import 
numpy as np\n'), ((3204, 3236), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(2.0 + data2)', 'index'], {}), '(2.0 + data2, index)\n', (3216, 3236), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((3246, 3311), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (3275, 3311), True, 'import numpy as np\n'), ((3440, 3476), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (3448, 3476), True, 'import numpy as np\n'), ((3575, 3601), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data1', 'index'], {}), '(data1, index)\n', (3587, 3601), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((3618, 3644), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data2', 'index'], {}), '(data2, index)\n', (3630, 3644), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((3700, 3734), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 - data2)', 'index'], {}), '(data1 - data2, index)\n', (3712, 3734), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((3744, 3809), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (3773, 3809), True, 'import numpy as np\n'), ((3936, 3968), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 - 2.0)', 'index'], {}), '(data1 - 2.0, index)\n', (3948, 3968), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((3978, 4043), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (4007, 4043), True, 'import numpy as np\n'), ((4170, 4202), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(2.0 - data2)', 'index'], {}), '(2.0 
- data2, index)\n', (4182, 4202), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((4212, 4277), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (4241, 4277), True, 'import numpy as np\n'), ((4406, 4442), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (4414, 4442), True, 'import numpy as np\n'), ((4541, 4567), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data1', 'index'], {}), '(data1, index)\n', (4553, 4567), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((4584, 4610), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data2', 'index'], {}), '(data2, index)\n', (4596, 4610), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((4666, 4700), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 * data2)', 'index'], {}), '(data1 * data2, index)\n', (4678, 4700), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((4710, 4775), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (4739, 4775), True, 'import numpy as np\n'), ((4902, 4934), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 * 2.0)', 'index'], {}), '(data1 * 2.0, index)\n', (4914, 4934), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((4944, 5009), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (4973, 5009), True, 'import numpy as np\n'), ((5136, 5168), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(2.0 * data2)', 'index'], {}), '(2.0 * data2, index)\n', (5148, 5168), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((5178, 5243), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (5207, 5243), True, 'import numpy as np\n'), ((5371, 5407), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (5379, 5407), True, 'import numpy as np\n'), ((5506, 5532), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data1', 'index'], {}), '(data1, index)\n', (5518, 5532), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((5549, 5575), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data2', 'index'], {}), '(data2, index)\n', (5561, 5575), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((5694, 5759), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (5723, 5759), True, 'import numpy as np\n'), ((5999, 6035), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (6007, 6035), True, 'import numpy as np\n'), ((6134, 6160), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data1', 'index'], {}), '(data1, index)\n', (6146, 6160), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((6177, 6203), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data2', 'index'], {}), '(data2, index)\n', (6189, 6203), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((6259, 6293), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 / data2)', 'index'], {}), '(data1 / data2, index)\n', (6271, 6293), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((6303, 6368), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (6332, 6368), True, 'import numpy as np\n'), ((6495, 6527), 
'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(data1 / 2.0)', 'index'], {}), '(data1 / 2.0, index)\n', (6507, 6527), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((6537, 6602), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (6566, 6602), True, 'import numpy as np\n'), ((6729, 6761), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['(2.0 / data2)', 'index'], {}), '(2.0 / data2, index)\n', (6741, 6761), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((6771, 6836), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (6800, 6836), True, 'import numpy as np\n'), ((6964, 7000), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (6972, 7000), True, 'import numpy as np\n'), ((7099, 7125), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data1', 'index'], {}), '(data1, index)\n', (7111, 7125), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((7142, 7168), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data2', 'index'], {}), '(data2, index)\n', (7154, 7168), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((7278, 7350), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (7314, 7350), True, 'import numpy as np\n'), ((7441, 7461), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (7456, 7461), True, 'import numpy as np\n'), ((7515, 7535), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (7530, 7535), True, 'import numpy as np\n'), ((7627, 7653), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data1', 
'index'], {}), '(data1, index)\n', (7639, 7653), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((7670, 7696), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data2', 'index'], {}), '(data2, index)\n', (7682, 7696), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((7845, 7917), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['calculated.values', 'expected.values'], {}), '(calculated.values, expected.values)\n', (7881, 7917), True, 'import numpy as np\n'), ((8047, 8085), 'numpy.array', 'np.array', (['[3, 2, np.nan, np.nan, 4, 5]'], {}), '([3, 2, np.nan, np.nan, 4, 5])\n', (8055, 8085), True, 'import numpy as np\n'), ((8137, 8162), 'PyFin.Analysis.SeriesValues.SeriesValues', 'SeriesValues', (['data', 'index'], {}), '(data, index)\n', (8149, 8162), False, 'from PyFin.Analysis.SeriesValues import SeriesValues\n'), ((8176, 8224), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w+b"""'], {'delete': '(False)'}), "('w+b', delete=False)\n", (8203, 8224), False, 'import tempfile\n'), ((8233, 8253), 'pickle.dump', 'pickle.dump', (['test', 'f'], {}), '(test, f)\n', (8244, 8253), False, 'import pickle\n'), ((8500, 8517), 'os.unlink', 'os.unlink', (['f.name'], {}), '(f.name)\n', (8509, 8517), False, 'import os\n'), ((824, 862), 'numpy.array', 'np.array', (['[2, 1, np.nan, np.nan, 3, 4]'], {}), '([2, 1, np.nan, np.nan, 3, 4])\n', (832, 862), True, 'import numpy as np\n'), ((950, 970), 'numpy.isnan', 'np.isnan', (['test[name]'], {}), '(test[name])\n', (958, 970), True, 'import numpy as np\n'), ((1923, 1943), 'numpy.isnan', 'np.isnan', (['test[name]'], {}), '(test[name])\n', (1931, 1943), True, 'import numpy as np\n'), ((2524, 2560), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (2532, 2560), True, 'import numpy as np\n'), ((3490, 3526), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 
4.0, 5.0])\n', (3498, 3526), True, 'import numpy as np\n'), ((4456, 4492), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (4464, 4492), True, 'import numpy as np\n'), ((5421, 5457), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (5429, 5457), True, 'import numpy as np\n'), ((5873, 5943), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['calculated[name]', 'expected[name]'], {}), '(calculated[name], expected[name])\n', (5909, 5943), True, 'import numpy as np\n'), ((6049, 6085), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (6057, 6085), True, 'import numpy as np\n'), ((7014, 7050), 'numpy.array', 'np.array', (['[3, 2, 2.0, 1.0, 4.0, 5.0]'], {}), '([3, 2, 2.0, 1.0, 4.0, 5.0])\n', (7022, 7050), True, 'import numpy as np\n'), ((8334, 8349), 'pickle.load', 'pickle.load', (['f2'], {}), '(f2)\n', (8345, 8349), False, 'import pickle\n'), ((8362, 8420), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test.values', 'pickled.values'], {}), '(test.values, pickled.values)\n', (8391, 8420), True, 'import numpy as np\n'), ((5645, 5669), 'numpy.array', 'np.array', (['[data1, data2]'], {}), '([data1, data2])\n', (5653, 5669), True, 'import numpy as np\n'), ((1004, 1028), 'numpy.isnan', 'np.isnan', (['expected[name]'], {}), '(expected[name])\n', (1012, 1028), True, 'import numpy as np\n'), ((1822, 1834), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (1828, 1834), True, 'import numpy as np\n'), ((1977, 2001), 'numpy.isnan', 'np.isnan', (['expected[name]'], {}), '(expected[name])\n', (1985, 2001), True, 'import numpy as np\n'), ((7776, 7796), 'numpy.dot', 'np.dot', (['data2', 'data1'], {}), '(data2, data1)\n', (7782, 7796), True, 'import numpy as np\n'), ((7799, 7819), 'numpy.dot', 'np.dot', (['data2', 'data2'], {}), '(data2, data2)\n', (7805, 7819), True, 'import 
numpy as np\n')] |
import argparse
import random
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def visualize_voxel(res: np.ndarray, color: str = "blue") -> None:
    """Visualize voxel data in the specified color.

    Args:
        res(np.ndarray) : Boolean matrix of voxel data (3-D occupancy grid).
        color(str) : Surface color of the visualised voxels. Default "blue".

    Return:
        None
    """
    # BUG FIX: the original ignored the ``color`` argument and always filled
    # the map with "blue"; ``dtype=str`` also truncated the value to a single
    # character.  Use an object array filled with the requested color.
    colors = np.full(res.shape, color, dtype=object)
    fig = plt.figure()
    # ``fig.gca(projection='3d')`` was deprecated in matplotlib 3.4 and
    # removed in 3.6; ``add_subplot`` is the supported replacement.
    ax = fig.add_subplot(projection='3d')
    ax.voxels(res, facecolors=colors, edgecolor='k')
    plt.show()
def load_npy(npy_file_path: str) -> np.ndarray:
    """Read a voxel model saved as a ``.npy`` file and return it as an array."""
    voxel_model = np.load(npy_file_path)
    return voxel_model
def quarry(voxel_map: np.ndarray, side_length: int) -> np.ndarray:
    """Keep a random cube of side ``side_length`` from the map; zero the rest.

    Exits the process if ``side_length`` exceeds the shortest map dimension.
    """
    x_len, y_len, z_len = voxel_map.shape
    shortest = min(x_len, y_len, z_len)
    if side_length > shortest:
        print(
            f"Minium length of one side is {shortest}. You should input large integer than it.")
        sys.exit()
    # Choose the cube's origin uniformly at random along each axis.
    x0 = random.randint(0, x_len - side_length)
    y0 = random.randint(0, y_len - side_length)
    z0 = random.randint(0, z_len - side_length)
    x1, y1, z1 = x0 + side_length, y0 + side_length, z0 + side_length
    # Copy only the selected cube into an otherwise all-zero int map.
    masked = np.zeros(voxel_map.shape, dtype=int)
    masked[x0:x1, y0:y1, z0:z1] = voxel_map[x0:x1, y0:y1, z0:z1]
    return masked
def main():
    """CLI entry point: load a voxel ``.npy``, crop a random cube, display it."""
    parser = argparse.ArgumentParser(description='Voxel_model_augment.')
    parser.add_argument('npy_path', help='Voxel npy filepath.')
    parser.add_argument('side_lenght', help='Side lenght augmented voxel model.')
    args = parser.parse_args()
    # Load the model and crop it down to a randomly-placed cube.
    voxel_map = load_npy(args.npy_path)
    side_length = int(args.side_lenght)
    voxel_map_masked = quarry(voxel_map, side_length)
    visualize_voxel(voxel_map_masked)
    # To persist the augmented model instead of only displaying it:
    # np.save("./voxcelized", voxel_map_masked)

if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.figure",
"numpy.zeros",
"sys.exit",
"numpy.full",
"numpy.load",
"random.randint",
"matplotlib.pyplot.show"
] | [((551, 621), 'numpy.full', 'np.full', (['(res.shape[0], res.shape[1], res.shape[2])', '"""blue"""'], {'dtype': 'str'}), "((res.shape[0], res.shape[1], res.shape[2]), 'blue', dtype=str)\n", (558, 621), True, 'import numpy as np\n'), ((655, 667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (665, 667), True, 'import matplotlib.pyplot as plt\n'), ((759, 769), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (767, 769), True, 'import matplotlib.pyplot as plt\n'), ((876, 898), 'numpy.load', 'np.load', (['npy_file_path'], {}), '(npy_file_path)\n', (883, 898), True, 'import numpy as np\n'), ((1258, 1296), 'random.randint', 'random.randint', (['(0)', '(x_len - side_length)'], {}), '(0, x_len - side_length)\n', (1272, 1296), False, 'import random\n'), ((1345, 1383), 'random.randint', 'random.randint', (['(0)', '(y_len - side_length)'], {}), '(0, y_len - side_length)\n', (1359, 1383), False, 'import random\n'), ((1432, 1470), 'random.randint', 'random.randint', (['(0)', '(z_len - side_length)'], {}), '(0, z_len - side_length)\n', (1446, 1470), False, 'import random\n'), ((1543, 1579), 'numpy.zeros', 'np.zeros', (['voxel_map.shape'], {'dtype': 'int'}), '(voxel_map.shape, dtype=int)\n', (1551, 1579), True, 'import numpy as np\n'), ((1797, 1856), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Voxel_model_augment."""'}), "(description='Voxel_model_augment.')\n", (1820, 1856), False, 'import argparse\n'), ((1191, 1201), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1199, 1201), False, 'import sys\n')] |
import pandas as pd
pd.options.display.max_rows = 999
import numpy as np
import poly_brute_force as poly
def constraint_hamiltonian(qubo, num_problem_qubits, num_qubo_qubits):
    """Write penalty terms into ``qubo`` that force each auxiliary qubit to
    equal the product of a pair of problem qubits.

    Each auxiliary qubit a_ij = b_i b_j is enforced with the penalty
    b_i b_j - 2 b_i a_ij - 2 b_j a_ij + 3 a_ij (zero exactly when the
    constraint holds), scaled by ``coeff_scale``.

    Args:
        qubo: square (num_qubo_qubits x num_qubo_qubits) matrix, mutated in place.
        num_problem_qubits: number of original problem qubits b_i.
        num_qubo_qubits: problem qubits plus one auxiliary per (i, j) pair.

    Returns:
        (qubo, qubo_to_aux_index) where qubo_to_aux_index maps a problem-qubit
        pair (i, j) to the matrix index of its auxiliary qubit.
    """
    # construct constraint equations
    # auxiliary qubit a_ij = b_i b_j is enforced by
    # b_i b_j - 2 b_i a_ij - 2 b_j a_ij + 3 a_ij
    coeff_scale = 10000
    coeff_bb = coeff_scale * 1
    coeff_ba = coeff_scale * -2
    coeff_aa = coeff_scale * 3
    # Sanity check: the penalty must vanish when a_ij == b_i == b_j == 1,
    # i.e. coeff_bb + 2*coeff_ba + coeff_aa == 0.
    if coeff_bb + 2. * coeff_ba + coeff_aa == 0:
        pass
    else:
        print("constraint equation poorly defined")
        import sys
        sys.exit()
    # constrain b_i b_j  (upper triangle of the problem-qubit block)
    for index_j in range(num_problem_qubits):
        for index_i in range(index_j):
            qubo[index_i, index_j] = coeff_bb
    # constrain -2 b_i a_ij -2 b_j a_ij
    qubo_to_aux_index = dict()  # maps auxiliary qubit indices (i,j) to qubo matrix index
    # First pass: couple each auxiliary column to the FIRST qubit of its pair.
    # The auxiliary columns are laid out in row-major triangular order:
    # (0,1), (0,2), ..., (0,n-1), (1,2), ..., hence the shrinking
    # ``triangle_counter`` per row.
    accumulate = 0
    row_counter = 0
    triangle_counter = num_problem_qubits - 1
    for index_j in range(num_problem_qubits, num_qubo_qubits):
        qubo[row_counter, index_j] = coeff_ba
        accumulate += 1
        qubo_to_aux_index[(row_counter, accumulate + row_counter)] = index_j
        if accumulate == triangle_counter:
            accumulate = 0
            row_counter += 1
            triangle_counter -= 1
    # Second pass: couple each auxiliary column to the SECOND qubit of its
    # pair, walking the same triangular layout.
    accumulate = 0
    row_counter = 1
    triangle_counter = num_problem_qubits - 1
    for index_row in range(num_problem_qubits):
        for index_ij in range(triangle_counter):
            index_i = row_counter + index_ij
            index_j = num_problem_qubits + index_ij + accumulate
            qubo[index_i, index_j] = coeff_ba
        # NOTE(review): relies on ``index_ij`` surviving the inner loop;
        # would raise NameError if triangle_counter were 0 on the first pass.
        accumulate += index_ij + 1
        row_counter += 1
        triangle_counter -= 1
    # constrain 3 a_ij  (diagonal of the auxiliary block)
    for index_ij in range(num_problem_qubits, num_qubo_qubits):
        qubo[index_ij, index_ij] = coeff_aa
    print("constraint hamiltonian")
    print(pd.DataFrame(qubo))
    return qubo, qubo_to_aux_index
def quadratize(reduced_qubo):
    """Reduce a polynomial binary optimisation problem (up to 4-body terms)
    to a quadratic QUBO matrix via reduction by substitution.

    Each product b_i b_j is replaced by an auxiliary qubit a_ij, enforced by
    the penalty terms written by ``constraint_hamiltonian``.

    Args:
        reduced_qubo: dict with keys 'qubit_residual_dim0'..'qubit_residual_dim4'
            holding coefficient arrays of increasing rank.  NOTE: the arrays
            are zeroed in place as their entries are remapped, as a
            bookkeeping check that nothing was missed.

    Returns:
        (quad_qubo, qubo_constant, qubo_to_aux_index): the quadratic matrix,
        the constant offset, and the pair->auxiliary-index map.
    """
    # quadratizes up to 4-body interactions
    # reduction by substitution (Rosenberg 1975)
    # quadratization in discrete optimization and quantum mechanics
    # section V. A.
    # <NAME> arXiv: 1901.04405
    # initialize quadratized hamiltonian
    num_problem_qubits = len(reduced_qubo['qubit_residual_dim1'])
    # One auxiliary qubit per unordered pair of problem qubits.
    num_auxiliary_qubits = int(num_problem_qubits * (num_problem_qubits - 1) / 2)
    num_qubo_qubits = num_problem_qubits + num_auxiliary_qubits
    quad_qubo = np.zeros((num_qubo_qubits, num_qubo_qubits), float)
    # construct constraint Hamiltonian
    quad_qubo, qubo_to_aux_index = constraint_hamiltonian(quad_qubo, num_problem_qubits, num_qubo_qubits)
    # load reduced qubo into quadratized qubo
    # dim 0: constant term is returned separately, not stored in the matrix
    qubo_constant = reduced_qubo['qubit_residual_dim0'].copy()
    # check if all non-zero entries are remapped
    reduced_qubo['qubit_residual_dim0'] = np.array(0)
    # dim 1: linear terms go on the diagonal
    for index_ij in range(num_problem_qubits):
        quad_qubo[index_ij, index_ij] += reduced_qubo['qubit_residual_dim1'][index_ij]
        # check if all non-zero entries are remapped
        reduced_qubo['qubit_residual_dim1'][index_ij] = 0
    # dim 2: pair terms go in the upper triangle of the problem block
    for index_j in range(num_problem_qubits):
        for index_i in range(index_j):
            quad_qubo[index_i, index_j] += reduced_qubo['qubit_residual_dim2'][index_i, index_j]
            # check if all non-zero entries are remapped
            reduced_qubo['qubit_residual_dim2'][index_i, index_j] = 0
    # dim 3: b_i b_j b_k -> b_i * a_jk  (row = problem qubit, col = auxiliary)
    for index_k in range(num_problem_qubits):
        for index_j in range(index_k):
            for index_i in range(index_j):
                row_index = index_i
                col_index = qubo_to_aux_index[(index_j, index_k)]
                quad_qubo[row_index, col_index] += reduced_qubo['qubit_residual_dim3'][index_i, index_j, index_k]
                # check if all non-zero entries are remapped
                reduced_qubo['qubit_residual_dim3'][index_i, index_j, index_k] = 0
    # dim 4: b_i b_j b_k b_l -> a_ij * a_kl  (both indices auxiliary)
    for index_l in range(num_problem_qubits):
        for index_k in range(index_l):
            for index_j in range(index_k):
                for index_i in range(index_j):
                    row_index = qubo_to_aux_index[(index_i, index_j)]
                    col_index = qubo_to_aux_index[(index_k, index_l)]
                    quad_qubo[row_index, col_index] += reduced_qubo['qubit_residual_dim4'][
                        index_i, index_j, index_k, index_l]
                    # check if all non-zero entries are remapped
                    reduced_qubo['qubit_residual_dim4'][index_i, index_j, index_k, index_l] = 0
    # check: every input coefficient should have been zeroed above
    print("quadratized hamiltonian")
    print(pd.DataFrame(quad_qubo))
    print("qubo constant: ", qubo_constant)
    check = sum([sum(reduced_qubo[key].flatten()) for key in reduced_qubo])
    if check == 0:
        print("check if all non-zero entires are remapped:", True)
    else:
        print("check if all non-zero entires are remapped:", False)
    return quad_qubo, qubo_constant, qubo_to_aux_index
def argmin_QUBO(qubo, qubo_constant):
    """Exhaustively minimise x^T Q x + c over all binary vectors x.

    Returns:
        (best_vector, best_value, all_values, all_vectors): the minimising
        bit string, its energy, and the full enumeration of energies/vectors.
    """
    n_qubits = len(qubo)

    def energy(vec):
        # Quadratic form of the QUBO plus its constant offset.
        return np.einsum('i,ij,j', vec.T, qubo, vec) + qubo_constant

    best_vector = poly.int_to_bin(hilbert_index=0, num_of_qubits=n_qubits)
    best_value = energy(best_vector)
    all_values = []
    all_vectors = []
    # Enumerate every corner of the 2^n hypercube.
    for hilbert_index in range(2 ** n_qubits):
        vec = poly.int_to_bin(hilbert_index, n_qubits)
        val = energy(vec)
        all_values.append(val)
        all_vectors.append(vec)
        if val < best_value:
            best_value = val
            best_vector = vec
    return best_vector, best_value, all_values, all_vectors
def quadratized_inverse_mapping(eigenvector, eigenvalue, basis_map, qubo_to_aux_index):
    """Map a quadratized-QUBO bit string back to real-valued parameters.

    Also prints the recovered parameters, the squared residual, and whether
    each auxiliary qubit satisfies its product constraint.
    """
    num_problem_qubits = len(basis_map['basis']) * len(basis_map['basis_offset'])
    problem_bits = eigenvector[:num_problem_qubits]
    qubits_per_var = len(basis_map['basis'])
    # Each variable is encoded by a window of ``qubits_per_var`` bits.
    result = []
    for eq in range(len(basis_map['basis_coeff'])):
        window = problem_bits[eq * qubits_per_var:(eq + 1) * qubits_per_var]
        value = basis_map['basis_coeff'][eq] * sum(basis_map['basis'] * window) \
            + basis_map['basis_offset'][eq]
        result.append(value)
    print("result:", result)
    print("squared residual:", eigenvalue)
    # check constraint equations
    print("check constraints")
    for j in range(num_problem_qubits):
        for i in range(j):
            print("x_%s%s == x_%s x_%s: " % (i, j, i, j),
                  eigenvector[j] * eigenvector[i] == eigenvector[
                      qubo_to_aux_index[(i, j)]])
    return result
def main(evaluate=False):
    """Build the quadratized QUBO; optionally brute-force and report its minimum.

    Returns:
        (quadratized_qubo, qubo_constant, basis_map, qubo_to_aux_index)
    """
    _, _, reduced_qubo, basis_map = poly.import_QUBO()
    quad_qubo, constant, aux_index = quadratize(reduced_qubo)
    if evaluate:
        # Exhaustive search over all bit strings, then map back to parameters.
        gs_vec, gs_val, _, _ = argmin_QUBO(quad_qubo, constant)
        quadratized_inverse_mapping(gs_vec, gs_val, basis_map, aux_index)
    return quad_qubo, constant, basis_map, aux_index


if __name__ == "__main__":
    main(True)
| [
"poly_brute_force.import_QUBO",
"poly_brute_force.int_to_bin",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"sys.exit",
"pandas.DataFrame"
] | [((2490, 2541), 'numpy.zeros', 'np.zeros', (['(num_qubo_qubits, num_qubo_qubits)', 'float'], {}), '((num_qubo_qubits, num_qubo_qubits), float)\n', (2498, 2541), True, 'import numpy as np\n'), ((2901, 2912), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (2909, 2912), True, 'import numpy as np\n'), ((5252, 5313), 'poly_brute_force.int_to_bin', 'poly.int_to_bin', ([], {'hilbert_index': '(0)', 'num_of_qubits': 'num_of_qubits'}), '(hilbert_index=0, num_of_qubits=num_of_qubits)\n', (5267, 5313), True, 'import poly_brute_force as poly\n'), ((7401, 7419), 'poly_brute_force.import_QUBO', 'poly.import_QUBO', ([], {}), '()\n', (7417, 7419), True, 'import poly_brute_force as poly\n'), ((587, 597), 'sys.exit', 'sys.exit', ([], {}), '()\n', (595, 597), False, 'import sys\n'), ((1920, 1938), 'pandas.DataFrame', 'pd.DataFrame', (['qubo'], {}), '(qubo)\n', (1932, 1938), True, 'import pandas as pd\n'), ((4695, 4718), 'pandas.DataFrame', 'pd.DataFrame', (['quad_qubo'], {}), '(quad_qubo)\n', (4707, 4718), True, 'import pandas as pd\n'), ((5344, 5423), 'numpy.einsum', 'np.einsum', (['"""i,ij,j"""', 'ground_state_eigenvector.T', 'qubo', 'ground_state_eigenvector'], {}), "('i,ij,j', ground_state_eigenvector.T, qubo, ground_state_eigenvector)\n", (5353, 5423), True, 'import numpy as np\n'), ((5636, 5673), 'poly_brute_force.int_to_bin', 'poly.int_to_bin', (['h_idx', 'num_of_qubits'], {}), '(h_idx, num_of_qubits)\n', (5651, 5673), True, 'import poly_brute_force as poly\n'), ((5695, 5748), 'numpy.einsum', 'np.einsum', (['"""i,ij,j"""', 'eigenvector.T', 'qubo', 'eigenvector'], {}), "('i,ij,j', eigenvector.T, qubo, eigenvector)\n", (5704, 5748), True, 'import numpy as np\n')] |
#!/usr/bin/python
import os, sys
import json
import numpy as np
import re
# GITHUB location: https://github.com/minogud2/ARC/blob/master/src/manual_solve.py
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
def solve_b94a9452(x):
    """
    1. Description: Find the coloured square in the grid, crop the grid down
    to it, and swap its two colours.  The square's side length is the number
    of non-zero cells in the first row that contains any; all non-zero cells
    are then reshaped into that square and its two unique values exchanged.
    2. Solved Correctly? Yes, all training and test grids are solved correctly.
    """
    # Side length = count of non-zero cells in the first populated row.
    side = 0
    row = 0
    while row < len(x) and side == 0:
        populated = np.count_nonzero(x[row])
        if populated > side:
            side = populated
        row += 1
    # Drop the zero background and restore the square's shape.
    square = x[np.nonzero(x)].reshape(side, side)
    # Exchange the two colours present.
    uniq = np.unique(square)
    return np.where(square == uniq[0], uniq[1], uniq[0])
def solve_6c434453(x):
    """
    1. Description: Replace every hollow 3x3 blue square with a red plus of
    the same size.  The grid is scanned in flattened order; whenever a blue
    cell anchors the 3x3 hollow-square pattern, that window is overwritten
    with the plus shape.  Note the window offsets assume a 10-column grid,
    as in the original task grids.
    2. Solved Correctly? Yes, all training and test grids are solved correctly.
    """
    original_shape = x.shape
    flat = x.flatten()
    hollow_square = np.array([1, 1, 1, 1, 0, 1, 1, 1, 1])
    red_plus = np.array([0, 2, 0, 2, 2, 2, 0, 2, 0])
    # Flattened offsets of a 3x3 window in a 10-column grid.
    offsets = np.array([0, 1, 2, 10, 11, 12, 20, 21, 22])
    for anchor in range(len(flat)):
        window = offsets + anchor
        if window[-1] >= len(flat):   # window would run off the end
            continue
        if flat[anchor] == 1 and np.array_equal(flat[window], hollow_square):
            flat[window] = red_plus
    return flat.reshape(original_shape)
def solve_1bfc4729(x):
    """
    1. Description: Find the two coloured cells (one per half of the grid),
    flood the rows containing them with their colour, and paint the border
    of each half (top/bottom rows and side columns) with that half's colour.
    Mutates ``x`` in place and returns it.
    2. Solved Correctly? Yes, all training and test grids are solved correctly.
    """
    nums = x[x > 0]                    # the two seed colours, row-major order
    top_colour, bottom_colour = nums[0], nums[1]
    half = len(x) // 2
    # Border rows take the colour of their half.
    x[0] = top_colour
    x[-1] = bottom_colour
    # Flood every row containing a coloured cell with that colour.
    x[np.where(x == top_colour)[0]] = top_colour
    x[np.where(x == bottom_colour)[0]] = bottom_colour
    # Paint the side columns of each half.
    x[:half, 0] = top_colour
    x[:half, -1] = top_colour
    x[half:, 0] = bottom_colour
    x[half:, -1] = bottom_colour
    return x
""" Summary
All solutions were hard coded. Only numpy was imported to resolve each of the problems identified.
All solutions identified were based on my own inferences for solving the problem and would likely
fail when applied to other ARC tests. This in itself shows the complexity of designing algorithms
to resolve such problems.
There are commonalities between function 1 and 2 as there is a need to search for a square shape
within the grid, but solutions applied were very different as the search space was more cluttered
in function 2, whereas in function 1, the dimensions could be reduced immediately. In all examples
the key shapes for transformation could be easily identified i.e. finding the square.
"""
def main():
    # Discover every solve_<taskID> function defined in this module and run
    # it against the matching training-task JSON file.
    pattern = r"solve_([a-f0-9]{8})"
    tasks_solvers = []
    # globals() maps every module-level name to its value.
    for name, value in list(globals().items()):
        match = re.match(pattern, name)
        if match:
            # name fits the pattern, e.g. solve_abcd1234
            tasks_solvers.append((match.group(1), value))
    for task_id, solver in tasks_solvers:
        # for each task, read the data and call test()
        json_filename = os.path.join("..", "data", "training", task_id + ".json")
        data = read_ARC_JSON(json_filename)
        test(task_id, solver, data)
def read_ARC_JSON(filepath):
    """Read an ARC task JSON file and return its grids as numpy arrays.

    Args:
        filepath: path to a task JSON file with 'train' and 'test' lists of
            {'input': grid, 'output': grid} pairs, each grid a list of lists
            of ints.

    Returns:
        Tuple (train_input, train_output, test_input, test_output), each a
        list of np.ndarray grids.
    """
    # FIX: use a context manager so the file handle is closed promptly;
    # the original json.load(open(filepath)) leaked the handle.
    with open(filepath) as f:
        data = json.load(f)
    # Convert each list-of-lists grid to a numpy array.
    train_input = [np.array(pair['input']) for pair in data['train']]
    train_output = [np.array(pair['output']) for pair in data['train']]
    test_input = [np.array(pair['input']) for pair in data['test']]
    test_output = [np.array(pair['output']) for pair in data['test']]
    return (train_input, train_output, test_input, test_output)
def test(taskID, solve, data):
    """Apply the given solve() function to every train and test example of
    one task, printing a comparison for each."""
    print(taskID)
    train_input, train_output, test_input, test_output = data
    for label, inputs, outputs in (("Training grids", train_input, train_output),
                                   ("Test grids", test_input, test_output)):
        print(label)
        for x, y in zip(inputs, outputs):
            show_result(x, y, solve(x))
def show_result(x, y, yhat):
    """Print the input grid, the expected output, our output, and whether
    they match (shape mismatch reported explicitly)."""
    for label, grid in (("Input", x),
                        ("Correct output", y),
                        ("Our output", yhat)):
        print(label)
        print(grid)
    print("Correct?")
    if y.shape != yhat.shape:
        print(f"False. Incorrect shape: {y.shape} v {yhat.shape}")
    else:
        print(np.all(y == yhat))
if __name__ == "__main__": main()
| [
"numpy.unique",
"numpy.where",
"re.match",
"os.path.join",
"numpy.count_nonzero",
"numpy.array",
"numpy.nonzero",
"numpy.all"
] | [((1340, 1352), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (1349, 1352), True, 'import numpy as np\n'), ((1361, 1428), 'numpy.where', 'np.where', (['(x == unique_values[0])', 'unique_values[1]', 'unique_values[0]'], {}), '(x == unique_values[0], unique_values[1], unique_values[0])\n', (1369, 1428), True, 'import numpy as np\n'), ((2175, 2212), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 0, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 0, 1, 1, 1, 1])\n', (2183, 2212), True, 'import numpy as np\n'), ((2245, 2282), 'numpy.array', 'np.array', (['[0, 2, 0, 2, 2, 2, 0, 2, 0]'], {}), '([0, 2, 0, 2, 2, 2, 0, 2, 0])\n', (2253, 2282), True, 'import numpy as np\n'), ((2350, 2393), 'numpy.array', 'np.array', (['[0, 1, 2, 10, 11, 12, 20, 21, 22]'], {}), '([0, 1, 2, 10, 11, 12, 20, 21, 22])\n', (2358, 2393), True, 'import numpy as np\n'), ((4993, 5010), 're.match', 're.match', (['p', 'name'], {}), '(p, name)\n', (5001, 5010), False, 'import re\n'), ((5352, 5390), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""training"""'], {}), "('..', 'data', 'training')\n", (5364, 5390), False, 'import os, sys\n'), ((5415, 5452), 'os.path.join', 'os.path.join', (['directory', "(ID + '.json')"], {}), "(directory, ID + '.json')\n", (5427, 5452), False, 'import os, sys\n'), ((6010, 6045), 'numpy.array', 'np.array', (["data['train'][i]['input']"], {}), "(data['train'][i]['input'])\n", (6018, 6045), True, 'import numpy as np\n'), ((6102, 6138), 'numpy.array', 'np.array', (["data['train'][i]['output']"], {}), "(data['train'][i]['output'])\n", (6110, 6138), True, 'import numpy as np\n'), ((6193, 6227), 'numpy.array', 'np.array', (["data['test'][i]['input']"], {}), "(data['test'][i]['input'])\n", (6201, 6227), True, 'import numpy as np\n'), ((6282, 6317), 'numpy.array', 'np.array', (["data['test'][i]['output']"], {}), "(data['test'][i]['output'])\n", (6290, 6317), True, 'import numpy as np\n'), ((1083, 1105), 'numpy.count_nonzero', 'np.count_nonzero', (['x[i]'], {}), '(x[i])\n', 
(1099, 1105), True, 'import numpy as np\n'), ((1145, 1167), 'numpy.count_nonzero', 'np.count_nonzero', (['x[i]'], {}), '(x[i])\n', (1161, 1167), True, 'import numpy as np\n'), ((3453, 3475), 'numpy.where', 'np.where', (['(x == nums[0])'], {}), '(x == nums[0])\n', (3461, 3475), True, 'import numpy as np\n'), ((3554, 3576), 'numpy.where', 'np.where', (['(x == nums[1])'], {}), '(x == nums[1])\n', (3562, 3576), True, 'import numpy as np\n'), ((7174, 7191), 'numpy.all', 'np.all', (['(y == yhat)'], {}), '(y == yhat)\n', (7180, 7191), True, 'import numpy as np\n'), ((1226, 1239), 'numpy.nonzero', 'np.nonzero', (['x'], {}), '(x)\n', (1236, 1239), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.ERROR)
################################
# wrangling methods used in:
# - income_distribution.ipynb
################################
def get_persons_per_income_group(df):
    """Convert cumulative counts (e.g. ">=50k") into 25k incremental bins.

    Mutates df["VALUE"] in place: every interior entry becomes the difference
    between itself and the next cumulative entry; first and last rows are
    left untouched.  Returns the same DataFrame.
    """
    values = df["VALUE"].values
    values[1:-1] = values[1:-1] - values[2:]
    return df
def create_income_bins(y) -> np.array:
    """Collapse the 13 raw income bins into 8 plotting bins.

    The first five bins are summed, the next two summed, the rest kept
    individually.  An empty input yields 13 NaNs; any other length is
    returned unchanged.
    """
    raw_income_bins = 13
    logger.info("create_income_bins()")
    logger.debug(f"y: /n {y}")
    if len(y) == raw_income_bins:
        # Sum indices 0:5 and 5:7, keep the remaining bins as-is.
        return np.add.reduceat(y, [0, 5, 7, 8, 9, 10, 11, 12])
    if len(y) == 0:
        return np.array([np.nan] * 13)
    return y
def add_gaps(y):
    """Insert NaN placeholders before original indices 4 and 7 so plots show
    visual breaks between the grouped bins."""
    return np.insert(y, [4, 7], [np.nan])
def normalize_plot_data(y) -> np.array:
    """Scale ``y`` so its entries sum to 1 (counts -> shares)."""
    total = np.sum(y)
    return np.divide(y, total)
def format_hist_data(df) -> np.array:
    """Turn the cumulative VALUE column of ``df`` into normalised per-bin shares."""
    per_bin = get_persons_per_income_group(df)
    return normalize_plot_data(per_bin.VALUE.values)
def preprocess_income_bin_data(df) -> tuple:
    """Process the data for plotting.

    Returns
    ---------
    tuple of np.arrays
        (per-bin shares with NaN gaps, cumulative shares with NaN gaps)
    """
    shares = create_income_bins(format_hist_data(df))
    # Cumulative series must be computed before the NaN gaps are inserted.
    cumulative = np.cumsum(shares)
    return add_gaps(shares), add_gaps(cumulative)
def subset_plot_data_for_income_bins(df) -> pd.DataFrame:
    """Used in make_data.py to subset the raw data.

    Keeps only the plotting columns and the cumulative income-group rows
    listed below (under $5,000 plus the "$X and over" groups).
    """
    cols_to_keep = ['REF_DATE',
                    'GEO',
                    'Sex',
                    'Age group',
                    'Persons with income',
                    'SCALAR_FACTOR',
                    'VALUE',
                    ]
    income_to_plot = ["Persons with income under $5,000",
                      "Persons with income of $5,000 and over",
                      "Persons with income of $10,000 and over",
                      "Persons with income of $15,000 and over",
                      "Persons with income of $20,000 and over",
                      "Persons with income of $25,000 and over",
                      "Persons with income of $35,000 and over",
                      "Persons with income of $50,000 and over",
                      "Persons with income of $75,000 and over",
                      "Persons with income of $100,000 and over",
                      "Persons with income of $150,000 and over",
                      "Persons with income of $200,000 and over",
                      "Persons with income of $250,000 and over"]
    subset = df.loc[:, cols_to_keep]
    # Inlined row filter (equivalent to get_income_to_plot_for_hist).
    return subset[subset["Persons with income"].isin(income_to_plot)]
###############################
# wrangling methods used in:
# - median_income.ipynb
###############################
def subset_rows(df, column, value) -> np.ndarray:
    """
    Boolean mask (as an ndarray) of rows where ``df[column] == value``,
    for the https://doi.org/10.25318/1110000801-eng dataset.

    df : pd.DataFrame
    column : str
    value : str
    """
    return (df[column] == value).values
def subset_REF_DATE(df, year):
    # Mask of rows matching the reference year.
    return subset_rows(df, "REF_DATE", year)
def subset_GEO(df, geo):
    # Mask of rows matching the geography (e.g. "Canada" or a province).
    return subset_rows(df, "GEO", geo)
def subset_Sex(df, sex):
    # ``sex`` is a LIST of accepted values, hence .isin rather than the
    # equality helper; note this returns a pd.Series mask, unlike the
    # ndarray the other helpers return.
    # return subset_rows(df, "Sex", sex)
    logger.debug(f"sex: {sex}")
    return df["Sex"].isin(sex)
def subset_Age(df, age):
    # Mask of rows matching the age-group label.
    return subset_rows(df, "Age group", age)
def subset_year_age_sex_geo(df, year=None, age=None, sex=None, geo=None):
    """Rows of ``df`` matching all four of year, geography, sex and age group."""
    combined = (subset_REF_DATE(df, year)
                & subset_GEO(df, geo)
                & subset_Sex(df, sex)
                & subset_Age(df, age))
    return df[combined]
def get_income_to_plot_for_hist(df, income_to_plot):
    """Rows whose 'Persons with income' label is one of ``income_to_plot``."""
    keep = df["Persons with income"].isin(income_to_plot)
    return df[keep]
def get_income_to_plot_for_scatter(df, income_to_plot):
    """Rows whose 'Statistics' label is one of ``income_to_plot``."""
    keep = df["Statistics"].isin(income_to_plot)
    return df[keep]
def subset_plot_data_for_scatter_plot(
        df, year, age, sex, geo,
        income_source, income_to_plot, cols_to_keep):
    """Keep ``cols_to_keep``, filter by year/age/sex/geo, then by income
    source and statistic, for the scatter plot."""
    subset = df.loc[:, cols_to_keep]
    subset = subset_year_age_sex_geo(subset, year, age, sex, geo)
    subset = subset[subset["Income source"].isin(income_source)]
    return get_income_to_plot_for_scatter(subset, income_to_plot)
def subset_for_scatter_plot(df, income_source, income_to_plot, cols_to_keep):
    """Keep ``cols_to_keep`` and the rows matching a single income source and
    a single statistic (scalar equality rather than list membership)."""
    subset = df.loc[:, cols_to_keep]
    matches = (subset["Income source"] == income_source) \
        & (subset["Statistics"] == income_to_plot)
    return subset[matches]
| [
"logging.getLogger",
"numpy.insert",
"logging.basicConfig",
"numpy.sum",
"numpy.array",
"numpy.cumsum",
"numpy.add.reduceat"
] | [((64, 91), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (81, 91), False, 'import logging\n'), ((161, 195), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT'}), '(format=FORMAT)\n', (180, 195), False, 'import logging\n'), ((1010, 1040), 'numpy.insert', 'np.insert', (['y', '[4, 7]', '[np.nan]'], {}), '(y, [4, 7], [np.nan])\n', (1019, 1040), True, 'import numpy as np\n'), ((1531, 1548), 'numpy.cumsum', 'np.cumsum', (['y_hist'], {}), '(y_hist)\n', (1540, 1548), True, 'import numpy as np\n'), ((806, 853), 'numpy.add.reduceat', 'np.add.reduceat', (['y', '[0, 5, 7, 8, 9, 10, 11, 12]'], {}), '(y, [0, 5, 7, 8, 9, 10, 11, 12])\n', (821, 853), True, 'import numpy as np\n'), ((1115, 1124), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (1121, 1124), True, 'import numpy as np\n'), ((899, 922), 'numpy.array', 'np.array', (['([np.nan] * 13)'], {}), '([np.nan] * 13)\n', (907, 922), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
from pyscf.nao.m_fermi_energy import fermi_energy as get_fermi_energy
class KnowValues(unittest.TestCase):
    """Checks of the Fermi-level search (get_fermi_energy) against known
    electron counts and reference energies, covering spin-saturated,
    spin-resolved, and k-point-resolved eigenvalue arrays.

    NOTE: np.row_stack (deprecated; removed in NumPy 2.0) has been
    replaced by the equivalent np.vstack throughout.
    """

    def test_fermi_energy_spin_saturated(self):
        """Fermi level for a 1-D spectrum, spin-saturated (2 e- per state)."""
        ee = np.arange(-10.13, 100.0, 0.1)
        nelec = 5.0
        telec = 0.01
        fermi_energy = get_fermi_energy(ee, nelec, telec)
        occ = 2.0 * fermi_dirac_occupations(telec, ee, fermi_energy)
        self.assertAlmostEqual(occ.sum(), 5.0)
        self.assertAlmostEqual(fermi_energy, -9.93)

    def test_fermi_energy_spin_resolved_spin1(self):
        """Same spectrum shaped (1, n): one explicit spin channel."""
        ee = np.linspace(-10.13, 99.97, 1102).reshape((1, 1102))
        nelec = 5.0
        telec = 0.01
        fermi_energy = get_fermi_energy(ee, nelec, telec)
        occ = 2.0 * fermi_dirac_occupations(telec, ee, fermi_energy)
        self.assertAlmostEqual(occ.sum(), 5.0)
        self.assertAlmostEqual(fermi_energy, -9.93)

    def test_fermi_energy_spin_resolved(self):
        """Two spin channels, odd electron count."""
        ee = np.vstack((np.linspace(-10.3, 100.0, 1003),
                        np.linspace(-10.0, 100.0, 1003)))
        nelec = 11.0
        telec = 0.02
        fermi_energy = get_fermi_energy(ee, nelec, telec)
        occ = fermi_dirac_occupations(telec, ee, fermi_energy)
        self.assertAlmostEqual(occ.sum(), 11.0)
        self.assertAlmostEqual(fermi_energy, -9.60016955367)

    def test_fermi_energy_spin_resolved_even(self):
        """Two spin channels, even electron count."""
        ee = np.vstack((np.linspace(-10.3, 100.0, 1003),
                        np.linspace(-10.0, 100.0, 1003)))
        nelec = 20.0
        telec = 0.02
        fermi_energy = get_fermi_energy(ee, nelec, telec)
        occ = fermi_dirac_occupations(telec, ee, fermi_energy)
        self.assertAlmostEqual(occ.sum(), 20.0)
        self.assertAlmostEqual(fermi_energy, -9.10544404859)

    def test_fermi_energy_spin_resolved_even_kpoints(self):
        """Four k-points, one spin channel: shape (nkpts, nspin, n)."""
        ee = np.vstack((np.linspace(-10.1, 100.0, 1003),
                        np.linspace(-10.2, 100.0, 1003),
                        np.linspace(-10.3, 100.0, 1003),
                        np.linspace(-10.4, 100.0, 1003))).reshape((4, 1, 1003))
        nelec = 20.0
        telec = 0.02
        nkpts = ee.shape[0]
        nspin = ee.shape[-2]
        fermi_energy = get_fermi_energy(ee, nelec, telec)
        # spin-degeneracy factor: 2 for nspin=1, 1 for nspin=2
        occ = (3.0 - nspin) * fermi_dirac_occupations(telec, ee, fermi_energy)
        self.assertAlmostEqual(occ.sum() / nkpts, 20.0)
        self.assertAlmostEqual(fermi_energy, -9.2045998319213016)

    def test_fermi_energy_spin_resolved_even_kpoints_spin2(self):
        """Two k-points, two spin channels: shape (nkpts, nspin, n)."""
        ee = np.vstack((np.linspace(-10.1, 100.0, 1003),
                        np.linspace(-10.2, 100.0, 1003),
                        np.linspace(-10.3, 100.0, 1003),
                        np.linspace(-10.4, 100.0, 1003))).reshape((2, 2, 1003))
        nelec = 20.0
        telec = 0.02
        nkpts = ee.shape[0]
        nspin = ee.shape[-2]
        fermi_energy = get_fermi_energy(ee, nelec, telec)
        occ = (3.0 - nspin) * fermi_dirac_occupations(telec, ee, fermi_energy)
        self.assertAlmostEqual(occ.sum() / nkpts, 20.0)
        self.assertAlmostEqual(fermi_energy, -9.2045998319213016)
if __name__ == "__main__" : unittest.main()
| [
"numpy.linspace",
"unittest.main",
"pyscf.nao.m_fermi_dirac.fermi_dirac_occupations",
"pyscf.nao.m_fermi_energy.fermi_energy",
"numpy.arange"
] | [((3964, 3979), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3977, 3979), False, 'import os, unittest, numpy as np\n'), ((365, 394), 'numpy.arange', 'np.arange', (['(-10.13)', '(100.0)', '(0.1)'], {}), '(-10.13, 100.0, 0.1)\n', (374, 394), True, 'import os, unittest, numpy as np\n'), ((475, 509), 'pyscf.nao.m_fermi_energy.fermi_energy', 'get_fermi_energy', (['ee', 'nelec', 'telec'], {}), '(ee, nelec, telec)\n', (491, 509), True, 'from pyscf.nao.m_fermi_energy import fermi_energy as get_fermi_energy\n'), ((984, 1018), 'pyscf.nao.m_fermi_energy.fermi_energy', 'get_fermi_energy', (['ee', 'nelec', 'telec'], {}), '(ee, nelec, telec)\n', (1000, 1018), True, 'from pyscf.nao.m_fermi_energy import fermi_energy as get_fermi_energy\n'), ((1527, 1561), 'pyscf.nao.m_fermi_energy.fermi_energy', 'get_fermi_energy', (['ee', 'nelec', 'telec'], {}), '(ee, nelec, telec)\n', (1543, 1561), True, 'from pyscf.nao.m_fermi_energy import fermi_energy as get_fermi_energy\n'), ((1572, 1620), 'pyscf.nao.m_fermi_dirac.fermi_dirac_occupations', 'fermi_dirac_occupations', (['telec', 'ee', 'fermi_energy'], {}), '(telec, ee, fermi_energy)\n', (1595, 1620), False, 'from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n'), ((2078, 2112), 'pyscf.nao.m_fermi_energy.fermi_energy', 'get_fermi_energy', (['ee', 'nelec', 'telec'], {}), '(ee, nelec, telec)\n', (2094, 2112), True, 'from pyscf.nao.m_fermi_energy import fermi_energy as get_fermi_energy\n'), ((2123, 2171), 'pyscf.nao.m_fermi_dirac.fermi_dirac_occupations', 'fermi_dirac_occupations', (['telec', 'ee', 'fermi_energy'], {}), '(telec, ee, fermi_energy)\n', (2146, 2171), False, 'from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n'), ((2842, 2876), 'pyscf.nao.m_fermi_energy.fermi_energy', 'get_fermi_energy', (['ee', 'nelec', 'telec'], {}), '(ee, nelec, telec)\n', (2858, 2876), True, 'from pyscf.nao.m_fermi_energy import fermi_energy as get_fermi_energy\n'), ((3641, 3675), 'pyscf.nao.m_fermi_energy.fermi_energy', 
'get_fermi_energy', (['ee', 'nelec', 'telec'], {}), '(ee, nelec, telec)\n', (3657, 3675), True, 'from pyscf.nao.m_fermi_energy import fermi_energy as get_fermi_energy\n'), ((524, 572), 'pyscf.nao.m_fermi_dirac.fermi_dirac_occupations', 'fermi_dirac_occupations', (['telec', 'ee', 'fermi_energy'], {}), '(telec, ee, fermi_energy)\n', (547, 572), False, 'from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n'), ((1033, 1081), 'pyscf.nao.m_fermi_dirac.fermi_dirac_occupations', 'fermi_dirac_occupations', (['telec', 'ee', 'fermi_energy'], {}), '(telec, ee, fermi_energy)\n', (1056, 1081), False, 'from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n'), ((2899, 2947), 'pyscf.nao.m_fermi_dirac.fermi_dirac_occupations', 'fermi_dirac_occupations', (['telec', 'ee', 'fermi_energy'], {}), '(telec, ee, fermi_energy)\n', (2922, 2947), False, 'from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n'), ((3698, 3746), 'pyscf.nao.m_fermi_dirac.fermi_dirac_occupations', 'fermi_dirac_occupations', (['telec', 'ee', 'fermi_energy'], {}), '(telec, ee, fermi_energy)\n', (3721, 3746), False, 'from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations\n'), ((853, 885), 'numpy.linspace', 'np.linspace', (['(-10.13)', '(99.97)', '(1102)'], {}), '(-10.13, 99.97, 1102)\n', (864, 885), True, 'import os, unittest, numpy as np\n'), ((1392, 1423), 'numpy.linspace', 'np.linspace', (['(-10.3)', '(100.0)', '(1003)'], {}), '(-10.3, 100.0, 1003)\n', (1403, 1423), True, 'import os, unittest, numpy as np\n'), ((1425, 1456), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(100.0)', '(1003)'], {}), '(-10.0, 100.0, 1003)\n', (1436, 1456), True, 'import os, unittest, numpy as np\n'), ((1943, 1974), 'numpy.linspace', 'np.linspace', (['(-10.3)', '(100.0)', '(1003)'], {}), '(-10.3, 100.0, 1003)\n', (1954, 1974), True, 'import os, unittest, numpy as np\n'), ((1976, 2007), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(100.0)', '(1003)'], {}), '(-10.0, 100.0, 1003)\n', (1987, 2007), True, 'import 
os, unittest, numpy as np\n'), ((2502, 2533), 'numpy.linspace', 'np.linspace', (['(-10.1)', '(100.0)', '(1003)'], {}), '(-10.1, 100.0, 1003)\n', (2513, 2533), True, 'import os, unittest, numpy as np\n'), ((2559, 2590), 'numpy.linspace', 'np.linspace', (['(-10.2)', '(100.0)', '(1003)'], {}), '(-10.2, 100.0, 1003)\n', (2570, 2590), True, 'import os, unittest, numpy as np\n'), ((2615, 2646), 'numpy.linspace', 'np.linspace', (['(-10.3)', '(100.0)', '(1003)'], {}), '(-10.3, 100.0, 1003)\n', (2626, 2646), True, 'import os, unittest, numpy as np\n'), ((2671, 2702), 'numpy.linspace', 'np.linspace', (['(-10.4)', '(100.0)', '(1003)'], {}), '(-10.4, 100.0, 1003)\n', (2682, 2702), True, 'import os, unittest, numpy as np\n'), ((3301, 3332), 'numpy.linspace', 'np.linspace', (['(-10.1)', '(100.0)', '(1003)'], {}), '(-10.1, 100.0, 1003)\n', (3312, 3332), True, 'import os, unittest, numpy as np\n'), ((3358, 3389), 'numpy.linspace', 'np.linspace', (['(-10.2)', '(100.0)', '(1003)'], {}), '(-10.2, 100.0, 1003)\n', (3369, 3389), True, 'import os, unittest, numpy as np\n'), ((3414, 3445), 'numpy.linspace', 'np.linspace', (['(-10.3)', '(100.0)', '(1003)'], {}), '(-10.3, 100.0, 1003)\n', (3425, 3445), True, 'import os, unittest, numpy as np\n'), ((3470, 3501), 'numpy.linspace', 'np.linspace', (['(-10.4)', '(100.0)', '(1003)'], {}), '(-10.4, 100.0, 1003)\n', (3481, 3501), True, 'import os, unittest, numpy as np\n')] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Second cut at astrometry fitting for UCD project.
#
# <NAME>
# Created: 2021-08-30
# Last modified: 2021-08-30
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
#logging.basicConfig(level=logging.DEBUG)
## Module-wide logger: INFO by default; use the commented lines for DEBUG.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
## Current version:
__version__ = "0.1.0"
## Modules:
import os
import sys
import time
import numpy as np
from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))
import theil_sen as ts
## Useful stats routines:
def calc_ls_med_MAD(a, axis=None):
    """Return median and median absolute deviation of *a* (scaled to normal)."""
    center = np.median(a, axis=axis)
    # 1.4826... rescales MAD to the stdev of a normal distribution:
    spread = 1.482602218 * np.median(np.abs(a - center), axis=axis)
    return center, spread
## Median absolute residual:
def calc_MAR(residuals, scalefactor=1.482602218):
    """Return median absolute residual (MAR) of input array. By default,
    the result is scaled to the normal distribution."""
    abs_residuals = np.abs(residuals)
    return scalefactor * np.median(abs_residuals)
##--------------------------------------------------------------------------##
##------------------ Astrometry Fitting (5-par) ----------------##
##--------------------------------------------------------------------------##
# Unit conversions: radians -> arcseconds / milliarcseconds.
_ARCSEC_PER_RADIAN = 180. * 3600.0 / np.pi
_MAS_PER_RADIAN = _ARCSEC_PER_RADIAN * 1e3
class AstFit(object):
    """
    This module provides astrometric fitting capability. Internally, a
    5-parameter model is maintained in a numpy array. Its contents are:
    * RA (radians) at reference epoch
    * DE (radians) at reference epoch
    * pmRA (radians / yr). [this is pmRA* / cos(dec)]
    * pmDE (radians / yr)
    * parallax (radians)
    """

    # Column names required of an observer ephemeris / of the data recarray:
    _need_eph_keys = ['jdtdb', 'x', 'y', 'z']
    _need_data_keys = ['jdtdb', 'dra', 'dde', 'obs_x', 'obs_y', 'obs_z']
    # Unit-conversion shortcuts (radians -> arcsec / mas):
    _asec_per_rad = _ARCSEC_PER_RADIAN
    _mas_per_rad = _MAS_PER_RADIAN
    def __init__(self):
        """Initialize empty fitter state; call setup() before fitting."""
        self._jd_tdb = None           # observation epochs (JD TDB)
        self._dt_yrs = None           # epoch offsets from ref epoch, years
        self.obs_eph = None           # observer ephemeris (currently unused)
        self.ref_tdb = None           # reference epoch (JD TDB)
        self.inliers = None           # boolean mask of points kept in fit
        self.rweight = None           # per-point robust weights
        self._is_set = False          # True once setup() validated inputs
        self._chiexp = 2              # exponent N in |resid/err|**N penalty
        self._can_iterate = False     # True once an initial fit exists
        return
def set_exponent(self, exponent=2):
"""
Choose exponent used in penalty function (N below). The solver seeks
to minimize the sum over data points of:
((obs - model) / err)**N
"""
#Setting N=2 behaves like Chi-squared. Setting N=1 minimizes total
#absolute deviation
self._chiexp = exponent
return
    def setup(self, data, reject_outliers=True,
                    jd_tdb_ref=None, RA_err=None, DE_err=None):
        """
        Validate and ingest a data set prior to fitting.

        data            -- recarray providing the columns listed in
                           self._need_data_keys: epoch (JD TDB), RA/DE in
                           degrees, and observer position obs_x/y/z
        reject_outliers -- enable sigma-clipping during fit_bestpars()
        jd_tdb_ref      -- reference epoch; defaults to the first data point
        RA_err, DE_err  -- per-point uncertainty arrays; when omitted they
                           are estimated later from the residual scatter

        Returns True on success, False for malformed input.
        """
        self._is_rdy = False
        if not all([isinstance(data[x], np.ndarray) \
                        for x in self._need_data_keys]):
            sys.stderr.write("Incomplete data set!\n")
            sys.stderr.write("Required columns include:\n")
            sys.stderr.write("--> %s\n" % str(self._need_data_keys))
            return False
        self._outrej = reject_outliers
        # every point starts as an unweighted inlier:
        self.inliers = np.ones(len(data), dtype='bool')
        self.rweight = np.ones(len(data), dtype='float')
        self.dataset = np.copy(data)
        if jd_tdb_ref:
            self.ref_tdb = jd_tdb_ref
        else:
            self.ref_tdb = data['jdtdb'][0]
        self._dt_yrs = (self.dataset['jdtdb'] - self.ref_tdb) / 365.25
        self._RA_rad = np.radians(self.dataset['dra'])
        self._DE_rad = np.radians(self.dataset['dde'])
        self._RA_err = RA_err
        self._DE_err = DE_err
        self._need_resid_errors = False
        if not isinstance(RA_err, np.ndarray):
            sys.stderr.write("WARNING: RA_err not given, using estimated\n")
            self._need_resid_errors = True
        if not isinstance(DE_err, np.ndarray):
            sys.stderr.write("WARNING: DE_err not given, using estimated\n")
            self._need_resid_errors = True
        self._is_set = True
        self._can_iterate = False
        return True
#def set_ref_time(self, t_ref):
# self.ref_time = t_ref
# return
@staticmethod
def _calc_parallax_factors(RA_rad, DE_rad, X_au, Y_au, Z_au):
"""Compute parallax factors in arcseconds. The RA component has
been divided by cos(dec) so that it can be used directly for
residual minimization."""
sinRA, cosRA = np.sin(RA_rad), np.cos(RA_rad)
sinDE, cosDE = np.sin(DE_rad), np.cos(DE_rad)
ra_factor = (X_au * sinRA - Y_au * cosRA) / cosDE
de_factor = X_au * cosRA * sinDE \
+ Y_au * sinRA * sinDE \
- Z_au * cosDE
return ra_factor, de_factor
#def ts_fit_coord(self, time_vals, coo_vals):
    @staticmethod
    def ts_fit_radec_pm(t_yrs, RA_rad, DE_rad, plx_as=0, weighted=False):
        # Robust Theil-Sen straight-line fits for each coordinate, packed as
        # [RA0, DE0, pmRA, pmDE, parallax].
        # NOTE(review): assumes ts.linefit returns [intercept, slope] --
        # confirm against the theil_sen module.
        ts_ra_model = ts.linefit(t_yrs, RA_rad, weighted=weighted)
        ts_de_model = ts.linefit(t_yrs, DE_rad, weighted=weighted)
        return np.array([ts_ra_model[0], ts_de_model[0],
                            ts_ra_model[1], ts_de_model[1], plx_as])
    def apparent_radec(self, t_ref, astrom_pars, eph_obs):
        """
        t_ref       --  chosen reference epoch
        astrom_pars --  five astrometric parameters specified at the
                        reference epoch: meanRA (rad), meanDE (rad),
                        pmRA*cos(DE), pmDE, and parallax
        eph_obs     --  dict with x,y,z,t elements describing the times
                        and places of observations (numpy arrays)

        FOR NOW, assume
        [t_ref] = JD (TDB)
        [t]     = JD (TDB)
        [pars]  = rad, rad, arcsec/yr, arcsec/yr, arcsec
        *no cos(d)*
        """
        rra, rde, pmra, pmde, prlx = astrom_pars
        t_diff_yr = (eph_obs['t'] - t_ref) / 365.25   # units of years
        # proper motion advances linearly; parallax adds the annual wobble
        # through the geometric factors of the observer position:
        pfra, pfde = self._calc_parallax_factors(rra, rde,
                eph_obs['x'], eph_obs['y'], eph_obs['z'])
        delta_ra = (t_diff_yr * pmra + prlx * pfra)
        delta_de = (t_diff_yr * pmde + prlx * pfde)
        return (rra + delta_ra, rde + delta_de)
    def eval_model(self, params):
        """Public wrapper: evaluate model (RA, DE) at the loaded data epochs."""
        return self._solver_eval(params)
#def eval_model(self, params):
# rra, rde, pmra, pmde, prlx = params
# pfra, pfde = self._calc_parallax_factors(rra, rde,
# self.dataset['obs_x'], self.dataset['obs_y'],
# self.dataset['obs_z'])
# delta_ra = self._dt_yrs * pmra + prlx * pfra
# delta_de = self._dt_yrs * pmde + prlx * pfde
# return (rra + delta_ra, rde + delta_de)
    def _solver_eval(self, params):
        # Evaluate the 5-parameter model at all data epochs.
        # params = [RA, DE, pmRA*, pmDE, plx] (radians, rad/yr; see class doc).
        rra, rde, pmra, pmde, prlx = params
        pfra, pfde = self._calc_parallax_factors(rra, rde,
                self.dataset['obs_x'], self.dataset['obs_y'],
                self.dataset['obs_z'])
        delta_ra = self._dt_yrs * pmra + prlx * pfra
        delta_de = self._dt_yrs * pmde + prlx * pfde
        #delta_ra = self._dt_yrs * pmra - prlx * pfra
        #delta_de = self._dt_yrs * pmde - prlx * pfde
        return (rra + delta_ra, rde + delta_de)
def _calc_radec_residuals(self, params):
model_RA, model_DE = self._solver_eval(params)
return (self._RA_rad - model_RA, self._DE_rad - model_DE)
    def _calc_radec_residuals_sigma(self, params):
        # Residuals scaled by the *working* (possibly IRLS-reweighted) errors.
        model_RA, model_DE = self._solver_eval(params)
        rsigs_RA = (self._RA_rad - model_RA) / self._use_RA_err
        rsigs_DE = (self._DE_rad - model_DE) / self._use_DE_err
        return rsigs_RA, rsigs_DE
def _calc_total_residuals_sigma(self, params):
return np.hypot(*self._calc_radec_residuals_sigma(params))
    def _calc_chi_square(self, params, negplxhit=100.):
        # Penalty function minimized by the solver: sum over inlier points
        # of |resid/err|**self._chiexp. Negative-parallax solutions are
        # inflated by *negplxhit* to steer the optimizer away from them.
        model_ra, model_de = self._solver_eval(params)
        resid_ra = (self._RA_rad - model_ra) #/ np.cos(model_de)
        resid_de = (self._DE_rad - model_de)
        if isinstance(self._use_RA_err, np.ndarray):
            resid_ra /= self._use_RA_err
        if isinstance(self._use_DE_err, np.ndarray):
            resid_de /= self._use_DE_err
        resid_tot = np.hypot(resid_ra, resid_de)[self.inliers]
        if (params[4] < 0.0):
            resid_tot *= negplxhit
        return np.sum(resid_tot**self._chiexp)
    def _calc_initial_parallax(self, params):
        # Line-fit RA residual (arcsec) against the RA parallax factor.
        # NOTE(review): the result is unpacked by fit_bestpars() as
        # [RA zero-point nudge, parallax] in arcsec -- i.e. it assumes
        # ts.linefit returns [intercept, slope]; confirm in theil_sen.
        rra_resid, rde_resid = self._calc_radec_residuals(params)
        mar_ra_rad = calc_MAR(rra_resid)
        mar_ra_mas = _MAS_PER_RADIAN * mar_ra_rad
        sys.stderr.write("mar_ra_rad: %f\n" % mar_ra_rad)
        sys.stderr.write("mar_ra_mas: %f\n" % mar_ra_mas)
        pfra, pfde = self._calc_parallax_factors(
                self._RA_rad, self._DE_rad, self.dataset['obs_x'],
                self.dataset['obs_y'], self.dataset['obs_z'])
        adjustment_arcsec = ts.linefit(pfra, _ARCSEC_PER_RADIAN * rra_resid)
        sys.stderr.write("adjustment (arcsec): %s\n" % str(adjustment_arcsec))
        return adjustment_arcsec
    # Driver routine for 5-parameter astrometric fitting:
    def fit_bestpars(self, sigcut=5):
        """
        Fit the 5-parameter astrometric model to the data from setup().

        sigcut -- points whose initial residual exceeds this many sigma
                  are flagged as outliers and excluded from the fit.

        Returns the best-fit parameter array [RA, DE, pmRA*, pmDE, plx]
        (radians and rad/yr), or False if setup() has not been run.
        """
        if not self._is_set:
            sys.stderr.write("Error: data not OK for fitting!\n")
            sys.stderr.write("Run setup() first and retry ...\n")
            return False

        # robust initial guess with Theil-Sen:
        uguess = self.ts_fit_radec_pm(self._dt_yrs, self._RA_rad, self._DE_rad)
        wguess = self.ts_fit_radec_pm(self._dt_yrs, self._RA_rad, self._DE_rad,
                weighted=True)
        sys.stderr.write("Initial guess (unweighted):\n")
        sys.stderr.write("==> %s\n" % str(self.nice_units(uguess)))
        sys.stderr.write("\n")
        sys.stderr.write("Initial guess (weighted):\n")
        sys.stderr.write("==> %s\n" % str(self.nice_units(wguess)))
        sys.stderr.write("\n")
        guess = uguess # adopt unweighted for now

        # initial crack at parallax and zero-point:
        woohoo = self._calc_initial_parallax(guess)
        sys.stderr.write("woohoo: %s\n" % str(woohoo))
        self.woohoo = woohoo
        ra_nudge_rad, plx_rad = woohoo / _ARCSEC_PER_RADIAN
        guess[0] += ra_nudge_rad
        guess[4] = plx_rad

        # estimate RA,Dec uncertainty from residuals if not known a priori:
        if self._need_resid_errors:
            rra_resid, rde_resid = self._calc_radec_residuals(guess)
            rra_scatter = calc_MAR(rra_resid)
            rde_scatter = calc_MAR(rde_resid)
            mra_scatter = _MAS_PER_RADIAN * rra_scatter
            mde_scatter = _MAS_PER_RADIAN * rde_scatter
            sys.stderr.write("rra_scatter: %e (rad)\n" % rra_scatter)
            sys.stderr.write("rde_scatter: %e (rad)\n" % rde_scatter)
            sys.stderr.write("mra_scatter: %10.5f (mas)\n" % mra_scatter)
            sys.stderr.write("mde_scatter: %10.5f (mas)\n" % mde_scatter)
            self._RA_err = np.ones_like(self._RA_rad) * rra_scatter
            self._DE_err = np.ones_like(self._DE_rad) * rde_scatter
        self._use_RA_err = np.copy(self._RA_err)
        self._use_DE_err = np.copy(self._DE_err)

        # check whether anything looks really bad; drop gross outliers:
        self._par_guess = guess
        rsig_tot = self._calc_total_residuals_sigma(guess)
        sys.stderr.write("typical rsig_tot: %8.3f\n" % np.median(rsig_tot))
        self.inliers = (rsig_tot < sigcut)
        ndropped = self.inliers.size - np.sum(self.inliers)
        sys.stderr.write("Dropped %d point(s) beyond %.2f-sigma.\n"
                % (ndropped, sigcut))

        # find minimum (downhill simplex):
        self.full_result = opti.fmin(self._calc_chi_square, guess,
                xtol=1e-7, ftol=1e-7, full_output=True)
        self.result = self.full_result[0]
        sys.stderr.write("Found minimum:\n")
        sys.stderr.write("==> %s\n" % str(self.nice_units(self.result)))
        self._can_iterate = True
        return self.result
# -----------------------------------------------------------------------
def _calc_huber_rweights(self, residuals, sigma):
_k_sig = 1.34 * sigma
res_devs = np.abs(residuals / _k_sig)
rweights = np.ones_like(res_devs)
distants = (res_devs > 1.0)
rweights[distants] = 1.0 / res_devs[distants]
return rweights
    def iter_update_bestpars(self, params):
        """Perform an IRLS iteration."""
        # calculate residuals and their robust scatter:
        rra_resid, rde_resid = self._calc_radec_residuals(params)
        rra_scatter = calc_MAR(rra_resid)
        rde_scatter = calc_MAR(rde_resid)
        # rescale the working errors with Huber weights.
        # NOTE(review): weights are <= 1, so multiplying *shrinks* the
        # errors of discrepant points, which increases their chi^2 weight;
        # the usual IRLS form is sigma/w -- confirm the intended sign.
        ra_rweights = self._calc_huber_rweights(rra_resid, rra_scatter)
        self._use_RA_err = ra_rweights * self._RA_err
        de_rweights = self._calc_huber_rweights(rde_resid, rde_scatter)
        self._use_DE_err = de_rweights * self._DE_err
        # find minimum:
        self.iresult = opti.fmin(self._calc_chi_square, params,
                xtol=1e-7, ftol=1e-7, full_output=True)
        sys.stderr.write("Found IRLS minimum:\n")
        sys.stderr.write("==> %s\n" % str(self.nice_units(self.iresult[0])))
        self._can_iterate = True
        return self.iresult[0]
# -----------------------------------------------------------------------
    def nice_units(self, params):
        """Convert [RA, DE, pmRA*, pmDE, plx] from radians (and rad/yr) to
        degrees for the positions and mas (mas/yr) for the last three."""
        result = np.degrees(params)
        result[2:5] *= 3.6e6   # into milliarcsec
        result[2] *= np.cos(params[1])   # cos(dec) for pmRA
        return result
def list_resid_sigmas(self, params):
rsig_RA, rsig_DE = self._calc_radec_residuals_sigma(params)
rsig_tot = np.hypot(rsig_RA, rsig_DE)
#sys.stderr.write("%15s %15s\n")
for ii,point in enumerate(zip(rsig_RA, rsig_DE, rsig_tot), 0):
sys.stderr.write("> %10.5f %10.5f (%10.5f)\n" % point)
return
######################################################################
# CHANGELOG (astrom_test_2.py):
#---------------------------------------------------------------------
#
# 2020-02-07:
# -- Increased __version__ to 0.1.0.
# -- First created astrom_test_2.py.
#
| [
"logging.basicConfig",
"numpy.copy",
"logging.getLogger",
"numpy.median",
"numpy.radians",
"numpy.abs",
"scipy.optimize.fmin",
"numpy.ones_like",
"numpy.__version__.split",
"theil_sen.linefit",
"numpy.hypot",
"numpy.array",
"numpy.sum",
"sys.stderr.write",
"numpy.cos",
"numpy.sin",
"... | [((504, 543), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (523, 543), False, 'import logging\n'), ((553, 580), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (570, 580), False, 'import logging\n'), ((1632, 1655), 'numpy.median', 'np.median', (['a'], {'axis': 'axis'}), '(a, axis=axis)\n', (1641, 1655), True, 'import numpy as np\n'), ((4769, 4782), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (4776, 4782), True, 'import numpy as np\n'), ((5126, 5157), 'numpy.radians', 'np.radians', (["self.dataset['dra']"], {}), "(self.dataset['dra'])\n", (5136, 5157), True, 'import numpy as np\n'), ((5181, 5212), 'numpy.radians', 'np.radians', (["self.dataset['dde']"], {}), "(self.dataset['dde'])\n", (5191, 5212), True, 'import numpy as np\n'), ((7120, 7164), 'theil_sen.linefit', 'ts.linefit', (['t_yrs', 'RA_rad'], {'weighted': 'weighted'}), '(t_yrs, RA_rad, weighted=weighted)\n', (7130, 7164), True, 'import theil_sen as ts\n'), ((7187, 7231), 'theil_sen.linefit', 'ts.linefit', (['t_yrs', 'DE_rad'], {'weighted': 'weighted'}), '(t_yrs, DE_rad, weighted=weighted)\n', (7197, 7231), True, 'import theil_sen as ts\n'), ((7247, 7333), 'numpy.array', 'np.array', (['[ts_ra_model[0], ts_de_model[0], ts_ra_model[1], ts_de_model[1], plx_as]'], {}), '([ts_ra_model[0], ts_de_model[0], ts_ra_model[1], ts_de_model[1],\n plx_as])\n', (7255, 7333), True, 'import numpy as np\n'), ((11176, 11209), 'numpy.sum', 'np.sum', (['(resid_tot ** self._chiexp)'], {}), '(resid_tot ** self._chiexp)\n', (11182, 11209), True, 'import numpy as np\n'), ((11553, 11602), 'sys.stderr.write', 'sys.stderr.write', (["('mar_ra_rad: %f\\n' % mar_ra_rad)"], {}), "('mar_ra_rad: %f\\n' % mar_ra_rad)\n", (11569, 11602), False, 'import sys\n'), ((11611, 11660), 'sys.stderr.write', 'sys.stderr.write', (["('mar_ra_mas: %f\\n' % mar_ra_mas)"], {}), "('mar_ra_mas: %f\\n' % mar_ra_mas)\n", (11627, 11660), False, 'import sys\n'), 
((11989, 12037), 'theil_sen.linefit', 'ts.linefit', (['pfra', '(_ARCSEC_PER_RADIAN * rra_resid)'], {}), '(pfra, _ARCSEC_PER_RADIAN * rra_resid)\n', (11999, 12037), True, 'import theil_sen as ts\n'), ((12742, 12791), 'sys.stderr.write', 'sys.stderr.write', (['"""Initial guess (unweighted):\n"""'], {}), "('Initial guess (unweighted):\\n')\n", (12758, 12791), False, 'import sys\n'), ((12868, 12890), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (12884, 12890), False, 'import sys\n'), ((12899, 12946), 'sys.stderr.write', 'sys.stderr.write', (['"""Initial guess (weighted):\n"""'], {}), "('Initial guess (weighted):\\n')\n", (12915, 12946), False, 'import sys\n'), ((13023, 13045), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (13039, 13045), False, 'import sys\n'), ((14418, 14439), 'numpy.copy', 'np.copy', (['self._RA_err'], {}), '(self._RA_err)\n', (14425, 14439), True, 'import numpy as np\n'), ((14467, 14488), 'numpy.copy', 'np.copy', (['self._DE_err'], {}), '(self._DE_err)\n', (14474, 14488), True, 'import numpy as np\n'), ((15041, 15126), 'sys.stderr.write', 'sys.stderr.write', (["('Dropped %d point(s) beyond %.2f-sigma.\\n' % (ndropped, sigcut))"], {}), "('Dropped %d point(s) beyond %.2f-sigma.\\n' % (ndropped,\n sigcut))\n", (15057, 15126), False, 'import sys\n'), ((15417, 15503), 'scipy.optimize.fmin', 'opti.fmin', (['self._calc_chi_square', 'guess'], {'xtol': '(1e-07)', 'ftol': '(1e-07)', 'full_output': '(True)'}), '(self._calc_chi_square, guess, xtol=1e-07, ftol=1e-07, full_output\n =True)\n', (15426, 15503), True, 'import scipy.optimize as opti\n'), ((16264, 16300), 'sys.stderr.write', 'sys.stderr.write', (['"""Found minimum:\n"""'], {}), "('Found minimum:\\n')\n", (16280, 16300), False, 'import sys\n'), ((16618, 16644), 'numpy.abs', 'np.abs', (['(residuals / _k_sig)'], {}), '(residuals / _k_sig)\n', (16624, 16644), True, 'import numpy as np\n'), ((16664, 16686), 'numpy.ones_like', 'np.ones_like', 
(['res_devs'], {}), '(res_devs)\n', (16676, 16686), True, 'import numpy as np\n'), ((17628, 17714), 'scipy.optimize.fmin', 'opti.fmin', (['self._calc_chi_square', 'params'], {'xtol': '(1e-07)', 'ftol': '(1e-07)', 'full_output': '(True)'}), '(self._calc_chi_square, params, xtol=1e-07, ftol=1e-07,\n full_output=True)\n', (17637, 17714), True, 'import scipy.optimize as opti\n'), ((17735, 17776), 'sys.stderr.write', 'sys.stderr.write', (['"""Found IRLS minimum:\n"""'], {}), "('Found IRLS minimum:\\n')\n", (17751, 17776), False, 'import sys\n'), ((18050, 18068), 'numpy.degrees', 'np.degrees', (['params'], {}), '(params)\n', (18060, 18068), True, 'import numpy as np\n'), ((18153, 18170), 'numpy.cos', 'np.cos', (['params[1]'], {}), '(params[1])\n', (18159, 18170), True, 'import numpy as np\n'), ((18347, 18373), 'numpy.hypot', 'np.hypot', (['rsig_RA', 'rsig_DE'], {}), '(rsig_RA, rsig_DE)\n', (18355, 18373), True, 'import numpy as np\n'), ((1419, 1444), 'numpy.__version__.split', 'np.__version__.split', (['"""."""'], {}), "('.')\n", (1439, 1444), True, 'import numpy as np\n'), ((1695, 1714), 'numpy.abs', 'np.abs', (['(a - med_val)'], {}), '(a - med_val)\n', (1701, 1714), True, 'import numpy as np\n'), ((2002, 2019), 'numpy.abs', 'np.abs', (['residuals'], {}), '(residuals)\n', (2008, 2019), True, 'import numpy as np\n'), ((3904, 3946), 'sys.stderr.write', 'sys.stderr.write', (['"""Incomplete data set!\n"""'], {}), "('Incomplete data set!\\n')\n", (3920, 3946), False, 'import sys\n'), ((3960, 4007), 'sys.stderr.write', 'sys.stderr.write', (['"""Required columns include:\n"""'], {}), "('Required columns include:\\n')\n", (3976, 4007), False, 'import sys\n'), ((5555, 5619), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: RA_err not given, using estimated\n"""'], {}), "('WARNING: RA_err not given, using estimated\\n')\n", (5571, 5619), False, 'import sys\n'), ((5722, 5786), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: DE_err not given, using estimated\n"""'], 
{}), "('WARNING: DE_err not given, using estimated\\n')\n", (5738, 5786), False, 'import sys\n'), ((6654, 6668), 'numpy.sin', 'np.sin', (['RA_rad'], {}), '(RA_rad)\n', (6660, 6668), True, 'import numpy as np\n'), ((6670, 6684), 'numpy.cos', 'np.cos', (['RA_rad'], {}), '(RA_rad)\n', (6676, 6684), True, 'import numpy as np\n'), ((6708, 6722), 'numpy.sin', 'np.sin', (['DE_rad'], {}), '(DE_rad)\n', (6714, 6722), True, 'import numpy as np\n'), ((6724, 6738), 'numpy.cos', 'np.cos', (['DE_rad'], {}), '(DE_rad)\n', (6730, 6738), True, 'import numpy as np\n'), ((11053, 11081), 'numpy.hypot', 'np.hypot', (['resid_ra', 'resid_de'], {}), '(resid_ra, resid_de)\n', (11061, 11081), True, 'import numpy as np\n'), ((12288, 12341), 'sys.stderr.write', 'sys.stderr.write', (['"""Error: data not OK for fitting!\n"""'], {}), "('Error: data not OK for fitting!\\n')\n", (12304, 12341), False, 'import sys\n'), ((12354, 12407), 'sys.stderr.write', 'sys.stderr.write', (['"""Run setup() first and retry ...\n"""'], {}), "('Run setup() first and retry ...\\n')\n", (12370, 12407), False, 'import sys\n'), ((13979, 14036), 'sys.stderr.write', 'sys.stderr.write', (["('rra_scatter: %e (rad)\\n' % rra_scatter)"], {}), "('rra_scatter: %e (rad)\\n' % rra_scatter)\n", (13995, 14036), False, 'import sys\n'), ((14049, 14106), 'sys.stderr.write', 'sys.stderr.write', (["('rde_scatter: %e (rad)\\n' % rde_scatter)"], {}), "('rde_scatter: %e (rad)\\n' % rde_scatter)\n", (14065, 14106), False, 'import sys\n'), ((14119, 14180), 'sys.stderr.write', 'sys.stderr.write', (["('mra_scatter: %10.5f (mas)\\n' % mra_scatter)"], {}), "('mra_scatter: %10.5f (mas)\\n' % mra_scatter)\n", (14135, 14180), False, 'import sys\n'), ((14193, 14254), 'sys.stderr.write', 'sys.stderr.write', (["('mde_scatter: %10.5f (mas)\\n' % mde_scatter)"], {}), "('mde_scatter: %10.5f (mas)\\n' % mde_scatter)\n", (14209, 14254), False, 'import sys\n'), ((15012, 15032), 'numpy.sum', 'np.sum', (['self.inliers'], {}), '(self.inliers)\n', (15018, 
15032), True, 'import numpy as np\n'), ((18498, 18552), 'sys.stderr.write', 'sys.stderr.write', (["('> %10.5f %10.5f (%10.5f)\\n' % point)"], {}), "('> %10.5f %10.5f (%10.5f)\\n' % point)\n", (18514, 18552), False, 'import sys\n'), ((14282, 14308), 'numpy.ones_like', 'np.ones_like', (['self._RA_rad'], {}), '(self._RA_rad)\n', (14294, 14308), True, 'import numpy as np\n'), ((14350, 14376), 'numpy.ones_like', 'np.ones_like', (['self._DE_rad'], {}), '(self._DE_rad)\n', (14362, 14376), True, 'import numpy as np\n'), ((14849, 14868), 'numpy.median', 'np.median', (['rsig_tot'], {}), '(rsig_tot)\n', (14858, 14868), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import cv2
from utils.misc import get_center
Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
def get_gauss_filter_weight(width, height, mu_x, mu_y, sigma=7):
    """Return an (height, width) unnormalized Gaussian centered at (mu_x, mu_y).

    The peak value is 1.0 at the center pixel; ``sigma`` controls the spread.
    (The exponent is not multiplied by 2 — kept as in the reference code.)
    """
    rows, cols = np.indices((height, width))
    sq_dist = (cols - mu_x) ** 2 + (rows - mu_y) ** 2
    return np.exp(-sq_dist / (2 * sigma ** 2))
def get_template_correlation_response(im_size=225, out_size=None):
    """Ideal correlation response for a template: a Gaussian peaked at the
    center of an im_size x im_size grid, optionally resized.

    out_size = [width, height]; output layout = [H, W].
    """
    response = get_gauss_filter_weight(im_size, im_size, im_size // 2, im_size // 2)
    if out_size is None:
        return response
    return cv2.resize(response, tuple(out_size))
def batch_fft2d(inputs, transpose=True):
    """2-D FFT over the spatial dims of a [B,H,W,C] batch.

    Inputs are cast to complex64 if necessary. When ``transpose`` is True the
    channel axis is moved next to the batch axis so ``tf.fft2d`` transforms
    the trailing (H, W) dims, then moved back to [B,H,W,C].
    """
    x = inputs
    if x.dtype != tf.complex64:
        x = tf.cast(x, tf.complex64)
    if not transpose:
        return tf.fft2d(x)
    chan_first = tf.transpose(x, [0, 3, 1, 2])  # [B,C,H,W]
    spectrum = tf.fft2d(chan_first)
    return tf.transpose(spectrum, [0, 2, 3, 1])  # [B,H,W,C]
def batch_ifft2d(inputs, transpose=True):
    """Inverse 2-D FFT over the spatial dims of a [B,H,W,C] batch.

    When ``transpose`` is True the channel axis is moved next to the batch
    axis so ``tf.ifft2d`` transforms the trailing (H, W) dims, then moved
    back to [B,H,W,C].
    """
    if not transpose:
        return tf.ifft2d(inputs)
    chan_first = tf.transpose(inputs, [0, 3, 1, 2])  # [B,C,H,W]
    signal = tf.ifft2d(chan_first)
    return tf.transpose(signal, [0, 2, 3, 1])  # [B,H,W,C]
def get_cx(rect):
    """x-coordinate of the center of an (x1, y1, x2, y2) box."""
    return 0.5 * (rect[0] + rect[2])
def get_cy(rect):
    """y-coordinate of the center of an (x1, y1, x2, y2) box."""
    return 0.5 * (rect[1] + rect[3])
def get_width(rect):
    """Width of an (x1, y1, x2, y2) box."""
    return rect[2] - rect[0]
def get_height(rect):
    """Height of an (x1, y1, x2, y2) box."""
    return rect[3] - rect[1]
def get_area(rect):
    """Area of an (x1, y1, x2, y2) box."""
    return get_width(rect) * get_height(rect)
def get_intersection(rect1, rect2):
    """Intersection box of two (x1, y1, x2, y2) rectangles.

    The result keeps rect1's dtype; it may be an inverted (empty) box when
    the rectangles do not overlap.
    """
    coords = [
        max(rect1[0], rect2[0]),
        max(rect1[1], rect2[1]),
        min(rect1[2], rect2[2]),
        min(rect1[3], rect2[3]),
    ]
    return np.array(coords, dtype=rect1.dtype)
def get_IoU(rect1, rect2):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes."""
    area_I = get_area(get_intersection(rect1, rect2))
    area_U = get_area(rect1) + get_area(rect2) - area_I
    return float(area_I) / float(area_U)
def im2rgb(im):
    """Replicate a single-channel image into 3 channels.

    A 3-dimensional input is returned unchanged (same object).
    """
    if im.ndim == 3:
        return im
    return np.stack([im, im, im], axis=-1)
def convert_bbox_format(bbox, to):
    """Convert a Rectangle between 'center-based' and 'top-left-based'
    (x, y) conventions; width and height are unchanged.

    Raises:
        ValueError: if ``to`` is not one of the two known formats.
    """
    x, y, target_width, target_height = bbox.x, bbox.y, bbox.width, bbox.height
    if to == 'top-left-based':
        sign = -1
    elif to == 'center-based':
        sign = 1
    else:
        raise ValueError("Bbox format: {} was not recognized".format(to))
    x += sign * get_center(target_width)
    y += sign * get_center(target_height)
    return Rectangle(x, y, target_width, target_height)
def get_exemplar_images(images, exemplar_size, targets_pos=None):
    """Crop exemplar image from input images

    Args:
        images: [B, H, W, 3] batch tensor with a static shape.
        exemplar_size: (z_height, z_width) of the crop window.
        targets_pos: optional [B, 2] (y, x) crop centers; when None the
            crops are taken from the image centers.
    Returns:
        [B, z_height, z_width, 3] tensor of per-image crops.
    """
    with tf.name_scope('get_exemplar_image'):
        batch_size, x_height, x_width = images.get_shape().as_list()[:3]
        z_height, z_width = exemplar_size
        if targets_pos is None:
            # crop from the center
            target_pos_single = [[get_center(x_height), get_center(x_width)]]
            targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
        else:
            targets_pos_ = targets_pos
        # convert to top-left corner based coordinates
        top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
        bottom = tf.to_int32(top + z_height)
        left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
        right = tf.to_int32(left + z_width)
        def _slice(x):
            # Slice one image to its [top:bottom, left:right] window.
            f, t, l, b, r = x
            c = f[t:b, l:r]
            return c
        # map_fn applies the per-image slice across the batch; bounds are
        # assumed to lie inside the image (no padding is performed here).
        exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
        exemplar_img.set_shape([batch_size, z_height, z_width, 3])
        return exemplar_img
def get_crops(im, bbox, size_z, size_x, context_amount):
    """Obtain image sub-window, padding with avg channel if area goes outside of border
    Adapted from https://github.com/bertinetto/siamese-fc/blob/master/ILSVRC15-curation/save_crops.m#L46
    Args:
        im: Image ndarray
        bbox: Named tuple (x, y, width, height) x, y corresponds to the crops center
        size_z: Target + context size
        size_x: The resultant crop size
        context_amount: The amount of context
    Returns:
        (image crop, scale_x): the size_x-by-size_x crop ndarray and the
        scale factor mapping the crop back to the original image.
    """
    cy, cx, h, w = bbox.y, bbox.x, bbox.height, bbox.width
    # Context-padded target dimensions.
    wc_z = w + context_amount * (w + h)
    hc_z = h + context_amount * (w + h)
    # Side of the square exemplar region: geometric mean of padded dims.
    s_z = np.sqrt(wc_z * hc_z)
    scale_z = size_z / s_z
    # Extra search margin (in original-image pixels) so the final crop
    # covers size_x pixels at the exemplar's scale.
    d_search = (size_x - size_z) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad
    scale_x = size_x / s_x
    image_crop_x, _, _, _, _ = get_subwindow_avg(im, [cy, cx],
                                                 [size_x, size_x],
                                                 [np.round(s_x), np.round(s_x)])
    return image_crop_x, scale_x
def get_subwindow_avg(im, pos, model_sz, original_sz):
    """Extract a sub-window of ``im`` centered at ``pos``, padding any
    out-of-bounds area with the per-channel mean color, then resizing to
    ``model_sz`` when it differs from ``original_sz``.

    Args:
        im: H x W x 3 image ndarray.
        pos: (y, x) center of the crop in image coordinates.
        model_sz: (h, w) of the returned patch.
        original_sz: (h, w) of the region cropped before resizing; falls
            back to ``model_sz`` when falsy.
    Returns:
        (patch, left_pad, top_pad, right_pad, bottom_pad)
    """
    # avg_chans = np.mean(im, axis=(0, 1)) # This version is 3x slower
    avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
    if not original_sz:
        original_sz = model_sz
    sz = original_sz
    im_sz = im.shape
    # make sure the size is not too small
    assert im_sz[0] > 2 and im_sz[1] > 2
    c = [get_center(s) for s in sz]
    # Crop bounds in image coordinates. Note: the ``np.int`` alias was
    # deprecated in NumPy 1.20 and removed in 1.24; the builtin ``int``
    # is the documented replacement and behaves identically here.
    context_xmin = int(np.round(pos[1] - c[1]))
    context_xmax = int(context_xmin + sz[1] - 1)
    context_ymin = int(np.round(pos[0] - c[0]))
    context_ymax = int(context_ymin + sz[0] - 1)
    # Amount by which the window sticks out of the image on each side.
    left_pad = int(np.maximum(0, -context_xmin))
    top_pad = int(np.maximum(0, -context_ymin))
    right_pad = int(np.maximum(0, context_xmax - im_sz[1] + 1))
    bottom_pad = int(np.maximum(0, context_ymax - im_sz[0] + 1))
    # Shift the window into the padded image's coordinate frame.
    context_xmin = context_xmin + left_pad
    context_xmax = context_xmax + left_pad
    context_ymin = context_ymin + top_pad
    context_ymax = context_ymax + top_pad
    if top_pad > 0 or bottom_pad > 0 or left_pad > 0 or right_pad > 0:
        # Pad each channel with its own mean value.
        R = np.pad(im[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[0]))
        G = np.pad(im[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[1]))
        B = np.pad(im[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[2]))
        im = np.stack((R, G, B), axis=2)
    im_patch_original = im[context_ymin:context_ymax + 1,
                           context_xmin:context_xmax + 1, :]
    if not (model_sz[0] == original_sz[0] and model_sz[1] == original_sz[1]):
        im_patch = cv2.resize(im_patch_original, tuple(model_sz))
    else:
        im_patch = im_patch_original
    return im_patch, left_pad, top_pad, right_pad, bottom_pad
def normalize_01(inputs):
    """Rescale each item of a [B,H,W,C] float batch to [0, 1].

    The min/max are taken per batch element over all of H, W and C; a small
    epsilon guards against division by zero on constant inputs.
    """
    lo = tf.reduce_min(inputs, axis=[1, 2, 3], keep_dims=True)
    hi = tf.reduce_max(inputs, axis=[1, 2, 3], keep_dims=True)
    span = hi - lo + 1e-6
    return (inputs - lo) / span
def spatial_softmax(logits):
    """Softmax over all non-batch positions of ``logits``, returned in the
    input's original shape."""
    original_shape = tf.shape(logits)
    probs = tf.nn.softmax(tf.layers.flatten(logits))
    return tf.reshape(probs, original_shape)
def detect_hard_peak_position(inputs):
    # inputs: [B,H,W,1] filter responses
    # This function is non-differentiable
    # Return: peak positions ([B,2] x,y coordinates, tf.int32)
    # Only `width` is needed below; the other unstacked dims are unused.
    batch_size, height, width, channels = tf.unstack(tf.shape(inputs))
    inputs_flat = tf.layers.flatten(inputs) # [B, H*W]
    # Flat index of the per-image maximum over the H*W grid.
    argmax_inds = tf.argmax(inputs_flat, axis=1, output_type=tf.int32)
    # Recover 2-D coordinates: column = idx % W; row = idx / W (tf.divide
    # yields a float, truncated back to an integer row by the int32 cast).
    argmax_x = tf.cast(tf.mod(argmax_inds, width), tf.int32)
    argmax_y = tf.cast(tf.divide(argmax_inds, width), tf.int32)
    peak_pos = tf.concat([argmax_x[:,None], argmax_y[:,None]], axis=1) # [B,2]
    return peak_pos
| [
"tensorflow.tile",
"numpy.sqrt",
"tensorflow.shape",
"tensorflow.layers.flatten",
"tensorflow.transpose",
"tensorflow.ifft2d",
"numpy.array",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.reduce_min",
"numpy.mean",
"numpy.exp",
"tensorflow.concat",
"numpy.stack",
"tensorflow.mo... | [((232, 298), 'collections.namedtuple', 'collections.namedtuple', (['"""Rectangle"""', "['x', 'y', 'width', 'height']"], {}), "('Rectangle', ['x', 'y', 'width', 'height'])\n", (254, 298), False, 'import collections\n'), ((374, 401), 'numpy.indices', 'np.indices', (['(height, width)'], {}), '((height, width))\n', (384, 401), True, 'import numpy as np\n'), ((448, 513), 'numpy.exp', 'np.exp', (['(-(((x - mu_x) ** 2 + (y - mu_y) ** 2) / (2 * sigma ** 2)))'], {}), '(-(((x - mu_x) ** 2 + (y - mu_y) ** 2) / (2 * sigma ** 2)))\n', (454, 513), True, 'import numpy as np\n'), ((1098, 1114), 'tensorflow.fft2d', 'tf.fft2d', (['inputs'], {}), '(inputs)\n', (1106, 1114), True, 'import tensorflow as tf\n'), ((1375, 1392), 'tensorflow.ifft2d', 'tf.ifft2d', (['inputs'], {}), '(inputs)\n', (1384, 1392), True, 'import tensorflow as tf\n'), ((1956, 2001), 'numpy.array', 'np.array', (['[x1, y1, x2, y2]'], {'dtype': 'rect1.dtype'}), '([x1, y1, x2, y2], dtype=rect1.dtype)\n', (1964, 2001), True, 'import numpy as np\n'), ((4647, 4667), 'numpy.sqrt', 'np.sqrt', (['(wc_z * hc_z)'], {}), '(wc_z * hc_z)\n', (4654, 4667), True, 'import numpy as np\n'), ((5618, 5650), 'numpy.int', 'np.int', (['(context_xmin + sz[1] - 1)'], {}), '(context_xmin + sz[1] - 1)\n', (5624, 5650), True, 'import numpy as np\n'), ((5721, 5753), 'numpy.int', 'np.int', (['(context_ymin + sz[0] - 1)'], {}), '(context_ymin + sz[0] - 1)\n', (5727, 5753), True, 'import numpy as np\n'), ((7147, 7200), 'tensorflow.reduce_min', 'tf.reduce_min', (['inputs'], {'axis': '[1, 2, 3]', 'keep_dims': '(True)'}), '(inputs, axis=[1, 2, 3], keep_dims=True)\n', (7160, 7200), True, 'import tensorflow as tf\n'), ((7210, 7263), 'tensorflow.reduce_max', 'tf.reduce_max', (['inputs'], {'axis': '[1, 2, 3]', 'keep_dims': '(True)'}), '(inputs, axis=[1, 2, 3], keep_dims=True)\n', (7223, 7263), True, 'import tensorflow as tf\n'), ((7375, 7391), 'tensorflow.shape', 'tf.shape', (['logits'], {}), '(logits)\n', (7383, 7391), True, 'import 
tensorflow as tf\n'), ((7406, 7431), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['logits'], {}), '(logits)\n', (7423, 7431), True, 'import tensorflow as tf\n'), ((7446, 7468), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['flatten'], {}), '(flatten)\n', (7459, 7468), True, 'import tensorflow as tf\n'), ((7483, 7509), 'tensorflow.reshape', 'tf.reshape', (['softmax', 'shape'], {}), '(softmax, shape)\n', (7493, 7509), True, 'import tensorflow as tf\n'), ((7806, 7831), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['inputs'], {}), '(inputs)\n', (7823, 7831), True, 'import tensorflow as tf\n'), ((7861, 7913), 'tensorflow.argmax', 'tf.argmax', (['inputs_flat'], {'axis': '(1)', 'output_type': 'tf.int32'}), '(inputs_flat, axis=1, output_type=tf.int32)\n', (7870, 7913), True, 'import tensorflow as tf\n'), ((8055, 8112), 'tensorflow.concat', 'tf.concat', (['[argmax_x[:, None], argmax_y[:, None]]'], {'axis': '(1)'}), '([argmax_x[:, None], argmax_y[:, None]], axis=1)\n', (8064, 8112), True, 'import tensorflow as tf\n'), ((987, 1016), 'tensorflow.cast', 'tf.cast', (['inputs', 'tf.complex64'], {}), '(inputs, tf.complex64)\n', (994, 1016), True, 'import tensorflow as tf\n'), ((1052, 1086), 'tensorflow.transpose', 'tf.transpose', (['inputs', '[0, 3, 1, 2]'], {}), '(inputs, [0, 3, 1, 2])\n', (1064, 1086), True, 'import tensorflow as tf\n'), ((1163, 1198), 'tensorflow.transpose', 'tf.transpose', (['outputs', '[0, 2, 3, 1]'], {}), '(outputs, [0, 2, 3, 1])\n', (1175, 1198), True, 'import tensorflow as tf\n'), ((1329, 1363), 'tensorflow.transpose', 'tf.transpose', (['inputs', '[0, 3, 1, 2]'], {}), '(inputs, [0, 3, 1, 2])\n', (1341, 1363), True, 'import tensorflow as tf\n'), ((1429, 1464), 'tensorflow.transpose', 'tf.transpose', (['outputs', '[0, 2, 3, 1]'], {}), '(outputs, [0, 2, 3, 1])\n', (1441, 1464), True, 'import tensorflow as tf\n'), ((2284, 2310), 'numpy.stack', 'np.stack', (['[im, im, im]', '(-1)'], {}), '([im, im, im], -1)\n', (2292, 2310), True, 'import numpy as 
np\n'), ((2485, 2509), 'utils.misc.get_center', 'get_center', (['target_width'], {}), '(target_width)\n', (2495, 2509), False, 'from utils.misc import get_center\n'), ((2523, 2548), 'utils.misc.get_center', 'get_center', (['target_height'], {}), '(target_height)\n', (2533, 2548), False, 'from utils.misc import get_center\n'), ((2922, 2957), 'tensorflow.name_scope', 'tf.name_scope', (['"""get_exemplar_image"""'], {}), "('get_exemplar_image')\n", (2935, 2957), True, 'import tensorflow as tf\n'), ((3496, 3523), 'tensorflow.to_int32', 'tf.to_int32', (['(top + z_height)'], {}), '(top + z_height)\n', (3507, 3523), True, 'import tensorflow as tf\n'), ((3619, 3646), 'tensorflow.to_int32', 'tf.to_int32', (['(left + z_width)'], {}), '(left + z_width)\n', (3630, 3646), True, 'import tensorflow as tf\n'), ((3774, 3847), 'tensorflow.map_fn', 'tf.map_fn', (['_slice', '(images, top, left, bottom, right)'], {'dtype': 'images.dtype'}), '(_slice, (images, top, left, bottom, right), dtype=images.dtype)\n', (3783, 3847), True, 'import tensorflow as tf\n'), ((5200, 5220), 'numpy.mean', 'np.mean', (['im[:, :, 0]'], {}), '(im[:, :, 0])\n', (5207, 5220), True, 'import numpy as np\n'), ((5222, 5242), 'numpy.mean', 'np.mean', (['im[:, :, 1]'], {}), '(im[:, :, 1])\n', (5229, 5242), True, 'import numpy as np\n'), ((5244, 5264), 'numpy.mean', 'np.mean', (['im[:, :, 2]'], {}), '(im[:, :, 2])\n', (5251, 5264), True, 'import numpy as np\n'), ((5455, 5468), 'utils.misc.get_center', 'get_center', (['s'], {}), '(s)\n', (5465, 5468), False, 'from utils.misc import get_center\n'), ((5574, 5597), 'numpy.round', 'np.round', (['(pos[1] - c[1])'], {}), '(pos[1] - c[1])\n', (5582, 5597), True, 'import numpy as np\n'), ((5677, 5700), 'numpy.round', 'np.round', (['(pos[0] - c[0])'], {}), '(pos[0] - c[0])\n', (5685, 5700), True, 'import numpy as np\n'), ((5776, 5804), 'numpy.maximum', 'np.maximum', (['(0)', '(-context_xmin)'], {}), '(0, -context_xmin)\n', (5786, 5804), True, 'import numpy as np\n'), ((5827, 
5855), 'numpy.maximum', 'np.maximum', (['(0)', '(-context_ymin)'], {}), '(0, -context_ymin)\n', (5837, 5855), True, 'import numpy as np\n'), ((5880, 5922), 'numpy.maximum', 'np.maximum', (['(0)', '(context_xmax - im_sz[1] + 1)'], {}), '(0, context_xmax - im_sz[1] + 1)\n', (5890, 5922), True, 'import numpy as np\n'), ((5948, 5990), 'numpy.maximum', 'np.maximum', (['(0)', '(context_ymax - im_sz[0] + 1)'], {}), '(0, context_ymax - im_sz[0] + 1)\n', (5958, 5990), True, 'import numpy as np\n'), ((6246, 6359), 'numpy.pad', 'np.pad', (['im[:, :, 0]', '((top_pad, bottom_pad), (left_pad, right_pad))', '"""constant"""'], {'constant_values': 'avg_chans[0]'}), "(im[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)),\n 'constant', constant_values=avg_chans[0])\n", (6252, 6359), True, 'import numpy as np\n'), ((6389, 6502), 'numpy.pad', 'np.pad', (['im[:, :, 1]', '((top_pad, bottom_pad), (left_pad, right_pad))', '"""constant"""'], {'constant_values': 'avg_chans[1]'}), "(im[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)),\n 'constant', constant_values=avg_chans[1])\n", (6395, 6502), True, 'import numpy as np\n'), ((6532, 6645), 'numpy.pad', 'np.pad', (['im[:, :, 2]', '((top_pad, bottom_pad), (left_pad, right_pad))', '"""constant"""'], {'constant_values': 'avg_chans[2]'}), "(im[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)),\n 'constant', constant_values=avg_chans[2])\n", (6538, 6645), True, 'import numpy as np\n'), ((6677, 6704), 'numpy.stack', 'np.stack', (['(R, G, B)'], {'axis': '(2)'}), '((R, G, B), axis=2)\n', (6685, 6704), True, 'import numpy as np\n'), ((7769, 7785), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (7777, 7785), True, 'import tensorflow as tf\n'), ((7937, 7963), 'tensorflow.mod', 'tf.mod', (['argmax_inds', 'width'], {}), '(argmax_inds, width)\n', (7943, 7963), True, 'import tensorflow as tf\n'), ((7998, 8027), 'tensorflow.divide', 'tf.divide', (['argmax_inds', 'width'], {}), '(argmax_inds, width)\n', (8007, 8027), 
True, 'import tensorflow as tf\n'), ((2593, 2618), 'utils.misc.get_center', 'get_center', (['target_height'], {}), '(target_height)\n', (2603, 2618), False, 'from utils.misc import get_center\n'), ((2632, 2656), 'utils.misc.get_center', 'get_center', (['target_width'], {}), '(target_width)\n', (2642, 2656), False, 'from utils.misc import get_center\n'), ((3247, 3290), 'tensorflow.tile', 'tf.tile', (['target_pos_single', '[batch_size, 1]'], {}), '(target_pos_single, [batch_size, 1])\n', (3254, 3290), True, 'import tensorflow as tf\n'), ((4990, 5003), 'numpy.round', 'np.round', (['s_x'], {}), '(s_x)\n', (4998, 5003), True, 'import numpy as np\n'), ((5005, 5018), 'numpy.round', 'np.round', (['s_x'], {}), '(s_x)\n', (5013, 5018), True, 'import numpy as np\n'), ((3176, 3196), 'utils.misc.get_center', 'get_center', (['x_height'], {}), '(x_height)\n', (3186, 3196), False, 'from utils.misc import get_center\n'), ((3198, 3217), 'utils.misc.get_center', 'get_center', (['x_width'], {}), '(x_width)\n', (3208, 3217), False, 'from utils.misc import get_center\n'), ((3456, 3476), 'utils.misc.get_center', 'get_center', (['z_height'], {}), '(z_height)\n', (3466, 3476), False, 'from utils.misc import get_center\n'), ((3581, 3600), 'utils.misc.get_center', 'get_center', (['z_width'], {}), '(z_width)\n', (3591, 3600), False, 'from utils.misc import get_center\n')] |
from __future__ import absolute_import, division, print_function
import cv2
import numpy as np
import six
def figure(fnum=None, pnum=(1, 1, 1), title=None, figtitle=None, doclf=False,
           docla=False, projection=None, **kwargs):
    """
    http://matplotlib.org/users/gridspec.html
    Args:
        fnum (int): fignum = figure number
        pnum (int, str, or tuple(int, int, int)): plotnum = plot tuple
        title (str): (default = None)
        figtitle (None): (default = None)
        docla (bool): (default = False)
        doclf (bool): (default = False)
    Returns:
        mpl.Figure: fig
    CommandLine:
        python -m plottool.custom_figure --exec-figure:0 --show
        python -m plottool.custom_figure --exec-figure:1 --show
    Example:
        >>> fnum = 1
        >>> fig = figure(fnum, (2, 2, 1))
        >>> gca().text(0.5, 0.5, "ax1", va="center", ha="center")
        >>> fig = figure(fnum, (2, 2, 2))
        >>> gca().text(0.5, 0.5, "ax2", va="center", ha="center")
        >>> ut.show_if_requested()
    Example:
        >>> fnum = 1
        >>> fig = figure(fnum, (2, 2, 1))
        >>> gca().text(0.5, 0.5, "ax1", va="center", ha="center")
        >>> fig = figure(fnum, (2, 2, 2))
        >>> gca().text(0.5, 0.5, "ax2", va="center", ha="center")
        >>> fig = figure(fnum, (2, 4, (1, slice(1, None))))
        >>> gca().text(0.5, 0.5, "ax3", va="center", ha="center")
        >>> ut.show_if_requested()
    """
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    def ensure_fig(fnum=None):
        # Fetch (or lazily create) the requested figure, falling back to
        # whatever pyplot can provide if the first attempt raises.
        if fnum is None:
            try:
                fig = plt.gcf()
            except Exception as ex:
                fig = plt.figure()
        else:
            try:
                fig = plt.figure(fnum)
            except Exception as ex:
                fig = plt.gcf()
        return fig
    def _convert_pnum_int_to_tup(int_pnum):
        # Convert pnum to tuple format if in integer format
        # e.g. 221 -> (2, 2, 1), mirroring MATLAB-style subplot codes.
        nr = int_pnum // 100
        nc = int_pnum // 10 - (nr * 10)
        px = int_pnum - (nr * 100) - (nc * 10)
        pnum = (nr, nc, px)
        return pnum
    def _pnum_to_subspec(pnum):
        # NOTE(review): a string pnum is split into characters here, which
        # leaves string elements for GridSpec — confirm string input works.
        if isinstance(pnum, six.string_types):
            pnum = list(pnum)
        nrow, ncols, plotnum = pnum
        # if kwargs.get('use_gridspec', True):
        # Convert old pnums to gridspec
        gs = gridspec.GridSpec(nrow, ncols)
        if isinstance(plotnum, (tuple, slice, list)):
            subspec = gs[plotnum]
        else:
            subspec = gs[plotnum - 1]
        return (subspec,)
    def _setup_subfigure(pnum):
        # Create or re-activate the requested subplot; `doclf` clears the
        # whole figure, `docla` clears just the targeted axes. The selected
        # axes is left as pyplot's current axes (the local `ax` is unused).
        if isinstance(pnum, int):
            pnum = _convert_pnum_int_to_tup(pnum)
        if doclf:
            fig.clf()
        axes_list = fig.get_axes()
        if docla or len(axes_list) == 0:
            if pnum is not None:
                assert pnum[0] > 0, 'nRows must be > 0: pnum=%r' % (pnum,)
                assert pnum[1] > 0, 'nCols must be > 0: pnum=%r' % (pnum,)
                subspec = _pnum_to_subspec(pnum)
                ax = fig.add_subplot(*subspec, projection=projection)
                if len(axes_list) > 0:
                    ax.cla()
            else:
                ax = plt.gca()
        else:
            if pnum is not None:
                subspec = _pnum_to_subspec(pnum)
                ax = plt.subplot(*subspec)
            else:
                ax = plt.gca()
    fig = ensure_fig(fnum)
    if pnum is not None:
        _setup_subfigure(pnum)
    # Set the title / figtitle
    if title is not None:
        ax = plt.gca()
        ax.set_title(title)
    if figtitle is not None:
        fig.suptitle(figtitle)
    return fig
def pandas_plot_matrix(df, rot=90, ax=None, grid=True, label=None,
                       zerodiag=False,
                       cmap='viridis', showvals=False, logscale=True):
    """Render a DataFrame as a colormapped matrix with labeled ticks.

    Args:
        df: square-ish DataFrame to display via ``ax.matshow``.
        rot: rotation (degrees) for the x tick labels.
        ax: axes to draw into; defaults to the current axes.
        grid: draw white grid lines between cells.
        label: optional colorbar label.
        zerodiag: zero out the diagonal before plotting.
        cmap: colormap name.
        showvals: write each cell's value on top of it.
        logscale: use a LogNorm based on the smallest positive value.
    Returns:
        the matplotlib axes drawn into.
    """
    import matplotlib as mpl
    import copy
    from matplotlib import pyplot as plt
    if ax is None:
        fig = figure(fnum=1, pnum=(1, 1, 1))
        fig.clear()
        ax = plt.gca()
    ax = plt.gca()
    values = df.values
    if zerodiag:
        values = values.copy()
        values = values - np.diag(np.diag(values))
    # aximg = ax.imshow(values, interpolation='none', cmap='viridis')
    if logscale:
        from matplotlib.colors import LogNorm
        # Smallest strictly-positive entry anchors the log scale.
        vmin = df[df > 0].min().min()
        norm = LogNorm(vmin=vmin, vmax=values.max())
    else:
        norm = None
    # NOTE(review): ``mpl.cm.get_cmap`` is deprecated in newer matplotlib
    # releases (use ``matplotlib.colormaps``) — confirm the pinned version.
    cmap = copy.copy(mpl.cm.get_cmap(cmap))  # copy the default cmap
    cmap.set_bad((0, 0, 0))
    aximg = ax.matshow(values, interpolation='none', cmap=cmap, norm=norm)
    # aximg = ax.imshow(values, interpolation='none', cmap='viridis', norm=norm)
    # ax.imshow(values, interpolation='none', cmap='viridis')
    ax.grid(False)
    cax = plt.colorbar(aximg, ax=ax)
    if label is not None:
        cax.set_label(label)
    # NOTE(review): x ticks are labeled from df.index and y ticks from
    # df.columns, which is transposed relative to matshow's row/column
    # convention unless the matrix is symmetric — confirm intended.
    ax.set_xticks(list(range(len(df.index))))
    ax.set_xticklabels([lbl[0:100] for lbl in df.index])
    for lbl in ax.get_xticklabels():
        lbl.set_rotation(rot)
    for lbl in ax.get_xticklabels():
        lbl.set_horizontalalignment('center')
    ax.set_yticks(list(range(len(df.columns))))
    ax.set_yticklabels([lbl[0:100] for lbl in df.columns])
    for lbl in ax.get_yticklabels():
        lbl.set_horizontalalignment('right')
    for lbl in ax.get_yticklabels():
        lbl.set_verticalalignment('center')
    # Grid lines around the pixels
    if grid:
        offset = -.5
        xlim = [-.5, len(df.columns)]
        ylim = [-.5, len(df.index)]
        segments = []
        # NOTE(review): vertical-line count uses len(df.index) and
        # horizontal-line count uses len(df.columns); for non-square
        # frames these look swapped — confirm.
        for x in range(ylim[1]):
            xdata = [x + offset, x + offset]
            ydata = ylim
            segment = list(zip(xdata, ydata))
            segments.append(segment)
        for y in range(xlim[1]):
            xdata = xlim
            ydata = [y + offset, y + offset]
            segment = list(zip(xdata, ydata))
            segments.append(segment)
        bingrid = mpl.collections.LineCollection(segments, color='w', linewidths=1)
        ax.add_collection(bingrid)
    if showvals:
        x_basis = np.arange(len(df.columns))
        y_basis = np.arange(len(df.index))
        x, y = np.meshgrid(x_basis, y_basis)
        for c, r in zip(x.flatten(), y.flatten()):
            val = df.iloc[r, c]
            ax.text(c, r, val, va='center', ha='center', color='white')
    return ax
def axes_extent(axs, pad=0.0):
    """Full window extent of a group of axes, including axes labels, tick
    labels, and titles, expanded by ``pad`` on each side."""
    import itertools as it
    import matplotlib as mpl
    def _visible_artists(ax):
        # The axes itself plus every text artist that has non-empty text.
        parts = [ax]
        for text in it.chain(ax.get_xticklabels(), ax.get_yticklabels()):
            if text.get_text():
                parts.append(text)
        axis_labels = (ax.get_xaxis().get_label(),
                       ax.get_yaxis().get_label(),
                       ax.title)
        for text in axis_labels:
            if text.get_text():
                parts.append(text)
        return parts
    extents = [artist.get_window_extent()
               for ax in axs
               for artist in _visible_artists(ax)]
    union = mpl.transforms.Bbox.union(extents)
    return union.expanded(1.0 + pad, 1.0 + pad)
def extract_axes_extents(fig, combine=False, pad=0.0):
    """Compute figure-relative (inch) extents for each axes of ``fig``.

    Returns a single combined Bbox when ``combine`` is True, otherwise a
    list with one Bbox per axes.
    """
    # Make sure we draw the axes first so we can
    # extract positions from the text objects
    import matplotlib as mpl
    fig.canvas.draw()
    # Group axes that belong together
    atomic_axes = []
    seen_ = set([])
    for ax in fig.axes:
        if ax not in seen_:
            atomic_axes.append([ax])
            seen_.add(ax)
    # Convert from display (pixel) coordinates to figure inches.
    dpi_scale_trans_inv = fig.dpi_scale_trans.inverted()
    axes_bboxes_ = [axes_extent(axs, pad) for axs in atomic_axes]
    axes_extents_ = [extent.transformed(dpi_scale_trans_inv) for extent in axes_bboxes_]
    # axes_extents_ = axes_bboxes_
    if combine:
        # Grab include extents of figure text as well
        # FIXME: This might break on OSX
        # http://stackoverflow.com/questions/22667224/bbox-backend
        renderer = fig.canvas.get_renderer()
        for mpl_text in fig.texts:
            bbox = mpl_text.get_window_extent(renderer=renderer)
            extent_ = bbox.expanded(1.0 + pad, 1.0 + pad)
            extent = extent_.transformed(dpi_scale_trans_inv)
            # extent = extent_
            axes_extents_.append(extent)
        axes_extents = mpl.transforms.Bbox.union(axes_extents_)
    else:
        axes_extents = axes_extents_
    # if True:
    #     axes_extents.x0 = 0
    #     # axes_extents.y1 = 0
    return axes_extents
def adjust_subplots(left=None, right=None, bottom=None, top=None, wspace=None,
                    hspace=None, fig=None):
    """Adjust subplot spacing, keeping any parameter that is not overridden.

    Kwargs:
        left (float): left side of the subplots of the figure
        right (float): right side of the subplots of the figure
        bottom (float): bottom of the subplots of the figure
        top (float): top of the subplots of the figure
        wspace (float): width reserved for blank space between subplots
        hspace (float): height reserved for blank space between subplots
        fig (mpl.Figure): target figure; defaults to the current figure
    """
    from matplotlib import pyplot as plt
    overrides = dict(left=left, right=right, bottom=bottom, top=top,
                     wspace=wspace, hspace=hspace)
    overrides = {k: v for k, v in overrides.items() if v is not None}
    if fig is None:
        fig = plt.gcf()
    subplotpars = fig.subplotpars
    # Read current values through the public attributes rather than copying
    # ``subplotpars.__dict__`` (which leaked the private ``validate`` flag
    # and breaks on matplotlib versions where that attribute is gone).
    adjust_dict = {key: getattr(subplotpars, key)
                   for key in ('left', 'right', 'bottom', 'top',
                               'wspace', 'hspace')}
    adjust_dict.update(overrides)
    fig.subplots_adjust(**adjust_dict)
def render_figure_to_image(fig, **savekw):
    """Rasterize ``fig`` (cropped to its axes extents) into an ndarray.

    Args:
        fig: matplotlib figure to render.
        **savekw: forwarded to ``fig.savefig`` (e.g. dpi, transparent).
    Returns:
        ndarray as decoded by OpenCV (BGRA when transparency is preserved).
    """
    import io
    import cv2
    import matplotlib as mpl
    axes_extents = extract_axes_extents(fig)
    extent = mpl.transforms.Bbox.union(axes_extents)
    with io.BytesIO() as stream:
        # This call takes 23% - 15% of the time depending on settings
        fig.savefig(stream, bbox_inches=extent, **savekw)
        # fig.savefig(stream, **savekw)
        stream.seek(0)
        # np.frombuffer replaces np.fromstring, whose binary mode is
        # deprecated/removed in modern NumPy.
        data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
    im_bgra = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
    return im_bgra
def savefig2(fig, fpath, **kwargs):
    """
    Does a tight layout and saves the figure with transparency
    """
    import matplotlib as mpl
    kwargs.setdefault('transparent', True)
    if 'extent' not in kwargs:
        kwargs['extent'] = mpl.transforms.Bbox.union(extract_axes_extents(fig))
    fig.savefig(fpath, **kwargs)
def copy_figure_to_clipboard(fig):
    """Copy a rendered figure image to the system clipboard (Qt5 backend only).

    References:
        https://stackoverflow.com/questions/17676373/python-matplotlib-pyqt-copy-image-to-clipboard
    """
    print('Copying figure %d to the clipboard' % fig.number)
    import matplotlib as mpl
    # Requires the qt5 backend to be active; grabs its running QApplication.
    app = mpl.backends.backend_qt5.qApp
    QtGui = mpl.backends.backend_qt5.QtGui
    im_bgra = render_figure_to_image(fig, transparent=True)
    # OpenCV decodes as BGRA; QImage.Format_RGBA8888 expects RGBA bytes.
    im_rgba = cv2.cvtColor(im_bgra, cv2.COLOR_BGRA2RGBA)
    im = im_rgba
    QImage = QtGui.QImage
    qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGBA8888)
    clipboard = app.clipboard()
    clipboard.setImage(qim)
    # size = fig.canvas.size()
    # width, height = size.width(), size.height()
    # qim = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
    # QtWidgets = mpl.backends.backend_qt5.QtWidgets
    # pixmap = QtWidgets.QWidget.grab(fig.canvas)
    # clipboard.setPixmap(pixmap)
| [
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.colorbar",
"io.BytesIO",
"matplotlib.collections.LineCollection",
"numpy.diag",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"matplotlib.transforms.Bbox.union",
"cv2.cvtColor",
"cv2.imdecode",
"numpy.meshgrid",... | [((4081, 4090), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4088, 4090), True, 'from matplotlib import pyplot as plt\n'), ((4815, 4841), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['aximg'], {'ax': 'ax'}), '(aximg, ax=ax)\n', (4827, 4841), True, 'from matplotlib import pyplot as plt\n'), ((7228, 7262), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['extents'], {}), '(extents)\n', (7253, 7262), True, 'import matplotlib as mpl\n'), ((9851, 9890), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents'], {}), '(axes_extents)\n', (9876, 9890), True, 'import matplotlib as mpl\n'), ((10193, 10233), 'cv2.imdecode', 'cv2.imdecode', (['data', 'cv2.IMREAD_UNCHANGED'], {}), '(data, cv2.IMREAD_UNCHANGED)\n', (10205, 10233), False, 'import cv2\n'), ((11092, 11134), 'cv2.cvtColor', 'cv2.cvtColor', (['im_bgra', 'cv2.COLOR_BGRA2RGBA'], {}), '(im_bgra, cv2.COLOR_BGRA2RGBA)\n', (11104, 11134), False, 'import cv2\n'), ((2396, 2426), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['nrow', 'ncols'], {}), '(nrow, ncols)\n', (2413, 2426), True, 'import matplotlib.gridspec as gridspec\n'), ((3587, 3596), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3594, 3596), True, 'from matplotlib import pyplot as plt\n'), ((4062, 4071), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4069, 4071), True, 'from matplotlib import pyplot as plt\n'), ((4490, 4511), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (4505, 4511), True, 'import matplotlib as mpl\n'), ((5978, 6043), 'matplotlib.collections.LineCollection', 'mpl.collections.LineCollection', (['segments'], {'color': '"""w"""', 'linewidths': '(1)'}), "(segments, color='w', linewidths=1)\n", (6008, 6043), True, 'import matplotlib as mpl\n'), ((6200, 6229), 'numpy.meshgrid', 'np.meshgrid', (['x_basis', 'y_basis'], {}), '(x_basis, y_basis)\n', (6211, 6229), True, 'import numpy as np\n'), ((8516, 8556), 
'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents_'], {}), '(axes_extents_)\n', (8541, 8556), True, 'import matplotlib as mpl\n'), ((9498, 9507), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9505, 9507), True, 'from matplotlib import pyplot as plt\n'), ((9900, 9912), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (9910, 9912), False, 'import io\n'), ((10569, 10608), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents'], {}), '(axes_extents)\n', (10594, 10608), True, 'import matplotlib as mpl\n'), ((1643, 1652), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1650, 1652), True, 'from matplotlib import pyplot as plt\n'), ((1777, 1793), 'matplotlib.pyplot.figure', 'plt.figure', (['fnum'], {}), '(fnum)\n', (1787, 1793), True, 'from matplotlib import pyplot as plt\n'), ((3235, 3244), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3242, 3244), True, 'from matplotlib import pyplot as plt\n'), ((3362, 3383), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*subspec'], {}), '(*subspec)\n', (3373, 3383), True, 'from matplotlib import pyplot as plt\n'), ((3423, 3432), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3430, 3432), True, 'from matplotlib import pyplot as plt\n'), ((4196, 4211), 'numpy.diag', 'np.diag', (['values'], {}), '(values)\n', (4203, 4211), True, 'import numpy as np\n'), ((1711, 1723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1721, 1723), True, 'from matplotlib import pyplot as plt\n'), ((1852, 1861), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1859, 1861), True, 'from matplotlib import pyplot as plt\n')] |
import os
import numpy
from amuse.units import units
this_dir, this_filename = os.path.split(__file__)
if this_dir == "":
    this_dir = "."

# Filter data for this instrument is shipped next to this module.
instrument = "WFPC_II_WFC3"
data_dir = this_dir + "/data/" + instrument + "/"

# CIE 1931 color-matching functions, one sample per line:
# column 0 = wavelength (nm), columns 1-3 = x, y, z tristimulus weights.
# Use a context manager so the file handle is always closed (the original
# left it open).
with open(data_dir + "ciexyz31.csv", "r") as f:
    lines = f.readlines()

cielam = [] | units.nano(units.m)
x = []
y = []
z = []
for l in lines:
    line = l.split(",")
    cielam.append(float(line[0]) | units.nano(units.m))
    x.append(float(line[1]))
    y.append(float(line[2]))
    z.append(float(line[3]))

x = numpy.array(x)
y = numpy.array(y)
z = numpy.array(z)

# Public lookup: per-channel wavelength grid and throughput weights.
xyz_data = dict()
xyz_data['x'] = dict(wavelength=cielam, throughput=x)
xyz_data['y'] = dict(wavelength=cielam, throughput=y)
xyz_data['z'] = dict(wavelength=cielam, throughput=z)
| [
"numpy.array",
"amuse.units.units.nano",
"os.path.split"
] | [((82, 105), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (95, 105), False, 'import os\n'), ((570, 584), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (581, 584), False, 'import numpy\n'), ((591, 605), 'numpy.array', 'numpy.array', (['y'], {}), '(y)\n', (602, 605), False, 'import numpy\n'), ((612, 626), 'numpy.array', 'numpy.array', (['z'], {}), '(z)\n', (623, 626), False, 'import numpy\n'), ((310, 329), 'amuse.units.units.nano', 'units.nano', (['units.m'], {}), '(units.m)\n', (320, 329), False, 'from amuse.units import units\n'), ((449, 468), 'amuse.units.units.nano', 'units.nano', (['units.m'], {}), '(units.m)\n', (459, 468), False, 'from amuse.units import units\n')] |
import numpy
import sys
def main():
    """Decode an obfuscated save file: input path in argv[1], output in argv[2].

    Reads 512 bytes starting at file offset 0x280, reverses a byte-wise
    transform with a running checksum, and writes the decoded payload.
    All arithmetic is done in numpy.uint8 so it wraps modulo 256 on purpose.
    """
    if len(sys.argv) != 3:
        print(sys.argv)
        print('There are two arguments: input output')
        return
    with open(sys.argv[1], 'rb') as fp:
        b = fp.read()
    # The encoded block is the 512 bytes at offset 0x280.
    b = b[0x280:]
    b = b[:512]
    b = list(map(lambda x: numpy.uint8(x), b))
    counter = 1
    # Checksum accumulator; a valid file decodes to check_byte == 0.
    check_byte = b[0]
    data_ptr_index = 1
    previous_byte = b[0]
    while counter < 512:
        counter += 1
        # Each byte is decoded against the *raw* (pre-decode) previous byte:
        # decoded[i] = raw[i] - (5 * raw[i-1] + 1)  (mod 256).
        times_five = numpy.uint8(5) * previous_byte
        previous_byte = b[data_ptr_index]
        transformer = b[data_ptr_index] - (times_five + numpy.uint8(1))
        check_byte += transformer
        b[data_ptr_index] = transformer
        data_ptr_index += 1
    if check_byte:
        print('Error decoding the save file')
    else:
        print('Successful decoding')
    # Drop the checksum seed byte (index 0) and keep bytes 1..508:
    # 44 bytes + 464 bytes, rejoined into one payload.
    part1 = b[1:]
    part1 = part1[:44]
    part2 = b[45:]
    part2 = part2[:464]
    b = [*part1, *part2]
    with open(sys.argv[2], 'wb') as fp:
        fp.write(bytes(b))


if __name__ == '__main__':
    main()
| [
"numpy.uint8"
] | [((462, 476), 'numpy.uint8', 'numpy.uint8', (['(5)'], {}), '(5)\n', (473, 476), False, 'import numpy\n'), ((288, 302), 'numpy.uint8', 'numpy.uint8', (['x'], {}), '(x)\n', (299, 302), False, 'import numpy\n'), ((592, 606), 'numpy.uint8', 'numpy.uint8', (['(1)'], {}), '(1)\n', (603, 606), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
import json
import numpy as np
import pandas as pd
import sklearn
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LassoCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
# Access data store
data_store = pd.HDFStore("processed_data.h5")
# Retrieve data using key
match_df = data_store["preprocessed_df"]
data_store.close()
match_df.head()
match_df["Winner_team"] = match_df["Winner"]
for ind in match_df.index:
if match_df["Winner"][ind] == match_df["Team_1"][ind]:
match_df["Winner_team"][ind] = 1
elif match_df["Winner"][ind] == match_df["Team_2"][ind]:
match_df["Winner_team"][ind] = 2
else:
match_df["Winner_team"][ind] = 0
match_df["Winner_team"].value_counts()
match_df.head()
np.random.seed(60)
"""##Calculating Net Run Rate
###Import Data
"""
attributes = pd.read_csv("../Data/attributes.csv")
attributes.head()
scorecard = open("../Data/scorecard.json",)
scorecard_data = json.load(scorecard)
tmap = open("../Data/tmap.json",)
tmap_data = json.load(tmap)
"""###Get NNR"""
match = match_df.copy()
match["NRR_team1"] = ""
match["NRR_team2"] = ""
name_to_code = {
"Afghanistan": "AFG",
"Australia": "AUS",
"Bangladesh": "BAN",
"England": "ENG",
"India": "IND",
"Ireland": "IRE",
"Kenya": "KEN",
"Netherlands": "NED",
"New Zealand": "NZL",
"Pakistan": "PAK",
"South Africa": "SAF",
"Scotland": "SCO",
"Sri Lanka": "SRL",
"West Indies": "WIN",
"Zimbabwe": "ZIM",
}
skip_keys = ["1282", "1761", "1765", "1770", "1862", "1866", "2528"]
def check_allOut(scorecard_data, matchCode, team_num):
    """Return True when every batter on the given side was dismissed.

    A side is *not* all out if any batter's dismissal field is "not out"
    or empty (did not bat).
    """
    batting_key = "BATTING" + str(team_num)
    for entry in scorecard_data[matchCode][batting_key]:
        if entry[1] in ("not out", ""):
            return False
    return True
def get_totalOvers(scorecard_data, matchCode, team_num):
    """Total overs faced by a side, summed from per-batter ball counts.

    Entries with a ball count of -1 (missing data) are skipped; six balls
    make one over.
    """
    batting_key = "BATTING" + str(team_num)
    total_balls = sum(
        entry[3]
        for entry in scorecard_data[matchCode][batting_key]
        if entry[3] != -1
    )
    return total_balls / 6
# Compute each team's net run rate (NRR) for every match.
for ind in match.index:
    if match["Winner_team"][ind] == 0:
        # Tie / no result: both teams get a zero NRR.
        match["NRR_team1"][ind] = 0
        # BUGFIX: the column is "NRR_team2" (created above); the original
        # wrote "NNR_team2", which raised a KeyError on any tied match.
        match["NRR_team2"][ind] = 0
    else:
        team_num = 2
        match_code = str(match["MatchCode"][ind])
        if match_code in skip_keys:
            # Matches with known-bad scorecards are filtered out later.
            continue
        order = scorecard_data[match_code]["ORDER"]
        if name_to_code[order[1]] == match["Team_2"][ind]:
            team_num = 1
        # Team 1 is assumed to have used its full 50-over quota.
        runRate_team1 = match["Score_1"][ind] / 50
        if check_allOut(scorecard_data, match_code, team_num):
            # All out: by NRR convention the full 50 overs are charged.
            runRate_team2 = match["Score_2"][ind] / 50
        else:
            if match["Winner_team"][ind] == 2:
                # Successful chase without being all out: charge only the
                # overs actually faced.
                runRate_team2 = match["Score_2"][ind] / get_totalOvers(
                    scorecard_data, match_code, team_num
                )
            else:
                runRate_team2 = match["Score_2"][ind] / 50
        match["NRR_team1"][ind] = runRate_team1 - runRate_team2
        match["NRR_team2"][ind] = runRate_team2 - runRate_team1
match.head()
len(match)
match = match[~match["MatchCode"].isin(skip_keys)]
len(match)
"""###Store the NNR dataframe"""
# Save the final dataframe
data_store = pd.HDFStore("nnr_data.h5")
data_store["preprocessed_df"] = match
data_store.close()
"""#Flipped Dataset"""
# Access data store
data_store = pd.HDFStore("nnr_data.h5")
# Retrieve data using key
match_df = data_store["preprocessed_df"]
data_store.close()
match_df.head()
match_flipped_df = match_df.copy()
for ind in match_flipped_df.index:
match_flipped_df["Team_1"][ind], match_flipped_df["Team_2"][ind] = (
match_df["Team_2"][ind],
match_df["Team_1"][ind],
)
match_flipped_df["Score_1"][ind], match_flipped_df["Score_2"][ind] = (
match_df["Score_2"][ind],
match_df["Score_1"][ind],
)
match_flipped_df["NRR_team1"][ind], match_flipped_df["NRR_team2"][ind] = (
match_df["NRR_team2"][ind],
match_df["NRR_team1"][ind],
)
if match_df["TOSS"][ind] == 1:
match_flipped_df["TOSS"][ind] = 2
else:
match_flipped_df["TOSS"][ind] = 1
if match_df["Venue"][ind] == 1:
match_flipped_df["Venue"][ind] = 2
elif match_df["Venue"][ind] == 2:
match_flipped_df["Venue"][ind] = 1
for ind in match_flipped_df.index:
if match_flipped_df["Winner"][ind] == match_flipped_df["Team_1"][ind]:
match_flipped_df["Winner_team"][ind] = 1
else:
match_flipped_df["Winner_team"][ind] = 2
match_flipped_df.head()
# Access data store
data_store = pd.HDFStore("processed_data.h5")
# Retrieve data using key
match_df = data_store["preprocessed_df"]
data_store.close()
frames = [match_df, match_flipped_df]
final_df = pd.concat(frames)
final_df.head()
len(final_df)
# Save the final dataframe
data_store = pd.HDFStore("flipped_data.h5")
data_store["flip_df"] = final_df
data_store.close()
"""####Flipped dataframe"""
# Access data store
data_store = pd.HDFStore("flipped_data.h5")
# Retrieve data using key
flipped_df = data_store["flip_df"]
data_store.close()
flipped_df.head()
len(flipped_df)
"""#Models using Flipped Data"""
enc = OneHotEncoder(handle_unknown="ignore")
enc_df = pd.DataFrame(enc.fit_transform(flipped_df[["Winner_team", "TOSS"]]).toarray())
flipped_df = flipped_df.join(enc_df)
flipped_df.head()
flipped_df.rename(columns={0: "Win1", 1: "Win2", 2: "Toss1", 3: "Toss2"}, inplace=True)
labelencoder = LabelEncoder()
flipped_df["Team_1Enc"] = labelencoder.fit_transform(flipped_df["Team_1"])
flipped_df["Team_2Enc"] = labelencoder.fit_transform(flipped_df["Team_2"])
flipped_df.head()
X = flipped_df[
["Date", "Team_1Enc", "Team_2Enc", "Venue", "GroundCode", "TOSS", "Toss1", "Toss2"]
].copy()
y = flipped_df[
["Winner_team", "Win1", "Win2", "Score_1", "Score_2", "NRR_team1", "NRR_team2"]
].copy()
# Test Train Split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=1 / 5, random_state=100
)
X_train.head()
y_train.head()
"""##Training on Scores"""
def print_model_scores(model, data, y, predictors, target):
    """
    Fit *model* on the chosen feature columns and print its training-set
    error metrics (RMS and R2) for the requested target column(s).

    Args:
        model: scikit-learn style estimator exposing fit/predict.
        data: DataFrame holding the feature columns.
        y: DataFrame holding the target columns.
        predictors: names of the feature columns to train on.
        target: names of the target column(s) to predict.

    Returns:
        The model's predictions as a numpy array.
    """
    features = data[predictors]
    truth = y[target]
    model.fit(features, truth)
    predictions = model.predict(features)
    rms = sklearn.metrics.mean_squared_error(predictions, truth) ** 0.5
    print("RMS : %s" % "{0:.2%}".format(rms))
    r2 = sklearn.metrics.r2_score(predictions, truth)
    print("R2 : %s" % "{0:.2%}".format(r2))
    return np.asarray(predictions)
def winner_prediction(model, data, y, predictors, winner):
    """
    Predict match winners from the difference of the two predicted scores.

    Trains ``model`` once per innings score, takes the sign of the predicted
    score difference as the winner (1 = team 1, 2 = team 2), and prints the
    resulting accuracy against the true labels.

    Args:
        model: scikit-learn style regressor (fit/predict).
        data: DataFrame of feature columns.
        y: DataFrame holding the ``Score_1`` / ``Score_2`` target columns.
        predictors: names of the feature columns to use.
        winner: iterable of true winning-team labels (1 or 2).
    """
    # BUGFIX: use the arguments passed in; the original ignored them and
    # hard-coded the module-level globals X_train / y_train / predictor_var.
    pred1 = print_model_scores(model, data, y, predictors, ["Score_1"])
    pred2 = print_model_scores(model, data, y, predictors, ["Score_2"])
    pred = pred1 - pred2
    # Positive difference -> team 1 predicted to win, otherwise team 2.
    for i in range(len(pred)):
        if pred[i] > 0:
            pred[i] = 1
        else:
            pred[i] = 2
    print("Model Accuracy is: ")
    # Renamed generator variables so the parameter ``y`` is not shadowed.
    correct = sum(1 for guess, truth in zip(pred, winner) if guess == truth)
    print(correct / len(winner))
"""##Model1 - Toss + GroundCode """
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = RandomForestRegressor(n_estimators=100, random_state=0)
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = LinearRegression()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = DecisionTreeRegressor()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = LassoCV()
winner_prediction(model, X_train, y_train, predictor_var, winner)
"""##Model2 - Toss + GroundCode + Venue"""
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = RandomForestRegressor(n_estimators=100, random_state=0)
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = LinearRegression()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = DecisionTreeRegressor()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = LassoCV()
winner_prediction(model, X_train, y_train, predictor_var, winner)
"""##Training on NRR"""
def print_model_scores(model, data, y, predictors, target):
    """
    Fit *model* on the selected predictors and report training-set RMS
    error and R2 score for the given target column(s).

    Args:
        model: scikit-learn style estimator exposing fit/predict.
        data: DataFrame holding the feature columns.
        y: DataFrame holding the target columns.
        predictors: names of the feature columns to train on.
        target: names of the target column(s).

    Returns:
        The model's predictions as a numpy array.
    """
    features = data[predictors]
    labels = y[target]
    model.fit(features, labels)
    predictions = model.predict(features)
    rms = sklearn.metrics.mean_squared_error(predictions, labels) ** 0.5
    print("RMS : %s" % "{0:.2%}".format(rms))
    r2 = sklearn.metrics.r2_score(predictions, labels)
    print("R2 : %s" % "{0:.2%}".format(r2))
    return np.asarray(predictions)
def winner_prediction(model, data, y, predictors, winner):
    """
    Predict match winners from the sign of the predicted net run rate.

    Trains ``model`` on ``NRR_team1``: a positive prediction means team 1
    wins, zero is a tie, negative means team 2 wins. Prints the accuracy
    against the true labels.

    Args:
        model: scikit-learn style regressor (fit/predict).
        data: DataFrame of feature columns.
        y: DataFrame holding the ``NRR_team1`` target column.
        predictors: names of the feature columns to use.
        winner: iterable of true winning-team labels (0, 1 or 2).
    """
    # BUGFIX: use the arguments passed in; the original ignored them and
    # hard-coded the module-level globals X_train / y_train / predictor_var.
    pred = print_model_scores(model, data, y, predictors, ["NRR_team1"])
    for i in range(len(pred)):
        if pred[i] > 0:
            pred[i] = 1
        elif pred[i] == 0:
            pred[i] = 0
        else:
            pred[i] = 2
    print("Model Accuracy is: ")
    # Renamed generator variables so the parameter ``y`` is not shadowed.
    correct = sum(1 for guess, truth in zip(pred, winner) if guess == truth)
    print(correct / len(winner))
"""##Model1 - Toss + GroundCode """
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = RandomForestRegressor(n_estimators=100, random_state=0)
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = LinearRegression()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = DecisionTreeRegressor()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS"]
model = LassoCV()
winner_prediction(model, X_train, y_train, predictor_var, winner)
"""##Model2 - Toss + GroundCode + Venue"""
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = RandomForestRegressor(n_estimators=100, random_state=0)
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = LinearRegression()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = DecisionTreeRegressor()
winner_prediction(model, X_train, y_train, predictor_var, winner)
winner = y_train["Winner_team"]
predictor_var = ["Team_1Enc", "Team_2Enc", "GroundCode", "TOSS", "Venue"]
model = LassoCV()
winner_prediction(model, X_train, y_train, predictor_var, winner)
| [
"sklearn.preprocessing.LabelEncoder",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.metrics.r2_score",
"sklearn.tree.DecisionTreeRegressor",
"pandas.read_csv",
"sklearn.linear_model.LassoCV",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"numpy.asarray",
"... | [((648, 680), 'pandas.HDFStore', 'pd.HDFStore', (['"""processed_data.h5"""'], {}), "('processed_data.h5')\n", (659, 680), True, 'import pandas as pd\n'), ((1170, 1188), 'numpy.random.seed', 'np.random.seed', (['(60)'], {}), '(60)\n', (1184, 1188), True, 'import numpy as np\n'), ((1254, 1291), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/attributes.csv"""'], {}), "('../Data/attributes.csv')\n", (1265, 1291), True, 'import pandas as pd\n'), ((1372, 1392), 'json.load', 'json.load', (['scorecard'], {}), '(scorecard)\n', (1381, 1392), False, 'import json\n'), ((1440, 1455), 'json.load', 'json.load', (['tmap'], {}), '(tmap)\n', (1449, 1455), False, 'import json\n'), ((3616, 3642), 'pandas.HDFStore', 'pd.HDFStore', (['"""nnr_data.h5"""'], {}), "('nnr_data.h5')\n", (3627, 3642), True, 'import pandas as pd\n'), ((3759, 3785), 'pandas.HDFStore', 'pd.HDFStore', (['"""nnr_data.h5"""'], {}), "('nnr_data.h5')\n", (3770, 3785), True, 'import pandas as pd\n'), ((4982, 5014), 'pandas.HDFStore', 'pd.HDFStore', (['"""processed_data.h5"""'], {}), "('processed_data.h5')\n", (4993, 5014), True, 'import pandas as pd\n'), ((5153, 5170), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (5162, 5170), True, 'import pandas as pd\n'), ((5244, 5274), 'pandas.HDFStore', 'pd.HDFStore', (['"""flipped_data.h5"""'], {}), "('flipped_data.h5')\n", (5255, 5274), True, 'import pandas as pd\n'), ((5391, 5421), 'pandas.HDFStore', 'pd.HDFStore', (['"""flipped_data.h5"""'], {}), "('flipped_data.h5')\n", (5402, 5421), True, 'import pandas as pd\n'), ((5580, 5618), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (5593, 5618), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((5869, 5883), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5881, 5883), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((6332, 6389), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(1 / 5)', 'random_state': '(100)'}), '(X, y, test_size=1 / 5, random_state=100)\n', (6348, 6389), False, 'from sklearn.model_selection import train_test_split\n'), ((8181, 8236), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (8202, 8236), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((8409, 8427), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8425, 8427), False, 'from sklearn.linear_model import LinearRegression\n'), ((8600, 8623), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (8621, 8623), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((8796, 8805), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {}), '()\n', (8803, 8805), False, 'from sklearn.linear_model import LassoCV\n'), ((9031, 9086), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (9052, 9086), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((9268, 9286), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9284, 9286), False, 'from sklearn.linear_model import LinearRegression\n'), ((9468, 9491), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (9489, 9491), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((9673, 9682), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {}), '()\n', (9680, 9682), False, 'from sklearn.linear_model import LassoCV\n'), ((11328, 11383), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (11349, 11383), False, 'from sklearn.ensemble import RandomForestRegressor\n'), 
((11556, 11574), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11572, 11574), False, 'from sklearn.linear_model import LinearRegression\n'), ((11747, 11770), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (11768, 11770), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((11943, 11952), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {}), '()\n', (11950, 11952), False, 'from sklearn.linear_model import LassoCV\n'), ((12178, 12233), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (12199, 12233), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((12415, 12433), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12431, 12433), False, 'from sklearn.linear_model import LinearRegression\n'), ((12615, 12638), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (12636, 12638), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((12820, 12829), 'sklearn.linear_model.LassoCV', 'LassoCV', ([], {}), '()\n', (12827, 12829), False, 'from sklearn.linear_model import LassoCV\n'), ((7105, 7153), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['predictions', 'y[target]'], {}), '(predictions, y[target])\n', (7129, 7153), False, 'import sklearn\n'), ((7209, 7232), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (7219, 7232), True, 'import numpy as np\n'), ((10633, 10681), 'sklearn.metrics.r2_score', 'sklearn.metrics.r2_score', (['predictions', 'y[target]'], {}), '(predictions, y[target])\n', (10657, 10681), False, 'import sklearn\n'), ((10737, 10760), 'numpy.asarray', 'np.asarray', (['predictions'], {}), '(predictions)\n', (10747, 10760), True, 'import numpy as np\n'), ((6984, 7042), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', (['predictions', 
'y[target]'], {}), '(predictions, y[target])\n', (7018, 7042), False, 'import sklearn\n'), ((10456, 10514), 'sklearn.metrics.mean_squared_error', 'sklearn.metrics.mean_squared_error', (['predictions', 'y[target]'], {}), '(predictions, y[target])\n', (10490, 10514), False, 'import sklearn\n')] |
# Copyright (c) 2016, <NAME>
# Licensed under the BSD 3-clause license (see LICENSE)
# This file was modified from the GPy project. Its file header is replicated
# below. Its LICENSE.txt is replicated in the LICENSE file for this directory.
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
class Norm(object):
    """Per-column standardization (zero mean, unit variance) helper.

    Call :meth:`scale_by` once with reference data to record column-wise
    statistics, then map data into and out of the normalized space.
    NaN/inf entries in the reference data are ignored when computing the
    statistics.
    """

    def __init__(self):
        self.mean = None
        self.std = None

    def scale_by(self, Y):
        """
        Use data matrix Y as normalization space to work in.
        """
        masked = np.ma.masked_invalid(Y, copy=False)
        self.mean = masked.mean(0).view(np.ndarray)
        self.std = masked.std(0).view(np.ndarray)

    def normalize(self, Y):
        """
        Project Y into normalized space
        """
        if not self.scaled():
            raise AttributeError(
                'Norm object not initialized yet,'
                'try calling scale_by(data) first.')
        return (Y - self.mean) / self.std

    def inverse_mean(self, X):
        """
        Project the normalized object X into space of Y
        """
        return X * self.std + self.mean

    def inverse_variance(self, var):
        """Map a variance from normalized space back to the original space."""
        return var * self.std ** 2

    def scaled(self):
        """
        Whether this Norm object has been initialized.
        """
        return self.mean is not None
| [
"numpy.ma.masked_invalid"
] | [((601, 636), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['Y'], {'copy': '(False)'}), '(Y, copy=False)\n', (621, 636), True, 'import numpy as np\n')] |
"""Analysis classification result data."""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def get_csv_files(path):
    """Return (and print) absolute paths of all .csv files directly in *path*."""
    root = os.path.abspath(path)
    results = []
    for entry in os.listdir(root):
        if entry.endswith('.csv'):
            results.append(os.path.join(root, entry))
    print(results)
    return results
def load_csv(file_name: str):
    """Read a result CSV into a DataFrame, using the first column as index."""
    return pd.read_csv(file_name, index_col=0)
def _guess_top_category(df: pd.DataFrame):
columns = df.columns.drop('file', errors='ignore')
avg = [df[col].sum() for col in columns]
idx = max(range(len(avg)), key=avg.__getitem__)
return columns[idx], idx
def summarize_label(df_in, label='', df_out=None, save_false_result=False, path='false_result.csv'):
    """Print a pass-rate / confidence summary for one classification label.

    A row counts as "correct" when its argmax score column matches the
    expected label column.

    Args:
        df_in: per-image score DataFrame; an optional 'file' column is ignored.
        label: expected label column; when empty/None the column with the
            largest total score is assumed (via _guess_top_category).
        df_out: optional summary DataFrame; one result row is appended to it.
        save_false_result: when truthy, write the misclassified rows to *path*.
        path: CSV destination for misclassified rows.
    """
    df_new = df_in.drop(labels='file', axis=1, errors='ignore')
    if len(df_new) <= 0:
        return
    if label == '' or label is None:
        label, col_idx = _guess_top_category(df_new)
    else:
        try:
            col_idx = df_new.columns.get_loc(label)
        except KeyError:
            print("Can't find label '{0}'".format(label))
            return
    total = len(df_new)
    row_correct = np.argmax(df_new.values, axis=1) == col_idx
    # int() normalizes the numpy integer produced by the boolean-array sum.
    num_correct = int(row_correct.sum())
    print("category: {0}".format(label))
    print("Pass rate: {0:.3f}%. {1}/{2}".format(num_correct * 100.0 / total, num_correct, total))
    avg = df_new[label].sum() / total
    maximal = df_new[label].max()
    minimal = df_new[label].min()
    print("Top-1 confidence: (avg: {0:.3f}%, max: {1:.3f}%, min:{2:.3f}%)\n".format(avg * 100,
                                                                                   maximal * 100, minimal * 100))
    if save_false_result:
        # Idiom fix: boolean negation instead of `row_correct == False`.
        incorrect = df_in.loc[~row_correct]
        incorrect.to_csv(path)
    if df_out is not None:
        row = {'label': label, 'pass-rate': num_correct / total, 'avg': avg,
               'max': maximal, 'min': minimal, 'passed': num_correct, 'total': total}
        df_out.loc[len(df_out)] = row
def plot_csv(df: pd.DataFrame):
    """Plot every score column of *df* ('file' excluded) and show the figure."""
    for column in df.columns.drop('file', errors='ignore'):
        plt.plot(df[column])
    plt.show()
def _get_image_label(path):
assert path.endswith('.csv')
return os.path.basename(path)[:-4]
def summarize(path):
    """Build a summary DataFrame with one row per per-label CSV under *path*."""
    df_out = pd.DataFrame(columns=['label', 'pass-rate', 'avg', 'max', 'min', 'passed', 'total'])
    for csv_path in get_csv_files(path):
        summarize_label(load_csv(csv_path), _get_image_label(csv_path), df_out)
    return df_out
if __name__ == "__main__":
df = summarize('data/')
print(df)
| [
"os.listdir",
"pandas.DataFrame",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.argmax",
"os.path.basename",
"os.path.abspath",
"matplotlib.pyplot.show"
] | [((163, 184), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (178, 184), False, 'import os\n'), ((359, 394), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'index_col': '(0)'}), '(file_name, index_col=0)\n', (370, 394), True, 'import pandas as pd\n'), ((2100, 2110), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2108, 2110), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2369), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['label', 'pass-rate', 'avg', 'max', 'min', 'passed', 'total']"}), "(columns=['label', 'pass-rate', 'avg', 'max', 'min', 'passed',\n 'total'])\n", (2293, 2369), True, 'import pandas as pd\n'), ((200, 224), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (212, 224), False, 'import os\n'), ((1153, 1185), 'numpy.argmax', 'np.argmax', (['df_new.values'], {'axis': '(1)'}), '(df_new.values, axis=1)\n', (1162, 1185), True, 'import numpy as np\n'), ((2080, 2095), 'matplotlib.pyplot.plot', 'plt.plot', (['df[c]'], {}), '(df[c])\n', (2088, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2207), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2201, 2207), False, 'import os\n'), ((237, 253), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (247, 253), False, 'import os\n')] |
# cd/d C:\Python27 & for /l %x in (1176291, 1, 1180590) do (
# python "C:\Users\Public\Documents\ShalabyGroup\Aimsun Controller\RunSeveralReplications.py" -aconsolePath "C:\Program Files\Aimsun\Aimsun Next 8.3\aconsole.exe" -modelPath "C:\Users\Public\Documents\ShalabyGroup\TSP-Louis\finchTSPs_3 intx_west_Subnetwork 1171379.ang" -targets %x
# )
# & "C:\\Program Files\\Aimsun\\Aimsun Next 8.3\\aconsole.exe"
# -v -log -project "C:\\Users\\siwei\\Documents\\Developer\\aimsun\\finchTSPs_3 intx_west_Subnetwork 1171379.ang" -cmd execute -target 1180681
import socket
from subprocess import Popen, PIPE
import time
import numpy as np
import random
AIMSUN_MODEL_PATH = 'C:\\Users\\siwei\\Documents\\Developer\\aimsun\\finchTSPs_3 intx_west_Subnetwork 1171379.ang'
ACONSOLE_PATH = "C:\\Program Files\\Aimsun\\Aimsun Next 8.3\\aconsole.exe"
def run(AIMSUNU_MODEL_PATH, ACONSOLE_PATH):
    """Launch an Aimsun console replication and drive it over a local socket.

    Listens on localhost:23000, spawns aconsole.exe on the given model,
    performs a SYN handshake, then loops: request state, read the feature
    array from 'realtime_state.npy', and send a (currently random) signal
    action until the instance reports FIN.
    """
    HOST = 'localhost'
    PORT = 23000
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without waiting for TIME_WAIT to expire.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((HOST, PORT))
    s.listen(10)
    print('[Info] Aimsun Manager Ready. Waiting For Aimsun Instance...')
    # Start the Aimsun console run; the instance connects back to this socket.
    process = Popen(['"aconsole.exe"', '-v', '-log',
                     '-project', AIMSUNU_MODEL_PATH,
                     '-cmd', 'execute', '-target', '1180681'], executable=ACONSOLE_PATH)
    # process = Popen(['"aconsole.exe"'], executable="C:\\Program Files\\Aimsun\\Aimsun Next 8.3\\aconsole.exe")
    # def start_aimsun_instance(self, s):
    #     process = Popen(['python', 'aimsun.py'])
    #     print("Waiting for aimsun instance to connect...")
    #     conn, addr = s.accept()
    #     print('[Info] Connected by', addr)
    #     conn.send(b'SYN')
    #     data = conn.recv(1024).decode("utf-8")
    #     if data != "SYN":
    #         print("[ERROR] Handshake Failed.")
    #         return False, -1
    #     else:
    #         print("[Info] Aimsun Instance connected.")
    #         return True, conn
    # process = Popen(['python', 'aimsun.py'])
    conn, addr = s.accept()
    print('Connected by', addr)
    # Handshake: send SYN plus the replication id, expect "SYN" back.
    # NOTE(review): the replication id here should presumably match the
    # '-target' passed to aconsole above — confirm.
    repID = 1180681
    sync_message = 'SYN' + str(repID)
    conn.send(bytes(sync_message, 'utf-8'))
    data = conn.recv(1024).decode("utf-8")
    if data != "SYN":
        print("[ERROR] Handshake Failed.")
    else:
        print("[Info] Aimsun Instance connected.")
    # Main control loop: poll for simulation state until FIN is received.
    while True:
        conn.send(b'GET_STATE')
        data = conn.recv(1024).decode("utf-8")
        if(data == 'FIN'):
            print("FIN Received")
            break
        elif(data[:10] != 'DATA_READY'):
            print("ERROR")
            break
        else:
            # NOTE(review): this local name shadows the imported `time` module
            # (harmless here since the module is unused in this function).
            time = data[10:]
            # The Aimsun side writes the current state array to this file.
            feature = np.load('realtime_state.npy')
            print(feature.shape)
            print("Time: " + time)
            # apply action here
            # Placeholder policy: extend the phase 90% of the time at random.
            rand = random.randint(0, 100)
            if rand < 90:
                conn.send(b'WRITE_ACTION:EXTEND')
            else:
                conn.send(b'WRITE_ACTION:NOTHING')
    s.close()
    print('END')
| [
"subprocess.Popen",
"numpy.load",
"random.randint",
"socket.socket"
] | [((943, 992), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (956, 992), False, 'import socket\n'), ((1183, 1325), 'subprocess.Popen', 'Popen', (['[\'"aconsole.exe"\', \'-v\', \'-log\', \'-project\', AIMSUNU_MODEL_PATH, \'-cmd\',\n \'execute\', \'-target\', \'1180681\']'], {'executable': 'ACONSOLE_PATH'}), '([\'"aconsole.exe"\', \'-v\', \'-log\', \'-project\', AIMSUNU_MODEL_PATH,\n \'-cmd\', \'execute\', \'-target\', \'1180681\'], executable=ACONSOLE_PATH)\n', (1188, 1325), False, 'from subprocess import Popen, PIPE\n'), ((2705, 2734), 'numpy.load', 'np.load', (['"""realtime_state.npy"""'], {}), "('realtime_state.npy')\n", (2712, 2734), True, 'import numpy as np\n'), ((2855, 2877), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (2869, 2877), False, 'import random\n')] |
from ray.rllib.offline.estimators.off_policy_estimator import OffPolicyEstimate
from ray.rllib.offline.estimators.direct_method import DirectMethod, k_fold_cv
from ray.rllib.utils.annotations import DeveloperAPI, override
from ray.rllib.utils.typing import SampleBatchType
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.numpy import convert_to_numpy
import numpy as np
@DeveloperAPI
class DoublyRobust(DirectMethod):
    """The Doubly Robust (DR) off-policy estimator.

    Combines a learned Q-model (direct method) with importance sampling,
    as described in https://arxiv.org/pdf/1511.03722.pdf"""

    @override(DirectMethod)
    def estimate(
        self, batch: SampleBatchType, should_train: bool = True
    ) -> OffPolicyEstimate:
        """Return one OffPolicyEstimate per evaluated episode in `batch`."""
        self.check_can_estimate_for(batch)
        estimates = []
        # Split data into train and test using k-fold cross validation
        for train_episodes, test_episodes in k_fold_cv(batch, self.k, should_train):

            # Train Q-function
            if train_episodes:
                # Reinitialize model
                self.model.reset()
                train_batch = SampleBatch.concat_samples(train_episodes)
                losses = self.train(train_batch)
                self.losses.append(losses)

            # Calculate doubly robust OPE estimates
            for episode in test_episodes:
                # old_prob: behavior-policy action probabilities logged in the
                # batch; new_prob: target-policy probabilities of the same actions.
                rewards, old_prob = episode["rewards"], episode["action_prob"]
                new_prob = np.exp(self.action_log_likelihood(episode))

                v_old = 0.0
                v_new = 0.0
                # Q(s, a) for the actions actually taken in the episode.
                q_values = self.model.estimate_q(
                    episode[SampleBatch.OBS], episode[SampleBatch.ACTIONS]
                )
                q_values = convert_to_numpy(q_values)

                # Target-policy action distribution at every step, used to
                # form V(s) = sum_a pi(a|s) Q(s, a).
                all_actions = np.zeros([episode.count, self.policy.action_space.n])
                all_actions[:] = np.arange(self.policy.action_space.n)
                # Two transposes required for torch.distributions to work
                tmp_episode = episode.copy()
                tmp_episode[SampleBatch.ACTIONS] = all_actions.T
                action_probs = np.exp(self.action_log_likelihood(tmp_episode)).T
                v_values = self.model.estimate_v(episode[SampleBatch.OBS], action_probs)
                v_values = convert_to_numpy(v_values)

                # Backward recursion over the episode: v_old is the plain
                # discounted return; v_new is the DR estimate
                #   V_DR(t) = V(s_t) + rho_t * (r_t + gamma * V_DR(t+1) - Q(s_t, a_t))
                # with importance ratio rho_t = new_prob / old_prob.
                for t in reversed(range(episode.count)):
                    v_old = rewards[t] + self.gamma * v_old
                    v_new = v_values[t] + (new_prob[t] / old_prob[t]) * (
                        rewards[t] + self.gamma * v_new - q_values[t]
                    )
                v_new = v_new.item()

                estimates.append(
                    OffPolicyEstimate(
                        self.name,
                        {
                            "v_old": v_old,
                            "v_new": v_new,
                            # Guard against division by ~zero returns.
                            "v_gain": v_new / max(1e-8, v_old),
                        },
                    )
                )
        return estimates
| [
"ray.rllib.offline.estimators.direct_method.k_fold_cv",
"numpy.arange",
"ray.rllib.utils.annotations.override",
"numpy.zeros",
"ray.rllib.utils.numpy.convert_to_numpy",
"ray.rllib.policy.sample_batch.SampleBatch.concat_samples"
] | [((565, 587), 'ray.rllib.utils.annotations.override', 'override', (['DirectMethod'], {}), '(DirectMethod)\n', (573, 587), False, 'from ray.rllib.utils.annotations import DeveloperAPI, override\n'), ((880, 918), 'ray.rllib.offline.estimators.direct_method.k_fold_cv', 'k_fold_cv', (['batch', 'self.k', 'should_train'], {}), '(batch, self.k, should_train)\n', (889, 918), False, 'from ray.rllib.offline.estimators.direct_method import DirectMethod, k_fold_cv\n'), ((1085, 1127), 'ray.rllib.policy.sample_batch.SampleBatch.concat_samples', 'SampleBatch.concat_samples', (['train_episodes'], {}), '(train_episodes)\n', (1111, 1127), False, 'from ray.rllib.policy.sample_batch import SampleBatch\n'), ((1692, 1718), 'ray.rllib.utils.numpy.convert_to_numpy', 'convert_to_numpy', (['q_values'], {}), '(q_values)\n', (1708, 1718), False, 'from ray.rllib.utils.numpy import convert_to_numpy\n'), ((1750, 1803), 'numpy.zeros', 'np.zeros', (['[episode.count, self.policy.action_space.n]'], {}), '([episode.count, self.policy.action_space.n])\n', (1758, 1803), True, 'import numpy as np\n'), ((1837, 1874), 'numpy.arange', 'np.arange', (['self.policy.action_space.n'], {}), '(self.policy.action_space.n)\n', (1846, 1874), True, 'import numpy as np\n'), ((2256, 2282), 'ray.rllib.utils.numpy.convert_to_numpy', 'convert_to_numpy', (['v_values'], {}), '(v_values)\n', (2272, 2282), False, 'from ray.rllib.utils.numpy import convert_to_numpy\n')] |
import math
import numpy as np
import pandas as pd
import scipy.ndimage as ndi
import tensorflow as tf
import tqdm
from scipy.ndimage.morphology import binary_erosion
from data_kits import nf_kits
from data_kits import np_ops, tf_ops
_data_cache = None
def loads(opt, logger, set_key):
    """Dispatch to the data-loader factory matching the requested split.

    Parameters
    ----------
    opt: configuration object; must provide ``fold`` and ``use_box``
    logger: logger instance forwarded to the factories
    set_key: str
        One of "train", "val", "eval", "test", "extra".

    Returns
    -------
    Whatever the matching ``get_loader_*`` factory returns
    (a (generator/dataset, input_shape) pair).

    Raises
    ------
    ValueError
        If ``set_key`` is not a recognized split name.
    """
    split_list = nf_kits.load_split(set_key, opt.fold)
    if set_key == "train" or set_key == "val":
        return get_loader_train(opt, logger, split_list, set_key)
    if set_key == "eval":
        return get_loader_eval(opt, logger, split_list)
    if set_key == "test":
        # Box-guided evaluation is a special test-time mode.
        if opt.use_box:
            return get_loader_test_with_box(opt, logger, split_list)
        return get_loader_test(opt, logger, split_list, set_key)
    if set_key == "extra":
        return get_loader_test(opt, logger, split_list, set_key)
    raise ValueError
def load_data(logger=None):
    """Return the dataset dict, loading it once and memoizing at module level."""
    global _data_cache
    if _data_cache is not None:
        return _data_cache
    _data_cache = nf_kits.load_data(logger)
    return _data_cache
def prob_by_area(x):
    """Map a lesion area to a sampling probability (piecewise constant).

    Areas below 50 get 0.04, areas in [50, 300) get 0.2, and anything
    larger gets probability 1.
    """
    if x >= 300:
        return 1
    return 0.04 if x < 50 else 0.2
def del_circle(image, ctr, radius):
    """
    Zero-out a disc of the given radius on one slice of `image`, in place.

    Only slice ``image[ctr[0]]`` is touched: every pixel whose squared
    distance to (ctr[-2], ctr[-1]) is at most ``radius ** 2`` is set to 0.

    Parameters
    ----------
    image: np.ndarray
        [depth, height, width]
    ctr: np.ndarray
        length 3, circle center (z, y, x)
    radius: int
        circle radius
    """
    _, height, width = image.shape
    # Clip the bounding window of the disc to the image borders.
    top = max(ctr[-2] - radius, 0)
    bottom = min(ctr[-2] + radius + 1, height)
    left = max(ctr[-1] - radius, 0)
    right = min(ctr[-1] + radius + 1, width)
    # Build the keep-mask in absolute coordinates (equivalent to the
    # window-relative formulation): True outside the disc, False inside.
    yy, xx = np.meshgrid(np.arange(top, bottom), np.arange(left, right),
                         indexing="ij", sparse=True)
    keep = (xx - ctr[-1]) ** 2 + (yy - ctr[-2]) ** 2 > radius ** 2
    image[ctr[0], top:bottom, left:right] *= keep  # Only operate on single slice
def inter_simulation(mask, sampler, margin=3, step=10, n=11, bg=False, d=40, strategy=0):
    """
    Interaction simulation, including positive points and negative points
    Parameters
    ----------
    mask: np.ndarray
        binary mask, foreground points sampled from label=1 and bg from label=0
    sampler: np.random.RandomState
    margin: int
        margin band width in which no points are sampled
    step: int
        minimal distance between multiple interactions
    n: int
        maximum number of interactions
    bg: bool
        True for border_value=1, False for border_value=0 in binary erosion
    d: int
        band width outside the object
    strategy: int
        value in [0, 1, 2],
        0: random in whole fg
        1: random in band
        2: surround the object evenly in band
    Returns
    -------
    all_pts: np.ndarray, shape [m, 3], np.float32, coordinates of the sampled
        points (foreground points when bg=False, background otherwise)
    """
    small = False
    first = True
    all_pts = []
    # 3x3 in-plane structuring element (no erosion across slices).
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool instead.
    struct = np.zeros((3, 3, 3), dtype=bool)
    struct[1] = True
    # Shrink the sampling region by `margin` so points stay off the boundary.
    g = binary_erosion(mask, struct, iterations=margin, border_value=bg)
    if bg and strategy != 0:
        # let g be the background band of width `d` around the object
        g = g ^ binary_erosion(g, struct, iterations=d, border_value=bg)
    if not g.max():
        # tumor is too small, discard `margin`
        g = mask.copy()
        small = True
    # determine sample number (at least 1 point for foreground, 0 allowed for bg)
    inter_num = sampler.randint(int(not bg), n)
    for _ in range(inter_num):
        ctr = np.stack(np.where(g), axis=1)  # candidate coordinates [k, 3]
        if not small:
            if first or strategy in [0, 1]:
                i = sampler.choice(ctr.shape[0])
            else:  # strategy == 2
                dist = ctr.reshape(-1, 1, 3) - np.asarray(all_pts).reshape(1, -1, 3)
                # choose the candidate farthest from all previous points
                i = np.argmax(np.sum(dist ** 2, axis=-1).min(axis=1))
            ctr = ctr[i]  # center z, y, x
        else:
            # For small object, directly use the center
            ctr = ctr.mean(axis=0).round().astype(np.int32)
        first = False
        all_pts.append(ctr)
        # Remove a disc around the chosen point so the next one is >= `step` away.
        del_circle(g, ctr, step)
        if small or g.max() == 0:  # Should not add more points
            break
    return np.asarray(all_pts, dtype=np.float32).reshape(-1, 3)
def get_pts(lab_patch, num_inters_max, sampler):
    """
    Sample simulated interactive click points for one training patch.

    Parameters
    ----------
    lab_patch: np.ndarray
        label patch, np.uint8
    num_inters_max: int
        maximum number of interactions
    sampler: np.random.RandomState

    Returns
    -------
    fg_pts: np.ndarray, with shape [None, 3], foreground points
    bg_pts: np.ndarray, with shape [None, 3], background points
    """
    # Foreground clicks only exist when the patch actually contains lesion.
    if lab_patch.max() > 0:
        fg_pts = inter_simulation(lab_patch, sampler, n=num_inters_max, bg=False, strategy=0)
    else:
        fg_pts = np.zeros((0, 3), dtype=np.float32)
    # Randomly pick between band-random (1) and evenly-surrounding (2) placement.
    bg_strategy = 2 if sampler.uniform() <= 0.5 else 1
    bg_pts = inter_simulation(1 - lab_patch, sampler, n=num_inters_max, bg=True,
                             strategy=bg_strategy)
    return fg_pts, bg_pts
def data_processing(img, lab, *pts, opt, logger, mode):
    """
    Pre-process training data with tensorflow API
    Parameters
    ----------
    img: tf.Tensor
        with shape [None, None, None, channel]
    lab: tf.Tensor
        with shape [None, None, None]
    pts: tuple
        containing two tf.Tensor, with shape [None, 3]
    opt:
        configuration object (depth/height/width, guide type, aug options)
    logger:
        logger instance (used only for the one-time guide info message)
    mode: str
        must in [train|val]
    Returns
    -------
    img: tf.Tensor if guide_type == "none", else a tuple with two tf.Tensor (img, guide)
        img: with shape [depth, height, width, channel]
        guide: with shape [depth, height, width, 2]
    lab: tf.Tensor, with shape [depth, height, width]
    """
    # z_score
    img = tf_ops.z_score(img)
    target_shape = [opt.depth, opt.height, opt.width]
    # Only resize height and width. Keep depth unchanged.
    img = tf.image.resize(img, (opt.height, opt.width))
    def pts_to_guide(ctr, std):
        # Render a set of click coordinates into a dense guidance map.
        # Returns an all-zero map when there are no points.
        if tf.shape(ctr)[0] > 0:
            if opt.guide == "exp":
                # Gaussian-like guide; per-point stddev, floored at 0.1.
                stddev = tf.maximum(tf.ones(tf.shape(ctr), tf.float32) * std, 0.1)
                gd = tf_ops.gen_guide_3d(target_shape, ctr, stddev, euclidean=False)
            elif opt.guide == "euc":
                gd = tf_ops.gen_guide_3d(target_shape, ctr, euclidean=True)
            elif opt.guide == "geo":
                int_ctr = tf.cast(ctr, tf.int32)
                # Geodesic distance is computed in numpy via py_function, so the
                # static shape is lost and must be restored explicitly.
                gd = tf.py_function(
                    np_ops.gen_guide_geo_3d, [img[..., 0], int_ctr, opt.geo_lamb, opt.geo_iter],
                    tf.float32, name="GeoDistLarge")
                gd.set_shape((opt.depth, opt.height, opt.width))
                gd = tf.expand_dims(gd, axis=-1)
            else:
                raise ValueError(f"Unsupported guide type: {opt.guide}")
            guide = tf.cast(gd, tf.float32)
        else:
            guide = tf.zeros(tf.concat([tf.shape(img)[:-1], [1]], axis=0), tf.float32)
        return guide
    if opt.guide != "none":
        fg_pts, bg_pts = pts
        # Points were sampled in original-label coordinates; rescale them to
        # the network's target shape before rendering.
        scale = tf.constant(target_shape, tf.float32) / tf.cast(tf.shape(lab), tf.float32)
        fg_pts = fg_pts * scale
        bg_pts = bg_pts * scale
        fg_guide = pts_to_guide(fg_pts, opt.exp_stddev)
        bg_guide = pts_to_guide(bg_pts, opt.exp_stddev)
        logger.info(f"Use guide with {opt.guide} distance" +
                    f", stddev={tuple(opt.exp_stddev)}" * (opt.guide == "exp"))
        # Concatenate guides as extra channels so spatial augmentations below
        # are applied to image and guides jointly.
        img = tf.concat([img, fg_guide, bg_guide], axis=-1)
    if mode == "train":
        if opt.flip > 0:
            img, lab = tf_ops.random_flip(img, lab, flip=opt.flip)
        if opt.rotate > 0:
            # Only rotate height and width. Keep depth unchanged.
            lab = tf.expand_dims(lab, axis=-1)
            img, lab = tf_ops.random_rotate(img, lab, rotate_scale=opt.rotate)
            lab = tf.squeeze(lab, axis=-1)
    sp_guide = None
    if opt.guide != "none":
        # Split the guide channels back out after the joint spatial transforms.
        img, sp_guide = tf.split(img, [opt.channel, 2], axis=-1)
    # Resize the label with nearest-neighbor to keep it a hard mask.
    lab = tf.expand_dims(lab, axis=-1)
    lab = tf.image.resize(lab, (opt.height, opt.width), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    lab = tf.squeeze(lab, axis=-1)
    if mode == "train":
        # Intensity augmentation on the image only (guides already split out).
        img = tf_ops.augment_gamma(img, gamma_range=(0.7, 1.5), retain_stats=True, p_per_sample=0.3)
    if opt.guide != "none":
        img = (img, sp_guide)
    return img, lab
def volume_crop(volume, center, shape, extend_z=(0, 0)):
    """Crop a `shape`-sized patch centered at `center`, clipped to the volume.

    The crop window is shifted (not shrunk) when it would cross a border.
    The depth axis may additionally be extended by ``extend_z`` slices on
    each side; any part of the extension outside the volume is zero-padded.

    Parameters
    ----------
    volume: np.ndarray, [depth, height, width]
    center: tuple of 3 ints, desired patch center (z, y, x)
    shape: tuple of 3 ints, patch size (before z-extension)
    extend_z: tuple of 2 ints, extra slices (before, after) along z

    Returns
    -------
    img: np.ndarray, the (possibly zero-padded) crop
    slices: tuple of 3 ``slice`` objects locating the un-extended crop
        inside ``volume``
    """
    # Per-axis window: center the patch, then shift it inside the borders.
    bounds = []
    for axis in range(3):
        lo = min(max(center[axis] - shape[axis] // 2, 0),
                 volume.shape[axis] - shape[axis])
        bounds.append((lo, lo + shape[axis]))
    (z1, z2), (y1, y2), (x1, x2) = bounds
    slices = (slice(z1, z2), slice(y1, y2), slice(x1, x2))
    # Extend along z, clipping at the volume borders and remembering how much
    # zero-padding is needed to make up for the clipped part.
    depth = volume.shape[0]
    pad_before = max(0, extend_z[0] - z1)
    pad_after = max(0, extend_z[1] + z2 - depth)
    ze1 = max(0, z1 - extend_z[0])
    ze2 = min(z2 + extend_z[1], depth)
    img = np.pad(volume[ze1:ze2, y1:y2, x1:x2],
                 ((pad_before, pad_after), (0, 0), (0, 0)))
    return img, slices
def gen_batch(opt, data_list, mode, sampler):
    """ Batch sampler
    Parameters
    ----------
    opt:
        Configurations
    data_list: pd.DataFrame
        with column [split, pid, remove]
    mode: str
        train or val
    sampler: np.random.RandomState
        random state
    Returns
    -------
    A generator:
        img_patch: np.ndarray
            with shape [None, None, None, 1], tf.float32
        lab_patch: np.ndarray
            with shape [None, None, None], tf.int32
        fg_pts: np.ndarray
            [guide != "none"] with shape [None, 3], np.float32
        bg_pts: np.ndarray
            [guide != "none"] with shape [None, 3], np.float32
    """
    train = mode == "train"
    data = load_data()
    # Drop cases missing from the data cache.
    data_list = data_list[[True if pid in data else False for pid in data_list.pid]]
    # dataset containing nf (remove benign scans)
    # NOTE(review): column assignment on a filtered frame may raise pandas'
    # SettingWithCopyWarning — confirm a .copy() is not needed here.
    data_list['nf'] = [True if len(data[pid]['lab_rng']) > 1 else False for pid in data_list.pid]
    nf_set = data_list[data_list.nf]
    # Number of samples per batch that must contain a tumor.
    force_tumor = math.ceil(opt.bs * opt.tumor_percent)
    target_size = np.array([opt.depth, opt.height, opt.width], dtype=np.float32)
    if train:
        zoom = opt.zoom
    else:
        # Validation uses a fixed zoom (the midpoint of the training range).
        zoom = ((opt.zoom[0] + opt.zoom[1]) / 2, ) * 2
    # Infinite generator; the consumer decides how many batches to draw.
    while True:
        # Draw the forced-tumor part from NF cases, the rest from anything else.
        nf = nf_set.sample(
            n=force_tumor, replace=False, weights=None, random_state=sampler.get_state())
        nf['flag'] = [1] * len(nf.index)
        rem = data_list[~data_list.index.isin(nf.index)].sample(
            n=opt.bs - force_tumor, replace=False, weights=None, random_state=sampler.get_state())
        rem['flag'] = [0] * len(rem.index)
        batch = pd.concat([nf, rem])
        for i, sample in batch.iterrows():  # columns [split, pid, nf(bool)]
            # Random anisotropic zoom of the crop size (height/width only).
            crop_shape = (target_size * (1, *sampler.uniform(*zoom, 2))).astype(np.int32)
            d, h, w = data[sample.pid]['img'].shape  # volume shape
            if sample.flag == 1:
                # choose a foreground pixel
                i = sampler.choice(data[sample.pid]['pos'].shape[0])
                pz, py, px = data[sample.pid]['pos'][i]
            else:
                # choose a random pixel
                pz = sampler.randint(d)
                py = sampler.randint(h)
                px = sampler.randint(w)
            img_patch, slices = volume_crop(data[sample.pid]['img'], (pz, py, px), crop_shape)
            # Binarize the label patch (any positive class -> 1).
            lab_patch = np.clip(data[sample.pid]['lab'][slices], 0, 1)  # np.uint8
            img_patch = img_patch[..., None]
            yield_list = (img_patch.astype(np.float32), lab_patch.astype(np.int32))
            if opt.guide != "none":
                # Append simulated interaction points for guided training.
                yield_list = yield_list + get_pts(lab_patch, opt.num_inters_max, sampler)
            yield yield_list
def get_loader_train(opt, logger, data_list, set_key):
    """Build the tf.data pipeline for the train or val split.

    Parameters
    ----------
    opt: configuration object (bs, depth/height/width, channel, guide, ...)
    logger: logger instance
    data_list: pd.DataFrame with columns [split, pid, remove]
    set_key: str, "train" or "val"

    Returns
    -------
    dataset: tf.data.Dataset yielding preprocessed batches
    input_shape: tuple (or [tuple, tuple] when guides are enabled)
    """
    load_data(logger)  # load data to cache
    bs = opt.bs
    def train_gen():
        # Unseeded sampler: each epoch sees different random batches.
        infinity_generator = gen_batch(opt, data_list, "train", np.random.RandomState())
        ranges = tqdm.tqdm(
            range(opt.train_n * bs), leave=False, desc="Train", unit_scale=1. / bs,
            dynamic_ncols=True)
        for _ in ranges:
            yield next(infinity_generator)
    def val_gen():
        # Fixed seed: validation batches are reproducible across runs.
        infinity_generator = gen_batch(opt, data_list, "val", sampler=np.random.RandomState(1234))
        ranges = tqdm.tqdm(
            range(opt.val_n * bs), leave=False, desc="Val", unit_scale=1. / bs,
            dynamic_ncols=True)
        for _ in ranges:
            yield next(infinity_generator)
    data_gen = train_gen if set_key == "train" else val_gen
    input_shape = (bs, opt.depth, opt.height, opt.width, opt.channel)
    output_signature = (
        tf.TensorSpec((None, None, None, opt.channel), tf.float32),
        tf.TensorSpec((None, None, None), tf.int32)
    )
    if opt.guide != "none":
        # Guided mode adds a 2-channel guide input and two point tensors.
        input_shape = [input_shape, (bs, opt.depth, opt.height, opt.width, 2)]
        output_signature = output_signature + (
            tf.TensorSpec((None, 3), tf.float32),
            tf.TensorSpec((None, 3), tf.float32)
        )
    def map_fn(*args):
        func = data_processing(*args, opt, logger, mode=set_key)
        return func
    dataset = tf.data.Dataset.from_generator(data_gen, output_signature=output_signature)\
        .map(map_fn, num_parallel_calls=bs * 2)\
        .batch(bs, drop_remainder=True)\
        .prefetch(2)
    logger.info(f" ==> Dataloader 3D for {set_key}")
    return dataset, input_shape
def eval_gen(opt, data_fn, data_list):
    """Yield (sample, meta, normalized_volume, label) for each evaluable case.

    Cases whose label contains no positive voxel are skipped. The volume is
    resized in height/width to (opt.test_height, opt.test_width), padded to
    an even number of slices, and z-score normalized; the label is yielded
    at its original resolution.
    """
    for _, sample in data_list.iterrows():
        volume, label, meta = data_fn(sample.pid)
        if label.max() == 0:
            # Nothing to evaluate in this scan.
            continue
        assert volume.shape == label.shape, f"{volume.shape} vs {label.shape}"
        # Resize only height/width; keep depth unchanged.
        scale = np.array(
            (1, opt.test_height / volume.shape[1], opt.test_width / volume.shape[2]),
            np.float32)
        resized = ndi.zoom(volume, scale, order=1)
        if resized.shape[0] % 2 == 1:
            # Even depth required downstream; append one zero slice.
            resized = np.pad(resized, ((0, 1), (0, 0), (0, 0)),
                             mode="constant", constant_values=0)
        yield sample, meta, np_ops.z_score(resized), label
def get_loader_eval(opt, logger, data_list):
    """Build the evaluation generator and its expected input shape(s)."""
    data = load_data(logger)
    # Keep cases that exist in the cache and are not flagged for removal.
    keep = [item.pid in data and item.remove != 1
            for _, item in data_list.iterrows()]
    data_list = data_list[keep].copy()
    # Restrict to scans that actually contain NF lesions.
    data_list['nf'] = [len(data[pid]['lab_rng']) > 1 for pid in data_list.pid]
    data_list = data_list[data_list.nf]
    data = nf_kits.slim_labels(data, logger)

    def data_fn(pid):
        case = data[pid]
        return case["img"].astype(np.float32), case["slim"], case["meta"]

    generator = eval_gen(opt, data_fn, data_list)
    shape = (1, opt.test_depth, opt.test_height, opt.test_width, opt.channel)
    if opt.guide == "none":
        return generator, shape
    return generator, [shape, (1, opt.test_depth, opt.test_height, opt.test_width, 2)]
def get_loader_test(opt, logger, data_list, set_key,
                    test_depth=None, test_height=None, test_width=None, channel=None,
                    guide_type=None, std_split=None, reduce_shift=None):
    """Build the generator for the "test" or "extra" split.

    Parameters
    ----------
    opt: configuration object (test_* sizes, channel, guide)
    logger: logger instance (unused here; kept for a uniform factory signature)
    data_list: pd.DataFrame with columns [split, pid, remove]
    set_key: str, "test" or "extra"
    test_depth, test_height, test_width, channel, guide_type, std_split,
    reduce_shift:
        Unused legacy parameters. BUG FIX: they were required positional
        parameters, so the 4-argument calls in ``loads`` raised TypeError;
        defaults restore compatibility with those callers.

    Returns
    -------
    generator, input_shape

    Raises
    ------
    ValueError
        If ``set_key`` is neither "test" nor "extra".
    """
    if set_key == "test":
        data = nf_kits.load_test_data_paths()  # Only contain data paths
    elif set_key == "extra":
        data = nf_kits.load_extra_data_paths()
    else:
        raise ValueError
    data_list = data_list[[True if item.pid in data and item.remove != 1 else False
                           for _, item in data_list.iterrows()]].copy()

    def data_fn(pid):
        # Lazily read image/label from disk for one case.
        volume = nf_kits.read_nii(data[pid]["img_path"])[1].astype(np.float32)
        meta, label = nf_kits.read_nii(data[pid]["lab_path"], np.int8)
        label = np.clip(label, 0, 1)
        if volume.min() < 0:
            # Clamp negative intensities (seen in some exports) to zero.
            volume[volume < 0] = 0
        return volume, label, meta

    # BUG FIX: `eval_gen` expects (opt, data_fn, data_list); `opt` was missing.
    generator = eval_gen(opt, data_fn, data_list)
    input_shape = (1, opt.test_depth, opt.test_height, opt.test_width, opt.channel)
    if opt.guide != "none":
        input_shape = [input_shape, (1, opt.test_depth, opt.test_height, opt.test_width, 2)]
    return generator, input_shape
def eval_gen_with_box(opt, data_fn, data_list, box_ds):
    """Yield evaluation patches restricted to annotated bounding boxes.

    For each case, every row of `box_ds` with a matching pid produces one
    patch. A negative ``z1`` in a box row means "use the full image".

    Yields
    ------
    sample, meta, normed_patch, (lab_patch, label), crop_box, mask, max_iter
        crop_box is None for full-image patches, otherwise
        [box_index, total_boxes, crop slices].
    """
    for _, sample in data_list.iterrows():
        volume, label, meta = data_fn(sample.pid)
        if label.max() == 0:
            continue
        assert volume.shape == label.shape, f"{volume.shape} vs {label.shape}"
        depth, height, width = volume.shape
        # Inference with each of the boxes
        case_box_ds = box_ds[box_ds['pid'] == sample.pid]
        total = len(case_box_ds)
        for bid, (_, box) in enumerate(case_box_ds.iterrows()):
            if box['z1'] < 0:
                # Use full image
                img_patch = volume
                lab_patch = label
                crop_box = None
                mask = None
                t_height = opt.test_height
                t_width = opt.test_width
                max_iter = 20
            else:
                pid, z1, z2, y1, y2, x1, x2, max_iter = box
                # Enforce minimum patch extents (6 slices, 128x128 in-plane)
                # by symmetrically growing the box, shifted to stay in bounds.
                if z2 - z1 < 6:
                    half = (6 - z2 + z1) // 2
                    z1 = min(max(z1 - half, 0), depth - 6)
                    z2 = z1 + 6
                if y2 - y1 < 128:
                    half = (128 - y2 + y1) // 2
                    y1 = min(max(y1 - half, 0), height - 128)
                    y2 = y1 + 128
                if x2 - x1 < 128:
                    half = (128 - x2 + x1) // 2
                    x1 = min(max(x1 - half, 0), width - 128)
                    x2 = x1 + 128
                img_patch = volume[z1:z2, y1:y2, x1:x2]
                lab_patch = label[z1:z2, y1:y2, x1:x2]
                # Mask of the ORIGINAL (un-grown) box inside the grown patch.
                mask = np.zeros_like(label, np.int32)
                _, a, b, c, d, e, f, _ = box
                mask[a:b, c:d, e:f] = 1
                mask = mask[z1:z2, y1:y2, x1:x2]
                # NOTE(review): lab_patch is a view of `label`, so this in-place
                # multiply also zeroes `label` outside the box — confirm intended,
                # since `label` is yielded alongside lab_patch below.
                lab_patch *= mask
                crop_box = [bid, total, (slice(z1, z2), slice(y1, y2), slice(x1, x2))]
                # Round in-plane size to multiples of 16 for the network.
                t_height = int(round(img_patch.shape[1] / 16) * 16)
                t_width = int(round(img_patch.shape[2] / 16) * 16)
            zoom_scale = np.array(
                (1, t_height / img_patch.shape[1], t_width / img_patch.shape[2]), np.float32)
            resized_patch = ndi.zoom(img_patch, zoom_scale, order=1)
            if mask is not None:
                # Nearest-neighbor keeps the mask binary.
                mask = ndi.zoom(mask, zoom_scale, order=0)
            if resized_patch.shape[0] % 2 != 0:
                # Even depth required downstream; append one zero slice.
                resized_patch = np.pad(
                    resized_patch, ((0, 1), (0, 0), (0, 0)), mode="constant", constant_values=0)
            normed_patch = np_ops.z_score(resized_patch)
            yield sample, meta, normed_patch, (lab_patch, label), crop_box, mask, max_iter
def get_loader_test_with_box(opt, logger, data_list):
    """Build the test-time generator that evaluates inside user-provided boxes.

    Parameters
    ----------
    opt: configuration object
    logger: logger instance (unused here; kept for a uniform factory signature)
    data_list: pd.DataFrame with columns [split, pid, remove]

    Returns
    -------
    generator, input_shape
        Spatial dimensions of input_shape are None because every box
        determines its own patch size.
    """
    data = nf_kits.load_test_data_paths()  # Only contain data paths
    box_ds = nf_kits.load_box_csv()
    box_pids = list(box_ds["pid"])
    # Keep only cases that have image paths, at least one box, and no remove flag.
    data_list = data_list[[True if item.pid in data and item.pid in box_pids and item.remove != 1
                           else False for _, item in data_list.iterrows()]].copy()

    def data_fn(pid):
        # Lazily read image/label from disk for one case.
        volume = nf_kits.read_nii(data[pid]["img_path"])[1].astype(np.float32)
        meta, label = nf_kits.read_nii(data[pid]["lab_path"], np.int8)
        label = np.clip(label, 0, 1)
        return volume, label, meta

    # BUG FIX: `eval_gen_with_box` expects (opt, data_fn, data_list, box_ds);
    # the `opt` argument was missing from the call.
    generator = eval_gen_with_box(opt, data_fn, data_list, box_ds)
    input_shape = [(1, None, None, None, 1), (1, None, None, None, 2)]
    return generator, input_shape
def load_case(cfg):
    """Load a single case from `cfg.case`, normalize it, and wrap it in a
    one-item generator compatible with the evaluation loaders.

    Returns
    -------
    generator: yields (sample, meta, volume, label) exactly once
    input_shape: [(1, *volume.shape, 1), (1, *volume.shape, 2)]
    """
    meta, volume = nf_kits.read_nii(cfg.case.img, np.float32)
    _, label = nf_kits.read_nii(cfg.case.lab, np.int32)
    # Even number of slices required: append one zero slice if needed.
    if volume.shape[0] % 2 != 0:
        volume = np.pad(volume, ((0, 1), (0, 0), (0, 0)))
    # Height/width must be multiples of 16; resize in-plane if they are not.
    if volume.shape[1] % 16 != 0 or volume.shape[2] % 16 != 0:
        new_h = np.round(volume.shape[1] / 16) * 16
        new_w = np.round(volume.shape[2] / 16) * 16
        volume = ndi.zoom(
            volume, [1, new_h / volume.shape[1], new_w / volume.shape[2]], order=1)
    volume = np_ops.z_score(volume)

    def generator():
        # Placeholder sample row matching the DataFrame rows other loaders yield.
        fake_sample = pd.Series([0, 0, 0, True], index="split pid remove nf".split())
        yield fake_sample, meta, volume, label

    input_shape = [(1, *volume.shape, 1), (1, *volume.shape, 2)]
    return generator(), input_shape
| [
"numpy.clip",
"data_kits.tf_ops.gen_guide_3d",
"data_kits.nf_kits.load_split",
"tensorflow.shape",
"tensorflow.split",
"numpy.array",
"data_kits.nf_kits.load_extra_data_paths",
"tensorflow.cast",
"scipy.ndimage.zoom",
"numpy.arange",
"numpy.random.RandomState",
"numpy.where",
"tensorflow.py_... | [((307, 344), 'data_kits.nf_kits.load_split', 'nf_kits.load_split', (['set_key', 'opt.fold'], {}), '(set_key, opt.fold)\n', (325, 344), False, 'from data_kits import nf_kits\n'), ((3065, 3099), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {'dtype': 'np.bool'}), '((3, 3, 3), dtype=np.bool)\n', (3073, 3099), True, 'import numpy as np\n'), ((3129, 3193), 'scipy.ndimage.morphology.binary_erosion', 'binary_erosion', (['mask', 'struct'], {'iterations': 'margin', 'border_value': 'bg'}), '(mask, struct, iterations=margin, border_value=bg)\n', (3143, 3193), False, 'from scipy.ndimage.morphology import binary_erosion\n'), ((5858, 5877), 'data_kits.tf_ops.z_score', 'tf_ops.z_score', (['img'], {}), '(img)\n', (5872, 5877), False, 'from data_kits import np_ops, tf_ops\n'), ((6001, 6046), 'tensorflow.image.resize', 'tf.image.resize', (['img', '(opt.height, opt.width)'], {}), '(img, (opt.height, opt.width))\n', (6016, 6046), True, 'import tensorflow as tf\n'), ((8103, 8131), 'tensorflow.expand_dims', 'tf.expand_dims', (['lab'], {'axis': '(-1)'}), '(lab, axis=-1)\n', (8117, 8131), True, 'import tensorflow as tf\n'), ((8142, 8232), 'tensorflow.image.resize', 'tf.image.resize', (['lab', '(opt.height, opt.width)', 'tf.image.ResizeMethod.NEAREST_NEIGHBOR'], {}), '(lab, (opt.height, opt.width), tf.image.ResizeMethod.\n NEAREST_NEIGHBOR)\n', (8157, 8232), True, 'import tensorflow as tf\n'), ((8238, 8262), 'tensorflow.squeeze', 'tf.squeeze', (['lab'], {'axis': '(-1)'}), '(lab, axis=-1)\n', (8248, 8262), True, 'import tensorflow as tf\n'), ((9151, 9198), 'numpy.pad', 'np.pad', (['img', '((pad_z1, pad_z2), (0, 0), (0, 0))'], {}), '(img, ((pad_z1, pad_z2), (0, 0), (0, 0)))\n', (9157, 9198), True, 'import numpy as np\n'), ((10254, 10291), 'math.ceil', 'math.ceil', (['(opt.bs * opt.tumor_percent)'], {}), '(opt.bs * opt.tumor_percent)\n', (10263, 10291), False, 'import math\n'), ((10311, 10373), 'numpy.array', 'np.array', (['[opt.depth, opt.height, opt.width]'], {'dtype': 
'np.float32'}), '([opt.depth, opt.height, opt.width], dtype=np.float32)\n', (10319, 10373), True, 'import numpy as np\n'), ((14820, 14853), 'data_kits.nf_kits.slim_labels', 'nf_kits.slim_labels', (['data', 'logger'], {}), '(data, logger)\n', (14839, 14853), False, 'from data_kits import nf_kits\n'), ((19075, 19105), 'data_kits.nf_kits.load_test_data_paths', 'nf_kits.load_test_data_paths', ([], {}), '()\n', (19103, 19105), False, 'from data_kits import nf_kits\n'), ((19147, 19169), 'data_kits.nf_kits.load_box_csv', 'nf_kits.load_box_csv', ([], {}), '()\n', (19167, 19169), False, 'from data_kits import nf_kits\n'), ((19840, 19882), 'data_kits.nf_kits.read_nii', 'nf_kits.read_nii', (['cfg.case.img', 'np.float32'], {}), '(cfg.case.img, np.float32)\n', (19856, 19882), False, 'from data_kits import nf_kits\n'), ((19898, 19938), 'data_kits.nf_kits.read_nii', 'nf_kits.read_nii', (['cfg.case.lab', 'np.int32'], {}), '(cfg.case.lab, np.int32)\n', (19914, 19938), False, 'from data_kits import nf_kits\n'), ((20342, 20364), 'data_kits.np_ops.z_score', 'np_ops.z_score', (['volume'], {}), '(volume)\n', (20356, 20364), False, 'from data_kits import np_ops, tf_ops\n'), ((946, 971), 'data_kits.nf_kits.load_data', 'nf_kits.load_data', (['logger'], {}), '(logger)\n', (963, 971), False, 'from data_kits import nf_kits\n'), ((1790, 1803), 'numpy.arange', 'np.arange', (['rh'], {}), '(rh)\n', (1799, 1803), True, 'import numpy as np\n'), ((1805, 1818), 'numpy.arange', 'np.arange', (['rw'], {}), '(rw)\n', (1814, 1818), True, 'import numpy as np\n'), ((4933, 4967), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {'dtype': 'np.float32'}), '((0, 3), dtype=np.float32)\n', (4941, 4967), True, 'import numpy as np\n'), ((7553, 7598), 'tensorflow.concat', 'tf.concat', (['[img, fg_guide, bg_guide]'], {'axis': '(-1)'}), '([img, fg_guide, bg_guide], axis=-1)\n', (7562, 7598), True, 'import tensorflow as tf\n'), ((8051, 8091), 'tensorflow.split', 'tf.split', (['img', '[opt.channel, 2]'], {'axis': '(-1)'}), 
'(img, [opt.channel, 2], axis=-1)\n', (8059, 8091), True, 'import tensorflow as tf\n'), ((8302, 8392), 'data_kits.tf_ops.augment_gamma', 'tf_ops.augment_gamma', (['img'], {'gamma_range': '(0.7, 1.5)', 'retain_stats': '(True)', 'p_per_sample': '(0.3)'}), '(img, gamma_range=(0.7, 1.5), retain_stats=True,\n p_per_sample=0.3)\n', (8322, 8392), False, 'from data_kits import np_ops, tf_ops\n'), ((10876, 10896), 'pandas.concat', 'pd.concat', (['[nf, rem]'], {}), '([nf, rem])\n', (10885, 10896), True, 'import pandas as pd\n'), ((12916, 12974), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, None, opt.channel)', 'tf.float32'], {}), '((None, None, None, opt.channel), tf.float32)\n', (12929, 12974), True, 'import tensorflow as tf\n'), ((12984, 13027), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, None)', 'tf.int32'], {}), '((None, None, None), tf.int32)\n', (12997, 13027), True, 'import tensorflow as tf\n'), ((13990, 14089), 'numpy.array', 'np.array', (['(1, opt.test_height / volume.shape[1], opt.test_width / volume.shape[2])', 'np.float32'], {}), '((1, opt.test_height / volume.shape[1], opt.test_width / volume.\n shape[2]), np.float32)\n', (13998, 14089), True, 'import numpy as np\n'), ((14123, 14160), 'scipy.ndimage.zoom', 'ndi.zoom', (['volume', 'zoom_scale'], {'order': '(1)'}), '(volume, zoom_scale, order=1)\n', (14131, 14160), True, 'import scipy.ndimage as ndi\n'), ((14361, 14391), 'data_kits.np_ops.z_score', 'np_ops.z_score', (['resized_volume'], {}), '(resized_volume)\n', (14375, 14391), False, 'from data_kits import np_ops, tf_ops\n'), ((15477, 15507), 'data_kits.nf_kits.load_test_data_paths', 'nf_kits.load_test_data_paths', ([], {}), '()\n', (15505, 15507), False, 'from data_kits import nf_kits\n'), ((15928, 15976), 'data_kits.nf_kits.read_nii', 'nf_kits.read_nii', (["data[pid]['lab_path']", 'np.int8'], {}), "(data[pid]['lab_path'], np.int8)\n", (15944, 15976), False, 'from data_kits import nf_kits\n'), ((15993, 16013), 'numpy.clip', 'np.clip', 
(['label', '(0)', '(1)'], {}), '(label, 0, 1)\n', (16000, 16013), True, 'import numpy as np\n'), ((19510, 19558), 'data_kits.nf_kits.read_nii', 'nf_kits.read_nii', (["data[pid]['lab_path']", 'np.int8'], {}), "(data[pid]['lab_path'], np.int8)\n", (19526, 19558), False, 'from data_kits import nf_kits\n'), ((19575, 19595), 'numpy.clip', 'np.clip', (['label', '(0)', '(1)'], {}), '(label, 0, 1)\n', (19582, 19595), True, 'import numpy as np\n'), ((19989, 20029), 'numpy.pad', 'np.pad', (['volume', '((0, 1), (0, 0), (0, 0))'], {}), '(volume, ((0, 1), (0, 0), (0, 0)))\n', (19995, 20029), True, 'import numpy as np\n'), ((20291, 20328), 'scipy.ndimage.zoom', 'ndi.zoom', (['volume', 'zoom_scale'], {'order': '(1)'}), '(volume, zoom_scale, order=1)\n', (20299, 20328), True, 'import scipy.ndimage as ndi\n'), ((3278, 3334), 'scipy.ndimage.morphology.binary_erosion', 'binary_erosion', (['g', 'struct'], {'iterations': 'd', 'border_value': 'bg'}), '(g, struct, iterations=d, border_value=bg)\n', (3292, 3334), False, 'from scipy.ndimage.morphology import binary_erosion\n'), ((3580, 3591), 'numpy.where', 'np.where', (['g'], {}), '(g)\n', (3588, 3591), True, 'import numpy as np\n'), ((4299, 4336), 'numpy.asarray', 'np.asarray', (['all_pts'], {'dtype': 'np.float32'}), '(all_pts, dtype=np.float32)\n', (4309, 4336), True, 'import numpy as np\n'), ((6927, 6950), 'tensorflow.cast', 'tf.cast', (['gd', 'tf.float32'], {}), '(gd, tf.float32)\n', (6934, 6950), True, 'import tensorflow as tf\n'), ((7147, 7184), 'tensorflow.constant', 'tf.constant', (['target_shape', 'tf.float32'], {}), '(target_shape, tf.float32)\n', (7158, 7184), True, 'import tensorflow as tf\n'), ((7672, 7715), 'data_kits.tf_ops.random_flip', 'tf_ops.random_flip', (['img', 'lab'], {'flip': 'opt.flip'}), '(img, lab, flip=opt.flip)\n', (7690, 7715), False, 'from data_kits import np_ops, tf_ops\n'), ((7827, 7855), 'tensorflow.expand_dims', 'tf.expand_dims', (['lab'], {'axis': '(-1)'}), '(lab, axis=-1)\n', (7841, 7855), True, 
'import tensorflow as tf\n'), ((7879, 7934), 'data_kits.tf_ops.random_rotate', 'tf_ops.random_rotate', (['img', 'lab'], {'rotate_scale': 'opt.rotate'}), '(img, lab, rotate_scale=opt.rotate)\n', (7899, 7934), False, 'from data_kits import np_ops, tf_ops\n'), ((7953, 7977), 'tensorflow.squeeze', 'tf.squeeze', (['lab'], {'axis': '(-1)'}), '(lab, axis=-1)\n', (7963, 7977), True, 'import tensorflow as tf\n'), ((11635, 11681), 'numpy.clip', 'np.clip', (["data[sample.pid]['lab'][slices]", '(0)', '(1)'], {}), "(data[sample.pid]['lab'][slices], 0, 1)\n", (11642, 11681), True, 'import numpy as np\n'), ((12187, 12210), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (12208, 12210), True, 'import numpy as np\n'), ((14235, 14323), 'numpy.pad', 'np.pad', (['resized_volume', '((0, 1), (0, 0), (0, 0))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(resized_volume, ((0, 1), (0, 0), (0, 0)), mode='constant',\n constant_values=0)\n", (14241, 14323), True, 'import numpy as np\n'), ((15580, 15611), 'data_kits.nf_kits.load_extra_data_paths', 'nf_kits.load_extra_data_paths', ([], {}), '()\n', (15609, 15611), False, 'from data_kits import nf_kits\n'), ((18410, 18500), 'numpy.array', 'np.array', (['(1, t_height / img_patch.shape[1], t_width / img_patch.shape[2])', 'np.float32'], {}), '((1, t_height / img_patch.shape[1], t_width / img_patch.shape[2]),\n np.float32)\n', (18418, 18500), True, 'import numpy as np\n'), ((18542, 18582), 'scipy.ndimage.zoom', 'ndi.zoom', (['img_patch', 'zoom_scale'], {'order': '(1)'}), '(img_patch, zoom_scale, order=1)\n', (18550, 18582), True, 'import scipy.ndimage as ndi\n'), ((18887, 18916), 'data_kits.np_ops.z_score', 'np_ops.z_score', (['resized_patch'], {}), '(resized_patch)\n', (18901, 18916), False, 'from data_kits import np_ops, tf_ops\n'), ((20110, 20140), 'numpy.round', 'np.round', (['(volume.shape[1] / 16)'], {}), '(volume.shape[1] / 16)\n', (20118, 20140), True, 'import numpy as np\n'), ((20162, 20192), 
'numpy.round', 'np.round', (['(volume.shape[2] / 16)'], {}), '(volume.shape[2] / 16)\n', (20170, 20192), True, 'import numpy as np\n'), ((6091, 6104), 'tensorflow.shape', 'tf.shape', (['ctr'], {}), '(ctr)\n', (6099, 6104), True, 'import tensorflow as tf\n'), ((6252, 6315), 'data_kits.tf_ops.gen_guide_3d', 'tf_ops.gen_guide_3d', (['target_shape', 'ctr', 'stddev'], {'euclidean': '(False)'}), '(target_shape, ctr, stddev, euclidean=False)\n', (6271, 6315), False, 'from data_kits import np_ops, tf_ops\n'), ((7195, 7208), 'tensorflow.shape', 'tf.shape', (['lab'], {}), '(lab)\n', (7203, 7208), True, 'import tensorflow as tf\n'), ((12514, 12541), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (12535, 12541), True, 'import numpy as np\n'), ((13201, 13237), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, 3)', 'tf.float32'], {}), '((None, 3), tf.float32)\n', (13214, 13237), True, 'import tensorflow as tf\n'), ((13251, 13287), 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, 3)', 'tf.float32'], {}), '((None, 3), tf.float32)\n', (13264, 13287), True, 'import tensorflow as tf\n'), ((17963, 17993), 'numpy.zeros_like', 'np.zeros_like', (['label', 'np.int32'], {}), '(label, np.int32)\n', (17976, 17993), True, 'import numpy as np\n'), ((18639, 18674), 'scipy.ndimage.zoom', 'ndi.zoom', (['mask', 'zoom_scale'], {'order': '(0)'}), '(mask, zoom_scale, order=0)\n', (18647, 18674), True, 'import scipy.ndimage as ndi\n'), ((18755, 18842), 'numpy.pad', 'np.pad', (['resized_patch', '((0, 1), (0, 0), (0, 0))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(resized_patch, ((0, 1), (0, 0), (0, 0)), mode='constant',\n constant_values=0)\n", (18761, 18842), True, 'import numpy as np\n'), ((6374, 6428), 'data_kits.tf_ops.gen_guide_3d', 'tf_ops.gen_guide_3d', (['target_shape', 'ctr'], {'euclidean': '(True)'}), '(target_shape, ctr, euclidean=True)\n', (6393, 6428), False, 'from data_kits import np_ops, tf_ops\n'), ((15844, 15883), 
'data_kits.nf_kits.read_nii', 'nf_kits.read_nii', (["data[pid]['img_path']"], {}), "(data[pid]['img_path'])\n", (15860, 15883), False, 'from data_kits import nf_kits\n'), ((19426, 19465), 'data_kits.nf_kits.read_nii', 'nf_kits.read_nii', (["data[pid]['img_path']"], {}), "(data[pid]['img_path'])\n", (19442, 19465), False, 'from data_kits import nf_kits\n'), ((6492, 6514), 'tensorflow.cast', 'tf.cast', (['ctr', 'tf.int32'], {}), '(ctr, tf.int32)\n', (6499, 6514), True, 'import tensorflow as tf\n'), ((6536, 6664), 'tensorflow.py_function', 'tf.py_function', (['np_ops.gen_guide_geo_3d', '[img[..., 0], int_ctr, opt.geo_lamb, opt.geo_iter]', 'tf.float32'], {'name': '"""GeoDistLarge"""'}), "(np_ops.gen_guide_geo_3d, [img[..., 0], int_ctr, opt.geo_lamb,\n opt.geo_iter], tf.float32, name='GeoDistLarge')\n", (6550, 6664), True, 'import tensorflow as tf\n'), ((6788, 6815), 'tensorflow.expand_dims', 'tf.expand_dims', (['gd'], {'axis': '(-1)'}), '(gd, axis=-1)\n', (6802, 6815), True, 'import tensorflow as tf\n'), ((3798, 3817), 'numpy.asarray', 'np.asarray', (['all_pts'], {}), '(all_pts)\n', (3808, 3817), True, 'import numpy as np\n'), ((3908, 3934), 'numpy.sum', 'np.sum', (['(dist ** 2)'], {'axis': '(-1)'}), '(dist ** 2, axis=-1)\n', (3914, 3934), True, 'import numpy as np\n'), ((6192, 6205), 'tensorflow.shape', 'tf.shape', (['ctr'], {}), '(ctr)\n', (6200, 6205), True, 'import tensorflow as tf\n'), ((7005, 7018), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (7013, 7018), True, 'import tensorflow as tf\n'), ((13422, 13497), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['data_gen'], {'output_signature': 'output_signature'}), '(data_gen, output_signature=output_signature)\n', (13452, 13497), True, 'import tensorflow as tf\n')] |
### Libraries
# Standard libraries
import numpy as np
# Third-party libraries
import skfuzzy as fuzz
import matplotlib.pyplot as plt
def visualize_mf(b, inputs):
    """Plot the five membership functions of Error, Del_Error and Output.

    Parameters
    ----------
    b: sequence of three items, each holding five membership arrays
    inputs: sequence of three universe-of-discourse arrays
    """
    fig, axes = plt.subplots(nrows=3, figsize=(8, 5))
    colors = ['g', 'r', 'c', 'm', 'y']
    labels = ['-ve Medium', '-ve small', 'zero', '+ve small', '+ve Medium']
    titles = ['Error', 'Del_Error', 'Output']
    for idx, ax in enumerate(axes):
        for mf, color, text in zip(b[idx], colors, labels):
            ax.plot(inputs[idx], mf, color, linewidth=1.5, label=text)
        ax.set_title(titles[idx])
        # ax.legend()
        # Turn off top/right axes
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()
    plt.tight_layout()
def visualize_output(b, inputs, output, out_final, aggregated):
    """Plot the clipped output membership activity and the aggregated,
    defuzzified result.

    Parameters
    ----------
    b : sequence of per-variable membership-function arrays; b[2] is the output.
    inputs : sequence of universe arrays; inputs[2] is the output universe.
    output : five clipped output membership arrays (rule activations).
    out_final : the defuzzified crisp output value.
    aggregated : the aggregated output membership array.
    """
    # Visualize this
    out_activation = fuzz.interp_membership(inputs[2], aggregated, out_final) # for plot
    fig, (ax3, ax4) = plt.subplots(nrows=2, figsize=(8, 5))
    # Zero baseline used by fill_between for every shaded region below.
    output0 = np.zeros_like(inputs[2])
    # Top panel: each rule's clipped activation shaded, with the original
    # (unclipped) membership functions dashed for reference.
    ax3.fill_between(inputs[2], output0, output[0], facecolor='g', alpha=0.7)
    ax3.plot(inputs[2], b[2][0], 'g', linewidth=0.5, linestyle='--' )
    ax3.fill_between(inputs[2], output0, output[1], facecolor='r', alpha=0.7)
    ax3.plot(inputs[2], b[2][1], 'r', linewidth=0.5, linestyle='--')
    ax3.fill_between(inputs[2], output0, output[2], facecolor='k', alpha=0.7)
    ax3.plot(inputs[2], b[2][2], 'c', linewidth=0.5, linestyle='--' )
    ax3.fill_between(inputs[2], output0, output[3], facecolor='m', alpha=0.7)
    ax3.plot(inputs[2], b[2][3], 'm', linewidth=0.5, linestyle='--')
    ax3.fill_between(inputs[2], output0, output[4], facecolor='c', alpha=0.7)
    ax3.plot(inputs[2], b[2][4], 'y', linewidth=0.5, linestyle='--')
    ax3.set_title('Output membership activity')
    # Bottom panel: the aggregated membership with the defuzzified value
    # marked by a vertical line up to its membership degree.
    ax4.plot(inputs[2], b[2][0], 'g', linewidth=0.5, linestyle='--', )
    ax4.plot(inputs[2], b[2][1], 'r', linewidth=0.5, linestyle='--', )
    ax4.plot(inputs[2], b[2][2], 'c', linewidth=0.5, linestyle='--', )
    ax4.plot(inputs[2], b[2][3], 'm', linewidth=0.5, linestyle='--', )
    ax4.plot(inputs[2], b[2][4], 'y', linewidth=0.5, linestyle='--', )
    ax4.fill_between(inputs[2], output0, aggregated, facecolor='g', alpha=0.7)
    ax4.plot([out_final, out_final], [0, out_activation], 'k', linewidth=1.5, alpha=0.9)
    ax4.set_title('Aggregated membership and result (line)')
    # Turn off top/right axes for a cleaner look.
    for ax in (ax3, ax4):
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()
    plt.tight_layout()
    plt.show()
| [
"skfuzzy.interp_membership",
"matplotlib.pyplot.tight_layout",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((192, 229), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'figsize': '(8, 5)'}), '(nrows=3, figsize=(8, 5))\n', (204, 229), True, 'import matplotlib.pyplot as plt\n'), ((1689, 1707), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1705, 1707), True, 'import matplotlib.pyplot as plt\n'), ((1815, 1871), 'skfuzzy.interp_membership', 'fuzz.interp_membership', (['inputs[2]', 'aggregated', 'out_final'], {}), '(inputs[2], aggregated, out_final)\n', (1837, 1871), True, 'import skfuzzy as fuzz\n'), ((1905, 1942), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': '(8, 5)'}), '(nrows=2, figsize=(8, 5))\n', (1917, 1942), True, 'import matplotlib.pyplot as plt\n'), ((1957, 1981), 'numpy.zeros_like', 'np.zeros_like', (['inputs[2]'], {}), '(inputs[2])\n', (1970, 1981), True, 'import numpy as np\n'), ((3545, 3563), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3561, 3563), True, 'import matplotlib.pyplot as plt\n'), ((3568, 3578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3576, 3578), True, 'import matplotlib.pyplot as plt\n')] |
import keras
import numpy as np
import os
from glob import glob
# Utility Functions
def get_list_IDs(any_path, val_split = 0.8):
    """Split the file IDs found in a directory into train/validation lists.

    Parameters
    ----------
    any_path : directory to either the align files or the .mpg files;
        i.e. s1_path or s1_align
    val_split : fraction of IDs assigned to the training split (default 0.8)

    Returns
    -------
    dict with 'train' and 'val' lists of filename stems (extension removed).
    """
    stems = [os.path.splitext(name)[0] for name in os.listdir(any_path)]
    n_train = round(len(stems) * val_split)
    return {'train': stems[:n_train], 'val': stems[n_train:]}
def text_to_labels(text):
    """Encode an align transcript as integer labels.

    Lowercase letters map to 0-25, a space maps to 26; every other
    character is silently dropped.
    """
    def encode(ch):
        # a-z -> 0..25, space -> 26, anything else -> None (dropped).
        if 'a' <= ch <= 'z':
            return ord(ch) - ord('a')
        if ch == ' ':
            return 26
        return None
    return [code for code in map(encode, text) if code is not None]
def enumerate_align_hash(align_path, absolute_max_string_len):
    '''
    Makes a dictionary mapping video IDs to parsed ``Align`` objects.

    * Make sure that ``align_path`` ends with a path separator.

    Parameters
    ----------
    align_path: path to the directory with all of the align files
    absolute_max_string_len: padding length forwarded to ``Align``
    '''
    align_hash = {}
    video_list = glob(align_path + '*.align', recursive=True)
    for video_path in video_list:
        # os.path.basename handles both '/' and '\\' separators, unlike the
        # previous hard-coded split('/') which broke on Windows paths.
        video_id = os.path.splitext(os.path.basename(video_path))[0]
        align_hash[video_id] = Align(absolute_max_string_len, text_to_labels).from_file(video_path)
    return align_hash
# generator to inherit from
class BaseGenerator(keras.utils.Sequence):
    '''
    Thread-safe base class for generating 2D data batches in keras
    (no preprocessing and channels_last).

    Attributes:
        list_IDs: filenames (.nii files); must be same for training and labels
        data_dirs: list of [training_dir, labels_dir]
        batch_size: int of desired number images per epoch
        shuffle: whether to reshuffle the sample order after every epoch
    '''
    # https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly

    def __init__(self, list_IDs, data_dirs, batch_size, shuffle = True):
        # Paths/IDs of the individual samples.
        self.list_IDs = list_IDs
        self.data_dirs = data_dirs
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indexes = np.arange(len(self.list_IDs))

    def __len__(self):
        # Number of batches per epoch; the last batch may be partial.
        n_batches = len(self.list_IDs) / float(self.batch_size)
        return int(np.ceil(n_batches))

    def __getitem__(self, idx):
        '''
        Fetch one batch and preprocess it on the fly.
        '''
        start = idx * self.batch_size
        batch_indexes = self.indexes[start:start + self.batch_size]
        # Map shuffled positions back to sample IDs.
        batch_IDs = [self.list_IDs[k] for k in batch_indexes]
        X, y = self.data_gen(batch_IDs)
        return (X, y)

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def data_gen(self, list_IDs_temp):
        '''
        Preprocesses the data; subclasses must override this.

        Args:
            list_IDs_temp: IDs of the samples in the current batch
        Returns:
            (x, y) batch arrays
        '''
        raise NotImplementedError
# Align class from the LipNet repository
class Align(object):
    """Parses an .align transcript into sentence / label representations.

    label_func converts a sentence string into a list of integer labels;
    padded_label pads that list with -1 up to absolute_max_string_len.
    """

    def __init__(self, absolute_max_string_len=32, label_func=None):
        self.label_func = label_func
        self.absolute_max_string_len = absolute_max_string_len

    def from_file(self, path):
        # Each line is "<start> <end> <word>"; times are in milliseconds.
        with open(path, 'r') as f:
            rows = [line.strip().split(" ") for line in f.readlines()]
        align = [(int(row[0]) / 1000, int(row[1]) / 1000, row[2]) for row in rows]
        self.build(align)
        return self

    def from_array(self, align):
        self.build(align)
        return self

    def build(self, align):
        self.align = self.strip(align, ['sp', 'sil'])
        self.sentence = self.get_sentence(align)
        self.label = self.get_label(self.sentence)
        self.padded_label = self.get_padded_label(self.label)

    def strip(self, align, items):
        # Drop entries whose word is one of *items* (silence markers).
        return [entry for entry in align if entry[2] not in items]

    def get_sentence(self, align):
        words = [entry[-1] for entry in align if entry[-1] not in ['sp', 'sil']]
        return " ".join(words)

    def get_label(self, sentence):
        return self.label_func(sentence)

    def get_padded_label(self, label):
        pad_len = self.absolute_max_string_len - len(label)
        padding = np.ones(pad_len) * -1
        return np.concatenate((np.array(label), padding), axis=0)

    @property
    def word_length(self):
        return len(self.sentence.split(" "))

    @property
    def sentence_length(self):
        return len(self.sentence)

    @property
    def label_length(self):
        return len(self.label)
| [
"os.listdir",
"os.path.splitext",
"numpy.array",
"glob.glob",
"numpy.random.shuffle"
] | [((1061, 1105), 'glob.glob', 'glob', (["(align_path + '*.align')"], {'recursive': '(True)'}), "(align_path + '*.align', recursive=True)\n", (1065, 1105), False, 'from glob import glob\n'), ((275, 297), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (291, 297), False, 'import os\n'), ((313, 333), 'os.listdir', 'os.listdir', (['any_path'], {}), '(any_path)\n', (323, 333), False, 'import os\n'), ((2878, 2909), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (2895, 2909), True, 'import numpy as np\n'), ((4400, 4415), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (4408, 4415), True, 'import numpy as np\n'), ((1176, 1204), 'os.path.splitext', 'os.path.splitext', (['video_path'], {}), '(video_path)\n', (1192, 1204), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""Testing the covariance matrix class.
"""
import shutil
import pytest
import numpy as np
import scipy as sp
import os
from tempfile import mkstemp
from typhon.arts.covariancematrix import Block, CovarianceMatrix
from typhon.arts.xml import load, save
class TestCovarianceMatrix:
    """Tests for typhon's CovarianceMatrix: XML round-tripping and
    conversion to a dense array."""
    def setup_method(self):
        # Temporary file
        fd, self.f = mkstemp()
        # Only the path is needed; the typhon save/load functions reopen it.
        os.close(fd)
        # Simple covariance matrix for testing: one dense 10x10 block and
        # one sparse identity block on the next diagonal position.
        b1 = Block(0, 0, 0, 0, False, np.random.normal(size = (10, 10)))
        b2 = Block(1, 1, 10, 10, False, sp.sparse.identity(10))
        self.covmat = CovarianceMatrix([b1, b2])

    def test_xml_io(self):
        # Round-trip through XML and verify block-wise numerical equality.
        save(self.covmat, self.f)
        covmat2 = load(self.f)

        def compare_matrices(args):
            b1, b2 = args
            m1 = b1.matrix
            m2 = b2.matrix
            # Densify sparse blocks so np.allclose can compare them.
            if isinstance(m1, sp.sparse.spmatrix):
                m1 = m1.todense()
                m2 = m2.todense()
            print(m1)
            return np.allclose(m1, m2)

        assert(all(map(compare_matrices, zip(self.covmat.blocks, covmat2.blocks))))

    def test_to_dense(self):
        # The dense matrix must contain each block at its declared offset.
        m = self.covmat.to_dense()
        assert(np.allclose(m[:10, :10], self.covmat.blocks[0].matrix))
        assert(np.allclose(m[10:, 10:], self.covmat.blocks[1].matrix.toarray()))

    def teardown_method(self):
        # Remove temp file
        os.remove(self.f)
| [
"typhon.arts.xml.load",
"numpy.random.normal",
"typhon.arts.covariancematrix.CovarianceMatrix",
"numpy.allclose",
"os.close",
"typhon.arts.xml.save",
"scipy.sparse.identity",
"tempfile.mkstemp",
"os.remove"
] | [((382, 391), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (389, 391), False, 'from tempfile import mkstemp\n'), ((400, 412), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (408, 412), False, 'import os\n'), ((620, 646), 'typhon.arts.covariancematrix.CovarianceMatrix', 'CovarianceMatrix', (['[b1, b2]'], {}), '([b1, b2])\n', (636, 646), False, 'from typhon.arts.covariancematrix import Block, CovarianceMatrix\n'), ((683, 708), 'typhon.arts.xml.save', 'save', (['self.covmat', 'self.f'], {}), '(self.covmat, self.f)\n', (687, 708), False, 'from typhon.arts.xml import load, save\n'), ((727, 739), 'typhon.arts.xml.load', 'load', (['self.f'], {}), '(self.f)\n', (731, 739), False, 'from typhon.arts.xml import load, save\n'), ((1206, 1260), 'numpy.allclose', 'np.allclose', (['m[:10, :10]', 'self.covmat.blocks[0].matrix'], {}), '(m[:10, :10], self.covmat.blocks[0].matrix)\n', (1217, 1260), True, 'import numpy as np\n'), ((1410, 1427), 'os.remove', 'os.remove', (['self.f'], {}), '(self.f)\n', (1419, 1427), False, 'import os\n'), ((499, 530), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (515, 530), True, 'import numpy as np\n'), ((574, 596), 'scipy.sparse.identity', 'sp.sparse.identity', (['(10)'], {}), '(10)\n', (592, 596), True, 'import scipy as sp\n'), ((1021, 1040), 'numpy.allclose', 'np.allclose', (['m1', 'm2'], {}), '(m1, m2)\n', (1032, 1040), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
__all__ = [
'pipeline']
import pandas as pd
import numpy as np
from collections import OrderedDict
from tools import sizeof_file
from sklearn import preprocessing
class Define():
    """Define module: loads a csv dataset and prepares X/y for modelling.

    Parameters
    ------------
    data_path : string (Path to the dataset, expected to be a csv file)
    data_name : string (The dataset's name)
    problem_type: string (Classification or Regression.)

    Attributes
    -----------
    response : string (Name of the target column; the last csv column is
        renamed to 'class' on read.)
    n_features : int (number of features or predictors)
    samples : int (Number of rows in the dataset)
    """

    def __init__(self,
                 data_path, data_name,
                 problem_type='Classification'):
        self.problem_type = problem_type
        self.data_path = data_path
        self.data_name = data_name
        self.response = 'class'
        self.n_features = None
        self.describe = None
        self.samples = None
        self.size = None
        self.data = None
        self.X_1 = None
        self.X_2 = None
        self.X = None
        self.y = None

    def pipeline(self):
        """Run the full preparation pipeline: read, describe, encode."""
        self.read()
        self.description()
        self.categoricalToNumeric()
        return self

    def read(self):
        """Read the csv, rename the last column to 'class', drop NaN rows
        and split the data into predictors X and response vector y."""
        self.head_y = None
        self.count = None
        try:
            if self.data_path is not None:
                self.data = pd.read_csv(self.data_path)
                # The response is assumed to be the last column; normalise
                # its name to 'class' so the rest of the code can rely on it.
                self.count = len(self.data.columns.values) - 1
                self.head_y = self.data.columns.values[self.count]
                self.data.rename(columns={self.head_y: 'class'}, inplace=True)
                self.data.dropna(inplace=True)
                self.X = self.data.loc[:, self.data.columns != self.response]
                self.X_1 = self.X
                self.y = self.data.loc[:, self.data.columns == self.response]
                self.y = np.ravel(self.y)
        except Exception as e:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; the error is reported instead of hidden.
            print("Error reading:", e)

    def description(self):
        """Build a one-row DataFrame summarising the dataset."""
        self.n_features = len(self.data.columns) - 1
        self.samples = len(self.data)
        self.size = sizeof_file(self.data_path)
        self.describe = [self.data_name.replace(".csv", ""), self.n_features, self.samples, self.size]
        self.describe = pd.DataFrame([self.describe], columns=["name", "n_features", "samples", "size"])
        return self.describe

    def categoricalToNumeric(self):
        """Label-encode any object-typed (categorical) predictor columns,
        keeping the numeric columns as they are."""
        if self.X.select_dtypes(include=[object]).shape[1]:
            numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
            self.X_1 = self.X.select_dtypes(include=numerics)
            self.X_2 = self.X.select_dtypes(include=[object])
            le = preprocessing.LabelEncoder()
            self.X_2 = self.X_2.apply(le.fit_transform)
            self.X = pd.concat([self.X_1, self.X_2], axis=1)
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"numpy.ravel",
"tools.sizeof_file",
"pandas.DataFrame",
"pandas.concat"
] | [((2257, 2284), 'tools.sizeof_file', 'sizeof_file', (['self.data_path'], {}), '(self.data_path)\n', (2268, 2284), False, 'from tools import sizeof_file\n'), ((2412, 2497), 'pandas.DataFrame', 'pd.DataFrame', (['[self.describe]'], {'columns': "['name', 'n_features', 'samples', 'size']"}), "([self.describe], columns=['name', 'n_features', 'samples', 'size']\n )\n", (2424, 2497), True, 'import pandas as pd\n'), ((2843, 2871), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (2869, 2871), False, 'from sklearn import preprocessing\n'), ((2949, 2988), 'pandas.concat', 'pd.concat', (['[self.X_1, self.X_2]'], {'axis': '(1)'}), '([self.X_1, self.X_2], axis=1)\n', (2958, 2988), True, 'import pandas as pd\n'), ((1554, 1581), 'pandas.read_csv', 'pd.read_csv', (['self.data_path'], {}), '(self.data_path)\n', (1565, 1581), True, 'import pandas as pd\n'), ((2052, 2068), 'numpy.ravel', 'np.ravel', (['self.y'], {}), '(self.y)\n', (2060, 2068), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import os
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator
import seaborn as sns
from operator import itemgetter
import matplotlib.ticker as ticker
import math
import matplotlib.patches as patches
#run correlation matrix and save only those above threshold
def run_corr(args, df_by_gene, title, path_filename, method_name='pearson', sig_threshold= 0.5, min_period=3, save_corrs=False):
try:
from .dim_reduction import return_top_pca_gene
except (SystemError, ValueError, ImportError):
from dim_reduction import return_top_pca_gene
if len(df_by_gene.columns.tolist())>5000:
df_by_gene, top_pca_list = return_top_pca_gene(args, df_by_gene.transpose(), user_num_genes=5000)
if method_name != 'kendall':
corr_by_gene = df_by_gene.corr(method=method_name, min_periods=min_period)
else:
corr_by_gene = df_by_gene.corr(method=method_name)
cor = corr_by_gene
cor.loc[:,:] = np.tril(cor.values, k=-1)
cor = cor.stack()
corr_by_gene_pos = cor[cor >=sig_threshold]
corr_by_gene_neg = cor[cor <=(sig_threshold*-1)]
cor_pos_df = pd.DataFrame(corr_by_gene_pos)
cor_neg_df = pd.DataFrame(corr_by_gene_neg)
sig_corr = cor_pos_df.append(cor_neg_df)
sig_corrs = pd.DataFrame(sig_corr[0], columns=["corr"])
sig_corrs.to_csv(os.path.join(path_filename, title+'_counts_corr_sig_'+method_name+'.txt'), sep = '\t')
return sig_corrs
#finds most correlated gene groups that are not overlapping
def find_top_corrs(terms_to_search, sig_corrs, num_to_return, gene_corr_list = []):
    """Collect the most-correlated, non-overlapping gene groups.

    Parameters
    ----------
    terms_to_search : genes to build correlation groups around
    sig_corrs : DataFrame of significant correlations indexed by (gene, gene)
        pairs with a 'corr' column, as produced by run_corr
    num_to_return : number of non-overlapping groups to return
    gene_corr_list : user-requested genes whose groups are always prepended
        (the mutable [] default is safe here: the argument is only read,
        never mutated)

    Returns
    -------
    List of groups; each group is a list of (gene, correlation) tuples with
    the seed gene first (correlation 1).
    """
    all_corrs_list = []
    best_corrs_list = []
    # Build one candidate group per search term: the term itself plus every
    # gene it is significantly correlated with.
    for term_to_search in terms_to_search:
        corr_tup = [(term_to_search, 1)]
        for index, row in sig_corrs.iterrows():
            if term_to_search in index:
                # The pair index is unordered; keep the partner gene.
                if index[0]==term_to_search:
                    corr_tup.append((index[1],row['corr']))
                else:
                    corr_tup.append((index[0],row['corr']))
        all_corrs_list.append(corr_tup)
    # Largest groups first so the biggest clusters are claimed earliest.
    all_corrs_list.sort(key=len, reverse=True)
    good_count = 0
    len_count = 0
    corr_genes_seen = []
    # Greedily accept groups whose seed gene has not yet appeared in any
    # accepted group; len_count bounds the scan so the loop terminates even
    # when no further group qualifies.
    while good_count <= num_to_return and len_count <= len(all_corrs_list):
        for i, corrs in enumerate(all_corrs_list):
            len_count+=1
            if corrs[0][0] not in corr_genes_seen:
                best_corrs_list.append(corrs)
                good_count+=1
            for g, c in corrs:
                # Only positively-correlated members block reuse of a gene:
                # '-' in str(c) marks a negative correlation value.
                if g not in corr_genes_seen and '-' not in str(c):
                    corr_genes_seen.append(g)
    if gene_corr_list != []:
        # Groups for explicitly requested genes are rebuilt (bypassing the
        # overlap filter) and placed ahead of the automatic picks.
        search_corrs = []
        for term in gene_corr_list:
            corr_tup = [(term, 1)]
            for index, row in sig_corrs.iterrows():
                if term in index:
                    if index[0]==term:
                        corr_tup.append((index[1],row['corr']))
                    else:
                        corr_tup.append((index[0],row['corr']))
            search_corrs.append(corr_tup)
        best_corrs_list = search_corrs+best_corrs_list
        return best_corrs_list[0:num_to_return+len(gene_corr_list)+1]
    else:
        return best_corrs_list[0:num_to_return]
#corr_plot finds and plots all correlated genes, log turns on log scale, sort plots the genes in the rank order of the gene searched
def corr_plot(terms_to_search, df_by_gene_corr, args, matrix_data, title ='', sort=True, sig_threshold=0.5):
    """Find the genes most correlated with each search term and plot their
    expression across cells, saving one figure and one list file per group.

    Parameters
    ----------
    terms_to_search : genes to build correlation groups around
    df_by_gene_corr : DataFrame of cells x genes (log2 counts per the y-label)
    args : run options; uses genes_corr, image_format and verbose
    matrix_data : project object providing new_filepath and cell_label_map
    title : prefix for output file names
    sort : if True, order cells by the searched gene's expression
    sig_threshold : correlation significance cutoff (adjusted for small n)
    """
    path_filename = matrix_data.new_filepath
    #if there are genes supplied with genes_corr flag process them to a list for correlation search
    if args.genes_corr != '':
        gene_corr_list = args.genes_corr.split(',')
    #otherwise pass an empty list
    else:
        gene_corr_list = []
    size_cells = len(df_by_gene_corr.index.tolist())
    # Widen the figure with the number of cells, with a sensible minimum.
    figlen=int(size_cells/11)
    if figlen < 15:
        figlen = 15
    ncol = int(figlen/3.2)
    # For small cell counts, raise the significance threshold empirically.
    if size_cells <100:
        sig_threshold = -0.137*math.log(size_cells)+1.1322
    sig_corrs = run_corr(args, df_by_gene_corr, title, path_filename, sig_threshold=sig_threshold)
    corr_list = find_top_corrs(terms_to_search, sig_corrs, num_to_return=3, gene_corr_list=gene_corr_list)
    for corr_tup in corr_list:
        # The seed gene is the first entry of each group.
        term_to_search = corr_tup[0][0]
        corr_tup.sort(key=itemgetter(1), reverse=True)
        corr_df = pd.DataFrame(corr_tup, columns=['GeneID', 'Correlation'])
        corr_df.to_csv(os.path.join(matrix_data.new_filepath, title+'_Corr_w_'+term_to_search+'_list.txt'), sep = '\t', index=False)
        to_plot = [x[0] for x in corr_tup]
        sns.set_palette(sns.cubehelix_palette(len(to_plot), start=1, rot=-.9, reverse=True))
        sns.set_context("notebook", font_scale=.9, rc={"lines.linewidth": 1})
        try:
            sorted_df = df_by_gene_corr.sort_values(by=[term_to_search])
            ylabel='Counts (log2)'
            if sort:
                ax = sorted_df[to_plot].plot(figsize = (figlen,10))
                xlabels = sorted_df[to_plot].index.values
            else:
                ax = df_by_gene_corr[to_plot].plot(figsize = (figlen,10))
                xlabels = df_by_gene_corr[to_plot].index.values
            ax.set_xlabel('Cell Label')
            ax.set_ylabel(ylabel)
            ax.set_title('Correlates with '+term_to_search, loc='right')
            ax.xaxis.set_minor_locator(LinearLocator(numticks=len(xlabels)))
            # Color each cell's tick label by its group and collect one legend
            # patch per group, when a cell label map is available.
            if matrix_data.cell_label_map:
                ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=3)
                Xcolors = [matrix_data.cell_label_map[cell][0][0] for cell in xlabels]
                group_labels = [matrix_data.cell_label_map[cell][0][2] for cell in xlabels]
                group_seen = []
                leg_handles = []
                for xtick, xcolor, group_name in zip(ax.get_xticklabels(which='minor'), Xcolors, group_labels):
                    xtick.set_color(xcolor)
                    xtick.set_rotation(90)
                    if group_name not in group_seen:
                        leg_handles.append(patches.Patch(color=xcolor, label=group_name))
                        group_seen.append(group_name)
            else:
                ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=3)
            ax.set_ylim([0, df_by_gene_corr[to_plot].values.max()])
            ax.xaxis.set_major_formatter(ticker.NullFormatter())
            ax.tick_params(axis='x', which ='minor', labelsize=9)
            #scale bbox anchoring to account for number of correlated genes and plot size
            if len(corr_tup)>1:
                bbox_height = float(1E-13)*pow(len(corr_tup),6) - float(7E-11)*pow(len(corr_tup),5) + float(1E-8)*pow(len(corr_tup),4) - float(8E-7)*pow(len(corr_tup),3) - float(3E-5)*pow(len(corr_tup),2) + 0.0086*len(corr_tup) + 1.0042
            else:
                bbox_height = 1.05
            # Legend entries show each gene with its correlation value.
            l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup]
            if matrix_data.cell_label_map:
                first_legend = ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, bbox_height+.1), ncol=ncol, prop={'size':10})
                ax = plt.gca().add_artist(first_legend)
                plt.legend(handles=leg_handles, loc='upper right', bbox_to_anchor=(0.9, bbox_height+.1))
            else:
                ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, bbox_height), ncol=ncol, prop={'size':10})
            fig = plt.gcf()
            fig.subplots_adjust(bottom=0.08, top=0.95, right=0.98, left=0.03)
            plt.savefig(os.path.join(path_filename, title+'_corr_with_'+term_to_search+'.'+args.image_format), bbox_inches='tight')
            plt.close('all')
        except KeyError:
            # The searched gene is absent from the matrix; skip this group.
            if args.verbose:
                print(term_to_search+' not in this matrix.')
            pass
| [
"matplotlib.ticker.NullFormatter",
"matplotlib.use",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"seaborn.set_context",
"os.path.join",
"math.log",
"matplotlib.pyplot.close",
"numpy.tril",
"matplotlib.patches.Patch",
"pandas.DataFrame",
"operator.itemgetter",
"matplotlib.pyplot.legend"... | [((67, 90), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (81, 90), False, 'import matplotlib\n'), ((1049, 1074), 'numpy.tril', 'np.tril', (['cor.values'], {'k': '(-1)'}), '(cor.values, k=-1)\n', (1056, 1074), True, 'import numpy as np\n'), ((1216, 1246), 'pandas.DataFrame', 'pd.DataFrame', (['corr_by_gene_pos'], {}), '(corr_by_gene_pos)\n', (1228, 1246), True, 'import pandas as pd\n'), ((1264, 1294), 'pandas.DataFrame', 'pd.DataFrame', (['corr_by_gene_neg'], {}), '(corr_by_gene_neg)\n', (1276, 1294), True, 'import pandas as pd\n'), ((1356, 1399), 'pandas.DataFrame', 'pd.DataFrame', (['sig_corr[0]'], {'columns': "['corr']"}), "(sig_corr[0], columns=['corr'])\n", (1368, 1399), True, 'import pandas as pd\n'), ((1423, 1502), 'os.path.join', 'os.path.join', (['path_filename', "(title + '_counts_corr_sig_' + method_name + '.txt')"], {}), "(path_filename, title + '_counts_corr_sig_' + method_name + '.txt')\n", (1435, 1502), False, 'import os\n'), ((4416, 4473), 'pandas.DataFrame', 'pd.DataFrame', (['corr_tup'], {'columns': "['GeneID', 'Correlation']"}), "(corr_tup, columns=['GeneID', 'Correlation'])\n", (4428, 4473), True, 'import pandas as pd\n'), ((4752, 4822), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(0.9)', 'rc': "{'lines.linewidth': 1}"}), "('notebook', font_scale=0.9, rc={'lines.linewidth': 1})\n", (4767, 4822), True, 'import seaborn as sns\n'), ((4497, 4590), 'os.path.join', 'os.path.join', (['matrix_data.new_filepath', "(title + '_Corr_w_' + term_to_search + '_list.txt')"], {}), "(matrix_data.new_filepath, title + '_Corr_w_' + term_to_search +\n '_list.txt')\n", (4509, 4590), False, 'import os\n'), ((7528, 7537), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7535, 7537), True, 'import matplotlib.pyplot as plt\n'), ((7760, 7776), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7769, 7776), True, 'import matplotlib.pyplot as plt\n'), 
((4036, 4056), 'math.log', 'math.log', (['size_cells'], {}), '(size_cells)\n', (4044, 4056), False, 'import math\n'), ((4369, 4382), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (4379, 4382), False, 'from operator import itemgetter\n'), ((6459, 6481), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (6479, 6481), True, 'import matplotlib.ticker as ticker\n'), ((7284, 7380), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'leg_handles', 'loc': '"""upper right"""', 'bbox_to_anchor': '(0.9, bbox_height + 0.1)'}), "(handles=leg_handles, loc='upper right', bbox_to_anchor=(0.9, \n bbox_height + 0.1))\n", (7294, 7380), True, 'import matplotlib.pyplot as plt\n'), ((7640, 7737), 'os.path.join', 'os.path.join', (['path_filename', "(title + '_corr_with_' + term_to_search + '.' + args.image_format)"], {}), "(path_filename, title + '_corr_with_' + term_to_search + '.' +\n args.image_format)\n", (7652, 7737), False, 'import os\n'), ((7233, 7242), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7240, 7242), True, 'import matplotlib.pyplot as plt\n'), ((6141, 6186), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': 'xcolor', 'label': 'group_name'}), '(color=xcolor, label=group_name)\n', (6154, 6186), True, 'import matplotlib.patches as patches\n')] |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
'''
Defines the Sb_desc class to interface Python with `libsbdesc.so`.
'''
import ctypes as ct
import numpy as np
import numpy.ctypeslib as ctl
class Sb_desc:
    '''
    Calculates spherical Bessel descriptors for the given atomic environment.

    While the description below specifies several of the argument as numpy
    arrays, anything that exposes the __array_interface__ should be accepted.

    Parameters
    ----------
    `disp`: numpy array
        The relative Cartesian coordinates of the surrounding atoms in the
        format `[x_1, y_1, z_1, ...]`
    `weights`: numpy array
        Atomic weights used to contruct the neighbor density function, e.g.
        `[1., ...]`
    `rc`: double
        Cutoff radius for the environment
    `n_atom`: int
        Number of atoms in the environment
    `n_max`: int
        The function evaluates (n_max + 1) * (n_max + 2) / 2 descriptors

    Returns
    ----------
    `desc`: numpy array
        Holds the descriptors, labelled by (n, l) and ordered lexicographically

    Warning
    ----------
    You are reponsible for ensuring that enough memory is allocated for the
    relevant arrays, and should expect undefined behavior otherwise. The
    lengths should be:
        `disp`: at least `3 * n_atom`
        `weights`: at least `n_atom`
    '''

    def __init__(self, libname, libdir):
        # Resolve the `sb_descriptors` symbol from the shared library once;
        # the resulting C function pointer is reused for every call.
        self.c_sb_desc = ctl.load_library(libname, libdir).sb_descriptors

    def __call__(self, disp, weights, rc, n_atom, n_max):
        # Output buffer with one slot per (n, l) descriptor; the C function
        # fills it in place.
        desc = np.empty((n_max + 1) * (n_max + 2) // 2)
        # Arrays are passed zero-copy as ctypes views; scalars are converted
        # to the C types the library expects (double / uint32).
        self.c_sb_desc(
            ctl.as_ctypes(desc), \
            ctl.as_ctypes(disp), \
            ctl.as_ctypes(weights), \
            ct.c_double(rc), \
            ct.c_uint32(n_atom), \
            ct.c_uint32(n_max))
        return desc
| [
"ctypes.c_uint32",
"numpy.empty",
"ctypes.c_double",
"numpy.ctypeslib.load_library",
"numpy.ctypeslib.as_ctypes"
] | [((1815, 1855), 'numpy.empty', 'np.empty', (['((n_max + 1) * (n_max + 2) // 2)'], {}), '((n_max + 1) * (n_max + 2) // 2)\n', (1823, 1855), True, 'import numpy as np\n'), ((1692, 1725), 'numpy.ctypeslib.load_library', 'ctl.load_library', (['libname', 'libdir'], {}), '(libname, libdir)\n', (1708, 1725), True, 'import numpy.ctypeslib as ctl\n'), ((1892, 1911), 'numpy.ctypeslib.as_ctypes', 'ctl.as_ctypes', (['desc'], {}), '(desc)\n', (1905, 1911), True, 'import numpy.ctypeslib as ctl\n'), ((1930, 1949), 'numpy.ctypeslib.as_ctypes', 'ctl.as_ctypes', (['disp'], {}), '(disp)\n', (1943, 1949), True, 'import numpy.ctypeslib as ctl\n'), ((1968, 1990), 'numpy.ctypeslib.as_ctypes', 'ctl.as_ctypes', (['weights'], {}), '(weights)\n', (1981, 1990), True, 'import numpy.ctypeslib as ctl\n'), ((2006, 2021), 'ctypes.c_double', 'ct.c_double', (['rc'], {}), '(rc)\n', (2017, 2021), True, 'import ctypes as ct\n'), ((2044, 2063), 'ctypes.c_uint32', 'ct.c_uint32', (['n_atom'], {}), '(n_atom)\n', (2055, 2063), True, 'import ctypes as ct\n'), ((2082, 2100), 'ctypes.c_uint32', 'ct.c_uint32', (['n_max'], {}), '(n_max)\n', (2093, 2100), True, 'import ctypes as ct\n')] |
import tvm
from tvm import target
import tvm.relay as relay
import tvm.relay.op.nn as nn
import tvm.relay.analysis.call_graph as cg
import tvm.relay.analysis.analysis as an
import tvm._ffi.registry as registry
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
import os
from tvm import relay, auto_scheduler
"""
编译mnist模型
"""
def test_mnist():
    """Import a Keras MNIST model into Relay, optionally auto-tune it with
    the auto-scheduler, and AOT-compile it to a shared library
    (mnist_02.3.so).

    Tuning is expensive and therefore disabled by default (see run_tuning).
    """
    path = "./mnist_02.3.h5"
    print(tf.__version__)
    ctx = tvm.cpu(0)
    target_host = 'llvm'
    target = tvm.target.Target("llvm -mcpu=skylake")
    layout = 'NHWC'
    mod = keras.models.load_model(path, compile=True)
    # Example input/output buffers matching the model's (1, 28, 28, 1) shape;
    # kept for ad-hoc runtime testing of the compiled module.
    input_tensor = np.ones(shape=(1, 28, 28, 1))
    output_tensor = np.zeros(shape=(1, 1))
    shape_dict = {"conv2d_input": (1, 28, 28, 1)}
    dtype_dict = {"input": "float32"}
    mod, params = relay.frontend.from_keras(mod, layout=layout, shape=shape_dict)
    print("Tensorflow keras imported to relay frontend.")
    log_file = "%s-%s-%s.json" % ("./mnist_02.h5", layout, target.kind.name)
    print("Extract tasks...")
    tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
    for idx, task in enumerate(tasks):
        print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
        print(task.compute_dag)

    def run_tuning():
        """Run the (very slow) auto-scheduler search and record results to
        log_file; call manually when a fresh tuning log is needed."""
        print("Begin tuning...")
        tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=20000,  # change this to 20000 to achieve the best performance
            runner=auto_scheduler.LocalRunner(repeat=10, enable_cpu_cache_flush=True),
            measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
        )
        tuner.tune(tune_option)
    #run_tuning()

    # NOTE(fix): the original code called an.post_order_visit(opt_model,
    # printAttr) here, but `opt_model` was never defined (the relay.optimize
    # call producing it was commented out), which raised a NameError at
    # runtime. The traversal is removed until an optimized module exists.

    with auto_scheduler.ApplyHistoryBest(log_file):
        with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
            print(os.getpid())
            # Micro target with AOT executor and a C++ interface.
            target = tvm.target.target.micro(
                "host", options=["-link-params=1", "--executor=aot", "--unpacked-api=1", "--interface-api=c++"])
            factory = relay.build(mod, target=target, target_host="llvm", params=params)
            path_lib = "mnist_02.3.so"
            factory.export_library(path_lib)
test_mnist() | [
"tvm.target.target.micro",
"numpy.ones",
"tvm.auto_scheduler.extract_tasks",
"tvm.relay.build",
"tvm.transform.PassContext",
"tvm.auto_scheduler.RecordToFile",
"tvm.auto_scheduler.ApplyHistoryBest",
"tvm.auto_scheduler.LocalRunner",
"tvm.relay.frontend.from_keras",
"numpy.zeros",
"tensorflow.ker... | [((436, 446), 'tvm.cpu', 'tvm.cpu', (['(0)'], {}), '(0)\n', (443, 446), False, 'import tvm\n'), ((566, 605), 'tvm.target.Target', 'tvm.target.Target', (['"""llvm -mcpu=skylake"""'], {}), "('llvm -mcpu=skylake')\n", (583, 605), False, 'import tvm\n'), ((636, 679), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['path'], {'compile': '(True)'}), '(path, compile=True)\n', (659, 679), True, 'import tensorflow.keras as keras\n'), ((698, 727), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 28, 28, 1)'}), '(shape=(1, 28, 28, 1))\n', (705, 727), True, 'import numpy as np\n'), ((745, 767), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 1)'}), '(shape=(1, 1))\n', (753, 767), True, 'import numpy as np\n'), ((870, 933), 'tvm.relay.frontend.from_keras', 'relay.frontend.from_keras', (['mod'], {'layout': 'layout', 'shape': 'shape_dict'}), '(mod, layout=layout, shape=shape_dict)\n', (895, 933), False, 'from tvm import relay, auto_scheduler\n'), ((1125, 1182), 'tvm.auto_scheduler.extract_tasks', 'auto_scheduler.extract_tasks', (["mod['main']", 'params', 'target'], {}), "(mod['main'], params, target)\n", (1153, 1182), False, 'from tvm import relay, auto_scheduler\n'), ((1953, 1994), 'tvm.relay.analysis.analysis.post_order_visit', 'an.post_order_visit', (['opt_model', 'printAttr'], {}), '(opt_model, printAttr)\n', (1972, 1994), True, 'import tvm.relay.analysis.analysis as an\n'), ((1420, 1469), 'tvm.auto_scheduler.TaskScheduler', 'auto_scheduler.TaskScheduler', (['tasks', 'task_weights'], {}), '(tasks, task_weights)\n', (1448, 1469), False, 'from tvm import relay, auto_scheduler\n'), ((2010, 2051), 'tvm.auto_scheduler.ApplyHistoryBest', 'auto_scheduler.ApplyHistoryBest', (['log_file'], {}), '(log_file)\n', (2041, 2051), False, 'from tvm import relay, auto_scheduler\n'), ((2066, 2160), 'tvm.transform.PassContext', 'tvm.transform.PassContext', ([], {'opt_level': '(3)', 'config': "{'relay.backend.use_auto_scheduler': True}"}), "(opt_level=3, 
config={\n 'relay.backend.use_auto_scheduler': True})\n", (2091, 2160), False, 'import tvm\n'), ((2663, 2787), 'tvm.target.target.micro', 'tvm.target.target.micro', (['"""host"""'], {'options': "['-link-params=1', '--executor=aot', '--unpacked-api=1', '--interface-api=c++']"}), "('host', options=['-link-params=1', '--executor=aot',\n '--unpacked-api=1', '--interface-api=c++'])\n", (2686, 2787), False, 'import tvm\n'), ((2972, 3038), 'tvm.relay.build', 'relay.build', (['mod'], {'target': 'target', 'target_host': '"""llvm"""', 'params': 'params'}), "(mod, target=target, target_host='llvm', params=params)\n", (2983, 3038), False, 'from tvm import relay, auto_scheduler\n'), ((1627, 1693), 'tvm.auto_scheduler.LocalRunner', 'auto_scheduler.LocalRunner', ([], {'repeat': '(10)', 'enable_cpu_cache_flush': '(True)'}), '(repeat=10, enable_cpu_cache_flush=True)\n', (1653, 1693), False, 'from tvm import relay, auto_scheduler\n'), ((2629, 2640), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2638, 2640), False, 'import os\n'), ((1722, 1759), 'tvm.auto_scheduler.RecordToFile', 'auto_scheduler.RecordToFile', (['log_file'], {}), '(log_file)\n', (1749, 1759), False, 'from tvm import relay, auto_scheduler\n')] |
from collections import defaultdict, Counter
import numpy as np
import tensorflow as tf
from capreolus import ConfigOption, Dependency, constants, get_logger
from capreolus.utils.common import padlist
from capreolus.utils.exceptions import MissingDocError
from . import Extractor
from .common import load_pretrained_embeddings
logger = get_logger(__name__)
@Extractor.register
class EmbedText(Extractor):
module_name = "embedtext"
requires_random_seed = True
dependencies = [
Dependency(key="benchmark", module="benchmark", name=None),
Dependency(
key="index", module="index", name="anserini", default_config_overrides={"indexstops": True, "stemmer": "none"}
),
Dependency(key="tokenizer", module="tokenizer", name="anserini"),
]
config_spec = [
ConfigOption("embeddings", "glove6b", "embeddings to use: fasttext, glove6b, glove6b.50d, or w2vnews"),
ConfigOption("calcidf", True),
ConfigOption("maxqlen", 4, "maximum query length (shorter will be truncated)"),
ConfigOption("maxdoclen", 800, "maximum doc length (shorter will be truncated)"),
]
pad_tok = "<pad>"
def build(self):
self._embedding_cache = constants["CACHE_BASE_PATH"] / "embeddings"
self._numpy_cache = self._embedding_cache / (self.config["embeddings"] + ".npy")
self._vocab_cache = self._embedding_cache / (self.config["embeddings"] + ".vocab.txt")
self.embeddings, self.stoi, self.itos = None, None, None
self._next_oov_index = -1
def _load_pretrained_embeddings(self):
if self.embeddings is not None:
return
self.embeddings, self.itos, self.stoi = load_pretrained_embeddings(self.config["embeddings"])
def get_tf_feature_description(self):
feature_description = {
"query": tf.io.FixedLenFeature([self.config["maxqlen"]], tf.int64),
"query_idf": tf.io.FixedLenFeature([self.config["maxqlen"]], tf.float32),
"posdoc": tf.io.FixedLenFeature([self.config["maxdoclen"]], tf.int64),
"negdoc": tf.io.FixedLenFeature([self.config["maxdoclen"]], tf.int64),
"label": tf.io.FixedLenFeature([2], tf.float32, default_value=tf.convert_to_tensor([1, 0], dtype=tf.float32)),
}
return feature_description
def create_tf_feature(self, sample):
"""
sample - output from self.id2vec()
return - a tensorflow feature
"""
query, query_idf, posdoc, negdoc = (sample["query"], sample["query_idf"], sample["posdoc"], sample["negdoc"])
feature = {
"query": tf.train.Feature(int64_list=tf.train.Int64List(value=query)),
"query_idf": tf.train.Feature(float_list=tf.train.FloatList(value=query_idf)),
"posdoc": tf.train.Feature(int64_list=tf.train.Int64List(value=posdoc)),
"negdoc": tf.train.Feature(int64_list=tf.train.Int64List(value=negdoc)),
}
return feature
def parse_tf_example(self, example_proto):
feature_description = self.get_tf_feature_description()
parsed_example = tf.io.parse_example(example_proto, feature_description)
posdoc = parsed_example["posdoc"]
negdoc = parsed_example["negdoc"]
query = parsed_example["query"]
query_idf = parsed_example["query_idf"]
label = parsed_example["label"]
return (posdoc, negdoc, query, query_idf), label
def _get_idf(self, toks):
return [self.idf.get(tok, 0) for tok in toks]
def preprocess(self, qids, docids, topics):
self._load_pretrained_embeddings()
self.index.create_index()
self.qid2toks = {}
self.docid2toks = {}
self.idf = defaultdict(lambda: 0)
for qid in qids:
if qid not in self.qid2toks:
self.qid2toks[qid] = self.tokenizer.tokenize(topics[qid])
self._add_oov_to_vocab(self.qid2toks[qid])
query_lengths = Counter(len(toks) for toks in self.qid2toks.values())
if any(qlen > self.config["maxqlen"] for qlen in query_lengths):
logger.warning(
"Some queries are longer than maxqlen; longest: %s; counter: %s",
max(query_lengths),
sorted(query_lengths.items()),
)
def get_doc_tokens(self, docid):
if docid not in self.docid2toks:
self.docid2toks[docid] = self.tokenizer.tokenize(self.index.get_doc(docid))
self._add_oov_to_vocab(self.docid2toks[docid])
return self.docid2toks[docid]
def _add_oov_to_vocab(self, tokens):
for tok in tokens:
if tok not in self.stoi:
self.stoi[tok] = self._next_oov_index
self.itos[self._next_oov_index] = tok
self._next_oov_index -= 1
def _tok2vec(self, toks):
return [self.stoi[tok] for tok in toks]
def id2vec(self, qid, posid, negid=None, **kwargs):
query = self.qid2toks[qid]
# TODO find a way to calculate qlen/doclen stats earlier, so we can log them and check sanity of our values
qlen, doclen = self.config["maxqlen"], self.config["maxdoclen"]
posdoc = self.get_doc_tokens(posid)
if not posdoc:
raise MissingDocError(qid, posid)
idfs = padlist(self._get_idf(query), qlen, 0)
query = self._tok2vec(padlist(query, qlen, self.pad_tok))
posdoc = self._tok2vec(padlist(posdoc, doclen, self.pad_tok))
# TODO determine whether pin_memory is happening. may not be because we don't place the strings in a np or torch object
data = {
"qid": qid,
"posdocid": posid,
"idfs": np.array(idfs, dtype=np.float32),
"query": np.array(query, dtype=np.long),
"posdoc": np.array(posdoc, dtype=np.long),
"query_idf": np.array(idfs, dtype=np.float32),
"negdocid": "",
"negdoc": np.zeros(self.config["maxdoclen"], dtype=np.long),
}
if negid:
negdoc = self.get_doc_tokens(negid)
if not negdoc:
raise MissingDocError(qid, negid)
negdoc = self._tok2vec(padlist(negdoc, doclen, self.pad_tok))
data["negdocid"] = negid
data["negdoc"] = np.array(negdoc, dtype=np.long)
return data
| [
"capreolus.utils.common.padlist",
"capreolus.Dependency",
"capreolus.get_logger",
"tensorflow.train.Int64List",
"capreolus.ConfigOption",
"capreolus.utils.exceptions.MissingDocError",
"numpy.array",
"numpy.zeros",
"tensorflow.io.FixedLenFeature",
"collections.defaultdict",
"tensorflow.train.Floa... | [((340, 360), 'capreolus.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (350, 360), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((502, 560), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""benchmark"""', 'module': '"""benchmark"""', 'name': 'None'}), "(key='benchmark', module='benchmark', name=None)\n", (512, 560), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((570, 696), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""index"""', 'module': '"""index"""', 'name': '"""anserini"""', 'default_config_overrides': "{'indexstops': True, 'stemmer': 'none'}"}), "(key='index', module='index', name='anserini',\n default_config_overrides={'indexstops': True, 'stemmer': 'none'})\n", (580, 696), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((724, 788), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""tokenizer"""', 'module': '"""tokenizer"""', 'name': '"""anserini"""'}), "(key='tokenizer', module='tokenizer', name='anserini')\n", (734, 788), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((824, 930), 'capreolus.ConfigOption', 'ConfigOption', (['"""embeddings"""', '"""glove6b"""', '"""embeddings to use: fasttext, glove6b, glove6b.50d, or w2vnews"""'], {}), "('embeddings', 'glove6b',\n 'embeddings to use: fasttext, glove6b, glove6b.50d, or w2vnews')\n", (836, 930), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((936, 965), 'capreolus.ConfigOption', 'ConfigOption', (['"""calcidf"""', '(True)'], {}), "('calcidf', True)\n", (948, 965), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((975, 1053), 'capreolus.ConfigOption', 'ConfigOption', (['"""maxqlen"""', '(4)', '"""maximum query length (shorter will be truncated)"""'], {}), "('maxqlen', 4, 'maximum query length (shorter will be truncated)')\n", (987, 
1053), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((1063, 1148), 'capreolus.ConfigOption', 'ConfigOption', (['"""maxdoclen"""', '(800)', '"""maximum doc length (shorter will be truncated)"""'], {}), "('maxdoclen', 800, 'maximum doc length (shorter will be truncated)'\n )\n", (1075, 1148), False, 'from capreolus import ConfigOption, Dependency, constants, get_logger\n'), ((3137, 3192), 'tensorflow.io.parse_example', 'tf.io.parse_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (3156, 3192), True, 'import tensorflow as tf\n'), ((3751, 3774), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (3762, 3774), False, 'from collections import defaultdict, Counter\n'), ((1857, 1914), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (["[self.config['maxqlen']]", 'tf.int64'], {}), "([self.config['maxqlen']], tf.int64)\n", (1878, 1914), True, 'import tensorflow as tf\n'), ((1941, 2000), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (["[self.config['maxqlen']]", 'tf.float32'], {}), "([self.config['maxqlen']], tf.float32)\n", (1962, 2000), True, 'import tensorflow as tf\n'), ((2024, 2083), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (["[self.config['maxdoclen']]", 'tf.int64'], {}), "([self.config['maxdoclen']], tf.int64)\n", (2045, 2083), True, 'import tensorflow as tf\n'), ((2107, 2166), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (["[self.config['maxdoclen']]", 'tf.int64'], {}), "([self.config['maxdoclen']], tf.int64)\n", (2128, 2166), True, 'import tensorflow as tf\n'), ((5299, 5326), 'capreolus.utils.exceptions.MissingDocError', 'MissingDocError', (['qid', 'posid'], {}), '(qid, posid)\n', (5314, 5326), False, 'from capreolus.utils.exceptions import MissingDocError\n'), ((5412, 5446), 'capreolus.utils.common.padlist', 'padlist', (['query', 'qlen', 'self.pad_tok'], {}), '(query, qlen, self.pad_tok)\n', 
(5419, 5446), False, 'from capreolus.utils.common import padlist\n'), ((5479, 5516), 'capreolus.utils.common.padlist', 'padlist', (['posdoc', 'doclen', 'self.pad_tok'], {}), '(posdoc, doclen, self.pad_tok)\n', (5486, 5516), False, 'from capreolus.utils.common import padlist\n'), ((5739, 5771), 'numpy.array', 'np.array', (['idfs'], {'dtype': 'np.float32'}), '(idfs, dtype=np.float32)\n', (5747, 5771), True, 'import numpy as np\n'), ((5794, 5824), 'numpy.array', 'np.array', (['query'], {'dtype': 'np.long'}), '(query, dtype=np.long)\n', (5802, 5824), True, 'import numpy as np\n'), ((5848, 5879), 'numpy.array', 'np.array', (['posdoc'], {'dtype': 'np.long'}), '(posdoc, dtype=np.long)\n', (5856, 5879), True, 'import numpy as np\n'), ((5906, 5938), 'numpy.array', 'np.array', (['idfs'], {'dtype': 'np.float32'}), '(idfs, dtype=np.float32)\n', (5914, 5938), True, 'import numpy as np\n'), ((5990, 6039), 'numpy.zeros', 'np.zeros', (["self.config['maxdoclen']"], {'dtype': 'np.long'}), "(self.config['maxdoclen'], dtype=np.long)\n", (5998, 6039), True, 'import numpy as np\n'), ((6336, 6367), 'numpy.array', 'np.array', (['negdoc'], {'dtype': 'np.long'}), '(negdoc, dtype=np.long)\n', (6344, 6367), True, 'import numpy as np\n'), ((6167, 6194), 'capreolus.utils.exceptions.MissingDocError', 'MissingDocError', (['qid', 'negid'], {}), '(qid, negid)\n', (6182, 6194), False, 'from capreolus.utils.exceptions import MissingDocError\n'), ((6231, 6268), 'capreolus.utils.common.padlist', 'padlist', (['negdoc', 'doclen', 'self.pad_tok'], {}), '(negdoc, doclen, self.pad_tok)\n', (6238, 6268), False, 'from capreolus.utils.common import padlist\n'), ((2242, 2288), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[1, 0]'], {'dtype': 'tf.float32'}), '([1, 0], dtype=tf.float32)\n', (2262, 2288), True, 'import tensorflow as tf\n'), ((2671, 2702), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'query'}), '(value=query)\n', (2689, 2702), True, 'import tensorflow as tf\n'), 
((2758, 2793), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'query_idf'}), '(value=query_idf)\n', (2776, 2793), True, 'import tensorflow as tf\n'), ((2846, 2878), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'posdoc'}), '(value=posdoc)\n', (2864, 2878), True, 'import tensorflow as tf\n'), ((2931, 2963), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'negdoc'}), '(value=negdoc)\n', (2949, 2963), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 22:13:08 2021
@author: tavastm1
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 21:35:25 2021
@author: tavastm1
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
# Import the human reference data
sesoi = pd.read_csv('HumanData/PANAS_SESOI.csv')
# Import gpt data
engines = []
engines.append('davinci')
replication_number = 1
experiment_list = ['PANAS_bl']
titles = ['GPT-3 Davinci']
# Dataframes to a dictionary
experiment_dfs = {}
for experiment in experiment_list:
for engine in engines:
experiment_dfs[f'{experiment}_{engine}'] = pd.read_csv(f'Output/{experiment}_{engine}_R{replication_number}.csv')
#%% Scale Scores
import pandas as pd
negatives = []
positives = []
positives_t = pd.Series()
NumOfRows = len(sesoi)
temp_data = sesoi
for i in range(NumOfRows):
data = temp_data.iloc[i,:]
answers_positive = data["interested"] + data["excited"] + data["strong"] + data["alert"] + data["enthusiastic"] + data["proud"] + data["inspired"] + data["determined"] + data["attentive"] + data["active"]
answers_negative = data["distressed"] + data["upset"] + data["guilty"] + data["scared"] + data["hostile"] + data["irritable"] + data["ashamed"] + data["nervous"] + data["jittery"] + data["afraid"]
positives.append(answers_positive)
negatives.append(answers_negative)
## Just testing
positives_s = pd.Series(answers_positive)
positives_t = positives_t.append(positives_s)
print(f'{engine} Positives:', round((sum(positives) / len(positives)), ndigits=1), 'SD: ', round(np.std(positives), ndigits=1))
print(f'{engine} Negatives:', round((sum(negatives) / len(negatives)), ndigits=1), 'SD: ', round(np.std(negatives), ndigits=1))
#print(f'{experiment_name} Balance:', (sum(affectbalance) / len(affectbalance)))
#%% Scale Scores
compare = {'sesoi': sesoi, 'GPT': experiment_dfs['PANAS_bl_davinci']}
for column_i in range(20):
for item in compare:
temp_list = []
temp = compare[item]
NumOfRows = len(temp)
temp = temp[["interested", "excited", "strong", "alert", "enthusiastic", "proud", "inspired", "determined", "attentive", "active", "distressed", "upset", "guilty", "scared", "hostile", "irritable", "ashamed", "nervous", "jittery", "afraid"]]
for i in range(NumOfRows):
if i == NumOfRows - 1:
data = temp.iloc[i,column_i]
temp_list.append(data)
column_name = temp.columns[column_i]
print(f' {column_name} {item} Column mean', sum(temp_list) / len(temp_list), 'Length', len(temp_list))
else:
data = temp_data.iloc[i,column_i]
temp_list.append(data)
#%% Bar chart
plt.rcParams['legend.title_fontsize'] = '24'
experiment = 'PANAS_bl'
TitleTemp = 'GPT-3 Davinci'
sesoi = sesoi[["interested", "excited", "strong", "alert", "enthusiastic", "proud", "inspired", "determined", "attentive", "active", "distressed", "upset", "guilty", "scared", "hostile", "irritable", "ashamed", "nervous", "jittery", "afraid"]]
labels = [1, 2, 3, 4, 5]
GPT = experiment_dfs[f'{experiment}_davinci']
GPT = GPT[["interested", "excited", "strong", "alert", "enthusiastic", "proud", "inspired", "determined", "attentive", "active", "distressed", "upset", "guilty", "scared", "hostile", "irritable", "ashamed", "nervous", "jittery", "afraid"]]
fig = plt.figure()
for i in range(20):
plt.style.use('ggplot')
plt.subplot(4,5,i+1)
## SESOI DATA
temp = sesoi.iloc[:, i]
column_title = GPT.columns[i]
temp2 = Counter(temp)
# Dictionary keys contains values for 1-5, divide by number of total observations
means_sesoi = [temp2[1] / len(temp), temp2[2] / len(temp), temp2[3] / len(temp), temp2[4] / len(temp), temp2[5] / len(temp)]
# GPT data
temp = GPT.iloc[:, i]
temp2 = Counter(temp)
# Dictionary keys contains values for 1-5, divide by number of total observations
means_gpt = [temp2[1] / len(temp), temp2[2] / len(temp), temp2[3] / len(temp), temp2[4] / len(temp), temp2[5] / len(temp)]
x = np.arange(start=1,stop=6) # the label locations
width = 0.25 # the width of the bars
rects1 = plt.bar(x - width, means_gpt, width, label='GPT-3 Davinci')
rects2 = plt.bar(x, means_sesoi, width, label='Human Data [1]')
# Add some text for labels, title and custom x-axis tick labels, etc.
plt.title(f'{column_title}')
if i > 14:
plt.xticks(x)
plt.grid(axis='x')
else:
plt.xticks([])
if i == 0:
plt.figlegend(fontsize='16', loc='upper right', bbox_to_anchor=(1, 1))
plt.suptitle(f'Item level PANAS responses for GPT-3 Davinci and Human reference data', fontsize=22)
plt.subplots_adjust(right=0.88)
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
plt.show()
fig.set_size_inches((18.5, 8.5), forward=False)
plt.savefig(f'Figures/GPTHumans_{TitleTemp}.png', dpi=500) | [
"pandas.Series",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.xticks",
"numpy.std",
"matplotlib.pyplot.figlegend",
"matplotlib.pyplot.style.use",
"collections.Counter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matpl... | [((388, 428), 'pandas.read_csv', 'pd.read_csv', (['"""HumanData/PANAS_SESOI.csv"""'], {}), "('HumanData/PANAS_SESOI.csv')\n", (399, 428), True, 'import pandas as pd\n'), ((885, 896), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (894, 896), True, 'import pandas as pd\n'), ((3551, 3563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3561, 3563), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4936), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.88)'}), '(right=0.88)\n', (4924, 4936), True, 'import matplotlib.pyplot as plt\n'), ((4950, 4979), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (4977, 4979), True, 'import matplotlib.pyplot as plt\n'), ((5013, 5023), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5021, 5023), True, 'import matplotlib.pyplot as plt\n'), ((5073, 5131), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""Figures/GPTHumans_{TitleTemp}.png"""'], {'dpi': '(500)'}), "(f'Figures/GPTHumans_{TitleTemp}.png', dpi=500)\n", (5084, 5131), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1557), 'pandas.Series', 'pd.Series', (['answers_positive'], {}), '(answers_positive)\n', (1539, 1557), True, 'import pandas as pd\n'), ((3590, 3613), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3603, 3613), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3642), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(5)', '(i + 1)'], {}), '(4, 5, i + 1)\n', (3629, 3642), True, 'import matplotlib.pyplot as plt\n'), ((3731, 3744), 'collections.Counter', 'Counter', (['temp'], {}), '(temp)\n', (3738, 3744), False, 'from collections import Counter\n'), ((4013, 4026), 'collections.Counter', 'Counter', (['temp'], {}), '(temp)\n', (4020, 4026), False, 'from collections import Counter\n'), ((4250, 4276), 'numpy.arange', 'np.arange', ([], {'start': '(1)', 'stop': '(6)'}), '(start=1, stop=6)\n', (4259, 4276), 
True, 'import numpy as np\n'), ((4358, 4417), 'matplotlib.pyplot.bar', 'plt.bar', (['(x - width)', 'means_gpt', 'width'], {'label': '"""GPT-3 Davinci"""'}), "(x - width, means_gpt, width, label='GPT-3 Davinci')\n", (4365, 4417), True, 'import matplotlib.pyplot as plt\n'), ((4431, 4485), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'means_sesoi', 'width'], {'label': '"""Human Data [1]"""'}), "(x, means_sesoi, width, label='Human Data [1]')\n", (4438, 4485), True, 'import matplotlib.pyplot as plt\n'), ((4564, 4592), 'matplotlib.pyplot.title', 'plt.title', (['f"""{column_title}"""'], {}), "(f'{column_title}')\n", (4573, 4592), True, 'import matplotlib.pyplot as plt\n'), ((731, 801), 'pandas.read_csv', 'pd.read_csv', (['f"""Output/{experiment}_{engine}_R{replication_number}.csv"""'], {}), "(f'Output/{experiment}_{engine}_R{replication_number}.csv')\n", (742, 801), True, 'import pandas as pd\n'), ((1717, 1734), 'numpy.std', 'np.std', (['positives'], {}), '(positives)\n', (1723, 1734), True, 'import numpy as np\n'), ((1845, 1862), 'numpy.std', 'np.std', (['negatives'], {}), '(negatives)\n', (1851, 1862), True, 'import numpy as np\n'), ((4616, 4629), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x'], {}), '(x)\n', (4626, 4629), True, 'import matplotlib.pyplot as plt\n'), ((4646, 4664), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (4654, 4664), True, 'import matplotlib.pyplot as plt\n'), ((4683, 4697), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4693, 4697), True, 'import matplotlib.pyplot as plt\n'), ((4722, 4792), 'matplotlib.pyplot.figlegend', 'plt.figlegend', ([], {'fontsize': '"""16"""', 'loc': '"""upper right"""', 'bbox_to_anchor': '(1, 1)'}), "(fontsize='16', loc='upper right', bbox_to_anchor=(1, 1))\n", (4735, 4792), True, 'import matplotlib.pyplot as plt\n'), ((4801, 4909), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Item level PANAS responses for GPT-3 Davinci and Human reference data"""'], 
{'fontsize': '(22)'}), "(\n f'Item level PANAS responses for GPT-3 Davinci and Human reference data',\n fontsize=22)\n", (4813, 4909), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Downloads the losses from tensorboard and saves into --dst-folder.
Then it smooths the losses and saves the best ones into a CSV file.
'''
from __future__ import ( division, absolute_import, print_function, unicode_literals )
import argparse
import csv
import os
import numpy as np
import pickle as pkl
import urllib.request
import pandas as pd
parser = argparse.ArgumentParser(description='Train model.')
parser.add_argument( 'tensorboard_url', help='URL containing the address of a running tensorboard isntance' )
parser.add_argument( '--hidden-size', help='Suffix to use for mlps')
parser.add_argument( '--loss-name', help='the url-encoded name for the tensorflow loss')
parser.add_argument( '--dst-folder', help='folder to save the loss csvs')
parser.add_argument( '--smooth-k', type=int, help='size of the smoothing kernel')
parser.set_defaults(hidden_size="1024")
parser.set_defaults(loss_name="losses%2Fgenerator_l1_loss")
parser.set_defaults(dst_folder="losses")
parser.set_defaults(smooth_k=1)
TASKS = "autoencoder denoise edge2d edge3d impainting keypoint2d keypoint3d reshade rgb2depth rgb2sfnorm"
def main():
args = parser.parse_args()
tasks = TASKS.split("\t")
# losses_for_dst = {t: "losses%2Fabsolute_difference%2Fvalue_summary" for t in tasks}
losses_for_dst = {t: args.loss_name for t in tasks}
losses_for_dst = {t: args.loss_name for t in tasks}
# Download losses
print("Downloading losses from tensorboard...")
os.makedirs(args.dst_folder, exist_ok=True)
for src in tasks:
for dst in tasks:
try:
path = make_url(args.tensorboard_url,
src, dst, losses_for_dst[dst], args.hidden_size)
urllib.request.urlretrieve(path, '{}/{}__{}__{}.csv'.format(
args.dst_folder, src, dst, args.hidden_size))
except:
path = make_url(args.tensorboard_url,
src, dst, "losses%2Fabsolute_difference%2Fvalue_summary", args.hidden_size)
urllib.request.urlretrieve(path, '{}/{}__{}__{}.csv'.format(
args.dst_folder, src, dst, args.hidden_size))
# Smooth and save
print("Smoothing losses and saving in loss.pkl...")
smooth_k = args.smooth_k
results = {}
subdir = args.dst_folder
transfers = os.listdir(subdir)
for transfer in transfers:
if '.pkl' in transfer:
continue
src, dst, _ = transfer.split("__")
with open(os.path.join(subdir, transfer), 'r') as f:
reader = csv.DictReader(f)
res = [row for row in reader]
vals = np.array([float(v['Value']) for v in res])
kernel = np.ones((smooth_k,)) / float(smooth_k)
smoothed = np.convolve(vals, kernel)[(smooth_k+1)//2:-smooth_k//2]
results["{src}->{dst}".format(**locals())] = smoothed[-1] #.min()
with open("{}/l1_loss_tensorboard_train.pkl".format(subdir), 'wb') as f:
pkl.dump(results, f)
# Now compute the difference between this and the TF loss
print("Calculating diffs between TB and TF...")
transfer_losses = pd.read_csv("/home/ubuntu/s3/model_log/results_{}/transfer_losses.csv".format(args.hidden_size))
raw_losses = transfer_losses.mean(axis=0)
del raw_losses['index']
for dst in tasks:
for src in tasks:
tf = raw_losses[src + '_' + dst]
tb = results[src + '->' + dst]
print("{}->{}: TB: {} | TF: {}".format(
src, dst, tb, tf))
def make_url(tensorboard_url, src, dst, loss, hidden_size):
path = tensorboard_url +\
"/data/scalars?run={src}__{dst}__{hs}%2Flogs%2Fslim-train%2Ftime&".format(
src=src, dst=dst, hs=hidden_size
) + \
"tag={loss}&format=csv".format(loss=loss)
# print(path)
return path
import csv
import os
import numpy as np
import pickle as pkl
if __name__ == '__main__':
main()
# http://ec2-34-209-45-36.us-west-2.compute.amazonaws.com:6006/data/scalars?run=rgb2sfnorm__rgb2sfnorm__1024%2Flogs%2Fslim-train%2Ftime&tag=losses%2Fgenerator_l1_loss&format=csv
# http://ec2-34-209-45-36.us-west-2.compute.amazonaws.com:6006/data/scalars?run=autoencoder__autoencoder__1024%2Flogs%2Fslim-train%2Ftime&tag=losses%2Fgenerator_l1_loss&format=csv
| [
"os.listdir",
"pickle.dump",
"csv.DictReader",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.ones",
"numpy.convolve",
"os.path.join"
] | [((414, 465), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model."""'}), "(description='Train model.')\n", (437, 465), False, 'import argparse\n'), ((1528, 1571), 'os.makedirs', 'os.makedirs', (['args.dst_folder'], {'exist_ok': '(True)'}), '(args.dst_folder, exist_ok=True)\n', (1539, 1571), False, 'import os\n'), ((2403, 2421), 'os.listdir', 'os.listdir', (['subdir'], {}), '(subdir)\n', (2413, 2421), False, 'import os\n'), ((3055, 3075), 'pickle.dump', 'pkl.dump', (['results', 'f'], {}), '(results, f)\n', (3063, 3075), True, 'import pickle as pkl\n'), ((2630, 2647), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (2644, 2647), False, 'import csv\n'), ((2566, 2596), 'os.path.join', 'os.path.join', (['subdir', 'transfer'], {}), '(subdir, transfer)\n', (2578, 2596), False, 'import os\n'), ((2773, 2793), 'numpy.ones', 'np.ones', (['(smooth_k,)'], {}), '((smooth_k,))\n', (2780, 2793), True, 'import numpy as np\n'), ((2835, 2860), 'numpy.convolve', 'np.convolve', (['vals', 'kernel'], {}), '(vals, kernel)\n', (2846, 2860), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
imgInPath='D:/vaa3d_tools/hackathon/wpkenan/DN_data20181019/1.png';
imgOutPath=imgInPath.split('.')[0]+"_waterShed"+"."+imgInPath.split('.')[1];
img = cv2.imread(imgInPath)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# 降噪处理
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
# 确定背景
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# 查找前景
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# 查找未确定区域
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
# 标注
ret, markers = cv2.connectedComponents(sure_fg)
markers = markers+1
# 将未确定区域置为0
markers[unknown==255] = 0
# 执行分水岭
markers = cv2.watershed(img,markers)
img[markers == -1] = [0,255,0]
cv2.imwrite(imgOutPath,img);
cv2.imshow("img",img)
cv2.waitKey(6000)
cv2.destroyAllWindows() | [
"numpy.uint8",
"cv2.imwrite",
"numpy.ones",
"cv2.threshold",
"cv2.imshow",
"cv2.morphologyEx",
"cv2.waitKey",
"cv2.distanceTransform",
"cv2.connectedComponents",
"cv2.cvtColor",
"cv2.destroyAllWindows",
"cv2.dilate",
"cv2.subtract",
"cv2.imread",
"cv2.watershed"
] | [((219, 240), 'cv2.imread', 'cv2.imread', (['imgInPath'], {}), '(imgInPath)\n', (229, 240), False, 'import cv2\n'), ((248, 285), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (260, 285), False, 'import cv2\n'), ((303, 371), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (316, 371), False, 'import cv2\n'), ((384, 409), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (391, 409), True, 'import numpy as np\n'), ((418, 480), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(2)'}), '(thresh, cv2.MORPH_OPEN, kernel, iterations=2)\n', (434, 480), False, 'import cv2\n'), ((499, 540), 'cv2.dilate', 'cv2.dilate', (['opening', 'kernel'], {'iterations': '(3)'}), '(opening, kernel, iterations=3)\n', (509, 540), False, 'import cv2\n'), ((564, 610), 'cv2.distanceTransform', 'cv2.distanceTransform', (['opening', 'cv2.DIST_L2', '(5)'], {}), '(opening, cv2.DIST_L2, 5)\n', (585, 610), False, 'import cv2\n'), ((706, 723), 'numpy.uint8', 'np.uint8', (['sure_fg'], {}), '(sure_fg)\n', (714, 723), True, 'import numpy as np\n'), ((734, 764), 'cv2.subtract', 'cv2.subtract', (['sure_bg', 'sure_fg'], {}), '(sure_bg, sure_fg)\n', (746, 764), False, 'import cv2\n'), ((785, 817), 'cv2.connectedComponents', 'cv2.connectedComponents', (['sure_fg'], {}), '(sure_fg)\n', (808, 817), False, 'import cv2\n'), ((896, 923), 'cv2.watershed', 'cv2.watershed', (['img', 'markers'], {}), '(img, markers)\n', (909, 923), False, 'import cv2\n'), ((955, 983), 'cv2.imwrite', 'cv2.imwrite', (['imgOutPath', 'img'], {}), '(imgOutPath, img)\n', (966, 983), False, 'import cv2\n'), ((984, 1006), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (994, 1006), False, 'import cv2\n'), ((1007, 1024), 'cv2.waitKey', 'cv2.waitKey', (['(6000)'], 
{}), '(6000)\n', (1018, 1024), False, 'import cv2\n'), ((1025, 1048), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1046, 1048), False, 'import cv2\n')] |
"""
===========
Multicursor
===========
Showing a cursor on multiple plots simultaneously.
This example generates two subplots and on hovering the cursor over data in one
subplot, the values of that datapoint are shown in both respectively.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import MultiCursor
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.sin(4*np.pi*t)
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(t, s1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.widgets.MultiCursor`
| [
"matplotlib.widgets.MultiCursor",
"numpy.sin",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((346, 371), 'numpy.arange', 'np.arange', (['(0.0)', '(2.0)', '(0.01)'], {}), '(0.0, 2.0, 0.01)\n', (355, 371), True, 'import numpy as np\n'), ((377, 398), 'numpy.sin', 'np.sin', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (383, 398), True, 'import numpy as np\n'), ((400, 421), 'numpy.sin', 'np.sin', (['(4 * np.pi * t)'], {}), '(4 * np.pi * t)\n', (406, 421), True, 'import numpy as np\n'), ((437, 465), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (449, 465), True, 'import matplotlib.pyplot as plt\n'), ((507, 559), 'matplotlib.widgets.MultiCursor', 'MultiCursor', (['fig.canvas', '(ax1, ax2)'], {'color': '"""r"""', 'lw': '(1)'}), "(fig.canvas, (ax1, ax2), color='r', lw=1)\n", (518, 559), False, 'from matplotlib.widgets import MultiCursor\n'), ((560, 570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (568, 570), True, 'import matplotlib.pyplot as plt\n')] |
from pathlib import Path
import numpy as np
import torch
def get_app_name(filename: str) -> str:
idx1 = filename.find(":") + 4
idx2 = filename.find("_fetched_")
return filename[idx1:idx2]
def is_random(filename: str) -> bool:
return "_rand" in filename
def get_parameter(filename: str, param: str) -> float:
    """Return the numeric token that follows *param* in an underscore-separated filename.

    Example: ``get_parameter("app_fetched_100.pkl", "fetched")`` -> ``100.0``.

    Raises:
        ValueError: if *param* is not a token of the filename or the
            following token is not a number.
    """
    suffix_idx = filename.find(".pkl")
    # BUG FIX: str.find returns -1 (truthy) when ".pkl" is absent, so the
    # old ``if suffix_idx:`` test chopped the last character off
    # extension-less names.  Compare against the sentinel explicitly.
    if suffix_idx != -1:
        filename = filename[:suffix_idx]
    parts = filename.split("_")
    idx = parts.index(param) + 1
    return float(parts[idx])
def get_filename_pairs(high_load_dir: str, low_load_dir: str) -> dict:
    """Build a 1:1 mapping from each high-load ``.pkl`` file to its low-load twin.

    Files are paired when they share the same application name and the same
    random/non-random flag.  High-load files must contain ``_b_`` in their
    name; low-load files must not.
    """
    high_files = []
    for path in Path(high_load_dir).glob('*.pkl'):
        name = str(path)
        assert "_b_" in name
        high_files.append(name)
    low_files = []
    for path in Path(low_load_dir).glob('*.pkl'):
        name = str(path)
        assert "_b_" not in name
        low_files.append(name)
    assert len(high_files) > 0
    assert len(low_files) > 0
    assert len(high_files) == len(low_files)
    # Create 1:1 relationships between high load and low load files.
    pairs = {}
    for high_name in sorted(high_files):
        app = get_app_name(high_name)
        for low_name in low_files:
            if get_app_name(low_name) == app and is_random(high_name) == is_random(low_name):
                assert high_name not in pairs
                pairs[high_name] = low_name
    assert len(high_files) == len(pairs)
    return pairs
def match_marks(sequences: list, input_mapping: dict, output_mapping: dict) -> list:
    """Re-encode every sequence's integer marks from one mapping to another.

    Each mark is translated integer -> name via the inverted *input_mapping*,
    then name -> integer via *output_mapping*.  Sequences are mutated in
    place and the (same) list is returned.
    """
    reverse_input = {mark: name for name, mark in input_mapping.items()}
    for sequence in sequences:
        before = sequence["marks"]
        sequence["marks"] = [output_mapping[reverse_input[mark]] for mark in before]
        # Sanity checks: same length, integer marks, and an actual re-encoding.
        assert type(sequence["marks"]) == list
        assert len(before) == len(sequence["marks"])
        assert type(sequence["marks"][0]) == int
        assert before != sequence["marks"]
    return sequences
def inject_high_in_low_load_datasets(filename_pairs, output_dir):
    """For each (high-load, low-load) file pair, mix high-load traces into the
    low-load dataset until at least 30% of activations are cold starts, then
    save the merged 1000-sequence dataset into *output_dir*.

    Args:
        filename_pairs: dict mapping high-load .pkl path -> low-load .pkl path
            (as produced by get_filename_pairs).
        output_dir: directory that receives the "injected_*" output files.
    """
    def attribute_to_matrix(sequences, attribute):
        # Stack one attribute of every sequence dict into a numpy array.
        attribute_list = []
        for seq in sequences:
            attribute_list.append(seq[attribute])
        return np.array(attribute_list)
    def cold_start_ratio(init_times):
        # A positive init time marks a cold start.
        num_activations = init_times.size
        num_cold_starts = np.sum(init_times > 0)
        return num_cold_starts / num_activations
    for count, (hl_file, ll_file) in enumerate(filename_pairs.items()):
        with open(hl_file, "rb") as f:
            hl_data = torch.load(f)
        hl_sequences = hl_data["sequences"]
        hl_mapping = hl_data["mapping"]
        with open(ll_file, "rb") as f:
            ll_data = torch.load(f)
        ll_sequences = ll_data["sequences"]
        ll_mapping = ll_data["mapping"]
        if hl_mapping != ll_mapping:
            # Match marks of high load datasets with marks of low load dataset.
            hl_sequences = match_marks(hl_sequences, hl_mapping, ll_mapping)
        # The "fetched" token in each filename records the sequence count.
        assert len(hl_sequences) == get_parameter(hl_file, "fetched")
        assert len(ll_sequences) == get_parameter(ll_file, "fetched")
        # Inject high load traces until there are 30% cold starts in the final dataset.
        threshold = 0
        ratio = 0.0
        merged_sequences = []
        while ratio < 0.3 or len(merged_sequences) < 1000:
            assert threshold <= len(hl_sequences)
            assert threshold <= len(ll_sequences)
            # Replace the last `threshold` low-load sequences with high-load ones,
            # keeping the merged dataset at (up to) 1000 sequences.
            merged_sequences = ll_sequences[:1000-threshold] + hl_sequences[:threshold]
            ratio = cold_start_ratio(attribute_to_matrix(merged_sequences, "init_times"))
            threshold += 1
        ll_data["sequences"] = merged_sequences
        assert len(merged_sequences) == 1000
        assert len(ll_data["sequences"]) == 1000
        # Save under the high-load file's base name, prefixed with "injected_".
        save_name = f"/injected_{hl_file[hl_file.rfind('/')+1:]}"
        with open(output_dir + save_name, "wb") as f:
            torch.save(ll_data, f, pickle_protocol=4)
        print(f"Created {count + 1} of {len(filename_pairs)} datasets.")
if __name__=='__main__':
    # Source directories for the two dataset variants and the output
    # directory for the merged ("injected") datasets.
    high_load_dir = "./final_batched_high_load_n_400"
    low_load_dir = "./final_low_load_n_1000"
    output_dir = "./final_high_load_n_1000"
    pairs = get_filename_pairs(high_load_dir, low_load_dir)
    inject_high_in_low_load_datasets(pairs, output_dir)
"pathlib.Path",
"torch.load",
"numpy.array",
"numpy.sum",
"torch.save"
] | [((2624, 2648), 'numpy.array', 'np.array', (['attribute_list'], {}), '(attribute_list)\n', (2632, 2648), True, 'import numpy as np\n'), ((2760, 2782), 'numpy.sum', 'np.sum', (['(init_times > 0)'], {}), '(init_times > 0)\n', (2766, 2782), True, 'import numpy as np\n'), ((660, 679), 'pathlib.Path', 'Path', (['high_load_dir'], {}), '(high_load_dir)\n', (664, 679), False, 'from pathlib import Path\n'), ((802, 820), 'pathlib.Path', 'Path', (['low_load_dir'], {}), '(low_load_dir)\n', (806, 820), False, 'from pathlib import Path\n'), ((2966, 2979), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (2976, 2979), False, 'import torch\n'), ((3125, 3138), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (3135, 3138), False, 'import torch\n'), ((4412, 4453), 'torch.save', 'torch.save', (['ll_data', 'f'], {'pickle_protocol': '(4)'}), '(ll_data, f, pickle_protocol=4)\n', (4422, 4453), False, 'import torch\n')] |
import os
import cv2
import json
import pandas as pd
import numpy as np
from glob import glob
from tqdm import tqdm
from IPython import embed
import base64
from labelme import utils

# Convert a CSV of bounding-box annotations (one row per box:
# image path, x1, y1, x2, y2, label) into one labelme-format JSON per image.
image_path = "./images/"
csv_file = "./train_labels.csv"
annotations = pd.read_csv(csv_file, header=None).values

# Group all annotation rows by image file name.
total_csv_annotations = {}
for annotation in annotations:
    key = annotation[0].split(os.sep)[-1]
    value = np.array([annotation[1:]])
    if key in total_csv_annotations.keys():
        total_csv_annotations[key] = np.concatenate((total_csv_annotations[key], value), axis=0)
    else:
        total_csv_annotations[key] = value

for key, value in total_csv_annotations.items():
    height, width = cv2.imread(image_path+key).shape[:2]
    labelme_format = {
        "version":"3.6.16",
        "flags":{},
        "lineColor":[0,255,0,128],
        "fillColor":[255,0,0,128],
        "imagePath":key,
        "imageHeight":height,
        "imageWidth":width
    }
    # labelme embeds the image itself as base64 inside the JSON file.
    with open(image_path+key, "rb") as f:
        imageData = f.read()
        imageData = base64.b64encode(imageData).decode('utf-8')
    #img = utils.img_b64_to_arr(imageData)
    labelme_format["imageData"] = imageData
    shapes = []
    for shape in value:
        label = shape[-1]
        s = {"label":label,"line_color":None,"fill_color":None,"shape_type":"rectangle"}
        # Rectangle given by its two opposite corners.
        points = [
            [shape[0],shape[1]],
            [shape[2],shape[3]]
        ]
        s["points"] = points
        shapes.append(s)
    labelme_format["shapes"] = shapes
    # BUG FIX: the original built the output path with "%s/%s/" -- the
    # trailing slash makes open(..., "w") fail -- and leaked the file
    # handle passed to json.dump.  Use os.path.join and a context manager.
    json_path = os.path.join(image_path, key.replace(".jpg", ".json"))
    with open(json_path, "w") as out_file:
        json.dump(labelme_format, out_file, ensure_ascii=False, indent=2)
"pandas.read_csv",
"base64.b64encode",
"numpy.array",
"numpy.concatenate",
"cv2.imread"
] | [((254, 288), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'header': 'None'}), '(csv_file, header=None)\n', (265, 288), True, 'import pandas as pd\n'), ((407, 433), 'numpy.array', 'np.array', (['[annotation[1:]]'], {}), '([annotation[1:]])\n', (415, 433), True, 'import numpy as np\n'), ((515, 574), 'numpy.concatenate', 'np.concatenate', (['(total_csv_annotations[key], value)'], {'axis': '(0)'}), '((total_csv_annotations[key], value), axis=0)\n', (529, 574), True, 'import numpy as np\n'), ((693, 721), 'cv2.imread', 'cv2.imread', (['(image_path + key)'], {}), '(image_path + key)\n', (703, 721), False, 'import cv2\n'), ((1021, 1048), 'base64.b64encode', 'base64.b64encode', (['imageData'], {}), '(imageData)\n', (1037, 1048), False, 'import base64\n')] |
import os
import sys
import hlt
import numpy as np
# Fixed spatial input size expected by the trained model.
WIDTH = 50
HEIGHT = 50
# NOTE(review): getInit/sendInit/getFrame/Location/Move are used as bare
# names although only ``import hlt`` appears above -- presumably this file
# relies on ``from hlt import *``; confirm against the Halite starter kit.
myID, gameMap = getInit()
# Make sure not to produce stderr when loading the model
# (stderr is temporarily redirected into err.log while Keras loads).
backup = sys.stderr
with open('err.log', 'w') as sys.stderr:
    from keras.models import load_model
    model = load_model('model.h5')
    model.predict(np.random.normal(size=(1, 50, 50, 4))).shape # make sure model is compiled during init
sys.stderr = backup
def frame_to_input(frame):
    """Convert a game frame into a (WIDTH, HEIGHT, 4) float32 tensor.

    Channels: 0 = cell owned by me, 1 = cell owned by an enemy,
    2 = production / 20, 3 = strength / 255.  The map is padded with
    'wrap' mode up to WIDTH x HEIGHT; the pad sizes are returned so the
    caller can strip them from the model output.
    """
    game_map = np.array([[(x.owner, x.production, x.strength) for x in row] for row in frame.contents])
    data = np.array([(game_map[:, :, 0] == myID), # 0 : owner is me
              ((game_map[:, :, 0] != 0) & (game_map[:, :, 0] != myID)), # 1 : owner is enemy
              game_map[:, :, 1] / 20, # 2 : production
              game_map[:, :, 2] / 255, # 3 : strength
              ]).astype(np.float32)
    data = np.transpose(data, (1, 2, 0))
    nx = data.shape[0]
    ny = data.shape[1]
    # Symmetric padding, with one extra row/column when the difference is odd.
    pad_x = int((WIDTH - nx) / 2)
    extra_x = int(WIDTH - nx - 2 * pad_x)
    pad_y = int((HEIGHT - ny) / 2)
    extra_y = int(HEIGHT - ny - 2 * pad_y)
    data = np.pad(data, ((pad_x, pad_x + extra_x), (pad_y, pad_y + extra_y), (0, 0)), 'wrap')
    return data, pad_x, extra_x, pad_y, extra_y
sendInit('ibab')
with open('status.log', 'w') as sys.stderr:
    while True:
        frame = getFrame()
        state, px, pxx, py, pyy = frame_to_input(frame)
        output = model.predict(state[np.newaxis])
        output = output.reshape(1, WIDTH, HEIGHT, 5)
        # Strip the padding, keeping one 5-way move distribution per map cell.
        output = output[0, px:-(px + pxx), py:-(py + pyy), :]
        moves = []
        for y in range(gameMap.height):
            for x in range(gameMap.width):
                location = Location(x, y)
                if gameMap.getSite(location).owner == myID:
                    p = output[x, y, :]
                    # Sample a move from the predicted distribution.
                    decision = np.random.choice(np.arange(5), p=p)
                    #decision = np.argmax(p)
                    print('Decide to go {} at ({}, {}), p={}'.format(decision, x, y, p), file=sys.stderr)
                    moves.append(Move(location, decision))
        sendFrame(moves)
| [
"numpy.random.normal",
"keras.models.load_model",
"numpy.array",
"numpy.pad",
"numpy.transpose",
"numpy.arange"
] | [((273, 295), 'keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (283, 295), False, 'from keras.models import load_model\n'), ((464, 556), 'numpy.array', 'np.array', (['[[(x.owner, x.production, x.strength) for x in row] for row in frame.contents]'], {}), '([[(x.owner, x.production, x.strength) for x in row] for row in\n frame.contents])\n', (472, 556), True, 'import numpy as np\n'), ((908, 937), 'numpy.transpose', 'np.transpose', (['data', '(1, 2, 0)'], {}), '(data, (1, 2, 0))\n', (920, 937), True, 'import numpy as np\n'), ((1152, 1238), 'numpy.pad', 'np.pad', (['data', '((pad_x, pad_x + extra_x), (pad_y, pad_y + extra_y), (0, 0))', '"""wrap"""'], {}), "(data, ((pad_x, pad_x + extra_x), (pad_y, pad_y + extra_y), (0, 0)),\n 'wrap')\n", (1158, 1238), True, 'import numpy as np\n'), ((314, 351), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 50, 50, 4)'}), '(size=(1, 50, 50, 4))\n', (330, 351), True, 'import numpy as np\n'), ((565, 711), 'numpy.array', 'np.array', (['[game_map[:, :, 0] == myID, (game_map[:, :, 0] != 0) & (game_map[:, :, 0] !=\n myID), game_map[:, :, 1] / 20, game_map[:, :, 2] / 255]'], {}), '([game_map[:, :, 0] == myID, (game_map[:, :, 0] != 0) & (game_map[:,\n :, 0] != myID), game_map[:, :, 1] / 20, game_map[:, :, 2] / 255])\n', (573, 711), True, 'import numpy as np\n'), ((1904, 1916), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1913, 1916), True, 'import numpy as np\n')] |
import unittest
import os
import datetime as dt
import netCDF4
import numpy as np
from forest import (eida50, satellite, navigate)
from forest.exceptions import FileNotFound, IndexNotFound
class TestLocator(unittest.TestCase):
    """Tests for satellite.Locator: date parsing, file lookup, index lookup."""
    def setUp(self):
        # Files created by individual tests; removed again in tearDown.
        self.paths = []
        self.pattern = "test-eida50*.nc"
        self.locator = satellite.Locator(self.pattern)
    def tearDown(self):
        for path in self.paths:
            if os.path.exists(path):
                os.remove(path)
    def test_parse_date(self):
        # The date is encoded as YYYYMMDD in the file name.
        path = "/some/file-20190101.nc"
        result = self.locator.parse_date(path)
        expect = dt.datetime(2019, 1, 1)
        self.assertEqual(expect, result)
    def test_find_given_no_files_raises_notfound(self):
        any_date = dt.datetime.now()
        with self.assertRaises(FileNotFound):
            self.locator.find(any_date)
    def test_find_given_a_single_file(self):
        valid_date = dt.datetime(2019, 1, 1)
        path = "test-eida50-20190101.nc"
        self.paths.append(path)
        times = [valid_date]
        with netCDF4.Dataset(path, "w") as dataset:
            self.set_times(dataset, times)
        found_path, index = self.locator.find(valid_date)
        self.assertEqual(found_path, path)
        self.assertEqual(index, 0)
    def test_find_given_multiple_files(self):
        dates = [
            dt.datetime(2019, 1, 1),
            dt.datetime(2019, 1, 2),
            dt.datetime(2019, 1, 3)]
        for date in dates:
            path = "test-eida50-{:%Y%m%d}.nc".format(date)
            self.paths.append(path)
            with netCDF4.Dataset(path, "w") as dataset:
                self.set_times(dataset, [date])
        # A time shortly after a file's date resolves to that file.
        valid_date = dt.datetime(2019, 1, 2, 0, 14)
        found_path, index = self.locator.find(valid_date)
        expect_path = "test-eida50-20190102.nc"
        self.assertEqual(found_path, expect_path)
        self.assertEqual(index, 0)
    def test_find_index_given_valid_time(self):
        # 03:31 falls into the 03:30 slot of a 15-minute cadence.
        time = dt.datetime(2019, 1, 1, 3, 31)
        times = [
            dt.datetime(2019, 1, 1, 3, 0),
            dt.datetime(2019, 1, 1, 3, 15),
            dt.datetime(2019, 1, 1, 3, 30),
            dt.datetime(2019, 1, 1, 3, 45),
            dt.datetime(2019, 1, 1, 4, 0),
        ]
        freq = dt.timedelta(minutes=15)
        result = self.locator.find_index(times, time, freq)
        expect = 2
        self.assertEqual(expect, result)
    def test_find_index_outside_range_raises_exception(self):
        # A time days after the last entry must not silently match anything.
        time = dt.datetime(2019, 1, 4, 16)
        times = [
            dt.datetime(2019, 1, 1, 3, 0),
            dt.datetime(2019, 1, 1, 3, 15),
            dt.datetime(2019, 1, 1, 3, 30),
            dt.datetime(2019, 1, 1, 3, 45),
            dt.datetime(2019, 1, 1, 4, 0),
        ]
        freq = dt.timedelta(minutes=15)
        with self.assertRaises(IndexNotFound):
            self.locator.find_index(times, time, freq)
    def set_times(self, dataset, times):
        # Helper: write a fixed-size "time" dimension and variable into *dataset*.
        units = "seconds since 1970-01-01 00:00:00"
        dataset.createDimension("time", len(times))
        var = dataset.createVariable("time", "d", ("time",))
        var.units = units
        var[:] = netCDF4.date2num(times, units=units)
class Formatter(object):
    """Test helper that writes a minimal EIDA50-like structure into a netCDF dataset."""
    def __init__(self, dataset):
        # An open, writable netCDF4.Dataset.
        self.dataset = dataset
    def define(self, times):
        """Create time/longitude/latitude dimensions and variables plus a
        zero-filled "data" variable named toa_brightness_temperature."""
        dataset = self.dataset
        dataset.createDimension("time", len(times))
        dataset.createDimension("longitude", 1)
        dataset.createDimension("latitude", 1)
        units = "hours since 1970-01-01 00:00:00"
        var = dataset.createVariable(
            "time", "d", ("time",))
        var.axis = "T"
        var.units = units
        var.standard_name = "time"
        var.calendar = "gregorian"
        var[:] = netCDF4.date2num(times, units=units)
        var = dataset.createVariable(
            "longitude", "f", ("longitude",))
        var.axis = "X"
        var.units = "degrees_east"
        var.standard_name = "longitude"
        var[:] = 0
        var = dataset.createVariable(
            "latitude", "f", ("latitude",))
        var.axis = "Y"
        var.units = "degrees_north"
        var.standard_name = "latitude"
        var[:] = 0
        var = dataset.createVariable(
            "data", "f", ("time", "latitude", "longitude"))
        var.standard_name = "toa_brightness_temperature"
        var.long_name = "toa_brightness_temperature"
        var.units = "K"
        var[:] = 0
class TestCoordinates(unittest.TestCase):
    """Checks eida50.Coordinates against a freshly written netCDF file."""
    def setUp(self):
        self.path = "test-navigate-eida50.nc"
    def tearDown(self):
        # Remove the scratch file if the test managed to create it.
        if os.path.exists(self.path):
            os.remove(self.path)
    def test_valid_times_given_eida50_toa_brightness_temperature(self):
        expected_times = [dt.datetime(2019, 1, 1)]
        with netCDF4.Dataset(self.path, "w") as dataset:
            Formatter(dataset).define(expected_times)
        actual = eida50.Coordinates().valid_times(
            self.path, "toa_brightness_temperature")
        self.assertEqual(expected_times, actual)
class TestEIDA50(unittest.TestCase):
    """Tests for the eida50 file-type navigator."""
    def setUp(self):
        self.path = "test-navigate-eida50.nc"
        self.navigator = navigate.FileSystem.file_type([self.path], "eida50")
        # One hour of quarter-hourly time steps.
        self.times = [
            dt.datetime(2019, 1, 1, 0),
            dt.datetime(2019, 1, 1, 0, 15),
            dt.datetime(2019, 1, 1, 0, 30),
            dt.datetime(2019, 1, 1, 0, 45),
        ]
    def tearDown(self):
        if os.path.exists(self.path):
            os.remove(self.path)
    def test_initial_times(self):
        with netCDF4.Dataset(self.path, "w") as dataset:
            writer = Formatter(dataset)
            writer.define(self.times)
        result = self.navigator.initial_times(self.path)
        # Only the first time stamp counts as an initial time.
        expect = [self.times[0]]
        self.assertEqual(expect, result)
    def test_valid_times(self):
        with netCDF4.Dataset(self.path, "w") as dataset:
            writer = Formatter(dataset)
            writer.define(self.times)
        result = self.navigator.valid_times(
            self.path,
            "toa_brightness_temperature",
            self.times[0])
        expect = self.times
        np.testing.assert_array_equal(expect, result)
    def test_pressures(self):
        with netCDF4.Dataset(self.path, "w") as dataset:
            writer = Formatter(dataset)
            writer.define(self.times)
        result = self.navigator.pressures(
            self.path,
            "toa_brightness_temperature",
            self.times[0])
        # Satellite imagery carries no pressure levels.
        expect = []
        np.testing.assert_array_equal(expect, result)
| [
"datetime.datetime",
"forest.satellite.Locator",
"os.path.exists",
"forest.navigate.FileSystem.file_type",
"netCDF4.Dataset",
"forest.eida50.Coordinates",
"datetime.datetime.now",
"datetime.timedelta",
"netCDF4.date2num",
"numpy.testing.assert_array_equal",
"os.remove"
] | [((338, 369), 'forest.satellite.Locator', 'satellite.Locator', (['self.pattern'], {}), '(self.pattern)\n', (355, 369), False, 'from forest import eida50, satellite, navigate\n'), ((632, 655), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (643, 655), True, 'import datetime as dt\n'), ((773, 790), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (788, 790), True, 'import datetime as dt\n'), ((944, 967), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (955, 967), True, 'import datetime as dt\n'), ((1738, 1768), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(2)', '(0)', '(14)'], {}), '(2019, 1, 2, 0, 14)\n', (1749, 1768), True, 'import datetime as dt\n'), ((2024, 2054), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(31)'], {}), '(2019, 1, 1, 3, 31)\n', (2035, 2054), True, 'import datetime as dt\n'), ((2316, 2340), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (2328, 2340), True, 'import datetime as dt\n'), ((2539, 2566), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(4)', '(16)'], {}), '(2019, 1, 4, 16)\n', (2550, 2566), True, 'import datetime as dt\n'), ((2828, 2852), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (2840, 2852), True, 'import datetime as dt\n'), ((3205, 3241), 'netCDF4.date2num', 'netCDF4.date2num', (['times'], {'units': 'units'}), '(times, units=units)\n', (3221, 3241), False, 'import netCDF4\n'), ((3805, 3841), 'netCDF4.date2num', 'netCDF4.date2num', (['times'], {'units': 'units'}), '(times, units=units)\n', (3821, 3841), False, 'import netCDF4\n'), ((4652, 4677), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (4666, 4677), False, 'import os\n'), ((4974, 4994), 'forest.eida50.Coordinates', 'eida50.Coordinates', ([], {}), '()\n', (4992, 4994), False, 'from forest import eida50, satellite, navigate\n'), ((5266, 5318), 
'forest.navigate.FileSystem.file_type', 'navigate.FileSystem.file_type', (['[self.path]', '"""eida50"""'], {}), "([self.path], 'eida50')\n", (5295, 5318), False, 'from forest import eida50, satellite, navigate\n'), ((5560, 5585), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (5574, 5585), False, 'import os\n'), ((6274, 6319), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expect', 'result'], {}), '(expect, result)\n', (6303, 6319), True, 'import numpy as np\n'), ((6661, 6706), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expect', 'result'], {}), '(expect, result)\n', (6690, 6706), True, 'import numpy as np\n'), ((442, 462), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (456, 462), False, 'import os\n'), ((1084, 1110), 'netCDF4.Dataset', 'netCDF4.Dataset', (['path', '"""w"""'], {}), "(path, 'w')\n", (1099, 1110), False, 'import netCDF4\n'), ((1384, 1407), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (1395, 1407), True, 'import datetime as dt\n'), ((1425, 1448), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(2)'], {}), '(2019, 1, 2)\n', (1436, 1448), True, 'import datetime as dt\n'), ((1466, 1489), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(3)'], {}), '(2019, 1, 3)\n', (1477, 1489), True, 'import datetime as dt\n'), ((2085, 2114), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(0)'], {}), '(2019, 1, 1, 3, 0)\n', (2096, 2114), True, 'import datetime as dt\n'), ((2128, 2158), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(15)'], {}), '(2019, 1, 1, 3, 15)\n', (2139, 2158), True, 'import datetime as dt\n'), ((2172, 2202), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(30)'], {}), '(2019, 1, 1, 3, 30)\n', (2183, 2202), True, 'import datetime as dt\n'), ((2216, 2246), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', 
'(45)'], {}), '(2019, 1, 1, 3, 45)\n', (2227, 2246), True, 'import datetime as dt\n'), ((2260, 2289), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(4)', '(0)'], {}), '(2019, 1, 1, 4, 0)\n', (2271, 2289), True, 'import datetime as dt\n'), ((2597, 2626), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(0)'], {}), '(2019, 1, 1, 3, 0)\n', (2608, 2626), True, 'import datetime as dt\n'), ((2640, 2670), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(15)'], {}), '(2019, 1, 1, 3, 15)\n', (2651, 2670), True, 'import datetime as dt\n'), ((2684, 2714), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(30)'], {}), '(2019, 1, 1, 3, 30)\n', (2695, 2714), True, 'import datetime as dt\n'), ((2728, 2758), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(3)', '(45)'], {}), '(2019, 1, 1, 3, 45)\n', (2739, 2758), True, 'import datetime as dt\n'), ((2772, 2801), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(4)', '(0)'], {}), '(2019, 1, 1, 4, 0)\n', (2783, 2801), True, 'import datetime as dt\n'), ((4691, 4711), 'os.remove', 'os.remove', (['self.path'], {}), '(self.path)\n', (4700, 4711), False, 'import os\n'), ((4802, 4825), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (4813, 4825), True, 'import datetime as dt\n'), ((4840, 4871), 'netCDF4.Dataset', 'netCDF4.Dataset', (['self.path', '"""w"""'], {}), "(self.path, 'w')\n", (4855, 4871), False, 'import netCDF4\n'), ((5354, 5380), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(0)'], {}), '(2019, 1, 1, 0)\n', (5365, 5380), True, 'import datetime as dt\n'), ((5394, 5424), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(0)', '(15)'], {}), '(2019, 1, 1, 0, 15)\n', (5405, 5424), True, 'import datetime as dt\n'), ((5438, 5468), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(0)', '(30)'], {}), '(2019, 1, 1, 0, 30)\n', (5449, 5468), True, 
'import datetime as dt\n'), ((5482, 5512), 'datetime.datetime', 'dt.datetime', (['(2019)', '(1)', '(1)', '(0)', '(45)'], {}), '(2019, 1, 1, 0, 45)\n', (5493, 5512), True, 'import datetime as dt\n'), ((5599, 5619), 'os.remove', 'os.remove', (['self.path'], {}), '(self.path)\n', (5608, 5619), False, 'import os\n'), ((5668, 5699), 'netCDF4.Dataset', 'netCDF4.Dataset', (['self.path', '"""w"""'], {}), "(self.path, 'w')\n", (5683, 5699), False, 'import netCDF4\n'), ((5967, 5998), 'netCDF4.Dataset', 'netCDF4.Dataset', (['self.path', '"""w"""'], {}), "(self.path, 'w')\n", (5982, 5998), False, 'import netCDF4\n'), ((6364, 6395), 'netCDF4.Dataset', 'netCDF4.Dataset', (['self.path', '"""w"""'], {}), "(self.path, 'w')\n", (6379, 6395), False, 'import netCDF4\n'), ((480, 495), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (489, 495), False, 'import os\n'), ((1630, 1656), 'netCDF4.Dataset', 'netCDF4.Dataset', (['path', '"""w"""'], {}), "(path, 'w')\n", (1645, 1656), False, 'import netCDF4\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
class Architechture():
    """Single-layer network with Hebbian feed-forward weights and
    anti-Hebbian lateral ("side") connections.

    Note: the class name spelling is kept for backward compatibility.
    """
    def __init__(self, pattern, output, eta, mi, alpha, iterations, epslon):
        self.pattern = pattern
        self.output = output
        self.eta = eta
        self.mi = mi
        self.alpha = alpha
        self.iterations = iterations
        self.epslon = epslon
        # Output activations, feed-forward weights, and strictly
        # upper-triangular lateral weights.
        self.Y = np.zeros(self.output)
        self.weights = np.random.uniform(0, 0.01, [self.pattern, self.output])
        self.side_weights = np.random.uniform(0, 0.01, [self.output, self.output])
        self.side_weights -= np.tril(self.side_weights)
    def updateWeights(self, x):
        """Hebbian update of the feed-forward weights, then renormalise by the maximum."""
        self.weights += self.eta * x * np.transpose(self.Y)
        self.weights /= np.amax(self.weights)
    def updateSideWeights(self):
        """Anti-Hebbian update of the upper-triangular lateral weights."""
        for row in range(self.output):
            for col in range(row + 1, self.output):
                self.side_weights[row, col] -= self.mi * self.Y[row] * self.Y[col]
    def updateParams(self):
        """Decay both learning rates, clamped to small positive floors."""
        self.eta = max(self.alpha * self.eta, 0.0001)
        self.mi = max(self.alpha * self.mi, 0.0002)
    def start(self, data):
        """Train on standardised, shuffled data until the iteration budget is
        spent or the lateral-weight sum drops below epslon."""
        data_std = preprocessing.scale(data)
        np.random.shuffle(data_std)
        epoch = 0
        while epoch <= self.iterations:
            print("Step " + str(epoch) + " of " + str(self.iterations))
            for row_idx in range(data_std.shape[0]):
                sample = data_std[row_idx]
                for unit in range(self.output):
                    self.Y[unit] = sum(self.weights[:, unit] * sample) + sum(self.side_weights[unit, :] * self.Y)
                self.updateWeights(sample)
                self.updateSideWeights()
                self.updateParams()
            if np.sum(self.side_weights) <= self.epslon:
                break
            epoch += 1
"numpy.sum",
"numpy.zeros",
"numpy.tril",
"numpy.random.uniform",
"numpy.transpose",
"numpy.amax",
"sklearn.preprocessing.scale",
"numpy.random.shuffle"
] | [((423, 444), 'numpy.zeros', 'np.zeros', (['self.output'], {}), '(self.output)\n', (431, 444), True, 'import numpy as np\n'), ((468, 523), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.01)', '[self.pattern, self.output]'], {}), '(0, 0.01, [self.pattern, self.output])\n', (485, 523), True, 'import numpy as np\n'), ((552, 606), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.01)', '[self.output, self.output]'], {}), '(0, 0.01, [self.output, self.output])\n', (569, 606), True, 'import numpy as np\n'), ((636, 662), 'numpy.tril', 'np.tril', (['self.side_weights'], {}), '(self.side_weights)\n', (643, 662), True, 'import numpy as np\n'), ((825, 846), 'numpy.amax', 'np.amax', (['self.weights'], {}), '(self.weights)\n', (832, 846), True, 'import numpy as np\n'), ((1251, 1276), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['data'], {}), '(data)\n', (1270, 1276), False, 'from sklearn import preprocessing\n'), ((1286, 1313), 'numpy.random.shuffle', 'np.random.shuffle', (['data_std'], {}), '(data_std)\n', (1303, 1313), True, 'import numpy as np\n'), ((750, 770), 'numpy.transpose', 'np.transpose', (['self.Y'], {}), '(self.Y)\n', (762, 770), True, 'import numpy as np\n'), ((1838, 1863), 'numpy.sum', 'np.sum', (['self.side_weights'], {}), '(self.side_weights)\n', (1844, 1863), True, 'import numpy as np\n')] |
from __future__ import division
from skimage import img_as_float, io
from skimage.filters import threshold_otsu
import numpy as np
def quantize(image, L=1, N=4):
    """Quantize an image.

    Parameters
    ----------
    image : array_like
        Input image.
    L : float
        Maximum input value.
    N : int
        Number of quantization levels.

    Returns
    -------
    ndarray of int
        Quantization level (0 .. N-1) per pixel, same shape as *image*.
    """
    thresholds = np.linspace(0, L, N, endpoint=False)[1:]
    flat_levels = np.digitize(image.flat, thresholds)
    return flat_levels.reshape(image.shape)
def dither(image, N=4, positions=None, weights=None):
    """Quantize an image, using error-diffusion dithering.

    Parameters
    ----------
    image : ndarray
        Input image.
    N : int
        Number of quantization levels.
    positions : list of (i, j) offsets
        Position offset to which the quantization error is distributed.
        By default, implement Sierra's "Filter Lite".
    weights : list of ints
        Weights for propagated error.
        By default, implement Sierra's "Filter Lite".

    Returns
    -------
    out : ndarray
        Quantization level (0 .. N-1) per pixel.

    References
    ----------
    http://www.efg2.com/Lab/Library/ImageProcessing/DHALF.TXT
    """
    image = img_as_float(image.copy())
    if positions is None or weights is None:
        positions = [(0, 1), (1, -1), (1, 0)]
        weights = [2, 1, 1]
    weights = weights / np.sum(weights)
    T = np.linspace(0, 1, N, endpoint=False)[1:]
    rows, cols = image.shape
    out = np.zeros_like(image, dtype=float)
    for i in range(rows):
        for j in range(cols):
            # Quantize
            out[i, j], = np.digitize([image[i, j]], T)
            # Propagate quantization noise
            d = (image[i, j] - out[i, j] / (N - 1))
            for (ii, jj), w in zip(positions, weights):
                ii = i + ii
                jj = j + jj
                # BUG FIX: also reject negative indices.  Previously an
                # offset such as (1, -1) at column 0 produced jj == -1,
                # which silently wrapped around and diffused the error
                # into the last column of the next row.
                if 0 <= ii < rows and 0 <= jj < cols:
                    image[ii, jj] += d * w
    return out
def floyd_steinberg(image, N):
    """Dither *image* to N levels using the Floyd-Steinberg error kernel."""
    error_offsets = [(0, 1), (1, -1), (1, 0), (1, 1)]
    error_weights = [7, 3, 5, 1]
    return dither(image, N, error_offsets, error_weights)
def stucki(image, N):
    """Dither *image* to N levels using the Stucki error kernel."""
    error_offsets = [(0, 1), (0, 2),
                     (1, -2), (1, -1), (1, 0), (1, 1), (1, 2),
                     (2, -2), (2, -1), (2, 0), (2, 1), (2, 2)]
    error_weights = [8, 4,
                     2, 4, 8, 4, 2,
                     1, 2, 4, 2, 1]
    return dither(image, N, error_offsets, error_weights)
# Image with 255 color levels
img = img_as_float(io.imread('data/david.png'))
# Quantize to N levels
N = 2
img_quant = quantize(img, N=N)
img_dither_random = img + np.abs(np.random.normal(size=img.shape,
scale=1./(3 * N)))
img_dither_random = quantize(img_dither_random, L=1, N=N)
img_dither_fs = floyd_steinberg(img, N=N)
img_dither_stucki = stucki(img, N=N)
import matplotlib.pyplot as plt
f, ax = plt.subplots(2, 3, subplot_kw={'xticks': [], 'yticks': []})
ax[0, 0].imshow(img, cmap=plt.cm.gray, interpolation='nearest')
ax[0, 1].imshow(img_quant, cmap=plt.cm.gray, interpolation='nearest')
ax[0, 2].imshow(img > threshold_otsu(img), cmap=plt.cm.gray, interpolation='nearest')
#ax[0, 2].set_visible(False)
ax[1, 0].imshow(img_dither_random, cmap=plt.cm.gray, interpolation='nearest')
ax[1, 1].imshow(img_dither_fs, cmap=plt.cm.gray, interpolation='nearest')
ax[1, 2].imshow(img_dither_stucki, cmap=plt.cm.gray, interpolation='nearest')
ax[0, 0].set_title('Input')
ax[0, 1].set_title('Quantization (N=%d)' % N)
ax[0, 2].set_title('Otsu threshold')
ax[1, 0].set_title('Dithering: Image + Noise')
ax[1, 1].set_title('Floyd-Steinberg')
ax[1, 2].set_title('Stucki')
plt.show()
| [
"numpy.random.normal",
"skimage.filters.threshold_otsu",
"numpy.digitize",
"numpy.sum",
"skimage.io.imread",
"numpy.linspace",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((2782, 2841), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'subplot_kw': "{'xticks': [], 'yticks': []}"}), "(2, 3, subplot_kw={'xticks': [], 'yticks': []})\n", (2794, 2841), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3556, 3558), True, 'import matplotlib.pyplot as plt\n'), ((1374, 1407), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'float'}), '(image, dtype=float)\n', (1387, 1407), True, 'import numpy as np\n'), ((2384, 2411), 'skimage.io.imread', 'io.imread', (['"""data/david.png"""'], {}), "('data/david.png')\n", (2393, 2411), False, 'from skimage import img_as_float, io\n'), ((376, 412), 'numpy.linspace', 'np.linspace', (['(0)', 'L', 'N'], {'endpoint': '(False)'}), '(0, L, N, endpoint=False)\n', (387, 412), True, 'import numpy as np\n'), ((1268, 1283), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (1274, 1283), True, 'import numpy as np\n'), ((1293, 1329), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {'endpoint': '(False)'}), '(0, 1, N, endpoint=False)\n', (1304, 1329), True, 'import numpy as np\n'), ((2508, 2561), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'img.shape', 'scale': '(1.0 / (3 * N))'}), '(size=img.shape, scale=1.0 / (3 * N))\n', (2524, 2561), True, 'import numpy as np\n'), ((2998, 3017), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['img'], {}), '(img)\n', (3012, 3017), False, 'from skimage.filters import threshold_otsu\n'), ((428, 454), 'numpy.digitize', 'np.digitize', (['image.flat', 'T'], {}), '(image.flat, T)\n', (439, 454), True, 'import numpy as np\n'), ((1512, 1541), 'numpy.digitize', 'np.digitize', (['[image[i, j]]', 'T'], {}), '([image[i, j]], T)\n', (1523, 1541), True, 'import numpy as np\n')] |
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
def column_chart(results, solvers, preconditioners, offset=None, ymax=10):
    """Plot solver timings as a grouped bar chart.

    Args:
        results: array of shape (n_preconditioners, n_solvers) of timings in
            ms; row 0 is the default preconditioner, row 1 is "no prec.".
        solvers: sequence of solver names (one x-axis group per solver).
        preconditioners: sequence of preconditioner names indexing the rows.
        offset: vertical gap between a bar and its text annotation; if None,
            use 1.05 * the bar's own height for each bar.
        ymax: upper y-axis limit.

    Returns:
        The matplotlib Figure containing the chart.
    """
    slowest = results.max(0)
    fastest = results.min(0)
    default = results[0]
    no_prec = results[1]
    fig = plt.figure(figsize=(8, 4))
    ax = fig.add_subplot(111)
    width = 0.2
    ind = np.arange(len(solvers))
    # reference line at the default solver/preconditioner timing
    ax.axhline(results[0, 0], color=(0.3, 0.3, 0.3), ls=":", zorder=0)
    rects_fastest = ax.bar(ind, fastest, width, color="green", label="fastest prec.")
    ax.bar(width + ind, default, width, color=(0.3, 0.3, 0.3), label="default prec.")
    ax.bar(2 * width + ind, no_prec, width, color=(0.8, 0.8, 0.8), label="no prec.")
    ax.bar(3 * width + ind, slowest, width, color="red", label="slowest prec.")
    # annotate fastest runs with name of preconditioner
    fastest_ind = results.argmin(0)
    for i, rect in enumerate(rects_fastest):
        height = rect.get_height()
        # BUG FIX: the original rebound the ``offset`` parameter inside the
        # loop, so with offset=None only the first bar's auto-offset was
        # computed and then reused for every subsequent bar.
        gap = offset if offset is not None else 1.05 * height
        ax.text(rect.get_x() + rect.get_width() / 2.0, height + gap,
                preconditioners[fastest_ind[i]],
                ha='center', va='bottom', rotation=90)
    ax.set_xlabel("method")
    ax.set_ylabel("time (ms)")
    ax.set_ylim((0, ymax))
    ax.legend()
    ax.set_xticks(ind + 2 * width)
    xtick_names = plt.setp(ax, xticklabels=solvers)
    plt.setp(xtick_names, rotation=0)
    return fig
if __name__ == "__main__":
    ms = 1e3  # convert seconds to milliseconds
    solvers = [s[0] for s in df.krylov_solver_methods()]
    preconditioners = [p[0] for p in df.krylov_solver_preconditioners()]
    ymax = [[6, 6], [6, 10]]
    for i, system in enumerate(["ball", "film"]):
        for j, potential in enumerate(["1", "2"]):
            # BUG FIX: np.ma.load was deprecated and later removed from
            # NumPy; it was a thin wrapper around pickle.load on a binary
            # file, so unpickle the stored timings directly.
            with open(system + "_" + potential + ".pickle", "rb") as pf:
                results = ms * pickle.load(pf)
            # dump a LaTeX table of timings per preconditioner
            with open(system + "_" + potential + ".txt", "w") as f:
                f.write("& {} \\\\\n".format(" & ".join(solvers)))
                f.write("\\hline\n")
                for pi, p in enumerate(preconditioners):
                    numbers = ["{:.3}".format(r) for r in results[pi]]
                    f.write("{} & {} \\\\\n".format(p, " & ".join(numbers)))
            fig = column_chart(results, solvers, preconditioners, offset=0.2, ymax=ymax[i][j])
            plt.savefig(system + "_" + potential + ".png")
            plt.close()
| [
"matplotlib.pyplot.setp",
"matplotlib.pyplot.savefig",
"numpy.ma.load",
"dolfin.krylov_solver_methods",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"dolfin.krylov_solver_preconditioners"
] | [((266, 292), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (276, 292), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1384), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xticklabels': 'solvers'}), '(ax, xticklabels=solvers)\n', (1359, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1389, 1421), 'matplotlib.pyplot.setp', 'plt.setp', (['xtickNames'], {'rotation': '(0)'}), '(xtickNames, rotation=0)\n', (1397, 1421), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1533), 'dolfin.krylov_solver_methods', 'df.krylov_solver_methods', ([], {}), '()\n', (1531, 1533), True, 'import dolfin as df\n'), ((1572, 1606), 'dolfin.krylov_solver_preconditioners', 'df.krylov_solver_preconditioners', ([], {}), '()\n', (1604, 1606), True, 'import dolfin as df\n'), ((2301, 2347), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(system + '_' + potential + '.png')"], {}), "(system + '_' + potential + '.png')\n", (2312, 2347), True, 'import matplotlib.pyplot as plt\n'), ((2360, 2371), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2369, 2371), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1814), 'numpy.ma.load', 'np.ma.load', (["(system + '_' + potential + '.pickle')"], {}), "(system + '_' + potential + '.pickle')\n", (1776, 1814), True, 'import numpy as np\n')] |
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_in
from nose.tools import assert_raises
from nose.tools import assert_true
import networkx as nx
import numpy as np
import dagology as dag
class TestCausalSet(object):
    """Unit tests for the causal set model."""

    def test_number_of_nodes(self):
        """All points survive as nodes for any connection probability p."""
        N = 80
        D = 2
        R = np.random.random((N, D))  # probably shouldn't test stochastically...
        for p in [0.0, 0.1, 0.9, 1.0]:
            G = dag.causal_set_graph(R, p)
            assert_equal(G.number_of_nodes(), N)

    def test_number_of_edges(self):
        """With connection probability 0 no edges are created."""
        N = 80
        D = 2
        R = np.random.random((N, D))
        G = dag.causal_set_graph(R, 0.)
        assert_true(G.number_of_edges() == 0)

    def periodicity(self):
        # NOTE(review): lacks the ``test_`` prefix, so test discovery does
        # not run it automatically — presumably intentional; confirm.
        R = np.array([[0., 0.],
                      [1., -0.8],
                      [1., 0.8],
                      [2., 1.6]])
        G_boundary = dag.causal_set_graph(R)
        G_periodic = dag.causal_set_graph(R, period=2.)
        assert_equal(G_boundary.number_of_edges(), 4)
        # BUG FIX: was ``number_of_edgeS()`` (typo) which raises
        # AttributeError when this method is invoked.
        assert_equal(G_periodic.number_of_edges(), 5)
class TestMinkowskiInterval(object):
    """Unit tests for minkowski_interval."""

    def test_shape(self):
        """Returned array has one row per point and one column per dim."""
        N = 120
        D = 3
        R = dag.minkowski_interval(N, D)
        assert_equal(R.shape, (N, D))

    def test_fix_ends_true(self):
        """With fix_ends=True the first two rows are the fixed endpoints."""
        N = 100
        D = 2
        R = dag.minkowski_interval(N, D, fix_ends=True)
        assert_equal(R[0, 0], 0.)
        assert_equal(R[0, 1], 0.5)
        assert_equal(R[1, 0], 1.)
        assert_equal(R[1, 1], 0.5)

    def test_fix_ends_false(self):
        """With fix_ends=False both endpoint rows should be randomized."""
        N = 100
        D = 2
        R = dag.minkowski_interval(N, D, fix_ends=False)
        # BUG FIX: the original asserted on row 0 twice; check both
        # endpoint rows (0 and 1) instead.
        assert_true(0. < R[0, 0] < 1.)
        assert_true(R[0, 1] != 0.5)
        assert_true(0. < R[1, 0] < 1.)
        assert_true(R[1, 1] != 0.5)
class TestDeSitterInterval(object):
    """Unit tests for de_sitter_interval."""

    def test_shape(self):
        """Returned array has one row per point and one column per dim."""
        N = 120
        D = 3
        R = dag.de_sitter_interval(N, D, 0.1)
        assert_equal(R.shape, (N, D))

    def test_fix_ends_false(self):
        """With fix_ends=False both endpoint rows should be randomized."""
        N = 100
        D = 2
        R = dag.de_sitter_interval(N, D, 0.5, fix_ends=False)
        # BUG FIX: the original asserted on row 0 twice; check both
        # endpoint rows (0 and 1) instead.
        assert_true(0. < R[0, 0] < 1.)
        assert_true(R[0, 1] != 0.5)
        assert_true(0. < R[1, 0] < 1.)
        assert_true(R[1, 1] != 0.5)

    def test_fix_ends_true(self):
        """With fix_ends=True the first two rows are the fixed endpoints."""
        N = 100
        D = 2
        R = dag.de_sitter_interval(N, D, 0.5, fix_ends=True)
        assert_equal(R[0, 0], 0.)
        assert_equal(R[0, 1], 0.)
        assert_equal(R[1, 0], 1.)
        assert_equal(R[1, 1], 0.)
| [
"nose.tools.assert_equal",
"numpy.random.random",
"nose.tools.assert_true",
"numpy.array",
"dagology.minkowski_interval",
"dagology.causal_set_graph",
"dagology.de_sitter_interval"
] | [((397, 421), 'numpy.random.random', 'np.random.random', (['(N, D)'], {}), '((N, D))\n', (413, 421), True, 'import numpy as np\n'), ((675, 699), 'numpy.random.random', 'np.random.random', (['(N, D)'], {}), '((N, D))\n', (691, 699), True, 'import numpy as np\n'), ((712, 740), 'dagology.causal_set_graph', 'dag.causal_set_graph', (['R', '(0.0)'], {}), '(R, 0.0)\n', (732, 740), True, 'import dagology as dag\n'), ((826, 885), 'numpy.array', 'np.array', (['[[0.0, 0.0], [1.0, -0.8], [1.0, 0.8], [2.0, 1.6]]'], {}), '([[0.0, 0.0], [1.0, -0.8], [1.0, 0.8], [2.0, 1.6]])\n', (834, 885), True, 'import numpy as np\n'), ((968, 991), 'dagology.causal_set_graph', 'dag.causal_set_graph', (['R'], {}), '(R)\n', (988, 991), True, 'import dagology as dag\n'), ((1013, 1048), 'dagology.causal_set_graph', 'dag.causal_set_graph', (['R'], {'period': '(2.0)'}), '(R, period=2.0)\n', (1033, 1048), True, 'import dagology as dag\n'), ((1307, 1335), 'dagology.minkowski_interval', 'dag.minkowski_interval', (['N', 'D'], {}), '(N, D)\n', (1329, 1335), True, 'import dagology as dag\n'), ((1344, 1373), 'nose.tools.assert_equal', 'assert_equal', (['R.shape', '(N, D)'], {}), '(R.shape, (N, D))\n', (1356, 1373), False, 'from nose.tools import assert_equal\n'), ((1450, 1493), 'dagology.minkowski_interval', 'dag.minkowski_interval', (['N', 'D'], {'fix_ends': '(True)'}), '(N, D, fix_ends=True)\n', (1472, 1493), True, 'import dagology as dag\n'), ((1502, 1528), 'nose.tools.assert_equal', 'assert_equal', (['R[0, 0]', '(0.0)'], {}), '(R[0, 0], 0.0)\n', (1514, 1528), False, 'from nose.tools import assert_equal\n'), ((1536, 1562), 'nose.tools.assert_equal', 'assert_equal', (['R[0, 1]', '(0.5)'], {}), '(R[0, 1], 0.5)\n', (1548, 1562), False, 'from nose.tools import assert_equal\n'), ((1571, 1597), 'nose.tools.assert_equal', 'assert_equal', (['R[1, 0]', '(1.0)'], {}), '(R[1, 0], 1.0)\n', (1583, 1597), False, 'from nose.tools import assert_equal\n'), ((1605, 1631), 'nose.tools.assert_equal', 'assert_equal', 
(['R[1, 1]', '(0.5)'], {}), '(R[1, 1], 0.5)\n', (1617, 1631), False, 'from nose.tools import assert_equal\n'), ((1710, 1754), 'dagology.minkowski_interval', 'dag.minkowski_interval', (['N', 'D'], {'fix_ends': '(False)'}), '(N, D, fix_ends=False)\n', (1732, 1754), True, 'import dagology as dag\n'), ((1763, 1795), 'nose.tools.assert_true', 'assert_true', (['(0.0 < R[0, 0] < 1.0)'], {}), '(0.0 < R[0, 0] < 1.0)\n', (1774, 1795), False, 'from nose.tools import assert_true\n'), ((1802, 1829), 'nose.tools.assert_true', 'assert_true', (['(R[0, 1] != 0.5)'], {}), '(R[0, 1] != 0.5)\n', (1813, 1829), False, 'from nose.tools import assert_true\n'), ((1838, 1870), 'nose.tools.assert_true', 'assert_true', (['(0.0 < R[0, 0] < 1.0)'], {}), '(0.0 < R[0, 0] < 1.0)\n', (1849, 1870), False, 'from nose.tools import assert_true\n'), ((1877, 1904), 'nose.tools.assert_true', 'assert_true', (['(R[0, 1] != 0.5)'], {}), '(R[0, 1] != 0.5)\n', (1888, 1904), False, 'from nose.tools import assert_true\n'), ((2055, 2088), 'dagology.de_sitter_interval', 'dag.de_sitter_interval', (['N', 'D', '(0.1)'], {}), '(N, D, 0.1)\n', (2077, 2088), True, 'import dagology as dag\n'), ((2097, 2126), 'nose.tools.assert_equal', 'assert_equal', (['R.shape', '(N, D)'], {}), '(R.shape, (N, D))\n', (2109, 2126), False, 'from nose.tools import assert_equal\n'), ((2204, 2253), 'dagology.de_sitter_interval', 'dag.de_sitter_interval', (['N', 'D', '(0.5)'], {'fix_ends': '(False)'}), '(N, D, 0.5, fix_ends=False)\n', (2226, 2253), True, 'import dagology as dag\n'), ((2262, 2294), 'nose.tools.assert_true', 'assert_true', (['(0.0 < R[0, 0] < 1.0)'], {}), '(0.0 < R[0, 0] < 1.0)\n', (2273, 2294), False, 'from nose.tools import assert_true\n'), ((2301, 2328), 'nose.tools.assert_true', 'assert_true', (['(R[0, 1] != 0.5)'], {}), '(R[0, 1] != 0.5)\n', (2312, 2328), False, 'from nose.tools import assert_true\n'), ((2337, 2369), 'nose.tools.assert_true', 'assert_true', (['(0.0 < R[0, 0] < 1.0)'], {}), '(0.0 < R[0, 0] < 1.0)\n', (2348, 
2369), False, 'from nose.tools import assert_true\n'), ((2376, 2403), 'nose.tools.assert_true', 'assert_true', (['(R[0, 1] != 0.5)'], {}), '(R[0, 1] != 0.5)\n', (2387, 2403), False, 'from nose.tools import assert_true\n'), ((2481, 2529), 'dagology.de_sitter_interval', 'dag.de_sitter_interval', (['N', 'D', '(0.5)'], {'fix_ends': '(True)'}), '(N, D, 0.5, fix_ends=True)\n', (2503, 2529), True, 'import dagology as dag\n'), ((2538, 2564), 'nose.tools.assert_equal', 'assert_equal', (['R[0, 0]', '(0.0)'], {}), '(R[0, 0], 0.0)\n', (2550, 2564), False, 'from nose.tools import assert_equal\n'), ((2572, 2598), 'nose.tools.assert_equal', 'assert_equal', (['R[0, 1]', '(0.0)'], {}), '(R[0, 1], 0.0)\n', (2584, 2598), False, 'from nose.tools import assert_equal\n'), ((2606, 2632), 'nose.tools.assert_equal', 'assert_equal', (['R[1, 0]', '(1.0)'], {}), '(R[1, 0], 1.0)\n', (2618, 2632), False, 'from nose.tools import assert_equal\n'), ((2640, 2666), 'nose.tools.assert_equal', 'assert_equal', (['R[1, 1]', '(0.0)'], {}), '(R[1, 1], 0.0)\n', (2652, 2666), False, 'from nose.tools import assert_equal\n'), ((521, 547), 'dagology.causal_set_graph', 'dag.causal_set_graph', (['R', 'p'], {}), '(R, p)\n', (541, 547), True, 'import dagology as dag\n')] |
import numpy as np
import time
import matplotlib.pyplot as plt
import visdom
# 1-D Newton's method
# find the minimum of y
# y = x^2 + x + 1
# y' = 2x + 1
# y'' = 2
x0 = 91.91
g = 2*x0 + 1   # f'(x0)
gg = 2         # f''(x0); constant for a quadratic
while abs(g) > 0.001:
    # Newton step: x <- x - f'(x)/f''(x)
    x0 = -g/gg + x0
    # refresh the derivatives at the new point
    g = 2*x0 + 1
    gg = 2
    print(g)
y = x0**2 + x0 + 1
print('\nx_final is {0}, and min_y is {1}'.format(x0, y))

# 2-Dim Newton
# z = x^2 + y^2 + x + y + xy + 1
# partial x = 2*x + y + 1
# partial y = 2*y + x + 1
x0 = [1, 1]  # init point
p_x = 2*x0[0] + x0[1] + 1
p_y = 2*x0[1] + x0[0] + 1
p_xx = 2
p_yy = 2
p_xy = 1
p_yx = 1
g0 = [p_x, p_y]
g0_norm = np.linalg.norm(g0)
H0 = np.array([[p_xx, p_xy],
               [p_yx, p_yy]
               ])
print('H0:\n', H0)
pts = []
z = []
while g0_norm > 0.01:
    # damped Newton step: x <- x - 0.1 * H^{-1} g
    x0 = x0 - 0.1 * np.dot(np.linalg.inv(H0), g0)
    pts.append(x0)
    # update the gradient at the new point
    p_x = 2 * x0[0] + x0[1] + 1
    p_y = 2 * x0[1] + x0[0] + 1
    g0 = [p_x, p_y]
    g0_norm = np.linalg.norm(g0)  # don't forget to update end_condition!
    print('g0: ', g0)
    # Hessian is constant for this quadratic, no update needed.
    # track the objective value at the current iterate
    y = x0[0]**2 + x0[1]**2 + x0[0] + x0[1] + x0[0]*x0[1] + 1
    print('y: ', y)
    # BUG FIX: was ``z.append(z)`` which appended the list to itself;
    # the objective value ``y`` was clearly intended.
    z.append(y)
    # (removed a pointless per-iteration time.sleep(0.01) delay)
# pts = np.array(pts).T
# plt.scatter(pts[0], pts[1], marker='*')
# plt.show()
| [
"numpy.array",
"numpy.linalg.inv",
"time.sleep",
"numpy.linalg.norm"
] | [((722, 740), 'numpy.linalg.norm', 'np.linalg.norm', (['g0'], {}), '(g0)\n', (736, 740), True, 'import numpy as np\n'), ((747, 785), 'numpy.array', 'np.array', (['[[p_xx, p_xy], [p_yx, p_yy]]'], {}), '([[p_xx, p_xy], [p_yx, p_yy]])\n', (755, 785), True, 'import numpy as np\n'), ((1150, 1168), 'numpy.linalg.norm', 'np.linalg.norm', (['g0'], {}), '(g0)\n', (1164, 1168), True, 'import numpy as np\n'), ((1382, 1398), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1392, 1398), False, 'import time\n'), ((939, 956), 'numpy.linalg.inv', 'np.linalg.inv', (['H0'], {}), '(H0)\n', (952, 956), True, 'import numpy as np\n')] |
import sys
import unittest
from itertools import product
import numpy as np
import torch
from metal.label_model.class_balance import ClassBalanceModel
sys.path.append("../synthetic")
class ClassBalanceModelTest(unittest.TestCase):
    """Tests that ClassBalanceModel recovers a known synthetic class balance."""

    def _set_seed(self, seed):
        """Seed both torch and numpy for reproducible synthetic data."""
        torch.manual_seed(seed)
        np.random.seed(seed)

    def _generate_class_balance(self, k):
        """Generate a random class balance (a length-k probability vector)."""
        p_Y = np.random.random(k)
        p_Y /= p_Y.sum()
        return p_Y

    def _generate_cond_probs(self, k, m, bias_diag=True, abstains=False):
        r"""Generate conditional probability tables for the m conditionally ind.
        LFs, such that:

            cpts[i, y1, y2] = P(\lambda_i = y1 | Y = y2)

        Args:
            k: (int) Number of classes
            m: (int) Number of LFs
            bias_diag: (bool) If True, adds a bias (proportional to (k-1)) to
                the diagonal of the randomly generated conditional probability
                tables, to enforce assumption that LFs are better than random
            abstains: (bool) Incorporate abstains

        Outputs:
            C: (np.array) An (m, k, k) tensor, if abstains=False; or, if
                abstains=True, (m, k+1, k)
        """
        # NOTE: the docstring is now a raw string — ``\l`` is an invalid
        # escape sequence in a plain string literal.
        cpts = []
        k_lf = k + 1 if abstains else k
        for i in range(m):
            a = np.random.random((k_lf, k))
            if bias_diag:
                if abstains:
                    a[1:, :] += (k - 1) * np.eye(k)
                else:
                    a += (k - 1) * np.eye(k)
            # Normalize each column into a valid conditional distribution.
            cpts.append(a @ np.diag(1 / a.sum(axis=0)))
        return np.array(cpts)

    def _generate_L(self, p_Y, C, n, abstains=False):
        """Generate a label matrix L, with entries in {0,1,...,k} if
        abstains=True, else in {1,...,k}, given the true class balance, p_Y, and
        a conditional probabilities table C of m cond. ind. LFs"""
        k = len(p_Y)
        m = C.shape[0]
        # Generate true data labels for n data points
        Y = np.random.choice(range(1, k + 1), n, p=p_Y)
        # Generate label matrix L with entries in {0,1,...,k} if abstains=True,
        # else in {1,...,k}
        lf_0 = 0 if abstains else 1
        L = np.zeros((n, m))
        for i, y in enumerate(Y):
            for j in range(m):
                L[i, j] = np.random.choice(range(lf_0, k + 1), p=C[j, :, y - 1])
        return L

    def _test_model(self, model, p_Y, C, O=None, L=None, tol=1e-3, verbose=True):
        """Train the model and assert balance and cond. probs are within tol."""
        model.train_model(O=O, L=L)
        if verbose:
            print(f"True class balance: {p_Y}")
            print(f"Estimated class balance: {model.class_balance}")
        self.assertLess(np.mean(np.abs(p_Y - model.class_balance)), tol)
        self.assertLess(np.mean(np.abs(C - model.cond_probs)), tol)

    def _test_class_balance_estimation(self, k, m, abstains=False, verbose=True):
        """Noise-free recovery test from the exact overlaps tensor O."""
        model = ClassBalanceModel(k, abstains=abstains)
        p_Y = self._generate_class_balance(k)
        C = self._generate_cond_probs(k, m, bias_diag=True, abstains=abstains)
        # Compute O; mask out diagonal entries
        mask = model.get_mask(m)
        O = np.einsum("aby,cdy,efy,y->acebdf", C, C, C, p_Y)
        O = torch.from_numpy(O).float()
        # BUG FIX: was ``O[1 - mask] = 0``; modern PyTorch forbids
        # subtraction on bool tensors.  ``mask == 0`` selects the same
        # entries for both bool and 0/1 byte masks.
        O[mask == 0] = 0
        # Test recovery of the class balance
        self._test_model(model, p_Y, C, O=O)

    def _test_class_balance_estimation_noisy(
        self, k, m, n, abstains=False, verbose=True
    ):
        """Recovery test from a sampled (noisy) label matrix L of n points."""
        model = ClassBalanceModel(k, abstains=abstains)
        p_Y = self._generate_class_balance(k)
        C = self._generate_cond_probs(k, m, bias_diag=True, abstains=abstains)
        # Generate label matrix L
        L = self._generate_L(p_Y, C, n, abstains=abstains)
        # Test recovery of the class balance
        self._test_model(model, p_Y, C, L=L, tol=1e-2)

    def test_class_balance_estimation_2(self):
        self._set_seed(123)
        self._test_class_balance_estimation(2, 25)

    def test_class_balance_estimation_3(self):
        self._set_seed(123)
        self._test_class_balance_estimation(3, 25)

    # Note: This should pass! However, commented out because too slow...
    # def test_class_balance_estimation_5(self):
    #     self._set_seed(123)
    #     self._test_class_balance_estimation(5, 25)

    def test_class_balance_estimation_2_abstains(self):
        self._set_seed(123)
        self._test_class_balance_estimation(2, 25, abstains=True)

    def test_class_balance_estimation_2_noisy(self):
        self._set_seed(123)
        self._test_class_balance_estimation_noisy(2, 25, 10000, abstains=True)
# Allow invoking this test module directly (``python <file>``) instead of
# through a test runner.
if __name__ == "__main__":
    unittest.main()
| [
"torch.manual_seed",
"numpy.abs",
"metal.label_model.class_balance.ClassBalanceModel",
"numpy.eye",
"numpy.random.random",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"numpy.random.seed",
"unittest.main",
"sys.path.append"
] | [((154, 185), 'sys.path.append', 'sys.path.append', (['"""../synthetic"""'], {}), "('../synthetic')\n", (169, 185), False, 'import sys\n'), ((4640, 4655), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4653, 4655), False, 'import unittest\n'), ((275, 298), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (292, 298), False, 'import torch\n'), ((307, 327), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (321, 327), True, 'import numpy as np\n'), ((422, 441), 'numpy.random.random', 'np.random.random', (['k'], {}), '(k)\n', (438, 441), True, 'import numpy as np\n'), ((1616, 1630), 'numpy.array', 'np.array', (['cpts'], {}), '(cpts)\n', (1624, 1630), True, 'import numpy as np\n'), ((2215, 2231), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (2223, 2231), True, 'import numpy as np\n'), ((2891, 2930), 'metal.label_model.class_balance.ClassBalanceModel', 'ClassBalanceModel', (['k'], {'abstains': 'abstains'}), '(k, abstains=abstains)\n', (2908, 2930), False, 'from metal.label_model.class_balance import ClassBalanceModel\n'), ((3149, 3197), 'numpy.einsum', 'np.einsum', (['"""aby,cdy,efy,y->acebdf"""', 'C', 'C', 'C', 'p_Y'], {}), "('aby,cdy,efy,y->acebdf', C, C, C, p_Y)\n", (3158, 3197), True, 'import numpy as np\n'), ((3475, 3514), 'metal.label_model.class_balance.ClassBalanceModel', 'ClassBalanceModel', (['k'], {'abstains': 'abstains'}), '(k, abstains=abstains)\n', (3492, 3514), False, 'from metal.label_model.class_balance import ClassBalanceModel\n'), ((1343, 1370), 'numpy.random.random', 'np.random.random', (['(k_lf, k)'], {}), '((k_lf, k))\n', (1359, 1370), True, 'import numpy as np\n'), ((2683, 2716), 'numpy.abs', 'np.abs', (['(p_Y - model.class_balance)'], {}), '(p_Y - model.class_balance)\n', (2689, 2716), True, 'import numpy as np\n'), ((2756, 2784), 'numpy.abs', 'np.abs', (['(C - model.cond_probs)'], {}), '(C - model.cond_probs)\n', (2762, 2784), True, 'import numpy as np\n'), ((3210, 3229), 
'torch.from_numpy', 'torch.from_numpy', (['O'], {}), '(O)\n', (3226, 3229), False, 'import torch\n'), ((1468, 1477), 'numpy.eye', 'np.eye', (['k'], {}), '(k)\n', (1474, 1477), True, 'import numpy as np\n'), ((1535, 1544), 'numpy.eye', 'np.eye', (['k'], {}), '(k)\n', (1541, 1544), True, 'import numpy as np\n')] |
import torch
import numpy as np
import pandas as pd
import os
import re
import seaborn as sns
import json
import itertools
import pandas as pd
import torch
import pandas_market_calendars as mcal
import datetime
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
class StockDataset(Dataset):
"""Price dataset"""
def __init__(self, company_to_price_df, company_to_tweets, date_universe, n_days, n_stocks, max_tweets):
# Initialize class members
self.n_stocks = n_stocks
self.n_days = n_days
self.max_tweets = max_tweets
self.window = 6
window = self.window
# Build maps
self.company_to_index = {c:i for i,c in enumerate(sorted(list(company_to_tweets.keys())))}
self.date_to_index = {d:i for i,d in enumerate(date_universe)}
self.index_to_date = {i:d for i,d in enumerate(date_universe)}
# Store data
self.company_to_price_df = company_to_price_df
self.company_to_tweets = company_to_tweets
# Get price data tensor: n_stocks, n_days, 3
self.price_data = np.zeros((n_stocks, n_days, 3))
for company in company_to_price_df.keys():
df = company_to_price_df[company]
df.reset_index(inplace=True, drop=True)
# Look up specific rows in DF
for index, row in df.iterrows():
# Grab row with particular date
if index != 0:
d_index = self.date_to_index[row['date']]
c_index = self.company_to_index[company]
self.price_data[c_index, d_index, 0] = row['high'] / prev_close
self.price_data[c_index, d_index, 1] = row['low'] / prev_close
self.price_data[c_index, d_index, 2] = row['close'] / prev_close
prev_close = row['close']
# Which stocks are usable for these dates, shape n_days n_stocks
self.usable_stocks = torch.ones((self.n_days-7, self.n_stocks))
# Labels of shape n_days, n_stocks
self.labels = torch.zeros((self.n_days-7, self.n_stocks))
# Get labels
for i in range(self.n_days-7):
# Day after (for label)
day_after = self.index_to_date[i + window + 1]
# Current day
current_day = self.index_to_date[i + window]
for company in self.company_to_price_df.keys():
df = self.company_to_price_df[company]
# Grab row with particular date
post_row = df.loc[df['date'] == day_after]
row = df.loc[df['date'] == current_day]
c_index = self.company_to_index[company]
if (len(post_row['close']) > 0) and (len(row['close']) > 0):
close = np.zeros((1))
close[0] = post_row['close']
close[0] /= row['close']
if close >= 1.0055:
self.labels[i, c_index] = 1
elif close <= 0.995:
self.labels[i, c_index] = 0
else:
self.usable_stocks[i, c_index] = 0
else:
self.usable_stocks[i, c_index] = 0
def __len__(self):
return self.n_days-7
def __getitem__(self, idx):
"""
gets a price tensor of shape (n_stocks, 6, 3)
gets a smi tensor of shape (n_stocks, 6, K, 512)
"""
if torch.is_tensor(idx):
idx = idx.tolist()
# Size of sliding window
window = self.window
# Current day's usable stocks from price filter
usable_stocks = self.usable_stocks[idx]
# Labels from price day
labels = self.labels[idx]
# Dates that we need to look up
dates_range = [self.index_to_date[i] for i in range(idx + 1, idx + window + 1)]
# Day after (for label)
day_after = self.index_to_date[idx + window + 1]
# Current day
current_day = self.index_to_date[idx + window]
# Get price data tensor: n_stocks, window, 3
price_data = self.price_data[:, idx+1:idx+window+1, :]
# Extract tweets for specific window
smi_data = np.zeros((self.n_stocks, window, self.max_tweets, 512))
tweet_counts = np.zeros((self.n_stocks, window))
for company in self.company_to_tweets.keys():
# Look up tweets from specific days
for date_idx, date in enumerate(dates_range):
n_tweets = 0
tweets = []
c_index = self.company_to_index[company]
if date in self.company_to_tweets[company]:
n_tweets = len(self.company_to_tweets[company][date])
tweets = [self.company_to_tweets[company][date][k]['embedding'] for k in range(n_tweets)]
else:
usable_stocks[c_index] = 0
tweet_counts[c_index, date_idx] = n_tweets
if n_tweets == 0:
usable_stocks[c_index] = 0
for i,embedding in enumerate(tweets):
#stocks, day, lags, tweet, embedding
smi_data[c_index, date_idx, i, :] = embedding[:]
usable_stocks = (usable_stocks == 1)
m_mask = torch.zeros(6, self.n_stocks, self.max_tweets, 1)
for t in range(6):
for i in range(self.n_stocks):
m_mask[t, i, 0:int(round(tweet_counts[i][t])), 0] = 1
price_output = price_data[usable_stocks,:,:]
smi_output = smi_data[usable_stocks,:,:,:]
tweet_count = tweet_counts[usable_stocks,:]
m_mask = m_mask[:,usable_stocks,:,:]
labels = labels[usable_stocks]
# construct output
return price_output, smi_output, tweet_count, usable_stocks, labels, m_mask
| [
"torch.is_tensor",
"numpy.zeros",
"torch.zeros",
"torch.ones"
] | [((1113, 1144), 'numpy.zeros', 'np.zeros', (['(n_stocks, n_days, 3)'], {}), '((n_stocks, n_days, 3))\n', (1121, 1144), True, 'import numpy as np\n'), ((1996, 2040), 'torch.ones', 'torch.ones', (['(self.n_days - 7, self.n_stocks)'], {}), '((self.n_days - 7, self.n_stocks))\n', (2006, 2040), False, 'import torch\n'), ((2109, 2154), 'torch.zeros', 'torch.zeros', (['(self.n_days - 7, self.n_stocks)'], {}), '((self.n_days - 7, self.n_stocks))\n', (2120, 2154), False, 'import torch\n'), ((3530, 3550), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (3545, 3550), False, 'import torch\n'), ((4321, 4376), 'numpy.zeros', 'np.zeros', (['(self.n_stocks, window, self.max_tweets, 512)'], {}), '((self.n_stocks, window, self.max_tweets, 512))\n', (4329, 4376), True, 'import numpy as np\n'), ((4400, 4433), 'numpy.zeros', 'np.zeros', (['(self.n_stocks, window)'], {}), '((self.n_stocks, window))\n', (4408, 4433), True, 'import numpy as np\n'), ((5407, 5456), 'torch.zeros', 'torch.zeros', (['(6)', 'self.n_stocks', 'self.max_tweets', '(1)'], {}), '(6, self.n_stocks, self.max_tweets, 1)\n', (5418, 5456), False, 'import torch\n'), ((2843, 2854), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2851, 2854), True, 'import numpy as np\n')] |
import os
import numpy as np
import matplotlib.pyplot as plt
def func(external_proc_num, internal_proc_num, Matrix):
    """Fill Matrix[e][i] with the output of ``./heatmap.out e i``.

    Runs the external binary once per (external, internal) process-count
    pair and stores its numeric stdout in the corresponding cell.
    """
    for e in range(0, external_proc_num):
        for i in range(0, internal_proc_num):
            cmd = './heatmap.out ' + str(e) + ' ' + str(i)
            so = os.popen(cmd).read()
            # Robustness: convert explicitly instead of relying on numpy's
            # implicit string-to-float cast on assignment; float() tolerates
            # surrounding whitespace and fails with a clear message.
            Matrix[e][i] = float(so)
def draw(Matrix):
    """Display Matrix as a heat map and block until the window is closed."""
    heat_image = plt.imshow(Matrix, interpolation='nearest', cmap='hot')
    plt.show()
if __name__ == "__main__":
    # sweep 4 external x 16 internal process counts
    n_external = 4
    n_internal = 16
    heat = np.zeros((n_external, n_internal))
    func(n_external, n_internal, heat)
    print(heat)
    draw(heat)
| [
"matplotlib.pyplot.imshow",
"numpy.zeros",
"os.popen",
"matplotlib.pyplot.show"
] | [((360, 415), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Matrix'], {'cmap': '"""hot"""', 'interpolation': '"""nearest"""'}), "(Matrix, cmap='hot', interpolation='nearest')\n", (370, 415), True, 'import matplotlib.pyplot as plt\n'), ((417, 427), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (425, 427), True, 'import matplotlib.pyplot as plt\n'), ((501, 537), 'numpy.zeros', 'np.zeros', (['(ex_proc_num, in_proc_num)'], {}), '((ex_proc_num, in_proc_num))\n', (509, 537), True, 'import numpy as np\n'), ((284, 297), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (292, 297), False, 'import os\n')] |
"""
Copyright 2013 <NAME>
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.atom import Atom
import scipy.sparse as sp
from numpy import linalg as LA
import numpy as np
class sigma_max(Atom):
    """Spectral-norm atom: the largest singular value of a matrix."""

    _allow_complex = True

    def __init__(self, A):
        super(sigma_max, self).__init__(A)

    @Atom.numpy_numeric
    def numeric(self, values):
        """Numerically evaluate the largest singular value of the argument."""
        return LA.norm(values[0], 2)

    def _grad(self, values):
        """Gives the (sub/super)gradient of the atom w.r.t. each argument.

        Matrix expressions are vectorized, so the gradient is a matrix:
        it is built as U diag(e_1) V.T from the SVD of the argument.

        Args:
            values: A list of numeric values for the arguments.

        Returns:
            A list of SciPy CSC sparse matrices or None.
        """
        left, sing_vals, right = LA.svd(values[0])
        selector = np.zeros(len(sing_vals))
        selector[0] = 1
        grad_mat = left.dot(np.diag(selector)).dot(right)
        # Vectorize column-major to match expression vectorization.
        return [sp.csc_matrix(grad_mat.ravel(order='F')).T]

    def shape_from_args(self):
        """The atom is scalar-valued, so the shape is the empty tuple."""
        return ()

    def sign_from_args(self):
        """Singular values are nonnegative: (is positive, is negative)."""
        return (True, False)

    def is_atom_convex(self):
        """The maximum singular value is a convex function."""
        return True

    def is_atom_concave(self):
        """The atom is not concave."""
        return False

    def is_incr(self, idx):
        """Not monotone nondecreasing in its argument."""
        return False

    def is_decr(self, idx):
        """Not monotone nonincreasing in its argument."""
        return False
| [
"numpy.linalg.svd",
"numpy.diag",
"numpy.linalg.norm"
] | [((1055, 1076), 'numpy.linalg.norm', 'LA.norm', (['values[0]', '(2)'], {}), '(values[0], 2)\n', (1062, 1076), True, 'from numpy import linalg as LA\n'), ((1471, 1488), 'numpy.linalg.svd', 'LA.svd', (['values[0]'], {}), '(values[0])\n', (1477, 1488), True, 'from numpy import linalg as LA\n'), ((1555, 1566), 'numpy.diag', 'np.diag', (['ds'], {}), '(ds)\n', (1562, 1566), True, 'import numpy as np\n')] |
import numpy as np
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.model_selection import KFold
from sklearn.feature_selection import SelectPercentile, mutual_info_classif
import sys
from pathlib import Path
sys.path[0] = str(Path(sys.path[0]).parent)
from metrics import metrics, meanMetrics, stdMetrics, printMetrics
import time
def select_features(classifier, n_features, fwd, fltg):
    """Build a SequentialFeatureSelector for the given classifier.

    Uses accuracy scoring with 10-fold CV and all available cores;
    ``fwd``/``fltg`` select forward/floating search.
    """
    selector = SFS(
        classifier,
        k_features=n_features,
        forward=fwd,
        floating=fltg,
        verbose=1,
        scoring='accuracy',
        cv=10,
        n_jobs=-1,
    )
    return selector
def select_features_number(classifier, number_features, fwd, fltg, X, Y):
    """Run SFS feature selection, then evaluate with 10-fold CV.

    Args:
        classifier: estimator used both for selection and evaluation.
        number_features: number of features SFS should keep.
        fwd, fltg: forward/floating flags passed through to SFS.
        X, Y: feature matrix and labels.

    Returns:
        The fitted SequentialFeatureSelector.
    """
    tiempo_i = time.time()
    Errores = np.ones(10)
    Metrics = np.zeros((10,5))
    j = 0
    kf = KFold(n_splits=10)
    clf = classifier
    sf = select_features(clf, number_features, fwd, fltg)
    sf = sf.fit(X, Y)
    X_sf = sf.transform(X)
    for train_index, test_index in kf.split(X_sf):
        X_train, X_test = X_sf[train_index], X_sf[test_index]
        y_train, y_test = Y[train_index], Y[test_index]
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        # PERF FIX: compute the fold metrics once (the original called
        # metrics() twice per fold: once for the error, once for the row).
        fold_metrics = metrics(y_test, y_pred)
        Errores[j] = 1 - fold_metrics[0]
        Metrics[j,:] = fold_metrics
        j+=1
    print("\nError de validación aplicando SFS: "+str(np.mean(Errores))+"+/-"+str(np.std(Errores)))
    print("\nEficiencia en validación aplicando SFS: "+str((1-np.mean(Errores))*100)+"%")
    print("\nTiempo total de ejecución: "+str(time.time()-tiempo_i)+" segundos.")
    MetricsMean = meanMetrics(Metrics)
    MetricsStd = stdMetrics(Metrics)
    printMetrics(MetricsMean)
    print("\nDesviaciones Estandard")
    printMetrics(MetricsStd)
    return sf
def select_features_filter_percentage(classifier, percentage, X, Y):
    """Select top features by mutual information, then 10-fold CV evaluate.

    Args:
        classifier: estimator used for evaluation.
        percentage: percentile of features to keep (SelectPercentile).
        X, Y: feature matrix and labels.

    Returns:
        The fitted SelectPercentile filter.
    """
    tiempo_i = time.time()
    Errores = np.ones(10)
    Metrics = np.zeros((10,5))
    j = 0
    kf = KFold(n_splits=10)
    filter_method = SelectPercentile(mutual_info_classif, percentile=percentage)
    filter_method.fit(X,Y)
    X_sf = filter_method.transform(X)
    for train_index, test_index in kf.split(X_sf):
        X_train, X_test = X_sf[train_index], X_sf[test_index]
        y_train, y_test = Y[train_index], Y[test_index]
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        # PERF FIX: compute the fold metrics once (the original called
        # metrics() twice per fold: once for the row, once for the error).
        fold_metrics = metrics(y_test, y_pred)
        Metrics[j,:] = fold_metrics
        Errores[j] = 1 - fold_metrics[0]
        j+=1
    print("\nError de validación aplicando at "+str(percentage)+"%: "+str(np.mean(Errores))+"+/-"+str(np.std(Errores)))
    print("\nEficiencia en validación aplicando at "+str(percentage)+"%: "+str((1-np.mean(Errores))*100)+"%")
    print("\nTiempo total de ejecución: "+str(time.time()-tiempo_i)+" segundos.")
    MetricsMean = meanMetrics(Metrics)
    MetricsStd = stdMetrics(Metrics)
    printMetrics(MetricsMean)
    print("\nDesviaciones Estandard")
    printMetrics(MetricsStd)
    return filter_method
| [
"numpy.mean",
"numpy.ones",
"metrics.meanMetrics",
"pathlib.Path",
"metrics.stdMetrics",
"mlxtend.feature_selection.SequentialFeatureSelector",
"numpy.zeros",
"metrics.printMetrics",
"sklearn.feature_selection.SelectPercentile",
"numpy.std",
"metrics.metrics",
"sklearn.model_selection.KFold",
... | [((438, 558), 'mlxtend.feature_selection.SequentialFeatureSelector', 'SFS', (['classifier'], {'k_features': 'n_features', 'forward': 'fwd', 'floating': 'fltg', 'verbose': '(1)', 'scoring': '"""accuracy"""', 'cv': '(10)', 'n_jobs': '(-1)'}), "(classifier, k_features=n_features, forward=fwd, floating=fltg, verbose=\n 1, scoring='accuracy', cv=10, n_jobs=-1)\n", (441, 558), True, 'from mlxtend.feature_selection import SequentialFeatureSelector as SFS\n'), ((716, 727), 'time.time', 'time.time', ([], {}), '()\n', (725, 727), False, 'import time\n'), ((743, 754), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (750, 754), True, 'import numpy as np\n'), ((769, 786), 'numpy.zeros', 'np.zeros', (['(10, 5)'], {}), '((10, 5))\n', (777, 786), True, 'import numpy as np\n'), ((805, 823), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (810, 823), False, 'from sklearn.model_selection import KFold\n'), ((1618, 1638), 'metrics.meanMetrics', 'meanMetrics', (['Metrics'], {}), '(Metrics)\n', (1629, 1638), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((1656, 1675), 'metrics.stdMetrics', 'stdMetrics', (['Metrics'], {}), '(Metrics)\n', (1666, 1675), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((1681, 1706), 'metrics.printMetrics', 'printMetrics', (['MetricsMean'], {}), '(MetricsMean)\n', (1693, 1706), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((1749, 1773), 'metrics.printMetrics', 'printMetrics', (['MetricsStd'], {}), '(MetricsStd)\n', (1761, 1773), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((1874, 1885), 'time.time', 'time.time', ([], {}), '()\n', (1883, 1885), False, 'import time\n'), ((1901, 1912), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1908, 1912), True, 'import numpy as np\n'), ((1927, 1944), 'numpy.zeros', 'np.zeros', (['(10, 5)'], {}), '((10, 5))\n', (1935, 1944), True, 
'import numpy as np\n'), ((1963, 1981), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (1968, 1981), False, 'from sklearn.model_selection import KFold\n'), ((2003, 2063), 'sklearn.feature_selection.SelectPercentile', 'SelectPercentile', (['mutual_info_classif'], {'percentile': 'percentage'}), '(mutual_info_classif, percentile=percentage)\n', (2019, 2063), False, 'from sklearn.feature_selection import SelectPercentile, mutual_info_classif\n'), ((2839, 2859), 'metrics.meanMetrics', 'meanMetrics', (['Metrics'], {}), '(Metrics)\n', (2850, 2859), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((2877, 2896), 'metrics.stdMetrics', 'stdMetrics', (['Metrics'], {}), '(Metrics)\n', (2887, 2896), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((2902, 2927), 'metrics.printMetrics', 'printMetrics', (['MetricsMean'], {}), '(MetricsMean)\n', (2914, 2927), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((2970, 2994), 'metrics.printMetrics', 'printMetrics', (['MetricsStd'], {}), '(MetricsStd)\n', (2982, 2994), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((263, 280), 'pathlib.Path', 'Path', (['sys.path[0]'], {}), '(sys.path[0])\n', (267, 280), False, 'from pathlib import Path\n'), ((1285, 1308), 'metrics.metrics', 'metrics', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1292, 1308), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((2412, 2435), 'metrics.metrics', 'metrics', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2419, 2435), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((1236, 1259), 'metrics.metrics', 'metrics', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1243, 1259), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((1406, 1421), 'numpy.std', 'np.std', (['Errores'], {}), '(Errores)\n', 
(1412, 1421), True, 'import numpy as np\n'), ((2459, 2482), 'metrics.metrics', 'metrics', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2466, 2482), False, 'from metrics import metrics, meanMetrics, stdMetrics, printMetrics\n'), ((2607, 2622), 'numpy.std', 'np.std', (['Errores'], {}), '(Errores)\n', (2613, 2622), True, 'import numpy as np\n'), ((1378, 1394), 'numpy.mean', 'np.mean', (['Errores'], {}), '(Errores)\n', (1385, 1394), True, 'import numpy as np\n'), ((1560, 1571), 'time.time', 'time.time', ([], {}), '()\n', (1569, 1571), False, 'import time\n'), ((2579, 2595), 'numpy.mean', 'np.mean', (['Errores'], {}), '(Errores)\n', (2586, 2595), True, 'import numpy as np\n'), ((2781, 2792), 'time.time', 'time.time', ([], {}), '()\n', (2790, 2792), False, 'import time\n'), ((1486, 1502), 'numpy.mean', 'np.mean', (['Errores'], {}), '(Errores)\n', (1493, 1502), True, 'import numpy as np\n'), ((2707, 2723), 'numpy.mean', 'np.mean', (['Errores'], {}), '(Errores)\n', (2714, 2723), True, 'import numpy as np\n')] |
import numpy as np
# SI-style unit prefixes used by val() and lst(): positive exponents walk
# forward from 'k' (kilo); negative exponents index from the end of the
# string via Python's negative indexing ('m' milli, 'μ' micro, ...).
unitPrefixes = "kMGTPEZYyzafpnμm"
# Box-drawing characters used by tbl(): vertical, horizontal, cross, the
# four corners and the four T-junctions.  Point tableChars at
# doubleFrameChars to render tables with double-line frames instead.
singleFrameChars = ['│', '─', '┼', '┌', '┐', '└', '┘', '├', '┬', '┤' ,'┴']
doubleFrameChars = ['║', '═', '╬', '╔', '╗', '╚', '╝', '╠', '╦', '╣', '╩']
tableChars = singleFrameChars
def sigval(val, err, fix_mul3=True, fix_exp=None, manual_digits=3):
    """
    Converts a value and its error to the appropriate significant digits.
    Moreover the exponent of the two values gets returned as one mutual exponent string respecting the chosen convention.
    Parameters
    ----------
    val: float
        The value to convert
    err: float
        The uncertainty of val, must be non-negative
    fix_mul3: bool
        If True fixes the exponent to a multiple of 3
    fix_exp: int
        If specified fixes the exponent string to that value
    manual_digits: int
        If specified sets the digits of val and err, if they have to be set manually
    Returns
    -------
    valstr: string
        Formatted string of value respecting the uncertainty
    errstr: string
        Formatted string of the uncertainty (empty if err == 0)
    expstr: string
        Formatted string of the exponent
    """
    if not isinstance(val, float) and not isinstance(val, int):
        raise TypeError('type of val must be a number.')
    if not isinstance(err, float) and not isinstance(err, int):
        raise TypeError('type of err must be a number.')
    if not isinstance(fix_mul3, bool):
        # Fixed typo in the original message ('fix_mul3 be bool').
        raise TypeError('type of fix_mul3 must be bool.')
    if fix_exp is not None and not isinstance(fix_exp, int):
        raise TypeError('type of fix_exp must be int.')
    if not isinstance(manual_digits, int):
        raise TypeError('type of manual_digits must be int.')
    if err < 0.0:
        raise TypeError('err must be non-negative.')
    if manual_digits < 0:
        raise TypeError('manual_digits must be non-negative.')
    def exp10(x):
        # Base-10 exponent of x (0 for x == 0 to avoid log10(0)).
        if x == 0.0:
            return 0
        else:
            return int(np.floor(np.log10(abs(x))))
    def sig_digit(x, e):
        # Leading (most significant) digit of x given its exponent e.
        return int(x // (10**e))
    val_exp = exp10(val)
    # Choose the common exponent: explicit > multiple-of-3 > natural.
    if fix_exp is not None:
        exp_fix = fix_exp
    elif fix_mul3:
        exp_fix = 3 * (val_exp // 3)
    else:
        exp_fix = val_exp
    if err == 0.0:
        # No uncertainty: fall back to manual_digits significant digits.
        val_mant = val * 10**-exp_fix
        digits = manual_digits - (val_exp - exp_fix)
        digits = max(digits, 0)
        val_str = ('{:.' + str(digits) + 'f}').format(val_mant)
        exp_str = '{:d}'.format(exp_fix)
        return val_str, '', exp_str
    err_exp = exp10(err)
    err_sig_digit = sig_digit(err, err_exp)
    # Convention: uncertainties with leading digit 1 or 2 get a second digit.
    two_digits = err_sig_digit == 1 or err_sig_digit == 2
    if err > abs(val):
        # Uncertainty dominates: keep manual_digits relative to the value.
        val_mant = val * 10**-exp_fix
        err_mant = err * 10**-exp_fix
        digits = val_exp - exp_fix + manual_digits
        digits = max(digits, 0)
        val_str = ('{:.' + str(digits) + 'f}').format(val_mant)
        err_str = ('{:.' + str(digits) + 'f}').format(err_mant)
        exp_str = '{:d}'.format(exp_fix)
        return val_str, err_str, exp_str
    else:
        # Normal case: digits derived from the gap between value and error.
        val_mant = val * 10**-exp_fix
        err_mant = err * 10**-exp_fix
        digits = (val_exp - err_exp) - (val_exp - exp_fix) + two_digits
        digits = max(digits, 0)
        val_str = ('{:.' + str(digits) + 'f}').format(val_mant)
        err_str = ('{:.' + str(digits) + 'f}').format(err_mant)
        exp_str = '{:d}'.format(exp_fix)
        return val_str, err_str, exp_str
def val(val, err=0.0, syserr=0.0, name='', unit='', prefix=True, exp_to_fix=None):
    """
    Returns the scientific format 'name = (val ± err)e+exp' of the given defective value.
    Parameters
    ----------
    val: float
        Value to format
    err: float
        Statistical uncertainty of the value
    syserr: float
        Systematic uncertainty; requires err to be given as well
    name: string
        Name of the value
    unit: string
        Unit appended to the output; combined with an SI prefix when
        prefix is True
    prefix: bool
        If True use SI unit prefixes (from unitPrefixes) instead of an
        explicit exponent
    exp_to_fix: int
        If specified, fixes the exponent of the output
    Returns
    -------
    valstr: string
        'name = (val ± err)e+exp' with the appropriate significant digits
    """
    if syserr != 0.0 and err == 0.0:
        raise TypeError('If syserr is specified one must also specify err')
    if err < 0.0:
        raise TypeError('The Uncertainty must be greater than zero')
    if abs(val) < err:
        print('Warning: The Uncertainty is greater than the value itself.')
    out = ''
    if name != '':
        out += name + ' = '
    syserrstr = None
    if syserr != 0.0:
        # Derive the significant digits from the larger uncertainty and
        # format the smaller one against the same fixed exponent.
        if syserr > err:
            valstr, syserrstr, expstr = sigval(val, syserr, unit != '' and prefix, exp_to_fix)
            exp = int(expstr)
            _, errstr, _ = sigval(val, err, True, exp)
        else:
            valstr, errstr, expstr = sigval(val, err, unit != '' and prefix, exp_to_fix)
            exp = int(expstr)
            _, syserrstr, _ = sigval(val, syserr, True, exp)
    else:
        valstr, errstr, expstr = sigval(val, err, unit != '' and prefix, exp_to_fix)
    # Parenthesise 'val ± err' whenever an exponent or a unit follows.
    needs_parens = err != 0.0 and (expstr[0] != '0' or unit != '')
    if needs_parens:
        out += '('
    out += valstr
    if err != 0.0:
        out += ' ± ' + errstr
    if syserr != 0.0:
        out += ' stat. ± ' + syserrstr + ' syst.'
    if needs_parens:
        out += ')'
    if expstr[0] != '0':
        exp = int(expstr)
        if unit != '' and prefix and abs(exp) <= 3 * len(unitPrefixes) / 2:
            # Positive exponents start at 'k'; negative exponents index
            # unitPrefixes from its end ('m', 'μ', ...).
            p = exp // 3
            if p > 0:
                p -= 1
            out += ' ' + unitPrefixes[p] + unit
        else:
            out += 'e' + expstr
            if unit != '':
                out += ' ' + unit
    elif unit != '':
        # Bug fix: the original unconditionally appended ' ' + unit here,
        # leaving a lone trailing space for unitless zero-exponent values.
        out += ' ' + unit
    return out
def lst(val, err=None, name='', unit='', prefix=True, exp_to_fix=None):
    """
    Formats a list of defective values with significant digits.
    Parameters
    ----------
    val: array of floats with length N
    err: array of floats with length N, uncertainties of val
        (defaults to zeros when omitted or empty)
    name: string, name of the list, used as the column title
    unit: string, unit of the values
    prefix: bool, use SI unit prefixes instead of exponent notation
    exp_to_fix: int, fixes the exponent
        NOTE(review): when prefix is True the most frequent exponent
        overrides a caller-supplied exp_to_fix — confirm this is intended.
    Returns
    -------
    array of strings, format "val[i] ± err[i]" with significant digits,
    preceded by a centred title line
    """
    N = len(val)
    # Use zeros in case of missing err.  Bug fix: the original signature
    # used the mutable default `err=[]` and compared with `err == []`,
    # which is unreliable for numpy arrays and tuples.
    if err is None or len(err) == 0:
        err = [0.0 for i in range(N)]
    # Use most frequent exponent (multiple of 3)
    lstExp = exp_to_fix
    if exp_to_fix is None or prefix:
        exps = np.zeros(N)
        for i in range(N):
            _, _, exps[i] = sigval(val[i], err[i], fix_mul3=True)
        exps, counts = np.unique(exps, return_counts=True)
        lstExp = int(exps[np.argmax(counts)])
    # Determine maximal val and err lengths
    valmaxlen = 0
    errmaxlen = 0
    for i in range(N):
        tmp = sigval(val[i], err[i], fix_mul3=True, fix_exp=lstExp)
        if (len(tmp[0]) > valmaxlen):
            valmaxlen = len(tmp[0])
        if (len(tmp[1]) > errmaxlen):
            errmaxlen = len(tmp[1])
    # Column is 'val ± err' (the separator is 3 chars) or just 'val'.
    colWidth = valmaxlen + errmaxlen + 3 if errmaxlen > 0 else valmaxlen
    # Create title, center title and write to out
    out = []
    title = ''
    if name != '':
        title += name
    if unit != '' and lstExp != 0:
        title += ' / '
        if prefix:
            # Positive exponents start at 'k'; negative ones index
            # unitPrefixes from its end.
            p = lstExp // 3
            uPrefix = ''
            if p > 0:
                uPrefix = unitPrefixes[p - 1]
            elif p < 0:
                uPrefix = unitPrefixes[p]
            title += uPrefix + unit
        else:
            title += '(' + 'e' + str(lstExp) + ' ' + unit + ')'
    elif unit != '':
        title += ' / ' + unit
    elif lstExp != 0:
        title += ' / ' + 'e' + str(lstExp)
    colWidth = max(colWidth, len(title))
    adjust = (colWidth + len(title)) // 2
    out.append(title.rjust(adjust))
    # Write and adjust value error strings to out
    for i in range(len(val)):
        tmp = sigval(val[i], err[i], fix_mul3=True, fix_exp=lstExp)
        entry = tmp[0].rjust(valmaxlen)
        if (tmp[1] != ''):
            entry += ' ± ' + tmp[1].ljust(errmaxlen)
        elif (errmaxlen != 0):
            # Pad error-less entries so the column stays aligned.
            entry += ''.ljust(errmaxlen + 3)
        adjust = (colWidth + len(entry)) // 2
        out.append(entry.rjust(adjust))
    return out
def tbl(lists, name='', endl=True):
    """
    Renders columns of pre-formatted strings as a framed text table.
    Parameters
    lists: array of rowarrays with length N, which should be arrays with length M of the column strings
        (each inner array is one column: its first entry is the title,
        the remaining entries are the rows)
    name: string, which is added before the table as a subtitle
    endl: bool, append a trailing newline to the result
    ----------
    Returns
    string of the MxN array
    """
    out = ''
    # Width of each column = longest string in it (titles included).
    colWidths = [max([len(lists[i][j]) for j in range(len(lists[i]))]) for i in range(len(lists))]
    titles = [lists[i][0] for i in range(len(lists))]
    cols = [lists[i][1:] for i in range(len(lists))]
    nRows = len(cols[0])
    nCols = len(cols)
    # Print column titles
    for i in range(len(titles) - 1):
        out += titles[i].ljust(colWidths[i]) + ' ' + tableChars[0] + ' '
    out += titles[-1].ljust(colWidths[-1]) + '\n'
    # Print crossbar
    for i in range(len(titles) - 1):
        out += tableChars[1] * colWidths[i] + tableChars[1] + tableChars[2] + tableChars[1]
    out += tableChars[1] * colWidths[-1] + tableChars[1] + '\n'
    # Print tabular rows, by column entries
    for j in range(nRows - 1):
        for i in range(nCols - 1):
            out += cols[i][j].ljust(colWidths[i]) + ' ' + tableChars[0] + ' '
        out += cols[-1][j].ljust(colWidths[-1]) + '\n'
    # Last row without the trailing newline.
    for i in range(nCols - 1):
        out += cols[i][-1].ljust(colWidths[i]) + ' ' + tableChars[0] + ' '
    out += cols[-1][-1].ljust(colWidths[-1])
    # Connect extra column, which might be generated by dev:
    # wherever a vertical bar sits below a plain horizontal bar, upgrade
    # that bar to a T-junction (tableChars[8]) so the frame stays connected.
    rows = out.split('\n')
    for i in range(1, len(rows)):
        inds = [s for s in range(len(rows[i])) if rows[i][s] == tableChars[0]]
        for s in inds:
            upperRow = list(rows[i - 1])
            if upperRow[s] == tableChars[1]:
                upperRow[s] = tableChars[8]
            rows[i - 1] = ''.join(upperRow)
    # Reassemble the (possibly patched) rows.
    out = ''
    for i in range(len(rows) - 1):
        out += rows[i] + '\n'
    out += rows[-1]
    # Print subtitle
    if (name != ''):
        out += '\n' + name
    return out + ('\n' if endl else '')
def dev(val1, err1, val2, err2=0.0, name='', perc=False):
    """
    Returns the deviation between two defective values in units of sigma.

    The uncertainties are combined in quadrature; for array inputs a list
    of centred strings (table column) is produced instead of a single
    string.

    Parameters
    ----------
    val1, val2: float or np.ndarray
        The two values to compare
    err1, err2: float or np.ndarray
        Uncertainties of val1 and val2
    name: string
        Optional label: prefix (scalar case) or column title (array case)
    perc: bool
        If True also output the percental deviation relative to val2

    Returns
    -------
    string for scalar inputs, list of strings for array inputs
    """
    # Returns deviation string
    def get_dev(nom, denom):
        if (nom == 0.0):
            sigstr = '0'
        elif (denom == 0.0):
            # Zero combined uncertainty: deviation is infinite.
            sigstr = '∞ '
        else:
            sigma = nom / denom
            # Digits scale with how far sigma is below ~1; one digit up to
            # ~4 sigma, none beyond.
            if (sigma < 0.95):
                digits = int(abs(np.floor(np.log10(sigma))))
            elif (sigma < 3.95):
                digits = 1
            else:
                digits = 0
            sigstr = '{:.{digits}f}'.format(sigma,digits=digits)
        sigstr += 'σ'
        return sigstr
    # Returns percental deviation string
    def get_perc(val1,val2,pformat='{:.2f}'):
        percval = abs(val1 - val2) / abs(val2) * 100
        percstr = pformat.format(percval) + '%'
        return percstr
    # Gets deviation of the difference from zero
    out = None
    nom = abs(val1 - val2)
    denom = np.sqrt(err1**2 + err2**2)
    if type(nom) is np.ndarray or type(denom) is np.ndarray:
        # Reconcile argument types: broadcast every scalar argument to an
        # array of length N so the per-element loops below work uniformly.
        out = []
        N = len(val1)
        if type(val1) is not np.ndarray:
            val1 = np.array([val1] * N)
        if type(val2) is not np.ndarray:
            val2 = np.array([val2] * N)
        if type(err1) is not np.ndarray:
            err1 = np.array([err1] * N)
        if type(err2) is not np.ndarray:
            err2 = np.array([err2] * N)
        if type(nom) is not np.ndarray:
            nom = np.array([nom] * N)
        if type(denom) is not np.ndarray:
            denom = np.array([denom] * N)
        if perc:
            # Get deviation and percental deviation strings and determine max lengths
            devs = []
            percs = []
            devmaxlen = 0
            percmaxlen = 0
            for i in range(N):
                # Get deviation string
                devs.append(get_dev(nom[i], denom[i]))
                siglen = len(devs[i])
                if (siglen > devmaxlen):
                    devmaxlen = siglen
                # Get percental deviation string
                percs.append(get_perc(val1[i], val2[i]))
                perclen = len(percs[i])
                if (perclen > percmaxlen):
                    percmaxlen = perclen
            # Two sub-columns separated by ' <bar> ' (3 characters wide).
            colWidth = devmaxlen + 3 + percmaxlen if percmaxlen > 0 else devmaxlen
            if (name != ''):
                # Center name and write to out
                colWidth = max(colWidth, len(name))
                adjust = (colWidth + len(name)) // 2
                out.append(name.rjust(adjust))
            for i in range(N):
                # Center entry and write to out
                entry = devs[i].rjust(devmaxlen) + ' ' + tableChars[0] + ' ' + percs[i].rjust(percmaxlen)
                adjust = (colWidth + len(entry)) // 2
                out.append(entry.rjust(adjust))
        else:
            devs = []
            devmaxlen = 0
            for i in range(N):
                # Get deviation string
                devs.append(get_dev(nom[i], denom[i]))
                siglen = len(devs[i])
                if (siglen > devmaxlen):
                    devmaxlen = siglen
            colWidth = devmaxlen
            if (name != ''):
                # Center name and write to out
                colWidth = max(colWidth, len(name))
                adjust = (colWidth + len(name)) // 2
                out.append(name.rjust(adjust))
            for i in range(N):
                # Center entry and write to out
                out.append(get_dev(nom[i], denom[i]).rjust(colWidth))
    else:
        # Scalar case: single 'name: Xσ [≙ Y%]' string.
        out = ''
        prefix = ''
        if (name != ''):
            prefix = name + ': '
        out += prefix + get_dev(nom, denom)
        if perc:
            out += ' ≙ ' + get_perc(val1, val2, pformat='{:.2g}')
    return out
| [
"numpy.log10",
"numpy.sqrt",
"numpy.unique",
"numpy.argmax",
"numpy.array",
"numpy.zeros"
] | [((9813, 9843), 'numpy.sqrt', 'np.sqrt', (['(err1 ** 2 + err2 ** 2)'], {}), '(err1 ** 2 + err2 ** 2)\n', (9820, 9843), True, 'import numpy as np\n'), ((5622, 5633), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (5630, 5633), True, 'import numpy as np\n'), ((5736, 5771), 'numpy.unique', 'np.unique', (['exps'], {'return_counts': '(True)'}), '(exps, return_counts=True)\n', (5745, 5771), True, 'import numpy as np\n'), ((10011, 10031), 'numpy.array', 'np.array', (['([val1] * N)'], {}), '([val1] * N)\n', (10019, 10031), True, 'import numpy as np\n'), ((10082, 10102), 'numpy.array', 'np.array', (['([val2] * N)'], {}), '([val2] * N)\n', (10090, 10102), True, 'import numpy as np\n'), ((10153, 10173), 'numpy.array', 'np.array', (['([err1] * N)'], {}), '([err1] * N)\n', (10161, 10173), True, 'import numpy as np\n'), ((10224, 10244), 'numpy.array', 'np.array', (['([err2] * N)'], {}), '([err2] * N)\n', (10232, 10244), True, 'import numpy as np\n'), ((10293, 10312), 'numpy.array', 'np.array', (['([nom] * N)'], {}), '([nom] * N)\n', (10301, 10312), True, 'import numpy as np\n'), ((10365, 10386), 'numpy.array', 'np.array', (['([denom] * N)'], {}), '([denom] * N)\n', (10373, 10386), True, 'import numpy as np\n'), ((5794, 5811), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (5803, 5811), True, 'import numpy as np\n'), ((9330, 9345), 'numpy.log10', 'np.log10', (['sigma'], {}), '(sigma)\n', (9338, 9345), True, 'import numpy as np\n')] |
# Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset for iterating over MIDI files."""
from __future__ import print_function
import numpy as np
import pretty_midi
import tensorflow as tf
def piano_roll_sequences(filenames, batch_size, sequence_size, rate=100):
    """Returns a dataset of piano roll sequences from the given files.

    Args:
        filenames: iterable of MIDI file path strings.
        batch_size: number of sequences per batch.
        sequence_size: number of piano-roll time steps per sequence.
        rate: piano-roll sampling rate (columns per second).

    Returns:
        A repeating, shuffled tf.data.Dataset of boolean batches shaped
        [batch_size, sequence_size, 128].
    """
    def _to_piano_roll(filename, sequence_size):
        """Load a file and return consecutive piano roll sequences."""
        try:
            midi = pretty_midi.PrettyMIDI(tf.compat.as_text(filename))
        except Exception:
            print("Skipping corrupt MIDI file", filename)
            # Use the builtin `bool`: the `np.bool` alias was deprecated in
            # NumPy 1.20 and removed in 1.24.
            return np.zeros([0, sequence_size, 128], dtype=bool)
        roll = np.asarray(midi.get_piano_roll(rate).transpose(), dtype=bool)
        assert roll.shape[1] == 128
        # Pad the roll to a multiple of sequence_size
        length = len(roll)
        remainder = length % sequence_size
        if remainder:
            new_length = length + sequence_size - remainder
            roll = np.resize(roll, (new_length, 128))
            roll[length:, :] = False
            length = new_length
        return np.reshape(roll, (length // sequence_size, sequence_size, 128))

    def _to_piano_roll_dataset(filename):
        """Filename (string scalar) -> Dataset of piano roll sequences."""
        sequences, = tf.py_func(_to_piano_roll,
                                [filename, sequence_size],
                                [tf.bool])
        sequences.set_shape([None, None, 128])
        return tf.data.Dataset.from_tensor_slices(sequences)

    batch_size = tf.to_int64(batch_size)
    # Interleave several files so batches mix sequences from many sources.
    return (tf.data.Dataset.from_tensor_slices(filenames)
            .interleave(_to_piano_roll_dataset,
                        cycle_length=batch_size * 5,
                        block_length=1)
            .repeat()
            .shuffle(1000)
            .batch(batch_size))
def piano_roll_to_midi(piano_roll, sample_rate):
    """Convert the piano roll to a PrettyMIDI object.
    See: http://github.com/craffel/examples/reverse_pianoroll.py

    Args:
        piano_roll: 2-D array [time, 128] where non-zero entries mark
            active pitches.
        sample_rate: piano-roll columns per second.

    Returns:
        A pretty_midi.PrettyMIDI object with a single instrument.
    """
    midi = pretty_midi.PrettyMIDI()
    instrument = pretty_midi.Instrument(0)
    midi.instruments.append(instrument)
    # Pad with a silent column on both sides so note transitions at the
    # very start/end of the roll show up in the diff below.
    padded_roll = np.pad(piano_roll, [(1, 1), (0, 0)], mode='constant')
    changes = np.diff(padded_roll, axis=0)
    # notes[pitch] holds the start tick of a sounding note, or -1 if silent.
    # Use the builtin `int`: the `np.int` alias was deprecated in NumPy 1.20
    # and removed in 1.24.
    notes = np.full(piano_roll.shape[1], -1, dtype=int)
    for tick, pitch in zip(*np.where(changes)):
        prev = notes[pitch]
        if prev == -1:
            # Note-on: remember where this pitch started.
            notes[pitch] = tick
            continue
        # Note-off: emit the completed note.
        notes[pitch] = -1
        instrument.notes.append(pretty_midi.Note(
            velocity=100,
            pitch=pitch,
            start=prev / float(sample_rate),
            end=tick / float(sample_rate)))
    return midi
def write_test_note(path, duration, note):
    """Write a MIDI file containing a single test note.

    Args:
        path: output file path.
        duration: note length in seconds.
        note: MIDI pitch number of the note.
    """
    single_note = pretty_midi.Note(100, note, 0.0, duration)
    instrument = pretty_midi.Instrument(0)
    instrument.notes.append(single_note)
    midi = pretty_midi.PrettyMIDI()
    midi.instruments.append(instrument)
    midi.write(path)
| [
"numpy.reshape",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.where",
"numpy.diff",
"tensorflow.to_int64",
"pretty_midi.Note",
"pretty_midi.PrettyMIDI",
"pretty_midi.Instrument",
"numpy.resize",
"numpy.zeros",
"tensorflow.py_func",
"numpy.full",
"numpy.pad",
"tensorflow.compat.as_t... | [((2152, 2175), 'tensorflow.to_int64', 'tf.to_int64', (['batch_size'], {}), '(batch_size)\n', (2163, 2175), True, 'import tensorflow as tf\n'), ((2645, 2669), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', ([], {}), '()\n', (2667, 2669), False, 'import pretty_midi\n'), ((2687, 2712), 'pretty_midi.Instrument', 'pretty_midi.Instrument', (['(0)'], {}), '(0)\n', (2709, 2712), False, 'import pretty_midi\n'), ((2771, 2824), 'numpy.pad', 'np.pad', (['piano_roll', '[(1, 1), (0, 0)]'], {'mode': '"""constant"""'}), "(piano_roll, [(1, 1), (0, 0)], mode='constant')\n", (2777, 2824), True, 'import numpy as np\n'), ((2839, 2867), 'numpy.diff', 'np.diff', (['padded_roll'], {'axis': '(0)'}), '(padded_roll, axis=0)\n', (2846, 2867), True, 'import numpy as np\n'), ((2880, 2926), 'numpy.full', 'np.full', (['piano_roll.shape[1]', '(-1)'], {'dtype': 'np.int'}), '(piano_roll.shape[1], -1, dtype=np.int)\n', (2887, 2926), True, 'import numpy as np\n'), ((3367, 3391), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', ([], {}), '()\n', (3389, 3391), False, 'import pretty_midi\n'), ((3409, 3434), 'pretty_midi.Instrument', 'pretty_midi.Instrument', (['(0)'], {}), '(0)\n', (3431, 3434), False, 'import pretty_midi\n'), ((1694, 1757), 'numpy.reshape', 'np.reshape', (['roll', '(length // sequence_size, sequence_size, 128)'], {}), '(roll, (length // sequence_size, sequence_size, 128))\n', (1704, 1757), True, 'import numpy as np\n'), ((1897, 1961), 'tensorflow.py_func', 'tf.py_func', (['_to_piano_roll', '[filename, sequence_size]', '[tf.bool]'], {}), '(_to_piano_roll, [filename, sequence_size], [tf.bool])\n', (1907, 1961), True, 'import tensorflow as tf\n'), ((2088, 2133), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['sequences'], {}), '(sequences)\n', (2122, 2133), True, 'import tensorflow as tf\n'), ((3463, 3505), 'pretty_midi.Note', 'pretty_midi.Note', (['(100)', 'note', '(0.0)', 'duration'], {}), '(100, note, 0.0, 
duration)\n', (3479, 3505), False, 'import pretty_midi\n'), ((1575, 1609), 'numpy.resize', 'np.resize', (['roll', '(new_length, 128)'], {}), '(roll, (new_length, 128))\n', (1584, 1609), True, 'import numpy as np\n'), ((2955, 2972), 'numpy.where', 'np.where', (['changes'], {}), '(changes)\n', (2963, 2972), True, 'import numpy as np\n'), ((1053, 1080), 'tensorflow.compat.as_text', 'tf.compat.as_text', (['filename'], {}), '(filename)\n', (1070, 1080), True, 'import tensorflow as tf\n'), ((1185, 1233), 'numpy.zeros', 'np.zeros', (['[0, sequence_size, 128]'], {'dtype': 'np.bool'}), '([0, sequence_size, 128], dtype=np.bool)\n', (1193, 1233), True, 'import numpy as np\n'), ((2188, 2233), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['filenames'], {}), '(filenames)\n', (2222, 2233), True, 'import tensorflow as tf\n')] |
import cv2
import mediapipe as mp
import numpy as np
import pandas as pd
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
def calculate_angle(a,b,c):
    """Return the angle (degrees, in [0, 180]) at vertex *b* formed by the
    2-D points a-b-c."""
    first = np.array(a)   # First
    mid = np.array(b)     # Mid
    end = np.array(c)     # End
    # Difference of the two ray directions, measured from the mid point.
    theta = np.arctan2(end[1] - mid[1], end[0] - mid[0]) \
        - np.arctan2(first[1] - mid[1], first[0] - mid[0])
    degrees = np.abs(theta * 180.0 / np.pi)
    # Fold reflex angles back into [0, 180].
    return 360 - degrees if degrees > 180.0 else degrees
def angle_from_video(path):
    """Run MediaPipe pose detection over a video and collect joint angles.

    For every successfully read frame the knee (hip-knee-ankle), elbow
    (shoulder-elbow-wrist) and shoulder (elbow-shoulder-hip) angles of both
    body sides are computed from the detected landmarks.

    Parameters
    ----------
    path : str
        Path of the video file to analyse.

    Returns
    -------
    pandas.DataFrame
        One row per frame with columns 'frame', 'hip2ankle_left',
        'hip2ankle_right', 'shoulder2wrist_left', 'shoulder2wrist_right',
        'elbow2hip_left' and 'elbow2hip_right'.
    """
    empty_left_leg = []
    empty_right_leg = []
    empty_right_arm = []
    empty_left_arm = []
    empty_right_body = []
    empty_left_body = []
    frame_count = []
    cap = cv2.VideoCapture(path)
    # Setup mediapipe instance
    with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                # End of stream or failed read: stop here.  The original
                # code kept going, reprocessing the previous frame, and
                # raised a NameError if the very first read failed.
                break
            current_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
            frame_count.append(current_frame)
            # Recolor to RGB (mediapipe expects RGB), run the detector on a
            # read-only view, then convert back to BGR for OpenCV rendering.
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image.flags.writeable = False
            results = pose.process(image)
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # Extract landmarks
            try:
                landmarks = results.pose_landmarks.landmark
                # Landmark (x, y) coordinates (normalised image coords).
                left_shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x, landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]
                left_elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x, landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y]
                left_wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x, landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
                right_shoulder = [landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].x, landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value].y]
                right_elbow = [landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value].x, landmarks[mp_pose.PoseLandmark.RIGHT_ELBOW.value].y]
                right_wrist = [landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].x, landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value].y]
                left_hip = [landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].x, landmarks[mp_pose.PoseLandmark.LEFT_HIP.value].y]
                left_knee = [landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].x, landmarks[mp_pose.PoseLandmark.LEFT_KNEE.value].y]
                left_ankle = [landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].x, landmarks[mp_pose.PoseLandmark.LEFT_ANKLE.value].y]
                right_hip = [landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].x, landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value].y]
                right_knee = [landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].x, landmarks[mp_pose.PoseLandmark.RIGHT_KNEE.value].y]
                right_ankle = [landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].x, landmarks[mp_pose.PoseLandmark.RIGHT_ANKLE.value].y]
                # Knee angles (hip-knee-ankle)
                angle_left_leg = calculate_angle(left_hip, left_knee, left_ankle)
                angle_right_leg = calculate_angle(right_hip, right_knee, right_ankle)
                empty_left_leg.append(angle_left_leg)
                empty_right_leg.append(angle_right_leg)
                # Elbow angles (shoulder-elbow-wrist)
                angle_left_arm = calculate_angle(left_shoulder, left_elbow, left_wrist)
                angle_right_arm = calculate_angle(right_shoulder, right_elbow, right_wrist)
                empty_left_arm.append(angle_left_arm)
                empty_right_arm.append(angle_right_arm)
                # Shoulder angles (elbow-shoulder-hip)
                angle_left_body = calculate_angle(left_elbow, left_shoulder, left_hip)
                angle_right_body = calculate_angle(right_elbow, right_shoulder, right_hip)
                empty_left_body.append(angle_left_body)
                empty_right_body.append(angle_right_body)
                print(angle_right_body, angle_left_body)
                print(current_frame)
                # Visualize angle
                # NOTE(review): the right-arm angle is drawn at the left
                # elbow and vice versa — confirm whether the labels should
                # be swapped.
                cv2.putText(image, str(angle_right_arm),
                            tuple(np.multiply(left_elbow, [640, 480]).astype(int)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
                cv2.putText(image, str(angle_left_arm),
                            tuple(np.multiply(right_elbow, [640, 480]).astype(int)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA
                            )
            except Exception:
                # Best effort: no landmarks detected for this frame.
                # NOTE(review): the frame number was already recorded, so a
                # detection miss leaves the angle lists shorter than
                # frame_count and the DataFrame below fails on unequal
                # lengths — confirm detections are always available.
                pass
            # Render detections
            mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                      mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                                      mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
                                      )
            # cv2.imshow('Mediapipe Feed', image)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
    # Previously unreachable: the capture is now always released.
    cap.release()
    cv2.destroyAllWindows()
    d = {'frame': frame_count, 'hip2ankle_left': empty_left_leg, 'hip2ankle_right': empty_right_leg,
         'shoulder2wrist_left': empty_left_arm, 'shoulder2wrist_right': empty_right_arm,
         'elbow2hip_left': empty_left_body, 'elbow2hip_right': empty_right_body}
    return pd.DataFrame(d)
#print(angle_from_video('Flask_template/pose/videos/serve/djok/djokserveside.mp4'))
def user_data(path):
    """Extract pose angles from the video at *path* and save them to
    'output.xlsx'.

    Returns the result of DataFrame.to_excel (None), kept for backward
    compatibility with existing callers.
    """
    df = angle_from_video(path)
    # Context manager replaces the deprecated ExcelWriter.save() call
    # (removed in pandas 2.0) and guarantees the file is closed.
    with pd.ExcelWriter('output.xlsx') as writer:
        export = df.to_excel(writer)
    return export
#print(user_data('pose/videos/serve/djok/djokserveside.mp4')) | [
"numpy.abs",
"numpy.multiply",
"numpy.array",
"numpy.arctan2",
"cv2.VideoCapture",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"pandas.DataFrame",
"pandas.ExcelWriter",
"cv2.waitKey"
] | [((178, 189), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (186, 189), True, 'import numpy as np\n'), ((206, 217), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (214, 217), True, 'import numpy as np\n'), ((232, 243), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (240, 243), True, 'import numpy as np\n'), ((349, 380), 'numpy.abs', 'np.abs', (['(radians * 180.0 / np.pi)'], {}), '(radians * 180.0 / np.pi)\n', (355, 380), True, 'import numpy as np\n'), ((670, 692), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (686, 692), False, 'import cv2\n'), ((6799, 6828), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['"""output.xlsx"""'], {}), "('output.xlsx')\n", (6813, 6828), True, 'import pandas as pd\n'), ((269, 305), 'numpy.arctan2', 'np.arctan2', (['(c[1] - b[1])', '(c[0] - b[0])'], {}), '(c[1] - b[1], c[0] - b[0])\n', (279, 305), True, 'import numpy as np\n'), ((304, 340), 'numpy.arctan2', 'np.arctan2', (['(a[1] - b[1])', '(a[0] - b[0])'], {}), '(a[1] - b[1], a[0] - b[0])\n', (314, 340), True, 'import numpy as np\n'), ((6622, 6645), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6643, 6645), False, 'import cv2\n'), ((1537, 1575), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (1549, 1575), False, 'import cv2\n'), ((1220, 1258), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (1232, 1258), False, 'import cv2\n'), ((6518, 6533), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (6530, 6533), True, 'import pandas as pd\n'), ((6163, 6178), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (6174, 6178), False, 'import cv2\n'), ((5197, 5232), 'numpy.multiply', 'np.multiply', (['left_elbow', '[640, 480]'], {}), '(left_elbow, [640, 480])\n', (5208, 5232), True, 'import numpy as np\n'), ((5432, 5468), 'numpy.multiply', 'np.multiply', (['right_elbow', '[640, 480]'], {}), '(right_elbow, [640, 480])\n', (5443, 
5468), True, 'import numpy as np\n')] |
import numpy as np
from paddle import fluid
class MaskedMultiHeadAttention(object):
    """Masked multi-head self-attention built on the static-graph
    ``paddle.fluid`` layers API.
    A single fused FC projects the input to concatenated Q/K/V, heads are
    split out, scaled dot-product attention with masking is applied, and the
    heads are merged back through an output FC. Supports incremental decoding
    through a cached ``past_kv`` tensor.
    """
    def __init__(self, model_dim, num_heads, dropout=0.0):
        # model_dim must divide evenly across the attention heads.
        assert model_dim % num_heads == 0
        self.model_dim = model_dim
        self.num_heads = num_heads
        self.per_head_dim = model_dim // num_heads
        self.dropout = dropout
    def _split(self, x):
        """Split state to query, key and value"""
        # x: [bs, seq_len, 3 * model_dim] -> three [bs, seq_len, model_dim]
        # tensors (Q, K, V), via an extra axis of size 3.
        _, seq_len, qkv_dim = x.shape
        x = fluid.layers.reshape(
            x, [-1, seq_len, 3, qkv_dim // 3], inplace=True)
        return fluid.layers.unstack(x, axis=2)
    def _split_heads(self, x):
        """Split single head for multi-heads"""
        # [bs, seq_len, model_dim] -> [bs, num_heads, seq_len, per_head_dim]
        split_x = fluid.layers.reshape(
            x, [0, 0, self.num_heads, self.per_head_dim], inplace=True)
        split_x = fluid.layers.transpose(split_x, perm=[0, 2, 1, 3])
        return split_x
    def _merge_heads(self, x):
        """Merge multi-heads for single head"""
        # [bs, num_heads, seq_len, per_head_dim] -> [bs, seq_len, model_dim]
        merged_x = fluid.layers.transpose(x, perm=[0, 2, 1, 3])
        merged_dim = merged_x.shape[2] * merged_x.shape[3]
        merged_x = fluid.layers.reshape(
            merged_x, [0, 0, merged_dim], inplace=True)
        return merged_x
    def _apply_attn_score_mask(self, product, attn_mask):
        # Push masked positions (attn_mask == 0) down to -1e10 so the
        # following softmax assigns them (near) zero probability.
        product = product * attn_mask - 1e10 * (1 - attn_mask)
        return product
    def _scaled_dot_product_attention(self, query, key, value, attn_mask,
                                      d_key, attn_bias=None, dropout=0.0):
        # Q is in shape [bs, nheads, tgt_seq_len, per_head_dim]
        # K and V are in shape [bs, nheads, src_seq_len, per_head_dim]
        # attn_mask is in shape [bs, tgt_seq_len, src_seq_len]
        product = fluid.layers.matmul(query, key, transpose_y=True,
                                      alpha=d_key**-0.5)
        if attn_bias is not None:
            product += attn_bias
        # Broadcast the mask over all heads:
        # [bs, tgt, src] -> [bs, nheads, tgt, src]
        attn_mask = fluid.layers.expand(
            fluid.layers.unsqueeze(attn_mask, axes=[1]),
            [1, self.num_heads, 1, 1])
        product = self._apply_attn_score_mask(product, attn_mask)
        # weights is in shape [bs, nheads, tgt_seq_len, src_seq_len]
        weights = fluid.layers.softmax(product)
        # Zero out masked positions again after the softmax; rows are not
        # renormalized afterwards.
        weights = weights * attn_mask
        if dropout > 0:
            weights = fluid.layers.dropout(
                weights, dropout, dropout_implementation='upscale_in_train')
        # attn is in shape [bs, nheads, tgt_seq_len, per_head_dim]
        attn = fluid.layers.matmul(weights, value)
        return attn, weights
    def __call__(self, x, attn_mask, past_kv=None, attn_bias=None):
        # Parameters:
        #   qkv_fc: x_dim * model_dim * 3
        #   scaled_dot_product_attention: 0
        #   out_fc: model_dim * model_dim
        # Computation (assume bs = 1):
        #   let N1 = tgt_seq_len * x_dim * model_dim * 3
        #       N2 = nheads * tgt_seq_len * src_seq_len
        #       N3 = nheads * tgt_seq_len * per_head_dim = tl * md
        #       ph = per_head_dim; md = model_dim
        #       sl = src_seq_len; tl = tgt_seq_len
        #   qkv_fc: N1 * (model_dim (mul_op) + model_dim (add_op))
        #   scaled_dot_product_attention:
        #       N2 * (2*ph(mul_op) + 2*ph(add_op) + ph(div_op) + ph(exp_op))
        #       + N3 * (sl (mul_op) + sl (add_op))
        #   out_fc: tl * md * (md (mul_op) + md (add_op))
        #
        # for sl = tl = 200, md = 512, ph = 64, nh = 8, around 10^12
        assert len(x.shape) == 3
        # TODO: add customize parameter initializer for QKV project.
        c = fluid.layers.fc(x, self.model_dim * 3, num_flatten_dims=2,
                            bias_attr=False, name='qkv_fc')
        # Q, K, V is in shape [bs, tgt_seq_len, model_dim]
        # attn_mask is in shape [bs, tgt_seq_len, src_seq_len]
        # past_kv is None or in [bs, 2, nheads, past_seq_len, per_head_dim]
        # when past_kv is None, tgt_seq_len = src_seq_len
        # otherwise, src_seq_len = past_seq_len + tgt_seq_len
        query, key, value = self._split(c)
        assert len(query.shape) == len(key.shape) == len(value.shape) == 3
        query = self._split_heads(query)
        key = self._split_heads(key)
        value = self._split_heads(value)
        # K/V are cached BEFORE concatenating the past, so present_kv holds
        # only this call's keys/values.
        present_kv = fluid.layers.stack([key, value], axis=1)
        if past_kv is not None:
            pk, pv = fluid.layers.unstack(past_kv, axis=1)
            key = fluid.layers.concat([pk, key], axis=-2)
            value = fluid.layers.concat([pv, value], axis=-2)
        attn, attn_weights = self._scaled_dot_product_attention(
            query, key, value, attn_mask, self.per_head_dim,
            attn_bias=attn_bias, dropout=self.dropout)
        attn = self._merge_heads(attn)
        attn = fluid.layers.fc(attn, self.model_dim, num_flatten_dims=2,
                               bias_attr=False, name='out_fc')
        return attn, present_kv, attn_weights
class TransformerDecoderBlock(object):
    """One transformer decoder layer: masked self-attention plus a
    position-wise feed-forward network, with residual connections.
    ``normalize_before`` selects pre-layer-norm (`_forward_pre`) versus
    post-layer-norm (`_forward_post`) sublayer ordering. Supports
    incremental decoding via ``past_kv`` / ``past_padding_mask``.
    """
    def __init__(self, model_dim, num_heads, ffn_dim,
                 dropout=0.0, normalize_before=False):
        self.model_dim = model_dim
        self.num_heads = num_heads
        self.ffn_dim = ffn_dim
        self.dropout = dropout
        self.normalize_before = normalize_before
        self.masked_self_attn = MaskedMultiHeadAttention(
            model_dim, num_heads, dropout=dropout)
    def _merge_mask(self, attn_mask, padding_mask):
        """Restrict attn_mask by the outer product of the padding mask so
        padded positions can neither attend nor be attended to."""
        pm = fluid.layers.unsqueeze(padding_mask, 2)
        pm_t = fluid.layers.unsqueeze(padding_mask, 1)
        new_pm = fluid.layers.matmul(pm, pm_t)
        attn_mask = fluid.layers.elementwise_mul(attn_mask, new_pm, axis=0)
        return attn_mask
    def _with_frame_emb(self, x, frame_emb):
        """Add the frame embedding to x when one is provided."""
        return x if frame_emb is None else x + frame_emb
    def _pad_past_attn_mask(self, attn_mask, past_padding_mask):
        """Left-pad attn_mask with ones over the cached past positions
        (computed in numpy through a ``py_func`` op)."""
        def _np_func(m, pm):
            m, pm = np.array(m), np.array(pm)
            # Past positions are always visible: pad the src axis with ones.
            pad = np.ones((m.shape[0], m.shape[1], pm.shape[1]),
                           dtype=m.dtype)
            m_ = np.concatenate([pad, m], axis=2)
            return m_
        name = fluid.unique_name.generate(attn_mask.name)
        new_mask = fluid.default_main_program().current_block().create_var(
            name=name, dtype=attn_mask.dtype, shape=attn_mask.shape)
        fluid.layers.py_func(
            func=_np_func, x=[attn_mask, past_padding_mask], out=new_mask)
        return new_mask
    def _mlp(self, x, n_state, dropout=0.0):
        """Position-wise feed-forward sublayer: fc(gelu) -> dropout -> fc."""
        # TODO: try other activation
        nx = x.shape[-1]
        h1 = fluid.layers.fc(x, n_state, num_flatten_dims=2, act='gelu')
        if dropout > 0:
            h1 = fluid.layers.dropout(
                h1, dropout, dropout_implementation='upscale_in_train')
        h2 = fluid.layers.fc(h1, nx, num_flatten_dims=2)
        return h2
    def _forward_post(self, x, frame_emb, attn_mask, padding_mask,
                      past_kv, past_padding_mask):
        """Post-LN ordering: sublayer -> residual add -> layer_norm."""
        x = self._with_frame_emb(x, frame_emb)
        if past_padding_mask is not None:
            # [bs, src_seq_len]
            padding_mask = fluid.layers.concat(
                [past_padding_mask, padding_mask], axis=-1)
        attn_mask = self._merge_mask(attn_mask, padding_mask)
        attn, present_kv, attn_weights = self.masked_self_attn(
            x, attn_mask, past_kv=past_kv)
        if self.dropout > 0:
            attn = fluid.layers.dropout(
                attn, self.dropout, dropout_implementation='upscale_in_train')
        x = x + attn
        x = fluid.layers.layer_norm(
            x, begin_norm_axis=2, epsilon=1e-6,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.)))
        m = self._mlp(x, self.ffn_dim, dropout=self.dropout)
        if self.dropout > 0:
            m = fluid.layers.dropout(
                m, self.dropout, dropout_implementation='upscale_in_train')
        x = x + m
        x = fluid.layers.layer_norm(
            x, begin_norm_axis=2, epsilon=1e-6,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.)))
        return x, present_kv, attn_weights
    def _forward_pre(self, x, frame_emb, attn_mask, padding_mask,
                     past_kv, past_padding_mask):
        """Pre-LN ordering: layer_norm -> sublayer -> residual add
        (no final norm after the MLP sublayer)."""
        x_ = fluid.layers.layer_norm(
            x, begin_norm_axis=2, epsilon=1e-6,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.)))
        x_ = self._with_frame_emb(x_, frame_emb)
        if past_padding_mask is not None:
            # [bs, src_seq_len]
            padding_mask = fluid.layers.concat(
                [past_padding_mask, padding_mask], axis=-1)
        attn_mask = self._merge_mask(attn_mask, padding_mask)
        attn, present_kv, attn_weights = self.masked_self_attn(
            x_, attn_mask, past_kv=past_kv)
        if self.dropout > 0:
            attn = fluid.layers.dropout(
                attn, self.dropout, dropout_implementation='upscale_in_train')
        x = x + attn
        x_ = fluid.layers.layer_norm(
            x, begin_norm_axis=2, epsilon=1e-6,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.)))
        m = self._mlp(x_, self.ffn_dim, dropout=self.dropout)
        if self.dropout > 0:
            m = fluid.layers.dropout(
                m, self.dropout, dropout_implementation='upscale_in_train')
        x = x + m
        return x, present_kv, attn_weights
    def __call__(self, x, frame_emb, attn_mask, padding_mask,
                 past_kv=None, past_padding_mask=None):
        # x: [bs, tgt_seq_len, model_dim]
        # frame_emb: [bs, tgt_seq_len, model_dim]
        # attn_mask: [bs, tgt_seq_len, tgt_seq_len]
        # padding_mask: [bs, tgt_seq_len]
        # past_kv: [bs, 2, nheads, past_seq_len, per_head_dim]
        # past_padding_mask: [bs, past_seq_len]
        # src_seq_len = tgt_seq_len + past_seq_len
        if past_padding_mask is not None:
            # Now attn_mask: [bs, tgt_seq_len, src_seq_len]
            attn_mask = self._pad_past_attn_mask(attn_mask, past_padding_mask)
        if self.normalize_before:
            return self._forward_pre(x, frame_emb, attn_mask,
                                     padding_mask, past_kv,
                                     past_padding_mask)
        return self._forward_post(x, frame_emb, attn_mask, padding_mask,
                                  past_kv, past_padding_mask)
class TransformerDecoder(object):
    """Stack of ``TransformerDecoderBlock`` layers with per-frame pooling.
    The token sequence is interpreted as consecutive frames of
    ``tokens_per_frame`` tokens each; after the decoder stack, a max pool
    over every frame produces one hidden vector per frame.
    """
    def __init__(self, num_blocks, model_dim, num_heads, ffn_dim,
                 tokens_per_frame=10, dropout=0.0, normalize_before=False):
        self.num_blocks = num_blocks
        self.tokens_per_frame = tokens_per_frame
        self.blocks = [
            TransformerDecoderBlock(
                model_dim, num_heads, ffn_dim,
                dropout=dropout,
                normalize_before=normalize_before)
            for _ in range(num_blocks)
        ]
    def _apply_padding_mask(self, x, padding_mask):
        """Keep entries where the mask is 1; drive masked entries to -1e10
        so they never win the frame-wise max pool."""
        return padding_mask * x - 1e10 * (1 - padding_mask)
    def _pooling_over_frames(self, x, padding_mask):
        """Max-pool token states within each ``tokens_per_frame``-token frame."""
        _, seq_len, hidden = x.shape
        n_frames = seq_len // self.tokens_per_frame
        # Broadcast the [bs, seq_len] padding mask across the hidden dim.
        mask3d = fluid.layers.expand(
            fluid.layers.unsqueeze(padding_mask, [2]),
            [1, 1, hidden])
        masked = self._apply_padding_mask(x, mask3d)
        framed = fluid.layers.reshape(
            masked, [-1, n_frames, self.tokens_per_frame, hidden])
        return fluid.layers.reduce_max(framed, dim=2)
    def __call__(self, x, frame_emb, attn_mask, padding_mask,
                 past_kv_arr=None, past_padding_mask=None):
        """Run every decoder block in order; return final token states,
        pooled frame states, stacked K/V caches and attention weights."""
        assert x.shape[1] % self.tokens_per_frame == 0
        if past_kv_arr is None:
            per_block_past = [None] * self.num_blocks
        else:
            per_block_past = fluid.layers.unstack(past_kv_arr, axis=1)
        kv_caches, weights_all = [], []
        for block, past_kv in zip(self.blocks, per_block_past):
            x, kv_cache, attn_w = block(
                x, frame_emb, attn_mask, padding_mask,
                past_kv=past_kv, past_padding_mask=past_padding_mask)
            kv_caches.append(kv_cache)
            weights_all.append(attn_w)
        present_kv_arr = fluid.layers.stack(kv_caches, axis=1)
        attn_weights_arr = fluid.layers.stack(weights_all, axis=1)
        frame_hid = self._pooling_over_frames(x, padding_mask)
        return x, frame_hid, present_kv_arr, attn_weights_arr
| [
"numpy.array",
"paddle.fluid.layers.elementwise_mul",
"paddle.fluid.layers.transpose",
"paddle.fluid.layers.matmul",
"paddle.fluid.layers.reshape",
"paddle.fluid.default_main_program",
"numpy.concatenate",
"numpy.ones",
"paddle.fluid.layers.softmax",
"paddle.fluid.unique_name.generate",
"paddle.... | [((465, 534), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['x', '[-1, seq_len, 3, qkv_dim // 3]'], {'inplace': '(True)'}), '(x, [-1, seq_len, 3, qkv_dim // 3], inplace=True)\n', (485, 534), False, 'from paddle import fluid\n'), ((563, 594), 'paddle.fluid.layers.unstack', 'fluid.layers.unstack', (['x'], {'axis': '(2)'}), '(x, axis=2)\n', (583, 594), False, 'from paddle import fluid\n'), ((693, 778), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['x', '[0, 0, self.num_heads, self.per_head_dim]'], {'inplace': '(True)'}), '(x, [0, 0, self.num_heads, self.per_head_dim], inplace=True\n )\n', (713, 778), False, 'from paddle import fluid\n'), ((805, 855), 'paddle.fluid.layers.transpose', 'fluid.layers.transpose', (['split_x'], {'perm': '[0, 2, 1, 3]'}), '(split_x, perm=[0, 2, 1, 3])\n', (827, 855), False, 'from paddle import fluid\n'), ((978, 1022), 'paddle.fluid.layers.transpose', 'fluid.layers.transpose', (['x'], {'perm': '[0, 2, 1, 3]'}), '(x, perm=[0, 2, 1, 3])\n', (1000, 1022), False, 'from paddle import fluid\n'), ((1101, 1165), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['merged_x', '[0, 0, merged_dim]'], {'inplace': '(True)'}), '(merged_x, [0, 0, merged_dim], inplace=True)\n', (1121, 1165), False, 'from paddle import fluid\n'), ((1714, 1784), 'paddle.fluid.layers.matmul', 'fluid.layers.matmul', (['query', 'key'], {'transpose_y': '(True)', 'alpha': '(d_key ** -0.5)'}), '(query, key, transpose_y=True, alpha=d_key ** -0.5)\n', (1733, 1784), False, 'from paddle import fluid\n'), ((2180, 2209), 'paddle.fluid.layers.softmax', 'fluid.layers.softmax', (['product'], {}), '(product)\n', (2200, 2209), False, 'from paddle import fluid\n'), ((2476, 2511), 'paddle.fluid.layers.matmul', 'fluid.layers.matmul', (['weights', 'value'], {}), '(weights, value)\n', (2495, 2511), False, 'from paddle import fluid\n'), ((3544, 3638), 'paddle.fluid.layers.fc', 'fluid.layers.fc', (['x', '(self.model_dim * 3)'], {'num_flatten_dims': '(2)', 
'bias_attr': '(False)', 'name': '"""qkv_fc"""'}), "(x, self.model_dim * 3, num_flatten_dims=2, bias_attr=False,\n name='qkv_fc')\n", (3559, 3638), False, 'from paddle import fluid\n'), ((4240, 4280), 'paddle.fluid.layers.stack', 'fluid.layers.stack', (['[key, value]'], {'axis': '(1)'}), '([key, value], axis=1)\n', (4258, 4280), False, 'from paddle import fluid\n'), ((4729, 4822), 'paddle.fluid.layers.fc', 'fluid.layers.fc', (['attn', 'self.model_dim'], {'num_flatten_dims': '(2)', 'bias_attr': '(False)', 'name': '"""out_fc"""'}), "(attn, self.model_dim, num_flatten_dims=2, bias_attr=False,\n name='out_fc')\n", (4744, 4822), False, 'from paddle import fluid\n'), ((5404, 5443), 'paddle.fluid.layers.unsqueeze', 'fluid.layers.unsqueeze', (['padding_mask', '(2)'], {}), '(padding_mask, 2)\n', (5426, 5443), False, 'from paddle import fluid\n'), ((5459, 5498), 'paddle.fluid.layers.unsqueeze', 'fluid.layers.unsqueeze', (['padding_mask', '(1)'], {}), '(padding_mask, 1)\n', (5481, 5498), False, 'from paddle import fluid\n'), ((5516, 5545), 'paddle.fluid.layers.matmul', 'fluid.layers.matmul', (['pm', 'pm_t'], {}), '(pm, pm_t)\n', (5535, 5545), False, 'from paddle import fluid\n'), ((5566, 5621), 'paddle.fluid.layers.elementwise_mul', 'fluid.layers.elementwise_mul', (['attn_mask', 'new_pm'], {'axis': '(0)'}), '(attn_mask, new_pm, axis=0)\n', (5594, 5621), False, 'from paddle import fluid\n'), ((6085, 6127), 'paddle.fluid.unique_name.generate', 'fluid.unique_name.generate', (['attn_mask.name'], {}), '(attn_mask.name)\n', (6111, 6127), False, 'from paddle import fluid\n'), ((6281, 6369), 'paddle.fluid.layers.py_func', 'fluid.layers.py_func', ([], {'func': '_np_func', 'x': '[attn_mask, past_padding_mask]', 'out': 'new_mask'}), '(func=_np_func, x=[attn_mask, past_padding_mask], out=\n new_mask)\n', (6301, 6369), False, 'from paddle import fluid\n'), ((6523, 6582), 'paddle.fluid.layers.fc', 'fluid.layers.fc', (['x', 'n_state'], {'num_flatten_dims': '(2)', 'act': '"""gelu"""'}), "(x, 
n_state, num_flatten_dims=2, act='gelu')\n", (6538, 6582), False, 'from paddle import fluid\n'), ((6731, 6774), 'paddle.fluid.layers.fc', 'fluid.layers.fc', (['h1', 'nx'], {'num_flatten_dims': '(2)'}), '(h1, nx, num_flatten_dims=2)\n', (6746, 6774), False, 'from paddle import fluid\n'), ((11887, 11962), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['h', '[-1, num_frames, self.tokens_per_frame, model_dim]'], {}), '(h, [-1, num_frames, self.tokens_per_frame, model_dim])\n', (11907, 11962), False, 'from paddle import fluid\n'), ((11988, 12021), 'paddle.fluid.layers.reduce_max', 'fluid.layers.reduce_max', (['h'], {'dim': '(2)'}), '(h, dim=2)\n', (12011, 12021), False, 'from paddle import fluid\n'), ((12792, 12834), 'paddle.fluid.layers.stack', 'fluid.layers.stack', (['present_kv_lst'], {'axis': '(1)'}), '(present_kv_lst, axis=1)\n', (12810, 12834), False, 'from paddle import fluid\n'), ((12862, 12906), 'paddle.fluid.layers.stack', 'fluid.layers.stack', (['attn_weights_lst'], {'axis': '(1)'}), '(attn_weights_lst, axis=1)\n', (12880, 12906), False, 'from paddle import fluid\n'), ((1942, 1985), 'paddle.fluid.layers.unsqueeze', 'fluid.layers.unsqueeze', (['attn_mask'], {'axes': '[1]'}), '(attn_mask, axes=[1])\n', (1964, 1985), False, 'from paddle import fluid\n'), ((2294, 2380), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['weights', 'dropout'], {'dropout_implementation': '"""upscale_in_train"""'}), "(weights, dropout, dropout_implementation=\n 'upscale_in_train')\n", (2314, 2380), False, 'from paddle import fluid\n'), ((4335, 4372), 'paddle.fluid.layers.unstack', 'fluid.layers.unstack', (['past_kv'], {'axis': '(1)'}), '(past_kv, axis=1)\n', (4355, 4372), False, 'from paddle import fluid\n'), ((4391, 4430), 'paddle.fluid.layers.concat', 'fluid.layers.concat', (['[pk, key]'], {'axis': '(-2)'}), '([pk, key], axis=-2)\n', (4410, 4430), False, 'from paddle import fluid\n'), ((4451, 4492), 'paddle.fluid.layers.concat', 'fluid.layers.concat', (['[pv, 
value]'], {'axis': '(-2)'}), '([pv, value], axis=-2)\n', (4470, 4492), False, 'from paddle import fluid\n'), ((5909, 5970), 'numpy.ones', 'np.ones', (['(m.shape[0], m.shape[1], pm.shape[1])'], {'dtype': 'm.dtype'}), '((m.shape[0], m.shape[1], pm.shape[1]), dtype=m.dtype)\n', (5916, 5970), True, 'import numpy as np\n'), ((6014, 6046), 'numpy.concatenate', 'np.concatenate', (['[pad, m]'], {'axis': '(2)'}), '([pad, m], axis=2)\n', (6028, 6046), True, 'import numpy as np\n'), ((6624, 6700), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['h1', 'dropout'], {'dropout_implementation': '"""upscale_in_train"""'}), "(h1, dropout, dropout_implementation='upscale_in_train')\n", (6644, 6700), False, 'from paddle import fluid\n'), ((7060, 7123), 'paddle.fluid.layers.concat', 'fluid.layers.concat', (['[past_padding_mask, padding_mask]'], {'axis': '(-1)'}), '([past_padding_mask, padding_mask], axis=-1)\n', (7079, 7123), False, 'from paddle import fluid\n'), ((7359, 7447), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['attn', 'self.dropout'], {'dropout_implementation': '"""upscale_in_train"""'}), "(attn, self.dropout, dropout_implementation=\n 'upscale_in_train')\n", (7379, 7447), False, 'from paddle import fluid\n'), ((7875, 7960), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['m', 'self.dropout'], {'dropout_implementation': '"""upscale_in_train"""'}), "(m, self.dropout, dropout_implementation='upscale_in_train'\n )\n", (7895, 7960), False, 'from paddle import fluid\n'), ((8875, 8938), 'paddle.fluid.layers.concat', 'fluid.layers.concat', (['[past_padding_mask, padding_mask]'], {'axis': '(-1)'}), '([past_padding_mask, padding_mask], axis=-1)\n', (8894, 8938), False, 'from paddle import fluid\n'), ((9175, 9263), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['attn', 'self.dropout'], {'dropout_implementation': '"""upscale_in_train"""'}), "(attn, self.dropout, dropout_implementation=\n 'upscale_in_train')\n", (9195, 9263), False, 'from paddle 
import fluid\n'), ((9693, 9778), 'paddle.fluid.layers.dropout', 'fluid.layers.dropout', (['m', 'self.dropout'], {'dropout_implementation': '"""upscale_in_train"""'}), "(m, self.dropout, dropout_implementation='upscale_in_train'\n )\n", (9713, 9778), False, 'from paddle import fluid\n'), ((11746, 11787), 'paddle.fluid.layers.unsqueeze', 'fluid.layers.unsqueeze', (['padding_mask', '[2]'], {}), '(padding_mask, [2])\n', (11768, 11787), False, 'from paddle import fluid\n'), ((12279, 12320), 'paddle.fluid.layers.unstack', 'fluid.layers.unstack', (['past_kv_arr'], {'axis': '(1)'}), '(past_kv_arr, axis=1)\n', (12299, 12320), False, 'from paddle import fluid\n'), ((5865, 5876), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (5873, 5876), True, 'import numpy as np\n'), ((5878, 5890), 'numpy.array', 'np.array', (['pm'], {}), '(pm)\n', (5886, 5890), True, 'import numpy as np\n'), ((6147, 6175), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (6173, 6175), False, 'from paddle import fluid\n'), ((7635, 7666), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(1.0)'], {}), '(1.0)\n', (7661, 7666), False, 'from paddle import fluid\n'), ((7735, 7766), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(0.0)'], {}), '(0.0)\n', (7761, 7766), False, 'from paddle import fluid\n'), ((8144, 8175), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(1.0)'], {}), '(1.0)\n', (8170, 8175), False, 'from paddle import fluid\n'), ((8244, 8275), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(0.0)'], {}), '(0.0)\n', (8270, 8275), False, 'from paddle import fluid\n'), ((8591, 8622), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(1.0)'], {}), '(1.0)\n', (8617, 8622), False, 'from paddle import fluid\n'), ((8691, 8722), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(0.0)'], {}), '(0.0)\n', (8717, 8722), False, 'from paddle import 
fluid\n'), ((9452, 9483), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(1.0)'], {}), '(1.0)\n', (9478, 9483), False, 'from paddle import fluid\n'), ((9552, 9583), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(0.0)'], {}), '(0.0)\n', (9578, 9583), False, 'from paddle import fluid\n')] |
import argparse
from time import time
import torch.optim as optim
from caser import Caser
from train_caser import Recommender
from evaluation import evaluate_ranking
from interactions import Interactions
from losses import weighted_sigmoid_log_loss
from utils import *
import numpy as np
import torch
import os
class DistilledRecommender(Recommender):
    """
    Contains attributes and methods that needed to train a sequential
    recommendation model with ranking distillation[1]. Models are trained
    by many tuples of (users, sequences, targets, negatives) and negatives
    are from negative sampling: for any known tuple of (user, sequence, targets),
    one or more items are randomly sampled to act as negatives.
    [1] Ranking Distillation: Learning Compact Ranking Models With High
    Performance for Recommender System, <NAME> and <NAME> , KDD '18
    Parameters
    ----------
    n_iter: int,
        Number of iterations to run.
    batch_size: int,
        Minibatch size.
    l2: float,
        L2 loss penalty, also known as the 'lambda' of l2 regularization.
    neg_samples: int,
        Number of negative samples to generate for each targets.
    learning_rate: float,
        Initial learning rate.
    use_cuda: boolean,
        Run the model on a GPU or CPU.
    teacher_model_path: string,
        Path to teacher's model checkpoint.
    teacher_topk_path: string,
        Path to teacher's top-K ranking cache for each training instance.
    lamda: float
        Hyperparameter for tuning the sharpness of position importance weight.
    mu: float
        Hyperparameter for tuning the sharpness of ranking discrepancy weight.
    num_dynamic_samples: int
        Number of samples used for estimating student's rank.
    dynamic_start_epoch: int
        Number of iteration to start using hybrid of two different weights.
    K: int
        Length of teacher's exemplary ranking.
    teach_alpha: float:
        Weight for balancing ranking loss and distillation loss.
    student_model_args: args,
        Student model related arguments, like latent dimensions.
    teacher_model_args: args,
        Teacher model related arguments, like latent dimensions.
    """
    def __init__(self,
                 n_iter=None,
                 batch_size=None,
                 l2=None,
                 neg_samples=None,
                 learning_rate=None,
                 use_cuda=False,
                 teacher_model_path=None,
                 teacher_topk_path=None,
                 lamda=None,
                 mu=None,
                 num_dynamic_samples=None,
                 dynamic_start_epoch=None,
                 K=None,
                 teach_alpha=None,
                 student_model_args=None,
                 teacher_model_args=None):
        # data related (filled in by fit() from the training Interactions)
        self.L = None
        self.T = None
        # model related
        self._num_items = None
        self._num_users = None
        self._teacher_net = None # teacher model
        self._student_net = None # student model
        self._student_model_args = student_model_args
        self._teacher_model_args = teacher_model_args
        # learning related
        self._batch_size = batch_size
        self._n_iter = n_iter
        self._learning_rate = learning_rate
        self._l2 = l2
        self._neg_samples = neg_samples
        self._device = torch.device("cuda" if use_cuda else "cpu")
        # ranking distillation related
        self._teach_alpha = teach_alpha
        self._lambda = lamda
        self._mu = mu
        self._num_dynamic_samples = num_dynamic_samples
        self._dynamic_start_epoch = dynamic_start_epoch
        self._K = K
        self._teacher_model_path = teacher_model_path
        self._teacher_topk_path = teacher_topk_path
        # when True, the hybrid weight would be L1-renormalized per row;
        # disabled by default.
        self._weight_renormalize = False
        # rank evaluation related
        self.test_sequence = None
        self._candidate = dict()
    @property
    def _teacher_initialized(self):
        """True once the teacher network has been built."""
        return self._teacher_net is not None
    def _initialize_teacher(self, interactions):
        """Build the teacher Caser network, load its checkpoint weights and
        freeze it in evaluation mode."""
        # initialize teacher model
        self._num_items = interactions.num_items
        self._num_users = interactions.num_users
        self._teacher_net = Caser(self._num_users,
                                  self._num_items,
                                  self._teacher_model_args)
        # load teacher model
        if os.path.isfile(self._teacher_model_path):
            output_str = ("loading teacher model from %s" % self._teacher_model_path)
            print(output_str)
            checkpoint = torch.load(self._teacher_model_path,map_location='cpu')
            self._teacher_net.load_state_dict(checkpoint['state_dict'])
            output_str = "loaded model %s (epoch %d)" % (self._teacher_model_path, checkpoint['epoch_num'])
            print(output_str)
        else:
            # NOTE(review): training continues with a randomly initialized
            # teacher in this case; only a warning is printed.
            output_str = "no model found at %s" % self._teacher_model_path
            print(output_str)
        # set teacher model to evaluation mode
        self._teacher_net.eval()
    @property
    def _student_initialized(self):
        """True once the student network has been built."""
        return self._student_net is not None
    def _initialize_student(self, interactions):
        """Build the student Caser network and its Adam optimizer."""
        self._num_items = interactions.num_items
        self._num_users = interactions.num_users
        self.test_sequence = interactions.test_sequences
        self._student_net = Caser(self._num_users,
                                  self._num_items,
                                  self._student_model_args)
        self._optimizer = optim.Adam(self._student_net.parameters(),
                                     weight_decay=self._l2,
                                     lr=self._learning_rate)
    def fit(self, train, test, verbose=False):
        """
        The general training loop to fit the model
        Parameters
        ----------
        train: :class:`interactions.Interactions`
            training instances, also contains test sequences
        test: :class:`interactions.Interactions`
            only contains targets for test sequences
        verbose: bool, optional
            print the logs
        """
        # convert sequences, targets and users to numpy arrays
        sequences_np = train.sequences.sequences
        targets_np = train.sequences.targets
        users_np = train.sequences.user_ids.reshape(-1, 1)
        self.L, self.T = train.sequences.L, train.sequences.T
        n_train = sequences_np.shape[0]
        output_str = 'total training instances: %d' % n_train
        print(output_str)
        if not self._teacher_initialized:
            self._initialize_teacher(train)
        if not self._student_initialized:
            self._initialize_student(train)
        # here we compute teacher top-K ranking for each training instance in advance for faster training speed
        # while we have to compute the top-K ranking on the fly if it is too large to keep in memory
        if os.path.isfile(self._teacher_topk_path):
            print('found teacher topk file, loading..')
            teacher_ranking = np.load(self._teacher_topk_path)
        else:
            print('teacher topk file not found, generating.. ')
            teacher_ranking = self._get_teacher_topk(sequences_np, users_np, targets_np, k=self._K)
        # initialize static weight (position importance weight)
        # w_r proportional to exp(-r / lambda) over ranks r = 1..K, then
        # normalized to sum to 1 and broadcast with a leading batch axis.
        weight_static = np.array(range(1, self._K + 1), dtype=np.float32)
        weight_static = np.exp(-weight_static / self._lambda)
        weight_static = weight_static / np.sum(weight_static)
        weight_static = torch.from_numpy(weight_static).to(self._device)
        weight_static = weight_static.unsqueeze(0)
        # initialize dynamic weight (ranking discrepancy weight)
        weight_dynamic = None
        # count number of parameters
        print("Number of params in teacher model: %d" % compute_model_size(self._teacher_net))
        print("Number of params in student model: %d" % compute_model_size(self._student_net))
        indices = np.arange(n_train)
        start_epoch = 1
        for epoch_num in range(start_epoch, self._n_iter + 1):
            t1 = time()
            # set teacher model to evaluation mode and move it to the corresponding devices
            self._teacher_net.eval()
            self._teacher_net = self._teacher_net.to(self._device)
            # set student model to training mode and move it to the corresponding devices
            self._student_net.train()
            self._student_net = self._student_net.to(self._device)
            (users_np, sequences_np, targets_np), shuffle_indices = shuffle(users_np,
                                                                            sequences_np,
                                                                            targets_np,
                                                                            indices=True)
            indices = indices[shuffle_indices] # keep indices for retrieval teacher's top-K ranking from cache
            negatives_np = self._generate_negative_samples(users_np, train, n=self._neg_samples)
            dynamic_samples_np = self._generate_negative_samples(users_np, train, n=self._num_dynamic_samples)
            # convert numpy arrays to PyTorch tensors and move it to the corresponding devices
            users, sequences, targets, negatives = (torch.from_numpy(users_np).long(),
                                                    torch.from_numpy(sequences_np).long(),
                                                    torch.from_numpy(targets_np).long(),
                                                    torch.from_numpy(negatives_np).long())
            users, sequences, targets, negatives = (users.to(self._device),
                                                    sequences.to(self._device),
                                                    targets.to(self._device),
                                                    negatives.to(self._device))
            dynamic_samples = torch.from_numpy(dynamic_samples_np).long().to(self._device)
            epoch_loss = 0.0
            epoch_regular_loss = 0.0
            for (minibatch_num,
                 (batch_indices,
                  batch_users,
                  batch_sequences,
                  batch_targets,
                  batch_negatives,
                  batch_dynamics)) in enumerate(minibatch(indices,
                                                          users,
                                                          sequences,
                                                          targets,
                                                          negatives,
                                                          dynamic_samples,
                                                          batch_size=self._batch_size)):
                # retrieval teacher top-K ranking given indices
                batch_candidates = torch.from_numpy(teacher_ranking[batch_indices, :]).long().to(self._device)
                # concatenate all variables to get predictions in one run
                items_to_predict = torch.cat((batch_targets, batch_negatives,
                                             batch_candidates, batch_dynamics), 1)
                items_prediction = self._student_net(batch_sequences,
                                                     batch_users,
                                                     items_to_predict)
                (targets_prediction,
                 negatives_prediction,
                 candidates_prediction,
                 dynamics_prediction) = torch.split(items_prediction, [batch_targets.size(1),
                                                                       batch_negatives.size(1),
                                                                       batch_candidates.size(1),
                                                                       batch_dynamics.size(1)], dim=1)
                self._optimizer.zero_grad()
                if epoch_num > self._dynamic_start_epoch:
                    # compute dynamic weight
                    dynamic_weights = list()
                    for col in range(self._K):
                        col_prediction = candidates_prediction[:, col].unsqueeze(1)
                        # estimate the student's rank of this candidate from
                        # how many of the sampled items score higher
                        num_smaller_than = torch.sum(col_prediction < dynamics_prediction, dim=1).float()
                        relative_rank = num_smaller_than / self._num_dynamic_samples
                        predicted_rank = torch.floor((self._num_items - 1) * relative_rank)
                        # weight is positive only when the student ranks the
                        # candidate worse than the teacher's position `col`
                        dynamic_weight = torch.tanh(self._mu * (predicted_rank - col))
                        dynamic_weight = torch.clamp(dynamic_weight, min=0.0)
                        dynamic_weights.append(dynamic_weight)
                    weight_dynamic = torch.stack(dynamic_weights, 1)
                    # hybrid two weights
                    weight = weight_dynamic * weight_static
                    if self._weight_renormalize:
                        # NOTE(review): `F` is not imported in this module;
                        # this branch (disabled by default through
                        # _weight_renormalize=False) would raise NameError
                        # unless `from utils import *` happens to export F.
                        weight = F.normalize(weight, p=1, dim=1)
                else:
                    weight = weight_static
                # detach the weight to stop the gradient flow to the weight
                weight = weight.detach()
                loss, regular_loss = weighted_sigmoid_log_loss(targets_prediction,
                                                               negatives_prediction,
                                                               candidates_prediction,
                                                               weight, self._teach_alpha)
                epoch_loss += loss.item()
                epoch_regular_loss += regular_loss.item()
                loss.backward()
                # assert False
                self._optimizer.step()
            epoch_loss /= minibatch_num + 1
            epoch_regular_loss /= minibatch_num + 1
            t2 = time()
            # full ranking evaluation every 10 epochs when verbose
            if verbose and epoch_num % 10 == 0:
                precision, recall, ndcg, mean_aps = evaluate_ranking(self, test, train, k=[3, 5, 10])
                str_precs = "precisions=%.4f,%.4f,%.4f" % tuple([np.mean(a) for a in precision])
                str_recalls = "recalls=%.4f,%.4f,%.4f" % tuple([np.mean(a) for a in recall])
                str_ndcgs = "ndcgs=%.4f,%.4f,%.4f" % tuple([np.mean(a) for a in ndcg])
                output_str = "Epoch %d [%.1f s]\tloss=%.4f, regular_loss=%.4f, " \
                             "map=%.4f, %s, %s, %s[%.1f s]" % (epoch_num, t2 - t1,
                                                               epoch_loss, epoch_regular_loss,
                                                               mean_aps, str_precs, str_recalls, str_ndcgs,
                                                               time() - t2)
                print(output_str)
            else:
                output_str = "Epoch %d [%.1f s]\tloss=%.4f, regular_loss=%.4f[%.1f s]" % (epoch_num, t2 - t1,
                                                                                          epoch_loss,
                                                                                          epoch_regular_loss,
                                                                                          time() - t2)
                print(output_str)
    def _get_teacher_topk(self, sequences, users, targets, k):
        """
        Pre-compute and cache teacher's top-K ranking for each training instance.
        By doing this we can make training with distillation much faster.
        Parameters
        ----------
        sequences: array of np.int64
            sequencces of items
        users: array of np.int64
            users associated with each sequence
        targets: array of np.int64
            target item that user interact with given the sequence
        k: int
            length of teacher's exemplary ranking
        """
        with_targets = False
        n_train = sequences.shape[0]
        indices = np.arange(n_train)
        # targets intentionally stay numpy so they can be concatenated with
        # the teacher's top-k arrays below
        users, sequences = torch.from_numpy(users).long(), torch.from_numpy(sequences).long()
        # teacher topk results
        teacher_topk = np.zeros((n_train, k), dtype=np.int64)
        for (batch_indices,
             batch_users,
             batch_sequences,
             batch_targets) in minibatch(indices,
                                         users,
                                         sequences,
                                         targets,
                                         batch_size=16):
            cur_batch_size = batch_users.shape[0]
            # score every item in the catalog for each instance in the batch
            all_items = torch.arange(start=0, end=self._num_items).repeat(cur_batch_size, 1).long()
            teacher_prediction = self._teacher_net(batch_sequences,
                                                   batch_users,
                                                   all_items).detach()
            # take 2k candidates so k survive after target de-duplication
            _, tops = teacher_prediction.topk(k * 2, dim=1) # return the topk by column
            tops = tops.cpu().numpy()
            new_tops = np.concatenate((batch_targets, tops), axis=1)
            topks = np.zeros((cur_batch_size, k), dtype=np.int64)
            for i, row in enumerate(new_tops):
                # np.unique deduplicates; np.sort(idx) restores original order
                _, idx = np.unique(row, return_index=True)
                # whether teacher's top-k ranking consider target items
                if with_targets:
                    topk = row[np.sort(idx)][:k]
                else:
                    # skip the first T entries (the prepended targets)
                    topk = row[np.sort(idx)][self.T:k + self.T]
                topks[i, :] = topk
            teacher_topk[batch_indices, :] = topks
        # NOTE(review): the cache filename hardcodes the 'gowalla' dataset even
        # though the data paths are configurable -- consider deriving it from
        # self._teacher_topk_path instead.
        np.save('gowalla-teacher-dim=%d-top=%d.npy' % (self._teacher_model_args.d, k), teacher_topk)
        return teacher_topk
    def predict(self, user_id, item_ids=None, model=None):
        """Delegate to the base Recommender's predict, forcing the student
        network as the scoring model (the `model` argument is ignored)."""
        return super(DistilledRecommender, self).predict(user_id, item_ids,
                                                         model=self._student_net)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data arguments
parser.add_argument('--train_root', type=str, default='datasets/gowalla/test/train.txt')
parser.add_argument('--test_root', type=str, default='datasets/gowalla/test/test.txt')
parser.add_argument('--L', type=int, default=5)
# train arguments
parser.add_argument('--n_iter', type=int, default=50)
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--l2', type=float, default=1e-6)
parser.add_argument('--neg_samples', type=int, default=3)
parser.add_argument('--use_cuda', type=str2bool, default=True)
# distillation arguments
# dimensionality of teacher model (specifically for embedding)
parser.add_argument('--teacher_model_dim', type=int, default=100)
# path to teacher's model checkpoint
parser.add_argument('--teacher_model_path', type=str, default='checkpoints/gowalla-caser-dim=100.pth.tar')
# path to teacher's top-K ranking cache for each training instance
parser.add_argument('--teacher_topk_path', type=str, default='')
# here alpha=1.0 stands for equal weight for ranking loss and distillation loss
parser.add_argument('--teach_alpha', type=float, default=1.0)
# length of teacher's exemplary ranking
parser.add_argument('--K', type=int, default=10)
# hyperparameter for tuning the sharpness of position importance weight in Eq.(8)
parser.add_argument('--lamda', type=float, default=1)
# hyperparameter for tuning the sharpness of ranking discrepancy weight in Eq.(9)
parser.add_argument('--mu', type=float, default=0.1)
# number of samples used for estimating student's rank in Eq.(9)
parser.add_argument('--num_dynamic_samples', type=int, default=100)
# number of iteration to start using hybrid of two different weights
parser.add_argument('--dynamic_start_epoch', type=int, default=10)
config = parser.parse_args()
# model dependent arguments
model_parser = argparse.ArgumentParser()
model_parser.add_argument('--d', type=int, default=50)
# Caser args
model_parser.add_argument('--nv', type=int, default=2)
model_parser.add_argument('--nh', type=int, default=16)
model_parser.add_argument('--drop', type=float, default=0.5)
model_parser.add_argument('--ac_conv', type=str, default='iden')
model_parser.add_argument('--ac_fc', type=str, default='sigm')
teacher_model_config = model_parser.parse_args()
teacher_model_config.L = config.L
teacher_model_config.d = config.teacher_model_dim
student_model_config = model_parser.parse_args()
student_model_config.L = config.L
# set seed
set_seed(config.seed,
cuda=config.use_cuda)
train = Interactions(config.train_root)
# transform triplets to sequence representation
train.to_sequence(config.L)
test = Interactions(config.test_root,
user_map=train.user_map,
item_map=train.item_map)
print(config)
print(student_model_config)
# fit model
model = DistilledRecommender(n_iter=config.n_iter,
batch_size=config.batch_size,
learning_rate=config.learning_rate,
l2=config.l2,
use_cuda=config.use_cuda,
neg_samples=config.neg_samples,
teacher_model_path=config.teacher_model_path,
teacher_topk_path=config.teacher_topk_path,
teacher_model_args=teacher_model_config,
student_model_args=student_model_config,
lamda=config.lamda,
mu=config.mu,
num_dynamic_samples=config.num_dynamic_samples,
dynamic_start_epoch=config.dynamic_start_epoch,
K=config.K,
teach_alpha=config.teach_alpha)
model.fit(train, test, verbose=True)
| [
"torch.from_numpy",
"torch.sum",
"numpy.save",
"numpy.arange",
"torch.tanh",
"numpy.mean",
"torch.arange",
"argparse.ArgumentParser",
"numpy.sort",
"torch.floor",
"numpy.exp",
"numpy.concatenate",
"os.path.isfile",
"caser.Caser",
"interactions.Interactions",
"evaluation.evaluate_rankin... | [((18042, 18067), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18065, 18067), False, 'import argparse\n'), ((20145, 20170), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (20168, 20170), False, 'import argparse\n'), ((20896, 20927), 'interactions.Interactions', 'Interactions', (['config.train_root'], {}), '(config.train_root)\n', (20908, 20927), False, 'from interactions import Interactions\n'), ((21024, 21109), 'interactions.Interactions', 'Interactions', (['config.test_root'], {'user_map': 'train.user_map', 'item_map': 'train.item_map'}), '(config.test_root, user_map=train.user_map, item_map=train.item_map\n )\n', (21036, 21109), False, 'from interactions import Interactions\n'), ((3387, 3430), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (3399, 3430), False, 'import torch\n'), ((4251, 4316), 'caser.Caser', 'Caser', (['self._num_users', 'self._num_items', 'self._teacher_model_args'], {}), '(self._num_users, self._num_items, self._teacher_model_args)\n', (4256, 4316), False, 'from caser import Caser\n'), ((4425, 4465), 'os.path.isfile', 'os.path.isfile', (['self._teacher_model_path'], {}), '(self._teacher_model_path)\n', (4439, 4465), False, 'import os\n'), ((5406, 5471), 'caser.Caser', 'Caser', (['self._num_users', 'self._num_items', 'self._student_model_args'], {}), '(self._num_users, self._num_items, self._student_model_args)\n', (5411, 5471), False, 'from caser import Caser\n'), ((6974, 7013), 'os.path.isfile', 'os.path.isfile', (['self._teacher_topk_path'], {}), '(self._teacher_topk_path)\n', (6988, 7013), False, 'import os\n'), ((7475, 7512), 'numpy.exp', 'np.exp', (['(-weight_static / self._lambda)'], {}), '(-weight_static / self._lambda)\n', (7481, 7512), True, 'import numpy as np\n'), ((8043, 8061), 'numpy.arange', 'np.arange', (['n_train'], {}), '(n_train)\n', (8052, 8061), True, 'import numpy as np\n'), 
((16051, 16069), 'numpy.arange', 'np.arange', (['n_train'], {}), '(n_train)\n', (16060, 16069), True, 'import numpy as np\n'), ((16220, 16258), 'numpy.zeros', 'np.zeros', (['(n_train, k)'], {'dtype': 'np.int64'}), '((n_train, k), dtype=np.int64)\n', (16228, 16258), True, 'import numpy as np\n'), ((17661, 17757), 'numpy.save', 'np.save', (["('gowalla-teacher-dim=%d-top=%d.npy' % (self._teacher_model_args.d, k))", 'teacher_topk'], {}), "('gowalla-teacher-dim=%d-top=%d.npy' % (self._teacher_model_args.d,\n k), teacher_topk)\n", (17668, 17757), True, 'import numpy as np\n'), ((4609, 4665), 'torch.load', 'torch.load', (['self._teacher_model_path'], {'map_location': '"""cpu"""'}), "(self._teacher_model_path, map_location='cpu')\n", (4619, 4665), False, 'import torch\n'), ((7101, 7133), 'numpy.load', 'np.load', (['self._teacher_topk_path'], {}), '(self._teacher_topk_path)\n', (7108, 7133), True, 'import numpy as np\n'), ((7553, 7574), 'numpy.sum', 'np.sum', (['weight_static'], {}), '(weight_static)\n', (7559, 7574), True, 'import numpy as np\n'), ((8168, 8174), 'time.time', 'time', ([], {}), '()\n', (8172, 8174), False, 'from time import time\n'), ((13972, 13978), 'time.time', 'time', ([], {}), '()\n', (13976, 13978), False, 'from time import time\n'), ((17108, 17153), 'numpy.concatenate', 'np.concatenate', (['(batch_targets, tops)'], {'axis': '(1)'}), '((batch_targets, tops), axis=1)\n', (17122, 17153), True, 'import numpy as np\n'), ((17174, 17219), 'numpy.zeros', 'np.zeros', (['(cur_batch_size, k)'], {'dtype': 'np.int64'}), '((cur_batch_size, k), dtype=np.int64)\n', (17182, 17219), True, 'import numpy as np\n'), ((7600, 7631), 'torch.from_numpy', 'torch.from_numpy', (['weight_static'], {}), '(weight_static)\n', (7616, 7631), False, 'import torch\n'), ((11158, 11243), 'torch.cat', 'torch.cat', (['(batch_targets, batch_negatives, batch_candidates, batch_dynamics)', '(1)'], {}), '((batch_targets, batch_negatives, batch_candidates, batch_dynamics), 1\n )\n', (11167, 
11243), False, 'import torch\n'), ((13345, 13466), 'losses.weighted_sigmoid_log_loss', 'weighted_sigmoid_log_loss', (['targets_prediction', 'negatives_prediction', 'candidates_prediction', 'weight', 'self._teach_alpha'], {}), '(targets_prediction, negatives_prediction,\n candidates_prediction, weight, self._teach_alpha)\n', (13370, 13466), False, 'from losses import weighted_sigmoid_log_loss\n'), ((14080, 14129), 'evaluation.evaluate_ranking', 'evaluate_ranking', (['self', 'test', 'train'], {'k': '[3, 5, 10]'}), '(self, test, train, k=[3, 5, 10])\n', (14096, 14129), False, 'from evaluation import evaluate_ranking\n'), ((17293, 17326), 'numpy.unique', 'np.unique', (['row'], {'return_index': '(True)'}), '(row, return_index=True)\n', (17302, 17326), True, 'import numpy as np\n'), ((12876, 12907), 'torch.stack', 'torch.stack', (['dynamic_weights', '(1)'], {}), '(dynamic_weights, 1)\n', (12887, 12907), False, 'import torch\n'), ((16098, 16121), 'torch.from_numpy', 'torch.from_numpy', (['users'], {}), '(users)\n', (16114, 16121), False, 'import torch\n'), ((16130, 16157), 'torch.from_numpy', 'torch.from_numpy', (['sequences'], {}), '(sequences)\n', (16146, 16157), False, 'import torch\n'), ((9392, 9418), 'torch.from_numpy', 'torch.from_numpy', (['users_np'], {}), '(users_np)\n', (9408, 9418), False, 'import torch\n'), ((9479, 9509), 'torch.from_numpy', 'torch.from_numpy', (['sequences_np'], {}), '(sequences_np)\n', (9495, 9509), False, 'import torch\n'), ((9570, 9598), 'torch.from_numpy', 'torch.from_numpy', (['targets_np'], {}), '(targets_np)\n', (9586, 9598), False, 'import torch\n'), ((9659, 9689), 'torch.from_numpy', 'torch.from_numpy', (['negatives_np'], {}), '(negatives_np)\n', (9675, 9689), False, 'import torch\n'), ((12558, 12608), 'torch.floor', 'torch.floor', (['((self._num_items - 1) * relative_rank)'], {}), '((self._num_items - 1) * relative_rank)\n', (12569, 12608), False, 'import torch\n'), ((12651, 12696), 'torch.tanh', 'torch.tanh', (['(self._mu * 
(predicted_rank - col))'], {}), '(self._mu * (predicted_rank - col))\n', (12661, 12696), False, 'import torch\n'), ((12738, 12774), 'torch.clamp', 'torch.clamp', (['dynamic_weight'], {'min': '(0.0)'}), '(dynamic_weight, min=0.0)\n', (12749, 12774), False, 'import torch\n'), ((10044, 10080), 'torch.from_numpy', 'torch.from_numpy', (['dynamic_samples_np'], {}), '(dynamic_samples_np)\n', (10060, 10080), False, 'import torch\n'), ((14196, 14206), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (14203, 14206), True, 'import numpy as np\n'), ((14292, 14302), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (14299, 14302), True, 'import numpy as np\n'), ((14381, 14391), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (14388, 14391), True, 'import numpy as np\n'), ((14841, 14847), 'time.time', 'time', ([], {}), '()\n', (14845, 14847), False, 'from time import time\n'), ((15318, 15324), 'time.time', 'time', ([], {}), '()\n', (15322, 15324), False, 'from time import time\n'), ((16676, 16718), 'torch.arange', 'torch.arange', ([], {'start': '(0)', 'end': 'self._num_items'}), '(start=0, end=self._num_items)\n', (16688, 16718), False, 'import torch\n'), ((17463, 17475), 'numpy.sort', 'np.sort', (['idx'], {}), '(idx)\n', (17470, 17475), True, 'import numpy as np\n'), ((17534, 17546), 'numpy.sort', 'np.sort', (['idx'], {}), '(idx)\n', (17541, 17546), True, 'import numpy as np\n'), ((10973, 11024), 'torch.from_numpy', 'torch.from_numpy', (['teacher_ranking[batch_indices, :]'], {}), '(teacher_ranking[batch_indices, :])\n', (10989, 11024), False, 'import torch\n'), ((12369, 12423), 'torch.sum', 'torch.sum', (['(col_prediction < dynamics_prediction)'], {'dim': '(1)'}), '(col_prediction < dynamics_prediction, dim=1)\n', (12378, 12423), False, 'import torch\n')] |
"""Neighborhood SPIN Module."""
import numpy as np
from .utils import check_distance_matrix, spin_energy
class NeighborhoodSPIN():
"""Neighborhood SPIN clustering method.
Parameters
----------
initial_sigma : float, optional (default=2e10)
Initial sigma value. This parameter controls the weight matrix
dispersion.
update_factor : float, optional (default=0.5)
The number that will update the sigma value at each iteration. Sigma
will be updated by sigma = sigma * update_factor.
max_iter : int, optional (default=100)
The maximum number of iterations of each round of sorting.
verbose : boolean, optional (default=False)
Flag indicating to show logs and information during the SPIN process.
Attributes
----------
distances_ : array, shape (n, n)
The original distances matrix provided.
permutation_ : array, shape (n, n)
Permutation matrix that can be applied to the original distances matrix
to get to the ordered distances matrix.
ordered_distances_ : array, shape (n, n)
Distances matrix reordered by the permutation matrix. Before run this
is the original distance matrix.
References
----------
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
Sortiug points into neighborhoods (SPIN): data analysis and
visualization by ordering distance matrices, Bioinformatics, Volume 21,
Issue 10, , Pages 2301–2308,
https://doi.org/10.1093/bioinformatics/bti329
"""
def __init__(self, intial_sigma=2**10, update_factor=0.5, max_iter=100,
verbose=False):
self.intial_sigma = intial_sigma
self.update_factor = update_factor
self.max_iter = max_iter
self.verbose = verbose
def run(self, X):
"""Execute the Neighborhood sorting.
Parameters
----------
X : array, shape (n, n)
Returns
-------
self : NeighborhoodSPIN
The object itself containing the ordered distances matrix.
"""
check_distance_matrix(X)
self.size_ = X.shape[0]
self.distances_ = X
self.permutation_ = np.identity(self.size_)
self.ordered_distances_ = self.permutation_.dot(self.distances_) \
.dot(self.permutation_.T)
sigma = self.intial_sigma
while sigma > 1:
weight_matrix = initial_weight_matrix(self.size_, sigma)
permutation = neighborhood(self.ordered_distances_,
weight_matrix,
self.max_iter,
self.verbose)
self.ordered_distances_ = permutation.dot(self.ordered_distances_)\
.dot(permutation.T)
self.permutation_ = permutation.dot(self.permutation_)
sigma = sigma * self.update_factor
return self
def neighborhood(distances, weight_matrix, max_iter=100, verbose=False):
"""Neighborhood SPIN algorithm.
Parameters
----------
distances : np.array, shape [n, n]
Distance symmetric square matrix.
weight_matrix : np.array, shape [n, n]
A initial weight matrix to update permutaions matrix.
max_iter : int, default=100
Maximum number of iterations.
verbose : bool
Verbosity flag, if it is true print useful information about the
process.
Returns
-------
permutation : np.array, shape [n, n]
Permutation matrix with the same dimensions of the distance matrix.
"""
permutation = np.identity(distances.shape[0])
mismatch_matrix = distances.dot(weight_matrix)
trace = np.trace(permutation.dot(mismatch_matrix))
for i in range(max_iter):
(new_permutation,
new_mismatch) = single_neighborhood_sort(distances, weight_matrix)
new_trace = np.trace(new_permutation.dot(new_mismatch))
if new_trace == trace:
break
weight_matrix = new_permutation.T.dot(weight_matrix)
trace = new_trace
return new_permutation
def single_neighborhood_sort(distances, weight_matrix):
"""Single stage on the neighborhood sorting process.
Parameters
----------
distances : array, shape (n, n)
The distances matrix to be sorted.
weight_matrix : array, shape (n, n)
The weight matrix to take into in account in sorting. The distribuition
on the matrix values control the scale of the sorting operations.
"""
size = len(distances)
mismatch = distances.dot(weight_matrix)
min_index = np.argmin(mismatch, axis=1)
min_values = mismatch[np.arange(size), min_index]
max_value = max(min_values)
sort_score = (min_index + 1.
- 0.1 * np.sign((size / 2. - min_index + 1.)) *
min_values / max_value)
sorted_ind = np.argsort(sort_score)
permutation = np.identity(distances.shape[0])[sorted_ind]
return permutation, mismatch
def initial_weight_matrix(size, sigma=1e2):
"""Initialize the weight matrix for neighborhood method.
This initial matrix is initialized with exponential coefficients and then
turned into a doubly stochastic matrix.
Parameters
----------
size : int
The size of the initial weight matrix.
sigma : float, optional, (default=1e2)
Coefficient to control dispersion of the weigth metrix coefficients.
Returns
-------
weight_matrix : array, shape (size, size)
The initial weight matrix. It is a square matrix.
"""
rows_index_matrix, columns_index_matrix = np.indices((size, size))
diff_index_matrix = rows_index_matrix - columns_index_matrix
exp_arg_index_matrix = -(diff_index_matrix**2)/(size*sigma)
non_normalized_weight_matrix = np.exp(exp_arg_index_matrix)
weight_matrix = sinkhorn_knopp_normalization_alogrithm(
non_normalized_weight_matrix
)
return weight_matrix
def sinkhorn_knopp_normalization_alogrithm(matrix, tolerance=1e-5,
max_iter=1000):
"""Turn matrices into doubly stochastic matrices.
Turn matrices into doubly stochastic matrix through the Sinkhorn Knopp
algorithm.
Parameters
----------
matrix : array
The matrix that will be normalized.
tolerance : float
The tolerance in the matrix approximation.
max_iter : int
If the tolerance is not reached this argument will set the maximun
number of iterations.
Returns
-------
norm_matrix : array
The normalized version from the original matrix.
References
----------
Sinkhorn, Richard. A Relationship Between Arbitrary Positive Matrices and
Doubly Stochastic Matrices. Ann. Math. Statist. 35 (1964), no. 2,
876--879. doi:10.1214/aoms/1177703591.
https://projecteuclid.org/euclid.aoms/1177703591
Sinkhorn, Richard, and <NAME>. "Concerning nonnegative matrices and
doubly stochastic matrices." Pacific Journal of Mathematics 21.2
(1967): 343-348.
http://www.yaroslavvb.com/papers/sinkhorn-concerning.pdf
"""
norm_matrix = matrix.copy()
for i in range(max_iter):
col_sum = norm_matrix.sum(axis=0)
norm_matrix = norm_matrix/col_sum
row_sum = norm_matrix.sum(axis=1).reshape(-1, 1)
norm_matrix = norm_matrix/row_sum
if (np.all(np.abs(norm_matrix.sum(axis=1) - 1) < tolerance) and
np.all(np.abs(norm_matrix.sum(axis=0) - 1) < tolerance)):
break
return norm_matrix
| [
"numpy.identity",
"numpy.indices",
"numpy.argsort",
"numpy.exp",
"numpy.sign",
"numpy.argmin",
"numpy.arange"
] | [((3705, 3736), 'numpy.identity', 'np.identity', (['distances.shape[0]'], {}), '(distances.shape[0])\n', (3716, 3736), True, 'import numpy as np\n'), ((4716, 4743), 'numpy.argmin', 'np.argmin', (['mismatch'], {'axis': '(1)'}), '(mismatch, axis=1)\n', (4725, 4743), True, 'import numpy as np\n'), ((4988, 5010), 'numpy.argsort', 'np.argsort', (['sort_score'], {}), '(sort_score)\n', (4998, 5010), True, 'import numpy as np\n'), ((5733, 5757), 'numpy.indices', 'np.indices', (['(size, size)'], {}), '((size, size))\n', (5743, 5757), True, 'import numpy as np\n'), ((5922, 5950), 'numpy.exp', 'np.exp', (['exp_arg_index_matrix'], {}), '(exp_arg_index_matrix)\n', (5928, 5950), True, 'import numpy as np\n'), ((2217, 2240), 'numpy.identity', 'np.identity', (['self.size_'], {}), '(self.size_)\n', (2228, 2240), True, 'import numpy as np\n'), ((5029, 5060), 'numpy.identity', 'np.identity', (['distances.shape[0]'], {}), '(distances.shape[0])\n', (5040, 5060), True, 'import numpy as np\n'), ((4770, 4785), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (4779, 4785), True, 'import numpy as np\n'), ((4889, 4926), 'numpy.sign', 'np.sign', (['(size / 2.0 - min_index + 1.0)'], {}), '(size / 2.0 - min_index + 1.0)\n', (4896, 4926), True, 'import numpy as np\n')] |
import xml.etree.ElementTree as elemTree
from openslide import OpenSlide
import matplotlib.pyplot as plt
import numpy as np
import os
os.environ['OPENCV_IO_MAX_IMAGE_PIXELS'] = str(2**64)
import cv2
from skimage import io
import sys
from tqdm import tqdm
from PIL import Image
import gc
import time
import datetime
def get_data_name(path):
image_path = path.split('/')
data_name = image_path[4].split('.')[0]
return data_name
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory. ' + directory)
def get_xml(tree, img, data_name, xx, yy):
root = tree.getroot()
destination = root.findall("destination/annotations/annotation")
# for x in destination:
# print(x.attrib['name'])
#
# print(x_y[0])
# print(destination[0].attrib['name'])
cnt = 0
for x in tqdm(destination):
point = []
name_ = x.attrib['name']
for a in x.findall('p'):
temp = []
temp.append(int(a.attrib['x']))
temp.append(int(a.attrib['y']))
point.append(temp)
point = np.array(point)
x_min = np.sort(point[:, 0], axis=0)[0]
x_max = np.sort(point[:, 0], axis=0)[-1]
y_min = np.sort(point[:, 1], axis=0)[0]
y_max = np.sort(point[:, 1], axis=0)[-1]
while 1:
if y_max - y_min != 1000:
y_max += 0.5
y_min -= 0.5
if x_max - x_min != 1000:
x_max += 0.5
x_min -= 0.5
if y_max - y_min == 1000.0 and x_max - x_min == 1000.0:
print(y_max, y_min, x_max, x_min)
y_max = int(y_max * yy)
y_min = int(y_min * yy)
x_max = int(x_max * xx)
x_min = int(x_min * xx)
img_save = img[y_min:y_max, x_min:x_max]
break
cv2.imwrite('/home/sjwang/biotox/datasets/mrxs_label/' + data_name + '/' + name_ + '_' + str(cnt) + '.png', img_save)
cnt += 1
def load_wsi(path):
wsi = OpenSlide(path)
level_dim = wsi.level_dimensions
x = level_dim[1][0]
y = level_dim[1][1]
xx = level_dim[1][0] / level_dim[0][0]
yy = level_dim[1][1] / level_dim[0][1]
img = wsi.read_region((0, 0), 1, (x, y))
print(xx, yy)
print('mrxs load end')
np_img = np.array(img)
del(img)
# np_img[np_img == 0] = 255
print('numpy end')
np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2BGR)
return np_img, xx, yy
def main(path):
print(path)
data_name = get_data_name(path)
createFolder('../../datasets/mrxs_label/' + data_name)
img, xx, yy = load_wsi(path)
tree = elemTree.parse('/home/sjwang/biotox/datasets/mrxs_label/1105-1/1105-1.xml')
get_xml(tree, img, data_name, xx, yy)
if __name__ == '__main__':
start = time.time()
main('../../datasets/mrxs_a/CELL1105-1.mrxs')
end = time.time()
print(datetime.timedelta(seconds=end-start)) | [
"os.path.exists",
"xml.etree.ElementTree.parse",
"os.makedirs",
"numpy.sort",
"tqdm.tqdm",
"numpy.array",
"cv2.cvtColor",
"openslide.OpenSlide",
"datetime.timedelta",
"time.time"
] | [((933, 950), 'tqdm.tqdm', 'tqdm', (['destination'], {}), '(destination)\n', (937, 950), False, 'from tqdm import tqdm\n'), ((2150, 2165), 'openslide.OpenSlide', 'OpenSlide', (['path'], {}), '(path)\n', (2159, 2165), False, 'from openslide import OpenSlide\n'), ((2444, 2457), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2452, 2457), True, 'import numpy as np\n'), ((2540, 2580), 'cv2.cvtColor', 'cv2.cvtColor', (['np_img', 'cv2.COLOR_RGBA2BGR'], {}), '(np_img, cv2.COLOR_RGBA2BGR)\n', (2552, 2580), False, 'import cv2\n'), ((2784, 2859), 'xml.etree.ElementTree.parse', 'elemTree.parse', (['"""/home/sjwang/biotox/datasets/mrxs_label/1105-1/1105-1.xml"""'], {}), "('/home/sjwang/biotox/datasets/mrxs_label/1105-1/1105-1.xml')\n", (2798, 2859), True, 'import xml.etree.ElementTree as elemTree\n'), ((2943, 2954), 'time.time', 'time.time', ([], {}), '()\n', (2952, 2954), False, 'import time\n'), ((3015, 3026), 'time.time', 'time.time', ([], {}), '()\n', (3024, 3026), False, 'import time\n'), ((1195, 1210), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (1203, 1210), True, 'import numpy as np\n'), ((3037, 3076), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(end - start)'}), '(seconds=end - start)\n', (3055, 3076), False, 'import datetime\n'), ((496, 521), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (510, 521), False, 'import os\n'), ((535, 557), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (546, 557), False, 'import os\n'), ((1228, 1256), 'numpy.sort', 'np.sort', (['point[:, 0]'], {'axis': '(0)'}), '(point[:, 0], axis=0)\n', (1235, 1256), True, 'import numpy as np\n'), ((1276, 1304), 'numpy.sort', 'np.sort', (['point[:, 0]'], {'axis': '(0)'}), '(point[:, 0], axis=0)\n', (1283, 1304), True, 'import numpy as np\n'), ((1325, 1353), 'numpy.sort', 'np.sort', (['point[:, 1]'], {'axis': '(0)'}), '(point[:, 1], axis=0)\n', (1332, 1353), True, 'import numpy as np\n'), ((1373, 1401), 'numpy.sort', 
'np.sort', (['point[:, 1]'], {'axis': '(0)'}), '(point[:, 1], axis=0)\n', (1380, 1401), True, 'import numpy as np\n')] |
import nibabel as nib
import numpy as np
from ATT.algorithm import surf_tools
_, faces = nib.freesurfer.read_geometry('/nfs/t1/nsppara/corticalsurface/fsaverage/surf/lh.sphere')
# data = nib.load('../fsfast_surf_mt_lh.mgz').get_data()
# data = data[...,0]
data = nib.load('/nfs/j3/userhome/huangtaicheng/hworkingshop/parcellation_MT/BAA/surface_proc/S0001/mt/mt.sm0.lh/mt_fix/t.mgz').get_data()
mask = nib.load('../mask_lh_thr7_mt.mgz').get_data()
vxall = np.where(data*mask>0)[0]
localmax = []
for i,vx in enumerate(vxall):
print('{0} start'.format(i))
neigh = surf_tools.get_n_ring_neighbor(vx, faces, n=2, ordinal = True)
neigh_mag = data[list(neigh[0]),0,0]
if np.all(data[vx,0,0]>neigh_mag):
localmax.append(vx)
mag = data[list(localmax),0,0]
locmax_sort = [localmax[i] for i in np.argsort(mag)[::-1]]
| [
"numpy.all",
"nibabel.load",
"numpy.where",
"numpy.argsort",
"ATT.algorithm.surf_tools.get_n_ring_neighbor",
"nibabel.freesurfer.read_geometry"
] | [((90, 183), 'nibabel.freesurfer.read_geometry', 'nib.freesurfer.read_geometry', (['"""/nfs/t1/nsppara/corticalsurface/fsaverage/surf/lh.sphere"""'], {}), "(\n '/nfs/t1/nsppara/corticalsurface/fsaverage/surf/lh.sphere')\n", (118, 183), True, 'import nibabel as nib\n'), ((460, 485), 'numpy.where', 'np.where', (['(data * mask > 0)'], {}), '(data * mask > 0)\n', (468, 485), True, 'import numpy as np\n'), ((575, 635), 'ATT.algorithm.surf_tools.get_n_ring_neighbor', 'surf_tools.get_n_ring_neighbor', (['vx', 'faces'], {'n': '(2)', 'ordinal': '(True)'}), '(vx, faces, n=2, ordinal=True)\n', (605, 635), False, 'from ATT.algorithm import surf_tools\n'), ((686, 720), 'numpy.all', 'np.all', (['(data[vx, 0, 0] > neigh_mag)'], {}), '(data[vx, 0, 0] > neigh_mag)\n', (692, 720), True, 'import numpy as np\n'), ((265, 395), 'nibabel.load', 'nib.load', (['"""/nfs/j3/userhome/huangtaicheng/hworkingshop/parcellation_MT/BAA/surface_proc/S0001/mt/mt.sm0.lh/mt_fix/t.mgz"""'], {}), "(\n '/nfs/j3/userhome/huangtaicheng/hworkingshop/parcellation_MT/BAA/surface_proc/S0001/mt/mt.sm0.lh/mt_fix/t.mgz'\n )\n", (273, 395), True, 'import nibabel as nib\n'), ((405, 439), 'nibabel.load', 'nib.load', (['"""../mask_lh_thr7_mt.mgz"""'], {}), "('../mask_lh_thr7_mt.mgz')\n", (413, 439), True, 'import nibabel as nib\n'), ((813, 828), 'numpy.argsort', 'np.argsort', (['mag'], {}), '(mag)\n', (823, 828), True, 'import numpy as np\n')] |
import gzip
import numpy as np
import keras as kr
import sklearn.preprocessing as pre
import matplotlib.pyplot as plt
from keras.models import load_model
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Flatten
(x_train, y_train), (x_test, y_test) = mnist.load_data()
inputs = ~np.array(x_train).reshape(60000,784)/255.0
inputs = inputs.astype('float32')
model = kr.models.Sequential()
#model.add(kr.layers.Dense(units=1568, activation='relu',input_dim=784))
model.add(kr.layers.Dense(units=784, activation='relu'))
#model.add(Dropout(0.01))
model.add(kr.layers.Dense(units=392, activation='relu'))
#model.add(Dropout(0.005))
model.add(kr.layers.Dense(units=98, activation='relu'))
#model.add(kr.layers.Dense(units=30, activation='relu'))
model.add(kr.layers.Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
y_train = y_train.astype('float32')
encoder = pre.LabelBinarizer()
encoder.fit(y_train)
outputs = encoder.transform(y_train)
model.fit(inputs, outputs, epochs=100, batch_size=100)
test_inputs = ~np.array(x_test).reshape(10000,784)/255.0
test_inputs = test_inputs.astype('float32')
y_test = y_test.astype('float32')
encoder = pre.LabelBinarizer()
encoder.fit(y_test)
test_outputs = encoder.transform(y_test)
#model.save("784-392-98-10-nd.h5")
scores = model.evaluate(test_inputs, test_outputs, verbose=0)
print(scores) | [
"sklearn.preprocessing.LabelBinarizer",
"keras.datasets.mnist.load_data",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Dense"
] | [((276, 293), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (291, 293), False, 'from keras.datasets import mnist\n'), ((393, 415), 'keras.models.Sequential', 'kr.models.Sequential', ([], {}), '()\n', (413, 415), True, 'import keras as kr\n'), ((964, 984), 'sklearn.preprocessing.LabelBinarizer', 'pre.LabelBinarizer', ([], {}), '()\n', (982, 984), True, 'import sklearn.preprocessing as pre\n'), ((1248, 1268), 'sklearn.preprocessing.LabelBinarizer', 'pre.LabelBinarizer', ([], {}), '()\n', (1266, 1268), True, 'import sklearn.preprocessing as pre\n'), ((500, 545), 'keras.layers.Dense', 'kr.layers.Dense', ([], {'units': '(784)', 'activation': '"""relu"""'}), "(units=784, activation='relu')\n", (515, 545), True, 'import keras as kr\n'), ((583, 628), 'keras.layers.Dense', 'kr.layers.Dense', ([], {'units': '(392)', 'activation': '"""relu"""'}), "(units=392, activation='relu')\n", (598, 628), True, 'import keras as kr\n'), ((667, 711), 'keras.layers.Dense', 'kr.layers.Dense', ([], {'units': '(98)', 'activation': '"""relu"""'}), "(units=98, activation='relu')\n", (682, 711), True, 'import keras as kr\n'), ((780, 827), 'keras.layers.Dense', 'kr.layers.Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (795, 827), True, 'import keras as kr\n'), ((305, 322), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (313, 322), True, 'import numpy as np\n'), ((1117, 1133), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (1125, 1133), True, 'import numpy as np\n')] |
import sys
from NiaPy.algorithms.basic import BatAlgorithm as BA
from NiaPy.task import StoppingTask
from sklearn.linear_model import LogisticRegression
from niaaml.preprocessing.feature_selection.feature_selection_algorithm import FeatureSelectionAlgorithm
from niaaml.utilities import ParameterDefinition, MinMax
from niaaml.preprocessing.feature_selection._feature_selection_threshold_benchmark import _FeatureSelectionThresholdBenchmark
import numpy
__all__ = [
'BatAlgorithm'
]
class BatAlgorithm(FeatureSelectionAlgorithm):
    r"""Implementation of feature selection using BA algorithm.
    Date:
        2020
    Author:
        <NAME>
    Reference:
        The implementation is adapted according to the following article:
        <NAME>, <NAME>, <NAME>, <NAME>., <NAME>. A novel self-adaptive differential evolution for feature selection using threshold mechanism . In: Proceedings of the 2018 IEEE Symposium on Computational Intelligence (SSCI 2018), pp. 17-24, 2018.
    Reference URL:
        http://iztok-jr-fister.eu/static/publications/236.pdf
    License:
        MIT
    See Also:
        * :class:`niaaml.preprocessing.feature_selection.feature_selection_algorithm.FeatureSelectionAlgorithm`
    """
    Name = 'Bat Algorithm'

    def __init__(self, **kwargs):
        r"""Initialize BA feature selection algorithm.
        """
        # Tunable hyperparameters of the wrapped Bat Algorithm and their search ranges.
        self._params = dict(
            A = ParameterDefinition(MinMax(0.5, 1.0), param_type=float),
            r = ParameterDefinition(MinMax(0.0, 0.5), param_type=float),
            Qmin = ParameterDefinition(MinMax(0.0, 1.0), param_type=float),
            Qmax = ParameterDefinition(MinMax(1.0, 2.0), param_type=float)
        )
        self.__ba = BA(NP=10)

    def set_parameters(self, **kwargs):
        r"""Set the parameters/arguments of the algorithm.
        """
        # Keep the population size configured at construction time.
        kwargs['NP'] = self.__ba.NP
        self.__ba.setParameters(**kwargs)

    def __final_output(self, sol):
        r"""Calculate final array of features.
        Arguments:
            sol (numpy.ndarray[float]): Individual of population/ possible solution.
        Returns:
            numpy.ndarray[bool]: Mask of selected features.
        """
        # The last component of the solution acts as the selection threshold:
        # a feature is kept iff its score is not below the threshold.
        # Vectorized equivalent of the original element-wise loop.
        return sol[:-1] >= sol[-1]

    def select_features(self, x, y, **kwargs):
        r"""Perform the feature selection process.
        Arguments:
            x (pandas.core.frame.DataFrame): Array of original features.
            y (pandas.core.series.Series) Expected classifier results.
        Returns:
            pandas.core.frame.DataFrame: Mask of selected features.
        """
        num_features = x.shape[1]
        benchmark = _FeatureSelectionThresholdBenchmark(x, y)
        # One extra dimension encodes the adaptive selection threshold.
        task = StoppingTask(D=num_features+1, nFES=1000, benchmark=benchmark)
        # Run the optimizer for its side effect on the benchmark; the returned
        # value was previously bound to an unused variable.
        self.__ba.run(task)
        return self.__final_output(benchmark.get_best_solution())

    def to_string(self):
        r"""User friendly representation of the object.
        Returns:
            str: User friendly representation of the object.
        """
        return FeatureSelectionAlgorithm.to_string(self).format(name=self.Name, args=self._parameters_to_string(self.__ba.getParameters()))
| [
"numpy.ones",
"niaaml.preprocessing.feature_selection._feature_selection_threshold_benchmark._FeatureSelectionThresholdBenchmark",
"niaaml.preprocessing.feature_selection.feature_selection_algorithm.FeatureSelectionAlgorithm.to_string",
"NiaPy.task.StoppingTask",
"niaaml.utilities.MinMax",
"NiaPy.algorith... | [((1729, 1738), 'NiaPy.algorithms.basic.BatAlgorithm', 'BA', ([], {'NP': '(10)'}), '(NP=10)\n', (1731, 1738), True, 'from NiaPy.algorithms.basic import BatAlgorithm as BA\n'), ((2226, 2266), 'numpy.ones', 'numpy.ones', (['(sol.shape[0] - 1)'], {'dtype': 'bool'}), '(sol.shape[0] - 1, dtype=bool)\n', (2236, 2266), False, 'import numpy\n'), ((2865, 2906), 'niaaml.preprocessing.feature_selection._feature_selection_threshold_benchmark._FeatureSelectionThresholdBenchmark', '_FeatureSelectionThresholdBenchmark', (['x', 'y'], {}), '(x, y)\n', (2900, 2906), False, 'from niaaml.preprocessing.feature_selection._feature_selection_threshold_benchmark import _FeatureSelectionThresholdBenchmark\n'), ((2922, 2986), 'NiaPy.task.StoppingTask', 'StoppingTask', ([], {'D': '(num_features + 1)', 'nFES': '(1000)', 'benchmark': 'benchmark'}), '(D=num_features + 1, nFES=1000, benchmark=benchmark)\n', (2934, 2986), False, 'from NiaPy.task import StoppingTask\n'), ((3274, 3315), 'niaaml.preprocessing.feature_selection.feature_selection_algorithm.FeatureSelectionAlgorithm.to_string', 'FeatureSelectionAlgorithm.to_string', (['self'], {}), '(self)\n', (3309, 3315), False, 'from niaaml.preprocessing.feature_selection.feature_selection_algorithm import FeatureSelectionAlgorithm\n'), ((1438, 1454), 'niaaml.utilities.MinMax', 'MinMax', (['(0.5)', '(1.0)'], {}), '(0.5, 1.0)\n', (1444, 1454), False, 'from niaaml.utilities import ParameterDefinition, MinMax\n'), ((1511, 1527), 'niaaml.utilities.MinMax', 'MinMax', (['(0.0)', '(0.5)'], {}), '(0.0, 0.5)\n', (1517, 1527), False, 'from niaaml.utilities import ParameterDefinition, MinMax\n'), ((1587, 1603), 'niaaml.utilities.MinMax', 'MinMax', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1593, 1603), False, 'from niaaml.utilities import ParameterDefinition, MinMax\n'), ((1663, 1679), 'niaaml.utilities.MinMax', 'MinMax', (['(1.0)', '(2.0)'], {}), '(1.0, 2.0)\n', (1669, 1679), False, 'from niaaml.utilities import ParameterDefinition, 
MinMax\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from qpsolvers import solve_qp
from sklearn.metrics import confusion_matrix
from mpl_toolkits import mplot3d
import random
#Importing Dataset
# Elbow method: run a hand-rolled K-Means for k = 1..8 and plot final cost vs k.
dataset = pd.read_csv('Mall_Customers.csv')
# Use all columns from the 3rd onward as numeric features.
X = dataset.iloc[:, 2:].values
k = [1, 2, 3, 4, 5, 6, 7, 8]
Cost_points = np.zeros(len(k))          # best (lowest) final cost found for each k
for n in range(len(k)):
    # 10 random restarts per k to reduce sensitivity to centroid initialization.
    min_cost = np.zeros(10)
    for p in range(10):
        mu = np.zeros((k[n], X.shape[1]))
        # Forgy initialization: pick k[n] distinct data points as initial centroids.
        indices = random.sample(range(len(X)), k[n])
        Class = []                    # Class[i] = indices of points assigned to cluster i
        for i in range(k[n]):
            Class.append([])
            mu[i, :] = X[indices[i]]
        No_iter = 30                  # fixed iteration budget; no convergence test
        Cost_fun = np.zeros(No_iter)
        for m in range(No_iter):
            # Assignment step: attach each point to its nearest centroid.
            for j in range(len(X)):
                dis = np.zeros(k[n])
                for l in range(k[n]):
                    dis[l] = np.linalg.norm(X[j, :]- mu[l, :])
                Class[np.argmin(dis)].append(j)
            # Update step: recompute centroids and within-cluster squared cost.
            Cost = np.zeros(k[n])
            for i in range(k[n]):
                # NOTE(review): raises ZeroDivisionError if a cluster ends up empty.
                mu[i, :] = 1/len(X[Class[i]])*sum(X[Class[i]])
                Cost[i] = sum(sum(((X[Class[i], :]-mu[i, :])**2).T))
                Class[i] = []         # reset assignments for the next iteration
            Cost_fun[m] = sum(Cost)
        min_cost[p] = Cost_fun[-1]    # cost after the final iteration of this restart
    Cost_points[n] = np.min(min_cost)
plt.plot(k, Cost_points)
plt.title("Cost vs different k(Elbow graph)")
plt.xlabel("k")
plt.ylabel("cost")
plt.show()
print("Elbow graph shows data can be clustered in 5 categories")
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.min",
"numpy.argmin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((226, 259), 'pandas.read_csv', 'pd.read_csv', (['"""Mall_Customers.csv"""'], {}), "('Mall_Customers.csv')\n", (237, 259), True, 'import pandas as pd\n'), ((1290, 1314), 'matplotlib.pyplot.plot', 'plt.plot', (['k', 'Cost_points'], {}), '(k, Cost_points)\n', (1298, 1314), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1360), 'matplotlib.pyplot.title', 'plt.title', (['"""Cost vs different k(Elbow graph)"""'], {}), "('Cost vs different k(Elbow graph)')\n", (1324, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1376), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (1371, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1395), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (1387, 1395), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1406), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1404, 1406), True, 'import matplotlib.pyplot as plt\n'), ((391, 403), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (399, 403), True, 'import numpy as np\n'), ((1273, 1289), 'numpy.min', 'np.min', (['min_cost'], {}), '(min_cost)\n', (1279, 1289), True, 'import numpy as np\n'), ((441, 469), 'numpy.zeros', 'np.zeros', (['(k[n], X.shape[1])'], {}), '((k[n], X.shape[1]))\n', (449, 469), True, 'import numpy as np\n'), ((678, 695), 'numpy.zeros', 'np.zeros', (['No_iter'], {}), '(No_iter)\n', (686, 695), True, 'import numpy as np\n'), ((970, 984), 'numpy.zeros', 'np.zeros', (['k[n]'], {}), '(k[n])\n', (978, 984), True, 'import numpy as np\n'), ((787, 801), 'numpy.zeros', 'np.zeros', (['k[n]'], {}), '(k[n])\n', (795, 801), True, 'import numpy as np\n'), ((869, 903), 'numpy.linalg.norm', 'np.linalg.norm', (['(X[j, :] - mu[l, :])'], {}), '(X[j, :] - mu[l, :])\n', (883, 903), True, 'import numpy as np\n'), ((925, 939), 'numpy.argmin', 'np.argmin', (['dis'], {}), '(dis)\n', (934, 939), True, 'import numpy as np\n')] |
import math, torch
import numpy as np
from numpy.random import normal as normrnd
from scipy.stats import multivariate_normal, norm
from scipy.linalg import sqrtm, expm
from pdb import set_trace as bp
from include.DNN import DNN
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from include.dataStructures.particle import Particle
class localize:
    """RSSI-based localization on simulated data via a particle filter or FastSLAM v1.

    An optional pretrained DNN classifier re-weights range measurements as
    LOS/NLOS; ``hardClas`` switches between hard classification and soft
    (probability-weighted) mixing of the two hypotheses.
    """
    def __init__(self, numP, su, sz, distMap, mat, wayPts, R, dim, useClas, hardClas, modelpath="./models/best.pth"):
        # numP: particle count; su/sz: motion / measurement noise std-devs.
        self.np = numP
        self.sz = sz
        self.dists = distMap         # per-waypoint measurement lists
        self.dim = dim               # 2 or 3 spatial dimensions
        self.wayPts = wayPts
        self.pts = self.convert(wayPts)      # waypoint deltas (odometry input)
        self.nAP = mat.numAPs
        self.tx = mat.Tx             # ground-truth AP positions
        self.R = R                   # sensing radius: APs beyond R are ignored
        self.start = self.wayPts[0]
        self.su = su
        self.path = []               # estimated trajectory
        self.APLocs = []             # estimated AP locations (FastSLAM output)
        self.IDs = []
        self.use = useClas           # use the LOS/NLOS classifier?
        self.hard = hardClas         # hard vs soft classification
        self.modelpath = modelpath
        self.model = None
        self.confidence = [0, 0, 0, 0] # true positive, false positive, true negative, false negative
        if self.dim == 2: self.su = su[0:2]
        if self.use: self.load_model()
    def print(self, samples):
        """Debug helper: dump every particle's pose and weight."""
        for i in range(self.np):
            print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
    def distance(self, x, y):
        """Euclidean distance; 3-D when both points have three components."""
        if len(x)==3 and len(y)==3:
            return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
        else:
            return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
    def MSE(self):
        """Mean localization error over the traversed path."""
        mse = 0
        for i in range(len(self.pts)):
            mse += self.distance(self.wayPts[i], self.path[i])
        mse = mse/len(self.pts)
        return mse
    def getCDF(self):
        """Per-waypoint localization error (for CDF plots)."""
        cdf = [0 for x in range(len(self.pts))]
        for i in range(len(self.pts)):
            cdf[i] = self.distance(self.wayPts[i], self.path[i])
        return cdf
    def distrib(self):
        """Initialize all particles at the first waypoint with uniform weight."""
        start = self.wayPts[0] ; samples = []
        if self.dim == 2: start = [start[0], start[1]]
        if self.dim == 3: start = start
        for _ in range(self.np):
            samples.append(Particle(start, 1/self.np))
        return samples
    def convert(self, pts):
        """Convert absolute waypoints into per-step displacement vectors."""
        n = len(pts)
        rtPts = []
        for i in range(1, n):
            dx = pts[i][0] - pts[i-1][0]
            dy = pts[i][1] - pts[i-1][1]
            if self.dim==2: rtPts.append([dx, dy])
            if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
        return rtPts
    '''
    load pytorch model and save dict
    '''
    def load_model(self):
        model = DNN()
        path = self.modelpath
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint['state_dict'])
        self.model = model
        self.model.eval()
    '''
    classify into LOS/NLOS
    '''
    def classify(self, rssi, euc):
        # Returns 1 for NLOS, 0 for LOS (argmax of the 2-class network output).
        inp = torch.tensor([rssi, euc])
        out = self.model(inp.float())
        pred = 1 if (out[1]>out[0]) else 0
        return pred
    '''
    weighting using the normpdf subroutine
    '''
    def getWeight(self, dz):
        # Product of independent 1-D Gaussian likelihoods over nonzero innovations.
        norpdf = 1
        for i in range(len(dz)):
            if dz[i]!=0:
                norpdf *= norm.pdf(dz[i], 0, self.sz[i])
        return norpdf
    '''
    weighting using the mvnpdf subroutine
    '''
    def getMultiWeight(self, dz):
        idx = [i for i, e in enumerate(dz) if e != 0]
        val = [] ; sig = []
        if len(idx)==0:
            return 1/self.np
        for i in idx:
            val.append(dz[i])
            sig.append(self.sz[i])
        mvn = multivariate_normal([0]*len(idx), np.diag(sig))
        return mvn.pdf(val)
    '''
    return is not required as python works on
    pass-by-reference and there is no way of
    emulating pass-by-value
    '''
    def motion_model(self, samples, point, su):
        # Propagate every particle by the odometry step plus Gaussian noise.
        for i in range(self.np):
            dx = point[0] - normrnd(0, su[0])
            dy = point[1] - normrnd(0, su[1])
            if self.dim == 2: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
            if self.dim == 3: dz = point[2] - normrnd(0, su[2])
            if self.dim == 3: pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy, samples[i].pose[2] + dz]
            samples[i].pose = pose
    '''
    measurement model for the particle filter
    label for dMap = 1 : NLOS , 0 : LOS
    '''
    def measure_model(self, samples, z):
        totalWt = 0 ; nAP = len(z)
        for i in range(self.np):
            # dz[j] holds the innovation for AP j; 0 means "no usable measurement".
            dz = [0 for x in range(nAP)]
            for j in range(nAP):
                tx = self.tx[j] ; pos = samples[i].pose
                d = self.distance(tx, pos)
                if d <= self.R:
                    if self.use:
                        if self.hard:
                            label = self.classify(z[j].rssi, d)
                            # confidence matrix calculation
                            if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                            elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false negative
                            elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                            elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false positive
                            if label==0:
                                dz[j] = abs(z[j].rssi-d)
                        else:
                            # Soft mixing: weight LOS/NLOS innovations by class probabilities.
                            inp = torch.tensor([z[j].rssi, d])
                            out = self.model(inp.float()).detach().numpy()
                            dz[j] = out[0]*abs(z[j].rssi-d) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
                            # confidence matrix calculation
                            if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                            elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false positive
                            elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                            elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false negative
                    else:
                        dz[j] = abs(z[j].rssi-d)
            wt = self.getWeight(dz)
            samples[i].w *= wt
            totalWt += wt
        # Normalize weights; fall back to uniform if all likelihoods underflowed.
        if totalWt!=0:
            for i in range(self.np):
                samples[i].w = samples[i].w / totalWt
        else:
            for i in range(self.np):
                samples[i].w = 1/self.np
    '''
    measurement model for fast slam v1
    label for dMap = 1 : NLOS , 0 : LOS
    '''
    def fast_measure_model(self, samples, z):
        # Qt: initial landmark (AP) position covariance.
        if self.dim == 2: Qt = np.diag([10,10])
        if self.dim == 3: Qt = np.diag([10,10,10])
        Qt = Qt.tolist() ; nAP = len(z) ; totWt = 0
        for i in range(self.np):
            for j in range(nAP):
                tx = np.array(self.tx[j]) ; pos = np.array(samples[i].pose)
                d = self.distance(tx, pos)
                if d <= self.R:
                    # initialize particle map
                    if j not in samples[i].mapID:
                        samples[i].mapMu.append(tx)
                        samples[i].mapSigma.append(Qt)
                        samples[i].mapID.append(j)
                        samples[i].hashMap[j] = len(samples[i].mapID) - 1
                        samples[i].w = 1/self.np
                    # update particle map
                    else:
                        ID = samples[i].hashMap[j]
                        # prediction step
                        muHat = samples[i].mapMu[ID]
                        sigHat = np.array(samples[i].mapSigma[ID])
                        # update step
                        dHat = self.distance(pos, muHat)
                        # use classifier or not
                        if self.use:
                            if self.hard:
                                label = self.classify(z[j].rssi, dHat)
                                # confidence matrix calculation
                                if label==0 and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif label==0 and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false negative
                                elif label==1 and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                                elif label==1 and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false positive
                                if label==0:
                                    innov = abs(z[j].rssi-dHat)
                                else:
                                    # NLOS under hard classification: skip this update.
                                    continue
                            else:
                                inp = torch.tensor([z[j].rssi, dHat])
                                out = self.model(inp.float()).detach().numpy()
                                innov = out[0]*abs(z[j].rssi - dHat) + out[1]*abs(z[j].rssi - normrnd(self.R,3))
                                # confidence matrix calculation
                                if out[0]>out[1] and z[j].label==0: self.confidence[0]= self.confidence[0]+1 # true positive
                                elif out[0]>out[1] and z[j].label==1: self.confidence[1]= self.confidence[1]+1 # false negative
                                elif out[0]<out[1] and z[j].label==1: self.confidence[2]= self.confidence[2]+1 # true negative
                                elif out[0]<out[1] and z[j].label==0: self.confidence[3]= self.confidence[3]+1 # false positive
                        else:
                            innov = abs(z[j].rssi - dHat)
                        # EKF update of the landmark estimate: H is the Jacobian of
                        # the range measurement w.r.t. the landmark position.
                        dx = muHat[0] - pos[0] ; dy = muHat[1] - pos[1]
                        den = math.sqrt(dx**2 + dy**2)
                        H = np.array([dx/den, dy/den])
                        if self.dim==3:
                            dz = muHat[2] - pos[2]
                            den = math.sqrt(dx**2 + dy**2 + dz**2)
                            H = np.array([dx/den, dy/den, dz/den])
                        try:
                            Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
                        except:
                            bp()
                        # Kalman Gain
                        K = np.matmul(sigHat, H)/Q
                        # update pose/ covar
                        mu = muHat + innov*K
                        K = K.reshape((self.dim,1))
                        sig = (np.identity(self.dim) - K*H)*sigHat
                        samples[i].mapMu[ID] = mu.reshape((self.dim,))
                        samples[i].mapSigma[ID] = sig.tolist()
                        samples[i].w = max(samples[i].w, math.sqrt(2*math.pi*Q)*math.exp(-0.5*(innov**2)/Q))
            totWt += samples[i].w
        # normalize the weights
        if totWt==0:
            for i in range(self.np):
                samples[i].w = 1/self.np
        else:
            for i in range(self.np):
                samples[i].w = samples[i].w/totWt
    '''
    resampling algorithm applicable to both
    particle filter and fast slam because of
    common structure of particle
    '''
    def resample(self, samples):
        # Stratified resampling via the cumulative weight distribution Q.
        idx = [0]*self.np ; Q = [0]*self.np ; Q[0] = samples[0].w
        for i in range(1, self.np):
            Q[i] = samples[i].w + Q[i-1]
        t = np.random.rand(self.np+1, 1)
        T = np.sort(t, axis=0)
        T[self.np] = 1 ; i,j = 0,0
        while i<self.np and j<self.np:
            if T[i] < Q[j]:
                idx[i] = j
                i += 1
            else:
                j += 1
        # Only resample when enough distinct particles survive (diversity guard).
        if len(set(idx))>0.2*self.np:
            for i in range(self.np):
                samples[i].pose = samples[idx[i]].pose
                samples[i].w = 1/self.np
                samples[i].mapMu = samples[idx[i]].mapMu
                samples[i].mapID = samples[idx[i]].mapID
                samples[i].mapSigma = samples[idx[i]].mapSigma
                samples[i].hashMap = samples[idx[i]].hashMap
    '''
    Calculates the effective number of particles
    in the sampled distribution.
    '''
    def neff(self, samples):
        wghts = [0]*self.np ; totWt = 0
        for i in range(self.np):
            wghts[i] = samples[i].w
            totWt += samples[i].w
        den = 0
        for i in range(self.np):
            wghts[i] = (wghts[i]/totWt)**2
            den += wghts[i]
        return 1/den
    '''
    Calculates weighted mean and variance of the
    sample distribution
    '''
    def meanVar(self, samples):
        # Unweighted mean/covariance of the particle poses.
        totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
        for i in range(self.np):
            mu[0] += samples[i].pose[0]
            mu[1] += samples[i].pose[1]
            if self.dim==3: mu[2] += samples[i].pose[2]
            totWt += samples[i].w
        if self.dim==2: mu = [mu[0]/self.np, mu[1]/self.np]
        if self.dim==3: mu = [mu[0]/self.np, mu[1]/self.np, mu[2]/self.np]
        for i in range(self.np):
            if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
            if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
            sig += np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
        sig = sig/self.np
        return mu, sig
    '''
    Calculates weighted mean and variance of the
    sample distribution
    '''
    def weightedMeanVar(self, samples):
        # Weight-averaged mean/covariance of the particle poses.
        totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
        for i in range(self.np):
            mu[0] += samples[i].w*samples[i].pose[0]
            mu[1] += samples[i].w*samples[i].pose[1]
            if self.dim==3: mu[2] += samples[i].w*samples[i].pose[2]
            totWt += samples[i].w
        if self.dim==2: mu = [mu[0]/totWt, mu[1]/totWt]
        if self.dim==3: mu = [mu[0]/totWt, mu[1]/totWt, mu[2]/totWt]
        for i in range(self.np):
            if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
            if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
            sig += samples[i].w*np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
        sig = sig/totWt
        return mu, sig
    '''
    Get the maximum weighted particle and use it
    to calculate the IDs of the APs discovered &
    the locations of the discovered APs
    '''
    def getAPLocs(self, samples):
        maxWeight = -9999999999 ; idx = 0
        for i in range(self.np):
            if samples[i].w > maxWeight:
                maxWeight = samples[i].w
                idx = i
        self.APLocs = samples[idx].mapMu
        self.IDs = samples[idx].mapID
    '''
    Plot the particle poses for each particle. Can
    only be used for debugging as of now as animation
    support is yet to be added
    '''
    def plot(self, samples):
        x = [] ; y = []
        for i in range(self.np):
            x.append(samples[i].pose[0])
            y.append(samples[i].pose[1])
        plt.plot(x,y,'c.')
        mXY,_ = self.meanVar(samples)
        wmXY,_ = self.weightedMeanVar(samples)
        plt.plot(mXY[0],mXY[1],'ro')
        plt.plot(wmXY[0],wmXY[1],'bo')
        plt.xlim([-100,300])
        plt.ylim([-100,300])
        plt.show()
    '''
    The main Particle filter class
    '''
    def particleFilter(self):
        self.path.append(self.wayPts[0])
        samples = self.distrib()
        print("Running Particle Filter ..")
        for i in range(len(self.pts)):
            # provide action update
            self.motion_model(samples, self.pts[i], self.su)
            # provide measurement update
            self.measure_model(samples, self.dists[i])
            # resample only when number of effective particle drops
            if self.neff(samples) <= 1/3*self.np:
                self.resample(samples)
            mXY, _ = self.weightedMeanVar(samples)
            self.path.append(mXY)
        print("Particle Filter has finished running ..")
    '''
    The main Fast SLAM v1 class
    '''
    def FastSLAM(self):
        self.path.append(self.wayPts[0])
        samples = self.distrib()
        print("Running Fast SLAM ..")
        for i in range(len(self.pts)):
            # provide action update
            self.motion_model(samples, self.pts[i], self.su)
            # provide measurement update
            self.fast_measure_model(samples, self.dists[i])
            # resample only when number of effective particle drops
            if self.neff(samples) <= 1/3*self.np:
                self.resample(samples)
            mXY, _ = self.weightedMeanVar(samples)
            self.path.append(mXY)
        self.getAPLocs(samples)
        print("FastSLAM has finished running ..")
'''
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
'''
'''
Localizer for Experimental Setup:
1. Contains only FastSlam
2. Measurement Model updated to read data from experiments
'''
class localizeExp:
    """FastSLAM localizer for the experimental (real-measurement) setup.

    Mirrors :class:`localize`, but reads measurements from an experiment map
    object (named APs, recorded RSSI distance maps) and works in 2-D only in
    the motion/measurement models.
    """
    def __init__(self, numP, su, sz, map, useClas, hardClas, modelpath="./models/best.pth"):
        # numP: particle count; su/sz: motion / measurement noise std-devs.
        self.np = numP
        self.sz = sz
        self.dim = map.dim
        self.wayPts = map.wayPts
        self.pts = self.convert(self.wayPts)     # waypoint deltas (odometry input)
        self.dim = map.dim
        self.TXName = map.TXName                 # AP names, indexed like the distance map
        self.numPts = map.numPts
        self.numAPs = map.numAPs
        self.maxZ = map.maxZ
        self.dists = map.distMap                 # per-waypoint recorded measurements
        self.name2MAC = map.name2MAC
        self.name2Pos = map.name2Pos             # AP name -> ground-truth position
        self.MAC2Name = map.MAC2Name
        self.start = self.wayPts[0][:2]
        self.su = su
        self.path = []                           # estimated trajectory
        self.APLocs = []                         # estimated AP locations
        self.IDs = []
        self.use = useClas                       # use the LOS/NLOS classifier?
        self.hard = hardClas                     # hard vs soft classification
        self.modelpath = modelpath
        self.model = None
        self.confidence = [0, 0, 0, 0] # true positive, false positive, true negative, false negative
        if self.dim == 2: self.su = su[0:2]
        if self.use: self.load_model()
    def print(self, samples):
        """Debug helper: dump every particle's pose and weight."""
        for i in range(self.np):
            print("pose: ", samples[i].pose, " | weight: ", samples[i].w)
    def distance(self, x, y):
        """Euclidean distance; 3-D when both points have three components."""
        if len(x)==3 and len(y)==3:
            return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 + (x[2]-y[2])**2 )
        else:
            return math.sqrt( (x[1]-y[1])**2 + (x[0]-y[0])**2 )
    def MSE(self):
        """Mean localization error over the traversed path."""
        mse = 0
        for i in range(len(self.pts)):
            mse += self.distance(self.wayPts[i], self.path[i])
        mse = mse/len(self.pts)
        return mse
    def getCDF(self):
        """Per-waypoint localization error (for CDF plots)."""
        cdf = [0 for x in range(len(self.pts))]
        for i in range(len(self.pts)):
            cdf[i] = self.distance(self.wayPts[i], self.path[i])
        return cdf
    def distrib(self):
        """Initialize all particles at the first waypoint with uniform weight."""
        start = self.wayPts[0] ; samples = []
        if self.dim == 2: start = [start[0], start[1]]
        if self.dim == 3: start = start
        for _ in range(self.np):
            samples.append(Particle(start, 1/self.np))
        return samples
    def convert(self, pts):
        """Convert absolute waypoints into per-step displacement vectors."""
        n = len(pts)
        rtPts = []
        for i in range(1, n):
            dx = pts[i][0] - pts[i-1][0]
            dy = pts[i][1] - pts[i-1][1]
            if self.dim==2: rtPts.append([dx, dy])
            if self.dim==3: dz = pts[i][2] - pts[i-1][2] ; rtPts.append([dx, dy, dz])
        return rtPts
    '''
    load pytorch model and save dict
    '''
    def load_model(self):
        model = DNN()
        path = self.modelpath
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint['state_dict'])
        self.model = model
        self.model.eval()
    '''
    classify into LOS/NLOS
    '''
    def classify(self, rssi, euc):
        # Returns 1 for NLOS, 0 for LOS (argmax of the 2-class network output).
        inp = torch.tensor([rssi, euc])
        out = self.model(inp.float())
        pred = 1 if (out[1]>out[0]) else 0
        return pred
    '''
    weighting using the normpdf subroutine
    '''
    def getWeight(self, dz):
        # Product of independent 1-D Gaussian likelihoods over nonzero innovations.
        norpdf = 1
        for i in range(len(dz)):
            if dz[i]!=0:
                norpdf *= norm.pdf(dz[i], 0, self.sz[i])
        return norpdf
    def rssi2Dist(self, rssi):
        '''
        Convert an RSSI reading (dBm) to an approximate distance via a
        log-distance path-loss model; sources:
        https://stackoverflow.com/questions/11217674/how-to-calculate-distance-from-wifi-router-using-signal-strength
        http://pylayers.github.io/pylayers/notebook/2-AP/CoverageMetis.html
        '''
        if abs(rssi) > 60: exp = (abs(rssi) - 32.44)/20
        else : exp = (abs(rssi) - 12.55)/20
        val = (10**exp) / 60
        return val
    '''
    weighting using the mvnpdf subroutine
    '''
    def getMultiWeight(self, dz):
        idx = [i for i, e in enumerate(dz) if e != 0]
        val = [] ; sig = []
        if len(idx)==0:
            return 1/self.np
        for i in idx:
            val.append(dz[i])
            sig.append(self.sz[i])
        mvn = multivariate_normal([0]*len(idx), np.diag(sig))
        return mvn.pdf(val)
    '''
    return is not required as python works on
    pass-by-reference and there is no way of
    emulating pass-by-value
    '''
    def motion_model(self, samples, point, su):
        # Propagate every particle by the 2-D odometry step plus Gaussian noise.
        for i in range(self.np):
            dx = point[0] - normrnd(0, su[0])
            dy = point[1] - normrnd(0, su[1])
            pose = [samples[i].pose[0] + dx, samples[i].pose[1] + dy]
            samples[i].pose = pose
    '''
    measurement model for fast slam v1
    label for dMap = 1 : NLOS , 0 : LOS
    '''
    def fast_measure_model(self, samples, wpID):
        # Qt: initial landmark (AP) position covariance.
        Qt = np.diag([5,5])
        Qt = Qt.tolist() ; totWt = 0
        print("Iteration: " , wpID, end='\r')
        for i in range(self.np):
            for j in range(len(self.name2Pos)):
                name = self.TXName[j]
                tx = np.array(self.name2Pos[name])
                pos = np.array(samples[i].pose)
                # initialize particle map
                if name not in samples[i].mapID:
                    samples[i].mapMu.append(tx[:2])
                    samples[i].mapSigma.append(Qt)
                    samples[i].mapID.append(name)
                    samples[i].hashMap[name] = len(samples[i].mapID) - 1
                    samples[i].w = 1/self.np
                # update particle map
                else:
                    ID = samples[i].hashMap[name]
                    # prediction step
                    muHat = samples[i].mapMu[ID]
                    sigHat = np.array(samples[i].mapSigma[ID])
                    # update step
                    dHat = self.distance(pos, muHat)
                    rssiDist = self.dists[wpID][j].rssi
                    # use classifier or not
                    if self.use:
                        if self.hard:
                            label = self.classify(rssiDist, dHat)
                            if label==0:
                                innov = abs(rssiDist-dHat)
                            else:
                                # NLOS under hard classification: skip this update.
                                continue
                        else:
                            # Soft mixing: weight LOS/NLOS innovations by class probabilities.
                            inp = torch.tensor([rssiDist, dHat])
                            out = self.model(inp.float()).detach().numpy()
                            innov = out[0]*abs(rssiDist - dHat) + out[1]*abs(rssiDist - normrnd(15,3))
                    else:
                        innov = abs(rssiDist - dHat)
                    # EKF update of the landmark estimate: H is the Jacobian of
                    # the range measurement w.r.t. the landmark position.
                    dx = muHat[0] - pos[0] ; dy = muHat[1] - pos[1]
                    den = math.sqrt(dx**2 + dy**2)
                    H = np.array([dx/den, dy/den])
                    try:
                        Q = np.matmul(np.matmul(H, sigHat), H) + self.sz[j]
                    except:
                        bp()
                    # Kalman Gain
                    K = np.matmul(sigHat, H)/Q
                    # update pose/ covar
                    mu = muHat + innov*K
                    K = K.reshape((self.dim,1))
                    sig = (np.identity(self.dim) - K*H)*sigHat
                    samples[i].mapMu[ID] = mu.reshape((self.dim,))
                    samples[i].mapSigma[ID] = sig.tolist()
                    samples[i].w = max(samples[i].w, math.sqrt(2*math.pi*Q)*math.exp(-0.5*(innov**2)/Q))
            totWt += samples[i].w
        # normalize the weights
        if totWt==0:
            for i in range(self.np):
                samples[i].w = 1/self.np
        else:
            for i in range(self.np):
                samples[i].w = samples[i].w/totWt
    '''
    resampling algorithm applicable to both
    particle filter and fast slam because of
    common structure of particle
    '''
    def resample(self, samples):
        # Stratified resampling via the cumulative weight distribution Q.
        idx = [0]*self.np ; Q = [0]*self.np ; Q[0] = samples[0].w
        for i in range(1, self.np):
            Q[i] = samples[i].w + Q[i-1]
        t = np.random.rand(self.np+1, 1)
        T = np.sort(t, axis=0)
        T[self.np] = 1 ; i,j = 0,0
        while i<self.np and j<self.np:
            if T[i] < Q[j]:
                idx[i] = j
                i += 1
            else:
                j += 1
        # Only resample when enough distinct particles survive (diversity guard).
        if len(set(idx))>0.2*self.np:
            for i in range(self.np):
                samples[i].pose = samples[idx[i]].pose
                samples[i].w = 1/self.np
                samples[i].mapMu = samples[idx[i]].mapMu
                samples[i].mapID = samples[idx[i]].mapID
                samples[i].mapSigma = samples[idx[i]].mapSigma
                samples[i].hashMap = samples[idx[i]].hashMap
    '''
    Calculates the effective number of particles
    in the sampled distribution.
    '''
    def neff(self, samples):
        wghts = [0]*self.np ; totWt = 0
        for i in range(self.np):
            wghts[i] = samples[i].w
            totWt += samples[i].w
        den = 0
        for i in range(self.np):
            wghts[i] = (wghts[i]/totWt)**2
            den += wghts[i]
        return 1/den
    '''
    Calculates weighted mean and variance of the
    sample distribution
    '''
    def meanVar(self, samples):
        # Unweighted mean/covariance of the particle poses.
        totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
        for i in range(self.np):
            mu[0] += samples[i].pose[0]
            mu[1] += samples[i].pose[1]
            if self.dim==3: mu[2] += samples[i].pose[2]
            totWt += samples[i].w
        if self.dim==2: mu = [mu[0]/self.np, mu[1]/self.np]
        if self.dim==3: mu = [mu[0]/self.np, mu[1]/self.np, mu[2]/self.np]
        for i in range(self.np):
            if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
            if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
            sig += np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
        sig = sig/self.np
        return mu, sig
    '''
    Calculates weighted mean and variance of the
    sample distribution
    '''
    def weightedMeanVar(self, samples):
        # Weight-averaged mean/covariance of the particle poses.
        totWt = 0 ; mu = [0 for _ in range(self.dim)] ; sig = np.zeros((self.dim,self.dim))
        for i in range(self.np):
            mu[0] += samples[i].w*samples[i].pose[0]
            mu[1] += samples[i].w*samples[i].pose[1]
            if self.dim==3: mu[2] += samples[i].w*samples[i].pose[2]
            totWt += samples[i].w
        if self.dim==2: mu = [mu[0]/totWt, mu[1]/totWt]
        if self.dim==3: mu = [mu[0]/totWt, mu[1]/totWt, mu[2]/totWt]
        for i in range(self.np):
            if self.dim==2: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] ])
            if self.dim==3: x = np.array([ samples[i].pose[0]-mu[0] , samples[i].pose[1]-mu[1] , samples[i].pose[2]-mu[2] ])
            sig += samples[i].w*np.matmul(x.reshape((self.dim,1)),x.reshape((1,self.dim)))
        sig = sig/totWt
        return mu, sig
    '''
    Get the maximum weighted particle and use it
    to calculate the IDs of the APs discovered &
    the locations of the discovered APs
    '''
    def getAPLocs(self, samples):
        maxWeight = -9999999999 ; idx = 0
        for i in range(self.np):
            if samples[i].w > maxWeight:
                maxWeight = samples[i].w
                idx = i
        self.APLocs = samples[idx].mapMu
        self.IDs = samples[idx].mapID
    '''
    Plot the particle poses for each particle. Can
    only be used for debugging as of now as animation
    support is yet to be added
    '''
    def plot(self):
        print("Displaying Floor Plan.")
        wayPts = self.wayPts
        path = self.path
        TX = self.APLocs
        ID = self.IDs
        # display the waypoints by RRT
        if wayPts!=None:
            rows = []; cols = []
            for x,y in wayPts:
                rows.append(x); cols.append(y)
            plt.plot(cols, rows, 'b.-')
        # display the actual AP locations
        if self.TXName!=None:
            rows = []; cols = []
            for i in self.TXName:
                rows.append(self.name2Pos[i][0]); cols.append(self.name2Pos[i][1])
                plt.text(i[1],i[0]," NAME-"+str(i), color='black')
            # NOTE(review): this call passes (rows, cols) while the other plots
            # pass (cols, rows) — possible axis swap; confirm against the floor plan.
            plt.plot(rows, cols, 'rx')
        # display the localized path
        if path!=None:
            rows = []; cols = []
            for i in path:
                rows.append(i[0]); cols.append(i[1])
            plt.plot(cols, rows, 'c.-')
        # display the estimated AP locations
        if TX!=None and ID!=None:
            rows = []; cols = []; ctr = 0
            for i in TX:
                rows.append(i[0]); cols.append(i[1])
                plt.text(i[1],i[0]," NAME "+str(ID[ctr]), color='red')
                ctr += 1
            plt.plot(cols, rows, 'rx')
        plt.gca().invert_yaxis()
        plt.show()
    '''
    The main Fast SLAM v1 class
    '''
    def FastSLAM(self):
        self.path.append(self.wayPts[0][:2])
        samples = self.distrib()
        print("Running Fast SLAM ..")
        for i in range(len(self.pts)):
            # provide action update
            self.motion_model(samples, self.pts[i], self.su)
            # provide measurement update
            self.fast_measure_model(samples, i)
            # resample only when number of effective particle drops
            if self.neff(samples) <= 1/3*self.np:
                self.resample(samples)
            mXY, _ = self.weightedMeanVar(samples)
            self.path.append(mXY)
        self.getAPLocs(samples)
        print("FastSLAM has finished running ..") | [
"numpy.random.rand",
"math.sqrt",
"numpy.array",
"math.exp",
"include.DNN.DNN",
"numpy.sort",
"matplotlib.pyplot.plot",
"numpy.matmul",
"include.dataStructures.particle.Particle",
"matplotlib.pyplot.ylim",
"numpy.random.normal",
"numpy.identity",
"matplotlib.pyplot.gca",
"scipy.stats.norm.... | [((2629, 2634), 'include.DNN.DNN', 'DNN', ([], {}), '()\n', (2632, 2634), False, 'from include.DNN import DNN\n'), ((2686, 2702), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (2696, 2702), False, 'import math, torch\n'), ((2905, 2930), 'torch.tensor', 'torch.tensor', (['[rssi, euc]'], {}), '([rssi, euc])\n', (2917, 2930), False, 'import math, torch\n'), ((11728, 11758), 'numpy.random.rand', 'np.random.rand', (['(self.np + 1)', '(1)'], {}), '(self.np + 1, 1)\n', (11742, 11758), True, 'import numpy as np\n'), ((11769, 11787), 'numpy.sort', 'np.sort', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (11776, 11787), True, 'import numpy as np\n'), ((12989, 13019), 'numpy.zeros', 'np.zeros', (['(self.dim, self.dim)'], {}), '((self.dim, self.dim))\n', (12997, 13019), True, 'import numpy as np\n'), ((13935, 13965), 'numpy.zeros', 'np.zeros', (['(self.dim, self.dim)'], {}), '((self.dim, self.dim))\n', (13943, 13965), True, 'import numpy as np\n'), ((15509, 15529), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""c."""'], {}), "(x, y, 'c.')\n", (15517, 15529), True, 'import matplotlib.pyplot as plt\n'), ((15631, 15661), 'matplotlib.pyplot.plot', 'plt.plot', (['mXY[0]', 'mXY[1]', '"""ro"""'], {}), "(mXY[0], mXY[1], 'ro')\n", (15639, 15661), True, 'import matplotlib.pyplot as plt\n'), ((15668, 15700), 'matplotlib.pyplot.plot', 'plt.plot', (['wmXY[0]', 'wmXY[1]', '"""bo"""'], {}), "(wmXY[0], wmXY[1], 'bo')\n", (15676, 15700), True, 'import matplotlib.pyplot as plt\n'), ((15708, 15729), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-100, 300]'], {}), '([-100, 300])\n', (15716, 15729), True, 'import matplotlib.pyplot as plt\n'), ((15737, 15758), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-100, 300]'], {}), '([-100, 300])\n', (15745, 15758), True, 'import matplotlib.pyplot as plt\n'), ((15766, 15776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15774, 15776), True, 'import matplotlib.pyplot as plt\n'), ((20300, 20305), 'include.DNN.DNN', 
'DNN', ([], {}), '()\n', (20303, 20305), False, 'from include.DNN import DNN\n'), ((20357, 20373), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (20367, 20373), False, 'import math, torch\n'), ((20576, 20601), 'torch.tensor', 'torch.tensor', (['[rssi, euc]'], {}), '([rssi, euc])\n', (20588, 20601), False, 'import math, torch\n'), ((22340, 22355), 'numpy.diag', 'np.diag', (['[5, 5]'], {}), '([5, 5])\n', (22347, 22355), True, 'import numpy as np\n'), ((25619, 25649), 'numpy.random.rand', 'np.random.rand', (['(self.np + 1)', '(1)'], {}), '(self.np + 1, 1)\n', (25633, 25649), True, 'import numpy as np\n'), ((25660, 25678), 'numpy.sort', 'np.sort', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (25667, 25678), True, 'import numpy as np\n'), ((26880, 26910), 'numpy.zeros', 'np.zeros', (['(self.dim, self.dim)'], {}), '((self.dim, self.dim))\n', (26888, 26910), True, 'import numpy as np\n'), ((27826, 27856), 'numpy.zeros', 'np.zeros', (['(self.dim, self.dim)'], {}), '((self.dim, self.dim))\n', (27834, 27856), True, 'import numpy as np\n'), ((30530, 30540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30538, 30540), True, 'import matplotlib.pyplot as plt\n'), ((1390, 1461), 'math.sqrt', 'math.sqrt', (['((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2 + (x[2] - y[2]) ** 2)'], {}), '((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2 + (x[2] - y[2]) ** 2)\n', (1399, 1461), False, 'import math, torch\n'), ((1485, 1535), 'math.sqrt', 'math.sqrt', (['((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2)'], {}), '((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2)\n', (1494, 1535), False, 'import math, torch\n'), ((3657, 3669), 'numpy.diag', 'np.diag', (['sig'], {}), '(sig)\n', (3664, 3669), True, 'import numpy as np\n'), ((6902, 6919), 'numpy.diag', 'np.diag', (['[10, 10]'], {}), '([10, 10])\n', (6909, 6919), True, 'import numpy as np\n'), ((6950, 6971), 'numpy.diag', 'np.diag', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (6957, 6971), True, 'import numpy as np\n'), ((19066, 19137), 'math.sqrt', 
'math.sqrt', (['((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2 + (x[2] - y[2]) ** 2)'], {}), '((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2 + (x[2] - y[2]) ** 2)\n', (19075, 19137), False, 'import math, torch\n'), ((19161, 19211), 'math.sqrt', 'math.sqrt', (['((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2)'], {}), '((x[1] - y[1]) ** 2 + (x[0] - y[0]) ** 2)\n', (19170, 19211), False, 'import math, torch\n'), ((21722, 21734), 'numpy.diag', 'np.diag', (['sig'], {}), '(sig)\n', (21729, 21734), True, 'import numpy as np\n'), ((29573, 29600), 'matplotlib.pyplot.plot', 'plt.plot', (['cols', 'rows', '"""b.-"""'], {}), "(cols, rows, 'b.-')\n", (29581, 29600), True, 'import matplotlib.pyplot as plt\n'), ((29903, 29929), 'matplotlib.pyplot.plot', 'plt.plot', (['rows', 'cols', '"""rx"""'], {}), "(rows, cols, 'rx')\n", (29911, 29929), True, 'import matplotlib.pyplot as plt\n'), ((30115, 30142), 'matplotlib.pyplot.plot', 'plt.plot', (['cols', 'rows', '"""c.-"""'], {}), "(cols, rows, 'c.-')\n", (30123, 30142), True, 'import matplotlib.pyplot as plt\n'), ((30453, 30479), 'matplotlib.pyplot.plot', 'plt.plot', (['cols', 'rows', '"""rx"""'], {}), "(cols, rows, 'rx')\n", (30461, 30479), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2170), 'include.dataStructures.particle.Particle', 'Particle', (['start', '(1 / self.np)'], {}), '(start, 1 / self.np)\n', (2150, 2170), False, 'from include.dataStructures.particle import Particle\n'), ((3224, 3254), 'scipy.stats.norm.pdf', 'norm.pdf', (['dz[i]', '(0)', 'self.sz[i]'], {}), '(dz[i], 0, self.sz[i])\n', (3232, 3254), False, 'from scipy.stats import multivariate_normal, norm\n'), ((3944, 3961), 'numpy.random.normal', 'normrnd', (['(0)', 'su[0]'], {}), '(0, su[0])\n', (3951, 3961), True, 'from numpy.random import normal as normrnd\n'), ((3990, 4007), 'numpy.random.normal', 'normrnd', (['(0)', 'su[1]'], {}), '(0, su[1])\n', (3997, 4007), True, 'from numpy.random import normal as normrnd\n'), ((7111, 7131), 'numpy.array', 'np.array', (['self.tx[j]'], {}), 
'(self.tx[j])\n', (7119, 7131), True, 'import numpy as np\n'), ((7140, 7165), 'numpy.array', 'np.array', (['samples[i].pose'], {}), '(samples[i].pose)\n', (7148, 7165), True, 'import numpy as np\n'), ((13424, 13490), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]])\n', (13432, 13490), True, 'import numpy as np\n'), ((13522, 13621), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i].pose[2] -\n mu[2]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i\n ].pose[2] - mu[2]])\n', (13530, 13621), True, 'import numpy as np\n'), ((14399, 14465), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]])\n', (14407, 14465), True, 'import numpy as np\n'), ((14497, 14596), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i].pose[2] -\n mu[2]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i\n ].pose[2] - mu[2]])\n', (14505, 14596), True, 'import numpy as np\n'), ((19814, 19842), 'include.dataStructures.particle.Particle', 'Particle', (['start', '(1 / self.np)'], {}), '(start, 1 / self.np)\n', (19822, 19842), False, 'from include.dataStructures.particle import Particle\n'), ((20895, 20925), 'scipy.stats.norm.pdf', 'norm.pdf', (['dz[i]', '(0)', 'self.sz[i]'], {}), '(dz[i], 0, self.sz[i])\n', (20903, 20925), False, 'from scipy.stats import multivariate_normal, norm\n'), ((22009, 22026), 'numpy.random.normal', 'normrnd', (['(0)', 'su[0]'], {}), '(0, su[0])\n', (22016, 22026), True, 'from numpy.random import normal as normrnd\n'), ((22055, 22072), 'numpy.random.normal', 'normrnd', (['(0)', 'su[1]'], {}), '(0, su[1])\n', (22062, 22072), True, 'from numpy.random import normal as normrnd\n'), ((22580, 22609), 'numpy.array', 'np.array', 
(['self.name2Pos[name]'], {}), '(self.name2Pos[name])\n', (22588, 22609), True, 'import numpy as np\n'), ((22632, 22657), 'numpy.array', 'np.array', (['samples[i].pose'], {}), '(samples[i].pose)\n', (22640, 22657), True, 'import numpy as np\n'), ((27315, 27381), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]])\n', (27323, 27381), True, 'import numpy as np\n'), ((27413, 27512), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i].pose[2] -\n mu[2]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i\n ].pose[2] - mu[2]])\n', (27421, 27512), True, 'import numpy as np\n'), ((28290, 28356), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1]])\n', (28298, 28356), True, 'import numpy as np\n'), ((28388, 28487), 'numpy.array', 'np.array', (['[samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i].pose[2] -\n mu[2]]'], {}), '([samples[i].pose[0] - mu[0], samples[i].pose[1] - mu[1], samples[i\n ].pose[2] - mu[2]])\n', (28396, 28487), True, 'import numpy as np\n'), ((30497, 30506), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (30504, 30506), True, 'import matplotlib.pyplot as plt\n'), ((4142, 4159), 'numpy.random.normal', 'normrnd', (['(0)', 'su[2]'], {}), '(0, su[2])\n', (4149, 4159), True, 'from numpy.random import normal as normrnd\n'), ((23249, 23282), 'numpy.array', 'np.array', (['samples[i].mapSigma[ID]'], {}), '(samples[i].mapSigma[ID])\n', (23257, 23282), True, 'import numpy as np\n'), ((24253, 24281), 'math.sqrt', 'math.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (24262, 24281), False, 'import math, torch\n'), ((24302, 24332), 'numpy.array', 'np.array', (['[dx / den, dy / den]'], {}), '([dx / den, dy / den])\n', (24310, 24332), True, 'import numpy as 
np\n'), ((7867, 7900), 'numpy.array', 'np.array', (['samples[i].mapSigma[ID]'], {}), '(samples[i].mapSigma[ID])\n', (7875, 7900), True, 'import numpy as np\n'), ((10072, 10100), 'math.sqrt', 'math.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (10081, 10100), False, 'import math, torch\n'), ((10125, 10155), 'numpy.array', 'np.array', (['[dx / den, dy / den]'], {}), '([dx / den, dy / den])\n', (10133, 10155), True, 'import numpy as np\n'), ((24567, 24587), 'numpy.matmul', 'np.matmul', (['sigHat', 'H'], {}), '(sigHat, H)\n', (24576, 24587), True, 'import numpy as np\n'), ((5570, 5598), 'torch.tensor', 'torch.tensor', (['[z[j].rssi, d]'], {}), '([z[j].rssi, d])\n', (5582, 5598), False, 'import math, torch\n'), ((10302, 10340), 'math.sqrt', 'math.sqrt', (['(dx ** 2 + dy ** 2 + dz ** 2)'], {}), '(dx ** 2 + dy ** 2 + dz ** 2)\n', (10311, 10340), False, 'import math, torch\n'), ((10367, 10407), 'numpy.array', 'np.array', (['[dx / den, dy / den, dz / den]'], {}), '([dx / den, dy / den, dz / den])\n', (10375, 10407), True, 'import numpy as np\n'), ((10644, 10664), 'numpy.matmul', 'np.matmul', (['sigHat', 'H'], {}), '(sigHat, H)\n', (10653, 10664), True, 'import numpy as np\n'), ((23848, 23878), 'torch.tensor', 'torch.tensor', (['[rssiDist, dHat]'], {}), '([rssiDist, dHat])\n', (23860, 23878), False, 'import math, torch\n'), ((24503, 24507), 'pdb.set_trace', 'bp', ([], {}), '()\n', (24505, 24507), True, 'from pdb import set_trace as bp\n'), ((24748, 24769), 'numpy.identity', 'np.identity', (['self.dim'], {}), '(self.dim)\n', (24759, 24769), True, 'import numpy as np\n'), ((24963, 24989), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * Q)'], {}), '(2 * math.pi * Q)\n', (24972, 24989), False, 'import math, torch\n'), ((24986, 25017), 'math.exp', 'math.exp', (['(-0.5 * innov ** 2 / Q)'], {}), '(-0.5 * innov ** 2 / Q)\n', (24994, 25017), False, 'import math, torch\n'), ((9048, 9079), 'torch.tensor', 'torch.tensor', (['[z[j].rssi, dHat]'], {}), '([z[j].rssi, 
dHat])\n', (9060, 9079), False, 'import math, torch\n'), ((10572, 10576), 'pdb.set_trace', 'bp', ([], {}), '()\n', (10574, 10576), True, 'from pdb import set_trace as bp\n'), ((10841, 10862), 'numpy.identity', 'np.identity', (['self.dim'], {}), '(self.dim)\n', (10852, 10862), True, 'import numpy as np\n'), ((11068, 11094), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * Q)'], {}), '(2 * math.pi * Q)\n', (11077, 11094), False, 'import math, torch\n'), ((11091, 11122), 'math.exp', 'math.exp', (['(-0.5 * innov ** 2 / Q)'], {}), '(-0.5 * innov ** 2 / Q)\n', (11099, 11122), False, 'import math, torch\n'), ((24413, 24433), 'numpy.matmul', 'np.matmul', (['H', 'sigHat'], {}), '(H, sigHat)\n', (24422, 24433), True, 'import numpy as np\n'), ((10474, 10494), 'numpy.matmul', 'np.matmul', (['H', 'sigHat'], {}), '(H, sigHat)\n', (10483, 10494), True, 'import numpy as np\n'), ((5759, 5777), 'numpy.random.normal', 'normrnd', (['self.R', '(3)'], {}), '(self.R, 3)\n', (5766, 5777), True, 'from numpy.random import normal as normrnd\n'), ((24042, 24056), 'numpy.random.normal', 'normrnd', (['(15)', '(3)'], {}), '(15, 3)\n', (24049, 24056), True, 'from numpy.random import normal as normrnd\n'), ((9253, 9271), 'numpy.random.normal', 'normrnd', (['self.R', '(3)'], {}), '(self.R, 3)\n', (9260, 9271), True, 'from numpy.random import normal as normrnd\n')] |
# run in python 3
import numpy as np
import os, sys, json
import math
def main(theta, alpha):
    """Solve for the four wheel efforts realising a desired body motion.

    Builds the 3x4 coupling matrix C that maps per-wheel efforts to the
    body accelerations (a_x, a_y, angular), then applies its Moore-Penrose
    pseudo-inverse to the desired acceleration vector and prints the
    least-squares wheel efforts.

    Parameters
    ----------
    theta : float
        Heading angle (radians) of the desired linear acceleration.
    alpha : float
        Desired angular acceleration.
    """
    # constants that change with use
    N = 4
    ALPHA = 0.5
    # Wheel mounting angles around the body (radians).
    angles = [math.pi / 3, math.pi * 2 / 3, math.pi * 4 / 3, math.pi * 5/3]
    # Rows: each wheel's contribution to a_x, a_y and angular acceleration.
    C = np.array([
        [-math.sin(i) for i in angles],
        [math.cos(i) for i in angles],
        [1 / ALPHA] * N,
    ])
    print(C)
    _C = np.linalg.pinv(C)
    print(_C)
    A = getCartesianAcceleration(theta)
    A.append(alpha)
    # FIX: np.matrix is deprecated; build a 2-D ndarray column vector instead.
    _A = np.array(A).reshape(-1, 1)
    print(_A)
    # Least-squares wheel efforts for the requested acceleration.
    print(np.dot(_C, _A))
def getCartesianAcceleration(theta):
    """Return the planar acceleration direction [a_x, a_y] for heading *theta*."""
    a_x = math.cos(theta)
    a_y = math.sin(theta)
    return [a_x, a_y]
# Demo run: zero heading angle and zero angular acceleration.
main(0, 0)
| [
"numpy.linalg.pinv",
"math.cos",
"numpy.array",
"numpy.dot",
"numpy.matrix",
"math.sin"
] | [((419, 430), 'numpy.array', 'np.array', (['C'], {}), '(C)\n', (427, 430), True, 'import numpy as np\n'), ((453, 470), 'numpy.linalg.pinv', 'np.linalg.pinv', (['C'], {}), '(C)\n', (467, 470), True, 'import numpy as np\n'), ((604, 618), 'numpy.dot', 'np.dot', (['_C', '_A'], {}), '(_C, _A)\n', (610, 618), True, 'import numpy as np\n'), ((671, 686), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (679, 686), False, 'import math\n'), ((688, 703), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (696, 703), False, 'import math\n'), ((338, 349), 'math.cos', 'math.cos', (['i'], {}), '(i)\n', (346, 349), False, 'import math\n'), ((554, 566), 'numpy.matrix', 'np.matrix', (['A'], {}), '(A)\n', (563, 566), True, 'import numpy as np\n'), ((294, 305), 'math.sin', 'math.sin', (['i'], {}), '(i)\n', (302, 305), False, 'import math\n')] |
from collections import defaultdict
from distutils.version import LooseVersion
from functools import partial
import numba
from numba.core import compiler, cgutils, types
from numba.core.errors import TypingError
from numba.core.extending import intrinsic
from numba.experimental import structref
from numba.core.typed_passes import type_inference_stage
import numpy as np
from africanus.averaging.support import _unique_internal
from africanus.experimental.rime.fused.arguments import ArgumentPack
from africanus.experimental.rime.fused.terms.core import StateStructRef
# Extract numba's (major, minor) version so later code can branch on API
# differences (LooseVersion.version is absent under Readthedocs' mocked numba).
try:
    NUMBA_MAJOR, NUMBA_MINOR, _ = LooseVersion(numba.__version__).version
except AttributeError:
    # Readthedocs
    NUMBA_MAJOR, NUMBA_MINOR = 0, 0
def scalar_scalar(lhs, rhs):
    """Multiply two scalar Jones terms."""
    product = lhs * rhs
    return product
def scalar_diag(lhs, rhs):
    """Multiply a scalar Jones term by a diagonal (2-tuple) Jones term."""
    d0, d1 = rhs
    return lhs * d0, lhs * d1
def scalar_full(lhs, rhs):
    """Multiply a scalar Jones term by a full (4-tuple) 2x2 Jones term."""
    r0, r1, r2, r3 = rhs
    return lhs * r0, lhs * r1, lhs * r2, lhs * r3
def diag_scalar(lhs, rhs):
    """Multiply a diagonal (2-tuple) Jones term by a scalar Jones term."""
    d0, d1 = lhs
    return d0 * rhs, d1 * rhs
def diag_diag(lhs, rhs):
    """Multiply two diagonal (2-tuple) Jones terms element-wise."""
    l0, l1 = lhs
    r0, r1 = rhs
    return l0 * r0, l1 * r1
def diag_full(lhs, rhs):
    """Multiply a diagonal (2-tuple) Jones term into a full 2x2 Jones term.

    Row i of the full matrix is scaled by diagonal element i.
    """
    l0, l1 = lhs
    r0, r1, r2, r3 = rhs
    return (l0 * r0,
            l0 * r1,
            l1 * r2,
            l1 * r3)
def full_scalar(lhs, rhs):
    """Multiply a full 2x2 Jones term by a scalar Jones term."""
    l0, l1, l2, l3 = lhs
    return (l0 * rhs,
            l1 * rhs,
            l2 * rhs,
            l3 * rhs)
def full_diag(lhs, rhs):
    """Multiply a full 2x2 Jones term by a diagonal (2-tuple) Jones term.

    Column j of the full matrix is scaled by diagonal element j.
    """
    l0, l1, l2, l3 = lhs
    r0, r1 = rhs
    return (l0 * r0,
            l1 * r1,
            l2 * r0,
            l3 * r1)
def full_full(lhs, rhs):
    """Standard 2x2 matrix product of two full (4-tuple) Jones terms."""
    a, b, c, d = lhs
    e, f, g, h = rhs
    return (a * e + b * g,
            a * f + b * h,
            c * e + d * g,
            c * f + d * h)
def hermitian_scalar(jones):
    """Hermitian (conjugate) transpose of a scalar Jones term."""
    conjugated = np.conj(jones)
    return conjugated
def hermitian_diag(jones):
    """Hermitian transpose of a diagonal Jones term (conjugate each element)."""
    j0, j1 = jones
    return np.conj(j0), np.conj(j1)
def hermitian_full(jones):
    """Hermitian transpose of a full 2x2 Jones term.

    Swaps the off-diagonal elements and conjugates every element.
    """
    j0, j1, j2, j3 = jones
    return (np.conj(j0),
            np.conj(j2),
            np.conj(j1),
            np.conj(j3))
# Dispatch table mapping (lhs kind, rhs kind) -> multiplication function,
# where each kind is one of "scalar", "diag" or "full" (see classify_arg).
_jones_typ_map = {
    ("scalar", "scalar"): scalar_scalar,
    ("scalar", "diag"): scalar_diag,
    ("scalar", "full"): scalar_full,
    ("diag", "scalar"): diag_scalar,
    ("diag", "diag"): diag_diag,
    ("diag", "full"): diag_full,
    ("full", "scalar"): full_scalar,
    ("full", "diag"): full_diag,
    ("full", "full"): full_full
}
def classify_arg(arg):
    """Classify a numba type as a Jones term kind.

    Returns
    -------
    arg_type : {"scalar", "diag", "full", None}
        A string describing the argument type, else `None`
        if this is not possible
    """
    if isinstance(arg, types.Number):
        return "scalar"
    if isinstance(arg, types.BaseTuple):
        # Only 2-tuples (diagonal) and 4-tuples (full 2x2) are recognised
        return {2: "diag", 4: "full"}.get(len(arg))
    return None
def term_mul(lhs, rhs):
    """Look up the multiplication function for two Jones term types.

    Parameters
    ----------
    lhs : :class:`numba.Type`
    rhs : :class:`numba.Type`

    Returns
    -------
    multiplier : callable
        Function multiplying arguments of types lhs and rhs together
    """
    key = (classify_arg(lhs), classify_arg(rhs))

    try:
        return _jones_typ_map[key]
    except KeyError:
        raise TypingError(f"No known multiplication "
                          f"function for {lhs} and {rhs}")
# Dispatch table mapping a Jones term kind (see classify_arg) to the
# corresponding hermitian-transpose function.
_hermitian_map = {
    "scalar": hermitian_scalar,
    "diag": hermitian_diag,
    "full": hermitian_full
}
def hermitian(jones):
    """Return the hermitian-transpose function for the given Jones term type."""
    jones_type = classify_arg(jones)

    try:
        return _hermitian_map[jones_type]
    except KeyError:
        raise TypingError(f"No known hermitian function "
                          f"for {jones}: {jones_type}.")
def unify_jones_terms(typingctx, lhs, rhs):
    """Unify the types of two Jones terms, promoting to the larger
    correlation count where they differ."""
    corr_map = {"scalar": 1, "diag": 2, "full": 4}
    lhs_kind = classify_arg(lhs)
    rhs_kind = classify_arg(rhs)

    try:
        lhs_corrs = corr_map[lhs_kind]
        rhs_corrs = corr_map[rhs_kind]
    except KeyError:
        raise TypingError(f"{lhs} or {rhs} has no "
                          f"entry in the {corr_map} "
                          f"mapping")

    # Flatten both sides into their component types
    lhs_types = tuple(lhs) if lhs_corrs > 1 else (lhs,)
    rhs_types = tuple(rhs) if rhs_corrs > 1 else (rhs,)

    out_type = typingctx.unify_types(*lhs_types, *rhs_types)
    out_corrs = max(lhs_corrs, rhs_corrs)

    if out_corrs == 1:
        return out_type

    return types.Tuple((out_type,) * out_corrs)
@intrinsic
def tuple_adder(typingctx, t1, t2):
    """Numba intrinsic performing element-wise addition of two
    equal-length tuples, returning a tuple of t1's type."""
    if not isinstance(t1, types.BaseTuple):
        raise TypingError(f"{t1} must be a Tuple")

    if not isinstance(t2, types.BaseTuple):
        raise TypingError(f"{t2} must be a Tuple")

    if not len(t1) == len(t2):
        raise TypingError(f"len({t1}) != len({t2})")

    sig = t1(t1, t2)

    def codegen(context, builder, signature, args):
        def _add(x, y):
            return x + y

        [t1, t2] = args
        [t1_type, t2_type] = signature.args
        return_type = signature.return_type

        # Start from a zero-initialised LLVM tuple and fill it element-wise
        llvm_ret_type = context.get_value_type(return_type)
        ret_tuple = cgutils.get_null_value(llvm_ret_type)

        for i, (t1e, t2e) in enumerate(zip(t1_type, t2_type)):
            v1 = builder.extract_value(t1, i)
            v2 = builder.extract_value(t2, i)
            vr = typingctx.unify_types(t1e, t2e)

            # Compile a scalar addition at the unified element type
            data = context.compile_internal(builder, _add,
                                            vr(t1e, t2e), [v1, v2])

            ret_tuple = builder.insert_value(ret_tuple, data, i)

        return ret_tuple

    return sig, codegen
class IntrinsicFactory:
    """Factory generating the numba intrinsics used to assemble the fused RIME.

    Given the argument dependencies of a collection of Terms and
    Transformers, this class generates intrinsics that:

    1. pack the supplied arguments, optional defaults and derived
       indexing arrays into a single tuple (``pack_optionals_and_indices_fn``),
    2. apply argument Transformers and append their outputs
       (``pack_transformed_fn``),
    3. construct the per-chunk State structref (``term_state_fn``) and
    4. sample each Term and multiply the resulting Jones values together
       (``term_sampler_fn``).
    """

    # Names of the indexing arguments synthesised by pack_index, in order.
    KEY_ARGS = ("utime", "time_index",
                "uantenna", "antenna1_index", "antenna2_index",
                "ufeed", "feed1_index", "feed2_index")

    def __init__(self, arg_dependencies):
        self.argdeps = arg_dependencies

    def _resolve_arg_dependencies(self):
        """Determine optional defaults and which Transformers can create
        missing arguments, raising ValueError for unsatisfiable ones."""
        argdeps = self.argdeps
        # KEY_ARGS will be created
        supplied_args = set(argdeps.names) | set(self.KEY_ARGS)
        missing = set(argdeps.desired.keys()) - supplied_args
        available_args = set(argdeps.names) | supplied_args
        failed_transforms = defaultdict(list)
        can_create = {}

        # Try create missing argument with transformers
        for arg in list(missing):
            # We already know how to create it
            if arg in can_create:
                continue

            # We don't know how to create
            if arg not in argdeps.maybe_create:
                continue

            for transformer in argdeps.maybe_create[arg]:
                # We didn't have the arguments, make a note of this
                if not set(transformer.ARGS).issubset(available_args):
                    failed_transforms[arg].append(
                        (transformer, set(transformer.ARGS)))
                    continue

                # The transformer can create arg
                if arg not in failed_transforms:
                    can_create[arg] = transformer
                    missing.remove(arg)

        # Fail if required arguments are missing
        for arg in missing:
            terms_wanting = argdeps.desired[arg]
            err_msgs = []
            err_msgs.append(f"{set(terms_wanting)} need(s) '{arg}'.")

            if arg in failed_transforms:
                for transformer, needed in failed_transforms[arg]:
                    err_msgs.append(f"{transformer} can create {arg} "
                                    f"but needs {needed}, of which "
                                    f"{needed - set(argdeps.names)} is "
                                    f"missing from the input arguments.")

            raise ValueError("\n".join(err_msgs))

        opt_defaults = {}

        # Collect keyword defaults from the usable transformers
        for transformer in can_create.values():
            for k, d in transformer.KWARGS.items():
                argdeps.optional[k].append((transformer, d))

        for k, v in argdeps.optional.items():
            _, defaults = zip(*v)
            defaults = set(defaults)

            # All definitions of an optional argument must agree
            if len(defaults) != 1:
                raise ValueError(f"Multiple terms: {argdeps.terms} have "
                                 f"contradicting definitions for "
                                 f"{k}: {defaults}")

            opt_defaults[k] = defaults.pop()

        # Explicitly supplied arguments override defaults
        for name in argdeps.names:
            opt_defaults.pop(name, None)

        return opt_defaults, can_create

    def pack_optionals_and_indices_fn(self):
        """Return (output names, intrinsic) packing the input arguments,
        optional defaults and KEY_ARGS indexing arrays into one tuple."""
        argdeps = self.argdeps
        out_names = (argdeps.names +
                     tuple(argdeps.optional_defaults.keys()) +
                     tuple(argdeps.KEY_ARGS))

        @intrinsic
        def pack_index(typingctx, args):
            assert len(args) == len(argdeps.names)
            it = zip(argdeps.names, args, range(len(argdeps.names)))
            arg_info = {n: (t, i) for n, t, i in it}

            # Types of the synthesised indexing arguments, keyed by name
            key_types = {
                "utime": arg_info["time"][0],
                "time_index": types.int64[:],
                "uantenna": arg_info["antenna1"][0],
                "antenna1_index": types.int64[:],
                "antenna2_index": types.int64[:],
                "ufeed": arg_info["feed1"][0],
                "feed1_index": types.int64[:],
                "feed2_index": types.int64[:]
            }

            if tuple(key_types.keys()) != argdeps.KEY_ARGS:
                raise RuntimeError(
                    f"{tuple(key_types.keys())} != {argdeps.KEY_ARGS}")

            rvt = typingctx.resolve_value_type_prefer_literal
            optionals = [(n, rvt(d), d) for n, d
                         in argdeps.optional_defaults.items()]
            optional_types = tuple(p[1] for p in optionals)

            return_type = types.Tuple(args.types + optional_types +
                                      tuple(key_types.values()))
            sig = return_type(args)

            def codegen(context, builder, signature, args):
                return_type = signature.return_type
                llvm_ret_type = context.get_value_type(return_type)
                ret_tuple = cgutils.get_null_value(llvm_ret_type)

                # Extract supplied arguments from original arg tuple
                # and insert into the new one
                for i, typ in enumerate(signature.args[0]):
                    value = builder.extract_value(args[0], i)
                    context.nrt.incref(builder, signature.args[0][i], value)
                    ret_tuple = builder.insert_value(ret_tuple, value, i)

                n = len(signature.args[0])

                # Insert necessary optional defaults (kwargs) into the
                # new argument tuple
                for i, (name, typ, default) in enumerate(optionals):
                    if name != out_names[i + n]:
                        raise TypingError(f"{name} != {out_names[i + n]}")

                    value = context.get_constant_generic(builder, typ, default)
                    ret_tuple = builder.insert_value(ret_tuple, value, i + n)

                # Compute indexing arguments and insert into
                # the new tuple
                fn_args = [builder.extract_value(args[0], arg_info[a][1])
                           for a in argdeps.REQUIRED_ARGS]
                fn_arg_types = tuple(arg_info[k][0] for k
                                     in argdeps.REQUIRED_ARGS)
                fn_sig = types.Tuple(list(key_types.values()))(*fn_arg_types)

                def _indices(time, antenna1, antenna2, feed1, feed2):
                    # Derive unique values and per-row index arrays
                    utime, _, time_index, _ = _unique_internal(time)
                    uants = np.unique(np.concatenate((antenna1, antenna2)))
                    ufeeds = np.unique(np.concatenate((feed1, feed2)))
                    antenna1_index = np.searchsorted(uants, antenna1)
                    antenna2_index = np.searchsorted(uants, antenna2)
                    feed1_index = np.searchsorted(ufeeds, feed1)
                    feed2_index = np.searchsorted(ufeeds, feed2)

                    return (utime, time_index,
                            uants, antenna1_index, antenna2_index,
                            ufeeds, feed1_index, feed2_index)

                index = context.compile_internal(builder, _indices,
                                                 fn_sig, fn_args)

                n += len(optionals)

                for i, (name, value) in enumerate(key_types.items()):
                    if name != out_names[i + n]:
                        raise TypingError(f"{name} != {out_names[i + n]}")

                    value = builder.extract_value(index, i)
                    ret_tuple = builder.insert_value(ret_tuple, value, i + n)

                return ret_tuple

            return sig, codegen

        return out_names, pack_index

    def pack_transformed_fn(self, arg_names):
        """Return (output names, intrinsic) that applies each Transformer
        and appends its outputs to the argument tuple."""
        argdeps = self.argdeps
        transformers = list(set(t for _, t in argdeps.can_create.items()))
        out_names = arg_names + tuple(o for t in transformers
                                      for o in t.OUTPUTS)

        @intrinsic
        def pack_transformed(typingctx, args):
            assert len(args) == len(arg_names)
            it = zip(arg_names, args, range(len(arg_names)))
            arg_info = {n: (t, i) for n, t, i in it}
            rvt = typingctx.resolve_value_type_prefer_literal
            transform_output_types = []

            for transformer in transformers:
                # Figure out argument types for calling init_fields
                kw = {}

                for a in transformer.ARGS:
                    kw[a] = arg_info[a][0]

                for a, d in transformer.KWARGS.items():
                    try:
                        kw[a] = arg_info[a][0]
                    except KeyError:
                        kw[a] = rvt(d)

                fields, _ = transformer.init_fields(typingctx, **kw)

                if len(transformer.OUTPUTS) == 0:
                    raise TypingError(f"{transformer} produces no outputs")
                elif len(transformer.OUTPUTS) > 1:
                    if len(transformer.OUTPUTS) != len(fields):
                        raise TypingError(
                            f"{transformer} produces {transformer.OUTPUTS} "
                            f"but {transformer}.init_fields does not return "
                            f"a tuple of the same length, but {fields}")

                transform_output_types.extend(t for _, t in fields)

            # Create a return tuple containing the existing arguments
            # with the transformed outputs added to the end
            return_type = types.Tuple(args.types +
                                      tuple(transform_output_types))

            # Sanity check
            if len(return_type) != len(out_names):
                raise TypingError(f"len(return_type): {len(return_type)} != "
                                  f"len(out_names): {len(out_names)}")

            sig = return_type(args)

            def codegen(context, builder, signature, args):
                return_type = signature.return_type
                llvm_ret_type = context.get_value_type(return_type)
                ret_tuple = cgutils.get_null_value(llvm_ret_type)

                # Extract supplied arguments from original arg tuple
                # and insert into the new one
                for i, typ in enumerate(signature.args[0]):
                    value = builder.extract_value(args[0], i)
                    context.nrt.incref(builder, signature.args[0][i], value)
                    ret_tuple = builder.insert_value(ret_tuple, value, i)

                # Apply any argument transforms and insert their results
                # into the new argument tuple
                n = len(signature.args[0])
                i = 0

                for transformer in transformers:
                    # Check that outputs line up with output names
                    for j, o in enumerate(transformer.OUTPUTS):
                        if o != out_names[i + j + n]:
                            raise TypingError(f"{o} != {out_names[i + j + n]}")

                    transform_args = []
                    transform_types = []

                    # Get required arguments out of the argument pack
                    for name in transformer.ARGS:
                        try:
                            typ, j = arg_info[name]
                        except KeyError:
                            raise TypingError(
                                f"{name} is not present in arg_types")

                        value = builder.extract_value(args[0], j)
                        transform_args.append(value)
                        transform_types.append(typ)

                    # Generate defaults
                    for name, default in transformer.KWARGS.items():
                        default_typ = rvt(default)
                        default_value = context.get_constant_generic(
                            builder,
                            default_typ,
                            default)
                        transform_types.append(default_typ)
                        transform_args.append(default_value)

                    # Get the transformer fields and function
                    transform_fields, transform_fn = transformer.init_fields(
                        typingctx, *transform_types)
                    single_return = len(transform_fields) == 1

                    # Handle singleton vs tuple return types
                    if single_return:
                        ret_type = transform_fields[0][1]
                    else:
                        typs = [t for _, t in transform_fields]
                        ret_type = types.Tuple(typs)

                    # Call the transform function
                    transform_sig = ret_type(*transform_types)
                    value = context.compile_internal(builder,  # noqa
                                                     transform_fn,
                                                     transform_sig,
                                                     transform_args)

                    # Unpack the returned value and insert into
                    # return_tuple
                    if single_return:
                        ret_tuple = builder.insert_value(ret_tuple, value,
                                                         i + n)
                        i += 1
                    else:
                        for j, o in enumerate(transformer.OUTPUTS):
                            element = builder.extract_value(value, j)
                            ret_tuple = builder.insert_value(ret_tuple,
                                                             element,
                                                             i + n)
                            i += 1

                return ret_tuple

            return sig, codegen

        return out_names, pack_transformed

    def term_state_fn(self, arg_names):
        """Return an intrinsic constructing the State structref holding
        all arguments plus each Term's initialised fields."""
        argdeps = self.argdeps

        @intrinsic
        def term_state(typingctx, args):
            if not isinstance(args, types.Tuple):
                raise TypingError(f"args must be a Tuple but is {args}")

            if len(arg_names) != len(args):
                raise TypingError(f"len(arg_names): {len(arg_names)} != "
                                  f"len(args): {len(args)}")

            arg_pack = ArgumentPack(arg_names, args, tuple(range(len(args))))
            state_fields = []
            term_fields = []
            constructors = []

            # Query Terms for fields and their associated types
            # that should be created on the State object
            for term in argdeps.terms:
                it = zip(term.ALL_ARGS, arg_pack.indices(*term.ALL_ARGS))
                arg_types = {a: args[i] for a, i in it}
                fields, constructor = term.init_fields(typingctx, **arg_types)
                term.validate_constructor(constructor)
                term_fields.append(fields)
                state_fields.extend(fields)
                constructors.append(constructor)

            # Now define all fields for the State type
            arg_fields = [(k, args[i]) for k, (_, i) in arg_pack.items()]
            state_type = StateStructRef(arg_fields + state_fields)
            sig = state_type(args)

            def codegen(context, builder, signature, args):
                if not len(args) == 1:
                    raise TypingError("args must contain a single value")

                typingctx = context.typing_context
                rvt = typingctx.resolve_value_type_prefer_literal

                def make_struct():
                    """ Allocate the structure """
                    return structref.new(state_type)

                state = context.compile_internal(builder, make_struct,
                                                 state_type(), [])
                U = structref._Utils(context, builder, state_type)
                data_struct = U.get_data_struct(state)

                # Copy each argument onto the structref
                for arg_name, (_, i) in arg_pack.items():
                    value = builder.extract_value(args[0], i)
                    value_type = signature.args[0][i]
                    # We increment the reference count here
                    # as we're taking a reference from data in
                    # the args tuple and placing it on the structref
                    context.nrt.incref(builder, value_type, value)
                    field_type = state_type.field_dict[arg_name]
                    casted = context.cast(builder, value,
                                          value_type, field_type)
                    context.nrt.incref(builder, value_type, casted)
                    # The old value on the structref is being replaced,
                    # decrease it's reference count
                    old_value = getattr(data_struct, arg_name)
                    context.nrt.decref(builder, value_type, old_value)
                    setattr(data_struct, arg_name, casted)

                constructor_args = []
                constructor_types = []

                # Our single argument is a tuple of arguments, but we
                # need to extract those arguments necessary to construct
                # the term StructRef
                for term in argdeps.terms:
                    cargs = []
                    ctypes = []
                    arg_types = arg_pack.types(*term.ALL_ARGS)
                    arg_index = arg_pack.indices(*term.ALL_ARGS)

                    for typ, i in zip(arg_types, arg_index):
                        if isinstance(typ, types.Omitted):
                            # Omitted arguments become compile-time constants
                            const_type = rvt(typ.value)
                            const = context.get_constant_generic(
                                builder, const_type, typ.value)
                            cargs.append(const)
                            ctypes.append(const_type)
                        else:
                            assert not isinstance(typ, types.Omitted)
                            assert i != -1
                            cargs.append(builder.extract_value(args[0], i))
                            ctypes.append(typ)

                    constructor_args.append(cargs)
                    constructor_types.append(ctypes)

                # Call each Term's constructor and store the resulting
                # fields on the structref
                for ti in range(len(argdeps.terms)):
                    fields = term_fields[ti]
                    nfields = len(fields)

                    if nfields == 0:
                        return_type = types.none
                    elif nfields == 1:
                        return_type = fields[0][1]
                    else:
                        return_types = [f[1] for f in fields]
                        return_type = types.Tuple(return_types)

                    constructor_sig = return_type(*constructor_types[ti])
                    return_value = context.compile_internal(
                        builder, constructors[ti],
                        constructor_sig, constructor_args[ti])

                    if nfields == 0:
                        pass
                    elif nfields == 1:
                        arg_name, typ = fields[0]
                        old_value = getattr(data_struct, arg_name)
                        context.nrt.decref(builder, typ, old_value)
                        setattr(data_struct, arg_name, return_value)
                    else:
                        for i, (arg_name, typ) in enumerate(fields):
                            value = builder.extract_value(return_value, i)
                            context.nrt.incref(builder, typ, value)
                            old_value = getattr(data_struct, arg_name)
                            context.nrt.decref(builder, typ, old_value)
                            setattr(data_struct, arg_name, value)

                return state

            return sig, codegen

        return term_state

    def term_sampler_fn(self):
        """Return an intrinsic sampling every Term at a (source, row, ...)
        coordinate and multiplying the Jones values left-to-right."""
        argdeps = self.argdeps
        terms = argdeps.terms
        samplers = [term.sampler() for term in terms]

        for term, sampler in zip(terms, samplers):
            term.validate_sampler(sampler)

        nterms = len(terms)

        @intrinsic
        def term_sampler(typingctx, state, s, r, t, f1, f2, a1, a2, c):
            if not isinstance(state, StateStructRef):
                raise TypingError(f"{state} must be a StateStructRef")

            sampler_ir = list(map(compiler.run_frontend, samplers))
            ir_args = (state, s, r, t, f1, f2, a1, a2, c)

            if NUMBA_MAJOR > 0 or NUMBA_MINOR >= 54:
                # NOTE(sjperkins)
                # numba 0.54 wants a targetctx for type_inference_stage
                # Assume we're dealing with a CPU Target in order to derive
                # the targetctx. This is a fair assumption given that we're
                # writing CPU intrinsics. Note that numba is also assuming
                # CPU Targets in their code base in 0.54, at least. Look for
                # the ability to figure out the current target context manager
                # in future releases in order to find a better solution here.
                from numba.core.registry import cpu_target

                if cpu_target.typing_context != typingctx:
                    raise TypingError("typingctx's don't match")

                tis = partial(type_inference_stage,
                              typingctx=typingctx,
                              targetctx=cpu_target.target_context,
                              args=ir_args,
                              return_type=None)
            else:
                tis = partial(type_inference_stage,
                              typingctx=typingctx,
                              args=ir_args,
                              return_type=None)

            type_infer = [tis(interp=ir) for ir in sampler_ir]
            sampler_return_types = [ti.return_type for ti in type_infer]

            # Sanity check the sampler return types
            for typ, sampler in zip(sampler_return_types, samplers):
                if isinstance(typ, types.Number):
                    continue

                err = TypingError(
                    f"{sampler} should return:\n"
                    f"(1) a single scalar correlation\n"
                    f"(2) a Tuple containing 2 scalar correlations\n"
                    f"(3) a Tuple containing 4 scalar correlations\n"
                    f"but instead got a {typ}")

                if isinstance(typ, types.BaseTuple):
                    if len(typ) not in (2, 4):
                        raise err

                    if not all(isinstance(e, types.Number) for e in typ):
                        raise err

                    continue

                raise err

            sampler_ret_type = sampler_return_types[0]

            for typ in sampler_return_types[1:]:
                sampler_ret_type = unify_jones_terms(typingctx,
                                                    sampler_ret_type, typ)

            sig = sampler_ret_type(state, s, r, t, f1, f2, a1, a2, c)

            def codegen(context, builder, signature, args):
                [state, s, r, t, f1, f2, a1, a2, c] = args
                [state_type, _, _, _, _, _, _, _, _] = signature.args
                jones = []

                for ti in range(nterms):
                    sampling_fn = samplers[ti]

                    # Build signature for the sampling function
                    ret_type = sampler_return_types[ti]
                    sampler_arg_types = (state_type,) + signature.args[1:]
                    sampler_sig = ret_type(*sampler_arg_types)

                    # Build LLVM arguments for the sampling function
                    sampler_args = [state, s, r, t, f1, f2, a1, a2, c]

                    # Call the sampling function
                    data = context.compile_internal(builder,  # noqa
                                                    sampling_fn,
                                                    sampler_sig,
                                                    sampler_args)

                    # Apply hermitian transform if this is a right term
                    if terms[ti].configuration == "right":
                        data = context.compile_internal(builder,  # noqa
                                                        hermitian(ret_type),
                                                        ret_type(ret_type),
                                                        [data])

                    jones.append(data)

                # Fold the sampled Jones values together left-to-right,
                # unifying correlation counts as we go
                prev = jones[0]
                prev_t = sampler_return_types[0]

                for jrt, j in zip(sampler_return_types[1:], jones[1:]):
                    jones_mul = term_mul(prev_t, jrt)
                    jones_mul_typ = unify_jones_terms(context.typing_context,
                                                      prev_t, jrt)
                    jones_sig = jones_mul_typ(prev_t, jrt)
                    prev = context.compile_internal(builder, jones_mul,
                                                    jones_sig,
                                                    [prev, j])
                    prev_t = jones_mul_typ

                return prev

            return sig, codegen

        return term_sampler
| [
"africanus.experimental.rime.fused.terms.core.StateStructRef",
"numba.core.cgutils.get_null_value",
"numpy.conj",
"numba.core.types.Tuple",
"numpy.searchsorted",
"africanus.averaging.support._unique_internal",
"numba.core.errors.TypingError",
"numba.experimental.structref.new",
"collections.defaultd... | [((1685, 1699), 'numpy.conj', 'np.conj', (['jones'], {}), '(jones)\n', (1692, 1699), True, 'import numpy as np\n'), ((614, 645), 'distutils.version.LooseVersion', 'LooseVersion', (['numba.__version__'], {}), '(numba.__version__)\n', (626, 645), False, 'from distutils.version import LooseVersion\n'), ((1741, 1758), 'numpy.conj', 'np.conj', (['jones[0]'], {}), '(jones[0])\n', (1748, 1758), True, 'import numpy as np\n'), ((1760, 1777), 'numpy.conj', 'np.conj', (['jones[1]'], {}), '(jones[1])\n', (1767, 1777), True, 'import numpy as np\n'), ((1820, 1837), 'numpy.conj', 'np.conj', (['jones[0]'], {}), '(jones[0])\n', (1827, 1837), True, 'import numpy as np\n'), ((1851, 1868), 'numpy.conj', 'np.conj', (['jones[2]'], {}), '(jones[2])\n', (1858, 1868), True, 'import numpy as np\n'), ((1882, 1899), 'numpy.conj', 'np.conj', (['jones[1]'], {}), '(jones[1])\n', (1889, 1899), True, 'import numpy as np\n'), ((1913, 1930), 'numpy.conj', 'np.conj', (['jones[3]'], {}), '(jones[3])\n', (1920, 1930), True, 'import numpy as np\n'), ((4304, 4340), 'numba.core.types.Tuple', 'types.Tuple', (['((out_type,) * out_corrs)'], {}), '((out_type,) * out_corrs)\n', (4315, 4340), False, 'from numba.core import compiler, cgutils, types\n'), ((4446, 4482), 'numba.core.errors.TypingError', 'TypingError', (['f"""{t1} must be a Tuple"""'], {}), "(f'{t1} must be a Tuple')\n", (4457, 4482), False, 'from numba.core.errors import TypingError\n'), ((4542, 4578), 'numba.core.errors.TypingError', 'TypingError', (['f"""{t2} must be a Tuple"""'], {}), "(f'{t2} must be a Tuple')\n", (4553, 4578), False, 'from numba.core.errors import TypingError\n'), ((4625, 4663), 'numba.core.errors.TypingError', 'TypingError', (['f"""len({t1}) != len({t2})"""'], {}), "(f'len({t1}) != len({t2})')\n", (4636, 4663), False, 'from numba.core.errors import TypingError\n'), ((4982, 5019), 'numba.core.cgutils.get_null_value', 'cgutils.get_null_value', (['llvm_ret_type'], {}), '(llvm_ret_type)\n', (5004, 
5019), False, 'from numba.core import compiler, cgutils, types\n'), ((6060, 6077), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6071, 6077), False, 'from collections import defaultdict\n'), ((3122, 3190), 'numba.core.errors.TypingError', 'TypingError', (['f"""No known multiplication function for {lhs} and {rhs}"""'], {}), "(f'No known multiplication function for {lhs} and {rhs}')\n", (3133, 3190), False, 'from numba.core.errors import TypingError\n'), ((3479, 3549), 'numba.core.errors.TypingError', 'TypingError', (['f"""No known hermitian function for {jones}: {jones_type}."""'], {}), "(f'No known hermitian function for {jones}: {jones_type}.')\n", (3490, 3549), False, 'from numba.core.errors import TypingError\n'), ((3911, 3980), 'numba.core.errors.TypingError', 'TypingError', (['f"""{lhs} or {rhs} has no entry in the {corr_map} mapping"""'], {}), "(f'{lhs} or {rhs} has no entry in the {corr_map} mapping')\n", (3922, 3980), False, 'from numba.core.errors import TypingError\n'), ((20244, 20285), 'africanus.experimental.rime.fused.terms.core.StateStructRef', 'StateStructRef', (['(arg_fields + state_fields)'], {}), '(arg_fields + state_fields)\n', (20258, 20285), False, 'from africanus.experimental.rime.fused.terms.core import StateStructRef\n'), ((9965, 10002), 'numba.core.cgutils.get_null_value', 'cgutils.get_null_value', (['llvm_ret_type'], {}), '(llvm_ret_type)\n', (9987, 10002), False, 'from numba.core import compiler, cgutils, types\n'), ((15078, 15115), 'numba.core.cgutils.get_null_value', 'cgutils.get_null_value', (['llvm_ret_type'], {}), '(llvm_ret_type)\n', (15100, 15115), False, 'from numba.core import compiler, cgutils, types\n'), ((19128, 19178), 'numba.core.errors.TypingError', 'TypingError', (['f"""args must be a Tuple but is {args}"""'], {}), "(f'args must be a Tuple but is {args}')\n", (19139, 19178), False, 'from numba.core.errors import TypingError\n'), ((20912, 20958), 'numba.experimental.structref._Utils', 
'structref._Utils', (['context', 'builder', 'state_type'], {}), '(context, builder, state_type)\n', (20928, 20958), False, 'from numba.experimental import structref\n'), ((25378, 25426), 'numba.core.errors.TypingError', 'TypingError', (['f"""{state} must be a StateStructRef"""'], {}), "(f'{state} must be a StateStructRef')\n", (25389, 25426), False, 'from numba.core.errors import TypingError\n'), ((26382, 26506), 'functools.partial', 'partial', (['type_inference_stage'], {'typingctx': 'typingctx', 'targetctx': 'cpu_target.target_context', 'args': 'ir_args', 'return_type': 'None'}), '(type_inference_stage, typingctx=typingctx, targetctx=cpu_target.\n target_context, args=ir_args, return_type=None)\n', (26389, 26506), False, 'from functools import partial\n'), ((26662, 26748), 'functools.partial', 'partial', (['type_inference_stage'], {'typingctx': 'typingctx', 'args': 'ir_args', 'return_type': 'None'}), '(type_inference_stage, typingctx=typingctx, args=ir_args,\n return_type=None)\n', (26669, 26748), False, 'from functools import partial\n'), ((27196, 27396), 'numba.core.errors.TypingError', 'TypingError', (['f"""{sampler} should return:\n(1) a single scalar correlation\n(2) a Tuple containing 2 scalar correlations\n(3) a Tuple containing 4 scalar correlations\nbut instead got a {typ}"""'], {}), '(\n f"""{sampler} should return:\n(1) a single scalar correlation\n(2) a Tuple containing 2 scalar correlations\n(3) a Tuple containing 4 scalar correlations\nbut instead got a {typ}"""\n )\n', (27207, 27396), False, 'from numba.core.errors import TypingError\n'), ((11439, 11461), 'africanus.averaging.support._unique_internal', '_unique_internal', (['time'], {}), '(time)\n', (11455, 11461), False, 'from africanus.averaging.support import _unique_internal\n'), ((11646, 11678), 'numpy.searchsorted', 'np.searchsorted', (['uants', 'antenna1'], {}), '(uants, antenna1)\n', (11661, 11678), True, 'import numpy as np\n'), ((11716, 11748), 'numpy.searchsorted', 'np.searchsorted', 
(['uants', 'antenna2'], {}), '(uants, antenna2)\n', (11731, 11748), True, 'import numpy as np\n'), ((11783, 11813), 'numpy.searchsorted', 'np.searchsorted', (['ufeeds', 'feed1'], {}), '(ufeeds, feed1)\n', (11798, 11813), True, 'import numpy as np\n'), ((11848, 11878), 'numpy.searchsorted', 'np.searchsorted', (['ufeeds', 'feed2'], {}), '(ufeeds, feed2)\n', (11863, 11878), True, 'import numpy as np\n'), ((13848, 13897), 'numba.core.errors.TypingError', 'TypingError', (['f"""{transformer} produces no outputs"""'], {}), "(f'{transformer} produces no outputs')\n", (13859, 13897), False, 'from numba.core.errors import TypingError\n'), ((20447, 20494), 'numba.core.errors.TypingError', 'TypingError', (['"""args must contain a single value"""'], {}), "('args must contain a single value')\n", (20458, 20494), False, 'from numba.core.errors import TypingError\n'), ((20727, 20752), 'numba.experimental.structref.new', 'structref.new', (['state_type'], {}), '(state_type)\n', (20740, 20752), False, 'from numba.experimental import structref\n'), ((26320, 26358), 'numba.core.errors.TypingError', 'TypingError', (['"""typingctx\'s don\'t match"""'], {}), '("typingctx\'s don\'t match")\n', (26331, 26358), False, 'from numba.core.errors import TypingError\n'), ((10693, 10737), 'numba.core.errors.TypingError', 'TypingError', (['f"""{name} != {out_names[i + n]}"""'], {}), "(f'{name} != {out_names[i + n]}')\n", (10704, 10737), False, 'from numba.core.errors import TypingError\n'), ((11500, 11536), 'numpy.concatenate', 'np.concatenate', (['(antenna1, antenna2)'], {}), '((antenna1, antenna2))\n', (11514, 11536), True, 'import numpy as np\n'), ((11577, 11607), 'numpy.concatenate', 'np.concatenate', (['(feed1, feed2)'], {}), '((feed1, feed2))\n', (11591, 11607), True, 'import numpy as np\n'), ((12378, 12422), 'numba.core.errors.TypingError', 'TypingError', (['f"""{name} != {out_names[i + n]}"""'], {}), "(f'{name} != {out_names[i + n]}')\n", (12389, 12422), False, 'from numba.core.errors import 
TypingError\n'), ((17689, 17706), 'numba.core.types.Tuple', 'types.Tuple', (['typs'], {}), '(typs)\n', (17700, 17706), False, 'from numba.core import compiler, cgutils, types\n'), ((14043, 14200), 'numba.core.errors.TypingError', 'TypingError', (['f"""{transformer} produces {transformer.OUTPUTS} but {transformer}.init_fields does not return a tuple of the same length, but {fields}"""'], {}), "(\n f'{transformer} produces {transformer.OUTPUTS} but {transformer}.init_fields does not return a tuple of the same length, but {fields}'\n )\n", (14054, 14200), False, 'from numba.core.errors import TypingError\n'), ((15959, 16004), 'numba.core.errors.TypingError', 'TypingError', (['f"""{o} != {out_names[i + j + n]}"""'], {}), "(f'{o} != {out_names[i + j + n]}')\n", (15970, 16004), False, 'from numba.core.errors import TypingError\n'), ((23741, 23766), 'numba.core.types.Tuple', 'types.Tuple', (['return_types'], {}), '(return_types)\n', (23752, 23766), False, 'from numba.core import compiler, cgutils, types\n'), ((16364, 16414), 'numba.core.errors.TypingError', 'TypingError', (['f"""{name} is not present in arg_types"""'], {}), "(f'{name} is not present in arg_types')\n", (16375, 16414), False, 'from numba.core.errors import TypingError\n')] |
import numpy as np
class Main:
    """Read eye-matrix dimensions from stdin and print the matrix."""

    def __init__(self):
        # Two whitespace-separated integers: row count n and column count m.
        self.n, self.m = (int(token) for token in input().split())

    def output(self):
        # k=0 places the ones on the main diagonal.
        print(np.eye(self.n, self.m, k=0))
if __name__ == '__main__':
    # Script entry point: read dimensions, print the eye matrix.
    Main().output()
| [
"numpy.eye"
] | [((152, 179), 'numpy.eye', 'np.eye', (['self.n', 'self.m'], {'k': '(0)'}), '(self.n, self.m, k=0)\n', (158, 179), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import scipy.ndimage
from operator import itemgetter
import math
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from tkinter import ttk
from MyFunctions import *
# JPEG-style luminance quantization tables used by MakeDCT to quantize the
# 8x8 DCT coefficient blocks. Lower quality factors mean larger divisors
# (coarser quantization).

# Table for JPEG quality factor 90.
Q_90=[
[3,2,2,3,5,8,10,12],
[2,2,3,4,5,12,12,11],
[3,3,3,5,8,11,14,11],
[3,3,4,6,10,17,16,12],
[4,4,7,11,14,22,21,15],
[5,7,11,13,16,12,23,18],
[10,13,16,17,21,24,24,21],
[14,18,19,20,22,20,20,20]
]
# Table for JPEG quality factor 50.
Q_50=[
[16,11,10,16,24,40,51,61],
[12,12,14,19,26,58,60,55],
[14,13,16,24,40,57,69,56],
[14,17,22,29,51,87,80,62],
[18,22,37,56,68,109,103,77],
[24,35,55,64,81,104,113,92],
[49,64,78,87,103,121,120,101],
[72,92,95,98,112,100,103,99]
]
# Table for JPEG quality factor 10.
Q_10=[
[80,60,50,80,120,200,255,255],
[55,60,70,95,130,255,255,255],
[70,65,80,120,200,255,255,255],
[70,85,110,145,255,255,255,255],
[90,110,185,255,255,255,255,255],
[120,175,255,255,255,255,255,255],
[245,255,255,255,255,255,255,255],
[255,255,255,255,255,255,255,255]
]
# NOTE(review): Q_CL duplicates Q_50 entry-for-entry — confirm whether a
# distinct table was intended for the "Q_CL" combobox option.
Q_CL=[
[16,11,10,16,24,40,51,61],
[12,12,14,19,26,58,60,55],
[14,13,16,24,40,57,69,56],
[14,17,22,29,51,87,80,62],
[18,22,37,56,68,109,103,77],
[24,35,55,64,81,104,113,92],
[49,64,78,87,103,121,120,101],
[72,92,95,98,112,100,103,99]
]
def OpenShowImage():
    """Ask the user for an image file, show it in the left frame and keep a
    grayscale copy in the global `img` for later processing."""
    global img, firstimage, first_image_label
    path = filedialog.askopenfilename(
        initialdir="../Forged Images/",
        title="Open File",
        filetypes=(
            ("png files", "*.png"),
            ("bmp files", "*.bmp"),
            ("jpeg files", "*.jpg"),
            ("All Files", "*.*"),
        ),
    )
    # Keep module-level references so tkinter does not garbage-collect the photo.
    firstimage = PhotoImage(file='{}'.format(path))
    first_image_label = Label(leftframe, image=firstimage)
    first_image_label.pack()
    img = cv2.imread('{}'.format(path), 0)
def AccuracyTest():
    """Compare the detection mask (global img2) against a user-chosen ground
    truth mask and report the F1 score in a message box.

    Convention (from the original counters): forged pixels are white (255) in
    the ground truth, and img2 marks detected-forgery pixels as white (255).
    """
    filename = filedialog.askopenfilename(initialdir = "../Forged Images/",title ="Open File",filetypes = (("png files","*.png"),("bmp files","*.bmp"),("jpeg files","*.jpg"),("All Files","*.*")))
    ground_truth = cv2.imread('{}'.format(filename), 0)
    tp = 0  # was "dp": detected and truly forged (true positive)
    fp = 0  # was "yp": detected but not forged (false positive)
    fn = 0  # was "yn": forged but missed (false negative)
    for i in range(height):
        for j in range(width):
            if ground_truth[i][j] == 0 and img2[i][j] == 255:
                fp += 1
            elif ground_truth[i][j] == 255 and img2[i][j] == 255:
                tp += 1
            elif ground_truth[i][j] == 255 and img2[i][j] == 0:
                fn += 1
    # Bug fix: the original divided unconditionally and crashed with
    # ZeroDivisionError when there were no true positives (e.g. nothing
    # detected). With tp == 0 both precision and recall are 0, so F1 is 0.
    if tp == 0:
        f1 = 0.0
    else:
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = 2 * (precision * recall) / (precision + recall)
    messagebox.showinfo("Accuracy Result", f1)
def GetQuantizationMatrix(size, mainsize):
    """Fill the global quantization_matrix so that entry (i, j) ends up as
    2**(max(i, j) - 1), clamped to 1 inside the top-left 2x2 corner.

    Iterative equivalent of the original recursion: overwrite the top-left
    s x s square with 2**(s-2) for s = size down to 2. The `mainsize`
    argument is accepted for call compatibility but never used.
    """
    for s in range(size, 1, -1):
        value = pow(2, s - 2)
        for row in range(s):
            for col in range(s):
                quantization_matrix[row][col] = value
def MakeDCT():
    """Slide an 8x8 window over the grayscale image, DCT-transform each block,
    optionally quantize the coefficients with the table chosen in the GUI, and
    store the diagonal-ordered feature vector together with the block position.

    Side effects: sets the globals height, width, quantization_matrix and
    diagonaled_array, and shows an info box when finished.
    """
    global height, width, quantization_matrix, diagonaled_array
    height, width = img.shape
    vis0 = np.zeros((height, width), np.float32)
    vis0[:height, :width] = img
    # Power-of-two table used by the "QTable in Article" mode.
    quantization_matrix = [[0] * 8 for _ in range(8)]
    GetQuantizationMatrix(8, 8)
    # Fix: the original duplicated the whole DCT/quantize/diagonalize body once
    # per combobox option and re-read the combobox for every 8x8 block. Select
    # the divisor table once; None ("Not Selected") means no quantization.
    divisor_tables = {
        "Q_CL": Q_CL,
        "Q_90": Q_90,
        "Q_50": Q_50,
        "Q_10": Q_10,
        "Divide by 16": [[16] * 8 for _ in range(8)],
        "QTable in Article": quantization_matrix,
    }
    divisor = divisor_tables.get(quantization_matrix_selection_box.get())
    diagonaled_array = [[] for _ in range((height - 7) * (width - 7))]
    count = 0
    for i in range(0, height - 7):
        for j in range(0, width - 7):
            vis1 = cv2.dct(vis0[i:i + 8, j:j + 8])
            if divisor is not None:
                for k in range(0, 8):
                    for t in range(0, 8):
                        vis1[k][t] = GetMinDistValue(vis1[k][t] / divisor[k][t])
            vis1 = getdiagonalarray(vis1, 8, 8)
            diagonaled_array[count].append(vis1)
            diagonaled_array[count].append([i, j])
            count += 1
    messagebox.showinfo("Success", "DCT Dönüşüm Tamamlandı")
# Default tuning parameters; each has a Spinbox callback below that overwrites it.
number_of_vector_to_compare=10  # how many sorted neighbours each block is compared with
max_euclidean_distance=1.0  # feature-distance threshold for declaring blocks "similar"
threshold_distance_for_similar_blocks=5  # minimum spatial distance between matched blocks
min_count_for_similar_shift_vectors=10  # minimum occurrences of a shift vector to accept it
def TryToDetectForgery():
    """Match similar feature vectors between 8x8 blocks, collect their shift
    vectors in hough_space, paint frequently-shifted block pairs white into a
    binary mask (img2), then save it where the user chooses and show it in the
    right frame.

    Reads the globals produced by MakeDCT (diagonaled_array, height, width)
    and the module-level tuning parameters.
    """
    global hough_space,img2,result_image,result_image_label
    hough_space=[]
    # Lexicographic sort on the feature vectors so similar blocks become neighbours.
    diagonaled_array.sort(key=itemgetter(0))
    for i in range(0,len(diagonaled_array) - number_of_vector_to_compare + 1):
        toplam = 0.0  # "toplam" = Turkish for "sum": squared-distance accumulator
        # NOTE(review): toplam is never reset inside the j loop, so the distance
        # accumulates over ALL neighbours of block i. If a per-pair distance was
        # intended, the reset belongs inside the j loop — confirm before changing.
        for j in range(i + 1,i + number_of_vector_to_compare):
            # Only the first 15 diagonal-ordered (low-frequency) coefficients are compared.
            for k in range(0, 15):
                toplam += pow(( diagonaled_array[i][0][k] - diagonaled_array[j][0][k]), 2)
            if(math.sqrt(toplam) < max_euclidean_distance):
                # Ignore spatially overlapping/adjacent blocks.
                if(math.sqrt(pow((diagonaled_array[i][1][0]-diagonaled_array[j][1][0]),2) + pow((diagonaled_array[i][1][1]-diagonaled_array[j][1][1]),2)) > threshold_distance_for_similar_blocks):
                    # Flat list layout, 5 entries per match:
                    # i_row, i_col, j_row, j_col, [row_shift, col_shift].
                    hough_space.append(diagonaled_array[i][1][0])
                    hough_space.append(diagonaled_array[i][1][1])
                    hough_space.append(diagonaled_array[j][1][0])
                    hough_space.append(diagonaled_array[j][1][1])
                    hough_space.append([abs(diagonaled_array[i][1][0]-diagonaled_array[j][1][0]),abs(diagonaled_array[i][1][1]-diagonaled_array[j][1][1])])
    img2 = np.zeros((height,width,1),np.uint8)
    # Keep only matches whose shift vector occurs often enough.
    # NOTE(review): hough_space.count() inside the loop makes this O(n^2) in
    # the number of matches.
    for i in range(4,len(hough_space),5):
        if(hough_space.count(hough_space[i]) > min_count_for_similar_shift_vectors):
            for j in range(0,8):
                for k in range(0,8):
                    img2[hough_space[i-4]+j,hough_space[i-3]+k,0]=255
                    img2[hough_space[i-2]+j,hough_space[i-1]+k,0]=255
    filename = filedialog.asksaveasfilename(initialdir = "../Forged Images/",title = "Save File",filetypes = (("png files","*.png"),("bmp files","*.bmp"),("jpeg files","*.jpg"),("All Files","*.*")))
    cv2.imwrite('{}'.format(filename),img2)
    result_image=PhotoImage(file='{}'.format(filename))
    result_image_label=Label(rightframe,image=result_image)
    result_image_label.pack()
def GetNumberOfVectorToCompare():
    """Spinbox callback: refresh the global comparison-window size from the GUI."""
    global number_of_vector_to_compare
    number_of_vector_to_compare=int(number_of_vector_to_compare_spin.get())
def GetMaxEuclideanDistance():
    """Spinbox callback: refresh the global feature-distance threshold from the GUI."""
    global max_euclidean_distance
    max_euclidean_distance=float(maximum_Euclidean_distance_spin.get())
def GetThresholdDistanceForSimilarBlocks():
    """Spinbox callback: refresh the global minimum spatial block distance from the GUI."""
    global threshold_distance_for_similar_blocks
    threshold_distance_for_similar_blocks=int(threshold_distance_for_similar_blocks_spin.get())
def GetMinCountForSimilarShiftVectors():
    """Spinbox callback: refresh the global shift-vector count threshold from the GUI."""
    global min_count_for_similar_shift_vectors
    min_count_for_similar_shift_vectors=int(min_count_for_similar_shift_vectors_spin.get())
root=Tk()
root.geometry("800x325")
root.title("Copy Move Forgery Detection")
menubar=Menu(root)
filemenu=Menu(menubar,tearoff=0)
filemenu.add_command(label="Open", command=OpenShowImage)
filemenu.add_command(label="DCT to Image Blocks", command=MakeDCT)
filemenu.add_command(label="Try to Detect Forgery",command=TryToDetectForgery)
filemenu.add_separator()
filemenu.add_command(label="Exit",command=root.quit)
menubar.add_cascade(label="File",menu=filemenu)
root.config(menu=menubar)
mainframe=Frame(root)
mainframe.pack(fill=BOTH)
leftframe=Frame(mainframe,width=256,height=256)
leftframe.pack(side=LEFT,padx=15,pady=25,anchor=W)
middleframe=Frame(mainframe)
middleframe.pack(side=LEFT,anchor=CENTER)
rightframe=Frame(mainframe,width=256,height=256)
rightframe.pack(side=LEFT,anchor=E,padx=15)
quantization_matrix_selection_box_label=Label(middleframe,text="Select Quantization Matrix")
quantization_matrix_selection_box_label.pack()
quantization_matrix_selection_box=ttk.Combobox(middleframe,width=15)
quantization_matrix_selection_box['values'] = ("Not Selected","QTable in Article","Q_CL","Q_90","Q_50","Q_10","Divide by 16")
quantization_matrix_selection_box.pack(anchor=CENTER,pady=3)
quantization_matrix_selection_box.current(0)
number_of_vector_to_compare_spin_label=Label(middleframe,text="Number of Vector to Compare")
number_of_vector_to_compare_spin_label.pack(pady=3)
number_of_vector_to_compare_spin = Spinbox(middleframe, from_=0, to=100,width=5,command=GetNumberOfVectorToCompare)
number_of_vector_to_compare_spin.pack(anchor=CENTER,pady=3)
maximum_Euclidean_distance_spin_label=Label(middleframe,text="Maximum Euclidean Distance")
maximum_Euclidean_distance_spin_label.pack(pady=3)
maximum_Euclidean_distance_spin = Spinbox(middleframe, from_=0, to=100,width=5,format="%.2f",increment=0.1,command=GetMaxEuclideanDistance)
maximum_Euclidean_distance_spin.pack(anchor=CENTER)
threshold_distance_for_similar_blocks_label=Label(middleframe,text="Threshold Distance for Similar Blocks")
threshold_distance_for_similar_blocks_label.pack(pady=3)
threshold_distance_for_similar_blocks_spin = Spinbox(middleframe, from_=5, to=100,width=5,command=GetThresholdDistanceForSimilarBlocks)
threshold_distance_for_similar_blocks_spin.pack(anchor=CENTER,pady=3)
min_count_for_similar_shift_vectors_label = Label(middleframe,text="Minimum Count for Similar Shift Vectors")
min_count_for_similar_shift_vectors_label.pack(pady=3)
min_count_for_similar_shift_vectors_spin = Spinbox(middleframe, from_=25, to=1000,width=5,command=GetMinCountForSimilarShiftVectors)
min_count_for_similar_shift_vectors_spin.pack(anchor=CENTER,pady=3)
accuracy_test_button=Button(middleframe,text="Accuracy Test",bg='gray' ,width=15,command=AccuracyTest)
accuracy_test_button.pack(anchor=CENTER,pady=7)
status=Label(root,text="Made By zumrudu-anka",bd=1,relief=SUNKEN)
status.pack(side=BOTTOM,fill=X)
root.mainloop() | [
"cv2.dct",
"tkinter.filedialog.asksaveasfilename",
"math.sqrt",
"numpy.zeros",
"tkinter.ttk.Combobox",
"operator.itemgetter",
"tkinter.messagebox.showinfo",
"tkinter.filedialog.askopenfilename"
] | [((8647, 8682), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['middleframe'], {'width': '(15)'}), '(middleframe, width=15)\n', (8659, 8682), False, 'from tkinter import ttk\n'), ((1317, 1510), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""../Forged Images/"""', 'title': '"""Open File"""', 'filetypes': "(('png files', '*.png'), ('bmp files', '*.bmp'), ('jpeg files', '*.jpg'), (\n 'All Files', '*.*'))"}), "(initialdir='../Forged Images/', title=\n 'Open File', filetypes=(('png files', '*.png'), ('bmp files', '*.bmp'),\n ('jpeg files', '*.jpg'), ('All Files', '*.*')))\n", (1343, 1510), False, 'from tkinter import filedialog\n'), ((1703, 1896), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""../Forged Images/"""', 'title': '"""Open File"""', 'filetypes': "(('png files', '*.png'), ('bmp files', '*.bmp'), ('jpeg files', '*.jpg'), (\n 'All Files', '*.*'))"}), "(initialdir='../Forged Images/', title=\n 'Open File', filetypes=(('png files', '*.png'), ('bmp files', '*.bmp'),\n ('jpeg files', '*.jpg'), ('All Files', '*.*')))\n", (1729, 1896), False, 'from tkinter import filedialog\n'), ((2320, 2362), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Accuracy Result"""', 'f1'], {}), "('Accuracy Result', f1)\n", (2339, 2362), False, 'from tkinter import messagebox\n'), ((2630, 2667), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.float32'], {}), '((height, width), np.float32)\n', (2638, 2667), True, 'import numpy as np\n'), ((5161, 5217), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Success"""', '"""DCT Dönüşüm Tamamlandı"""'], {}), "('Success', 'DCT Dönüşüm Tamamlandı')\n", (5180, 5217), False, 'from tkinter import messagebox\n'), ((6340, 6378), 'numpy.zeros', 'np.zeros', (['(height, width, 1)', 'np.uint8'], {}), '((height, width, 1), np.uint8)\n', (6348, 6378), True, 'import numpy as np\n'), ((6667, 6862), 'tkinter.filedialog.asksaveasfilename', 
'filedialog.asksaveasfilename', ([], {'initialdir': '"""../Forged Images/"""', 'title': '"""Save File"""', 'filetypes': "(('png files', '*.png'), ('bmp files', '*.bmp'), ('jpeg files', '*.jpg'), (\n 'All Files', '*.*'))"}), "(initialdir='../Forged Images/', title=\n 'Save File', filetypes=(('png files', '*.png'), ('bmp files', '*.bmp'),\n ('jpeg files', '*.jpg'), ('All Files', '*.*')))\n", (6695, 6862), False, 'from tkinter import filedialog\n'), ((5482, 5495), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (5492, 5495), False, 'from operator import itemgetter\n'), ((3138, 3169), 'cv2.dct', 'cv2.dct', (['vis0[i:i + 8, j:j + 8]'], {}), '(vis0[i:i + 8, j:j + 8])\n', (3145, 3169), False, 'import cv2\n'), ((5757, 5774), 'math.sqrt', 'math.sqrt', (['toplam'], {}), '(toplam)\n', (5766, 5774), False, 'import math\n'), ((3444, 3475), 'cv2.dct', 'cv2.dct', (['vis0[i:i + 8, j:j + 8]'], {}), '(vis0[i:i + 8, j:j + 8])\n', (3451, 3475), False, 'import cv2\n'), ((3750, 3781), 'cv2.dct', 'cv2.dct', (['vis0[i:i + 8, j:j + 8]'], {}), '(vis0[i:i + 8, j:j + 8])\n', (3757, 3781), False, 'import cv2\n'), ((4053, 4084), 'cv2.dct', 'cv2.dct', (['vis0[i:i + 8, j:j + 8]'], {}), '(vis0[i:i + 8, j:j + 8])\n', (4060, 4084), False, 'import cv2\n'), ((4368, 4399), 'cv2.dct', 'cv2.dct', (['vis0[i:i + 8, j:j + 8]'], {}), '(vis0[i:i + 8, j:j + 8])\n', (4375, 4399), False, 'import cv2\n'), ((4694, 4725), 'cv2.dct', 'cv2.dct', (['vis0[i:i + 8, j:j + 8]'], {}), '(vis0[i:i + 8, j:j + 8])\n', (4701, 4725), False, 'import cv2\n'), ((4875, 4906), 'cv2.dct', 'cv2.dct', (['vis0[i:i + 8, j:j + 8]'], {}), '(vis0[i:i + 8, j:j + 8])\n', (4882, 4906), False, 'import cv2\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/detector.ipynb (unless otherwise specified).
__all__ = ['FormatDetector']
# Cell
import numpy as np
import pandas as pd
from typing import Any, Dict, Callable
from tqdm import tqdm
from .judge import FormatJudge
from .utils import PatternGenerator
# Cell
class FormatDetector:
    """Fit one FormatJudge per DataFrame column and score rows for format anomalies."""

    def __init__(self, skip: "list | None" = None):
        """
        Args:
            skip: column names to exclude from fitting and scoring.
        """
        # Bug fix: the original stored None directly, so `col in self.skip`
        # in fit()/detect() raised TypeError whenever skip was not given.
        self.skip = skip if skip is not None else []

    def fit(self, df: "pd.DataFrame", generator: "PatternGenerator | dict",
            n: int = 3, dim: int = 1):
        """Fit a FormatJudge for every non-skipped column of df.

        Args:
            df: data to profile.
            generator: a single PatternGenerator used for all columns, or a
                mapping column-name -> PatternGenerator (missing columns get a
                default PatternGenerator()).
            n: n-gram size forwarded to FormatJudge.
            dim: dimension forwarded to FormatJudge.
        """
        # Note: generator annotation is quoted; the original
        # `PatternGenerator or List['str': PatternGenerator]` was a nonsense
        # expression that only avoided crashing because `or` short-circuits.
        self.judges = {}
        self.df = df
        single_generator = isinstance(generator, PatternGenerator)
        with tqdm(total=len(self.df.columns)) as pbar:
            # The two near-identical branches of the original are merged; only
            # the per-column generator choice differs.
            for col in self.df.columns:
                if col in self.skip:
                    continue
                gen = generator if single_generator else generator.get(col, PatternGenerator())
                format_judge = FormatJudge(gen, n, dim)
                format_judge.fit(self.df[col].tolist())
                self.judges[col] = format_judge
                pbar.update(1)

    def detect(self, reduction: Callable = np.min, softmax: bool = True) -> "pd.DataFrame":
        """Score every row and return a copy of self.df with a 'p' score column.

        (Return annotation fixed: the original said dict but a DataFrame is
        returned.)

        Args:
            reduction: aggregates a row's per-column scores; np.ptp is used
                as-is, any other reduction r yields 1 - r(scores).
            softmax: softmax-normalize the per-column scores first.
        """
        scores = []
        with tqdm(total=len(self.df)) as pbar:
            for _, row in self.df.iterrows():
                tuple_score = [
                    np.mean(self.judges[col](row[col]))
                    for col in self.df.columns
                    if col not in self.skip
                ]
                if softmax:
                    exp_score = np.exp(tuple_score)
                    tuple_score = [score / sum(exp_score) for score in exp_score]
                reduced = reduction(tuple_score)
                scores.append(reduced if reduction == np.ptp else 1 - reduced)
                pbar.update(1)
        assessed_df = self.df.copy()
        assessed_df['p'] = scores
        return assessed_df
"numpy.exp"
] | [((2079, 2098), 'numpy.exp', 'np.exp', (['tuple_score'], {}), '(tuple_score)\n', (2085, 2098), True, 'import numpy as np\n')] |
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tensorflow as tf
import numpy as np
tf.compat.v1.enable_eager_execution()
def range_with_max(r, max_range):
    """Return tf.range(r) right-padded with -1 up to max_range elements.

    A non-positive r marks a padded duration entry and yields a single -1
    before the padding.
    """
    real_duration = tf.greater(r, 0.0)
    values = tf.cond(
        real_duration,
        lambda: tf.range(r),
        lambda: -1 * tf.ones(1, tf.float32),
    )
    pad_amount = max_range - tf.shape(values)[0]
    return tf.pad(values, [[0, pad_amount]], "CONSTANT", constant_values=-1)
def durations_range(duration, max_mel_len):
    """Turn one sample's per-symbol durations into a flat per-frame index vector.

    For each symbol with duration d, range_with_max emits 0..d-1 (frame
    positions within the symbol); the concatenation is padded or truncated to
    max_mel_len. Padded frames get index 149, the last row of the 150-entry
    positional table built by PositionalEncoding.
    """
    # for one sample
    # cur_range = tf.ragged.range(tf.ones(tf.shape(duration)[0]), duration+tf.ones(tf.shape(duration)[0])).flat_values
    max_range = tf.cast(tf.reduce_max(duration), tf.int32)
    cur_range = tf.map_fn(
        fn=lambda r: range_with_max(r, max_range), elems=duration
    )  # input_length x max_range
    cur_range = tf.reshape(cur_range, [-1])
    # drop the -1 padding entries produced by range_with_max
    # mask = tf.cast(cur_range, dtype=tf.bool)
    mask = tf.greater(cur_range, -1)
    cur_range = tf.boolean_mask(cur_range, mask)
    # pad to max_mel_len in batch (clamped so it is never negative)
    pad_num = tf.cond(
        tf.greater(max_mel_len - tf.shape(cur_range)[0], 0),
        lambda: max_mel_len - tf.shape(cur_range)[0],
        lambda: 0,
    )
    paddings = [[0, pad_num]]
    # Pad with 149 (the padding row of the positional table) or truncate.
    return tf.cond(
        tf.greater(max_mel_len - tf.shape(cur_range)[0], 0),
        lambda: tf.pad(cur_range, paddings, "CONSTANT", constant_values=149),
        lambda: cur_range[:max_mel_len],
    )
def get_angles(pos, i, d_model):
    """Angle arguments for the sinusoidal positional encoding.

    Computes pos * 1/10000^(2*floor(i/2)/d_model), the rate from
    "Attention Is All You Need"; broadcasting over array inputs.
    """
    inv_freq = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
    return pos * inv_freq
def positional_encoding(position, d_model):
    """Build the sinusoidal positional-encoding table.

    Returns a float32 tensor of shape (1, position, d_model): sine on even
    feature indices, cosine on odd ones.
    """
    pos_column = np.arange(position)[:, np.newaxis]
    dim_row = np.arange(d_model)[np.newaxis, :]
    table = get_angles(pos_column, dim_row, d_model)
    # sin on even indices (2i), cos on odd indices (2i+1)
    table[:, 0::2] = np.sin(table[:, 0::2])
    table[:, 1::2] = np.cos(table[:, 1::2])
    return tf.cast(table[np.newaxis, ...], dtype=tf.float32)
class PositionalEncoding:
    """Look up per-frame sinusoidal position embeddings from symbol durations."""

    def __init__(self, embed_dim):
        """
        Args:
            embed_dim: int, encoder_output vector size corresponding to one phoneme/char
        """
        super(PositionalEncoding, self).__init__()
        # Precompute a fixed table of n positional vectors; the last row
        # (index 149) serves as the padding entry used by durations_range.
        n = 150
        pos_encoding = positional_encoding(n, embed_dim)
        self.pos_encoding_table = tf.reshape(pos_encoding[0], (n, embed_dim))

    def __call__(self, durations, max_mel_len=None):
        # durations - durations in frames (batch_size x seq_len)
        # expand every duration into per-frame position indices via range()
        # (translated from the original Russian comment)
        # When no target length is given, use the longest total duration in the batch.
        if max_mel_len is not None:
            max_mel_len = tf.cast(max_mel_len, tf.int32)
        else:
            max_mel_len = tf.cast(
                tf.reduce_max(tf.reduce_sum(durations, axis=1)), tf.int32
            )
        ranges = tf.map_fn(
            fn=lambda t: durations_range(t, max_mel_len), elems=durations
        )
        return tf.nn.embedding_lookup(
            self.pos_encoding_table, tf.cast(ranges, tf.int32)
        )
d = 100
p = PositionalEncoding(32)

# Demo input: two duration sequences (frames per symbol), each zero-padded to
# a common length of 86 entries. Values are identical to the original literal.
_durations_a = [
    0, 3, 2, 3, 1, 4, 1, 2, 3, 3, 2, 2, 3, 1, 2, 2, 2, 3, 6, 6, 3, 5, 1, 2,
    1, 2, 1, 2, 2, 2, 3, 3, 2, 1, 3, 1, 1, 3, 3, 2, 2, 6, 4, 2, 5, 7, 2,
] + [0] * 39
_durations_b = [
    4, 1, 1, 3, 1, 1, 2, 3, 2, 3, 4, 2, 4, 5, 3, 2, 3, 1, 1, 1, 1, 1, 2, 2,
    2, 3, 1, 2, 3, 2, 1, 1, 1, 2, 3, 1, 2, 5, 2, 2, 3, 4, 3, 1, 2, 3, 3, 6,
    7, 1,
] + [0] * 36
durations = tf.Variable([_durations_a, _durations_b], dtype=tf.float32)
res = p(durations, 222)
tf.print(res, summarize=-1)
| [
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.boolean_mask",
"tensorflow.Variable",
"tensorflow.ones",
"numpy.sin",
"tensorflow.reduce_sum",
"tensorflow.reduce_max",
"tensorflow.range",
"numpy.cos",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.reshape",
"tensorflow.print",
... | [((1161, 1198), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (1196, 1198), True, 'import tensorflow as tf\n'), ((4446, 5026), 'tensorflow.Variable', 'tf.Variable', (['[[0, 3, 2, 3, 1, 4, 1, 2, 3, 3, 2, 2, 3, 1, 2, 2, 2, 3, 6, 6, 3, 5, 1, 2, 1,\n 2, 1, 2, 2, 2, 3, 3, 2, 1, 3, 1, 1, 3, 3, 2, 2, 6, 4, 2, 5, 7, 2, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 1, 1, 3, 1, 1, 2, 3, 2, 3, \n 4, 2, 4, 5, 3, 2, 3, 1, 1, 1, 1, 1, 2, 2, 2, 3, 1, 2, 3, 2, 1, 1, 1, 2,\n 3, 1, 2, 5, 2, 2, 3, 4, 3, 1, 2, 3, 3, 6, 7, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0]]'], {'dtype': 'tf.float32'}), '([[0, 3, 2, 3, 1, 4, 1, 2, 3, 3, 2, 2, 3, 1, 2, 2, 2, 3, 6, 6, 3,\n 5, 1, 2, 1, 2, 1, 2, 2, 2, 3, 3, 2, 1, 3, 1, 1, 3, 3, 2, 2, 6, 4, 2, 5,\n 7, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 1, 1, 3, 1, 1, \n 2, 3, 2, 3, 4, 2, 4, 5, 3, 2, 3, 1, 1, 1, 1, 1, 2, 2, 2, 3, 1, 2, 3, 2,\n 1, 1, 1, 2, 3, 1, 2, 5, 2, 2, 3, 4, 3, 1, 2, 3, 3, 6, 7, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)\n', (4457, 5026), True, 'import tensorflow as tf\n'), ((7142, 7169), 'tensorflow.print', 'tf.print', (['res'], {'summarize': '(-1)'}), '(res, summarize=-1)\n', (7150, 7169), True, 'import tensorflow as tf\n'), ((1485, 1547), 'tensorflow.pad', 'tf.pad', (['range_values', 'paddings', '"""CONSTANT"""'], {'constant_values': '(-1)'}), "(range_values, paddings, 'CONSTANT', constant_values=-1)\n", (1491, 1547), True, 'import tensorflow as tf\n'), ((1936, 1963), 'tensorflow.reshape', 'tf.reshape', (['cur_range', '[-1]'], {}), '(cur_range, [-1])\n', (1946, 1963), True, 'import tensorflow as tf\n'), ((2039, 2064), 'tensorflow.greater', 
'tf.greater', (['cur_range', '(-1)'], {}), '(cur_range, -1)\n', (2049, 2064), True, 'import tensorflow as tf\n'), ((2081, 2113), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['cur_range', 'mask'], {}), '(cur_range, mask)\n', (2096, 2113), True, 'import tensorflow as tf\n'), ((2932, 2959), 'numpy.sin', 'np.sin', (['angle_rads[:, 0::2]'], {}), '(angle_rads[:, 0::2])\n', (2938, 2959), True, 'import numpy as np\n'), ((3037, 3064), 'numpy.cos', 'np.cos', (['angle_rads[:, 1::2]'], {}), '(angle_rads[:, 1::2])\n', (3043, 3064), True, 'import numpy as np\n'), ((3125, 3164), 'tensorflow.cast', 'tf.cast', (['pos_encoding'], {'dtype': 'tf.float32'}), '(pos_encoding, dtype=tf.float32)\n', (3132, 3164), True, 'import tensorflow as tf\n'), ((1271, 1289), 'tensorflow.greater', 'tf.greater', (['r', '(0.0)'], {}), '(r, 0.0)\n', (1281, 1289), True, 'import tensorflow as tf\n'), ((1758, 1781), 'tensorflow.reduce_max', 'tf.reduce_max', (['duration'], {}), '(duration)\n', (1771, 1781), True, 'import tensorflow as tf\n'), ((3663, 3706), 'tensorflow.reshape', 'tf.reshape', (['pos_encoding[0]', '(n, embed_dim)'], {}), '(pos_encoding[0], (n, embed_dim))\n', (3673, 3706), True, 'import tensorflow as tf\n'), ((1299, 1310), 'tensorflow.range', 'tf.range', (['r'], {}), '(r)\n', (1307, 1310), True, 'import tensorflow as tf\n'), ((2439, 2499), 'tensorflow.pad', 'tf.pad', (['cur_range', 'paddings', '"""CONSTANT"""'], {'constant_values': '(149)'}), "(cur_range, paddings, 'CONSTANT', constant_values=149)\n", (2445, 2499), True, 'import tensorflow as tf\n'), ((2771, 2790), 'numpy.arange', 'np.arange', (['position'], {}), '(position)\n', (2780, 2790), True, 'import numpy as np\n'), ((2807, 2825), 'numpy.arange', 'np.arange', (['d_model'], {}), '(d_model)\n', (2816, 2825), True, 'import numpy as np\n'), ((4005, 4035), 'tensorflow.cast', 'tf.cast', (['max_mel_len', 'tf.int32'], {}), '(max_mel_len, tf.int32)\n', (4012, 4035), True, 'import tensorflow as tf\n'), ((4361, 4386), 'tensorflow.cast', 
'tf.cast', (['ranges', 'tf.int32'], {}), '(ranges, tf.int32)\n', (4368, 4386), True, 'import tensorflow as tf\n'), ((1325, 1347), 'tensorflow.ones', 'tf.ones', (['(1)', 'tf.float32'], {}), '(1, tf.float32)\n', (1332, 1347), True, 'import tensorflow as tf\n'), ((2638, 2657), 'numpy.float32', 'np.float32', (['d_model'], {}), '(d_model)\n', (2648, 2657), True, 'import numpy as np\n'), ((1446, 1468), 'tensorflow.shape', 'tf.shape', (['range_values'], {}), '(range_values)\n', (1454, 1468), True, 'import tensorflow as tf\n'), ((2204, 2223), 'tensorflow.shape', 'tf.shape', (['cur_range'], {}), '(cur_range)\n', (2212, 2223), True, 'import tensorflow as tf\n'), ((2262, 2281), 'tensorflow.shape', 'tf.shape', (['cur_range'], {}), '(cur_range)\n', (2270, 2281), True, 'import tensorflow as tf\n'), ((2395, 2414), 'tensorflow.shape', 'tf.shape', (['cur_range'], {}), '(cur_range)\n', (2403, 2414), True, 'import tensorflow as tf\n'), ((4115, 4147), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['durations'], {'axis': '(1)'}), '(durations, axis=1)\n', (4128, 4147), True, 'import tensorflow as tf\n')] |
"""
msg_map
- messages type for map of the world
part of mavsim_python
- Beard & McLain, PUP, 2012
- Last update:
4/10/2019 - RWB
"""
import numpy as np
import parameters.planner_parameters as PLAN
class msgMap:
    """Message describing the world map: a square city laid out on a
    uniform grid of buildings with randomized heights.

    All dimensions come from ``parameters.planner_parameters`` (PLAN).
    """

    def __init__(self):
        # flag raised whenever the map is regenerated
        self.flag_map_changed = 0
        # the city occupies a (width x width) square area
        self.city_width = PLAN.city_width
        # number of blocks along one side of the city grid
        self.num_city_blocks = PLAN.num_blocks
        # size of one grid cell; streets take PLAN.street_width of it,
        # buildings take the remainder
        block_size = PLAN.city_width / PLAN.num_blocks
        self.street_width = block_size * PLAN.street_width
        # tallest building allowed
        self.building_max_height = PLAN.building_height
        # one random height per building in the (blocks x blocks) grid
        self.building_height = PLAN.building_height * np.random.rand(
            PLAN.num_blocks, PLAN.num_blocks)
        # every building shares the same footprint width
        self.building_width = block_size * (1 - PLAN.street_width)
        # north coordinate of each building center: the middle of its cell
        self.building_north = np.zeros((1, PLAN.num_blocks))
        for col in range(PLAN.num_blocks):
            self.building_north[0, col] = 0.5 * block_size * (2 * col + 1)
        # the grid is square, so east centers mirror the north centers
        self.building_east = np.copy(self.building_north)
| [
"numpy.copy",
"numpy.zeros",
"numpy.random.rand"
] | [((1099, 1129), 'numpy.zeros', 'np.zeros', (['(1, PLAN.num_blocks)'], {}), '((1, PLAN.num_blocks))\n', (1107, 1129), True, 'import numpy as np\n'), ((1344, 1372), 'numpy.copy', 'np.copy', (['self.building_north'], {}), '(self.building_north)\n', (1351, 1372), True, 'import numpy as np\n'), ((828, 876), 'numpy.random.rand', 'np.random.rand', (['PLAN.num_blocks', 'PLAN.num_blocks'], {}), '(PLAN.num_blocks, PLAN.num_blocks)\n', (842, 876), True, 'import numpy as np\n')] |
# Copyright (c) 2020-2021 impersonator.org authors (<NAME> and <NAME>). All rights reserved.
import unittest
import numpy as np
import torch
from tqdm import tqdm
from iPERCore.tools.human_digitalizer import deformers
from iPERCore.tools.human_digitalizer import renders
from iPERCore.tools.human_digitalizer.bodynets import SMPL, SMPLH
from iPERCore.tools.utils.filesio.persistence import load_pickle_file
from iPERCore.tools.utils.visualizers.visdom_visualizer import VisdomVisualizer
# Visdom dashboard used below to display rendered source / animated frames.
visualizer = VisdomVisualizer(
    env='test_deformers',
    ip='http://10.10.10.100', port=31102
)
# Square render resolution in pixels.
IMAGE_SIZE = 512
# NOTE(review): hard-coded CUDA device — this module cannot run on CPU-only hosts.
device = torch.device("cuda:0")
# Body models: plain SMPL, and SMPL+H (presumably the hand-articulated variant — confirm).
smpl = SMPL(model_path="assets/checkpoints/pose3d/smpl_model.pkl").to(device)
smplh = SMPLH(model_path="./assets/checkpoints/pose3d/smpl_model_with_hand_v2.pkl").to(device)
# Renderer with ambient lighting; texs holds the default per-part color
# textures with a leading batch dimension added via [None].
render = renders.SMPLRenderer(image_size=IMAGE_SIZE).to(device)
render.set_ambient_light()
texs = render.color_textures()[None].to(device)
def cloth_link_animate_visual(links_ids, cams, pose, shape, ref_smpl_path):
    """Render the source SMPL body, then animate it with a reference pose sequence.

    Args:
        links_ids: cloth-body link ids forwarded to ``smpl.forward``.
        cams: source camera parameters (array-like, converted to float tensor).
        pose: source SMPL pose parameters (array-like).
        shape: SMPL shape parameters (array-like).
        ref_smpl_path (str): pickle file holding reference "cams"/"pose"/"shape".

    Returns:
        None. Frames are pushed to the module-level visdom visualizer.
    """
    global smpl, visualizer, render, device, texs

    def _to_device(value):
        # array-like -> float tensor on the render device
        return torch.tensor(value).float().to(device)

    cams = _to_device(cams)
    pose = _to_device(pose)
    shape = _to_device(shape)

    # render and show the static source body once
    src_verts, _, _ = smpl.forward(shape, pose, offsets=0, links_ids=links_ids, get_skin=True)
    src_img, _ = render.render(cams, src_verts, texs)
    visualizer.vis_named_img("src_img", src_img)

    ref_smpl_info = load_pickle_file(ref_smpl_path)
    ref_cams = _to_device(ref_smpl_info["cams"])
    ref_poses = _to_device(ref_smpl_info["pose"])
    ref_shapes = _to_device(ref_smpl_info["shape"])

    # re-pose the source body with every reference frame and show each render
    for frame_idx in tqdm(range(ref_poses.shape[0])):
        ref_pose = ref_poses[frame_idx:frame_idx + 1]
        animate_verts, _, _ = smpl.forward(shape, ref_pose, offsets=0,
                                            links_ids=links_ids, get_skin=True)
        animate_img, _ = render.render(cams, animate_verts, texs)
        visualizer.vis_named_img("animate_img", animate_img)
class TestDeformers(unittest.TestCase):
    """Smoke test for the cloth-SMPL link deformer on a sample clip."""

    def test_01_clothlinks_deformer(self):
        # NOTE(review): paths below are machine-specific absolute fixtures.
        run_device = torch.device("cuda:0")

        src_path = "/root/projects/iPERDance/iPERDanceCore-dev/tests/debug/primitives/skirts/processed/images/skirts.png"
        smpl_path = "/root/projects/iPERDance/iPERDanceCore-dev/tests/debug/primitives/skirts/processed/vid_info.pkl"
        smpls_data = load_pickle_file(smpl_path)["processed_pose3d"]
        ref_smpl_pkl = "/p300/projects/iPERDance/experiments/primitives/Av37667655_2.mp4/processed/pose_shape.pkl"

        cloth_link = deformers.ClothSmplLinkDeformer(
            cloth_parse_ckpt_path="./assets/checkpoints/mattors/exp-schp-lip.pth",
            smpl_model="assets/checkpoints/pose3d/smpl_model.pkl",
            part_path="assets/configs/pose3d/smpl_part_info.json",
            device=run_device
        )

        # pack cams/pose/shape into a single (N, 85) smpl-parameter matrix
        init_smpls = np.concatenate(
            [smpls_data["cams"], smpls_data["pose"], smpls_data["shape"]], axis=1)
        has_linked, linked_ids = cloth_link.find_links(src_path, init_smpls)
        print(f"has_linked = {has_linked}")

        if not has_linked:
            return
        cloth_link_animate_visual(
            linked_ids,
            cams=smpls_data["cams"],
            pose=smpls_data["pose"],
            shape=smpls_data["shape"],
            ref_smpl_path=ref_smpl_pkl,
        )
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"iPERCore.tools.human_digitalizer.bodynets.SMPLH",
"iPERCore.tools.human_digitalizer.deformers.ClothSmplLinkDeformer",
"iPERCore.tools.human_digitalizer.bodynets.SMPL",
"torch.tensor",
"numpy.concatenate",
"unittest.main",
"iPERCore.tools.utils.filesio.persistence.load_pickle_file",
"iPERCore.tools.ut... | [((506, 582), 'iPERCore.tools.utils.visualizers.visdom_visualizer.VisdomVisualizer', 'VisdomVisualizer', ([], {'env': '"""test_deformers"""', 'ip': '"""http://10.10.10.100"""', 'port': '(31102)'}), "(env='test_deformers', ip='http://10.10.10.100', port=31102)\n", (522, 582), False, 'from iPERCore.tools.utils.visualizers.visdom_visualizer import VisdomVisualizer\n'), ((621, 643), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (633, 643), False, 'import torch\n'), ((1589, 1620), 'iPERCore.tools.utils.filesio.persistence.load_pickle_file', 'load_pickle_file', (['ref_smpl_path'], {}), '(ref_smpl_path)\n', (1605, 1620), False, 'from iPERCore.tools.utils.filesio.persistence import load_pickle_file\n'), ((3630, 3645), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3643, 3645), False, 'import unittest\n'), ((651, 710), 'iPERCore.tools.human_digitalizer.bodynets.SMPL', 'SMPL', ([], {'model_path': '"""assets/checkpoints/pose3d/smpl_model.pkl"""'}), "(model_path='assets/checkpoints/pose3d/smpl_model.pkl')\n", (655, 710), False, 'from iPERCore.tools.human_digitalizer.bodynets import SMPL, SMPLH\n'), ((730, 805), 'iPERCore.tools.human_digitalizer.bodynets.SMPLH', 'SMPLH', ([], {'model_path': '"""./assets/checkpoints/pose3d/smpl_model_with_hand_v2.pkl"""'}), "(model_path='./assets/checkpoints/pose3d/smpl_model_with_hand_v2.pkl')\n", (735, 805), False, 'from iPERCore.tools.human_digitalizer.bodynets import SMPL, SMPLH\n'), ((827, 870), 'iPERCore.tools.human_digitalizer.renders.SMPLRenderer', 'renders.SMPLRenderer', ([], {'image_size': 'IMAGE_SIZE'}), '(image_size=IMAGE_SIZE)\n', (847, 870), False, 'from iPERCore.tools.human_digitalizer import renders\n'), ((2278, 2300), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2290, 2300), False, 'import torch\n'), ((2749, 2991), 'iPERCore.tools.human_digitalizer.deformers.ClothSmplLinkDeformer', 'deformers.ClothSmplLinkDeformer', ([], {'cloth_parse_ckpt_path': 
'"""./assets/checkpoints/mattors/exp-schp-lip.pth"""', 'smpl_model': '"""assets/checkpoints/pose3d/smpl_model.pkl"""', 'part_path': '"""assets/configs/pose3d/smpl_part_info.json"""', 'device': 'device'}), "(cloth_parse_ckpt_path=\n './assets/checkpoints/mattors/exp-schp-lip.pth', smpl_model=\n 'assets/checkpoints/pose3d/smpl_model.pkl', part_path=\n 'assets/configs/pose3d/smpl_part_info.json', device=device)\n", (2780, 2991), False, 'from iPERCore.tools.human_digitalizer import deformers\n'), ((3057, 3147), 'numpy.concatenate', 'np.concatenate', (["[smpls_data['cams'], smpls_data['pose'], smpls_data['shape']]"], {'axis': '(1)'}), "([smpls_data['cams'], smpls_data['pose'], smpls_data['shape']\n ], axis=1)\n", (3071, 3147), True, 'import numpy as np\n'), ((2563, 2590), 'iPERCore.tools.utils.filesio.persistence.load_pickle_file', 'load_pickle_file', (['smpl_path'], {}), '(smpl_path)\n', (2579, 2590), False, 'from iPERCore.tools.utils.filesio.persistence import load_pickle_file\n'), ((1230, 1248), 'torch.tensor', 'torch.tensor', (['cams'], {}), '(cams)\n', (1242, 1248), False, 'import torch\n'), ((1279, 1297), 'torch.tensor', 'torch.tensor', (['pose'], {}), '(pose)\n', (1291, 1297), False, 'import torch\n'), ((1329, 1348), 'torch.tensor', 'torch.tensor', (['shape'], {}), '(shape)\n', (1341, 1348), False, 'import torch\n'), ((1636, 1671), 'torch.tensor', 'torch.tensor', (["ref_smpl_info['cams']"], {}), "(ref_smpl_info['cams'])\n", (1648, 1671), False, 'import torch\n'), ((1707, 1742), 'torch.tensor', 'torch.tensor', (["ref_smpl_info['pose']"], {}), "(ref_smpl_info['pose'])\n", (1719, 1742), False, 'import torch\n'), ((1779, 1815), 'torch.tensor', 'torch.tensor', (["ref_smpl_info['shape']"], {}), "(ref_smpl_info['shape'])\n", (1791, 1815), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.